Diffstat (limited to 'libs/ardour/lv2')
-rw-r--r--  libs/ardour/lv2/lv2plug.in/ns/ext/atom/atom.h   259
-rw-r--r--  libs/ardour/lv2/lv2plug.in/ns/ext/atom/forge.h  544
-rw-r--r--  libs/ardour/lv2/lv2plug.in/ns/ext/atom/util.h   424
3 files changed, 1227 insertions, 0 deletions
diff --git a/libs/ardour/lv2/lv2plug.in/ns/ext/atom/atom.h b/libs/ardour/lv2/lv2plug.in/ns/ext/atom/atom.h
new file mode 100644
index 0000000000..9bce7ce7aa
--- /dev/null
+++ b/libs/ardour/lv2/lv2plug.in/ns/ext/atom/atom.h
@@ -0,0 +1,259 @@
+/*
+ Copyright 2008-2012 David Robillard <http://drobilla.net>
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/**
+ @file atom.h C header for the LV2 Atom extension
+ <http://lv2plug.in/ns/ext/atom>.
+*/
+
+#ifndef LV2_ATOM_H
+#define LV2_ATOM_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#define LV2_ATOM_URI "http://lv2plug.in/ns/ext/atom"
+
+#define LV2_ATOM__Atom LV2_ATOM_URI "#Atom"
+#define LV2_ATOM__AtomPort LV2_ATOM_URI "#AtomPort"
+#define LV2_ATOM__AudioFrames LV2_ATOM_URI "#AudioFrames"
+#define LV2_ATOM__Beats LV2_ATOM_URI "#Beats"
+#define LV2_ATOM__Blank LV2_ATOM_URI "#Blank"
+#define LV2_ATOM__Bool LV2_ATOM_URI "#Bool"
+#define LV2_ATOM__Double LV2_ATOM_URI "#Double"
+#define LV2_ATOM__Event LV2_ATOM_URI "#Event"
+#define LV2_ATOM__Float LV2_ATOM_URI "#Float"
+#define LV2_ATOM__Int32 LV2_ATOM_URI "#Int32"
+#define LV2_ATOM__Int64 LV2_ATOM_URI "#Int64"
+#define LV2_ATOM__Literal LV2_ATOM_URI "#Literal"
+#define LV2_ATOM__MessagePort LV2_ATOM_URI "#MessagePort"
+#define LV2_ATOM__Number LV2_ATOM_URI "#Number"
+#define LV2_ATOM__Object LV2_ATOM_URI "#Object"
+#define LV2_ATOM__Path LV2_ATOM_URI "#Path"
+#define LV2_ATOM__Property LV2_ATOM_URI "#Property"
+#define LV2_ATOM__Resource LV2_ATOM_URI "#Resource"
+#define LV2_ATOM__Sequence LV2_ATOM_URI "#Sequence"
+#define LV2_ATOM__String LV2_ATOM_URI "#String"
+#define LV2_ATOM__TimeUnit LV2_ATOM_URI "#TimeUnit"
+#define LV2_ATOM__Tuple LV2_ATOM_URI "#Tuple"
+#define LV2_ATOM__URI LV2_ATOM_URI "#URI"
+#define LV2_ATOM__URID LV2_ATOM_URI "#URID"
+#define LV2_ATOM__ValuePort LV2_ATOM_URI "#ValuePort"
+#define LV2_ATOM__Vector LV2_ATOM_URI "#Vector"
+#define LV2_ATOM__beatTime LV2_ATOM_URI "#beatTime"
+#define LV2_ATOM__bufferType LV2_ATOM_URI "#bufferType"
+#define LV2_ATOM__eventTransfer LV2_ATOM_URI "#eventTransfer"
+#define LV2_ATOM__frameTime LV2_ATOM_URI "#frameTime"
+#define LV2_ATOM__supports LV2_ATOM_URI "#supports"
+#define LV2_ATOM__timeUnit LV2_ATOM_URI "#timeUnit"
+
+#define LV2_ATOM_REFERENCE_TYPE 0
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** This expression will fail to compile if double does not fit in 64 bits. */
+typedef char lv2_atom_assert_double_fits_in_64_bits[
+ ((sizeof(double) <= sizeof(uint64_t)) * 2) - 1];
+
+/**
+ Return a pointer to the contents of an Atom. The "contents" of an atom
+ is the data past the complete type-specific header.
+ @param type The type of the atom, e.g. LV2_Atom_String.
+ @param atom A variable-sized atom.
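+
+ For example, a minimal sketch of reading the text of a string atom
+ (assuming `str` points to a valid LV2_Atom_String):
+ @code
+ const char* text = (const char*)LV2_ATOM_CONTENTS(LV2_Atom_String, str);
+ @endcode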
+*/
+#define LV2_ATOM_CONTENTS(type, atom) \
+ ((void*)((uint8_t*)(atom) + sizeof(type)))
+
+/**
+ Return a pointer to the body of an Atom. The "body" of an atom is the
+ data just past the LV2_Atom head (i.e. the same offset for all types).
+*/
+#define LV2_ATOM_BODY(atom) LV2_ATOM_CONTENTS(LV2_Atom, atom)
+
+/** The header of an atom:Atom. */
+typedef struct {
+ uint32_t size; /**< Size in bytes, not including type and size. */
+ uint32_t type; /**< Type of this atom (mapped URI). */
+} LV2_Atom;
+
+/** An atom:Int32 or atom:Bool. May be cast to LV2_Atom. */
+typedef struct {
+ LV2_Atom atom; /**< Atom header. */
+ int32_t body; /**< Integer value. */
+} LV2_Atom_Int32;
+
+/** An atom:Int64. May be cast to LV2_Atom. */
+typedef struct {
+ LV2_Atom atom; /**< Atom header. */
+ int64_t body; /**< Integer value. */
+} LV2_Atom_Int64;
+
+/** An atom:Float. May be cast to LV2_Atom. */
+typedef struct {
+ LV2_Atom atom; /**< Atom header. */
+ float body; /**< Floating point value. */
+} LV2_Atom_Float;
+
+/** An atom:Double. May be cast to LV2_Atom. */
+typedef struct {
+ LV2_Atom atom; /**< Atom header. */
+ double body; /**< Floating point value. */
+} LV2_Atom_Double;
+
+/** An atom:Bool. May be cast to LV2_Atom. */
+typedef LV2_Atom_Int32 LV2_Atom_Bool;
+
+/** An atom:URID. May be cast to LV2_Atom. */
+typedef struct {
+ LV2_Atom atom; /**< Atom header. */
+ uint32_t body; /**< URID. */
+} LV2_Atom_URID;
+
+/** An atom:String. May be cast to LV2_Atom. */
+typedef struct {
+ LV2_Atom atom; /**< Atom header. */
+ /* Contents (a null-terminated UTF-8 string) follow here. */
+} LV2_Atom_String;
+
+/** The body of an atom:Literal. */
+typedef struct {
+ uint32_t datatype; /**< Datatype URID. */
+ uint32_t lang; /**< Language URID. */
+ /* Contents (a null-terminated UTF-8 string) follow here. */
+} LV2_Atom_Literal_Body;
+
+/** An atom:Literal. May be cast to LV2_Atom. */
+typedef struct {
+ LV2_Atom atom; /**< Atom header. */
+ LV2_Atom_Literal_Body body; /**< Body. */
+} LV2_Atom_Literal;
+
+/** An atom:Tuple. May be cast to LV2_Atom. */
+typedef struct {
+ LV2_Atom atom; /**< Atom header. */
+ /* Contents (a series of complete atoms) follow here. */
+} LV2_Atom_Tuple;
+
+/** The body of an atom:Vector. */
+typedef struct {
+ uint32_t elem_count; /**< The number of elements in the vector */
+ uint32_t elem_type; /**< The type of each element in the vector */
+ /* Contents (a series of packed atom bodies) follow here. */
+} LV2_Atom_Vector_Body;
+
+/** An atom:Vector. May be cast to LV2_Atom. */
+typedef struct {
+ LV2_Atom atom; /**< Atom header. */
+ LV2_Atom_Vector_Body body; /**< Body. */
+} LV2_Atom_Vector;
+
+/** The body of an atom:Property (e.g. in an atom:Object). */
+typedef struct {
+ uint32_t key; /**< Key (predicate) (mapped URI). */
+ uint32_t context; /**< Context URID (may be, and generally is, 0). */
+ LV2_Atom value; /**< Value atom header. */
+ /* Value atom body follows here. */
+} LV2_Atom_Property_Body;
+
+/** An atom:Property. May be cast to LV2_Atom. */
+typedef struct {
+ LV2_Atom atom; /**< Atom header. */
+ LV2_Atom_Property_Body body; /**< Body. */
+} LV2_Atom_Property;
+
+/** The body of an atom:Object. May be cast to LV2_Atom. */
+typedef struct {
+ uint32_t id; /**< URID (atom:Resource) or blank ID (atom:Blank). */
+ uint32_t otype; /**< Type URID (same as rdf:type, for fast dispatch). */
+ /* Contents (a series of property bodies) follow here. */
+} LV2_Atom_Object_Body;
+
+/** An atom:Object. May be cast to LV2_Atom. */
+typedef struct {
+ LV2_Atom atom; /**< Atom header. */
+ LV2_Atom_Object_Body body; /**< Body. */
+} LV2_Atom_Object;
+
+/** The header of an atom:Event. Note this type is NOT an LV2_Atom. */
+typedef struct {
+ /** Time stamp. Which type is valid is determined by context. */
+ union {
+ int64_t frames; /**< Time in audio frames. */
+ double beats; /**< Time in beats. */
+ } time;
+ LV2_Atom body; /**< Event body atom header. */
+ /* Body atom contents follow here. */
+} LV2_Atom_Event;
+
+/**
+ The body of an atom:Sequence (a sequence of events).
+
+ The unit field is either a URID that describes an appropriate time stamp
+ type, or may be 0 where a default stamp type is known. For
+ LV2_Descriptor::run(), the default stamp type is atom:AudioFrames, i.e.
+ the frames member of the LV2_Atom_Event time stamp.
+
+ The contents of a sequence is a series of LV2_Atom_Event, each aligned
+ to 64-bits, e.g.:
+ <pre>
+ | Event 1 (size 6) | Event 2
+ | | | | | | | | |
+ | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
+ |FRAMES |SUBFRMS|TYPE |SIZE |DATADATADATAPAD|FRAMES |SUBFRMS|...
+ </pre>
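+
+ For example, a sketch of walking the events by hand, assuming `body` points
+ to the LV2_Atom_Sequence_Body and `size` is the sequence's body size in
+ bytes (util.h provides iterator helpers for this):
+ @code
+ LV2_Atom_Event* ev = (LV2_Atom_Event*)(body + 1);
+ while ((uint8_t*)ev < (uint8_t*)body + size) {
+ // Use ev->time.frames (or ev->time.beats) and ev->body here...
+ ev = (LV2_Atom_Event*)((uint8_t*)ev + sizeof(LV2_Atom_Event)
+ + ((ev->body.size + 7U) & ~7U));
+ }
+ @endcode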
+*/
+typedef struct {
+ uint32_t unit; /**< URID of unit of event time stamps. */
+ uint32_t pad; /**< Currently unused. */
+ /* Contents (a series of events) follow here. */
+} LV2_Atom_Sequence_Body;
+
+/** An atom:Sequence. */
+typedef struct {
+ LV2_Atom atom; /**< Atom header. */
+ LV2_Atom_Sequence_Body body; /**< Body. */
+} LV2_Atom_Sequence;
+
+/**
+ The contents of an atom:AtomPort buffer.
+
+ This contains a pointer to an Atom, which is the data to be
+ processed/written, as well as additional metadata. This struct may be
+ augmented in the future to add more metadata fields as they become
+ necessary. The initial version of this struct contains data, size, and
+ capacity. Implementations MUST check that any other fields they wish to use
+ are actually present by comparing the size with the offset of that field,
+ e.g.:
+
+ @code
+ if (offsetof(LV2_Atom_Port_Buffer, field) < buf->size) {
+ do_stuff_with(buf->field);
+ }
+ @endcode
+*/
+typedef struct {
+ LV2_Atom* data; /**< Pointer to data. */
+ uint32_t size; /**< Total size of this struct. */
+ uint32_t capacity; /**< Available space for data body. */
+} LV2_Atom_Port_Buffer;
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* LV2_ATOM_H */
diff --git a/libs/ardour/lv2/lv2plug.in/ns/ext/atom/forge.h b/libs/ardour/lv2/lv2plug.in/ns/ext/atom/forge.h
new file mode 100644
index 0000000000..b9dd51ca28
--- /dev/null
+++ b/libs/ardour/lv2/lv2plug.in/ns/ext/atom/forge.h
@@ -0,0 +1,544 @@
+/*
+ Copyright 2008-2012 David Robillard <http://drobilla.net>
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/**
+ @file forge.h An API for constructing LV2 atoms.
+
+ This file provides an API for constructing Atoms which makes it relatively
+ simple to build nested atoms of arbitrary complexity without requiring
+ dynamic memory allocation.
+
+ The API is based on successively appending the appropriate pieces to build a
+ complete Atom. The size of containers is automatically updated. Functions
+ that begin a container return (via their frame argument) a stack frame which
+ must be popped when the container is finished.
+
+ All output is written to a user-provided buffer or sink function. This
+ makes it possible to create atoms on the stack, on the heap, in LV2
+ port buffers, in a ringbuffer, or elsewhere, all using the same API.
+
+ This entire API is realtime safe if used with a buffer or a realtime safe
+ sink, except lv2_atom_forge_init() which is only realtime safe if the URI
+ map function is.
+
+ Note these functions are all static inline; do not take their address.
+
+ This header is non-normative; it is provided for convenience.
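+
+ For example, a minimal sketch of writing a single Int32 to a stack buffer
+ (assuming `map` is a valid LV2_URID_Map*):
+ @code
+ uint8_t buf[256];
+ LV2_Atom_Forge forge;
+ lv2_atom_forge_init(&forge, map);
+ lv2_atom_forge_set_buffer(&forge, buf, sizeof(buf));
+ lv2_atom_forge_int32(&forge, 42); // buf now holds an atom:Int32
+ @endcode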
+*/
+
+#ifndef LV2_ATOM_FORGE_H
+#define LV2_ATOM_FORGE_H
+
+#include <assert.h>
+
+#include "lv2/lv2plug.in/ns/ext/atom/atom.h"
+#include "lv2/lv2plug.in/ns/ext/atom/util.h"
+#include "lv2/lv2plug.in/ns/ext/urid/urid.h"
+
+#ifdef __cplusplus
+extern "C" {
+#else
+# include <stdbool.h>
+#endif
+
+/** Handle for LV2_Atom_Forge_Sink. */
+typedef void* LV2_Atom_Forge_Sink_Handle;
+
+/** Sink function for writing output. See lv2_atom_forge_set_sink(). */
+typedef void* (*LV2_Atom_Forge_Sink)(LV2_Atom_Forge_Sink_Handle handle,
+ const void* buf,
+ uint32_t size);
+
+/** A stack frame used for keeping track of nested Atom containers. */
+typedef struct _LV2_Atom_Forge_Frame {
+ struct _LV2_Atom_Forge_Frame* parent;
+ LV2_Atom* atom;
+} LV2_Atom_Forge_Frame;
+
+/** A "forge" for creating atoms by appending to a buffer. */
+typedef struct {
+ uint8_t* buf;
+ uint32_t offset;
+ uint32_t size;
+
+ LV2_Atom_Forge_Sink sink;
+ LV2_Atom_Forge_Sink_Handle handle;
+
+ LV2_Atom_Forge_Frame* stack;
+
+ LV2_URID Blank;
+ LV2_URID Bool;
+ LV2_URID Double;
+ LV2_URID Float;
+ LV2_URID Int32;
+ LV2_URID Int64;
+ LV2_URID Literal;
+ LV2_URID Path;
+ LV2_URID Property;
+ LV2_URID Resource;
+ LV2_URID Sequence;
+ LV2_URID String;
+ LV2_URID Tuple;
+ LV2_URID URI;
+ LV2_URID URID;
+ LV2_URID Vector;
+} LV2_Atom_Forge;
+
+/**
+ Push a stack frame.
+ This is done automatically by container functions (which take a stack frame
+ pointer), but may be called by the user to push the top level container when
+ writing to an existing Atom.
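+
+ For example, a sketch of appending events to an already-written sequence
+ atom `seq`, assuming the forge's output buffer points to the memory just
+ past the existing sequence contents (the frame keeps seq->atom.size
+ updated):
+ @code
+ LV2_Atom_Forge_Frame frame;
+ lv2_atom_forge_push(forge, &frame, (LV2_Atom*)seq);
+ lv2_atom_forge_frame_time(forge, 0);
+ lv2_atom_forge_int32(forge, 42);
+ lv2_atom_forge_pop(forge, &frame);
+ @endcode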
+*/
+static inline LV2_Atom*
+lv2_atom_forge_push(LV2_Atom_Forge* forge,
+ LV2_Atom_Forge_Frame* frame,
+ LV2_Atom* atom)
+{
+ frame->parent = forge->stack;
+ frame->atom = atom;
+ forge->stack = frame;
+ return atom;
+}
+
+/** Pop a stack frame. This must be called when a container is finished. */
+static inline void
+lv2_atom_forge_pop(LV2_Atom_Forge* forge, LV2_Atom_Forge_Frame* frame)
+{
+ assert(frame == forge->stack);
+ forge->stack = frame->parent;
+}
+
+/** Set the output buffer where @p forge will write atoms. */
+static inline void
+lv2_atom_forge_set_buffer(LV2_Atom_Forge* forge, uint8_t* buf, size_t size)
+{
+ forge->buf = buf;
+ forge->size = size;
+ forge->offset = 0;
+ forge->sink = NULL;
+ forge->handle = NULL;
+}
+
+/**
+ Set the sink function where @p forge will write output.
+
+ The return value of forge functions is a pointer to the written data, which
+ is used for updating parent sizes. To enable this, the sink function must
+ return a valid pointer to a contiguous LV2_Atom header. For ringbuffers,
+ this should be possible as long as the size of the buffer is a multiple of
+ sizeof(LV2_Atom), since atoms are always aligned. When using a ringbuffer,
+ the returned pointers may not point to a complete atom (including body).
+ The user must take care to only use these return values in a way compatible
+ with the sink used.
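+
+ A minimal sketch of a sink that appends to a flat, pre-allocated buffer
+ (ExampleSink and example_sink are hypothetical names, not part of this API):
+ @code
+ typedef struct { uint8_t* buf; uint32_t offset; } ExampleSink;
+
+ static void*
+ example_sink(LV2_Atom_Forge_Sink_Handle handle, const void* data, uint32_t size)
+ {
+ ExampleSink* sink = (ExampleSink*)handle;
+ void* out = sink->buf + sink->offset;
+ memcpy(out, data, size); // No overflow check in this sketch
+ sink->offset += size;
+ return out; // Must point to stable, contiguous memory
+ }
+ @endcode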
+*/
+static inline void
+lv2_atom_forge_set_sink(LV2_Atom_Forge* forge,
+ LV2_Atom_Forge_Sink sink,
+ LV2_Atom_Forge_Sink_Handle handle)
+{
+ forge->buf = NULL;
+ forge->size = forge->offset = 0;
+ forge->sink = sink;
+ forge->handle = handle;
+}
+
+/**
+ Initialise @p forge.
+
+ URIs will be mapped using @p map and stored; a reference to @p map itself is
+ not held.
+*/
+static inline void
+lv2_atom_forge_init(LV2_Atom_Forge* forge, LV2_URID_Map* map)
+{
+ lv2_atom_forge_set_buffer(forge, NULL, 0);
+ forge->stack = NULL;
+ forge->Blank = map->map(map->handle, LV2_ATOM_URI "#Blank");
+ forge->Bool = map->map(map->handle, LV2_ATOM_URI "#Bool");
+ forge->Double = map->map(map->handle, LV2_ATOM_URI "#Double");
+ forge->Float = map->map(map->handle, LV2_ATOM_URI "#Float");
+ forge->Int32 = map->map(map->handle, LV2_ATOM_URI "#Int32");
+ forge->Int64 = map->map(map->handle, LV2_ATOM_URI "#Int64");
+ forge->Literal = map->map(map->handle, LV2_ATOM_URI "#Literal");
+ forge->Path = map->map(map->handle, LV2_ATOM_URI "#Path");
+ forge->Property = map->map(map->handle, LV2_ATOM_URI "#Property");
+ forge->Resource = map->map(map->handle, LV2_ATOM_URI "#Resource");
+ forge->Sequence = map->map(map->handle, LV2_ATOM_URI "#Sequence");
+ forge->String = map->map(map->handle, LV2_ATOM_URI "#String");
+ forge->Tuple = map->map(map->handle, LV2_ATOM_URI "#Tuple");
+ forge->URI = map->map(map->handle, LV2_ATOM_URI "#URI");
+ forge->URID = map->map(map->handle, LV2_ATOM_URI "#URID");
+ forge->Vector = map->map(map->handle, LV2_ATOM_URI "#Vector");
+}
+
+/**
+ Write raw output. This is used internally, but is also useful for writing
+ atom types not explicitly supported by the forge API. Note the caller is
+ responsible for ensuring the output is appropriately padded.
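+
+ For example, a sketch of writing an atom of a custom type (body, body_size,
+ and my_type here are hypothetical):
+ @code
+ const LV2_Atom head = { body_size, my_type };
+ lv2_atom_forge_raw(forge, &head, sizeof(head));
+ lv2_atom_forge_raw(forge, body, body_size);
+ lv2_atom_forge_pad(forge, body_size); // Re-align output to 64 bits
+ @endcode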
+*/
+static inline void*
+lv2_atom_forge_raw(LV2_Atom_Forge* forge, const void* data, uint32_t size)
+{
+ uint8_t* out = NULL;
+ if (forge->sink) {
+ out = (uint8_t*)forge->sink(forge->handle, data, size);
+ } else {
+ out = forge->buf + forge->offset;
+ if (forge->offset + size > forge->size) {
+ return NULL;
+ }
+ forge->offset += size;
+ memcpy(out, data, size);
+ }
+ if (out) {
+ for (LV2_Atom_Forge_Frame* f = forge->stack; f; f = f->parent) {
+ f->atom->size += size;
+ }
+ }
+ return out;
+}
+
+/** Pad output so that the next write is 64-bit aligned. */
+static inline void
+lv2_atom_forge_pad(LV2_Atom_Forge* forge, uint32_t written)
+{
+ const uint64_t pad = 0;
+ const uint32_t pad_size = lv2_atom_pad_size(written) - written;
+ lv2_atom_forge_raw(forge, &pad, pad_size);
+}
+
+/** Write raw output, padding to 64-bits as necessary. */
+static inline void*
+lv2_atom_forge_write(LV2_Atom_Forge* forge, const void* data, uint32_t size)
+{
+ void* out = lv2_atom_forge_raw(forge, data, size);
+ if (out) {
+ lv2_atom_forge_pad(forge, size);
+ }
+ return out;
+}
+
+/** Write an atom:Atom header. */
+static inline LV2_Atom*
+lv2_atom_forge_atom(LV2_Atom_Forge* forge, uint32_t size, uint32_t type)
+{
+ const LV2_Atom a = { size, type };
+ return (LV2_Atom*)lv2_atom_forge_raw(forge, &a, sizeof(a));
+}
+
+/** Write an atom:Int32. */
+static inline LV2_Atom_Int32*
+lv2_atom_forge_int32(LV2_Atom_Forge* forge, int32_t val)
+{
+ const LV2_Atom_Int32 a = { { sizeof(val), forge->Int32 }, val };
+ return (LV2_Atom_Int32*)lv2_atom_forge_write(forge, &a, sizeof(a));
+}
+
+/** Write an atom:Int64. */
+static inline LV2_Atom_Int64*
+lv2_atom_forge_int64(LV2_Atom_Forge* forge, int64_t val)
+{
+ const LV2_Atom_Int64 a = { { sizeof(val), forge->Int64 }, val };
+ return (LV2_Atom_Int64*)lv2_atom_forge_write(forge, &a, sizeof(a));
+}
+
+/** Write an atom:Float. */
+static inline LV2_Atom_Float*
+lv2_atom_forge_float(LV2_Atom_Forge* forge, float val)
+{
+ const LV2_Atom_Float a = { { sizeof(val), forge->Float }, val };
+ return (LV2_Atom_Float*)lv2_atom_forge_write(forge, &a, sizeof(a));
+}
+
+/** Write an atom:Double. */
+static inline LV2_Atom_Double*
+lv2_atom_forge_double(LV2_Atom_Forge* forge, double val)
+{
+ const LV2_Atom_Double a = { { sizeof(val), forge->Double }, val };
+ return (LV2_Atom_Double*)lv2_atom_forge_write(
+ forge, &a, sizeof(a));
+}
+
+/** Write an atom:Bool. */
+static inline LV2_Atom_Bool*
+lv2_atom_forge_bool(LV2_Atom_Forge* forge, bool val)
+{
+ const LV2_Atom_Bool a = { { sizeof(int32_t), forge->Bool }, val ? 1 : 0 };
+ return (LV2_Atom_Bool*)lv2_atom_forge_write(forge, &a, sizeof(a));
+}
+
+/** Write an atom:URID. */
+static inline LV2_Atom_URID*
+lv2_atom_forge_urid(LV2_Atom_Forge* forge, LV2_URID id)
+{
+ const LV2_Atom_URID a = { { sizeof(id), forge->URID }, id };
+ return (LV2_Atom_URID*)lv2_atom_forge_write(forge, &a, sizeof(a));
+}
+
+/** Write a string body. Used internally. */
+static inline uint8_t*
+lv2_atom_forge_string_body(LV2_Atom_Forge* forge,
+ const uint8_t* str,
+ uint32_t len)
+{
+ uint8_t* out = NULL;
+ if ((out = (uint8_t*)lv2_atom_forge_raw(forge, str, len))
+ && (out = (uint8_t*)lv2_atom_forge_raw(forge, "", 1))) {
+ lv2_atom_forge_pad(forge, len + 1);
+ }
+ return out;
+}
+
+/** Write an atom compatible with atom:String. Used internally. */
+static inline LV2_Atom_String*
+lv2_atom_forge_typed_string(LV2_Atom_Forge* forge,
+ uint32_t type,
+ const uint8_t* str,
+ uint32_t len)
+{
+ const LV2_Atom_String a = { { len + 1, type } };
+ LV2_Atom_String* out = (LV2_Atom_String*)
+ lv2_atom_forge_raw(forge, &a, sizeof(a));
+ if (out) {
+ if (!lv2_atom_forge_string_body(forge, str, len)) {
+ out->atom.size = out->atom.type = 0;
+ out = NULL;
+ }
+ }
+ return out;
+}
+
+/** Write an atom:String. Note that @p str need not be NULL terminated. */
+static inline LV2_Atom_String*
+lv2_atom_forge_string(LV2_Atom_Forge* forge, const uint8_t* str, uint32_t len)
+{
+ return lv2_atom_forge_typed_string(forge, forge->String, str, len);
+}
+
+/**
+ Write an atom:URI. Note that @p uri need not be NULL terminated.
+ This does not map the URI, but writes the complete URI string. To write
+ a mapped URI, use lv2_atom_forge_urid().
+*/
+static inline LV2_Atom_String*
+lv2_atom_forge_uri(LV2_Atom_Forge* forge, const uint8_t* uri, uint32_t len)
+{
+ return lv2_atom_forge_typed_string(forge, forge->URI, uri, len);
+}
+
+/** Write an atom:Path. Note that @p path need not be NULL terminated. */
+static inline LV2_Atom_String*
+lv2_atom_forge_path(LV2_Atom_Forge* forge, const uint8_t* path, uint32_t len)
+{
+ return lv2_atom_forge_typed_string(forge, forge->Path, path, len);
+}
+
+/** Write an atom:Literal. */
+static inline LV2_Atom_Literal*
+lv2_atom_forge_literal(LV2_Atom_Forge* forge,
+ const uint8_t* str,
+ uint32_t len,
+ uint32_t datatype,
+ uint32_t lang)
+{
+ const LV2_Atom_Literal a = {
+ { sizeof(LV2_Atom_Literal) - sizeof(LV2_Atom) + len + 1,
+ forge->Literal },
+ { datatype,
+ lang }
+ };
+ LV2_Atom_Literal* out = (LV2_Atom_Literal*)
+ lv2_atom_forge_raw(forge, &a, sizeof(a));
+ if (out) {
+ if (!lv2_atom_forge_string_body(forge, str, len)) {
+ out->atom.size = out->atom.type = 0;
+ out = NULL;
+ }
+ }
+ return out;
+}
+
+/** Write an atom:Vector header, but not the vector body. */
+static inline LV2_Atom_Vector*
+lv2_atom_forge_vector_head(LV2_Atom_Forge* forge,
+ uint32_t elem_count,
+ uint32_t elem_type,
+ uint32_t elem_size)
+{
+ const uint32_t size = sizeof(LV2_Atom_Vector) + (elem_size * elem_count);
+ const LV2_Atom_Vector a = {
+ { size - sizeof(LV2_Atom), forge->Vector },
+ { elem_count, elem_type }
+ };
+ return (LV2_Atom_Vector*)lv2_atom_forge_write(forge, &a, sizeof(a));
+}
+
+/** Write a complete atom:Vector. */
+static inline LV2_Atom_Vector*
+lv2_atom_forge_vector(LV2_Atom_Forge* forge,
+ uint32_t elem_count,
+ uint32_t elem_type,
+ uint32_t elem_size,
+ void* elems)
+{
+ LV2_Atom_Vector* out = lv2_atom_forge_vector_head(
+ forge, elem_count, elem_type, elem_size);
+ if (out) {
+ lv2_atom_forge_write(forge, elems, elem_size * elem_count);
+ }
+ return out;
+}
+
+/**
+ Write the header of an atom:Tuple.
+
+ The passed frame will be initialised to represent this tuple. To complete
+ the tuple, write a sequence of atoms, then pop the frame with
+ lv2_atom_forge_pop().
+
+ For example:
+ @code
+ // Write tuple (1, 2.0)
+ LV2_Atom_Forge_Frame frame;
+ LV2_Atom* tup = (LV2_Atom*)lv2_atom_forge_tuple(forge, &frame);
+ lv2_atom_forge_int32(forge, 1);
+ lv2_atom_forge_float(forge, 2.0);
+ lv2_atom_forge_pop(forge, &frame);
+ @endcode
+*/
+static inline LV2_Atom_Tuple*
+lv2_atom_forge_tuple(LV2_Atom_Forge* forge, LV2_Atom_Forge_Frame* frame)
+{
+ const LV2_Atom_Tuple a = { { 0, forge->Tuple } };
+ LV2_Atom* atom = (LV2_Atom*)lv2_atom_forge_write(forge, &a, sizeof(a));
+ return (LV2_Atom_Tuple*)lv2_atom_forge_push(forge, frame, atom);
+}
+
+/**
+ Write the header of an atom:Resource.
+
+ The passed frame will be initialised to represent this object. To complete
+ the object, write a sequence of properties, then pop the frame with
+ lv2_atom_forge_pop().
+
+ For example:
+ @code
+ LV2_URID eg_Cat = map("http://example.org/Cat");
+ LV2_URID eg_name = map("http://example.org/name");
+
+ // Write object header
+ LV2_Atom_Forge_Frame frame;
+ LV2_Atom* obj = (LV2_Atom*)lv2_atom_forge_resource(forge, &frame, 1, eg_Cat);
+
+ // Write property: eg:name = "Hobbes"
+ lv2_atom_forge_property_head(forge, eg_name, 0);
+ lv2_atom_forge_string(forge, "Hobbes", strlen("Hobbes"));
+
+ // Finish object
+ lv2_atom_forge_pop(forge, &frame);
+ @endcode
+*/
+static inline LV2_Atom_Object*
+lv2_atom_forge_resource(LV2_Atom_Forge* forge,
+ LV2_Atom_Forge_Frame* frame,
+ LV2_URID id,
+ LV2_URID otype)
+{
+ const LV2_Atom_Object a = {
+ { sizeof(LV2_Atom_Object) - sizeof(LV2_Atom), forge->Resource },
+ { id, otype }
+ };
+ LV2_Atom* atom = (LV2_Atom*)lv2_atom_forge_write(forge, &a, sizeof(a));
+ return (LV2_Atom_Object*)lv2_atom_forge_push(forge, frame, atom);
+}
+
+/**
+ The same as lv2_atom_forge_resource(), but for atom:Blank.
+*/
+static inline LV2_Atom_Object*
+lv2_atom_forge_blank(LV2_Atom_Forge* forge,
+ LV2_Atom_Forge_Frame* frame,
+ uint32_t id,
+ LV2_URID otype)
+{
+ const LV2_Atom_Object a = {
+ { sizeof(LV2_Atom_Object) - sizeof(LV2_Atom), forge->Blank },
+ { id, otype }
+ };
+ LV2_Atom* atom = (LV2_Atom*)lv2_atom_forge_write(forge, &a, sizeof(a));
+ return (LV2_Atom_Object*)lv2_atom_forge_push(forge, frame, atom);
+}
+
+/**
+ Write the header for a property body (likely in an Object).
+ See lv2_atom_forge_resource() documentation for an example.
+*/
+static inline LV2_Atom_Property_Body*
+lv2_atom_forge_property_head(LV2_Atom_Forge* forge,
+ LV2_URID key,
+ LV2_URID context)
+{
+ const LV2_Atom_Property_Body a = { key, context, { 0, 0 } };
+ return (LV2_Atom_Property_Body*)lv2_atom_forge_write(
+ forge, &a, 2 * sizeof(uint32_t));
+}
+
+/**
+ Write the header for a Sequence.
+ The passed frame will be initialised to represent this sequence. To
+ complete the sequence, write a series of events, then pop the frame with
+ lv2_atom_forge_pop().
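+
+ For example, a sketch of writing a sequence containing a single Int32 event
+ at frame time 0 (unit 0 selects the default time stamp type):
+ @code
+ LV2_Atom_Forge_Frame frame;
+ lv2_atom_forge_sequence_head(forge, &frame, 0);
+ lv2_atom_forge_frame_time(forge, 0);
+ lv2_atom_forge_int32(forge, 42);
+ lv2_atom_forge_pop(forge, &frame);
+ @endcode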
+*/
+static inline LV2_Atom_Sequence*
+lv2_atom_forge_sequence_head(LV2_Atom_Forge* forge,
+ LV2_Atom_Forge_Frame* frame,
+ uint32_t unit)
+{
+ const LV2_Atom_Sequence a = {
+ { sizeof(LV2_Atom_Sequence) - sizeof(LV2_Atom), forge->Sequence },
+ { unit, 0 }
+ };
+ LV2_Atom* atom = (LV2_Atom*)lv2_atom_forge_write(forge, &a, sizeof(a));
+ return (LV2_Atom_Sequence*)lv2_atom_forge_push(forge, frame, atom);
+}
+
+/**
+ Write the time stamp header of an Event (in a Sequence) in audio frames.
+ After this, call the appropriate forge method(s) to write the body. Note
+ that the resulting LV2_Atom_Event is NOT an Atom.
+*/
+static inline int64_t*
+lv2_atom_forge_frame_time(LV2_Atom_Forge* forge, int64_t frames)
+{
+ return (int64_t*)lv2_atom_forge_write(forge, &frames, sizeof(frames));
+}
+
+/**
+ Write the time stamp header of an Event (in a Sequence) in beats.
+ After this, call the appropriate forge method(s) to write the body. Note
+ that the resulting LV2_Atom_Event is NOT an Atom.
+*/
+static inline double*
+lv2_atom_forge_beat_time(LV2_Atom_Forge* forge, double beats)
+{
+ return (double*)lv2_atom_forge_write(forge, &beats, sizeof(beats));
+}
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* LV2_ATOM_FORGE_H */
diff --git a/libs/ardour/lv2/lv2plug.in/ns/ext/atom/util.h b/libs/ardour/lv2/lv2plug.in/ns/ext/atom/util.h
new file mode 100644
index 0000000000..6b46a676f8
--- /dev/null
+++ b/libs/ardour/lv2/lv2plug.in/ns/ext/atom/util.h
@@ -0,0 +1,424 @@
+/*
+ Copyright 2008-2012 David Robillard <http://drobilla.net>
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/**
+ @file util.h Helper functions for the LV2 Atom extension.
+
+ Note these functions are all static inline; do not take their address.
+
+ This header is non-normative; it is provided for convenience.
+*/
+
+#ifndef LV2_ATOM_UTIL_H
+#define LV2_ATOM_UTIL_H
+
+#include <stdarg.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "lv2/lv2plug.in/ns/ext/atom/atom.h"
+
+#ifdef __cplusplus
+extern "C" {
+#else
+# include <stdbool.h>
+#endif
+
+/** Pad a size to 64 bits. */
+static inline uint32_t
+lv2_atom_pad_size(uint32_t size)
+{
+ return (size + 7) & (~7);
+}
+
+/** Return the total size of @p atom, including the header. */
+static inline uint32_t
+lv2_atom_total_size(const LV2_Atom* atom)
+{
+ return sizeof(LV2_Atom) + atom->size;
+}
+
+/** Return true iff @p atom is null. */
+static inline bool
+lv2_atom_is_null(const LV2_Atom* atom)
+{
+ return !atom || (atom->type == 0 && atom->size == 0);
+}
+
+/** Return true iff @p a is equal to @p b. */
+static inline bool
+lv2_atom_equals(const LV2_Atom* a, const LV2_Atom* b)
+{
+ return (a == b) || ((a->type == b->type) &&
+ (a->size == b->size) &&
+ !memcmp(a + 1, b + 1, a->size));
+}
+
+/**
+ @name Sequence Iterator
+ @{
+*/
+
+/** An iterator over the elements of an LV2_Atom_Sequence. */
+typedef LV2_Atom_Event* LV2_Atom_Sequence_Iter;
+
+/** Get an iterator pointing to the first element in a Sequence body. */
+static inline LV2_Atom_Sequence_Iter
+lv2_sequence_body_begin(const LV2_Atom_Sequence_Body* body)
+{
+ return (LV2_Atom_Sequence_Iter)(body + 1);
+}
+
+/** Get an iterator pointing to the first element in a Sequence. */
+static inline LV2_Atom_Sequence_Iter
+lv2_sequence_begin(const LV2_Atom_Sequence* seq)
+{
+ return (LV2_Atom_Sequence_Iter)(seq + 1);
+}
+
+/** Return true iff @p i has reached the end of @p body. */
+static inline bool
+lv2_sequence_body_is_end(const LV2_Atom_Sequence_Body* body,
+ uint32_t size,
+ LV2_Atom_Sequence_Iter i)
+{
+ return (uint8_t*)i >= ((uint8_t*)body + size);
+}
+
+/** Return true iff @p i has reached the end of @p seq. */
+static inline bool
+lv2_sequence_is_end(const LV2_Atom_Sequence* seq, LV2_Atom_Sequence_Iter i)
+{
+ return (uint8_t*)i >= ((uint8_t*)seq + sizeof(LV2_Atom) + seq->atom.size);
+}
+
+/** Return an iterator to the element following @p i. */
+static inline LV2_Atom_Sequence_Iter
+lv2_sequence_iter_next(const LV2_Atom_Sequence_Iter i)
+{
+ return (LV2_Atom_Sequence_Iter)((uint8_t*)i
+ + sizeof(LV2_Atom_Event)
+ + lv2_atom_pad_size(i->body.size));
+}
+
+/** Return the element pointed to by @p i. */
+static inline LV2_Atom_Event*
+lv2_sequence_iter_get(LV2_Atom_Sequence_Iter i)
+{
+ return (LV2_Atom_Event*)i;
+}
+
+/**
+ A macro for iterating over all events in a Sequence.
+ @param sequence The sequence to iterate over
+ @param iter The name of the iterator
+
+ This macro is used similarly to a for loop (which it expands to), e.g.:
+ @code
+ LV2_SEQUENCE_FOREACH(sequence, i) {
+ LV2_Atom_Event* ev = lv2_sequence_iter_get(i);
+ // Do something with ev here...
+ }
+ @endcode
+*/
+#define LV2_SEQUENCE_FOREACH(sequence, iter) \
+ for (LV2_Atom_Sequence_Iter (iter) = lv2_sequence_begin(sequence); \
+ !lv2_sequence_is_end(sequence, (iter)); \
+ (iter) = lv2_sequence_iter_next(iter))
+
+/** A version of LV2_SEQUENCE_FOREACH for when only the body is available. */
+#define LV2_SEQUENCE_BODY_FOREACH(body, size, iter) \
+ for (LV2_Atom_Sequence_Iter (iter) = lv2_sequence_body_begin(body); \
+ !lv2_sequence_body_is_end(body, size, (iter)); \
+ (iter) = lv2_sequence_iter_next(iter))
+
+/**
+ @}
+ @name Tuple Iterator
+ @{
+*/
+
+/** An iterator over the elements of an LV2_Atom_Tuple. */
+typedef LV2_Atom* LV2_Atom_Tuple_Iter;
+
+/** Get an iterator pointing to the first element in @p tup. */
+static inline LV2_Atom_Tuple_Iter
+lv2_tuple_begin(const LV2_Atom_Tuple* tup)
+{
+ return (LV2_Atom_Tuple_Iter)(LV2_ATOM_BODY(tup));
+}
+
+/** Return true iff @p i has reached the end of @p body. */
+static inline bool
+lv2_atom_tuple_body_is_end(const void* body,
+ uint32_t size,
+ LV2_Atom_Tuple_Iter i)
+{
+ return (uint8_t*)i >= ((uint8_t*)body + size);
+}
+
+/** Return true iff @p i has reached the end of @p tup. */
+static inline bool
+lv2_tuple_is_end(const LV2_Atom_Tuple* tup, LV2_Atom_Tuple_Iter i)
+{
+ return lv2_atom_tuple_body_is_end(LV2_ATOM_BODY(tup), tup->atom.size, i);
+}
+
+/** Return an iterator to the element following @p i. */
+static inline LV2_Atom_Tuple_Iter
+lv2_tuple_iter_next(const LV2_Atom_Tuple_Iter i)
+{
+ return (LV2_Atom_Tuple_Iter)(
+ (uint8_t*)i + sizeof(LV2_Atom) + lv2_atom_pad_size(i->size));
+}
+
+/** Return the element pointed to by @p i. */
+static inline LV2_Atom*
+lv2_tuple_iter_get(LV2_Atom_Tuple_Iter i)
+{
+ return (LV2_Atom*)i;
+}
+
+/**
+ A macro for iterating over all elements of a Tuple.
+ @param tuple The tuple to iterate over
+ @param iter The name of the iterator
+
+ This macro is used similarly to a for loop (which it expands to), e.g.:
+ @code
+ LV2_TUPLE_FOREACH(tuple, i) {
+ LV2_Atom* elem = lv2_tuple_iter_get(i);
+ // Do something with elem here...
+ }
+ @endcode
+*/
+#define LV2_TUPLE_FOREACH(tuple, iter) \
+ for (LV2_Atom_Tuple_Iter (iter) = lv2_tuple_begin(tuple); \
+ !lv2_tuple_is_end(tuple, (iter)); \
+ (iter) = lv2_tuple_iter_next(iter))
+
+/** A version of LV2_TUPLE_FOREACH for when only the body is available. */
+#define LV2_TUPLE_BODY_FOREACH(body, size, iter) \
+ for (LV2_Atom_Tuple_Iter (iter) = (LV2_Atom_Tuple_Iter)(body); \
+ !lv2_atom_tuple_body_is_end(body, size, (iter)); \
+ (iter) = lv2_tuple_iter_next(iter))
+
+/**
+ @}
+ @name Object Iterator
+ @{
+*/
+
+/** An iterator over the properties of an LV2_Atom_Object. */
+typedef LV2_Atom_Property_Body* LV2_Atom_Object_Iter;
+
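+/** Get an iterator pointing to the first property in @p body. */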
+static inline LV2_Atom_Object_Iter
+lv2_object_body_begin(const LV2_Atom_Object_Body* body)
+{
+ return (LV2_Atom_Object_Iter)(body + 1);
+}
+
+/** Get an iterator pointing to the first property in @p obj. */
+static inline LV2_Atom_Object_Iter
+lv2_object_begin(const LV2_Atom_Object* obj)
+{
+ return (LV2_Atom_Object_Iter)(obj + 1);
+}
+
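+/** Return true iff @p i has reached the end of the Object body @p body. */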
+static inline bool
+lv2_atom_object_body_is_end(const LV2_Atom_Object_Body* body,
+ uint32_t size,
+ LV2_Atom_Object_Iter i)
+{
+ return (uint8_t*)i >= ((uint8_t*)body + size);
+}
+
+/** Return true iff @p i has reached the end of @p obj. */
+static inline bool
+lv2_object_is_end(const LV2_Atom_Object* obj, LV2_Atom_Object_Iter i)
+{
+ return (uint8_t*)i >= ((uint8_t*)obj + sizeof(LV2_Atom) + obj->atom.size);
+}
+
+/** Return an iterator to the property following @p i. */
+static inline LV2_Atom_Object_Iter
+lv2_object_iter_next(const LV2_Atom_Object_Iter i)
+{
+ const LV2_Atom* const value = &i->value;
+ return (LV2_Atom_Object_Iter)((uint8_t*)i
+ + sizeof(LV2_Atom_Property_Body)
+ + lv2_atom_pad_size(value->size));
+}
+
+/** Return the property pointed to by @p i. */
+static inline LV2_Atom_Property_Body*
+lv2_object_iter_get(LV2_Atom_Object_Iter i)
+{
+ return (LV2_Atom_Property_Body*)i;
+}
+
+/**
+ A macro for iterating over all properties of an Object.
+ @param object The object to iterate over
+ @param iter The name of the iterator
+
+ This macro is used similarly to a for loop (which it expands to), e.g.:
+ @code
+ LV2_OBJECT_FOREACH(object, i) {
+ LV2_Atom_Property_Body* prop = lv2_object_iter_get(i);
+ // Do something with prop here...
+ }
+ @endcode
+*/
+#define LV2_OBJECT_FOREACH(object, iter) \
+ for (LV2_Atom_Object_Iter (iter) = lv2_object_begin(object); \
+ !lv2_object_is_end(object, (iter)); \
+ (iter) = lv2_object_iter_next(iter))
+
+/** A version of LV2_OBJECT_FOREACH for when only the body is available. */
+#define LV2_OBJECT_BODY_FOREACH(body, size, iter) \
+ for (LV2_Atom_Object_Iter (iter) = lv2_object_body_begin(body); \
+ !lv2_atom_object_body_is_end(body, size, (iter)); \
+ (iter) = lv2_object_iter_next(iter))
+
+/**
+ @}
+ @name Object Query
+ @{
+*/
+
+/** A single entry in an Object query. */
+typedef struct {
+ uint32_t key; /**< Key to query (input set by user) */
+ const LV2_Atom** value; /**< Found value (output set by query function) */
+} LV2_Atom_Object_Query;
+
+static const LV2_Atom_Object_Query LV2_OBJECT_QUERY_END = { 0, NULL };
+
+/**
+ Get an object's values for various keys.
+
+ The value pointer of each item in @p query will be set to the location of
+ the corresponding value in @p object. Every value pointer in @p query MUST
+ be initialised to NULL. This function reads @p object in a single linear
+ sweep. By allocating @p query on the stack, objects can be "queried"
+ quickly without allocating any memory. This function is realtime safe.
+
+ This function can only do "flat" queries; it is not smart enough to match
+ variables in nested objects.
+
+ For example:
+ @code
+ const LV2_Atom* name = NULL;
+ const LV2_Atom* age = NULL;
+ LV2_Atom_Object_Query q[] = {
+ { urids.eg_name, &name },
+ { urids.eg_age, &age },
+ LV2_OBJECT_QUERY_END
+ };
+ lv2_object_query(obj, q);
+ // name and age are now set to the appropriate values in obj, or NULL.
+ @endcode
+*/
+static inline int
+lv2_object_query(const LV2_Atom_Object* object, LV2_Atom_Object_Query* query)
+{
+ int matches = 0;
+ int n_queries = 0;
+
+ /* Count number of query keys so we can short-circuit when done */
+ for (LV2_Atom_Object_Query* q = query; q->key; ++q) {
+ ++n_queries;
+ }
+
+ LV2_OBJECT_FOREACH(object, o) {
+ const LV2_Atom_Property_Body* prop = lv2_object_iter_get(o);
+ for (LV2_Atom_Object_Query* q = query; q->key; ++q) {
+ if (q->key == prop->key && !*q->value) {
+ *q->value = &prop->value;
+ if (++matches == n_queries) {
+ return matches;
+ }
+ break;
+ }
+ }
+ }
+ return matches;
+}
+
+/**
+ Variable argument version of lv2_object_query().
+
+ This is nicer-looking in code, but a bit more error-prone since it is not
+ type safe and the argument list must be terminated.
+
+ The arguments should be a series of uint32_t key and const LV2_Atom** value
+ pairs, terminated by a zero key. The value pointers MUST be initialized to
+ NULL. For example:
+
+ @code
+ const LV2_Atom* name = NULL;
+ const LV2_Atom* age = NULL;
+ lv2_object_get(obj,
+ uris.name_key, &name,
+ uris.age_key, &age,
+ 0);
+ @endcode
+*/
+static inline int
+lv2_object_get(const LV2_Atom_Object* object, ...)
+{
+ int matches = 0;
+ int n_queries = 0;
+
+ /* Count number of keys so we can short-circuit when done */
+ va_list args;
+ va_start(args, object);
+ for (n_queries = 0; va_arg(args, uint32_t); ++n_queries) {
+ if (!va_arg(args, const LV2_Atom**)) {
+ return -1;
+ }
+ }
+ va_end(args);
+
+ LV2_OBJECT_FOREACH(object, o) {
+ const LV2_Atom_Property_Body* prop = lv2_object_iter_get(o);
+ va_start(args, object);
+ for (int i = 0; i < n_queries; ++i) {
+ uint32_t qkey = va_arg(args, uint32_t);
+ const LV2_Atom** qval = va_arg(args, const LV2_Atom**);
+ if (qkey == prop->key && !*qval) {
+ *qval = &prop->value;
+ if (++matches == n_queries) {
+ return matches;
+ }
+ break;
+ }
+ }
+ va_end(args);
+ }
+ return matches;
+}
+
+/**
+ @}
+*/
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* LV2_ATOM_UTIL_H */