1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
// Copyright 2017 Michael Oswald

// Documentation copied from http://lv2plug.in/ns/ext/atom/util.h

// Copyright text of the original C file:

// Copyright 2012-2016 David Robillard <http://drobilla.net>
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
// THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.


//! Documentation of the corresponding C header files (part of LV2 Atom): http://lv2plug.in/ns/ext/atom/atom.html.


use atom::*;
use std::mem::size_of;
use libc::{memcmp, memcpy, c_void};

/// Round `size` up to the next multiple of 8 bytes (64-bit alignment),
/// as required between events/properties in LV2 atom containers.
pub fn lv2_atom_pad_size(size: u32) -> u32 {
    const ALIGN: u32 = 8;
    // Round up via integer division; equivalent to `(size + 7) & !7`.
    ((size + ALIGN - 1) / ALIGN) * ALIGN
}

/// Return the total size of `atom` in bytes, i.e. the `LV2Atom` header
/// plus the body size recorded in the header.
pub fn lv2_atom_total_size(atom: &LV2Atom) -> u32 {
    let header = size_of::<LV2Atom>() as u32;
    header + atom.size
}

/// Return true iff `atom` is "null": either a null pointer, or an atom
/// whose type and size fields are both zero.
pub unsafe fn lv2_atom_is_null(atom: *const LV2Atom) -> bool {
    if atom.is_null() {
        return true;
    }
    let a = &*atom;
    a.mytype == 0 && a.size == 0
}

/// Return true iff `a` is equal to `b`: same pointer, or same type and
/// size with byte-identical bodies.
pub unsafe fn lv2_atom_equals(a: *const LV2Atom, b: *const LV2Atom) -> bool {
    if a == b {
        return true;
    }
    if (*a).mytype != (*b).mytype || (*a).size != (*b).size {
        return false;
    }
    // Compare the bodies, which begin immediately after the headers
    // (hence `offset(1)` on the `LV2Atom` pointers).
    memcmp(a.offset(1) as *const c_void,
           b.offset(1) as *const c_void,
           (*a).size as usize) == 0
}


/// Get an iterator pointing to the first event in a Sequence body.
///
/// The first event starts immediately after the sequence body header.
pub unsafe fn lv2_atom_sequence_begin(body: *const LV2AtomSequenceBody) -> *mut LV2AtomEvent {
    let first = body.offset(1);
    first as *mut LV2AtomEvent
}

/// Get an iterator pointing one past the last event of a Sequence body
/// of `size` bytes (padded to 64-bit alignment).
pub unsafe fn lv2_atom_sequence_end(body: *const LV2AtomSequenceBody,
                                    size: u32)
                                    -> *const LV2AtomEvent {
    let padded = lv2_atom_pad_size(size) as isize;
    let end = (body as *const u8).offset(padded);
    end as *const LV2AtomEvent
}

/// Return true iff `i` has reached the end of a Sequence `body` of
/// `size` bytes.
pub unsafe fn lv2_atom_sequence_is_end(body: *const LV2AtomSequenceBody,
                                       size: u32,
                                       i: *const LV2AtomEvent)
                                       -> bool {
    let end = (body as *const u8).offset(size as isize);
    i as *const u8 >= end
}


/// Return an iterator to the element following `i`: step over the event
/// header plus the event's body, padded to 64-bit alignment.
pub unsafe fn lv2_atom_sequence_next(i: *const LV2AtomEvent) -> *mut LV2AtomEvent {
    let body_size = lv2_atom_pad_size((*i).body.size) as usize;
    let step = size_of::<LV2AtomEvent>() + body_size;
    (i as *const u8).offset(step as isize) as *mut LV2AtomEvent
}

/**
   Clear all events from `sequence`.

   This simply resets the size field to the size of the (empty) sequence
   body header; the other fields are left untouched.
*/
pub unsafe fn lv2_atom_sequence_clear(seq: *mut LV2AtomSequence) {
    // Shrinking the atom to just the body header discards every event.
    (*seq).atom.size = size_of::<LV2AtomSequenceBody>() as u32;
}


/**
   Append an event at the end of `sequence`.

   @param seq Sequence to append to.
   @param capacity Total capacity of the sequence atom
   (e.g. as set by the host for sequence output ports).
   @param event Event to write.

   @return A pointer to the newly written event in `seq`,
   or NULL on failure (insufficient space).
*/
pub unsafe fn lv2_atom_sequence_append_event(seq: *mut LV2AtomSequence,
                                             capacity: u32,
                                             event: *const LV2AtomEvent)
                                             -> *const LV2AtomEvent {

    // Size of the event header plus its body.
    let total_size = size_of::<LV2AtomEvent>() as u32 + (*event).body.size;

    // Reject the event when there is not enough room left.  `checked_sub`
    // avoids the u32 underflow (panic in debug, wrap-around in release)
    // that `capacity - (*seq).atom.size` would hit if the sequence size
    // already exceeds `capacity`.
    let used = (*seq).atom.size;
    match capacity.checked_sub(used) {
        Some(space) if space >= total_size => {}
        _ => return std::ptr::null(),
    }

    // Copy the event (header + body) to the current end of the sequence.
    let e = lv2_atom_sequence_end(&(*seq).body, used);
    memcpy(e as *mut c_void,
           event as *const c_void,
           total_size as usize);

    // Grow the sequence by the padded size to keep 64-bit alignment.
    (*seq).atom.size += lv2_atom_pad_size(total_size);

    e
}

/// Return a pointer to the first property in `body`.
///
/// Properties start immediately after the object body header.
pub unsafe fn lv2_atom_object_begin(body: *const LV2AtomObjectBody) -> *mut LV2AtomPropertyBody {
    let first = body.offset(1);
    first as *mut LV2AtomPropertyBody
}

/// Return true iff `i` has reached the end of an Object `body` of
/// `size` bytes.
pub unsafe fn lv2_atom_object_is_end(body: *const LV2AtomObjectBody,
                                     size: u32,
                                     i: *const LV2AtomPropertyBody)
                                     -> bool {
    let end = (body as *const u8).offset(size as isize);
    (i as *const u8) >= end
}

/** Return an iterator to the property following `i`. */
pub unsafe fn lv2_atom_object_next(i: *const LV2AtomPropertyBody) -> *mut LV2AtomPropertyBody {

    // Reach the value atom by skipping the first two u32 fields of the
    // property body (key and context in the C layout).
    // NOTE(review): assumes `LV2AtomPropertyBody` begins with exactly two
    // u32 fields before `value` — confirm against the struct definition.
    let value = (i as *const u8).offset((2 * size_of::<u32>()) as isize) as *const LV2Atom;

    // Advance by the padded size of the whole property (fields + value body).
    let offset = lv2_atom_pad_size(size_of::<LV2AtomPropertyBody>() as u32 + (*value).size);
    (i as *mut u8).offset(offset as isize) as *mut LV2AtomPropertyBody
}

/** A single entry in an Object query. */
pub struct LV2AtomObjectQuery {
    /// Key to query (input, set by the user).
    pub key: u32,
    /// Found value (output, set by the query function).  Points at a
    /// `*mut LV2Atom` slot that receives the matching property's value.
    pub value: *mut *mut LV2Atom,
}

/**
   Get an object's values for various keys.

   The value pointer of each item in `query` will be set to the location of
   the corresponding value in `object`.  Every value pointer in `query` MUST
   be initialised to NULL.  This function reads `object` in a single linear
   sweep.  By allocating `query` on the stack, objects can be "queried"
   quickly without allocating any memory.  This function is realtime safe.

   This function can only do "flat" queries, it is not smart enough to match
   variables in nested objects.

   For example:
   @code
   const LV2_Atom* name = NULL;
   const LV2_Atom* age  = NULL;
   LV2_Atom_Object_Query q[] = {
       { urids.eg_name, &name },
       { urids.eg_age,  &age },
       LV2_ATOM_OBJECT_QUERY_END
   };
   lv2_atom_object_query(obj, q);
   // name and age are now set to the appropriate values in obj, or NULL.
   @endcode
*/
pub unsafe fn lv2_atom_object_query(obj: *mut LV2AtomObject,
                                    query: *mut LV2AtomObjectQuery)
                                    -> i32 {

    let object = &mut *obj;

    let mut n_queries = 0;
    let mut matches = 0;

    // Count the queries up front; the list is terminated by a zero key.
    // BUG FIX: the original discarded the result of `q.offset(1)`, so `q`
    // never advanced and this loop never terminated for a non-empty query.
    let mut q = query;
    while (*q).key != 0 {
        n_queries += 1;
        q = q.offset(1);
    }

    {
        // Called once per property; returning `true` signals that all
        // queries have been answered (matching the original's contract).
        let f = |prop: *mut LV2AtomPropertyBody| -> bool {
            let mut q = query;
            while (*q).key != 0 {
                // Match on key, and only fill slots that are still unset —
                // the doc requires every value pointer to be initialised to
                // NULL.  This mirrors the C `!*q->value` check; the original
                // tested the outer pointer's nullness instead, which would
                // overwrite already-found values on duplicate keys.
                if (*q).key == (*prop).key
                    && !(*q).value.is_null()
                    && (*(*q).value).is_null() {
                    *(*q).value = &mut (*prop).value;

                    matches += 1;
                    if matches == n_queries {
                        return true;
                    }
                    break;
                }
                // BUG FIX: advance the cursor (result was discarded before).
                q = q.offset(1);
            }
            false
        };

        object.foreach(f);
    }

    matches
}


/// One key/output-slot pair for `lv2_atom_object_get`.
pub struct ObjectHelper {
    /// Key (URID) to look up in the object.
    pub key: u32,
    /// Output slot: `*atom` is set to the matching property's value.
    /// The pointed-to pointer must be initialised to NULL before the call.
    pub atom: *mut *mut LV2Atom,
}

/**
   Variable argument version of lv2_atom_object_query().

   This is nicer-looking in code, but a bit more error-prone since it is not
   type safe and the argument list must be terminated.

   The arguments should be a series of uint32_t key and const LV2_Atom** value
   pairs, terminated by a zero key.  The value pointers MUST be initialized to
   NULL.  For example:

   @code
   const LV2_Atom* name = NULL;
   const LV2_Atom* age  = NULL;
   lv2_atom_object_get(obj,
                       uris.name_key, &name,
                       uris.age_key,  &age,
                       0);
   @endcode
*/
pub unsafe fn lv2_atom_object_get(body: *mut LV2AtomObject, query: &[ObjectHelper]) -> i32 {

    // Number of slots filled so far / number of entries requested.
    let mut matches = 0;
    let mut n_queries = 0;

    // Validate the query: every output slot must be a non-null pointer.
    for it in query {
        if it.atom.is_null() {
            return -1;
        }
        n_queries += 1;
    }

    {
        let f = |prop: *mut LV2AtomPropertyBody| -> bool {

            for it in query {
                let qkey = it.key;

                // Only fill slots that are still NULL (i.e. not yet found);
                // callers must initialise the value pointers to NULL.
                if qkey == (*prop).key && (*(it.atom)).is_null() {
                    *(it.atom) = &mut (*prop).value;
                    matches += 1;
                    if matches == n_queries {
                        return matches > 0;
                    }
                    break;
                }
            }
            // NOTE(review): `lv2_atom_object_query`'s callback returns
            // `false` at this point to keep iterating; returning `true`
            // after every property looks inconsistent — confirm the
            // intended meaning of the `foreach` callback's return value.
            return true;
        };

        (*body).foreach(f);
    }

    return matches;
}

impl LV2AtomSequenceBody {
    /// Invoke `closure` once for every event in this sequence body.
    ///
    /// `size` is the size of the body in bytes, as recorded in the
    /// enclosing atom header.
    pub unsafe fn foreach<F>(&mut self, size: u32, mut closure: F)
        where F: FnMut(*const LV2AtomEvent) -> ()
    {
        let mut ev = lv2_atom_sequence_begin(self);
        loop {
            if lv2_atom_sequence_is_end(self, size, ev) {
                break;
            }
            closure(ev);
            ev = lv2_atom_sequence_next(ev);
        }
    }
}

/// An iterator for atom sequences.
///
/// This was written by a beginner. Note that
///
/// - The iterator may be implemented incorrectly.
/// - We are not sure whether it is actually advisable to use it in
///   functions with hard real-time requirements.
/// - The `struct` `LV2AtomSequenceIterator` is, by itself, probably not
///   useful. The only reason why its a public struct is that the code
///   doesn't compile otherwise.
///
pub struct LV2AtomSequenceIterator<'a> {
    /// The sequence being iterated over.
    pub seq: &'a LV2AtomSequence,
    /// The event the iterator currently points at; may reference the
    /// one-past-the-end position once iteration is exhausted.
    pub current: &'a LV2AtomEvent,
}

impl<'a> Iterator for LV2AtomSequenceIterator<'a> {
    type Item = &'a LV2AtomEvent;

    /// Yield the current event and step to the following one, or return
    /// `None` once the end of the sequence body has been reached.
    fn next(&mut self) -> Option<Self::Item> {
        unsafe {
            let done = lv2_atom_sequence_is_end(&self.seq.body,
                                                self.seq.atom.size,
                                                self.current);
            if done {
                return None;
            }
            let event = self.current;
            self.current = &*lv2_atom_sequence_next(event);
            Some(event)
        }
    }
}


// perhaps wrong. TODO: understand this: http://stackoverflow.com/questions/41448232/issues-constraining-implementation-lifetimes-on-type-without-lifetime-parameter
impl<'a> IntoIterator for &'a LV2AtomSequence {
    type Item = &'a LV2AtomEvent;
    type IntoIter = LV2AtomSequenceIterator<'a>;

    /// Build an iterator positioned at the first event of the sequence.
    fn into_iter(self) -> Self::IntoIter {
        let first = unsafe { &*lv2_atom_sequence_begin(&self.body) };
        LV2AtomSequenceIterator {
            seq: self,
            current: first,
        }
    }
}