tfdeploy 0.0.10

Tiny, no-nonsense, self-contained TensorFlow inference library.
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
//! TensorFlow Ops
use std::collections::HashMap;
use std::collections::VecDeque;
use std::fmt::Debug;
use std::mem;
use std::ops::{Index, IndexMut};
#[cfg(feature = "serialize")]
use std::result::Result as StdResult;
use std::sync::Arc;

use analyser::prelude::*;
use dim::TDim;
use model::TVec;
use ops::nn::local_patch::{DataFormat, Padding};
use {DatumType, Result, Tensor};

use downcast_rs::Downcast;
use objekt;
#[cfg(feature = "serialize")]
use serde::ser::{Serialize, Serializer};

#[macro_use]
mod macros;

mod array;
mod cast;
// BUG FIX: the cfg key is `feature` (singular); `features` is not a
// recognized cfg key, so this predicate was always false and the `image`
// module was silently never compiled in, even with `image_ops` enabled.
#[cfg(feature = "image_ops")]
pub mod image;
pub mod konst;
mod math;
pub mod nn;
mod unimpl;

/// Convenience re-exports for implementing and registering operators:
/// `use ops::prelude::*` brings in the op traits, buffer types, and the
/// tensor/dimension vocabulary in one line.
pub mod prelude {
    pub use super::{Attr, Op, OpRegister};
    pub use super::{OpBuffer, QueuesBuffer, StepValue, Stream, StreamInfo, Value};
    pub use dim::TDim;
    pub use model::TVec;
    pub use std::collections::HashMap;
    pub use std::marker::PhantomData;
    pub use tensor::{Datum, DatumType, Tensor};
    pub use Result;
}

/// A tensor flowing between ops during evaluation.
///
/// A Value either owns its Tensor outright or shares it behind an `Arc`,
/// so several consumers can read the same tensor without copying it.
#[derive(Debug, Clone)]
pub enum Value {
    /// Exclusively-owned tensor.
    Owned(Tensor),
    /// Reference-counted tensor, possibly shared with other consumers.
    Shared(Arc<Tensor>),
}

impl Value {
    /// Turns any Value into a `Value::Shared`, wrapping an owned tensor in
    /// an `Arc` if necessary. Already-shared values are returned untouched.
    pub fn into_shared(self) -> Value {
        match self {
            Value::Owned(m) => Value::Shared(Arc::new(m)),
            Value::Shared(_) => self,
        }
    }

    /// Extracts the wrapped Tensor, cloning it if it is shared.
    pub fn into_tensor(self) -> Tensor {
        match self {
            Value::Owned(m) => m,
            Value::Shared(m) => m.as_ref().clone(),
        }
    }

    /// Returns a reference to the Tensor wrapped inside a Value.
    // FIX: the Owned arm previously returned `&m` (an `&&Tensor` relying on
    // deref coercion); match ergonomics — already used elsewhere in this
    // file — give `m: &Tensor` directly.
    pub fn as_tensor(&self) -> &Tensor {
        match self {
            Value::Owned(m) => m,
            Value::Shared(m) => m.as_ref(),
        }
    }

    /// Returns a shared copy of the Value, turning the one passed
    /// as argument into a Value::Shared if necessary.
    pub fn share(&mut self) -> Value {
        // This is somewhat ugly, but sadly we couldn't find any other
        // way to implement it. If we try to write something like:
        //   *self = Value::Shared(Arc::new(*m))
        // the borrow checker will complain about *m being moved out of
        // borrowed content, which makes sense but doesn't apply in our
        // case because we will "give m back" to the Value, except
        // wrapped around an Arc. The only way to get ownership of m is
        // to use mem::replace, which means we have to create a "dummy"
        // value to replace self first.
        if let Value::Owned(_) = self {
            let dummy = Value::Owned(Tensor::i32s(&[], &[0]).unwrap());
            let shared = match mem::replace(self, dummy) {
                Value::Owned(m) => Value::Shared(Arc::new(m)),
                // Unreachable: the outer `if let` guarantees Owned here.
                _ => panic!(),
            };

            *self = shared;
        }

        self.clone()
    }

    /// Consumes the Value and converts it into an ndarray of datum type `D`.
    // FIX: dropped the `'a` lifetime parameter — nothing in the signature
    // referenced it. Callers using `into_array::<f32>()` are unaffected.
    pub fn into_array<D: ::tensor::Datum>(self) -> ::Result<::ndarray::ArrayD<D>> {
        self.into_tensor().into_array()
    }

    /// Returns a read-only ndarray view of the wrapped tensor, without
    /// copying its data.
    pub fn to_array_view<'a, D: ::tensor::Datum>(
        &'a self,
    ) -> ::Result<::ndarray::ArrayViewD<'a, D>> {
        self.as_tensor().to_array_view()
    }
}

// Blanket conversion: anything a Tensor can be built from can be wrapped
// directly into an owned Value.
impl<M> From<M> for Value
where
    Tensor: From<M>,
{
    fn from(m: M) -> Value {
        Value::Owned(m.into())
    }
}

// An already reference-counted tensor becomes a shared Value with no copy.
impl From<Arc<Tensor>> for Value {
    fn from(m: Arc<Tensor>) -> Value {
        Value::Shared(m)
    }
}

impl ::std::ops::Deref for Value {
    type Target = Tensor;
    fn deref(&self) -> &Tensor {
        match self {
            &Value::Owned(ref m) => &m,
            &Value::Shared(ref m) => m.as_ref(),
        }
    }
}

impl PartialEq for Value {
    /// Two Values compare equal when their underlying tensors are equal,
    /// regardless of whether each side is owned or shared.
    fn eq(&self, other: &Value) -> bool {
        *self.as_tensor() == *other.as_tensor()
    }
}

/// An input handed to one evaluation step of a (possibly streaming) op.
#[derive(Debug, Clone)]
pub enum StepValue {
    /// A tensor fully known ahead of time (no streaming dimension).
    Const(Value),
    /// A tensor streamed chunk by chunk along one dimension.
    Stream(Stream),
}

/// The streaming state of one input at a given step.
#[derive(Debug, Clone)]
pub struct Stream {
    /// Which axis is streamed, and the stream's total length.
    pub info: StreamInfo,
    /// Position within the stream — presumably a chunk count; the unit is
    /// not established here (NOTE(review): confirm against the stepper).
    pub offset: u64,
    /// The new chunk delivered at this step, or None if this input did not
    /// receive one (see `Op::step` docs on multi-stream ops).
    pub chunk: Option<Value>,
}

/// Description of a streamed dimension.
#[derive(Debug, Copy, Clone, Default)]
pub struct StreamInfo {
    /// Index of the streamed axis.
    pub axis: usize,
    /// Length along that axis, possibly symbolic (TDim).
    pub len: TDim,
}

impl StepValue {
    /// Borrows the inner value: the constant itself, or the current chunk
    /// of a stream if one is pending.
    pub fn as_value(&self) -> Option<&Value> {
        match *self {
            StepValue::Const(ref v) => Some(v),
            StepValue::Stream(ref s) => s.chunk.as_ref(),
        }
    }

    /// Consumes self, yielding the constant or the pending stream chunk.
    pub fn into_value(self) -> Option<Value> {
        match self {
            StepValue::Const(v) => Some(v),
            StepValue::Stream(s) => s.chunk,
        }
    }

    /// Borrows the constant value; None for a stream.
    pub fn as_const(&self) -> Option<&Value> {
        if let StepValue::Const(ref v) = *self {
            Some(v)
        } else {
            None
        }
    }

    /// Consumes self, yielding the constant value; None for a stream.
    pub fn into_const(self) -> Option<Value> {
        if let StepValue::Const(v) = self {
            Some(v)
        } else {
            None
        }
    }

    /// Borrows the stream state; None for a constant.
    pub fn as_stream(&self) -> Option<&Stream> {
        if let StepValue::Stream(ref s) = *self {
            Some(s)
        } else {
            None
        }
    }

    /// Consumes self, yielding the stream state; None for a constant.
    pub fn into_stream(self) -> Option<Stream> {
        if let StepValue::Stream(s) = self {
            Some(s)
        } else {
            None
        }
    }

    /// Streaming metadata (axis and length); None for a constant.
    pub fn stream_info(&self) -> Option<StreamInfo> {
        match *self {
            StepValue::Stream(ref s) => Some(s.info),
            StepValue::Const(_) => None,
        }
    }

    /// True for the Const variant, false for Stream.
    pub fn is_const(&self) -> bool {
        self.as_const().is_some()
    }
}

// TODO(liautaud): Find a more generic way to do this.
/// A typed attribute value attached to an operation, surfaced through
/// `Op::get_attributes` for introspection and (optionally) serialization.
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[derive(Debug, Clone)]
pub enum Attr {
    I64(i64),
    Usize(usize),
    DatumType(DatumType),
    DataFormat(DataFormat),
    Padding(Padding),
    Tensor(Tensor),
    UsizeVec(Vec<usize>),
    IsizeVec(Vec<isize>),
}

/// A Tensorflow operation.
pub trait Op: Debug + objekt::Clone + Send + Sync + 'static + InferenceOp {
    /// Returns the attributes of the operation and their values.
    /// Defaults to an empty map for ops with nothing to report.
    fn get_attributes(&self) -> HashMap<&'static str, Attr> {
        hashmap!()
    }

    /// Evaluates the operation given the input tensors.
    /// The default errors out; concrete ops are expected to override it.
    fn eval(&self, _inputs: TVec<Value>) -> Result<TVec<Value>> {
        bail!("Unexpected call on op.eval(). {:?}", self)
    }

    /// Returns a new streaming buffer for the operation.
    fn new_buffer(&self) -> Box<OpBuffer> {
        Box::new(EmptyBuffer {})
    }

    /// Evaluates one step of the operation on the given input tensors.
    /// This is only implemented for operators which support streaming.
    ///
    /// The input tensors are annotated with an Option<usize>:
    /// - None if the tensor doesn't have a streaming dimension.
    /// - Option(d) if the tensor is being streamed on dimension d.
    ///
    /// If an input tensor has a streaming dimension, the corresponding
    /// Value will only contain a _chunk_ of input of size 1 along
    /// that dimension. Note that each chunk will only be passed once
    /// to the step function, so it should use the provided buffer to
    /// store whichever chunks it needs for future computations.
    ///
    /// The function should return Some(chunks) when it has computed
    /// new chunks, and None if it has computed an intermediary result
    /// successfully but doesn't have new output chunks ready yet.
    ///
    /// For operators like Concat, multiple input tensors might have a
    /// streaming dimension. In that case, at each call to step, only
    /// one of the streaming inputs will receive new chunk while the
    /// others will receive None.
    fn step(
        &self,
        _inputs: TVec<StepValue>,
        _buffer: &mut Box<OpBuffer>,
    ) -> Result<Option<TVec<Value>>> {
        bail!("Streaming is not available for operator {:?}", self)
    }

    /// Infers properties about the input and output tensors.
    ///
    /// The `inputs` and `outputs` arguments correspond to properties about
    /// the input and output tensors that are already known.
    ///
    /// When every input value is fully concrete, the op is constant-folded:
    /// it is evaluated once and the output fact is derived from the result.
    ///
    /// Returns Err in case of an unrecoverable error during the inference,
    /// and the refined properties about the inputs and outputs otherwise.
    fn infer_and_propagate(
        &self,
        inputs: TVec<TensorFact>,
        outputs: TVec<TensorFact>,
    ) -> Result<(TVec<TensorFact>, TVec<TensorFact>)> {
        let (infered_inputs, infered_outputs) = self.infer(inputs, outputs)?;

        if infered_inputs.iter().all(|i| i.value.is_concrete()) {
            let input_values = infered_inputs
                .iter()
                .map(|i| i.value.concretize().unwrap().clone().into())
                .collect(); // unwrap checked: is_concrete() held for every input
            // FIX: was `.pop().unwrap()`, which panicked if a buggy op
            // evaluated to zero outputs; surface that as an error instead.
            let output_value = match self.eval(input_values)?.pop() {
                Some(v) => v,
                None => bail!("Op {:?} evaluated to no outputs", self),
            };
            Ok((
                infered_inputs,
                tvec![::analyser::helpers::tensor_to_fact(
                    output_value.into_tensor(),
                )],
            ))
        } else {
            Ok((infered_inputs, infered_outputs))
        }
    }

    /// Last-chance hook to swap the op for a specialized variant once the
    /// final facts are known. Defaults to "no replacement".
    fn final_prep(
        &self,
        _inputs: TVec<TensorFact>,
        _outputs: TVec<TensorFact>,
    ) -> Result<Option<Box<Op>>> {
        Ok(None)
    }

    /// The constant this op always produces, if it is a constant op.
    fn const_value(&self) -> Option<Value> {
        None
    }

    /// True when the op's results may differ from the reference
    /// implementation by floating-point rounding.
    fn rounding_errors(&self) -> bool {
        false
    }
}

/// Shape/type inference interface, required of every Op.
pub trait InferenceOp {
    /// Refines the known facts about input and output tensors, returning
    /// the improved facts, or an error if they are contradictory.
    fn infer(
        &self,
        inputs: TVec<TensorFact>,
        outputs: TVec<TensorFact>,
    ) -> Result<(TVec<TensorFact>, TVec<TensorFact>)>;
}

// Makes Box<Op> cloneable despite Op being a trait object (objekt crate).
clone_trait_object!(Op);

// An op serializes as the map of its attributes (see `get_attributes`).
#[cfg(feature = "serialize")]
impl Serialize for Op {
    fn serialize<S>(&self, serializer: S) -> StdResult<S::Ok, S::Error>
    where
        S: Serializer,
    {
        self.get_attributes().serialize(serializer)
    }
}

/// Maps a TensorFlow op name to a constructor building the corresponding
/// Op from a protobuf node definition.
pub type OpRegister = HashMap<&'static str, fn(&::tfpb::node_def::NodeDef) -> Result<Box<Op>>>;

/// Factory turning protobuf node definitions into Op instances.
pub struct OpBuilder(OpRegister);

impl OpBuilder {
    /// Builds an OpBuilder whose registry is populated with every
    /// built-in op family.
    pub fn new() -> OpBuilder {
        let mut registry = OpRegister::new();
        // Each module contributes its op constructors to the shared registry.
        array::register_all_ops(&mut registry);
        cast::register_all_ops(&mut registry);
        konst::register_all_ops(&mut registry);
        math::register_all_ops(&mut registry);
        nn::register_all_ops(&mut registry);
        OpBuilder(registry)
    }

    /// Instantiates the op described by a protobuf node definition,
    /// falling back to an UnimplementedOp placeholder when the op name
    /// is not in the registry.
    pub fn build(&self, pb: &::tfpb::node_def::NodeDef) -> Result<Box<Op>> {
        if let Some(builder) = self.0.get(pb.get_op()) {
            builder(pb)
        } else {
            Ok(Box::new(unimpl::UnimplementedOp(
                pb.get_op().to_string(),
                pb.to_owned(),
            )))
        }
    }
}

/// A streaming buffer for a Tensorflow operation.
///
/// This is used during streaming evaluation of models. Each node is given
/// a mutable reference to a buffer which it can use to store intermediary
/// results between evaluation steps. Every operation must provide its own
/// buffer type (or use one of the general ones defined below), which must
/// implement the OpBuffer trait. It should return a new instance of it in
/// the `Op::new_buffer` method, and downcast it from OpBuffer in `step`.
pub trait OpBuffer: Downcast + Debug + objekt::Clone + Send + 'static {}
// Let Box<OpBuffer> be cloned, and downcast back to its concrete type.
clone_trait_object!(OpBuffer);
impl_downcast!(OpBuffer);

/// An empty buffer for operations which don't need one.
#[derive(Debug, Clone)]
pub struct EmptyBuffer {}

impl OpBuffer for EmptyBuffer {}

/// A buffer with a variable number of Value queues — one FIFO queue per
/// input slot, holding chunks that have arrived but not yet been consumed.
#[derive(Debug, Clone)]
pub struct QueuesBuffer(TVec<VecDeque<Value>>);

impl OpBuffer for QueuesBuffer {}

impl QueuesBuffer {
    /// Creates a new buffer with a given number of queues.
    pub fn new(size: usize) -> QueuesBuffer {
        QueuesBuffer(tvec![VecDeque::new(); size])
    }

    /// Appends a new Value to each queue in the buffer.
    ///
    /// Errors if more views than queues are given; a view without a value
    /// (a stream with no pending chunk) leaves its queue untouched.
    pub fn append(&mut self, views: TVec<StepValue>) -> Result<()> {
        if views.len() > self.0.len() {
            bail!("There are more input Values than queues in the buffer.");
        }

        for (i, view) in views.into_iter().enumerate() {
            if let Some(v) = view.into_value() {
                self.0[i].push_back(v);
            }
        }

        Ok(())
    }

    /// Returns an iterator over all the queues in the buffer.
    // FIX: this method only yields shared references, yet it demanded
    // `&'a mut self`, forcing callers to hold an exclusive borrow for a
    // read-only walk. Relaxing &mut -> & is backward-compatible: existing
    // callers with a mutable borrow still reborrow immutably.
    pub fn iter<'a>(&'a self) -> impl Iterator<Item = &'a VecDeque<Value>> {
        self.0.iter()
    }

    /// Returns a mutable iterator over all the queues in the buffer.
    pub fn iter_mut<'a>(&'a mut self) -> impl Iterator<Item = &'a mut VecDeque<Value>> {
        self.0.iter_mut()
    }
}

// Direct indexed access to the i-th queue; panics on out-of-range index,
// like slice indexing.
impl Index<usize> for QueuesBuffer {
    type Output = VecDeque<Value>;

    fn index(&self, index: usize) -> &VecDeque<Value> {
        &self.0[index]
    }
}

impl IndexMut<usize> for QueuesBuffer {
    fn index_mut(&mut self, index: usize) -> &mut VecDeque<Value> {
        &mut self.0[index]
    }
}