//! ## Models and their lifecycle
//!
//! In order to reason about the model and perform optimisations, a model
//! needs to be `typed`: every tensor exchanged between the nodes must have a
//! well-defined element type (f32, i32, etc.) and shape (e.g. [1, 12, 53, 13]).
//!
//! A model typically starts as an `InferenceModel`, with minimal or partial
//! tensor type information. At this stage, the application developer can add
//! type and shape hints (such as the element types and shapes of the model
//! inputs and outputs), and `tract` will perform type inference, propagating
//! this information through the graph. Hopefully `tract` will be able to
//! infer a type and shape for every tensor in the model.
//!
//! At this stage, the model can be converted into a `TypedModel`.
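//!
//! As a sketch of this lifecycle (assuming the `tract-onnx` front end; the
//! file name and input shape below are illustrative placeholders):
//!
//! ```ignore
//! use tract_onnx::prelude::*;
//!
//! // Load the serialized network as an InferenceModel.
//! let mut model = tract_onnx::onnx().model_for_path("model.onnx")?;
//! // Hint the element type and shape of the first input...
//! model.set_input_fact(0, InferenceFact::dt_shape(f32::datum_type(), tvec!(1, 3, 224, 224)))?;
//! // ...and let type inference propagate the information.
//! model.analyse(false)?;
//! // If all facts are now fully determined, conversion succeeds.
//! let typed: TypedModel = model.into_typed()?;
//! ```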
//!
//! `InferenceModel` and `TypedModel` are two variants of `ModelImpl`,
//! parameterized by a Fact implementation: `TypedModel` uses `TypedFact`,
//! which enforces complete determination of element type and shape and can
//! carry a constant value for the tensor, while `InferenceModel` uses
//! `InferenceFact`, which can handle partial information.
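//!
//! For illustration, facts with increasing levels of determination could be
//! built like this (a sketch; constructor names may vary between tract
//! versions):
//!
//! ```ignore
//! // Nothing known yet: any element type, any shape.
//! let anything = InferenceFact::default();
//! // Element type known, shape still open.
//! let floats = InferenceFact::dt(f32::datum_type());
//! // Fully determined: this fact can become a TypedFact.
//! let full = InferenceFact::dt_shape(f32::datum_type(), tvec!(1, 12, 53, 13));
//! ```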
//!
//! We call `declutter` the process of getting the network closer to a normal
//! form. This normal form is akin to an IR in compiler technology, and is the
//! preferred form on which tract optimisations are implemented.
//!
//! For instance, an Add node adding a constant input to a variable tensor
//! input would be replaced by a unary Add operator taking only the variable
//! input, with the constant to add stored as a fixed construction attribute.
//! The same decluttering process tries to replace framework-specific
//! operators (e.g. from TensorFlow) by tract-core operators: it is not
//! always possible to simply map TensorFlow operators to tract-core while
//! loading the network, as their interfaces can differ (what is an input,
//! what is an attribute), and constant propagation may be necessary before
//! the right core operator can be chosen.
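//!
//! Putting the whole lifecycle together (a sketch; `inference_model` stands
//! for a hypothetical, already loaded and analysed `InferenceModel`):
//!
//! ```ignore
//! let typed = inference_model.into_typed()?; // inference facts become typed facts
//! let decluttered = typed.declutter()?;      // normal form, tract-core operators
//! let optimized = decluttered.optimize()?;   // locally optimized operators
//! let plan = SimplePlan::new(optimized)?;    // runnable execution plan
//! ```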
//!
use std::collections::HashMap;
use std::str;
use std::borrow::Cow;

use itertools::Itertools;

pub mod compact;
pub mod dsl;
mod fact;
mod model;
mod node;
pub mod order;
mod patch;
pub mod translator;

pub use self::dsl::*;
pub use self::fact::*;
pub use self::model::*;
pub use self::node::*;
pub use self::order::eval_order;
pub use self::patch::ModelPatch;
pub use crate::ops::{Op, TypedOp};

use crate::ops::invariants;
use crate::plan::{SimplePlan, SimpleState};
use crate::TractResult;
use tract_linalg::hash::DynHash;

/// Common methods for all variants of model.
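///
/// As a sketch, the trait being object-safe allows generic introspection of
/// any model variant (the `dump` helper below is hypothetical):
///
/// ```ignore
/// fn dump(model: &dyn Model) {
///     for id in 0..model.nodes_len() {
///         println!("{}", model.node_display(id));
///     }
/// }
/// ```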
pub trait Model: downcast_rs::Downcast + std::fmt::Debug + dyn_clone::DynClone {
    /// Model label
    fn model_label(&self) -> Option<&str>;

    /// Lookup node id by name
    fn node_id_by_name(&self, name: &str) -> TractResult<usize>;

    /// Node name by id
    fn node_name(&self, id: usize) -> &str;

    /// Node op by id
    fn node_op(&self, id: usize) -> &dyn Op;

    /// Node inputs by id
    fn node_inputs(&self, id: usize) -> &[OutletId];

    /// Number of outputs for a node, by id.
    fn node_output_count(&self, id: usize) -> usize;

    /// Number of nodes
    fn nodes_len(&self) -> usize;

    /// Display-formatted node label
    fn node_display(&self, id: usize) -> String;

    /// Debug-formatted node label
    fn node_debug(&self, id: usize) -> String;

    /// Eval order for the model
    fn eval_order(&self) -> TractResult<Vec<usize>>;

    /// Eval order for the model, overriding input and output nodes
    fn eval_order_for_io(&self, inputs: &[usize], outputs: &[usize]) -> TractResult<Vec<usize>>;

    /// Inputs of the model
    fn input_outlets(&self) -> &[OutletId];

    /// Outputs of the model
    fn output_outlets(&self) -> &[OutletId];

    /// TypedFact for an outlet
    fn outlet_typedfact(&self, outlet: OutletId) -> TractResult<TypedFact>;

    /// Short outlet formatter (id plus fact)
    fn outlet_fact_format(&self, outlet: OutletId) -> String;

    /// Label for an outlet
    fn outlet_label(&self, id: OutletId) -> Option<&str>;

    /// List consumers of an outlet
    fn outlet_successors(&self, outlet: OutletId) -> &[InletId];

    /// Nested models, with label (for audit).
    fn nested_models(&self, node: usize) -> Vec<(Cow<str>, &dyn Model, Vec<String>, Vec<String>)>;
}

impl_downcast!(Model);

/// A model with completely determined types and shapes.
pub type TypedModel = ModelImpl<TypedFact, Box<dyn TypedOp>>;
/// Node for TypedModel graph
pub type TypedNode = BaseNode<TypedFact, Box<dyn TypedOp>>;
/// A ModelPatch for TypedModel.
pub type TypedModelPatch = ModelPatch<TypedFact, Box<dyn TypedOp>>;
/// An execution plan for TypedModel.
pub type TypedSimplePlan<M> = SimplePlan<TypedFact, Box<dyn TypedOp>, M>;
/// A runnable TypedModel (new name for SimplePlan).
pub type TypedRunnableModel<M> = RunnableModel<TypedFact, Box<dyn TypedOp>, M>;
/// An execution state for TypedModel.
pub type TypedSimpleState<M, P> = SimpleState<TypedFact, Box<dyn TypedOp>, M, P>;

/// A runnable model with fixed inputs and outputs.
pub type RunnableModel<F, O, M> = SimplePlan<F, O, M>;

impl TypedModel {
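    /// A hash of the model, used by `declutter` below to detect when
    /// optimisation passes have cycled back to the starting point.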
    pub fn signature(&self) -> u64 {
        use std::hash::{Hash, Hasher};
        let mut hasher = std::collections::hash_map::DefaultHasher::new();
        self.hash(&mut hasher);
        hasher.finish()
    }

    /// Perform declutter passes on the network.
    pub fn declutter(self) -> TractResult<TypedModel> {
        let started = self.signature();
        let mut model = self;
        let model_inputs = model.input_outlets()?.len();
        let model_outputs = model.output_outlets()?.len();
        loop {
            let mut done_something = false;
            for p in crate::optim::declutter() {
                debug!("done_something {:?} before {:?}", done_something, p);
                // Run the pass first: short-circuiting on `done_something`
                // would skip every remaining pass once one has succeeded.
                done_something = p.pass(&mut model)? || done_something;
                debug!("done_something {:?} after {:?}", done_something, p);
                if cfg!(debug_assertions) {
                    model.check_edges()?;
                    assert_eq!(model.input_outlets()?.len(), model_inputs);
                    assert_eq!(model.output_outlets()?.len(), model_outputs);
                }
            }
            if !done_something || model.signature() == started {
                break;
            }
            model = compact::compact(&model)?;
        }
        compact::compact(&model)
    }

    /// Translate the graph to locally optimized operators (LIR or MIR ops).
    pub fn optimize(self) -> TractResult<TypedModel> {
        let mut model = self;
        loop {
            let mut done_something = false;
            for p in crate::optim::codegen() {
                // As in declutter, run the pass before or-ing the flag.
                done_something = p.pass(&mut model)? || done_something;
                if cfg!(debug_assertions) {
                    model.check_edges()?;
                }
            }
            if !done_something {
                break;
            }
            model = compact::compact(&model)?;
        }
        Ok(model)
    }

    pub fn invariants(&self) -> TractResult<invariants::Invariants> {
        invariants::for_model(self)
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn test() {
        fn is_sync<T: Sync>() {}
        is_sync::<TypedModel>();
    }
}