var searchIndex = {}; searchIndex["timely"] = {"doc":"Timely dataflow is framework for managing and executing data-parallel dataflow computations.","items":[[8,"Push","timely","Pushing elements of type `T`.",null,null],[10,"push","","Pushes `element` and provides the opportunity to take ownership.",0,null],[11,"send","","Pushes `element` and drops any resulting resources.",0,null],[11,"done","","Pushes `None`, conventionally signalling a flush.",0,null],[8,"Pull","","Pulling elements of type `T`.",null,null],[10,"pull","","Pulls an element and provides the opportunity to take ownership.",1,null],[11,"recv","","Takes an `Option<T>` and leaves `None` behind.",1,null],[4,"Configuration","","Possible configurations for the communication infrastructure.",null,null],[13,"Thread","","Use one thread.",2,null],[13,"Process","","Use one process with an indicated number of threads.",2,null],[13,"Cluster","","Expect multiple processes indicated by `(threads, process, host_list, report)`.",2,null],[0,"progress","","Progress tracking mechanisms to support notification in timely dataflow",null,null],[0,"count_map","timely::progress","A mapping from general types `T` to `i64`, with zero values absent.",null,null],[3,"CountMap","timely::progress::count_map","",null,null],[11,"fmt","","",3,null],[11,"clone","","",3,null],[11,"default","","",3,{"inputs":[],"output":{"name":"countmap"}}],[11,"update","","",3,null],[11,"into_inner","","",3,null],[11,"iter","","",3,null],[11,"clear","","",3,null],[11,"len","","",3,null],[11,"pop","","",3,null],[11,"new","","",3,{"inputs":[],"output":{"name":"countmap"}}],[11,"new_from","","",3,{"inputs":[{"name":"t"},{"name":"i64"}],"output":{"name":"countmap"}}],[11,"drain_into","","",3,null],[11,"extend","","",3,null],[0,"frontier","timely::progress","Tracks minimal sets of mutually incomparable elements of a partial order.",null,null],[3,"Antichain","timely::progress::frontier","A set of mutually incomparable elements.",null,null],[3,"MutableAntichain","","An antichain based on a multiset whose elements frequencies can be updated.",null,null],[11,"fmt","","",4,null],[11,"clone","","",4,null],[11,"default","","",4,{"inputs":[],"output":{"name":"antichain"}}],[11,"insert","","Updates the `Antichain` if the element is not greater than or equal to some present element.",4,null],[11,"new","","Creates a new empty `Antichain`.",4,{"inputs":[],"output":{"name":"antichain"}}],[11,"from_elem","","Creates a new singleton `Antichain`.",4,{"inputs":[{"name":"t"}],"output":{"name":"antichain"}}],[11,"elements","","Reveals the elements in the `Antichain`.",4,null],[11,"clone","","",5,null],[11,"fmt","","",5,null],[11,"default","","",5,{"inputs":[],"output":{"name":"mutableantichain"}}],[11,"new","","Creates a new empty `MutableAntichain`.",5,{"inputs":[],"output":{"name":"mutableantichain"}}],[11,"clear","","",5,null],[11,"elements","","Reveals the element in the `MutableAntichain`.",5,null],[11,"new_bottom","","Creates a new singleton `MutableAntichain`.",5,{"inputs":[{"name":"t"}],"output":{"name":"mutableantichain"}}],[11,"empty","","Returns true if there are no elements in the `MutableAntichain`.",5,null],[11,"lt","","Returns true if any item in the `MutableAntichain` is strictly less than the argument.",5,null],[11,"le","","Returns true if any item in the `MutableAntichain` is less than or equal to the argument.",5,null],[11,"count","","Returns the number of times an element exists in the set.",5,null],[11,"update_into_cm","","",5,null],[11,"update_weight","","",5,null],[11,"update","","",5,null],[11,"update_iter_and","","",5,null],[11,"test_size","","",5,null],[11,"update_and","","",5,null],[0,"nested","timely::progress","Coordination of progress information between a scope-as-operator and its children operators.",null,null],[0,"pointstamp_counter","timely::progress::nested","Manages pointstamp counts (timestamp, location) within a sub operator.",null,null],[3,"PointstampCounter","timely::progress::nested::pointstamp_counter","",null,null],[12,"source","","",6,null],[12,"target","","",6,null],[12,"pushed","","",6,null],[11,"default","","",6,{"inputs":[],"output":{"name":"pointstampcounter"}}],[11,"update_target","","",6,null],[11,"update_source","","",6,null],[11,"clear","","",6,null],[11,"allocate_for_operator","","",6,null],[0,"summary","timely::progress::nested","Path summaries that are either child local, or leave the scope and re-enter from the parent.",null,null],[4,"Summary","timely::progress::nested::summary","",null,null],[13,"Local","","",7,null],[13,"Outer","","",7,null],[11,"fmt","","",7,null],[11,"eq","","",7,null],[11,"ne","","",7,null],[11,"clone","","",7,null],[11,"default","","",7,{"inputs":[],"output":{"name":"summary"}}],[11,"partial_cmp","","",7,null],[11,"results_in","","",7,null],[11,"followed_by","","",7,null],[11,"fmt","","",7,null],[0,"product","timely::progress::nested","A pair timestamp suitable for use with the product partial order.",null,null],[3,"Product","timely::progress::nested::product","We use `Product` rather than `(TOuter, TInner)`` so that we can derive our own `PartialOrd`,\nbecause Rust just uses the lexicographic total order.",null,null],[12,"outer","","",8,null],[12,"inner","","",8,null],[11,"cmp","","",8,null],[11,"default","","",8,{"inputs":[],"output":{"name":"product"}}],[11,"eq","","",8,null],[11,"ne","","",8,null],[11,"hash","","",8,null],[11,"clone","","",8,null],[11,"new","","",8,{"inputs":[{"name":"touter"},{"name":"tinner"}],"output":{"name":"product"}}],[11,"fmt","","",8,null],[11,"partial_cmp","","",8,null],[11,"le","","",8,null],[11,"ge","","",8,null],[11,"embalm","","",8,null],[11,"entomb","","",8,null],[11,"exhume","","",8,null],[0,"subgraph","timely::progress::nested","Implements `Operate` for a scoped collection of child operators.",null,null],[3,"Source","timely::progress::nested::subgraph","",null,null],[12,"index","","",9,null],[12,"port","","",9,null],[3,"Target","","",null,null],[12,"index","","",10,null],[12,"port","","",10,null],[3,"Subgraph","","",null,null],[12,"path","","",11,null],[12,"index","","",11,null],[11,"fmt","","",9,null],[11,"eq","","",9,null],[11,"ne","","",9,null],[11,"clone","","",9,null],[11,"fmt","","",10,null],[11,"eq","","",10,null],[11,"ne","","",10,null],[11,"clone","","",10,null],[11,"name","","",11,null],[11,"local","","",11,null],[11,"inputs","","",11,null],[11,"outputs","","",11,null],[11,"get_internal_summary","","",11,null],[11,"set_external_summary","","",11,null],[11,"push_external_progress","","",11,null],[11,"pull_internal_progress","","",11,null],[11,"new_input","","",11,null],[11,"new_output","","",11,null],[11,"connect","","",11,null],[11,"new_from","","",11,{"inputs":[{"name":"a"},{"name":"usize"},{"name":"vec"}],"output":{"name":"subgraph"}}],[11,"allocate_child_id","","",11,null],[11,"add_child","","",11,null],[0,"timestamp","timely::progress","A partially ordered measure of progress at each timely dataflow location.",null,null],[3,"RootTimestamp","timely::progress::timestamp","An empty timestamp used by the root scope.",null,null],[3,"RootSummary","","An empty path summary for root timestamps.",null,null],[8,"Timestamp","","A composite trait for types that serve as timestamps in timely dataflow.",null,null],[16,"Summary","","",12,null],[8,"PathSummary","","A summary of how a timestamp advances along a timely dataflow path.",null,null],[10,"results_in","","Advances a timestamp according to the timestamp actions on the path.",13,null],[10,"followed_by","","Composes this path summary with another path summary.",13,null],[11,"default","","",14,{"inputs":[],"output":{"name":"roottimestamp"}}],[11,"eq","","",14,null],[11,"partial_cmp","","",14,null],[11,"cmp","","",14,null],[11,"hash","","",14,null],[11,"clone","","",14,null],[11,"fmt","","",14,null],[11,"new","","Constructs a new `Product<RootTimestamp,T>`.",14,{"inputs":[{"name":"t"}],"output":{"name":"product"}}],[11,"default","","",15,{"inputs":[],"output":{"name":"rootsummary"}}],[11,"fmt","","",15,null],[11,"eq","","",15,null],[11,"partial_cmp","","",15,null],[11,"cmp","","",15,null],[11,"clone","","",15,null],[11,"results_in","","",15,null],[11,"followed_by","","",15,null],[0,"operate","timely::progress","Methods which describe an operators topology, and the progress it makes.",null,null],[8,"Operate","timely::progress::operate","Methods for describing an operators topology, and the progress it makes.",null,null],[11,"local","","Indicates if the operator is strictly local to this worker.",16,null],[10,"inputs","","The number of inputs.",16,null],[10,"outputs","","The number of outputs.",16,null],[11,"get_internal_summary","","Fetches summary information about internal structure of the operator.",16,null],[11,"set_external_summary","","Presents summary information about the external structure around the operator.",16,null],[11,"push_external_progress","","Reports a summary of progress statements external to the operator and its peer group.",16,null],[10,"pull_internal_progress","","Retrieves a summary of progress statements internal to the operator.",16,null],[10,"name","","A descripitive name for the operator",16,null],[11,"notify_me","","Indicates of whether the operator requires `push_external_progress` information or not.",16,null],[0,"broadcast","timely::progress","Broadcasts progress information among workers.",null,null],[3,"Progcaster","timely::progress::broadcast","Manages broadcasting of progress updates to and receiving updates from workers.",null,null],[6,"ProgressVec","","A list of progress updates corresponding to ((child_scope, [in/out]_port, timestamp), delta)",null,null],[11,"new","","Creates a new `Progcaster` using a channel from the supplied allocator.",17,{"inputs":[{"name":"a"}],"output":{"name":"progcaster"}}],[11,"send_and_recv","","Sends and receives progress updates, broadcasting the contents of `messages` and `internal`,\nand updating each with updates from other workers.",17,null],[0,"dataflow","timely","Abstractions for timely dataflow programming.",null,null],[0,"operators","timely::dataflow","Extension traits for `Stream` implementing various operators.",null,null],[3,"InputHandle","timely::dataflow::operators","Handle to an operator's input stream.",null,null],[3,"OutputHandle","","Handle to an operator's output stream.",null,null],[3,"Notificator","","Tracks requests for notification and delivers available notifications.",null,null],[3,"Capability","","A capability for timestamp `t` represents a permit for an operator that holds the capability\nto send data and request notifications at timestamp `t`.",null,null],[0,"enterleave","","Extension traits to move a `Stream` between an outer `Scope` and inner `Scope`.",null,null],[8,"Enter","timely::dataflow::operators::enterleave","Extension trait to move a `Stream` into a child of its current `Scope`.",null,null],[10,"enter","","Moves the `Stream` argument into a child of its current `Scope`.",18,null],[8,"EnterAt","","Extension trait to move a `Stream` into a child of its current `Scope` setting the timestamp for each element.",null,null],[10,"enter_at","","Moves the `Stream` argument into a child of its current `Scope` setting the timestamp for each element by `initial`.",19,null],[8,"Leave","","Extension trait to move a `Stream` to the parent of its current `Scope`.",null,null],[10,"leave","","Moves a `Stream` to the parent of its current `Scope`.",20,null],[0,"unary","timely::dataflow::operators","Methods to construct generic streaming and blocking unary operators.",null,null],[8,"Unary","timely::dataflow::operators::unary","Methods to construct generic streaming and blocking unary operators.",null,null],[10,"unary_stream","","Creates a new dataflow operator that partitions its input stream by a parallelization\nstrategy `pact`, and repeatedly invokes `logic` which can read from the input stream and\nwrite to the output stream.",21,null],[10,"unary_notify","","Creates a new dataflow operator that partitions its input stream by a parallelization\nstrategy `pact`, and repeatedly invokes `logic` which can read from the input stream,\nwrite to the output stream, and request and receive notifications. The method also requires\na vector of the initial notifications the operator requires (commonly none).",21,null],[0,"queue","timely::dataflow::operators","",null,null],[8,"Queue","timely::dataflow::operators::queue","",null,null],[10,"queue","","",22,null],[0,"input","timely::dataflow::operators","Create new `Streams` connected to external inputs.",null,null],[3,"Handle","timely::dataflow::operators::input","A handle to an input `Stream`, used to introduce data to a timely dataflow computation.",null,null],[8,"Input","","Create a new `Stream` and `Handle` through which to supply input.",null,null],[10,"new_input","","Create a new `Stream` and `Handle` through which to supply input.",23,null],[11,"send","","Sends one record into the corresponding timely dataflow `Stream`, at the current epoch.",24,null],[11,"advance_to","","Advances the current epoch to `next`.",24,null],[11,"close","","Closes the input.",24,null],[11,"epoch","","Reports the current epoch.",24,null],[11,"drop","","",24,null],[0,"feedback","timely::dataflow::operators","Create cycles in a timely dataflow graph.",null,null],[3,"Observer","timely::dataflow::operators::feedback","",null,null],[3,"Handle","","A handle used to bind the source of a loop variable.",null,null],[8,"LoopVariable","","Creates a `Stream` and a `Handle` to later bind the source of that `Stream`.",null,null],[10,"loop_variable","","Creates a `Stream` and a `Handle` to later bind the source of that `Stream`.",25,null],[8,"ConnectLoop","","Connect a `Stream` to the input of a loop variable.",null,null],[10,"connect_loop","","Connect a `Stream` to be the input of a loop variable.",26,null],[11,"push","","",27,null],[0,"concat","timely::dataflow::operators","Merges the contents of multiple streams.",null,null],[8,"Concat","timely::dataflow::operators::concat","Merge the contents of two streams.",null,null],[10,"concat","","Merge the contents of two streams.",28,null],[8,"Concatenate","","Merge the contents of multiple streams.",null,null],[10,"concatenate","","Merge the contents of multiple streams.",29,null],[0,"partition","timely::dataflow::operators","Partition a stream of records into multiple streams.",null,null],[3,"Operator","timely::dataflow::operators::partition","",null,null],[8,"Partition","","Partition a stream of records into multiple streams.",null,null],[10,"partition","","Produces `parts` output streams, containing records produced and assigned by `route`.",30,null],[11,"new","","",31,{"inputs":[{"name":"pullcounter"},{"name":"vec"},{"name":"f"}],"output":{"name":"operator"}}],[11,"name","","",31,null],[11,"inputs","","",31,null],[11,"outputs","","",31,null],[11,"pull_internal_progress","","",31,null],[11,"notify_me","","",31,null],[0,"map","timely::dataflow::operators","Extension methods for `Stream` based on record-by-record transformation.",null,null],[8,"Map","timely::dataflow::operators::map","Extension trait for `Stream`.",null,null],[10,"map","","Consumes each element of the stream and yields a new element.",32,null],[10,"map_in_place","","Updates each element of the stream and yields the element, re-using memory where possible.",32,null],[10,"flat_map","","Consumes each element of the stream and yields some number of new elements.",32,null],[0,"inspect","timely::dataflow::operators","Extension trait and implementation for observing and action on streamed data.",null,null],[8,"Inspect","timely::dataflow::operators::inspect","Methods to inspect records and batches of records on a stream.",null,null],[10,"inspect","","Runs a supplied closure on each observed data element.",33,null],[10,"inspect_batch","","Runs a supplied closure on each observed data batch (time and data slice).",33,null],[0,"filter","timely::dataflow::operators","Filters a stream by a predicate.",null,null],[8,"Filter","timely::dataflow::operators::filter","Extension trait for filtering.",null,null],[10,"filter","","Returns a new instance of `self` containing only records satisfying `predicate`.",34,null],[0,"binary","timely::dataflow::operators","Methods to construct generic streaming and blocking binary operators.",null,null],[8,"Binary","timely::dataflow::operators::binary","Methods to construct generic streaming and blocking binary operators.",null,null],[10,"binary_stream","","Creates a new dataflow operator that partitions each of its input stream by a parallelization\nstrategy `pact`, and repeatedly invokes `logic` which can read from the input streams and\nwrite to the output stream.",35,null],[10,"binary_notify","","Creates a new dataflow operator that partitions its input stream by a parallelization\nstrategy `pact`, and repeatedly invokes `logic` which can read from the input streams,\nwrite to the output stream, and request and receive notifications. The method also requires\na vector of the initial notifications the operator requires (commonly none).",35,null],[0,"delay","timely::dataflow::operators","",null,null],[8,"Delay","timely::dataflow::operators::delay","",null,null],[10,"delay","","",36,null],[10,"delay_batch","","",36,null],[0,"exchange","timely::dataflow::operators","Exchange records between workers.",null,null],[8,"Exchange","timely::dataflow::operators::exchange","Exchange records between workers.",null,null],[10,"exchange","","Exchange records so that all records with the same `route` are at the same worker.",37,null],[0,"broadcast","timely::dataflow::operators","Broadcast records to all workers.",null,null],[8,"Broadcast","timely::dataflow::operators::broadcast","Broadcast records to all workers.",null,null],[10,"broadcast","","Broadcast records to all workers.",38,null],[0,"probe","timely::dataflow::operators","Monitor progress at a `Stream`.",null,null],[3,"Handle","timely::dataflow::operators::probe","Reports information about progress at the probe.",null,null],[8,"Probe","","Monitors progress at a `Stream`.",null,null],[10,"probe","","Constructs a progress probe which indicates which timestamps have elapsed at the operator.",39,null],[11,"lt","","returns true iff the frontier is strictly less than `time`.",40,null],[11,"le","","returns true iff the frontier is less than or equal to `time`.",40,null],[11,"done","","returns true iff the frontier is empty.",40,null],[0,"to_stream","timely::dataflow::operators","Conversion to the `Stream` type from iterators.",null,null],[8,"ToStream","timely::dataflow::operators::to_stream","",null,null],[10,"to_stream","","Converts an iterator to a timely `Stream`, with records at the default time.",41,null],[0,"capture","timely::dataflow::operators","Operators which capture and replay streams of records.",null,null],[3,"EventLink","timely::dataflow::operators::capture","A linked list of Event<T, D>.",null,null],[12,"event","","An event.",42,null],[12,"next","","The next event, if it exists.",42,null],[3,"EventWriter","","A wrapper for `W: Write` implementing `EventPusher<T, D>`.",null,null],[3,"EventReader","","A Wrapper for `R: Read` implementing `EventIterator<T, D>`.",null,null],[4,"Event","","Data and progres events of the captured stream.",null,null],[13,"Start","","",43,null],[13,"Progress","","Progress received via `push_external_progress`.",43,null],[13,"Messages","","Messages received via the data stream.",43,null],[8,"Capture","","Capture a stream of timestamped data for later replay.",null,null],[10,"capture_into","","Captures a stream of timestamped data for later replay.",44,null],[8,"EventIterator","","Iterates over contained `Event<T, D>`.",null,null],[10,"next","","Iterates over references to `Event<T, D>` elements.",45,null],[8,"EventPusher","","Receives `Event<T, D>` events.",null,null],[10,"push","","Provides a new `Event<T, D>` to the pusher.",46,null],[8,"Replay","","Replay a capture stream into a scope with the same timestamp.",null,null],[10,"replay_into","","Replays `self` into the provided scope, as a `Stream<S, D>`.",47,null],[11,"embalm","","",43,null],[11,"entomb","","",43,null],[11,"exhume","","",43,null],[11,"new","","",42,{"inputs":[],"output":{"name":"eventlink"}}],[11,"new","","",48,{"inputs":[{"name":"w"}],"output":{"name":"eventwriter"}}],[11,"push","","",48,null],[11,"new","","",49,{"inputs":[{"name":"r"}],"output":{"name":"eventreader"}}],[11,"next","","",49,null],[0,"aggregation","timely::dataflow::operators","Aggregation operators of various flavors",null,null],[0,"state_machine","timely::dataflow::operators::aggregation","General purpose state transition operator.",null,null],[8,"StateMachine","timely::dataflow::operators::aggregation::state_machine","Generic state-transition machinery: each key has a state, and receives a sequence of events.\nEvents are applied in time-order, but no other promises are made. Each state transition can\nproduce output, which is sent. ",null,null],[10,"state_machine","","Tracks a state for each presented key, using user-supplied state transition logic.",50,null],[0,"aggregate","timely::dataflow::operators::aggregation","General purpose intra-timestamp aggregation",null,null],[8,"Aggregate","timely::dataflow::operators::aggregation::aggregate","Generic intra-timestamp aggregation",null,null],[10,"aggregate","","Aggregates data of the form `(key, val)`, using user-supplied logic.",51,null],[11,"next","timely::dataflow::operators","Reads the next input buffer (at some timestamp `t`) and a corresponding capability for `t`.\nThe timestamp `t` of the input buffer can be retrieved by invoking `.time()` on the capability.\nReturns `None` when there's no more data available.",52,null],[11,"for_each","","Repeatedly calls `logic` till exhaustion of the available input data.\n`logic` receives a capability and an input buffer.",52,null],[11,"session","","Obtains a session that can send data at the timestamp associated with capability `cap`.",53,null],[11,"new","","",54,{"inputs":[],"output":{"name":"notificator"}}],[11,"update_frontier_from_cm","","Updates the `Notificator`'s frontiers from a `CountMap` per input.",54,null],[11,"frontier","","Reveals the elements in the frontier of the indicated input.",54,null],[11,"notify_at","","Requests a notification at the time associated with capability `cap`. Takes ownership of\nthe capability.",54,null],[11,"for_each","","Repeatedly calls `logic` till exhaustion of the available notifications.",54,null],[11,"next","","Retrieve the next available notification.",54,null],[11,"time","","The timestamp associated with this capability.",55,null],[11,"delayed","","Makes a new capability for a timestamp that's greater then the timestamp associated with\nthe source capability (`self`).",55,null],[11,"drop","","",55,null],[11,"clone","","",55,null],[11,"deref","","",55,null],[11,"fmt","","",55,null],[0,"channels","timely::dataflow","Structured communication between timely dataflow operators.",null,null],[0,"pushers","timely::dataflow::channels","",null,null],[0,"tee","timely::dataflow::channels::pushers","A `Push` implementor with a list of `Box<Push>` to forward pushes to.",null,null],[3,"Tee","timely::dataflow::channels::pushers::tee","Wraps a shared list of `Box<Push>` to forward pushes to. Owned by `Stream`.",null,null],[3,"TeeHelper","","A shared list of `Box<Push>` used to add `Push` implementors.",null,null],[11,"push","","",56,null],[11,"new","","Allocates a new pair of `Tee` and `TeeHelper`.",56,null],[11,"clone","","",56,null],[11,"add_pusher","","Adds a new `Push` implementor to the list of recipients shared with a `Stream`.",57,null],[11,"clone","","",57,null],[0,"exchange","timely::dataflow::channels::pushers","",null,null],[3,"Exchange","timely::dataflow::channels::pushers::exchange","",null,null],[11,"new","","",58,{"inputs":[{"name":"vec"},{"name":"h"}],"output":{"name":"exchange"}}],[11,"push","","",58,null],[0,"counter","timely::dataflow::channels::pushers","",null,null],[3,"Counter","timely::dataflow::channels::pushers::counter","",null,null],[11,"push","","",59,null],[11,"new","","",59,{"inputs":[{"name":"p"},{"name":"rc"}],"output":{"name":"counter"}}],[11,"pull_progress","","",59,null],[0,"buffer","timely::dataflow::channels::pushers","",null,null],[3,"Buffer","timely::dataflow::channels::pushers::buffer","",null,null],[3,"Session","","",null,null],[11,"new","","",60,{"inputs":[{"name":"p"}],"output":{"name":"buffer"}}],[11,"session","","Returns a Session, which accepts data to send at the associated time",60,null],[11,"inner","","",60,null],[11,"cease","","",60,null],[11,"give","","",61,null],[11,"give_iterator","","",61,null],[11,"give_content","","",61,null],[0,"pullers","timely::dataflow::channels","",null,null],[0,"counter","timely::dataflow::channels::pullers","",null,null],[3,"Counter","timely::dataflow::channels::pullers::counter","",null,null],[11,"next","","",62,null],[11,"new","","",62,{"inputs":[{"name":"box"}],"output":{"name":"counter"}}],[11,"pull_progress","","",62,null],[0,"message","timely::dataflow::channels","Core type for communicating a collection of `D: Data` records.",null,null],[3,"Message","timely::dataflow::channels::message","",null,null],[12,"time","","",63,null],[12,"data","","",63,null],[12,"from","","",63,null],[12,"seq","","",63,null],[4,"Content","","",null,null],[13,"Bytes","","",64,null],[13,"Typed","","",64,null],[11,"clone","","",63,null],[11,"new","","",63,{"inputs":[{"name":"t"},{"name":"content"},{"name":"usize"},{"name":"usize"}],"output":{"name":"message"}}],[11,"into_bytes","","",63,null],[11,"from_bytes","","",63,{"inputs":[{"name":"vec"}],"output":{"name":"self"}}],[11,"clone","","",64,null],[11,"default_length","","Default number of elements in a typed allocated message. This could vary as a function of\n`std::mem::size_of::<D>()`, so is left as a method rather than a constant.",64,{"inputs":[],"output":{"name":"usize"}}],[11,"len","","The length of the underlying typed vector.",64,null],[11,"from_typed","","Constructs a `Message` from typed data, replacing its argument with `Vec::new()`.",64,{"inputs":[{"name":"vec"}],"output":{"name":"content"}}],[11,"into_typed","","Returns the typed vector, cleared, or a Vec::new() if the data are binary (and drops them\non the floor, I guess! Ouch.\nALLOC : dropping of binary data. likely called only by persons who pushed typed data on,\nALLOC : so perhaps not all that common. Could put a panic! here just for fun! :D\nALLOC : casual dropping of contents of `data`, which might have allocated memory.",64,null],[11,"push_at","","",64,{"inputs":[{"name":"vec"},{"name":"t"},{"name":"p"}],"output":null}],[11,"replace_with","","",64,null],[11,"deref","","",64,null],[11,"deref_mut","","",64,null],[0,"pact","timely::dataflow::channels","",null,null],[3,"Pipeline","timely::dataflow::channels::pact","",null,null],[3,"Exchange","","",null,null],[3,"Pusher","","",null,null],[3,"Puller","","",null,null],[8,"ParallelizationContract","","",null,null],[10,"connect","","",65,null],[11,"connect","","",66,null],[11,"new","","",67,{"inputs":[{"name":"f"}],"output":{"name":"exchange"}}],[11,"connect","","",67,null],[11,"new","","",68,{"inputs":[{"name":"box"},{"name":"usize"},{"name":"usize"},{"name":"usize"}],"output":{"name":"pusher"}}],[11,"push","","",68,null],[11,"new","","",69,{"inputs":[{"name":"box"},{"name":"usize"},{"name":"usize"}],"output":{"name":"puller"}}],[11,"pull","","",69,null],[0,"scopes","timely::dataflow","Hierarchical organization of timely dataflow graphs.",null,null],[0,"root","timely::dataflow::scopes","",null,null],[3,"Root","timely::dataflow::scopes::root","A `Root` is the entry point to a timely dataflow computation. It wraps a `Allocate`,\nand has a slot for one child `Operate`. The primary intended use of `Root` is through its\nimplementation of the `Scope` trait.",null,null],[11,"new","","",70,{"inputs":[{"name":"a"}],"output":{"name":"root"}}],[11,"step","","",70,null],[11,"index","","",70,null],[11,"peers","","",70,null],[11,"name","","",70,null],[11,"addr","","",70,null],[11,"add_edge","","",70,null],[11,"add_operator","","",70,null],[11,"add_operator_with_index","","",70,null],[11,"new_identifier","","",70,null],[11,"new_subscope","","",70,null],[11,"index","","",70,null],[11,"peers","","",70,null],[11,"allocate","","",70,null],[11,"clone","","",70,null],[0,"child","timely::dataflow::scopes","",null,null],[3,"Child","timely::dataflow::scopes::child","A `Child` wraps a `Subgraph` and a parent `G: Scope`. It manages the addition\nof `Operate`s to a subgraph, and the connection of edges between them.",null,null],[12,"subgraph","","",71,null],[12,"parent","","",71,null],[11,"name","","",71,null],[11,"addr","","",71,null],[11,"add_edge","","",71,null],[11,"add_operator_with_index","","",71,null],[11,"add_operator","","",71,null],[11,"new_identifier","","",71,null],[11,"new_subscope","","",71,null],[11,"index","","",71,null],[11,"peers","","",71,null],[11,"allocate","","",71,null],[11,"clone","","",71,null],[8,"Scope","timely::dataflow::scopes","The fundamental operations required to add and connect operators in a timely dataflow graph.",null,null],[16,"Timestamp","","The timestamp associated with data in this scope.",72,null],[10,"name","","A useful name describing the scope.",72,null],[10,"addr","","A sequence of scope identifiers describing the path from the `Root` to this scope.",72,null],[10,"add_edge","","Connects a source of data with a target of the data. This only links the two for\nthe purposes of tracking progress, rather than effect any data movement itself.",72,null],[10,"add_operator","","Adds a child `Operate` to the builder's scope. Returns the new child's index.",72,null],[10,"add_operator_with_index","","",72,null],[10,"new_subscope","","Creates a new `Subgraph` with timestamp `T`. Used by `scoped`, but unlikely to be\ncommonly useful to end users.",72,null],[10,"new_identifier","","Allocates a new locally unique identifier.",72,null],[11,"scoped","","Creates a `Subgraph` from a closure acting on a `Child` scope, and returning\nwhatever the closure returns.",72,null],[0,"stream","timely::dataflow","A handle to a typed stream of timely data.",null,null],[3,"Stream","timely::dataflow::stream","Abstraction of a stream of `D: Data` records timestamped with `S::Timestamp`.",null,null],[11,"clone","","",73,null],[11,"connect_to","","",73,null],[11,"new","","",73,{"inputs":[{"name":"source"},{"name":"teehelper"},{"name":"s"}],"output":{"name":"self"}}],[11,"name","","",73,null],[11,"scope","","",73,null],[0,"execute","timely","Starts a timely dataflow execution from configuration information and per-worker logic.",null,null],[5,"example","timely::execute","Executes a single-threaded timely dataflow computation.",null,{"inputs":[{"name":"f"}],"output":null}],[5,"execute","","Executes a timely dataflow from a configuration and per-communicator logic.",null,{"inputs":[{"name":"configuration"},{"name":"f"}],"output":{"name":"result"}}],[5,"execute_from_args","","Executes a timely dataflow from supplied arguments and per-communicator logic.",null,{"inputs":[{"name":"i"},{"name":"f"}],"output":{"name":"result"}}],[0,"logging","timely","Traits, implementations, and macros related to logging timely events.",null,null],[3,"EventStreamLogger","timely::logging","",null,null],[3,"OperatesEvent","","The creation of an `Operate` implementor.",null,null],[12,"id","","Worker-unique identifier for the operator.",74,null],[12,"addr","","Sequence of nested scope identifiers indicating the path from the root to this instance.",74,null],[12,"name","","A helpful name.",74,null],[3,"ChannelsEvent","","The creation of a channel between operators.",null,null],[12,"id","","Worker-unique identifier for the channel",75,null],[12,"scope_addr","","Sequence of nested scope identifiers indicating the path from the root to this instance.",75,null],[12,"source","","Source descriptor, indicating operator index and output port.",75,null],[12,"target","","Target descriptor, indicating operator index and input port.",75,null],[3,"ProgressEvent","","Send or receive of progress information.",null,null],[12,"is_send","","`true` if the event is a send, and `false` if it is a receive.",76,null],[12,"addr","","Sequence of nested scope identifiers indicating the path from the root to this instance.",76,null],[12,"messages","","List of message updates, containing Target descriptor, timestamp as string, and delta.",76,null],[12,"internal","","List of capability updates, containing Source descriptor, timestamp as string, and delta.",76,null],[3,"MessagesEvent","","Message send or receive event",null,null],[12,"is_send","","`true` if send event, `false` if receive event.",77,null],[12,"channel","","Channel identifier",77,null],[12,"source","","Source worker index.",77,null],[12,"target","","Target worker index.",77,null],[12,"seq_no","","Message sequence number.",77,null],[12,"length","","Number of typed records in the message.",77,null],[3,"ScheduleEvent","","Operator start or stop.",null,null],[12,"id","","Worker-unique identifier for the operator, linkable to the identifiers in `OperatesEvent`.",78,null],[12,"start_stop","","`Start` if the operator is starting, `Stop` if it is stopping.\nactiviy is true if it looks like some useful work was performed during this call (data was\nread or written, notifications were requested / delivered)",78,null],[4,"StartStop","","",null,null],[13,"Start","","",79,null],[13,"Stop","","",79,null],[12,"activity","timely::logging::StartStop","",79,null],[5,"log","timely::logging","Logs `record` in `logger` if logging is enabled.",null,{"inputs":[{"name":"localkey"},{"name":"record"}],"output":null}],[5,"initialize","","Initializes logging; called as part of `Root` initialization.",null,{"inputs":[{"name":"root"}],"output":null}],[5,"flush_logs","","Flushes logs; called by `Root::step`.",null,{"inputs":[],"output":null}],[7,"OPERATES","","",null,null],[7,"CHANNELS","","",null,null],[7,"PROGRESS","","",null,null],[7,"MESSAGES","","",null,null],[7,"SCHEDULE","","",null,null],[7,"GUARDED_MESSAGE","","",null,null],[7,"GUARDED_PROGRESS","","",null,null],[8,"Logger","","Logging methods",null,null],[16,"Record","","The type of loggable record.",80,null],[10,"log","","Adds `record` to the log.",80,null],[10,"flush","","Called with some frequency; behavior unspecified.",80,null],[11,"log","","",81,null],[11,"flush","","",81,null],[11,"drop","","",81,null],[11,"clone","","",74,null],[11,"fmt","","",74,null],[11,"entomb","","",74,null],[11,"embalm","","",74,null],[11,"exhume","","",74,null],[11,"clone","","",75,null],[11,"fmt","","",75,null],[11,"entomb","","",75,null],[11,"embalm","","",75,null],[11,"exhume","","",75,null],[11,"clone","","",76,null],[11,"fmt","","",76,null],[11,"entomb","","",76,null],[11,"embalm","","",76,null],[11,"exhume","","",76,null],[11,"clone","","",77,null],[11,"fmt","","",77,null],[11,"eq","","",79,null],[11,"ne","","",79,null],[11,"clone","","",79,null],[11,"fmt","","",79,null],[11,"clone","","",78,null],[11,"fmt","","",78,null],[11,"entomb","","",78,null],[11,"embalm","","",78,null],[11,"exhume","","",78,null],[8,"Data","timely","A composite trait for types usable in timely dataflow.",null,null],[11,"from_args","","Constructs a new configuration by parsing supplied text arguments.",2,{"inputs":[{"name":"i"}],"output":{"name":"result"}}],[11,"enter","timely::dataflow::stream","",73,null],[11,"leave","","",73,null],[11,"unary_notify","","",73,null],[11,"unary_stream","","",73,null],[11,"queue","","",73,null],[11,"new_input","timely::dataflow::scopes::child","",71,null],[11,"loop_variable","","",71,null],[11,"connect_loop","timely::dataflow::stream","",73,null],[11,"concat","","",73,null],[11,"partition","","",73,null],[11,"map","","",73,null],[11,"map_in_place","","",73,null],[11,"flat_map","","",73,null],[11,"inspect","","",73,null],[11,"inspect_batch","","",73,null],[11,"filter","","",73,null],[11,"binary_stream","","",73,null],[11,"binary_notify","","",73,null],[11,"delay","","",73,null],[11,"delay_batch","","",73,null],[11,"exchange","","",73,null],[11,"broadcast","","",73,null],[11,"probe","","",73,null],[11,"capture_into","","",73,null],[11,"state_machine","","",73,null],[11,"aggregate","","",73,null]],"paths":[[8,"Push"],[8,"Pull"],[4,"Configuration"],[3,"CountMap"],[3,"Antichain"],[3,"MutableAntichain"],[3,"PointstampCounter"],[4,"Summary"],[3,"Product"],[3,"Source"],[3,"Target"],[3,"Subgraph"],[8,"Timestamp"],[8,"PathSummary"],[3,"RootTimestamp"],[3,"RootSummary"],[8,"Operate"],[3,"Progcaster"],[8,"Enter"],[8,"EnterAt"],[8,"Leave"],[8,"Unary"],[8,"Queue"],[8,"Input"],[3,"Handle"],[8,"LoopVariable"],[8,"ConnectLoop"],[3,"Observer"],[8,"Concat"],[8,"Concatenate"],[8,"Partition"],[3,"Operator"],[8,"Map"],[8,"Inspect"],[8,"Filter"],[8,"Binary"],[8,"Delay"],[8,"Exchange"],[8,"Broadcast"],[8,"Probe"],[3,"Handle"],[8,"ToStream"],[3,"EventLink"],[4,"Event"],[8,"Capture"],[8,"EventIterator"],[8,"EventPusher"],[8,"Replay"],[3,"EventWriter"],[3,"EventReader"],[8,"StateMachine"],[8,"Aggregate"],[3,"InputHandle"],[3,"OutputHandle"],[3,"Notificator"],[3,"Capability"],[3,"Tee"],[3,"TeeHelper"],[3,"Exchange"],[3,"Counter"],[3,"Buffer"],[3,"Session"],[3,"Counter"],[3,"Message"],[4,"Content"],[8,"ParallelizationContract"],[3,"Pipeline"],[3,"Exchange"],[3,"Pusher"],[3,"Puller"],[3,"Root"],[3,"Child"],[8,"Scope"],[3,"Stream"],[3,"OperatesEvent"],[3,"ChannelsEvent"],[3,"ProgressEvent"],[3,"MessagesEvent"],[3,"ScheduleEvent"],[4,"StartStop"],[8,"Logger"],[3,"EventStreamLogger"]]}; initSearch(searchIndex);