use crate::gen::{Gen, GenContext, GenState};
#[allow(unused)]
use crate::trig;
use crate::{BlockSize, Sample};
use knyst_macro::impl_gen;
use crate as knyst;
#[macro_use]
pub mod connection;
mod graph_gen;
mod node;
pub mod run_graph;
pub use crate::node_buffer::NodeBufferRef;
pub use connection::Connection;
use connection::ConnectionError;
use node::Node;
pub use run_graph::{RunGraph, RunGraphSettings};
use crate::inspection::{EdgeInspection, EdgeSource, GraphInspection, NodeInspection};
use crate::scheduling::MusicalTimeMap;
use crate::time::{Beats, Seconds};
use rtrb::RingBuffer;
use slotmap::{new_key_type, SecondaryMap, SlotMap};
use std::cell::UnsafeCell;
use std::collections::{HashMap, HashSet};
use std::mem;
use std::sync::atomic::Ordering;
use std::sync::atomic::{AtomicBool, AtomicU64};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};
use self::connection::{ConnectionBundle, NodeChannel, NodeInput, NodeOutput};
use crate::resources::Resources;
/// Unique id identifying a [`Graph`]. Assigned from a global atomic counter when a Graph is created.
pub type GraphId = u64;
/// Source of fresh [`GraphId`]s, incremented with `fetch_add` in `Graph::new`.
static NEXT_GRAPH_ID: AtomicU64 = AtomicU64::new(0);
/// Source of fresh `NodeId::unique_id`s, incremented with `fetch_add` in `NodeId::new`.
static NEXT_ADDRESS_ID: AtomicU64 = AtomicU64::new(0);
/// A node's raw location: its key inside a specific graph plus the id of that graph.
/// Unlike [`NodeId`], this is only valid as long as the node stays in the same graph slot.
#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
pub struct RawNodeAddress {
    // Slot key of the node within its graph's SlotMap.
    key: NodeKey,
    // The graph the node lives in.
    graph_id: GraphId,
}
/// A stable, globally unique handle to a node. Remains valid for the node's
/// lifetime regardless of where it is stored internally.
#[derive(Clone, Copy, Debug)]
pub struct NodeId {
    // Globally unique; the sole basis for equality/hashing (see impls below).
    unique_id: u64,
    // The graph this node was created for; mutated by the Graph when pushed.
    graph_id: GraphId,
}
// Equality and hashing intentionally consider only `unique_id`, not `graph_id`:
// the same node must compare equal even if its graph id field is updated later.
impl PartialEq for NodeId {
    fn eq(&self, other: &Self) -> bool {
        self.unique_id == other.unique_id
    }
}
impl Eq for NodeId {}
impl std::hash::Hash for NodeId {
    // Hash must stay consistent with PartialEq: only `unique_id` participates.
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.unique_id.hash(state);
    }
}
impl NodeId {
    /// Create a new `NodeId` for the given graph with a fresh globally unique id.
    pub fn new(graph_id: GraphId) -> Self {
        let unique_id = NEXT_ADDRESS_ID.fetch_add(1, Ordering::Relaxed);
        Self {
            unique_id,
            graph_id,
        }
    }
    /// The id of the graph this node belongs to.
    pub fn graph_id(&self) -> GraphId {
        self.graph_id
    }
    /// Refer to one specific output channel of this node.
    pub fn out(&self, channel: impl Into<connection::NodeChannel>) -> NodeOutput {
        let from_channel = channel.into();
        NodeOutput {
            from_node: *self,
            from_channel,
        }
    }
    /// Refer to one specific input channel of this node.
    pub fn input(&self, channel: impl Into<connection::NodeChannel>) -> NodeInput {
        let channel = channel.into();
        NodeInput {
            node: *self,
            channel,
        }
    }
}
impl NodeId {
    /// A one-channel [`Connection`] from output 0 of this node to `sink_node`.
    pub fn to(&self, sink_node: NodeId) -> Connection {
        Connection::Node {
            source: *self,
            from_index: Some(0),
            from_label: None,
            sink: sink_node,
            to_index: None,
            to_label: None,
            channels: 1,
            feedback: false,
            to_index_offset: 0,
        }
    }
    /// A one-channel [`Connection`] from output 0 of this node to graph output 0.
    pub fn to_graph_out(&self) -> Connection {
        Connection::GraphOutput {
            source: *self,
            from_index: Some(0),
            from_label: None,
            to_index: 0,
            channels: 1,
        }
    }
    /// Like [`NodeId::to`], but routed through a feedback edge (one block of latency).
    pub fn feedback_to(&self, sink_node: NodeId) -> Connection {
        Connection::Node {
            source: *self,
            from_index: Some(0),
            from_label: None,
            sink: sink_node,
            to_index: None,
            to_label: None,
            channels: 1,
            feedback: true,
            to_index_offset: 0,
        }
    }
    /// Start building a set of scheduled changes for this node.
    pub fn change(&self) -> NodeChanges {
        NodeChanges::new(*self)
    }
}
/// Marker type for building connections that originate from a graph's own inputs.
pub struct GraphInput;
impl GraphInput {
    /// A one-channel connection from graph input 0 to the first available input of `sink_node`.
    pub fn to(sink_node: NodeId) -> Connection {
        Connection::GraphInput {
            from_index: 0,
            sink: sink_node,
            to_index: None,
            to_label: None,
            channels: 1,
            to_index_offset: 0,
        }
    }
}
/// A set of [`NodeChanges`] that should all be applied at the same [`Time`].
#[derive(Clone, Debug)]
pub struct SimultaneousChanges {
    /// When the changes should take effect.
    pub time: Time,
    /// The per-node changes to apply together.
    pub changes: Vec<NodeChanges>,
}
impl SimultaneousChanges {
    // Shared constructor: an empty change set scheduled at `time`.
    fn empty_at(time: Time) -> Self {
        Self {
            time,
            changes: Vec::new(),
        }
    }
    /// Empty change set applied as soon as possible.
    pub fn now() -> Self {
        Self::empty_at(Time::Immediately)
    }
    /// Empty change set applied `duration` from now.
    pub fn duration_from_now(duration: Duration) -> Self {
        Self::empty_at(Time::DurationFromNow(duration))
    }
    /// Empty change set applied at the given musical time.
    pub fn beats(beats: Beats) -> Self {
        Self::empty_at(Time::Beats(beats))
    }
    /// Add another node's changes to this set. Returns `&mut Self` for chaining.
    pub fn push(&mut self, node_changes: NodeChanges) -> &mut Self {
        self.changes.push(node_changes);
        self
    }
}
/// A batch of parameter changes targeting a single node, built fluently.
#[derive(Clone, Debug)]
pub struct NodeChanges {
    pub(crate) node: NodeId,
    pub(crate) parameters: Vec<(NodeChannel, Change)>,
    pub(crate) offset: Option<TimeOffset>,
}
impl NodeChanges {
    /// Start an empty change batch for `node`.
    pub fn new(node: NodeId) -> Self {
        Self {
            node,
            parameters: Vec::new(),
            offset: None,
        }
    }
    /// Set an input channel to a constant value.
    pub fn set(mut self, channel: impl Into<NodeChannel>, value: Sample) -> Self {
        let change = Change::Constant(value);
        self.parameters.push((channel.into(), change));
        self
    }
    /// Send a single-sample trigger to an input channel.
    pub fn trigger(mut self, channel: impl Into<NodeChannel>) -> Self {
        self.parameters.push((channel.into(), Change::Trigger));
        self
    }
    /// Apply all changes in this batch at an offset from the scheduled time.
    pub fn time_offset(mut self, offset: TimeOffset) -> Self {
        self.offset = Some(offset);
        self
    }
}
/// A single kind of change that can be scheduled on a node input.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Change {
    /// Set the input to a constant value from the scheduled time onwards.
    Constant(Sample),
    /// A single-sample 1.0 pulse on the input.
    Trigger,
}
// Both float widths convert to a Constant via the crate-wide `Sample` alias.
impl From<f32> for Change {
    fn from(value: f32) -> Self {
        Self::Constant(value as Sample)
    }
}
impl From<f64> for Change {
    fn from(value: f64) -> Self {
        Self::Constant(value as Sample)
    }
}
/// One scheduled change to one node input, at a given [`Time`].
#[derive(Clone, Debug)]
pub struct ParameterChange {
    /// When to apply the change.
    pub time: Time,
    /// Which node input to change.
    pub input: NodeInput,
    /// What to change it to.
    pub value: Change,
}
impl ParameterChange {
    // Shared constructor for all the public time-variant constructors below.
    fn with_time(channel: NodeInput, value: impl Into<Change>, time: Time) -> Self {
        Self {
            input: channel,
            value: value.into(),
            time,
        }
    }
    /// Schedule a change at a musical time.
    pub fn beats(channel: NodeInput, value: impl Into<Change>, beats: Beats) -> Self {
        Self::with_time(channel, value, Time::Beats(beats))
    }
    /// Schedule a change at an absolute time in seconds.
    pub fn seconds(channel: NodeInput, value: impl Into<Change>, seconds: Seconds) -> Self {
        Self::with_time(channel, value, Time::Seconds(seconds))
    }
    /// Schedule a change a wall-clock duration from now.
    pub fn duration_from_now(
        channel: NodeInput,
        value: impl Into<Change>,
        from_now: Duration,
    ) -> Self {
        Self::with_time(channel, value, Time::DurationFromNow(from_now))
    }
    /// Schedule a change as soon as possible.
    pub fn now(channel: NodeInput, value: impl Into<Change>) -> Self {
        Self::with_time(channel, value, Time::Immediately)
    }
}
/// The different ways a point in time can be specified when scheduling.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug)]
pub enum Time {
    /// Musical time, resolved through the `MusicalTimeMap`.
    Beats(Beats),
    /// A wall-clock duration measured from "now".
    DurationFromNow(Duration),
    /// An absolute time in seconds.
    Seconds(Seconds),
    /// As soon as possible.
    Immediately,
}
/// An offset relative to an already-scheduled time, in frames or seconds.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug)]
pub enum TimeOffset {
    /// A signed offset in samples/frames.
    Frames(i64),
    /// An offset in seconds, before or after the reference time.
    Seconds(Relative<Seconds>),
}
impl TimeOffset {
    /// Convert this offset to a signed frame count at the given `sample_rate`.
    /// `Relative::Before` yields a negative value, `Relative::After` a positive one.
    pub fn to_frames(&self, sample_rate: u64) -> i64 {
        match self {
            TimeOffset::Frames(frame_offset) => *frame_offset,
            TimeOffset::Seconds(relative_seconds) => match relative_seconds {
                // Negate: "before" means earlier than the reference time.
                Relative::Before(s) => -(s.to_samples(sample_rate) as i64),
                Relative::After(s) => s.to_samples(sample_rate) as i64,
            },
        }
    }
}
/// A value positioned either before or after some reference point.
#[derive(Clone, Copy, Debug)]
pub enum Relative<T> {
    /// Earlier than the reference point.
    Before(T),
    /// Later than the reference point.
    After(T),
}
/// How an input edge combines into a node's input buffer:
/// overwrite the buffer contents or sum into them.
#[derive(Clone, Copy, Debug)]
enum CopyOrAdd {
    Copy,
    Add,
}
/// One unit of audio work: everything the audio thread needs to run one node
/// for one block. Holds raw pointers into buffers owned elsewhere, hence the
/// manual `Send` impl below.
struct Task {
    node_key: NodeKey,
    // Per-input constant values; also written to when constant changes are applied.
    input_constants: *mut [Sample],
    // (graph_input_index, node_input_index) pairs to copy from the graph's inputs.
    graph_inputs_to_copy: Vec<(usize, usize)>,
    // (from_ptr, to_ptr, block_size, combine-mode) per inter-node input edge.
    inputs_to_copy: Vec<(*mut Sample, *mut Sample, usize, CopyOrAdd)>,
    input_buffers: NodeBufferRef,
    gen: *mut dyn Gen,
    output_buffers_first_ptr: *mut Sample,
    block_size: usize,
    num_outputs: usize,
    // Sample time before which the node should not run (delayed start).
    start_node_at_sample: u64,
}
impl Task {
#[inline]
fn init_constants(&mut self) {
let node_constants = unsafe { &*self.input_constants };
for (channel, &constant) in node_constants.iter().enumerate() {
self.input_buffers.fill_channel(constant, channel);
}
}
#[inline]
fn apply_constant_change(&mut self, change: &ScheduledChange, start_sample_in_block: usize) {
match change.kind {
ScheduledChangeKind::Constant { index, value } => {
let node_constants = unsafe { &mut *self.input_constants };
node_constants[index] = value;
for i in start_sample_in_block..self.input_buffers.block_size() {
self.input_buffers.write(value, index, i);
}
}
ScheduledChangeKind::Trigger { index } => {
self.input_buffers.write(1.0, index, start_sample_in_block);
}
}
}
#[inline]
fn run(
&mut self,
graph_inputs: &NodeBufferRef,
resources: &mut Resources,
sample_rate: Sample,
sample_time_at_block_start: u64,
) -> GenState {
for (graph_input_index, node_input_index) in &self.graph_inputs_to_copy {
for i in 0..self.input_buffers.block_size() {
self.input_buffers.write(
graph_inputs.read(*graph_input_index, i),
*node_input_index,
i,
);
}
}
for (from, to, block_size, _copy_or_add) in &self.inputs_to_copy {
let from_slice = unsafe { std::slice::from_raw_parts(*from, *block_size) };
let to_slice = unsafe { std::slice::from_raw_parts_mut(*to, *block_size) };
for (from, to) in from_slice.iter().zip(to_slice.iter_mut()) {
*to += *from;
}
}
if self.start_node_at_sample <= sample_time_at_block_start {
let mut outputs = NodeBufferRef::new(
self.output_buffers_first_ptr,
self.num_outputs,
self.block_size,
);
let ctx = GenContext {
inputs: &self.input_buffers,
outputs: &mut outputs,
sample_rate,
};
assert!(!self.gen.is_null());
unsafe { (*self.gen).process(ctx, resources) }
} else if ((self.start_node_at_sample - sample_time_at_block_start) as usize)
< self.block_size
{
let new_block_size = self.block_size
- ((self.start_node_at_sample - sample_time_at_block_start) as usize);
let mut outputs = NodeBufferRef::new(
self.output_buffers_first_ptr,
self.num_outputs,
self.block_size,
);
let partial_inputs =
unsafe { self.input_buffers.to_partial_block_size(new_block_size) };
let mut partial_outputs = unsafe { outputs.to_partial_block_size(new_block_size) };
let ctx = GenContext {
inputs: &partial_inputs,
outputs: &mut partial_outputs,
sample_rate,
};
assert!(!self.gen.is_null());
unsafe { (*self.gen).process(ctx, resources) }
} else {
GenState::Continue
}
}
}
unsafe impl Send for Task {}
/// A pass-through task: copy one graph input channel straight to a graph output channel.
struct InputToOutputTask {
    graph_input_index: usize,
    graph_output_index: usize,
}
/// A task copying one channel of a node's output to one of the graph's output channels.
struct OutputTask {
    input_buffers: NodeBufferRef,
    input_index: usize,
    graph_output_index: usize,
}
impl std::fmt::Debug for OutputTask {
    // Manual Debug: `input_buffers` holds raw pointers and is intentionally omitted.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("OutputTask")
            .field("input_index", &self.input_index)
            .field("graph_output_index", &self.graph_output_index)
            .finish()
    }
}
// SAFETY: the buffer behind `input_buffers` is owned by the Graph and outlives the task.
unsafe impl Send for OutputTask {}
/// Errors that can occur when pushing a Gen or Graph into a Graph.
#[allow(missing_docs)]
#[derive(thiserror::Error, Debug)]
pub enum PushError {
    #[error("The graph was not started and the given start time of a Gen could therefore not be calculated: `{0:?}`.")]
    InvalidStartTimeOnUnstartedGraph(Time),
    /// The pushed value is handed back in `g` so the caller can retry or recover it.
    #[error("The target graph (`{target_graph}`) was not found. The GenOrGraph that was pushed is returned.")]
    GraphNotFound {
        g: GenOrGraphEnum,
        target_graph: GraphId,
    },
}
/// Errors that can occur when freeing a node.
#[allow(missing_docs)]
#[derive(thiserror::Error, Debug, PartialEq)]
pub enum FreeError {
    #[error(
        "The graph containing the NodeAdress provided was not found. The node itself may or may not exist."
    )]
    GraphNotFound,
    #[error("The NodeId does not exist. The Node may have been freed already.")]
    NodeNotFound,
    #[error(
        "The node you tried to free has been marked as immortal. Make it mortal before freeing."
    )]
    ImmortalNode,
    /// Boxed because ConnectionError is comparatively large.
    #[error("The free action required making a new connection, but the connection failed.")]
    ConnectionError(#[from] Box<connection::ConnectionError>),
}
#[allow(missing_docs)]
#[derive(thiserror::Error, Debug, PartialEq)]
pub enum ScheduleError {
#[error("Changes for nodes in different graphs were attempted to be scheduled together.`")]
DifferentGraphs,
#[error("The graph containing the NodeId provided was not found: `{0:?}`")]
GraphNotFound(NodeId),
#[error("The NodeId does not exist. The Node may have been freed already.")]
NodeNotFound,
#[error("The input label specified was not registered for the node: `{0}`")]
InputLabelNotFound(&'static str),
#[error(
"No scheduler was created for the Graph so the change cannot be scheduled. This is likely because this Graph was not yet added to another Graph or split into a Node."
)]
SchedulerNotCreated,
#[error("A lock for writing to the MusicalTimeMap cannot be acquired.")]
MusicalTimeMapCannotBeWrittenTo,
#[error("Tried to schedule change `{change:?}` to non existing input `{channel:?}` for node `{node_name}`")]
InputOutOfRange {
node_name: String,
channel: NodeChannel,
change: Change,
},
}
/// Either a boxed [`Gen`] or a whole [`Graph`]; both can be pushed to a Graph.
#[allow(missing_docs)]
pub enum GenOrGraphEnum {
    Gen(Box<dyn Gen + Send>),
    Graph(Graph),
}
impl std::fmt::Debug for GenOrGraphEnum {
    // Manual Debug: `dyn Gen` is not Debug, so show names/ids instead.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            GenOrGraphEnum::Gen(gen) => write!(f, "Gen: {}", gen.name()),
            GenOrGraphEnum::Graph(graph) => write!(f, "Graph: {}, {}", graph.id, graph.name),
        }
    }
}
impl GenOrGraphEnum {
    /// Split into the optional owned Graph and the Gen that will run it.
    /// For a plain Gen the Graph half is `None`; for a Graph, a graph-gen is
    /// created matched to the parent's block size/sample rate/oversampling.
    fn components(
        self,
        parent_graph_block_size: usize,
        parent_graph_sample_rate: Sample,
        parent_graph_oversampling: Oversampling,
    ) -> (Option<Graph>, Box<dyn Gen + Send>) {
        match self {
            GenOrGraphEnum::Gen(boxed_gen) => (None, boxed_gen),
            GenOrGraphEnum::Graph(graph) => graph.components(
                parent_graph_block_size,
                parent_graph_sample_rate,
                parent_graph_oversampling,
            ),
        }
    }
}
// Any GenOrGraph converts into the enum form via its own conversion method.
impl<T: GenOrGraph> From<T> for GenOrGraphEnum {
    fn from(value: T) -> Self {
        value.into_gen_or_graph_enum()
    }
}
/// Anything that can be pushed into a Graph: a [`Gen`] or a [`Graph`].
#[allow(missing_docs)]
pub trait GenOrGraph {
    /// Split into an optional owned Graph plus the Gen that drives it.
    fn components(
        self,
        parent_graph_block_size: usize,
        parent_graph_sample_rate: Sample,
        parent_graph_oversampling: Oversampling,
    ) -> (Option<Graph>, Box<dyn Gen + Send>);
    /// Convert into the type-erased enum form.
    fn into_gen_or_graph_enum(self) -> GenOrGraphEnum;
    fn num_outputs(&self) -> usize;
    fn num_inputs(&self) -> usize;
}
impl<T: Gen + Send + 'static> GenOrGraph for T {
fn components(
self,
_parent_graph_block_size: usize,
_parent_graph_sample_rate: Sample,
_parent_graph_oversampling: Oversampling,
) -> (Option<Graph>, Box<dyn Gen + Send>) {
(None, Box::new(self))
}
fn into_gen_or_graph_enum(self) -> GenOrGraphEnum {
GenOrGraphEnum::Gen(Box::new(self))
}
fn num_outputs(&self) -> usize {
self.num_outputs()
}
fn num_inputs(&self) -> usize {
self.num_inputs()
}
}
impl GenOrGraph for Graph {
    /// Wrap this Graph in a graph-gen runnable by a parent graph, validating
    /// block size, sample rate and oversampling compatibility first.
    fn components(
        mut self,
        parent_graph_block_size: usize,
        parent_graph_sample_rate: Sample,
        parent_graph_oversampling: Oversampling,
    ) -> (Option<Graph>, Box<dyn Gen + Send>) {
        // A larger inner block size means inner input data would be needed
        // before the parent has produced it, so this combination is rejected.
        // NOTE(review): the message says "Warning:" but this is a panic.
        if self.block_size() > parent_graph_block_size && self.num_inputs() > 0 {
            panic!(
                "Warning: You are pushing a graph with a larger block size and with Graph inputs. An inner Graph with a larger block size cannot have inputs since the inputs for the entire inner block would not have been calculated yet."
            );
        }
        // Mismatched sample rates are permitted, but no resampling happens.
        if self.sample_rate != parent_graph_sample_rate {
            eprintln!(
                "Warning: You are pushing a graph with a different sample rate. This is currently allowed, but no automatic resampling will be allowed."
            );
        }
        if self.oversampling.as_usize() < parent_graph_oversampling.as_usize() {
            panic!(
                "You tried to push an inner graph with lower oversampling than its parent. This is not currently allowed."
            );
        }
        let gen = self
            .create_graph_gen(
                parent_graph_block_size,
                parent_graph_sample_rate,
                parent_graph_oversampling,
            )
            .unwrap();
        (Some(self), gen)
    }
    fn into_gen_or_graph_enum(self) -> GenOrGraphEnum {
        GenOrGraphEnum::Graph(self)
    }
    // These delegate to Graph's inherent methods (inherent methods take
    // precedence over this trait's own methods, so no recursion).
    fn num_outputs(&self) -> usize {
        self.num_outputs()
    }
    fn num_inputs(&self) -> usize {
        self.num_inputs()
    }
}
/// Signature of the per-block process callback used by [`ClosureGen`].
type ProcessFn = Box<dyn (FnMut(GenContext, &mut Resources) -> GenState) + Send>;
/// A [`Gen`] implemented by a closure, with inputs/outputs declared via builder methods.
pub struct ClosureGen {
    process_fn: ProcessFn,
    // Output channel labels; their count defines `num_outputs`.
    outputs: Vec<&'static str>,
    // Input channel labels; their count defines `num_inputs`.
    inputs: Vec<&'static str>,
    name: &'static str,
}
pub fn gen(
process: impl (FnMut(GenContext, &mut Resources) -> GenState) + 'static + Send,
) -> ClosureGen {
ClosureGen {
process_fn: Box::new(process),
..Default::default()
}
}
impl ClosureGen {
    /// Same as the free function [`gen`]: build a `ClosureGen` from a process closure.
    pub fn new(
        process: impl (FnMut(GenContext, &mut Resources) -> GenState) + 'static + Send,
    ) -> Self {
        gen(process)
    }
    /// Set the display name of this gen.
    pub fn name(mut self, name: &'static str) -> Self {
        self.name = name;
        self
    }
    /// Declare one more input channel with the given label.
    pub fn input(mut self, input_name: &'static str) -> Self {
        self.inputs.push(input_name);
        self
    }
    /// Declare one more output channel with the given label.
    pub fn output(mut self, output_name: &'static str) -> Self {
        self.outputs.push(output_name);
        self
    }
}
impl Default for ClosureGen {
fn default() -> Self {
Self {
process_fn: Box::new(|_ctx, _resources| GenState::Continue),
outputs: Default::default(),
inputs: Default::default(),
name: "ClosureGen",
}
}
}
impl Gen for ClosureGen {
    /// Delegate processing to the stored closure.
    fn process(&mut self, ctx: GenContext, resources: &mut Resources) -> GenState {
        (self.process_fn)(ctx, resources)
    }
    fn num_inputs(&self) -> usize {
        self.inputs.len()
    }
    fn num_outputs(&self) -> usize {
        self.outputs.len()
    }
    /// Label of an input channel, or "" when out of range.
    fn input_desc(&self, input: usize) -> &'static str {
        self.inputs.get(input).copied().unwrap_or("")
    }
    /// Label of an output channel, or "" when out of range.
    fn output_desc(&self, output: usize) -> &'static str {
        self.outputs.get(output).copied().unwrap_or("")
    }
    fn name(&self) -> &'static str {
        self.name
    }
}
new_key_type! {
    /// Key type for nodes stored in a Graph's SlotMap.
    struct NodeKey;
}
/// Supported oversampling ratios for a Graph.
#[allow(missing_docs)]
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum Oversampling {
    X1,
    X2,
}
impl Oversampling {
    /// The oversampling ratio as a plain number.
    pub fn as_usize(&self) -> usize {
        match self {
            Self::X1 => 1,
            Self::X2 => 2,
        }
    }
    /// Build from a ratio; `None` for unsupported values.
    pub fn from_usize(x: usize) -> Option<Self> {
        match x {
            1 => Some(Self::X1),
            2 => Some(Self::X2),
            _ => None,
        }
    }
}
/// All configuration needed to create a [`Graph`].
#[derive(Clone, Debug)]
pub struct GraphSettings {
    /// Human-readable name of the graph.
    pub name: String,
    /// Number of input channels to the graph.
    pub num_inputs: usize,
    /// Largest number of inputs any single node may have; grows on demand.
    pub max_node_inputs: usize,
    /// Number of output channels from the graph.
    pub num_outputs: usize,
    /// Samples per processing block (before oversampling is applied).
    pub block_size: usize,
    /// Capacity of the node storage.
    pub num_nodes: usize,
    /// Sample rate in Hz.
    pub sample_rate: Sample,
    /// Oversampling ratio for this graph.
    pub oversampling: Oversampling,
    /// Capacity of the ring buffer used to communicate with the audio thread.
    pub ring_buffer_size: usize,
}
impl GraphSettings {
pub fn num_inputs(mut self, num_inputs: usize) -> Self {
self.num_inputs = num_inputs;
self
}
pub fn num_outputs(mut self, num_outputs: usize) -> Self {
self.num_outputs = num_outputs;
self
}
pub fn oversampling(mut self, oversampling: Oversampling) -> Self {
self.oversampling = oversampling;
self
}
pub fn block_size(mut self, block_size: usize) -> Self {
self.block_size = block_size;
self
}
}
impl Default for GraphSettings {
    /// Sensible stereo defaults: no inputs, 2 outputs, 64-sample blocks, 48 kHz.
    fn default() -> Self {
        GraphSettings {
            name: String::new(),
            num_inputs: 0,
            num_outputs: 2,
            max_node_inputs: 8,
            block_size: 64,
            num_nodes: 1024,
            sample_rate: 48000.0,
            oversampling: Oversampling::X1,
            ring_buffer_size: 1000,
        }
    }
}
/// Owns a heap-allocated sample buffer through a raw pointer, so that raw
/// aliases to it can be handed out while this struct controls the lifetime.
struct OwnedRawBuffer {
    ptr: *mut [Sample],
}
impl Drop for OwnedRawBuffer {
    fn drop(&mut self) {
        // SAFETY: `ptr` came from `Box::into_raw` and is only reconstituted here, once.
        unsafe { drop(Box::from_raw(self.ptr)) }
    }
}
/// A graph of audio nodes (`Gen`s and sub-`Graph`s) with connections between them.
pub struct Graph {
    id: GraphId,
    name: String,
    // Shared with the running graph-gen; UnsafeCell allows the audio side access.
    nodes: Arc<UnsafeCell<SlotMap<NodeKey, Node>>>,
    // Nodes awaiting removal once the audio thread signals it is safe.
    node_keys_to_free_when_safe: Vec<(NodeKey, Arc<AtomicBool>)>,
    // Old buffers kept alive until the audio thread no longer references them.
    buffers_to_free_when_safe: Vec<Arc<OwnedRawBuffer>>,
    new_inputs_buffers_ptr: bool,
    node_keys_pending_removal: HashSet<NodeKey>,
    // Per-node edges feeding each node's inputs.
    node_input_edges: SecondaryMap<NodeKey, Vec<Edge>>,
    // Channel-name <-> index lookup tables, per node.
    node_input_index_to_name: SecondaryMap<NodeKey, Vec<&'static str>>,
    node_input_name_to_index: SecondaryMap<NodeKey, HashMap<&'static str, usize>>,
    node_output_index_to_name: SecondaryMap<NodeKey, Vec<&'static str>>,
    node_output_name_to_index: SecondaryMap<NodeKey, HashMap<&'static str, usize>>,
    node_feedback_edges: SecondaryMap<NodeKey, Vec<FeedbackEdge>>,
    node_feedback_node_key: SecondaryMap<NodeKey, NodeKey>,
    node_ids: SecondaryMap<NodeKey, NodeId>,
    // false = immortal; immortal nodes refuse to be freed.
    node_mortality: SecondaryMap<NodeKey, bool>,
    // Topologically ordered execution order of nodes.
    node_order: Vec<NodeKey>,
    disconnected_nodes: Vec<NodeKey>,
    feedback_node_indices: Vec<NodeKey>,
    // Inner Graphs owned per node (for nodes that are sub-graphs).
    graphs_per_node: SecondaryMap<NodeKey, Graph>,
    output_edges: Vec<Edge>,
    graph_input_edges: SecondaryMap<NodeKey, Vec<Edge>>,
    // Direct graph-input -> graph-output pass-through edges.
    graph_input_to_output_edges: Vec<InterGraphEdge>,
    // Set whenever the node order/tasks must be rebuilt.
    recalculation_required: bool,
    num_inputs: usize,
    num_outputs: usize,
    block_size: usize,
    sample_rate: Sample,
    oversampling: Oversampling,
    ring_buffer_size: usize,
    initiated: bool,
    // Scratch input buffers shared by all nodes: block_size * oversampling * max_node_inputs.
    inputs_buffers_ptr: Arc<OwnedRawBuffer>,
    max_node_inputs: usize,
    // Present once the Graph has been turned into a running graph-gen.
    graph_gen_communicator: Option<GraphGenCommunicator>,
    // Changes queued before a scheduler exists, flushed later.
    scheduled_changes_queue: Vec<(
        Vec<(NodeKey, ScheduledChangeKind, Option<TimeOffset>)>,
        Time,
    )>,
}
impl Default for Graph {
    /// A Graph with the default [`GraphSettings`].
    fn default() -> Self {
        Self::new(GraphSettings::default())
    }
}
impl Graph {
    /// Create a new empty Graph from the given settings, allocating all
    /// shared input buffers and per-node bookkeeping maps up front.
    pub fn new(options: GraphSettings) -> Self {
        let GraphSettings {
            name,
            num_inputs,
            num_outputs,
            max_node_inputs,
            block_size,
            num_nodes,
            sample_rate,
            oversampling,
            ring_buffer_size,
        } = options;
        // One flat zeroed buffer shared as scratch input space by all nodes;
        // ownership is wrapped so raw aliases can be handed to the audio thread.
        let inputs_buffers_ptr = Box::<[Sample]>::into_raw(
            vec![0.0 as Sample; block_size * oversampling.as_usize() * max_node_inputs]
                .into_boxed_slice(),
        );
        let inputs_buffers_ptr = Arc::new(OwnedRawBuffer {
            ptr: inputs_buffers_ptr,
        });
        // Globally unique graph id.
        let id = NEXT_GRAPH_ID.fetch_add(1, std::sync::atomic::Ordering::SeqCst);
        let nodes = Arc::new(UnsafeCell::new(SlotMap::with_capacity_and_key(num_nodes)));
        let node_input_edges = SecondaryMap::with_capacity(num_nodes);
        let node_feedback_edges = SecondaryMap::with_capacity(num_nodes);
        let graph_input_edges = SecondaryMap::with_capacity(num_nodes);
        let graph_input_to_output_edges = Vec::new();
        Self {
            id,
            name,
            nodes,
            node_input_edges,
            node_input_index_to_name: SecondaryMap::with_capacity(num_nodes),
            node_input_name_to_index: SecondaryMap::with_capacity(num_nodes),
            node_output_index_to_name: SecondaryMap::with_capacity(num_nodes),
            node_output_name_to_index: SecondaryMap::with_capacity(num_nodes),
            node_feedback_node_key: SecondaryMap::with_capacity(num_nodes),
            node_feedback_edges,
            node_ids: SecondaryMap::with_capacity(num_nodes),
            node_mortality: SecondaryMap::with_capacity(num_nodes),
            node_order: Vec::with_capacity(num_nodes),
            disconnected_nodes: vec![],
            node_keys_to_free_when_safe: vec![],
            node_keys_pending_removal: HashSet::new(),
            feedback_node_indices: vec![],
            graphs_per_node: SecondaryMap::with_capacity(num_nodes),
            output_edges: vec![],
            graph_input_edges,
            num_inputs,
            num_outputs,
            block_size,
            sample_rate,
            oversampling,
            initiated: false,
            inputs_buffers_ptr,
            max_node_inputs,
            ring_buffer_size,
            graph_gen_communicator: None,
            recalculation_required: false,
            buffers_to_free_when_safe: vec![],
            new_inputs_buffers_ptr: false,
            graph_input_to_output_edges,
            scheduled_changes_queue: vec![],
        }
    }
    /// Turn this Graph into a runnable top-level [`Node`] wrapping its graph-gen.
    /// No oversampling is applied relative to the parent at the top level.
    fn split_and_create_top_level_node(&mut self, node_id: NodeId) -> Result<Node, String> {
        let block_size = self.block_size();
        let graph_gen =
            self.create_graph_gen(self.block_size, self.sample_rate, Oversampling::X1)?;
        let mut node = Node::new("graph", graph_gen);
        node.init(block_size, self.sample_rate, node_id);
        self.recalculation_required = true;
        Ok(node)
    }
    /// Number of input channels to this graph.
    pub fn num_inputs(&self) -> usize {
        self.num_inputs
    }
    /// Number of output channels from this graph.
    pub fn num_outputs(&self) -> usize {
        self.num_outputs
    }
    /// A [`GraphSettings`] snapshot reproducing this graph's configuration.
    pub fn graph_settings(&self) -> GraphSettings {
        GraphSettings {
            name: self.name.clone(),
            num_inputs: self.num_inputs,
            max_node_inputs: self.max_node_inputs,
            num_outputs: self.num_outputs,
            block_size: self.block_size,
            num_nodes: self.get_nodes().capacity(),
            sample_rate: self.sample_rate,
            oversampling: self.oversampling,
            ring_buffer_size: self.ring_buffer_size,
        }
    }
    /// Number of nodes currently stored in this graph.
    pub fn num_stored_nodes(&self) -> usize {
        self.get_nodes().len()
    }
    #[allow(missing_docs)]
    pub fn id(&self) -> GraphId {
        self.id
    }
    /// Set whether a node may be freed (`true` = mortal) in this graph or any
    /// of its sub-graphs, searching recursively by the node's graph id.
    pub fn set_node_mortality(
        &mut self,
        node_id: NodeId,
        is_mortal: bool,
    ) -> Result<(), ScheduleError> {
        let mut node_might_be_in_this_graph = true;
        if node_id.graph_id() != self.id {
            node_might_be_in_this_graph = false;
        }
        if node_might_be_in_this_graph {
            if let Some(key) = Self::key_from_id(&self.node_ids, node_id) {
                if !self.get_nodes_mut().contains_key(key) {
                    return Err(ScheduleError::NodeNotFound);
                }
                self.node_mortality[key] = is_mortal;
            }
            // NOTE(review): if the graph id matches but no key is found, this
            // falls through and returns Ok(()) without changing anything —
            // confirm whether that should be a NodeNotFound error instead.
        }
        if !node_might_be_in_this_graph {
            // Recurse into sub-graphs; GraphNotFound from a child just means
            // "keep looking", any other error is propagated.
            for (_key, graph) in &mut self.graphs_per_node {
                match graph.set_node_mortality(node_id, is_mortal) {
                    Ok(_) => {
                        return Ok(());
                    }
                    Err(e) => match e {
                        ScheduleError::GraphNotFound(_) => (),
                        _ => {
                            return Err(e);
                        }
                    },
                }
            }
            return Err(ScheduleError::GraphNotFound(node_id));
        }
        Ok(())
    }
    /// Push a Gen or Graph into this graph. Infallible since the target graph
    /// is `self` (the inner `push_to_graph` cannot return GraphNotFound here).
    pub fn push(&mut self, to_node: impl Into<GenOrGraphEnum>) -> NodeId {
        self.push_to_graph(to_node, self.id).unwrap()
    }
    /// Like [`Graph::push`], but the node starts processing only at `start_time`.
    pub fn push_at_time(&mut self, to_node: impl Into<GenOrGraphEnum>, start_time: Time) -> NodeId {
        self.push_to_graph_at_time(to_node, self.id, start_time)
            .unwrap()
    }
pub fn push_to_graph(
&mut self,
to_node: impl Into<GenOrGraphEnum>,
graph_id: GraphId,
) -> Result<NodeId, PushError> {
let mut new_node_address = NodeId::new(graph_id);
self.push_with_existing_address_to_graph(to_node, &mut new_node_address, graph_id)?;
Ok(new_node_address)
}
pub fn push_to_graph_at_time(
&mut self,
to_node: impl Into<GenOrGraphEnum>,
graph_id: GraphId,
start_time: Time,
) -> Result<NodeId, PushError> {
let mut new_node_address = NodeId::new(graph_id);
self.push_with_existing_address_to_graph_at_time(
to_node,
&mut new_node_address,
graph_id,
start_time,
)?;
Ok(new_node_address)
}
    /// Push a Gen or Graph using a caller-provided [`NodeId`]. Infallible
    /// because the target graph is `self`.
    pub fn push_with_existing_address(
        &mut self,
        to_node: impl Into<GenOrGraphEnum>,
        node_address: &mut NodeId,
    ) {
        self.push_with_existing_address_to_graph(to_node, node_address, self.id)
            .unwrap()
    }
    /// Like [`Graph::push_with_existing_address`], starting at `start_time`.
    pub fn push_with_existing_address_at_time(
        &mut self,
        to_node: impl Into<GenOrGraphEnum>,
        node_address: &mut NodeId,
        start_time: Time,
    ) {
        self.push_with_existing_address_to_graph_at_time(to_node, node_address, self.id, start_time)
            .unwrap()
    }
    /// Push to the given graph with a caller-provided [`NodeId`], starting the
    /// node immediately.
    pub fn push_with_existing_address_to_graph(
        &mut self,
        to_node: impl Into<GenOrGraphEnum>,
        node_address: &mut NodeId,
        graph_id: GraphId,
    ) -> Result<(), PushError> {
        self.push_with_existing_address_to_graph_at_time(
            to_node,
            node_address,
            graph_id,
            Time::Immediately,
        )
    }
    /// The most general push: push a Gen or Graph into the graph identified by
    /// `graph_id` (searching sub-graphs recursively), bind it to the provided
    /// [`NodeId`], and schedule it to start at `start_time`.
    pub fn push_with_existing_address_to_graph_at_time(
        &mut self,
        to_node: impl Into<GenOrGraphEnum>,
        node_address: &mut NodeId,
        graph_id: GraphId,
        start_time: Time,
    ) -> Result<(), PushError> {
        if graph_id == self.id {
            let (graph, gen) =
                to_node
                    .into()
                    .components(self.block_size, self.sample_rate, self.oversampling);
            // Resolve `start_time` to a frame timestamp via the scheduler if
            // one is running; otherwise fall back to what can be computed
            // without a scheduler.
            let mut start_timestamp = 0;
            let mut scheduler_ts = false;
            if let Some(ggc) = &mut self.graph_gen_communicator {
                if let Some(ts) = ggc.scheduler.time_to_frames_timestamp(start_time) {
                    start_timestamp = ts;
                    scheduler_ts = true;
                }
            }
            if !scheduler_ts {
                match start_time {
                    // Beats and DurationFromNow need a running scheduler/clock.
                    Time::Beats(_) => {
                        return Err(PushError::InvalidStartTimeOnUnstartedGraph(start_time))
                    }
                    Time::DurationFromNow(_) => {
                        return Err(PushError::InvalidStartTimeOnUnstartedGraph(start_time))
                    }
                    // Absolute seconds can be converted directly at the
                    // (oversampled) sample rate.
                    Time::Seconds(s) => {
                        start_timestamp = s.to_samples(
                            self.sample_rate as u64 * self.oversampling.as_usize() as u64,
                        )
                    }
                    Time::Immediately => start_timestamp = 0,
                }
            }
            let mut node = Node::new(gen.name(), gen);
            node.start_at_sample(start_timestamp);
            let node_key = self.push_node(node, node_address);
            // If a whole Graph was pushed, keep ownership of it and start its
            // scheduler in sync with this graph's running clock.
            if let Some(mut graph) = graph {
                if let Some(ggc) = &mut self.graph_gen_communicator {
                    if let Scheduler::Running {
                        start_ts,
                        latency_in_samples,
                        musical_time_map,
                        ..
                    } = &mut ggc.scheduler
                    {
                        let clock_update = ClockUpdate {
                            timestamp: ggc.timestamp.clone(),
                            clock_sample_rate: self.sample_rate,
                        };
                        let latency = Duration::from_secs_f64(
                            *latency_in_samples / (self.sample_rate as f64),
                        );
                        graph.start_scheduler(
                            latency,
                            *start_ts,
                            &Some(clock_update),
                            musical_time_map,
                        );
                    }
                }
                self.graphs_per_node.insert(node_key, graph);
            }
            node_address.graph_id = self.id;
            Ok(())
        } else {
            // Not this graph: try every sub-graph. A GraphNotFound error hands
            // the pushed value back so the next sub-graph can be tried.
            let mut to_node = to_node.into();
            for (_key, graph) in &mut self.graphs_per_node {
                match graph.push_with_existing_address_to_graph_at_time(
                    to_node,
                    node_address,
                    graph_id,
                    start_time,
                ) {
                    Ok(_) => {
                        return Ok(());
                    }
                    Err(e) => match e {
                        PushError::GraphNotFound {
                            g: returned_to_node,
                            target_graph: _graph_id,
                        } => {
                            to_node = returned_to_node;
                        }
                        _ => return Err(e),
                    },
                }
            }
            Err(PushError::GraphNotFound {
                g: to_node,
                target_graph: graph_id,
            })
        }
    }
    /// Grow the shared node-input scratch buffer to fit `new_max_node_inputs`
    /// channels. The old buffer cannot be dropped while the audio thread may
    /// still reference it, so it is parked until it is safe to free.
    fn increase_max_node_inputs(&mut self, new_max_node_inputs: usize) {
        let inputs_buffers_ptr = Box::<[Sample]>::into_raw(
            vec![
                0.0 as Sample;
                self.block_size * self.oversampling.as_usize() * new_max_node_inputs
            ]
            .into_boxed_slice(),
        );
        let inputs_buffers_ptr = Arc::new(OwnedRawBuffer {
            ptr: inputs_buffers_ptr,
        });
        let old_input_buffers_ptr = mem::replace(&mut self.inputs_buffers_ptr, inputs_buffers_ptr);
        if self.graph_gen_communicator.is_some() {
            // A graph-gen is running: defer the free until it signals safety.
            self.buffers_to_free_when_safe.push(old_input_buffers_ptr);
            self.new_inputs_buffers_ptr = true;
        } else {
            // No audio thread can hold a reference, so we must be the only owner.
            assert_eq!(
                std::sync::Arc::<OwnedRawBuffer>::strong_count(&old_input_buffers_ptr),
                1
            );
            drop(old_input_buffers_ptr);
        }
        self.max_node_inputs = new_max_node_inputs;
    }
    /// Insert an already-constructed [`Node`] into this graph: grow shared
    /// buffers if needed, register its channel-name lookup tables, initialize
    /// it for this graph's (oversampled) block size and sample rate, and bind
    /// it to `node_id`.
    fn push_node(&mut self, mut node: Node, node_id: &mut NodeId) -> NodeKey {
        if node_id.graph_id() != self.id {
            eprintln!("Warning: Pushing node to NodeId with a GraphId matching a different Graph than the current Graph.")
        }
        if node.num_inputs() > self.max_node_inputs {
            self.increase_max_node_inputs(node.num_inputs());
        }
        let nodes = self.get_nodes();
        if nodes.capacity() == nodes.len() {
            // NOTE(review): this only reports the capacity overflow; the insert
            // below still happens and the SlotMap will reallocate — confirm
            // whether that is acceptable while the audio thread holds the map.
            eprintln!(
                "Error: Trying to push a node into a Graph that is at capacity. Try increasing the number of node slots and make sure you free the nodes you don't need."
            );
        }
        self.recalculation_required = true;
        // Build name <-> index lookup tables for the node's channels.
        let input_index_to_name = node.input_indices_to_names();
        let input_name_to_index = input_index_to_name
            .iter()
            .enumerate()
            .map(|(i, &name)| (name, i))
            .collect();
        let output_index_to_name = node.output_indices_to_names();
        let output_name_to_index = output_index_to_name
            .iter()
            .enumerate()
            .map(|(i, &name)| (name, i))
            .collect();
        // Oversampling scales both the block size and the sample rate the node sees.
        node.init(
            self.block_size * self.oversampling.as_usize(),
            self.sample_rate * (self.oversampling.as_usize() as Sample),
            *node_id,
        );
        let key = self.get_nodes_mut().insert(node);
        self.node_input_edges.insert(key, vec![]);
        self.node_feedback_edges.insert(key, vec![]);
        self.graph_input_edges.insert(key, vec![]);
        self.node_input_index_to_name
            .insert(key, input_index_to_name);
        self.node_input_name_to_index
            .insert(key, input_name_to_index);
        self.node_output_index_to_name
            .insert(key, output_index_to_name);
        self.node_output_name_to_index
            .insert(key, output_name_to_index);
        // New nodes start mortal (freeable).
        self.node_mortality.insert(key, true);
        self.node_ids.insert(key, node_id.clone());
        key
    }
pub fn free_disconnected_nodes(&mut self) -> Result<(), FreeError> {
let disconnected_nodes = std::mem::take(&mut self.disconnected_nodes);
if disconnected_nodes.len() > 0 {
self.recalculation_required = true;
}
for node in disconnected_nodes {
match self.free_node_from_key(node) {
Ok(_) => (),
Err(e) => match e {
FreeError::ImmortalNode => (),
_ => return Err(e),
},
}
}
Ok(())
}
fn key_from_id(node_ids: &SecondaryMap<NodeKey, NodeId>, node_id: NodeId) -> Option<NodeKey> {
node_ids
.iter()
.find(|(_key, &id)| id == node_id)
.map(|(key, _id)| key)
}
fn id_from_key(&self, node_key: NodeKey) -> Option<NodeId> {
self.node_ids.get(node_key).map(|id| *id)
}
    /// Free a node while "mending" the signal chain: for each of the first
    /// `min(num_inputs, num_outputs)` channels, whatever fed the node's input i
    /// is reconnected to whatever the node's output i fed, so audio keeps
    /// flowing through the gap the removed node leaves behind.
    fn free_node_mend_connections_from_key(&mut self, node_key: NodeKey) -> Result<(), FreeError> {
        if !self.get_nodes_mut().contains_key(node_key) {
            return Err(FreeError::NodeNotFound);
        }
        self.recalculation_required = true;
        let num_inputs = self.node_input_index_to_name
            .get(node_key)
            .expect(
                "Since the key exists in the Graph it should have a corresponding node_input_index_to_name Vec"
            )
            .len();
        let num_outputs = self.node_output_index_to_name
            .get(node_key)
            .expect(
                "Since the key exists in the Graph it should have a corresponding node_output_index_to_name Vec"
            )
            .len();
        // Only channels that exist on both sides can be bridged.
        let inputs_to_bridge = num_inputs.min(num_outputs);
        // Collect, per bridged channel, every connection that the node's
        // outputs feed: other nodes' inputs...
        let mut outputs = vec![vec![]; inputs_to_bridge];
        for (destination_node_key, edge_vec) in &self.node_input_edges {
            for edge in edge_vec {
                if edge.source == node_key && edge.from_output_index < inputs_to_bridge {
                    outputs[edge.from_output_index].push(Connection::Node {
                        source: self.id_from_key(node_key).unwrap(),
                        from_index: Some(edge.from_output_index),
                        from_label: None,
                        sink: self.id_from_key(destination_node_key).unwrap(),
                        to_index: Some(edge.to_input_index),
                        to_label: None,
                        channels: 1,
                        feedback: false,
                        to_index_offset: 0,
                    });
                }
            }
        }
        // ...and the graph's outputs.
        for graph_output in &self.output_edges {
            if graph_output.source == node_key && graph_output.from_output_index < inputs_to_bridge
            {
                outputs[graph_output.from_output_index].push(Connection::GraphOutput {
                    source: self.id_from_key(node_key).unwrap(),
                    from_index: Some(graph_output.from_output_index),
                    from_label: None,
                    to_index: graph_output.to_input_index,
                    channels: 1,
                });
            }
        }
        // Collect, per bridged channel, every edge feeding the node's inputs.
        let mut inputs = vec![vec![]; inputs_to_bridge];
        for (inout_index, bridge_input) in inputs.iter_mut().enumerate().take(inputs_to_bridge) {
            for input in self
                .node_input_edges
                .get(node_key)
                .expect("Since the key exists in the Graph its edge Vec should also exist")
            {
                if input.to_input_index == inout_index {
                    bridge_input.push(*input);
                }
            }
        }
        // Graph inputs feeding the node are collected separately since they
        // mend into `Connection::GraphInput` connections.
        let mut graph_inputs = vec![vec![]; inputs_to_bridge];
        for graph_input in self
            .graph_input_edges
            .get(node_key)
            .expect("Since the key exists in the graph its graph input Vec should also exist")
        {
            if graph_input.to_input_index < inputs_to_bridge {
                graph_inputs[graph_input.to_input_index].push(Connection::GraphInput {
                    sink: self.id_from_key(node_key).unwrap(),
                    from_index: graph_input.from_output_index,
                    to_index: Some(graph_input.to_input_index),
                    to_label: None,
                    channels: 1,
                    to_index_offset: 0,
                });
            }
        }
        // Bridge node-to-node: connect every feeder of input i to every
        // consumer of output i, wrapping the channel index into range.
        for (inputs, outputs) in inputs.into_iter().zip(outputs.iter()) {
            for input in inputs {
                for output in outputs {
                    let num_node_outputs =
                        self.get_nodes().get(input.source).unwrap().num_outputs();
                    let mut connection = output.clone();
                    if let Some(connection_from_index) = connection.get_from_index() {
                        connection =
                            connection.from_index(connection_from_index % num_node_outputs);
                    }
                    self.connect(connection.from(self.id_from_key(input.source).unwrap()))
                        .expect("Mended connections should be guaranteed to succeed");
                }
            }
        }
        // Bridge graph-input-to-consumer the same way.
        for (graph_inputs, outputs) in graph_inputs.into_iter().zip(outputs.iter()) {
            for input in graph_inputs {
                for output in outputs {
                    let connection = input.clone();
                    match self.connect(
                        connection
                            .to(output.get_source_node().unwrap())
                            .to_index(output.get_to_index().unwrap()),
                    ) {
                        Ok(_) => (),
                        Err(e) => {
                            return Err(FreeError::ConnectionError(Box::new(e)));
                        }
                    }
                }
            }
        }
        // Finally remove the node itself (and with it its old edges).
        self.free_node_from_key(node_key)
    }
/// Free a node while mending the connections that passed through it, searching
/// this graph first and then recursively every child graph.
///
/// Returns `FreeError::GraphNotFound` if no graph in the tree contains `node`.
pub fn free_node_mend_connections(&mut self, node: NodeId) -> Result<(), FreeError> {
    // Only search this graph's own nodes if the id could belong here.
    if node.graph_id == self.id {
        let found_key = self
            .node_ids
            .iter()
            .find(|(_key, &id)| id == node)
            .map(|(key, _id)| key);
        if let Some(node_key) = found_key {
            return self.free_node_mend_connections_from_key(node_key);
        }
    }
    // Not here: delegate to child graphs. GraphNotFound from a child just
    // means "keep looking"; any other error is definitive.
    for (_key, graph) in &mut self.graphs_per_node {
        match graph.free_node_mend_connections(node) {
            Ok(()) => return Ok(()),
            Err(FreeError::GraphNotFound) => continue,
            Err(e) => return Err(e),
        }
    }
    Err(FreeError::GraphNotFound)
}
/// Remove all feedback bookkeeping that involves `node_key`, freeing any
/// feedback node that ends up with no remaining feedback edges.
fn clear_feedback_for_node(&mut self, node_key: NodeKey) -> Result<(), FreeError> {
    let mut nodes_to_free = HashSet::new();
    // If this node owns a dedicated feedback node, mark it for removal.
    if let Some(&feedback_node) = self.node_feedback_node_key.get(node_key) {
        nodes_to_free.insert(feedback_node);
        self.node_feedback_node_key.remove(node_key);
    }
    for (feedback_key, feedback_edges) in &mut self.node_feedback_edges {
        // Only feedback nodes that had edges to begin with are candidates
        // for being freed below.
        if feedback_edges.is_empty() {
            continue;
        }
        // Drop every edge that originates from or feeds back into the node.
        feedback_edges
            .retain(|fe| fe.source != node_key && fe.feedback_destination != node_key);
        if feedback_edges.is_empty() {
            // This feedback node carries no edges any more: free it and
            // forget which source node it belonged to.
            nodes_to_free.insert(feedback_key);
            let mut owner = None;
            for (source_node, &feedback_node) in &self.node_feedback_node_key {
                if feedback_node == feedback_key {
                    owner = Some(source_node);
                }
            }
            if let Some(key) = owner {
                self.node_feedback_node_key.remove(key);
            }
        }
    }
    for na in nodes_to_free {
        self.free_node_from_key(na)?;
    }
    Ok(())
}
/// Free the node behind `node_key`: remove every edge touching it and either
/// defer the actual removal (when an audio thread may still be using it) or
/// drop it immediately.
fn free_node_from_key(&mut self, node_key: NodeKey) -> Result<(), FreeError> {
    if !self.get_nodes_mut().contains_key(node_key) {
        return Err(FreeError::NodeNotFound);
    }
    if !self.node_mortality[node_key] {
        return Err(FreeError::ImmortalNode);
    }
    self.recalculation_required = true;
    // Drop the edge lists owned by the node itself.
    self.node_input_edges.remove(node_key);
    self.graph_input_edges.remove(node_key);
    self.node_feedback_edges.remove(node_key);
    // Drop edges elsewhere that read from this node.
    for (_k, input_edges) in &mut self.node_input_edges {
        input_edges.retain(|edge| edge.source != node_key);
    }
    self.output_edges.retain(|edge| edge.source != node_key);
    self.clear_feedback_for_node(node_key)?;
    if let Some(ggc) = &mut self.graph_gen_communicator {
        // A running GraphGen may still be processing the node; defer the
        // removal until the change flag signals it is safe.
        self.node_keys_to_free_when_safe
            .push((node_key, ggc.next_change_flag.clone()));
        self.node_keys_pending_removal.insert(node_key);
    } else {
        // No audio thread involved: remove immediately.
        self.graphs_per_node.remove(node_key);
        self.get_nodes_mut().remove(node_key);
    }
    Ok(())
}
/// Free `node`, searching this graph first and then every child graph.
///
/// Returns `FreeError::GraphNotFound` if no graph in the tree contains it.
pub fn free_node(&mut self, node: NodeId) -> Result<(), FreeError> {
    // Only search this graph's own nodes if the id could belong here.
    if node.graph_id() == self.id {
        let found_key = self
            .node_ids
            .iter()
            .find(|(_key, &id)| id == node)
            .map(|(key, _id)| key);
        if let Some(node_key) = found_key {
            return self.free_node_from_key(node_key);
        }
    }
    // Not here: delegate to child graphs. GraphNotFound from a child just
    // means "keep looking"; any other error is definitive.
    for (_key, graph) in &mut self.graphs_per_node {
        match graph.free_node(node) {
            Ok(()) => return Ok(()),
            Err(FreeError::GraphNotFound) => continue,
            Err(e) => return Err(e),
        }
    }
    Err(FreeError::GraphNotFound)
}
/// Start this graph's scheduler (if it has a running GraphGen) and then
/// recursively start the schedulers of all child graphs, so the whole tree
/// shares the same start timestamp and latency.
fn start_scheduler(
    &mut self,
    latency: Duration,
    start_ts: Instant,
    clock_update: &Option<ClockUpdate>,
    musical_time_map: &Arc<RwLock<MusicalTimeMap>>,
) {
    if let Some(ggc) = &mut self.graph_gen_communicator {
        if let Some(update) = clock_update {
            ggc.send_clock_update(update.clone());
        }
        // Sample rate and block size are scaled by the oversampling factor.
        let oversampling = self.oversampling.as_usize();
        ggc.scheduler.start(
            self.sample_rate * (oversampling as Sample),
            self.block_size * oversampling,
            latency,
            start_ts,
            musical_time_map.clone(),
        );
    }
    for (_key, graph) in &mut self.graphs_per_node {
        graph.start_scheduler(latency, start_ts, clock_update, musical_time_map);
    }
}
/// Current playback position in beats, or `None` if the graph has no running
/// GraphGen (and therefore no timestamp) or no musical time mapping.
pub fn get_current_time_musical(&self) -> Option<Beats> {
    let ggc = self.graph_gen_communicator.as_ref()?;
    // Convert the audio thread's sample counter to seconds, then map seconds
    // to beats through the scheduler's musical time map.
    let ts_samples = ggc.timestamp.load(Ordering::Relaxed);
    let seconds = Seconds::from_seconds_f64(ts_samples as f64 / self.sample_rate as f64);
    ggc.scheduler.seconds_to_musical_time_beats(seconds)
}
/// Build a serializable snapshot of the graph for inspection/debugging:
/// every node (with its inner graph inspected recursively), all edges
/// resolved to node indices, plus which nodes are disconnected or pending
/// removal.
pub fn generate_inspection(&self) -> GraphInspection {
    let real_nodes = self.get_nodes();
    // Position in this Vec is the index used by EdgeSource::Node below.
    let mut node_key_processed = Vec::with_capacity(real_nodes.len());
    let mut nodes = Vec::with_capacity(real_nodes.len());
    // First pass: one NodeInspection per node. Edges are filled in during the
    // second pass, once every node's index is known.
    for (node_key, node) in real_nodes {
        // Recursively inspect the inner graph if this node hosts one.
        let graph_inspection = self
            .graphs_per_node
            .get(node_key)
            .map(|graph| graph.generate_inspection());
        nodes.push(NodeInspection {
            name: node.name.to_string(),
            address: self.node_ids
                .get(node_key)
                .expect("All nodes should have their ids stored.")
                .clone(),
            input_channels: self.node_input_index_to_name
                .get(node_key)
                .expect(
                    "All nodes should have a list of input channel names made when pushed to the graph."
                )
                .iter()
                .map(|&s| s.to_string())
                .collect(),
            output_channels: self.node_output_index_to_name
                .get(node_key)
                .expect(
                    "All nodes should have a list of output channel names made when pushed to the graph."
                )
                .iter()
                .map(|&s| s.to_string())
                .collect(),
            // Filled in by the second pass below.
            input_edges: vec![],
            graph_inspection,
        });
        node_key_processed.push(node_key);
    }
    // Second pass: translate each edge's source NodeKey into its index in
    // `nodes` / `node_key_processed`.
    for (node_key, _node) in real_nodes {
        let mut input_edges = Vec::new();
        if let Some(edges) = self.node_input_edges.get(node_key) {
            for edge in edges {
                let index = node_key_processed
                    .iter()
                    .position(|&k| k == edge.source)
                    .unwrap();
                input_edges.push(EdgeInspection {
                    source: EdgeSource::Node(index),
                    from_index: edge.from_output_index,
                    to_index: edge.to_input_index,
                });
            }
        }
        if let Some(edges) = self.graph_input_edges.get(node_key) {
            for edge in edges {
                input_edges.push(EdgeInspection {
                    source: EdgeSource::Graph,
                    from_index: edge.from_output_index,
                    to_index: edge.to_input_index,
                });
            }
        }
        if let Some(index) = node_key_processed.iter().position(|&k| k == node_key) {
            nodes[index].input_edges = input_edges;
        }
    }
    // Edges that feed this graph's own outputs.
    let mut graph_output_input_edges = Vec::new();
    for edge in &self.output_edges {
        if let Some(index) = node_key_processed.iter().position(|&k| k == edge.source) {
            graph_output_input_edges.push(EdgeInspection {
                source: EdgeSource::Node(index),
                from_index: edge.from_output_index,
                to_index: edge.to_input_index,
            });
        }
    }
    // Translate the cached key collections into indices into `nodes`.
    let unconnected_nodes = self
        .disconnected_nodes
        .iter()
        .filter_map(|&disconnected_key| {
            node_key_processed
                .iter()
                .position(|&key| key == disconnected_key)
        })
        .collect();
    let nodes_pending_removal = self
        .node_keys_to_free_when_safe
        .iter()
        .filter_map(|&(freed_key, _)| {
            node_key_processed.iter().position(|&key| key == freed_key)
        })
        .collect();
    GraphInspection {
        nodes,
        unconnected_nodes,
        nodes_pending_removal,
        graph_output_input_edges,
        num_inputs: self.num_inputs,
        num_outputs: self.num_outputs,
        graph_id: self.id,
    }
}
/// Schedule a batch of parameter changes to be applied together at `time`.
///
/// All changes in one batch must target nodes in the same graph so they can
/// be scheduled atomically on a single scheduler; otherwise
/// `ScheduleError::DifferentGraphs` is returned.
pub fn schedule_changes(
    &mut self,
    node_changes: Vec<NodeChanges>,
    time: Time,
) -> Result<(), ScheduleError> {
    if node_changes.is_empty() {
        return Ok(());
    }
    // Reject batches that span multiple graphs.
    let first_graph = node_changes[0].node.graph_id();
    for nc in &node_changes {
        if nc.node.graph_id() != first_graph {
            return Err(ScheduleError::DifferentGraphs);
        }
    }
    let mut scheduler_changes = vec![];
    for node_changes in &node_changes {
        let node = node_changes.node;
        let change_pairs = &node_changes.parameters;
        let time_offset = node_changes.offset;
        // A matching graph id is necessary but not sufficient; the key lookup
        // below decides whether the node is actually here.
        let mut node_might_be_in_this_graph = true;
        if node.graph_id() != self.id {
            node_might_be_in_this_graph = false;
        }
        if node_might_be_in_this_graph {
            if let Some((key, _id)) = self.node_ids.iter().find(|(_key, &id)| id == node) {
                // NOTE(review): a node that has an id entry but no live node
                // only logs here and the changes are still pushed — confirm
                // this best-effort behavior is intended.
                if !self.get_nodes_mut().contains_key(key) {
                    eprintln!("Scheduled change for a node that no longer exists");
                }
                for (channel, change) in change_pairs {
                    // Resolve labels to input indices; numeric channels are
                    // used as-is.
                    let index = match channel {
                        NodeChannel::Label(label) => {
                            if let Some(label_index) =
                                self.node_input_name_to_index[key].get(label)
                            {
                                *label_index
                            } else {
                                return Err(ScheduleError::InputLabelNotFound(label));
                            }
                        }
                        NodeChannel::Index(index) => *index,
                    };
                    if index >= self.node_input_index_to_name[key].len() {
                        return Err(ScheduleError::InputOutOfRange {
                            node_name: self.get_nodes()[key].name.to_string(),
                            channel: channel.clone(),
                            change: change.clone(),
                        });
                    }
                    let change_kind = match change {
                        Change::Constant(value) => ScheduledChangeKind::Constant {
                            index,
                            value: *value,
                        },
                        Change::Trigger => ScheduledChangeKind::Trigger { index },
                    };
                    scheduler_changes.push((key, change_kind, time_offset));
                }
            } else {
                node_might_be_in_this_graph = false;
            }
        }
        if !node_might_be_in_this_graph {
            // Search child graphs; GraphNotFound means "keep looking", any
            // other error is definitive.
            let mut found_graph = false;
            for (_key, graph) in &mut self.graphs_per_node {
                match graph.schedule_changes(vec![node_changes.clone()], time) {
                    Ok(_) => {
                        found_graph = true;
                        break;
                    }
                    Err(e) => match e {
                        ScheduleError::GraphNotFound { .. } => (),
                        _ => {
                            return Err(e);
                        }
                    },
                }
            }
            if !found_graph {
                return Err(ScheduleError::GraphNotFound(node_changes.node));
            }
        }
    }
    // With a running graph the changes go straight to the scheduler;
    // otherwise they are queued until a scheduler is created.
    if let Some(ggc) = &mut self.graph_gen_communicator {
        ggc.scheduler.schedule(scheduler_changes, time);
    } else {
        self.scheduled_changes_queue.push((scheduler_changes, time));
    }
    Ok(())
}
/// Schedule a single parameter change, recursing into child graphs when the
/// target node is not in this one.
pub fn schedule_change(&mut self, change: ParameterChange) -> Result<(), ScheduleError> {
    // A matching graph id is necessary but not sufficient; the key lookup
    // below decides whether the node is actually here.
    let mut node_might_be_in_this_graph = true;
    if change.input.node.graph_id() != self.id {
        node_might_be_in_this_graph = false;
    }
    if node_might_be_in_this_graph {
        // NOTE(review): if the graph id matches but no key is found, this
        // function falls through and returns Ok(()) without scheduling
        // anything — confirm whether an error would be more appropriate
        // (cf. `schedule_changes`, which reports GraphNotFound).
        if let Some(key) = Self::key_from_id(&self.node_ids, change.input.node) {
            if !self.get_nodes_mut().contains_key(key) {
                return Err(ScheduleError::NodeNotFound);
            }
            // Resolve labels to input indices; numeric channels are used
            // as-is.
            let index = match change.input.channel {
                NodeChannel::Label(label) => {
                    if let Some(label_index) = self.node_input_name_to_index[key].get(label) {
                        *label_index
                    } else {
                        return Err(ScheduleError::InputLabelNotFound(label));
                    }
                }
                NodeChannel::Index(i) => i,
            };
            if index >= self.node_input_index_to_name[key].len() {
                return Err(ScheduleError::InputOutOfRange {
                    node_name: self.get_nodes()[key].name.to_string(),
                    channel: change.input.channel,
                    change: change.value,
                });
            }
            let change_kind = match change.value {
                Change::Constant(c) => ScheduledChangeKind::Constant { index, value: c },
                Change::Trigger => ScheduledChangeKind::Trigger { index },
            };
            // With a running graph the change goes straight to the scheduler;
            // otherwise it is queued until a scheduler is created.
            if let Some(ggc) = &mut self.graph_gen_communicator {
                ggc.scheduler
                    .schedule(vec![(key, change_kind, None)], change.time);
            } else {
                self.scheduled_changes_queue
                    .push((vec![(key, change_kind, None)], change.time));
            }
        }
    }
    if !node_might_be_in_this_graph {
        // Search child graphs; GraphNotFound means "keep looking".
        for (_key, graph) in &mut self.graphs_per_node {
            match graph.schedule_change(change.clone()) {
                Ok(_) => {
                    return Ok(());
                }
                Err(e) => match e {
                    ScheduleError::GraphNotFound(_) => (),
                    _ => {
                        return Err(e);
                    }
                },
            }
        }
        return Err(ScheduleError::GraphNotFound(change.input.node));
    }
    Ok(())
}
/// Remove a connection in this graph or, recursively, in one of its child
/// graphs.
///
/// Fixes relative to the previous revision:
/// - `from_label` in `Connection::Node` is now resolved against the *source*
///   node's outputs (it was looked up on the sink, so labeled disconnections
///   resolved against the wrong node; `connect` already used the source).
/// - `Connection::GraphInput` bounds are now validated the same way `connect`
///   validates them (graph inputs / sink node inputs) instead of against
///   `self.num_outputs`, which is unrelated to either side of the edge.
///
/// # Errors
/// `ConnectionError` variants for unknown nodes/graphs, out-of-bounds
/// channels, invalid labels, or a missing sink.
pub fn disconnect(&mut self, connection: Connection) -> Result<(), ConnectionError> {
    // Forward to child graphs. GraphNotFound from a child means "not here,
    // keep searching"; every other error is definitive.
    let mut try_disconnect_in_child_graphs = |connection: Connection| {
        for (_key, graph) in &mut self.graphs_per_node {
            match graph.disconnect(connection.clone()) {
                Ok(_) => return Ok(()),
                Err(e) => match e {
                    ConnectionError::NodeNotFound(c) => {
                        return Err(ConnectionError::NodeNotFound(c));
                    }
                    ConnectionError::GraphNotFound(_connection) => (),
                    _ => return Err(e),
                },
            }
        }
        Err(ConnectionError::GraphNotFound(connection))
    };
    match connection {
        Connection::Node {
            ref source,
            from_index,
            from_label,
            ref sink,
            to_index: input_index,
            to_label: input_label,
            channels,
            feedback,
            to_index_offset,
        } => {
            if source.graph_id() != sink.graph_id() {
                return Err(ConnectionError::DifferentGraphs(connection.clone()));
            }
            if source.graph_id() != self.id {
                return try_disconnect_in_child_graphs(connection.clone());
            }
            let Some(source_key) = Self::key_from_id(&self.node_ids, *source) else {
                return try_disconnect_in_child_graphs(connection.clone());
            };
            let Some(sink_key) = Self::key_from_id(&self.node_ids, *sink) else {
                return try_disconnect_in_child_graphs(connection.clone());
            };
            if source_key == sink_key {
                return Err(ConnectionError::SameNode);
            }
            if !self.get_nodes().contains_key(source_key)
                || !self.get_nodes().contains_key(sink_key)
                || self.node_keys_pending_removal.contains(&source_key)
                || self.node_keys_pending_removal.contains(&sink_key)
            {
                return Err(ConnectionError::NodeNotFound(connection.clone()));
            }
            // Channel resolution: explicit index wins, then label, else 0.
            let to_index = if let Some(i) = input_index {
                i
            } else if let Some(label) = input_label {
                match self.input_index_from_label(sink_key, label) {
                    Some(index) => index,
                    None => return Err(ConnectionError::InvalidInputLabel(label)),
                }
            } else {
                0
            } + to_index_offset;
            // FIX: the output label belongs to the source node (was sink_key).
            let from_index = if let Some(i) = from_index {
                i
            } else if let Some(label) = from_label {
                match self.output_index_from_label(source_key, label) {
                    Some(index) => index,
                    None => return Err(ConnectionError::InvalidOutputLabel(label)),
                }
            } else {
                0
            };
            if channels + from_index
                > self
                    .node_output_index_to_name
                    .get(source_key)
                    .unwrap()
                    .len()
            {
                return Err(ConnectionError::SourceChannelOutOfBounds);
            }
            if channels + to_index > self.node_input_index_to_name.get(sink_key).unwrap().len()
            {
                return Err(ConnectionError::DestinationChannelOutOfBounds);
            }
            if !feedback {
                // Drop every edge from the source within the channel window.
                self.node_input_edges[sink_key].retain(|edge| {
                    !(edge.source == source_key
                        && edge.from_output_index >= from_index
                        && edge.from_output_index < from_index + channels
                        && edge.to_input_index >= to_index
                        && edge.to_input_index < to_index + channels)
                });
            } else if let Some(&feedback_node) = self.node_feedback_node_key.get(source_key) {
                // Feedback edges store their destination channel relative to
                // the feedback node's own outputs, hence the comparison of
                // `to_input_index` against `from_index` (matching how
                // `connect` creates them).
                let feedback_edge_list = &mut self.node_feedback_edges[feedback_node];
                feedback_edge_list.retain(|edge| {
                    !(edge.source == source_key
                        && edge.feedback_destination == sink_key
                        && edge.from_output_index >= from_index
                        && edge.from_output_index < from_index + channels
                        && edge.to_input_index >= from_index
                        && edge.to_input_index < from_index + channels)
                });
                let feedback_now_empty = feedback_edge_list.is_empty();
                // Also drop the sink's edges that read from the feedback node.
                self.node_input_edges[sink_key].retain(|edge| {
                    !(edge.source == feedback_node
                        && edge.from_output_index >= from_index
                        && edge.from_output_index < from_index + channels
                        && edge.to_input_index >= to_index
                        && edge.to_input_index < to_index + channels)
                });
                if feedback_now_empty {
                    // No feedback edges remain: the feedback node is unused.
                    self.free_node_from_key(feedback_node)?;
                }
            }
        }
        Connection::Constant {
            value: _,
            ref sink,
            to_index: input_index,
            to_label: input_label,
        } => {
            let Some(sink) = sink else {
                return Err(ConnectionError::SinkNotSet);
            };
            if sink.graph_id() != self.id {
                return try_disconnect_in_child_graphs(connection.clone());
            }
            let Some(sink_key) = Self::key_from_id(&self.node_ids, *sink) else {
                return try_disconnect_in_child_graphs(connection.clone());
            };
            if !self.get_nodes().contains_key(sink_key)
                || self.node_keys_pending_removal.contains(&sink_key)
            {
                return Err(ConnectionError::NodeNotFound(connection.clone()));
            }
            let input = if let Some(i) = input_index {
                i
            } else if let Some(label) = input_label {
                match self.input_index_from_label(sink_key, label) {
                    Some(index) => index,
                    None => return Err(ConnectionError::InvalidInputLabel(label)),
                }
            } else {
                0
            };
            // "Disconnecting" a constant means setting it to 0.0, scheduled
            // on the audio thread when the graph is running.
            if let Some(ggc) = &mut self.graph_gen_communicator {
                ggc.scheduler.schedule(
                    vec![(
                        sink_key,
                        ScheduledChangeKind::Constant {
                            index: input,
                            value: 0.0,
                        },
                        None,
                    )],
                    Time::Immediately,
                );
            } else {
                self.get_nodes_mut()[sink_key].set_constant(0.0, input);
            }
        }
        Connection::GraphOutput {
            ref source,
            from_index,
            from_label,
            to_index,
            channels,
        } => {
            if source.graph_id != self.id {
                return try_disconnect_in_child_graphs(connection.clone());
            }
            let Some((source_key, _)) = self.node_ids.iter().find(|(_key, &id)| id == *source)
            else {
                return try_disconnect_in_child_graphs(connection.clone());
            };
            if !self.get_nodes().contains_key(source_key)
                || self.node_keys_pending_removal.contains(&source_key)
            {
                return Err(ConnectionError::NodeNotFound(connection.clone()));
            }
            if channels + to_index > self.num_outputs {
                return Err(ConnectionError::DestinationChannelOutOfBounds);
            }
            let from_index = if let Some(i) = from_index {
                i
            } else if let Some(label) = from_label {
                match self.output_index_from_label(source_key, label) {
                    Some(index) => index,
                    None => return Err(ConnectionError::InvalidOutputLabel(label)),
                }
            } else {
                0
            };
            self.output_edges.retain(|edge| {
                !(edge.source == source_key
                    && edge.from_output_index >= from_index
                    && edge.from_output_index < from_index + channels
                    && edge.to_input_index >= to_index
                    && edge.to_input_index < to_index + channels)
            });
        }
        Connection::GraphInput {
            ref sink,
            from_index,
            to_index,
            to_label,
            channels,
            to_index_offset,
        } => {
            if sink.graph_id != self.id {
                return try_disconnect_in_child_graphs(connection.clone());
            }
            let Some((sink_key, _)) = self.node_ids.iter().find(|(_key, &id)| id == *sink)
            else {
                return try_disconnect_in_child_graphs(connection.clone());
            };
            if !self.get_nodes().contains_key(sink_key)
                || self.node_keys_pending_removal.contains(&sink_key)
            {
                return Err(ConnectionError::NodeNotFound(connection.clone()));
            }
            let to_index = if let Some(i) = to_index {
                i
            } else if let Some(label) = to_label {
                match self.input_index_from_label(sink_key, label) {
                    Some(index) => index,
                    None => return Err(ConnectionError::InvalidInputLabel(label)),
                }
            } else {
                0
            } + to_index_offset;
            // FIX: validate against the same bounds `connect` uses for
            // GraphInput — `from_index` addresses the graph's inputs and
            // `to_index` the sink node's inputs (was `self.num_outputs`).
            if channels + from_index > self.num_inputs {
                return Err(ConnectionError::SourceChannelOutOfBounds);
            }
            if channels + to_index > self.node_input_index_to_name[sink_key].len() {
                return Err(ConnectionError::DestinationChannelOutOfBounds);
            }
            // Graph input edges are stored with `source == sink_key`, matching
            // how `connect` creates them.
            self.graph_input_edges[sink_key].retain(|edge| {
                !(edge.source == sink_key
                    && edge.from_output_index >= from_index
                    && edge.from_output_index < from_index + channels
                    && edge.to_input_index >= to_index
                    && edge.to_input_index < to_index + channels)
            });
        }
        Connection::Clear { .. } => {
            // Clearing is implemented in `connect`.
            return self.connect(connection);
        }
        Connection::GraphInputToOutput {
            from_input_channel,
            to_output_channel,
            channels,
            graph_id,
        } => {
            if graph_id != self.id {
                return try_disconnect_in_child_graphs(connection.clone());
            }
            self.graph_input_to_output_edges.retain(|edge| {
                !(edge.from_output_index >= from_input_channel
                    && edge.from_output_index < (from_input_channel + channels)
                    && edge.to_input_index >= to_output_channel
                    && edge.to_input_index < (to_output_channel + channels))
            });
        }
        Connection::ClearGraphInputToOutput {
            graph_id,
            from_input_channel,
            to_output_channel,
            channels,
        } => {
            if graph_id != self.id {
                return try_disconnect_in_child_graphs(connection.clone());
            }
            // Unset fields fall back to "everything": channel 0 and the
            // widest possible channel count.
            let from_input_channel = from_input_channel.unwrap_or(0);
            let to_output_channel = to_output_channel.unwrap_or(0);
            let channels = channels.unwrap_or(self.num_inputs.max(self.num_outputs));
            self.graph_input_to_output_edges.retain(|edge| {
                !(edge.from_output_index >= from_input_channel
                    && edge.from_output_index < (from_input_channel + channels)
                    && edge.to_input_index >= to_output_channel
                    && edge.to_input_index < (to_output_channel + channels))
            });
        }
    }
    self.recalculation_required = true;
    Ok(())
}
/// Apply every connection in `bundle`, stopping at the first failure.
pub fn connect_bundle(
    &mut self,
    bundle: impl Into<ConnectionBundle>,
) -> Result<(), ConnectionError> {
    for con in bundle.into().as_connections() {
        self.connect(con)?;
    }
    Ok(())
}
/// Create a connection (or apply a clearing operation) in this graph or,
/// recursively, in one of its child graphs.
///
/// Fix relative to the previous revision: in `Connection::Clear` with a
/// specific channel, the input-edge removal loop was written as
/// `while i > edges.len()`, which never executes, so single-channel clearing
/// silently left the edges in place. The removal now actually runs.
///
/// # Errors
/// `ConnectionError` variants for unknown nodes/graphs, out-of-bounds
/// channels, invalid labels, or a missing sink.
pub fn connect(&mut self, connection: Connection) -> Result<(), ConnectionError> {
    // Forward to child graphs. GraphNotFound from a child means "not here,
    // keep searching"; every other error is definitive.
    let mut try_connect_to_graphs = |connection: Connection| {
        for (_key, graph) in &mut self.graphs_per_node {
            match graph.connect(connection.clone()) {
                Ok(_) => return Ok(()),
                Err(e) => match e {
                    ConnectionError::NodeNotFound(c) => {
                        return Err(ConnectionError::NodeNotFound(c));
                    }
                    ConnectionError::GraphNotFound(_connection) => (),
                    _ => return Err(e),
                },
            }
        }
        Err(ConnectionError::GraphNotFound(connection))
    };
    match connection {
        Connection::Node {
            ref source,
            from_index,
            from_label,
            ref sink,
            to_index: input_index,
            to_label: input_label,
            channels,
            feedback,
            to_index_offset,
        } => {
            if source.graph_id() != sink.graph_id() {
                return Err(ConnectionError::DifferentGraphs(connection.clone()));
            }
            if source.graph_id != self.id {
                return try_connect_to_graphs(connection.clone());
            }
            let Some((source_key, _)) = self.node_ids.iter().find(|(_key, &id)| id == *source)
            else {
                return try_connect_to_graphs(connection.clone());
            };
            let Some((sink_key, _)) = self.node_ids.iter().find(|(_key, &id)| id == *sink)
            else {
                return try_connect_to_graphs(connection.clone());
            };
            if source_key == sink_key {
                return Err(ConnectionError::SameNode);
            }
            if !self.get_nodes().contains_key(source_key)
                || !self.get_nodes().contains_key(sink_key)
                || self.node_keys_pending_removal.contains(&source_key)
                || self.node_keys_pending_removal.contains(&sink_key)
            {
                return Err(ConnectionError::NodeNotFound(connection.clone()));
            }
            // Channel resolution: explicit index wins, then label, else 0.
            let to_index = if let Some(i) = input_index {
                i
            } else if let Some(label) = input_label {
                match self.input_index_from_label(sink_key, label) {
                    Some(index) => index,
                    None => return Err(ConnectionError::InvalidInputLabel(label)),
                }
            } else {
                0
            } + to_index_offset;
            let from_index = if let Some(i) = from_index {
                i
            } else if let Some(label) = from_label {
                match self.output_index_from_label(source_key, label) {
                    Some(index) => index,
                    None => return Err(ConnectionError::InvalidOutputLabel(label)),
                }
            } else {
                0
            };
            let num_source_outputs = self
                .node_output_index_to_name
                .get(source_key)
                .unwrap()
                .len();
            let num_sink_inputs = self.node_input_index_to_name.get(sink_key).unwrap().len();
            if !feedback {
                // Channels wrap around both source outputs and sink inputs.
                let edge_list = &mut self.node_input_edges[sink_key];
                for i in 0..channels {
                    edge_list.push(Edge {
                        from_output_index: (from_index + i) % num_source_outputs,
                        source: source_key,
                        to_input_index: (to_index + i) % num_sink_inputs,
                    });
                }
            } else {
                // Feedback routes through a dedicated feedback node (one per
                // source node), created lazily here.
                let feedback_node_key =
                    if let Some(&index) = self.node_feedback_node_key.get(source_key) {
                        index
                    } else {
                        let feedback_node = FeedbackGen::node(num_source_outputs);
                        let mut feedback_node_address = NodeId::new(self.id);
                        let key = self.push_node(feedback_node, &mut feedback_node_address);
                        self.feedback_node_indices.push(key);
                        self.node_feedback_node_key.insert(source_key, key);
                        key
                    };
                // The sink reads from the feedback node...
                let edge_list = &mut self.node_input_edges[sink_key];
                for i in 0..channels {
                    edge_list.push(Edge {
                        from_output_index: (from_index + i) % num_source_outputs,
                        source: feedback_node_key,
                        to_input_index: (to_index + i) % num_sink_inputs,
                    });
                }
                // ...and the feedback node records where its data comes from.
                let edge_list = &mut self.node_feedback_edges[feedback_node_key];
                for i in 0..channels {
                    edge_list.push(FeedbackEdge {
                        from_output_index: (from_index + i) % num_source_outputs,
                        source: source_key,
                        to_input_index: (from_index + i) % num_source_outputs,
                        feedback_destination: sink_key,
                    });
                }
            }
            self.recalculation_required = true;
        }
        Connection::Constant {
            value,
            ref sink,
            to_index: input_index,
            to_label: input_label,
        } => {
            let Some(sink) = sink else {
                return Err(ConnectionError::SinkNotSet);
            };
            if sink.graph_id != self.id {
                return try_connect_to_graphs(connection.clone());
            }
            let Some((sink_key, _)) = self.node_ids.iter().find(|(_key, &id)| id == *sink)
            else {
                return try_connect_to_graphs(connection.clone());
            };
            if !self.get_nodes().contains_key(sink_key)
                || self.node_keys_pending_removal.contains(&sink_key)
            {
                return Err(ConnectionError::NodeNotFound(connection.clone()));
            }
            let input = if let Some(i) = input_index {
                i
            } else if let Some(label) = input_label {
                match self.input_index_from_label(sink_key, label) {
                    Some(index) => index,
                    None => return Err(ConnectionError::InvalidInputLabel(label)),
                }
            } else {
                0
            };
            // With a running graph the constant is applied on the audio
            // thread; otherwise it can be set directly.
            if let Some(ggc) = &mut self.graph_gen_communicator {
                ggc.scheduler.schedule(
                    vec![(
                        sink_key,
                        ScheduledChangeKind::Constant {
                            index: input,
                            value,
                        },
                        None,
                    )],
                    Time::Immediately,
                );
            } else {
                self.get_nodes_mut()[sink_key].set_constant(value, input);
            }
        }
        Connection::GraphOutput {
            ref source,
            from_index,
            from_label,
            to_index,
            channels,
        } => {
            if source.graph_id != self.id {
                return try_connect_to_graphs(connection.clone());
            }
            let Some((source_key, _)) = self.node_ids.iter().find(|(_key, &id)| id == *source)
            else {
                return try_connect_to_graphs(connection.clone());
            };
            if !self.get_nodes().contains_key(source_key)
                || self.node_keys_pending_removal.contains(&source_key)
            {
                return Err(ConnectionError::NodeNotFound(connection.clone()));
            }
            let num_source_outputs = self
                .node_output_index_to_name
                .get(source_key)
                .unwrap()
                .len();
            let from_index = if let Some(i) = from_index {
                i
            } else if let Some(label) = from_label {
                match self.output_index_from_label(source_key, label) {
                    Some(index) => index,
                    None => return Err(ConnectionError::InvalidOutputLabel(label)),
                }
            } else {
                0
            };
            // Channels wrap around both source outputs and graph outputs.
            for i in 0..channels {
                self.output_edges.push(Edge {
                    source: source_key,
                    from_output_index: (from_index + i) % num_source_outputs,
                    to_input_index: (to_index + i) % self.num_outputs,
                });
            }
            self.recalculation_required = true;
        }
        Connection::GraphInput {
            ref sink,
            from_index,
            to_index,
            to_label,
            channels,
            to_index_offset,
        } => {
            if sink.graph_id != self.id {
                return try_connect_to_graphs(connection.clone());
            }
            let Some((sink_key, _)) = self.node_ids.iter().find(|(_key, &id)| id == *sink)
            else {
                return try_connect_to_graphs(connection.clone());
            };
            if !self.get_nodes().contains_key(sink_key)
                || self.node_keys_pending_removal.contains(&sink_key)
            {
                return Err(ConnectionError::NodeNotFound(connection.clone()));
            }
            let to_index = if let Some(i) = to_index {
                i
            } else if let Some(label) = to_label {
                match self.input_index_from_label(sink_key, label) {
                    Some(index) => index,
                    None => return Err(ConnectionError::InvalidInputLabel(label)),
                }
            } else {
                0
            } + to_index_offset;
            if channels + from_index > self.num_inputs {
                return Err(ConnectionError::SourceChannelOutOfBounds);
            }
            if channels + to_index > self.node_input_index_to_name[sink_key].len() {
                return Err(ConnectionError::DestinationChannelOutOfBounds);
            }
            // Graph input edges are stored with `source == sink_key`.
            for i in 0..channels {
                self.graph_input_edges[sink_key].push(Edge {
                    source: sink_key,
                    from_output_index: from_index + i,
                    to_input_index: to_index + i,
                });
            }
            self.recalculation_required = true;
        }
        Connection::Clear {
            ref node,
            input_nodes,
            input_constants,
            output_nodes,
            graph_outputs,
            graph_inputs,
            channel,
        } => {
            if node.graph_id != self.id {
                return try_connect_to_graphs(connection.clone());
            }
            let Some((node_key, _)) = self.node_ids.iter().find(|(_key, &id)| id == *node)
            else {
                return try_connect_to_graphs(connection.clone());
            };
            if !self.get_nodes().contains_key(node_key)
                || self.node_keys_pending_removal.contains(&node_key)
            {
                return Err(ConnectionError::NodeNotFound(connection.clone()));
            }
            self.recalculation_required = true;
            // If a channel is specified, only that channel is cleared;
            // otherwise all channels are affected.
            let channel_index = match channel {
                Some(c) => {
                    let index = match c {
                        connection::NodeChannel::Label(label) => {
                            if let Some(index) = self.input_index_from_label(node_key, label) {
                                index
                            } else {
                                return Err(ConnectionError::InvalidInputLabel(label));
                            }
                        }
                        connection::NodeChannel::Index(i) => i,
                    };
                    Some(index)
                }
                None => None,
            };
            if input_nodes {
                // Feedback nodes whose edges all disappear are freed below.
                let mut nodes_to_free = HashSet::new();
                if let Some(index) = channel_index {
                    for input_edge in &self.node_input_edges[node_key] {
                        if input_edge.to_input_index == index {
                            if self.feedback_node_indices.contains(&input_edge.source) {
                                let feedback_edges =
                                    &mut self.node_feedback_edges[input_edge.source];
                                let mut i = 0;
                                while i < feedback_edges.len() {
                                    if feedback_edges[i].feedback_destination == node_key
                                        && feedback_edges[i].from_output_index
                                            == input_edge.from_output_index
                                    {
                                        feedback_edges.remove(i);
                                    } else {
                                        i += 1;
                                    }
                                }
                                if feedback_edges.is_empty() {
                                    nodes_to_free.insert(input_edge.source);
                                }
                            }
                        }
                    }
                    // FIX: this loop was `while i > edges.len()`, which never
                    // runs — channel-specific clearing removed nothing.
                    self.node_input_edges[node_key]
                        .retain(|edge| edge.to_input_index != index);
                } else {
                    for input_edge in &self.node_input_edges[node_key] {
                        if self.feedback_node_indices.contains(&input_edge.source) {
                            let feedback_edges =
                                &mut self.node_feedback_edges[input_edge.source];
                            let mut i = 0;
                            while i < feedback_edges.len() {
                                if feedback_edges[i].feedback_destination == node_key
                                    && feedback_edges[i].from_output_index
                                        == input_edge.from_output_index
                                {
                                    feedback_edges.remove(i);
                                } else {
                                    i += 1;
                                }
                            }
                            if feedback_edges.is_empty() {
                                nodes_to_free.insert(input_edge.source);
                            }
                        }
                    }
                    self.node_input_edges[node_key].clear();
                }
                for na in nodes_to_free {
                    self.free_node_from_key(na)?;
                }
            }
            if graph_inputs {
                self.graph_input_edges[node_key].clear();
            }
            if input_constants {
                // Reset constants to 0.0, scheduled if the graph is running.
                let num_node_inputs = self.get_nodes_mut()[node_key].num_inputs();
                if let Some(ggc) = &mut self.graph_gen_communicator {
                    if let Some(index) = channel_index {
                        let change_kind = ScheduledChangeKind::Constant { index, value: 0.0 };
                        ggc.scheduler.schedule_now(node_key, change_kind);
                    } else {
                        for index in 0..num_node_inputs {
                            let change_kind =
                                ScheduledChangeKind::Constant { index, value: 0.0 };
                            ggc.scheduler.schedule_now(node_key, change_kind);
                        }
                    }
                } else if let Some(index) = channel_index {
                    self.get_nodes_mut()[node_key].set_constant(0.0, index);
                } else {
                    for index in 0..num_node_inputs {
                        self.get_nodes_mut()[node_key].set_constant(0.0, index);
                    }
                }
            }
            if output_nodes {
                // Remove edges in other nodes that read from this node
                // (optionally restricted to one output channel).
                for (_key, edges) in &mut self.node_input_edges {
                    edges.retain(|edge| {
                        edge.source != node_key
                            || channel_index
                                .map_or(false, |index| edge.from_output_index != index)
                    });
                }
            }
            if graph_outputs {
                self.output_edges.retain(|edge| {
                    edge.source != node_key
                        || channel_index
                            .map_or(false, |index| edge.from_output_index != index)
                });
            }
        }
        Connection::GraphInputToOutput {
            graph_id,
            from_input_channel,
            to_output_channel,
            channels,
        } => {
            if graph_id != self.id {
                return try_connect_to_graphs(connection.clone());
            }
            // Pass graph inputs straight through to graph outputs.
            for i in 0..channels {
                self.graph_input_to_output_edges.push(InterGraphEdge {
                    from_output_index: from_input_channel + i,
                    to_input_index: to_output_channel + i,
                })
            }
            self.recalculation_required = true;
        }
        // Clearing the input-to-output passthrough lives in `disconnect`.
        Connection::ClearGraphInputToOutput { .. } => self.disconnect(connection.clone())?,
    }
    Ok(())
}
/// Look up the input channel index of `node` that matches `label`.
///
/// Returns `None` when the label does not exist on the node, or when `node`
/// is not (or no longer) present in this graph. The original implementation
/// called `.unwrap()` on the node lookup and would panic on a stale key;
/// returning `None` is strictly safer and keeps the same signature.
fn input_index_from_label(&self, node: NodeKey, label: &'static str) -> Option<usize> {
    self.node_input_name_to_index.get(node)?.get(label).copied()
}
/// Look up the output channel index of `node` that matches `label`.
///
/// Returns `None` when the label does not exist on the node, or when `node`
/// is not (or no longer) present in this graph. The original implementation
/// called `.unwrap()` on the node lookup and would panic on a stale key;
/// returning `None` is strictly safer and keeps the same signature.
fn output_index_from_label(&self, node: NodeKey, label: &'static str) -> Option<usize> {
    self.node_output_name_to_index.get(node)?.get(label).copied()
}
/// Apply `change_fn` to the shared `MusicalTimeMap` of this graph's scheduler.
///
/// # Errors
/// Returns `ScheduleError::SchedulerNotCreated` when no
/// `GraphGenCommunicator` (and therefore no scheduler) exists yet.
pub fn change_musical_time_map(
    &mut self,
    change_fn: impl FnOnce(&mut MusicalTimeMap),
) -> Result<(), ScheduleError> {
    match &mut self.graph_gen_communicator {
        Some(ggc) => ggc.scheduler.change_musical_time_map(change_fn),
        None => Err(ScheduleError::SchedulerNotCreated),
    }
}
/// Order nodes depth-first along their input edges.
///
/// Pops nodes off `nodes_to_process`, descending into any not-yet-visited
/// input (dependency) before emitting the node itself, so every node appears
/// in the returned order after all of its (unvisited) dependencies.
/// `visited` is updated as nodes are discovered.
fn depth_first_search(
    &self,
    visited: &mut HashSet<NodeKey>,
    nodes_to_process: &mut Vec<NodeKey>,
) -> Vec<NodeKey> {
    let mut node_order = Vec::with_capacity(self.get_nodes().capacity());
    while let Some(&current) = nodes_to_process.last() {
        // Pick the first input edge whose source node we have not seen yet.
        let unvisited_source = self.node_input_edges[current]
            .iter()
            .map(|edge| edge.source)
            .find(|source| !visited.contains(source));
        match unvisited_source {
            Some(source) => {
                // Descend into the dependency first.
                nodes_to_process.push(source);
                visited.insert(source);
            }
            None => {
                // All inputs handled: the node itself can be emitted.
                node_order.push(nodes_to_process.pop().unwrap());
            }
        }
    }
    node_order
}
/// Starting from `start_node`, follow edges downstream (to nodes that take
/// input from the current node) and return the furthest node along that walk
/// that is connected to a graph output. If no downstream node feeds an
/// output, `start_node` itself is returned.
fn get_deepest_output_node(&self, start_node: NodeKey, visited: &HashSet<NodeKey>) -> NodeKey {
    let mut last_connected_node_index = start_node;
    let mut last_connected_output_node_index = start_node;
    loop {
        let mut found_later_node = false;
        // Scan every node's input edges for one fed by the current node.
        for (key, input_edges) in &self.node_input_edges {
            for input_edge in input_edges {
                // NOTE(review): the `visited` check is on `input_edge.source`
                // (the node we are standing on), not on `key` (the node we
                // move to) — confirm this asymmetry is intentional.
                if input_edge.source == last_connected_node_index
                    && !visited.contains(&input_edge.source)
                {
                    last_connected_node_index = key;
                    found_later_node = true;
                    // Remember this node if it feeds a graph output.
                    for edge in &self.output_edges {
                        if last_connected_node_index == edge.source {
                            last_connected_output_node_index = last_connected_node_index;
                        }
                    }
                    break;
                }
            }
            if found_later_node {
                break;
            }
        }
        // No node takes input from the current one: the walk has ended.
        if !found_later_node {
            break;
        }
    }
    last_connected_output_node_index
}
/// Recompute `self.node_order`, the order in which nodes are processed each
/// block: feedback nodes first, then a depth-first ordering of all chains
/// reaching the graph outputs, then chains that end in feedback edges, and
/// finally any remaining (disconnected) nodes, which are also recorded in
/// `self.disconnected_nodes`.
pub fn calculate_node_order(&mut self) {
    self.node_order.clear();
    // Feedback nodes are placed first (presumably so downstream consumers
    // read the previous block's data — the ordering below depends on it).
    self.node_order.extend(self.feedback_node_indices.iter());
    let mut visited = HashSet::new();
    for &feedback_node_index in &self.feedback_node_indices {
        visited.insert(feedback_node_index);
    }
    // Seed the DFS with the deepest node of every chain that reaches a
    // graph output.
    let mut nodes_to_process = Vec::with_capacity(self.get_nodes_mut().capacity());
    for edge in &self.output_edges {
        let deepest_node = self.get_deepest_output_node(edge.source, &visited);
        if !visited.contains(&deepest_node) {
            nodes_to_process.push(deepest_node);
            visited.insert(deepest_node);
        }
    }
    let stack = self.depth_first_search(&mut visited, &mut nodes_to_process);
    self.node_order.extend(stack.into_iter());
    // Chains that terminate in a feedback edge (rather than a graph output)
    // were not reached above; order each of them too.
    let mut feedback_node_order_addition = vec![];
    for (_key, feedback_edges) in self.node_feedback_edges.iter() {
        for feedback_edge in feedback_edges {
            if !visited.contains(&feedback_edge.source) {
                // Walk downstream from the feedback source, tracking the
                // last node of the chain that is still unvisited.
                let mut last_connected_node_index = feedback_edge.source;
                let mut last_connected_not_visited_ni = feedback_edge.source;
                loop {
                    let mut found_later_node = false;
                    for (key, input_edges) in self.node_input_edges.iter() {
                        for input_edge in input_edges {
                            if input_edge.source == last_connected_node_index {
                                last_connected_node_index = key;
                                if !visited.contains(&key) {
                                    last_connected_not_visited_ni = key;
                                }
                                found_later_node = true;
                                break;
                            }
                        }
                        if found_later_node {
                            break;
                        }
                    }
                    if !found_later_node {
                        break;
                    }
                }
                // DFS from the end of the chain to order its dependencies.
                nodes_to_process.clear();
                visited.insert(last_connected_not_visited_ni);
                nodes_to_process.push(last_connected_not_visited_ni);
                let stack = self.depth_first_search(&mut visited, &mut nodes_to_process);
                feedback_node_order_addition.extend(stack);
            }
        }
    }
    self.node_order
        .extend(feedback_node_order_addition.into_iter());
    // Everything still unvisited is disconnected from the outputs; process
    // it anyway (unless it is pending removal) and record it for inspection.
    let mut remaining_nodes = vec![];
    for (node_key, _node) in self.get_nodes() {
        if !visited.contains(&node_key) && !self.node_keys_pending_removal.contains(&node_key) {
            remaining_nodes.push(node_key);
        }
    }
    self.node_order.extend(remaining_nodes.iter());
    self.disconnected_nodes = remaining_nodes;
}
/// The block size of this graph in frames, excluding any oversampling
/// factor (internal processing uses `block_size * oversampling`).
pub fn block_size(&self) -> usize {
    self.block_size
}
/// The number of nodes currently stored in this graph.
pub fn num_nodes(&self) -> usize {
    self.get_nodes().len()
}
/// Build one `Task` per node, in `self.node_order`, describing which buffers
/// to copy or add into the node's inputs before the node runs.
///
/// Dereferences `self.nodes` (an `UnsafeCell`) and captures raw pointers
/// into node output buffers and the shared graph-input allocation; the
/// returned `Task`s are only valid while those allocations stay alive.
/// SAFETY(review): relies on the graph/audio-thread handover protocol to
/// avoid aliasing — confirm before changing call sites.
fn generate_tasks(&mut self) -> Vec<Task> {
    let mut tasks = vec![];
    let nodes = unsafe { &mut *self.nodes.get() };
    let first_sample = self.inputs_buffers_ptr.ptr.cast::<Sample>();
    for &node_key in &self.node_order {
        let num_inputs = nodes[node_key].num_inputs();
        let mut input_buffers = NodeBufferRef::new(
            first_sample,
            num_inputs,
            self.block_size * self.oversampling.as_usize(),
        );
        let input_edges = &self.node_input_edges[node_key];
        let graph_input_edges = &self.graph_input_edges[node_key];
        let feedback_input_edges = &self.node_feedback_edges[node_key];
        let mut inputs_to_copy = vec![];
        let mut graph_inputs_to_copy = vec![];
        // Count how many edges feed each input channel: a channel with more
        // than one source must sum (Add); a single source can overwrite (Copy).
        let mut inputs_per_channel = vec![0; num_inputs];
        for input_edge in input_edges {
            inputs_per_channel[input_edge.to_input_index] += 1;
        }
        for input_edge in feedback_input_edges {
            inputs_per_channel[input_edge.to_input_index] += 1;
        }
        let copy_or_add: Vec<_> = inputs_per_channel
            .into_iter()
            .map(|num| {
                if num > 1 {
                    CopyOrAdd::Add
                } else {
                    CopyOrAdd::Copy
                }
            })
            .collect();
        // Raw copy instructions: (source ptr, destination ptr, frame count,
        // copy-or-add mode) for every ordinary input edge.
        for input_edge in input_edges {
            let source = &nodes[input_edge.source];
            let mut output_values = source.output_buffers();
            let from_channel = input_edge.from_output_index;
            let to_channel = input_edge.to_input_index;
            inputs_to_copy.push((
                unsafe { output_values.ptr_to_sample(from_channel, 0) },
                unsafe { input_buffers.ptr_to_sample(to_channel, 0) },
                self.block_size * self.oversampling.as_usize(),
                copy_or_add[to_channel],
            ));
        }
        // Graph inputs are copied by channel index pair; the actual pointers
        // are resolved by the audio thread.
        for input_edge in graph_input_edges {
            graph_inputs_to_copy
                .push((input_edge.from_output_index, input_edge.to_input_index));
        }
        for feedback_edge in feedback_input_edges {
            let source = &nodes[feedback_edge.source];
            let mut output_values = source.output_buffers();
            // NOTE(review): the destination channel and the copy/add mode use
            // `from_output_index` rather than `to_input_index` here, unlike
            // the ordinary-edge loop above — confirm this is intentional.
            inputs_to_copy.push((
                unsafe { output_values.ptr_to_sample(feedback_edge.from_output_index, 0) },
                unsafe { input_buffers.ptr_to_sample(feedback_edge.from_output_index, 0) },
                self.block_size * self.oversampling.as_usize(),
                copy_or_add[feedback_edge.from_output_index],
            ));
        }
        let node = &nodes[node_key];
        tasks.push(node.to_task(
            node_key,
            inputs_to_copy,
            graph_inputs_to_copy,
            input_buffers,
        ));
    }
    tasks
}
/// Build one `OutputTask` per graph output edge, mapping a source node's
/// output channel onto a graph output channel.
fn generate_output_tasks(&mut self) -> Vec<OutputTask> {
    self.output_edges
        .iter()
        .map(|output_edge| OutputTask {
            input_buffers: self.get_nodes()[output_edge.source].output_buffers(),
            input_index: output_edge.from_output_index,
            graph_output_index: output_edge.to_input_index,
        })
        .collect()
}
/// Build the copy tasks for edges that route this graph's inputs directly to
/// its outputs (no node in between).
fn generate_input_to_output_tasks(&mut self) -> Vec<InputToOutputTask> {
    self.graph_input_to_output_edges
        .iter()
        .map(|edge| InputToOutputTask {
            graph_input_index: edge.from_output_index,
            graph_output_index: edge.to_input_index,
        })
        .collect()
}
/// Create the boxed `Gen` (a `GraphGen`) that runs this graph inside a
/// parent context, wiring up every ring buffer used to communicate between
/// this `Graph` handle and the audio thread.
///
/// # Errors
/// Returns an error if a `GraphGenCommunicator` already exists, i.e. a
/// `GraphGen` has already been created for this graph.
fn create_graph_gen(
    &mut self,
    parent_graph_block_size: usize,
    parent_graph_sample_rate: Sample,
    parent_graph_oversampling: Oversampling,
) -> Result<Box<dyn Gen + Send>, String> {
    if self.graph_gen_communicator.is_some() {
        return Err(
            "create_graph_gen: GraphGenCommunicator already existed for this graph".to_owned(),
        );
    }
    self.init();
    // Snapshot the current node/output tasks as the first TaskData.
    let tasks = self.generate_tasks().into_boxed_slice();
    let output_tasks = self.generate_output_tasks().into_boxed_slice();
    let task_data = TaskData {
        applied: Arc::new(AtomicBool::new(false)),
        tasks,
        output_tasks,
        new_inputs_buffers_ptr: Some(self.inputs_buffers_ptr.clone()),
        input_to_output_tasks: self.generate_input_to_output_tasks().into_boxed_slice(),
    };
    // Ring buffers connecting this handle and the audio thread:
    // - nodes the audio thread wants freed,
    // - new TaskData to the audio thread,
    // - retired TaskData back, so it is dropped here and not on the audio thread.
    let (free_node_queue_producer, free_node_queue_consumer) =
        RingBuffer::<(NodeKey, GenState)>::new(self.ring_buffer_size);
    let (new_task_data_producer, new_task_data_consumer) =
        RingBuffer::<TaskData>::new(self.ring_buffer_size);
    let (task_data_to_be_dropped_producer, task_data_to_be_dropped_consumer) =
        RingBuffer::<TaskData>::new(self.ring_buffer_size);
    // Transfer changes that were scheduled before the scheduler existed.
    let mut scheduler = Scheduler::new();
    for (schedule_changes, time) in self.scheduled_changes_queue.drain(..) {
        scheduler.schedule(schedule_changes, time);
    }
    let scheduler_buffer_size = self.ring_buffer_size;
    let (scheduled_change_producer, rb_consumer) = RingBuffer::new(scheduler_buffer_size);
    let (clock_update_producer, clock_update_consumer) = RingBuffer::new(10);
    let schedule_receiver =
        ScheduleReceiver::new(rb_consumer, clock_update_consumer, scheduler_buffer_size);
    let graph_gen_communicator = GraphGenCommunicator {
        free_node_queue_consumer,
        scheduler,
        scheduled_change_producer,
        clock_update_producer,
        task_data_to_be_dropped_consumer,
        new_task_data_producer,
        // The first TaskData's flag becomes the flag for the next change.
        next_change_flag: task_data.applied.clone(),
        timestamp: Arc::new(AtomicU64::new(0)),
    };
    let graph_gen = graph_gen::make_graph_gen(
        self.sample_rate,
        parent_graph_sample_rate,
        task_data,
        self.block_size,
        parent_graph_block_size,
        self.oversampling,
        parent_graph_oversampling,
        self.num_outputs,
        self.num_inputs,
        graph_gen_communicator.timestamp.clone(),
        free_node_queue_producer,
        schedule_receiver,
        self.nodes.clone(),
        task_data_to_be_dropped_producer,
        new_task_data_consumer,
        self.inputs_buffers_ptr.clone(),
    );
    self.graph_gen_communicator = Some(graph_gen_communicator);
    Ok(graph_gen)
}
/// Calculate the node order and initialise every node with this graph's
/// block size and sample rate, both scaled by the oversampling factor.
fn init(&mut self) {
    self.calculate_node_order();
    let block_size = self.block_size;
    let sample_rate = self.sample_rate;
    let oversampling = self.oversampling;
    // SAFETY(review): direct access to the UnsafeCell-backed node storage;
    // assumes no concurrent access during init — confirm invariants.
    for (key, n) in unsafe { &mut *self.nodes.get() } {
        let id = self.node_ids[key];
        n.init(
            block_size * oversampling.as_usize(),
            sample_rate * (oversampling.as_usize() as Sample),
            id,
        );
    }
    self.initiated = true;
}
/// Apply pending structural changes: free nodes whose removal has become
/// safe, and—if a recalculation was flagged—regenerate all task lists and
/// send them to the audio thread. Recurses into every child graph.
pub fn commit_changes(&mut self) {
    if self.graph_gen_communicator.is_some() {
        // Drop anything the audio thread has released back to us.
        self.free_old();
        if self.recalculation_required {
            self.calculate_node_order();
            let output_tasks = self.generate_output_tasks().into_boxed_slice();
            let input_to_output_tasks =
                self.generate_input_to_output_tasks().into_boxed_slice();
            let tasks = self.generate_tasks().into_boxed_slice();
            if let Some(ggc) = &mut self.graph_gen_communicator {
                // Only ship a new inputs-buffer allocation if it changed.
                let new_inputs_buffers_ptr = if self.new_inputs_buffers_ptr {
                    Some(self.inputs_buffers_ptr.clone())
                } else {
                    None
                };
                ggc.send_updated_tasks(
                    tasks,
                    output_tasks,
                    input_to_output_tasks,
                    new_inputs_buffers_ptr,
                );
            }
            self.recalculation_required = false;
        }
    }
    for (_key, graph) in &mut self.graphs_per_node {
        graph.commit_changes();
    }
}
/// Commit pending changes and push scheduled changes that are due to the
/// audio thread. Recurses into every child graph.
pub fn update(&mut self) {
    self.commit_changes();
    if let Some(ggc) = &mut self.graph_gen_communicator {
        ggc.update();
    }
    for (_key, graph) in &mut self.graphs_per_node {
        graph.update();
    }
}
/// Perform deferred cleanup: free nodes the audio thread has requested
/// removed, physically remove nodes whose "safe to drop" flag has been set,
/// and drop buffers once this graph holds the last `Arc` reference (so the
/// deallocation never happens on the audio thread).
fn free_old(&mut self) {
    // Nodes whose Gen returned a Free* state on the audio thread.
    let free_queue = if let Some(ggc) = &mut self.graph_gen_communicator {
        ggc.get_nodes_to_free()
    } else {
        vec![]
    };
    for (key, state) in free_queue {
        match state {
            GenState::FreeSelf => {
                self.free_node_from_key(key).ok();
            }
            GenState::FreeSelfMendConnections => {
                self.free_node_mend_connections_from_key(key).ok();
            }
            // Graph-level frees and Continue are never put on this queue.
            GenState::FreeGraph(_) | GenState::FreeGraphMendConnections(_) => unreachable!(),
            GenState::Continue => unreachable!(),
        }
    }
    let nodes = unsafe { &mut *self.nodes.get() };
    let mut i = 0;
    while i < self.node_keys_to_free_when_safe.len() {
        let (key, flag) = &self.node_keys_to_free_when_safe[i];
        // The flag is set once the audio thread no longer touches the node.
        if flag.load(Ordering::SeqCst) {
            nodes.remove(*key);
            self.graphs_per_node.remove(*key);
            self.node_keys_pending_removal.remove(key);
            self.node_keys_to_free_when_safe.remove(i);
        } else {
            i += 1;
        }
    }
    // A strong count of 1 means only this graph still references the buffer,
    // i.e. the audio thread has dropped its clone and freeing is safe.
    // `retain` replaces the original manual reverse-index removal loop with
    // the idiomatic in-place O(n) equivalent.
    self.buffers_to_free_when_safe
        .retain(|b| Arc::<OwnedRawBuffer>::strong_count(b) != 1);
}
/// Mutable access to the node storage.
///
/// SAFETY(review): `self.nodes` is an `UnsafeCell`-backed allocation shared
/// with the audio thread (see `create_graph_gen`); this relies on the
/// graph's change/handover protocol to prevent simultaneous access —
/// confirm invariants before adding call sites.
fn get_nodes_mut(&mut self) -> &mut SlotMap<NodeKey, Node> {
    unsafe { &mut *self.nodes.get() }
}
/// Shared access to the node storage.
///
/// SAFETY(review): same aliasing caveats as `get_nodes_mut` — the storage is
/// shared with the audio thread; confirm invariants before adding call sites.
fn get_nodes(&self) -> &SlotMap<NodeKey, Node> {
    unsafe { &*self.nodes.get() }
}
/// Produce a recursive listing of the graph's nodes: plain nodes are listed
/// by name, nodes hosting an inner graph are expanded recursively.
pub fn dump_nodes(&self) -> Vec<NodeDump> {
    let nodes = self.get_nodes();
    nodes
        .keys()
        .map(|key| match self.graphs_per_node.get(key) {
            // The node wraps an inner graph: dump it recursively.
            Some(graph) => NodeDump::Graph(graph.dump_nodes()),
            // Ordinary node: record its name.
            None => NodeDump::Node(
                nodes
                    .get(key)
                    .expect("key from `nodes` should still be valid, but isn't")
                    .name
                    .to_string(),
            ),
        })
        .collect()
}
}
/// One entry in the recursive node listing produced by `Graph::dump_nodes`.
#[allow(missing_docs)]
#[derive(Clone, Debug)]
pub enum NodeDump {
    /// A plain node, identified by its name.
    Node(String),
    /// A node that contains an inner graph, expanded recursively.
    Graph(Vec<NodeDump>),
}
// SAFETY(review): `Graph` contains `UnsafeCell`-backed storage shared with
// the audio thread; this impl asserts that handing a `Graph` to another
// thread is sound given the change/handover protocol (atomic flags and ring
// buffers) — TODO confirm those invariants.
unsafe impl Send for Graph {}
/// A change to a node input scheduled at an absolute frame timestamp.
#[derive(Clone, Copy, Debug)]
struct ScheduledChange {
    // Absolute time in frames (see `Scheduler::time_to_frames_timestamp`).
    timestamp: u64,
    // The node the change applies to.
    key: NodeKey,
    kind: ScheduledChangeKind,
    // NOTE(review): not used in this part of the file; presumably counts
    // down before the applied change is retired — confirm against GraphGen.
    removal_countdown: u8,
}
/// The kind of change a `ScheduledChange` applies to a node.
#[derive(Clone, Copy, Debug)]
enum ScheduledChangeKind {
    /// Set input channel `index` to the constant `value`.
    Constant { index: usize, value: Sample },
    /// Fire a trigger on input channel `index`.
    Trigger { index: usize },
}
/// A batch of changes together with the `Time` they were scheduled for,
/// queued while the `Scheduler` is still stopped.
type SchedulingQueueItem = (
    Vec<(NodeKey, ScheduledChangeKind, Option<TimeOffset>)>,
    Time,
);
/// Converts scheduled changes into absolute frame timestamps.
///
/// Created `Stopped`; changes scheduled in that state are queued. Once the
/// audio thread starts, `start` switches it to `Running` and replays the
/// queue against the audio clock.
enum Scheduler {
    Stopped {
        scheduling_queue: Vec<SchedulingQueueItem>,
    },
    Running {
        // When the audio thread started; anchor for relative times.
        start_ts: Instant,
        sample_rate: u64,
        // How far ahead (in frames) changes are shipped to the audio thread.
        max_duration_to_send: u64,
        // Changes already converted to frame timestamps, awaiting sending.
        scheduling_queue: Vec<ScheduledChange>,
        // Extra scheduling latency, expressed in samples.
        latency_in_samples: f64,
        musical_time_map: Arc<RwLock<MusicalTimeMap>>,
    },
}
impl Scheduler {
    /// Create a scheduler in the `Stopped` state; changes scheduled before
    /// `start` is called are queued and replayed once it starts.
    fn new() -> Self {
        Scheduler::Stopped {
            scheduling_queue: vec![],
        }
    }
    /// Switch to the `Running` state, anchored to the audio thread's start
    /// time, and replay anything queued while stopped. Does nothing if the
    /// scheduler is already running.
    fn start(
        &mut self,
        sample_rate: Sample,
        block_size: usize,
        latency: Duration,
        audio_thread_start_ts: Instant,
        musical_time_map: Arc<RwLock<MusicalTimeMap>>,
    ) {
        match self {
            Scheduler::Stopped {
                ref mut scheduling_queue,
            } => {
                let scheduling_queue = mem::take(scheduling_queue);
                // Send changes up to half a second ahead of time, but never
                // less than two blocks ahead.
                let max_duration_to_send =
                    ((sample_rate * 0.5) as u64).max((block_size as u64) * 2);
                let mut new_scheduler = Scheduler::Running {
                    start_ts: audio_thread_start_ts,
                    #[allow(clippy::cast_possible_truncation)]
                    sample_rate: sample_rate as u64,
                    max_duration_to_send,
                    scheduling_queue: vec![],
                    latency_in_samples: latency.as_secs_f64() * (sample_rate as f64),
                    musical_time_map,
                };
                // Replay everything that was scheduled while stopped.
                for (changes, time) in scheduling_queue {
                    new_scheduler.schedule(changes, time);
                }
                *self = new_scheduler;
            }
            Scheduler::Running { .. } => (),
        }
    }
    /// Convert a `Time` to an absolute timestamp in frames. Returns `None`
    /// while the scheduler is stopped.
    fn time_to_frames_timestamp(&mut self, time: Time) -> Option<u64> {
        match self {
            Scheduler::Stopped { .. } => None,
            Scheduler::Running {
                start_ts,
                sample_rate,
                latency_in_samples: latency,
                musical_time_map,
                ..
            } => {
                Some(match time {
                    // Relative: elapsed time since start plus the requested
                    // delay, offset by the configured latency.
                    Time::DurationFromNow(duration_from_now) => {
                        ((start_ts.elapsed() + duration_from_now).as_secs_f64()
                            * (*sample_rate as f64)
                            + *latency) as u64
                    }
                    // NOTE(review): absolute seconds are not offset by the
                    // latency, unlike the other variants — confirm intended.
                    Time::Seconds(seconds) => seconds.to_samples(*sample_rate),
                    Time::Beats(mt) => {
                        let mtm = musical_time_map.read().unwrap();
                        let duration_from_start =
                            Duration::from_secs_f64(mtm.musical_time_to_secs_f64(mt));
                        let timestamp = (duration_from_start.as_secs_f64() * (*sample_rate as f64)
                            + *latency) as u64;
                        timestamp
                    }
                    // Timestamp 0 is always in the past: "as soon as possible".
                    Time::Immediately => 0,
                })
            }
        }
    }
    /// Schedule a batch of changes at `time`. While stopped the batch is
    /// queued as-is; while running each change is converted to an absolute
    /// frame timestamp, with its optional per-change `TimeOffset` applied.
    fn schedule(
        &mut self,
        changes: Vec<(NodeKey, ScheduledChangeKind, Option<TimeOffset>)>,
        time: Time,
    ) {
        let timestamp = self.time_to_frames_timestamp(time);
        match self {
            Scheduler::Stopped { scheduling_queue } => scheduling_queue.push((changes, time)),
            Scheduler::Running {
                sample_rate,
                max_duration_to_send: _,
                scheduling_queue,
                ..
            } => {
                // `time_to_frames_timestamp` always returns `Some` when Running.
                let timestamp = timestamp.unwrap();
                let offset_to_frames = |time_offset: Option<TimeOffset>| {
                    if let Some(to) = time_offset {
                        to.to_frames(*sample_rate)
                    } else {
                        0
                    }
                };
                for (key, change_kind, time_offset) in changes {
                    let frame_offset = offset_to_frames(time_offset);
                    // Apply the signed frame offset; on overflow/underflow
                    // warn and fall back to the unoffset timestamp.
                    let mut ts = timestamp;
                    if frame_offset >= 0 {
                        ts = ts.checked_add(frame_offset as u64).unwrap_or_else(|| {
                            eprintln!(
                                "Used a time offset that made the timestamp overflow: {frame_offset:?}"
                            );
                            timestamp
                        });
                    } else {
                        // `unsigned_abs` is correct even for the minimum
                        // value, where the original `(frame_offset * -1)`
                        // would overflow (panicking in debug builds).
                        ts = ts
                            .checked_sub(frame_offset.unsigned_abs() as u64)
                            .unwrap_or_else(|| {
                                eprintln!(
                                    "Used a time offset that made the timestamp overflow: {frame_offset:?}"
                                );
                                timestamp
                            });
                    }
                    scheduling_queue.push(ScheduledChange {
                        timestamp: ts,
                        key,
                        kind: change_kind,
                        removal_countdown: 0,
                    });
                }
            }
        }
    }
    /// Run `change_fn` on the shared `MusicalTimeMap`.
    ///
    /// # Errors
    /// `SchedulerNotCreated` if still stopped;
    /// `MusicalTimeMapCannotBeWrittenTo` if the lock is poisoned.
    pub fn change_musical_time_map(
        &mut self,
        change_fn: impl FnOnce(&mut MusicalTimeMap),
    ) -> Result<(), ScheduleError> {
        match self {
            Scheduler::Stopped { .. } => Err(ScheduleError::SchedulerNotCreated),
            Scheduler::Running {
                musical_time_map, ..
            } => match musical_time_map.write() {
                Ok(mut mtm) => {
                    change_fn(&mut *mtm);
                    Ok(())
                }
                Err(_) => Err(ScheduleError::MusicalTimeMapCannotBeWrittenTo),
            },
        }
    }
    /// Schedule a single change to be applied as soon as possible.
    fn schedule_now(&mut self, key: NodeKey, change: ScheduledChangeKind) {
        self.schedule(
            vec![(key, change, None)],
            Time::DurationFromNow(Duration::new(0, 0)),
        )
    }
    /// Move every due change (overdue, or within `max_duration_to_send`
    /// frames of `timestamp`) onto the ring buffer to the audio thread.
    fn update(&mut self, timestamp: u64, rb_producer: &mut rtrb::Producer<ScheduledChange>) {
        match self {
            Scheduler::Stopped { .. } => (),
            Scheduler::Running {
                max_duration_to_send,
                scheduling_queue,
                ..
            } => {
                scheduling_queue.sort_unstable_by_key(|s| s.timestamp);
                let mut i = 0;
                while i < scheduling_queue.len() {
                    // First clause: already overdue. Second clause: due within
                    // the send-ahead window (the subtraction cannot underflow
                    // because the first clause failed).
                    if timestamp > scheduling_queue[i].timestamp
                        || scheduling_queue[i].timestamp - timestamp < *max_duration_to_send
                    {
                        let change = scheduling_queue.remove(i);
                        if let Err(e) = rb_producer.push(change) {
                            eprintln!("Unable to push scheduled change into RingBuffer: {e}");
                        }
                    } else {
                        i += 1;
                    }
                }
            }
        }
    }
    /// Convert seconds to musical-time `Beats` via the shared
    /// `MusicalTimeMap`. Returns `None` while stopped.
    fn seconds_to_musical_time_beats(&self, ts: Seconds) -> Option<Beats> {
        match self {
            Scheduler::Stopped { .. } => None,
            Scheduler::Running {
                musical_time_map, ..
            } => {
                let mtm = musical_time_map.read().unwrap();
                Some(mtm.seconds_to_beats(ts))
            }
        }
    }
}
/// A clock reading sent to a `ScheduleReceiver`: a shared frame counter and
/// the sample rate that counter is expressed in.
#[derive(Clone, Debug)]
struct ClockUpdate {
    // Shared frame counter; read with SeqCst in `ScheduleReceiver::clock_update`.
    timestamp: Arc<AtomicU64>,
    // Sample rate the counter is expressed in, so receivers at a different
    // rate can rescale it.
    clock_sample_rate: Sample,
}
/// The audio-thread side of the scheduling channel: receives scheduled
/// changes and clock updates, keeping a local sorted queue of changes.
struct ScheduleReceiver {
    rb_consumer: rtrb::Consumer<ScheduledChange>,
    // Local queue, kept sorted by timestamp; capacity is fixed at creation.
    schedule_queue: Vec<ScheduledChange>,
    clock_update_consumer: rtrb::Consumer<ClockUpdate>,
}
impl ScheduleReceiver {
    /// Bundle the two consumers with a local change queue of fixed
    /// `capacity`.
    fn new(
        rb_consumer: rtrb::Consumer<ScheduledChange>,
        clock_update_consumer: rtrb::Consumer<ClockUpdate>,
        capacity: usize,
    ) -> Self {
        Self {
            rb_consumer,
            schedule_queue: Vec::with_capacity(capacity),
            clock_update_consumer,
        }
    }
    /// Drain all pending clock updates and return the most recent timestamp,
    /// converted to this receiver's `sample_rate` if the clock runs at a
    /// different rate. Returns `None` if no update arrived.
    fn clock_update(&mut self, sample_rate: Sample) -> Option<u64> {
        let mut latest = None;
        while let Ok(clock) = self.clock_update_consumer.pop() {
            let samples = clock.timestamp.load(Ordering::SeqCst);
            latest = Some(if sample_rate == clock.clock_sample_rate {
                samples
            } else {
                // Rescale the frame count from the clock's rate to ours.
                (((samples as f64) / (clock.clock_sample_rate as f64)) * (sample_rate as f64))
                    as u64
            });
        }
        latest
    }
    /// Pull as many new changes from the ring buffer as fit in the local
    /// queue, keep the queue sorted by timestamp, and return it.
    fn changes(&mut self) -> &mut Vec<ScheduledChange> {
        let num_new_changes = self.rb_consumer.slots();
        if num_new_changes > 0 {
            // Never grow past the original capacity: no allocation here.
            let space_left = self.schedule_queue.capacity() - self.schedule_queue.len();
            let changes_to_read = num_new_changes.min(space_left);
            if changes_to_read == 0 {
                eprintln!("ScheduleReceiver: Unable to read any changes, queue is full.");
            }
            match self.rb_consumer.read_chunk(changes_to_read) {
                Ok(chunk) => {
                    self.schedule_queue.extend(chunk);
                    self.schedule_queue.sort_unstable_by_key(|s| s.timestamp);
                }
                Err(e) => {
                    eprintln!("Failed to receive changes in ScheduleReceiver: {e}");
                }
            }
        }
        &mut self.schedule_queue
    }
}
/// Everything the audio-thread `GraphGen` needs to process a block; a fresh
/// `TaskData` is shipped over whenever the graph structure changes.
struct TaskData {
    // Flag tied to this TaskData; used as `next_change_flag` on the sender
    // side — presumably set once the audio thread has switched to this data.
    // TODO confirm against the GraphGen implementation.
    applied: Arc<AtomicBool>,
    // One Task per node, in processing order.
    tasks: Box<[Task]>,
    // Copies from node outputs to the graph's outputs.
    output_tasks: Box<[OutputTask]>,
    // Copies straight from graph inputs to graph outputs.
    input_to_output_tasks: Box<[InputToOutputTask]>,
    // If the graph-input buffer allocation changed, the new allocation is
    // included so the audio thread can swap to it.
    new_inputs_buffers_ptr: Option<Arc<OwnedRawBuffer>>,
}
/// The `Graph`-side endpoint of all channels to the running `GraphGen` on
/// the audio thread.
struct GraphGenCommunicator {
    scheduler: Scheduler,
    // Sends clock corrections to the ScheduleReceiver.
    clock_update_producer: rtrb::Producer<ClockUpdate>,
    // Sends due ScheduledChanges to the audio thread.
    scheduled_change_producer: rtrb::Producer<ScheduledChange>,
    // Frame counter shared with the GraphGen; read in `update`.
    timestamp: Arc<AtomicU64>,
    // The `applied` flag that will travel with the next TaskData sent.
    next_change_flag: Arc<AtomicBool>,
    // Nodes the audio thread asks us to free (with the GenState that caused it).
    free_node_queue_consumer: rtrb::Consumer<(NodeKey, GenState)>,
    // Retired TaskData comes back here so it is dropped off the audio thread.
    task_data_to_be_dropped_consumer: rtrb::Consumer<TaskData>,
    // New TaskData goes to the audio thread through here.
    new_task_data_producer: rtrb::Producer<TaskData>,
}
// SAFETY(review): all fields are ring-buffer endpoints, atomics, or the
// scheduler; asserts the struct may move across threads — TODO confirm every
// field is sound to send (rtrb endpoints are single-producer/single-consumer).
unsafe impl Send for GraphGenCommunicator {}
impl GraphGenCommunicator {
    /// Drop all `TaskData` the audio thread has retired, so deallocation
    /// happens on this thread rather than the audio thread.
    fn free_old(&mut self) {
        let num_to_remove = self.task_data_to_be_dropped_consumer.slots();
        if let Ok(chunk) = self
            .task_data_to_be_dropped_consumer
            .read_chunk(num_to_remove)
        {
            chunk.into_iter().for_each(drop);
        }
    }
    /// Forward a clock update to the audio thread.
    fn send_clock_update(&mut self, clock_update: ClockUpdate) {
        self.clock_update_producer.push(clock_update).unwrap();
    }
    /// Ship a freshly generated set of tasks to the audio thread, first
    /// dropping any TaskData it has already retired.
    fn send_updated_tasks(
        &mut self,
        tasks: Box<[Task]>,
        output_tasks: Box<[OutputTask]>,
        input_to_output_tasks: Box<[InputToOutputTask]>,
        new_inputs_buffers_ptr: Option<Arc<OwnedRawBuffer>>,
    ) {
        self.free_old();
        // The outgoing TaskData takes the current flag with it; a fresh flag
        // is installed for the change after this one.
        let applied = mem::replace(&mut self.next_change_flag, Arc::new(AtomicBool::new(false)));
        let td = TaskData {
            applied,
            tasks,
            output_tasks,
            new_inputs_buffers_ptr,
            input_to_output_tasks,
        };
        if let Err(e) = self.new_task_data_producer.push(td) {
            eprintln!(
                "Unable to push new TaskData to the GraphGen. Please increase RingBuffer size. {e}"
            )
        }
    }
    /// Push all scheduled changes that are due (relative to the shared audio
    /// clock) to the audio thread.
    fn update(&mut self) {
        let now = self.timestamp.load(Ordering::SeqCst);
        self.scheduler
            .update(now, &mut self.scheduled_change_producer);
    }
    /// Collect every (node, state) pair the audio thread wants freed.
    fn get_nodes_to_free(&mut self) -> Vec<(NodeKey, GenState)> {
        let num_items = self.free_node_queue_consumer.slots();
        match self.free_node_queue_consumer.read_chunk(num_items) {
            Ok(chunk) => chunk.into_iter().collect(),
            Err(_) => vec![],
        }
    }
}
/// Internal pass-through `Gen` used to realise feedback connections; it
/// simply copies its inputs to its outputs (see the `Gen` impl below).
struct FeedbackGen {
    // Number of channels to pass through (inputs == outputs).
    num_channels: usize,
}
impl FeedbackGen {
    /// Wrap a new `FeedbackGen` with `num_channels` channels in a `Node`
    /// named "feedback_node".
    pub fn node(num_channels: usize) -> Node {
        Node::new("feedback_node", Box::new(Self { num_channels }))
    }
}
// Pass-through: every input channel is copied unchanged to the matching
// output channel, one sample at a time.
impl Gen for FeedbackGen {
    fn process(&mut self, ctx: GenContext, _resources: &mut Resources) -> GenState {
        let frames = ctx.block_size();
        for ch in 0..self.num_channels {
            for frame in 0..frames {
                let value = ctx.inputs.read(ch, frame);
                ctx.outputs.write(value, ch, frame);
            }
        }
        GenState::Continue
    }
    fn num_outputs(&self) -> usize {
        self.num_channels
    }
    fn num_inputs(&self) -> usize {
        self.num_channels
    }
}
/// A connection from `source`'s output channel `from_output_index` to the
/// input channel `to_input_index` of the node that stores this edge (edges
/// are kept on the receiving node, so the destination node is implicit).
#[derive(Clone, Debug, Copy)]
struct Edge {
    source: NodeKey,
    from_output_index: usize,
    to_input_index: usize,
}
impl Edge {}
/// A channel-to-channel mapping with no node endpoints, used for edges that
/// route a graph input channel straight to a graph output channel.
#[derive(Clone, Debug, Copy)]
struct InterGraphEdge {
    from_output_index: usize,
    to_input_index: usize,
}
/// Like `Edge`, but part of a feedback connection routed through a feedback
/// node.
#[derive(Clone, Debug, Copy)]
struct FeedbackEdge {
    source: NodeKey,
    from_output_index: usize,
    to_input_index: usize,
    // The node the feedback signal is ultimately destined for — presumably
    // used to clean up when either endpoint is freed; TODO confirm.
    feedback_destination: NodeKey,
}
/// A `Gen` that multiplies two input channels (`value0`, `value1`) sample by
/// sample into a single output channel (`product`). See the `#[impl_gen]`
/// block below.
pub struct Mult;
#[impl_gen]
impl Mult {
    /// Multiply `value0` and `value1` element-wise into `product` for one
    /// block. Always returns `GenState::Continue`.
    #[process]
    fn process(
        #[allow(unused)] &mut self,
        block_size: BlockSize,
        #[allow(unused)] mut value0: &[Sample],
        #[allow(unused)] mut value1: &[Sample],
        #[allow(unused)] mut product: &mut [Sample],
    ) -> GenState {
        // Scalar path: one multiply per sample.
        #[cfg(not(feature = "unstable"))]
        {
            for i in 0..*block_size {
                product[i] = value0[i] * value1[i];
            }
        }
        // SIMD path, 2 lanes at a time.
        // NOTE(review): this path assumes `Sample` is f32 (uses `f32x2`) and
        // an even block size — with an odd block size the trailing sample is
        // left unprocessed. Confirm before enabling the "unstable" feature.
        #[cfg(feature = "unstable")]
        {
            use std::simd::f32x2;
            let simd_width = 2;
            for _ in 0..(*block_size / simd_width) {
                let s_in0 = f32x2::from_slice(&value0[..simd_width]);
                let s_in1 = f32x2::from_slice(&value1[..simd_width]);
                let prod = s_in0 * s_in1;
                prod.copy_to_slice(product);
                // Advance all three slices by one SIMD step.
                value0 = &value0[simd_width..];
                value1 = &value1[simd_width..];
                product = &mut product[simd_width..];
            }
        }
        GenState::Continue
    }
}
#[cfg(test)]
mod tests;