use nannou::audio::{self, Buffer};
use nannou::prelude::*;
use std::f64::consts::PI;
// Entry point: build the nannou app around `model` and run its event
// loop until the window is closed.
fn main() {
    let builder = nannou::app(model);
    builder.run();
}
// Top-level application state owned by the nannou event loop.
struct Model {
    // Handle to the output audio stream; used to play/pause playback and
    // to send closures that mutate the `Audio` state (see `key_pressed`).
    stream: audio::Stream<Audio>,
}
// State owned by the audio render callback: a single sine oscillator.
struct Audio {
    // Oscillator phase accumulator, advanced once per output sample by the
    // `audio` render function.
    phase: f64,
    // Oscillator frequency in hertz; adjusted at runtime via the Up/Down
    // keys (see `key_pressed`).
    hz: f64,
}
// Build the application state: one window for input/display and one
// output audio stream rendering the sine oscillator.
fn model(app: &App) -> Model {
    // The window forwards key presses to `key_pressed` and draws via `view`.
    app.new_window()
        .key_pressed(key_pressed)
        .view(view)
        .build()
        .unwrap();

    // Initial oscillator state: concert-pitch A (440 Hz) starting at phase 0.
    let initial_state = Audio {
        phase: 0.0,
        hz: 440.0,
    };

    // Hand the oscillator state and the render callback to the audio host.
    let stream = app
        .audio
        .new_output_stream(initial_state, audio)
        .build()
        .unwrap();

    Model { stream }
}
// Render callback for the output stream: fills `buffer` with a sine wave
// at `audio.hz`, carrying the oscillator phase across calls so the tone
// is continuous between buffers.
fn audio(audio: &mut Audio, buffer: &mut Buffer) {
    let sample_rate = buffer.sample_rate() as f64;
    // Fixed output gain; keeps the raw sine well below clipping.
    let volume = 0.5;
    for frame in buffer.frames_mut() {
        // `phase` is the fraction of a cycle in [0, 1); sin(2*PI*phase)
        // traces one full period per unit of phase.
        let sine_amp = (2.0 * PI * audio.phase).sin() as f32;
        // Advance by cycles-per-sample, then wrap at 1.0 (one full cycle).
        // Wrapping at `sample_rate` (as before) let the accumulator grow to
        // ~44100 and wasted f64 precision in the `sin` argument; since
        // sin(2*PI*x) is 1-periodic in x, wrapping at 1.0 produces the
        // identical tone with the accumulator kept small.
        audio.phase += audio.hz / sample_rate;
        audio.phase %= 1.0;
        // Write the same mono sample to every channel of the frame.
        for channel in frame {
            *channel = sine_amp * volume;
        }
    }
}
// Keyboard controls: Space toggles playback, Up/Down retune the
// oscillator by a fixed step.
fn key_pressed(_app: &App, model: &mut Model, key: Key) {
    // Frequency change applied per Up/Down press, in hertz.
    const STEP_HZ: f64 = 10.0;
    match key {
        Key::Space => {
            // Toggle: resume when paused, pause when playing.
            if !model.stream.is_playing() {
                model.stream.play();
            } else {
                model.stream.pause();
            }
        }
        // `send` delivers the closure to the stream, which applies it to
        // the shared `Audio` state.
        Key::Up => model.stream.send(|audio| audio.hz += STEP_HZ).unwrap(),
        Key::Down => model.stream.send(|audio| audio.hz -= STEP_HZ).unwrap(),
        _ => (),
    }
}
// View function: this app is audio-only, so just paint a solid dark
// background and hand the frame back for presentation.
fn view(_app: &App, _model: &Model, frame: Frame) -> Frame {
    frame.clear(DARK_CHARCOAL);
    frame
}