use bssh::executor::{MultiNodeStreamManager, NodeStream};
use bssh::node::Node;
use bssh::ssh::tokio_client::CommandOutput;
use bssh::ui::tui::app::TuiApp;
use bytes::Bytes;
use criterion::{BenchmarkId, Criterion, Throughput, criterion_group, criterion_main};
use ratatui::Terminal;
use ratatui::backend::TestBackend;
use std::hint::black_box;
use tokio::runtime::Runtime;
use tokio::sync::mpsc;
/// Build a current-thread Tokio runtime used to drive the async benchmark bodies.
///
/// A single-threaded runtime keeps scheduling overhead out of the measurements.
fn create_runtime() -> Runtime {
    let mut builder = tokio::runtime::Builder::new_current_thread();
    builder.enable_all();
    builder.build().unwrap()
}
/// Benchmark ingesting a large stdout payload through a single `NodeStream`.
///
/// The payload is delivered in 32 KiB chunks. The final chunk is trimmed so the
/// total number of bytes sent matches the declared `Throughput::Bytes(size)`
/// exactly; the original `div_ceil` logic over-sent for sizes that are not a
/// multiple of the chunk size (100 KiB was padded up to 128 KiB), which skewed
/// criterion's bytes/sec figures.
fn bench_large_output_single_stream(c: &mut Criterion) {
    let mut group = c.benchmark_group("large_output");
    for size in [1024, 100 * 1024, 1024 * 1024, 10 * 1024 * 1024].iter() {
        group.throughput(Throughput::Bytes(*size as u64));
        group.bench_with_input(
            BenchmarkId::new("single_stream", format!("{size} bytes")),
            size,
            |b, &size| {
                let rt = create_runtime();
                b.iter(|| {
                    rt.block_on(async {
                        let node = Node::new("localhost".to_string(), 22, "user".to_string());
                        let (tx, rx) = mpsc::channel::<CommandOutput>(1000);
                        let mut stream = NodeStream::new(node, rx);
                        let chunk_size = 32 * 1024;
                        let chunk = Bytes::from(vec![b'x'; chunk_size]);
                        // Send `size / chunk_size` full chunks, then a trimmed
                        // remainder so exactly `size` bytes are delivered.
                        for _ in 0..size / chunk_size {
                            let _ = tx.send(CommandOutput::StdOut(chunk.clone())).await;
                        }
                        let remainder = size % chunk_size;
                        if remainder > 0 {
                            // `Bytes::slice` is zero-copy, so this adds no
                            // allocation overhead to the measured loop.
                            let _ = tx
                                .send(CommandOutput::StdOut(chunk.slice(..remainder)))
                                .await;
                        }
                        drop(tx);
                        // Drain everything that was queued.
                        while stream.poll() {}
                        black_box(stream.stdout().len())
                    })
                });
            },
        );
    }
    group.finish();
}
/// Benchmark the rolling output buffer when total data exceeds its capacity.
///
/// Feeds 10 MiB multiplied by an overflow factor (1.5x–3x) through a single
/// `NodeStream` in 64 KiB chunks, polling after every send so the buffer
/// overflows while data is still in flight.
fn bench_rolling_buffer_overflow(c: &mut Criterion) {
    let mut group = c.benchmark_group("rolling_buffer_overflow");
    for factor in &[1.5_f64, 2.0_f64, 3.0_f64] {
        let total_size = (10.0 * 1024.0 * 1024.0 * factor) as usize;
        group.throughput(Throughput::Bytes(total_size as u64));
        group.bench_with_input(
            BenchmarkId::new("overflow", format!("{factor}x")),
            &total_size,
            |b, &total_size| {
                let rt = create_runtime();
                b.iter(|| {
                    rt.block_on(async {
                        let node = Node::new("localhost".to_string(), 22, "user".to_string());
                        let (tx, rx) = mpsc::channel::<CommandOutput>(1000);
                        let mut stream = NodeStream::new(node, rx);
                        let chunk_size = 64 * 1024;
                        let payload = Bytes::from(vec![b'x'; chunk_size]);
                        // Interleave sends and polls so the stream ingests data
                        // while the producer is still active.
                        for _ in 0..total_size / chunk_size {
                            let _ = tx.send(CommandOutput::StdOut(payload.clone())).await;
                            stream.poll();
                        }
                        drop(tx);
                        while stream.poll() {}
                        black_box(stream.stdout().len())
                    })
                });
            },
        );
    }
    group.finish();
}
fn bench_concurrent_multi_node(c: &mut Criterion) {
let mut group = c.benchmark_group("concurrent_multi_node");
for num_nodes in [4, 16, 64].iter() {
group.bench_with_input(
BenchmarkId::new("nodes", num_nodes),
num_nodes,
|b, &num_nodes| {
let rt = create_runtime();
b.iter(|| {
rt.block_on(async {
let mut manager = MultiNodeStreamManager::new();
let mut senders = Vec::new();
for i in 0..num_nodes {
let node = Node::new(format!("host{i}"), 22, "user".to_string());
let (tx, rx) = mpsc::channel::<CommandOutput>(100);
manager.add_stream(node, rx);
senders.push(tx);
}
let data_per_node = 100 * 1024; let chunk = Bytes::from(vec![b'x'; 1024]);
let chunks_per_node = data_per_node / 1024;
for _ in 0..chunks_per_node {
for tx in &senders {
let _ = tx.send(CommandOutput::StdOut(chunk.clone())).await;
}
manager.poll_all();
}
for tx in senders {
drop(tx);
}
while manager.poll_all() {}
black_box(manager.completed_count())
})
});
},
);
}
group.finish();
}
fn bench_poll_all_throughput(c: &mut Criterion) {
let mut group = c.benchmark_group("poll_all_throughput");
for chunk_size in [256, 1024, 8192, 32768].iter() {
group.throughput(Throughput::Bytes(*chunk_size as u64 * 10));
group.bench_with_input(
BenchmarkId::new("chunk_size", chunk_size),
chunk_size,
|b, &chunk_size| {
let rt = create_runtime();
b.iter(|| {
rt.block_on(async {
let mut manager = MultiNodeStreamManager::new();
let mut senders = Vec::new();
for i in 0..10 {
let node = Node::new(format!("host{i}"), 22, "user".to_string());
let (tx, rx) = mpsc::channel::<CommandOutput>(100);
manager.add_stream(node, rx);
senders.push(tx);
}
let chunk = Bytes::from(vec![b'x'; chunk_size]);
for tx in &senders {
let _ = tx.send(CommandOutput::StdOut(chunk.clone())).await;
}
black_box(manager.poll_all())
})
});
},
);
}
group.finish();
}
/// Benchmark rendering the TUI summary view at varying node counts.
///
/// Streams are registered but carry no data — this isolates the cost of
/// drawing the summary layout into a 120x40 `TestBackend` terminal.
fn bench_tui_render_summary(c: &mut Criterion) {
    let mut group = c.benchmark_group("tui_render_summary");
    for num_nodes in [5, 20, 50].iter() {
        group.bench_with_input(
            BenchmarkId::new("nodes", num_nodes),
            num_nodes,
            |b, &num_nodes| {
                let mut manager = MultiNodeStreamManager::new();
                for i in 0..num_nodes {
                    let (_tx, rx) = mpsc::channel::<CommandOutput>(100);
                    manager.add_stream(
                        Node::new(format!("host{i}.example.com"), 22, format!("user{i}")),
                        rx,
                    );
                }
                // Off-screen terminal: renders into an in-memory buffer.
                let mut terminal = Terminal::new(TestBackend::new(120, 40)).unwrap();
                b.iter(|| {
                    terminal
                        .draw(|f| {
                            bssh::ui::tui::views::summary::render(
                                f,
                                &manager,
                                "benchmark-cluster",
                                "echo test",
                                false,
                            );
                        })
                        .unwrap();
                    black_box(())
                });
            },
        );
    }
    group.finish();
}
/// Benchmark rendering the TUI detail view with increasingly large stdout.
///
/// A single stream is pre-loaded with 100/1 000/10 000 lines of output, then
/// the detail view is drawn repeatedly into a 120x40 `TestBackend` terminal.
fn bench_tui_render_detail(c: &mut Criterion) {
    let mut group = c.benchmark_group("tui_render_detail");
    for output_lines in [100, 1000, 10000].iter() {
        group.bench_with_input(
            BenchmarkId::new("lines", output_lines),
            output_lines,
            |b, &output_lines| {
                let rt = create_runtime();
                let node = Node::new(
                    "benchmark-host.example.com".to_string(),
                    22,
                    "user".to_string(),
                );
                let (tx, rx) = mpsc::channel::<CommandOutput>(100);
                let mut stream = NodeStream::new(node, rx);
                // Build the whole payload up front so only rendering is measured.
                let output: String = (0..output_lines)
                    .map(|i| format!("Line {i}: This is a test line with some content\n"))
                    .collect();
                rt.block_on(async {
                    tx.send(CommandOutput::StdOut(Bytes::from(
                        output.as_bytes().to_vec(),
                    )))
                    .await
                    .unwrap();
                    drop(tx);
                });
                stream.poll();
                let mut terminal = Terminal::new(TestBackend::new(120, 40)).unwrap();
                b.iter(|| {
                    terminal
                        .draw(|f| {
                            bssh::ui::tui::views::detail::render(f, &stream, 0, 0, false, false);
                        })
                        .unwrap();
                    black_box(())
                });
            },
        );
    }
    group.finish();
}
/// Benchmark `TuiApp::check_data_changes` across 10/50/100 idle streams.
///
/// One warm-up call runs before measurement so the timed calls hit the
/// steady-state (no-change) path.
fn bench_data_change_detection(c: &mut Criterion) {
    let mut group = c.benchmark_group("data_change_detection");
    for num_nodes in [10, 50, 100].iter() {
        group.bench_with_input(
            BenchmarkId::new("nodes", num_nodes),
            num_nodes,
            |b, &num_nodes| {
                let mut manager = MultiNodeStreamManager::new();
                for i in 0..num_nodes {
                    let (_tx, rx) = mpsc::channel::<CommandOutput>(100);
                    manager.add_stream(Node::new(format!("host{i}"), 22, "user".to_string()), rx);
                }
                let mut app = TuiApp::new();
                // Warm-up pass so the measured iterations see a settled state.
                app.check_data_changes(manager.streams());
                b.iter(|| black_box(app.check_data_changes(manager.streams())));
            },
        );
    }
    group.finish();
}
/// Benchmark construction costs: 100 streams in a manager, and a fresh `TuiApp`.
///
/// Sample size is reduced to 50 since each iteration allocates heavily.
fn bench_memory_allocation(c: &mut Criterion) {
    let mut group = c.benchmark_group("memory_allocation");
    group.sample_size(50);
    group.bench_function("create_100_streams", |b| {
        b.iter(|| {
            let mut manager = MultiNodeStreamManager::new();
            (0..100).for_each(|i| {
                let (_tx, rx) = mpsc::channel::<CommandOutput>(100);
                manager.add_stream(Node::new(format!("host{i}"), 22, "user".to_string()), rx);
            });
            black_box(manager.total_count())
        });
    });
    group.bench_function("create_tui_app", |b| {
        b.iter(|| black_box(TuiApp::new()));
    });
    group.finish();
}
// Register every benchmark function with criterion and emit the harness
// entry point (`main`) for `cargo bench`.
criterion_group!(
benches,
bench_large_output_single_stream,
bench_rolling_buffer_overflow,
bench_concurrent_multi_node,
bench_poll_all_throughput,
bench_tui_render_summary,
bench_tui_render_detail,
bench_data_change_detection,
bench_memory_allocation,
);
criterion_main!(benches);