// performance_test/performance_test.rs
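//! Performance test example: benchmarks single-threaded, streamed, and
//! multi-threaded PPTX-to-Markdown conversion over a configurable number
//! of iterations and reports which approach is fastest.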
use pptx_to_md::{ParserConfig, PptxContainer, Result};
use rayon::prelude::*;
use std::env;
use std::path::Path;
use std::time::{Duration, Instant};

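/// A minimal timing harness: times individual operations and reports the
/// total, average, minimum, and maximum durations at the end.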
struct Benchmark {
    name: String,
    start_time: Instant,
    results: Vec<Duration>,
}

impl Benchmark {
    fn new(name: &str) -> Self {
        println!("Starting benchmark: {}", name);
        Benchmark {
            name: name.to_string(),
            start_time: Instant::now(),
            results: Vec::new(),
        }
    }

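    /// Runs `f`, records how long the call took, and passes its result through.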
    fn measure<F, T>(&mut self, mut f: F) -> T
    where
        F: FnMut() -> T,
    {
        let start = Instant::now();
        let result = f();
        let duration = start.elapsed();
        self.results.push(duration);
        println!(" Operation took: {:?}", duration);
        result
    }

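    /// Prints a summary of all measurements recorded so far.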
    fn report(&self) {
        if self.results.is_empty() {
            println!("No measurements for {}", self.name);
            return;
        }

        let total = self.start_time.elapsed();
        let count = self.results.len();
        let sum: Duration = self.results.iter().sum();
        let avg = sum / count as u32;
        let min = self.results.iter().min().unwrap();
        let max = self.results.iter().max().unwrap();

        println!("\nBenchmark Results for {}", self.name);
        println!("----------------------------");
        println!("Total time: {:?}", total);
        println!("Operations: {}", count);
        println!("Average time per operation: {:?}", avg);
        println!("Min time: {:?}", min);
        println!("Max time: {:?}", max);
        println!("----------------------------\n");
    }
}

fn main() -> Result<()> {
    let args: Vec<String> = env::args().collect();
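
    // The first CLI argument is the path to the presentation under test.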
    let pptx_path = if args.len() > 1 {
        &args[1]
    } else {
        eprintln!("Usage: cargo run --example performance_test <path/to/presentation.pptx> [iterations]");
        return Ok(());
    };

    // The optional second argument sets the iteration count (default: 10).
    let iterations = if args.len() > 2 {
        args[2].parse().unwrap_or(10)
    } else {
        10
    };

    println!("Performance testing with {} iterations on: {}", iterations, pptx_path);

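    // Benchmark 1: open the file, parse every slide with parse_all(), then
    // convert the slides to Markdown sequentially.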
    let mut single_thread_bench = Benchmark::new("Single-threaded parsing");

    let mut total_slides = 0;

    for i in 0..iterations {
        println!("\nIteration {} (Single-threaded)", i + 1);

        // Time how long opening the container takes.
        let mut container = single_thread_bench.measure(|| {
            let config = ParserConfig::builder()
                .extract_images(true)
                .build();
            PptxContainer::open(Path::new(pptx_path), config).expect("Failed to open PPTX")
        });

        println!(" Found {} slides in the presentation", container.slide_count);

        // Time parsing all slides in one pass.
        let slides = single_thread_bench.measure(|| {
            container.parse_all().expect("Failed to parse slides")
        });

        // Time converting every parsed slide to Markdown sequentially.
        let _md_content = single_thread_bench.measure(|| {
            slides.iter()
                .filter_map(|slide| slide.convert_to_md())
                .collect::<Vec<String>>()
        });

        total_slides += slides.len();
    }

    single_thread_bench.report();
    println!("Average slides parsed per iteration: {}", total_slides / iterations);
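    // Benchmark 2: stream slides one at a time with iter_slides(), converting
    // each to Markdown as it arrives instead of materializing all slides first.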
    let mut single_thread_streamed_bench = Benchmark::new("Single-threaded streamed parsing");

    total_slides = 0;

    for i in 0..iterations {
        println!("\nIteration {} (Single-threaded streamed)", i + 1);

        let mut container = single_thread_streamed_bench.measure(|| {
            let config = ParserConfig::builder()
                .extract_images(true)
                .build();
            PptxContainer::open(Path::new(pptx_path), config).expect("Failed to open PPTX")
        });

        println!(" Found {} slides in the presentation", container.slide_count);

        let expected_slides = container.slide_count;

        // Time streaming the slides one at a time, converting each to Markdown
        // as soon as it is parsed.
        let slides_processed = single_thread_streamed_bench.measure(|| {
            let mut processed = 0;

            for slide_result in container.iter_slides() {
                match slide_result {
                    Ok(slide) => {
                        let _md_content = slide.convert_to_md();
                        processed += 1;
                    },
                    Err(e) => {
                        eprintln!("Error processing slide: {:?}", e);
                    }
                }
            }

            processed
        });

        println!(" Processed {} out of {} slides", slides_processed, expected_slides);
        total_slides += slides_processed;
    }

    single_thread_streamed_bench.report();
    println!("Average slides parsed per iteration: {}", total_slides / iterations);
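    // Benchmark 3: parse slides with parse_all_multi_threaded(), then convert
    // them to Markdown in parallel using rayon's par_iter().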
    let mut optimized_multi_thread_bench = Benchmark::new("Optimized Multi-threaded parsing");

    total_slides = 0;

    for i in 0..iterations {
        println!("\nIteration {} (Optimized Multi-threaded)", i + 1);

        let mut container = optimized_multi_thread_bench.measure(|| {
            let config = ParserConfig::builder()
                .extract_images(true)
                .build();
            PptxContainer::open(Path::new(pptx_path), config).expect("Failed to open PPTX")
        });

        println!(" Found {} slides in the presentation", container.slide_count);

        // Time parsing all slides across worker threads.
        let slides = optimized_multi_thread_bench.measure(|| {
            container.parse_all_multi_threaded().expect("Failed to parse slides")
        });

        println!(" Successfully processed {} slides", slides.len());

        // Time converting the parsed slides to Markdown in parallel.
        let _md_content = optimized_multi_thread_bench.measure(|| {
            slides.par_iter()
                .filter_map(|slide| slide.convert_to_md())
                .collect::<Vec<String>>()
        });

        total_slides += slides.len();
    }

    optimized_multi_thread_bench.report();
    println!("Average slides parsed per iteration: {}", total_slides / iterations);

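    // Compare the average per-operation times of the three approaches.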
    if !single_thread_bench.results.is_empty() &&
        !single_thread_streamed_bench.results.is_empty() &&
        !optimized_multi_thread_bench.results.is_empty() {

        let single_avg: Duration = single_thread_bench.results.iter().sum::<Duration>() /
            single_thread_bench.results.len() as u32;
        let single_streamed_avg: Duration = single_thread_streamed_bench.results.iter().sum::<Duration>() /
            single_thread_streamed_bench.results.len() as u32;
        let optimized_multi_avg: Duration = optimized_multi_thread_bench.results.iter().sum::<Duration>() /
            optimized_multi_thread_bench.results.len() as u32;

        println!("\nPerformance Comparison");
        println!("=====================");
        println!("Single-threaded average: {:?}", single_avg);
        println!("Single-threaded streaming average: {:?}", single_streamed_avg);
        println!("Optimized multi-threaded average: {:?}", optimized_multi_avg);

        if single_avg > single_streamed_avg {
            let speedup = single_avg.as_secs_f64() / single_streamed_avg.as_secs_f64();
            println!("Single-threaded streaming is {:.2}x faster than single-threaded", speedup);
        } else {
            let slowdown = single_streamed_avg.as_secs_f64() / single_avg.as_secs_f64();
            println!("Single-threaded streaming is {:.2}x slower than single-threaded", slowdown);
        }

        if single_avg > optimized_multi_avg {
            let speedup = single_avg.as_secs_f64() / optimized_multi_avg.as_secs_f64();
            println!("Optimized multi-threaded is {:.2}x faster than single-threaded", speedup);
        } else {
            let slowdown = optimized_multi_avg.as_secs_f64() / single_avg.as_secs_f64();
            println!("Optimized multi-threaded is {:.2}x slower than single-threaded", slowdown);
        }

        if single_streamed_avg > optimized_multi_avg {
            let speedup = single_streamed_avg.as_secs_f64() / optimized_multi_avg.as_secs_f64();
            println!("Optimized multi-threaded is {:.2}x faster than single-threaded streaming", speedup);
        } else {
            let slowdown = optimized_multi_avg.as_secs_f64() / single_streamed_avg.as_secs_f64();
            println!("Optimized multi-threaded is {:.2}x slower than single-threaded streaming", slowdown);
        }

        // Pick the approach with the smallest average measured duration.
        let fastest_approach = if single_avg <= single_streamed_avg && single_avg <= optimized_multi_avg {
            "Single-threaded"
        } else if single_streamed_avg <= single_avg && single_streamed_avg <= optimized_multi_avg {
            "Single-threaded streaming"
        } else {
            "Optimized multi-threaded"
        };

        println!("\nOverall result: {} approach is the fastest for this workload.", fastest_approach);
    }

    Ok(())
}