Struct StreamingIterator 

pub struct StreamingIterator { /* private fields */ }

Iterator over streaming dataset chunks

Implementations§

impl StreamingIterator

pub fn from_csv<P: AsRef<Path>>(path: P, config: StreamConfig) -> Result<Self>

Create a new streaming iterator from a CSV file
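A minimal sketch of chunked CSV ingestion. The file path and configuration values are placeholders, the import path for StreamConfig and StreamingIterator depends on where this crate re-exports them, and boxing the crate's error via ? assumes it implements std::error::Error:

fn stream_csv() -> Result<(), Box<dyn std::error::Error>> {
    // Tune chunk_size / memory_limit_mb for the machine; everything else default.
    let config = StreamConfig {
        chunk_size: 4_096,          // samples per chunk
        memory_limit_mb: Some(256), // cap resident memory
        ..Default::default()
    };
    // "data/train.csv" is a placeholder path.
    let mut stream = StreamingIterator::from_csv("data/train.csv", config)?;
    while let Some(chunk) = stream.next_chunk()? {
        // chunk.data is an Array2<f64>; chunk.target is an Option<Array1<f64>>.
        println!("chunk {}: {} samples", chunk.chunk_index, chunk.n_samples());
        if chunk.is_last {
            break;
        }
    }
    Ok(())
}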

pub fn from_binary<P: AsRef<Path>>(
    path: P,
    n_features: usize,
    config: StreamConfig,
) -> Result<Self>

Create a new streaming iterator from a binary file
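A sketch for a headerless binary dump. Because the file carries no schema, n_features tells the reader where each sample ends; that the payload is raw f64 rows matching the crate's expected layout is an assumption, and the path is a placeholder:

// Illustrative: 64 features per sample.
let config = StreamConfig {
    chunk_size: 10_000,
    ..Default::default()
};
let mut stream = StreamingIterator::from_binary("data/features.bin", 64, config)?;
while let Some(chunk) = stream.next_chunk()? {
    assert_eq!(chunk.n_features(), 64);
    // ... process chunk.data ...
    if chunk.is_last {
        break;
    }
}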

pub fn from_generator<F>(
    generator: F,
    total_samples: usize,
    n_features: usize,
    config: StreamConfig,
) -> Result<Self>
where
    F: Fn(usize, usize, usize) -> Result<(Array2<f64>, Option<Array1<f64>>)> + Send + 'static,

Create a streaming iterator from a data generator function
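Generators are convenient for synthetic or procedurally derived data that never touches disk. A minimal sketch; reading the closure's three usize arguments as (chunk_index, chunk_size, n_features) is an assumption based on the signature alone:

use scirs2_core::ndarray::{Array1, Array2};

// Emits chunks whose features all equal the chunk index, with zeroed targets.
// The (chunk_index, chunk_size, n_features) interpretation is an assumption.
let generator = |chunk_index: usize, chunk_size: usize, n_features: usize| {
    let data = Array2::<f64>::from_elem((chunk_size, n_features), chunk_index as f64);
    let target = Array1::<f64>::zeros(chunk_size);
    Ok((data, Some(target)))
};

let config = StreamConfig {
    chunk_size: 1_000,
    ..Default::default()
};
// 100_000 total samples, 20 features per sample.
let mut stream = StreamingIterator::from_generator(generator, 100_000, 20, config)?;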

pub fn next_chunk(&mut self) -> Result<Option<DataChunk>>

Get the next chunk from the stream

Examples found in repository
examples/datasets_streaming_demo.rs (line 76)
44 fn demonstrate_basic_streaming() -> Result<(), Box<dyn std::error::Error>> {
45    println!("📊 BASIC STREAMING OPERATIONS");
46    println!("{}", "-".repeat(40));
47
48    // Configure streaming
49    let config = StreamConfig {
50        chunk_size: 1000,           // 1K samples per chunk
51        buffer_size: 3,             // Buffer 3 chunks
52        num_workers: 4,             // Use 4 worker threads
53        memory_limit_mb: Some(100), // Limit to 100MB
54        enable_compression: false,
55        enable_prefetch: true,
56        max_chunks: Some(10), // Process only 10 chunks for demo
57    };
58
59    println!("Streaming Configuration:");
60    println!("  Chunk size: {} samples", config.chunk_size);
61    println!("  Buffer size: {} chunks", config.buffer_size);
62    println!("  Workers: {}", config.num_workers);
63    println!("  Memory limit: {:?} MB", config.memory_limit_mb);
64    println!("  Max chunks: {:?}", config.max_chunks);
65
66    // Create streaming classification dataset
67    println!("\nStreaming synthetic classification data...");
68    let mut stream = stream_classification(100_000, 20, 5, config.clone())?;
69
70    let mut total_samples = 0;
71    let mut chunk_count = 0;
72    let mut class_distribution: HashMap<i32, usize> = HashMap::new();
73
74    let start_time = Instant::now();
75
76    while let Some(chunk) = stream.next_chunk()? {
77        total_samples += chunk.n_samples();
78        chunk_count += 1;
79
80        // Analyze this chunk
81        if let Some(target) = &chunk.target {
82            for &class in target.iter() {
83                *class_distribution.entry(class as i32).or_insert(0) += 1;
84            }
85        }
86
87        // Print progress
88        let stats = stream.stats();
89        if let Some(progress) = stats.progress_percent() {
90            println!(
91                "  Chunk {}: {} samples (Progress: {:.1}%, Buffer: {:.1}%)",
92                chunk.chunk_index + 1,
93                chunk.n_samples(),
94                progress,
95                stats.buffer_utilization()
96            );
97        } else {
98            println!(
99                "  Chunk {}: {} samples (Buffer: {:.1}%)",
100                chunk.chunk_index + 1,
101                chunk.n_samples(),
102                stats.buffer_utilization()
103            );
104        }
105
106        // Simulate processing time
107        std::thread::sleep(std::time::Duration::from_millis(50));
108
109        if chunk.is_last {
110            println!("  📋 Reached last chunk");
111            break;
112        }
113    }
114
115    let duration = start_time.elapsed();
116
117    println!("\nStreaming Results:");
118    println!("  Total chunks processed: {chunk_count}");
119    println!("  Total samples: {total_samples}");
120    println!("  Processing time: {:.2}s", duration.as_secs_f64());
121    println!(
122        "  Throughput: {:.1} samples/s",
123        total_samples as f64 / duration.as_secs_f64()
124    );
125    println!("  Class distribution: {class_distribution:?}");
126
127    println!();
128    Ok(())
129 }
130
131 #[allow(dead_code)]
132 fn demonstrate_memory_efficient_processing() -> Result<(), Box<dyn std::error::Error>> {
133    println!("💾 MEMORY-EFFICIENT PROCESSING");
134    println!("{}", "-".repeat(40));
135
136    // Compare memory usage: streaming vs. in-memory
137    let datasetsize = 50_000;
138    let n_features = 50;
139
140    println!("Comparing memory usage for {datasetsize} samples with {n_features} features");
141
142    // In-memory approach (for comparison)
143    println!("\n1. In-memory approach:");
144    let start_mem = get_memory_usage();
145    let start_time = Instant::now();
146
147    let in_memorydataset = make_classification(datasetsize, n_features, 5, 2, 25, Some(42))?;
148    let (train, test) = train_test_split(&in_memorydataset, 0.2, Some(42))?;
149
150    let in_memory_time = start_time.elapsed();
151    let in_memory_mem = get_memory_usage() - start_mem;
152
153    println!("  Time: {:.2}s", in_memory_time.as_secs_f64());
154    println!("  Memory usage: ~{in_memory_mem:.1} MB");
155    println!("  Train samples: {}", train.n_samples());
156    println!("  Test samples: {}", test.n_samples());
157
158    // Streaming approach
159    println!("\n2. Streaming approach:");
160    let stream_start_time = Instant::now();
161    let stream_start_mem = get_memory_usage();
162
163    let config = StreamConfig {
164        chunk_size: 5_000, // Smaller chunks for memory efficiency
165        buffer_size: 2,    // Smaller buffer
166        num_workers: 2,
167        memory_limit_mb: Some(50),
168        ..Default::default()
169    };
170
171    let mut stream = stream_classification(datasetsize, n_features, 5, config)?;
172
173    let mut total_processed = 0;
174    let mut train_samples = 0;
175    let mut test_samples = 0;
176
177    while let Some(chunk) = stream.next_chunk()? {
178        total_processed += chunk.n_samples();
179
180        // Simulate train/test split on chunk level
181        let chunk_trainsize = (chunk.n_samples() as f64 * 0.8) as usize;
182        train_samples += chunk_trainsize;
183        test_samples += chunk.n_samples() - chunk_trainsize;
184
185        // Process chunk (simulate some computation)
186        let _mean = chunk.data.mean_axis(scirs2_core::ndarray::Axis(0));
187        let _std = chunk.data.std_axis(scirs2_core::ndarray::Axis(0), 0.0);
188
189        if chunk.is_last {
190            break;
191        }
192    }
193
194    let stream_time = stream_start_time.elapsed();
195    let stream_mem = get_memory_usage() - stream_start_mem;
196
197    println!("  Time: {:.2}s", stream_time.as_secs_f64());
198    println!("  Memory usage: ~{stream_mem:.1} MB");
199    println!("  Train samples: {train_samples}");
200    println!("  Test samples: {test_samples}");
201    println!("  Total processed: {total_processed}");
202
203    // Comparison
204    println!("\n3. Comparison:");
205    println!(
206        "  Memory savings: {:.1}x less memory",
207        in_memory_mem / stream_mem.max(1.0)
208    );
209    println!(
210        "  Time overhead: {:.1}x",
211        stream_time.as_secs_f64() / in_memory_time.as_secs_f64()
212    );
213    println!("  Streaming is beneficial for large datasets that don't fit in memory");
214
215    println!();
216    Ok(())
217 }
218
219 #[allow(dead_code)]
220 fn demonstrate_stream_transformations() -> Result<(), Box<dyn std::error::Error>> {
221    println!("🔄 STREAM TRANSFORMATIONS");
222    println!("{}", "-".repeat(40));
223
224    // Create a transformer pipeline
225    let transformer = StreamTransformer::new()
226        .add_standard_scaling()
227        .add_missing_value_imputation();
228
229    println!("Created transformation pipeline:");
230    println!("  1. Standard scaling (z-score normalization)");
231    println!("  2. Missing value imputation");
232
233    let config = StreamConfig {
234        chunk_size: 2000,
235        buffer_size: 2,
236        max_chunks: Some(5),
237        ..Default::default()
238    };
239
240    let mut stream = stream_regression(10_000, 15, config)?;
241    let mut transformed_chunks = 0;
242
243    println!("\nProcessing and transforming chunks...");
244
245    while let Some(mut chunk) = stream.next_chunk()? {
246        println!("  Processing chunk {}", chunk.chunk_index + 1);
247
248        // Show statistics before transformation
249        let data_mean_before = chunk
250            .data
251            .mean_axis(scirs2_core::ndarray::Axis(0))
252            .expect("Operation failed");
253        let data_std_before = chunk.data.std_axis(scirs2_core::ndarray::Axis(0), 0.0);
254
255        println!(
256            "    Before: mean = {:.3}, std = {:.3}",
257            data_mean_before[0], data_std_before[0]
258        );
259
260        // Apply transformations
261        transformer.transform_chunk(&mut chunk)?;
262
263        // Show statistics after transformation
264        let data_mean_after = chunk
265            .data
266            .mean_axis(scirs2_core::ndarray::Axis(0))
267            .expect("Operation failed");
268        let data_std_after = chunk.data.std_axis(scirs2_core::ndarray::Axis(0), 0.0);
269
270        println!(
271            "    After:  mean = {:.3}, std = {:.3}",
272            data_mean_after[0], data_std_after[0]
273        );
274
275        transformed_chunks += 1;
276
277        if chunk.is_last {
278            break;
279        }
280    }
281
282    println!("\nTransformation Summary:");
283    println!("  Chunks processed: {transformed_chunks}");
284    println!("  Each chunk was transformed independently");
285    println!("  Memory-efficient: only one chunk in memory at a time");
286
287    println!();
288    Ok(())
289 }
290
291 #[allow(dead_code)]
292 fn demonstrate_parallel_processing() -> Result<(), Box<dyn std::error::Error>> {
293    println!("⚡ PARALLEL STREAM PROCESSING");
294    println!("{}", "-".repeat(40));
295
296    let config = StreamConfig {
297        chunk_size: 1500,
298        buffer_size: 4,
299        num_workers: 4,
300        max_chunks: Some(8),
301        ..Default::default()
302    };
303
304    println!("Parallel processing configuration:");
305    println!("  Workers: {}", config.num_workers);
306    println!("  Chunk size: {}", config.chunk_size);
307    println!("  Buffer size: {}", config.buffer_size);
308
309    // Create a simple processor that computes statistics
310    let _processor: StreamProcessor<DataChunk> = StreamProcessor::new(config.clone());
311
312    // Define a processing function
313    let compute_stats = |chunk: DataChunk| -> Result<
314        HashMap<String, f64>,
315        Box<dyn std::error::Error + Send + Sync>,
316    > {
317        let mut stats = HashMap::new();
318
319        // Compute basic statistics
320        let mean = chunk
321            .data
322            .mean_axis(scirs2_core::ndarray::Axis(0))
323            .expect("Operation failed");
324        let std = chunk.data.std_axis(scirs2_core::ndarray::Axis(0), 0.0);
325
326        stats.insert("mean_feature_0".to_string(), mean[0]);
327        stats.insert("std_feature_0".to_string(), std[0]);
328        stats.insert("n_samples".to_string(), chunk.n_samples() as f64);
329        stats.insert("chunk_index".to_string(), chunk.chunk_index as f64);
330
331        // Simulate some computation time
332        std::thread::sleep(std::time::Duration::from_millis(100));
333
334        Ok(stats)
335    };
336
337    println!("\nProcessing stream with parallel workers...");
338    let start_time = Instant::now();
339
340    let stream = stream_classification(12_000, 10, 3, config)?;
341
342    // For demonstration, we'll process chunks sequentially with timing
343    // In a real implementation, you'd use the processor.process_parallel method
344    let mut stream_iter = stream;
345    let mut chunk_results = Vec::new();
346
347    while let Some(chunk) = stream_iter.next_chunk()? {
348        let chunk_start = Instant::now();
349        let chunk_id = chunk.chunk_index;
350        let chunk_samples = chunk.n_samples();
351
352        // Process chunk
353        let stats = compute_stats(chunk)
354            .map_err(|e| -> Box<dyn std::error::Error> { Box::new(std::io::Error::other(e)) })?;
355        let chunk_time = chunk_start.elapsed();
356
357        println!(
358            "  Chunk {}: {} samples, {:.2}ms",
359            chunk_id + 1,
360            chunk_samples,
361            chunk_time.as_millis()
362        );
363
364        chunk_results.push(stats);
365
366        if chunk_results.len() >= 8 {
367            break;
368        }
369    }
370
371    let total_time = start_time.elapsed();
372
373    println!("\nParallel Processing Results:");
374    println!("  Total chunks: {}", chunk_results.len());
375    println!("  Total time: {:.2}s", total_time.as_secs_f64());
376    println!(
377        "  Average time per chunk: {:.2}ms",
378        total_time.as_millis() as f64 / chunk_results.len() as f64
379    );
380
381    // Aggregate statistics
382    let total_samples: f64 = chunk_results
383        .iter()
384        .map(|stats| stats.get("n_samples").unwrap_or(&0.0))
385        .sum();
386
387    println!("  Total samples processed: {total_samples}");
388    println!(
389        "  Throughput: {:.1} samples/s",
390        total_samples / total_time.as_secs_f64()
391    );
392
393    println!();
394    Ok(())
395 }
396
397 #[allow(dead_code)]
398 fn demonstrate_performance_comparison() -> Result<(), Box<dyn std::error::Error>> {
399    println!("📊 PERFORMANCE COMPARISON");
400    println!("{}", "-".repeat(40));
401
402    let dataset_sizes = vec![10_000, 50_000, 100_000];
403    let chunk_sizes = vec![1_000, 5_000, 10_000];
404
405    println!("Comparing streaming performance across different configurations:");
406    println!();
407
408    for &datasetsize in &dataset_sizes {
409        println!("Dataset size: {datasetsize} samples");
410
411        for &chunksize in &chunk_sizes {
412            let config = StreamConfig {
413                chunk_size: chunksize,
414                buffer_size: 3,
415                num_workers: 2,
416                max_chunks: Some(datasetsize / chunksize),
417                ..Default::default()
418            };
419
420            let start_time = Instant::now();
421            let mut stream = stream_regression(datasetsize, 20, config)?;
422
423            let mut processed_samples = 0;
424            let mut processed_chunks = 0;
425
426            while let Some(chunk) = stream.next_chunk()? {
427                processed_samples += chunk.n_samples();
428                processed_chunks += 1;
429
430                // Simulate minimal processing
431                let _stats = chunk.data.mean_axis(scirs2_core::ndarray::Axis(0));
432
433                if chunk.is_last || processed_samples >= datasetsize {
434                    break;
435                }
436            }
437
438            let duration = start_time.elapsed();
439            let throughput = processed_samples as f64 / duration.as_secs_f64();
440
441            println!(
442                "  Chunk size {}: {:.2}s ({:.1} samples/s, {} chunks)",
443                chunksize,
444                duration.as_secs_f64(),
445                throughput,
446                processed_chunks
447            );
448        }
449        println!();
450    }
451
452    println!("Performance Insights:");
453    println!("  • Larger chunks = fewer iterations, better throughput");
454    println!("  • Smaller chunks = lower memory usage, more responsive");
455    println!("  • Optimal chunk size depends on memory constraints and processing complexity");
456
457    println!();
458    Ok(())
459 }
460
461 #[allow(dead_code)]
462 fn demonstrate_real_world_scenarios() -> Result<(), Box<dyn std::error::Error>> {
463    println!("🌍 REAL-WORLD STREAMING SCENARIOS");
464    println!("{}", "-".repeat(40));
465
466    // Scenario 1: Training on large dataset with limited memory
467    println!("Scenario 1: Large dataset training with memory constraints");
468    simulate_training_scenario()?;
469
470    // Scenario 2: Data preprocessing pipeline
471    println!("\nScenario 2: Data preprocessing pipeline");
472    simulate_preprocessing_pipeline()?;
473
474    // Scenario 3: Model evaluation on large test set
475    println!("\nScenario 3: Model evaluation on large test set");
476    simulate_model_evaluation()?;
477
478    println!();
479    Ok(())
480 }
481
482 #[allow(dead_code)]
483 fn simulate_training_scenario() -> Result<(), Box<dyn std::error::Error>> {
484    println!("  • Dataset: 500K samples, 100 features");
485    println!("  • Memory limit: 200MB");
486    println!("  • Goal: Train incrementally using mini-batches");
487
488    let config = StreamConfig {
489        chunk_size: 5_000, // Mini-batch size
490        buffer_size: 2,    // Keep memory low
491        memory_limit_mb: Some(200),
492        max_chunks: Some(10), // Simulate partial processing
493        ..Default::default()
494    };
495
496    let mut stream = stream_classification(500_000, 100, 10, config)?;
497    let mut total_batches = 0;
498    let mut total_samples = 0;
499
500    let start_time = Instant::now();
501
502    while let Some(chunk) = stream.next_chunk()? {
503        // Simulate training on mini-batch
504        let batchsize = chunk.n_samples();
505
506        // Simulate gradient computation time
507        std::thread::sleep(std::time::Duration::from_millis(20));
508
509        total_batches += 1;
510        total_samples += batchsize;
511
512        if total_batches % 3 == 0 {
513            println!("    Processed {total_batches} batches ({total_samples} samples)");
514        }
515
516        if chunk.is_last {
517            break;
518        }
519    }
520
521    let duration = start_time.elapsed();
522    println!(
523        "  ✅ Training simulation: {} batches, {:.2}s",
524        total_batches,
525        duration.as_secs_f64()
526    );
527
528    Ok(())
529 }
530
531 #[allow(dead_code)]
532 fn simulate_preprocessing_pipeline() -> Result<(), Box<dyn std::error::Error>> {
533    println!("  • Raw data → Clean → Scale → Feature selection");
534    println!("  • Process 200K samples in chunks");
535
536    let config = StreamConfig {
537        chunk_size: 8_000,
538        buffer_size: 3,
539        max_chunks: Some(5),
540        ..Default::default()
541    };
542
543    let transformer = StreamTransformer::new()
544        .add_missing_value_imputation()
545        .add_standard_scaling();
546
547    let mut stream = stream_regression(200_000, 50, config)?;
548    let mut processed_chunks = 0;
549
550    while let Some(mut chunk) = stream.next_chunk()? {
551        // Step 1: Clean data (remove outliers, handle missing values)
552        transformer.transform_chunk(&mut chunk)?;
553
554        // Step 2: Feature selection (simulate by keeping first 30 features)
555        let selecteddata = chunk
556            .data
557            .slice(scirs2_core::ndarray::s![.., ..30])
558            .to_owned();
559
560        processed_chunks += 1;
561        println!(
562            "    Chunk {}: {} → {} features",
563            processed_chunks,
564            chunk.n_features(),
565            selecteddata.ncols()
566        );
567
568        if chunk.is_last {
569            break;
570        }
571    }
572
573    println!("  ✅ Preprocessing pipeline: {processed_chunks} chunks processed");
574
575    Ok(())
576 }
577
578 #[allow(dead_code)]
579 fn simulate_model_evaluation() -> Result<(), Box<dyn std::error::Error>> {
580    println!("  • Evaluate model on 1M test samples");
581    println!("  • Compute accuracy in streaming fashion");
582
583    let config = StreamConfig {
584        chunk_size: 10_000,
585        buffer_size: 2,
586        max_chunks: Some(8),
587        ..Default::default()
588    };
589
590    let mut stream = stream_classification(1_000_000, 20, 5, config)?;
591    let mut correct_predictions = 0;
592    let mut total_predictions = 0;
593
594    while let Some(chunk) = stream.next_chunk()? {
595        if let Some(true_labels) = &chunk.target {
596            // Simulate model predictions (random for demo)
597            let predictions: Vec<f64> = (0..chunk.n_samples())
598                .map(|_| (scirs2_core::random::random::<f64>() * 5.0).floor())
599                .collect();
600
601            // Calculate accuracy for this chunk
602            let chunk_correct = true_labels
603                .iter()
604                .zip(predictions.iter())
605                .filter(|(&true_label, &pred)| (true_label - pred).abs() < 0.5)
606                .count();
607
608            correct_predictions += chunk_correct;
609            total_predictions += chunk.n_samples();
610        }
611
612        if chunk.is_last {
613            break;
614        }
615    }
616
617    let accuracy = correct_predictions as f64 / total_predictions as f64;
618    println!(
619        "  ✅ Model evaluation: {:.1}% accuracy on {} samples",
620        accuracy * 100.0,
621        total_predictions
622    );
623
624    Ok(())
625 }
pub fn stats(&self) -> StreamStats

Get streaming statistics

Examples found in repository
examples/datasets_streaming_demo.rs (line 88)
76    while let Some(chunk) = stream.next_chunk()? {
77        total_samples += chunk.n_samples();
78        chunk_count += 1;
79
80        // Analyze this chunk
81        if let Some(target) = &chunk.target {
82            for &class in target.iter() {
83                *class_distribution.entry(class as i32).or_insert(0) += 1;
84            }
85        }
86
87        // Print progress
88        let stats = stream.stats();
89        if let Some(progress) = stats.progress_percent() {
90            println!(
91                "  Chunk {}: {} samples (Progress: {:.1}%, Buffer: {:.1}%)",
92                chunk.chunk_index + 1,
93                chunk.n_samples(),
94                progress,
95                stats.buffer_utilization()
96            );
97        } else {
98            println!(
99                "  Chunk {}: {} samples (Buffer: {:.1}%)",
100                chunk.chunk_index + 1,
101                chunk.n_samples(),
102                stats.buffer_utilization()
103            );
104        }
105
106        // Simulate processing time
107        std::thread::sleep(std::time::Duration::from_millis(50));
108
109        if chunk.is_last {
110            println!("  📋 Reached last chunk");
111            break;
112        }
113    }

Auto Trait Implementations§

Blanket Implementations§

impl<T> Any for T
where T: 'static + ?Sized,

    fn type_id(&self) -> TypeId
    Gets the TypeId of self.

impl<T> Borrow<T> for T
where T: ?Sized,

    fn borrow(&self) -> &T
    Immutably borrows from an owned value.

impl<T> BorrowMut<T> for T
where T: ?Sized,

    fn borrow_mut(&mut self) -> &mut T
    Mutably borrows from an owned value.

impl<T> From<T> for T

    fn from(t: T) -> T
    Returns the argument unchanged.

impl<T> Instrument for T

    fn instrument(self, span: Span) -> Instrumented<Self>
    Instruments this type with the provided Span, returning an Instrumented wrapper.

    fn in_current_span(self) -> Instrumented<Self>
    Instruments this type with the current Span, returning an Instrumented wrapper.

impl<T, U> Into<U> for T
where U: From<T>,

    fn into(self) -> U
    Calls U::from(self). That is, this conversion is whatever the implementation of From<T> for U chooses to do.

impl<T> IntoEither for T

    fn into_either(self, into_left: bool) -> Either<Self, Self>
    Converts self into a Left variant of Either<Self, Self> if into_left is true; otherwise converts self into a Right variant.

    fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
    where F: FnOnce(&Self) -> bool,
    Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true; otherwise converts self into a Right variant.

impl<T> Pointable for T

    const ALIGN: usize
    The alignment of the pointer.

    type Init = T
    The type for initializers.

    unsafe fn init(init: <T as Pointable>::Init) -> usize
    Initializes a value with the given initializer.

    unsafe fn deref<'a>(ptr: usize) -> &'a T
    Dereferences the given pointer.

    unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T
    Mutably dereferences the given pointer.

    unsafe fn drop(ptr: usize)
    Drops the object pointed to by the given pointer.

impl<T> PolicyExt for T
where T: ?Sized,

    fn and<P, B, E>(self, other: P) -> And<T, P>
    where T: Policy<B, E>, P: Policy<B, E>,
    Creates a new Policy that returns Action::Follow only if both self and other return Action::Follow.

    fn or<P, B, E>(self, other: P) -> Or<T, P>
    where T: Policy<B, E>, P: Policy<B, E>,
    Creates a new Policy that returns Action::Follow if either self or other returns Action::Follow.

impl<T> Same for T

    type Output = T
    Should always be Self.

impl<SS, SP> SupersetOf<SS> for SP
where SS: SubsetOf<SP>,

    fn to_subset(&self) -> Option<SS>
    The inverse inclusion map: attempts to construct self from the equivalent element of its superset.

    fn is_in_subset(&self) -> bool
    Checks if self is actually part of its subset SS (and can be converted to it).

    fn to_subset_unchecked(&self) -> SS
    Use with care! Same as self.to_subset but without any property checks. Always succeeds.

    fn from_subset(element: &SS) -> SP
    The inclusion map: converts self to the equivalent element of its superset.

impl<T, U> TryFrom<U> for T
where U: Into<T>,

    type Error = Infallible
    The type returned in the event of a conversion error.

    fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
    Performs the conversion.

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

    type Error = <U as TryFrom<T>>::Error
    The type returned in the event of a conversion error.

    fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>
    Performs the conversion.

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

    fn vzip(self) -> V

impl<T> WithSubscriber for T

    fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
    where S: Into<Dispatch>,
    Attaches the provided Subscriber to this type, returning a WithDispatch wrapper.

    fn with_current_subscriber(self) -> WithDispatch<Self>
    Attaches the current default Subscriber to this type, returning a WithDispatch wrapper.