pub struct GpuContext { /* private fields */ }

Expand description
GPU context for managing device operations
Implementations§
Source§impl GpuContext
impl GpuContext
Sourcepub fn new(config: GpuConfig) -> Result<Self>
pub fn new(config: GpuConfig) -> Result<Self>
Create a new GPU context
Examples found in repository?
examples/advanced_showcase.rs (line 140)
130fn demonstrate_advanced_gpu_optimization() -> Result<(), Box<dyn std::error::Error>> {
131 println!("\n⚡ Advanced-GPU Optimization Demonstration");
132 println!("=====================================");
133
134 // Create GPU context (falls back to CPU if no GPU available)
135 println!("🔧 Initializing GPU context...");
136 let gpu_config = GpuConfig {
137 backend: GpuBackend::Cpu,
138 ..Default::default()
139 };
140 let gpu_context = GpuContext::new(gpu_config)?; // Using CPU backend for demo
141 println!(" Backend: {:?}", gpu_context.backend());
142
143 // Create advanced-GPU optimizer
144 let optimizer = AdvancedGpuOptimizer::new()
145 .with_adaptive_kernels(true)
146 .with_memory_prefetch(true)
147 .with_multi_gpu(false) // Single GPU for demo
148 .with_auto_tuning(true);
149
150 // Generate advanced-optimized matrix
151 println!("🔥 Generating advanced-optimized matrix...");
152 let start_time = Instant::now();
153 let matrix = optimizer.generate_advanced_optimized_matrix(
154 &gpu_context,
155 500, // rows
156 200, // cols
157 "normal", // distribution
158 )?;
159 let generation_time = start_time.elapsed();
160
161 println!(
162 " Generated {}x{} matrix in: {:?}",
163 matrix.nrows(),
164 matrix.ncols(),
165 generation_time
166 );
167 let matrix_mean = matrix.clone().mean();
168 let matrix_std = matrix.var(1.0).sqrt();
169 println!(
170 " Matrix stats: mean={:.3}, std={:.3}",
171 matrix_mean, matrix_std
172 );
173
174 // Benchmark performance
175 println!("📊 Running performance benchmarks...");
176 let datashapes = vec![(100, 50), (500, 200), (1000, 500)];
177 let benchmark_results =
178 optimizer.benchmark_performance(&gpu_context, "matrix_generation", &datashapes)?;
179
180 println!(" Benchmark Results:");
181 println!(
182 " Best Speedup: {:.2}x",
183 benchmark_results.best_speedup()
184 );
185 println!(
186 " Average Speedup: {:.2}x",
187 benchmark_results.average_speedup()
188 );
189 println!(
190 " Total Memory Usage: {:.1} MB",
191 benchmark_results.total_memory_usage()
192 );
193
194 Ok(())
195}

More examples
examples/gpu_acceleration.rs (line 172)
138fn demonstrate_backend_comparison() -> Result<(), Box<dyn std::error::Error>> {
139 println!("⚡ GPU BACKEND COMPARISON");
140 println!("{}", "-".repeat(40));
141
142 let testsize = 50_000;
143 let features = 20;
144
145 println!("Comparing backends for {testsize} samples with {features} features:");
146
147 // Test different backends
148 let backends = vec![
149 ("CPU Fallback", GpuBackend::Cpu),
150 ("CUDA", GpuBackend::Cuda { device_id: 0 }),
151 (
152 "OpenCL",
153 GpuBackend::OpenCl {
154 platform_id: 0,
155 device_id: 0,
156 },
157 ),
158 ];
159
160 let mut results: HashMap<String, std::time::Duration> = HashMap::new();
161
162 for (name, backend) in backends {
163 println!("\nTesting {name}:");
164
165 let config = GpuConfig {
166 backend: backend.clone(),
167 threads_per_block: 256,
168 enable_double_precision: true,
169 ..Default::default()
170 };
171
172 match GpuContext::new(config) {
173 Ok(context) => {
174 if context.is_available() {
175 // Test classification generation
176 let start = Instant::now();
177 let dataset =
178 context.make_classification_gpu(testsize, features, 5, 2, 15, Some(42))?;
179 let duration = start.elapsed();
180
181 results.insert(name.to_string(), duration);
182
183 println!(
184 " ✅ Classification: {} samples in {:.2}ms",
185 dataset.n_samples(),
186 duration.as_millis()
187 );
188 println!(
189 " 📊 Throughput: {:.1} samples/s",
190 dataset.n_samples() as f64 / duration.as_secs_f64()
191 );
192 } else {
193 println!(" ❌ Backend not available");
194 }
195 }
196 Err(e) => {
197 println!(" ❌ Error: {e}");
198 }
199 }
200 }
201
202 // Calculate speedups
203 if let Some(cpu_time) = results.get("CPU Fallback") {
204 println!("\nSpeedup Analysis:");
205 for (backend, gpu_time) in &results {
206 if backend != "CPU Fallback" {
207 let speedup = cpu_time.as_secs_f64() / gpu_time.as_secs_f64();
208 println!(" {backend}: {speedup:.1}x faster than CPU");
209 }
210 }
211 }
212
213 println!();
214 Ok(())
215}
216
217#[allow(dead_code)]
218fn demonstrate_performance_benchmarks() -> Result<(), Box<dyn std::error::Error>> {
219 println!("📊 PERFORMANCE BENCHMARKS");
220 println!("{}", "-".repeat(40));
221
222 let config = get_optimal_gpu_config();
223 let benchmark = GpuBenchmark::new(config)?;
224
225 println!("Running data generation benchmarks...");
226 let data_results = benchmark.benchmark_data_generation()?;
227 data_results.print_results();
228
229 println!("\nRunning matrix operation benchmarks...");
230 let matrix_results = benchmark.benchmark_matrix_operations()?;
231 matrix_results.print_results();
232
233 // Compare with CPU baseline
234 println!("\nCPU vs GPU Comparison:");
235 demonstrate_cpu_gpu_comparison()?;
236
237 println!();
238 Ok(())
239}
240
241#[allow(dead_code)]
242fn demonstrate_cpu_gpu_comparison() -> Result<(), Box<dyn std::error::Error>> {
243 let dataset_sizes = vec![10_000, 50_000, 100_000];
244
245 println!(
246 "{:<12} {:<15} {:<15} {:<10}",
247 "Size", "CPU Time", "GPU Time", "Speedup"
248 );
249 println!("{}", "-".repeat(55));
250
251 for &size in &dataset_sizes {
252 // CPU benchmark
253 let cpu_start = Instant::now();
254 let _cpudataset = make_classification(size, 20, 5, 2, 15, Some(42))?;
255 let cpu_time = cpu_start.elapsed();
256
257 // GPU benchmark
258 let gpu_start = Instant::now();
259 let _gpudataset = make_classification_auto_gpu(size, 20, 5, 2, 15, Some(42))?;
260 let gpu_time = gpu_start.elapsed();
261
262 let speedup = cpu_time.as_secs_f64() / gpu_time.as_secs_f64();
263
264 println!(
265 "{:<12} {:<15} {:<15} {:<10.1}x",
266 size,
267 format!("{:.1}ms", cpu_time.as_millis()),
268 format!("{:.1}ms", gpu_time.as_millis()),
269 speedup
270 );
271 }
272
273 Ok(())
274}
275
276#[allow(dead_code)]
277fn demonstrate_memory_management() -> Result<(), Box<dyn std::error::Error>> {
278 println!("💾 GPU MEMORY MANAGEMENT");
279 println!("{}", "-".repeat(40));
280
281 // Configure memory-constrained GPU context
282 let memory_config = GpuMemoryConfig {
283 max_memory_mb: Some(512), // Limit to 512MB
284 pool_size_mb: 256, // 256MB pool
285 enable_coalescing: true, // Enable memory coalescing
286 use_unified_memory: false, // Don't use unified memory
287 };
288
289 let gpu_config = GpuConfig {
290 backend: get_optimal_gpu_config().backend,
291 memory: memory_config,
292 threads_per_block: 256,
293 ..Default::default()
294 };
295
296 println!("Memory Configuration:");
297 println!(
298 " Max Memory: {} MB",
299 gpu_config.memory.max_memory_mb.unwrap_or(0)
300 );
301 println!(" Pool Size: {} MB", gpu_config.memory.pool_size_mb);
302 println!(" Coalescing: {}", gpu_config.memory.enable_coalescing);
303 println!(" Unified Memory: {}", gpu_config.memory.use_unified_memory);
304
305 let context = GpuContext::new(gpu_config)?;
306 let device_info = context.device_info();
307
308 println!("\nDevice Memory Info:");
309 println!(" Total: {} MB", device_info.total_memory_mb);
310 println!(" Available: {} MB", device_info.available_memory_mb);
311 println!(
312 " Utilization: {:.1}%",
313 (device_info.total_memory_mb - device_info.available_memory_mb) as f64
314 / device_info.total_memory_mb as f64
315 * 100.0
316 );
317
318 // Test memory-efficient generation
319 println!("\nTesting memory-efficient dataset generation...");
320
321 let sizes = vec![10_000, 25_000, 50_000];
322 for &size in &sizes {
323 let start = Instant::now();
324
325 match context.make_regression_gpu(size, 50, 30, 0.1, Some(42)) {
326 Ok(dataset) => {
327 let duration = start.elapsed();
328 let memory_estimate = dataset.n_samples() * dataset.n_features() * 8; // 8 bytes per f64
329
330 println!(
331 " {} samples: {:.1}ms (~{:.1} MB)",
332 size,
333 duration.as_millis(),
334 memory_estimate as f64 / (1024.0 * 1024.0)
335 );
336 }
337 Err(e) => {
338 println!(" {size} samples: Failed - {e}");
339 }
340 }
341 }
342
343 println!();
344 Ok(())
345}

Source§pub fn device_info(&self) -> &GpuDeviceInfo
pub fn device_info(&self) -> &GpuDeviceInfo
Get device information
Examples found in repository?
examples/gpu_acceleration.rs (line 306)
277fn demonstrate_memory_management() -> Result<(), Box<dyn std::error::Error>> {
278 println!("💾 GPU MEMORY MANAGEMENT");
279 println!("{}", "-".repeat(40));
280
281 // Configure memory-constrained GPU context
282 let memory_config = GpuMemoryConfig {
283 max_memory_mb: Some(512), // Limit to 512MB
284 pool_size_mb: 256, // 256MB pool
285 enable_coalescing: true, // Enable memory coalescing
286 use_unified_memory: false, // Don't use unified memory
287 };
288
289 let gpu_config = GpuConfig {
290 backend: get_optimal_gpu_config().backend,
291 memory: memory_config,
292 threads_per_block: 256,
293 ..Default::default()
294 };
295
296 println!("Memory Configuration:");
297 println!(
298 " Max Memory: {} MB",
299 gpu_config.memory.max_memory_mb.unwrap_or(0)
300 );
301 println!(" Pool Size: {} MB", gpu_config.memory.pool_size_mb);
302 println!(" Coalescing: {}", gpu_config.memory.enable_coalescing);
303 println!(" Unified Memory: {}", gpu_config.memory.use_unified_memory);
304
305 let context = GpuContext::new(gpu_config)?;
306 let device_info = context.device_info();
307
308 println!("\nDevice Memory Info:");
309 println!(" Total: {} MB", device_info.total_memory_mb);
310 println!(" Available: {} MB", device_info.available_memory_mb);
311 println!(
312 " Utilization: {:.1}%",
313 (device_info.total_memory_mb - device_info.available_memory_mb) as f64
314 / device_info.total_memory_mb as f64
315 * 100.0
316 );
317
318 // Test memory-efficient generation
319 println!("\nTesting memory-efficient dataset generation...");
320
321 let sizes = vec![10_000, 25_000, 50_000];
322 for &size in &sizes {
323 let start = Instant::now();
324
325 match context.make_regression_gpu(size, 50, 30, 0.1, Some(42)) {
326 Ok(dataset) => {
327 let duration = start.elapsed();
328 let memory_estimate = dataset.n_samples() * dataset.n_features() * 8; // 8 bytes per f64
329
330 println!(
331 " {} samples: {:.1}ms (~{:.1} MB)",
332 size,
333 duration.as_millis(),
334 memory_estimate as f64 / (1024.0 * 1024.0)
335 );
336 }
337 Err(e) => {
338 println!(" {size} samples: Failed - {e}");
339 }
340 }
341 }
342
343 println!();
344 Ok(())
345}

Source§pub fn backend(&self) -> &GpuBackend
pub fn backend(&self) -> &GpuBackend
Get the backend type
Examples found in repository?
examples/advanced_showcase.rs (line 141)
130fn demonstrate_advanced_gpu_optimization() -> Result<(), Box<dyn std::error::Error>> {
131 println!("\n⚡ Advanced-GPU Optimization Demonstration");
132 println!("=====================================");
133
134 // Create GPU context (falls back to CPU if no GPU available)
135 println!("🔧 Initializing GPU context...");
136 let gpu_config = GpuConfig {
137 backend: GpuBackend::Cpu,
138 ..Default::default()
139 };
140 let gpu_context = GpuContext::new(gpu_config)?; // Using CPU backend for demo
141 println!(" Backend: {:?}", gpu_context.backend());
142
143 // Create advanced-GPU optimizer
144 let optimizer = AdvancedGpuOptimizer::new()
145 .with_adaptive_kernels(true)
146 .with_memory_prefetch(true)
147 .with_multi_gpu(false) // Single GPU for demo
148 .with_auto_tuning(true);
149
150 // Generate advanced-optimized matrix
151 println!("🔥 Generating advanced-optimized matrix...");
152 let start_time = Instant::now();
153 let matrix = optimizer.generate_advanced_optimized_matrix(
154 &gpu_context,
155 500, // rows
156 200, // cols
157 "normal", // distribution
158 )?;
159 let generation_time = start_time.elapsed();
160
161 println!(
162 " Generated {}x{} matrix in: {:?}",
163 matrix.nrows(),
164 matrix.ncols(),
165 generation_time
166 );
167 let matrix_mean = matrix.clone().mean();
168 let matrix_std = matrix.var(1.0).sqrt();
169 println!(
170 " Matrix stats: mean={:.3}, std={:.3}",
171 matrix_mean, matrix_std
172 );
173
174 // Benchmark performance
175 println!("📊 Running performance benchmarks...");
176 let datashapes = vec![(100, 50), (500, 200), (1000, 500)];
177 let benchmark_results =
178 optimizer.benchmark_performance(&gpu_context, "matrix_generation", &datashapes)?;
179
180 println!(" Benchmark Results:");
181 println!(
182 " Best Speedup: {:.2}x",
183 benchmark_results.best_speedup()
184 );
185 println!(
186 " Average Speedup: {:.2}x",
187 benchmark_results.average_speedup()
188 );
189 println!(
190 " Total Memory Usage: {:.1} MB",
191 benchmark_results.total_memory_usage()
192 );
193
194 Ok(())
195}

Source§pub fn is_available(&self) -> bool
pub fn is_available(&self) -> bool
Check if GPU is available and functional
Examples found in repository?
examples/gpu_acceleration.rs (line 174)
138fn demonstrate_backend_comparison() -> Result<(), Box<dyn std::error::Error>> {
139 println!("⚡ GPU BACKEND COMPARISON");
140 println!("{}", "-".repeat(40));
141
142 let testsize = 50_000;
143 let features = 20;
144
145 println!("Comparing backends for {testsize} samples with {features} features:");
146
147 // Test different backends
148 let backends = vec![
149 ("CPU Fallback", GpuBackend::Cpu),
150 ("CUDA", GpuBackend::Cuda { device_id: 0 }),
151 (
152 "OpenCL",
153 GpuBackend::OpenCl {
154 platform_id: 0,
155 device_id: 0,
156 },
157 ),
158 ];
159
160 let mut results: HashMap<String, std::time::Duration> = HashMap::new();
161
162 for (name, backend) in backends {
163 println!("\nTesting {name}:");
164
165 let config = GpuConfig {
166 backend: backend.clone(),
167 threads_per_block: 256,
168 enable_double_precision: true,
169 ..Default::default()
170 };
171
172 match GpuContext::new(config) {
173 Ok(context) => {
174 if context.is_available() {
175 // Test classification generation
176 let start = Instant::now();
177 let dataset =
178 context.make_classification_gpu(testsize, features, 5, 2, 15, Some(42))?;
179 let duration = start.elapsed();
180
181 results.insert(name.to_string(), duration);
182
183 println!(
184 " ✅ Classification: {} samples in {:.2}ms",
185 dataset.n_samples(),
186 duration.as_millis()
187 );
188 println!(
189 " 📊 Throughput: {:.1} samples/s",
190 dataset.n_samples() as f64 / duration.as_secs_f64()
191 );
192 } else {
193 println!(" ❌ Backend not available");
194 }
195 }
196 Err(e) => {
197 println!(" ❌ Error: {e}");
198 }
199 }
200 }
201
202 // Calculate speedups
203 if let Some(cpu_time) = results.get("CPU Fallback") {
204 println!("\nSpeedup Analysis:");
205 for (backend, gpu_time) in &results {
206 if backend != "CPU Fallback" {
207 let speedup = cpu_time.as_secs_f64() / gpu_time.as_secs_f64();
208 println!(" {backend}: {speedup:.1}x faster than CPU");
209 }
210 }
211 }
212
213 println!();
214 Ok(())
215}

Source§pub fn make_classification_gpu(
&self,
n_samples: usize,
n_features: usize,
n_classes: usize,
n_clusters_per_class: usize,
n_informative: usize,
random_state: Option<u64>,
) -> Result<Dataset>
pub fn make_classification_gpu( &self, n_samples: usize, n_features: usize, n_classes: usize, n_clusters_per_class: usize, n_informative: usize, random_state: Option<u64>, ) -> Result<Dataset>
Generate classification dataset on GPU
Examples found in repository?
examples/gpu_acceleration.rs (line 178)
138fn demonstrate_backend_comparison() -> Result<(), Box<dyn std::error::Error>> {
139 println!("⚡ GPU BACKEND COMPARISON");
140 println!("{}", "-".repeat(40));
141
142 let testsize = 50_000;
143 let features = 20;
144
145 println!("Comparing backends for {testsize} samples with {features} features:");
146
147 // Test different backends
148 let backends = vec![
149 ("CPU Fallback", GpuBackend::Cpu),
150 ("CUDA", GpuBackend::Cuda { device_id: 0 }),
151 (
152 "OpenCL",
153 GpuBackend::OpenCl {
154 platform_id: 0,
155 device_id: 0,
156 },
157 ),
158 ];
159
160 let mut results: HashMap<String, std::time::Duration> = HashMap::new();
161
162 for (name, backend) in backends {
163 println!("\nTesting {name}:");
164
165 let config = GpuConfig {
166 backend: backend.clone(),
167 threads_per_block: 256,
168 enable_double_precision: true,
169 ..Default::default()
170 };
171
172 match GpuContext::new(config) {
173 Ok(context) => {
174 if context.is_available() {
175 // Test classification generation
176 let start = Instant::now();
177 let dataset =
178 context.make_classification_gpu(testsize, features, 5, 2, 15, Some(42))?;
179 let duration = start.elapsed();
180
181 results.insert(name.to_string(), duration);
182
183 println!(
184 " ✅ Classification: {} samples in {:.2}ms",
185 dataset.n_samples(),
186 duration.as_millis()
187 );
188 println!(
189 " 📊 Throughput: {:.1} samples/s",
190 dataset.n_samples() as f64 / duration.as_secs_f64()
191 );
192 } else {
193 println!(" ❌ Backend not available");
194 }
195 }
196 Err(e) => {
197 println!(" ❌ Error: {e}");
198 }
199 }
200 }
201
202 // Calculate speedups
203 if let Some(cpu_time) = results.get("CPU Fallback") {
204 println!("\nSpeedup Analysis:");
205 for (backend, gpu_time) in &results {
206 if backend != "CPU Fallback" {
207 let speedup = cpu_time.as_secs_f64() / gpu_time.as_secs_f64();
208 println!(" {backend}: {speedup:.1}x faster than CPU");
209 }
210 }
211 }
212
213 println!();
214 Ok(())
215}

Source§pub fn make_regression_gpu(
&self,
n_samples: usize,
n_features: usize,
n_informative: usize,
noise: f64,
random_state: Option<u64>,
) -> Result<Dataset>
pub fn make_regression_gpu( &self, n_samples: usize, n_features: usize, n_informative: usize, noise: f64, random_state: Option<u64>, ) -> Result<Dataset>
Generate regression dataset on GPU
Examples found in repository?
examples/gpu_acceleration.rs (line 325)
277fn demonstrate_memory_management() -> Result<(), Box<dyn std::error::Error>> {
278 println!("💾 GPU MEMORY MANAGEMENT");
279 println!("{}", "-".repeat(40));
280
281 // Configure memory-constrained GPU context
282 let memory_config = GpuMemoryConfig {
283 max_memory_mb: Some(512), // Limit to 512MB
284 pool_size_mb: 256, // 256MB pool
285 enable_coalescing: true, // Enable memory coalescing
286 use_unified_memory: false, // Don't use unified memory
287 };
288
289 let gpu_config = GpuConfig {
290 backend: get_optimal_gpu_config().backend,
291 memory: memory_config,
292 threads_per_block: 256,
293 ..Default::default()
294 };
295
296 println!("Memory Configuration:");
297 println!(
298 " Max Memory: {} MB",
299 gpu_config.memory.max_memory_mb.unwrap_or(0)
300 );
301 println!(" Pool Size: {} MB", gpu_config.memory.pool_size_mb);
302 println!(" Coalescing: {}", gpu_config.memory.enable_coalescing);
303 println!(" Unified Memory: {}", gpu_config.memory.use_unified_memory);
304
305 let context = GpuContext::new(gpu_config)?;
306 let device_info = context.device_info();
307
308 println!("\nDevice Memory Info:");
309 println!(" Total: {} MB", device_info.total_memory_mb);
310 println!(" Available: {} MB", device_info.available_memory_mb);
311 println!(
312 " Utilization: {:.1}%",
313 (device_info.total_memory_mb - device_info.available_memory_mb) as f64
314 / device_info.total_memory_mb as f64
315 * 100.0
316 );
317
318 // Test memory-efficient generation
319 println!("\nTesting memory-efficient dataset generation...");
320
321 let sizes = vec![10_000, 25_000, 50_000];
322 for &size in &sizes {
323 let start = Instant::now();
324
325 match context.make_regression_gpu(size, 50, 30, 0.1, Some(42)) {
326 Ok(dataset) => {
327 let duration = start.elapsed();
328 let memory_estimate = dataset.n_samples() * dataset.n_features() * 8; // 8 bytes per f64
329
330 println!(
331 " {} samples: {:.1}ms (~{:.1} MB)",
332 size,
333 duration.as_millis(),
334 memory_estimate as f64 / (1024.0 * 1024.0)
335 );
336 }
337 Err(e) => {
338 println!(" {size} samples: Failed - {e}");
339 }
340 }
341 }
342
343 println!();
344 Ok(())
345}

Source§pub fn make_blobs_gpu(
&self,
n_samples: usize,
n_features: usize,
n_centers: usize,
cluster_std: f64,
random_state: Option<u64>,
) -> Result<Dataset>
pub fn make_blobs_gpu( &self, n_samples: usize, n_features: usize, n_centers: usize, cluster_std: f64, random_state: Option<u64>, ) -> Result<Dataset>
Generate clustering dataset (blobs) on GPU
Auto Trait Implementations§
impl Freeze for GpuContext
impl RefUnwindSafe for GpuContext
impl Send for GpuContext
impl Sync for GpuContext
impl Unpin for GpuContext
impl UnwindSafe for GpuContext
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts
self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more

Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more

Source§impl<T> Pointable for T
impl<T> Pointable for T
Source§impl<SS, SP> SupersetOf<SS> for SP where
SS: SubsetOf<SP>,
impl<SS, SP> SupersetOf<SS> for SP where
SS: SubsetOf<SP>,
Source§fn to_subset(&self) -> Option<SS>
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct
self from the equivalent element of its
superset. Read more

Source§fn is_in_subset(&self) -> bool
fn is_in_subset(&self) -> bool
Checks if
self is actually part of its subset T (and can be converted to it).

Source§fn to_subset_unchecked(&self) -> SS
fn to_subset_unchecked(&self) -> SS
Use with care! Same as
self.to_subset but without any property checks. Always succeeds.

Source§fn from_subset(element: &SS) -> SP
fn from_subset(element: &SS) -> SP
The inclusion map: converts
self to the equivalent element of its superset.