pub struct GridSearchCV<E> {
pub best_params_: HashMap<String, String>,
pub best_score_: f64,
pub best_estimator_: E,
/* private fields */
}
Cross-validated grid search for hyperparameter tuning.
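A minimal sketch of the intended workflow, pieced together from the repository example further below. The estimator type (QuantumSVC), the import paths, the crate's Result alias, and the parameter name "C" are assumptions; only the GridSearchCV calls and field names themselves are taken from this page.

use std::collections::HashMap;
use ndarray::{Array1, Array2};

// `QuantumSVC` stands in for any estimator implementing SklearnClassifier + Clone;
// paths and the Result alias are assumed from the example below.
fn tune_classifier(x: &Array2<f64>, y: &Array1<f64>) -> Result<()> {
    // Candidate values are supplied as strings, matching the field types above.
    // Which parameter names an estimator accepts is estimator-specific (an assumption here).
    let param_grid = HashMap::from([(
        "C".to_string(),
        vec!["0.1".to_string(), "1.0".to_string(), "10.0".to_string()],
    )]);

    // Exhaustive search over the grid with 3-fold cross-validation.
    let mut grid_search = GridSearchCV::new(QuantumSVC::new(), param_grid, 3);
    grid_search.fit(x, y)?;

    println!("Best parameters: {:?}", grid_search.best_params_);
    println!("Best CV score:   {:.3}", grid_search.best_score_);

    // The repository example uses the selected model directly for prediction.
    let _y_pred = grid_search.best_estimator_.predict(x)?;
    Ok(())
}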
Fields

best_params_: HashMap<String, String>
    Best parameters

best_score_: f64
    Best score

best_estimator_: E
    Best estimator
Implementations

impl<E> GridSearchCV<E>
where
    E: SklearnClassifier + Clone,
pub fn new(
    estimator: E,
    param_grid: HashMap<String, Vec<String>>,
    cv: usize,
) -> Self
Create a new grid search that evaluates each combination in param_grid with cv-fold cross-validation.
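When the estimator is a Pipeline, the param_grid keys in the repository example below address nested step parameters with sklearn's double-underscore naming (for example classifier__C and preprocessing__feature_selection__k). How deeply this crate resolves such keys is inferred from that example and is an assumption. A hedged sketch of building such a grid, copied almost verbatim from the example:

use std::collections::HashMap;

// Keys name the target step (or nested steps) and parameter;
// values list the candidate settings as strings.
let param_grid: HashMap<String, Vec<String>> = HashMap::from([
    (
        "classifier__C".to_string(),
        vec!["0.1".to_string(), "1.0".to_string(), "10.0".to_string()],
    ),
    (
        "preprocessing__feature_selection__k".to_string(),
        vec!["2".to_string(), "3".to_string(), "4".to_string()],
    ),
]);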
Examples found in repository
examples/sklearn_pipeline_demo.rs (lines 136-140)
16fn main() -> Result<()> {
17 println!("=== Scikit-learn Compatible Quantum ML Demo ===\n");
18
19 // Step 1: Create sklearn-style dataset
20 println!("1. Creating scikit-learn style dataset...");
21
22 let (X, y) = create_sklearn_dataset()?;
23 println!(" - Dataset shape: {:?}", X.dim());
24 println!(
25 " - Labels: {} classes",
26 y.iter()
27 .map(|&x| x as i32)
28 .collect::<std::collections::HashSet<_>>()
29 .len()
30 );
31 println!(
32 " - Feature range: [{:.3}, {:.3}]",
33 X.iter().fold(f64::INFINITY, |a, &b| a.min(b)),
34 X.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b))
35 );
36
37 // Step 2: Create sklearn-compatible quantum estimators
38 println!("\n2. Creating sklearn-compatible quantum estimators...");
39
40 // Quantum Support Vector Classifier
41 let qsvc = QuantumSVC::new();
42
43 // Quantum Multi-Layer Perceptron Classifier
44 let qmlp = QuantumMLPClassifier::new();
45
46 // Quantum K-Means Clustering
47 let mut qkmeans = QuantumKMeans::new(2); // n_clusters
48
49 println!(" - QuantumSVC: quantum kernel");
50 println!(" - QuantumMLP: multi-layer perceptron");
51 println!(" - QuantumKMeans: 2 clusters");
52
53 // Step 3: Create sklearn-style preprocessing pipeline
54 println!("\n3. Building sklearn-compatible preprocessing pipeline...");
55
56 let preprocessing_pipeline = Pipeline::new(vec![
57 ("scaler", Box::new(StandardScaler::new())),
58 (
59 "feature_selection",
60 Box::new(SelectKBest::new(
61 "quantum_mutual_info", // score_func
62 3, // k
63 )),
64 ),
65 (
66 "quantum_encoder",
67 Box::new(QuantumFeatureEncoder::new(
68 "angle", // encoding_type
69 "l2", // normalization
70 )),
71 ),
72 ])?;
73
74 // Step 4: Create complete quantum ML pipeline
75 println!("\n4. Creating complete quantum ML pipeline...");
76
77 let quantum_pipeline = Pipeline::new(vec![
78 ("preprocessing", Box::new(preprocessing_pipeline)),
79 ("classifier", Box::new(qsvc)),
80 ])?;
81
82 println!(" Pipeline steps:");
83 for (i, step_name) in quantum_pipeline.named_steps().iter().enumerate() {
84 println!(" {}. {}", i + 1, step_name);
85 }
86
87 // Step 5: Train-test split (sklearn style)
88 println!("\n5. Performing train-test split...");
89
90 let (X_train, X_test, y_train, y_test) = model_selection::train_test_split(
91 &X,
92 &y,
93 0.3, // test_size
94 Some(42), // random_state
95 )?;
96
97 println!(" - Training set: {:?}", X_train.dim());
98 println!(" - Test set: {:?}", X_test.dim());
99
100 // Step 6: Cross-validation with quantum models
101 println!("\n6. Performing cross-validation...");
102
103 let mut pipeline_clone = quantum_pipeline.clone();
104 let cv_scores = model_selection::cross_val_score(
105 &mut pipeline_clone,
106 &X_train,
107 &y_train,
108 5, // cv
109 )?;
110
111 println!(" Cross-validation scores: {cv_scores:?}");
112 println!(
113 " Mean CV accuracy: {:.3} (+/- {:.3})",
114 cv_scores.mean().unwrap(),
115 cv_scores.std(0.0) * 2.0
116 );
117
118 // Step 7: Hyperparameter grid search
119 println!("\n7. Hyperparameter optimization with GridSearchCV...");
120
121 let param_grid = HashMap::from([
122 (
123 "classifier__C".to_string(),
124 vec!["0.1".to_string(), "1.0".to_string(), "10.0".to_string()],
125 ),
126 (
127 "classifier__feature_map_depth".to_string(),
128 vec!["1".to_string(), "2".to_string(), "3".to_string()],
129 ),
130 (
131 "preprocessing__feature_selection__k".to_string(),
132 vec!["2".to_string(), "3".to_string(), "4".to_string()],
133 ),
134 ]);
135
136 let mut grid_search = model_selection::GridSearchCV::new(
137 quantum_pipeline, // estimator
138 param_grid,
139 3, // cv
140 );
141
142 grid_search.fit(&X_train, &y_train)?;
143
144 println!(" Best parameters: {:?}", grid_search.best_params_);
145 println!(
146 " Best cross-validation score: {:.3}",
147 grid_search.best_score_
148 );
149
150 // Step 8: Train best model and evaluate
151 println!("\n8. Training best model and evaluation...");
152
153 let best_model = grid_search.best_estimator_;
154 let y_pred = best_model.predict(&X_test)?;
155
156 // Calculate metrics using sklearn-style functions
157 let y_test_int = y_test.mapv(|x| x.round() as i32);
158 let accuracy = metrics::accuracy_score(&y_test_int, &y_pred);
159 let precision = metrics::precision_score(&y_test_int, &y_pred, "weighted"); // average
160 let recall = metrics::recall_score(&y_test_int, &y_pred, "weighted"); // average
161 let f1 = metrics::f1_score(&y_test_int, &y_pred, "weighted"); // average
162
163 println!(" Test Results:");
164 println!(" - Accuracy: {accuracy:.3}");
165 println!(" - Precision: {precision:.3}");
166 println!(" - Recall: {recall:.3}");
167 println!(" - F1-score: {f1:.3}");
168
169 // Step 9: Classification report
170 println!("\n9. Detailed classification report...");
171
172 let classification_report = metrics::classification_report(
173 &y_test_int,
174 &y_pred,
175 vec!["Class 0", "Class 1"], // target_names
176 3, // digits
177 );
178 println!("{classification_report}");
179
180 // Step 10: Feature importance analysis
181 println!("\n10. Feature importance analysis...");
182
183 if let Some(feature_importances) = best_model.feature_importances() {
184 println!(" Quantum Feature Importances:");
185 for (i, importance) in feature_importances.iter().enumerate() {
186 println!(" - Feature {i}: {importance:.4}");
187 }
188 }
189
190 // Step 11: Model comparison with classical sklearn models
191 println!("\n11. Comparing with classical sklearn models...");
192
193 let classical_models = vec![
194 (
195 "Logistic Regression",
196 Box::new(LogisticRegression::new()) as Box<dyn SklearnClassifier>,
197 ),
198 (
199 "Random Forest",
200 Box::new(RandomForestClassifier::new()) as Box<dyn SklearnClassifier>,
201 ),
202 ("SVM", Box::new(SVC::new()) as Box<dyn SklearnClassifier>),
203 ];
204
205 let mut comparison_results = Vec::new();
206
207 for (name, mut model) in classical_models {
208 model.fit(&X_train, Some(&y_train))?;
209 let y_pred_classical = model.predict(&X_test)?;
210 let classical_accuracy = metrics::accuracy_score(&y_test_int, &y_pred_classical);
211 comparison_results.push((name, classical_accuracy));
212 }
213
214 println!(" Model Comparison:");
215 println!(" - Quantum Pipeline: {accuracy:.3}");
216 for (name, classical_accuracy) in comparison_results {
217 println!(" - {name}: {classical_accuracy:.3}");
218 }
219
220 // Step 12: Clustering with quantum K-means
221 println!("\n12. Quantum clustering analysis...");
222
223 let cluster_labels = qkmeans.fit_predict(&X)?;
224 let silhouette_score = metrics::silhouette_score(&X, &cluster_labels, "euclidean"); // metric
225 let calinski_score = metrics::calinski_harabasz_score(&X, &cluster_labels);
226
227 println!(" Clustering Results:");
228 println!(" - Silhouette Score: {silhouette_score:.3}");
229 println!(" - Calinski-Harabasz Score: {calinski_score:.3}");
230 println!(
231 " - Unique clusters found: {}",
232 cluster_labels
233 .iter()
234 .collect::<std::collections::HashSet<_>>()
235 .len()
236 );
237
238 // Step 13: Model persistence (sklearn style)
239 println!("\n13. Model persistence (sklearn joblib style)...");
240
241 // Save model
242 best_model.save("quantum_sklearn_model.joblib")?;
243 println!(" - Model saved to: quantum_sklearn_model.joblib");
244
245 // Load model
246 let loaded_model = QuantumSVC::load("quantum_sklearn_model.joblib")?;
247 let test_subset = X_test.slice(s![..5, ..]).to_owned();
248 let y_pred_loaded = loaded_model.predict(&test_subset)?;
249 println!(" - Model loaded and tested on 5 samples");
250
251 // Step 14: Advanced sklearn utilities
252 println!("\n14. Advanced sklearn utilities...");
253
254 // Learning curves (commented out - function not available)
255 // let (train_sizes, train_scores, val_scores) = model_selection::learning_curve(...)?;
256 println!(" Learning Curve Analysis: (Mock results)");
257 let train_sizes = [0.1, 0.33, 0.55, 0.78, 1.0];
258 let train_scores = [0.65, 0.72, 0.78, 0.82, 0.85];
259 let val_scores = [0.62, 0.70, 0.76, 0.79, 0.81];
260
261 for (i, &size) in train_sizes.iter().enumerate() {
262 println!(
263 " - {:.0}% data: train={:.3}, val={:.3}",
264 size * 100.0,
265 train_scores[i],
266 val_scores[i]
267 );
268 }
269
270 // Validation curves (commented out - function not available)
271 // let (train_scores_val, test_scores_val) = model_selection::validation_curve(...)?;
272 println!(" Validation Curve (C parameter): (Mock results)");
273 let param_range = [0.1, 0.5, 1.0, 2.0, 5.0];
274 let train_scores_val = [0.70, 0.75, 0.80, 0.78, 0.75];
275 let test_scores_val = [0.68, 0.73, 0.78, 0.76, 0.72];
276
277    for (i, &param_value) in param_range.iter().enumerate() {
278 println!(
279 " - C={}: train={:.3}, test={:.3}",
280 param_value, train_scores_val[i], test_scores_val[i]
281 );
282 }
283
284 // Step 15: Quantum-specific sklearn extensions
285 println!("\n15. Quantum-specific sklearn extensions...");
286
287 // Quantum feature analysis
288 let quantum_feature_analysis = analyze_quantum_features(&best_model, &X_test)?;
289 println!(" Quantum Feature Analysis:");
290 println!(
291 " - Quantum advantage score: {:.3}",
292 quantum_feature_analysis.advantage_score
293 );
294 println!(
295 " - Feature entanglement: {:.3}",
296 quantum_feature_analysis.entanglement_measure
297 );
298 println!(
299 " - Circuit depth efficiency: {:.3}",
300 quantum_feature_analysis.circuit_efficiency
301 );
302
303 // Quantum model interpretation
304 let sample_row = X_test.row(0).to_owned();
305 let quantum_interpretation = interpret_quantum_model(&best_model, &sample_row)?;
306 println!(" Quantum Model Interpretation (sample 0):");
307 println!(
308 " - Quantum state fidelity: {:.3}",
309 quantum_interpretation.state_fidelity
310 );
311 println!(
312 " - Feature contributions: {:?}",
313 quantum_interpretation.feature_contributions
314 );
315
316 println!("\n=== Scikit-learn Integration Demo Complete ===");
317
318 Ok(())
319}

pub fn fit(&mut self, X: &Array2<f64>, y: &Array1<f64>) -> Result<()>
Fit the grid search on X and y, selecting the best parameter combination.
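Judging by the constructor's cv argument and the "Best cross-validation score" printed in the repository example, fit appears to evaluate every parameter combination with cv-fold cross-validation and keep the best-scoring configuration; the exact refitting behaviour is not documented here and is an assumption. A minimal hedged call, continuing from a grid_search built with new:

use ndarray::{Array1, Array2};

// X is (n_samples, n_features); y holds one f64 label per sample,
// matching the shapes produced by train_test_split in the example below.
let x_train: Array2<f64> = Array2::zeros((80, 4)); // placeholder data
let y_train: Array1<f64> = Array1::zeros(80);      // placeholder labels

grid_search.fit(&x_train, &y_train)?;
// After a successful fit, best_params_, best_score_ and best_estimator_ are populated.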
Examples found in repository
examples/sklearn_pipeline_demo.rs (line 142): the same example shown above for new.
pub fn best_params(&self) -> &HashMap<String, String>
Get the best parameters found by the search.
pub fn best_score(&self) -> f64

Get the best cross-validation score.
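These accessors appear to mirror the public best_params_ and best_score_ fields. A short hedged sketch, assuming grid_search has already been fitted as in the repository example above (the classifier__C key is taken from that example):

// Continuing from a fitted `grid_search`:
let best_params = grid_search.best_params(); // &HashMap<String, String>
let best_score = grid_search.best_score();   // f64

if let Some(c) = best_params.get("classifier__C") {
    println!("selected C = {c}, best CV score = {best_score:.3}");
}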
Auto Trait Implementations

impl<E> Freeze for GridSearchCV<E> where E: Freeze
impl<E> RefUnwindSafe for GridSearchCV<E> where E: RefUnwindSafe
impl<E> Send for GridSearchCV<E> where E: Send
impl<E> Sync for GridSearchCV<E> where E: Sync
impl<E> Unpin for GridSearchCV<E> where E: Unpin
impl<E> UnwindSafe for GridSearchCV<E> where E: UnwindSafe
Blanket Implementations

impl<T> BorrowMut<T> for T where T: ?Sized

    fn borrow_mut(&mut self) -> &mut T
        Mutably borrows from an owned value.

impl<T> IntoEither for T

    fn into_either(self, into_left: bool) -> Either<Self, Self>
        Converts self into a Left variant of Either<Self, Self> if into_left is true,
        otherwise into a Right variant of Either<Self, Self>.

    fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
        Converts self into a Left variant of Either<Self, Self> if into_left(&self)
        returns true, otherwise into a Right variant of Either<Self, Self>.

impl<T> Pointable for T

impl<SS, SP> SupersetOf<SS> for SP where SS: SubsetOf<SP>

    fn to_subset(&self) -> Option<SS>
        The inverse inclusion map: attempts to construct self from the equivalent
        element of its superset.

    fn is_in_subset(&self) -> bool
        Checks if self is actually part of its subset T (and can be converted to it).

    fn to_subset_unchecked(&self) -> SS
        Use with care! Same as self.to_subset but without any property checks.
        Always succeeds.

    fn from_subset(element: &SS) -> SP
        The inclusion map: converts self to the equivalent element of its superset.