pub struct GridSearchCV<E> {
    pub best_params_: HashMap<String, String>,
    pub best_score_: f64,
    pub best_estimator_: E,
    /* private fields */
}
Grid search over a parameter grid with cross-validation for hyperparameter tuning.
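A minimal usage sketch, assuming an estimator type such as QuantumSVC that implements SklearnClassifier + Clone, ndarray inputs x_train: Array2<f64> and y_train: Array1<f64>, and a calling function that returns this crate's Result (all names other than the GridSearchCV API itself are illustrative):

use std::collections::HashMap;

// Candidate values for each hyperparameter, keyed by parameter name.
let param_grid = HashMap::from([(
    "C".to_string(),
    vec!["0.1".to_string(), "1.0".to_string(), "10.0".to_string()],
)]);

// Exhaustive search over the grid, scoring each combination with 3-fold cross-validation.
let mut grid_search = GridSearchCV::new(QuantumSVC::new(), param_grid, 3);
grid_search.fit(&x_train, &y_train)?;

// After fitting, the results are available as public fields.
println!("best params: {:?}", grid_search.best_params_);
println!("best CV score: {:.3}", grid_search.best_score_);
let best_model = grid_search.best_estimator_;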
Fields

best_params_: HashMap<String, String>
    Best parameters found by the search
best_score_: f64
    Best cross-validation score
best_estimator_: E
    Estimator corresponding to the best parameters
Implementations

impl<E> GridSearchCV<E>
where
    E: SklearnClassifier + Clone,

pub fn new(
    estimator: E,
    param_grid: HashMap<String, Vec<String>>,
    cv: usize,
) -> Self

Create a new grid search over param_grid, evaluating each parameter combination on estimator with cv-fold cross-validation.
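The grid maps parameter names to candidate values encoded as strings. In the repository example below, names with double underscores (e.g. classifier__C) appear to address parameters of nested pipeline steps; a small sketch of that convention, with step and parameter names taken from that example:

use std::collections::HashMap;

// "step__param" keys reach into named pipeline steps ("classifier", "preprocessing", ...).
let param_grid = HashMap::from([
    (
        "classifier__C".to_string(),
        vec!["0.1".to_string(), "1.0".to_string()],
    ),
    (
        "preprocessing__feature_selection__k".to_string(),
        vec!["2".to_string(), "3".to_string()],
    ),
]);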
Examples found in repository
examples/sklearn_pipeline_demo.rs (lines 137-141)
17 fn main() -> Result<()> {
18 println!("=== Scikit-learn Compatible Quantum ML Demo ===\n");
19
20 // Step 1: Create sklearn-style dataset
21 println!("1. Creating scikit-learn style dataset...");
22
23 let (X, y) = create_sklearn_dataset()?;
24 println!(" - Dataset shape: {:?}", X.dim());
25 println!(
26 " - Labels: {} classes",
27 y.iter()
28 .map(|&x| x as i32)
29 .collect::<std::collections::HashSet<_>>()
30 .len()
31 );
32 println!(
33 " - Feature range: [{:.3}, {:.3}]",
34 X.iter().fold(f64::INFINITY, |a, &b| a.min(b)),
35 X.iter().fold(f64::NEG_INFINITY, |a, &b| a.max(b))
36 );
37
38 // Step 2: Create sklearn-compatible quantum estimators
39 println!("\n2. Creating sklearn-compatible quantum estimators...");
40
41 // Quantum Support Vector Classifier
42 let qsvc = QuantumSVC::new();
43
44 // Quantum Multi-Layer Perceptron Classifier
45 let qmlp = QuantumMLPClassifier::new();
46
47 // Quantum K-Means Clustering
48 let mut qkmeans = QuantumKMeans::new(2); // n_clusters
49
50 println!(" - QuantumSVC: quantum kernel");
51 println!(" - QuantumMLP: multi-layer perceptron");
52 println!(" - QuantumKMeans: 2 clusters");
53
54 // Step 3: Create sklearn-style preprocessing pipeline
55 println!("\n3. Building sklearn-compatible preprocessing pipeline...");
56
57 let preprocessing_pipeline = Pipeline::new(vec![
58 ("scaler", Box::new(StandardScaler::new())),
59 (
60 "feature_selection",
61 Box::new(SelectKBest::new(
62 "quantum_mutual_info", // score_func
63 3, // k
64 )),
65 ),
66 (
67 "quantum_encoder",
68 Box::new(QuantumFeatureEncoder::new(
69 "angle", // encoding_type
70 "l2", // normalization
71 )),
72 ),
73 ])?;
74
75 // Step 4: Create complete quantum ML pipeline
76 println!("\n4. Creating complete quantum ML pipeline...");
77
78 let quantum_pipeline = Pipeline::new(vec![
79 ("preprocessing", Box::new(preprocessing_pipeline)),
80 ("classifier", Box::new(qsvc)),
81 ])?;
82
83 println!(" Pipeline steps:");
84 for (i, step_name) in quantum_pipeline.named_steps().iter().enumerate() {
85 println!(" {}. {}", i + 1, step_name);
86 }
87
88 // Step 5: Train-test split (sklearn style)
89 println!("\n5. Performing train-test split...");
90
91 let (X_train, X_test, y_train, y_test) = model_selection::train_test_split(
92 &X,
93 &y,
94 0.3, // test_size
95 Some(42), // random_state
96 )?;
97
98 println!(" - Training set: {:?}", X_train.dim());
99 println!(" - Test set: {:?}", X_test.dim());
100
101 // Step 6: Cross-validation with quantum models
102 println!("\n6. Performing cross-validation...");
103
104 let mut pipeline_clone = quantum_pipeline.clone();
105 let cv_scores = model_selection::cross_val_score(
106 &mut pipeline_clone,
107 &X_train,
108 &y_train,
109 5, // cv
110 )?;
111
112 println!(" Cross-validation scores: {cv_scores:?}");
113 println!(
114 " Mean CV accuracy: {:.3} (+/- {:.3})",
115 cv_scores.mean().unwrap(),
116 cv_scores.std(0.0) * 2.0
117 );
118
119 // Step 7: Hyperparameter grid search
120 println!("\n7. Hyperparameter optimization with GridSearchCV...");
121
122 let param_grid = HashMap::from([
123 (
124 "classifier__C".to_string(),
125 vec!["0.1".to_string(), "1.0".to_string(), "10.0".to_string()],
126 ),
127 (
128 "classifier__feature_map_depth".to_string(),
129 vec!["1".to_string(), "2".to_string(), "3".to_string()],
130 ),
131 (
132 "preprocessing__feature_selection__k".to_string(),
133 vec!["2".to_string(), "3".to_string(), "4".to_string()],
134 ),
135 ]);
136
137 let mut grid_search = model_selection::GridSearchCV::new(
138 quantum_pipeline, // estimator
139 param_grid,
140 3, // cv
141 );
142
143 grid_search.fit(&X_train, &y_train)?;
144
145 println!(" Best parameters: {:?}", grid_search.best_params_);
146 println!(
147 " Best cross-validation score: {:.3}",
148 grid_search.best_score_
149 );
150
151 // Step 8: Train best model and evaluate
152 println!("\n8. Training best model and evaluation...");
153
154 let best_model = grid_search.best_estimator_;
155 let y_pred = best_model.predict(&X_test)?;
156
157 // Calculate metrics using sklearn-style functions
158 let y_test_int = y_test.mapv(|x| x.round() as i32);
159 let accuracy = metrics::accuracy_score(&y_test_int, &y_pred);
160 let precision = metrics::precision_score(&y_test_int, &y_pred, "weighted"); // average
161 let recall = metrics::recall_score(&y_test_int, &y_pred, "weighted"); // average
162 let f1 = metrics::f1_score(&y_test_int, &y_pred, "weighted"); // average
163
164 println!(" Test Results:");
165 println!(" - Accuracy: {accuracy:.3}");
166 println!(" - Precision: {precision:.3}");
167 println!(" - Recall: {recall:.3}");
168 println!(" - F1-score: {f1:.3}");
169
170 // Step 9: Classification report
171 println!("\n9. Detailed classification report...");
172
173 let classification_report = metrics::classification_report(
174 &y_test_int,
175 &y_pred,
176 vec!["Class 0", "Class 1"], // target_names
177 3, // digits
178 );
179 println!("{classification_report}");
180
181 // Step 10: Feature importance analysis
182 println!("\n10. Feature importance analysis...");
183
184 if let Some(feature_importances) = best_model.feature_importances() {
185 println!(" Quantum Feature Importances:");
186 for (i, importance) in feature_importances.iter().enumerate() {
187 println!(" - Feature {i}: {importance:.4}");
188 }
189 }
190
191 // Step 11: Model comparison with classical sklearn models
192 println!("\n11. Comparing with classical sklearn models...");
193
194 let classical_models = vec![
195 (
196 "Logistic Regression",
197 Box::new(LogisticRegression::new()) as Box<dyn SklearnClassifier>,
198 ),
199 (
200 "Random Forest",
201 Box::new(RandomForestClassifier::new()) as Box<dyn SklearnClassifier>,
202 ),
203 ("SVM", Box::new(SVC::new()) as Box<dyn SklearnClassifier>),
204 ];
205
206 let mut comparison_results = Vec::new();
207
208 for (name, mut model) in classical_models {
209 model.fit(&X_train, Some(&y_train))?;
210 let y_pred_classical = model.predict(&X_test)?;
211 let classical_accuracy = metrics::accuracy_score(&y_test_int, &y_pred_classical);
212 comparison_results.push((name, classical_accuracy));
213 }
214
215 println!(" Model Comparison:");
216 println!(" - Quantum Pipeline: {accuracy:.3}");
217 for (name, classical_accuracy) in comparison_results {
218 println!(" - {name}: {classical_accuracy:.3}");
219 }
220
221 // Step 12: Clustering with quantum K-means
222 println!("\n12. Quantum clustering analysis...");
223
224 let cluster_labels = qkmeans.fit_predict(&X)?;
225 let silhouette_score = metrics::silhouette_score(&X, &cluster_labels, "euclidean"); // metric
226 let calinski_score = metrics::calinski_harabasz_score(&X, &cluster_labels);
227
228 println!(" Clustering Results:");
229 println!(" - Silhouette Score: {silhouette_score:.3}");
230 println!(" - Calinski-Harabasz Score: {calinski_score:.3}");
231 println!(
232 " - Unique clusters found: {}",
233 cluster_labels
234 .iter()
235 .collect::<std::collections::HashSet<_>>()
236 .len()
237 );
238
239 // Step 13: Model persistence (sklearn style)
240 println!("\n13. Model persistence (sklearn joblib style)...");
241
242 // Save model
243 best_model.save("quantum_sklearn_model.joblib")?;
244 println!(" - Model saved to: quantum_sklearn_model.joblib");
245
246 // Load model
247 let loaded_model = QuantumSVC::load("quantum_sklearn_model.joblib")?;
248 let test_subset = X_test.slice(s![..5, ..]).to_owned();
249 let y_pred_loaded = loaded_model.predict(&test_subset)?;
250 println!(" - Model loaded and tested on 5 samples");
251
252 // Step 14: Advanced sklearn utilities
253 println!("\n14. Advanced sklearn utilities...");
254
255 // Learning curves (commented out - function not available)
256 // let (train_sizes, train_scores, val_scores) = model_selection::learning_curve(...)?;
257 println!(" Learning Curve Analysis: (Mock results)");
258 let train_sizes = [0.1, 0.33, 0.55, 0.78, 1.0];
259 let train_scores = [0.65, 0.72, 0.78, 0.82, 0.85];
260 let val_scores = [0.62, 0.70, 0.76, 0.79, 0.81];
261
262 for (i, &size) in train_sizes.iter().enumerate() {
263 println!(
264 " - {:.0}% data: train={:.3}, val={:.3}",
265 size * 100.0,
266 train_scores[i],
267 val_scores[i]
268 );
269 }
270
271 // Validation curves (commented out - function not available)
272 // let (train_scores_val, test_scores_val) = model_selection::validation_curve(...)?;
273 println!(" Validation Curve (C parameter): (Mock results)");
274 let param_range = [0.1, 0.5, 1.0, 2.0, 5.0];
275 let train_scores_val = [0.70, 0.75, 0.80, 0.78, 0.75];
276 let test_scores_val = [0.68, 0.73, 0.78, 0.76, 0.72];
277
278 for (i, &param_value) in param_range.iter().enumerate() {
279 println!(
280 " - C={}: train={:.3}, test={:.3}",
281 param_value, train_scores_val[i], test_scores_val[i]
282 );
283 }
284
285 // Step 15: Quantum-specific sklearn extensions
286 println!("\n15. Quantum-specific sklearn extensions...");
287
288 // Quantum feature analysis
289 let quantum_feature_analysis = analyze_quantum_features(&best_model, &X_test)?;
290 println!(" Quantum Feature Analysis:");
291 println!(
292 " - Quantum advantage score: {:.3}",
293 quantum_feature_analysis.advantage_score
294 );
295 println!(
296 " - Feature entanglement: {:.3}",
297 quantum_feature_analysis.entanglement_measure
298 );
299 println!(
300 " - Circuit depth efficiency: {:.3}",
301 quantum_feature_analysis.circuit_efficiency
302 );
303
304 // Quantum model interpretation
305 let sample_row = X_test.row(0).to_owned();
306 let quantum_interpretation = interpret_quantum_model(&best_model, &sample_row)?;
307 println!(" Quantum Model Interpretation (sample 0):");
308 println!(
309 " - Quantum state fidelity: {:.3}",
310 quantum_interpretation.state_fidelity
311 );
312 println!(
313 " - Feature contributions: {:?}",
314 quantum_interpretation.feature_contributions
315 );
316
317 println!("\n=== Scikit-learn Integration Demo Complete ===");
318
319 Ok(())
320 }

pub fn fit(&mut self, X: &Array2<f64>, y: &Array1<f64>) -> Result<()>
Fit the grid search: evaluate every parameter combination with cross-validation and record the best parameters, score, and estimator.
Examples found in repository
examples/sklearn_pipeline_demo.rs (line 143); the relevant excerpt from the example shown above:
137 let mut grid_search = model_selection::GridSearchCV::new(
138 quantum_pipeline, // estimator
139 param_grid,
140 3, // cv
141 );
142
143 grid_search.fit(&X_train, &y_train)?;
144
145 println!(" Best parameters: {:?}", grid_search.best_params_);
146 println!(
147 " Best cross-validation score: {:.3}",
148 grid_search.best_score_
149 );

pub fn best_params(&self) -> &HashMap<String, String>
Get the best parameters found by the search.

pub fn best_score(&self) -> f64

Get the best cross-validation score.
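A short sketch of reading results through the accessor methods once fit has completed (the "C" key is illustrative; it is only present if it was part of the parameter grid):

// Borrow the winning parameter assignment without moving it out of the struct.
let best: &std::collections::HashMap<String, String> = grid_search.best_params();
if let Some(c) = best.get("C") {
    println!("selected C = {c}");
}

// Best cross-validation score of the winning combination.
println!("best score = {:.3}", grid_search.best_score());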
Auto Trait Implementations

impl<E> Freeze for GridSearchCV<E>
where
    E: Freeze,

impl<E> RefUnwindSafe for GridSearchCV<E>
where
    E: RefUnwindSafe,

impl<E> Send for GridSearchCV<E>
where
    E: Send,

impl<E> Sync for GridSearchCV<E>
where
    E: Sync,

impl<E> Unpin for GridSearchCV<E>
where
    E: Unpin,

impl<E> UnwindSafe for GridSearchCV<E>
where
    E: UnwindSafe,
Blanket Implementations

impl<T> BorrowMut<T> for T
where
    T: ?Sized,

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.

impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise.

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise.

impl<T> Pointable for T

impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,

fn to_subset(&self) -> Option<SS>

The inverse inclusion map: attempts to construct self from the equivalent element of its superset.

fn is_in_subset(&self) -> bool

Checks if self is actually part of its subset T (and can be converted to it).

fn to_subset_unchecked(&self) -> SS

Use with care! Same as self.to_subset but without any property checks. Always succeeds.

fn from_subset(element: &SS) -> SP

The inclusion map: converts self to the equivalent element of its superset.