// oxirs_core/query/optimizer.rs

//! AI-powered query optimization with learned cost models
//!
//! This module implements advanced query optimization using machine learning
//! techniques to improve query performance based on historical patterns.

#![allow(dead_code)]

use crate::indexing::IndexStats;
use crate::model::Variable;
use crate::query::algebra::{
    AlgebraTriplePattern, GraphPattern, Query as AlgebraQuery, QueryForm, TermPattern,
};
use crate::query::plan::{ExecutionPlan, QueryPlanner};
use crate::OxirsError;
use std::collections::{HashMap, VecDeque};
use std::sync::{Arc, RwLock};
use std::time::{Duration, Instant};

/// Cost model for query optimization.
///
/// Combines a bounded history of past executions with learned per-predicate
/// and per-join parameters to score candidate execution plans. All shared
/// state lives behind `Arc<RwLock<...>>` so the model can be read and
/// updated concurrently.
#[derive(Debug, Clone)]
pub struct CostModel {
    /// Historical query execution times (bounded FIFO of recent patterns)
    execution_history: Arc<RwLock<QueryHistory>>,
    /// Learned parameters for cost estimation
    learned_parameters: Arc<RwLock<LearnedParameters>>,
    /// Index statistics
    index_stats: Arc<IndexStats>,
}

/// Historical query execution data.
///
/// Behaves as a bounded FIFO: once `max_size` entries are recorded, the
/// oldest pattern is evicted on each new insertion (see `add_execution`).
#[derive(Debug, Default)]
struct QueryHistory {
    /// Recent query patterns and their execution times, oldest first
    patterns: VecDeque<(QueryPattern, ExecutionMetrics)>,
    /// Maximum number of retained entries before eviction kicks in
    max_size: usize,
}

/// Learned parameters from query history.
///
/// Each map acts as a multiplicative adjustment looked up during cost
/// estimation; missing keys mean "no learned adjustment yet".
#[derive(Debug, Default)]
struct LearnedParameters {
    /// Cost multiplier per triple scan, keyed by predicate IRI string
    scan_costs: HashMap<String, f64>,
    /// Join selectivity by join pattern
    join_selectivities: HashMap<JoinPattern, f64>,
    /// Filter selectivity keyed by expression type name
    filter_selectivities: HashMap<String, f64>,
}

/// Query pattern for learning.
///
/// A coarse structural fingerprint of a query, used both as a history key
/// and for similarity matching (`patterns_similar`).
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
struct QueryPattern {
    /// Number of triple patterns in the WHERE clause BGP
    num_patterns: usize,
    /// Named predicates used (IRI strings; variable predicates are skipped)
    predicates: Vec<String>,
    /// Pairwise join types detected between triple patterns
    join_types: Vec<JoinType>,
    /// Whether the pattern contains a FILTER
    has_filter: bool,
}

/// Join pattern for selectivity estimation.
///
/// Used as a key into `LearnedParameters::join_selectivities`.
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
struct JoinPattern {
    /// Number of variables involved in the join
    num_vars: usize,
    /// Types of terms being joined (string-encoded term kinds)
    term_types: Vec<String>,
}

/// Types of joins between two triple patterns, classified by which
/// positions share a variable (see `AIQueryOptimizer::get_join_type`).
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
enum JoinType {
    /// Both patterns bind the same subject variable
    SubjectSubject,
    /// One pattern's subject variable appears as the other's object
    SubjectObject,
    /// Both patterns bind the same object variable
    ObjectObject,
    /// Both patterns share a predicate variable (rare in practice)
    PredicatePredicate,
}

/// Execution metrics for a query.
#[derive(Debug, Clone)]
struct ExecutionMetrics {
    /// Total execution time
    execution_time: Duration,
    /// Number of results returned
    result_count: usize,
    /// Memory usage (presumably bytes, per the `1024 * 1024` placeholder in
    /// `update_learning_model` — confirm against real producers)
    memory_used: usize,
    /// CPU utilization as a percentage (assumed 0.0..=100.0 — verify)
    cpu_percent: f32,
}

/// AI-powered query optimizer.
///
/// Wraps the heuristic `QueryPlanner`, re-ranks its candidate plans with a
/// learned `CostModel`, consults a predictive cache, and finally tunes the
/// winning plan for the detected hardware.
pub struct AIQueryOptimizer {
    /// Base query planner used to produce the initial candidate plan
    base_planner: QueryPlanner,
    /// Cost model for optimization
    cost_model: CostModel,
    /// Query cache for predictive caching
    query_cache: Arc<RwLock<QueryCache>>,
    /// Hardware capabilities detected at construction time
    hardware_info: HardwareInfo,
}

/// Predictive query cache.
#[derive(Debug, Default)]
struct QueryCache {
    /// Cached query results, keyed by a query identifier (presumably the
    /// same hash recorded in `AccessPattern::query_hash` — confirm)
    cache: HashMap<String, CachedResult>,
    /// Recent access events used to predict future queries
    access_patterns: VecDeque<AccessPattern>,
    /// Maximum number of cached entries
    max_size: usize,
}

/// Cached query result.
#[derive(Debug, Clone)]
struct CachedResult {
    /// The cached data (serialized result bytes)
    data: Vec<u8>,
    /// When it was cached (for expiry decisions)
    cached_at: Instant,
    /// How many times this entry has been accessed
    access_count: usize,
    /// Last access time (for recency-based eviction)
    last_accessed: Instant,
}

/// Query access pattern — one observed cache access event.
#[derive(Debug, Clone)]
struct AccessPattern {
    /// Query hash identifying the accessed query
    query_hash: String,
    /// Time of the access
    accessed_at: Instant,
    /// User or session ID that issued the query
    session_id: String,
}

/// Hardware information for adaptive optimization.
///
/// Populated once by `HardwareInfo::detect`; some fields are currently
/// static defaults rather than real probes (see `detect`).
#[derive(Debug, Clone)]
struct HardwareInfo {
    /// Number of CPU cores
    cpu_cores: usize,
    /// Available memory in bytes
    memory_bytes: usize,
    /// CPU architecture features
    cpu_features: CpuFeatures,
    /// GPU availability (currently always false — detection not implemented)
    gpu_available: bool,
}

/// CPU features for optimization.
#[derive(Debug, Clone)]
struct CpuFeatures {
    /// SIMD support (SSE2 compile-time flag)
    has_simd: bool,
    /// AVX2 support (compile-time flag)
    has_avx2: bool,
    /// Cache line size in bytes (assumed 64 — not probed at runtime)
    cache_line_size: usize,
}

165impl AIQueryOptimizer {
166    /// Create a new AI-powered query optimizer
167    pub fn new(index_stats: Arc<IndexStats>) -> Self {
168        Self {
169            base_planner: QueryPlanner::new(),
170            cost_model: CostModel::new(index_stats),
171            query_cache: Arc::new(RwLock::new(QueryCache::new())),
172            hardware_info: HardwareInfo::detect(),
173        }
174    }
175
176    /// Optimize a query using AI techniques
177    pub fn optimize_query(&self, query: &AlgebraQuery) -> Result<OptimizedPlan, OxirsError> {
178        // Extract query pattern for learning
179        let pattern = self.extract_query_pattern(query)?;
180
181        // Check cache for similar queries
182        if let Some(cached) = self.check_predictive_cache(&pattern) {
183            return Ok(cached);
184        }
185
186        // Generate multiple candidate plans
187        let candidates = self.generate_candidate_plans(query)?;
188
189        // Estimate costs using learned model
190        let mut best_plan = None;
191        let mut best_cost = f64::MAX;
192
193        for candidate in candidates {
194            let cost = self.estimate_cost(&candidate, &pattern)?;
195            if cost < best_cost {
196                best_cost = cost;
197                best_plan = Some(candidate);
198            }
199        }
200
201        let plan = best_plan
202            .ok_or_else(|| OxirsError::Query("No valid execution plan found".to_string()))?;
203
204        // Apply hardware-specific optimizations
205        let optimized = self.apply_hardware_optimizations(plan)?;
206
207        // Update learning model
208        self.update_learning_model(&pattern, &optimized);
209
210        Ok(optimized)
211    }
212
213    /// Extract pattern from query for learning
214    fn extract_query_pattern(&self, query: &AlgebraQuery) -> Result<QueryPattern, OxirsError> {
215        match &query.form {
216            QueryForm::Select { where_clause, .. } => {
217                let (num_patterns, predicates, join_types) =
218                    self.analyze_graph_pattern(where_clause)?;
219
220                Ok(QueryPattern {
221                    num_patterns,
222                    predicates,
223                    join_types,
224                    has_filter: self.has_filter(where_clause),
225                })
226            }
227            _ => Err(OxirsError::Query("Unsupported query form".to_string())),
228        }
229    }
230
231    /// Analyze graph pattern for optimization
232    fn analyze_graph_pattern(
233        &self,
234        pattern: &GraphPattern,
235    ) -> Result<(usize, Vec<String>, Vec<JoinType>), OxirsError> {
236        match pattern {
237            GraphPattern::Bgp(patterns) => {
238                let num_patterns = patterns.len();
239                let mut predicates = Vec::new();
240                let mut join_types = Vec::new();
241
242                // Extract predicates
243                for triple in patterns {
244                    if let TermPattern::NamedNode(pred) = &triple.predicate {
245                        predicates.push(pred.as_str().to_string());
246                    }
247                }
248
249                // Analyze join types between patterns
250                for i in 0..patterns.len() {
251                    for j in (i + 1)..patterns.len() {
252                        if let Some(join_type) = self.get_join_type(&patterns[i], &patterns[j]) {
253                            join_types.push(join_type);
254                        }
255                    }
256                }
257
258                Ok((num_patterns, predicates, join_types))
259            }
260            _ => Ok((0, Vec::new(), Vec::new())),
261        }
262    }
263
264    /// Determine join type between triple patterns
265    fn get_join_type(
266        &self,
267        left: &AlgebraTriplePattern,
268        right: &AlgebraTriplePattern,
269    ) -> Option<JoinType> {
270        // Check if subjects match
271        if self.patterns_match(&left.subject, &right.subject) {
272            return Some(JoinType::SubjectSubject);
273        }
274
275        // Check subject-object join
276        if self.patterns_match(&left.subject, &right.object) {
277            return Some(JoinType::SubjectObject);
278        }
279
280        // Check object-object join
281        if self.patterns_match(&left.object, &right.object) {
282            return Some(JoinType::ObjectObject);
283        }
284
285        // Check predicate join (rare)
286        if self.patterns_match(&left.predicate, &right.predicate) {
287            return Some(JoinType::PredicatePredicate);
288        }
289
290        None
291    }
292
293    /// Check if two term patterns match (share a variable)
294    fn patterns_match(&self, left: &TermPattern, right: &TermPattern) -> bool {
295        match (left, right) {
296            (TermPattern::Variable(v1), TermPattern::Variable(v2)) => v1 == v2,
297            _ => false,
298        }
299    }
300
301    /// Check if pattern has filters
302    #[allow(clippy::only_used_in_recursion)]
303    fn has_filter(&self, pattern: &GraphPattern) -> bool {
304        match pattern {
305            GraphPattern::Filter { .. } => true,
306            GraphPattern::Bgp(_) => false,
307            GraphPattern::Union(left, right) => self.has_filter(left) || self.has_filter(right),
308            _ => false,
309        }
310    }
311
312    /// Generate candidate execution plans
313    fn generate_candidate_plans(
314        &self,
315        query: &AlgebraQuery,
316    ) -> Result<Vec<ExecutionPlan>, OxirsError> {
317        let mut candidates = Vec::new();
318
319        // Basic plan from base planner
320        let basic_plan = self.base_planner.plan_query(query)?;
321        candidates.push(basic_plan.clone());
322
323        // Generate join order variations
324        if let QueryForm::Select {
325            where_clause: GraphPattern::Bgp(patterns),
326            ..
327        } = &query.form
328        {
329            // Try different join orders
330            let join_orders = self.generate_join_orders(patterns);
331            for order in join_orders {
332                if let Ok(plan) = self.create_plan_with_order(patterns, &order) {
333                    candidates.push(plan);
334                }
335            }
336        }
337
338        // Add index-based variations
339        candidates.extend(self.generate_index_plans(query)?);
340
341        Ok(candidates)
342    }
343
344    /// Generate different join orders for optimization
345    fn generate_join_orders(&self, patterns: &[AlgebraTriplePattern]) -> Vec<Vec<usize>> {
346        let mut orders = Vec::new();
347
348        // Original order
349        orders.push((0..patterns.len()).collect());
350
351        // Most selective first (based on statistics)
352        let mut selective_order: Vec<usize> = (0..patterns.len()).collect();
353        selective_order.sort_by_key(|&i| self.estimate_selectivity(&patterns[i]));
354        orders.push(selective_order);
355
356        // Limit to reasonable number of variations
357        orders.truncate(5);
358        orders
359    }
360
361    /// Estimate selectivity of a triple pattern  
362    fn estimate_selectivity(&self, pattern: &AlgebraTriplePattern) -> i64 {
363        // Lower score = more selective (better to execute first)
364        let mut score = 0;
365
366        // Concrete terms are more selective
367        if !matches!(pattern.subject, TermPattern::Variable(_)) {
368            score -= 1000;
369        }
370        if !matches!(pattern.predicate, TermPattern::Variable(_)) {
371            score -= 100;
372        }
373        if !matches!(pattern.object, TermPattern::Variable(_)) {
374            score -= 1000;
375        }
376
377        score
378    }
379
380    /// Create execution plan with specific join order
381    fn create_plan_with_order(
382        &self,
383        patterns: &[AlgebraTriplePattern],
384        order: &[usize],
385    ) -> Result<ExecutionPlan, OxirsError> {
386        if order.is_empty() {
387            return Err(OxirsError::Query("Empty join order".to_string()));
388        }
389
390        let mut plan = ExecutionPlan::TripleScan {
391            pattern: crate::query::plan::convert_algebra_triple_pattern(&patterns[order[0]]),
392        };
393
394        for &idx in &order[1..] {
395            let right_plan = ExecutionPlan::TripleScan {
396                pattern: crate::query::plan::convert_algebra_triple_pattern(&patterns[idx]),
397            };
398
399            plan = ExecutionPlan::HashJoin {
400                left: Box::new(plan),
401                right: Box::new(right_plan),
402                join_vars: Vec::new(), // Would compute actual join vars
403            };
404        }
405
406        Ok(plan)
407    }
408
409    /// Generate index-based execution plans
410    fn generate_index_plans(
411        &self,
412        _query: &AlgebraQuery,
413    ) -> Result<Vec<ExecutionPlan>, OxirsError> {
414        // Would generate plans that leverage specific indexes
415        Ok(Vec::new())
416    }
417
418    /// Estimate cost of execution plan
419    fn estimate_cost(
420        &self,
421        plan: &ExecutionPlan,
422        pattern: &QueryPattern,
423    ) -> Result<f64, OxirsError> {
424        let params = self
425            .cost_model
426            .learned_parameters
427            .read()
428            .map_err(|e| OxirsError::Query(format!("Failed to read parameters: {e}")))?;
429
430        let base_cost = self.estimate_plan_cost(plan, &params)?;
431
432        // Adjust based on pattern history
433        let history_factor = self.get_history_factor(pattern);
434
435        Ok(base_cost * history_factor)
436    }
437
438    /// Estimate base cost of a plan
439    #[allow(clippy::only_used_in_recursion)]
440    fn estimate_plan_cost(
441        &self,
442        plan: &ExecutionPlan,
443        params: &LearnedParameters,
444    ) -> Result<f64, OxirsError> {
445        match plan {
446            ExecutionPlan::TripleScan { pattern } => {
447                // Base scan cost
448                let mut cost = 100.0;
449
450                // Adjust based on predicate selectivity
451                if let Some(crate::model::pattern::PredicatePattern::NamedNode(pred)) =
452                    &pattern.predicate
453                {
454                    if let Some(&pred_cost) = params.scan_costs.get(pred.as_str()) {
455                        cost *= pred_cost;
456                    }
457                }
458
459                Ok(cost)
460            }
461            ExecutionPlan::HashJoin { left, right, .. } => {
462                let left_cost = self.estimate_plan_cost(left, params)?;
463                let right_cost = self.estimate_plan_cost(right, params)?;
464
465                // Join cost depends on input sizes
466                Ok(left_cost + right_cost + (left_cost * right_cost * 0.01))
467            }
468            ExecutionPlan::Filter { input, .. } => {
469                let input_cost = self.estimate_plan_cost(input, params)?;
470                // Filters typically reduce result size
471                Ok(input_cost * 0.5)
472            }
473            _ => Ok(1000.0), // Default cost
474        }
475    }
476
477    /// Get historical performance factor
478    fn get_history_factor(&self, pattern: &QueryPattern) -> f64 {
479        // Check if we've seen similar patterns before
480        if let Ok(history) = self.cost_model.execution_history.read() {
481            for (hist_pattern, metrics) in history.patterns.iter() {
482                if self.patterns_similar(pattern, hist_pattern) {
483                    // Adjust based on historical performance
484                    return if metrics.execution_time.as_millis() < 100 {
485                        0.8 // Performed well historically
486                    } else {
487                        1.2 // Performed poorly
488                    };
489                }
490            }
491        }
492        1.0 // No history
493    }
494
495    /// Check if patterns are similar
496    fn patterns_similar(&self, a: &QueryPattern, b: &QueryPattern) -> bool {
497        a.num_patterns == b.num_patterns
498            && a.has_filter == b.has_filter
499            && a.predicates.len() == b.predicates.len()
500    }
501
502    /// Check predictive cache
503    fn check_predictive_cache(&self, _pattern: &QueryPattern) -> Option<OptimizedPlan> {
504        // Would check cache for similar queries
505        None
506    }
507
508    /// Apply hardware-specific optimizations
509    fn apply_hardware_optimizations(
510        &self,
511        plan: ExecutionPlan,
512    ) -> Result<OptimizedPlan, OxirsError> {
513        let mut optimized = OptimizedPlan {
514            base_plan: plan,
515            parallelism_level: 1,
516            use_simd: false,
517            use_gpu: false,
518            memory_budget: 0,
519        };
520
521        // Set parallelism based on CPU cores
522        optimized.parallelism_level = self.calculate_optimal_parallelism();
523
524        // Enable SIMD if available
525        optimized.use_simd = self.hardware_info.cpu_features.has_simd;
526
527        // Consider GPU for large operations
528        optimized.use_gpu =
529            self.hardware_info.gpu_available && self.should_use_gpu(&optimized.base_plan);
530
531        // Set memory budget
532        optimized.memory_budget = self.calculate_memory_budget();
533
534        Ok(optimized)
535    }
536
537    /// Calculate optimal parallelism level
538    fn calculate_optimal_parallelism(&self) -> usize {
539        // Use 75% of cores to leave room for system
540        (self.hardware_info.cpu_cores as f32 * 0.75) as usize
541    }
542
543    /// Determine if GPU should be used
544    fn should_use_gpu(&self, _plan: &ExecutionPlan) -> bool {
545        // Would analyze plan complexity and data size
546        false // Placeholder
547    }
548
549    /// Calculate memory budget for query
550    fn calculate_memory_budget(&self) -> usize {
551        // Use 50% of available memory
552        self.hardware_info.memory_bytes / 2
553    }
554
555    /// Update learning model with execution results
556    fn update_learning_model(&self, pattern: &QueryPattern, _plan: &OptimizedPlan) {
557        // Record pattern for future learning
558        if let Ok(mut history) = self.cost_model.execution_history.write() {
559            let metrics = ExecutionMetrics {
560                execution_time: Duration::from_millis(50), // Would get actual time
561                result_count: 100,                         // Would get actual count
562                memory_used: 1024 * 1024,                  // Would measure actual usage
563                cpu_percent: 25.0,                         // Would measure actual CPU
564            };
565
566            history.add_execution(pattern.clone(), metrics);
567        }
568    }
569}
570
/// Optimized execution plan with hardware hints.
///
/// Produced by `AIQueryOptimizer::apply_hardware_optimizations`; the extra
/// fields are hints for the executor, not guarantees.
#[derive(Debug)]
pub struct OptimizedPlan {
    /// Base execution plan
    pub base_plan: ExecutionPlan,
    /// Number of worker threads the executor should use
    pub parallelism_level: usize,
    /// Whether to use SIMD instructions
    pub use_simd: bool,
    /// Whether to use GPU acceleration
    pub use_gpu: bool,
    /// Memory budget in bytes
    pub memory_budget: usize,
}

586impl CostModel {
587    fn new(index_stats: Arc<IndexStats>) -> Self {
588        Self {
589            execution_history: Arc::new(RwLock::new(QueryHistory::new())),
590            learned_parameters: Arc::new(RwLock::new(LearnedParameters::default())),
591            index_stats,
592        }
593    }
594}
595
596impl QueryHistory {
597    fn new() -> Self {
598        Self {
599            patterns: VecDeque::new(),
600            max_size: 10000,
601        }
602    }
603
604    fn add_execution(&mut self, pattern: QueryPattern, metrics: ExecutionMetrics) {
605        self.patterns.push_back((pattern, metrics));
606
607        // Keep history bounded
608        while self.patterns.len() > self.max_size {
609            self.patterns.pop_front();
610        }
611    }
612}
613
614impl QueryCache {
615    fn new() -> Self {
616        Self {
617            cache: HashMap::new(),
618            access_patterns: VecDeque::new(),
619            max_size: 1000,
620        }
621    }
622}
623
624impl HardwareInfo {
625    fn detect() -> Self {
626        Self {
627            cpu_cores: std::thread::available_parallelism()
628                .map(|p| p.get())
629                .unwrap_or(1),
630            memory_bytes: 8 * 1024 * 1024 * 1024, // 8GB default
631            cpu_features: CpuFeatures {
632                has_simd: cfg!(target_feature = "sse2"),
633                has_avx2: cfg!(target_feature = "avx2"),
634                cache_line_size: 64,
635            },
636            gpu_available: false, // Would detect actual GPU
637        }
638    }
639}
640
/// Multi-query optimizer for batch processing.
///
/// Runs the single-query optimizer per query and detects triple patterns
/// repeated across the batch so their plans can be shared.
pub struct MultiQueryOptimizer {
    /// Single query optimizer
    single_optimizer: AIQueryOptimizer,
    /// Shared subexpression cache (pattern key -> shared execution plan)
    subexpression_cache: Arc<RwLock<HashMap<String, ExecutionPlan>>>,
}

649impl MultiQueryOptimizer {
650    /// Create new multi-query optimizer
651    pub fn new(index_stats: Arc<IndexStats>) -> Self {
652        Self {
653            single_optimizer: AIQueryOptimizer::new(index_stats),
654            subexpression_cache: Arc::new(RwLock::new(HashMap::new())),
655        }
656    }
657
658    /// Optimize multiple queries together
659    pub fn optimize_batch(
660        &self,
661        queries: &[AlgebraQuery],
662    ) -> Result<Vec<OptimizedPlan>, OxirsError> {
663        // Detect common subexpressions
664        let common_subs = self.detect_common_subexpressions(queries)?;
665
666        // Create shared execution plans
667        let mut optimized_plans = Vec::new();
668
669        for query in queries {
670            let mut plan = self.single_optimizer.optimize_query(query)?;
671
672            // Replace common subexpressions with shared plans
673            plan = self.reuse_common_subexpressions(plan, &common_subs)?;
674
675            optimized_plans.push(plan);
676        }
677
678        Ok(optimized_plans)
679    }
680
681    /// Detect common subexpressions across queries
682    fn detect_common_subexpressions(
683        &self,
684        queries: &[AlgebraQuery],
685    ) -> Result<HashMap<String, ExecutionPlan>, OxirsError> {
686        let mut common_subs = HashMap::new();
687
688        // Extract patterns from all queries
689        let mut pattern_counts = HashMap::new();
690
691        for query in queries {
692            self.count_patterns(query, &mut pattern_counts)?;
693        }
694
695        // Find patterns that appear multiple times
696        for (pattern_key, count) in pattern_counts {
697            if count > 1 {
698                // Create shared plan for this pattern
699                // (Simplified - would create actual plan)
700                common_subs.insert(
701                    pattern_key,
702                    ExecutionPlan::TripleScan {
703                        pattern: crate::model::pattern::TriplePattern::new(
704                            Some(crate::model::pattern::SubjectPattern::Variable(
705                                Variable::new("?s").unwrap(),
706                            )),
707                            Some(crate::model::pattern::PredicatePattern::Variable(
708                                Variable::new("?p").unwrap(),
709                            )),
710                            Some(crate::model::pattern::ObjectPattern::Variable(
711                                Variable::new("?o").unwrap(),
712                            )),
713                        ),
714                    },
715                );
716            }
717        }
718
719        Ok(common_subs)
720    }
721
722    /// Count pattern occurrences
723    fn count_patterns(
724        &self,
725        query: &AlgebraQuery,
726        counts: &mut HashMap<String, usize>,
727    ) -> Result<(), OxirsError> {
728        if let QueryForm::Select { where_clause, .. } = &query.form {
729            self.count_graph_patterns(where_clause, counts)?;
730        }
731        Ok(())
732    }
733
734    /// Count patterns in graph pattern
735    fn count_graph_patterns(
736        &self,
737        pattern: &GraphPattern,
738        counts: &mut HashMap<String, usize>,
739    ) -> Result<(), OxirsError> {
740        if let GraphPattern::Bgp(patterns) = pattern {
741            for triple in patterns {
742                let key = format!("{triple}"); // Simplified
743                *counts.entry(key).or_insert(0) += 1;
744            }
745        }
746        Ok(())
747    }
748
749    /// Reuse common subexpressions in plan
750    fn reuse_common_subexpressions(
751        &self,
752        plan: OptimizedPlan,
753        _common: &HashMap<String, ExecutionPlan>,
754    ) -> Result<OptimizedPlan, OxirsError> {
755        // Would traverse plan and replace common parts
756        Ok(plan)
757    }
758}
759
#[cfg(test)]
mod tests {
    use super::*;

    /// Constructing the optimizer must succeed and detect at least one core.
    #[test]
    fn test_ai_optimizer_creation() {
        let optimizer = AIQueryOptimizer::new(Arc::new(IndexStats::new()));
        assert!(optimizer.hardware_info.cpu_cores > 0);
    }

    /// A fresh cost model starts with an empty execution history.
    #[test]
    fn test_cost_model() {
        let model = CostModel::new(Arc::new(IndexStats::new()));
        assert_eq!(model.execution_history.read().unwrap().patterns.len(), 0);
    }

    /// Hardware detection reports sane, non-zero defaults.
    #[test]
    fn test_hardware_detection() {
        let hw = HardwareInfo::detect();
        assert!(hw.cpu_cores > 0);
        assert!(hw.memory_bytes > 0);
        assert_eq!(hw.cpu_features.cache_line_size, 64);
    }
}