// oxirs_embed crate root (lib.rs)
1//! # OxiRS Embed: Advanced Knowledge Graph Embeddings
2//!
3//! [![Version](https://img.shields.io/badge/version-0.1.0-blue)](https://github.com/cool-japan/oxirs/releases)
4//! [![docs.rs](https://docs.rs/oxirs-embed/badge.svg)](https://docs.rs/oxirs-embed)
5//!
6//! **Status**: Production Release (v0.1.0)
7//! **Stability**: Public APIs are stable. Production-ready with comprehensive testing.
8//!
9//! State-of-the-art knowledge graph embedding methods including TransE, DistMult, ComplEx,
10//! and RotatE models, enhanced with biomedical AI, GPU acceleration, and specialized text processing.
11//!
12//! ## Key Features
13//!
14//! ### 🧬 Biomedical AI
15//! - Specialized biomedical knowledge graph embeddings
16//! - Gene-disease association prediction
17//! - Drug-target interaction modeling
18//! - Pathway analysis and protein interactions
19//! - Domain-specific text embeddings (SciBERT, BioBERT, etc.)
20//!
21//! ### 🚀 GPU Acceleration
22//! - Advanced GPU memory pooling and management
23//! - Intelligent tensor caching
24//! - Mixed precision training and inference
25//! - Multi-stream parallel processing
26//! - Pipeline parallelism for large-scale training
27//!
28//! ### 🤖 Advanced Models
29//! - Traditional KG embeddings (TransE, DistMult, ComplEx, RotatE, etc.)
30//! - Graph Neural Networks (GCN, GraphSAGE, GAT)
31//! - Transformer-based embeddings with fine-tuning
32//! - Ontology-aware embeddings with reasoning
33//!
34//! ### 📊 Production-Ready
35//! - Comprehensive evaluation and benchmarking
36//! - Model registry and version management
37//! - Intelligent caching and optimization
38//! - API server for deployment
39//!
40//! ## Quick Start
41//!
42//! ```rust,no_run
43//! use oxirs_embed::{TransE, ModelConfig, Triple, NamedNode, EmbeddingModel};
44//!
45//! # async fn example() -> anyhow::Result<()> {
46//! // Create a knowledge graph embedding model
47//! let config = ModelConfig::default().with_dimensions(128);
48//! let mut model = TransE::new(config);
49//!
50//! // Add knowledge triples
51//! let triple = Triple::new(
52//!     NamedNode::new("http://example.org/alice")?,
53//!     NamedNode::new("http://example.org/knows")?,
54//!     NamedNode::new("http://example.org/bob")?,
55//! );
56//! model.add_triple(triple)?;
57//!
58//! // Train the model
59//! let stats = model.train(Some(100)).await?;
60//! println!("Training completed: {stats:?}");
61//! # Ok(())
62//! # }
63//! ```
64//!
65//! ## Biomedical Example
66//!
67//! ```rust,no_run
68//! use oxirs_embed::{BiomedicalEmbedding, BiomedicalEmbeddingConfig, EmbeddingModel};
69//!
70//! # async fn example() -> anyhow::Result<()> {
71//! // Create biomedical embedding model
72//! let config = BiomedicalEmbeddingConfig::default();
73//! let mut model = BiomedicalEmbedding::new(config);
74//!
75//! // Add biomedical knowledge
76//! model.add_gene_disease_association("BRCA1", "breast_cancer", 0.95);
77//! model.add_drug_target_interaction("aspirin", "COX1", 0.92);
78//!
79//! // Train and predict
80//! model.train(Some(100)).await?;
81//! let predictions = model.predict_gene_disease_associations("BRCA1", 5)?;
82//! # Ok(())
83//! # }
84//! ```
85//!
86//! ## GPU Acceleration Example
87//!
88//! ```rust,ignore
89//! use oxirs_embed::{GpuAccelerationConfig, GpuAccelerationManager};
90//!
91//! # async fn example() -> anyhow::Result<()> {
92//! // Configure GPU acceleration
93//! let config = GpuAccelerationConfig {
94//!     enabled: true,
95//!     mixed_precision: true,
96//!     tensor_caching: true,
97//!     multi_stream: true,
98//!     num_streams: 4,
99//!     ..Default::default()
100//! };
101//!
102//! let mut gpu_manager = GpuAccelerationManager::new(config);
103//!
104//! // Use accelerated embedding generation
105//! let entities = vec!["entity1".to_string(), "entity2".to_string()];
106//! let embeddings = gpu_manager.accelerated_embedding_generation(
107//!     entities,
108//!     |entity| { /* compute embedding */ vec![0.0; 128].into() }
109//! ).await?;
110//! # Ok(())
111//! # }
112//! ```
113//!
114//! ## Examples
115//!
116//! See the `examples/` directory for comprehensive demonstrations:
117//! - `biomedical_embedding_demo.rs` - Biomedical AI capabilities
118//! - `gpu_acceleration_demo.rs` - GPU acceleration features  
119//! - `integrated_ai_platform_demo.rs` - Complete AI platform showcase
120
121#![allow(dead_code)]
122
123pub mod ab_testing;
124pub mod acceleration;
125pub mod adaptive_learning;
126pub mod advanced_profiler;
127pub mod alignment;
128#[cfg(feature = "api-server")]
129pub mod api;
130pub mod application_tasks;
131pub mod batch_processing;
132pub mod biomedical_embeddings;
133pub mod caching;
134pub mod causal_representation_learning;
135pub mod cloud_integration;
136pub mod clustering;
137pub mod community_detection;
138pub mod compression;
139pub mod contextual;
140pub mod continual_learning;
141pub mod cross_domain_transfer;
142pub mod cross_module_performance;
143pub mod delta;
144pub mod diffusion_embeddings;
145pub mod distributed_training;
146pub mod embed_compression;
147pub mod enterprise_knowledge;
148pub mod entity_linking;
149pub mod evaluation;
150pub mod federated_learning;
151pub mod fine_tuning;
152#[cfg(feature = "gpu")]
153pub mod gpu_acceleration;
154pub mod graph_models;
155pub mod graphql_api;
156pub mod inference;
157pub mod integration;
158pub mod interpretability;
159pub mod kg_completion;
160pub mod link_prediction;
161pub mod mamba_attention;
162pub mod mixed_precision;
163pub mod model_registry;
164pub mod model_selection;
165pub mod models;
166pub mod monitoring;
167pub mod multimodal;
168pub mod neural_symbolic_integration;
169pub mod neuro_evolution;
170pub mod novel_architectures;
171pub mod performance_profiler;
172pub mod persistence;
173pub mod quantization;
174pub mod real_time_fine_tuning;
175pub mod real_time_optimization;
176pub mod research_networks;
177// pub mod revolutionary_optimization; // Disabled - awaiting scirs2-core API stabilization
178pub mod sparql_extension;
179pub mod storage_backend;
180pub mod temporal_embeddings;
181pub mod training;
182pub mod training_online;
183pub mod utils;
184pub mod validation;
185pub mod vector_search;
186pub mod vision_language_graph;
187pub mod visualization;
188// v1.1.0: Contrastive learning loss functions
189pub mod contrastive_learning;
190
191// v1.1.0: Procrustes alignment for embedding space alignment across models/languages
192pub mod procrustes_alignment;
193
194// v1.1.0: LRU embedding cache with memory-bounded eviction and per-model invalidation
195pub mod embedding_cache;
196
197// v1.1.0 round 5: PCA-based dimensionality reduction for embedding vectors
198pub mod dimensionality_reducer;
199
200// v1.1.0 round 6: Full PCA reducer with fit/transform/inverse_transform and power-iteration eigenvectors
201pub mod pca_reducer;
202
203// v1.1.0 round 7: Fine-tuner with triplet/contrastive/cosine loss
204pub mod fine_tuner;
205
206// v1.1.0 round 10: In-memory vector store with namespace support
207pub mod vector_store;
208
209// v1.1.0 round 11: Cross-encoder re-ranker for embedding search
210pub mod cross_encoder;
211
212// v1.1.0 round 12: Linear embedding projection layer (dimensionality reduction/expansion)
213pub mod projection_layer;
214pub use projection_layer::{ActivationFn, InitMethod, ProjectionLayer, ProjectionMatrix};
215
216// v1.1.0 round 13: In-memory embedding store with label-based lookup
217pub mod embedding_store;
218
219// v1.1.0 round 12: BPE / WordPiece tokenizer with special tokens and batch encoding
220pub mod tokenizer;
221
222// v1.1.0 round 11: Embedding aggregation strategies (mean/max/CLS/attention pooling)
223pub mod embedding_aggregator;
224
225// v1.1.0 round 13: Result reranking (cross-encoder, BM25, reciprocal rank fusion)
226pub mod reranker;
227
228// v1.1.0 round 14: ANN index hyperparameter search (HNSW/IVF grid expansion + Pareto front)
229pub mod index_optimizer;
230
231// v1.1.0 round 15: Batch text encoding pipeline with chunking and pooling
232pub mod batch_encoder;
233
234// v1.1.0 round 16: Random projection for embedding dimensionality reduction
235pub mod embedding_compressor;
236
237// Import Vector from oxirs-vec for type compatibility across the ecosystem
238pub use oxirs_vec::Vector as VecVector;
239
240// Adaptive Learning System exports
241pub use adaptive_learning::{
242    AdaptationMetrics, AdaptationStrategy, AdaptiveLearningConfig, AdaptiveLearningSystem,
243    QualityFeedback,
244};
245
246use anyhow::Result;
247use chrono::{DateTime, Utc};
248use serde::{Deserialize, Serialize};
249use std::collections::HashMap;
250use std::ops::{Add, Sub};
251use uuid::Uuid;
252
253/// Compatibility wrapper for Vector that provides the old interface
254/// while using the sophisticated oxirs-vec Vector internally
255#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
256pub struct Vector {
257    pub values: Vec<f32>,
258    pub dimensions: usize,
259    #[serde(skip)]
260    inner: Option<VecVector>,
261}
262
263impl Vector {
264    pub fn new(values: Vec<f32>) -> Self {
265        let dimensions = values.len();
266        Self {
267            values,
268            dimensions,
269            inner: None,
270        }
271    }
272
273    /// Get or create the inner VecVector
274    fn get_inner(&self) -> VecVector {
275        // Create a new VecVector from values if needed
276        if let Some(ref inner) = self.inner {
277            inner.clone()
278        } else {
279            VecVector::new(self.values.clone())
280        }
281    }
282
283    /// Update internal state when values change
284    fn sync_internal(&mut self) {
285        self.dimensions = self.values.len();
286        self.inner = None; // Will be recreated on next access
287    }
288
289    /// Create from ndarray Array1
290    pub fn from_array1(array: &scirs2_core::ndarray_ext::Array1<f32>) -> Self {
291        Self::new(array.to_vec())
292    }
293
294    /// Convert to ndarray Array1
295    pub fn to_array1(&self) -> scirs2_core::ndarray_ext::Array1<f32> {
296        scirs2_core::ndarray_ext::Array1::from_vec(self.values.clone())
297    }
298
299    /// Element-wise mapping
300    pub fn mapv<F>(&self, f: F) -> Self
301    where
302        F: Fn(f32) -> f32,
303    {
304        Self::new(self.values.iter().copied().map(f).collect())
305    }
306
307    /// Sum of all elements
308    pub fn sum(&self) -> f32 {
309        self.values.iter().sum()
310    }
311
312    /// Square root of the sum
313    pub fn sqrt(&self) -> f32 {
314        self.sum().sqrt()
315    }
316
317    /// Get the inner VecVector for advanced operations
318    pub fn inner(&self) -> VecVector {
319        self.get_inner()
320    }
321
322    /// Convert into the inner VecVector
323    pub fn into_inner(self) -> VecVector {
324        self.inner.unwrap_or_else(|| VecVector::new(self.values))
325    }
326
327    /// Create from VecVector with optimized memory allocation
328    pub fn from_vec_vector(vec_vector: VecVector) -> Self {
329        let values = vec_vector.as_f32().to_vec();
330        let dimensions = values.len();
331        Self {
332            values,
333            dimensions,
334            inner: Some(vec_vector),
335        }
336    }
337
338    /// Create vector with pre-allocated capacity for performance
339    pub fn with_capacity(capacity: usize) -> Self {
340        Self {
341            values: Vec::with_capacity(capacity),
342            dimensions: 0,
343            inner: None,
344        }
345    }
346
347    /// Extend vector with optimized memory reallocation
348    pub fn extend_optimized(&mut self, other_values: &[f32]) {
349        // Reserve capacity to avoid multiple reallocations
350        self.values.reserve(other_values.len());
351        self.values.extend_from_slice(other_values);
352        self.sync_internal();
353    }
354
355    /// Shrink vector memory to fit actual size
356    pub fn shrink_to_fit(&mut self) {
357        self.values.shrink_to_fit();
358        self.sync_internal();
359    }
360
361    /// Get memory usage in bytes
362    pub fn memory_usage(&self) -> usize {
363        self.values.capacity() * std::mem::size_of::<f32>() + std::mem::size_of::<Self>()
364    }
365}
366
367// Arithmetic operations for Vector (compatibility with old interface)
368impl Add for &Vector {
369    type Output = Vector;
370
371    fn add(self, other: &Vector) -> Vector {
372        // Use the sophisticated vector addition from oxirs-vec
373        if let (Some(self_inner), Some(other_inner)) = (&self.inner, &other.inner) {
374            if let Ok(result) = self_inner.add(other_inner) {
375                return Vector::from_vec_vector(result);
376            }
377        }
378        // Fallback to element-wise addition for compatibility
379        assert_eq!(
380            self.values.len(),
381            other.values.len(),
382            "Vector dimensions must match"
383        );
384        let result_values: Vec<f32> = self
385            .values
386            .iter()
387            .zip(other.values.iter())
388            .map(|(a, b)| a + b)
389            .collect();
390        Vector::new(result_values)
391    }
392}
393
394impl Sub for &Vector {
395    type Output = Vector;
396
397    fn sub(self, other: &Vector) -> Vector {
398        // Use the sophisticated vector subtraction from oxirs-vec
399        if let (Some(self_inner), Some(other_inner)) = (&self.inner, &other.inner) {
400            if let Ok(result) = self_inner.subtract(other_inner) {
401                return Vector::from_vec_vector(result);
402            }
403        }
404        // Fallback to element-wise subtraction for compatibility
405        assert_eq!(
406            self.values.len(),
407            other.values.len(),
408            "Vector dimensions must match"
409        );
410        let result_values: Vec<f32> = self
411            .values
412            .iter()
413            .zip(other.values.iter())
414            .map(|(a, b)| a - b)
415            .collect();
416        Vector::new(result_values)
417    }
418}
419
420/// Triple structure for RDF triples
421#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
422pub struct Triple {
423    pub subject: NamedNode,
424    pub predicate: NamedNode,
425    pub object: NamedNode,
426}
427
428impl Triple {
429    pub fn new(subject: NamedNode, predicate: NamedNode, object: NamedNode) -> Self {
430        Self {
431            subject,
432            predicate,
433            object,
434        }
435    }
436}
437
438/// Named node for RDF resources
439#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
440pub struct NamedNode {
441    pub iri: String,
442}
443
444impl NamedNode {
445    pub fn new(iri: &str) -> Result<Self> {
446        Ok(Self {
447            iri: iri.to_string(),
448        })
449    }
450}
451
452impl std::fmt::Display for NamedNode {
453    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
454        write!(f, "{}", self.iri)
455    }
456}
457
458/// Configuration for embedding models
459#[derive(Debug, Clone, Serialize, Deserialize)]
460pub struct ModelConfig {
461    pub dimensions: usize,
462    pub learning_rate: f64,
463    pub l2_reg: f64,
464    pub max_epochs: usize,
465    pub batch_size: usize,
466    pub negative_samples: usize,
467    pub seed: Option<u64>,
468    pub use_gpu: bool,
469    pub model_params: HashMap<String, f64>,
470}
471
472impl Default for ModelConfig {
473    fn default() -> Self {
474        Self {
475            dimensions: 100,
476            learning_rate: 0.01,
477            l2_reg: 0.0001,
478            max_epochs: 1000,
479            batch_size: 1000,
480            negative_samples: 10,
481            seed: None,
482            use_gpu: false,
483            model_params: HashMap::new(),
484        }
485    }
486}
487
488impl ModelConfig {
489    pub fn with_dimensions(mut self, dimensions: usize) -> Self {
490        self.dimensions = dimensions;
491        self
492    }
493
494    pub fn with_learning_rate(mut self, learning_rate: f64) -> Self {
495        self.learning_rate = learning_rate;
496        self
497    }
498
499    pub fn with_max_epochs(mut self, max_epochs: usize) -> Self {
500        self.max_epochs = max_epochs;
501        self
502    }
503
504    pub fn with_seed(mut self, seed: u64) -> Self {
505        self.seed = Some(seed);
506        self
507    }
508
509    pub fn with_batch_size(mut self, batch_size: usize) -> Self {
510        self.batch_size = batch_size;
511        self
512    }
513}
514
/// Training statistics
///
/// Summary of a training run, returned by [`EmbeddingModel::train`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TrainingStats {
    /// Number of epochs actually executed.
    pub epochs_completed: usize,
    /// Loss value at the end of the run.
    pub final_loss: f64,
    /// Wall-clock duration of the run, in seconds.
    pub training_time_seconds: f64,
    /// Whether the trainer reported convergence (presumably allowing an
    /// early stop before `max_epochs` — TODO confirm against trainers).
    pub convergence_achieved: bool,
    /// Loss values recorded over the course of training.
    pub loss_history: Vec<f64>,
}
524
/// Model statistics
///
/// Size and lifecycle snapshot of a model, returned by
/// [`EmbeddingModel::get_stats`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelStats {
    /// Number of distinct entities known to the model.
    pub num_entities: usize,
    /// Number of distinct relations known to the model.
    pub num_relations: usize,
    /// Number of triples added to the model.
    pub num_triples: usize,
    /// Embedding dimensionality.
    pub dimensions: usize,
    /// Whether the model has been trained.
    pub is_trained: bool,
    /// Model family name; `"unknown"` for the default.
    pub model_type: String,
    /// Timestamp captured when the stats (or model) were created.
    pub creation_time: DateTime<Utc>,
    /// Timestamp of the most recent training run, if any.
    pub last_training_time: Option<DateTime<Utc>>,
}

impl Default for ModelStats {
    /// Empty, untrained stats. Note: `creation_time` is captured at call
    /// time, so two defaults constructed at different moments differ.
    fn default() -> Self {
        Self {
            num_entities: 0,
            num_relations: 0,
            num_triples: 0,
            dimensions: 0,
            is_trained: false,
            model_type: "unknown".to_string(),
            creation_time: Utc::now(),
            last_training_time: None,
        }
    }
}
552
/// Embedding errors
///
/// Error type for embedding operations. `Other` wraps any `anyhow::Error`
/// via `#[from]`, so `?` on anyhow results converts automatically.
#[derive(Debug, thiserror::Error)]
pub enum EmbeddingError {
    /// The operation requires a trained model.
    #[error("Model not trained")]
    ModelNotTrained,
    /// The requested entity is unknown to the model.
    #[error("Entity not found: {entity}")]
    EntityNotFound { entity: String },
    /// The requested relation is unknown to the model.
    #[error("Relation not found: {relation}")]
    RelationNotFound { relation: String },
    /// Catch-all wrapper for errors from underlying libraries.
    #[error("Other error: {0}")]
    Other(#[from] anyhow::Error),
}
565
/// Basic embedding model trait
///
/// Common contract implemented by the embedding models in this crate
/// (e.g. `TransE`, `BiomedicalEmbedding`). Training and text encoding are
/// async; everything else is synchronous.
#[async_trait::async_trait]
pub trait EmbeddingModel: Send + Sync {
    /// Configuration this model was constructed with.
    fn config(&self) -> &ModelConfig;
    /// Unique identifier of this model instance.
    fn model_id(&self) -> &Uuid;
    /// Static name of the model family.
    fn model_type(&self) -> &'static str;
    /// Add a triple to the model's training data.
    fn add_triple(&mut self, triple: Triple) -> Result<()>;
    /// Train the model; `epochs` presumably caps the epoch count when
    /// given (TODO confirm against implementations).
    async fn train(&mut self, epochs: Option<usize>) -> Result<TrainingStats>;
    /// Embedding for an entity; errs if the entity is unknown.
    fn get_entity_embedding(&self, entity: &str) -> Result<Vector>;
    /// Embedding for a relation; errs if the relation is unknown.
    fn get_relation_embedding(&self, relation: &str) -> Result<Vector>;
    /// Plausibility score for a (subject, predicate, object) triple.
    fn score_triple(&self, subject: &str, predicate: &str, object: &str) -> Result<f64>;
    /// Top-k candidate objects for (subject, predicate, ?), with scores.
    fn predict_objects(
        &self,
        subject: &str,
        predicate: &str,
        k: usize,
    ) -> Result<Vec<(String, f64)>>;
    /// Top-k candidate subjects for (?, predicate, object), with scores.
    fn predict_subjects(
        &self,
        predicate: &str,
        object: &str,
        k: usize,
    ) -> Result<Vec<(String, f64)>>;
    /// Top-k candidate relations for (subject, ?, object), with scores.
    fn predict_relations(
        &self,
        subject: &str,
        object: &str,
        k: usize,
    ) -> Result<Vec<(String, f64)>>;
    /// All entities known to the model.
    fn get_entities(&self) -> Vec<String>;
    /// All relations known to the model.
    fn get_relations(&self) -> Vec<String>;
    /// Size and training-state snapshot.
    fn get_stats(&self) -> ModelStats;
    /// Persist the model to `path`.
    fn save(&self, path: &str) -> Result<()>;
    /// Load model state from `path`.
    fn load(&mut self, path: &str) -> Result<()>;
    /// Discard all triples and learned state.
    fn clear(&mut self);
    /// Whether the model has been trained.
    fn is_trained(&self) -> bool;

    /// Encode text strings into embeddings
    async fn encode(&self, texts: &[String]) -> Result<Vec<Vec<f32>>>;
}
606
607// Re-export main types
608pub use acceleration::{AdaptiveEmbeddingAccelerator, GpuEmbeddingAccelerator};
609#[cfg(feature = "api-server")]
610pub use api::{start_server, ApiConfig, ApiState};
611pub use batch_processing::{
612    BatchJob, BatchProcessingConfig, BatchProcessingManager, BatchProcessingResult,
613    BatchProcessingStats, IncrementalConfig, JobProgress, JobStatus, OutputFormat,
614    PartitioningStrategy, RetryConfig,
615};
616pub use biomedical_embeddings::{
617    BiomedicalEmbedding, BiomedicalEmbeddingConfig, BiomedicalEntityType, BiomedicalRelationType,
618    FineTuningConfig, PreprocessingRule, SpecializedTextConfig, SpecializedTextEmbedding,
619    SpecializedTextModel,
620};
621pub use caching::{CacheConfig, CacheManager, CachedEmbeddingModel};
622pub use causal_representation_learning::{
623    CausalDiscoveryAlgorithm, CausalDiscoveryConfig, CausalGraph, CausalRepresentationConfig,
624    CausalRepresentationModel, ConstraintSettings, CounterfactualConfig, CounterfactualQuery,
625    DisentanglementConfig, DisentanglementMethod, ExplanationType, IndependenceTest,
626    InterventionConfig, ScoreSettings, StructuralCausalModelConfig,
627};
628pub use cloud_integration::{
629    AWSSageMakerService, AutoScalingConfig, AzureMLService, BackupConfig, CloudIntegrationConfig,
630    CloudIntegrationManager, CloudProvider, CloudService, ClusterStatus, CostEstimate,
631    CostOptimizationResult, CostOptimizationStrategy, DeploymentConfig, DeploymentResult,
632    DeploymentStatus, EndpointInfo, FunctionInvocationResult, GPUClusterConfig, GPUClusterResult,
633    LifecyclePolicy, OptimizationAction, PerformanceTier, ReplicationType,
634    ServerlessDeploymentResult, ServerlessFunctionConfig, ServerlessStatus, StorageConfig,
635    StorageResult, StorageStatus, StorageType,
636};
637pub use compression::{
638    CompressedModel, CompressionStats, CompressionTarget, DistillationConfig,
639    ModelCompressionManager, NASConfig, OptimizationTarget, PruningConfig, PruningMethod,
640    QuantizationConfig, QuantizationMethod,
641};
642// pub use contextual::{
643//     ContextualConfig, ContextualEmbeddingModel, EmbeddingContext,
644// };
645pub use continual_learning::{
646    ArchitectureConfig, BoundaryDetection, ConsolidationConfig, ContinualLearningConfig,
647    ContinualLearningModel, MemoryConfig, MemoryType, MemoryUpdateStrategy, RegularizationConfig,
648    ReplayConfig, ReplayMethod, TaskConfig, TaskDetection, TaskSwitching,
649};
650pub use cross_module_performance::{
651    CoordinatorConfig, CrossModulePerformanceCoordinator, GlobalPerformanceMetrics, ModuleMetrics,
652    ModulePerformanceMonitor, OptimizationCache, PerformanceSnapshot, PredictivePerformanceEngine,
653    ResourceAllocator, ResourceTracker,
654};
655pub use delta::{
656    ChangeRecord, ChangeStatistics, ChangeType, DeltaConfig, DeltaManager, DeltaResult, DeltaStats,
657    IncrementalStrategy,
658};
659pub use enterprise_knowledge::{
660    BehaviorMetrics, CareerPredictions, Category, CategoryHierarchy, CategoryPerformance,
661    ColdStartStrategy, CommunicationFrequency, CommunicationPreferences, CustomerEmbedding,
662    CustomerPreferences, CustomerRatings, CustomerSegment, Department, DepartmentPerformance,
663    EmployeeEmbedding, EnterpriseConfig, EnterpriseKnowledgeAnalyzer, EnterpriseMetrics,
664    ExperienceLevel, FeatureType, MarketAnalysis, OrganizationalStructure,
665    PerformanceMetrics as EnterprisePerformanceMetrics, ProductAvailability, ProductEmbedding,
666    ProductFeature, ProductRecommendation, Project, ProjectOutcome, ProjectParticipation,
667    ProjectPerformance, ProjectStatus, Purchase, PurchaseChannel, RecommendationConfig,
668    RecommendationEngine, RecommendationEngineType, RecommendationPerformance,
669    RecommendationReason, SalesMetrics, Skill, SkillCategory, Team, TeamPerformance,
670};
671pub use evaluation::{
672    AnalogicalReasoningBenchmark, AnalogyQuad, EmbeddingClusteringMetrics, EmbeddingEvaluator,
673    QueryAnsweringEvaluator, QueryEvaluationConfig, QueryEvaluationResults, QueryMetric,
674    QueryResult, QueryTemplate, QueryType, ReasoningChain, ReasoningEvaluationConfig,
675    ReasoningEvaluationResults, ReasoningRule, ReasoningStep, ReasoningTaskEvaluator,
676    ReasoningType, TypeSpecificResults,
677};
678pub use federated_learning::{
679    AggregationEngine, AggregationStrategy, AuthenticationConfig, AuthenticationMethod,
680    CertificateConfig, ClippingMechanisms, ClippingMethod, CommunicationConfig,
681    CommunicationManager, CommunicationProtocol, CompressionAlgorithm, CompressionConfig,
682    CompressionEngine, ConvergenceMetrics, ConvergenceStatus, DataSelectionStrategy,
683    DataStatistics, EncryptionScheme, FederatedConfig, FederatedCoordinator,
684    FederatedEmbeddingModel, FederatedMessage, FederatedRound, FederationStats, GlobalModelState,
685    HardwareAccelerator, KeyManager, LocalModelState, LocalTrainingStats, LocalUpdate,
686    MetaLearningConfig, NoiseGenerator, NoiseMechanism, OutlierAction, OutlierDetection,
687    OutlierDetectionMethod, Participant, ParticipantCapabilities, ParticipantStatus,
688    PersonalizationConfig, PersonalizationStrategy, PrivacyAccountant, PrivacyConfig,
689    PrivacyEngine, PrivacyMetrics, PrivacyParams, RoundMetrics, RoundStatus, SecurityConfig,
690    SecurityFeature, SecurityManager, TrainingConfig, VerificationEngine, VerificationMechanism,
691    VerificationResult, WeightingScheme,
692};
693#[cfg(feature = "gpu")]
694pub use gpu_acceleration::{
695    GpuAccelerationConfig, GpuAccelerationManager, GpuMemoryPool, GpuPerformanceStats,
696    MixedPrecisionProcessor, MultiStreamProcessor, TensorCache,
697};
698pub use graphql_api::{
699    create_schema, BatchEmbeddingInput, BatchEmbeddingResult, BatchStatus, DistanceMetric,
700    EmbeddingFormat, EmbeddingQueryInput, EmbeddingResult, EmbeddingSchema, GraphQLContext,
701    ModelInfo, ModelType, SimilarityResult, SimilaritySearchInput,
702};
703pub use kg_completion::{BatchedTrainingLoop, KgCompletionTask, NegativeSampler, TrainingBatch};
704pub use models::{
705    AggregationType, ComplEx, DistMult, GNNConfig, GNNEmbedding, GNNType, HoLE, HoLEConfig,
706    PoolingStrategy, RotatE, TransE, TransformerConfig, TransformerEmbedding, TransformerType,
707};
708
709pub use contextual::{
710    AccessibilityPreferences, ComplexityLevel, ContextualConfig, ContextualEmbeddingModel,
711    DomainContext, EmbeddingContext, PerformanceRequirements, PriorityLevel, PrivacySettings,
712    QueryContext, QueryType as ContextualQueryType, ResponseFormat, TaskConstraints, TaskContext,
713    TaskType, UserContext, UserHistory, UserPreferences,
714};
715pub use distributed_training::{
716    AggregationMethod, AllReduceStrategy, CommunicationBackend, DataParallelTrainer,
717    DistributedEmbeddingTrainer, DistributedStrategy, DistributedTrainingConfig,
718    DistributedTrainingCoordinator, DistributedTrainingSample, DistributedTrainingStats,
719    FaultToleranceConfig, GradientAggregator, GradientCompressor, ModelUpdate, SparseGradient,
720    WorkerInfo, WorkerStatus, WorkerUpdate,
721};
722#[cfg(feature = "conve")]
723pub use models::{ConvE, ConvEConfig};
724pub use monitoring::{
725    Alert, AlertSeverity, AlertThresholds, AlertType, CacheMetrics, ConsoleAlertHandler,
726    DriftMetrics, ErrorEvent, ErrorMetrics, ErrorSeverity, LatencyMetrics, MonitoringConfig,
727    PerformanceMetrics as MonitoringPerformanceMetrics, PerformanceMonitor, QualityAssessment,
728    QualityMetrics, ResourceMetrics, SlackAlertHandler, ThroughputMetrics,
729};
730pub use multimodal::{
731    AlignmentNetwork, AlignmentObjective, ContrastiveConfig, CrossDomainConfig, CrossModalConfig,
732    KGEncoder, MultiModalEmbedding, MultiModalStats, TextEncoder,
733};
734pub use neural_symbolic_integration::{
735    ConstraintSatisfactionConfig, ConstraintType, KnowledgeIntegrationConfig, KnowledgeRule,
736    LogicIntegrationConfig, LogicProgrammingConfig, LogicalFormula, NeuralSymbolicConfig,
737    NeuralSymbolicModel, NeuroSymbolicArchitectureConfig, OntologicalConfig, ReasoningEngine,
738    RuleBasedConfig, SymbolicReasoningConfig,
739};
740pub use novel_architectures::{
741    ActivationType, ArchitectureParams, ArchitectureState, ArchitectureType, CurvatureComputation,
742    CurvatureMethod, CurvatureType, DynamicsConfig, EntanglementStructure, EquivarianceGroup,
743    FlowType, GeometricConfig, GeometricParams, GeometricSpace, GeometricState,
744    GraphTransformerParams, GraphTransformerState, HyperbolicDistance, HyperbolicInit,
745    HyperbolicManifold, HyperbolicParams, HyperbolicState, IntegrationScheme, IntegrationStats,
746    ManifoldLearning, ManifoldMethod, ManifoldOptimizer, NeuralODEParams, NeuralODEState,
747    NovelArchitectureConfig, NovelArchitectureModel, ODERegularization, ODESolverType,
748    ParallelTransport, QuantumGateSet, QuantumMeasurement, QuantumNoise, QuantumParams,
749    QuantumState, StabilityConstraints, StructuralBias, TimeEvolution, TransportMethod,
750};
751pub use research_networks::{
752    AuthorEmbedding, Citation, CitationNetwork, CitationType, Collaboration, CollaborationNetwork,
753    NetworkMetrics, PaperSection, PublicationEmbedding, PublicationType, ResearchCommunity,
754    ResearchNetworkAnalyzer, ResearchNetworkConfig, TopicModel, TopicModelingConfig,
755};
756pub use sparql_extension::{
757    ExpandedQuery, Expansion, ExpansionType, QueryStatistics as SparqlQueryStatistics,
758    SparqlExtension, SparqlExtensionConfig,
759};
760pub use storage_backend::{
761    DiskBackend, EmbeddingMetadata, EmbeddingVersion, MemoryBackend, StorageBackend,
762    StorageBackendConfig, StorageBackendManager, StorageBackendType, StorageStats,
763};
764pub use temporal_embeddings::{
765    TemporalEmbeddingConfig, TemporalEmbeddingModel, TemporalEvent, TemporalForecast,
766    TemporalGranularity, TemporalScope, TemporalStats, TemporalTriple,
767};
768pub use vision_language_graph::{
769    AggregationFunction, CNNConfig, CrossAttentionConfig, DomainAdaptationConfig,
770    DomainAdaptationMethod, EpisodeConfig, FewShotConfig, FewShotMethod, FusionStrategy,
771    GraphArchitecture, GraphEncoder, GraphEncoderConfig, JointTrainingConfig, LanguageArchitecture,
772    LanguageEncoder, LanguageEncoderConfig, LanguageTransformerConfig, MetaLearner,
773    ModalityEncoding, MultiModalTransformer, MultiModalTransformerConfig, NormalizationType,
774    PoolingType, PositionEncodingType, ReadoutFunction, TaskCategory, TaskSpecificParams,
775    TrainingObjective, TransferLearningConfig, TransferStrategy, ViTConfig, VisionArchitecture,
776    VisionEncoder, VisionEncoderConfig, VisionLanguageGraphConfig, VisionLanguageGraphModel,
777    VisionLanguageGraphStats, ZeroShotConfig, ZeroShotMethod,
778};
779
780#[cfg(feature = "tucker")]
781pub use models::TuckER;
782
783#[cfg(feature = "quatd")]
784pub use models::QuatD;
785
786// Re-export model registry types
787pub use crate::model_registry::{
788    ModelRegistry, ModelVersion, ResourceAllocation as ModelResourceAllocation,
789};
790
791// Re-export model selection types
792pub use crate::model_selection::{
793    DatasetCharacteristics, MemoryRequirement, ModelComparison, ModelComparisonEntry,
794    ModelRecommendation, ModelSelector, ModelType as SelectionModelType, TrainingTime, UseCaseType,
795};
796
797// Re-export performance profiler types
798pub use crate::performance_profiler::{
799    OperationStats, OperationTimer, OperationType, PerformanceProfiler, PerformanceReport,
800};
801
802// Re-export revolutionary optimization types
803// Temporarily disabled - awaiting scirs2-core v0.2.0 API stabilization
804/*
805pub use revolutionary_optimization::{
806    AdvancedMemoryConfig, EmbeddingOptimizationResult, OptimizationPriority, OptimizationStatistics,
807    OptimizationStrategy, PerformanceTargets, QuantumOptimizationStrategy,
808    RevolutionaryEmbeddingOptimizer, RevolutionaryEmbeddingOptimizerFactory,
809    RevolutionaryOptimizationConfig, SimilarityComputationMethod, SimilarityOptimizationResult,
810    StreamingOptimizationConfig,
811};
812*/
813
814/// Convenience functions for quick setup and common operations
815pub mod quick_start {
816    use super::*;
817    use crate::models::TransE;
818
819    /// Create a TransE model with sensible defaults for experimentation
820    pub fn create_simple_transe_model() -> TransE {
821        let config = ModelConfig::default()
822            .with_dimensions(128)
823            .with_learning_rate(0.01)
824            .with_max_epochs(100);
825        TransE::new(config)
826    }
827
828    /// Create a biomedical embedding model for life sciences applications
829    pub fn create_biomedical_model() -> BiomedicalEmbedding {
830        let config = BiomedicalEmbeddingConfig::default();
831        BiomedicalEmbedding::new(config)
832    }
833
834    /// Parse a triple from simple string format "subject predicate object"
835    pub fn parse_triple_from_string(triple_str: &str) -> Result<Triple> {
836        let parts: Vec<&str> = triple_str.split_whitespace().collect();
837        if parts.len() != 3 {
838            return Err(anyhow::anyhow!(
839                "Triple must have exactly 3 parts separated by spaces"
840            ));
841        }
842
843        // Helper function to convert short names to full URIs
844        let expand_uri = |s: &str| -> String {
845            if s.starts_with("http://") || s.starts_with("https://") {
846                s.to_string()
847            } else {
848                format!("http://example.org/{s}")
849            }
850        };
851
852        Ok(Triple::new(
853            NamedNode::new(&expand_uri(parts[0]))?,
854            NamedNode::new(&expand_uri(parts[1]))?,
855            NamedNode::new(&expand_uri(parts[2]))?,
856        ))
857    }
858
859    /// Helper to add multiple triples from string format
860    pub fn add_triples_from_strings<T: EmbeddingModel>(
861        model: &mut T,
862        triple_strings: &[&str],
863    ) -> Result<usize> {
864        let mut count = 0;
865        for triple_str in triple_strings {
866            let triple = parse_triple_from_string(triple_str)?;
867            model.add_triple(triple)?;
868            count += 1;
869        }
870        Ok(count)
871    }
872
873    /// Quick function to compute cosine similarity between two embedding vectors
874    pub fn cosine_similarity(a: &[f64], b: &[f64]) -> Result<f64> {
875        if a.len() != b.len() {
876            return Err(anyhow::anyhow!(
877                "Vector dimensions don't match: {} vs {}",
878                a.len(),
879                b.len()
880            ));
881        }
882
883        let dot_product: f64 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
884        let norm_a: f64 = a.iter().map(|x| x * x).sum::<f64>().sqrt();
885        let norm_b: f64 = b.iter().map(|x| x * x).sum::<f64>().sqrt();
886
887        if norm_a == 0.0 || norm_b == 0.0 {
888            return Ok(0.0);
889        }
890
891        Ok(dot_product / (norm_a * norm_b))
892    }
893
894    /// Generate sample knowledge graph data for testing and prototyping
895    pub fn generate_sample_kg_data(
896        num_entities: usize,
897        num_relations: usize,
898    ) -> Vec<(String, String, String)> {
899        #[allow(unused_imports)]
900        use scirs2_core::random::{Random, Rng};
901
902        let mut random = Random::default();
903        let mut triples = Vec::new();
904
905        let entities: Vec<String> = (0..num_entities)
906            .map(|i| format!("http://example.org/entity_{i}"))
907            .collect();
908
909        let relations: Vec<String> = (0..num_relations)
910            .map(|i| format!("http://example.org/relation_{i}"))
911            .collect();
912
913        // Generate random triples (avoid self-loops)
914        for _ in 0..(num_entities * 2) {
915            let subject_idx = random.random_range(0..entities.len());
916            let relation_idx = random.random_range(0..relations.len());
917            let object_idx = random.random_range(0..entities.len());
918
919            let subject = entities[subject_idx].clone();
920            let relation = relations[relation_idx].clone();
921            let object = entities[object_idx].clone();
922
923            if subject != object {
924                triples.push((subject, relation, object));
925            }
926        }
927
928        triples
929    }
930
931    /// Quick performance measurement utility
932    pub fn quick_performance_test<F>(
933        name: &str,
934        iterations: usize,
935        operation: F,
936    ) -> std::time::Duration
937    where
938        F: Fn(),
939    {
940        let start = std::time::Instant::now();
941        for _ in 0..iterations {
942            operation();
943        }
944        let duration = start.elapsed();
945
946        println!(
947            "Performance test '{name}': {iterations} iterations in {duration:?} ({:.2} ops/sec)",
948            iterations as f64 / duration.as_secs_f64()
949        );
950
951        duration
952    }
953
954    // Revolutionary optimizer functions temporarily disabled - awaiting scirs2-core v0.2.0 API stabilization
955    /*
956    /// Create a revolutionary embedding optimizer with quantum focus
957    pub async fn create_quantum_optimizer() -> anyhow::Result<RevolutionaryEmbeddingOptimizer> {
958        RevolutionaryEmbeddingOptimizerFactory::create_quantum_focused()
959            .await
960            .map_err(|e| anyhow::anyhow!("Failed to create quantum optimizer: {}", e))
961    }
962
963    /// Create a revolutionary embedding optimizer with streaming focus
964    pub async fn create_streaming_optimizer() -> anyhow::Result<RevolutionaryEmbeddingOptimizer> {
965        RevolutionaryEmbeddingOptimizerFactory::create_streaming_focused()
966            .await
967            .map_err(|e| anyhow::anyhow!("Failed to create streaming optimizer: {}", e))
968    }
969
970    /// Create a revolutionary embedding optimizer with GPU focus
971    pub async fn create_gpu_optimizer() -> anyhow::Result<RevolutionaryEmbeddingOptimizer> {
972        RevolutionaryEmbeddingOptimizerFactory::create_gpu_focused()
973            .await
974            .map_err(|e| anyhow::anyhow!("Failed to create GPU optimizer: {}", e))
975    }
976
977    /// Create a balanced revolutionary embedding optimizer
978    pub async fn create_balanced_optimizer() -> anyhow::Result<RevolutionaryEmbeddingOptimizer> {
979        RevolutionaryEmbeddingOptimizerFactory::create_balanced()
980            .await
981            .map_err(|e| anyhow::anyhow!("Failed to create balanced optimizer: {}", e))
982    }
983    */
984
985    /// Quick performance test with revolutionary optimization
986    pub async fn quick_revolutionary_performance_test<F, Fut>(
987        name: &str,
988        iterations: usize,
989        async_operation: F,
990    ) -> std::time::Duration
991    where
992        F: Fn() -> Fut,
993        Fut: std::future::Future<Output = ()>,
994    {
995        let start = std::time::Instant::now();
996        for _ in 0..iterations {
997            async_operation().await;
998        }
999        let duration = start.elapsed();
1000
1001        println!(
1002            "Revolutionary performance test '{name}': {iterations} iterations in {duration:?} ({:.2} ops/sec)",
1003            iterations as f64 / duration.as_secs_f64()
1004        );
1005
1006        duration
1007    }
1008}
1009
#[cfg(test)]
mod quick_start_tests {
    use super::*;
    use crate::quick_start::*;

    #[test]
    fn test_create_simple_transe_model() {
        let transe = create_simple_transe_model();
        let cfg = transe.config();
        assert_eq!(cfg.dimensions, 128);
        assert_eq!(cfg.learning_rate, 0.01);
        assert_eq!(cfg.max_epochs, 100);
    }

    #[test]
    fn test_parse_triple_from_string() {
        let parsed = parse_triple_from_string(
            "http://example.org/alice http://example.org/knows http://example.org/bob",
        )
        .unwrap();
        assert_eq!(parsed.subject.iri, "http://example.org/alice");
        assert_eq!(parsed.predicate.iri, "http://example.org/knows");
        assert_eq!(parsed.object.iri, "http://example.org/bob");
    }

    #[test]
    fn test_parse_triple_from_string_invalid() {
        // Only two whitespace-separated parts: parsing must be rejected.
        let outcome =
            parse_triple_from_string("http://example.org/alice http://example.org/knows");
        assert!(outcome.is_err());
    }

    #[test]
    fn test_add_triples_from_strings() {
        let mut transe = create_simple_transe_model();
        let inputs = [
            "http://example.org/alice http://example.org/knows http://example.org/bob",
            "http://example.org/bob http://example.org/likes http://example.org/music",
        ];

        let added = add_triples_from_strings(&mut transe, &inputs).unwrap();
        assert_eq!(added, 2);
    }

    #[test]
    fn test_cosine_similarity() {
        let x = vec![1.0, 0.0, 0.0];

        // Identical direction -> similarity of 1.
        let same = vec![1.0, 0.0, 0.0];
        assert!((cosine_similarity(&x, &same).unwrap() - 1.0).abs() < 1e-10);

        // Orthogonal vectors -> similarity of 0.
        let ortho = vec![0.0, 1.0, 0.0];
        assert!((cosine_similarity(&x, &ortho).unwrap() - 0.0).abs() < 1e-10);

        // Mismatched dimensions must produce an error.
        let short = vec![1.0, 0.0];
        assert!(cosine_similarity(&x, &short).is_err());
    }

    #[test]
    fn test_generate_sample_kg_data() {
        let sample = generate_sample_kg_data(5, 3);
        assert!(!sample.is_empty());

        // Every component must use the synthetic example.org naming scheme,
        // and self-loops must have been filtered out.
        for (s, p, o) in &sample {
            assert!(s.starts_with("http://example.org/entity_"));
            assert!(p.starts_with("http://example.org/relation_"));
            assert!(o.starts_with("http://example.org/entity_"));
            assert_ne!(s, o);
        }
    }

    #[test]
    fn test_quick_performance_test() {
        let elapsed = quick_performance_test("test_operation", 100, || {
            // Trivial workload; we only care that timing completes.
            let _sum: i32 = (1..10).sum();
        });

        // Release builds can finish near-instantly, so just confirm a valid
        // duration came back rather than asserting any minimum elapsed time.
        let _nanos = elapsed.as_nanos();
    }
}