// spn_core/backend.rs
1//! Backend types for model management.
2//!
3//! These types are used by spn-ollama (and future backends like llama.cpp)
4//! to provide a unified interface for local model management.
5//!
6//! # Architecture
7//!
8//! ```text
9//! ┌─────────────────────────────────────────────────────────────────────────────┐
10//! │  spn-core (this module)                                                    │
11//! │  ├── PullProgress       Progress updates during model download              │
12//! │  ├── ModelInfo          Information about an installed model                │
13//! │  ├── RunningModel       Currently loaded model with GPU allocation          │
14//! │  ├── GpuInfo            GPU device information                              │
15//! │  ├── LoadConfig         Configuration for loading a model                   │
16//! │  └── BackendError       Error types for backend operations                  │
17//! └─────────────────────────────────────────────────────────────────────────────┘
18//! ```
19//!
20//! # Example
21//!
22//! ```
//! use spn_core::{LoadConfig, ModelInfo};
24//!
25//! // Create a load configuration
26//! let config = LoadConfig::default()
27//!     .with_gpu_layers(-1)  // Use all GPU layers
28//!     .with_context_size(4096);
29//!
30//! // Model info from backend
31//! let info = ModelInfo {
32//!     name: "llama3.2:7b".to_string(),
33//!     size: 4_000_000_000,
34//!     quantization: Some("Q4_K_M".to_string()),
35//!     parameters: Some("7B".to_string()),
36//!     digest: Some("sha256:abc123".to_string()),
37//! };
38//!
39//! assert!(info.size_gb() > 3.0);
40//! ```
41
42use std::fmt;
43
44#[cfg(feature = "serde")]
45use serde::{Deserialize, Serialize};
46
/// Progress information during model pull/download.
///
/// `Display` renders as `"<status>: <percent>%"`.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct PullProgress {
    /// Current status message (e.g., "pulling manifest", "downloading").
    pub status: String,
    /// Bytes completed.
    pub completed: u64,
    /// Total bytes to download.
    pub total: u64,
}

impl PullProgress {
    /// Create a new progress update.
    #[must_use]
    pub fn new(status: impl Into<String>, completed: u64, total: u64) -> Self {
        Self {
            status: status.into(),
            completed,
            total,
        }
    }

    /// Get progress as a percentage, clamped to the documented 0.0..=100.0 range.
    ///
    /// Returns 0.0 when `total` is 0 (size not yet known), avoiding division
    /// by zero. Fix: previously `completed > total` (e.g. a resumed or
    /// over-reported download) produced values above 100%, violating this
    /// method's stated contract; the result is now capped at 100.0.
    #[must_use]
    pub fn percent(&self) -> f64 {
        if self.total == 0 {
            0.0
        } else {
            ((self.completed as f64 / self.total as f64) * 100.0).min(100.0)
        }
    }

    /// Check if download is complete (total is known and fully downloaded).
    #[must_use]
    pub fn is_complete(&self) -> bool {
        self.total > 0 && self.completed >= self.total
    }
}

impl fmt::Display for PullProgress {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}: {:.1}%", self.status, self.percent())
    }
}
92
/// Information about an installed model.
///
/// `Display` renders as `"<name> (<human-readable size>)"`.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ModelInfo {
    /// Model name (e.g., "llama3.2:7b").
    pub name: String,
    /// Size in bytes.
    pub size: u64,
    /// Quantization level (e.g., "Q4_K_M", "Q8_0").
    pub quantization: Option<String>,
    /// Parameter count (e.g., "7B", "70B").
    pub parameters: Option<String>,
    /// Model digest/hash.
    pub digest: Option<String>,
}

impl ModelInfo {
    /// Get size in (decimal) gigabytes.
    #[must_use]
    pub fn size_gb(&self) -> f64 {
        self.size as f64 / 1_000_000_000.0
    }

    /// Get size as a human-readable string ("4.5 GB", "500 MB", "250 KB").
    ///
    /// Fix: previously every size below 1 GB was formatted in MB, so
    /// sub-megabyte sizes rendered as the misleading "0 MB"; those now fall
    /// back to KB. Sizes of 1 MB and above format exactly as before.
    #[must_use]
    pub fn size_human(&self) -> String {
        let gb = self.size_gb();
        if gb >= 1.0 {
            format!("{gb:.1} GB")
        } else if self.size >= 1_000_000 {
            format!("{:.0} MB", self.size as f64 / 1_000_000.0)
        } else {
            format!("{:.0} KB", self.size as f64 / 1_000.0)
        }
    }
}

impl fmt::Display for ModelInfo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{} ({})", self.name, self.size_human())
    }
}
133
/// Information about a currently running/loaded model.
///
/// `Display` renders the name, then the GPU list and VRAM usage when known.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct RunningModel {
    /// Model name.
    pub name: String,
    /// VRAM used in bytes (if available).
    pub vram_used: Option<u64>,
    /// GPU IDs this model is loaded on.
    pub gpu_ids: Vec<u32>,
}

impl RunningModel {
    /// VRAM used in (decimal) gigabytes, if the backend reported it.
    #[must_use]
    pub fn vram_gb(&self) -> Option<f64> {
        let bytes = self.vram_used?;
        Some(bytes as f64 / 1_000_000_000.0)
    }
}

impl fmt::Display for RunningModel {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(&self.name)?;
        if !self.gpu_ids.is_empty() {
            write!(f, " [GPU: {:?}]", self.gpu_ids)?;
        }
        match self.vram_gb() {
            Some(vram) => write!(f, " ({vram:.1} GB VRAM)"),
            None => Ok(()),
        }
    }
}
166
/// GPU device information.
///
/// `Display` renders as `"GPU <id>: <name> (<free>/<total> GB free)"`.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct GpuInfo {
    /// GPU device ID.
    pub id: u32,
    /// GPU name (e.g., "NVIDIA RTX 4090").
    pub name: String,
    /// Total memory in bytes.
    pub memory_total: u64,
    /// Free memory in bytes.
    pub memory_free: u64,
}

impl GpuInfo {
    /// Bytes per decimal gigabyte, shared by the `_gb` conversions below.
    const BYTES_PER_GB: f64 = 1_000_000_000.0;

    /// Total memory in (decimal) gigabytes.
    #[must_use]
    pub fn memory_total_gb(&self) -> f64 {
        self.memory_total as f64 / Self::BYTES_PER_GB
    }

    /// Free memory in (decimal) gigabytes.
    #[must_use]
    pub fn memory_free_gb(&self) -> f64 {
        self.memory_free as f64 / Self::BYTES_PER_GB
    }

    /// Percentage of memory in use; 0.0 when the total is unknown (zero).
    #[must_use]
    pub fn memory_used_percent(&self) -> f64 {
        if self.memory_total == 0 {
            return 0.0;
        }
        // saturating_sub guards against free > total from a racy snapshot.
        let used = self.memory_total.saturating_sub(self.memory_free);
        (used as f64 / self.memory_total as f64) * 100.0
    }
}

impl fmt::Display for GpuInfo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let free = self.memory_free_gb();
        let total = self.memory_total_gb();
        write!(f, "GPU {}: {} ({free:.1}/{total:.1} GB free)", self.id, self.name)
    }
}
218
/// Error types for backend operations.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum BackendError {
    /// Backend server is not running.
    NotRunning,
    /// Model not found in registry or locally.
    ModelNotFound(String),
    /// Model is already loaded.
    AlreadyLoaded(String),
    /// Insufficient GPU/system memory.
    InsufficientMemory,
    /// Network error during pull/API call.
    NetworkError(String),
    /// Process management error.
    ProcessError(String),
    /// Backend-specific error.
    BackendSpecific(String),
}

impl std::error::Error for BackendError {}

impl fmt::Display for BackendError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Unit variants have fixed text; payload variants append their detail.
        match self {
            Self::NotRunning => f.write_str("Backend server is not running"),
            Self::InsufficientMemory => f.write_str("Insufficient memory to load model"),
            Self::ModelNotFound(name) => write!(f, "Model not found: {name}"),
            Self::AlreadyLoaded(name) => write!(f, "Model already loaded: {name}"),
            Self::NetworkError(msg) => write!(f, "Network error: {msg}"),
            Self::ProcessError(msg) => write!(f, "Process error: {msg}"),
            Self::BackendSpecific(msg) => write!(f, "Backend error: {msg}"),
        }
    }
}
254
/// Configuration for loading a model.
///
/// Construct via [`LoadConfig::new`] / [`LoadConfig::default`] and customize
/// with the `with_*` builder methods defined on this type.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct LoadConfig {
    /// GPU IDs to use for this model (empty = auto).
    pub gpu_ids: Vec<u32>,
    /// Number of layers to offload to GPU (-1 = all, 0 = none/CPU-only;
    /// see [`LoadConfig::is_full_gpu`] / [`LoadConfig::is_cpu_only`]).
    pub gpu_layers: i32,
    /// Context size (token window). `None` = unset here; the consuming
    /// backend decides what that means.
    pub context_size: Option<u32>,
    /// Keep model loaded in memory (prevent unload).
    pub keep_alive: bool,
}
268
269// ============================================================================
270// Chat Types
271// ============================================================================
272
/// Role in a chat conversation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "serde", serde(rename_all = "lowercase"))]
pub enum ChatRole {
    /// System message (instructions).
    System,
    /// User message.
    User,
    /// Assistant response.
    Assistant,
}

impl fmt::Display for ChatRole {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Lowercase labels, matching the serde `rename_all = "lowercase"` form.
        let label = match self {
            Self::System => "system",
            Self::User => "user",
            Self::Assistant => "assistant",
        };
        f.write_str(label)
    }
}
295
296/// A message in a chat conversation.
297#[derive(Debug, Clone, PartialEq, Eq)]
298#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
299pub struct ChatMessage {
300    /// Role of the message sender.
301    pub role: ChatRole,
302    /// Content of the message.
303    pub content: String,
304}
305
306impl ChatMessage {
307    /// Create a new system message.
308    #[must_use]
309    pub fn system(content: impl Into<String>) -> Self {
310        Self {
311            role: ChatRole::System,
312            content: content.into(),
313        }
314    }
315
316    /// Create a new user message.
317    #[must_use]
318    pub fn user(content: impl Into<String>) -> Self {
319        Self {
320            role: ChatRole::User,
321            content: content.into(),
322        }
323    }
324
325    /// Create a new assistant message.
326    #[must_use]
327    pub fn assistant(content: impl Into<String>) -> Self {
328        Self {
329            role: ChatRole::Assistant,
330            content: content.into(),
331        }
332    }
333}
334
/// Options for chat completion.
///
/// All fields are optional; builders only set what you ask for.
#[derive(Debug, Clone, PartialEq, Default)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ChatOptions {
    /// Temperature for sampling (0.0 to 2.0).
    pub temperature: Option<f32>,
    /// Top-p (nucleus) sampling.
    pub top_p: Option<f32>,
    /// Top-k sampling.
    pub top_k: Option<u32>,
    /// Maximum tokens to generate.
    pub max_tokens: Option<u32>,
    /// Stop sequences.
    pub stop: Vec<String>,
    /// Seed for reproducibility.
    pub seed: Option<u64>,
}

impl ChatOptions {
    /// Create new chat options with every field unset.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Set temperature.
    #[must_use]
    pub fn with_temperature(self, temp: f32) -> Self {
        Self { temperature: Some(temp), ..self }
    }

    /// Set top-p sampling.
    #[must_use]
    pub fn with_top_p(self, top_p: f32) -> Self {
        Self { top_p: Some(top_p), ..self }
    }

    /// Set top-k sampling.
    #[must_use]
    pub fn with_top_k(self, top_k: u32) -> Self {
        Self { top_k: Some(top_k), ..self }
    }

    /// Set maximum tokens.
    #[must_use]
    pub fn with_max_tokens(self, max: u32) -> Self {
        Self { max_tokens: Some(max), ..self }
    }

    /// Add a stop sequence; repeated calls accumulate.
    #[must_use]
    pub fn with_stop(mut self, stop: impl Into<String>) -> Self {
        self.stop.push(stop.into());
        self
    }

    /// Set seed for reproducibility.
    #[must_use]
    pub fn with_seed(self, seed: u64) -> Self {
        Self { seed: Some(seed), ..self }
    }
}
402
403/// Response from a chat completion.
404#[derive(Debug, Clone, PartialEq)]
405#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
406pub struct ChatResponse {
407    /// The assistant's response message.
408    pub message: ChatMessage,
409    /// Whether the response is complete (not streaming).
410    pub done: bool,
411    /// Total duration in nanoseconds.
412    pub total_duration: Option<u64>,
413    /// Tokens generated.
414    pub eval_count: Option<u32>,
415    /// Prompt tokens.
416    pub prompt_eval_count: Option<u32>,
417}
418
419impl ChatResponse {
420    /// Get the response content.
421    #[must_use]
422    pub fn content(&self) -> &str {
423        &self.message.content
424    }
425
426    /// Get tokens per second (if metrics available).
427    #[must_use]
428    pub fn tokens_per_second(&self) -> Option<f64> {
429        match (self.eval_count, self.total_duration) {
430            (Some(count), Some(duration)) if duration > 0 => {
431                Some(count as f64 / (duration as f64 / 1_000_000_000.0))
432            }
433            _ => None,
434        }
435    }
436}
437
438// ============================================================================
439// Embedding Types
440// ============================================================================
441
/// Response from an embedding request.
#[derive(Debug, Clone, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct EmbeddingResponse {
    /// The embedding vector.
    pub embedding: Vec<f32>,
    /// Total duration in nanoseconds.
    pub total_duration: Option<u64>,
    /// Number of tokens in the input.
    pub prompt_eval_count: Option<u32>,
}

impl EmbeddingResponse {
    /// Get the dimension (length) of the embedding vector.
    #[must_use]
    pub fn dimension(&self) -> usize {
        self.embedding.len()
    }

    /// Calculate cosine similarity with another embedding.
    ///
    /// Returns 0.0 when the vectors have different dimensions or when
    /// either vector has zero magnitude.
    #[must_use]
    pub fn cosine_similarity(&self, other: &Self) -> f32 {
        if self.embedding.len() != other.embedding.len() {
            return 0.0;
        }

        // Single pass: accumulate dot product and both squared norms together.
        let mut dot = 0.0_f32;
        let mut sq_a = 0.0_f32;
        let mut sq_b = 0.0_f32;
        for (a, b) in self.embedding.iter().zip(&other.embedding) {
            dot += a * b;
            sq_a += a * a;
            sq_b += b * b;
        }

        let norm_a = sq_a.sqrt();
        let norm_b = sq_b.sqrt();
        if norm_a == 0.0 || norm_b == 0.0 {
            0.0
        } else {
            dot / (norm_a * norm_b)
        }
    }
}
485
486impl Default for LoadConfig {
487    fn default() -> Self {
488        Self {
489            gpu_ids: Vec::new(),
490            gpu_layers: -1, // All layers on GPU by default
491            context_size: None,
492            keep_alive: false,
493        }
494    }
495}
496
497impl LoadConfig {
498    /// Create a new load configuration.
499    #[must_use]
500    pub fn new() -> Self {
501        Self::default()
502    }
503
504    /// Set specific GPU IDs.
505    #[must_use]
506    pub fn with_gpus(mut self, gpu_ids: Vec<u32>) -> Self {
507        self.gpu_ids = gpu_ids;
508        self
509    }
510
511    /// Set GPU layers (-1 = all, 0 = CPU only).
512    #[must_use]
513    pub fn with_gpu_layers(mut self, layers: i32) -> Self {
514        self.gpu_layers = layers;
515        self
516    }
517
518    /// Set context size.
519    #[must_use]
520    pub fn with_context_size(mut self, size: u32) -> Self {
521        self.context_size = Some(size);
522        self
523    }
524
525    /// Set keep alive.
526    #[must_use]
527    pub fn with_keep_alive(mut self, keep: bool) -> Self {
528        self.keep_alive = keep;
529        self
530    }
531
532    /// Check if this is a CPU-only configuration.
533    #[must_use]
534    pub fn is_cpu_only(&self) -> bool {
535        self.gpu_layers == 0
536    }
537
538    /// Check if using all GPU layers.
539    #[must_use]
540    pub fn is_full_gpu(&self) -> bool {
541        self.gpu_layers < 0
542    }
543}
544
#[cfg(test)]
mod tests {
    use super::*;

    // --- PullProgress ---------------------------------------------------

    #[test]
    fn test_pull_progress() {
        let progress = PullProgress::new("downloading", 500, 1000);
        assert_eq!(progress.percent(), 50.0);
        assert!(!progress.is_complete());

        let complete = PullProgress::new("complete", 1000, 1000);
        assert!(complete.is_complete());
    }

    #[test]
    fn test_pull_progress_display() {
        let progress = PullProgress::new("pulling", 750, 1000);
        assert_eq!(progress.to_string(), "pulling: 75.0%");
    }

    // total == 0 means "size unknown": percent must not divide by zero.
    #[test]
    fn test_pull_progress_zero_total() {
        let progress = PullProgress::new("starting", 0, 0);
        assert_eq!(progress.percent(), 0.0);
        assert!(!progress.is_complete());
    }

    // --- ModelInfo ------------------------------------------------------

    #[test]
    fn test_model_info_size() {
        let info = ModelInfo {
            name: "llama3.2:7b".to_string(),
            size: 4_500_000_000,
            quantization: Some("Q4_K_M".to_string()),
            parameters: Some("7B".to_string()),
            digest: None,
        };

        assert!((info.size_gb() - 4.5).abs() < 0.01);
        assert_eq!(info.size_human(), "4.5 GB");
    }

    #[test]
    fn test_model_info_display() {
        let info = ModelInfo {
            name: "test:latest".to_string(),
            size: 500_000_000,
            quantization: None,
            parameters: None,
            digest: None,
        };

        assert!(info.to_string().contains("test:latest"));
        assert!(info.to_string().contains("500 MB"));
    }

    // --- RunningModel / GpuInfo -----------------------------------------

    #[test]
    fn test_running_model() {
        let model = RunningModel {
            name: "llama3.2".to_string(),
            vram_used: Some(4_000_000_000),
            gpu_ids: vec![0],
        };

        assert!((model.vram_gb().unwrap() - 4.0).abs() < 0.01);
        assert!(model.to_string().contains("llama3.2"));
        assert!(model.to_string().contains("GPU"));
    }

    #[test]
    fn test_gpu_info() {
        let gpu = GpuInfo {
            id: 0,
            name: "RTX 4090".to_string(),
            memory_total: 24_000_000_000,
            memory_free: 20_000_000_000,
        };

        assert!((gpu.memory_total_gb() - 24.0).abs() < 0.01);
        assert!((gpu.memory_free_gb() - 20.0).abs() < 0.01);
        // 4 GB of 24 GB used => ~16.67%.
        assert!((gpu.memory_used_percent() - 16.67).abs() < 0.5);
    }

    // --- BackendError ---------------------------------------------------

    #[test]
    fn test_backend_error_display() {
        let err = BackendError::NotRunning;
        assert!(err.to_string().contains("not running"));

        let err = BackendError::ModelNotFound("test".to_string());
        assert!(err.to_string().contains("test"));
    }

    // --- LoadConfig -----------------------------------------------------

    #[test]
    fn test_load_config_default() {
        let config = LoadConfig::default();
        assert!(config.gpu_ids.is_empty());
        assert_eq!(config.gpu_layers, -1);
        assert!(config.is_full_gpu());
        assert!(!config.is_cpu_only());
    }

    #[test]
    fn test_load_config_builder() {
        let config = LoadConfig::new()
            .with_gpus(vec![0, 1])
            .with_gpu_layers(32)
            .with_context_size(8192)
            .with_keep_alive(true);

        assert_eq!(config.gpu_ids, vec![0, 1]);
        assert_eq!(config.gpu_layers, 32);
        assert_eq!(config.context_size, Some(8192));
        assert!(config.keep_alive);
        // A positive, partial layer count is neither CPU-only nor full-GPU.
        assert!(!config.is_cpu_only());
        assert!(!config.is_full_gpu());
    }

    #[test]
    fn test_load_config_cpu_only() {
        let config = LoadConfig::new().with_gpu_layers(0);
        assert!(config.is_cpu_only());
        assert!(!config.is_full_gpu());
    }

    // --- Chat types -----------------------------------------------------

    #[test]
    fn test_chat_role_display() {
        assert_eq!(ChatRole::System.to_string(), "system");
        assert_eq!(ChatRole::User.to_string(), "user");
        assert_eq!(ChatRole::Assistant.to_string(), "assistant");
    }

    #[test]
    fn test_chat_message_constructors() {
        let system = ChatMessage::system("You are helpful");
        assert_eq!(system.role, ChatRole::System);
        assert_eq!(system.content, "You are helpful");

        let user = ChatMessage::user("Hello");
        assert_eq!(user.role, ChatRole::User);

        let assistant = ChatMessage::assistant("Hi there!");
        assert_eq!(assistant.role, ChatRole::Assistant);
    }

    #[test]
    fn test_chat_options_builder() {
        let options = ChatOptions::new()
            .with_temperature(0.7)
            .with_top_p(0.9)
            .with_top_k(40)
            .with_max_tokens(100)
            .with_stop("END")
            .with_seed(42);

        assert_eq!(options.temperature, Some(0.7));
        assert_eq!(options.top_p, Some(0.9));
        assert_eq!(options.top_k, Some(40));
        assert_eq!(options.max_tokens, Some(100));
        assert_eq!(options.stop, vec!["END"]);
        assert_eq!(options.seed, Some(42));
    }

    #[test]
    fn test_chat_response_content() {
        let response = ChatResponse {
            message: ChatMessage::assistant("Hello!"),
            done: true,
            total_duration: Some(1_000_000_000),
            eval_count: Some(10),
            prompt_eval_count: Some(5),
        };

        assert_eq!(response.content(), "Hello!");
        assert!(response.done);
    }

    // 100 tokens over 2 seconds => 50 tokens/sec.
    #[test]
    fn test_chat_response_tokens_per_second() {
        let response = ChatResponse {
            message: ChatMessage::assistant("Test"),
            done: true,
            total_duration: Some(2_000_000_000), // 2 seconds
            eval_count: Some(100),
            prompt_eval_count: None,
        };

        let tps = response.tokens_per_second().unwrap();
        assert!((tps - 50.0).abs() < 0.1);
    }

    // --- Embeddings -----------------------------------------------------

    #[test]
    fn test_embedding_response_dimension() {
        let response = EmbeddingResponse {
            embedding: vec![0.1, 0.2, 0.3, 0.4],
            total_duration: None,
            prompt_eval_count: None,
        };

        assert_eq!(response.dimension(), 4);
    }

    #[test]
    fn test_embedding_cosine_similarity() {
        let a = EmbeddingResponse {
            embedding: vec![1.0, 0.0, 0.0],
            total_duration: None,
            prompt_eval_count: None,
        };

        let b = EmbeddingResponse {
            embedding: vec![1.0, 0.0, 0.0],
            total_duration: None,
            prompt_eval_count: None,
        };

        // Identical vectors should have similarity of 1.0
        assert!((a.cosine_similarity(&b) - 1.0).abs() < 0.001);

        let c = EmbeddingResponse {
            embedding: vec![0.0, 1.0, 0.0],
            total_duration: None,
            prompt_eval_count: None,
        };

        // Orthogonal vectors should have similarity of 0.0
        assert!((a.cosine_similarity(&c)).abs() < 0.001);
    }

    // Mismatched dimensions are defined to yield 0.0 rather than panic.
    #[test]
    fn test_embedding_cosine_similarity_different_dimensions() {
        let a = EmbeddingResponse {
            embedding: vec![1.0, 0.0],
            total_duration: None,
            prompt_eval_count: None,
        };

        let b = EmbeddingResponse {
            embedding: vec![1.0, 0.0, 0.0],
            total_duration: None,
            prompt_eval_count: None,
        };

        // Different dimensions should return 0.0
        assert_eq!(a.cosine_similarity(&b), 0.0);
    }
}