kcl_lib/engine/mod.rs

//! Functions for managing engine communications.
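//!
//! The `EngineManager` trait below batches modeling commands and flushes them to the
//! engine over a websocket connection. A rough usage sketch (illustrative only; the
//! `engine`, `id_generator`, `source_range`, and `cmd` names are assumed to be in scope,
//! with `engine` coming from one of the `conn*` implementations in this module):
//!
//! ```ignore
//! // Queue a command without sending it yet, then flush the batch.
//! engine
//!     .batch_modeling_cmd(id_generator.next_uuid(), source_range, &cmd)
//!     .await?;
//! engine.flush_batch(false, source_range).await?;
//! ```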

pub mod async_tasks;
#[cfg(not(target_arch = "wasm32"))]
#[cfg(feature = "engine")]
pub mod conn;
pub mod conn_mock;
#[cfg(target_arch = "wasm32")]
#[cfg(feature = "engine")]
pub mod conn_wasm;

use std::{
    collections::HashMap,
    sync::{
        Arc,
        atomic::{AtomicUsize, Ordering},
    },
};

pub use async_tasks::AsyncTasks;
use indexmap::IndexMap;
use kcmc::{
    ModelingCmd, each_cmd as mcmd,
    length_unit::LengthUnit,
    ok_response::OkModelingCmdResponse,
    shared::Color,
    websocket::{
        BatchResponse, ModelingBatch, ModelingCmdReq, ModelingSessionData, OkWebSocketResponseData, WebSocketRequest,
        WebSocketResponse,
    },
};
use kittycad_modeling_cmds as kcmc;
use parse_display::{Display, FromStr};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use tokio::sync::RwLock;
use uuid::Uuid;
use web_time::Instant;

use crate::{
    SourceRange,
    errors::{KclError, KclErrorDetails},
    execution::{DefaultPlanes, IdGenerator, PlaneInfo, Point3d, types::UnitLen},
};

lazy_static::lazy_static! {
    pub static ref GRID_OBJECT_ID: uuid::Uuid = uuid::Uuid::parse_str("cfa78409-653d-4c26-96f1-7c45fb784840").unwrap();

    pub static ref GRID_SCALE_TEXT_OBJECT_ID: uuid::Uuid = uuid::Uuid::parse_str("10782f33-f588-4668-8bcd-040502d26590").unwrap();

    pub static ref DEFAULT_PLANE_INFO: IndexMap<PlaneName, PlaneInfo> = IndexMap::from([
        (PlaneName::Xy, PlaneInfo {
            origin: Point3d::new(0.0, 0.0, 0.0, UnitLen::Mm),
            x_axis: Point3d::new(1.0, 0.0, 0.0, UnitLen::Unknown),
            y_axis: Point3d::new(0.0, 1.0, 0.0, UnitLen::Unknown),
        }),
        (PlaneName::NegXy, PlaneInfo {
            origin: Point3d::new(0.0, 0.0, 0.0, UnitLen::Mm),
            x_axis: Point3d::new(-1.0, 0.0, 0.0, UnitLen::Unknown),
            y_axis: Point3d::new(0.0, 1.0, 0.0, UnitLen::Unknown),
        }),
        (PlaneName::Xz, PlaneInfo {
            origin: Point3d::new(0.0, 0.0, 0.0, UnitLen::Mm),
            x_axis: Point3d::new(1.0, 0.0, 0.0, UnitLen::Unknown),
            y_axis: Point3d::new(0.0, 0.0, 1.0, UnitLen::Unknown),
        }),
        (PlaneName::NegXz, PlaneInfo {
            origin: Point3d::new(0.0, 0.0, 0.0, UnitLen::Mm),
            x_axis: Point3d::new(-1.0, 0.0, 0.0, UnitLen::Unknown),
            y_axis: Point3d::new(0.0, 0.0, 1.0, UnitLen::Unknown),
        }),
        (PlaneName::Yz, PlaneInfo {
            origin: Point3d::new(0.0, 0.0, 0.0, UnitLen::Mm),
            x_axis: Point3d::new(0.0, 1.0, 0.0, UnitLen::Unknown),
            y_axis: Point3d::new(0.0, 0.0, 1.0, UnitLen::Unknown),
        }),
        (PlaneName::NegYz, PlaneInfo {
            origin: Point3d::new(0.0, 0.0, 0.0, UnitLen::Mm),
            x_axis: Point3d::new(0.0, -1.0, 0.0, UnitLen::Unknown),
            y_axis: Point3d::new(0.0, 0.0, 1.0, UnitLen::Unknown),
        }),
    ]);
}

#[derive(Default, Debug)]
pub struct EngineStats {
    pub commands_batched: AtomicUsize,
    pub batches_sent: AtomicUsize,
}

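// `AtomicUsize` is not `Clone`, so clone manually by snapshotting the current counter values.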
impl Clone for EngineStats {
    fn clone(&self) -> Self {
        Self {
            commands_batched: AtomicUsize::new(self.commands_batched.load(Ordering::Relaxed)),
            batches_sent: AtomicUsize::new(self.batches_sent.load(Ordering::Relaxed)),
        }
    }
}

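/// Interface over an engine connection (native websocket, wasm, or mock).
///
/// Implementations hold two command queues: a regular batch that is flushed whenever a
/// response is needed, and a `batch_end` queue that is only flushed at the very end of
/// the file (see `flush_batch`). Async commands are tracked separately via
/// `ids_of_async_commands` and `async_tasks`.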
#[async_trait::async_trait]
pub trait EngineManager: std::fmt::Debug + Send + Sync + 'static {
    /// Get the batch of commands to be sent to the engine.
    fn batch(&self) -> Arc<RwLock<Vec<(WebSocketRequest, SourceRange)>>>;

    /// Get the batch of end commands to be sent to the engine.
    fn batch_end(&self) -> Arc<RwLock<IndexMap<uuid::Uuid, (WebSocketRequest, SourceRange)>>>;

    /// Get the command responses from the engine.
    fn responses(&self) -> Arc<RwLock<IndexMap<Uuid, WebSocketResponse>>>;

    /// Get the ids of the async commands we are waiting for.
    fn ids_of_async_commands(&self) -> Arc<RwLock<IndexMap<Uuid, SourceRange>>>;

    /// Get the async tasks we are waiting for.
    fn async_tasks(&self) -> AsyncTasks;

    /// Take the batch of commands that have accumulated so far and clear them.
    async fn take_batch(&self) -> Vec<(WebSocketRequest, SourceRange)> {
        std::mem::take(&mut *self.batch().write().await)
    }

    /// Take the batch of end commands that have accumulated so far and clear them.
    async fn take_batch_end(&self) -> IndexMap<Uuid, (WebSocketRequest, SourceRange)> {
        std::mem::take(&mut *self.batch_end().write().await)
    }

    /// Take the ids of async commands that have accumulated so far and clear them.
    async fn take_ids_of_async_commands(&self) -> IndexMap<Uuid, SourceRange> {
        std::mem::take(&mut *self.ids_of_async_commands().write().await)
    }

    /// Take the responses that have accumulated so far and clear them.
    async fn take_responses(&self) -> IndexMap<Uuid, WebSocketResponse> {
        std::mem::take(&mut *self.responses().write().await)
    }

    /// Get the default planes.
    fn get_default_planes(&self) -> Arc<RwLock<Option<DefaultPlanes>>>;

    fn stats(&self) -> &EngineStats;

    /// Get the default planes, creating them if they don't exist.
    async fn default_planes(
        &self,
        id_generator: &mut IdGenerator,
        source_range: SourceRange,
    ) -> Result<DefaultPlanes, KclError> {
        {
            let opt = self.get_default_planes().read().await.as_ref().cloned();
            if let Some(planes) = opt {
                return Ok(planes);
            }
        } // drop the read lock

        let new_planes = self.new_default_planes(id_generator, source_range).await?;
        *self.get_default_planes().write().await = Some(new_planes.clone());

        Ok(new_planes)
    }

    /// Helpers to be called after clearing a scene.
    /// (These really only apply to wasm for now).
    async fn clear_scene_post_hook(
        &self,
        id_generator: &mut IdGenerator,
        source_range: SourceRange,
    ) -> Result<(), crate::errors::KclError>;

    async fn clear_queues(&self) {
        self.batch().write().await.clear();
        self.batch_end().write().await.clear();
        self.ids_of_async_commands().write().await.clear();
        self.async_tasks().clear().await;
    }

    /// Fetch debug information from the peer.
    async fn fetch_debug(&self) -> Result<(), crate::errors::KclError>;

    /// Get any debug information (if requested)
    async fn get_debug(&self) -> Option<OkWebSocketResponseData>;

    /// Send a modeling command and do not wait for the response message.
    async fn inner_fire_modeling_cmd(
        &self,
        id: uuid::Uuid,
        source_range: SourceRange,
        cmd: WebSocketRequest,
        id_to_source_range: HashMap<Uuid, SourceRange>,
    ) -> Result<(), crate::errors::KclError>;

    /// Send a modeling command and wait for the response message.
    async fn inner_send_modeling_cmd(
        &self,
        id: uuid::Uuid,
        source_range: SourceRange,
        cmd: WebSocketRequest,
        id_to_source_range: HashMap<uuid::Uuid, SourceRange>,
    ) -> Result<kcmc::websocket::WebSocketResponse, crate::errors::KclError>;

    async fn clear_scene(
        &self,
        id_generator: &mut IdGenerator,
        source_range: SourceRange,
    ) -> Result<(), crate::errors::KclError> {
        // Clear any batched commands leftover from previous scenes.
        self.clear_queues().await;

        self.batch_modeling_cmd(
            id_generator.next_uuid(),
            source_range,
            &ModelingCmd::SceneClearAll(mcmd::SceneClearAll::default()),
        )
        .await?;

        // Flush the batch queue, so clear is run right away.
        // Otherwise the hooks below won't work.
        self.flush_batch(false, source_range).await?;

        // Do the after clear scene hook.
        self.clear_scene_post_hook(id_generator, source_range).await?;

        Ok(())
    }

    /// Ensure a specific async command has been completed.
    async fn ensure_async_command_completed(
        &self,
        id: uuid::Uuid,
        source_range: Option<SourceRange>,
    ) -> Result<OkWebSocketResponseData, KclError> {
        let source_range = if let Some(source_range) = source_range {
            source_range
        } else {
            // Look it up if we don't have it.
            self.ids_of_async_commands()
                .read()
                .await
                .get(&id)
                .cloned()
                .unwrap_or_default()
        };

        let current_time = Instant::now();
        while current_time.elapsed().as_secs() < 60 {
            let responses = self.responses().read().await.clone();
            let Some(resp) = responses.get(&id) else {
                // Yield to the event loop so that we don’t block the UI thread.
                // No seriously WE DO NOT WANT TO PAUSE THE WHOLE APP ON THE JS SIDE.
                #[cfg(target_arch = "wasm32")]
                {
                    let duration = web_time::Duration::from_millis(1);
                    wasm_timer::Delay::new(duration).await.map_err(|err| {
                        KclError::new_internal(KclErrorDetails::new(
                            format!("Failed to sleep: {:?}", err),
                            vec![source_range],
                        ))
                    })?;
                }
                #[cfg(not(target_arch = "wasm32"))]
                tokio::task::yield_now().await;
                continue;
            };

            // If the response is an error, parsing will surface it via `?`;
            // otherwise return the parsed response.
            let response = self.parse_websocket_response(resp.clone(), source_range)?;
            return Ok(response);
        }

        Err(KclError::new_engine(KclErrorDetails::new(
            "async command timed out".to_string(),
            vec![source_range],
        )))
    }

    /// Ensure ALL async commands have been completed.
    async fn ensure_async_commands_completed(&self) -> Result<(), KclError> {
        // Check if all async commands have been completed.
        let ids = self.take_ids_of_async_commands().await;

        // Try to get them from the responses.
        for (id, source_range) in ids {
            self.ensure_async_command_completed(id, Some(source_range)).await?;
        }

        // Make sure we check for all async tasks as well.
        // We ignore the error here because, if a model fillets an edge we previously
        // called something on, that edge may no longer exist. In that case the artifact
        // graph won't care that it's gone either, since you can't select it anymore
        // anyway.
        if let Err(err) = self.async_tasks().join_all().await {
            crate::log::logln!(
                "Error waiting for async tasks (this is typically fine and just means that an edge became something else): {:?}",
                err
            );
        }

        // Flush the batch to make sure nothing remains.
        self.flush_batch(true, SourceRange::default()).await?;

        Ok(())
    }

    /// Set the visibility of edges.
    async fn set_edge_visibility(
        &self,
        visible: bool,
        source_range: SourceRange,
        id_generator: &mut IdGenerator,
    ) -> Result<(), crate::errors::KclError> {
        self.batch_modeling_cmd(
            id_generator.next_uuid(),
            source_range,
            &ModelingCmd::from(mcmd::EdgeLinesVisible { hidden: !visible }),
        )
        .await?;

        Ok(())
    }

    /// Re-run the command to apply the settings.
    async fn reapply_settings(
        &self,
        settings: &crate::ExecutorSettings,
        source_range: SourceRange,
        id_generator: &mut IdGenerator,
        grid_scale_unit: GridScaleBehavior,
    ) -> Result<(), crate::errors::KclError> {
        // Set the edge visibility.
        self.set_edge_visibility(settings.highlight_edges, source_range, id_generator)
            .await?;

        // Send the command to show or hide the grid.
        self.modify_grid(!settings.show_grid, grid_scale_unit, source_range, id_generator)
            .await?;

        // We do not have commands for changing SSAO on the fly.

        // Flush the batch queue, so the settings are applied right away.
        self.flush_batch(false, source_range).await?;

        Ok(())
    }

    /// Add a modeling command to the batch but don't fire it right away.
    async fn batch_modeling_cmd(
        &self,
        id: uuid::Uuid,
        source_range: SourceRange,
        cmd: &ModelingCmd,
    ) -> Result<(), crate::errors::KclError> {
        let req = WebSocketRequest::ModelingCmdReq(ModelingCmdReq {
            cmd: cmd.clone(),
            cmd_id: id.into(),
        });

        // Add cmd to the batch.
        self.batch().write().await.push((req, source_range));
        self.stats().commands_batched.fetch_add(1, Ordering::Relaxed);

        Ok(())
    }

    /// Add a vector of modeling commands to the batch but don't fire them right away.
    /// This forces them all to be added together, in order. When we are running things
    /// in parallel this prevents race conditions that could arise if specific commands
    /// ran before others.
    async fn batch_modeling_cmds(
        &self,
        source_range: SourceRange,
        cmds: &[ModelingCmdReq],
    ) -> Result<(), crate::errors::KclError> {
        // Add cmds to the batch.
        let mut extended_cmds = Vec::with_capacity(cmds.len());
        for cmd in cmds {
            extended_cmds.push((WebSocketRequest::ModelingCmdReq(cmd.clone()), source_range));
        }
        self.stats()
            .commands_batched
            .fetch_add(extended_cmds.len(), Ordering::Relaxed);
        self.batch().write().await.extend(extended_cmds);

        Ok(())
    }

    /// Add a command to the batch that needs to be executed at the very end.
    /// This is for things like fillets or chamfers where, if we execute too soon, the
    /// engine will eat the ID and we can't reference it from other commands.
    async fn batch_end_cmd(
        &self,
        id: uuid::Uuid,
        source_range: SourceRange,
        cmd: &ModelingCmd,
    ) -> Result<(), crate::errors::KclError> {
        let req = WebSocketRequest::ModelingCmdReq(ModelingCmdReq {
            cmd: cmd.clone(),
            cmd_id: id.into(),
        });

        // Add cmd to the batch end.
        self.batch_end().write().await.insert(id, (req, source_range));
        self.stats().commands_batched.fetch_add(1, Ordering::Relaxed);
        Ok(())
    }

    /// Send the modeling cmd and wait for the response.
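    ///
    /// This flushes anything already queued via `batch_modeling_cmd` along with `cmd`
    /// itself. A rough usage sketch (illustrative only; `engine`, `id_generator`,
    /// `source_range`, `setup_cmd`, and `cmd` are assumed to be in scope):
    ///
    /// ```ignore
    /// // Queue commands cheaply, then force a round trip when a response is needed.
    /// engine.batch_modeling_cmd(id_generator.next_uuid(), source_range, &setup_cmd).await?;
    /// let resp = engine
    ///     .send_modeling_cmd(id_generator.next_uuid(), source_range, &cmd)
    ///     .await?;
    /// ```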
    async fn send_modeling_cmd(
        &self,
        id: uuid::Uuid,
        source_range: SourceRange,
        cmd: &ModelingCmd,
    ) -> Result<OkWebSocketResponseData, crate::errors::KclError> {
        let mut requests = self.take_batch().await.clone();

        // Add the command to the batch.
        requests.push((
            WebSocketRequest::ModelingCmdReq(ModelingCmdReq {
                cmd: cmd.clone(),
                cmd_id: id.into(),
            }),
            source_range,
        ));
        self.stats().commands_batched.fetch_add(1, Ordering::Relaxed);

        // Flush the batch queue.
        self.run_batch(requests, source_range).await
    }

    /// Send the modeling cmd async and don't wait for the response.
    /// Add it to our list of async commands.
    async fn async_modeling_cmd(
        &self,
        id: uuid::Uuid,
        source_range: SourceRange,
        cmd: &ModelingCmd,
    ) -> Result<(), crate::errors::KclError> {
        // Add the command ID to the list of async commands.
        self.ids_of_async_commands().write().await.insert(id, source_range);

        // Fire off the command now, but don't wait for the response, we don't care about it.
        self.inner_fire_modeling_cmd(
            id,
            source_range,
            WebSocketRequest::ModelingCmdReq(ModelingCmdReq {
                cmd: cmd.clone(),
                cmd_id: id.into(),
            }),
            HashMap::from([(id, source_range)]),
        )
        .await?;

        Ok(())
    }

    /// Run the batch for the specific commands.
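    ///
    /// If only one request was queued it is sent as a plain `ModelingCmdReq`; otherwise
    /// the requests are wrapped in a single `ModelingCmdBatchReq` and the response for
    /// the last command in the batch is returned.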
    async fn run_batch(
        &self,
        orig_requests: Vec<(WebSocketRequest, SourceRange)>,
        source_range: SourceRange,
    ) -> Result<OkWebSocketResponseData, crate::errors::KclError> {
        // Return early if we have no commands to send.
        if orig_requests.is_empty() {
            return Ok(OkWebSocketResponseData::Modeling {
                modeling_response: OkModelingCmdResponse::Empty {},
            });
        }

        let requests: Vec<ModelingCmdReq> = orig_requests
            .iter()
            .filter_map(|(val, _)| match val {
                WebSocketRequest::ModelingCmdReq(ModelingCmdReq { cmd, cmd_id }) => Some(ModelingCmdReq {
                    cmd: cmd.clone(),
                    cmd_id: *cmd_id,
                }),
                _ => None,
            })
            .collect();

        let batched_requests = WebSocketRequest::ModelingCmdBatchReq(ModelingBatch {
            requests,
            batch_id: uuid::Uuid::new_v4().into(),
            responses: true,
        });

        let final_req = if orig_requests.len() == 1 {
            // We can unwrap here because we know the batch has only one element.
            orig_requests.first().unwrap().0.clone()
        } else {
            batched_requests
        };

        // Create the map of original command IDs to source range.
        // This is for the wasm side, kurt needs it for selections.
        let mut id_to_source_range = HashMap::new();
        for (req, range) in orig_requests.iter() {
            match req {
                WebSocketRequest::ModelingCmdReq(ModelingCmdReq { cmd: _, cmd_id }) => {
                    id_to_source_range.insert(Uuid::from(*cmd_id), *range);
                }
                _ => {
                    return Err(KclError::new_engine(KclErrorDetails::new(
                        format!("The request is not a modeling command: {req:?}"),
                        vec![*range],
                    )));
                }
            }
        }

        self.stats().batches_sent.fetch_add(1, Ordering::Relaxed);

        // We pop off the responses to clean up our mappings.
        match final_req {
            WebSocketRequest::ModelingCmdBatchReq(ModelingBatch {
                ref requests,
                batch_id,
                responses: _,
            }) => {
                // Get the last command ID.
                let last_id = requests.last().unwrap().cmd_id;
                let ws_resp = self
                    .inner_send_modeling_cmd(batch_id.into(), source_range, final_req, id_to_source_range.clone())
                    .await?;
                let response = self.parse_websocket_response(ws_resp, source_range)?;

                // If we have a batch response, we want to return the specific id we care about.
                if let OkWebSocketResponseData::ModelingBatch { responses } = response {
                    let responses = responses.into_iter().map(|(k, v)| (Uuid::from(k), v)).collect();
                    self.parse_batch_responses(last_id.into(), id_to_source_range, responses)
                } else {
                    // We should never get here.
                    Err(KclError::new_engine(KclErrorDetails::new(
                        format!("Failed to get batch response: {response:?}"),
                        vec![source_range],
                    )))
                }
            }
            WebSocketRequest::ModelingCmdReq(ModelingCmdReq { cmd: _, cmd_id }) => {
                // You might wonder why we can't just use the source range passed to this
                // function. It actually matters: if this is the only command left in the
                // batch and we've reached the end of the file, the flush is triggered with
                // the default (end-of-file) source range rather than the range of the
                // original request. We need the original request's source range in case
                // the engine returns an error.
                let source_range = id_to_source_range.get(cmd_id.as_ref()).cloned().ok_or_else(|| {
                    KclError::new_engine(KclErrorDetails::new(
                        format!("Failed to get source range for command ID: {cmd_id:?}"),
                        vec![],
                    ))
                })?;
                let ws_resp = self
                    .inner_send_modeling_cmd(cmd_id.into(), source_range, final_req, id_to_source_range)
                    .await?;
                self.parse_websocket_response(ws_resp, source_range)
            }
            _ => Err(KclError::new_engine(KclErrorDetails::new(
                format!("The final request is not a modeling command: {final_req:?}"),
                vec![source_range],
            ))),
        }
    }

    /// Force flush the batch queue.
    async fn flush_batch(
        &self,
        // Whether or not to flush the end commands as well.
        // We only do this at the very end of the file.
        batch_end: bool,
        source_range: SourceRange,
    ) -> Result<OkWebSocketResponseData, crate::errors::KclError> {
        let all_requests = if batch_end {
            let mut requests = self.take_batch().await.clone();
            requests.extend(self.take_batch_end().await.values().cloned());
            requests
        } else {
            self.take_batch().await.clone()
        };

        self.run_batch(all_requests, source_range).await
    }

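    /// Create a default plane in the engine (hidden by default) and, if a color is
    /// given, batch a follow-up command to set its color.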
    async fn make_default_plane(
        &self,
        plane_id: uuid::Uuid,
        info: &PlaneInfo,
        color: Option<Color>,
        source_range: SourceRange,
        id_generator: &mut IdGenerator,
    ) -> Result<uuid::Uuid, KclError> {
        // Create new default planes.
        let default_size = 100.0;

        self.batch_modeling_cmd(
            plane_id,
            source_range,
            &ModelingCmd::from(mcmd::MakePlane {
                clobber: false,
                origin: info.origin.into(),
                size: LengthUnit(default_size),
                x_axis: info.x_axis.into(),
                y_axis: info.y_axis.into(),
                hide: Some(true),
            }),
        )
        .await?;

        if let Some(color) = color {
            // Set the color.
            self.batch_modeling_cmd(
                id_generator.next_uuid(),
                source_range,
                &ModelingCmd::from(mcmd::PlaneSetColor { color, plane_id }),
            )
            .await?;
        }

        Ok(plane_id)
    }

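    /// Create all six default planes (XY, YZ, XZ and their negative counterparts),
    /// coloring only the positive three, then flush the batch so the planes exist in
    /// the engine right away.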
    async fn new_default_planes(
        &self,
        id_generator: &mut IdGenerator,
        source_range: SourceRange,
    ) -> Result<DefaultPlanes, KclError> {
        let plane_settings: Vec<(PlaneName, Uuid, Option<Color>)> = vec![
            (
                PlaneName::Xy,
                id_generator.next_uuid(),
                Some(Color {
                    r: 0.7,
                    g: 0.28,
                    b: 0.28,
                    a: 0.4,
                }),
            ),
            (
                PlaneName::Yz,
                id_generator.next_uuid(),
                Some(Color {
                    r: 0.28,
                    g: 0.7,
                    b: 0.28,
                    a: 0.4,
                }),
            ),
            (
                PlaneName::Xz,
                id_generator.next_uuid(),
                Some(Color {
                    r: 0.28,
                    g: 0.28,
                    b: 0.7,
                    a: 0.4,
                }),
            ),
            (PlaneName::NegXy, id_generator.next_uuid(), None),
            (PlaneName::NegYz, id_generator.next_uuid(), None),
            (PlaneName::NegXz, id_generator.next_uuid(), None),
        ];

        let mut planes = HashMap::new();
        for (name, plane_id, color) in plane_settings {
            let info = DEFAULT_PLANE_INFO.get(&name).ok_or_else(|| {
                // We should never get here.
                KclError::new_engine(KclErrorDetails::new(
                    format!("Failed to get default plane info for: {name:?}"),
                    vec![source_range],
                ))
            })?;
            planes.insert(
                name,
                self.make_default_plane(plane_id, info, color, source_range, id_generator)
                    .await?,
            );
        }

        // Flush the batch queue, so these planes are created right away.
        self.flush_batch(false, source_range).await?;

        Ok(DefaultPlanes {
            xy: planes[&PlaneName::Xy],
            neg_xy: planes[&PlaneName::NegXy],
            xz: planes[&PlaneName::Xz],
            neg_xz: planes[&PlaneName::NegXz],
            yz: planes[&PlaneName::Yz],
            neg_yz: planes[&PlaneName::NegYz],
        })
    }

    fn parse_websocket_response(
        &self,
        response: WebSocketResponse,
        source_range: SourceRange,
    ) -> Result<OkWebSocketResponseData, crate::errors::KclError> {
        match response {
            WebSocketResponse::Success(success) => Ok(success.resp),
            WebSocketResponse::Failure(fail) => {
                let _request_id = fail.request_id;
                Err(KclError::new_engine(KclErrorDetails::new(
                    fail.errors
                        .iter()
                        .map(|e| e.message.clone())
                        .collect::<Vec<_>>()
                        .join("\n"),
                    vec![source_range],
                )))
            }
        }
    }

    fn parse_batch_responses(
        &self,
        // The last response we are looking for.
        id: uuid::Uuid,
        // The mapping of command IDs to source ranges.
        id_to_source_range: HashMap<uuid::Uuid, SourceRange>,
        // The response from the engine.
        responses: HashMap<uuid::Uuid, BatchResponse>,
    ) -> Result<OkWebSocketResponseData, crate::errors::KclError> {
        // Iterate over the responses and check for errors.
        #[expect(
            clippy::iter_over_hash_type,
            reason = "modeling command uses a HashMap and keys are random, so we don't really have a choice"
        )]
        for (cmd_id, resp) in responses.iter() {
            match resp {
                BatchResponse::Success { response } => {
                    if cmd_id == &id {
                        // This is the response we care about.
                        return Ok(OkWebSocketResponseData::Modeling {
                            modeling_response: response.clone(),
                        });
                    } else {
                        // Continue the loop if this is not the response we care about.
                        continue;
                    }
                }
                BatchResponse::Failure { errors } => {
                    // Get the source range for the command.
                    let source_range = id_to_source_range.get(cmd_id).cloned().ok_or_else(|| {
                        KclError::new_engine(KclErrorDetails::new(
                            format!("Failed to get source range for command ID: {cmd_id:?}"),
                            vec![],
                        ))
                    })?;
                    return Err(KclError::new_engine(KclErrorDetails::new(
                        errors.iter().map(|e| e.message.clone()).collect::<Vec<_>>().join("\n"),
                        vec![source_range],
                    )));
                }
            }
        }

        // We got neither the response we wanted nor an error for it; return an error.
        // This should never happen, but who knows.
        Err(KclError::new_engine(KclErrorDetails::new(
            format!("Failed to find response for command ID: {id:?}"),
            vec![],
        )))
    }

    async fn modify_grid(
        &self,
        hidden: bool,
        grid_scale_behavior: GridScaleBehavior,
        source_range: SourceRange,
        id_generator: &mut IdGenerator,
    ) -> Result<(), KclError> {
        // Hide/show the grid.
        self.batch_modeling_cmd(
            id_generator.next_uuid(),
            source_range,
            &ModelingCmd::from(mcmd::ObjectVisible {
                hidden,
                object_id: *GRID_OBJECT_ID,
            }),
        )
        .await?;

        self.batch_modeling_cmd(
            id_generator.next_uuid(),
            source_range,
            &grid_scale_behavior.into_modeling_cmd(),
        )
        .await?;

        // Hide/show the grid scale text.
        self.batch_modeling_cmd(
            id_generator.next_uuid(),
            source_range,
            &ModelingCmd::from(mcmd::ObjectVisible {
                hidden,
                object_id: *GRID_SCALE_TEXT_OBJECT_ID,
            }),
        )
        .await?;

        Ok(())
    }

    /// Get session data, if it has been received.
    /// Returns None if the server never sent it.
    async fn get_session_data(&self) -> Option<ModelingSessionData> {
        None
    }

    /// Close the engine connection and wait for it to finish.
    async fn close(&self);
}

#[derive(Debug, Hash, Eq, Copy, Clone, Deserialize, Serialize, PartialEq, ts_rs::TS, JsonSchema, Display, FromStr)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub enum PlaneName {
    /// The XY plane.
    #[display("XY")]
    Xy,
    /// The opposite side of the XY plane.
    #[display("-XY")]
    NegXy,
    /// The XZ plane.
    #[display("XZ")]
    Xz,
    /// The opposite side of the XZ plane.
    #[display("-XZ")]
    NegXz,
    /// The YZ plane.
    #[display("YZ")]
    Yz,
    /// The opposite side of the YZ plane.
    #[display("-YZ")]
    NegYz,
}

/// Create a new Zoo API client.
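///
/// The token is taken from the argument if provided, otherwise from `KITTYCAD_API_TOKEN`
/// or `ZOO_API_TOKEN` (setting both to different values is an error). The engine address
/// likewise falls back to `ZOO_HOST` or `KITTYCAD_HOST` when not passed in.
///
/// A rough usage sketch (illustrative only, as called from within this module):
///
/// ```ignore
/// // Fall back to the environment for both the token and the host.
/// let client = new_zoo_client(None, None)?;
/// ```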
#[cfg(not(target_arch = "wasm32"))]
pub fn new_zoo_client(token: Option<String>, engine_addr: Option<String>) -> anyhow::Result<kittycad::Client> {
    let user_agent = concat!(env!("CARGO_PKG_NAME"), ".rs/", env!("CARGO_PKG_VERSION"),);
    let http_client = reqwest::Client::builder()
        .user_agent(user_agent)
        // For file conversions we need this to be long.
        .timeout(std::time::Duration::from_secs(600))
        .connect_timeout(std::time::Duration::from_secs(60));
    let ws_client = reqwest::Client::builder()
        .user_agent(user_agent)
        // For file conversions we need this to be long.
        .timeout(std::time::Duration::from_secs(600))
        .connect_timeout(std::time::Duration::from_secs(60))
        .connection_verbose(true)
        .tcp_keepalive(std::time::Duration::from_secs(600))
        .http1_only();

    let zoo_token_env = std::env::var("ZOO_API_TOKEN");

    let token = if let Some(token) = token {
        token
    } else if let Ok(token) = std::env::var("KITTYCAD_API_TOKEN") {
        if let Ok(zoo_token) = zoo_token_env {
            if zoo_token != token {
                return Err(anyhow::anyhow!(
                    "Both environment variables KITTYCAD_API_TOKEN=`{}` and ZOO_API_TOKEN=`{}` are set. Use only one.",
                    token,
                    zoo_token
                ));
            }
        }
        token
    } else if let Ok(token) = zoo_token_env {
        token
    } else {
        return Err(anyhow::anyhow!(
            "No API token found in environment variables. Use KITTYCAD_API_TOKEN or ZOO_API_TOKEN"
        ));
    };

    // Create the client.
    let mut client = kittycad::Client::new_from_reqwest(token, http_client, ws_client);
    // Set an engine address if it's set.
    let kittycad_host_env = std::env::var("KITTYCAD_HOST");
    if let Some(addr) = engine_addr {
        client.set_base_url(addr);
    } else if let Ok(addr) = std::env::var("ZOO_HOST") {
        if let Ok(kittycad_host) = kittycad_host_env {
            if kittycad_host != addr {
                return Err(anyhow::anyhow!(
                    "Both environment variables KITTYCAD_HOST=`{}` and ZOO_HOST=`{}` are set. Use only one.",
                    kittycad_host,
                    addr
                ));
            }
        }
        client.set_base_url(addr);
    } else if let Ok(addr) = kittycad_host_env {
        client.set_base_url(addr);
    }

    Ok(client)
}

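/// How the scene grid scales: automatically with camera zoom, or fixed to a set number
/// of grid columns in the given unit (defaulting to millimeters).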
#[derive(Copy, Clone, Debug)]
pub enum GridScaleBehavior {
    ScaleWithZoom,
    Fixed(Option<kcmc::units::UnitLength>),
}

impl GridScaleBehavior {
    fn into_modeling_cmd(self) -> ModelingCmd {
        const NUMBER_OF_GRID_COLUMNS: f32 = 10.0;
        match self {
            GridScaleBehavior::ScaleWithZoom => ModelingCmd::from(mcmd::SetGridAutoScale {}),
            GridScaleBehavior::Fixed(unit_length) => ModelingCmd::from(mcmd::SetGridScale {
                value: NUMBER_OF_GRID_COLUMNS,
                units: unit_length.unwrap_or(kcmc::units::UnitLength::Millimeters),
            }),
        }
    }
}