wdl_analysis/analyzer.rs

//! Implementation of the analyzer.

use std::ffi::OsStr;
use std::fmt;
use std::future::Future;
use std::mem::ManuallyDrop;
use std::ops::Range;
use std::path::Path;
use std::path::PathBuf;
use std::path::absolute;
use std::sync::Arc;
use std::thread::JoinHandle;

use anyhow::Context;
use anyhow::Error;
use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
use indexmap::IndexSet;
use line_index::LineCol;
use line_index::LineIndex;
use line_index::WideEncoding;
use line_index::WideLineCol;
use path_clean::clean;
use tokio::runtime::Handle;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use url::Url;
use walkdir::WalkDir;
use wdl_ast::Severity;
use wdl_ast::SyntaxNode;
use wdl_ast::SyntaxNodeExt;
use wdl_ast::Validator;

use crate::Rule;
use crate::UNNECESSARY_FUNCTION_CALL;
use crate::UNUSED_CALL_RULE_ID;
use crate::UNUSED_DECL_RULE_ID;
use crate::UNUSED_IMPORT_RULE_ID;
use crate::UNUSED_INPUT_RULE_ID;
use crate::document::Document;
use crate::graph::DocumentGraphNode;
use crate::graph::ParseState;
use crate::queue::AddRequest;
use crate::queue::AnalysisQueue;
use crate::queue::AnalyzeRequest;
use crate::queue::FormatRequest;
use crate::queue::NotifyChangeRequest;
use crate::queue::NotifyIncrementalChangeRequest;
use crate::queue::RemoveRequest;
use crate::queue::Request;
use crate::rayon::RayonHandle;

/// Represents the kind of analysis progress being reported.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProgressKind {
    /// The progress is for parsing documents.
    Parsing,
    /// The progress is for analyzing documents.
    Analyzing,
}

impl fmt::Display for ProgressKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Parsing => write!(f, "parsing"),
            Self::Analyzing => write!(f, "analyzing"),
        }
    }
}

/// Converts a local path to a `file`-schemed URI.
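///
/// A minimal usage sketch (the workflow path here is hypothetical):
///
/// ```ignore
/// let uri = path_to_uri("workflows/main.wdl").expect("path should convert to a URI");
/// assert!(uri.as_str().starts_with("file://"));
/// ```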
pub fn path_to_uri(path: impl AsRef<Path>) -> Option<Url> {
    Url::from_file_path(clean(absolute(path).ok()?)).ok()
}

/// Represents the result of an analysis.
///
/// Analysis results are cheap to clone.
#[derive(Debug, Clone)]
pub struct AnalysisResult {
    /// The error that occurred when attempting to parse the file (e.g. the file
    /// could not be opened).
    error: Option<Arc<Error>>,
    /// The monotonic version of the document that was parsed.
    ///
    /// This value comes from incremental changes to the file.
    ///
    /// If `None`, the parsed version had no incremental changes.
    version: Option<i32>,
    /// The line index for the parsed file.
    lines: Option<Arc<LineIndex>>,
    /// The analyzed document.
    document: Document,
}

impl AnalysisResult {
    /// Constructs a new analysis result for the given graph node.
    pub(crate) fn new(node: &DocumentGraphNode) -> Self {
        let (error, version, lines) = match node.parse_state() {
            ParseState::NotParsed => unreachable!("document should have been parsed"),
            ParseState::Error(e) => (Some(e), None, None),
            ParseState::Parsed { version, lines, .. } => (None, *version, Some(lines)),
        };

        Self {
            error: error.cloned(),
            version,
            lines: lines.cloned(),
            document: node
                .document()
                .expect("analysis should have completed")
                .clone(),
        }
    }

    /// Gets the error that occurred when attempting to parse the document.
    ///
    /// An example error would be if the file could not be opened.
    ///
    /// Returns `None` if the document was parsed successfully.
    pub fn error(&self) -> Option<&Arc<Error>> {
        self.error.as_ref()
    }

    /// Gets the incremental version of the parsed document.
    ///
    /// Returns `None` if there was an error parsing the document or if the
    /// parsed document had no incremental changes.
    pub fn version(&self) -> Option<i32> {
        self.version
    }

    /// Gets the line index of the parsed document.
    ///
    /// Returns `None` if there was an error parsing the document.
    pub fn lines(&self) -> Option<&Arc<LineIndex>> {
        self.lines.as_ref()
    }

    /// Gets the analyzed document.
    pub fn document(&self) -> &Document {
        &self.document
    }
}

/// Represents a position in a document's source.
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Default)]
pub struct SourcePosition {
    /// Line position in a document (zero-based).
    // NOTE: this field must come before `character` to maintain a correct sort order.
    pub line: u32,
    /// Character offset on a line in a document (zero-based). The meaning of
    /// this offset is determined by the position encoding.
    pub character: u32,
}

impl SourcePosition {
    /// Constructs a new source position from a line and character offset.
    pub fn new(line: u32, character: u32) -> Self {
        Self { line, character }
    }
}

/// Represents the encoding of a source position.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum SourcePositionEncoding {
    /// The position is UTF8 encoded.
    ///
    /// A position's character is the UTF-8 offset from the start of the line.
    UTF8,
    /// The position is UTF16 encoded.
    ///
    /// A position's character is the UTF-16 offset from the start of the line.
    UTF16,
}

/// Represents an edit to a document's source.
#[derive(Debug, Clone)]
pub struct SourceEdit {
    /// The range of the edit.
    ///
    /// Note that invalid ranges will cause the edit to be ignored.
    range: Range<SourcePosition>,
    /// The encoding of the edit positions.
    encoding: SourcePositionEncoding,
    /// The replacement text.
    text: String,
}

impl SourceEdit {
    /// Creates a new source edit for the given range and replacement text.
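    ///
    /// A minimal sketch (positions are zero-based; the replaced span here is
    /// hypothetical):
    ///
    /// ```ignore
    /// let edit = SourceEdit::new(
    ///     SourcePosition::new(0, 9)..SourcePosition::new(0, 13),
    ///     SourcePositionEncoding::UTF8,
    ///     "something_else",
    /// );
    /// ```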
    pub fn new(
        range: Range<SourcePosition>,
        encoding: SourcePositionEncoding,
        text: impl Into<String>,
    ) -> Self {
        Self {
            range,
            encoding,
            text: text.into(),
        }
    }

    /// Gets the range of the edit.
    pub(crate) fn range(&self) -> Range<SourcePosition> {
        self.range.start..self.range.end
    }

    /// Applies the edit to the given string if it's in range.
    pub(crate) fn apply(&self, source: &mut String, lines: &LineIndex) -> Result<()> {
        let (start, end) = match self.encoding {
            SourcePositionEncoding::UTF8 => (
                LineCol {
                    line: self.range.start.line,
                    col: self.range.start.character,
                },
                LineCol {
                    line: self.range.end.line,
                    col: self.range.end.character,
                },
            ),
            SourcePositionEncoding::UTF16 => (
                lines
                    .to_utf8(
                        WideEncoding::Utf16,
                        WideLineCol {
                            line: self.range.start.line,
                            col: self.range.start.character,
                        },
                    )
                    .context("invalid edit start position")?,
                lines
                    .to_utf8(
                        WideEncoding::Utf16,
                        WideLineCol {
                            line: self.range.end.line,
                            col: self.range.end.character,
                        },
                    )
                    .context("invalid edit end position")?,
            ),
        };

        let range: Range<usize> = lines
            .offset(start)
            .context("invalid edit start position")?
            .into()
            ..lines
                .offset(end)
                .context("invalid edit end position")?
                .into();

        if !source.is_char_boundary(range.start) {
            bail!("edit start position is not at a character boundary");
        }

        if !source.is_char_boundary(range.end) {
            bail!("edit end position is not at a character boundary");
        }

        source.replace_range(range, &self.text);
        Ok(())
    }
}

/// Represents an incremental change to a document.
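///
/// A minimal construction sketch (see the field documentation below for the
/// semantics of `version` and `start`):
///
/// ```ignore
/// let change = IncrementalChange {
///     version: 2,
///     // `None` applies the edits to the existing CST and attempts an
///     // incremental parse; `Some(source)` forces a full reparse.
///     start: None,
///     edits: vec![],
/// };
/// ```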
#[derive(Clone, Debug)]
pub struct IncrementalChange {
    /// The monotonic version of the document.
    ///
    /// This is expected to increase for each incremental change.
    pub version: i32,
    /// The source to start from for applying edits.
    ///
    /// If this is `Some`, a full reparse will occur after applying edits to
    /// this string.
    ///
    /// If this is `None`, edits will be applied to the existing CST and an
    /// attempt will be made to incrementally parse the file.
    pub start: Option<String>,
    /// The source edits to apply.
    pub edits: Vec<SourceEdit>,
}

/// Configuration for analysis diagnostics.
///
/// Only the analysis diagnostics that aren't inherently treated as errors are
/// represented here.
///
/// These diagnostics default to a warning severity.
#[derive(Debug, Clone, Copy)]
pub struct DiagnosticsConfig {
    /// The severity for the "unused import" diagnostic.
    ///
    /// A value of `None` disables the diagnostic.
    pub unused_import: Option<Severity>,
    /// The severity for the "unused input" diagnostic.
    ///
    /// A value of `None` disables the diagnostic.
    pub unused_input: Option<Severity>,
    /// The severity for the "unused declaration" diagnostic.
    ///
    /// A value of `None` disables the diagnostic.
    pub unused_declaration: Option<Severity>,
    /// The severity for the "unused call" diagnostic.
    ///
    /// A value of `None` disables the diagnostic.
    pub unused_call: Option<Severity>,
    /// The severity for the "unnecessary function call" diagnostic.
    ///
    /// A value of `None` disables the diagnostic.
    pub unnecessary_function_call: Option<Severity>,
}

impl DiagnosticsConfig {
    /// Creates a new diagnostics configuration from a rule set.
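    ///
    /// A minimal sketch (using the crate's `rules()` function, as the tests
    /// below do):
    ///
    /// ```ignore
    /// let mut config = DiagnosticsConfig::new(crate::rules());
    /// // Fields are public, so individual diagnostics can be disabled after
    /// // construction by setting them to `None`.
    /// config.unused_import = None;
    /// ```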
    pub fn new<T: AsRef<dyn Rule>>(rules: impl IntoIterator<Item = T>) -> Self {
        let mut unused_import = None;
        let mut unused_input = None;
        let mut unused_declaration = None;
        let mut unused_call = None;
        let mut unnecessary_function_call = None;

        for rule in rules {
            let rule = rule.as_ref();
            match rule.id() {
                UNUSED_IMPORT_RULE_ID => unused_import = Some(rule.severity()),
                UNUSED_INPUT_RULE_ID => unused_input = Some(rule.severity()),
                UNUSED_DECL_RULE_ID => unused_declaration = Some(rule.severity()),
                UNUSED_CALL_RULE_ID => unused_call = Some(rule.severity()),
                UNNECESSARY_FUNCTION_CALL => unnecessary_function_call = Some(rule.severity()),
                _ => {}
            }
        }

        Self {
            unused_import,
            unused_input,
            unused_declaration,
            unused_call,
            unnecessary_function_call,
        }
    }

    /// Returns this configuration with any diagnostics disabled that are
    /// excepted by `#@ except` comments preceding the given syntax node.
    pub fn excepted_for_node(mut self, node: &SyntaxNode) -> Self {
        let exceptions = node.rule_exceptions();

        if exceptions.contains(UNUSED_IMPORT_RULE_ID) {
            self.unused_import = None;
        }

        if exceptions.contains(UNUSED_INPUT_RULE_ID) {
            self.unused_input = None;
        }

        if exceptions.contains(UNUSED_DECL_RULE_ID) {
            self.unused_declaration = None;
        }

        if exceptions.contains(UNUSED_CALL_RULE_ID) {
            self.unused_call = None;
        }

        if exceptions.contains(UNNECESSARY_FUNCTION_CALL) {
            self.unnecessary_function_call = None;
        }

        self
    }

    /// Excepts all of the diagnostics.
    pub fn except_all() -> Self {
        Self {
            unused_import: None,
            unused_input: None,
            unused_declaration: None,
            unused_call: None,
            unnecessary_function_call: None,
        }
    }
}

/// Represents a Workflow Description Language (WDL) document analyzer.
///
/// By default, analysis parses documents, performs validation checks, resolves
/// imports, and performs type checking.
///
/// Each analysis operation is processed in order of request; however, the
/// individual parsing, resolution, and analysis of documents is performed
/// across a thread pool.
///
/// Note that dropping the analyzer is a blocking operation as it will wait for
/// the queue thread to join.
///
/// The type parameter is the context type passed to the progress callback.
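///
/// A minimal end-to-end sketch (the directory path is hypothetical; this must
/// run inside a Tokio runtime):
///
/// ```ignore
/// let analyzer = Analyzer::new(
///     DiagnosticsConfig::except_all(),
///     // The `()` context is passed back to this progress callback.
///     |_: (), kind, completed, total| async move {
///         eprintln!("{kind}: {completed}/{total}");
///     },
/// );
/// analyzer.add_directory("workflows".into()).await?;
/// let results = analyzer.analyze(()).await?;
/// ```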
#[derive(Debug)]
pub struct Analyzer<Context> {
    /// The sender for sending analysis requests to the queue.
    sender: ManuallyDrop<mpsc::UnboundedSender<Request<Context>>>,
    /// The join handle for the queue thread.
    handle: Option<JoinHandle<()>>,
}

impl<Context> Analyzer<Context>
where
    Context: Send + Clone + 'static,
{
    /// Constructs a new analyzer with the given diagnostics config.
    ///
    /// The provided progress callback will be invoked during analysis.
    ///
    /// The analyzer will use a default validator for validation.
    ///
    /// The analyzer must be constructed from the context of a Tokio runtime.
    pub fn new<Progress, Return>(config: DiagnosticsConfig, progress: Progress) -> Self
    where
        Progress: Fn(Context, ProgressKind, usize, usize) -> Return + Send + 'static,
        Return: Future<Output = ()>,
    {
        Self::new_with_validator(config, progress, Validator::default)
    }

    /// Constructs a new analyzer with the given diagnostics config and
    /// validator function.
    ///
    /// The provided progress callback will be invoked during analysis.
    ///
    /// This validator function will be called once per worker thread to
    /// initialize a thread-local validator.
    ///
    /// The analyzer must be constructed from the context of a Tokio runtime.
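    ///
    /// A minimal sketch supplying an explicit validator factory (this is what
    /// [`Analyzer::new`] does with `Validator::default`):
    ///
    /// ```ignore
    /// let analyzer = Analyzer::new_with_validator(
    ///     DiagnosticsConfig::except_all(),
    ///     |_: (), _, _, _| async {},
    ///     wdl_ast::Validator::default,
    /// );
    /// ```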
    pub fn new_with_validator<Progress, Return, Validator>(
        config: DiagnosticsConfig,
        progress: Progress,
        validator: Validator,
    ) -> Self
    where
        Progress: Fn(Context, ProgressKind, usize, usize) -> Return + Send + 'static,
        Return: Future<Output = ()>,
        Validator: Fn() -> wdl_ast::Validator + Send + Sync + 'static,
    {
        let (tx, rx) = mpsc::unbounded_channel();
        let tokio = Handle::current();
        let handle = std::thread::spawn(move || {
            let queue = AnalysisQueue::new(config, tokio, progress, validator);
            queue.run(rx);
        });

        Self {
            sender: ManuallyDrop::new(tx),
            handle: Some(handle),
        }
    }

    /// Adds a document to the analyzer. The document may be a local file or a
    /// remote URL.
    ///
    /// Returns an error if the document could not be added.
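    ///
    /// A minimal sketch (pairing with [`path_to_uri`]; the path is
    /// hypothetical):
    ///
    /// ```ignore
    /// let uri = path_to_uri("workflows/main.wdl").expect("path should convert to a URI");
    /// analyzer.add_document(uri).await?;
    /// ```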
    pub async fn add_document(&self, uri: Url) -> Result<()> {
        let mut documents = IndexSet::new();
        documents.insert(uri);

        let (tx, rx) = oneshot::channel();
        self.sender
            .send(Request::Add(AddRequest {
                documents,
                completed: tx,
            }))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })?;

        rx.await.map_err(|_| {
            anyhow!("failed to receive response from analysis queue because the channel has closed")
        })?;

        Ok(())
    }

    /// Adds a directory to the analyzer, recursively searching it for WDL
    /// documents.
    ///
    /// Returns an error if there was a problem discovering documents for the
    /// specified path.
    pub async fn add_directory(&self, path: PathBuf) -> Result<()> {
        // Start by searching for documents
        let documents = RayonHandle::spawn(move || -> Result<IndexSet<Url>> {
            let mut documents = IndexSet::new();

            let metadata = path.metadata().with_context(|| {
                format!(
                    "failed to read metadata for `{path}`",
                    path = path.display()
                )
            })?;

            if metadata.is_file() {
                bail!("`{path}` is a file, not a directory", path = path.display());
            }

            for result in WalkDir::new(&path).follow_links(true) {
                let entry = result.with_context(|| {
                    format!("failed to read directory `{path}`", path = path.display())
                })?;
                if !entry.file_type().is_file()
                    || entry.path().extension().and_then(OsStr::to_str) != Some("wdl")
                {
                    continue;
                }

                documents.insert(path_to_uri(entry.path()).with_context(|| {
                    format!(
                        "failed to convert path `{path}` to a URI",
                        path = entry.path().display()
                    )
                })?);
            }

            Ok(documents)
        })
        .await?;

        if documents.is_empty() {
            return Ok(());
        }

        // Send the add request to the queue
        let (tx, rx) = oneshot::channel();
        self.sender
            .send(Request::Add(AddRequest {
                documents,
                completed: tx,
            }))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })?;

        rx.await.map_err(|_| {
            anyhow!("failed to receive response from analysis queue because the channel has closed")
        })?;

        Ok(())
    }

    /// Removes the specified documents from the analyzer.
    ///
    /// If a specified URI is a prefix (i.e. a directory) of documents known to
    /// the analyzer, those documents will be removed.
    ///
    /// A document is removed only if no other document imports it.
    pub async fn remove_documents(&self, documents: Vec<Url>) -> Result<()> {
        // Send the remove request to the queue
        let (tx, rx) = oneshot::channel();
        self.sender
            .send(Request::Remove(RemoveRequest {
                documents,
                completed: tx,
            }))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })?;

        rx.await.map_err(|_| {
            anyhow!("failed to receive response from analysis queue because the channel has closed")
        })?;

        Ok(())
    }

    /// Notifies the analyzer that a document has an incremental change.
    ///
    /// Changes to documents that aren't known to the analyzer are ignored.
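    ///
    /// A minimal sketch mirroring the incremental-change test below (the URI
    /// and edit are hypothetical):
    ///
    /// ```ignore
    /// analyzer.notify_incremental_change(
    ///     uri,
    ///     IncrementalChange {
    ///         version: 2,
    ///         start: None,
    ///         edits: vec![SourceEdit::new(
    ///             SourcePosition::new(6, 9)..SourcePosition::new(6, 13),
    ///             SourcePositionEncoding::UTF8,
    ///             "something_else",
    ///         )],
    ///     },
    /// )?;
    /// ```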
    pub fn notify_incremental_change(
        &self,
        document: Url,
        change: IncrementalChange,
    ) -> Result<()> {
        self.sender
            .send(Request::NotifyIncrementalChange(
                NotifyIncrementalChangeRequest { document, change },
            ))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })
    }

    /// Notifies the analyzer that a document has fully changed and should be
    /// fetched again.
    ///
    /// Changes to documents that aren't known to the analyzer are ignored.
    ///
    /// If `discard_pending` is true, then any pending incremental changes are
    /// discarded; otherwise, the full change is ignored if there are pending
    /// incremental changes.
    pub fn notify_change(&self, document: Url, discard_pending: bool) -> Result<()> {
        self.sender
            .send(Request::NotifyChange(NotifyChangeRequest {
                document,
                discard_pending,
            }))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })
    }

    /// Analyzes a specific document.
    ///
    /// The provided context is passed to the progress callback.
    ///
    /// If the document is up-to-date and was previously analyzed, the current
    /// analysis result is returned.
    ///
    /// Returns an analysis result for each document that was analyzed.
    pub async fn analyze_document(
        &self,
        context: Context,
        document: Url,
    ) -> Result<Vec<AnalysisResult>> {
        // Send the analyze request to the queue
        let (tx, rx) = oneshot::channel();
        self.sender
            .send(Request::Analyze(AnalyzeRequest {
                document: Some(document),
                context,
                completed: tx,
            }))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })?;

        rx.await.map_err(|_| {
            anyhow!("failed to receive response from analysis queue because the channel has closed")
        })?
    }

    /// Performs analysis of all documents.
    ///
    /// The provided context is passed to the progress callback.
    ///
    /// If a document is up-to-date and was previously analyzed, the current
    /// analysis result is returned.
    ///
    /// Returns an analysis result for each document that was analyzed.
    pub async fn analyze(&self, context: Context) -> Result<Vec<AnalysisResult>> {
        // Send the analyze request to the queue
        let (tx, rx) = oneshot::channel();
        self.sender
            .send(Request::Analyze(AnalyzeRequest {
                document: None,
                context,
                completed: tx,
            }))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })?;

        rx.await.map_err(|_| {
            anyhow!("failed to receive response from analysis queue because the channel has closed")
        })?
    }

    /// Formats a document.
    pub async fn format_document(&self, document: Url) -> Result<Option<(u32, u32, String)>> {
        let (tx, rx) = oneshot::channel();
        self.sender
            .send(Request::Format(FormatRequest {
                document,
                completed: tx,
            }))
            .map_err(|_| {
                anyhow!("failed to send format request to the queue because the channel has closed")
            })?;

        rx.await.map_err(|_| {
            anyhow!("failed to receive response from analysis queue because the channel has closed")
        })
    }
}

impl<C> Drop for Analyzer<C> {
    fn drop(&mut self) {
        unsafe { ManuallyDrop::drop(&mut self.sender) };
        if let Some(handle) = self.handle.take() {
            handle.join().unwrap();
        }
    }
}

/// Constant that asserts `Analyzer` is `Send + Sync`; if not, it fails to
/// compile.
const _: () = {
    /// Helper that will fail to compile if T is not `Send + Sync`.
    const fn _assert<T: Send + Sync>() {}
    _assert::<Analyzer<()>>();
};

#[cfg(test)]
mod test {
    use std::fs;

    use tempfile::TempDir;
    use wdl_ast::Severity;

    use super::*;
    use crate::rules;

    #[tokio::test]
    async fn it_returns_empty_results() {
        let analyzer = Analyzer::new(DiagnosticsConfig::new(rules()), |_: (), _, _, _| async {});
        let results = analyzer.analyze(()).await.unwrap();
        assert!(results.is_empty());
    }

    #[tokio::test]
    async fn it_analyzes_a_document() {
        let dir = TempDir::new().expect("failed to create temporary directory");
        let path = dir.path().join("foo.wdl");
        fs::write(
            &path,
            r#"version 1.1

task test {
    command <<<>>>
}

workflow test {
}
"#,
        )
        .expect("failed to create test file");

        // Analyze the file and check the resulting diagnostic
        let analyzer = Analyzer::new(DiagnosticsConfig::new(rules()), |_: (), _, _, _| async {});
        analyzer
            .add_document(path_to_uri(&path).expect("should convert to URI"))
            .await
            .expect("should add document");

        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].document.diagnostics().len(), 1);
        assert_eq!(results[0].document.diagnostics()[0].rule(), None);
        assert_eq!(
            results[0].document.diagnostics()[0].severity(),
            Severity::Error
        );
        assert_eq!(
            results[0].document.diagnostics()[0].message(),
            "conflicting workflow name `test`"
        );

        // Analyze again and ensure the analysis result id is unchanged
        let id = results[0].document.id().clone();
        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].document.id().as_ref(), id.as_ref());
        assert_eq!(results[0].document.diagnostics().len(), 1);
        assert_eq!(results[0].document.diagnostics()[0].rule(), None);
        assert_eq!(
            results[0].document.diagnostics()[0].severity(),
            Severity::Error
        );
        assert_eq!(
            results[0].document.diagnostics()[0].message(),
            "conflicting workflow name `test`"
        );
    }

    #[tokio::test]
    async fn it_reanalyzes_a_document_on_change() {
        let dir = TempDir::new().expect("failed to create temporary directory");
        let path = dir.path().join("foo.wdl");
        fs::write(
            &path,
            r#"version 1.1

task test {
    command <<<>>>
}

workflow test {
}
"#,
        )
        .expect("failed to create test file");

        // Analyze the file and check the resulting diagnostic
        let analyzer = Analyzer::new(DiagnosticsConfig::new(rules()), |_: (), _, _, _| async {});
        analyzer
            .add_document(path_to_uri(&path).expect("should convert to URI"))
            .await
            .expect("should add document");

        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].document.diagnostics().len(), 1);
        assert_eq!(results[0].document.diagnostics()[0].rule(), None);
        assert_eq!(
            results[0].document.diagnostics()[0].severity(),
            Severity::Error
        );
        assert_eq!(
            results[0].document.diagnostics()[0].message(),
            "conflicting workflow name `test`"
        );

        // Rewrite the file to correct the issue
        fs::write(
            &path,
            r#"version 1.1

task test {
    command <<<>>>
}

workflow something_else {
}
"#,
        )
        .expect("failed to create test file");

        let uri = path_to_uri(&path).expect("should convert to URI");
        analyzer.notify_change(uri.clone(), false).unwrap();

        // Analyze again and ensure the analysis result id changed and the
        // issue was fixed
        let id = results[0].document.id().clone();
        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 1);
        assert!(results[0].document.id().as_ref() != id.as_ref());
        assert_eq!(results[0].document.diagnostics().len(), 0);

        // Analyze again and ensure the analysis result id is unchanged
        let id = results[0].document.id().clone();
        let results = analyzer.analyze_document((), uri).await.unwrap();
        assert_eq!(results.len(), 1);
        assert!(results[0].document.id().as_ref() == id.as_ref());
        assert_eq!(results[0].document.diagnostics().len(), 0);
    }

    #[tokio::test]
    async fn it_reanalyzes_a_document_on_incremental_change() {
        let dir = TempDir::new().expect("failed to create temporary directory");
        let path = dir.path().join("foo.wdl");
        fs::write(
            &path,
            r#"version 1.1

task test {
    command <<<>>>
}

workflow test {
}
"#,
        )
        .expect("failed to create test file");

        // Analyze the file and check the resulting diagnostic
        let analyzer = Analyzer::new(DiagnosticsConfig::new(rules()), |_: (), _, _, _| async {});
        analyzer
            .add_document(path_to_uri(&path).expect("should convert to URI"))
            .await
            .expect("should add document");

        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].document.diagnostics().len(), 1);
        assert_eq!(results[0].document.diagnostics()[0].rule(), None);
        assert_eq!(
            results[0].document.diagnostics()[0].severity(),
            Severity::Error
        );
        assert_eq!(
            results[0].document.diagnostics()[0].message(),
            "conflicting workflow name `test`"
        );

        // Edit the file to correct the issue
        let uri = path_to_uri(&path).expect("should convert to URI");
        analyzer
            .notify_incremental_change(
                uri.clone(),
                IncrementalChange {
                    version: 2,
                    start: None,
                    edits: vec![SourceEdit {
                        range: SourcePosition::new(6, 9)..SourcePosition::new(6, 13),
                        encoding: SourcePositionEncoding::UTF8,
                        text: "something_else".to_string(),
                    }],
                },
            )
            .unwrap();

        // Analyze again and ensure the analysis result id is changed and the issue was
        // fixed
        let id = results[0].document.id().clone();
        let results = analyzer.analyze_document((), uri).await.unwrap();
        assert_eq!(results.len(), 1);
        assert!(results[0].document.id().as_ref() != id.as_ref());
        assert_eq!(results[0].document.diagnostics().len(), 0);
    }

    #[tokio::test]
    async fn it_removes_documents() {
        let dir = TempDir::new().expect("failed to create temporary directory");
        let foo = dir.path().join("foo.wdl");
        fs::write(
            &foo,
            r#"version 1.1
workflow test {
}
"#,
        )
        .expect("failed to create test file");

        let bar = dir.path().join("bar.wdl");
        fs::write(
            &bar,
            r#"version 1.1
workflow test {
}
"#,
        )
        .expect("failed to create test file");

        let baz = dir.path().join("baz.wdl");
        fs::write(
            &baz,
            r#"version 1.1
workflow test {
}
"#,
        )
        .expect("failed to create test file");

        // Add all three documents to the analyzer
        let analyzer = Analyzer::new(DiagnosticsConfig::new(rules()), |_: (), _, _, _| async {});
        analyzer
            .add_directory(dir.path().to_path_buf())
            .await
            .expect("should add documents");

        // Analyze the documents
        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 3);
        assert!(results[0].document.diagnostics().is_empty());
        assert!(results[1].document.diagnostics().is_empty());
        assert!(results[2].document.diagnostics().is_empty());

        // Analyze the documents again
        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 3);

        // Remove the documents by directory
        analyzer
            .remove_documents(vec![
                path_to_uri(dir.path()).expect("should convert to URI"),
            ])
            .await
            .unwrap();
        let results = analyzer.analyze(()).await.unwrap();
        assert!(results.is_empty());
    }
}