wdl_analysis/analyzer.rs

//! Implementation of the analyzer.

use std::ffi::OsStr;
use std::fmt;
use std::future::Future;
use std::mem::ManuallyDrop;
use std::ops::Range;
use std::path::Path;
use std::path::PathBuf;
use std::path::absolute;
use std::sync::Arc;
use std::thread::JoinHandle;

use anyhow::Context;
use anyhow::Error;
use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
use indexmap::IndexSet;
use line_index::LineCol;
use line_index::LineIndex;
use line_index::WideEncoding;
use line_index::WideLineCol;
use path_clean::clean;
use tokio::runtime::Handle;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use url::Url;
use walkdir::WalkDir;
use wdl_ast::Severity;
use wdl_ast::SyntaxNode;

use crate::Rule;
use crate::SyntaxNodeExt;
use crate::UNNECESSARY_FUNCTION_CALL;
use crate::UNUSED_CALL_RULE_ID;
use crate::UNUSED_DECL_RULE_ID;
use crate::UNUSED_IMPORT_RULE_ID;
use crate::UNUSED_INPUT_RULE_ID;
use crate::document::Document;
use crate::graph::DocumentGraphNode;
use crate::graph::ParseState;
use crate::queue::AddRequest;
use crate::queue::AnalysisQueue;
use crate::queue::AnalyzeRequest;
use crate::queue::FormatRequest;
use crate::queue::NotifyChangeRequest;
use crate::queue::NotifyIncrementalChangeRequest;
use crate::queue::RemoveRequest;
use crate::queue::Request;
use crate::rayon::RayonHandle;
use crate::rules;

/// Represents the kind of analysis progress being reported.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ProgressKind {
    /// The progress is for parsing documents.
    Parsing,
    /// The progress is for analyzing documents.
    Analyzing,
}

impl fmt::Display for ProgressKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::Parsing => write!(f, "parsing"),
            Self::Analyzing => write!(f, "analyzing"),
        }
    }
}

/// Converts a local file path to a `file://` schemed URI.
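///
/// A minimal sketch of the expected behavior (the `/tmp/foo.wdl` path is
/// illustrative, and the import path assumes this function is re-exported at
/// the crate root):
///
/// ```ignore
/// use wdl_analysis::path_to_uri;
///
/// // Relative paths are made absolute and cleaned before conversion.
/// let uri = path_to_uri("/tmp/foo.wdl").expect("should convert to a URI");
/// assert_eq!(uri.scheme(), "file");
/// ```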
pub fn path_to_uri(path: impl AsRef<Path>) -> Option<Url> {
    Url::from_file_path(clean(absolute(path).ok()?)).ok()
}

/// Represents the result of an analysis.
///
/// Analysis results are cheap to clone.
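///
/// A sketch of inspecting a batch of results (the import path assumes this
/// type is re-exported at the crate root):
///
/// ```ignore
/// # fn example(results: Vec<wdl_analysis::AnalysisResult>) {
/// for result in results {
///     // A result either carries a parse error or an analyzed document.
///     if let Some(error) = result.error() {
///         eprintln!("error: {error:#}");
///         continue;
///     }
///
///     for diagnostic in result.document().diagnostics() {
///         println!("{:?}: {}", diagnostic.severity(), diagnostic.message());
///     }
/// }
/// # }
/// ```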
#[derive(Debug, Clone)]
pub struct AnalysisResult {
    /// The error that occurred when attempting to parse the file (e.g. the file
    /// could not be opened).
    error: Option<Arc<Error>>,
    /// The monotonic version of the document that was parsed.
    ///
    /// This value comes from incremental changes to the file.
    ///
    /// If `None`, the parsed version had no incremental changes.
    version: Option<i32>,
    /// The lines indexed for the parsed file.
    lines: Option<Arc<LineIndex>>,
    /// The analyzed document.
    document: Document,
}

impl AnalysisResult {
    /// Constructs a new analysis result for the given graph node.
    pub(crate) fn new(node: &DocumentGraphNode) -> Self {
        if let Some(error) = node.analysis_error() {
            return Self {
                error: Some(error.clone()),
                version: node.parse_state().version(),
                lines: node.parse_state().lines().cloned(),
                document: Document::default_from_uri(node.uri().clone()),
            };
        }

        let (error, version, lines) = match node.parse_state() {
            ParseState::NotParsed => unreachable!("document should have been parsed"),
            ParseState::Error(e) => (Some(e), None, None),
            ParseState::Parsed { version, lines, .. } => (None, *version, Some(lines)),
        };

        Self {
            error: error.cloned(),
            version,
            lines: lines.cloned(),
            document: node
                .document()
                .expect("analysis should have completed")
                .clone(),
        }
    }

    /// Gets the error that occurred when attempting to parse the document.
    ///
    /// An example error would be if the file could not be opened.
    ///
    /// Returns `None` if the document was parsed successfully.
    pub fn error(&self) -> Option<&Arc<Error>> {
        self.error.as_ref()
    }

    /// Gets the incremental version of the parsed document.
    ///
    /// Returns `None` if there was an error parsing the document or if the
    /// parsed document had no incremental changes.
    pub fn version(&self) -> Option<i32> {
        self.version
    }

    /// Gets the line index of the parsed document.
    ///
    /// Returns `None` if there was an error parsing the document.
    pub fn lines(&self) -> Option<&Arc<LineIndex>> {
        self.lines.as_ref()
    }

    /// Gets the analyzed document.
    pub fn document(&self) -> &Document {
        &self.document
    }
}

/// Represents a position in a document's source.
#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Copy, Clone, Default)]
pub struct SourcePosition {
    /// Line position in a document (zero-based).
    // NOTE: this field must come before `character` to maintain a correct sort order.
    pub line: u32,
    /// Character offset on a line in a document (zero-based). The meaning of
    /// this offset is determined by the position encoding.
    pub character: u32,
}

impl SourcePosition {
    /// Constructs a new source position from a line and character offset.
    pub fn new(line: u32, character: u32) -> Self {
        Self { line, character }
    }
}

/// Represents the encoding of a source position.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum SourcePositionEncoding {
    /// The position is UTF-8 encoded.
    ///
    /// A position's character is the UTF-8 offset from the start of the line.
    UTF8,
    /// The position is UTF-16 encoded.
    ///
    /// A position's character is the UTF-16 offset from the start of the line.
    UTF16,
}

/// Represents an edit to a document's source.
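///
/// A minimal construction sketch (positions are zero-based; the range and
/// replacement text shown here are illustrative, and the import paths assume
/// crate-root re-exports):
///
/// ```ignore
/// use wdl_analysis::{SourceEdit, SourcePosition, SourcePositionEncoding};
///
/// // Replace the text between line 0, column 8 and line 0, column 11.
/// let edit = SourceEdit::new(
///     SourcePosition::new(0, 8)..SourcePosition::new(0, 11),
///     SourcePositionEncoding::UTF8,
///     "1.2",
/// );
/// ```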
#[derive(Debug, Clone)]
pub struct SourceEdit {
    /// The range of the edit.
    ///
    /// Note that invalid ranges will cause the edit to be ignored.
    range: Range<SourcePosition>,
    /// The encoding of the edit positions.
    encoding: SourcePositionEncoding,
    /// The replacement text.
    text: String,
}

impl SourceEdit {
    /// Creates a new source edit for the given range and replacement text.
    pub fn new(
        range: Range<SourcePosition>,
        encoding: SourcePositionEncoding,
        text: impl Into<String>,
    ) -> Self {
        Self {
            range,
            encoding,
            text: text.into(),
        }
    }

    /// Gets the range of the edit.
    pub(crate) fn range(&self) -> Range<SourcePosition> {
        self.range.start..self.range.end
    }

    /// Applies the edit to the given string if it's in range.
    pub(crate) fn apply(&self, source: &mut String, lines: &LineIndex) -> Result<()> {
        let (start, end) = match self.encoding {
            SourcePositionEncoding::UTF8 => (
                LineCol {
                    line: self.range.start.line,
                    col: self.range.start.character,
                },
                LineCol {
                    line: self.range.end.line,
                    col: self.range.end.character,
                },
            ),
            SourcePositionEncoding::UTF16 => (
                lines
                    .to_utf8(
                        WideEncoding::Utf16,
                        WideLineCol {
                            line: self.range.start.line,
                            col: self.range.start.character,
                        },
                    )
                    .context("invalid edit start position")?,
                lines
                    .to_utf8(
                        WideEncoding::Utf16,
                        WideLineCol {
                            line: self.range.end.line,
                            col: self.range.end.character,
                        },
                    )
                    .context("invalid edit end position")?,
            ),
        };

        let range: Range<usize> = lines
            .offset(start)
            .context("invalid edit start position")?
            .into()
            ..lines
                .offset(end)
                .context("invalid edit end position")?
                .into();

        if !source.is_char_boundary(range.start) {
            bail!("edit start position is not at a character boundary");
        }

        if !source.is_char_boundary(range.end) {
            bail!("edit end position is not at a character boundary");
        }

        source.replace_range(range, &self.text);
        Ok(())
    }
}

/// Represents an incremental change to a document.
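///
/// A construction sketch (the version number and edit are illustrative; the
/// import paths assume crate-root re-exports):
///
/// ```ignore
/// use wdl_analysis::{IncrementalChange, SourceEdit, SourcePosition, SourcePositionEncoding};
///
/// let change = IncrementalChange {
///     version: 2,
///     // `None` requests that edits be applied to the existing CST.
///     start: None,
///     edits: vec![SourceEdit::new(
///         SourcePosition::new(6, 9)..SourcePosition::new(6, 13),
///         SourcePositionEncoding::UTF8,
///         "renamed",
///     )],
/// };
/// ```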
#[derive(Clone, Debug)]
pub struct IncrementalChange {
    /// The monotonic version of the document.
    ///
    /// This is expected to increase for each incremental change.
    pub version: i32,
    /// The source to start from for applying edits.
    ///
    /// If this is `Some`, a full reparse will occur after applying edits to
    /// this string.
    ///
    /// If this is `None`, edits will be applied to the existing CST and an
    /// attempt will be made to incrementally parse the file.
    pub start: Option<String>,
    /// The source edits to apply.
    pub edits: Vec<SourceEdit>,
}

/// Configuration for analysis diagnostics.
///
/// Only the analysis diagnostics that aren't inherently treated as errors are
/// represented here.
///
/// These diagnostics default to a warning severity.
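///
/// A sketch of disabling a single diagnostic while keeping the other defaults
/// (the import path assumes this type is re-exported at the crate root):
///
/// ```ignore
/// use wdl_analysis::DiagnosticsConfig;
///
/// // Start from the default severities and disable "unused import".
/// let mut config = DiagnosticsConfig::default();
/// config.unused_import = None;
/// ```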
#[derive(Debug, Clone, Copy)]
pub struct DiagnosticsConfig {
    /// The severity for the "unused import" diagnostic.
    ///
    /// A value of `None` disables the diagnostic.
    pub unused_import: Option<Severity>,
    /// The severity for the "unused input" diagnostic.
    ///
    /// A value of `None` disables the diagnostic.
    pub unused_input: Option<Severity>,
    /// The severity for the "unused declaration" diagnostic.
    ///
    /// A value of `None` disables the diagnostic.
    pub unused_declaration: Option<Severity>,
    /// The severity for the "unused call" diagnostic.
    ///
    /// A value of `None` disables the diagnostic.
    pub unused_call: Option<Severity>,
    /// The severity for the "unnecessary function call" diagnostic.
    ///
    /// A value of `None` disables the diagnostic.
    pub unnecessary_function_call: Option<Severity>,
}

impl Default for DiagnosticsConfig {
    fn default() -> Self {
        let mut unused_import = None;
        let mut unused_input = None;
        let mut unused_declaration = None;
        let mut unused_call = None;
        let mut unnecessary_function_call = None;

        for rule in rules() {
            let rule = rule.as_ref();
            match rule.id() {
                UNUSED_IMPORT_RULE_ID => unused_import = Some(rule.severity()),
                UNUSED_INPUT_RULE_ID => unused_input = Some(rule.severity()),
                UNUSED_DECL_RULE_ID => unused_declaration = Some(rule.severity()),
                UNUSED_CALL_RULE_ID => unused_call = Some(rule.severity()),
                UNNECESSARY_FUNCTION_CALL => unnecessary_function_call = Some(rule.severity()),
                _ => {
                    unreachable!("unknown rule ID: {}", rule.id());
                }
            }
        }

        Self {
            unused_import,
            unused_input,
            unused_declaration,
            unused_call,
            unnecessary_function_call,
        }
    }
}

impl DiagnosticsConfig {
    /// Creates a new diagnostics configuration from a rule set.
    pub fn new<T: AsRef<dyn Rule>>(rules: impl IntoIterator<Item = T>) -> Self {
        let mut unused_import = None;
        let mut unused_input = None;
        let mut unused_declaration = None;
        let mut unused_call = None;
        let mut unnecessary_function_call = None;

        for rule in rules {
            let rule = rule.as_ref();
            match rule.id() {
                UNUSED_IMPORT_RULE_ID => unused_import = Some(rule.severity()),
                UNUSED_INPUT_RULE_ID => unused_input = Some(rule.severity()),
                UNUSED_DECL_RULE_ID => unused_declaration = Some(rule.severity()),
                UNUSED_CALL_RULE_ID => unused_call = Some(rule.severity()),
                UNNECESSARY_FUNCTION_CALL => unnecessary_function_call = Some(rule.severity()),
                _ => {}
            }
        }

        Self {
            unused_import,
            unused_input,
            unused_declaration,
            unused_call,
            unnecessary_function_call,
        }
    }

    /// Gets the excepted set of diagnostics based on any `#@ except` comments
    /// that precede the given syntax node.
    pub fn excepted_for_node(mut self, node: &SyntaxNode) -> Self {
        let exceptions = node.rule_exceptions();

        if exceptions.contains(UNUSED_IMPORT_RULE_ID) {
            self.unused_import = None;
        }

        if exceptions.contains(UNUSED_INPUT_RULE_ID) {
            self.unused_input = None;
        }

        if exceptions.contains(UNUSED_DECL_RULE_ID) {
            self.unused_declaration = None;
        }

        if exceptions.contains(UNUSED_CALL_RULE_ID) {
            self.unused_call = None;
        }

        if exceptions.contains(UNNECESSARY_FUNCTION_CALL) {
            self.unnecessary_function_call = None;
        }

        self
    }

    /// Excepts all of the diagnostics.
    pub fn except_all() -> Self {
        Self {
            unused_import: None,
            unused_input: None,
            unused_declaration: None,
            unused_call: None,
            unnecessary_function_call: None,
        }
    }
}

/// Represents a Workflow Description Language (WDL) document analyzer.
///
/// By default, analysis parses documents, performs validation checks, resolves
/// imports, and performs type checking.
///
/// Each analysis operation is processed in order of request; however, the
/// individual parsing, resolution, and analysis of documents is performed
/// across a thread pool.
///
/// Note that dropping the analyzer is a blocking operation as it will wait for
/// the queue thread to join.
///
/// The type parameter is the context type passed to the progress callback.
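///
/// A usage sketch modeled on the tests below (the `foo.wdl` path is
/// illustrative, and the import paths assume crate-root re-exports):
///
/// ```ignore
/// use wdl_analysis::{Analyzer, DiagnosticsConfig, path_to_uri};
///
/// # async fn example() -> anyhow::Result<()> {
/// // The analyzer must be created within a Tokio runtime.
/// let analyzer = Analyzer::new(DiagnosticsConfig::default(), |_: (), _, _, _| async {});
/// analyzer
///     .add_document(path_to_uri("foo.wdl").expect("should convert to a URI"))
///     .await?;
///
/// for result in analyzer.analyze(()).await? {
///     println!("{} diagnostics", result.document().diagnostics().len());
/// }
/// # Ok(())
/// # }
/// ```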
#[derive(Debug)]
pub struct Analyzer<Context> {
    /// The sender for sending analysis requests to the queue.
    sender: ManuallyDrop<mpsc::UnboundedSender<Request<Context>>>,
    /// The join handle for the queue task.
    handle: Option<JoinHandle<()>>,
}

impl<Context> Analyzer<Context>
where
    Context: Send + Clone + 'static,
{
    /// Constructs a new analyzer with the given diagnostics config.
    ///
    /// The provided progress callback will be invoked during analysis.
    ///
    /// The analyzer will use a default validator for validation.
    ///
    /// The analyzer must be constructed from the context of a Tokio runtime.
    pub fn new<Progress, Return>(config: DiagnosticsConfig, progress: Progress) -> Self
    where
        Progress: Fn(Context, ProgressKind, usize, usize) -> Return + Send + 'static,
        Return: Future<Output = ()>,
    {
        Self::new_with_validator(config, progress, crate::Validator::default)
    }

    /// Constructs a new analyzer with the given diagnostics config and
    /// validator function.
    ///
    /// The provided progress callback will be invoked during analysis.
    ///
    /// This validator function will be called once per worker thread to
    /// initialize a thread-local validator.
    ///
    /// The analyzer must be constructed from the context of a Tokio runtime.
    pub fn new_with_validator<Progress, Return, Validator>(
        config: DiagnosticsConfig,
        progress: Progress,
        validator: Validator,
    ) -> Self
    where
        Progress: Fn(Context, ProgressKind, usize, usize) -> Return + Send + 'static,
        Return: Future<Output = ()>,
        Validator: Fn() -> crate::Validator + Send + Sync + 'static,
    {
        let (tx, rx) = mpsc::unbounded_channel();
        let tokio = Handle::current();
        let handle = std::thread::spawn(move || {
            let queue = AnalysisQueue::new(config, tokio, progress, validator);
            queue.run(rx);
        });

        Self {
            sender: ManuallyDrop::new(tx),
            handle: Some(handle),
        }
    }

    /// Adds a document to the analyzer. The document may be a local file or a
    /// remote URL.
    ///
    /// Returns an error if the document could not be added.
    pub async fn add_document(&self, uri: Url) -> Result<()> {
        let mut documents = IndexSet::new();
        documents.insert(uri);

        let (tx, rx) = oneshot::channel();
        self.sender
            .send(Request::Add(AddRequest {
                documents,
                completed: tx,
            }))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })?;

        rx.await.map_err(|_| {
            anyhow!("failed to receive response from analysis queue because the channel has closed")
        })?;

        Ok(())
    }

    /// Adds a directory to the analyzer, recursively searching the supplied
    /// directory for WDL documents (files with a `.wdl` extension).
    ///
    /// Returns an error if there was a problem discovering documents for the
    /// specified path.
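    ///
    /// A usage sketch (the `workflows` directory is illustrative):
    ///
    /// ```ignore
    /// # async fn example(analyzer: &wdl_analysis::Analyzer<()>) -> anyhow::Result<()> {
    /// // Every `*.wdl` file under the directory is added to the analyzer.
    /// analyzer.add_directory(std::path::PathBuf::from("workflows")).await?;
    /// # Ok(())
    /// # }
    /// ```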
    pub async fn add_directory(&self, path: PathBuf) -> Result<()> {
        // Start by searching for documents
        let documents = RayonHandle::spawn(move || -> Result<IndexSet<Url>> {
            let mut documents = IndexSet::new();

            let metadata = path.metadata().with_context(|| {
                format!(
                    "failed to read metadata for `{path}`",
                    path = path.display()
                )
            })?;

            if metadata.is_file() {
                bail!("`{path}` is a file, not a directory", path = path.display());
            }

            for result in WalkDir::new(&path).follow_links(true) {
                let entry = result.with_context(|| {
                    format!("failed to read directory `{path}`", path = path.display())
                })?;
                if !entry.file_type().is_file()
                    || entry.path().extension().and_then(OsStr::to_str) != Some("wdl")
                {
                    continue;
                }

                documents.insert(path_to_uri(entry.path()).with_context(|| {
                    format!(
                        "failed to convert path `{path}` to a URI",
                        path = entry.path().display()
                    )
                })?);
            }

            Ok(documents)
        })
        .await?;

        if documents.is_empty() {
            return Ok(());
        }

        // Send the add request to the queue
        let (tx, rx) = oneshot::channel();
        self.sender
            .send(Request::Add(AddRequest {
                documents,
                completed: tx,
            }))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })?;

        rx.await.map_err(|_| {
            anyhow!("failed to receive response from analysis queue because the channel has closed")
        })?;

        Ok(())
    }

    /// Removes the specified documents from the analyzer.
    ///
    /// If a specified URI is a prefix (i.e. a directory) of documents known to
    /// the analyzer, those documents will be removed.
    ///
    /// A document is only removed if it is no longer referenced by any
    /// importing document.
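    ///
    /// A sketch of removing an entire directory of documents (the `workflows`
    /// path is illustrative):
    ///
    /// ```ignore
    /// # async fn example(analyzer: &wdl_analysis::Analyzer<()>) -> anyhow::Result<()> {
    /// // A directory URI acts as a prefix and removes every document under it.
    /// let uri = wdl_analysis::path_to_uri("workflows").expect("should convert to a URI");
    /// analyzer.remove_documents(vec![uri]).await?;
    /// # Ok(())
    /// # }
    /// ```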
    pub async fn remove_documents(&self, documents: Vec<Url>) -> Result<()> {
        // Send the remove request to the queue
        let (tx, rx) = oneshot::channel();
        self.sender
            .send(Request::Remove(RemoveRequest {
                documents,
                completed: tx,
            }))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })?;

        rx.await.map_err(|_| {
            anyhow!("failed to receive response from analysis queue because the channel has closed")
        })?;

        Ok(())
    }

    /// Notifies the analyzer that a document has an incremental change.
    ///
    /// Changes to documents that aren't known to the analyzer are ignored.
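    ///
    /// A sketch of notifying an edit, modeled on the tests below (the URI,
    /// version, and edit are illustrative):
    ///
    /// ```ignore
    /// # fn example(analyzer: &wdl_analysis::Analyzer<()>, uri: url::Url) -> anyhow::Result<()> {
    /// use wdl_analysis::{IncrementalChange, SourceEdit, SourcePosition, SourcePositionEncoding};
    ///
    /// analyzer.notify_incremental_change(uri, IncrementalChange {
    ///     version: 2,
    ///     start: None,
    ///     edits: vec![SourceEdit::new(
    ///         SourcePosition::new(0, 8)..SourcePosition::new(0, 11),
    ///         SourcePositionEncoding::UTF8,
    ///         "1.2",
    ///     )],
    /// })?;
    /// # Ok(())
    /// # }
    /// ```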
    pub fn notify_incremental_change(
        &self,
        document: Url,
        change: IncrementalChange,
    ) -> Result<()> {
        self.sender
            .send(Request::NotifyIncrementalChange(
                NotifyIncrementalChangeRequest { document, change },
            ))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })
    }

    /// Notifies the analyzer that a document has fully changed and should be
    /// fetched again.
    ///
    /// Changes to documents that aren't known to the analyzer are ignored.
    ///
    /// If `discard_pending` is true, then any pending incremental changes are
    /// discarded; otherwise, the full change is ignored if there are pending
    /// incremental changes.
    pub fn notify_change(&self, document: Url, discard_pending: bool) -> Result<()> {
        self.sender
            .send(Request::NotifyChange(NotifyChangeRequest {
                document,
                discard_pending,
            }))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })
    }

    /// Analyzes a specific document.
    ///
    /// The provided context is passed to the progress callback.
    ///
    /// If the document is up-to-date and was previously analyzed, the current
    /// analysis result is returned.
    ///
    /// Returns an analysis result for each document that was analyzed.
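    ///
    /// A usage sketch (assumes `uri` was previously added to the analyzer):
    ///
    /// ```ignore
    /// # async fn example(analyzer: &wdl_analysis::Analyzer<()>, uri: url::Url) -> anyhow::Result<()> {
    /// // The unit context `()` is forwarded to the progress callback.
    /// let results = analyzer.analyze_document((), uri).await?;
    /// for result in &results {
    ///     println!("{} diagnostics", result.document().diagnostics().len());
    /// }
    /// # Ok(())
    /// # }
    /// ```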
    pub async fn analyze_document(
        &self,
        context: Context,
        document: Url,
    ) -> Result<Vec<AnalysisResult>> {
        // Send the analyze request to the queue
        let (tx, rx) = oneshot::channel();
        self.sender
            .send(Request::Analyze(AnalyzeRequest {
                document: Some(document),
                context,
                completed: tx,
            }))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })?;

        rx.await.map_err(|_| {
            anyhow!("failed to receive response from analysis queue because the channel has closed")
        })?
    }

    /// Performs analysis of all documents.
    ///
    /// The provided context is passed to the progress callback.
    ///
    /// If a document is up-to-date and was previously analyzed, the current
    /// analysis result is returned.
    ///
    /// Returns an analysis result for each document that was analyzed.
    pub async fn analyze(&self, context: Context) -> Result<Vec<AnalysisResult>> {
        // Send the analyze request to the queue
        let (tx, rx) = oneshot::channel();
        self.sender
            .send(Request::Analyze(AnalyzeRequest {
                document: None, // analyze all documents
                context,
                completed: tx,
            }))
            .map_err(|_| {
                anyhow!("failed to send request to analysis queue because the channel has closed")
            })?;

        rx.await.map_err(|_| {
            anyhow!("failed to receive response from analysis queue because the channel has closed")
        })?
    }

    /// Formats a document.
    pub async fn format_document(&self, document: Url) -> Result<Option<(u32, u32, String)>> {
        let (tx, rx) = oneshot::channel();
        self.sender
            .send(Request::Format(FormatRequest {
                document,
                completed: tx,
            }))
            .map_err(|_| {
                anyhow!("failed to send format request to the queue because the channel has closed")
            })?;

        rx.await.map_err(|_| {
            anyhow!("failed to receive response from analysis queue because the channel has closed")
        })
    }
}

impl Default for Analyzer<()> {
    fn default() -> Self {
        Self::new(DiagnosticsConfig::default(), |_, _, _, _| async {})
    }
}

impl<C> Drop for Analyzer<C> {
    fn drop(&mut self) {
        unsafe { ManuallyDrop::drop(&mut self.sender) };
        if let Some(handle) = self.handle.take() {
            handle.join().unwrap();
        }
    }
}

/// Constant that asserts `Analyzer` is `Send + Sync`; if not, it fails to
/// compile.
const _: () = {
    /// Helper that will fail to compile if T is not `Send + Sync`.
    const fn _assert<T: Send + Sync>() {}
    _assert::<Analyzer<()>>();
};

#[cfg(test)]
mod test {
    use std::fs;

    use tempfile::TempDir;
    use wdl_ast::Severity;

    use super::*;
    use crate::rules;

    #[tokio::test]
    async fn it_returns_empty_results() {
        let analyzer = Analyzer::new(DiagnosticsConfig::new(rules()), |_: (), _, _, _| async {});
        let results = analyzer.analyze(()).await.unwrap();
        assert!(results.is_empty());
    }

    #[tokio::test]
    async fn it_analyzes_a_document() {
        let dir = TempDir::new().expect("failed to create temporary directory");
        let path = dir.path().join("foo.wdl");
        fs::write(
            &path,
            r#"version 1.1

task test {
    command <<<>>>
}

workflow test {
}
"#,
        )
        .expect("failed to create test file");

        // Analyze the file and check the resulting diagnostic
        let analyzer = Analyzer::new(DiagnosticsConfig::new(rules()), |_: (), _, _, _| async {});
        analyzer
            .add_document(path_to_uri(&path).expect("should convert to URI"))
            .await
            .expect("should add document");

        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].document.diagnostics().len(), 1);
        assert_eq!(results[0].document.diagnostics()[0].rule(), None);
        assert_eq!(
            results[0].document.diagnostics()[0].severity(),
            Severity::Error
        );
        assert_eq!(
            results[0].document.diagnostics()[0].message(),
            "conflicting workflow name `test`"
        );

        // Analyze again and ensure the analysis result id is unchanged
        let id = results[0].document.id().clone();
        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].document.id().as_ref(), id.as_ref());
        assert_eq!(results[0].document.diagnostics().len(), 1);
        assert_eq!(results[0].document.diagnostics()[0].rule(), None);
        assert_eq!(
            results[0].document.diagnostics()[0].severity(),
            Severity::Error
        );
        assert_eq!(
            results[0].document.diagnostics()[0].message(),
            "conflicting workflow name `test`"
        );
    }

    #[tokio::test]
    async fn it_reanalyzes_a_document_on_change() {
        let dir = TempDir::new().expect("failed to create temporary directory");
        let path = dir.path().join("foo.wdl");
        fs::write(
            &path,
            r#"version 1.1

task test {
    command <<<>>>
}

workflow test {
}
"#,
        )
        .expect("failed to create test file");

        // Analyze the file and check the resulting diagnostic
        let analyzer = Analyzer::new(DiagnosticsConfig::new(rules()), |_: (), _, _, _| async {});
        analyzer
            .add_document(path_to_uri(&path).expect("should convert to URI"))
            .await
            .expect("should add document");

        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].document.diagnostics().len(), 1);
        assert_eq!(results[0].document.diagnostics()[0].rule(), None);
        assert_eq!(
            results[0].document.diagnostics()[0].severity(),
            Severity::Error
        );
        assert_eq!(
            results[0].document.diagnostics()[0].message(),
            "conflicting workflow name `test`"
        );

        // Rewrite the file to correct the issue
        fs::write(
            &path,
            r#"version 1.1

task test {
    command <<<>>>
}

workflow something_else {
}
"#,
        )
        .expect("failed to create test file");

        let uri = path_to_uri(&path).expect("should convert to URI");
        analyzer.notify_change(uri.clone(), false).unwrap();

        // Analyze again and ensure the analysis result id changed and the
        // issue was fixed
        let id = results[0].document.id().clone();
        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 1);
        assert!(results[0].document.id().as_ref() != id.as_ref());
        assert_eq!(results[0].document.diagnostics().len(), 0);

        // Analyze again and ensure the analysis result id is unchanged
        let id = results[0].document.id().clone();
        let results = analyzer.analyze_document((), uri).await.unwrap();
        assert_eq!(results.len(), 1);
        assert!(results[0].document.id().as_ref() == id.as_ref());
        assert_eq!(results[0].document.diagnostics().len(), 0);
    }

    #[tokio::test]
    async fn it_reanalyzes_a_document_on_incremental_change() {
        let dir = TempDir::new().expect("failed to create temporary directory");
        let path = dir.path().join("foo.wdl");
        fs::write(
            &path,
            r#"version 1.1

task test {
    command <<<>>>
}

workflow test {
}
"#,
        )
        .expect("failed to create test file");

        // Analyze the file and check the resulting diagnostic
        let analyzer = Analyzer::new(DiagnosticsConfig::new(rules()), |_: (), _, _, _| async {});
        analyzer
            .add_document(path_to_uri(&path).expect("should convert to URI"))
            .await
            .expect("should add document");

        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 1);
        assert_eq!(results[0].document.diagnostics().len(), 1);
        assert_eq!(results[0].document.diagnostics()[0].rule(), None);
        assert_eq!(
            results[0].document.diagnostics()[0].severity(),
            Severity::Error
        );
        assert_eq!(
            results[0].document.diagnostics()[0].message(),
            "conflicting workflow name `test`"
        );

        // Edit the file to correct the issue
        let uri = path_to_uri(&path).expect("should convert to URI");
        analyzer
            .notify_incremental_change(
                uri.clone(),
                IncrementalChange {
                    version: 2,
                    start: None,
                    edits: vec![SourceEdit {
                        range: SourcePosition::new(6, 9)..SourcePosition::new(6, 13),
                        encoding: SourcePositionEncoding::UTF8,
                        text: "something_else".to_string(),
                    }],
                },
            )
            .unwrap();

        // Analyze again and ensure the analysis result id is changed and the issue was
        // fixed
        let id = results[0].document.id().clone();
        let results = analyzer.analyze_document((), uri).await.unwrap();
        assert_eq!(results.len(), 1);
        assert!(results[0].document.id().as_ref() != id.as_ref());
        assert_eq!(results[0].document.diagnostics().len(), 0);
    }

    #[tokio::test]
    async fn it_removes_documents() {
        let dir = TempDir::new().expect("failed to create temporary directory");
        let foo = dir.path().join("foo.wdl");
        fs::write(
            &foo,
            r#"version 1.1
workflow test {
}
"#,
        )
        .expect("failed to create test file");

        let bar = dir.path().join("bar.wdl");
        fs::write(
            &bar,
            r#"version 1.1
workflow test {
}
"#,
        )
        .expect("failed to create test file");

        let baz = dir.path().join("baz.wdl");
        fs::write(
            &baz,
            r#"version 1.1
workflow test {
}
"#,
        )
        .expect("failed to create test file");

        // Add all three documents to the analyzer
        let analyzer = Analyzer::new(DiagnosticsConfig::new(rules()), |_: (), _, _, _| async {});
        analyzer
            .add_directory(dir.path().to_path_buf())
            .await
            .expect("should add documents");

        // Analyze the documents
        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 3);
        assert!(results[0].document.diagnostics().is_empty());
        assert!(results[1].document.diagnostics().is_empty());
        assert!(results[2].document.diagnostics().is_empty());

        // Analyze the documents again
        let results = analyzer.analyze(()).await.unwrap();
        assert_eq!(results.len(), 3);

        // Remove the documents by directory
        analyzer
            .remove_documents(vec![
                path_to_uri(dir.path()).expect("should convert to URI"),
            ])
            .await
            .unwrap();
        let results = analyzer.analyze(()).await.unwrap();
        assert!(results.is_empty());
    }
}