// quillmark_core/normalize.rs
1//! # Input Normalization
2//!
3//! This module provides input normalization for markdown content before parsing.
4//! Normalization ensures that invisible control characters and other artifacts
5//! that can interfere with markdown parsing are handled consistently.
6//!
7//! ## Overview
8//!
9//! Input text may contain invisible Unicode characters (especially from copy-paste)
10//! that interfere with markdown parsing. This module provides functions to:
11//!
12//! - Strip Unicode bidirectional formatting characters that break delimiter recognition
13//! - Fix HTML comment fences to preserve trailing text
14//! - Apply all normalizations in the correct order
15//!
16//! Double chevrons (`<<` and `>>`) are passed through unchanged without conversion.
17//!
18//! ## Functions
19//!
20//! - [`strip_bidi_formatting`] - Remove Unicode bidi control characters
21//! - [`normalize_markdown`] - Apply all markdown-specific normalizations
22//! - [`normalize_fields`] - Normalize document fields (bidi stripping)
23//!
24//! ## Why Normalize?
25//!
26//! Unicode bidirectional formatting characters (LRO, RLO, LRE, RLE, etc.) are invisible
27//! control characters used for bidirectional text layout. When placed adjacent to markdown
28//! delimiters like `**`, they can prevent parsers from recognizing the delimiters:
29//!
30//! ```text
31//! **bold** or <U+202D>**(1234**
32//!             ^^^^^^^^ invisible LRO here prevents second ** from being recognized as bold
33//! ```
34//!
35//! These characters commonly appear when copying text from:
36//! - Web pages with mixed LTR/RTL content
37//! - PDF documents
38//! - Word processors
39//! - Some clipboard managers
40//!
41//! ## Examples
42//!
43//! ```
44//! use quillmark_core::normalize::strip_bidi_formatting;
45//!
46//! // Input with invisible U+202D (LRO) before second **
47//! let input = "**asdf** or \u{202D}**(1234**";
48//! let cleaned = strip_bidi_formatting(input);
49//! assert_eq!(cleaned, "**asdf** or **(1234**");
50//! ```
51
52use crate::error::MAX_NESTING_DEPTH;
53use crate::parse::BODY_FIELD;
54use crate::value::QuillValue;
55use std::collections::HashMap;
56use unicode_normalization::UnicodeNormalization;
57
/// Errors that can occur during normalization.
///
/// Currently the only failure mode is excessive JSON nesting, which is
/// rejected by [`normalize_json_value_inner`]'s depth check to guard the
/// recursive normalizer against stack overflow on adversarial input.
#[derive(Debug, thiserror::Error)]
pub enum NormalizationError {
    /// JSON nesting depth exceeded maximum allowed
    #[error("JSON nesting too deep: {depth} levels (max: {max} levels)")]
    NestingTooDeep {
        /// Actual depth (the level at which the limit was detected)
        depth: usize,
        /// Maximum allowed depth (`MAX_NESTING_DEPTH`)
        max: usize,
    },
}
70
/// Returns `true` if `c` is one of the Unicode bidirectional formatting
/// (bidi control) characters that this module strips from input.
#[inline]
fn is_bidi_char(c: char) -> bool {
    // The twelve bidi control characters recognized by this module.
    const BIDI_CONTROLS: &[char] = &[
        '\u{061C}', // ARABIC LETTER MARK (ALM)
        '\u{200E}', // LEFT-TO-RIGHT MARK (LRM)
        '\u{200F}', // RIGHT-TO-LEFT MARK (RLM)
        '\u{202A}', // LEFT-TO-RIGHT EMBEDDING (LRE)
        '\u{202B}', // RIGHT-TO-LEFT EMBEDDING (RLE)
        '\u{202C}', // POP DIRECTIONAL FORMATTING (PDF)
        '\u{202D}', // LEFT-TO-RIGHT OVERRIDE (LRO)
        '\u{202E}', // RIGHT-TO-LEFT OVERRIDE (RLO)
        '\u{2066}', // LEFT-TO-RIGHT ISOLATE (LRI)
        '\u{2067}', // RIGHT-TO-LEFT ISOLATE (RLI)
        '\u{2068}', // FIRST STRONG ISOLATE (FSI)
        '\u{2069}', // POP DIRECTIONAL ISOLATE (PDI)
    ];
    BIDI_CONTROLS.contains(&c)
}
90
91/// Strips Unicode bidirectional formatting characters that can interfere with markdown parsing.
92///
93/// These invisible control characters are used for bidirectional text layout but can
94/// break markdown delimiter recognition when placed adjacent to `**`, `*`, `_`, etc.
95///
96/// # Characters Stripped
97///
98/// - U+061C (ARABIC LETTER MARK, ALM)
99/// - U+200E (LEFT-TO-RIGHT MARK, LRM)
100/// - U+200F (RIGHT-TO-LEFT MARK, RLM)
101/// - U+202A (LEFT-TO-RIGHT EMBEDDING, LRE)
102/// - U+202B (RIGHT-TO-LEFT EMBEDDING, RLE)
103/// - U+202C (POP DIRECTIONAL FORMATTING, PDF)
104/// - U+202D (LEFT-TO-RIGHT OVERRIDE, LRO)
105/// - U+202E (RIGHT-TO-LEFT OVERRIDE, RLO)
106/// - U+2066 (LEFT-TO-RIGHT ISOLATE, LRI)
107/// - U+2067 (RIGHT-TO-LEFT ISOLATE, RLI)
108/// - U+2068 (FIRST STRONG ISOLATE, FSI)
109/// - U+2069 (POP DIRECTIONAL ISOLATE, PDI)
110///
111/// # Examples
112///
113/// ```
114/// use quillmark_core::normalize::strip_bidi_formatting;
115///
116/// // Normal text is unchanged
117/// assert_eq!(strip_bidi_formatting("hello"), "hello");
118///
119/// // LRO character is stripped
120/// assert_eq!(strip_bidi_formatting("he\u{202D}llo"), "hello");
121///
122/// // All bidi characters are stripped
123/// let input = "\u{200E}\u{200F}\u{202A}\u{202B}\u{202C}\u{202D}\u{202E}";
124/// assert_eq!(strip_bidi_formatting(input), "");
125/// ```
126pub fn strip_bidi_formatting(s: &str) -> String {
127    // Early return optimization: avoid allocation if no bidi characters present
128    if !s.chars().any(is_bidi_char) {
129        return s.to_string();
130    }
131
132    s.chars().filter(|c| !is_bidi_char(*c)).collect()
133}
134
135/// Fixes HTML comment closing fences to prevent content loss.
136///
137/// According to CommonMark, HTML block type 2 (comments) ends with the line containing `-->`.
138/// This means any text on the same line after `-->` is included in the HTML block and would
139/// be discarded by markdown parsers that ignore HTML blocks.
140///
141/// This function inserts a newline after `-->` when followed by non-whitespace content,
142/// ensuring the trailing text is parsed as regular markdown.
143///
144/// # Examples
145///
146/// ```
147/// use quillmark_core::normalize::fix_html_comment_fences;
148///
149/// // Text on same line as --> is moved to next line
150/// assert_eq!(
151///     fix_html_comment_fences("<!-- comment -->Some text"),
152///     "<!-- comment -->\nSome text"
153/// );
154///
155/// // Already on separate line - no change
156/// assert_eq!(
157///     fix_html_comment_fences("<!-- comment -->\nSome text"),
158///     "<!-- comment -->\nSome text"
159/// );
160///
161/// // Only whitespace after --> - no change needed
162/// assert_eq!(
163///     fix_html_comment_fences("<!-- comment -->   \nSome text"),
164///     "<!-- comment -->   \nSome text"
165/// );
166///
167/// // Multi-line comments with trailing text
168/// assert_eq!(
169///     fix_html_comment_fences("<!--\nmultiline\n-->Trailing text"),
170///     "<!--\nmultiline\n-->\nTrailing text"
171/// );
172/// ```
pub fn fix_html_comment_fences(s: &str) -> String {
    // Nothing to do unless a closing fence exists somewhere in the input.
    if !s.contains("-->") {
        return s.to_string();
    }

    // `+ 16` leaves headroom for a handful of inserted newlines.
    let mut out = String::with_capacity(s.len() + 16);
    let mut cursor = 0usize;

    // Walk the input one `<!-- ... -->` pair at a time. `cursor` always marks
    // the start of the text that has not yet been copied into `out`, so a
    // bare `-->` with no preceding `<!--` is never touched.
    while let Some(rel_open) = s[cursor..].find("<!--") {
        let open = cursor + rel_open;

        let Some(rel_close) = s[open..].find("-->") else {
            // Opener without a closer: copy the remainder verbatim and stop.
            out.push_str(&s[cursor..]);
            cursor = s.len();
            break;
        };

        let close = open + rel_close;
        let mut fence_end = close + 3;

        // `<!--- ... --->` fences: fold the closer's extra hyphen into the
        // fence so it is not mistaken for leaked trailing text.
        // (4 == "<!--".len(); checks whether the opener is `<!---`.)
        let extra_hyphen_opener = s
            .get(open + 4..)
            .is_some_and(|rest| rest.starts_with('-'));
        let extra_hyphen_closer = s
            .get(fence_end..)
            .is_some_and(|rest| rest.starts_with('-'));
        if extra_hyphen_opener && extra_hyphen_closer {
            fence_end += 1;
        }

        // Copy everything through (and including) the closing fence.
        out.push_str(&s[cursor..fence_end]);

        // Insert a newline only when non-whitespace text follows the fence on
        // the same line. An immediate LF or CRLF line break needs no fix, and
        // whitespace-only trailers are left alone.
        let tail = &s[fence_end..];
        let same_line = match tail.find('\n') {
            Some(pos) => &tail[..pos],
            None => tail,
        };
        let starts_clean =
            tail.is_empty() || tail.starts_with('\n') || tail.starts_with("\r\n");
        if !starts_clean && !same_line.trim().is_empty() {
            out.push('\n');
        }

        // Continue scanning from just past this fence.
        cursor = fence_end;
    }

    // Copy whatever follows the last processed comment.
    if cursor < s.len() {
        out.push_str(&s[cursor..]);
    }

    out
}
248
249/// Normalizes markdown content by applying all preprocessing steps.
250///
251/// This function applies normalizations in the correct order:
252/// 1. Strip Unicode bidirectional formatting characters
253/// 2. Fix HTML comment closing fences (ensure text after `-->` is preserved)
254///
255/// Note: Guillemet preprocessing (`<<text>>` → `«text»`) is handled separately
256/// in [`normalize_fields`] because it needs to be applied after schema defaults
257/// and coercion.
258///
259/// # Examples
260///
261/// ```
262/// use quillmark_core::normalize::normalize_markdown;
263///
264/// // Bidi characters are stripped
265/// let input = "**bold** \u{202D}**more**";
266/// let normalized = normalize_markdown(input);
267/// assert_eq!(normalized, "**bold** **more**");
268///
269/// // HTML comment trailing text is preserved
270/// let with_comment = "<!-- comment -->Some text";
271/// let normalized = normalize_markdown(with_comment);
272/// assert_eq!(normalized, "<!-- comment -->\nSome text");
273/// ```
274pub fn normalize_markdown(markdown: &str) -> String {
275    let cleaned = strip_bidi_formatting(markdown);
276    fix_html_comment_fences(&cleaned)
277}
278
279/// Normalizes a string value by stripping bidi characters and fixing HTML comment fences.
280///
281/// - For body content: applies `fix_html_comment_fences` to preserve text after `-->`
282/// - For other fields: strips bidi characters only
283///
284/// Double chevrons (`<<` and `>>`) are passed through untouched without conversion to
285/// guillemets. This preserves the original delimiter syntax in the output.
286fn normalize_string(s: &str, is_body: bool) -> String {
287    // First strip bidi formatting characters
288    let cleaned = strip_bidi_formatting(s);
289
290    // Then apply content-specific normalization
291    if is_body {
292        // Fix HTML comment fences (chevrons pass through unchanged)
293        fix_html_comment_fences(&cleaned)
294    } else {
295        // Non-body fields: just return cleaned string (chevrons pass through unchanged)
296        cleaned
297    }
298}
299
300/// Recursively normalize a JSON value with depth tracking.
301///
302/// Returns an error if nesting exceeds MAX_NESTING_DEPTH to prevent stack overflow.
303fn normalize_json_value_inner(
304    value: serde_json::Value,
305    is_body: bool,
306    depth: usize,
307) -> Result<serde_json::Value, NormalizationError> {
308    if depth > MAX_NESTING_DEPTH {
309        return Err(NormalizationError::NestingTooDeep {
310            depth,
311            max: MAX_NESTING_DEPTH,
312        });
313    }
314
315    match value {
316        serde_json::Value::String(s) => {
317            Ok(serde_json::Value::String(normalize_string(&s, is_body)))
318        }
319        serde_json::Value::Array(arr) => {
320            let normalized: Result<Vec<_>, _> = arr
321                .into_iter()
322                .map(|v| normalize_json_value_inner(v, false, depth + 1))
323                .collect();
324            Ok(serde_json::Value::Array(normalized?))
325        }
326        serde_json::Value::Object(map) => {
327            let processed: Result<serde_json::Map<String, serde_json::Value>, _> = map
328                .into_iter()
329                .map(|(k, v)| {
330                    let is_body = k == BODY_FIELD;
331                    normalize_json_value_inner(v, is_body, depth + 1).map(|nv| (k, nv))
332                })
333                .collect();
334            Ok(serde_json::Value::Object(processed?))
335        }
336        // Pass through other types unchanged (numbers, booleans, null)
337        other => Ok(other),
338    }
339}
340
341/// Recursively normalize a JSON value.
342///
343/// This is a convenience wrapper that starts depth tracking at 0.
344/// Logs a warning and returns the original value if depth is exceeded.
345fn normalize_json_value(value: serde_json::Value, is_body: bool) -> serde_json::Value {
346    match normalize_json_value_inner(value.clone(), is_body, 0) {
347        Ok(normalized) => normalized,
348        Err(e) => {
349            // Log warning but don't fail - return original value
350            eprintln!("Warning: {}", e);
351            value
352        }
353    }
354}
355
356/// Normalizes document fields by applying all preprocessing steps.
357///
358/// This function orchestrates input normalization for document fields:
359/// 1. Strips Unicode bidirectional formatting characters from all string values
360/// 2. For the body field: fixes HTML comment fences to preserve trailing text
361///
362/// Double chevrons (`<<` and `>>`) are passed through unchanged in all fields.
363///
364/// # Processing Order
365///
366/// The normalization order is important:
367/// 1. **Bidi stripping** - Must happen first so markdown delimiters are recognized
368/// 2. **HTML comment fence fixing** - Ensures text after `-->` is preserved
369///
370/// # Examples
371///
372/// ```
373/// use quillmark_core::normalize::normalize_fields;
374/// use quillmark_core::QuillValue;
375/// use std::collections::HashMap;
376///
377/// let mut fields = HashMap::new();
378/// fields.insert("title".to_string(), QuillValue::from_json(serde_json::json!("<<hello>>")));
379/// fields.insert("BODY".to_string(), QuillValue::from_json(serde_json::json!("**bold** \u{202D}**more**")));
380///
381/// let result = normalize_fields(fields);
382///
383/// // Title has chevrons preserved (only bidi stripped)
384/// assert_eq!(result.get("title").unwrap().as_str().unwrap(), "<<hello>>");
385///
386/// // Body has bidi chars stripped, chevrons preserved
387/// assert_eq!(result.get("BODY").unwrap().as_str().unwrap(), "**bold** **more**");
388/// ```
389pub fn normalize_fields(fields: HashMap<String, QuillValue>) -> HashMap<String, QuillValue> {
390    fields
391        .into_iter()
392        .map(|(key, value)| {
393            // Normalize field name to NFC form for consistent key comparison
394            // This ensures café (composed) and café (decomposed) are treated as the same key
395            let normalized_key = normalize_field_name(&key);
396            let json = value.into_json();
397            // Treat as body if it's the BODY field (applies HTML comment fence fixes)
398            let treat_as_body = normalized_key == BODY_FIELD;
399            let processed = normalize_json_value(json, treat_as_body);
400            (normalized_key, QuillValue::from_json(processed))
401        })
402        .collect()
403}
404
405/// Normalize field name to Unicode NFC (Canonical Decomposition, followed by Canonical Composition)
406///
407/// This ensures that equivalent Unicode strings (e.g., "café" composed vs decomposed)
408/// are treated as identical field names, preventing subtle bugs where visually
409/// identical keys are treated as different.
410///
411/// # Examples
412///
413/// ```
414/// use quillmark_core::normalize::normalize_field_name;
415///
416/// // Composed form (single code point for é)
417/// let composed = "café";
418/// // Decomposed form (e + combining acute accent)
419/// let decomposed = "cafe\u{0301}";
420///
421/// // Both normalize to the same NFC form
422/// assert_eq!(normalize_field_name(composed), normalize_field_name(decomposed));
423/// ```
424pub fn normalize_field_name(name: &str) -> String {
425    name.nfc().collect()
426}
427
428/// Normalizes a parsed document by applying all field-level normalizations.
429///
430/// This is the **primary entry point** for normalizing documents after parsing.
431/// It ensures consistent processing regardless of how the document was created.
432///
433/// # Normalization Steps
434///
435/// This function applies all normalizations in the correct order:
436/// 1. **Unicode NFC normalization** - Field names are normalized to NFC form
437/// 2. **Bidi stripping** - Invisible bidirectional control characters are removed
438/// 3. **HTML comment fence fixing** - Trailing text after `-->` is preserved (body only)
439///
440/// Double chevrons (`<<` and `>>`) are passed through unchanged without conversion.
441///
442/// # When to Use
443///
444/// Call this function after parsing and before rendering:
445///
446/// ```no_run
447/// use quillmark_core::{ParsedDocument, normalize::normalize_document};
448///
449/// let markdown = "---\ntitle: Example\n---\n\nBody with <<placeholder>>";
450/// let doc = ParsedDocument::from_markdown(markdown).unwrap();
451/// let normalized = normalize_document(doc);
452/// // Use normalized document for rendering...
453/// ```
454///
455/// # Direct API Usage
456///
457/// If you're constructing a `ParsedDocument` directly via [`crate::parse::ParsedDocument::new`]
458/// rather than parsing from markdown, you **MUST** call this function to ensure
459/// consistent normalization:
460///
461/// ```
462/// use quillmark_core::{ParsedDocument, QuillValue, normalize::normalize_document};
463/// use quillmark_core::version::QuillReference;
464/// use std::collections::HashMap;
465///
466/// // Direct construction (e.g., from API or database)
467/// let mut fields = HashMap::new();
468/// fields.insert("title".to_string(), QuillValue::from_json(serde_json::json!("Test")));
469/// fields.insert("BODY".to_string(), QuillValue::from_json(serde_json::json!("<<content>>")));
470///
471/// let quill_ref = QuillReference::latest("my_quill".to_string());
472/// let doc = ParsedDocument::new(fields, quill_ref);
473/// let normalized = normalize_document(doc).expect("Failed to normalize document");
474///
475/// // Body has chevrons preserved
476/// assert_eq!(normalized.body().unwrap(), "<<content>>");
477/// ```
478///
479/// # Idempotency
480///
481/// This function is idempotent - calling it multiple times produces the same result.
482/// However, for performance reasons, avoid unnecessary repeated calls.
483pub fn normalize_document(
484    doc: crate::parse::ParsedDocument,
485) -> Result<crate::parse::ParsedDocument, crate::error::ParseError> {
486    let normalized_fields = normalize_fields(doc.fields().clone());
487    Ok(crate::parse::ParsedDocument::new(
488        normalized_fields,
489        doc.quill_reference().clone(),
490    ))
491}
492
493#[cfg(test)]
494mod tests {
495    use super::*;
496
497    // Tests for strip_bidi_formatting
498
499    #[test]
500    fn test_strip_bidi_no_change() {
501        assert_eq!(strip_bidi_formatting("hello world"), "hello world");
502        assert_eq!(strip_bidi_formatting(""), "");
503        assert_eq!(strip_bidi_formatting("**bold** text"), "**bold** text");
504    }
505
506    #[test]
507    fn test_strip_bidi_lro() {
508        // U+202D (LEFT-TO-RIGHT OVERRIDE)
509        assert_eq!(strip_bidi_formatting("he\u{202D}llo"), "hello");
510        assert_eq!(
511            strip_bidi_formatting("**asdf** or \u{202D}**(1234**"),
512            "**asdf** or **(1234**"
513        );
514    }
515
516    #[test]
517    fn test_strip_bidi_rlo() {
518        // U+202E (RIGHT-TO-LEFT OVERRIDE)
519        assert_eq!(strip_bidi_formatting("he\u{202E}llo"), "hello");
520    }
521
522    #[test]
523    fn test_strip_bidi_marks() {
524        // U+200E (LRM) and U+200F (RLM)
525        assert_eq!(strip_bidi_formatting("a\u{200E}b\u{200F}c"), "abc");
526    }
527
528    #[test]
529    fn test_strip_bidi_embeddings() {
530        // U+202A (LRE), U+202B (RLE), U+202C (PDF)
531        assert_eq!(
532            strip_bidi_formatting("\u{202A}text\u{202B}more\u{202C}"),
533            "textmore"
534        );
535    }
536
537    #[test]
538    fn test_strip_bidi_isolates() {
539        // U+2066 (LRI), U+2067 (RLI), U+2068 (FSI), U+2069 (PDI)
540        assert_eq!(
541            strip_bidi_formatting("\u{2066}a\u{2067}b\u{2068}c\u{2069}"),
542            "abc"
543        );
544    }
545
546    #[test]
547    fn test_strip_bidi_all_chars() {
548        let all_bidi = "\u{061C}\u{200E}\u{200F}\u{202A}\u{202B}\u{202C}\u{202D}\u{202E}\u{2066}\u{2067}\u{2068}\u{2069}";
549        assert_eq!(strip_bidi_formatting(all_bidi), "");
550    }
551
552    #[test]
553    fn test_strip_bidi_arabic_letter_mark() {
554        // U+061C ARABIC LETTER MARK (ALM) should be stripped
555        assert_eq!(strip_bidi_formatting("hello\u{061C}world"), "helloworld");
556        assert_eq!(strip_bidi_formatting("\u{061C}**bold**"), "**bold**");
557    }
558
559    #[test]
560    fn test_strip_bidi_unicode_preserved() {
561        // Non-bidi unicode should be preserved
562        assert_eq!(strip_bidi_formatting("你好世界"), "你好世界");
563        assert_eq!(strip_bidi_formatting("مرحبا"), "مرحبا");
564        assert_eq!(strip_bidi_formatting("🎉"), "🎉");
565    }
566
567    // Tests for normalize_markdown
568
569    #[test]
570    fn test_normalize_markdown_basic() {
571        assert_eq!(normalize_markdown("hello"), "hello");
572        assert_eq!(
573            normalize_markdown("**bold** \u{202D}**more**"),
574            "**bold** **more**"
575        );
576    }
577
578    #[test]
579    fn test_normalize_markdown_html_comment() {
580        assert_eq!(
581            normalize_markdown("<!-- comment -->Some text"),
582            "<!-- comment -->\nSome text"
583        );
584    }
585
586    // Tests for fix_html_comment_fences
587
588    #[test]
589    fn test_fix_html_comment_no_comment() {
590        assert_eq!(fix_html_comment_fences("hello world"), "hello world");
591        assert_eq!(fix_html_comment_fences("**bold** text"), "**bold** text");
592        assert_eq!(fix_html_comment_fences(""), "");
593    }
594
595    #[test]
596    fn test_fix_html_comment_single_line_trailing_text() {
597        // Text on same line as --> should be moved to next line
598        assert_eq!(
599            fix_html_comment_fences("<!-- comment -->Same line text"),
600            "<!-- comment -->\nSame line text"
601        );
602    }
603
604    #[test]
605    fn test_fix_html_comment_already_newline() {
606        // Already has newline after --> - no change
607        assert_eq!(
608            fix_html_comment_fences("<!-- comment -->\nNext line text"),
609            "<!-- comment -->\nNext line text"
610        );
611    }
612
613    #[test]
614    fn test_fix_html_comment_only_whitespace_after() {
615        // Only whitespace after --> until newline - no change needed
616        assert_eq!(
617            fix_html_comment_fences("<!-- comment -->   \nSome text"),
618            "<!-- comment -->   \nSome text"
619        );
620    }
621
622    #[test]
623    fn test_fix_html_comment_multiline_trailing_text() {
624        // Multi-line comment with text on closing line
625        assert_eq!(
626            fix_html_comment_fences("<!--\nmultiline\ncomment\n-->Trailing text"),
627            "<!--\nmultiline\ncomment\n-->\nTrailing text"
628        );
629    }
630
631    #[test]
632    fn test_fix_html_comment_multiline_proper() {
633        // Multi-line comment with proper newline after -->
634        assert_eq!(
635            fix_html_comment_fences("<!--\nmultiline\n-->\n\nParagraph text"),
636            "<!--\nmultiline\n-->\n\nParagraph text"
637        );
638    }
639
640    #[test]
641    fn test_fix_html_comment_multiple_comments() {
642        // Multiple comments in the same document
643        assert_eq!(
644            fix_html_comment_fences("<!-- first -->Text\n\n<!-- second -->More text"),
645            "<!-- first -->\nText\n\n<!-- second -->\nMore text"
646        );
647    }
648
649    #[test]
650    fn test_fix_html_comment_end_of_string() {
651        // Comment at end of string - no trailing content
652        assert_eq!(
653            fix_html_comment_fences("Some text before <!-- comment -->"),
654            "Some text before <!-- comment -->"
655        );
656    }
657
658    #[test]
659    fn test_fix_html_comment_only_comment() {
660        // Just a comment with nothing after
661        assert_eq!(
662            fix_html_comment_fences("<!-- comment -->"),
663            "<!-- comment -->"
664        );
665    }
666
667    #[test]
668    fn test_fix_html_comment_arrow_not_comment() {
669        // --> that's not part of a comment (standalone)
670        // Should NOT be touched by the context-aware fixer
671        assert_eq!(fix_html_comment_fences("-->some text"), "-->some text");
672    }
673
674    #[test]
675    fn test_fix_html_comment_nested_opener() {
676        // Nested openers are just text inside the comment
677        // <!-- <!-- -->Trailing
678        // The first <!-- opens, the first --> closes.
679        assert_eq!(
680            fix_html_comment_fences("<!-- <!-- -->Trailing"),
681            "<!-- <!-- -->\nTrailing"
682        );
683    }
684
685    #[test]
686    fn test_fix_html_comment_unmatched_closer() {
687        // Closer without opener
688        assert_eq!(
689            fix_html_comment_fences("text --> more text"),
690            "text --> more text"
691        );
692    }
693
694    #[test]
695    fn test_fix_html_comment_multiple_valid_invalid() {
696        // Mixed valid and invalid comments
697        // <!-- valid -->FixMe
698        // text --> Ignore
699        // <!-- valid2 -->FixMe2
700        let input = "<!-- valid -->FixMe\ntext --> Ignore\n<!-- valid2 -->FixMe2";
701        let expected = "<!-- valid -->\nFixMe\ntext --> Ignore\n<!-- valid2 -->\nFixMe2";
702        assert_eq!(fix_html_comment_fences(input), expected);
703    }
704
705    #[test]
706    fn test_fix_html_comment_crlf() {
707        // CRLF line endings
708        assert_eq!(
709            fix_html_comment_fences("<!-- comment -->\r\nSome text"),
710            "<!-- comment -->\r\nSome text"
711        );
712    }
713
714    #[test]
715    fn test_fix_html_comment_triple_hyphen_single_line() {
716        assert_eq!(
717            fix_html_comment_fences("<!--- comment --->Trailing text"),
718            "<!--- comment --->\nTrailing text"
719        );
720    }
721
722    #[test]
723    fn test_fix_html_comment_triple_hyphen_multiline() {
724        assert_eq!(
725            fix_html_comment_fences("<!---\ncomment\n--->Trailing text"),
726            "<!---\ncomment\n--->\nTrailing text"
727        );
728    }
729
730    // Tests for normalize_fields
731
732    #[test]
733    fn test_normalize_fields_body_bidi() {
734        let mut fields = HashMap::new();
735        fields.insert(
736            BODY_FIELD.to_string(),
737            QuillValue::from_json(serde_json::json!("**bold** \u{202D}**more**")),
738        );
739
740        let result = normalize_fields(fields);
741        assert_eq!(
742            result.get(BODY_FIELD).unwrap().as_str().unwrap(),
743            "**bold** **more**"
744        );
745    }
746
747    #[test]
748    fn test_normalize_fields_body_chevrons_preserved() {
749        let mut fields = HashMap::new();
750        fields.insert(
751            BODY_FIELD.to_string(),
752            QuillValue::from_json(serde_json::json!("<<raw>>")),
753        );
754
755        let result = normalize_fields(fields);
756        // Chevrons are passed through unchanged
757        assert_eq!(result.get(BODY_FIELD).unwrap().as_str().unwrap(), "<<raw>>");
758    }
759
760    #[test]
761    fn test_normalize_fields_body_chevrons_and_bidi() {
762        let mut fields = HashMap::new();
763        fields.insert(
764            BODY_FIELD.to_string(),
765            QuillValue::from_json(serde_json::json!("<<raw>> \u{202D}**bold**")),
766        );
767
768        let result = normalize_fields(fields);
769        // Bidi stripped, chevrons preserved
770        assert_eq!(
771            result.get(BODY_FIELD).unwrap().as_str().unwrap(),
772            "<<raw>> **bold**"
773        );
774    }
775
776    #[test]
777    fn test_normalize_fields_other_field_chevrons_preserved() {
778        let mut fields = HashMap::new();
779        fields.insert(
780            "title".to_string(),
781            QuillValue::from_json(serde_json::json!("<<hello>>")),
782        );
783
784        let result = normalize_fields(fields);
785        // Chevrons are passed through unchanged
786        assert_eq!(result.get("title").unwrap().as_str().unwrap(), "<<hello>>");
787    }
788
789    #[test]
790    fn test_normalize_fields_other_field_bidi_stripped() {
791        let mut fields = HashMap::new();
792        fields.insert(
793            "title".to_string(),
794            QuillValue::from_json(serde_json::json!("he\u{202D}llo")),
795        );
796
797        let result = normalize_fields(fields);
798        assert_eq!(result.get("title").unwrap().as_str().unwrap(), "hello");
799    }
800
801    #[test]
802    fn test_normalize_fields_nested_values() {
803        let mut fields = HashMap::new();
804        fields.insert(
805            "items".to_string(),
806            QuillValue::from_json(serde_json::json!(["<<a>>", "\u{202D}b"])),
807        );
808
809        let result = normalize_fields(fields);
810        let items = result.get("items").unwrap().as_array().unwrap();
811        // Chevrons are preserved, bidi stripped
812        assert_eq!(items[0].as_str().unwrap(), "<<a>>");
813        assert_eq!(items[1].as_str().unwrap(), "b");
814    }
815
816    #[test]
817    fn test_normalize_fields_object_values() {
818        let mut fields = HashMap::new();
819        fields.insert(
820            "meta".to_string(),
821            QuillValue::from_json(serde_json::json!({
822                "title": "<<hello>>",
823                BODY_FIELD: "<<content>>"
824            })),
825        );
826
827        let result = normalize_fields(fields);
828        let meta = result.get("meta").unwrap();
829        let meta_obj = meta.as_object().unwrap();
830        // Chevrons are preserved in all fields
831        assert_eq!(
832            meta_obj.get("title").unwrap().as_str().unwrap(),
833            "<<hello>>"
834        );
835        assert_eq!(
836            meta_obj.get(BODY_FIELD).unwrap().as_str().unwrap(),
837            "<<content>>"
838        );
839    }
840
841    #[test]
842    fn test_normalize_fields_non_string_unchanged() {
843        let mut fields = HashMap::new();
844        fields.insert(
845            "count".to_string(),
846            QuillValue::from_json(serde_json::json!(42)),
847        );
848        fields.insert(
849            "enabled".to_string(),
850            QuillValue::from_json(serde_json::json!(true)),
851        );
852
853        let result = normalize_fields(fields);
854        assert_eq!(result.get("count").unwrap().as_i64().unwrap(), 42);
855        assert!(result.get("enabled").unwrap().as_bool().unwrap());
856    }
857
858    // Tests for depth limiting
859
860    #[test]
861    fn test_normalize_json_value_inner_depth_exceeded() {
862        // Create a deeply nested JSON structure that exceeds MAX_NESTING_DEPTH
863        let mut value = serde_json::json!("leaf");
864        for _ in 0..=crate::error::MAX_NESTING_DEPTH {
865            value = serde_json::json!([value]);
866        }
867
868        // The inner function should return an error
869        let result = super::normalize_json_value_inner(value, false, 0);
870        assert!(result.is_err());
871
872        if let Err(NormalizationError::NestingTooDeep { depth, max }) = result {
873            assert!(depth > max);
874            assert_eq!(max, crate::error::MAX_NESTING_DEPTH);
875        } else {
876            panic!("Expected NestingTooDeep error");
877        }
878    }
879
880    #[test]
881    fn test_normalize_json_value_inner_within_limit() {
882        // Create a nested structure just within the limit
883        let mut value = serde_json::json!("leaf");
884        for _ in 0..50 {
885            value = serde_json::json!([value]);
886        }
887
888        // This should succeed
889        let result = super::normalize_json_value_inner(value, false, 0);
890        assert!(result.is_ok());
891    }
892
893    // Tests for normalize_document
894
895    #[test]
896    fn test_normalize_document_basic() {
897        use crate::parse::ParsedDocument;
898
899        let mut fields = std::collections::HashMap::new();
900        fields.insert(
901            "title".to_string(),
902            crate::value::QuillValue::from_json(serde_json::json!("<<placeholder>>")),
903        );
904        fields.insert(
905            BODY_FIELD.to_string(),
906            crate::value::QuillValue::from_json(serde_json::json!("<<content>> \u{202D}**bold**")),
907        );
908
909        let doc = ParsedDocument::new(
910            fields,
911            crate::version::QuillReference::latest("test".to_string()),
912        );
913        let normalized = super::normalize_document(doc).unwrap();
914
915        // Title has chevrons preserved (only bidi stripped)
916        assert_eq!(
917            normalized.get_field("title").unwrap().as_str().unwrap(),
918            "<<placeholder>>"
919        );
920
921        // Body has bidi stripped, chevrons preserved
922        assert_eq!(normalized.body().unwrap(), "<<content>> **bold**");
923    }
924
925    #[test]
926    fn test_normalize_document_preserves_quill_tag() {
927        use crate::parse::ParsedDocument;
928        use crate::version::QuillReference;
929        use std::str::FromStr;
930
931        let fields = std::collections::HashMap::new();
932        let quill_ref = QuillReference::from_str("custom_quill").unwrap();
933        let doc = ParsedDocument::new(fields, quill_ref);
934        let normalized = super::normalize_document(doc).unwrap();
935
936        assert_eq!(normalized.quill_reference().name, "custom_quill");
937    }
938
939    #[test]
940    fn test_normalize_document_idempotent() {
941        use crate::parse::ParsedDocument;
942
943        let mut fields = std::collections::HashMap::new();
944        fields.insert(
945            BODY_FIELD.to_string(),
946            crate::value::QuillValue::from_json(serde_json::json!("<<content>>")),
947        );
948
949        let doc = ParsedDocument::new(
950            fields,
951            crate::version::QuillReference::latest("test".to_string()),
952        );
953        let normalized_once = super::normalize_document(doc).unwrap();
954        let normalized_twice = super::normalize_document(normalized_once.clone()).unwrap();
955
956        // Calling normalize_document twice should produce the same result
957        assert_eq!(
958            normalized_once.body().unwrap(),
959            normalized_twice.body().unwrap()
960        );
961    }
962}