use ass_core::tokenizer::{AssTokenizer, TokenType};
#[cfg(test)]
mod tokenizer_edge_paths {
    //! Edge-path coverage for `AssTokenizer`: context-sensitive delimiters,
    //! comment detection, whitespace handling, and boundary conditions.

    use super::*;

    /// Tokenizes `$input`, discards the first `$skip` tokens, and asserts
    /// that the next token's type matches `$pat`. Any tokenizer error or
    /// premature end of input panics the test via `unwrap`.
    macro_rules! expect_token {
        ($input:expr, $skip:expr, $pat:pat) => {{
            let mut tokenizer = AssTokenizer::new($input);
            for _ in 0..$skip {
                let _ = tokenizer.next_token().unwrap().unwrap();
            }
            let token = tokenizer.next_token().unwrap().unwrap();
            assert!(matches!(token.token_type, $pat));
        }};
    }

    #[test]
    fn test_closing_section_bracket_in_section_context() {
        // "[" starts a section header; the matching "]" closes it.
        expect_token!("[Script Info]", 0, TokenType::SectionHeader);
        expect_token!("[Script Info]", 1, TokenType::SectionClose);
    }

    #[test]
    fn test_colon_in_document_context() {
        // Skip the key token; the delimiter after it is a Colon.
        expect_token!("Title: Value", 1, TokenType::Colon);
    }

    #[test]
    fn test_opening_style_override_brace() {
        expect_token!("{\\b1}", 0, TokenType::OverrideBlock);
    }

    #[test]
    fn test_closing_style_override_brace_in_override_context() {
        // Token after the override block is its closing brace.
        expect_token!("{\\b1}", 1, TokenType::OverrideClose);
    }

    #[test]
    fn test_comma_delimiter_handling() {
        expect_token!("field1,field2", 1, TokenType::Comma);
    }

    #[test]
    fn test_newline_context_reset() {
        expect_token!("line1\nline2", 1, TokenType::Newline);
    }

    #[test]
    fn test_carriage_return_context_reset() {
        // A bare carriage return is reported as a Newline token too.
        expect_token!("line1\rline2", 1, TokenType::Newline);
    }

    #[test]
    fn test_semicolon_comment_detection() {
        expect_token!("; This is a comment", 0, TokenType::Comment);
    }

    #[test]
    fn test_exclamation_comment_detection() {
        expect_token!("!: This is also a comment", 0, TokenType::Comment);
    }

    #[test]
    fn test_text_fallback_path() {
        // Input with no delimiter characters falls through to one Text token
        // whose span covers the entire input.
        let mut tokenizer = AssTokenizer::new("regular text without special chars");
        let tok = tokenizer.next_token().unwrap().unwrap();
        assert!(matches!(tok.token_type, TokenType::Text));
        assert_eq!(tok.span, "regular text without special chars");
    }

    #[test]
    fn test_position_advancement_check() {
        let mut tokenizer = AssTokenizer::new("a");
        let only = tokenizer.next_token().unwrap().unwrap();
        assert!(matches!(only.token_type, TokenType::Text));
        assert_eq!(only.span, "a");
        // After the single character the stream must be exhausted.
        assert!(tokenizer.next_token().unwrap().is_none());
    }

    #[test]
    fn test_token_span_creation() {
        // The first token of a fresh input reports line 1, column 1.
        let mut tokenizer = AssTokenizer::new("test");
        let first = tokenizer.next_token().unwrap().unwrap();
        assert_eq!(first.span, "test");
        assert_eq!(first.line, 1);
        assert_eq!(first.column, 1);
    }

    #[test]
    fn test_tokenize_all_iteration_boundary() {
        let mut tokenizer = AssTokenizer::new("short");
        // A single word tokenizes to exactly one Text token.
        let tokens = tokenizer.tokenize_all().unwrap();
        assert_eq!(tokens.len(), 1);
        assert!(matches!(tokens[0].token_type, TokenType::Text));
    }

    #[test]
    fn test_mixed_whitespace_handling() {
        let mut tokenizer = AssTokenizer::new(" \t\n");
        let first = tokenizer.next_token().unwrap().unwrap();
        let is_expected = matches!(
            first.token_type,
            TokenType::Whitespace | TokenType::Text | TokenType::Newline
        );
        assert!(is_expected);
        // Drain the remainder; each unwrap verifies tokenization stays error-free.
        while tokenizer.next_token().unwrap().is_some() {}
    }

    #[test]
    fn test_context_dependent_character_handling() {
        // ':' and '{' appear mid-line; the mixed input should produce at
        // least four tokens overall.
        let mut tokenizer = AssTokenizer::new("text:value{override}");
        let tokens = tokenizer.tokenize_all().unwrap();
        assert!(tokens.len() >= 4);
    }

    #[test]
    fn test_empty_section_header() {
        // "[]" still yields a header token followed by a close token.
        expect_token!("[]", 0, TokenType::SectionHeader);
        expect_token!("[]", 1, TokenType::SectionClose);
    }

    #[test]
    fn test_empty_style_override() {
        // "{}" still yields an override-open followed by an override-close.
        expect_token!("{}", 0, TokenType::OverrideBlock);
        expect_token!("{}", 1, TokenType::OverrideClose);
    }

    #[test]
    fn test_unicode_boundary_handling() {
        // Multi-byte UTF-8 (accented letters + emoji) must not be split on
        // invalid char boundaries; the first token keeps the full "café".
        let mut tokenizer = AssTokenizer::new("café🎬");
        let tok = tokenizer.next_token().unwrap().unwrap();
        assert!(matches!(tok.token_type, TokenType::Text));
        assert!(tok.span.contains("café"));
    }

    #[test]
    fn test_consecutive_delimiters() {
        let mut tokenizer = AssTokenizer::new("::,,");
        // Four adjacent delimiter characters yield at least four tokens.
        let tokens = tokenizer.tokenize_all().unwrap();
        assert!(tokens.len() >= 4);
    }

    #[test]
    fn test_field_value_context_boundary() {
        let mut tokenizer = AssTokenizer::new("Key: Value\nNext: Line");
        let tokens = tokenizer.tokenize_all().unwrap();
        // Both a Colon and a Newline must appear somewhere in the stream.
        let mut saw_colon = false;
        let mut saw_newline = false;
        for token in &tokens {
            match token.token_type {
                TokenType::Colon => saw_colon = true,
                TokenType::Newline => saw_newline = true,
                _ => {}
            }
        }
        assert!(saw_colon);
        assert!(saw_newline);
    }
}