use genpdfi::Alignment;
/// The syntactic context the lexer is currently parsing in.
///
/// The context restricts which token kinds may start at the current
/// position — e.g. headings, blockquotes, and horizontal rules are only
/// recognized in block-level contexts (`Root`, `ListItem`, `BlockQuote`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ParseContext {
    /// Top level of the document.
    Root,
    /// Inside a list item's content.
    ListItem,
    /// Inside a table cell (inline content only).
    TableCell,
    /// Inside a blockquote.
    BlockQuote,
    /// Generic inline context (heading or emphasis content).
    Inline,
}
/// A lexical token produced by the Markdown lexer.
#[derive(Debug, PartialEq, Clone)]
pub enum Token {
    /// Heading content plus its level (number of leading `#`).
    Heading(Vec<Token>, usize),
    /// Emphasis run: `level` is the delimiter count (1 = `*`, 2 = `**`),
    /// capped at 3 when constructed.
    Emphasis { level: usize, content: Vec<Token> },
    /// Strong emphasis content.
    /// NOTE(review): never constructed by this lexer — `**bold**` is emitted
    /// as `Emphasis { level: 2, .. }`; variant kept for API compatibility.
    StrongEmphasis(Vec<Token>),
    /// Code span or fenced block: `(language, code)`.
    /// The language is empty for inline code.
    Code(String, String),
    /// One blockquote line, stored as raw (un-lexed) text.
    BlockQuote(String),
    /// A list item; `number` carries the source number for ordered items.
    ListItem {
        content: Vec<Token>,
        ordered: bool,
        number: Option<usize>,
    },
    /// Link: `(text, url)`.
    Link(String, String),
    /// Image: `(alt text, url)`.
    Image(String, String),
    /// Plain text run.
    Text(String),
    /// A table: per-cell token lists for the header row and each body row.
    Table {
        headers: Vec<Vec<Token>>,
        aligns: Vec<Alignment>,
        rows: Vec<Vec<Vec<Token>>>,
    },
    /// Column alignment marker.
    /// NOTE(review): never constructed — alignments live in `Table::aligns`.
    TableAlignment(Alignment),
    /// Contents of an HTML comment `<!-- ... -->` (delimiters stripped).
    HtmlComment(String),
    /// A line break.
    Newline,
    /// A horizontal rule (three or more consecutive dashes).
    HorizontalRule,
    /// Unrecognized input (used in error payloads).
    Unknown(String),
}
impl Token {
    /// Concatenates every piece of plain text reachable from `tokens`,
    /// in source order, into a single `String`.
    ///
    /// Structural tokens (`Newline`, `HorizontalRule`, `TableAlignment`)
    /// contribute nothing; containers are traversed recursively.
    pub fn collect_all_text(tokens: &[Token]) -> String {
        let mut out = String::new();
        for token in tokens {
            token.collect_text_recursive(&mut out);
        }
        out
    }

    /// Appends this token's textual content (and that of any children,
    /// depth-first) to `result`.
    fn collect_text_recursive(&self, result: &mut String) {
        // Walks a list of child tokens in order.
        fn walk(children: &[Token], out: &mut String) {
            for child in children {
                child.collect_text_recursive(out);
            }
        }
        match self {
            // Leaf variants that carry their text directly.
            Token::Text(s)
            | Token::BlockQuote(s)
            | Token::HtmlComment(s)
            | Token::Unknown(s) => result.push_str(s),
            // Code contributes its body, not its language tag.
            Token::Code(_, body) => result.push_str(body),
            // Links use their label, images their alt text; URLs are dropped.
            Token::Link(label, _) | Token::Image(label, _) => result.push_str(label),
            // Containers: recurse into the child token lists.
            Token::Heading(children, _) | Token::StrongEmphasis(children) => {
                walk(children, result)
            }
            Token::Emphasis { content, .. } | Token::ListItem { content, .. } => {
                walk(content, result)
            }
            Token::Table { headers, rows, .. } => {
                for cell in headers {
                    walk(cell, result);
                }
                for row in rows {
                    for cell in row {
                        walk(cell, result);
                    }
                }
            }
            // Purely structural tokens carry no text.
            Token::Newline | Token::HorizontalRule | Token::TableAlignment(_) => {}
        }
    }
}
/// Errors produced while tokenizing Markdown input.
#[derive(Debug)]
pub enum LexerError {
    /// Input ended inside a construct that requires a terminator
    /// (e.g. an HTML comment missing its closing `-->`).
    UnexpectedEndOfInput,
    /// Input at the reported location could not be tokenized; the payload
    /// describes the offending text or position.
    UnknownToken(String),
}
/// A Markdown tokenizer over a pre-decoded character buffer.
pub struct Lexer {
    // Input decoded into chars so positions can be indexed in O(1)
    // without worrying about UTF-8 byte boundaries.
    input: Vec<char>,
    // Current index into `input`.
    position: usize,
}
impl Lexer {
    /// Creates a lexer over `input`, decoding it into a `Vec<char>` up front.
    pub fn new(input: String) -> Self {
        Lexer {
            input: input.chars().collect(),
            position: 0,
        }
    }

    /// Tokenizes the entire input starting in the `Root` context.
    pub fn parse(&mut self) -> Result<Vec<Token>, LexerError> {
        self.parse_with_context(ParseContext::Root)
    }

    /// Tokenizes the entire input in the given context, collecting tokens
    /// until end of input.
    pub fn parse_with_context(&mut self, ctx: ParseContext) -> Result<Vec<Token>, LexerError> {
        let mut tokens = Vec::new();
        while self.position < self.input.len() {
            if let Some(token) = self.next_token(ctx)? {
                tokens.push(token);
            }
        }
        Ok(tokens)
    }

    /// Collects tokens until `is_delimiter` matches the current character
    /// (the delimiter itself is not consumed).
    ///
    /// While scanning, a line indented deeper than the indentation at entry
    /// is checked for list markers so nested list items become child tokens;
    /// this check is skipped in `Inline`/`TableCell` contexts.
    fn parse_nested_content<F>(
        &mut self,
        is_delimiter: F,
        ctx: ParseContext,
    ) -> Result<Vec<Token>, LexerError>
    where
        F: Fn(char) -> bool,
    {
        let mut content = Vec::new();
        let initial_indent = self.get_current_indent();
        while self.position < self.input.len() {
            let ch = self.current_char();
            if is_delimiter(ch) {
                break;
            }
            if self.is_at_line_start() {
                let current_indent = self.get_current_indent();
                if current_indent > initial_indent
                    && !matches!(ctx, ParseContext::Inline | ParseContext::TableCell)
                {
                    // Skip past the indentation to inspect the first marker.
                    // NOTE(review): the indent is a *width* count (tab = 4),
                    // not a character count, so this `+=` can land past the
                    // marker when tabs are present — confirm tabs are expected.
                    self.position += current_indent;
                    match self.current_char() {
                        '-' | '+' => {
                            // `---…` is a horizontal rule, not a list item;
                            // `check_horizontal_rule` consumes it on success.
                            if !self.check_horizontal_rule()? {
                                content.push(self.parse_list_item(false, current_indent, ctx)?);
                                continue;
                            }
                        }
                        '*' => {
                            // `*` is only a bullet when followed by whitespace;
                            // otherwise it is emphasis and falls through.
                            if self.is_list_marker('*') {
                                content.push(self.parse_list_item(false, current_indent, ctx)?);
                                continue;
                            }
                        }
                        '0'..='9' => {
                            // `<digits>.` starts an ordered list item.
                            if self.check_ordered_list_marker().is_some() {
                                content.push(self.parse_list_item(true, current_indent, ctx)?);
                                continue;
                            }
                        }
                        _ => {}
                    }
                }
            }
            if let Some(token) = self.next_token(ctx)? {
                content.push(token);
            }
        }
        Ok(content)
    }

    /// Produces the next token at the current position, dispatching on the
    /// current character. Returns `Ok(None)` only at end of input.
    fn next_token(&mut self, ctx: ParseContext) -> Result<Option<Token>, LexerError> {
        // Keep the single space that follows inline code / a link so
        // `parse_text` can preserve it; otherwise eat leading whitespace.
        if !self.is_after_special_token() {
            self.skip_whitespace();
        }
        if self.position >= self.input.len() {
            return Ok(None);
        }
        let current_char = self.current_char();
        // NOTE(review): computed *after* skip_whitespace, so indented block
        // markers are not treated as being at line start.
        let is_line_start = self.is_at_line_start();
        // Block-level constructs are only recognized in these contexts.
        let allow_block_tokens = |context: ParseContext| -> bool {
            matches!(
                context,
                ParseContext::Root | ParseContext::ListItem | ParseContext::BlockQuote
            )
        };
        let token = match current_char {
            '#' if is_line_start && allow_block_tokens(ctx) => self.parse_heading()?,
            // `* ` at line start is a bullet; bare `*` is emphasis below.
            '*' if is_line_start && allow_block_tokens(ctx) && self.is_list_marker('*') => {
                self.parse_list_item(false, 0, ctx)?
            }
            '*' | '_' => self.parse_emphasis()?,
            '`' => self.parse_code()?,
            '>' if is_line_start && allow_block_tokens(ctx) => self.parse_blockquote()?,
            '-' | '+' if is_line_start && allow_block_tokens(ctx) => {
                if self.check_horizontal_rule()? {
                    Token::HorizontalRule
                } else {
                    self.parse_list_item(false, 0, ctx)?
                }
            }
            '0'..='9' if is_line_start && allow_block_tokens(ctx) => {
                // Digits only start a token when followed by `.` (ordered list).
                if let Some(_) = self.check_ordered_list_marker() {
                    self.parse_list_item(true, 0, ctx)?
                } else {
                    self.parse_text(ctx)?
                }
            }
            '[' => self.parse_link()?,
            '!' => {
                // `![` starts an image; a lone `!` is ordinary text.
                if self.position + 1 < self.input.len() && self.input[self.position + 1] == '[' {
                    self.parse_image()?
                } else {
                    self.parse_text(ctx)?
                }
            }
            '<' if self.is_html_comment_start() => self.parse_html_comment()?,
            '\n' => self.parse_newline()?,
            '|' if is_line_start => {
                // Only a table if the next line looks like an alignment row.
                if self.is_table_start() {
                    self.parse_table()?
                } else {
                    self.parse_text(ctx)?
                }
            }
            _ => self.parse_text(ctx)?,
        };
        Ok(Some(token))
    }

    /// Parses `#...# text` into `Heading`; the level is the number of `#`s
    /// and the heading content is lexed inline up to the end of the line.
    fn parse_heading(&mut self) -> Result<Token, LexerError> {
        let mut level = 0;
        while self.current_char() == '#' {
            level += 1;
            self.advance();
        }
        self.skip_whitespace();
        let content = self.parse_nested_content(|c| c == '\n', ParseContext::Inline)?;
        Ok(Token::Heading(content, level))
    }

    /// Parses `*…*` / `**…**` / `_…_` runs into `Emphasis`.
    ///
    /// Errors with `UnknownToken` if the closing delimiter run is shorter
    /// than the opening one.
    fn parse_emphasis(&mut self) -> Result<Token, LexerError> {
        let start_pos = self.position;
        let delimiter = self.current_char();
        // Count the opening delimiter run to get the emphasis level.
        let mut level = 0;
        while self.current_char() == delimiter {
            level += 1;
            self.advance();
        }
        let mut content = self.parse_nested_content(|c| c == delimiter, ParseContext::Inline)?;
        // Deliberate: a trailing space token is always appended so emphasized
        // text is separated from what follows (unit tests depend on it).
        content.push(Token::Text(String::from(" ")));
        // Consume a closing run of the same length as the opening run.
        for _ in 0..level {
            if self.current_char() != delimiter {
                return Err(LexerError::UnknownToken(format!(
                    "Unmatched emphasis at position {}",
                    start_pos
                )));
            }
            self.advance();
        }
        Ok(Token::Emphasis {
            // Levels above 3 are clamped.
            level: level.min(3),
            content,
        })
    }

    /// Parses inline code (single backtick) or a fenced block (multiple
    /// backticks, first line treated as the language tag).
    fn parse_code(&mut self) -> Result<Token, LexerError> {
        // NOTE: `count_backticks` advances past the run it counts.
        let start_backticks = self.count_backticks();
        if start_backticks == 1 {
            // Inline code: raw text up to (and consuming) the closing backtick.
            let mut content = String::new();
            while self.position < self.input.len() {
                let ch = self.current_char();
                if ch == '`' {
                    self.advance();
                    break;
                }
                content.push(ch);
                self.advance();
            }
            return Ok(Token::Code(String::new(), content));
        }
        // Fenced block: rest of the opening line is the language tag.
        self.skip_whitespace();
        let language = self.read_until_newline();
        let mut content = String::new();
        while self.position < self.input.len() {
            // NOTE(review): this consumes *any* backtick run; a run shorter
            // than the fence is swallowed and never reaches `content` —
            // confirm backticks inside fenced blocks are out of scope.
            let current_backticks = self.count_backticks();
            if current_backticks == start_backticks {
                break;
            }
            content.push(self.current_char());
            self.advance();
        }
        // The closing fence was already consumed by `count_backticks` above,
        // so these conditional advances are normally no-ops.
        for _ in 0..start_backticks {
            if self.position < self.input.len() && self.current_char() == '`' {
                self.advance();
            }
        }
        Ok(Token::Code(
            language.trim().to_string(),
            content.trim().to_string(),
        ))
    }

    /// Counts and CONSUMES the run of backticks at the current position.
    fn count_backticks(&mut self) -> usize {
        let mut count = 0;
        while self.position < self.input.len() && self.current_char() == '`' {
            count += 1;
            self.advance();
        }
        count
    }

    /// Parses `> text` into `BlockQuote`; the quoted line is kept as raw
    /// text (not lexed further).
    fn parse_blockquote(&mut self) -> Result<Token, LexerError> {
        self.advance();
        self.skip_whitespace();
        let content = self.read_until_newline();
        Ok(Token::BlockQuote(content))
    }

    /// Parses `[text](url)`; `[text]` without `(…)` yields a link with an
    /// empty URL.
    fn parse_link(&mut self) -> Result<Token, LexerError> {
        // Skip `[`, read the label up to `]`.
        self.advance();
        let text = self.read_until_char(']');
        // Skip `]`; a following `(` introduces the URL.
        self.advance();
        if self.current_char() == '(' {
            self.advance();
            let url = self.read_until_char(')');
            self.advance();
            return Ok(Token::Link(text, url));
        }
        Ok(Token::Link(text, String::new()))
    }

    /// Parses `![alt](url)` into `Image`.
    ///
    /// `![alt` without `(` is an `UnknownToken` error; a bare `!` backtracks
    /// and is re-parsed as text.
    fn parse_image(&mut self) -> Result<Token, LexerError> {
        let start_pos = self.position;
        self.advance();
        if self.position < self.input.len() && self.current_char() == '[' {
            self.advance();
            let alt_text = self.read_until_char(']');
            // Skip `]`; a following `(` introduces the URL.
            self.advance();
            if self.current_char() == '(' {
                self.advance();
                let url = self.read_until_char(')');
                self.advance();
                Ok(Token::Image(alt_text, url))
            } else {
                Err(LexerError::UnknownToken(alt_text))
            }
        } else {
            // Not an image after all: rewind and treat the `!` as text.
            self.position = start_pos;
            self.parse_text(ParseContext::Inline)
        }
    }

    /// Consumes a single `\n` and emits `Newline`.
    fn parse_newline(&mut self) -> Result<Token, LexerError> {
        self.advance();
        Ok(Token::Newline)
    }

    /// Consumes a run of plain text up to a newline or the start of a
    /// special token. Errors if nothing was consumed.
    fn parse_text(&mut self, ctx: ParseContext) -> Result<Token, LexerError> {
        let mut content = String::new();
        let start_pos = self.position;
        // Preserve a single leading space (kept by `next_token` right after
        // inline code or a link) so words do not run together.
        if self.position > 0 && self.current_char() == ' ' {
            content.push(' ');
            self.advance();
        }
        while self.position < self.input.len() {
            let ch = self.current_char();
            if ch == '\n' || self.is_start_of_special_token(ctx) {
                break;
            }
            content.push(ch);
            self.advance();
        }
        if content.is_empty() {
            Err(LexerError::UnknownToken(format!(
                "Unexpected character at position {}",
                start_pos
            )))
        } else {
            Ok(Token::Text(content))
        }
    }

    /// Parses `<!-- … -->`, returning the comment body without delimiters.
    /// Errors with `UnexpectedEndOfInput` if the comment is unterminated.
    fn parse_html_comment(&mut self) -> Result<Token, LexerError> {
        // Skip the `<!--` opener (caller verified it via
        // `is_html_comment_start`).
        self.position += 4;
        let start = self.position;
        while self.position + 2 < self.input.len() {
            if self.input[self.position] == '-'
                && self.input[self.position + 1] == '-'
                && self.input[self.position + 2] == '>'
            {
                break;
            }
            self.advance();
        }
        if self.position + 2 < self.input.len() {
            let comment: String = self.input[start..self.position].iter().collect();
            // Skip the `-->` terminator.
            self.position += 3;
            Ok(Token::HtmlComment(comment))
        } else {
            Err(LexerError::UnexpectedEndOfInput)
        }
    }

    /// True at the very beginning of input or right after a newline.
    fn is_at_line_start(&self) -> bool {
        self.position == 0 || self.input.get(self.position - 1) == Some(&'\n')
    }

    /// Skips spaces/tabs but never newlines (newlines are tokens).
    fn skip_whitespace(&mut self) {
        while self.position < self.input.len()
            && self.current_char().is_whitespace()
            && self.current_char() != '\n'
        {
            self.advance();
        }
    }

    /// Moves the cursor forward one character.
    fn advance(&mut self) {
        self.position += 1;
    }

    /// Returns the character at the cursor, or `'\0'` past end of input.
    fn current_char(&self) -> char {
        *self.input.get(self.position).unwrap_or(&'\0')
    }

    /// Consumes and returns everything up to (not including) the next `\n`.
    fn read_until_newline(&mut self) -> String {
        let start = self.position;
        while self.position < self.input.len() && self.current_char() != '\n' {
            self.advance();
        }
        self.input[start..self.position].iter().collect()
    }

    /// Consumes and returns everything up to (not including) `delimiter`;
    /// reads to end of input if the delimiter never appears.
    fn read_until_char(&mut self, delimiter: char) -> String {
        let start = self.position;
        while self.position < self.input.len() && self.current_char() != delimiter {
            self.advance();
        }
        self.input[start..self.position].iter().collect()
    }

    /// True if the input at the cursor begins with `<!--`.
    /// NOTE(review): rebuilds the remaining input into a `String` on every
    /// call — O(remaining) work for a 4-char check.
    fn is_html_comment_start(&self) -> bool {
        self.input[self.position..]
            .iter()
            .collect::<String>()
            .starts_with("<!--")
    }

    /// True if the current character would start a new token, which makes
    /// `parse_text` stop. `#`/`<` are only special in `Root` context.
    fn is_start_of_special_token(&self, ctx: ParseContext) -> bool {
        let ch = self.current_char();
        match ch {
            '#' if matches!(ctx, ParseContext::Root) => true,
            '*' | '_' | '`' | '[' => true,
            '!' => {
                // `!` is only special when it opens an image (`![`).
                if self.position + 1 < self.input.len() {
                    self.input[self.position + 1] == '['
                } else {
                    false
                }
            }
            '<' => {
                if matches!(ctx, ParseContext::Root) {
                    self.is_html_comment_start()
                } else {
                    false
                }
            }
            _ => false,
        }
    }

    /// True right after inline code (`` ` ``) or a link/image (`)`), where
    /// `next_token` must not eat the following space.
    fn is_after_special_token(&self) -> bool {
        if self.position == 0 {
            return false;
        }
        let prev_char = self.input[self.position - 1];
        match prev_char {
            '`' | ')' => true,
            _ => false,
        }
    }

    /// Checks for a horizontal rule (3+ dashes) at the cursor.
    /// On success the dashes are CONSUMED and `Ok(true)` is returned;
    /// otherwise the cursor is untouched.
    fn check_horizontal_rule(&mut self) -> Result<bool, LexerError> {
        if self.current_char() == '-' {
            let mut count = 1;
            let mut pos = self.position + 1;
            while pos < self.input.len() && self.input[pos] == '-' {
                count += 1;
                pos += 1;
            }
            if count >= 3 {
                self.position = pos;
                return Ok(true);
            }
        }
        Ok(false)
    }

    /// Pure lookahead: returns the number of an ordered-list marker
    /// (`<digits>.`) at the cursor, without consuming anything.
    fn check_ordered_list_marker(&mut self) -> Option<usize> {
        let start_pos = self.position;
        let mut pos = start_pos;
        let mut number_str = String::new();
        while pos < self.input.len() && self.input[pos].is_ascii_digit() {
            number_str.push(self.input[pos]);
            pos += 1;
        }
        if pos < self.input.len() && self.input[pos] == '.' {
            if let Ok(number) = number_str.parse::<usize>() {
                return Some(number);
            }
        }
        None
    }

    /// Parses one list item starting at its marker: consumes the marker,
    /// lexes the rest of the line as the item's content, then pulls any
    /// following lines indented deeper than `indent_level` in as nested
    /// list items.
    fn parse_list_item(
        &mut self,
        ordered: bool,
        indent_level: usize,
        parent_ctx: ParseContext,
    ) -> Result<Token, LexerError> {
        let mut number = None;
        if !ordered {
            // Skip the single bullet character (`-`, `+`, or `*`).
            self.advance();
        } else {
            // Record the source number, then skip the `<digits>.` marker.
            number = self.check_ordered_list_marker();
            while self.position < self.input.len()
                && (self.current_char().is_ascii_digit() || self.current_char() == '.')
            {
                self.advance();
            }
        }
        self.skip_whitespace();
        // Rest of the line becomes the item's inline content.
        let mut content = Vec::new();
        while self.position < self.input.len() && self.current_char() != '\n' {
            if let Some(token) = self.next_token(ParseContext::ListItem)? {
                content.push(token);
            }
        }
        if self.position < self.input.len() && self.current_char() == '\n' {
            self.advance();
        }
        // Absorb deeper-indented follow-up lines as nested list items.
        while self.position < self.input.len() {
            let current_indent = self.get_current_indent();
            if current_indent <= indent_level {
                break;
            }
            // NOTE(review): same width-vs-chars caveat as in
            // `parse_nested_content` when tabs are involved.
            self.position += current_indent;
            match self.current_char() {
                '-' | '+' => {
                    if !self.check_horizontal_rule()? {
                        content.push(self.parse_list_item(false, current_indent, parent_ctx)?);
                    }
                }
                '*' => {
                    if self.is_list_marker('*') {
                        content.push(self.parse_list_item(false, current_indent, parent_ctx)?);
                    } else {
                        break;
                    }
                }
                '0'..='9' => {
                    if self.check_ordered_list_marker().is_some() {
                        content.push(self.parse_list_item(true, current_indent, parent_ctx)?);
                    }
                }
                _ => break,
            }
        }
        Ok(Token::ListItem {
            content,
            ordered,
            number,
        })
    }

    /// Heuristic table detection: a `|` line is a table header if the next
    /// line contains a `-` (i.e. looks like an alignment/separator row).
    fn is_table_start(&self) -> bool {
        let rest: String = self.input[self.position..].iter().collect();
        if let Some(pos) = rest.find('\n') {
            let next_line = rest[pos + 1..].lines().next().unwrap_or("");
            next_line.contains('-')
        } else {
            false
        }
    }

    /// Parses a pipe table: header row, alignment row (`:---`, `:---:`,
    /// `---:`), then body rows until a blank line or end of input.
    /// Each cell is re-lexed with a fresh `Lexer` in `TableCell` context.
    fn parse_table(&mut self) -> Result<Token, LexerError> {
        let header_line = self.read_until_newline();
        let header_cells: Vec<String> = header_line
            .trim_matches('|')
            .split('|')
            .map(|s| s.trim().to_string())
            .collect();
        if self.current_char() == '\n' {
            self.advance();
        }
        let align_line = self.read_until_newline();
        let aligns: Vec<Alignment> = align_line
            .trim_matches('|')
            .split('|')
            .map(|s| {
                let s = s.trim();
                // Colon placement encodes the alignment; default is Left.
                match (s.starts_with(':'), s.ends_with(':')) {
                    (true, true) => Alignment::Center,
                    (true, false) => Alignment::Left,
                    (false, true) => Alignment::Right,
                    _ => Alignment::Left,
                }
            })
            .collect();
        if self.current_char() == '\n' {
            self.advance();
        }
        let mut headers = Vec::new();
        for cell in header_cells {
            let mut cell_lexer = Lexer::new(cell);
            let parsed = cell_lexer.parse_with_context(ParseContext::TableCell)?;
            headers.push(parsed);
        }
        let mut rows = Vec::new();
        while self.position < self.input.len() {
            let line = self.read_until_newline();
            // A blank line terminates the table body.
            if line.trim().is_empty() {
                break;
            }
            let cell_texts: Vec<String> = line
                .trim_matches('|')
                .split('|')
                .map(|s| s.trim().to_string())
                .collect();
            let mut row_tokens = Vec::new();
            for cell in cell_texts {
                let mut cell_lexer = Lexer::new(cell);
                let parsed = cell_lexer.parse_with_context(ParseContext::TableCell)?;
                row_tokens.push(parsed);
            }
            rows.push(row_tokens);
            if self.current_char() == '\n' {
                self.advance();
            }
        }
        Ok(Token::Table {
            headers,
            aligns,
            rows,
        })
    }

    /// Measures the indentation WIDTH at the cursor without consuming it:
    /// each space counts 1, each tab counts 4.
    fn get_current_indent(&self) -> usize {
        let mut count = 0;
        let mut pos = self.position;
        while pos < self.input.len() {
            match self.input[pos] {
                ' ' => count += 1,
                '\t' => count += 4,
                _ => break,
            }
            pos += 1;
        }
        count
    }

    /// True if the cursor is on `marker` followed by a space or tab —
    /// distinguishes a `* ` bullet from `*emphasis*`.
    fn is_list_marker(&self, marker: char) -> bool {
        if self.current_char() != marker {
            return false;
        }
        if self.position + 1 < self.input.len() {
            let next_char = self.input[self.position + 1];
            next_char == ' ' || next_char == '\t'
        } else {
            false
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Lexes `input` and unwraps, panicking on lexer errors.
    fn parse(input: &str) -> Vec<Token> {
        let mut lexer = Lexer::new(input.to_string());
        lexer.parse().unwrap()
    }

    #[test]
    fn test_basic_text() {
        let tokens = parse("Hello world");
        assert_eq!(tokens, vec![Token::Text("Hello world".to_string())]);
    }

    #[test]
    fn test_headings() {
        let tests = vec![
            (
                "# H1",
                vec![Token::Heading(vec![Token::Text("H1".to_string())], 1)],
            ),
            (
                "## H2",
                vec![Token::Heading(vec![Token::Text("H2".to_string())], 2)],
            ),
            (
                "### H3",
                vec![Token::Heading(vec![Token::Text("H3".to_string())], 3)],
            ),
        ];
        for (input, expected) in tests {
            assert_eq!(parse(input), expected);
        }
    }

    // The lexer always appends a trailing `Text(" ")` inside emphasis content
    // (see `parse_emphasis`), hence the extra token in each expectation.
    #[test]
    fn test_emphasis() {
        let tests = vec![
            (
                "*italic*",
                vec![Token::Emphasis {
                    level: 1,
                    content: vec![
                        Token::Text("italic".to_string()),
                        Token::Text(" ".to_string()),
                    ],
                }],
            ),
            (
                "**bold**",
                vec![Token::Emphasis {
                    level: 2,
                    content: vec![
                        Token::Text("bold".to_string()),
                        Token::Text(" ".to_string()),
                    ],
                }],
            ),
            (
                "_also italic_",
                vec![Token::Emphasis {
                    level: 1,
                    content: vec![
                        Token::Text("also italic".to_string()),
                        Token::Text(" ".to_string()),
                    ],
                }],
            ),
        ];
        for (input, expected) in tests {
            assert_eq!(parse(input), expected);
        }
    }

    #[test]
    fn test_code_blocks() {
        let tests = vec![
            (
                "`inline code`",
                vec![Token::Code("".to_string(), "inline code".to_string())],
            ),
            (
                "```rust\nfn main() {}\n```",
                vec![Token::Code("rust".to_string(), "fn main() {}".to_string())],
            ),
        ];
        for (input, expected) in tests {
            assert_eq!(parse(input), expected);
        }
    }

    #[test]
    fn test_blockquotes() {
        let tokens = parse("> This is a quote");
        assert_eq!(
            tokens,
            vec![Token::BlockQuote("This is a quote".to_string())]
        );
    }

    #[test]
    fn test_lists() {
        let tests = vec![
            (
                "- Item 1\n- Item 2",
                vec![
                    Token::ListItem {
                        content: vec![Token::Text("Item 1".to_string())],
                        ordered: false,
                        number: None,
                    },
                    Token::ListItem {
                        content: vec![Token::Text("Item 2".to_string())],
                        ordered: false,
                        number: None,
                    },
                ],
            ),
            (
                "1. First\n2. Second",
                vec![
                    Token::ListItem {
                        content: vec![Token::Text("First".to_string())],
                        ordered: true,
                        number: Some(1),
                    },
                    Token::ListItem {
                        content: vec![Token::Text("Second".to_string())],
                        ordered: true,
                        number: Some(2),
                    },
                ],
            ),
        ];
        for (input, expected) in tests {
            assert_eq!(parse(input), expected);
        }
    }

    #[test]
    fn test_nested_lists() {
        let input = "- Item 1\n - Nested 1\n - Nested 2\n- Item 2";
        let expected = vec![
            Token::ListItem {
                content: vec![
                    Token::Text("Item 1".to_string()),
                    Token::ListItem {
                        content: vec![Token::Text("Nested 1".to_string())],
                        ordered: false,
                        number: None,
                    },
                    Token::ListItem {
                        content: vec![Token::Text("Nested 2".to_string())],
                        ordered: false,
                        number: None,
                    },
                ],
                ordered: false,
                number: None,
            },
            Token::ListItem {
                content: vec![Token::Text("Item 2".to_string())],
                ordered: false,
                number: None,
            },
        ];
        assert_eq!(parse(input), expected);
    }

    #[test]
    fn test_links() {
        let tests = vec![
            (
                "[Link](https://example.com)",
                vec![Token::Link(
                    "Link".to_string(),
                    "https://example.com".to_string(),
                )],
            ),
            (
                "![Image](image.jpg)",
                vec![Token::Image("Image".to_string(), "image.jpg".to_string())],
            ),
        ];
        for (input, expected) in tests {
            assert_eq!(parse(input), expected);
        }
    }

    #[test]
    fn test_horizontal_rule() {
        let tests = vec!["---", "----", "-----"];
        for input in tests {
            assert_eq!(parse(input), vec![Token::HorizontalRule]);
        }
    }

    // Raw-string lines are intentionally at column 0: block markers must sit
    // at line start for the lexer to recognize them.
    #[test]
    fn test_complex_document() {
        let input = r#"# Main Title
This is a paragraph with *italic* and **bold** text.
## Subsection
- List item 1
  - Nested item with `code`
- List item 2
> A blockquote
---
[Link](https://example.com)"#;
        let tokens = parse(input);
        assert!(tokens.len() > 0);
        assert!(matches!(tokens[0], Token::Heading(_, 1)));
    }

    #[test]
    fn test_error_cases() {
        // `![alt` with no `(url)` must error with the alt text as payload.
        let mut lexer = Lexer::new("![Invalid".to_string());
        assert!(matches!(lexer.parse(), Err(LexerError::UnknownToken(_))));
    }

    #[test]
    fn test_code_block_edge_cases() {
        let tests = vec![
            (
                "```\nempty language\n```",
                vec![Token::Code("".to_string(), "empty language".to_string())],
            ),
            (
                "`code with *asterisk*`",
                vec![Token::Code(
                    "".to_string(),
                    "code with *asterisk*".to_string(),
                )],
            ),
            (
                "```rust\nfn main() {\n    println!(\"Hello\");\n}\n```",
                vec![Token::Code(
                    "rust".to_string(),
                    "fn main() {\n    println!(\"Hello\");\n}".to_string(),
                )],
            ),
        ];
        for (input, expected) in tests {
            assert_eq!(parse(input), expected);
        }
    }

    #[test]
    fn test_nested_list_combinations() {
        // Nested entries are indented so they fold into their parents,
        // leaving exactly two top-level ordered items.
        let input = r#"1. First level
   - Nested unordered
   - Another unordered
2. Second level
   1. Nested ordered
   2. Another ordered
      - Mixed with unordered"#;
        let tokens = parse(input);
        assert_eq!(tokens.len(), 2);
        assert!(matches!(
            tokens[0],
            Token::ListItem {
                ordered: true,
                number: Some(1),
                ..
            }
        ));
        assert!(matches!(
            tokens[1],
            Token::ListItem {
                ordered: true,
                number: Some(2),
                ..
            }
        ));
    }

    // Blockquote bodies are kept as raw text; inline markup is NOT lexed.
    #[test]
    fn test_blockquote_variations() {
        let tests = vec![
            (
                "> Simple quote",
                vec![Token::BlockQuote("Simple quote".to_string())],
            ),
            (
                "> Quote with *emphasis*",
                vec![Token::BlockQuote("Quote with *emphasis*".to_string())],
            ),
            (
                "> Quote with [link](url)",
                vec![Token::BlockQuote("Quote with [link](url)".to_string())],
            ),
        ];
        for (input, expected) in tests {
            assert_eq!(parse(input), expected);
        }
    }

    #[test]
    fn test_link_and_image_edge_cases() {
        let tests = vec![
            (
                "[Link with spaces](https://example.com/path with spaces)",
                vec![Token::Link(
                    "Link with spaces".to_string(),
                    "https://example.com/path with spaces".to_string(),
                )],
            ),
            (
                "![Image with *emphasis* in alt](image.jpg)",
                vec![Token::Image(
                    "Image with *emphasis* in alt".to_string(),
                    "image.jpg".to_string(),
                )],
            ),
            (
                "[Empty]()",
                vec![Token::Link("Empty".to_string(), "".to_string())],
            ),
        ];
        for (input, expected) in tests {
            assert_eq!(parse(input), expected);
        }
    }

    #[test]
    fn test_whitespace_handling() {
        let tests = vec![(
            "*emphasis with space after* ",
            vec![Token::Emphasis {
                level: 1,
                content: vec![
                    Token::Text("emphasis with space after".to_string()),
                    Token::Text(" ".to_string()),
                ],
            }],
        )];
        for (input, expected) in tests {
            assert_eq!(parse(input), expected);
        }
    }

    #[test]
    fn test_mixed_content() {
        let input = r#"# Title with *emphasis*
A paragraph with `code` and [link](url).
- List with **bold**
  1. Nested with *italic*
  2. And `code`
> Quote with [link](url)"#;
        let tokens = parse(input);
        assert!(tokens.len() > 0);
        if let Token::Heading(content, 1) = &tokens[0] {
            assert!(content
                .iter()
                .any(|token| matches!(token, Token::Emphasis { .. })));
        } else {
            panic!("Expected heading with emphasis");
        }
    }

    #[test]
    fn test_html_comment_variations() {
        let tests = vec![
            (
                "<!-- Simple -->",
                vec![Token::HtmlComment(" Simple ".to_string())],
            ),
            (
                "<!--Multi\nline\ncomment-->",
                vec![Token::HtmlComment("Multi\nline\ncomment".to_string())],
            ),
        ];
        for (input, expected) in tests {
            assert_eq!(parse(input), expected);
        }
    }

    #[test]
    fn test_standalone_exclamation() {
        // A `!` not followed by `[` is plain text…
        let tokens = parse("Hello! World");
        assert_eq!(tokens, vec![Token::Text("Hello! World".to_string())]);
        let tokens = parse("This is exciting!");
        assert_eq!(tokens, vec![Token::Text("This is exciting!".to_string())]);
        let tokens = parse("Multiple marks!!");
        assert_eq!(tokens, vec![Token::Text("Multiple marks!!".to_string())]);
        // …while `![` still opens an image.
        let tokens = parse("![Alt text](image.png)");
        assert_eq!(
            tokens,
            vec![Token::Image(
                "Alt text".to_string(),
                "image.png".to_string()
            )]
        );
    }

    #[test]
    fn test_tables() {
        let input = r#"| Name | Age | City |
|:-----|:---:|----:|
| Alice | 30 | Paris |
| Bob | 25 | Lyon |"#;
        let tokens = parse(input);
        assert_eq!(
            tokens,
            vec![Token::Table {
                headers: vec![
                    vec![Token::Text("Name".to_string())],
                    vec![Token::Text("Age".to_string())],
                    vec![Token::Text("City".to_string())],
                ],
                aligns: vec![Alignment::Left, Alignment::Center, Alignment::Right],
                rows: vec![
                    vec![
                        vec![Token::Text("Alice".to_string())],
                        vec![Token::Text("30".to_string())],
                        vec![Token::Text("Paris".to_string())],
                    ],
                    vec![
                        vec![Token::Text("Bob".to_string())],
                        vec![Token::Text("25".to_string())],
                        vec![Token::Text("Lyon".to_string())],
                    ],
                ],
            }]
        );
    }
}