use crate::recursive_parser::{Lexer, Token};
/// Token-aware cursor navigation over query strings (positions come from
/// the `recursive_parser` lexer).
pub struct TextNavigator;
impl TextNavigator {
/// Maps a cursor position to a 1-based token ordinal.
///
/// Returns `(current_token, total_tokens)` where `current_token` is:
/// * `0` for an empty query, a cursor at position 0, or a cursor before the
///   first token,
/// * `i + 1` when the cursor falls inside token `i` (inclusive bounds),
/// * `i` when the cursor sits in the gap before token `i`,
/// * `tokens.len()` when the cursor is past the last token.
#[must_use]
pub fn get_cursor_token_position(query: &str, cursor_pos: usize) -> (usize, usize) {
    if query.is_empty() {
        return (0, 0);
    }
    let mut lexer = Lexer::new(query);
    let tokens = lexer.tokenize_all_with_positions();
    if tokens.is_empty() {
        return (0, 0);
    }
    if cursor_pos == 0 {
        return (0, tokens.len());
    }
    // `None` means the scan never matched, i.e. the cursor lies past the
    // last token.
    let mut located = None;
    for (i, (start, end, _)) in tokens.iter().enumerate() {
        if cursor_pos >= *start && cursor_pos <= *end {
            located = Some(i + 1);
            break;
        } else if cursor_pos < *start {
            // Cursor is in the gap before token `i`. Previously a match at
            // `i == 0` (cursor in leading whitespace) was indistinguishable
            // from "no match" and was misreported as `tokens.len()`; it now
            // correctly yields `0`.
            located = Some(i);
            break;
        }
    }
    (located.unwrap_or(tokens.len()), tokens.len())
}
/// Returns the display text of the token under `cursor_pos`, if any.
///
/// A cursor anywhere within a token's inclusive `[start, end]` span counts
/// as being "on" that token.
#[must_use]
pub fn get_token_at_cursor(query: &str, cursor_pos: usize) -> Option<String> {
    if query.is_empty() {
        return None;
    }
    let mut lexer = Lexer::new(query);
    lexer
        .tokenize_all_with_positions()
        .iter()
        .find(|(start, end, _)| cursor_pos >= *start && cursor_pos <= *end)
        .map(|(_, _, token)| Self::format_token(token).to_string())
}
/// Computes the target cursor position for a "move back one token" motion.
///
/// If the cursor sits strictly inside a token (past its start), the target
/// is that token's start; otherwise it is the start of the nearest token
/// beginning before the cursor (or `0` when there is none). Returns `None`
/// when the cursor cannot move left.
#[must_use]
pub fn calculate_prev_token_position(query: &str, cursor_pos: usize) -> Option<usize> {
    if cursor_pos == 0 {
        return None;
    }
    let mut lexer = Lexer::new(query);
    let tokens = lexer.tokenize_all_with_positions();
    // Start of the token the cursor is currently inside, if any.
    let enclosing_start = tokens
        .iter()
        .find(|(start, end, _)| cursor_pos > *start && cursor_pos <= *end)
        .map(|(start, _, _)| *start);
    let target_pos = match enclosing_start {
        Some(start) => start,
        // Not inside a token: jump to the last token starting before the
        // cursor, defaulting to the beginning of the query.
        None => tokens
            .iter()
            .rev()
            .find(|(start, _, _)| *start < cursor_pos)
            .map_or(0, |(start, _, _)| *start),
    };
    if target_pos < cursor_pos {
        Some(target_pos)
    } else {
        None
    }
}
/// Computes the target cursor position for a "move forward one token" motion.
///
/// If the cursor is inside a token, the target is the start of the token
/// *after* it; otherwise it is the start of the first token beginning after
/// the cursor. Either way the fallback is the end of the query. Returns
/// `None` when the cursor cannot move right.
///
/// NOTE(review): this method treats `end` as exclusive (`cursor_pos < *end`)
/// while the sibling navigation methods use `cursor_pos <= *end` — confirm
/// which convention the lexer's positions guarantee.
/// NOTE(review): the in-token search requires `*start > current_token_end`,
/// so a token that begins exactly at `current_token_end` (no gap between
/// tokens, e.g. `a.b`) is skipped — verify this is intended.
#[must_use]
pub fn calculate_next_token_position(query: &str, cursor_pos: usize) -> Option<usize> {
    // Byte length of the query; cursor positions are compared against it.
    let query_len = query.len();
    if cursor_pos >= query_len {
        return None;
    }
    let mut lexer = Lexer::new(query);
    let tokens = lexer.tokenize_all_with_positions();
    let mut in_token = false;
    let mut current_token_end = query_len;
    // Find the token the cursor currently sits inside (end exclusive).
    for (start, end, _) in &tokens {
        if cursor_pos >= *start && cursor_pos < *end {
            in_token = true;
            current_token_end = *end;
            break;
        }
    }
    let target_pos = if in_token && cursor_pos < current_token_end {
        // Inside a token: jump to the first token starting after it.
        let mut next_start = query_len;
        for (start, _, _) in &tokens {
            if *start > current_token_end {
                next_start = *start;
                break;
            }
        }
        next_start
    } else {
        // Between tokens: jump to the first token starting after the cursor.
        let mut next_start = query_len;
        for (start, _, _) in &tokens {
            if *start > cursor_pos {
                next_start = *start;
                break;
            }
        }
        next_start
    };
    if target_pos > cursor_pos && target_pos <= query_len {
        Some(target_pos)
    } else {
        None
    }
}
/// Renders a token as user-visible text.
///
/// Keyword and operator variants map to fixed uppercase/symbol spellings;
/// payload-carrying variants (identifiers, literals, comments) return their
/// stored text as-is.
fn format_token(token: &Token) -> &str {
    match token {
        Token::Select => "SELECT",
        Token::From => "FROM",
        Token::Where => "WHERE",
        Token::With => "WITH",
        Token::GroupBy => "GROUP BY",
        Token::OrderBy => "ORDER BY",
        Token::Having => "HAVING",
        Token::As => "AS",
        Token::Asc => "ASC",
        Token::Desc => "DESC",
        Token::And => "AND",
        Token::Or => "OR",
        Token::In => "IN",
        Token::DateTime => "DateTime",
        Token::Case => "CASE",
        Token::When => "WHEN",
        Token::Then => "THEN",
        Token::Else => "ELSE",
        Token::End => "END",
        Token::Distinct => "DISTINCT",
        Token::Exclude => "EXCLUDE",
        Token::Pivot => "PIVOT",
        Token::Unpivot => "UNPIVOT",
        Token::For => "FOR",
        Token::Over => "OVER",
        Token::Partition => "PARTITION",
        Token::By => "BY",
        Token::Rows => "ROWS",
        Token::Range => "RANGE",
        Token::Unbounded => "UNBOUNDED",
        Token::Preceding => "PRECEDING",
        Token::Following => "FOLLOWING",
        Token::Current => "CURRENT",
        Token::Row => "ROW",
        Token::Union => "UNION",
        Token::Intersect => "INTERSECT",
        Token::Except => "EXCEPT",
        Token::Web => "WEB",
        Token::File => "FILE",
        Token::Unnest => "UNNEST",
        // Payload variants: return the stored text directly.
        Token::Identifier(s) => s,
        Token::QuotedIdentifier(s) => s,
        Token::StringLiteral(s) => s,
        Token::JsonBlock(s) => s,
        Token::NumberLiteral(s) => s,
        Token::Star => "*",
        Token::Comma => ",",
        Token::Colon => ":",
        Token::Dot => ".",
        Token::LeftParen => "(",
        Token::RightParen => ")",
        Token::Equal => "=",
        Token::NotEqual => "!=",
        Token::LessThan => "<",
        Token::LessThanOrEqual => "<=",
        Token::GreaterThan => ">",
        Token::GreaterThanOrEqual => ">=",
        Token::Like => "LIKE",
        Token::ILike => "ILIKE",
        Token::Not => "NOT",
        Token::Is => "IS",
        Token::Null => "NULL",
        Token::Between => "BETWEEN",
        Token::Limit => "LIMIT",
        Token::Offset => "OFFSET",
        Token::Into => "INTO",
        Token::Plus => "+",
        Token::Minus => "-",
        Token::Divide => "/",
        Token::Modulo => "%",
        Token::Concat => "||",
        Token::Join => "JOIN",
        Token::Inner => "INNER",
        Token::Left => "LEFT",
        Token::Right => "RIGHT",
        Token::Full => "FULL",
        Token::Cross => "CROSS",
        Token::Outer => "OUTER",
        Token::On => "ON",
        Token::LineComment(text) => text,
        Token::BlockComment(text) => text,
        Token::Eof => "EOF",
        Token::Qualify => "QUALIFY",
    }
}
}
/// Readline-style kill/delete operations on a single line of text.
pub struct TextEditor;
impl TextEditor {
/// Kills everything before `cursor_pos` (a character index).
///
/// Returns `(killed_text, remaining_text)`, or `None` when the cursor is
/// already at the start of the line.
#[must_use]
pub fn kill_line_backward(text: &str, cursor_pos: usize) -> Option<(String, String)> {
    if cursor_pos == 0 {
        return None;
    }
    // Byte offset of the `cursor_pos`-th character; a cursor past the last
    // character kills the whole line.
    let boundary = text
        .char_indices()
        .nth(cursor_pos)
        .map_or(text.len(), |(byte_idx, _)| byte_idx);
    let (killed, rest) = text.split_at(boundary);
    Some((killed.to_string(), rest.to_string()))
}
/// Kills everything from `cursor_pos` (a character index) to the end of the
/// line.
///
/// Returns `(killed_text, remaining_text)`, or `None` when there is nothing
/// after the cursor to kill.
#[must_use]
pub fn kill_line_forward(text: &str, cursor_pos: usize) -> Option<(String, String)> {
    // Byte offset of the character under the cursor. `None` means the cursor
    // is at or past the end, so there is nothing to kill. (The previous
    // guard compared the *character* index against the *byte* length, so on
    // multi-byte text a cursor at the logical end returned `Some(("", text))`
    // instead of `None`.)
    let boundary = text
        .char_indices()
        .nth(cursor_pos)
        .map(|(byte_idx, _)| byte_idx)?;
    let (remaining, killed) = text.split_at(boundary);
    Some((killed.to_string(), remaining.to_string()))
}
/// Deletes the word immediately before `cursor_pos` (a byte index), together
/// with the whitespace on both sides of it.
///
/// Returns `(deleted_text, remaining_text, new_cursor_pos)`, or `None` when
/// the cursor is at the start of the line or at an invalid byte position.
#[must_use]
pub fn delete_word_backward(text: &str, cursor_pos: usize) -> Option<(String, String, usize)> {
    // Reject a cursor at the start, past the end, or inside a multi-byte
    // character; the latter two previously caused a slicing panic below.
    if cursor_pos == 0 || !text.is_char_boundary(cursor_pos) {
        return None;
    }
    let before_cursor = &text[..cursor_pos];
    let after_cursor = &text[cursor_pos..];
    let mut word_start = before_cursor.len();
    let mut chars = before_cursor.chars().rev().peekable();
    // 1. Skip whitespace directly before the cursor.
    while let Some(&ch) = chars.peek() {
        if ch.is_whitespace() {
            word_start -= ch.len_utf8();
            chars.next();
        } else {
            break;
        }
    }
    // 2. Skip the word itself (alphanumerics and underscores).
    while let Some(&ch) = chars.peek() {
        if !ch.is_alphanumeric() && ch != '_' {
            break;
        }
        word_start -= ch.len_utf8();
        chars.next();
    }
    // 3. Also consume the whitespace that preceded the word, so the deleted
    //    span includes the separating gap (e.g. deleting " FROM", not "FROM").
    while let Some(&ch) = chars.peek() {
        if ch.is_whitespace() {
            word_start -= ch.len_utf8();
            chars.next();
        } else {
            break;
        }
    }
    let deleted_text = text[word_start..cursor_pos].to_string();
    let remaining_text = format!("{}{}", &text[..word_start], after_cursor);
    Some((deleted_text, remaining_text, word_start))
}
/// Deletes forward from `cursor_pos` (a byte index) through the end of the
/// next word: any non-word characters under/after the cursor plus the first
/// run of word characters (alphanumerics and underscores).
///
/// Returns `(deleted_text, remaining_text)`, or `None` when the cursor is at
/// the end of the line or at an invalid byte position.
#[must_use]
pub fn delete_word_forward(text: &str, cursor_pos: usize) -> Option<(String, String)> {
    // Reject a cursor at/past the end or inside a multi-byte character; the
    // latter previously caused a slicing panic below.
    if cursor_pos >= text.len() || !text.is_char_boundary(cursor_pos) {
        return None;
    }
    let before_cursor = &text[..cursor_pos];
    let after_cursor = &text[cursor_pos..];
    let mut chars = after_cursor.chars();
    // Number of bytes to delete, accumulated as characters are consumed.
    let mut word_end = 0;
    while let Some(ch) = chars.next() {
        word_end += ch.len_utf8();
        if ch.is_alphanumeric() || ch == '_' {
            // Reached a word: consume the rest of it, then stop.
            for ch in chars.by_ref() {
                if !ch.is_alphanumeric() && ch != '_' {
                    break;
                }
                word_end += ch.len_utf8();
            }
            break;
        }
    }
    let deleted_text = text[cursor_pos..cursor_pos + word_end].to_string();
    let remaining_text = format!("{}{}", before_cursor, &after_cursor[word_end..]);
    Some((deleted_text, remaining_text))
}
}
#[cfg(test)]
mod tests {
    use super::*;
    // All fixtures below are ASCII, so byte and character cursor indices
    // coincide.
    #[test]
    fn test_cursor_token_position() {
        let query = "SELECT * FROM users WHERE id = 1";
        // Cursor at position 0 is always token ordinal 0.
        assert_eq!(TextNavigator::get_cursor_token_position(query, 0), (0, 8));
        // Cursor inside "SELECT" (token 0) reports ordinal 1.
        assert_eq!(TextNavigator::get_cursor_token_position(query, 3), (1, 8));
        // Cursor on "*" (token 1) reports ordinal 2.
        assert_eq!(TextNavigator::get_cursor_token_position(query, 7), (2, 8));
    }
    #[test]
    fn test_kill_line_backward() {
        let text = "SELECT * FROM users";
        let result = TextEditor::kill_line_backward(text, 8);
        assert_eq!(
            result,
            Some(("SELECT *".to_string(), " FROM users".to_string()))
        );
        // Nothing before the cursor: no-op.
        let result = TextEditor::kill_line_backward(text, 0);
        assert_eq!(result, None);
    }
    #[test]
    fn test_delete_word_backward() {
        let text = "SELECT * FROM users";
        // Deletes the word plus its leading whitespace; new cursor lands
        // where the deleted span began.
        let result = TextEditor::delete_word_backward(text, 13);
        assert_eq!(
            result,
            Some((" FROM".to_string(), "SELECT * users".to_string(), 8))
        );
    }
}