// sql_cli/text_navigation.rs

use crate::recursive_parser::{Lexer, Token};

/// Manages text navigation and token-based movement
/// Extracted from the monolithic `enhanced_tui.rs`
pub struct TextNavigator;

impl TextNavigator {
    /// Get the cursor's position in terms of tokens (`current_token`, `total_tokens`)
    #[must_use]
    pub fn get_cursor_token_position(query: &str, cursor_pos: usize) -> (usize, usize) {
        if query.is_empty() {
            return (0, 0);
        }

        // Use lexer to tokenize the query
        let mut lexer = Lexer::new(query);
        let tokens = lexer.tokenize_all_with_positions();

        if tokens.is_empty() {
            return (0, 0);
        }

        // Special case: cursor at position 0 is always before the first token
        if cursor_pos == 0 {
            return (0, tokens.len());
        }

        // Find which token the cursor is in
        let mut current_token = 0;
        for (i, (start, end, _)) in tokens.iter().enumerate() {
            if cursor_pos >= *start && cursor_pos <= *end {
                current_token = i + 1;
                break;
            } else if cursor_pos < *start {
                // Cursor is between tokens
                current_token = i;
                break;
            }
        }

        // If cursor is after all tokens
        if current_token == 0 && cursor_pos > 0 {
            current_token = tokens.len();
        }

        (current_token, tokens.len())
    }

    /// Get the token at the cursor position
    #[must_use]
    pub fn get_token_at_cursor(query: &str, cursor_pos: usize) -> Option<String> {
        if query.is_empty() {
            return None;
        }

        // Use lexer to tokenize the query
        let mut lexer = Lexer::new(query);
        let tokens = lexer.tokenize_all_with_positions();

        // Find the token at cursor position
        for (start, end, token) in &tokens {
            if cursor_pos >= *start && cursor_pos <= *end {
                // Format token nicely
                let token_str = Self::format_token(token);
                return Some(token_str.to_string());
            }
        }

        None
    }

    /// Calculate the target position for jumping to the previous token
    #[must_use]
    pub fn calculate_prev_token_position(query: &str, cursor_pos: usize) -> Option<usize> {
        if cursor_pos == 0 {
            return None;
        }

        let mut lexer = Lexer::new(query);
        let tokens = lexer.tokenize_all_with_positions();

        // Find current token position
        let mut in_token = false;
        let mut current_token_start = 0;
        for (start, end, _) in &tokens {
            if cursor_pos > *start && cursor_pos <= *end {
                in_token = true;
                current_token_start = *start;
                break;
            }
        }

        // Find the previous token start
        let target_pos = if in_token && cursor_pos > current_token_start {
            // If we're in the middle of a token, go to its start
            current_token_start
        } else {
            // Otherwise, find the previous token
            let mut prev_start = 0;
            for (start, _, _) in tokens.iter().rev() {
                if *start < cursor_pos {
                    prev_start = *start;
                    break;
                }
            }
            prev_start
        };

        if target_pos < cursor_pos {
            Some(target_pos)
        } else {
            None
        }
    }

    /// Calculate the target position for jumping to the next token
    #[must_use]
    pub fn calculate_next_token_position(query: &str, cursor_pos: usize) -> Option<usize> {
        let query_len = query.len();
        if cursor_pos >= query_len {
            return None;
        }

        let mut lexer = Lexer::new(query);
        let tokens = lexer.tokenize_all_with_positions();

        // Find current token position
        let mut in_token = false;
        let mut current_token_end = query_len;
        for (start, end, _) in &tokens {
            if cursor_pos >= *start && cursor_pos < *end {
                in_token = true;
                current_token_end = *end;
                break;
            }
        }

        // Find the next token start
        let target_pos = if in_token && cursor_pos < current_token_end {
            // If we're in a token, go to the start of the next token
            let mut next_start = query_len;
            for (start, _, _) in &tokens {
                if *start > current_token_end {
                    next_start = *start;
                    break;
                }
            }
            next_start
        } else {
            // Otherwise, find the next token from current position
            let mut next_start = query_len;
            for (start, _, _) in &tokens {
                if *start > cursor_pos {
                    next_start = *start;
                    break;
                }
            }
            next_start
        };

        if target_pos > cursor_pos && target_pos <= query_len {
            Some(target_pos)
        } else {
            None
        }
    }

    /// Format a token for display
    fn format_token(token: &Token) -> &str {
        match token {
            Token::Select => "SELECT",
            Token::From => "FROM",
            Token::Where => "WHERE",
            Token::With => "WITH",
            Token::GroupBy => "GROUP BY",
            Token::OrderBy => "ORDER BY",
            Token::Having => "HAVING",
            Token::As => "AS",
            Token::Asc => "ASC",
            Token::Desc => "DESC",
            Token::And => "AND",
            Token::Or => "OR",
            Token::In => "IN",
            Token::DateTime => "DateTime",
            Token::Case => "CASE",
            Token::When => "WHEN",
            Token::Then => "THEN",
            Token::Else => "ELSE",
            Token::End => "END",
            Token::Distinct => "DISTINCT",
            Token::Over => "OVER",
            Token::Partition => "PARTITION",
            Token::By => "BY",
            Token::Identifier(s) => s,
            Token::QuotedIdentifier(s) => s,
            Token::StringLiteral(s) => s,
            Token::NumberLiteral(s) => s,
            Token::Star => "*",
            Token::Comma => ",",
            Token::Colon => ":",
            Token::Dot => ".",
            Token::LeftParen => "(",
            Token::RightParen => ")",
            Token::Equal => "=",
            Token::NotEqual => "!=",
            Token::LessThan => "<",
            Token::LessThanOrEqual => "<=",
            Token::GreaterThan => ">",
            Token::GreaterThanOrEqual => ">=",
            Token::Like => "LIKE",
            Token::Not => "NOT",
            Token::Is => "IS",
            Token::Null => "NULL",
            Token::Between => "BETWEEN",
            Token::Limit => "LIMIT",
            Token::Offset => "OFFSET",
            Token::Plus => "+",
            Token::Minus => "-",
            Token::Divide => "/",
            Token::Modulo => "%",
            Token::Join => "JOIN",
            Token::Inner => "INNER",
            Token::Left => "LEFT",
            Token::Right => "RIGHT",
            Token::Full => "FULL",
            Token::Cross => "CROSS",
            Token::Outer => "OUTER",
            Token::On => "ON",
            Token::Eof => "EOF",
        }
    }
}

/// Text editing utilities
pub struct TextEditor;

impl TextEditor {
    /// Kill text from beginning of line to cursor position
    /// Returns (`killed_text`, `remaining_text`)
    #[must_use]
    pub fn kill_line_backward(text: &str, cursor_pos: usize) -> Option<(String, String)> {
        if cursor_pos == 0 {
            return None;
        }

        let killed_text = text.chars().take(cursor_pos).collect::<String>();
        let remaining_text = text.chars().skip(cursor_pos).collect::<String>();

        Some((killed_text, remaining_text))
    }

    /// Kill text from cursor position to end of line
    /// Returns (`killed_text`, `remaining_text`)
    #[must_use]
    pub fn kill_line_forward(text: &str, cursor_pos: usize) -> Option<(String, String)> {
        if cursor_pos >= text.len() {
            return None;
        }

        let remaining_text = text.chars().take(cursor_pos).collect::<String>();
        let killed_text = text.chars().skip(cursor_pos).collect::<String>();

        Some((killed_text, remaining_text))
    }

    /// Delete word backward from cursor position
    /// Returns (`deleted_text`, `remaining_text`, `new_cursor_pos`)
    #[must_use]
    pub fn delete_word_backward(text: &str, cursor_pos: usize) -> Option<(String, String, usize)> {
        if cursor_pos == 0 {
            return None;
        }

        let before_cursor = &text[..cursor_pos];
        let after_cursor = &text[cursor_pos..];

        // Find word boundary, including leading whitespace before the word
        let mut word_start = before_cursor.len();
        let mut chars = before_cursor.chars().rev().peekable();

        // Step 1: Skip trailing whitespace (if any)
        while let Some(&ch) = chars.peek() {
            if ch.is_whitespace() {
                word_start -= ch.len_utf8();
                chars.next();
            } else {
                break;
            }
        }

        // Step 2: Skip the word itself
        while let Some(&ch) = chars.peek() {
            if !ch.is_alphanumeric() && ch != '_' {
                break;
            }
            word_start -= ch.len_utf8();
            chars.next();
        }

        // Step 3: Include any whitespace before the word (so deleting at a word boundary includes the space)
        while let Some(&ch) = chars.peek() {
            if ch.is_whitespace() {
                word_start -= ch.len_utf8();
                chars.next();
            } else {
                break;
            }
        }

        let deleted_text = text[word_start..cursor_pos].to_string();
        let remaining_text = format!("{}{}", &text[..word_start], after_cursor);

        Some((deleted_text, remaining_text, word_start))
    }

    /// Delete word forward from cursor position
    /// Returns (`deleted_text`, `remaining_text`)
    #[must_use]
    pub fn delete_word_forward(text: &str, cursor_pos: usize) -> Option<(String, String)> {
        if cursor_pos >= text.len() {
            return None;
        }

        let before_cursor = &text[..cursor_pos];
        let after_cursor = &text[cursor_pos..];

        // Find word boundary
        let mut chars = after_cursor.chars();
        let mut word_end = 0;

        // Skip any non-alphanumeric chars at the beginning
        while let Some(ch) = chars.next() {
            word_end += ch.len_utf8();
            if ch.is_alphanumeric() || ch == '_' {
                // Found start of word, now skip the rest of it
                for ch in chars.by_ref() {
                    if !ch.is_alphanumeric() && ch != '_' {
                        break;
                    }
                    word_end += ch.len_utf8();
                }
                break;
            }
        }

        let deleted_text = text[cursor_pos..cursor_pos + word_end].to_string();
        let remaining_text = format!("{}{}", before_cursor, &after_cursor[word_end..]);

        Some((deleted_text, remaining_text))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_cursor_token_position() {
        let query = "SELECT * FROM users WHERE id = 1";

        // Cursor at beginning
        assert_eq!(TextNavigator::get_cursor_token_position(query, 0), (0, 8));

        // Cursor in SELECT
        assert_eq!(TextNavigator::get_cursor_token_position(query, 3), (1, 8));

        // Cursor after SELECT
        assert_eq!(TextNavigator::get_cursor_token_position(query, 7), (2, 8));
    }
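
    // Additional sketches of the navigation helpers, assuming the byte offsets
    // reported by `tokenize_all_with_positions` match those exercised by the
    // test above; the exact jump targets depend on the lexer's span convention.
    #[test]
    fn test_get_token_at_cursor() {
        let query = "SELECT * FROM users";

        // Cursor inside the SELECT keyword resolves to the formatted keyword
        assert_eq!(
            TextNavigator::get_token_at_cursor(query, 3),
            Some("SELECT".to_string())
        );

        // Empty query has no token at any position
        assert_eq!(TextNavigator::get_token_at_cursor("", 0), None);
    }

    #[test]
    fn test_calculate_prev_and_next_token_position() {
        let query = "SELECT * FROM users";

        // From inside SELECT, the previous-token target is its start
        assert_eq!(TextNavigator::calculate_prev_token_position(query, 3), Some(0));

        // At the very beginning there is nothing to jump back to
        assert_eq!(TextNavigator::calculate_prev_token_position(query, 0), None);

        // From inside SELECT, the next-token target is the start of `*`
        assert_eq!(TextNavigator::calculate_next_token_position(query, 3), Some(7));

        // At (or past) the end of the query there is nothing to jump to
        assert_eq!(
            TextNavigator::calculate_next_token_position(query, query.len()),
            None
        );
    }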

    #[test]
    fn test_kill_line_backward() {
        let text = "SELECT * FROM users";

        // Kill from middle
        let result = TextEditor::kill_line_backward(text, 8);
        assert_eq!(
            result,
            Some(("SELECT *".to_string(), " FROM users".to_string()))
        );

        // Kill from beginning (no-op)
        let result = TextEditor::kill_line_backward(text, 0);
        assert_eq!(result, None);
    }
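
    // A companion check for `kill_line_forward`, mirroring the backward case above.
    #[test]
    fn test_kill_line_forward() {
        let text = "SELECT * FROM users";

        // Kill from the middle: text after the cursor is killed, text before it remains
        let result = TextEditor::kill_line_forward(text, 8);
        assert_eq!(
            result,
            Some((" FROM users".to_string(), "SELECT *".to_string()))
        );

        // Kill at the end of the line (no-op)
        assert_eq!(TextEditor::kill_line_forward(text, text.len()), None);
    }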

    #[test]
    fn test_delete_word_backward() {
        let text = "SELECT * FROM users";

        // Delete "FROM" together with the preceding space
        let result = TextEditor::delete_word_backward(text, 13);
        assert_eq!(
            result,
            Some((" FROM".to_string(), "SELECT * users".to_string(), 8))
        );
    }
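
    // A sketch of the forward deletion behaviour: leading separators are consumed
    // together with the following word, mirroring how the backward variant keeps
    // the preceding space.
    #[test]
    fn test_delete_word_forward() {
        let text = "SELECT * FROM users";

        // From the start of "users": just the word is deleted
        assert_eq!(
            TextEditor::delete_word_forward(text, 14),
            Some(("users".to_string(), "SELECT * FROM ".to_string()))
        );

        // From just after "SELECT": the separators and "FROM" are deleted
        assert_eq!(
            TextEditor::delete_word_forward(text, 6),
            Some((" * FROM".to_string(), "SELECT users".to_string()))
        );

        // At the end of the text (no-op)
        assert_eq!(TextEditor::delete_word_forward(text, text.len()), None);
    }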
}