// sql_cli/text_navigation.rs

use crate::recursive_parser::{Lexer, Token};

/// Manages text navigation and token-based movement
/// Extracted from the monolithic `enhanced_tui.rs`
pub struct TextNavigator;

impl TextNavigator {
    /// Get the cursor's position in terms of tokens (`current_token`, `total_tokens`)
    #[must_use]
    pub fn get_cursor_token_position(query: &str, cursor_pos: usize) -> (usize, usize) {
        if query.is_empty() {
            return (0, 0);
        }

        // Use lexer to tokenize the query
        let mut lexer = Lexer::new(query);
        let tokens = lexer.tokenize_all_with_positions();

        if tokens.is_empty() {
            return (0, 0);
        }

        // Special case: cursor at position 0 is always before the first token
        if cursor_pos == 0 {
            return (0, tokens.len());
        }

        // Find which token the cursor is in
        let mut current_token = 0;
        let mut found = false;
        for (i, (start, end, _)) in tokens.iter().enumerate() {
            if cursor_pos >= *start && cursor_pos <= *end {
                current_token = i + 1;
                found = true;
                break;
            } else if cursor_pos < *start {
                // Cursor is between tokens (before token i)
                current_token = i;
                found = true;
                break;
            }
        }

        // If no token matched, the cursor is past the last token
        if !found {
            current_token = tokens.len();
        }

        (current_token, tokens.len())
    }

    /// Get the token at the cursor position
    #[must_use]
    pub fn get_token_at_cursor(query: &str, cursor_pos: usize) -> Option<String> {
        if query.is_empty() {
            return None;
        }

        // Use lexer to tokenize the query
        let mut lexer = Lexer::new(query);
        let tokens = lexer.tokenize_all_with_positions();

        // Find the token at cursor position
        for (start, end, token) in &tokens {
            if cursor_pos >= *start && cursor_pos <= *end {
                // Format token nicely
                let token_str = Self::format_token(token);
                return Some(token_str.to_string());
            }
        }

        None
    }

    /// Calculate the target position for jumping to the previous token
    #[must_use]
    pub fn calculate_prev_token_position(query: &str, cursor_pos: usize) -> Option<usize> {
        if cursor_pos == 0 {
            return None;
        }

        let mut lexer = Lexer::new(query);
        let tokens = lexer.tokenize_all_with_positions();

        // Find current token position
        let mut in_token = false;
        let mut current_token_start = 0;
        for (start, end, _) in &tokens {
            if cursor_pos > *start && cursor_pos <= *end {
                in_token = true;
                current_token_start = *start;
                break;
            }
        }

        // Find the previous token start
        let target_pos = if in_token && cursor_pos > current_token_start {
            // If we're in the middle of a token, go to its start
            current_token_start
        } else {
            // Otherwise, find the previous token
            let mut prev_start = 0;
            for (start, _, _) in tokens.iter().rev() {
                if *start < cursor_pos {
                    prev_start = *start;
                    break;
                }
            }
            prev_start
        };

        if target_pos < cursor_pos {
            Some(target_pos)
        } else {
            None
        }
    }

    /// Calculate the target position for jumping to the next token
    #[must_use]
    pub fn calculate_next_token_position(query: &str, cursor_pos: usize) -> Option<usize> {
        let query_len = query.len();
        if cursor_pos >= query_len {
            return None;
        }

        let mut lexer = Lexer::new(query);
        let tokens = lexer.tokenize_all_with_positions();

        // Find current token position
        let mut in_token = false;
        let mut current_token_end = query_len;
        for (start, end, _) in &tokens {
            if cursor_pos >= *start && cursor_pos < *end {
                in_token = true;
                current_token_end = *end;
                break;
            }
        }

        // Find the next token start
        let target_pos = if in_token && cursor_pos < current_token_end {
            // If we're in a token, go to the start of the next token
            let mut next_start = query_len;
            for (start, _, _) in &tokens {
                if *start > current_token_end {
                    next_start = *start;
                    break;
                }
            }
            next_start
        } else {
            // Otherwise, find the next token from current position
            let mut next_start = query_len;
            for (start, _, _) in &tokens {
                if *start > cursor_pos {
                    next_start = *start;
                    break;
                }
            }
            next_start
        };

        if target_pos > cursor_pos && target_pos <= query_len {
            Some(target_pos)
        } else {
            None
        }
    }

    /// Format a token for display
    fn format_token(token: &Token) -> &str {
        match token {
            Token::Select => "SELECT",
            Token::From => "FROM",
            Token::Where => "WHERE",
            Token::With => "WITH",
            Token::GroupBy => "GROUP BY",
            Token::OrderBy => "ORDER BY",
            Token::Having => "HAVING",
            Token::As => "AS",
            Token::Asc => "ASC",
            Token::Desc => "DESC",
            Token::And => "AND",
            Token::Or => "OR",
            Token::In => "IN",
            Token::DateTime => "DateTime",
            Token::Case => "CASE",
            Token::When => "WHEN",
            Token::Then => "THEN",
            Token::Else => "ELSE",
            Token::End => "END",
            Token::Distinct => "DISTINCT",
            Token::Over => "OVER",
            Token::Partition => "PARTITION",
            Token::By => "BY",
            Token::Identifier(s) => s,
            Token::QuotedIdentifier(s) => s,
            Token::StringLiteral(s) => s,
            Token::NumberLiteral(s) => s,
            Token::Star => "*",
            Token::Comma => ",",
            Token::Colon => ":",
            Token::Dot => ".",
            Token::LeftParen => "(",
            Token::RightParen => ")",
            Token::Equal => "=",
            Token::NotEqual => "!=",
            Token::LessThan => "<",
            Token::LessThanOrEqual => "<=",
            Token::GreaterThan => ">",
            Token::GreaterThanOrEqual => ">=",
            Token::Like => "LIKE",
            Token::Not => "NOT",
            Token::Is => "IS",
            Token::Null => "NULL",
            Token::Between => "BETWEEN",
            Token::Limit => "LIMIT",
            Token::Offset => "OFFSET",
            Token::Plus => "+",
            Token::Minus => "-",
            Token::Divide => "/",
            Token::Modulo => "%",
            Token::Concat => "||",
            Token::Join => "JOIN",
            Token::Inner => "INNER",
            Token::Left => "LEFT",
            Token::Right => "RIGHT",
            Token::Full => "FULL",
            Token::Cross => "CROSS",
            Token::Outer => "OUTER",
            Token::On => "ON",
            Token::Eof => "EOF",
        }
    }
}

/// Text editing utilities
pub struct TextEditor;

impl TextEditor {
    /// Kill text from beginning of line to cursor position
    /// Returns (`killed_text`, `remaining_text`)
    /// `cursor_pos` is treated as a character index
    #[must_use]
    pub fn kill_line_backward(text: &str, cursor_pos: usize) -> Option<(String, String)> {
        if cursor_pos == 0 {
            return None;
        }

        let killed_text = text.chars().take(cursor_pos).collect::<String>();
        let remaining_text = text.chars().skip(cursor_pos).collect::<String>();

        Some((killed_text, remaining_text))
    }

    /// Kill text from cursor position to end of line
    /// Returns (`killed_text`, `remaining_text`)
    /// `cursor_pos` is treated as a character index
    #[must_use]
    pub fn kill_line_forward(text: &str, cursor_pos: usize) -> Option<(String, String)> {
        if cursor_pos >= text.len() {
            return None;
        }

        let remaining_text = text.chars().take(cursor_pos).collect::<String>();
        let killed_text = text.chars().skip(cursor_pos).collect::<String>();

        Some((killed_text, remaining_text))
    }

    /// Delete word backward from cursor position
    /// Returns (`deleted_text`, `remaining_text`, `new_cursor_pos`)
    /// `cursor_pos` is a byte offset and must lie on a `char` boundary
    #[must_use]
    pub fn delete_word_backward(text: &str, cursor_pos: usize) -> Option<(String, String, usize)> {
        if cursor_pos == 0 {
            return None;
        }

        let before_cursor = &text[..cursor_pos];
        let after_cursor = &text[cursor_pos..];

        // Find word boundary, including leading whitespace before the word
        let mut word_start = before_cursor.len();
        let mut chars = before_cursor.chars().rev().peekable();

        // Step 1: Skip trailing whitespace (if any)
        while let Some(&ch) = chars.peek() {
            if ch.is_whitespace() {
                word_start -= ch.len_utf8();
                chars.next();
            } else {
                break;
            }
        }

        // Step 2: Skip the word itself
        while let Some(&ch) = chars.peek() {
            if !ch.is_alphanumeric() && ch != '_' {
                break;
            }
            word_start -= ch.len_utf8();
            chars.next();
        }

        // Step 3: Include any whitespace before the word (so deleting at a word boundary includes the space)
        while let Some(&ch) = chars.peek() {
            if ch.is_whitespace() {
                word_start -= ch.len_utf8();
                chars.next();
            } else {
                break;
            }
        }

        let deleted_text = text[word_start..cursor_pos].to_string();
        let remaining_text = format!("{}{}", &text[..word_start], after_cursor);

        Some((deleted_text, remaining_text, word_start))
    }

    /// Delete word forward from cursor position
    /// Returns (`deleted_text`, `remaining_text`)
    /// `cursor_pos` is a byte offset and must lie on a `char` boundary
    #[must_use]
    pub fn delete_word_forward(text: &str, cursor_pos: usize) -> Option<(String, String)> {
        if cursor_pos >= text.len() {
            return None;
        }

        let before_cursor = &text[..cursor_pos];
        let after_cursor = &text[cursor_pos..];

        // Find word boundary
        let mut chars = after_cursor.chars();
        let mut word_end = 0;

        // Skip any non-alphanumeric chars at the beginning
        while let Some(ch) = chars.next() {
            word_end += ch.len_utf8();
            if ch.is_alphanumeric() || ch == '_' {
                // Found start of word, now skip the rest of it
                for ch in chars.by_ref() {
                    if !ch.is_alphanumeric() && ch != '_' {
                        break;
                    }
                    word_end += ch.len_utf8();
                }
                break;
            }
        }

        let deleted_text = text[cursor_pos..cursor_pos + word_end].to_string();
        let remaining_text = format!("{}{}", before_cursor, &after_cursor[word_end..]);

        Some((deleted_text, remaining_text))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_cursor_token_position() {
        let query = "SELECT * FROM users WHERE id = 1";

        // Cursor at beginning
        assert_eq!(TextNavigator::get_cursor_token_position(query, 0), (0, 8));

        // Cursor in SELECT
        assert_eq!(TextNavigator::get_cursor_token_position(query, 3), (1, 8));

        // Cursor after SELECT
        assert_eq!(TextNavigator::get_cursor_token_position(query, 7), (2, 8));
    }
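
    // Additional coverage sketches: they reuse the query from
    // `test_cursor_token_position` and assume the lexer classifies the leading
    // keyword as `Token::Select` and reports byte-offset spans (the same
    // assumptions that test already relies on). Exact jump targets are asserted
    // loosely since they depend on whether the lexer reports token ends
    // inclusively or exclusively.
    #[test]
    fn test_get_token_at_cursor() {
        let query = "SELECT * FROM users WHERE id = 1";

        // Cursor inside SELECT reports the keyword's display form
        assert_eq!(
            TextNavigator::get_token_at_cursor(query, 3),
            Some("SELECT".to_string())
        );

        // An empty query never yields a token
        assert_eq!(TextNavigator::get_token_at_cursor("", 0), None);
    }

    #[test]
    fn test_token_jump_positions() {
        let query = "SELECT * FROM users WHERE id = 1";

        // Jumping forward from the start must land strictly ahead of the cursor
        let next = TextNavigator::calculate_next_token_position(query, 0);
        assert!(matches!(next, Some(p) if p > 0 && p <= query.len()));

        // Jumping backward from the end must land strictly behind the cursor
        let prev = TextNavigator::calculate_prev_token_position(query, query.len());
        assert!(matches!(prev, Some(p) if p < query.len()));

        // No previous token exists at position 0
        assert_eq!(TextNavigator::calculate_prev_token_position(query, 0), None);
    }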

    #[test]
    fn test_kill_line_backward() {
        let text = "SELECT * FROM users";

        // Kill from middle
        let result = TextEditor::kill_line_backward(text, 8);
        assert_eq!(
            result,
            Some(("SELECT *".to_string(), " FROM users".to_string()))
        );

        // Kill from beginning (no-op)
        let result = TextEditor::kill_line_backward(text, 0);
        assert_eq!(result, None);
    }
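
    // A coverage sketch mirroring `test_kill_line_backward`; the expected
    // values follow directly from the character-based split in
    // `kill_line_forward`.
    #[test]
    fn test_kill_line_forward() {
        let text = "SELECT * FROM users";

        // Kill from middle: everything after the cursor is killed
        let result = TextEditor::kill_line_forward(text, 8);
        assert_eq!(
            result,
            Some((" FROM users".to_string(), "SELECT *".to_string()))
        );

        // Kill from end (no-op)
        let result = TextEditor::kill_line_forward(text, text.len());
        assert_eq!(result, None);
    }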

    #[test]
    fn test_delete_word_backward() {
        let text = "SELECT * FROM users";

        // Delete "FROM" along with the space before it
        let result = TextEditor::delete_word_backward(text, 13);
        assert_eq!(
            result,
            Some((" FROM".to_string(), "SELECT * users".to_string(), 8))
        );
    }
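
    // Forward counterpart of the test above, again a sketch derived from the
    // function's own byte-offset handling: at offset 8 the cursor sits just
    // after `*`, so the space and the following word "FROM" are removed.
    #[test]
    fn test_delete_word_forward() {
        let text = "SELECT * FROM users";

        // Delete forward from just after "*"
        let result = TextEditor::delete_word_forward(text, 8);
        assert_eq!(
            result,
            Some((" FROM".to_string(), "SELECT * users".to_string()))
        );

        // Nothing to delete at the end of the text
        let result = TextEditor::delete_word_forward(text, text.len());
        assert_eq!(result, None);
    }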
}