sql-cli 1.73.1

SQL query tool for CSV/JSON with both interactive TUI and non-interactive CLI modes - perfect for exploration and automation
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
use crate::recursive_parser::{Lexer, Token};

/// Manages text navigation and token-based movement
/// Extracted from the monolithic `enhanced_tui.rs`
///
/// NOTE(review): cursor positions here appear to be byte offsets into the
/// query, matching the `(start, end, token)` triples returned by
/// `Lexer::tokenize_all_with_positions` — confirm against the lexer.
pub struct TextNavigator;

impl TextNavigator {
    /// Get the cursor's position in terms of tokens (`current_token`, `total_tokens`)
    ///
    /// `current_token` is 1-based when the cursor falls inside a token;
    /// 0 means "before the first token" (returned when `cursor_pos == 0`).
    #[must_use]
    pub fn get_cursor_token_position(query: &str, cursor_pos: usize) -> (usize, usize) {
        if query.is_empty() {
            return (0, 0);
        }

        // Use lexer to tokenize the query
        let mut lexer = Lexer::new(query);
        let tokens = lexer.tokenize_all_with_positions();

        if tokens.is_empty() {
            return (0, 0);
        }

        // Special case: cursor at position 0 is always before the first token
        if cursor_pos == 0 {
            return (0, tokens.len());
        }

        // Find which token the cursor is in.
        // Both bounds are inclusive, so a cursor sitting exactly on the
        // boundary between two adjacent tokens matches the earlier one.
        let mut current_token = 0;
        for (i, (start, end, _)) in tokens.iter().enumerate() {
            if cursor_pos >= *start && cursor_pos <= *end {
                current_token = i + 1;
                break;
            } else if cursor_pos < *start {
                // Cursor is between tokens
                current_token = i;
                break;
            }
        }

        // If cursor is after all tokens
        // NOTE(review): if the cursor sits in whitespace *before* the first
        // token (possible only when the query starts with whitespace), the
        // loop above leaves `current_token == 0` and this fallback reports
        // `tokens.len()` ("after all tokens") — confirm whether
        // leading-whitespace queries can reach this path.
        if current_token == 0 && cursor_pos > 0 {
            current_token = tokens.len();
        }

        (current_token, tokens.len())
    }

    /// Get the token at the cursor position
    ///
    /// Returns the display form of the token whose `[start, end]` span
    /// (inclusive at both ends) contains `cursor_pos`, or `None` if the
    /// cursor is in whitespace or past the last token.
    #[must_use]
    pub fn get_token_at_cursor(query: &str, cursor_pos: usize) -> Option<String> {
        if query.is_empty() {
            return None;
        }

        // Use lexer to tokenize the query
        let mut lexer = Lexer::new(query);
        let tokens = lexer.tokenize_all_with_positions();

        // Find the token at cursor position
        for (start, end, token) in &tokens {
            if cursor_pos >= *start && cursor_pos <= *end {
                // Format token nicely
                let token_str = Self::format_token(token);
                return Some(token_str.to_string());
            }
        }

        None
    }

    /// Calculate the target position for jumping to the previous token
    ///
    /// Mid-token, this jumps to the start of the current token; at a token's
    /// first position (or in whitespace), it jumps to the start of the
    /// nearest token that begins before the cursor. Returns `None` when no
    /// leftward move is possible.
    #[must_use]
    pub fn calculate_prev_token_position(query: &str, cursor_pos: usize) -> Option<usize> {
        if cursor_pos == 0 {
            return None;
        }

        let mut lexer = Lexer::new(query);
        let tokens = lexer.tokenize_all_with_positions();

        // Find current token position.
        // Strict `>` on `start` is deliberate: a cursor exactly at a token's
        // start counts as *outside* it, so the jump continues to the token
        // before it instead of staying put.
        let mut in_token = false;
        let mut current_token_start = 0;
        for (start, end, _) in &tokens {
            if cursor_pos > *start && cursor_pos <= *end {
                in_token = true;
                current_token_start = *start;
                break;
            }
        }

        // Find the previous token start
        let target_pos = if in_token && cursor_pos > current_token_start {
            // If we're in the middle of a token, go to its start
            current_token_start
        } else {
            // Otherwise, find the previous token
            // (scan in reverse so the first match is the closest one left of
            // the cursor; defaults to 0 when none exists)
            let mut prev_start = 0;
            for (start, _, _) in tokens.iter().rev() {
                if *start < cursor_pos {
                    prev_start = *start;
                    break;
                }
            }
            prev_start
        };

        if target_pos < cursor_pos {
            Some(target_pos)
        } else {
            None
        }
    }

    /// Calculate the target position for jumping to the next token
    ///
    /// Inside a token, jumps to the start of the first token that begins
    /// after the current token ends; otherwise jumps to the next token start
    /// after the cursor. Falls back to end-of-query when there is no later
    /// token. Returns `None` when no rightward move is possible.
    #[must_use]
    pub fn calculate_next_token_position(query: &str, cursor_pos: usize) -> Option<usize> {
        let query_len = query.len();
        if cursor_pos >= query_len {
            return None;
        }

        let mut lexer = Lexer::new(query);
        let tokens = lexer.tokenize_all_with_positions();

        // Find current token position.
        // Half-open check (`>= start && < end`): a cursor exactly at a
        // token's end is treated as already past it.
        let mut in_token = false;
        let mut current_token_end = query_len;
        for (start, end, _) in &tokens {
            if cursor_pos >= *start && cursor_pos < *end {
                in_token = true;
                current_token_end = *end;
                break;
            }
        }

        // Find the next token start
        let target_pos = if in_token && cursor_pos < current_token_end {
            // If we're in a token, go to the start of the next token
            let mut next_start = query_len;
            for (start, _, _) in &tokens {
                if *start > current_token_end {
                    next_start = *start;
                    break;
                }
            }
            next_start
        } else {
            // Otherwise, find the next token from current position
            let mut next_start = query_len;
            for (start, _, _) in &tokens {
                if *start > cursor_pos {
                    next_start = *start;
                    break;
                }
            }
            next_start
        };

        if target_pos > cursor_pos && target_pos <= query_len {
            Some(target_pos)
        } else {
            None
        }
    }

    /// Format a token for display
    ///
    /// Keyword and operator variants map to canonical uppercase/symbol text;
    /// identifier, literal, and comment variants return their stored text.
    /// The returned `&str` borrows from `token` for those carrying a String.
    fn format_token(token: &Token) -> &str {
        match token {
            Token::Select => "SELECT",
            Token::From => "FROM",
            Token::Where => "WHERE",
            Token::With => "WITH",
            Token::GroupBy => "GROUP BY",
            Token::OrderBy => "ORDER BY",
            Token::Having => "HAVING",
            Token::As => "AS",
            Token::Asc => "ASC",
            Token::Desc => "DESC",
            Token::And => "AND",
            Token::Or => "OR",
            Token::In => "IN",
            Token::DateTime => "DateTime",
            Token::Case => "CASE",
            Token::When => "WHEN",
            Token::Then => "THEN",
            Token::Else => "ELSE",
            Token::End => "END",
            Token::Distinct => "DISTINCT",
            Token::Exclude => "EXCLUDE",
            Token::Pivot => "PIVOT",
            Token::Unpivot => "UNPIVOT",
            Token::For => "FOR",
            Token::Over => "OVER",
            Token::Partition => "PARTITION",
            Token::By => "BY",
            // Window frame keywords
            Token::Rows => "ROWS",
            Token::Range => "RANGE",
            Token::Unbounded => "UNBOUNDED",
            Token::Preceding => "PRECEDING",
            Token::Following => "FOLLOWING",
            Token::Current => "CURRENT",
            Token::Row => "ROW",
            // Set operation keywords
            Token::Union => "UNION",
            Token::Intersect => "INTERSECT",
            Token::Except => "EXCEPT",
            // Special CTE keywords
            Token::Web => "WEB",
            Token::File => "FILE",
            // Row expansion functions
            Token::Unnest => "UNNEST",
            Token::Identifier(s) => s,
            Token::QuotedIdentifier(s) => s,
            Token::StringLiteral(s) => s,
            Token::JsonBlock(s) => s,
            Token::NumberLiteral(s) => s,
            Token::Star => "*",
            Token::Comma => ",",
            Token::Colon => ":",
            Token::Dot => ".",
            Token::LeftParen => "(",
            Token::RightParen => ")",
            Token::Equal => "=",
            Token::NotEqual => "!=",
            Token::LessThan => "<",
            Token::LessThanOrEqual => "<=",
            Token::GreaterThan => ">",
            Token::GreaterThanOrEqual => ">=",
            Token::Like => "LIKE",
            Token::ILike => "ILIKE",
            Token::Not => "NOT",
            Token::Is => "IS",
            Token::Null => "NULL",
            Token::Between => "BETWEEN",
            Token::Limit => "LIMIT",
            Token::Offset => "OFFSET",
            Token::Into => "INTO",
            Token::Plus => "+",
            Token::Minus => "-",
            Token::Divide => "/",
            Token::Modulo => "%",
            Token::Concat => "||",
            Token::Join => "JOIN",
            Token::Inner => "INNER",
            Token::Left => "LEFT",
            Token::Right => "RIGHT",
            Token::Full => "FULL",
            Token::Cross => "CROSS",
            Token::Outer => "OUTER",
            Token::On => "ON",
            Token::LineComment(text) => text,
            Token::BlockComment(text) => text,
            Token::Eof => "EOF",
            Token::Qualify => "QUALIFY",
        }
    }
}

/// Text editing utilities
pub struct TextEditor;

impl TextEditor {
    /// Kill text from beginning of line to cursor position
    /// Returns (`killed_text`, `remaining_text`)
    ///
    /// `cursor_pos` is a character (not byte) index. Returns `None` when the
    /// cursor is already at the start of the line; a cursor past the end
    /// kills the whole line.
    #[must_use]
    pub fn kill_line_backward(text: &str, cursor_pos: usize) -> Option<(String, String)> {
        if cursor_pos == 0 {
            return None;
        }

        let killed_text = text.chars().take(cursor_pos).collect::<String>();
        let remaining_text = text.chars().skip(cursor_pos).collect::<String>();

        Some((killed_text, remaining_text))
    }

    /// Kill text from cursor position to end of line
    /// Returns (`killed_text`, `remaining_text`)
    ///
    /// `cursor_pos` is a character (not byte) index, consistent with
    /// `kill_line_backward`. Returns `None` when the cursor is at (or past)
    /// the end of the line.
    #[must_use]
    pub fn kill_line_forward(text: &str, cursor_pos: usize) -> Option<(String, String)> {
        // Fix: guard on the character count, not the byte length, so the
        // bound agrees with the char-based take/skip below (the old byte
        // check let a cursor at end-of-line on multi-byte text produce an
        // empty kill instead of None).
        if cursor_pos >= text.chars().count() {
            return None;
        }

        let remaining_text = text.chars().take(cursor_pos).collect::<String>();
        let killed_text = text.chars().skip(cursor_pos).collect::<String>();

        Some((killed_text, remaining_text))
    }

    /// Delete word backward from cursor position
    /// Returns (`deleted_text`, `remaining_text`, `new_cursor_pos`)
    ///
    /// `cursor_pos` is a byte index (unlike the `kill_*` helpers). The
    /// deleted span covers any whitespace immediately left of the cursor,
    /// the word itself (alphanumerics and `_`), and any whitespace before
    /// the word. Returns `None` at the start of the line or when
    /// `cursor_pos` is not on a char boundary.
    #[must_use]
    pub fn delete_word_backward(text: &str, cursor_pos: usize) -> Option<(String, String, usize)> {
        if cursor_pos == 0 {
            return None;
        }

        // Fix: checked slicing — an out-of-range or mid-character byte index
        // now yields None instead of panicking.
        let before_cursor = text.get(..cursor_pos)?;
        let after_cursor = text.get(cursor_pos..)?;

        // Walk backwards from the cursor; `word_start` tracks the byte
        // offset where the deletion will begin.
        let mut word_start = before_cursor.len();
        let mut chars = before_cursor.chars().rev().peekable();

        // Step 1: Skip trailing whitespace (if any)
        while let Some(&ch) = chars.peek() {
            if ch.is_whitespace() {
                word_start -= ch.len_utf8();
                chars.next();
            } else {
                break;
            }
        }

        // Step 2: Skip the word itself (alphanumerics and underscores)
        while let Some(&ch) = chars.peek() {
            if !ch.is_alphanumeric() && ch != '_' {
                break;
            }
            word_start -= ch.len_utf8();
            chars.next();
        }

        // Step 3: Include any whitespace before the word, so deleting at a
        // word boundary also removes the separating space
        while let Some(&ch) = chars.peek() {
            if ch.is_whitespace() {
                word_start -= ch.len_utf8();
                chars.next();
            } else {
                break;
            }
        }

        let deleted_text = text[word_start..cursor_pos].to_string();
        let remaining_text = format!("{}{}", &text[..word_start], after_cursor);

        Some((deleted_text, remaining_text, word_start))
    }

    /// Delete word forward from cursor position
    /// Returns (`deleted_text`, `remaining_text`)
    ///
    /// Deletes any non-word characters right of the cursor plus the next
    /// word (alphanumerics and `_`). `cursor_pos` is a byte index; returns
    /// `None` at end of line or when `cursor_pos` is not on a char boundary.
    #[must_use]
    pub fn delete_word_forward(text: &str, cursor_pos: usize) -> Option<(String, String)> {
        if cursor_pos >= text.len() {
            return None;
        }

        // Fix: checked slicing — a mid-character byte index now yields None
        // instead of panicking.
        let before_cursor = text.get(..cursor_pos)?;
        let after_cursor = text.get(cursor_pos..)?;

        // `word_end` is the byte length (relative to the cursor) of the span
        // to delete.
        let mut chars = after_cursor.chars();
        let mut word_end = 0;

        // Skip any non-word chars at the beginning
        while let Some(ch) = chars.next() {
            word_end += ch.len_utf8();
            if ch.is_alphanumeric() || ch == '_' {
                // Found start of word, now consume the rest of it
                for ch in chars.by_ref() {
                    if !ch.is_alphanumeric() && ch != '_' {
                        break;
                    }
                    word_end += ch.len_utf8();
                }
                break;
            }
        }

        let deleted_text = after_cursor[..word_end].to_string();
        let remaining_text = format!("{}{}", before_cursor, &after_cursor[word_end..]);

        Some((deleted_text, remaining_text))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_cursor_token_position() {
        // "SELECT * FROM users WHERE id = 1" lexes into 8 tokens.
        let query = "SELECT * FROM users WHERE id = 1";

        let cases = [
            (0, (0, 8)), // cursor at the very beginning: before token 1
            (3, (1, 8)), // cursor inside SELECT
            (7, (2, 8)), // cursor past SELECT, on `*`
        ];
        for (pos, expected) in cases {
            assert_eq!(TextNavigator::get_cursor_token_position(query, pos), expected);
        }
    }

    #[test]
    fn test_kill_line_backward() {
        let text = "SELECT * FROM users";

        // Killing from the middle splits the line at the cursor.
        let expected = Some(("SELECT *".to_string(), " FROM users".to_string()));
        assert_eq!(TextEditor::kill_line_backward(text, 8), expected);

        // Killing from the start of the line is a no-op.
        assert_eq!(TextEditor::kill_line_backward(text, 0), None);
    }

    #[test]
    fn test_delete_word_backward() {
        // Deleting backward from just after "FROM" removes the word plus its
        // leading space and leaves the cursor at byte offset 8.
        let expected = Some((" FROM".to_string(), "SELECT * users".to_string(), 8));
        assert_eq!(
            TextEditor::delete_word_backward("SELECT * FROM users", 13),
            expected
        );
    }
}