// makepad_widget/jseditor.rs

1use makepad_render::*;
2
3use crate::textbuffer::*;
4use crate::texteditor::*;
5
/// JavaScript code-editor widget: a thin wrapper around the generic
/// `TextEditor` that plugs in the JS tokenizer and auto-formatter.
#[derive(Clone)]
pub struct JSEditor {
    // Underlying generic editor handling rendering, cursors and input;
    // JSEditor supplies the JS-specific token stream on draw.
    pub text_editor: TextEditor,
}
10
11impl JSEditor {
12    pub fn proto(cx: &mut Cx) -> Self {
13        Self {
14            text_editor: TextEditor {
15                folding_depth: 3,
16                ..TextEditor::proto(cx)
17            }
18        }
19    }
20    
21    pub fn handle_js_editor(&mut self, cx: &mut Cx, event: &mut Event, text_buffer: &mut TextBuffer) -> TextEditorEvent {
22        let ce = self.text_editor.handle_text_editor(cx, event, text_buffer);
23        match ce {
24            TextEditorEvent::AutoFormat => {
25                let formatted = JSTokenizer::auto_format(text_buffer).out_lines;
26                self.text_editor.cursors.replace_lines_formatted(formatted, text_buffer);
27                self.text_editor.view.redraw_view_area(cx);
28            },
29            _ => ()
30        }
31        ce
32    }
33    
34    pub fn draw_js_editor(&mut self, cx: &mut Cx, text_buffer: &mut TextBuffer) {
35        if text_buffer.needs_token_chunks() && text_buffer.lines.len() >0 {
36            let mut state = TokenizerState::new(&text_buffer.lines);
37            let mut tokenizer = JSTokenizer::new();
38            let mut pair_stack = Vec::new();
39            loop {
40                let offset = text_buffer.flat_text.len();
41                let token_type = tokenizer.next_token(&mut state, &mut text_buffer.flat_text, &text_buffer.token_chunks);
42                TokenChunk::push_with_pairing(&mut text_buffer.token_chunks, &mut pair_stack, state.next, offset, text_buffer.flat_text.len(), token_type);
43                if token_type == TokenType::Eof {
44                    break
45                }
46            }
47        }
48        
49        if self.text_editor.begin_text_editor(cx, text_buffer).is_err() {return}
50        
51        for (index, token_chunk) in text_buffer.token_chunks.iter_mut().enumerate() {
52            self.text_editor.draw_chunk(cx, index, &text_buffer.flat_text, token_chunk, &text_buffer.messages.cursors);
53        }
54        
55        self.text_editor.end_text_editor(cx, text_buffer);
56    }
57}
58
/// Incremental JavaScript tokenizer. Comment state persists across calls
/// to `next_token` so comments spanning lines are tokenized correctly.
pub struct JSTokenizer {
    // True while inside a `//` comment (terminated by the next newline).
    pub comment_single: bool,
    // Comment nesting depth; 0 means "not currently inside a comment".
    pub comment_depth: usize
}
63
64impl JSTokenizer {
65    pub fn new() -> JSTokenizer {
66        JSTokenizer {
67            comment_single: false,
68            comment_depth: 0
69        }
70    }
71    
    /// Produces the next token from `state`, appending its characters to
    /// `chunk` (the flat text buffer) and returning its `TokenType`.
    /// `token_chunks` holds previously emitted tokens and is consulted to
    /// decide whether `/` starts a regex literal or a division operator.
    /// Comment state (`comment_depth` / `comment_single`) persists across
    /// calls, so a multi-line comment is emitted as a sequence of
    /// CommentMultiBegin / CommentChunk / Newline / ... / CommentMultiEnd.
    pub fn next_token<'a>(&mut self, state: &mut TokenizerState<'a>, chunk: &mut Vec<char>, token_chunks: &Vec<TokenChunk>) -> TokenType {
        // start of this token inside `chunk`
        let start = chunk.len();
        if self.comment_depth >0 { // parse comments
            loop {
                if state.next == '\0' {
                    // EOF inside a comment: force-close it
                    self.comment_depth = 0;
                    return TokenType::CommentChunk
                }
                else if state.next == '*' {
                    chunk.push(state.next);
                    state.advance();
                    if state.next == '/' {
                        // `*/` closes one nesting level of a multi-line comment
                        self.comment_depth -= 1;
                        chunk.push(state.next);
                        state.advance();
                        if self.comment_depth == 0 {
                            return TokenType::CommentMultiEnd
                        }
                    }
                }
                else if state.next == '\n' {
                    if self.comment_single {
                        // a `//` comment ends at the newline
                        self.comment_depth = 0;
                    }
                    // output current line
                    if (chunk.len() - start)>0 {
                        return TokenType::CommentChunk
                    }
                    
                    chunk.push(state.next);
                    state.advance();
                    return TokenType::Newline
                }
                else if state.next == ' ' {
                    // flush accumulated comment text before a run of spaces
                    if (chunk.len() - start)>0 {
                        return TokenType::CommentChunk
                    }
                    while state.next == ' ' {
                        chunk.push(state.next);
                        state.advance();
                    }
                    return TokenType::Whitespace
                }
                else {
                    chunk.push(state.next);
                    state.advance();
                }
            }
        }
        else {
            state.advance_with_cur();
            match state.cur {
                
                '\0' => { // eof insert a terminating space and end
                    chunk.push(' ');
                    return TokenType::Eof
                },
                '\n' => {
                    chunk.push('\n');
                    return TokenType::Newline
                },
                ' ' | '\t' => { // eat as many spaces as possible
                    chunk.push(state.cur);
                    while state.next == ' ' {
                        chunk.push(state.next);
                        state.advance();
                    }
                    return TokenType::Whitespace;
                },
                '/' => { // parse comment or regexp
                    chunk.push(state.cur);
                    if state.next == '/' {
                        chunk.push(state.next);
                        state.advance();
                        self.comment_depth = 1;
                        self.comment_single = true;
                        return TokenType::CommentLine;
                    }
                    else if state.next == '*' { // start parsing a multiline comment
                        chunk.push(state.next);
                        state.advance();
                        self.comment_single = false;
                        self.comment_depth = 1;
                        return TokenType::CommentMultiBegin;
                    }
                    else {
                        // a `/` after one of these tokens begins a regex
                        // literal; otherwise it is a division operator
                        let is_regexp = match TokenChunk::scan_last_token(&token_chunks) {
                            TokenType::ParenOpen | TokenType::Keyword | TokenType::Operator
                                | TokenType::Delimiter | TokenType::Colon | TokenType::Looping => true,
                            _ => false
                        };
                        if is_regexp {
                            while state.next != '\0' && state.next != '\n' {
                                // NOTE(review): operator precedence makes this
                                // `next != '/' || (prev != '\\' && cur == '\\' && next == '/')`,
                                // i.e. consume anything that is not a closing `/`
                                // (with an escape special-case) — confirm intent.
                                if state.next != '/' || state.prev != '\\' && state.cur == '\\' && state.next == '/' {
                                    chunk.push(state.next);
                                    state.advance_with_prev();
                                }
                                else {
                                    chunk.push(state.next);
                                    state.advance();
                                    // lets see what characters we are followed by (regex flags)
                                    while state.next == 'g' || state.next == 'i' || state.next == 'm'
                                        || state.next == 's' || state.next == 'u' || state.next == 'y' {
                                        chunk.push(state.next);
                                        state.advance();
                                    }
                                    return TokenType::Regex;
                                }
                            };
                            // unterminated regex: the line or file ended first
                            return TokenType::Regex;
                        }
                        else if state.next == '=' {
                            chunk.push(state.next);
                            state.advance();
                        }
                        return TokenType::Operator;
                    }
                },
                '"' | '\'' => { // parse string
                    let end_char = state.cur;
                    chunk.push(state.cur);
                    state.prev = '\0';
                    while state.next != '\0' && state.next != '\n' {
                        if state.next == '\\' {
                            Self::parse_js_escape_char(state, chunk);
                        }
                        // NOTE(review): same precedence pattern as the regex
                        // loop above — consume anything that is not an
                        // unescaped closing quote.
                        else if state.next != end_char || state.prev != '\\' && state.cur == '\\' && state.next == end_char {
                            chunk.push(state.next);
                            state.advance_with_prev();
                        }
                        else { // found the end
                            chunk.push(state.next);
                            state.advance();
                            return TokenType::String;
                        }
                    };
                    // unterminated string: the line or file ended first
                    return TokenType::String;
                },
                '0'..='9' => { // try to parse numbers
                    chunk.push(state.cur);
                    Self::parse_js_number_tail(state, chunk);
                    return TokenType::Number;
                },
                ':' => {
                    chunk.push(state.cur);
                    return TokenType::Colon;
                },
                '*' => { // *, *=, or a stray comment close
                    chunk.push(state.cur);
                    if state.next == '=' {
                        chunk.push(state.next);
                        state.advance();
                        return TokenType::Operator;
                    }
                    else if state.next == '/' {
                        // `*/` with no open comment is an error token
                        chunk.push(state.next);
                        state.advance();
                        return TokenType::Unexpected;
                    }
                    else {
                        return TokenType::Operator;
                    }
                },
                '+' => { // +, +=, ++
                    chunk.push(state.cur);
                    if state.next == '=' || state.next == '+' {
                        chunk.push(state.next);
                        state.advance();
                    }
                    return TokenType::Operator;
                },
                '-' => { // -, ->, -=, --
                    chunk.push(state.cur);
                    if state.next == '>' || state.next == '=' || state.next == '-' {
                        chunk.push(state.next);
                        state.advance();
                    }
                    return TokenType::Operator;
                },
                '=' => { // =, =>, ==, ===
                    chunk.push(state.cur);
                    if state.next == '>' {
                        chunk.push(state.next);
                        state.advance();
                    }
                    else if state.next == '=' {
                        chunk.push(state.next);
                        state.advance();
                        if state.next == '=' {
                            chunk.push(state.next);
                            state.advance();
                        }
                    }
                    
                    return TokenType::Operator;
                },
                '.' => {
                    chunk.push(state.cur);
                    if state.next == '.' {
                        // NOTE(review): this consumes only two dots as the
                        // Splat token while JS spread is `...` — confirm the
                        // third dot is handled acceptably downstream.
                        chunk.push(state.next);
                        state.advance();
                        return TokenType::Splat;
                    }
                    return TokenType::Operator;
                },
                ';' => {
                    chunk.push(state.cur);
                    // NOTE(review): merging `;.` into one delimiter looks
                    // unusual for JS — possibly carried over from another
                    // language's tokenizer; confirm.
                    if state.next == '.' {
                        chunk.push(state.next);
                        state.advance();
                    }
                    return TokenType::Delimiter;
                },
                '&' => { // &, &&, &=
                    chunk.push(state.cur);
                    if state.next == '&' || state.next == '=' {
                        chunk.push(state.next);
                        state.advance();
                    }
                    return TokenType::Operator;
                },
                '|' => { // |, ||, |=
                    chunk.push(state.cur);
                    if state.next == '|' || state.next == '=' {
                        chunk.push(state.next);
                        state.advance();
                    }
                    return TokenType::Operator;
                },
                '!' => { // !, !=, !==
                    chunk.push(state.cur);
                    if state.next == '=' {
                        chunk.push(state.next);
                        state.advance();
                        if state.next == '=' {
                            chunk.push(state.next);
                            state.advance();
                        }
                    }
                    return TokenType::Operator;
                },
                '<' => { // <, <=, <<
                    chunk.push(state.cur);
                    if state.next == '=' || state.next == '<' {
                        chunk.push(state.next);
                        state.advance();
                    }
                    return TokenType::Operator;
                },
                '>' => { // >, >=, >>
                    chunk.push(state.cur);
                    if state.next == '=' || state.next == '>' {
                        chunk.push(state.next);
                        state.advance();
                    }
                    return TokenType::Operator;
                },
                ',' => {
                    chunk.push(state.cur);
                    return TokenType::Delimiter;
                },
                '(' | '{' | '[' => {
                    chunk.push(state.cur);
                    return TokenType::ParenOpen;
                },
                ')' | '}' | ']' => {
                    chunk.push(state.cur);
                    return TokenType::ParenClose;
                },
                '_' | '$' => { // identifiers may start with _ or $
                    chunk.push(state.cur);
                    Self::parse_js_ident_tail(state, chunk);
                    if state.next == '(' {
                        return TokenType::Call;
                    }
                    else {
                        return TokenType::Identifier;
                    }
                },
                'a'..='z' | 'A'..='Z' => { // try to parse keywords or identifiers
                    chunk.push(state.cur);
                    
                    let keyword_type = Self::parse_js_keyword(state, chunk, token_chunks);
                    
                    // if more identifier characters follow, the keyword match
                    // was only a prefix of a longer identifier
                    if Self::parse_js_ident_tail(state, chunk) {
                        if state.next == '(' {
                            return TokenType::Call;
                        }
                        else {
                            return TokenType::Identifier;
                        }
                    }
                    else {
                        return keyword_type
                    }
                },
                _ => { // any other character is classed as an operator
                    chunk.push(state.cur);
                    return TokenType::Operator;
                }
            }
        }
    }
376    
377    fn parse_js_ident_tail<'a>(state: &mut TokenizerState<'a>, chunk: &mut Vec<char>) -> bool {
378        let mut ret = false;
379        while state.next_is_digit() || state.next_is_letter() || state.next == '_' || state.next == '$' {
380            ret = true;
381            chunk.push(state.next);
382            state.advance();
383        }
384        ret
385    }
386    
387    
388    fn parse_js_escape_char<'a>(state: &mut TokenizerState<'a>, chunk: &mut Vec<char>) -> bool {
389        if state.next == '\\' {
390            chunk.push(state.next);
391            state.advance();
392            if state.next == 'u' {
393                chunk.push(state.next);
394                state.advance();
395                // ! TODO LIMIT THIS TO MAX UNICODE
396                while state.next_is_hex() {
397                    chunk.push(state.next);
398                    state.advance();
399                }
400            }
401            else if state.next != '\n' && state.next != '\0' {
402                // its a single char escape TODO limit this to valid escape chars
403                chunk.push(state.next);
404                state.advance();
405            }
406            return true
407        }
408        return false
409    }
410    fn parse_js_number_tail<'a>(state: &mut TokenizerState<'a>, chunk: &mut Vec<char>) {
411        if state.next == 'x' { // parse a hex number
412            chunk.push(state.next);
413            state.advance();
414            while state.next_is_hex() || state.next == '_' {
415                chunk.push(state.next);
416                state.advance();
417            }
418        }
419        else if state.next == 'b' { // parse a binary
420            chunk.push(state.next);
421            state.advance();
422            while state.next == '0' || state.next == '1' || state.next == '_' {
423                chunk.push(state.next);
424                state.advance();
425            }
426        }
427        else {
428            while state.next_is_digit() || state.next == '_' {
429                chunk.push(state.next);
430                state.advance();
431            }
432            if state.next == '.' {
433                chunk.push(state.next);
434                state.advance();
435                // again eat as many numbers as possible
436                while state.next_is_digit() || state.next == '_' {
437                    chunk.push(state.next);
438                    state.advance();
439                }
440            }
441        }
442    }
443    
    /// Tries to match a JS keyword whose first letter is `state.cur` (already
    /// pushed into `chunk` by the caller). `state.keyword` consumes and
    /// appends the given suffix only when it matches, so the nested calls
    /// below walk a prefix tree (e.g. 'c' -> "o" -> "nst"/"ntinue"); order
    /// of the nested calls is therefore significant. Returns the keyword's
    /// token class, or Call/Identifier when nothing matches. The caller
    /// still checks for a trailing identifier tail, so a prefix match here
    /// can be overridden to Identifier there.
    fn parse_js_keyword<'a>(state: &mut TokenizerState<'a>, chunk: &mut Vec<char>, _token_chunks: &Vec<TokenChunk>) -> TokenType {
        match state.cur {
            'b' => {
                if state.keyword(chunk, "reak") {
                    return TokenType::Flow
                }
            },
            'c' => {
                if state.keyword(chunk, "ase") {
                    return TokenType::Flow
                }
                if state.keyword(chunk, "lass") {
                    return TokenType::Keyword
                }
                if state.keyword(chunk, "o") { // const / continue
                    if state.keyword(chunk, "nst") {
                        return TokenType::Keyword
                    }
                    if state.keyword(chunk, "ntinue") {
                        return TokenType::Flow
                    }
                }
            },
            'd' => {
                if state.keyword(chunk, "o") {
                    return TokenType::Looping
                }
                if state.keyword(chunk, "e") { // debugger / default / delete
                    if state.keyword(chunk, "bugger") {
                        return TokenType::Flow
                    }
                    if state.keyword(chunk, "fault") {
                        return TokenType::Flow
                    }
                    if state.keyword(chunk, "lete") {
                        return TokenType::Keyword
                    }
                }
            },
            'e' => {
                if state.keyword(chunk, "lse") {
                    return TokenType::Flow
                }
                if state.keyword(chunk, "num") {
                    return TokenType::Keyword
                }
                // NOTE(review): "extern" is not a JS keyword, and a bare
                // "exte" prefix returns TypeDef — possibly carried over from
                // another language's tokenizer; confirm.
                if state.keyword(chunk, "xte") {
                    if state.keyword(chunk, "rn") {
                        return TokenType::Keyword
                    }
                    if state.keyword(chunk, "nds") {
                        return TokenType::Keyword
                    }
                    return TokenType::TypeDef
                }
            },
            'f' => {
                if state.keyword(chunk, "alse") {
                    return TokenType::Bool
                }
                // NOTE(review): "finally" is classified as Fn rather than
                // Flow — looks like a copy/paste; confirm intended class.
                if state.keyword(chunk, "inally") {
                    return TokenType::Fn
                }
                if state.keyword(chunk, "or") {
                    return TokenType::Looping;
                }
                if state.keyword(chunk, "unction") {
                    return TokenType::Fn
                }
            },
            'g' => {
                if state.keyword(chunk, "et") {
                    return TokenType::Keyword
                }
            },
            'i' => {
                if state.keyword(chunk, "f") {
                    return TokenType::Flow
                }
                else if state.keyword(chunk, "mport") {
                    return TokenType::TypeDef
                }
                else if state.keyword(chunk, "in") { // in / instanceof
                    if state.next_is_letter() || state.next_is_digit() {
                        if state.keyword(chunk, "stanceof") {
                            return TokenType::BuiltinType
                        }
                    }
                    else {
                        return TokenType::Keyword
                    }
                }
            },
            'l' => {
                if state.keyword(chunk, "et") {
                    return TokenType::Keyword
                }
            },
            'n' => {
                if state.keyword(chunk, "ew") {
                    return TokenType::Keyword
                }
                if state.keyword(chunk, "ull") {
                    return TokenType::Keyword
                }
            },
            'r' => {
                if state.keyword(chunk, "eturn") {
                    return TokenType::Flow
                }
            },
            's' => {
                if state.keyword(chunk, "uper") {
                    return TokenType::Keyword
                }
                if state.keyword(chunk, "witch") {
                    return TokenType::Flow
                }
                if state.keyword(chunk, "et") {
                    return TokenType::Keyword
                }
            },
            't' => {
                if state.keyword(chunk, "r") { // try / true
                    if state.keyword(chunk, "y") {
                        return TokenType::Keyword
                    }
                    if state.keyword(chunk, "ue") {
                        return TokenType::Bool
                    }
                }
                if state.keyword(chunk, "ypeof") {
                    return TokenType::Keyword
                }
                if state.keyword(chunk, "h") { // this / throw
                    if state.keyword(chunk, "is") {
                        return TokenType::Keyword
                    }
                    if state.keyword(chunk, "row") {
                        return TokenType::Flow
                    }
                }
            },
            'v' => { // var / void
                if state.keyword(chunk, "ar") {
                    return TokenType::Keyword
                }
                if state.keyword(chunk, "oid") {
                    return TokenType::Keyword
                }
            },
            'w' => { // while / with
                if state.keyword(chunk, "hile") {
                    return TokenType::Looping
                }
                if state.keyword(chunk, "ith") {
                    return TokenType::Keyword
                }
            },
            'u' => { // undefined
                if state.keyword(chunk, "ndefined") {
                    return TokenType::Keyword
                }
            },
            'y' => { // yield
                if state.keyword(chunk, "ield") {
                    return TokenType::Flow
                }
            },
            'N' => { // NaN
                if state.keyword(chunk, "aN") {
                    return TokenType::Keyword
                }
            },
            'I' => { // Infinity
                if state.keyword(chunk, "nfinity") {
                    return TokenType::Keyword
                }
            },
            _ => {}
        }
        // no keyword matched: classify as a call when `(` follows,
        // otherwise as a plain identifier
        if state.next == '(' {
            return TokenType::Call;
        }
        else {
            return TokenType::Identifier;
        }
    }
632    
633    // js autoformatter. nothing fancy.
634    pub fn auto_format(text_buffer: &mut TextBuffer) -> FormatOutput {
635        
636        let extra_spacey = false;
637        let pre_spacey = true;
638        let mut out = FormatOutput::new();
639        let mut tp = TokenParser::new(&text_buffer.flat_text, &text_buffer.token_chunks);
640        
641        struct ParenStack {
642            expecting_newlines: bool,
643            expected_indent: usize
644        }
645        
646        let mut paren_stack: Vec<ParenStack> = Vec::new();
647        
648        paren_stack.push(ParenStack {
649            expecting_newlines: true,
650            expected_indent: 0,
651        });
652        
653        out.new_line();
654        
655        let mut first_on_line = true;
656        let mut first_after_open = false;
657        let mut expected_indent = 0;
658        let mut is_unary_operator = true;
659        let mut in_multline_comment = false;
660        let mut in_singleline_comment = false;
661        let mut in_multline_string = false;
662        while tp.advance() {
663            
664            match tp.cur_type() {
665                TokenType::Whitespace => {
666                    if in_singleline_comment || in_multline_comment || in_multline_string {
667                        out.extend(tp.cur_chunk());
668                    }
669                    else if !first_on_line && tp.next_type() != TokenType::Newline
670                        && tp.prev_type() != TokenType::ParenOpen
671                        && tp.prev_type() != TokenType::Namespace
672                        && tp.prev_type() != TokenType::Operator
673                        && tp.prev_type() != TokenType::Delimiter {
674                        out.add_space();
675                    }
676                },
677                TokenType::Newline => {
678                    in_singleline_comment = false;
679                    //paren_stack.last_mut().unwrap().angle_counter = 0;
680                    if first_on_line && !in_singleline_comment && !in_multline_comment && !in_multline_string{
681                        out.indent(expected_indent);
682                    }
683                    else {
684                        out.strip_space();
685                    }
686                    if first_after_open {
687                        paren_stack.last_mut().unwrap().expecting_newlines = true;
688                        expected_indent += 4;
689                    }
690                    if paren_stack.last_mut().unwrap().expecting_newlines { // only insert when expecting newlines
691                        first_after_open = false;
692                        out.new_line();
693                        first_on_line = true;
694                    }
695                },
696                TokenType::Eof => {break},
697                TokenType::ParenOpen => {
698                    if first_on_line {
699                        out.indent(expected_indent);
700                    }
701                    
702                    paren_stack.push(ParenStack {
703                        expecting_newlines: false,
704                        expected_indent: expected_indent,
705                    });
706                    
707                    first_after_open = true;
708                    is_unary_operator = true;
709                    
710                    let is_curly = tp.cur_char() == '{';
711                    if tp.cur_char() == '(' && (
712                        tp.prev_type() == TokenType::Flow || tp.prev_type() == TokenType::Looping || tp.prev_type() == TokenType::Keyword
713                    ) {
714                        out.add_space();
715                    }
716                    if pre_spacey && is_curly && !first_on_line {
717                        if tp.prev_char() != ' ' && tp.prev_char() != '{' && tp.prev_char() != '['
718                            && tp.prev_char() != '(' && tp.prev_char() != ':' {
719                            out.add_space();
720                        }
721                    }
722                    else if !pre_spacey {
723                        out.strip_space();
724                    }
725                    
726                    out.extend(tp.cur_chunk());
727                    
728                    if extra_spacey && is_curly && tp.next_type() != TokenType::Newline {
729                        out.add_space();
730                    }
731                    first_on_line = false;
732                },
733                TokenType::ParenClose => {
734                    
735                    out.strip_space();
736                    
737                    let expecting_newlines = paren_stack.last().unwrap().expecting_newlines;
738                    
739                    if extra_spacey && tp.cur_char() == '}' && !expecting_newlines {
740                        out.add_space();
741                    }
742                    
743                    first_after_open = false;
744                    if !first_on_line && expecting_newlines { // we are expecting newlines!
745                        out.new_line();
746                        first_on_line = true;
747                    }
748                    
749                    expected_indent = if paren_stack.len()>1 {
750                        paren_stack.pop().unwrap().expected_indent
751                    }
752                    else {
753                        0
754                    };
755                    if first_on_line {
756                        first_on_line = false;
757                        out.indent(expected_indent);
758                    }
759                    if tp.cur_char() == '}' {
760                        is_unary_operator = true;
761                    }
762                    else {
763                        is_unary_operator = false;
764                    }
765                    
766                    out.extend(tp.cur_chunk());
767                },
768                TokenType::CommentLine => {
769                    in_singleline_comment = true;
770                    if first_on_line {
771                        first_on_line = false;
772                        out.indent(expected_indent);
773                    }
774                    else {
775                        out.add_space();
776                    }
777                    out.extend(tp.cur_chunk());
778                },
779                TokenType::CommentMultiBegin => {
780                    in_multline_comment = true;
781                    if first_on_line {
782                        first_on_line = false;
783                        out.indent(expected_indent);
784                    }
785                    out.extend(tp.cur_chunk());
786                },
787                TokenType::CommentChunk => {
788                    if first_on_line {
789                        first_on_line = false;
790                    }
791                    out.extend(tp.cur_chunk());
792                },
793                TokenType::CommentMultiEnd => {
794                    in_multline_comment = false;
795                    if first_on_line {
796                        first_on_line = false;
797                    }
798                    out.extend(tp.cur_chunk());
799                },
800                TokenType::StringMultiBegin => {
801                    in_multline_string = true;
802                    if first_on_line {
803                        first_on_line = false;
804                        out.indent(expected_indent);
805                    }
806                    out.extend(tp.cur_chunk());
807                },
808                TokenType::StringChunk => {
809                    if first_on_line {
810                        first_on_line = false;
811                    }
812                    out.extend(tp.cur_chunk());
813                },
814                TokenType::StringMultiEnd => {
815                    in_multline_string = false;
816                    if first_on_line {
817                        first_on_line = false;
818                    }
819                    out.extend(tp.cur_chunk());
820                },
821                TokenType::Colon => {
822                    is_unary_operator = true;
823                    out.strip_space();
824                    out.extend(tp.cur_chunk());
825                    if tp.next_type() != TokenType::Whitespace && tp.next_type() != TokenType::Newline {
826                        out.add_space();
827                    }
828                },
829                TokenType::Delimiter => {
830                    if first_on_line {
831                        first_on_line = false;
832                        out.indent(expected_indent);
833                    }
834                    else {
835                        out.strip_space();
836                    }
837                    out.extend(tp.cur_chunk());
838                    if paren_stack.last().unwrap().expecting_newlines == true
839                        && tp.next_type() != TokenType::Newline { // we are expecting newlines!
840                        // scan forward to see if we really need a newline.
841                        for next in (tp.index + 1)..tp.tokens.len() {
842                            if tp.tokens[next].token_type == TokenType::Newline {
843                                break;
844                            }
845                            if !tp.tokens[next].token_type.should_ignore() {
846                                out.new_line();
847                                first_on_line = true;
848                                break;
849                            }
850                        }
851                    }
852                    else if tp.next_type() != TokenType::Newline {
853                        out.add_space();
854                    }
855                    is_unary_operator = true;
856                },
857                TokenType::Operator => {
858                    
859                    if first_on_line {
860                        first_on_line = false;
861                        let extra_indent = if is_unary_operator {0}else {4};
862                        out.indent(expected_indent + extra_indent);
863                    }
864                    
865                    if (is_unary_operator && (tp.cur_char() == '-' || tp.cur_char() == '*' || tp.cur_char() == '&'))
866                        || tp.cur_char() == '.' || tp.cur_char() == '!' {
867                        out.extend(tp.cur_chunk());
868                    }
869                    else {
870                        if tp.cur_char() == '?' {
871                            out.strip_space();
872                        }
873                        else {
874                            out.add_space();
875                        }
876                        out.extend(tp.cur_chunk());
877                        if tp.next_type() != TokenType::Newline {
878                            out.add_space();
879                        }
880                    }
881                    
882                    is_unary_operator = true;
883                },
884                // these are followed by unary operators (some)
885                TokenType::TypeDef | TokenType::Fn | TokenType::Hash | TokenType::Splat | TokenType::Namespace |
886                TokenType::Keyword | TokenType::Flow | TokenType::Looping => {
887                    is_unary_operator = true;
888                    
889                    first_after_open = false;
890                    if first_on_line {
891                        first_on_line = false;
892                        out.indent(expected_indent);
893                    }
894                    out.extend(tp.cur_chunk());
895                },
896                // these are followeable by non unary operators
897                TokenType::Identifier | TokenType::BuiltinType | TokenType::TypeName | TokenType::ThemeName |
898                TokenType::Call | TokenType::String | TokenType::Regex | TokenType::Number |
899                TokenType::Bool | TokenType::Unexpected | TokenType::Error | TokenType::Warning | TokenType::Defocus => {
900                    is_unary_operator = false;
901                    
902                    first_after_open = false;
903                    if first_on_line {
904                        first_on_line = false;
905                        out.indent(expected_indent);
906                    }
907                    out.extend(tp.cur_chunk());
908                    
909                },
910            }
911        };
912        out
913    }
914}
915