use std::fmt::{Display, Formatter};
use std::iter::Peekable;
use std::slice::Iter;
use crate::utils::get_line_col_char;
use crate::tokenize::{TokType, TokenSpan, Tokens};

#[derive(PartialEq, Debug, Clone)]
pub enum UnaryOperator {
    Plus,
    Minus,
}

impl Display for UnaryOperator {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            UnaryOperator::Plus => write!(f, "+"),
            UnaryOperator::Minus => write!(f, "-"),
        }
    }
}

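// The "wsc" (whitespace and comments) tuples below hold the exact runs of
// whitespace/comment source text captured around each syntactic element, in
// positional order, so that `to_string` can reproduce the document
// byte-for-byte. For a key/value pair the slots are: (after key, after ':',
// after value, after ',' when a trailing comma is present).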
type Wsc = String;

#[derive(PartialEq, Debug, Clone)]
pub struct JSONTextContext {
    pub wsc: (Wsc, Wsc),
}

#[derive(PartialEq, Debug, Clone)]
pub struct JSONObjectContext {
    pub wsc: (Wsc,),
}

#[derive(PartialEq, Debug, Clone)]
pub struct KeyValuePairContext {
    pub wsc: (Wsc, Wsc, Wsc, Option<Wsc>),
}

#[derive(PartialEq, Debug, Clone)]
pub struct JSONArrayContext {
    pub wsc: (Wsc,),
}

#[derive(PartialEq, Debug, Clone)]
pub struct ArrayValueContext {
    pub wsc: (Wsc, Option<Wsc>),
}

#[derive(PartialEq, Debug, Clone)]
pub struct JSONKeyValuePair {
    pub key: JSONValue,
    pub value: JSONValue,
    pub context: Option<KeyValuePairContext>,
}

#[derive(PartialEq, Debug, Clone)]
pub struct JSONArrayValue {
    pub value: JSONValue,
    pub context: Option<ArrayValueContext>,
}

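/// A JSON5 value. Numeric and string variants keep the original lexeme as a
/// `String` (rather than a parsed number) so round-tripping is lossless.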
#[derive(PartialEq, Debug, Clone)]
pub enum JSONValue {
    JSONObject {
        key_value_pairs: Vec<JSONKeyValuePair>,
        context: Option<JSONObjectContext>,
    },
    JSONArray {
        values: Vec<JSONArrayValue>,
        context: Option<JSONArrayContext>,
    },
    Integer(String),
    Float(String),
    Exponent(String),
    Null,
    Infinity,
    NaN,
    Hexadecimal(String),
    Bool(bool),
    DoubleQuotedString(String),
    SingleQuotedString(String),
    Unary { operator: UnaryOperator, value: Box<JSONValue> },
    Identifier(String),
}

#[derive(PartialEq, Debug)]
pub struct JSONText {
    pub value: JSONValue,
    pub context: Option<JSONTextContext>,
}

impl JSONKeyValuePair {
    fn to_string(&self) -> String {
        match &self.context {
            None => {
                format!("{}:{}", self.key.to_string(), self.value.to_string())
            }
            Some(ctx) => {
                match &ctx.wsc.3 {
                    None => {
                        format!("{}{}:{}{}{}", self.key.to_string(), ctx.wsc.0, ctx.wsc.1, self.value.to_string(), ctx.wsc.2)
                    }
                    Some(trailing_wsc) => {
                        format!("{}{}:{}{}{},{}", self.key.to_string(), ctx.wsc.0, ctx.wsc.1, self.value.to_string(), ctx.wsc.2, trailing_wsc)
                    }
                }
            }
        }
    }
}

impl JSONText {
    fn to_string(&self) -> String {
        match &self.context {
            None => self.value.to_string(),
            Some(ctx) => {
                format!("{}{}{}", ctx.wsc.0, self.value.to_string(), ctx.wsc.1)
            }
        }
    }
}

impl JSONArrayValue {
    fn to_string(&self) -> String {
        match &self.context {
            None => self.value.to_string(),
            Some(ctx) => {
                match &ctx.wsc.1 {
                    None => {
                        format!("{}{}", self.value.to_string(), ctx.wsc.0)
                    }
                    Some(trailing_whitespace) => {
                        format!("{}{},{}", self.value.to_string(), ctx.wsc.0, trailing_whitespace)
                    }
                }
            }
        }
    }
}

impl JSONValue {
    fn to_string(&self) -> String {
        match self {
            JSONValue::JSONObject { key_value_pairs, context } => {
                match context {
                    None => {
                        let mut s = String::from("{");
                        for kvp in key_value_pairs {
                            s.push_str(kvp.to_string().as_str());
                        }
                        s.push('}');
                        s
                    }
                    Some(ctx) => {
                        let mut s = format!("{{{}", ctx.wsc.0);
                        for kvp in key_value_pairs {
                            s.push_str(kvp.to_string().as_str());
                        }
                        s.push('}');
                        s
                    }
                }
            }
            JSONValue::JSONArray { values, context } => {
                match context {
                    None => {
                        let mut s = String::from('[');
                        for array_value in values {
                            s.push_str(array_value.to_string().as_str());
                        }
                        s.push(']');
                        s
                    }
                    Some(ctx) => {
                        let mut s = format!("[{}", ctx.wsc.0);
                        for array_value in values {
                            s.push_str(array_value.to_string().as_str());
                        }
                        s.push(']');
                        s
                    }
                }
            }
            JSONValue::Integer(s) => s.clone(),
            JSONValue::Float(s) => s.clone(),
            JSONValue::Exponent(s) => s.clone(),
            JSONValue::Null => String::from("null"),
            JSONValue::Infinity => String::from("Infinity"),
            JSONValue::NaN => String::from("NaN"),
            JSONValue::Hexadecimal(s) => s.clone(),
            JSONValue::Bool(b) => b.to_string(),
            JSONValue::DoubleQuotedString(s) => format!("\"{s}\""),
            JSONValue::SingleQuotedString(s) => format!("'{s}'"),
            JSONValue::Unary { operator, value } => format!("{operator}{value}"),
            JSONValue::Identifier(s) => s.clone(),
        }
    }
}

impl Display for JSONValue {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.to_string())
    }
}

impl Display for JSONText {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.to_string())
    }
}

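/// A parse failure, carrying the byte `index` into the source along with the
/// derived, human-oriented `lineno`, `colno`, and `char_index` position.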
#[derive(Debug, PartialEq)]
pub struct ParsingError {
    pub index: usize,
    pub message: String,
    pub lineno: usize,
    pub colno: usize,
    pub char_index: usize,
}
328
329
330
331impl Display for ParsingError {
332 fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
333 write!(f, "ParsingError: {}: line {} column {} (char {})", self.message, self.lineno, self.colno, self.char_index)
334 }
335}
336
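// A recursive-descent parser over the token spans produced by the tokenizer.
// `lookahead` tracks the most recently consumed span; `current_depth` and
// `max_depth` guard against stack overflow on deeply nested documents.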
struct JSON5Parser<'toks, 'input> {
    source: &'input str,
    source_tokens: Peekable<Iter<'toks, TokenSpan>>,
    lookahead: Option<&'toks TokenSpan>,
    current_depth: usize,
    max_depth: usize,
}

impl<'toks, 'input> JSON5Parser<'toks, 'input> {
    fn new(tokens: &'toks Tokens<'input>) -> Self {
        use crate::utils::MAX_DEPTH;
        JSON5Parser { source_tokens: tokens.tok_spans.iter().peekable(), lookahead: None, source: tokens.source, current_depth: 0, max_depth: MAX_DEPTH }
    }

    fn with_max_depth(tokens: &'toks Tokens<'input>, max_depth: usize) -> Self {
        JSON5Parser { source_tokens: tokens.tok_spans.iter().peekable(), lookahead: None, source: tokens.source, current_depth: 0, max_depth }
    }

    fn advance(&mut self) -> Option<&'toks TokenSpan> {
        match self.source_tokens.next() {
            None => {
                self.lookahead = None;
                None
            }
            Some(span) => {
                self.lookahead = Some(span);
                self.lookahead
            }
        }
    }

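    // A `TokenSpan` stores the start byte offset in field .0, the `TokType`
    // in field .1, and the end byte offset in field .2, so slicing the source
    // with the span bounds recovers the token's original lexeme.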
    #[inline]
    fn get_tok_source(&self, span: &'toks TokenSpan) -> &'input str {
        &self.source[span.0..span.2]
    }

    fn peek(&mut self) -> Option<&'toks TokenSpan> {
        // Peeking an `Iter` yields `&&TokenSpan`; copy out the inner reference.
        self.source_tokens.peek().copied()
    }

    fn position(&mut self) -> usize {
        match self.peek() {
            None => {
                match self.lookahead {
                    None => 0,
                    Some(span) => span.2,
                }
            }
            Some(span) => span.0,
        }
    }

    fn make_error(&self, message: String, index: usize) -> ParsingError {
        let (lineno, colno, char_index) = get_line_col_char(self.source, index);
        ParsingError {
            index,
            message,
            lineno,
            colno,
            char_index,
        }
    }

    fn check_and_consume(&mut self, types: Vec<TokType>) -> Option<&'toks TokenSpan> {
        let next_tok = self.peek()?;
        for toktype in types {
            if next_tok.1 == toktype {
                return self.advance();
            }
        }
        None
    }

    #[inline]
    fn check_and_consume_with_source(&mut self, types: Vec<TokType>) -> Option<(&'toks TokenSpan, &'input str)> {
        let tok = self.check_and_consume(types)?;
        let source = self.get_tok_source(tok);
        Some((tok, source))
    }

    fn parse_key(&mut self) -> Result<JSONValue, ParsingError> {
        match self.check_and_consume_with_source(vec![TokType::Name, TokType::DoubleQuotedString, TokType::SingleQuotedString]) {
            None => {
                match self.peek() {
                    None => {
                        let idx = self.position();
                        Err(self.make_error("Unexpected EOF. Was expecting MemberName at".to_string(), idx))
                    }
                    Some(span) => {
                        let src = self.get_tok_source(span);
                        Err(self.make_error(format!("Invalid token for unquoted key ({}, {:?}) at", span.2, src), span.0))
                    }
                }
            },
            Some((span, lexeme)) => {
                match span.1 {
                    TokType::DoubleQuotedString => {
                        Ok(JSONValue::DoubleQuotedString(lexeme[1..lexeme.len() - 1].to_string()))
                    },
                    TokType::SingleQuotedString => {
                        Ok(JSONValue::SingleQuotedString(lexeme[1..lexeme.len() - 1].to_string()))
                    }
                    TokType::Name => {
                        Ok(JSONValue::Identifier(lexeme.to_string()))
                    }
                    _ => unreachable!("Programming error. Please report this as a bug")
                }
            }
        }
    }

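    // Object grammar: '{' wsc (key wsc ':' wsc value wsc (',' wsc)?)* '}'.
    // Each whitespace/comment run is captured into the matching slot of the
    // pair's `KeyValuePairContext`.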
    fn parse_object(&mut self) -> Result<JSONValue, ParsingError> {
        use crate::tokenize::TokType::*;
        let mut kvps: Vec<JSONKeyValuePair> = Vec::new();
        let leading_wsc = &self.consume_whitespace_and_comments();
        loop {
            match self.check_and_consume(vec![RightBrace]) {
                None => {
                    let key = self.parse_key()?;
                    let wsc_0 = self.consume_whitespace_and_comments();
                    match self.check_and_consume(vec![Colon]) {
                        None => {
                            let idx = self.position();
                            return Err(self.make_error("Expecting ':' delimiter".to_string(), idx))
                        }
                        Some(_) => {
                            let wsc_1 = self.consume_whitespace_and_comments();
                            let val = self.parse_value()?;
                            let wsc_2 = self.consume_whitespace_and_comments();
                            match self.check_and_consume(vec![Comma]) {
                                None => {
                                    let context = KeyValuePairContext { wsc: (
                                        self.collect_wsc_vec_to_string(&wsc_0),
                                        self.collect_wsc_vec_to_string(&wsc_1),
                                        self.collect_wsc_vec_to_string(&wsc_2),
                                        None,
                                    ) };
                                    let kvp = JSONKeyValuePair { key, value: val, context: Some(context) };
                                    kvps.push(kvp);
                                    match self.check_and_consume(vec![RightBrace]) {
                                        None => {
                                            let idx = self.position();
                                            return Err(self.make_error("Expecting '}' at end of object".to_string(), idx))
                                        },
                                        Some(_) => {
                                            break Ok(JSONValue::JSONObject { key_value_pairs: kvps, context: Some(JSONObjectContext { wsc: (self.collect_wsc_vec_to_string(leading_wsc),) }) })
                                        }
                                    }
                                }
                                Some(_) => {
                                    let wsc_3 = self.consume_whitespace_and_comments();
                                    let context = KeyValuePairContext { wsc: (
                                        self.collect_wsc_vec_to_string(&wsc_0),
                                        self.collect_wsc_vec_to_string(&wsc_1),
                                        self.collect_wsc_vec_to_string(&wsc_2),
                                        Some(self.collect_wsc_vec_to_string(&wsc_3)),
                                    ) };
                                    let kvp = JSONKeyValuePair { key, value: val, context: Some(context) };
                                    kvps.push(kvp);
                                    continue
                                }
                            }
                        }
                    }
                }
                Some(_) => {
                    break Ok(JSONValue::JSONObject { key_value_pairs: kvps, context: Some(JSONObjectContext { wsc: (self.collect_wsc_vec_to_string(leading_wsc),) }) })
                }
            }
        }
    }

    fn collect_wsc_vec_to_string(&self, wsc: &[&'toks TokenSpan]) -> String {
        if wsc.is_empty() {
            return String::with_capacity(0);
        }
        let first = wsc.first().unwrap();
        if wsc.len() == 1 {
            self.get_tok_source(first).to_string()
        } else {
            let last = wsc.last().unwrap();
            let mut buff = String::with_capacity(last.2 - first.0);
            for span in wsc {
                let src = self.get_tok_source(span);
                buff.push_str(src);
            }
            buff
        }
    }

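    // Array grammar: '[' wsc (value wsc (',' wsc)?)* ']'.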
    fn parse_array(&mut self) -> Result<JSONValue, ParsingError> {
        use crate::tokenize::TokType::*;
        let mut values: Vec<JSONArrayValue> = Vec::new();
        let leading_wsc = self.consume_whitespace_and_comments();
        loop {
            match self.check_and_consume(vec![RightBracket]) {
                None => {
                    let val = self.parse_value()?;
                    let wsc_0 = self.consume_whitespace_and_comments();
                    match self.check_and_consume(vec![Comma]) {
                        None => {
                            let array_val_context = ArrayValueContext { wsc: (self.collect_wsc_vec_to_string(&wsc_0), None) };
                            let array_val = JSONArrayValue { value: val, context: Some(array_val_context) };
                            values.push(array_val);
                            match self.check_and_consume(vec![RightBracket]) {
                                None => {
                                    let idx = self.position();
                                    return Err(self.make_error("Expecting ']' at end of array".to_string(), idx))
                                },
                                Some(_) => {
                                    break Ok(JSONValue::JSONArray { values, context: Some(JSONArrayContext { wsc: (self.collect_wsc_vec_to_string(&leading_wsc),) }) })
                                }
                            }
                        }
                        Some(_) => {
                            let wsc_1 = self.consume_whitespace_and_comments();
                            let array_val_context = ArrayValueContext { wsc: (self.collect_wsc_vec_to_string(&wsc_0), Some(self.collect_wsc_vec_to_string(&wsc_1))) };
                            let array_val = JSONArrayValue { value: val, context: Some(array_val_context) };
                            values.push(array_val);
                            continue
                        }
                    }
                }
                Some(_) => {
                    break Ok(JSONValue::JSONArray { values, context: Some(JSONArrayContext { wsc: (self.collect_wsc_vec_to_string(&leading_wsc),) }) })
                }
            }
        }
    }

    fn parse_primary(&mut self) -> Result<JSONValue, ParsingError> {
        let span = self.advance().unwrap();
        match &span.1 {
            TokType::Integer => Ok(JSONValue::Integer(self.get_tok_source(span).to_string())),
            TokType::Float => Ok(JSONValue::Float(self.get_tok_source(span).to_string())),
            TokType::Exponent => Ok(JSONValue::Exponent(self.get_tok_source(span).to_string())),
            TokType::SingleQuotedString => {
                let lexeme = self.get_tok_source(span);
                Ok(JSONValue::SingleQuotedString(lexeme[1..lexeme.len() - 1].to_string()))
            },
            TokType::DoubleQuotedString => {
                let lexeme = self.get_tok_source(span);
                Ok(JSONValue::DoubleQuotedString(lexeme[1..lexeme.len() - 1].to_string()))
            },
            TokType::True => Ok(JSONValue::Bool(true)),
            TokType::False => Ok(JSONValue::Bool(false)),
            TokType::Null => Ok(JSONValue::Null),
            TokType::Infinity => Ok(JSONValue::Infinity),
            TokType::Nan => Ok(JSONValue::NaN),
            TokType::Hexadecimal => Ok(JSONValue::Hexadecimal(self.get_tok_source(span).to_string())),
            TokType::EOF => {
                match self.position() {
                    0 => Err(self.make_error("Unexpected EOF. Was expecting value.".to_string(), 0)),
                    pos => Err(self.make_error("Unexpected EOF".to_string(), pos)),
                }
            },
            t => Err(self.make_error(format!("Unexpected token of type {:?}: {:?}", t, self.get_tok_source(span)), span.0)),
        }
    }

    fn parse_unary(&mut self) -> Result<JSONValue, ParsingError> {
        match self.check_and_consume(vec![TokType::Plus, TokType::Minus]) {
            None => self.parse_primary(),
            Some(span) => {
                let operator = match span.1 {
                    TokType::Plus => UnaryOperator::Plus,
                    TokType::Minus => UnaryOperator::Minus,
                    _ => unreachable!("check_and_consume only returns the requested token types"),
                };
                let value = self.parse_unary()?;
                match value {
                    JSONValue::Float(_) | JSONValue::Integer(_) | JSONValue::Infinity | JSONValue::NaN | JSONValue::Hexadecimal(_) | JSONValue::Exponent(_) => {}
                    JSONValue::Unary { .. } => {
                        return Err(self.make_error("Only one unary operator is allowed".to_string(), span.2))
                    }
                    val => {
                        return Err(self.make_error(format!("Unary operations not allowed for value {val:?}"), span.2))
                    }
                }
                Ok(JSONValue::Unary { operator, value: Box::new(value) })
            }
        }
    }

    fn parse_obj_or_array(&mut self) -> Result<JSONValue, ParsingError> {
        match self.check_and_consume(vec![TokType::LeftBracket, TokType::LeftBrace]) {
            None => self.parse_unary(),
            Some(span) => {
                match span.1 {
                    TokType::LeftBrace => self.parse_object(),
                    TokType::LeftBracket => self.parse_array(),
                    _ => unreachable!("check_and_consume only returns the requested token types"),
                }
            }
        }
    }

    fn parse_value(&mut self) -> Result<JSONValue, ParsingError> {
        self.current_depth += 1;
        if self.current_depth > self.max_depth {
            let idx = self.position();
            return Err(self.make_error(format!("max depth ({}) exceeded in nested arrays/objects. To expand the depth, use the ``with_max_depth`` constructor or enable the `unlimited_depth` feature", self.max_depth), idx))
        }
        let res = self.parse_obj_or_array();
        self.current_depth -= 1;
        res
    }

    fn parse_text(&mut self) -> Result<JSONText, ParsingError> {
        let wsc_0 = self.consume_whitespace_and_comments();
        let value = self.parse_value()?;
        let wsc_1 = self.consume_whitespace_and_comments();
        match self.advance() {
            None => {}
            Some(span) => {
                if span.1 != TokType::EOF {
                    return Err(self.make_error(format!("Unexpected {:?} token after value", span.1), span.0))
                }
            }
        }
        let context = JSONTextContext { wsc: (self.collect_wsc_vec_to_string(&wsc_0), self.collect_wsc_vec_to_string(&wsc_1)) };
        Ok(JSONText { value, context: Some(context) })
    }

    fn consume_whitespace_and_comments(&mut self) -> Vec<&'toks TokenSpan> {
        let mut ret: Vec<&TokenSpan> = Vec::new();
        loop {
            match self.peek() {
                None => return ret,
                Some(span) => {
                    match span.1 {
                        TokType::BlockComment | TokType::LineComment | TokType::Whitespace => {
                            ret.push(span);
                            self.advance();
                        }
                        _ => return ret,
                    }
                }
            }
        }
    }
}

pub fn from_tokens<'toks, 'input>(tokens: &'toks Tokens<'input>) -> Result<JSONText, ParsingError> {
    let mut parser = JSON5Parser::new(tokens);
    parser.parse_text()
}

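/// Parse a JSON5 document from a `&str` into a round-trippable [`JSONText`].
///
/// A minimal usage sketch:
///
/// ```ignore
/// let doc = from_str("{ a: 1 }").unwrap();
/// assert_eq!(doc.to_string(), "{ a: 1 }");
/// ```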
pub fn from_str(source: &str) -> Result<JSONText, ParsingError> {
    use crate::tokenize::tokenize_rt_str;
    let maybe_toks = tokenize_rt_str(source);
    match maybe_toks {
        Err(e) => {
            Err(ParsingError { index: e.index, message: e.message, char_index: e.char_index, lineno: e.lineno, colno: e.colno })
        }
        Ok(toks) => from_tokens(&toks),
    }
}

#[cfg(test)]
mod tests {
    use crate::tokenize::Tokenizer;

    use super::*;

    #[test]
    fn test_fuzz_1() {
        let res = from_str("0xA18 {9");
        assert!(res.is_err());
    }
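
    #[test]
    fn test_round_trip_preserves_formatting() {
        // A round-trip sketch: every node records its surrounding whitespace
        // and comments in its `context`, so serializing the parsed tree
        // should reproduce the input text exactly.
        let sample = "{ a: 1, /* comment */ b: [2, 3,] }";
        let parsed = from_str(sample).unwrap();
        assert_eq!(parsed.to_string(), sample);
    }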

    #[cfg(not(feature = "unlimited_depth"))]
    #[test]
    fn test_deeply_nested() {
        let n = 4000;
        let mut s = String::with_capacity(n * 2);
        for _ in 0..n {
            s.push('[')
        }
        for _ in 0..n {
            s.push(']')
        }
        let res = crate::parser::from_str(s.as_str());
        assert!(res.is_err());
        assert!(res.unwrap_err().message.contains("max depth"))
    }
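
    #[test]
    fn test_with_max_depth_constructor() {
        // A sketch of the `with_max_depth` constructor defined above: nesting
        // three levels deep with a limit of two is rejected with the
        // "max depth" error.
        use crate::tokenize::tokenize_rt_str;
        let toks = tokenize_rt_str("[[[]]]").unwrap();
        let mut parser = JSON5Parser::with_max_depth(&toks, 2);
        let res = parser.parse_text();
        assert!(res.unwrap_err().message.contains("max depth"));
    }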

    #[test]
    fn test_foo() {
        let res = from_str("{}").unwrap();
        let expected = JSONText { context: Some(JSONTextContext { wsc: (String::new(), String::new()) }), value: JSONValue::JSONObject { key_value_pairs: vec![], context: Some(JSONObjectContext { wsc: (String::new(),) }) } };
        assert_eq!(res.value, expected.value)
    }

    #[test]
    fn test_illegal_identifier_escape() {
        let text = r#"{ \u0031foo: 123 }"#;
        from_str(text).unwrap_err();
    }

    #[test]
    fn test_leading_comma_array() {
        let sample = r#"[
    ,null
]"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_lone_trailing_comma_array() {
        let sample = r#"[
    ,
]"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_no_comma_array() {
        let sample = r#"[
    true
    false
]"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_regular_array() {
        let sample = r#"[
    true,
    false,
    null
]"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_trailing_comma_array() {
        let sample = r#"[
    null,
]"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_block_comment_following_array_element() {
        let sample = r#"[
    false
    /*
        true
    */
]"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_block_comment_following_top_level_value() {
        let sample = r#"null
/*
    Some non-comment top-level value is needed;
    we use null above.
*/"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_block_comment_in_string() {
        let sample = r#""This /* block comment */ isn't really a block comment.""#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_block_comment_preceding_top_level_value() {
        let sample = r#"/*
    Some non-comment top-level value is needed;
    we use null below.
*/
null"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_block_comment_with_asterisks() {
        let sample = r#"/**
 * This is a JavaDoc-like block comment.
 * It contains asterisks inside of it.
 * It might also be closed with multiple asterisks.
 * Like this:
 **/
true"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_inline_comment_following_array_element() {
        let sample = r#"[
    false // true
]"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_inline_comment_following_top_level_value() {
        let sample = r#"null // Some non-comment top-level value is needed; we use null here."#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_inline_comment_in_string() {
        let sample = r#""This inline comment // isn't really an inline comment.""#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_inline_comment_preceding_top_level_value() {
        let sample = r#"// Some non-comment top-level value is needed; we use null below.
null"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_top_level_block_comment() {
        let sample = r#"/*
    This should fail;
    comments cannot be the only top-level value.
*/"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_top_level_inline_comment() {
        let sample = r#"// This should fail; comments cannot be the only top-level value."#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_unterminated_block_comment() {
        let sample = r#"true
/*
    This block comment doesn't terminate.
    There was a legitimate value before this,
    but this is still invalid JS/JSON5.
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_empty() {
        let sample = r#""#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_npm_package() {
        let sample = r#"{
  "name": "npm",
  "publishConfig": {
    "proprietary-attribs": false
  },
  "description": "A package manager for node",
  "keywords": [
    "package manager",
    "modules",
    "install",
    "package.json"
  ],
  "version": "1.1.22",
  "preferGlobal": true,
  "config": {
    "publishtest": false
  },
  "homepage": "http://npmjs.org/",
  "author": "Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me)",
  "repository": {
    "type": "git",
    "url": "https://github.com/isaacs/npm"
  },
  "bugs": {
    "email": "npm-@googlegroups.com",
    "url": "http://github.com/isaacs/npm/issues"
  },
  "directories": {
    "doc": "./doc",
    "man": "./man",
    "lib": "./lib",
    "bin": "./bin"
  },
  "main": "./lib/npm.js",
  "bin": "./bin/npm-cli.js",
  "dependencies": {
    "semver": "~1.0.14",
    "ini": "1",
    "slide": "1",
    "abbrev": "1",
    "graceful-fs": "~1.1.1",
    "minimatch": "~0.2",
    "nopt": "1",
    "node-uuid": "~1.3",
    "proto-list": "1",
    "rimraf": "2",
    "request": "~2.9",
    "which": "1",
    "tar": "~0.1.12",
    "fstream": "~0.1.17",
    "block-stream": "*",
    "inherits": "1",
    "mkdirp": "0.3",
    "read": "0",
    "lru-cache": "1",
    "node-gyp": "~0.4.1",
    "fstream-npm": "0 >=0.0.5",
    "uid-number": "0",
    "archy": "0",
    "chownr": "0"
  },
  "bundleDependencies": [
    "slide",
    "ini",
    "semver",
    "abbrev",
    "graceful-fs",
    "minimatch",
    "nopt",
    "node-uuid",
    "rimraf",
    "request",
    "proto-list",
    "which",
    "tar",
    "fstream",
    "block-stream",
    "inherits",
    "mkdirp",
    "read",
    "lru-cache",
    "node-gyp",
    "fstream-npm",
    "uid-number",
    "archy",
    "chownr"
  ],
  "devDependencies": {
    "ronn": "https://github.com/isaacs/ronnjs/tarball/master"
  },
  "engines": {
    "node": "0.6 || 0.7 || 0.8",
    "npm": "1"
  },
  "scripts": {
    "test": "node ./test/run.js",
    "prepublish": "npm prune; rm -rf node_modules/*/{test,example,bench}*; make -j4 doc",
    "dumpconf": "env | grep npm | sort | uniq"
  },
  "licenses": [
    {
      "type": "MIT +no-false-attribs",
      "url": "http://github.com/isaacs/npm/raw/master/LICENSE"
    }
  ]
}
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_npm_package2() {
        let sample = r#"{
  name: 'npm',
  publishConfig: {
    'proprietary-attribs': false,
  },
  description: 'A package manager for node',
  keywords: [
    'package manager',
    'modules',
    'install',
    'package.json',
  ],
  version: '1.1.22',
  preferGlobal: true,
  config: {
    publishtest: false,
  },
  homepage: 'http://npmjs.org/',
  author: 'Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me)',
  repository: {
    type: 'git',
    url: 'https://github.com/isaacs/npm',
  },
  bugs: {
    email: 'npm-@googlegroups.com',
    url: 'http://github.com/isaacs/npm/issues',
  },
  directories: {
    doc: './doc',
    man: './man',
    lib: './lib',
    bin: './bin',
  },
  main: './lib/npm.js',
  bin: './bin/npm-cli.js',
  dependencies: {
    semver: '~1.0.14',
    ini: '1',
    slide: '1',
    abbrev: '1',
    'graceful-fs': '~1.1.1',
    minimatch: '~0.2',
    nopt: '1',
    'node-uuid': '~1.3',
    'proto-list': '1',
    rimraf: '2',
    request: '~2.9',
    which: '1',
    tar: '~0.1.12',
    fstream: '~0.1.17',
    'block-stream': '*',
    inherits: '1',
    mkdirp: '0.3',
    read: '0',
    'lru-cache': '1',
    'node-gyp': '~0.4.1',
    'fstream-npm': '0 >=0.0.5',
    'uid-number': '0',
    archy: '0',
    chownr: '0',
  },
  bundleDependencies: [
    'slide',
    'ini',
    'semver',
    'abbrev',
    'graceful-fs',
    'minimatch',
    'nopt',
    'node-uuid',
    'rimraf',
    'request',
    'proto-list',
    'which',
    'tar',
    'fstream',
    'block-stream',
    'inherits',
    'mkdirp',
    'read',
    'lru-cache',
    'node-gyp',
    'fstream-npm',
    'uid-number',
    'archy',
    'chownr',
  ],
  devDependencies: {
    ronn: 'https://github.com/isaacs/ronnjs/tarball/master',
  },
  engines: {
    node: '0.6 || 0.7 || 0.8',
    npm: '1',
  },
  scripts: {
    test: 'node ./test/run.js',
    prepublish: 'npm prune; rm -rf node_modules/*/{test,example,bench}*; make -j4 doc',
    dumpconf: 'env | grep npm | sort | uniq',
  },
  licenses: [
    {
      type: 'MIT +no-false-attribs',
      url: 'http://github.com/isaacs/npm/raw/master/LICENSE',
    },
  ],
}
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_readme_example() {
        let sample = r#"{
  foo: 'bar',
  while: true,

  this: 'is a \
multi-line string',

  // this is an inline comment
  here: 'is another', // inline comment

  /* this is a block comment
     that continues on another line */

  hex: 0xDEADbeef,
  half: .5,
  delta: +10,
  to: Infinity, // and beyond!

  finally: 'a trailing comma',
  oh: [
    "we shouldn't forget",
    'arrays can have',
    'trailing commas too',
  ],
}
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_valid_whitespace() {
        let sample = r#"{
    // An invalid form feed character (\x0c) has been entered before this comment.
    // Be careful not to delete it.
    "a": true
}
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_comment_cr() {
        let sample = r#"{
    // This comment is terminated with `\r`.
}
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_comment_crlf() {
        let sample = r#"{
    // This comment is terminated with `\r\n`.
}
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_comment_lf() {
        let sample = r#"{
    // This comment is terminated with `\n`.
}
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_escaped_cr() {
        let sample = r#"{
    // the following string contains an escaped `\r`
    a: 'line 1 \
line 2'
}
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_escaped_crlf() {
        let sample = r#"{
    // the following string contains an escaped `\r\n`
    a: 'line 1 \
line 2'
}
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_escaped_lf() {
        let sample = r#"{
    // the following string contains an escaped `\n`
    a: 'line 1 \
line 2'
}
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_float_leading_decimal_point() {
        let sample = r#".5
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_float_leading_zero() {
        let sample = r#"0.5
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_float_trailing_decimal_point_with_integer_exponent() {
        let sample = r#"5.e4
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_float_trailing_decimal_point() {
        let sample = r#"5.
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_float_with_integer_exponent() {
        let sample = r#"1.2e3
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_float() {
        let sample = r#"1.2
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_hexadecimal_empty() {
        let sample = r#"0x
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_hexadecimal_lowercase_letter() {
        let sample = r#"0xc8
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_hexadecimal_uppercase_x() {
        let sample = r#"0XC8
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_hexadecimal_with_integer_exponent() {
        let sample = r#"0xc8e4
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_hexadecimal() {
        let sample = r#"0xC8
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_infinity() {
        let sample = r#"Infinity
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_integer_with_float_exponent() {
        let sample = r#"1e2.3
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err(), "{:?}", res.unwrap());
        }
    }

    #[test]
    fn test_integer_with_hexadecimal_exponent() {
        let sample = r#"1e0x4
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_integer_with_integer_exponent() {
        let sample = r#"2e23
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_integer_with_negative_float_exponent() {
        let sample = r#"1e-2.3
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_integer_with_negative_hexadecimal_exponent() {
        let sample = r#"1e-0x4
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err(), "{:?}", res.unwrap());
        }
    }

    #[test]
    fn test_integer_with_negative_integer_exponent() {
        let sample = r#"2e-23
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_integer_with_negative_zero_integer_exponent() {
        let sample = r#"5e-0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_integer_with_positive_float_exponent() {
        let sample = r#"1e+2.3
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_integer_with_positive_hexadecimal_exponent() {
        let sample = r#"1e+0x4
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_integer_with_positive_integer_exponent() {
        let sample = r#"1e+2
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_integer_with_positive_zero_integer_exponent() {
        let sample = r#"5e+0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_integer_with_zero_integer_exponent() {
        let sample = r#"5e0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_integer() {
        let sample = r#"15
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_lone_decimal_point() {
        let sample = r#".
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err(), "{:?}", res.unwrap());
        }
    }

    #[test]
    fn test_nan() {
        let sample = r#"NaN
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_float_leading_decimal_point() {
        let sample = r#"-.5
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_float_leading_zero() {
        let sample = r#"-0.5
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_float_trailing_decimal_point() {
        let sample = r#"-5.
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_float() {
        let sample = r#"-1.2
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_hexadecimal() {
        let sample = r#"-0xC8
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_infinity() {
        let sample = r#"-Infinity
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_integer() {
        let sample = r#"-15
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_noctal() {
        let sample = r#"-098
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_negative_octal() {
        let sample = r#"-0123
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_negative_zero_float_leading_decimal_point() {
        let sample = r#"-.0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_zero_float_trailing_decimal_point() {
        let sample = r#"-0.
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_zero_float() {
        let sample = r#"-0.0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_zero_hexadecimal() {
        let sample = r#"-0x0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_zero_integer() {
        let sample = r#"-0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_negative_zero_octal() {
        let sample = r#"-00
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_noctal_with_leading_octal_digit() {
        let sample = r#"0780
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_noctal() {
        let sample = r#"080
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_octal() {
        let sample = r#"010
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_positive_float_leading_decimal_point() {
        let sample = r#"+.5
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_positive_float_leading_zero() {
        let sample = r#"+0.5
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_positive_float_trailing_decimal_point() {
        let sample = r#"+5.
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_positive_float() {
        let sample = r#"+1.2
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_positive_hexadecimal() {
        let sample = r#"+0xC8
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_positive_infinity() {
        let sample = r#"+Infinity
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_positive_integer() {
        let sample = r#"+15
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_positive_noctal() {
        let sample = r#"+098
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_positive_octal() {
        let sample = r#"+0123
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_positive_zero_float_leading_decimal_point() {
        let sample = r#"+.0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_positive_zero_float_trailing_decimal_point() {
        let sample = r#"+0.
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_positive_zero_float() {
        let sample = r#"+0.0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_positive_zero_hexadecimal() {
        let sample = r#"+0x0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_positive_zero_integer() {
        let sample = r#"+0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_positive_zero_octal() {
        let sample = r#"+00
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_zero_float_leading_decimal_point() {
        let sample = r#".0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_zero_float_trailing_decimal_point() {
        let sample = r#"0.
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_zero_float() {
        let sample = r#"0.0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_zero_hexadecimal() {
        let sample = r#"0x0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_zero_integer_with_integer_exponent() {
        let sample = r#"0e23
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_zero_integer() {
        let sample = r#"0
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_zero_octal() {
        let sample = r#"00
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_duplicate_keys() {
        let sample = r#"{
    "a": true,
    "a": false
}
"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_empty_object() {
        let sample = r#"{}"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_illegal_unquoted_key_number() {
        let sample = r#"{
    10twenty: "ten twenty"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_illegal_unquoted_key_symbol() {
        let sample = r#"{
    multi-word: "multi-word"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_leading_comma_object() {
        let sample = r#"{
    ,"foo": "bar"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_lone_trailing_comma_object() {
        let sample = r#"{
    ,
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_no_comma_object() {
        let sample = r#"{
    "foo": "bar"
    "hello": "world"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_reserved_unquoted_key() {
        let sample = r#"{
    while: true
}"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_single_quoted_key() {
        let sample = r#"{
    'hello': "world"
}"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_trailing_comma_object() {
        let sample = r#"{
    "foo": "bar",
}"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_unquoted_keys() {
        let sample = r#"{
    hello: "world",
    _: "underscore",
    $: "dollar sign",
    one1: "numerals",
    _$_: "multiple symbols",
    $_$hello123world_$_: "mixed"
}"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_escaped_single_quoted_string() {
        let sample = r#"'I can\'t wait'"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_multi_line_string() {
        let sample = r#"'hello\
 world'"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_single_quoted_string() {
        let sample = r#"'hello world'"#;
        let _res = from_str(sample).unwrap();
    }

    #[test]
    fn test_unescaped_multi_line_string() {
        let sample = r#""foo
bar"
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            return
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            assert!(res.is_err());
        }
    }

    #[test]
    fn test_error_no_comma_array_lineno() {
        let sample = r#"[
    true
    false
]"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.lineno, 3_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.lineno, 3_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_no_comma_array_index() {
        let sample = r#"[
    true
    false
]"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.char_index, 15_usize, "{:?}", err)
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.char_index, 15_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_no_comma_array_colno() {
        let sample = r#"[
    true
    false
]"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.colno, 5_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.colno, 5_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_top_level_block_comment_lineno() {
        let sample = r#"/*
    This should fail;
    comments cannot be the only top-level value.
*/"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.lineno, 4_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.lineno, 4_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_top_level_block_comment_index() {
        let sample = r#"/*
    This should fail;
    comments cannot be the only top-level value.
*/"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.char_index, 76_usize, "{:?}", err)
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.char_index, 76_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_top_level_block_comment_colno() {
        let sample = r#"/*
    This should fail;
    comments cannot be the only top-level value.
*/"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.colno, 3_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.colno, 3_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_top_level_inline_comment_lineno() {
        let sample = r#"// This should fail; comments cannot be the only top-level value."#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.lineno, 1_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.lineno, 1_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_top_level_inline_comment_index() {
        let sample = r#"// This should fail; comments cannot be the only top-level value."#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.char_index, 65_usize, "{:?}", err)
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.char_index, 65_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_top_level_inline_comment_colno() {
        let sample = r#"// This should fail; comments cannot be the only top-level value."#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.colno, 66_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.colno, 66_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_illegal_unquoted_key_number_lineno() {
        let sample = r#"{
    10twenty: "ten twenty"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.lineno, 2_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.lineno, 2_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_illegal_unquoted_key_number_index() {
        let sample = r#"{
    10twenty: "ten twenty"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.char_index, 6_usize, "{:?}", err)
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.char_index, 6_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_illegal_unquoted_key_number_colno() {
        let sample = r#"{
    10twenty: "ten twenty"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.colno, 5_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.colno, 5_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_illegal_unquoted_key_symbol_lineno() {
        let sample = r#"{
    multi-word: "multi-word"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.lineno, 2_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.lineno, 2_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_illegal_unquoted_key_symbol_index() {
        let sample = r#"{
    multi-word: "multi-word"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.char_index, 11_usize, "{:?}", err)
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.char_index, 11_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_illegal_unquoted_key_symbol_colno() {
        let sample = r#"{
    multi-word: "multi-word"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.colno, 10_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.colno, 10_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_leading_comma_object_lineno() {
        let sample = r#"{
    ,"foo": "bar"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.lineno, 2_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.lineno, 2_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_leading_comma_object_index() {
        let sample = r#"{
    ,"foo": "bar"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.char_index, 6_usize, "{:?}", err)
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.char_index, 6_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_leading_comma_object_colno() {
        let sample = r#"{
    ,"foo": "bar"
}"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.colno, 5_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.colno, 5_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_unescaped_multi_line_string_lineno() {
        let sample = r#""foo
bar"
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.lineno, 1_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.lineno, 1_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_unescaped_multi_line_string_index() {
        let sample = r#""foo
bar"
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.char_index, 4_usize, "{:?}", err)
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.char_index, 4_usize, "{:?}", err);
        }
    }

    #[test]
    fn test_error_unescaped_multi_line_string_colno() {
        let sample = r#""foo
bar"
"#;
        let maybe_tokens = Tokenizer::new(sample).tokenize();
        if maybe_tokens.is_err() {
            let err = maybe_tokens.unwrap_err();
            assert_eq!(err.colno, 5_usize, "{:?}", err);
        } else {
            let toks = maybe_tokens.unwrap();
            let res = from_tokens(&toks);
            let err = res.unwrap_err();
            assert_eq!(err.colno, 5_usize, "{:?}", err);
        }
    }
}