brainfuck/brainfuck.rs
//! This is a simple [brainfuck](https://en.wikipedia.org/wiki/Brainfuck) tokenizer.
//! This is intended to show how to use a [TokenizerContext] at the very, very basic level.
#![allow(dead_code)]
use alkale::{TokenizerContext, TokenizerResult};

/// Represents the eight brainfuck tokens.
#[derive(Debug, Clone, Copy)]
enum BFTokenType {
    /// `+`: increments the byte at the data pointer.
    Increment,
    /// `-`: decrements the byte at the data pointer.
    Decrement,
    /// `>`: moves the data pointer one cell to the right.
    MoveRight,
    /// `<`: moves the data pointer one cell to the left.
    MoveLeft,
    /// `[`: jumps past the matching `]` if the current byte is zero.
    BeginWhile,
    /// `]`: jumps back to the matching `[` if the current byte is nonzero.
    EndWhile,
    /// `,`: reads a byte from input into the current cell.
    ReadIO,
    /// `.`: writes the current cell's byte to output.
    WriteIO,
}

/// Tokenizes a string according to brainfuck grammar.
fn tokenize(source: &str) -> TokenizerResult<BFTokenType> {
    use BFTokenType::*;

    // Create the reader context.
    let mut context = TokenizerContext::new(source.chars());

    // Repeat while there are more characters in the source code.
    while context.has_next() {
        // Attempt to map the next character to its respective token.
        let pushed_token = context.map_single_char_token(|char| match char {
            '+' => Some(Increment),
            '-' => Some(Decrement),
            '>' => Some(MoveRight),
            '<' => Some(MoveLeft),
            '[' => Some(BeginWhile),
            ']' => Some(EndWhile),
            '.' => Some(WriteIO),
            ',' => Some(ReadIO),
            _ => None,
        });

        // If no token was pushed above (i.e. the character is not a
        // brainfuck command), skip it and move on; brainfuck treats every
        // other character as a comment.
        if !pushed_token {
            context.skip();
        }
    }

    // Return the accumulated result.
    context.result()
}

fn main() {
    // The classic brainfuck "Hello World!" program (from the Wikipedia
    // article linked above).
    let program = r#"
        ++++++++[>++++[>
        ++>+++>+++>+<<<<
        -]>+>+>->>+[<]<-
        ]>>.>---.+++++++
        ..+++.>>.<-.<.++
        +.------.-------
        -.>>+.>++.
    "#;

    let result = tokenize(program);
    println!("{:#?}", result);
}
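
// NOTE: a minimal smoke test, added as a sketch. It assumes nothing about
// alkale beyond what this file already uses: `tokenize` above, and the fact
// that `TokenizerResult` implements `Debug` (which `main` relies on).
#[cfg(test)]
mod tests {
    use super::tokenize;

    #[test]
    fn tokenizes_all_eight_commands() {
        // Each of the eight brainfuck commands appears exactly once.
        let result = tokenize("+-><[].,");
        // Treat the result as opaque: formatting it exercises the same
        // `Debug` path as `main`, without guessing at accessor methods.
        let _ = format!("{:#?}", result);
    }
}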