// Rustdoc-generated search index for the `luthor` crate, consumed by rustdoc's
// `initSearch` front-end to power the documentation search box.
//
// Repairs made to the generated data (the original was not valid JavaScript):
//   1. The inner double quotes around "Lexical Scanning in Go" were unescaped,
//      terminating the enclosing string literal early — now escaped as \".
//   2. Two string literals contained raw newlines (unterminated strings) —
//      replaced with \n escapes, matching the line-break convention used by
//      every other doc string in this index.
//   3. Copy-paste errors in the generated doc strings: the `javascript` and
//      `rust` lexer modules were both described as "A lexer for the Ruby
//      programming language." — corrected to name their actual languages
//      (their `lex` entries confirm the intended subjects).
// Item tuples are [kind, name, parent path, doc, path-index, signature];
// kind codes follow rustdoc's convention (0 = module, 3 = struct, 4 = enum,
// 5 = function, 11 = method, 12 = struct field, 13 = enum variant).
var searchIndex = {};
searchIndex["luthor"] = {
  "doc": "Luthor provides a collection of lexers for various formats and languages.\nIt also exposes types that aid in building lexers of your own.",
  "items": [
    [3, "Tokenizer", "luthor", "The Tokenizer type is used to produce and store tokens for lexers.", null, null],
    [12, "states", "", "", 0, null],
    [3, "StateFunction", "", "A recursive function type used by lexers to manage their state.\nBased on Rob Pike's \"Lexical Scanning in Go\" talk, these functions are\ninvoked in a call/return loop (letting the current function determine\nthe next) until a `None` value is returned, after which lexing is complete.", null, null],
    [12, "0", "", "", 1, null],
    [0, "lexers", "", "Pre-built lexers for various languages/formats.", null, null],
    [0, "javascript", "luthor::lexers", "A lexer for the JavaScript programming language.", null, null],
    [5, "lex", "luthor::lexers::javascript", "Lexes a JavaScript document.", null, {"inputs": [{"name": "str"}], "output": {"name": "vec"}}],
    [0, "json", "luthor::lexers", "A lexer for the JSON data format.", null, null],
    [5, "lex", "luthor::lexers::json", "Lexes a JSON document.", null, {"inputs": [{"name": "str"}], "output": {"name": "vec"}}],
    [0, "xml", "luthor::lexers", "A simple lexer for XML documents.", null, null],
    [5, "lex", "luthor::lexers::xml", "Lexes an XML document.", null, {"inputs": [{"name": "str"}], "output": {"name": "vec"}}],
    [0, "ruby", "luthor::lexers", "A lexer for the Ruby programming language.", null, null],
    [5, "lex", "luthor::lexers::ruby", "Lexes a Ruby document.", null, {"inputs": [{"name": "str"}], "output": {"name": "vec"}}],
    [0, "rust", "luthor::lexers", "A lexer for the Rust programming language.", null, null],
    [5, "lex", "luthor::lexers::rust", "", null, {"inputs": [{"name": "str"}], "output": {"name": "vec"}}],
    [0, "html_erb", "luthor::lexers", "A simple lexer for HTML data with embedded Ruby. Breaks data into three\nsegments: HTML, erb tags, and Ruby. \nDefers to other lexers for the HTML\nand Ruby segments.", null, null],
    [5, "lex", "luthor::lexers::html_erb", "", null, {"inputs": [{"name": "str"}], "output": {"name": "vec"}}],
    [0, "default", "luthor::lexers", "A simple lexer that will produce text and whitespace categorized tokens,\nsuitable as a fallback in situations where a format/language-specific\nequivalent is unavailable.", null, null],
    [5, "lex", "luthor::lexers::default", "Lexes any UTF-8 document.", null, {"inputs": [{"name": "str"}], "output": {"name": "vec"}}],
    [0, "token", "luthor", "Token-related types.", null, null],
    [3, "Token", "luthor::token", "A lexeme and category pairing. Tokens are the final product of a lexer;\ntheir lexemes should join to produce the original data passed to the lexer.", null, null],
    [12, "lexeme", "", "", 2, null],
    [12, "category", "", "", 2, null],
    [4, "Category", "", "The primary means of classifying a format or language's lexemes.", null, null],
    [13, "Whitespace", "", "", 3, null],
    [13, "Identifier", "", "", 3, null],
    [13, "Keyword", "", "", 3, null],
    [13, "Brace", "", "", 3, null],
    [13, "Bracket", "", "", 3, null],
    [13, "Parenthesis", "", "", 3, null],
    [13, "Operator", "", "", 3, null],
    [13, "Integer", "", "", 3, null],
    [13, "Float", "", "", 3, null],
    [13, "String", "", "", 3, null],
    [13, "Boolean", "", "", 3, null],
    [13, "Text", "", "", 3, null],
    [13, "Comment", "", "", 3, null],
    [13, "Function", "", "", 3, null],
    [13, "Method", "", "", 3, null],
    [13, "Call", "", "", 3, null],
    [13, "Literal", "", "", 3, null],
    [13, "Key", "", "", 3, null],
    [11, "clone", "", "", 3, null],
    [11, "fmt", "", "", 3, null],
    [11, "eq", "", "", 3, null],
    [11, "clone", "", "", 2, null],
    [11, "fmt", "", "", 2, null],
    [11, "eq", "", "", 2, null],
    [11, "ne", "", "", 2, null],
    [11, "new", "luthor", "Initializes a new tokenizer with the given data.", 0, {"inputs": [{"name": "str"}], "output": {"name": "tokenizer"}}],
    [11, "tokens", "", "Returns a copy of the tokens processed to date, in addition to any\nin-progress or remaining data appended as a text-category token.\nAs a result, the returned tokens always produce the original dataset\nwhen their lexemes are concatenated.", 0, null],
    [11, "advance", "", "Moves to the next \ncharacter in the data.\nDoes nothing if there is no more data to process.", 0, null],
    [11, "current_char", "", "Returns the character at the current position,\nunless all of the data has been processed.", 0, null],
    [11, "next_non_whitespace_char", "", "Returns the next non-whitespace character, without advancing the cursor.", 0, null],
    [11, "has_prefix", "", "Whether or not the remaining data starts with the specified prefix.", 0, null],
    [11, "starts_with_lexeme", "", "Whether or not the remaining data starts with the specified lexeme.\nEnsures that the specified lexeme is not just a prefix by checking that\nthe data that follows it is a newline, space, comma, or nothing at all.", 0, null],
    [11, "tokenize", "", "Creates and stores a token with the given category containing any\ndata processed using `advance` since the last call to this method.", 0, null],
    [11, "tokenize_next", "", "Creates and stores a token with the given category and the\nnext `amount` characters of the data. Before doing this, it\ntokenizes any previously processed characters with the generic\n`Category::Text` category.", 0, null],
    [11, "consume_whitespace", "", "Consumes consecutive whitespace characters as a single token.", 0, null]
  ],
  "paths": [[3, "Tokenizer"], [3, "StateFunction"], [3, "Token"], [4, "Category"]]
};
initSearch(searchIndex);