author    Dominick Allen <dominick.allen1989@gmail.com>  2020-06-28 20:17:12 -0500
committer Dominick Allen <dominick.allen1989@gmail.com>  2020-06-28 20:17:12 -0500
commit    d037aaeea1b82903ffd1cffcb9825dedf98f494e (patch)
tree      e5b3411b6d0d25d8abc65684f280489af4d2ec06 /src/lib/tokenize.rs
parent    fd85e096e57d2fad1035f4f5987a282edb2996fc (diff)
Cleanup courtesy of clippy warnings and refactoring of tokenization. (HEAD, v0.1.3, master, develop)
Diffstat (limited to 'src/lib/tokenize.rs')
-rw-r--r--  src/lib/tokenize.rs  74
1 file changed, 16 insertions(+), 58 deletions(-)
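In brief: the commented-out free function analyze and its hard-coded successor analyze2 are deleted, and rule dispatch moves onto TokenStream as a method, with the new TokenizeFn alias tidying the rule-table type. The sketch below distills that dispatch pattern into a self-contained example; the Token variants and rule functions here are simplified stand-ins for illustration, not the real module's definitions.

// Minimal sketch of the rule-table dispatch this commit settles on.
// Token, is_lparen, and is_rparen are simplified stand-ins.
type MaybeToken = (Option<Result<Token, String>>, usize);
type TokenizeFn = fn(&str) -> MaybeToken;

#[derive(PartialEq, Debug)]
enum Token {
    LParen,
    RParen,
}

// Each rule either recognizes a token and reports how many bytes
// it consumed, or declines with (None, 0).
fn is_lparen(expr: &str) -> MaybeToken {
    if expr.starts_with('(') { (Some(Ok(Token::LParen)), 1) } else { (None, 0) }
}

fn is_rparen(expr: &str) -> MaybeToken {
    if expr.starts_with(')') { (Some(Ok(Token::RParen)), 1) } else { (None, 0) }
}

struct TokenStream {
    rules: Vec<TokenizeFn>,
    on_err: String,
}

impl TokenStream {
    // The first rule that produces a token wins; if none match,
    // report the stream's configured error without consuming input.
    fn analyze(&self, expr: &str) -> MaybeToken {
        for &fun in self.rules.iter() {
            let (token, len) = fun(expr);
            if token.is_some() {
                return (token, len);
            }
        }
        (Some(Err(self.on_err.clone())), 0)
    }
}

fn main() {
    let ts = TokenStream {
        rules: vec![is_lparen, is_rparen],
        on_err: "unrecognized token".to_string(),
    };
    assert_eq!(ts.analyze("(foo)"), (Some(Ok(Token::LParen)), 1));
    assert_eq!(ts.analyze("?"), (Some(Err("unrecognized token".to_string())), 0));
}

With the rules owned by the stream, both peek and next can simply call self.analyze(...) instead of threading the rule slice and error string through a free function, which is exactly the change the diff below records.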
diff --git a/src/lib/tokenize.rs b/src/lib/tokenize.rs
index 483536f..4a59bdc 100644
--- a/src/lib/tokenize.rs
+++ b/src/lib/tokenize.rs
@@ -5,6 +5,7 @@ use super::types::Op;
use super::sexpr::SExpr;
pub type MaybeToken = (Option<Result<Token, String>>, usize);
+type TokenizeFn = fn(&str) -> MaybeToken;
#[derive(PartialEq, Debug)]
pub enum Token {
@@ -16,7 +17,7 @@ pub enum Token {
pub struct TokenStream {
expr: String,
index: usize,
- rules: Vec<fn(&str) -> MaybeToken>,
+ rules: Vec<TokenizeFn>,
on_err: String,
}
@@ -31,6 +32,7 @@ impl TokenStream {
}
}
+ /// Creates a new TokenStream with the default set of rules.
pub fn default(e: &str) -> TokenStream {
TokenStream {
expr: e.to_string(),
@@ -41,21 +43,14 @@ impl TokenStream {
}
pub fn peek(&self) -> Option<Result<Token, String>> {
-
let i = self.count_whitespace();
if self.index + i == self.expr.len() {
return None
}
- /*
- let (token, _) = analyze(&self.expr[self.index + i..],
- self.rules.as_slice(),
- &self.on_err);
- */
- let (token, _) = analyze2(&self.expr[self.index + i ..]);
+ let (token, _) = self.analyze(&self.expr[self.index + i ..]);
token
}
-
fn count_whitespace(&self) -> usize {
let mut whitespace_count = 0;
for x in self.expr[self.index..].chars() {
@@ -73,6 +68,17 @@ impl TokenStream {
self.index += self.count_whitespace();
}
}
+
+ pub fn analyze(&self, expr: &str) -> MaybeToken {
+ for &fun in self.rules.iter() {
+ let (token, len) = fun(expr);
+ if token.is_some() {
+ return (token, len)
+ }
+ }
+
+ (Some(Err(self.on_err.to_string())), 0)
+ }
}
impl Iterator for TokenStream {
@@ -84,12 +90,7 @@ impl Iterator for TokenStream {
}
self.skip_whitespace();
- /*
- let (token, len) = analyze(
- &self.expr[self.index..],
- self.rules.as_ref(), &self.on_err);
- */
- let (token, len) = analyze2(&self.expr[self.index ..]);
+ let (token, len) = self.analyze(&self.expr[self.index ..]);
self.index += len;
token
}
@@ -103,49 +104,6 @@ impl Iterator for TokenStream {
}
}
-pub fn analyze(expr: &str, funs: &[fn(&str) -> MaybeToken],
- on_err: &str) -> MaybeToken {
- for &fun in funs.iter() {
- let (token, len) = fun(expr);
- if token.is_some() {
- return (token, len)
- }
- }
-
- (Some(Err(on_err.to_string())), 0)
-}
-
-fn analyze2(expr: &str) -> MaybeToken {
- //is_var, is_number
- let c = expr.chars().next().unwrap();
- /* Check for strings, ( and ) */
- if c == '"' {
- let close = get_string_end(expr);
- let value = Token::Value(Type::Str(expr[1 .. close + 1].to_string()));
- let expr_len = close + 2;
- return (Some(Ok(value)), expr_len)
- } else if c == '(' {
- return (Some(Ok(Token::LParen)), 1)
- } else if c == ')' {
- return (Some(Ok(Token::RParen)), 1)
- }
-
- let word = &expr[0 .. get_word_end(expr)];
- if word == "true" {
- (Some(Ok(Token::Value(Type::Bool(true)))), 4)
- } else if word == "false" {
- (Some(Ok(Token::Value(Type::Bool(false)))), 5)
- } else if let Ok(op) = word.parse::<Op>() {
- (Some(Ok(Token::Value(Type::Operator(op)))), word.len())
- } else if c.is_alphabetic() {
- (Some(Ok(Token::Value(Type::Symbol(word.to_string())))), word.len())
- } else if let (Some(x), len) = is_int(&word) {
- (Some(x), len)
- } else {
- is_float(&word)
- }
-}
-
pub fn make_word(expr: &str) -> String {
let word = expr.split(|c: char| {
c.is_whitespace()