Diffstat (limited to 'users/fcuny/exp/monkey')
-rw-r--r--   users/fcuny/exp/monkey/.gitignore                 1
-rw-r--r--   users/fcuny/exp/monkey/LICENSE.txt               20
-rw-r--r--   users/fcuny/exp/monkey/Makefile                   4
-rw-r--r--   users/fcuny/exp/monkey/README.org                 3
-rw-r--r--   users/fcuny/exp/monkey/cmd/repl/main.go          12
-rw-r--r--   users/fcuny/exp/monkey/go.mod                     3
-rw-r--r--   users/fcuny/exp/monkey/pkg/lexer/lexer.go        152
-rw-r--r--   users/fcuny/exp/monkey/pkg/lexer/lexer_test.go   125
-rw-r--r--   users/fcuny/exp/monkey/pkg/repl/repl.go           30
-rw-r--r--   users/fcuny/exp/monkey/pkg/token/token.go         71
10 files changed, 421 insertions, 0 deletions
diff --git a/users/fcuny/exp/monkey/.gitignore b/users/fcuny/exp/monkey/.gitignore
new file mode 100644
index 0000000..2f226a3
--- /dev/null
+++ b/users/fcuny/exp/monkey/.gitignore
@@ -0,0 +1 @@
+/cmd/repl/repl
diff --git a/users/fcuny/exp/monkey/LICENSE.txt b/users/fcuny/exp/monkey/LICENSE.txt
new file mode 100644
index 0000000..b928a6d
--- /dev/null
+++ b/users/fcuny/exp/monkey/LICENSE.txt
@@ -0,0 +1,20 @@
+Copyright (c) 2019 franck cuny
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/users/fcuny/exp/monkey/Makefile b/users/fcuny/exp/monkey/Makefile
new file mode 100644
index 0000000..61168f3
--- /dev/null
+++ b/users/fcuny/exp/monkey/Makefile
@@ -0,0 +1,4 @@
+test:
+	go test -v ./...
+
+.PHONY: test
diff --git a/users/fcuny/exp/monkey/README.org b/users/fcuny/exp/monkey/README.org
new file mode 100644
index 0000000..d968f4c
--- /dev/null
+++ b/users/fcuny/exp/monkey/README.org
@@ -0,0 +1,3 @@
+#+TITLE: monkey
+
+Implementation of the Monkey programming language from https://interpreterbook.com/.
diff --git a/users/fcuny/exp/monkey/cmd/repl/main.go b/users/fcuny/exp/monkey/cmd/repl/main.go
new file mode 100644
index 0000000..46b865c
--- /dev/null
+++ b/users/fcuny/exp/monkey/cmd/repl/main.go
@@ -0,0 +1,12 @@
+package main
+
+import (
+	"fmt"
+	"monkey/pkg/repl"
+	"os"
+)
+
+func main() {
+	fmt.Println("Welcome to monkey's REPL.")
+	repl.Start(os.Stdin, os.Stdout)
+}
diff --git a/users/fcuny/exp/monkey/go.mod b/users/fcuny/exp/monkey/go.mod
new file mode 100644
index 0000000..34c713d
--- /dev/null
+++ b/users/fcuny/exp/monkey/go.mod
@@ -0,0 +1,3 @@
+module monkey
+
+go 1.12
diff --git a/users/fcuny/exp/monkey/pkg/lexer/lexer.go b/users/fcuny/exp/monkey/pkg/lexer/lexer.go
new file mode 100644
index 0000000..3e98cf0
--- /dev/null
+++ b/users/fcuny/exp/monkey/pkg/lexer/lexer.go
@@ -0,0 +1,152 @@
+// Package lexer provides a lexer for the monkey language.
+package lexer
+
+import "monkey/pkg/token"
+
+// Lexer holds the state needed to turn an input string into a stream of tokens.
+type Lexer struct {
+	input string
+	// current position in input
+	position int
+	// current reading position in input (after a char)
+	readPosition int
+	// current character under examination
+	ch byte
+}
+
+// New returns a new lexer initialized with the given input.
+func New(input string) *Lexer {
+	l := &Lexer{input: input}
+	l.readChar()
+	return l
+}
+
+// readChar reads the current character and advances our position in the input string.
+func (l *Lexer) readChar() {
+	// if we've reached the end of the input, we set the current character to 0,
+	// which is the ASCII code for NUL.
+	if l.readPosition >= len(l.input) {
+		l.ch = 0
+	} else {
+		l.ch = l.input[l.readPosition]
+	}
+	l.position = l.readPosition
+	l.readPosition++
+}
+
+func (l *Lexer) readIdentifier() string {
+	position := l.position
+	for isLetter(l.ch) {
+		l.readChar()
+	}
+	return l.input[position:l.position]
+}
+
+func (l *Lexer) readNumber() string {
+	position := l.position
+	for isDigit(l.ch) {
+		l.readChar()
+	}
+	return l.input[position:l.position]
+}
+
+// skipWhitespace skips whitespace characters, since they are not significant in monkey.
+func (l *Lexer) skipWhitespace() {
+	for l.ch == ' ' || l.ch == '\t' || l.ch == '\n' || l.ch == '\r' {
+		l.readChar()
+	}
+}
+
+// peekChar returns the character at readPosition (which is the next character),
+// but does not advance `position` or `readPosition`.
+// This is needed to read tokens that are composed of two characters (e.g. `==`).
+func (l *Lexer) peekChar() byte {
+	if l.readPosition >= len(l.input) {
+		return 0
+	}
+	return l.input[l.readPosition]
+}
+
+// NextToken advances the lexer and returns the next token from the input.
+func (l *Lexer) NextToken() token.Token {
+	var tok token.Token
+
+	l.skipWhitespace()
+
+	switch l.ch {
+	case '=':
+		if l.peekChar() == '=' {
+			ch := l.ch
+			l.readChar()
+			literal := string(ch) + string(l.ch)
+			tok = token.Token{Type: token.EQ, Literal: literal}
+		} else {
+			tok = newToken(token.ASSIGN, l.ch)
+		}
+	case '+':
+		tok = newToken(token.PLUS, l.ch)
+	case '-':
+		tok = newToken(token.MINUS, l.ch)
+	case '!':
+		if l.peekChar() == '=' {
+			ch := l.ch
+			l.readChar()
+			literal := string(ch) + string(l.ch)
+			tok = token.Token{Type: token.NOT_EQ, Literal: literal}
+		} else {
+			tok = newToken(token.BANG, l.ch)
+		}
+	case '*':
+		tok = newToken(token.ASTERISK, l.ch)
+	case '/':
+		tok = newToken(token.SLASH, l.ch)
+	case '<':
+		tok = newToken(token.LT, l.ch)
+	case '>':
+		tok = newToken(token.GT, l.ch)
+
+	case ';':
+		tok = newToken(token.SEMICOLON, l.ch)
+	case ',':
+		tok = newToken(token.COMMA, l.ch)
+	case '(':
+		tok = newToken(token.LPAREN, l.ch)
+	case ')':
+		tok = newToken(token.RPAREN, l.ch)
+	case '{':
+		tok = newToken(token.LBRACE, l.ch)
+	case '}':
+		tok = newToken(token.RBRACE, l.ch)
+	case 0:
+		tok.Literal = ""
+		tok.Type = token.EOF
+	default:
+		if isLetter(l.ch) {
+			tok.Literal = l.readIdentifier()
+			tok.Type = token.LookupIdent(tok.Literal)
+			return tok
+		} else if isDigit(l.ch) {
+			tok.Type = token.INT
+			tok.Literal = l.readNumber()
+			return tok
+		} else {
+			tok = newToken(token.ILLEGAL, l.ch)
+		}
+
+	}
+
+	l.readChar()
+	return tok
+}
+
+func newToken(tokenType token.TokenType, ch byte) token.Token {
+	return token.Token{Type: tokenType, Literal: string(ch)}
+}
+
+func isLetter(ch byte) bool {
+	return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_'
+}
+
+func isDigit(ch byte) bool {
+	return '0' <= ch && ch <= '9'
+}
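
For reference, a minimal sketch of driving this lexer by hand outside of the REPL. It assumes the `monkey` module path declared in go.mod above, and the input string is just an arbitrary example:

package main

import (
	"fmt"

	"monkey/pkg/lexer"
	"monkey/pkg/token"
)

func main() {
	// Tokenize a single statement and print each token until EOF.
	l := lexer.New("let five = 5;")
	for tok := l.NextToken(); tok.Type != token.EOF; tok = l.NextToken() {
		fmt.Printf("%+v\n", tok)
	}
}

Each token prints as a `{Type Literal}` pair, e.g. `{Type:LET Literal:let}` for the first token of that input.
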
diff --git a/users/fcuny/exp/monkey/pkg/lexer/lexer_test.go b/users/fcuny/exp/monkey/pkg/lexer/lexer_test.go
new file mode 100644
index 0000000..fdea1d3
--- /dev/null
+++ b/users/fcuny/exp/monkey/pkg/lexer/lexer_test.go
@@ -0,0 +1,125 @@
+package lexer
+
+import (
+	"monkey/pkg/token"
+	"testing"
+)
+
+func TestNextToken(t *testing.T) {
+	input := `let five = 5;
+let ten = 10;
+
+let add = fn(x, y) {
+  x + y
+};
+
+let result = add(five, ten);
+!-/*5;
+10 > 5;
+
+if (5 < 10) {
+  return true;
+} else {
+  return false;
+}
+
+10 == 10;
+10 != 9;
+`
+
+	tests := []struct {
+		expectedType    token.TokenType
+		expectedLiteral string
+	}{
+		{token.LET, "let"},
+		{token.IDENT, "five"},
+		{token.ASSIGN, "="},
+		{token.INT, "5"},
+		{token.SEMICOLON, ";"},
+
+		{token.LET, "let"},
+		{token.IDENT, "ten"},
+		{token.ASSIGN, "="},
+		{token.INT, "10"},
+		{token.SEMICOLON, ";"},
+
+		{token.LET, "let"},
+		{token.IDENT, "add"},
+		{token.ASSIGN, "="},
+		{token.FUNCTION, "fn"},
+		{token.LPAREN, "("},
+		{token.IDENT, "x"},
+		{token.COMMA, ","},
+		{token.IDENT, "y"},
+		{token.RPAREN, ")"},
+		{token.LBRACE, "{"},
+		{token.IDENT, "x"},
+		{token.PLUS, "+"},
+		{token.IDENT, "y"},
+		{token.RBRACE, "}"},
+		{token.SEMICOLON, ";"},
+
+		{token.LET, "let"},
+		{token.IDENT, "result"},
+		{token.ASSIGN, "="},
+		{token.IDENT, "add"},
+		{token.LPAREN, "("},
+		{token.IDENT, "five"},
+		{token.COMMA, ","},
+		{token.IDENT, "ten"},
+		{token.RPAREN, ")"},
+		{token.SEMICOLON, ";"},
+
+		{token.BANG, "!"},
+		{token.MINUS, "-"},
+		{token.SLASH, "/"},
+		{token.ASTERISK, "*"},
+		{token.INT, "5"},
+		{token.SEMICOLON, ";"},
+
+		{token.INT, "10"},
+		{token.GT, ">"},
+		{token.INT, "5"},
+		{token.SEMICOLON, ";"},
+
+		{token.IF, "if"},
+		{token.LPAREN, "("},
+		{token.INT, "5"},
+		{token.LT, "<"},
+		{token.INT, "10"},
+		{token.RPAREN, ")"},
+		{token.LBRACE, "{"},
+		{token.RETURN, "return"},
+		{token.TRUE, "true"},
+		{token.SEMICOLON, ";"},
+		{token.RBRACE, "}"},
+		{token.ELSE, "else"},
+		{token.LBRACE, "{"},
+		{token.RETURN, "return"},
+		{token.FALSE, "false"},
+		{token.SEMICOLON, ";"},
+		{token.RBRACE, "}"},
+
+		{token.INT, "10"},
+		{token.EQ, "=="},
+		{token.INT, "10"},
+		{token.SEMICOLON, ";"},
+
+		{token.INT, "10"},
+		{token.NOT_EQ, "!="},
+		{token.INT, "9"},
+		{token.SEMICOLON, ";"},
+	}
+
+	l := New(input)
+	for i, tt := range tests {
+		tok := l.NextToken()
+		if tok.Type != tt.expectedType {
+			t.Fatalf("tests[%d] - tokentype wrong. expected=%q, got=%q", i, tt.expectedType, tok.Type)
+		}
+
+		if tok.Literal != tt.expectedLiteral {
+			t.Fatalf("tests[%d] - tokenliteral wrong. expected=%q, got=%q", i, tt.expectedLiteral, tok.Literal)
+		}
+	}
+}
diff --git a/users/fcuny/exp/monkey/pkg/repl/repl.go b/users/fcuny/exp/monkey/pkg/repl/repl.go
new file mode 100644
index 0000000..5e7b1d1
--- /dev/null
+++ b/users/fcuny/exp/monkey/pkg/repl/repl.go
@@ -0,0 +1,30 @@
+// Package repl provides a REPL for the monkey language.
+package repl
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"monkey/pkg/lexer"
+	"monkey/pkg/token"
+)
+
+const PROMPT = ">> "
+
+func Start(in io.Reader, out io.Writer) {
+	scanner := bufio.NewScanner(in)
+	for {
+		fmt.Fprint(out, PROMPT)
+		scanned := scanner.Scan()
+
+		if !scanned {
+			return
+		}
+
+		line := scanner.Text()
+		l := lexer.New(line)
+		for tok := l.NextToken(); tok.Type != token.EOF; tok = l.NextToken() {
+			fmt.Fprintf(out, "%+v\n", tok)
+		}
+	}
+}
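
Because Start only needs an io.Reader and an io.Writer, the REPL can also be driven from a fixed string rather than stdin, which is handy for quick experiments. A minimal sketch, with a hypothetical input line:

package main

import (
	"os"
	"strings"

	"monkey/pkg/repl"
)

func main() {
	// Feed the REPL a fixed line; Start returns once the scanner hits EOF.
	in := strings.NewReader("let add = fn(x, y) { x + y };\n")
	repl.Start(in, os.Stdout)
}
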
diff --git a/users/fcuny/exp/monkey/pkg/token/token.go b/users/fcuny/exp/monkey/pkg/token/token.go
new file mode 100644
index 0000000..5eadc5e
--- /dev/null
+++ b/users/fcuny/exp/monkey/pkg/token/token.go
@@ -0,0 +1,71 @@
+// Package token defines the tokens of the monkey language.
+package token
+
+// TokenType represents the type of a token.
+type TokenType string
+
+// Token represents a token, with the type and the literal value of the token
+type Token struct {
+	Type    TokenType
+	Literal string
+}
+
+const (
+	ILLEGAL = "ILLEGAL"
+	EOF     = "EOF"
+
+	IDENT = "IDENT"
+	INT   = "INT"
+
+	COMMA     = ","
+	SEMICOLON = ";"
+
+	LPAREN = "("
+	RPAREN = ")"
+	LBRACE = "{"
+	RBRACE = "}"
+
+	// The following tokens are keywords
+	FUNCTION = "FUNCTION"
+	LET      = "LET"
+	TRUE     = "TRUE"
+	FALSE    = "FALSE"
+	IF       = "IF"
+	ELSE     = "ELSE"
+	RETURN   = "RETURN"
+
+	// The following tokens are for operators
+	ASSIGN   = "="
+	PLUS     = "+"
+	MINUS    = "-"
+	BANG     = "!"
+	ASTERISK = "*"
+	SLASH    = "/"
+	LT       = "<"
+	GT       = ">"
+
+	EQ     = "=="
+	NOT_EQ = "!="
+)
+
+// List of our keywords for the language
+var keywords = map[string]TokenType{
+	"fn":     FUNCTION,
+	"let":    LET,
+	"true":   TRUE,
+	"false":  FALSE,
+	"if":     IF,
+	"else":   ELSE,
+	"return": RETURN,
+}
+
+// LookupIdent returns the token type for a given identifier.
+// First we check if the identifier is a keyword. If it is, we return the
+// keyword's TokenType constant. If it isn't, we return token.IDENT, which is
+// the TokenType for all user-defined identifiers.
+func LookupIdent(ident string) TokenType {
+	if tok, ok := keywords[ident]; ok {
+		return tok
+	}
+	return IDENT
+}
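
LookupIdent is the only place where keywords are told apart from ordinary identifiers. A short sketch of the expected behaviour, assuming the package above:

package main

import (
	"fmt"

	"monkey/pkg/token"
)

func main() {
	fmt.Println(token.LookupIdent("fn"))     // FUNCTION
	fmt.Println(token.LookupIdent("return")) // RETURN
	fmt.Println(token.LookupIdent("foobar")) // IDENT
}
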