Skip to content

Commit 4df5cc9

Browse files
committed
grouped switch statement in parser, fix quotation issue, add value to token and consume it
1 parent fcd0ed2 commit 4df5cc9

File tree

5 files changed: +49 additions, −22 deletions

interpeter

10.6 KB
Binary file not shown.

main.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ func run(code string) {
2020
tokens := tokenScanner.Scan()
2121
parser := NewParser(tokens)
2222
expression := parser.Parse()
23-
fmt.Printf("expression: %v\n", expression)
23+
fmt.Printf("expression: %#v\n", expression)
2424
// for _, token := range tokens {
2525
// fmt.Printf("token: %v\n", token)
2626
// }

parser.go

Lines changed: 2 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -108,16 +108,8 @@ func (p *Parser) unary() Expression {
108108
func (p *Parser) primary() Expression {
109109
literal := p.currentToken()
110110
switch literal.t {
111-
case NUMBER:
112-
return LiteralExpression{value: literal.lexeme} // should be value
113-
case STRING:
114-
return LiteralExpression{value: literal.lexeme} // should be value
115-
case TRUE:
116-
return LiteralExpression{value: literal.lexeme} // should be value
117-
case FALSE:
118-
return LiteralExpression{value: literal.lexeme} // should be value
119-
case NIL:
120-
return LiteralExpression{value: literal.lexeme} // should be value
111+
case NUMBER, STRING, TRUE, FALSE, NIL:
112+
return LiteralExpression{value: literal.value}
121113
case LEFT_PAREN:
122114
p.advance(1)
123115
expr := p.expression()

scanner.go

Lines changed: 44 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
package main
22

33
import (
4+
"log"
45
"strconv"
56
"unicode"
67
)
@@ -41,7 +42,7 @@ func NewTokenScanner(source string) *TokenScanner {
4142
// Scan takes in source code and emits tokens
4243
func (ts *TokenScanner) Scan() []Token {
4344
var tokens []Token
44-
for ts.current < len(ts.source) {
45+
for !ts.finished() {
4546
ts.start = ts.current
4647
c := ts.source[ts.current]
4748
switch c {
@@ -102,14 +103,22 @@ func (ts *TokenScanner) Scan() []Token {
102103
} else {
103104
tokens = ts.appendToken(tokens, SLASH)
104105
}
105-
case '"': // this is incorrect.
106+
case '"':
106107
for ts.current < len(ts.source) && ts.peek() != '"' {
107108
ts.advance()
108109
}
110+
if ts.peek() == '"' {
111+
ts.advance()
112+
} else { // we didn't close the quotation. Berate the user.
113+
log.Fatalf("stupid fucking user")
114+
}
115+
lexeme := ts.source[ts.start+1 : ts.current+1]
109116
tokens = append(tokens, Token{
110117
t: STRING,
111-
lexeme: ts.source[ts.start+1 : ts.current],
118+
lexeme: lexeme,
119+
value: lexeme,
112120
})
121+
113122
case ' ':
114123
case '\r':
115124
case '\t':
@@ -126,11 +135,15 @@ func (ts *TokenScanner) Scan() []Token {
126135
ts.advance()
127136
}
128137
}
129-
_, err := strconv.ParseFloat(ts.sliceToCurrent(), 64)
138+
value, err := strconv.ParseFloat(ts.sliceToCurrent(), 64)
130139
if err != nil {
131140
panic(err) // oh god
132141
}
133-
tokens = ts.appendToken(tokens, NUMBER)
142+
tokens = append(tokens, Token{
143+
t: NUMBER,
144+
lexeme: ts.sliceToCurrent(), // still dont like this
145+
value: value,
146+
})
134147
} else if unicode.IsLetter(rune(c)) {
135148
for unicode.IsLetter(rune(ts.peek())) || unicode.IsDigit(rune(ts.peek())) {
136149
ts.advance()
@@ -140,13 +153,32 @@ func (ts *TokenScanner) Scan() []Token {
140153
if val, ok := tokenKeywords[text]; ok {
141154
tokenType = val
142155
}
143-
tokenType = IDENTIFIER
144-
tokens = ts.appendToken(tokens, tokenType)
156+
// can we do this elsewhere? I don't like this.
157+
// we're also using append and appendToken a lot. Can this be unified?
158+
switch tokenType {
159+
case TRUE:
160+
tokens = append(tokens, Token{
161+
t: TRUE,
162+
lexeme: ts.sliceToCurrent(),
163+
value: true,
164+
})
165+
case FALSE:
166+
tokens = append(tokens, Token{
167+
t: TRUE,
168+
lexeme: ts.sliceToCurrent(),
169+
value: false,
170+
})
171+
case NIL:
172+
tokens = append(tokens, Token{
173+
t: TRUE,
174+
lexeme: ts.sliceToCurrent(),
175+
value: nil,
176+
})
177+
}
145178

146179
}
147180
}
148-
149-
// advance to the next token on complete
181+
// advance to the next token on complete. I don't like this much either. It's my code. I hate it all.
150182
ts.advance()
151183
}
152184
return tokens
@@ -180,17 +212,19 @@ func (ts *TokenScanner) peekNext() byte {
180212
return ts.source[ts.current+2]
181213
}
182214

215+
// finished checks if we've completed scanning. Might not be useful. Only used in one spot.
183216
func (ts *TokenScanner) finished() bool {
184217
if ts.current >= len(ts.source) {
185218
return true
186219
}
187220
return false
188221
}
189222

223+
// appendToken appends a token to the tokens array. It will not assign a value and its lexeme will be from the start to the current character.
190224
func (ts *TokenScanner) appendToken(tokens []Token, ttype TokenType) []Token { // really should be called "appendTokenWithAssumedLexeme"
191225
token := Token{
192226
t: ttype,
193-
lexeme: ts.sliceToCurrent(),
227+
lexeme: ts.sliceToCurrent(), // I dont like this
194228
}
195229
return append(tokens, token)
196230
}

token.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,8 +57,9 @@ const (
5757
type Token struct {
5858
t TokenType
5959
lexeme string
60+
value interface{}
6061
}
6162

6263
func (t Token) String() string {
63-
return fmt.Sprintf("TokenType: %v, Lexeme: %v", t.t, t.lexeme)
64+
return fmt.Sprintf("TokenType: %v, Lexeme: %v, Value: %v", t.t, t.lexeme, t.value)
6465
}

Comments (0)