
Start writing the REPL in Wren.

It doesn't actually execute code yet, but it:

- Supports left and right arrow keys for moving the cursor.
- Ctrl-A and Ctrl-E to jump to the start/end of the line; Ctrl-C and Ctrl-D to exit (Ctrl-D also deletes forward on a non-empty line).
- Syntax highlights the line (!).

The next step is to do a rough parse so that we can tell whether the line
is an expression, a statement, or needs more input. That will tell us
whether to interpret it at the top level and not worry about a result
(statement), evaluate it as an expression and print the result, or read
more lines.
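
As a hedged aside (not part of this commit), here is a minimal sketch of what
that rough parse might look like, built on the Lexer and Token classes added
below in src/module/repl.wren. The classify function, its result strings, and
the delimiter-depth heuristic are hypothetical, not the author's design:

// Hypothetical sketch: classify a line of input by scanning it with the
// Lexer from repl.wren (assumed to be in scope).
var classify = Fn.new {|line|
  var lexer = Lexer.new(line)
  var depth = 0
  var first = null
  while (true) {
    var token = lexer.readToken()
    if (token.type == Token.eof) break
    var isTrivia = token.type == Token.whitespace ||
                   token.type == Token.comment ||
                   token.type == Token.line
    if (!isTrivia) {
      if (first == null) first = token.type
      if (token.type == Token.leftParen ||
          token.type == Token.leftBracket ||
          token.type == Token.leftBrace) depth = depth + 1
      if (token.type == Token.rightParen ||
          token.type == Token.rightBracket ||
          token.type == Token.rightBrace) depth = depth - 1
    }
  }
  // Unbalanced delimiters: keep reading lines.
  if (depth > 0) return "more input"
  // A leading declaration keyword: interpret at the top level.
  if (first == Token.varKeyword ||
      first == Token.classKeyword ||
      first == Token.importKeyword) return "statement"
  // Otherwise treat it as an expression and print its result.
  return "expression"
}

System.print(classify.call("1 + 2"))        // expression
System.print(classify.call("var x = [1,"))  // more input
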
Author: Bob Nystrom
Date:   2016-05-20 20:30:09 -07:00
Parent: 59e9eb127e
Commit: 689cd42269
7 changed files with 1370 additions and 19 deletions


@@ -5,6 +5,7 @@
#include "io.wren.inc"
#include "os.wren.inc"
#include "repl.wren.inc"
#include "scheduler.wren.inc"
#include "timer.wren.inc"
@@ -162,6 +163,8 @@ static ModuleRegistry modules[] =
STATIC_METHOD("allArguments", processAllArguments)
END_CLASS
END_MODULE
MODULE(repl)
END_MODULE
MODULE(scheduler)
CLASS(Scheduler)
STATIC_METHOD("captureMethods_()", schedulerCaptureMethods)


@@ -6,8 +6,6 @@
#include "scheduler.h"
#include "vm.h"
#define MAX_LINE_LENGTH 1024 // TODO: Something less arbitrary.
// The single VM instance that the CLI uses.
static WrenVM* vm;
@@ -259,23 +257,9 @@ int runRepl()
printf("\\\\/\"-\n");
printf(" \\_/ wren v%s\n", WREN_VERSION_STRING);
char line[MAX_LINE_LENGTH];
for (;;)
{
printf("> ");
if (!fgets(line, MAX_LINE_LENGTH, stdin))
{
printf("\n");
break;
}
// TODO: Handle failure.
wrenInterpret(vm, line);
// TODO: Automatically print the result of expressions.
}
wrenInterpret(vm, "import \"repl\"\n");
uv_run(loop, UV_RUN_DEFAULT);
freeVM();

src/module/repl.c (new file, 3 additions)

@@ -0,0 +1,3 @@
#include "repl.h"
#include "wren.h"

src/module/repl.h (new file, 6 additions)

@@ -0,0 +1,6 @@
#ifndef repl_h
#define repl_h
#include "wren.h"
#endif

src/module/repl.wren (new file, 670 additions)

@@ -0,0 +1,670 @@
import "io" for Stdin
class EscapeBracket {
static up { 65 }
static down { 66 }
static right { 67 }
static left { 68 }
}
class Repl {
construct new() {
_cursor = 0
_line = ""
}
run() {
Stdin.isRaw = true
refreshLine()
while (true) {
var byte = Stdin.readByte()
if (byte == Chars.ctrlA) {
_cursor = 0
} else if (byte == Chars.ctrlC) {
System.print()
return
} else if (byte == Chars.ctrlD) {
// If the line is empty, Ctrl-D exits.
if (_line == "") {
System.print()
return
}
// Otherwise, it deletes the character after the cursor.
deleteRight()
} else if (byte == Chars.ctrlE) {
_cursor = _line.count
} else if (byte == Chars.escape) {
handleEscape()
} else if (byte == Chars.carriageReturn) {
executeLine()
} else if (byte == Chars.delete) {
deleteLeft()
} else if (byte >= Chars.space && byte <= Chars.tilde) {
insertChar(byte)
} else {
// TODO: Handle other non-printing characters.
System.print("Unhandled byte: %(byte)")
}
refreshLine()
}
}
/// Inserts the character with [byte] value at the current cursor position.
insertChar(byte) {
var char = String.fromCodePoint(byte)
_line = _line[0..._cursor] + char + _line[_cursor..-1]
_cursor = _cursor + 1
}
/// Deletes the character before the cursor, if any.
deleteLeft() {
if (_cursor == 0) return
// Delete the character before the cursor.
_line = _line[0...(_cursor - 1)] + _line[_cursor..-1]
_cursor = _cursor - 1
}
/// Deletes the character after the cursor, if any.
deleteRight() {
if (_cursor == _line.count) return
// Delete the character after the cursor.
_line = _line[0..._cursor] + _line[(_cursor + 1)..-1]
}
handleEscape() {
var escapeType = Stdin.readByte()
var value = Stdin.readByte()
if (escapeType == Chars.leftBracket) {
// ESC [ sequence.
if (value == EscapeBracket.up) {
// TODO: Handle this.
System.print("up")
} else if (value == EscapeBracket.down) {
// TODO: Handle this.
System.print("down")
} else if (value == EscapeBracket.left) {
// Move the cursor left one.
if (_cursor > 0) _cursor = _cursor - 1
} else if (value == EscapeBracket.right) {
// Move the cursor right one.
// TODO: Take into account multi-byte characters?
if (_cursor < _line.count) _cursor = _cursor + 1
}
} else {
// TODO: Handle ESC O sequences.
}
}
executeLine() {
// TODO: Execute line.
_line = ""
_cursor = 0
System.print()
}
refreshLine() {
// Erase the whole line.
System.write("\x1b[2K")
// Show the prompt at the beginning of the line.
System.write(Color.gray)
System.write("\r>> ")
System.write(Color.none)
// Syntax highlight the line.
var lexer = Lexer.new(_line)
while (true) {
var token = lexer.readToken()
if (token.type == Token.eof) break
System.write(TOKEN_COLORS[token.type])
System.write(token.text)
System.write(Color.none)
}
// Position the cursor.
System.write("\r\x1b[%(3 + _cursor)C")
}
}
/// ANSI color escape sequences.
class Color {
static none { "\x1b[0m" }
static black { "\x1b[30m" }
static red { "\x1b[31m" }
static green { "\x1b[32m" }
static yellow { "\x1b[33m" }
static blue { "\x1b[34m" }
static magenta { "\x1b[35m" }
static cyan { "\x1b[36m" }
static white { "\x1b[37m" }
static gray { "\x1b[30;1m" }
}
/// Utilities for working with characters.
class Chars {
static ctrlA { 0x01 }
static ctrlC { 0x03 }
static ctrlD { 0x04 }
static ctrlE { 0x05 }
static tab { 0x09 }
static lineFeed { 0x0a }
static carriageReturn { 0x0d }
static escape { 0x1b }
static space { 0x20 }
static bang { 0x21 }
static quote { 0x22 }
static percent { 0x25 }
static amp { 0x26 }
static leftParen { 0x28 }
static rightParen { 0x29 }
static star { 0x2a }
static plus { 0x2b }
static comma { 0x2c }
static minus { 0x2d }
static dot { 0x2e }
static slash { 0x2f }
static zero { 0x30 }
static nine { 0x39 }
static colon { 0x3a }
static less { 0x3c }
static equal { 0x3d }
static greater { 0x3e }
static question { 0x3f }
static upperA { 0x41 }
static upperF { 0x46 }
static upperZ { 0x5a }
static leftBracket { 0x5b }
static backslash { 0x5c }
static rightBracket { 0x5d }
static caret { 0x5e }
static underscore { 0x5f }
static lowerA { 0x61 }
static lowerF { 0x66 }
static lowerX { 0x78 }
static lowerZ { 0x7a }
static leftBrace { 0x7b }
static pipe { 0x7c }
static rightBrace { 0x7d }
static tilde { 0x7e }
static delete { 0x7f }
static isAlpha(c) {
return c >= lowerA && c <= lowerZ ||
c >= upperA && c <= upperZ ||
c == underscore
}
static isDigit(c) { c >= zero && c <= nine }
static isAlphaNumeric(c) { isAlpha(c) || isDigit(c) }
static isHexDigit(c) {
return c >= zero && c <= nine ||
c >= lowerA && c <= lowerF ||
c >= upperA && c <= upperF
}
static isLowerAlpha(c) { c >= lowerA && c <= lowerZ }
static isWhitespace(c) { c == space || c == tab || c == carriageReturn }
}
class Token {
// Punctuators.
static leftParen { "leftParen" }
static rightParen { "rightParen" }
static leftBracket { "leftBracket" }
static rightBracket { "rightBracket" }
static leftBrace { "leftBrace" }
static rightBrace { "rightBrace" }
static colon { "colon" }
static dot { "dot" }
static dotDot { "dotDot" }
static dotDotDot { "dotDotDot" }
static comma { "comma" }
static star { "star" }
static slash { "slash" }
static percent { "percent" }
static plus { "plus" }
static minus { "minus" }
static pipe { "pipe" }
static pipePipe { "pipePipe" }
static caret { "caret" }
static amp { "amp" }
static ampAmp { "ampAmp" }
static question { "question" }
static bang { "bang" }
static tilde { "tilde" }
static equal { "equal" }
static less { "less" }
static lessEqual { "lessEqual" }
static lessLess { "lessLess" }
static greater { "greater" }
static greaterEqual { "greaterEqual" }
static greaterGreater { "greaterGreater" }
static equalEqual { "equalEqual" }
static bangEqual { "bangEqual" }
// Keywords.
static breakKeyword { "break" }
static classKeyword { "class" }
static constructKeyword { "construct" }
static elseKeyword { "else" }
static falseKeyword { "false" }
static forKeyword { "for" }
static foreignKeyword { "foreign" }
static ifKeyword { "if" }
static importKeyword { "import" }
static inKeyword { "in" }
static isKeyword { "is" }
static nullKeyword { "null" }
static returnKeyword { "return" }
static staticKeyword { "static" }
static superKeyword { "super" }
static thisKeyword { "this" }
static trueKeyword { "true" }
static varKeyword { "var" }
static whileKeyword { "while" }
static field { "field" }
static name { "name" }
static number { "number" }
static string { "string" }
static interpolation { "interpolation" }
static comment { "comment" }
static whitespace { "whitespace" }
static line { "line" }
static error { "error" }
static eof { "eof" }
construct new(source, type, start, length) {
_source = source
_type = type
_start = start
_length = length
}
type { _type }
text { _source[_start...(_start + _length)] }
start { _start }
length { _length }
toString { text }
}
var KEYWORDS = {
"break": Token.breakKeyword,
"class": Token.classKeyword,
"construct": Token.constructKeyword,
"else": Token.elseKeyword,
"false": Token.falseKeyword,
"for": Token.forKeyword,
"foreign": Token.foreignKeyword,
"if": Token.ifKeyword,
"import": Token.importKeyword,
"in": Token.inKeyword,
"is": Token.isKeyword,
"null": Token.nullKeyword,
"return": Token.returnKeyword,
"static": Token.staticKeyword,
"super": Token.superKeyword,
"this": Token.thisKeyword,
"true": Token.trueKeyword,
"var": Token.varKeyword,
"while": Token.whileKeyword
}
// Data table for tokens that are tokenized using maximal munch.
//
// The key is the character that starts the token or tokens. After that is a
// list of token types and characters. As long as the next character is matched,
// the type will update to the type after that character.
var PUNCTUATORS = {
Chars.leftParen: [Token.leftParen],
Chars.rightParen: [Token.rightParen],
Chars.leftBracket: [Token.leftBracket],
Chars.rightBracket: [Token.rightBracket],
Chars.leftBrace: [Token.leftBrace],
Chars.rightBrace: [Token.rightBrace],
Chars.colon: [Token.colon],
Chars.comma: [Token.comma],
Chars.star: [Token.star],
Chars.percent: [Token.percent],
Chars.plus: [Token.plus],
Chars.minus: [Token.minus],
Chars.tilde: [Token.tilde],
Chars.caret: [Token.caret],
Chars.question: [Token.question],
Chars.lineFeed: [Token.line],
Chars.pipe: [Token.pipe, Chars.pipe, Token.pipePipe],
Chars.amp: [Token.amp, Chars.amp, Token.ampAmp],
Chars.bang: [Token.bang, Chars.equal, Token.bangEqual],
Chars.equal: [Token.equal, Chars.equal, Token.equalEqual],
Chars.dot: [Token.dot, Chars.dot, Token.dotDot, Chars.dot, Token.dotDotDot]
}
class Lexer {
construct new(source) {
_source = source
// Due to the magic of UTF-8, we can safely treat Wren source as a series
// of bytes, since the only code points that are meaningful to Wren fit in
// ASCII. The only place where non-ASCII code points can occur is inside
// string literals and comments and the lexer safely treats those as opaque
// bytes.
_bytes = source.bytes
_start = 0
_current = 0
// The stack of ongoing interpolated strings. Each element in the list is
// a single level of interpolation nesting. The value of the element is the
// number of unbalanced "(" still remaining to be closed.
_interpolations = []
}
readToken() {
if (_current >= _bytes.count) return makeToken(Token.eof)
_start = _current
var c = _bytes[_current]
advance()
if (!_interpolations.isEmpty) {
if (c == Chars.leftParen) {
_interpolations[-1] = _interpolations[-1] + 1
} else if (c == Chars.rightParen) {
_interpolations[-1] = _interpolations[-1] - 1
// The last ")" in an interpolated expression ends the expression and
// resumes the string.
if (_interpolations[-1] == 0) {
// This is the final ")", so the interpolation expression has ended.
// This ")" now begins the next section of the template string.
_interpolations.removeAt(-1)
return readString()
}
}
}
if (PUNCTUATORS.containsKey(c)) {
var punctuator = PUNCTUATORS[c]
var type = punctuator[0]
var i = 1
while (i < punctuator.count) {
if (!match(punctuator[i])) break
type = punctuator[i + 1]
i = i + 2
}
return makeToken(type)
}
// Handle "<", "<<", and "<=".
if (c == Chars.less) {
if (match(Chars.less)) return makeToken(Token.lessLess)
if (match(Chars.equal)) return makeToken(Token.lessEqual)
return makeToken(Token.less)
}
// Handle ">", ">>", and ">=".
if (c == Chars.greater) {
if (match(Chars.greater)) return makeToken(Token.greaterGreater)
if (match(Chars.equal)) return makeToken(Token.greaterEqual)
return makeToken(Token.greater)
}
// Handle "/", "//", and "/*".
if (c == Chars.slash) {
if (match(Chars.slash)) return readLineComment()
if (match(Chars.star)) return readBlockComment()
return makeToken(Token.slash)
}
if (c == Chars.underscore) return readField()
if (c == Chars.quote) return readString()
if (c == Chars.zero && peek() == Chars.lowerX) return readHexNumber()
if (Chars.isWhitespace(c)) return readWhitespace()
if (Chars.isDigit(c)) return readNumber()
if (Chars.isAlpha(c)) return readName()
return makeToken(Token.error)
}
// Reads a line comment until the end of the line is reached.
readLineComment() {
// A line comment stops at the newline since newlines are significant.
while (peek() != Chars.lineFeed && !isAtEnd) {
advance()
}
return makeToken(Token.comment)
}
readBlockComment() {
// Block comments can nest.
var nesting = 1
while (nesting > 0) {
// TODO: Report error.
if (isAtEnd) break
if (peek() == Chars.slash && peek(1) == Chars.star) {
advance()
advance()
nesting = nesting + 1
} else if (peek() == Chars.star && peek(1) == Chars.slash) {
advance()
advance()
nesting = nesting - 1
if (nesting == 0) break
} else {
advance()
}
}
return makeToken(Token.comment)
}
// Reads a static or instance field.
readField() {
var type = Token.field
// Read the rest of the name.
while (match {|c| Chars.isAlphaNumeric(c) }) {}
return makeToken(type)
}
// Reads a string literal.
readString() {
var type = Token.string
while (!isAtEnd) {
var c = _bytes[_current]
advance()
if (c == Chars.backslash) {
// TODO: Process specific escapes and validate them.
advance()
} else if (c == Chars.percent) {
// Consume the '('.
if (!isAtEnd) advance()
// TODO: Handle missing '('.
_interpolations.add(1)
type = Token.interpolation
break
} else if (c == Chars.quote) {
break
}
}
return makeToken(type)
}
// Reads a number literal.
readHexNumber() {
// Skip past the `x`.
advance()
// Read the rest of the number.
while (match {|c| Chars.isHexDigit(c) }) {}
return makeToken(Token.number)
}
// Reads a series of whitespace characters.
readWhitespace() {
// Read the rest of the whitespace.
while (match {|c| Chars.isWhitespace(c) }) {}
return makeToken(Token.whitespace)
}
// Reads a number literal.
readNumber() {
// Read the rest of the number.
while (match {|c| Chars.isDigit(c) }) {}
// TODO: Floating point, scientific.
return makeToken(Token.number)
}
// Reads an identifier or keyword token.
readName() {
// Read the rest of the name.
while (match {|c| Chars.isAlphaNumeric(c) }) {}
var text = _source[_start..._current]
var type = Token.name
if (KEYWORDS.containsKey(text)) {
type = KEYWORDS[text]
}
return Token.new(_source, type, _start, _current - _start)
}
// Returns `true` if we have scanned all characters.
isAtEnd { _current >= _bytes.count }
// Advances past the current character.
advance() {
_current = _current + 1
}
// Returns the byte value of the current character.
peek() { peek(0) }
// Returns the byte value of the character [n] bytes past the current
// character.
peek(n) {
if (_current + n >= _bytes.count) return -1
return _bytes[_current + n]
}
// Consumes the current character if it matches [condition], which can be a
// numeric code point value or a function that takes a code point and returns
// `true` if the code point matches.
match(condition) {
if (isAtEnd) return false
var c = _bytes[_current]
if (condition is Fn) {
if (!condition.call(c)) return false
} else if (c != condition) {
return false
}
advance()
return true
}
// Creates a token of [type] from the current character range.
makeToken(type) { Token.new(_source, type, _start, _current - _start) }
}
var TOKEN_COLORS = {
Token.leftParen: Color.gray,
Token.rightParen: Color.gray,
Token.leftBracket: Color.gray,
Token.rightBracket: Color.gray,
Token.leftBrace: Color.gray,
Token.rightBrace: Color.gray,
Token.colon: Color.gray,
Token.dot: Color.gray,
Token.dotDot: Color.none,
Token.dotDotDot: Color.none,
Token.comma: Color.gray,
Token.star: Color.none,
Token.slash: Color.none,
Token.percent: Color.none,
Token.plus: Color.none,
Token.minus: Color.none,
Token.pipe: Color.none,
Token.pipePipe: Color.none,
Token.caret: Color.none,
Token.amp: Color.none,
Token.ampAmp: Color.none,
Token.question: Color.none,
Token.bang: Color.none,
Token.tilde: Color.none,
Token.equal: Color.none,
Token.less: Color.none,
Token.lessEqual: Color.none,
Token.lessLess: Color.none,
Token.greater: Color.none,
Token.greaterEqual: Color.none,
Token.greaterGreater: Color.none,
Token.equalEqual: Color.none,
Token.bangEqual: Color.none,
// Keywords.
Token.breakKeyword: Color.cyan,
Token.classKeyword: Color.cyan,
Token.constructKeyword: Color.cyan,
Token.elseKeyword: Color.cyan,
Token.falseKeyword: Color.cyan,
Token.forKeyword: Color.cyan,
Token.foreignKeyword: Color.cyan,
Token.ifKeyword: Color.cyan,
Token.importKeyword: Color.cyan,
Token.inKeyword: Color.cyan,
Token.isKeyword: Color.cyan,
Token.nullKeyword: Color.cyan,
Token.returnKeyword: Color.cyan,
Token.staticKeyword: Color.cyan,
Token.superKeyword: Color.cyan,
Token.thisKeyword: Color.cyan,
Token.trueKeyword: Color.cyan,
Token.varKeyword: Color.cyan,
Token.whileKeyword: Color.cyan,
Token.field: Color.none,
Token.name: Color.none,
Token.number: Color.magenta,
Token.string: Color.yellow,
Token.interpolation: Color.yellow,
Token.comment: Color.gray,
Token.whitespace: Color.none,
Token.line: Color.none,
Token.error: Color.red,
Token.eof: Color.none,
}
Repl.new().run()
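
As an aside (not part of the commit), the Lexer above can be exercised on its
own. This hypothetical snippet tokenizes a sample line and prints each token
with its type, which is essentially what refreshLine() does before mapping
token types to colors through TOKEN_COLORS:

// Hypothetical usage sketch; the sample source string is arbitrary.
var sample = "var sum = 1 + 2 // add"
var sampleLexer = Lexer.new(sample)
while (true) {
  var token = sampleLexer.readToken()
  if (token.type == Token.eof) break
  System.print("%(token.type): '%(token.text)'")
}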

src/module/repl.wren.inc (new file, 672 additions)

@@ -0,0 +1,672 @@
// Generated automatically from src/module/repl.wren. Do not edit.
static const char* replModuleSource =
"import \"io\" for Stdin\n"
"\n"
"class EscapeBracket {\n"
" static up { 65 }\n"
" static down { 66 }\n"
" static right { 67 }\n"
" static left { 68 }\n"
"}\n"
"\n"
"class Repl {\n"
" construct new() {\n"
" _cursor = 0\n"
" _line = \"\"\n"
" }\n"
"\n"
" run() {\n"
" Stdin.isRaw = true\n"
" refreshLine()\n"
"\n"
" while (true) {\n"
" var byte = Stdin.readByte()\n"
" if (byte == Chars.ctrlA) {\n"
" _cursor = 0\n"
" } else if (byte == Chars.ctrlC) {\n"
" System.print()\n"
" return\n"
" } else if (byte == Chars.ctrlD) {\n"
" // If the line is empty, Ctrl_D exits.\n"
" if (_line == \"\") {\n"
" System.print()\n"
" return\n"
" }\n"
"\n"
" // Otherwise, it deletes the character after the cursor.\n"
" deleteRight()\n"
" } else if (byte == Chars.ctrlE) {\n"
" _cursor = _line.count\n"
" } else if (byte == Chars.escape) {\n"
" handleEscape()\n"
" } else if (byte == Chars.carriageReturn) {\n"
" executeLine()\n"
" } else if (byte == Chars.delete) {\n"
" deleteLeft()\n"
" } else if (byte >= Chars.space && byte <= Chars.tilde) {\n"
" insertChar(byte)\n"
" } else {\n"
" // TODO: Handle other non-printing characters.\n"
" System.print(\"Unhandled byte: %(byte)\")\n"
" }\n"
"\n"
" refreshLine()\n"
" }\n"
" }\n"
"\n"
" /// Inserts the character with [byte] value at the current cursor position.\n"
" insertChar(byte) {\n"
" var char = String.fromCodePoint(byte)\n"
" _line = _line[0..._cursor] + char + _line[_cursor..-1]\n"
" _cursor = _cursor + 1\n"
" }\n"
"\n"
" /// Deletes the character before the cursor, if any.\n"
" deleteLeft() {\n"
" if (_cursor == 0) return\n"
"\n"
" // Delete the character before the cursor.\n"
" _line = _line[0...(_cursor - 1)] + _line[_cursor..-1]\n"
" _cursor = _cursor - 1\n"
" }\n"
"\n"
" /// Deletes the character after the cursor, if any.\n"
" deleteRight() {\n"
" if (_cursor == _line.count) return\n"
"\n"
" // Delete the character after the cursor.\n"
" _line = _line[0..._cursor] + _line[(_cursor + 1)..-1]\n"
" }\n"
"\n"
" handleEscape() {\n"
" var escapeType = Stdin.readByte()\n"
" var value = Stdin.readByte()\n"
" if (escapeType == Chars.leftBracket) {\n"
" // ESC [ sequence.\n"
" if (value == EscapeBracket.up) {\n"
" // TODO: Handle this.\n"
" System.print(\"up\")\n"
" } else if (value == EscapeBracket.down) {\n"
" // TODO: Handle this.\n"
" System.print(\"down\")\n"
" } else if (value == EscapeBracket.left) {\n"
" // Move the cursor left one.\n"
" if (_cursor > 0) _cursor = _cursor - 1\n"
" } else if (value == EscapeBracket.right) {\n"
" // Move the cursor right one.\n"
" // TODO: Take into account multi-byte characters?\n"
" if (_cursor < _line.count) _cursor = _cursor + 1\n"
" }\n"
" } else {\n"
" // TODO: Handle ESC 0 sequences.\n"
" }\n"
" }\n"
"\n"
" executeLine() {\n"
" // TODO: Execute line.\n"
" _line = \"\"\n"
" _cursor = 0\n"
" System.print()\n"
" }\n"
"\n"
" refreshLine() {\n"
" // Erase the whole line.\n"
" System.write(\"\x1b[2K\")\n"
"\n"
" // Show the prompt at the beginning of the line.\n"
" System.write(Color.gray)\n"
" System.write(\"\r>> \")\n"
" System.write(Color.none)\n"
"\n"
" // Syntax highlight the line.\n"
" var lexer = Lexer.new(_line)\n"
"\n"
" while (true) {\n"
" var token = lexer.readToken()\n"
" if (token.type == Token.eof) break\n"
"\n"
" System.write(TOKEN_COLORS[token.type])\n"
" System.write(token.text)\n"
" System.write(Color.none)\n"
" }\n"
"\n"
" // Position the cursor.\n"
" System.write(\"\r\x1b[%(3 + _cursor)C\")\n"
" }\n"
"}\n"
"\n"
"/// ANSI color escape sequences.\n"
"class Color {\n"
" static none { \"\x1b[0m\" }\n"
" static black { \"\x1b[30m\" }\n"
" static red { \"\x1b[31m\" }\n"
" static green { \"\x1b[32m\" }\n"
" static yellow { \"\x1b[33m\" }\n"
" static blue { \"\x1b[34m\" }\n"
" static magenta { \"\x1b[35m\" }\n"
" static cyan { \"\x1b[36m\" }\n"
" static white { \"\x1b[37m\" }\n"
"\n"
" static gray { \"\x1b[30;1m\" }\n"
"}\n"
"\n"
"/// Utilities for working with characters.\n"
"class Chars {\n"
" static ctrlA { 0x01 }\n"
" static ctrlC { 0x03 }\n"
" static ctrlD { 0x04 }\n"
" static ctrlE { 0x05 }\n"
" static tab { 0x09 }\n"
" static lineFeed { 0x0a }\n"
" static carriageReturn { 0x0d }\n"
" static escape { 0x1b }\n"
" static space { 0x20 }\n"
" static bang { 0x21 }\n"
" static quote { 0x22 }\n"
" static percent { 0x25 }\n"
" static amp { 0x26 }\n"
" static leftParen { 0x28 }\n"
" static rightParen { 0x29 }\n"
" static star { 0x2a }\n"
" static plus { 0x2b }\n"
" static comma { 0x2c }\n"
" static minus { 0x2d }\n"
" static dot { 0x2e }\n"
" static slash { 0x2f }\n"
"\n"
" static zero { 0x30 }\n"
" static nine { 0x39 }\n"
"\n"
" static colon { 0x3a }\n"
" static less { 0x3c }\n"
" static equal { 0x3d }\n"
" static greater { 0x3e }\n"
" static question { 0x3f }\n"
"\n"
" static upperA { 0x41 }\n"
" static upperF { 0x46 }\n"
" static upperZ { 0x5a }\n"
"\n"
" static leftBracket { 0x5b }\n"
" static backslash { 0x5c }\n"
" static rightBracket { 0x5d }\n"
" static caret { 0x5e }\n"
" static underscore { 0x5f }\n"
"\n"
" static lowerA { 0x61 }\n"
" static lowerF { 0x66 }\n"
" static lowerX { 0x78 }\n"
" static lowerZ { 0x7a }\n"
"\n"
" static leftBrace { 0x7b }\n"
" static pipe { 0x7c }\n"
" static rightBrace { 0x7d }\n"
" static tilde { 0x7e }\n"
" static delete { 0x7f }\n"
"\n"
" static isAlpha(c) {\n"
" return c >= lowerA && c <= lowerZ ||\n"
" c >= upperA && c <= upperZ ||\n"
" c == underscore\n"
" }\n"
"\n"
" static isDigit(c) { c >= zero && c <= nine }\n"
"\n"
" static isAlphaNumeric(c) { isAlpha(c) || isDigit(c) }\n"
"\n"
" static isHexDigit(c) {\n"
" return c >= zero && c <= nine ||\n"
" c >= lowerA && c <= lowerF ||\n"
" c >= upperA && c <= upperF\n"
" }\n"
"\n"
" static isLowerAlpha(c) { c >= lowerA && c <= lowerZ }\n"
"\n"
" static isWhitespace(c) { c == space || c == tab || c == carriageReturn }\n"
"}\n"
"\n"
"class Token {\n"
" // Punctuators.\n"
" static leftParen { \"leftParen\" }\n"
" static rightParen { \"rightParen\" }\n"
" static leftBracket { \"leftBracket\" }\n"
" static rightBracket { \"rightBracket\" }\n"
" static leftBrace { \"leftBrace\" }\n"
" static rightBrace { \"rightBrace\" }\n"
" static colon { \"colon\" }\n"
" static dot { \"dot\" }\n"
" static dotDot { \"dotDot\" }\n"
" static dotDotDot { \"dotDotDot\" }\n"
" static comma { \"comma\" }\n"
" static star { \"star\" }\n"
" static slash { \"slash\" }\n"
" static percent { \"percent\" }\n"
" static plus { \"plus\" }\n"
" static minus { \"minus\" }\n"
" static pipe { \"pipe\" }\n"
" static pipePipe { \"pipePipe\" }\n"
" static caret { \"caret\" }\n"
" static amp { \"amp\" }\n"
" static ampAmp { \"ampAmp\" }\n"
" static question { \"question\" }\n"
" static bang { \"bang\" }\n"
" static tilde { \"tilde\" }\n"
" static equal { \"equal\" }\n"
" static less { \"less\" }\n"
" static lessEqual { \"lessEqual\" }\n"
" static lessLess { \"lessLess\" }\n"
" static greater { \"greater\" }\n"
" static greaterEqual { \"greaterEqual\" }\n"
" static greaterGreater { \"greaterGreater\" }\n"
" static equalEqual { \"equalEqual\" }\n"
" static bangEqual { \"bangEqual\" }\n"
"\n"
" // Keywords.\n"
" static breakKeyword { \"break\" }\n"
" static classKeyword { \"class\" }\n"
" static constructKeyword { \"construct\" }\n"
" static elseKeyword { \"else\" }\n"
" static falseKeyword { \"false\" }\n"
" static forKeyword { \"for\" }\n"
" static foreignKeyword { \"foreign\" }\n"
" static ifKeyword { \"if\" }\n"
" static importKeyword { \"import\" }\n"
" static inKeyword { \"in\" }\n"
" static isKeyword { \"is\" }\n"
" static nullKeyword { \"null\" }\n"
" static returnKeyword { \"return\" }\n"
" static staticKeyword { \"static\" }\n"
" static superKeyword { \"super\" }\n"
" static thisKeyword { \"this\" }\n"
" static trueKeyword { \"true\" }\n"
" static varKeyword { \"var\" }\n"
" static whileKeyword { \"while\" }\n"
"\n"
" static field { \"field\" }\n"
" static name { \"name\" }\n"
" static number { \"number\" }\n"
" static string { \"string\" }\n"
" static interpolation { \"interpolation\" }\n"
" static comment { \"comment\" }\n"
" static whitespace { \"whitespace\" }\n"
" static line { \"line\" }\n"
" static error { \"error\" }\n"
" static eof { \"eof\" }\n"
"\n"
" construct new(source, type, start, length) {\n"
" _source = source\n"
" _type = type\n"
" _start = start\n"
" _length = length\n"
" }\n"
"\n"
" type { _type }\n"
" text { _source[_start...(_start + _length)] }\n"
"\n"
" start { _start }\n"
" length { _length }\n"
"\n"
" toString { text }\n"
"}\n"
"\n"
"var KEYWORDS = {\n"
" \"break\": Token.breakKeyword,\n"
" \"class\": Token.classKeyword,\n"
" \"construct\": Token.constructKeyword,\n"
" \"else\": Token.elseKeyword,\n"
" \"false\": Token.falseKeyword,\n"
" \"for\": Token.forKeyword,\n"
" \"foreign\": Token.foreignKeyword,\n"
" \"if\": Token.ifKeyword,\n"
" \"import\": Token.importKeyword,\n"
" \"in\": Token.inKeyword,\n"
" \"is\": Token.isKeyword,\n"
" \"null\": Token.nullKeyword,\n"
" \"return\": Token.returnKeyword,\n"
" \"static\": Token.staticKeyword,\n"
" \"super\": Token.superKeyword,\n"
" \"this\": Token.thisKeyword,\n"
" \"true\": Token.trueKeyword,\n"
" \"var\": Token.varKeyword,\n"
" \"while\": Token.whileKeyword\n"
"}\n"
"\n"
"// Data table for tokens that are tokenized using maximal munch.\n"
"//\n"
"// The key is the character that starts the token or tokens. After that is a\n"
"// list of token types and characters. As long as the next character is matched,\n"
"// the type will update to the type after that character.\n"
"var PUNCTUATORS = {\n"
" Chars.leftParen: [Token.leftParen],\n"
" Chars.rightParen: [Token.rightParen],\n"
" Chars.leftBracket: [Token.leftBracket],\n"
" Chars.rightBracket: [Token.rightBracket],\n"
" Chars.leftBrace: [Token.leftBrace],\n"
" Chars.rightBrace: [Token.rightBrace],\n"
" Chars.colon: [Token.colon],\n"
" Chars.comma: [Token.comma],\n"
" Chars.star: [Token.star],\n"
" Chars.percent: [Token.percent],\n"
" Chars.plus: [Token.plus],\n"
" Chars.minus: [Token.minus],\n"
" Chars.tilde: [Token.tilde],\n"
" Chars.caret: [Token.caret],\n"
" Chars.question: [Token.question],\n"
" Chars.lineFeed: [Token.line],\n"
"\n"
" Chars.pipe: [Token.pipe, Chars.pipe, Token.pipePipe],\n"
" Chars.amp: [Token.amp, Chars.amp, Token.ampAmp],\n"
" Chars.bang: [Token.bang, Chars.equal, Token.bangEqual],\n"
" Chars.equal: [Token.equal, Chars.equal, Token.equalEqual],\n"
"\n"
" Chars.dot: [Token.dot, Chars.dot, Token.dotDot, Chars.dot, Token.dotDotDot]\n"
"}\n"
"\n"
"class Lexer {\n"
" construct new(source) {\n"
" _source = source\n"
"\n"
" // Due to the magic of UTF-8, we can safely treat Wren source as a series\n"
" // of bytes, since the only code points that are meaningful to Wren fit in\n"
" // ASCII. The only place where non-ASCII code points can occur is inside\n"
" // string literals and comments and the lexer safely treats those as opaque\n"
" // bytes.\n"
" _bytes = source.bytes\n"
"\n"
" _start = 0\n"
" _current = 0\n"
"\n"
" // The stack of ongoing interpolated strings. Each element in the list is\n"
" // a single level of interpolation nesting. The value of the element is the\n"
" // number of unbalanced \"(\" still remaining to be closed.\n"
" _interpolations = []\n"
" }\n"
"\n"
" readToken() {\n"
" if (_current >= _bytes.count) return makeToken(Token.eof)\n"
"\n"
" _start = _current\n"
" var c = _bytes[_current]\n"
" advance()\n"
"\n"
" if (!_interpolations.isEmpty) {\n"
" if (c == Chars.leftParen) {\n"
" _interpolations[-1] = _interpolations[-1] + 1\n"
" } else if (c == Chars.rightParen) {\n"
" _interpolations[-1] = _interpolations[-1] - 1\n"
"\n"
" // The last \")\" in an interpolated expression ends the expression and\n"
" // resumes the string.\n"
" if (_interpolations[-1] == 0) {\n"
" // This is the final \")\", so the interpolation expression has ended.\n"
" // This \")\" now begins the next section of the template string.\n"
" _interpolations.removeAt(-1)\n"
" return readString()\n"
" }\n"
" }\n"
" }\n"
"\n"
" if (PUNCTUATORS.containsKey(c)) {\n"
" var punctuator = PUNCTUATORS[c]\n"
" var type = punctuator[0]\n"
" var i = 1\n"
" while (i < punctuator.count) {\n"
" if (!match(punctuator[i])) break\n"
" type = punctuator[i + 1]\n"
" i = i + 2\n"
" }\n"
"\n"
" return makeToken(type)\n"
" }\n"
"\n"
" // Handle \"<\", \"<<\", and \"<=\".\n"
" if (c == Chars.less) {\n"
" if (match(Chars.less)) return makeToken(Token.lessLess)\n"
" if (match(Chars.equal)) return makeToken(Token.lessEqual)\n"
" return makeToken(Token.less)\n"
" }\n"
"\n"
" // Handle \">\", \">>\", and \">=\".\n"
" if (c == Chars.greater) {\n"
" if (match(Chars.greater)) return makeToken(Token.greaterGreater)\n"
" if (match(Chars.equal)) return makeToken(Token.greaterEqual)\n"
" return makeToken(Token.greater)\n"
" }\n"
"\n"
" // Handle \"/\", \"//\", and \"/*\".\n"
" if (c == Chars.slash) {\n"
" if (match(Chars.slash)) return readLineComment()\n"
" if (match(Chars.star)) return readBlockComment()\n"
" return makeToken(Token.slash)\n"
" }\n"
"\n"
" if (c == Chars.underscore) return readField()\n"
" if (c == Chars.quote) return readString()\n"
"\n"
" if (c == Chars.zero && peek() == Chars.lowerX) return readHexNumber()\n"
" if (Chars.isWhitespace(c)) return readWhitespace()\n"
" if (Chars.isDigit(c)) return readNumber()\n"
" if (Chars.isAlpha(c)) return readName()\n"
"\n"
" return makeToken(Token.error)\n"
" }\n"
"\n"
" // Reads a line comment until the end of the line is reached.\n"
" readLineComment() {\n"
" // A line comment stops at the newline since newlines are significant.\n"
" while (peek() != Chars.lineFeed && !isAtEnd) {\n"
" advance()\n"
" }\n"
"\n"
" return makeToken(Token.comment)\n"
" }\n"
"\n"
" readBlockComment() {\n"
" // Block comments can nest.\n"
" var nesting = 1\n"
" while (nesting > 0) {\n"
" // TODO: Report error.\n"
" if (isAtEnd) break\n"
"\n"
" if (peek() == Chars.slash && peek(1) == Chars.star) {\n"
" advance()\n"
" advance()\n"
" nesting = nesting + 1\n"
" } else if (peek() == Chars.star && peek(1) == Chars.slash) {\n"
" advance()\n"
" advance()\n"
" nesting = nesting - 1\n"
" if (nesting == 0) break\n"
" } else {\n"
" advance()\n"
" }\n"
" }\n"
"\n"
" return makeToken(Token.comment)\n"
" }\n"
"\n"
" // Reads a static or instance field.\n"
" readField() {\n"
" var type = Token.field\n"
"\n"
" // Read the rest of the name.\n"
" while (match {|c| Chars.isAlphaNumeric(c) }) {}\n"
"\n"
" return makeToken(type)\n"
" }\n"
"\n"
" // Reads a string literal.\n"
" readString() {\n"
" var type = Token.string\n"
"\n"
" while (!isAtEnd) {\n"
" var c = _bytes[_current]\n"
" advance()\n"
"\n"
" if (c == Chars.backslash) {\n"
" // TODO: Process specific escapes and validate them.\n"
" advance()\n"
" } else if (c == Chars.percent) {\n"
" // Consume the '('.\n"
" if (!isAtEnd) advance()\n"
" // TODO: Handle missing '('.\n"
" _interpolations.add(1)\n"
" type = Token.interpolation\n"
" break\n"
" } else if (c == Chars.quote) {\n"
" break\n"
" }\n"
" }\n"
"\n"
" return makeToken(type)\n"
" }\n"
"\n"
" // Reads a number literal.\n"
" readHexNumber() {\n"
" // Skip past the `x`.\n"
" advance()\n"
"\n"
" // Read the rest of the number.\n"
" while (match {|c| Chars.isHexDigit(c) }) {}\n"
" return makeToken(Token.number)\n"
" }\n"
"\n"
" // Reads a series of whitespace characters.\n"
" readWhitespace() {\n"
" // Read the rest of the whitespace.\n"
" while (match {|c| Chars.isWhitespace(c) }) {}\n"
"\n"
" return makeToken(Token.whitespace)\n"
" }\n"
"\n"
" // Reads a number literal.\n"
" readNumber() {\n"
" // Read the rest of the number.\n"
" while (match {|c| Chars.isDigit(c) }) {}\n"
"\n"
" // TODO: Floating point, scientific.\n"
" return makeToken(Token.number)\n"
" }\n"
"\n"
" // Reads an identifier or keyword token.\n"
" readName() {\n"
" // Read the rest of the name.\n"
" while (match {|c| Chars.isAlphaNumeric(c) }) {}\n"
"\n"
" var text = _source[_start..._current]\n"
" var type = Token.name\n"
" if (KEYWORDS.containsKey(text)) {\n"
" type = KEYWORDS[text]\n"
" }\n"
"\n"
" return Token.new(_source, type, _start, _current - _start)\n"
" }\n"
"\n"
" // Returns `true` if we have scanned all characters.\n"
" isAtEnd { _current >= _bytes.count }\n"
"\n"
" // Advances past the current character.\n"
" advance() {\n"
" _current = _current + 1\n"
" }\n"
"\n"
" // Returns the byte value of the current character.\n"
" peek() { peek(0) }\n"
"\n"
" // Returns the byte value of the character [n] bytes past the current\n"
" // character.\n"
" peek(n) {\n"
" if (_current + n >= _bytes.count) return -1\n"
" return _bytes[_current + n]\n"
" }\n"
"\n"
" // Consumes the current character if it matches [condition], which can be a\n"
" // numeric code point value or a function that takes a code point and returns\n"
" // `true` if the code point matches.\n"
" match(condition) {\n"
" if (isAtEnd) return false\n"
"\n"
" var c = _bytes[_current]\n"
" if (condition is Fn) {\n"
" if (!condition.call(c)) return false\n"
" } else if (c != condition) {\n"
" return false\n"
" }\n"
"\n"
" advance()\n"
" return true\n"
" }\n"
"\n"
" // Creates a token of [type] from the current character range.\n"
" makeToken(type) { Token.new(_source, type, _start, _current - _start) }\n"
"}\n"
"\n"
"var TOKEN_COLORS = {\n"
" Token.leftParen: Color.gray,\n"
" Token.rightParen: Color.gray,\n"
" Token.leftBracket: Color.gray,\n"
" Token.rightBracket: Color.gray,\n"
" Token.leftBrace: Color.gray,\n"
" Token.rightBrace: Color.gray,\n"
" Token.colon: Color.gray,\n"
" Token.dot: Color.gray,\n"
" Token.dotDot: Color.none,\n"
" Token.dotDotDot: Color.none,\n"
" Token.comma: Color.gray,\n"
" Token.star: Color.none,\n"
" Token.slash: Color.none,\n"
" Token.percent: Color.none,\n"
" Token.plus: Color.none,\n"
" Token.minus: Color.none,\n"
" Token.pipe: Color.none,\n"
" Token.pipePipe: Color.none,\n"
" Token.caret: Color.none,\n"
" Token.amp: Color.none,\n"
" Token.ampAmp: Color.none,\n"
" Token.question: Color.none,\n"
" Token.bang: Color.none,\n"
" Token.tilde: Color.none,\n"
" Token.equal: Color.none,\n"
" Token.less: Color.none,\n"
" Token.lessEqual: Color.none,\n"
" Token.lessLess: Color.none,\n"
" Token.greater: Color.none,\n"
" Token.greaterEqual: Color.none,\n"
" Token.greaterGreater: Color.none,\n"
" Token.equalEqual: Color.none,\n"
" Token.bangEqual: Color.none,\n"
"\n"
" // Keywords.\n"
" Token.breakKeyword: Color.cyan,\n"
" Token.classKeyword: Color.cyan,\n"
" Token.constructKeyword: Color.cyan,\n"
" Token.elseKeyword: Color.cyan,\n"
" Token.falseKeyword: Color.cyan,\n"
" Token.forKeyword: Color.cyan,\n"
" Token.foreignKeyword: Color.cyan,\n"
" Token.ifKeyword: Color.cyan,\n"
" Token.importKeyword: Color.cyan,\n"
" Token.inKeyword: Color.cyan,\n"
" Token.isKeyword: Color.cyan,\n"
" Token.nullKeyword: Color.cyan,\n"
" Token.returnKeyword: Color.cyan,\n"
" Token.staticKeyword: Color.cyan,\n"
" Token.superKeyword: Color.cyan,\n"
" Token.thisKeyword: Color.cyan,\n"
" Token.trueKeyword: Color.cyan,\n"
" Token.varKeyword: Color.cyan,\n"
" Token.whileKeyword: Color.cyan,\n"
"\n"
" Token.field: Color.none,\n"
" Token.name: Color.none,\n"
" Token.number: Color.magenta,\n"
" Token.string: Color.yellow,\n"
" Token.interpolation: Color.yellow,\n"
" Token.comment: Color.gray,\n"
" Token.whitespace: Color.none,\n"
" Token.line: Color.none,\n"
" Token.error: Color.red,\n"
" Token.eof: Color.none,\n"
"}\n"
"\n"
"Repl.new().run()\n";


@@ -18,6 +18,10 @@
29205C9D1AB4E6430073018D /* wren_utils.c in Sources */ = {isa = PBXBuildFile; fileRef = 29205C961AB4E6430073018D /* wren_utils.c */; };
29205C9E1AB4E6430073018D /* wren_value.c in Sources */ = {isa = PBXBuildFile; fileRef = 29205C971AB4E6430073018D /* wren_value.c */; };
29205C9F1AB4E6430073018D /* wren_vm.c in Sources */ = {isa = PBXBuildFile; fileRef = 29205C981AB4E6430073018D /* wren_vm.c */; };
293B25571CEFD8C7005D9537 /* repl.c in Sources */ = {isa = PBXBuildFile; fileRef = 293B25541CEFD8C7005D9537 /* repl.c */; };
293B25581CEFD8C7005D9537 /* repl.c in Sources */ = {isa = PBXBuildFile; fileRef = 293B25541CEFD8C7005D9537 /* repl.c */; };
293B25591CEFD8C7005D9537 /* repl.wren.inc in Sources */ = {isa = PBXBuildFile; fileRef = 293B25561CEFD8C7005D9537 /* repl.wren.inc */; };
293B255A1CEFD8C7005D9537 /* repl.wren.inc in Sources */ = {isa = PBXBuildFile; fileRef = 293B25561CEFD8C7005D9537 /* repl.wren.inc */; };
293D46961BB43F9900200083 /* call.c in Sources */ = {isa = PBXBuildFile; fileRef = 293D46941BB43F9900200083 /* call.c */; };
2949AA8D1C2F14F000B106BA /* get_variable.c in Sources */ = {isa = PBXBuildFile; fileRef = 2949AA8B1C2F14F000B106BA /* get_variable.c */; };
29512C811B91F8EB008C10E6 /* libuv.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 29512C801B91F8EB008C10E6 /* libuv.a */; };
@@ -103,6 +107,9 @@
29205CA61AB4E65E0073018D /* wren_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = wren_utils.h; path = ../../src/vm/wren_utils.h; sourceTree = "<group>"; };
29205CA71AB4E65E0073018D /* wren_value.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = wren_value.h; path = ../../src/vm/wren_value.h; sourceTree = "<group>"; };
29205CA81AB4E65E0073018D /* wren_vm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = wren_vm.h; path = ../../src/vm/wren_vm.h; sourceTree = "<group>"; };
293B25541CEFD8C7005D9537 /* repl.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = repl.c; path = ../../src/module/repl.c; sourceTree = "<group>"; };
293B25551CEFD8C7005D9537 /* repl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = repl.h; path = ../../src/module/repl.h; sourceTree = "<group>"; };
293B25561CEFD8C7005D9537 /* repl.wren.inc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.pascal; name = repl.wren.inc; path = ../../src/module/repl.wren.inc; sourceTree = "<group>"; };
293D46941BB43F9900200083 /* call.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = call.c; path = ../../test/api/call.c; sourceTree = "<group>"; };
293D46951BB43F9900200083 /* call.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = call.h; path = ../../test/api/call.h; sourceTree = "<group>"; };
2949AA8B1C2F14F000B106BA /* get_variable.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = get_variable.c; path = ../../test/api/get_variable.c; sourceTree = "<group>"; };
@@ -173,6 +180,9 @@
29D025E11C19CD1000A3BB28 /* os.h */,
29D025E01C19CD1000A3BB28 /* os.c */,
29D025E21C19CD1000A3BB28 /* os.wren.inc */,
293B25551CEFD8C7005D9537 /* repl.h */,
293B25541CEFD8C7005D9537 /* repl.c */,
293B25561CEFD8C7005D9537 /* repl.wren.inc */,
291647C31BA5EA45006142EE /* scheduler.h */,
291647C21BA5EA45006142EE /* scheduler.c */,
291647CD1BA5ED26006142EE /* scheduler.wren.inc */,
@@ -373,9 +383,11 @@
291647C41BA5EA45006142EE /* scheduler.c in Sources */,
29A427341BDBE435001E6E22 /* wren_opt_meta.c in Sources */,
29205C9B1AB4E6430073018D /* wren_debug.c in Sources */,
293B25591CEFD8C7005D9537 /* repl.wren.inc in Sources */,
29205C9D1AB4E6430073018D /* wren_utils.c in Sources */,
29D025E51C19CD1000A3BB28 /* os.wren.inc in Sources */,
29D025E31C19CD1000A3BB28 /* os.c in Sources */,
293B25571CEFD8C7005D9537 /* repl.c in Sources */,
29729F311BA70A620099CA20 /* io.c in Sources */,
29A427361BDBE435001E6E22 /* wren_opt_meta.wren.inc in Sources */,
29205C9E1AB4E6430073018D /* wren_value.c in Sources */,
@@ -397,6 +409,7 @@
2949AA8D1C2F14F000B106BA /* get_variable.c in Sources */,
29DC14A11BBA2FEC008A8274 /* scheduler.c in Sources */,
29A427391BDBE435001E6E22 /* wren_opt_random.c in Sources */,
293B255A1CEFD8C7005D9537 /* repl.wren.inc in Sources */,
29932D511C20D8C900099DEE /* benchmark.c in Sources */,
29DC14A01BBA2FD6008A8274 /* timer.c in Sources */,
29DC149F1BBA2FCC008A8274 /* vm.c in Sources */,