Commit b91e5fe: initial commit
vysarge committed Apr 18, 2018 (0 parents)
Showing 3 changed files with 288 additions and 0 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -0,0 +1,3 @@
*.crl
*.json
*.pyc
105 changes: 105 additions & 0 deletions lab.py
@@ -0,0 +1,105 @@
"""6.009 Lab 8A: carlae Interpreter"""

import sys


class EvaluationError(Exception):
"""Exception to be raised if there is an error during evaluation."""
pass



def tokenize(source):
"""
Splits an input string into meaningful tokens (left parens, right parens,
other whitespace-separated values). Returns a list of strings.
Arguments:
source (str): a string containing the source code of a carlae
expression
>>> tokenize("(cat (dog (tomato)))")
['(', 'cat', '(', 'dog', '(', 'tomato', ')', ')', ')']
>>> tokenize("(+ cat dog) ; comment")
['(', '+', 'cat', 'dog', ')']
"""
whitespace_vals = set(" \n") # whitespace values
single_vals = set("()")
curr_token = '' # the current token being processed
commented = False # whether we are currently in a comment
found_tokens = [] # list of found tokens.
for ch in source:
        if (commented): # skip comments
            if (ch == '\n'):
                commented = False # end the comment at a newline
            continue
        # otherwise
        if (ch == ';'): # set flag to ignore comments
            commented = True
            if (len(curr_token) > 0):
                found_tokens.append(curr_token)
            curr_token = '' # reset so the flushed token is not duplicated after the comment
        elif (ch in single_vals): # for ( and )
            if (len(curr_token) > 0):
                found_tokens.append(curr_token)
            found_tokens.append(ch)
            curr_token = ''
        # for whitespace-separated tokens
        elif (ch in whitespace_vals):
            if (len(curr_token) > 0):
                found_tokens.append(curr_token)
            curr_token = ''
        else:
            curr_token += ch

    # catch any dangling tokens
    if (len(curr_token) > 0):
        found_tokens.append(curr_token)

    return found_tokens

def parse(tokens):
"""
Parses a list of tokens, constructing a representation where:
* symbols are represented as Python strings
* numbers are represented as Python ints or floats
* S-expressions are represented as Python lists
Arguments:
tokens (list): a list of strings representing tokens
"""
numbers = set('0123456789')
def parse_expression(index):
if (tokens[index] in numbers):
return int(tokens[index]), index+1
if (tokens[index] == '('):
pass
# start off recursion
parsed_expression, next_index = parse_expression(0)
return parsed_expression
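
The committed parse_expression is still a stub: the digit check only matches single-character tokens, and the '(' branch is unfinished. A minimal sketch of one way to complete it, for reference only and not part of this commit; it assumes the usual carlae conventions (ints/floats for numeric tokens, plain strings for symbols, nested Python lists for S-expressions) and leaves fuller error handling for malformed input to the lab spec:

def parse(tokens):
    """Sketch: parse a token list into the nested-list representation."""
    def parse_expression(index):
        token = tokens[index]
        if token == '(':
            # collect sub-expressions until the matching ')'
            subexpressions = []
            index += 1
            while tokens[index] != ')':
                subexpr, index = parse_expression(index)
                subexpressions.append(subexpr)
            return subexpressions, index + 1  # skip past the closing ')'
        elif token == ')':
            raise SyntaxError('unexpected )')
        else:
            # numbers become ints or floats; anything else is a symbol string
            try:
                return int(token), index + 1
            except ValueError:
                try:
                    return float(token), index + 1
                except ValueError:
                    return token, index + 1
    parsed_expression, next_index = parse_expression(0)
    return parsed_expression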


carlae_builtins = {
    '+': sum,
    '-': lambda args: -args[0] if len(args) == 1 else (args[0] - sum(args[1:])),
}
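
For reference, each builtin receives one Python list of already-evaluated arguments, so '-' negates a single argument and otherwise subtracts the sum of the remaining arguments from the first:

carlae_builtins['+']([3, 4, 5])   # 12
carlae_builtins['-']([5])         # -5
carlae_builtins['-']([10, 1, 2])  # 10 - (1 + 2) == 7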


def evaluate(tree):
"""
Evaluate the given syntax tree according to the rules of the carlae
language.
Arguments:
tree (type varies): a fully parsed expression, as the output from the
parse function
"""
raise NotImplementedError
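
evaluate is left unimplemented in this commit. A minimal calculator-only sketch, assuming the usual rules for this stage of the lab (numbers evaluate to themselves, a bare symbol is looked up in carlae_builtins, and a list applies its evaluated first element to its evaluated remaining elements); the exact error messages and any richer environment handling are assumptions, not part of the commit:

def evaluate(tree):
    """Sketch: evaluate a parsed expression using only the built-in operators."""
    if isinstance(tree, (int, float)):
        return tree  # numbers are self-evaluating
    if isinstance(tree, str):
        if tree in carlae_builtins:  # symbol lookup (builtins only, no user definitions yet)
            return carlae_builtins[tree]
        raise EvaluationError('unknown symbol: ' + tree)
    # otherwise tree is a list representing an S-expression
    func = evaluate(tree[0])
    args = [evaluate(arg) for arg in tree[1:]]
    if not callable(func):
        raise EvaluationError('first element does not evaluate to a function')
    return func(args)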


if __name__ == '__main__':
    # code in this block will only be executed if lab.py is the main file being
    # run (not when this module is imported)
    pass
    # run doctests -- comment out before submitting!
    import doctest
    doctest.testmod()
180 changes: 180 additions & 0 deletions test.py
@@ -0,0 +1,180 @@
#!/usr/bin/env python3
import os
import lab
import sys
import json
import unittest

TEST_DIRECTORY = os.path.dirname(__file__)

class LispTest(unittest.TestCase):
    @staticmethod
    def make_tester(func):
        """
        Helper to wrap a function so that, when called, it produces a
        dictionary instead of its normal result. If the function call works
        without raising an exception, then the results are included.
        Otherwise, the dictionary includes information about the exception that
        was raised.
        """
        def _tester(*args):
            try:
                return {'ok': True, 'output': func(*args)}
            except Exception as e:
                exc_type, exc_obj, exc_tb = sys.exc_info()
                return {'ok': False, 'type': exc_type.__name__}
        return _tester

    @staticmethod
    def load_test_values(n):
        """
        Helper function to load test inputs/outputs
        """
        with open('test_inputs/%s.json' % n) as f:
            inputs = json.load(f)
        with open('test_outputs/%s.json' % n) as f:
            outputs = json.load(f)
        return inputs, outputs

    @staticmethod
    def run_continued_evaluations(ins):
        """
        Helper to evaluate a sequence of expressions in an environment.
        """
        env = None
        outs = []
        try:
            t = LispTest.make_tester(lab.result_and_env)
        except:
            t = LispTest.make_tester(lab.evaluate)
        for i in ins:
            if env is None:
                args = (i, )
            else:
                args = (i, env)
            out = t(*args)
            if out['ok']:
                env = out['output'][1]
            if out['ok']:
                if isinstance(out['output'][0], (int, float)):
                    out['output'] = out['output'][0]
                else:
                    out['output'] = 'SOMETHING'
            outs.append(out)
        return outs

    def _compare_outputs(self, x, y):
        self.assertEqual(x['ok'], y['ok'])
        if x['ok']:
            if isinstance(x['output'], (int, float)):
                self.assertAlmostEqual(x['output'], y['output'])
            else:
                self.assertEqual(x['output'], y['output'])
        else:
            self.assertEqual(x, y)

    def _test_continued_evaluations(self, n):
        """
        Test that the results from running continued evaluations in the same
        environment match the expected values.
        """
        inp, out = self.load_test_values(n)
        results = self.run_continued_evaluations(inp)
        for result, expected in zip(results, out):
            self._compare_outputs(result, expected)

    def run_test_number(self, n, func):
        tester = self.make_tester(func)
        inp, out = self.load_test_values(n)
        for i, o in zip(inp, out):
            self._compare_outputs(tester(i), o)
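
Note: run_continued_evaluations shows the interface the later tests expect from lab.py: a function (looked up as lab.result_and_env) that is called as result_and_env(tree) or result_and_env(tree, env) and returns a (value, environment) pair, with the returned environment threaded into the next call; if it is missing, the harness falls back to lab.evaluate. A hypothetical stub with that calling convention, for illustration only (the environment representation is an assumption):

def result_and_env(tree, env=None):
    # hypothetical sketch of the entry point the harness looks for in lab.py
    if env is None:
        env = {}  # assumed environment representation; the real lab may use its own class
    value = evaluate(tree)  # a full version would evaluate the expression *within* env
    return value, env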


class Test1_Parse(LispTest):
    def test_01_tokenize(self):
        self.run_test_number(1, lab.tokenize)

    def test_02_parse(self):
        self.run_test_number(2, lab.parse)

    def test_03_tokenize_and_parse(self):
        self.run_test_number(3, lambda i: lab.parse(lab.tokenize(i)))


class Test2_Eval(LispTest):
    def test_04_calc(self):
        self.run_test_number(4, lab.evaluate)

    def test_05_mult_div(self):
        self.run_test_number(5, lab.evaluate)

    def test_06_simple_assignment(self):
        self._test_continued_evaluations(6)

    def test_07_simple_assignment(self):
        self._test_continued_evaluations(7)

    def test_08_bad_lookups(self):
        self._test_continued_evaluations(8)

    def test_09_rename_builtin(self):
        self._test_continued_evaluations(9)


class Test3_Func(LispTest):
    def test_10_simple_function(self):
        self._test_continued_evaluations(10)

    def test_11_inline_lambda(self):
        self._test_continued_evaluations(11)

    def test_12_closures(self):
        self._test_continued_evaluations(12)


class Test4_All(LispTest):
    def test_13_short_definition(self):
        self._test_continued_evaluations(13)

    def test_14_dependent_definition(self):
        self._test_continued_evaluations(14)

    def test_15_scoping_1(self):
        self._test_continued_evaluations(15)

    def test_16_scoping_2(self):
        self._test_continued_evaluations(16)

    def test_17_scoping_3(self):
        self._test_continued_evaluations(17)

    def test_18_scoping_4(self):
        self._test_continued_evaluations(18)

    def test_19_scoping_5(self):
        self._test_continued_evaluations(19)

    def test_20_calling_errors(self):
        self._test_continued_evaluations(20)

    def test_21_functionception(self):
        self._test_continued_evaluations(21)

    def test_22_alias(self):
        self._test_continued_evaluations(22)

    def test_23_big_scoping_1(self):
        self._test_continued_evaluations(23)

    def test_24_big_scoping_2(self):
        self._test_continued_evaluations(24)

    def test_25_big_scoping_3(self):
        self._test_continued_evaluations(25)

    def test_26_big_scoping_4(self):
        self._test_continued_evaluations(26)


if __name__ == '__main__':
    res = unittest.main(verbosity=3, exit=False)
