Commit 8cf12a9

Merge pull request #153 from lambda-feedback/tr129-experiment-with-syntactical-equivalence
Further attempt at generating correct feedback string
2 parents: 963cbe9 + 5a1f78e

2 files changed: +17 -14 lines changed

app/criteria_graph_utilities.py  +7 -4

@@ -29,10 +29,13 @@ def __init__(self, label, summary, details, evaluate, replacement=None):
             return

     class Criterion(Node):
-        def __init__(self, label, summary, details, tags=None, feedback_string_generator=lambda x: None):
+        def __init__(self, label, summary, details, tags=None, feedback_string_generator=None):
             super().__init__(label, summary, details)
             self.consequences = self.outgoing
-            self.feedback_string_generator = feedback_string_generator
+            if feedback_string_generator is not None:
+                self.feedback_string_generator = feedback_string_generator
+            else:
+                self.feedback_string_generator = lambda x: None
             if tags is None:
                 self.tags = set()
             else:
@@ -210,7 +213,7 @@ def add_criterion_node(self, label, summary, details, sufficiencies=None, evalua
             raise Exception(f"Criterion node {label} is already defined.")
         if sufficiencies is not None:
             raise Exception(f"Criterion nodes cannot have sufficiencies.")
-        node = CriteriaGraph.Criterion(label, summary, details, feedback_string_generator)
+        node = CriteriaGraph.Criterion(label, summary=summary, details=details, feedback_string_generator=feedback_string_generator)
         self.criteria.update({label: node})
         self.sufficiencies.update({label: sufficiencies})
         return node
@@ -268,7 +271,7 @@ def attach(self, source_label, target_label, summary=None, details=None, suffici
            if summary is None or details is None:
                raise Exception(f"Unknown node {target_label}. If you wish to create a new node summary and details must be specified.")
            else:
-                target = target_generator(target_label, summary, details, sufficiencies, evaluate)
+                target = target_generator(target_label, summary=summary, details=details, sufficiencies=sufficiencies, evaluate=evaluate, feedback_string_generator=feedback_string_generator)
        else:
            raise Exception(f"Both {source_label} and {target_label} are {type_name} nodes. Only {other_type_name} nodes can be attached to {type_name} nodes.")
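The change to Criterion.__init__ above swaps a lambda default argument for the usual None-sentinel idiom: the no-op default is now resolved in one place inside the constructor, so callers such as add_criterion_node and attach can forward feedback_string_generator unconditionally, even when it is None. A minimal self-contained sketch of the idiom (the Node base class here is a simplified stand-in, not the repository's actual class):

class Node:
    # Simplified stand-in for the repository's Node base class.
    def __init__(self, label, summary, details):
        self.label = label
        self.summary = summary
        self.details = details

class Criterion(Node):
    def __init__(self, label, summary, details, tags=None, feedback_string_generator=None):
        super().__init__(label, summary, details)
        # Resolve the default in the body: an explicit None from a
        # caller still yields the no-op generator.
        if feedback_string_generator is not None:
            self.feedback_string_generator = feedback_string_generator
        else:
            self.feedback_string_generator = lambda x: None
        self.tags = set() if tags is None else tags

c = Criterion("response=answer", "summary", "details")
print(c.feedback_string_generator({}))  # -> None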

app/feedback/symbolic_comparison.py  +10 -10

@@ -1,25 +1,25 @@
-from ..criteria_utilities import Criterion, no_feedback
+from ..criteria_utilities import Criterion

 # TODO: Find better way of identifying reference criteria
 # equivalences dictionary should contain a list of variations that are likely to be produced by the following procedure:
 # - rewrite critera as expr=0,
 # - parse left hand side of rewritten critera as a sympy expression
 # - turn sympy expression into a string and remove all whitespace
-equivalences = dict()
+equivalences = dict()
 criteria = dict()

 criteria["RESPONSE_EQUAL_ANSWER"] = Criterion("response=answer")
-equivalences.update({"RESPONSE_EQUAL_ANSWER": ["response=answer","answer=response","answer-response=0","-answer+response=0","answer/response=1","response/answer-1=0"]})
+equivalences.update({"RESPONSE_EQUAL_ANSWER": ["response=answer", "answer=response", "answer-response=0", "-answer+response=0", "answer/response=1", "response/answer-1=0"]})
 criteria["RESPONSE_EQUAL_ANSWER"][True] = lambda inputs: "The response matches the expected answer."
 criteria["RESPONSE_EQUAL_ANSWER"][False] = lambda inputs: "The response does not match the expected answer."

 criteria["RESPONSE_DOUBLE_ANSWER"] = Criterion("response=2*answer")
-equivalences.update({"RESPONSE_DOUBLE_ANSWER": ["response=2*answer","response/answer=2","2*answer=response","answer=response/2","answer-response/2","-answer+response/2","-2*answer+response","2*answer-response","-2+answer/response","-2+response/answer","answer-1*response/2","-answer+1*response/2","-2+1*answer/response","-2+1*response/answer"]})
+equivalences.update({"RESPONSE_DOUBLE_ANSWER": ["response=2*answer","response/answer=2", "2*answer=response", "answer=response/2", "answer-response/2", "-answer+response/2", "-2*answer+response", "2*answer-response", "-2+answer/response", "-2+response/answer", "answer-1*response/2", "-answer+1*response/2", "-2+1*answer/response", "-2+1*response/answer"]})
 criteria["RESPONSE_DOUBLE_ANSWER"][True] = lambda inputs: "The response is the expected answer multiplied by 2."
 criteria["RESPONSE_DOUBLE_ANSWER"][False] = lambda inputs: "The response is not the expected answer multiplied by 2."

 criteria["RESPONSE_NEGATIVE_ANSWER"] = Criterion("response=-answer")
-equivalences.update({"RESPONSE_NEGATIVE_ANSWER": ["response=-answer","answer=-response","answer+response=0","answer+response","answer/response=-1","response/answer+1"]})
+equivalences.update({"RESPONSE_NEGATIVE_ANSWER": ["response=-answer", "answer=-response", "answer+response=0", "answer+response","answer/response=-1", "response/answer+1"]})
 criteria["RESPONSE_NEGATIVE_ANSWER"][True] = lambda inputs: "The response is the expected answer multiplied by -1."
 criteria["RESPONSE_NEGATIVE_ANSWER"][False] = lambda inputs: "The response is not the expected answer multiplied by -1."
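The comment block at the top of this file describes the normalisation step that each equivalences list is meant to anticipate. A rough sketch of that procedure using sympy's parse_expr (the helper name normalise is ours, for illustration only):

from sympy.parsing.sympy_parser import parse_expr

def normalise(criterion: str) -> str:
    # Rewrite "lhs=rhs" as "(lhs)-(rhs)" so the criterion reads expr=0.
    if "=" in criterion:
        lhs, rhs = criterion.split("=", 1)
        expr = parse_expr(f"({lhs})-({rhs})")
    else:
        expr = parse_expr(criterion)
    # Turn the sympy expression into a string and remove all whitespace.
    return str(expr).replace(" ", "")

print(normalise("response=2*answer"))  # e.g. "-2*answer+response"

Each equivalences list then only needs to hold the strings such a normalisation is likely to produce for syntactically different but equivalent responses.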

@@ -36,7 +36,7 @@
     "EXPRESSION_NOT_EQUALITY": "The response was an expression but was expected to be an equality.",
     "EQUALITY_NOT_EXPRESSION": "The response was an equality but was expected to be an expression.",
     "WITHIN_TOLERANCE": "", # "The difference between the response the answer is within specified error tolerance.",
-    "NOT_NUMERICAL": "", #"The expression cannot be evaluated numerically.",
+    "NOT_NUMERICAL": "", # "The expression cannot be evaluated numerically.",
 }

 # Format for feedback string entry: criteria["eval_tag"]("criteria_tag", inputs) = "formatted string" | None
@@ -65,7 +65,7 @@
     "EXPRESSION_NOT_EQUALITY": "The response was an expression but was expected to be an equality.",
     "EQUALITY_NOT_EXPRESSION": "The response was an equality but was expected to be an expression.",
     "WITHIN_TOLERANCE": None, # "The difference between the response the answer is within specified error tolerance.",
-    "NOT_NUMERICAL": None, #"The expression cannot be evaluated numerically.",
+    "NOT_NUMERICAL": None, #"The expression cannot be evaluated numerically.",
 }[tag]
 feedback_generators["GENERIC"] = lambda tag: lambda inputs: {
     "TRUE": None,
@@ -98,7 +98,7 @@
     "FALSE": "The response can be simplified further.",
 }[tag]
 feedback_generators["SAME_FORM"] = lambda tag: lambda inputs: {
-    "CARTESIAN": "Response and answer are both written on Cartesian form", # None,
-    "EXPONENTIAL": "Response and answer are both written on exponential form", # None,
+    "CARTESIAN": "Response and answer are both written on Cartesian form", # None,
+    "EXPONENTIAL": "Response and answer are both written on exponential form", # None,
     "UNKNOWN": "The response is not written on the expected form.",
-}[tag]
+}[tag]
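For context, each feedback_generators entry touched above is a curried lookup: the outer call fixes the evaluation tag, and the inner call takes the inputs dictionary and returns a feedback string, or None (or "") to suppress feedback. A short usage sketch against a trimmed stand-in for the table:

# Trimmed stand-in for the feedback_generators table in this file.
feedback_generators = dict()
feedback_generators["SAME_FORM"] = lambda tag: lambda inputs: {
    "CARTESIAN": "Response and answer are both written on Cartesian form",
    "EXPONENTIAL": "Response and answer are both written on exponential form",
    "UNKNOWN": "The response is not written on the expected form.",
}[tag]

# The outer call selects the tag; the inner call receives the inputs
# dictionary (unused by these particular strings).
message = feedback_generators["SAME_FORM"]("CARTESIAN")({})
print(message)  # Response and answer are both written on Cartesian form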
