Skip to content

Commit 7cb32e5

Browse files
committed
First implementation
1 parent 9f29049 commit 7cb32e5

File tree

8 files changed

+1708
-275
lines changed

8 files changed

+1708
-275
lines changed

Dockerfile

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@ ENV FUNCTION_COMMAND="python"
3232
ENV FUNCTION_ARGS="-m,evaluation_function.main"
3333

3434
# The transport to use for the RPC server
35-
ENV FUNCTION_RPC_TRANSPORT="ipc"
35+
#ENV FUNCTION_RPC_TRANSPORT="ipc"
36+
ENV FUNCTION_INTERFACE="file"
3637

3738
ENV LOG_LEVEL="debug"

README.md

Lines changed: 2 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -1,51 +1,6 @@
1-
# Python Evaluation Function
1+
# Wolfram Middle-Layer Evaluation Function
22

3-
This repository contains the boilerplate code needed to create a containerized evaluation function written in Python.
4-
5-
## Quickstart
6-
7-
This chapter helps you to quickly set up a new Python evaluation function using this template repository.
8-
9-
> [!NOTE]
10-
> After setting up the evaluation function, delete this chapter from the `README.md` file, and add your own documentation.
11-
12-
#### 1. Create a new repository
13-
14-
- In GitHub, choose `Use this template` > `Create a new repository` in the repository toolbar.
15-
16-
- Choose the owner, and pick a name for the new repository.
17-
18-
> [!IMPORTANT]
19-
> If you want to deploy the evaluation function to Lambda Feedback, make sure to choose the Lambda Feedback organization as the owner.
20-
21-
- Set the visibility to `Public` or `Private`.
22-
23-
> [!IMPORTANT]
24-
> If you want to use GitHub [deployment protection rules](https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment#deployment-protection-rules), make sure to set the visibility to `Public`.
25-
26-
- Click on `Create repository`.
27-
28-
#### 2. Clone the new repository
29-
30-
Clone the new repository to your local machine using the following command:
31-
32-
```bash
33-
git clone <repository-url>
34-
```
35-
36-
#### 3. Configure the evaluation function
37-
38-
When deploying to Lambda Feedback, set the evaluation function name in the `config.json` file. Read the [Deploy to Lambda Feedback](#deploy-to-lambda-feedback) section for more information.
39-
40-
#### 4. Develop the evaluation function
41-
42-
You're ready to start developing your evaluation function. Head over to the [Development](#development) section to learn more.
43-
44-
#### 5. Update the README
45-
46-
In the `README.md` file, change the title and description so it fits the purpose of your evaluation function.
47-
48-
Also, don't forget to delete the Quickstart chapter from the `README.md` file after you've completed these steps.
3+
This repository forwards requests to the Wolfram Cloud API.
494

505
## Usage
516

config.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
{
2-
"EvaluationFunctionName": ""
2+
"EvaluationFunctionName": "wolfram-middle-layer"
33
}

evaluation_function/dev.py

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2,23 +2,24 @@
22

33
from lf_toolkit.shared.params import Params
44

5-
from .evaluation import evaluation_function
5+
from evaluation_function.evaluation import evaluation_function
6+
from evaluation_function.preview import preview_function
7+
68

79
def dev():
    """Run the preview pipeline from the command line for development purposes.

    Usage: python -m evaluation_function.dev

    Sends a fixed example response through ``preview_function`` and prints the
    raw result. To exercise the evaluation path instead, swap in:
    ``evaluation_function(response, "x+y", {"type": "structure"})``.
    """
    # Fixed development input; the previous CLI-argument handling was removed
    # in this commit, so the docstring no longer advertises <answer> <response>.
    response = "x+y"

    result = preview_function(response, {})

    print(result)
2223

2324
if __name__ == "__main__":
2425
dev()

evaluation_function/evaluation.py

Lines changed: 33 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,8 @@
1+
import json
12
from typing import Any
23
from lf_toolkit.evaluation import Result, Params
4+
import os
5+
import requests
36

47
def evaluation_function(
    response: Any,
    answer: Any,
    params: Params,
) -> Result:
    """Evaluate a student response by forwarding it to the Wolfram Cloud API.

    Args:
        response: The student's submitted response.
        answer: The reference answer to compare against.
        params: Evaluation parameters; must contain a "type" key selecting the
            remote evaluation mode (e.g. "structure").

    Returns:
        A Result carrying the remote verdict and feedback, or an error-tagged
        Result when the remote call fails or reports a problem.
    """
    url = os.getenv("EVALUATE_API")
    if not url:
        # Fail with explicit feedback instead of letting requests.post crash
        # on a None URL.
        result = Result(is_correct=False)
        result.add_feedback(tag="error", feedback="EVALUATE_API is not configured")
        return result

    data_payload = {
        "type": params["type"],
        "response": response,
        "answer": answer,
        "params": json.dumps(params),
    }

    # Use a distinct name so the `response` argument is not shadowed by the
    # HTTP response object; the timeout prevents a hung Wolfram endpoint from
    # stalling the evaluation indefinitely.
    api_response = requests.post(url, data=data_payload, timeout=30)
    response_json = api_response.json()

    # The remote API signals failure via a falsy "Success" flag or a non-null
    # "error" message; use .get so absent keys cannot raise KeyError here.
    if not response_json.get("Success", True):
        result = Result(is_correct=False)
        result.add_feedback(tag="error", feedback="Wolfram Error")
        return result

    if response_json.get("error") is not None:
        result = Result(is_correct=False)
        result.add_feedback(tag="error", feedback=response_json["error"])
        return result

    result = Result(is_correct=response_json["is_correct"])
    result.add_feedback(tag="feedback", feedback=response_json["feedback"])

    return result

evaluation_function/preview.py

Lines changed: 28 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,7 @@
1+
import os
12
from typing import Any
3+
4+
import requests
25
from lf_toolkit.preview import Result, Params, Preview
36

47
def preview_function(response: Any, params: Params) -> Result:
    """Generate a preview by forwarding the response to the Wolfram Cloud API.

    Args:
        response: The raw student response to be previewed.
        params: Preview parameters (not currently forwarded to the remote API).

    Returns:
        A Result whose preview carries the LaTeX/SymPy rendering on success,
        or a feedback message describing the failure.
    """
    try:
        url = os.getenv("PREVIEW_API")
        if not url:
            return Result(preview=Preview(feedback="PREVIEW_API is not configured"))

        # The timeout guards against a hung Wolfram endpoint blocking callers;
        # the debug print of the raw JSON payload was removed.
        api_response = requests.post(url, data={"response": response}, timeout=30)
        response_json = api_response.json()

        # The remote API signals failure via a falsy "Success" flag or a
        # non-null "error" message; use .get so absent keys cannot KeyError.
        if not response_json.get("Success", True):
            return Result(
                preview=Preview(feedback=response_json.get("error", "Wolfram Error"))
            )

        if response_json.get("error") is not None:
            return Result(preview=Preview(feedback=response_json["error"]))

        return Result(
            preview=Preview(
                latex=response_json["latexString"],
                sympy=response_json["sympyString"],
            )
        )
    except Exception as e:
        # Boundary handler: surface any unexpected failure as preview feedback
        # rather than crashing the preview endpoint.
        return Result(preview=Preview(feedback=str(e)))

0 commit comments

Comments
 (0)