Skip to content

Commit b7b8286

Browse files
authored
Merge pull request #404 from sts07142/Add-GPT-4o&4o-mini
Add GPT-4o&4o-mini
2 parents 9d3b65a + 4597af1 commit b7b8286

File tree

7 files changed

+30
-2
lines changed

7 files changed

+30
-2
lines changed

camel/model_backend.py

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -91,6 +91,8 @@ def run(self, *args, **kwargs):
9191
"gpt-4-0613": 8192,
9292
"gpt-4-32k": 32768,
9393
"gpt-4-turbo": 100000,
94+
"gpt-4o": 4096, #100000
95+
"gpt-4o-mini": 16384, #100000
9496
}
9597
num_max_token = num_max_token_map[self.model_type.value]
9698
num_max_completion_tokens = num_max_token - num_prompt_tokens
@@ -122,6 +124,8 @@ def run(self, *args, **kwargs):
122124
"gpt-4-0613": 8192,
123125
"gpt-4-32k": 32768,
124126
"gpt-4-turbo": 100000,
127+
"gpt-4o": 4096, #100000
128+
"gpt-4o-mini": 16384, #100000
125129
}
126130
num_max_token = num_max_token_map[self.model_type.value]
127131
num_max_completion_tokens = num_max_token - num_prompt_tokens
@@ -182,6 +186,8 @@ def create(model_type: ModelType, model_config_dict: Dict) -> ModelBackend:
182186
ModelType.GPT_4_32k,
183187
ModelType.GPT_4_TURBO,
184188
ModelType.GPT_4_TURBO_V,
189+
ModelType.GPT_4O,
190+
ModelType.GPT_4O_MINI,
185191
None
186192
}:
187193
model_class = OpenAIModel

camel/typing.py

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -50,6 +50,8 @@ class ModelType(Enum):
5050
GPT_4_32k = "gpt-4-32k"
5151
GPT_4_TURBO = "gpt-4-turbo"
5252
GPT_4_TURBO_V = "gpt-4-turbo"
53+
GPT_4O = "gpt-4o"
54+
GPT_4O_MINI = "gpt-4o-mini"
5355

5456
STUB = "stub"
5557

camel/utils.py

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -89,6 +89,8 @@ def num_tokens_from_messages(
8989
ModelType.GPT_4_32k,
9090
ModelType.GPT_4_TURBO,
9191
ModelType.GPT_4_TURBO_V,
92+
ModelType.GPT_4O,
93+
ModelType.GPT_4O_MINI,
9294
ModelType.STUB
9395
}:
9496
return count_tokens_openai_chat_models(messages, encoding)
@@ -124,6 +126,10 @@ def get_model_token_limit(model: ModelType) -> int:
124126
return 128000
125127
elif model == ModelType.STUB:
126128
return 4096
129+
elif model == ModelType.GPT_4O:
130+
return 128000
131+
elif model == ModelType.GPT_4O_MINI:
132+
return 128000
127133
else:
128134
raise ValueError("Unknown model type")
129135

chatdev/statistics.py

Lines changed: 8 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -13,6 +13,8 @@ def prompt_cost(model_type: str, num_prompt_tokens: float, num_completion_tokens
1313
"gpt-4-0613": 0.03,
1414
"gpt-4-32k": 0.06,
1515
"gpt-4-turbo": 0.01,
16+
"gpt-4o": 0.005,
17+
"gpt-4o-mini": 0.00015,
1618
}
1719

1820
output_cost_map = {
@@ -24,6 +26,8 @@ def prompt_cost(model_type: str, num_prompt_tokens: float, num_completion_tokens
2426
"gpt-4-0613": 0.06,
2527
"gpt-4-32k": 0.12,
2628
"gpt-4-turbo": 0.03,
29+
"gpt-4o": 0.015,
30+
"gpt-4o-mini": 0.0006,
2731
}
2832

2933
if model_type not in input_cost_map or model_type not in output_cost_map:
@@ -111,6 +115,10 @@ def get_info(dir, log_filepath):
111115
model_type = "gpt-4-32k"
112116
elif model_type == "GPT_4_TURBO":
113117
model_type = "gpt-4-turbo"
118+
elif model_type == "GPT_4O":
119+
model_type = "gpt-4o"
120+
elif model_type == "GPT_4O_MINI":
121+
model_type = "gpt-4o-mini"
114122
# print("model_type:", model_type)
115123

116124
lines = open(log_filepath, "r", encoding="utf8").read().split("\n")

ecl/utils.py

Lines changed: 4 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -65,6 +65,8 @@ def calc_max_token(messages, model):
6565
"gpt-4": 8192,
6666
"gpt-4-0613": 8192,
6767
"gpt-4-32k": 32768,
68+
"gpt-4o": 4096, #100000
69+
"gpt-4o-mini": 16384, #100000
6870
}
6971
num_max_token = num_max_token_map[model]
7072
num_max_completion_tokens = num_max_token - num_prompt_tokens
@@ -136,6 +138,8 @@ def run(self, messages) :
136138
"gpt-4": 8192,
137139
"gpt-4-0613": 8192,
138140
"gpt-4-32k": 32768,
141+
"gpt-4o": 4096, #100000
142+
"gpt-4o-mini": 16384, #100000
139143
}
140144
response = client.chat.completions.create(messages = messages,
141145
model = "gpt-3.5-turbo-16k",

requirements.txt

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -7,7 +7,7 @@ openai==1.3.3
77
regex==2023.6.3
88
requests==2.31.0
99
tenacity==8.2.2
10-
tiktoken==0.4.0
10+
tiktoken==0.7.0
1111
virtualenv==20.23.0
1212
Werkzeug==3.0.3
1313
Markdown==3.4.4

run.py

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -79,7 +79,7 @@ def get_config(company):
7979
parser.add_argument('--name', type=str, default="Gomoku",
8080
help="Name of software, your software will be generated in WareHouse/name_org_timestamp")
8181
parser.add_argument('--model', type=str, default="GPT_3_5_TURBO",
82-
help="GPT Model, choose from {'GPT_3_5_TURBO', 'GPT_4', 'GPT_4_TURBO'}")
82+
help="GPT Model, choose from {'GPT_3_5_TURBO', 'GPT_4', 'GPT_4_TURBO', 'GPT_4O', 'GPT_4O_MINI'}")
8383
parser.add_argument('--path', type=str, default="",
8484
help="Your file directory, ChatDev will build upon your software in the Incremental mode")
8585
args = parser.parse_args()
@@ -95,6 +95,8 @@ def get_config(company):
9595
# 'GPT_4_32K': ModelType.GPT_4_32k,
9696
'GPT_4_TURBO': ModelType.GPT_4_TURBO,
9797
# 'GPT_4_TURBO_V': ModelType.GPT_4_TURBO_V
98+
'GPT_4O': ModelType.GPT_4O,
99+
'GPT_4O_MINI': ModelType.GPT_4O_MINI,
98100
}
99101
if openai_new_api:
100102
args2type['GPT_3_5_TURBO'] = ModelType.GPT_3_5_TURBO_NEW

0 commit comments

Comments (0)