|
1 | 1 | import os
|
2 | 2 |
|
3 |
| -from lagent.llms import GPTAPI, INTERNLM2_META, LMDeployClient, LMDeployServer |
| 3 | +from lagent.llms import (GPTAPI, INTERNLM2_META, HFTransformerCasualLM, |
| 4 | + LMDeployClient, LMDeployServer) |
4 | 5 |
|
5 | 6 | internlm_server = dict(type=LMDeployServer,
|
6 |
| - path='internlm/internlm2_5-7b', |
| 7 | + path='internlm/internlm2_5-7b-chat', |
7 | 8 | model_name='internlm2',
|
8 | 9 | meta_template=INTERNLM2_META,
|
9 | 10 | top_p=0.8,
|
|
14 | 15 | stop_words=['<|im_end|>'])
|
15 | 16 |
|
16 | 17 | internlm_client = dict(type=LMDeployClient,
|
17 |
| - model_name='internlm2_5-7b', |
| 18 | + model_name='internlm2_5-7b-chat', |
18 | 19 | url='http://127.0.0.1:23333',
|
19 | 20 | meta_template=INTERNLM2_META,
|
20 | 21 | top_p=0.8,
|
|
24 | 25 | repetition_penalty=1.02,
|
25 | 26 | stop_words=['<|im_end|>'])
|
26 | 27 |
|
# Local HuggingFace Transformers backend for InternLM2.5-7B-chat.
# NOTE: "CasualLM" (sic) is the actual class name exported by lagent.llms.
# Near-zero temperature + top_k=None makes decoding effectively greedy.
internlm_hf = {
    'type': HFTransformerCasualLM,
    'path': 'internlm/internlm2_5-7b-chat',
    'meta_template': INTERNLM2_META,
    'top_p': 0.8,
    'top_k': None,
    'temperature': 1e-6,
    'max_new_tokens': 8192,
    'repetition_penalty': 1.02,
    'stop_words': ['<|im_end|>'],
}
| 37 | + |
# OpenAI GPT-4 Turbo backend. The API key is read from the environment;
# the placeholder string is kept as the fallback so a missing key fails
# loudly at request time rather than at import time.
gpt4 = {
    'type': GPTAPI,
    'model_type': 'gpt-4-turbo',
    'key': os.environ.get('OPENAI_API_KEY', 'YOUR OPENAI API KEY'),
}
|
0 commit comments