
Commit a0d577a

Merge pull request #127 from shadokan87/feat/extendModelList_method

Feat/extend model list method

2 parents 996c958 + 1fed938

File tree

2 files changed: 200 additions & 1 deletion

README.md

Lines changed: 46 additions & 0 deletions
````diff
@@ -173,6 +173,52 @@ async function main() {
 main()
 ```
 
+### Extending Model Support
+
+Token.js allows you to extend the predefined model list using the `extendModelList` method. Here are some example scenarios where this is useful:
+1. Adding AWS Bedrock models with regional prefixes like `us.anthropic.claude-3-sonnet`
+2. Supporting new model versions before they're added to the predefined list
+3. Using custom model deployments with unique names
+4. Adding experimental or beta models during testing
+
+```ts
+import { TokenJS } from 'token.js'
+
+// Example in two steps: adding an AWS Bedrock Claude model with a region prefix
+const tokenjs = new TokenJS();
+// Step 1: Register the new model name
+tokenjs.extendModelList(
+  "bedrock",
+  'us.anthropic.claude-3-5-sonnet-20241022-v2:0',
+  "anthropic.claude-3-sonnet-20240229-v1:0" // Copy features from an existing model
+);
+
+// Step 2: Use the extended model in a chat completion
+const result = await tokenjs.chat.completions.create({
+  stream: true,
+  provider: 'bedrock',
+  model: 'us.anthropic.claude-3-5-sonnet-20241022-v2:0' as any, // Type casting with 'as any' is required
+  messages: [
+    {
+      role: 'user',
+      content: 'Tell me about yourself.',
+    },
+  ],
+});
+```
+
+Note: when using extended models, type casting the model name with `as any` is required, since the extended name is not part of the predefined model types.
+
+The `featureSupport` parameter can be either:
+- A string matching an existing model name from the same provider to copy its feature support
+- An object specifying which features the model supports:
+
+| Feature   | Type    | Description                                    |
+|-----------|---------|------------------------------------------------|
+| streaming | boolean | Whether the model supports streaming responses |
+| json      | boolean | Whether the model supports JSON mode           |
+| toolCalls | boolean | Whether the model supports function calling    |
+| images    | boolean | Whether the model supports image inputs        |
+
 ## Feature Compatibility
 
 This table provides an overview of the features that Token.js supports from each LLM provider.
````
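The added README section documents two forms for `featureSupport` but only demonstrates the string form. For completeness, here is a minimal sketch of the object form, declaring capabilities explicitly instead of copying them from an existing model (the model name below is hypothetical, for illustration only):

```ts
import { TokenJS } from 'token.js'

const tokenjs = new TokenJS()

// Register a hypothetical custom deployment with an explicit feature object.
tokenjs.extendModelList('bedrock', 'my-custom-claude-deployment', {
  streaming: true, // supports streaming responses
  json: false,     // no JSON mode
  toolCalls: true, // supports function calling
  images: false,   // no image inputs
})
```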

src/index.ts

Lines changed: 154 additions & 1 deletion
````diff
@@ -1,4 +1,6 @@
-import { LLMChat } from './chat/index.js'
+import { LLMChat, LLMProvider } from './chat/index.js'
+import { InputError } from './handlers/types.js'
+import { models } from './models.js'
 import { ConfigOptions } from './userTypes/index.js'
 export * from './models.js'
 export * from './userTypes/index.js'
@@ -21,10 +23,35 @@ export * from './userTypes/index.js'
 
 type TokenJSInterface = {
   chat: LLMChat
+  extendModelList<
+    P extends Exclude<LLMProvider, 'openrouter' | 'openai-compatible'>
+  >(
+    provider: P,
+    name: string,
+    featureSupport: extendedModelFeatureSupport<P>
+  ): void
 }
 
+export type extendedModelFeatureSupport<P extends LLMProvider> =
+  | ((typeof models)[P]['models'] extends readonly string[]
+      ? (typeof models)[P]['models'][number]
+      : never)
+  | {
+      streaming: boolean
+      json: boolean
+      toolCalls: boolean
+      images: boolean
+    }
+
+type extendedModelList = Array<{
+  provider: LLMProvider
+  name: string
+  featureSupport: extendedModelFeatureSupport<any>
+}>
+
 export class TokenJS implements TokenJSInterface {
   private opts: ConfigOptions
+  public static extendedModelList: Readonly<extendedModelList> = []
   chat: LLMChat
 
   constructor({ ...opts }: ConfigOptions = {}) {
@@ -33,4 +60,130 @@ export class TokenJS implements TokenJSInterface {
     // We pass a reference to the LLM instance to the LLMChat instance so that the completions object can access the opts
     this.chat = new LLMChat(opts)
   }
+
+  /**
+   * Checks if a model exists in the extended model list for a given provider.
+   *
+   * @param provider - The LLM provider to check
+   * @param name - The model name to check
+   * @returns boolean indicating if the model exists
+   */
+  extendedModelExist<
+    P extends Exclude<LLMProvider, 'openrouter' | 'openai-compatible'>
+  >(provider: P, name: string): boolean {
+    return TokenJS.extendedModelList.some(
+      (model) => model.provider === provider && model.name === name
+    )
+  }
+
+  /**
+   * Extends the predefined model list by adding a new model with specified features.
+   *
+   * @param provider - The LLM provider (e.g., 'bedrock', 'openai')
+   * @param name - The model name/identifier to add
+   * @param featureSupport - Either:
+   *   - A string matching an existing model name from the same provider to copy its feature support
+   *   - An object specifying which features the model supports:
+   *     | Feature   | Type    | Description                                    |
+   *     |-----------|---------|------------------------------------------------|
+   *     | streaming | boolean | Whether the model supports streaming responses |
+   *     | json      | boolean | Whether the model supports JSON mode           |
+   *     | toolCalls | boolean | Whether the model supports function calling    |
+   *     | images    | boolean | Whether the model supports image inputs        |
+   * @returns The TokenJS instance for chaining
+   *
+   * @example
+   * ```typescript
+   * // Example in two steps: adding an AWS Bedrock Claude model with a region prefix
+   * const tokenjs = new TokenJS();
+   *
+   * // Step 1: Register the new model name
+   * tokenjs.extendModelList(
+   *   "bedrock",
+   *   'us.anthropic.claude-3-5-sonnet-20241022-v2:0',
+   *   "anthropic.claude-3-sonnet-20240229-v1:0" // Copy features from an existing model
+   * );
+   *
+   * // Step 2: Use the extended model in a chat completion
+   * const result = await tokenjs.chat.completions.create({
+   *   stream: true,
+   *   provider: 'bedrock',
+   *   model: 'us.anthropic.claude-3-5-sonnet-20241022-v2:0' as any, // Note: type casting with 'as any' is required
+   *   messages: [
+   *     {
+   *       role: 'user',
+   *       content: 'Tell me about yourself.',
+   *     },
+   *   ],
+   * });
+   * ```
+   *
+   * Note: when using extended models, type casting with `as any` is required.
+   */
+  extendModelList<
+    P extends Exclude<LLMProvider, 'openrouter' | 'openai-compatible'>
+  >(provider: P, name: string, featureSupport: extendedModelFeatureSupport<P>) {
+    // Do nothing if the model has already been added to the extended model list
+    if (this.extendedModelExist(provider, name)) {
+      return this
+    }
+    // If the model name is already pre-defined, there is a conflict, so we throw an error
+    if (
+      Array.isArray(models[provider].models) &&
+      models[provider].models.includes(name)
+    ) {
+      throw new InputError(
+        `You tried to add the following custom model name: "${name}", for provider: "${provider}". But it conflicts with an existing pre-defined model name. Please try again using a different name, e.g.: "${name}-custom"`
+      )
+    }
+
+    const modelsRef = models[provider] as any
+    modelsRef['models'] = [...(models as any)[provider]['models'], name]
+
+    const isSupportedFeature = (
+      _featureSupport: readonly string[] | boolean,
+      model: string
+    ) => {
+      if (typeof _featureSupport === 'boolean') {
+        return _featureSupport
+      }
+      return _featureSupport.includes(model)
+    }
+
+    if (typeof featureSupport === 'string') {
+      // Copy feature support from the existing model
+      if (isSupportedFeature(modelsRef.supportsJSON, featureSupport)) {
+        modelsRef['supportsJSON'] = [...modelsRef.supportsJSON, name]
+      }
+      if (isSupportedFeature(modelsRef.supportsStreaming, featureSupport)) {
+        modelsRef['supportsStreaming'] = [...modelsRef.supportsStreaming, name]
+      }
+      if (isSupportedFeature(modelsRef.supportsImages, featureSupport)) {
+        modelsRef['supportsImages'] = [...modelsRef.supportsImages, name]
+      }
+      if (isSupportedFeature(modelsRef.supportsToolCalls, featureSupport)) {
+        modelsRef['supportsToolCalls'] = [...modelsRef.supportsToolCalls, name]
+      }
+    } else {
+      // Use the explicit feature support object
+      if (featureSupport.json) {
+        modelsRef['supportsJSON'] = [...modelsRef.supportsJSON, name]
+      }
+      if (featureSupport.streaming) {
+        modelsRef['supportsStreaming'] = [...modelsRef.supportsStreaming, name]
+      }
+      if (featureSupport.toolCalls) {
+        modelsRef['supportsToolCalls'] = [...modelsRef.supportsToolCalls, name]
+      }
+      if (featureSupport.images) {
+        modelsRef['supportsImages'] = [...modelsRef.supportsImages, name]
+      }
+    }
+    ;(TokenJS.extendedModelList as extendedModelList).push({
+      provider,
+      name,
+      featureSupport,
+    })
+    return this
+  }
 }
````
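Based on the methods added in this diff, a minimal usage sketch might look like the following (model names taken from the README example; the logged output shape is inferred from the `extendedModelList` type):

```ts
import { TokenJS } from 'token.js'

const tokenjs = new TokenJS()

// Register the regional model name, copying feature support from an existing model.
// extendModelList returns `this`, so calls can be chained; registering the same
// provider/name pair twice is a no-op.
tokenjs.extendModelList(
  'bedrock',
  'us.anthropic.claude-3-5-sonnet-20241022-v2:0',
  'anthropic.claude-3-sonnet-20240229-v1:0'
)

// extendedModelExist reports whether a name has been registered for a provider.
console.log(
  tokenjs.extendedModelExist('bedrock', 'us.anthropic.claude-3-5-sonnet-20241022-v2:0')
) // true

// The static registry records every extension made so far.
console.log(TokenJS.extendedModelList)
// [ { provider: 'bedrock',
//     name: 'us.anthropic.claude-3-5-sonnet-20241022-v2:0',
//     featureSupport: 'anthropic.claude-3-sonnet-20240229-v1:0' } ]
```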
