-import { LLMChat } from './chat/index.js'
+import { LLMChat, LLMProvider } from './chat/index.js'
+import { InputError } from './handlers/types.js'
+import { models } from './models.js'
 import { ConfigOptions } from './userTypes/index.js'
 export * from './models.js'
 export * from './userTypes/index.js'
@@ -21,10 +23,35 @@ export * from './userTypes/index.js'
 
 type TokenJSInterface = {
   chat: LLMChat
+  extendModelList<
+    P extends Exclude<LLMProvider, 'openrouter' | 'openai-compatible'>
+  >(
+    provider: P,
+    name: string,
+    featureSupport: extendedModelFeatureSupport<P>
+  ): void
 }
 
+export type extendedModelFeatureSupport<P extends LLMProvider> =
+  | ((typeof models)[P]['models'] extends readonly string[]
+      ? (typeof models)[P]['models'][number]
+      : never)
+  | {
+      streaming: boolean
+      json: boolean
+      toolCalls: boolean
+      images: boolean
+    }
+
+type extendedModelList = Array<{
+  provider: LLMProvider
+  name: string
+  featureSupport: extendedModelFeatureSupport<any>
+}>
+
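+// For illustration (a sketch; the copied-from name below is one of bedrock's
+// pre-defined models, the flag values are illustrative), both forms satisfy
+// extendedModelFeatureSupport<'bedrock'>:
+//   'anthropic.claude-3-sonnet-20240229-v1:0'                       // copy features
+//   { streaming: true, json: true, toolCalls: true, images: true }  // explicit flags
+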
 export class TokenJS implements TokenJSInterface {
   private opts: ConfigOptions
+  public static extendedModelList: Readonly<extendedModelList> = []
   chat: LLMChat
 
   constructor({ ...opts }: ConfigOptions = {}) {
@@ -33,4 +60,130 @@ export class TokenJS implements TokenJSInterface {
     // We pass a reference to the LLM instance to the LLMChat instance so that the completions object can access the opts
     this.chat = new LLMChat(opts)
   }
+
+  /**
+   * Checks if a model exists in the extended model list for a given provider.
+   *
+   * @param provider - The LLM provider to check
+   * @param name - The model name to check
+   * @returns boolean indicating if the model exists
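+   *
+   * @example
+   * ```typescript
+   * // Usage sketch ('my-custom-model' is a hypothetical name):
+   * const tokenjs = new TokenJS();
+   * tokenjs.extendedModelExist('bedrock', 'my-custom-model'); // false until added via extendModelList
+   * ```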
+   */
+  extendedModelExist<
+    P extends Exclude<LLMProvider, 'openrouter' | 'openai-compatible'>
+  >(provider: P, name: string): boolean {
+    return TokenJS.extendedModelList.some(
+      (model) => model.provider === provider && model.name === name
+    )
+  }
+
+  /**
+   * Extends the predefined model list by adding a new model with specified features.
+   *
+   * @param provider - The LLM provider (e.g., 'bedrock', 'openai')
+   * @param name - The model name/identifier to add
+   * @param featureSupport - Either:
+   *   - A string matching an existing model name from the same provider, to copy its feature support
+   *   - An object specifying which features the model supports:
+   *     | Feature   | Type    | Description                                    |
+   *     |-----------|---------|------------------------------------------------|
+   *     | streaming | boolean | Whether the model supports streaming responses |
+   *     | json      | boolean | Whether the model supports JSON mode           |
+   *     | toolCalls | boolean | Whether the model supports function calling    |
+   *     | images    | boolean | Whether the model supports image inputs        |
+   * @returns The TokenJS instance for chaining
+   *
+   * @example
+   * ```typescript
+   * // Example in two steps: adding an AWS Bedrock Claude model with a region prefix
+   * const tokenjs = new TokenJS();
+   *
+   * // Step 1: Register the new model name
+   * tokenjs.extendModelList(
+   *   'bedrock',
+   *   'us.anthropic.claude-3-5-sonnet-20241022-v2:0',
+   *   'anthropic.claude-3-sonnet-20240229-v1:0' // Copy features from an existing model
+   * );
+   *
+   * // Step 2: Use the extended model in a chat completion
+   * const result = await tokenjs.chat.completions.create({
+   *   stream: true,
+   *   provider: 'bedrock',
+   *   model: 'us.anthropic.claude-3-5-sonnet-20241022-v2:0' as any, // Note: type casting as 'any' is required
+   *   messages: [
+   *     {
+   *       role: 'user',
+   *       content: 'Tell me about yourself.',
+   *     },
+   *   ],
+   * });
+   * ```
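+   *
+   * @example
+   * ```typescript
+   * // A second sketch: registering a model with an explicit feature-support
+   * // object instead of copying from an existing model ('my-custom-model' is hypothetical)
+   * tokenjs.extendModelList('bedrock', 'my-custom-model', {
+   *   streaming: true,
+   *   json: false,
+   *   toolCalls: true,
+   *   images: true,
+   * });
+   * ```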
+   *
+   * Note: When using extended models in chat completions, type casting (`as any`) is
+   * required, since the compile-time model name types do not include names added at runtime.
+   */
+  extendModelList<
+    P extends Exclude<LLMProvider, 'openrouter' | 'openai-compatible'>
+  >(provider: P, name: string, featureSupport: extendedModelFeatureSupport<P>) {
+    // Do nothing if the model has already been added to the extendedModelList
+    if (this.extendedModelExist(provider, name)) {
+      return this
+    }
+    // If the model name is pre-defined, there is a conflict, so we throw an error
+    if (
+      Array.isArray(models[provider].models) &&
+      models[provider].models.includes(name)
+    ) {
+      throw new InputError(
+        `You tried to add the following custom model name: "${name}", for provider: "${provider}". But it conflicts with an existing pre-defined model name. Please try again using a different name, e.g.: "${name}-custom"`
+      )
+    }
+
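+    // Note: this mutates the shared `models` registry at runtime, so the new
+    // name becomes visible to every TokenJS instance in the process.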
+    const modelsRef = models[provider] as any
+    modelsRef['models'] = [...(models as any)[provider]['models'], name]
+
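+    // In the models registry, a feature flag is either a single boolean or an
+    // array of supporting model names; normalize both shapes to a boolean for
+    // the copied-from model.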
+    const isSupportedFeature = (
+      _featureSupport: readonly string[] | boolean,
+      model: string
+    ) => {
+      if (typeof _featureSupport === 'boolean') {
+        return _featureSupport
+      }
+      return _featureSupport.includes(model)
+    }
+
+    if (typeof featureSupport === 'string') {
+      // Copy feature support from the existing model
+      if (isSupportedFeature(modelsRef.supportsJSON, featureSupport)) {
+        modelsRef['supportsJSON'] = [...modelsRef.supportsJSON, name]
+      }
+      if (isSupportedFeature(modelsRef.supportsStreaming, featureSupport)) {
+        modelsRef['supportsStreaming'] = [...modelsRef.supportsStreaming, name]
+      }
+      if (isSupportedFeature(modelsRef.supportsImages, featureSupport)) {
+        modelsRef['supportsImages'] = [...modelsRef.supportsImages, name]
+      }
+      if (isSupportedFeature(modelsRef.supportsToolCalls, featureSupport)) {
+        modelsRef['supportsToolCalls'] = [...modelsRef.supportsToolCalls, name]
+      }
+    } else {
+      // Use the explicit feature support object
+      if (featureSupport.json) {
+        modelsRef['supportsJSON'] = [...modelsRef.supportsJSON, name]
+      }
+      if (featureSupport.streaming) {
+        modelsRef['supportsStreaming'] = [...modelsRef.supportsStreaming, name]
+      }
+      if (featureSupport.toolCalls) {
+        modelsRef['supportsToolCalls'] = [...modelsRef.supportsToolCalls, name]
+      }
+      if (featureSupport.images) {
+        modelsRef['supportsImages'] = [...modelsRef.supportsImages, name]
+      }
+    }
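+    // Record the extension so that repeated calls for the same provider/name
+    // are no-ops (extendedModelExist checks this static list).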
+    ;(TokenJS.extendedModelList as extendedModelList).push({
+      provider,
+      name,
+      featureSupport,
+    })
+    return this
+  }
 }