Skip to content

Commit 9901581

Browse files
author
Sinan Karakaya
committed
feat: added Bearer token support
1 parent 996ac71 commit 9901581

File tree

8 files changed

+35
-19
lines changed

8 files changed

+35
-19
lines changed

package.json

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -58,6 +58,11 @@
5858
"description": "Ollama Server Endpoint. Empty for local instance. Example: http://192.168.0.100:11434",
5959
"order": 1
6060
},
61+
"inference.bearerToken": {
62+
"type": "string",
63+
"default": "",
64+
"description": "Auth Bearer token that should be used for secure requests. Leave empty if not desired."
65+
},
6166
"inference.model": {
6267
"type": "string",
6368
"enum": [

src/config.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ class Config {
1515
if (endpoint === '') {
1616
endpoint = 'http://127.0.0.1:11434';
1717
}
18+
let bearerToken = config.get('bearerToken') as string;
1819

1920
// Load general parameters
2021
let maxLines = config.get('maxLines') as number;
@@ -39,6 +40,7 @@ class Config {
3940

4041
return {
4142
endpoint,
43+
bearerToken,
4244
maxLines,
4345
maxTokens,
4446
temperature,

src/modules/lineGenerator.ts

Lines changed: 11 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,17 @@
1-
export async function* lineGenerator(url: string, data: any): AsyncGenerator<string> {
2-
1+
export async function* lineGenerator(url: string, data: any, authToken: string): AsyncGenerator<string> {
32
// Request
43
const controller = new AbortController();
54
let res = await fetch(url, {
6-
method: 'POST',
7-
body: JSON.stringify(data),
8-
headers: {
9-
"Content-Type": "application/json",
10-
},
11-
signal: controller.signal
12-
});
5+
method: 'POST',
6+
body: JSON.stringify(data),
7+
headers: authToken ? {
8+
'Content-Type': 'application/json',
9+
Authorization: `Bearer ${authToken}`,
10+
} : {
11+
'Content-Type': 'application/json',
12+
},
13+
signal: controller.signal,
14+
})
1315
if (!res.ok || !res.body) {
1416
throw Error('Unable to connect to backend');
1517
}

src/modules/ollamaCheckModel.ts

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,14 @@
11
import { info } from "./log";
22

3-
export async function ollamaCheckModel(endpoint: string, model: string) {
4-
3+
export async function ollamaCheckModel(endpoint: string, model: string, authToken: string) {
54
// Check if exists
6-
let res = await fetch(endpoint + '/api/tags');
5+
let res = await fetch(endpoint + '/api/tags', {
6+
headers: authToken
7+
? {
8+
Authorization: `Bearer ${authToken}`,
9+
}
10+
: {},
11+
});
712
if (!res.ok) {
813
info(await res.text());
914
info(endpoint + '/api/tags');

src/modules/ollamaDownloadModel.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,9 @@
11
import { lineGenerator } from "./lineGenerator";
22
import { info } from "./log";
33

4-
export async function ollamaDownloadModel(endpoint: string, model: string) {
4+
export async function ollamaDownloadModel(endpoint: string, model: string, authToken: string) {
55
info('Downloading model from ollama: ' + model);
6-
for await (let line of lineGenerator(endpoint + '/api/pull', { name: model })) {
6+
for await (let line of lineGenerator(endpoint + '/api/pull', { name: model }, authToken)) {
77
info('[DOWNLOAD] ' + line);
88
}
99
}

src/modules/ollamaTokenGenerator.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,8 @@ export type OllamaToken = {
77
done: boolean
88
};
99

10-
export async function* ollamaTokenGenerator(url: string, data: any): AsyncGenerator<OllamaToken> {
11-
for await (let line of lineGenerator(url, data)) {
10+
export async function* ollamaTokenGenerator(url: string, data: any, authToken: string): AsyncGenerator<OllamaToken> {
11+
for await (let line of lineGenerator(url, data, authToken)) {
1212
info('Receive line: ' + line);
1313
let parsed: OllamaToken;
1414
try {

src/prompts/autocomplete.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ import { ModelFormat, adaptPrompt } from './processors/models';
55

66
export async function autocomplete(args: {
77
endpoint: string,
8+
bearerToken: string,
89
model: string,
910
format: ModelFormat,
1011
prefix: string,
@@ -33,7 +34,7 @@ export async function autocomplete(args: {
3334
let res = '';
3435
let totalLines = 1;
3536
let blockStack: ('[' | '(' | '{')[] = [];
36-
outer: for await (let tokens of ollamaTokenGenerator(args.endpoint + '/api/generate', data)) {
37+
outer: for await (let tokens of ollamaTokenGenerator(args.endpoint + '/api/generate', data, args.bearerToken)) {
3738
if (args.canceled && args.canceled()) {
3839
break;
3940
}

src/prompts/provider.ts

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
8686
try {
8787

8888
// Check model exists
89-
let modelExists = await ollamaCheckModel(inferenceConfig.endpoint, inferenceConfig.modelName);
89+
let modelExists = await ollamaCheckModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
9090
if (token.isCancellationRequested) {
9191
info(`Canceled after AI completion.`);
9292
return;
@@ -111,7 +111,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
111111

112112
// Perform download
113113
this.statusbar.text = `$(sync~spin) Downloading`;
114-
await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName);
114+
await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
115115
this.statusbar.text = `$(sync~spin) Llama Coder`;
116116
}
117117
if (token.isCancellationRequested) {
@@ -125,6 +125,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
125125
prefix: prepared.prefix,
126126
suffix: prepared.suffix,
127127
endpoint: inferenceConfig.endpoint,
128+
bearerToken: inferenceConfig.bearerToken,
128129
model: inferenceConfig.modelName,
129130
format: inferenceConfig.modelFormat,
130131
maxLines: inferenceConfig.maxLines,

0 commit comments

Comments (0)