
Commit 38544c1

Merge pull request #43 from Sinan-Karakaya/main

FEAT: Added support for Bearer token in header for protected endpoints

2 parents d6aaa95 + f7bd142

8 files changed: +32 -18 lines

package.json (+5)

@@ -76,6 +76,11 @@
       "description": "Ollama Server Endpoint. Empty for local instance. Example: http://192.168.0.100:11434",
       "order": 1
     },
+    "inference.bearerToken": {
+      "type": "string",
+      "default": "",
+      "description": "Auth Bearer token that should be used for secure requests. Leave empty if not desired."
+    },
     "inference.model": {
       "type": "string",
       "enum": [

src/config.ts (+2)

@@ -15,6 +15,7 @@ class Config {
         if (endpoint === '') {
             endpoint = 'http://127.0.0.1:11434';
         }
+        let bearerToken = config.get('bearerToken') as string;

         // Load general paremeters
         let maxLines = config.get('maxLines') as number;
@@ -39,6 +40,7 @@ class Config {

         return {
             endpoint,
+            bearerToken,
             maxLines,
             maxTokens,
             temperature,
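
For reference, a hedged sketch of the shape Config now returns; only endpoint and bearerToken are confirmed by this diff, and the remaining fields are inferred from the surrounding context lines:

// Hypothetical type for the inference config object assembled above.
type InferenceConfig = {
    endpoint: string,
    bearerToken: string,   // '' when the user leaves the setting empty
    maxLines: number,
    maxTokens: number,
    temperature: number,
    // ...model name, format, and other settings not shown in this hunk
};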

src/modules/lineGenerator.ts (+10 -8)

@@ -1,14 +1,16 @@
-export async function* lineGenerator(url: string, data: any): AsyncGenerator<string> {
-
+export async function* lineGenerator(url: string, data: any, bearerToken: string): AsyncGenerator<string> {
     // Request
     const controller = new AbortController();
     let res = await fetch(url, {
-        method: 'POST',
-        body: JSON.stringify(data),
-        headers: {
-            "Content-Type": "application/json",
-        },
-        signal: controller.signal
+        method: 'POST',
+        body: JSON.stringify(data),
+        headers: bearerToken ? {
+            'Content-Type': 'application/json',
+            Authorization: `Bearer ${bearerToken}`,
+        } : {
+            'Content-Type': 'application/json',
+        },
+        signal: controller.signal,
     });
     if (!res.ok || !res.body) {
         throw Error('Unable to connect to backend');
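
Both branches of the headers ternary repeat Content-Type. An equivalent, slightly tighter construction would use an object spread; a minimal sketch with the same behavior, not what this PR ships:

// Only adds Authorization when a token is configured.
const headers: Record<string, string> = {
    'Content-Type': 'application/json',
    ...(bearerToken ? { Authorization: `Bearer ${bearerToken}` } : {}),
};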

src/modules/ollamaCheckModel.ts (+6 -3)

@@ -1,9 +1,12 @@
 import { info } from "./log";

-export async function ollamaCheckModel(endpoint: string, model: string) {
-
+export async function ollamaCheckModel(endpoint: string, model: string, bearerToken: string) {
     // Check if exists
-    let res = await fetch(endpoint + '/api/tags');
+    let res = await fetch(endpoint + '/api/tags', {
+        headers: bearerToken ? {
+            Authorization: `Bearer ${bearerToken}`,
+        } : {},
+    });
     if (!res.ok) {
         info(await res.text());
         info(endpoint + '/api/tags');

src/modules/ollamaDownloadModel.ts (+2 -2)

@@ -1,9 +1,9 @@
 import { lineGenerator } from "./lineGenerator";
 import { info } from "./log";

-export async function ollamaDownloadModel(endpoint: string, model: string) {
+export async function ollamaDownloadModel(endpoint: string, model: string, bearerToken: string) {
     info('Downloading model from ollama: ' + model);
-    for await (let line of lineGenerator(endpoint + '/api/pull', { name: model })) {
+    for await (let line of lineGenerator(endpoint + '/api/pull', { name: model }, bearerToken)) {
         info('[DOWNLOAD] ' + line);
     }
 }

src/modules/ollamaTokenGenerator.ts (+2 -2)

@@ -7,8 +7,8 @@ export type OllamaToken = {
     done: boolean
 };

-export async function* ollamaTokenGenerator(url: string, data: any): AsyncGenerator<OllamaToken> {
-    for await (let line of lineGenerator(url, data)) {
+export async function* ollamaTokenGenerator(url: string, data: any, bearerToken: string): AsyncGenerator<OllamaToken> {
+    for await (let line of lineGenerator(url, data, bearerToken)) {
         info('Receive line: ' + line);
         let parsed: OllamaToken;
         try {

src/prompts/autocomplete.ts (+2 -1)

@@ -5,6 +5,7 @@ import { ModelFormat, adaptPrompt } from './processors/models';

 export async function autocomplete(args: {
     endpoint: string,
+    bearerToken: string,
     model: string,
     format: ModelFormat,
     prefix: string,
@@ -33,7 +34,7 @@
     let res = '';
     let totalLines = 1;
     let blockStack: ('[' | '(' | '{')[] = [];
-    outer: for await (let tokens of ollamaTokenGenerator(args.endpoint + '/api/generate', data)) {
+    outer: for await (let tokens of ollamaTokenGenerator(args.endpoint + '/api/generate', data, args.bearerToken)) {
         if (args.canceled && args.canceled()) {
             break;
         }

src/prompts/provider.ts (+3 -2)

@@ -122,7 +122,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
         try {

             // Check model exists
-            let modelExists = await ollamaCheckModel(inferenceConfig.endpoint, inferenceConfig.modelName);
+            let modelExists = await ollamaCheckModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
             if (token.isCancellationRequested) {
                 info(`Canceled after AI completion.`);
                 return;
@@ -147,7 +147,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {

             // Perform download
             this.update('sync~spin', 'Downloading');
-            await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName);
+            await ollamaDownloadModel(inferenceConfig.endpoint, inferenceConfig.modelName, inferenceConfig.bearerToken);
             this.update('sync~spin', 'Llama Coder')
         }
         if (token.isCancellationRequested) {
@@ -161,6 +161,7 @@ export class PromptProvider implements vscode.InlineCompletionItemProvider {
             prefix: prepared.prefix,
             suffix: prepared.suffix,
             endpoint: inferenceConfig.endpoint,
+            bearerToken: inferenceConfig.bearerToken,
             model: inferenceConfig.modelName,
             format: inferenceConfig.modelFormat,
             maxLines: inferenceConfig.maxLines,
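
For manual verification against a protected instance, a hedged smoke test; the endpoint URL reuses the example from the setting description, and the environment variable is a placeholder, not part of this PR:

// Checks that a token-protected Ollama endpoint accepts the configured token.
async function smokeTest(endpoint: string, bearerToken: string) {
    const res = await fetch(endpoint + '/api/tags', {
        headers: bearerToken ? { Authorization: `Bearer ${bearerToken}` } : {},
    });
    if (!res.ok) {
        throw Error(`Expected 200 from /api/tags, got ${res.status}`);
    }
    console.log('Token accepted; models:', await res.json());
}

smokeTest('http://192.168.0.100:11434', process.env.OLLAMA_BEARER_TOKEN ?? '').catch(console.error);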
