
Commit 64e6fb2

feat: add analytics integration
1 parent 447ba59 commit 64e6fb2

File tree

5 files changed: +111 −7 lines changed


package-lock.json

+52 −2 (generated file; diff not rendered by default)

package.json

+1
@@ -55,6 +55,7 @@
     "@playwright/test": "1.52.0-alpha-1743011787000",
     "dotenv": "^16.4.7",
     "playwright": "1.52.0-alpha-1743011787000",
+    "posthog-node": "^4.11.3",
     "yaml": "^2.7.1",
     "zod": "^3.24.2"
   },

src/analytics.ts

+36
@@ -0,0 +1,36 @@
+import { PostHog } from 'posthog-node';
+import { analyticsProvider, analyticsModel } from './llm';
+
+const ANALYTICS_OPT_IN = process.env.ANALYTICS_OPT_IN !== 'false'; // defaults to true
+
+let client: PostHog | null = null;
+
+if (ANALYTICS_OPT_IN)
+{
+    client = new PostHog(
+        'phc_4pwxr91oy6WYPfaD13ClVreSbT7F7ClJcAEyBpTQCOl',
+        {
+            host: 'https://us.i.posthog.com',
+            flushAt: 1, // Important for serverless environments
+            flushInterval: 0 // Important for serverless environments
+        }
+    );
+}
+
+export async function captureAutoCall() {
+    if (!client) return;
+
+    await client.capture({
+        distinctId: '120836',
+        event: 'auto_called',
+        properties: {
+            llm_provider: analyticsProvider,
+            llm_model: analyticsModel
+        }
+    });
+}
+
+export async function shutdown() {
+    if (!client) return;
+    await client.shutdown();
+}
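
Note that the opt-out is string-based: analytics stays on unless ANALYTICS_OPT_IN is exactly the string 'false' when the module is first imported. A minimal usage sketch from a consumer's side (only captureAutoCall and shutdown come from this commit; the script itself is illustrative):

// Illustrative usage only -- not part of this commit.
// To disable analytics, set the env var before import, e.g.:
//     ANALYTICS_OPT_IN=false npx playwright test
import { captureAutoCall, shutdown } from './analytics';

async function main() {
    // No-op when opted out: the module-level client stays null.
    await captureAutoCall();

    // Flushes any queued PostHog events; also a no-op when opted out.
    await shutdown();
}

main().catch(console.error);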

src/auto.ts

+8
@@ -2,6 +2,7 @@ import { test as base } from '@playwright/test';
 import { z } from 'zod';
 import { AutoConfig } from './types';
 import { sessionManager, context } from './browser';
+import { captureAutoCall, shutdown } from './analytics';
 import { createReactAgent } from '@langchain/langgraph/prebuilt';
 import { HumanMessage } from '@langchain/core/messages';
 import { createLLMModel } from './llm';
@@ -317,6 +318,7 @@ export async function auto(
     config?: AutoConfig
 ): Promise<any> {
     console.log(`[Auto] Processing instruction: "${instruction}"`);
+    await captureAutoCall();

     if (config?.page)
     {
@@ -365,10 +367,16 @@
     } catch (error)
     {
         console.log(`[Auto] Error processing response:`, error);
+
         throw error;
     }
 }

+// Ensure analytics are flushed before the process exits
+process.on('beforeExit', async () => {
+    await shutdown();
+});
+
 // Export everything needed for the package
 export { sessionManager } from './browser';
 export * from './types';
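
Because the client is created with flushAt: 1 and flushInterval: 0, each event is sent as soon as it is captured, and the beforeExit hook gives any in-flight request a chance to finish before Node exits. A sketch of the resulting lifecycle in a Playwright test, assuming the standard page fixture (the test body is illustrative; auto() and the hook are from this commit):

// Illustrative usage only -- not part of this commit.
import { test } from '@playwright/test';
import { auto } from './auto';

test('records one auto_called event per auto() call', async ({ page }) => {
    // auto() awaits captureAutoCall() at the top of the function,
    // so the event is recorded even if the instruction later fails.
    await auto('click the login button', { page });
});
// When the event loop drains, 'beforeExit' fires and shutdown()
// flushes whatever the PostHog client still has queued.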

src/llm.ts

+14 −5
@@ -8,17 +8,23 @@ import dotenv from 'dotenv';
 // Load environment variables
 dotenv.config();

+// Export analytics-specific variables
+export const analyticsProvider = process.env.LLM_PROVIDER || 'openai';
+export const analyticsModel = process.env.AUTOBROWSE_LLM_MODEL || 'gpt-4o-mini';
+
 export function createLLMModel() {
     const provider = process.env.LLM_PROVIDER || 'openai';
     const model = process.env.AUTOBROWSE_LLM_MODEL || 'gpt-4o-mini';

-    if (provider === 'google') {
+    if (provider === 'google')
+    {
         return new ChatGoogleGenerativeAI({
             model: model
         });
     }

-    if (provider === 'azure') {
+    if (provider === 'azure')
+    {
         return new AzureChatOpenAI({
             temperature: 0,
             maxRetries: 2,
@@ -30,21 +36,24 @@ export function createLLMModel() {
         });
     }

-    if (provider === 'anthropic') {
+    if (provider === 'anthropic')
+    {
         return new ChatAnthropic({
             model: model,
             temperature: 0,
             anthropicApiKey: process.env.ANTHROPIC_API_KEY
         });
     }

-    if (provider === 'vertex') {
+    if (provider === 'vertex')
+    {
         return new ChatVertexAI({
             model: model,
             temperature: 0
         });
     }
-    if (provider === 'ollama') {
+    if (provider === 'ollama')
+    {
         return new ChatOllama({
             model: model,
             temperature: 0,
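
The exported analyticsProvider and analyticsModel read the same environment variables and fallbacks as createLLMModel(), so the captured event always mirrors the provider actually in use. A small sketch of what the event properties resolve to, assuming no other configuration:

// Illustrative only -- not part of this commit.
import { analyticsProvider, analyticsModel } from './llm';

// With no environment overrides these are the defaults above:
//   { llm_provider: 'openai', llm_model: 'gpt-4o-mini' }
// With e.g. LLM_PROVIDER=anthropic set, both this object and the
// model returned by createLLMModel() reflect the override.
console.log({ llm_provider: analyticsProvider, llm_model: analyticsModel });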
