-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path.env.example
More file actions
71 lines (63 loc) · 2.45 KB
/
.env.example
File metadata and controls
71 lines (63 loc) · 2.45 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
# raged environment variables
# Copy to .env and adjust values for local development.
# These are NOT loaded by the application — they document available configuration.
# --- Docker Compose Host Ports ---
# Override these if you have port conflicts on the host machine
POSTGRES_HOST_PORT=5432
OLLAMA_HOST_PORT=11434
API_HOST_PORT=8080
SEAWEEDFS_S3_HOST_PORT=8333
SEAWEEDFS_MASTER_HOST_PORT=9333
# --- RAG API ---
PORT=8080
RAGED_API_TOKEN=
# WARNING: Only disable SSRF checks in trusted local development environments
# DISABLE_SSRF_CHECK=true
BODY_LIMIT_BYTES=10485760
# --- Database ---
DATABASE_URL=postgresql://raged:raged@localhost:5432/raged
# Used by docker compose containers (defaults to in-network postgres service)
COMPOSE_DATABASE_URL=postgresql://raged:raged@postgres:5432/raged
POSTGRES_USER=raged
# NOTE: Local development default only. Use a strong password in production.
POSTGRES_PASSWORD=raged
POSTGRES_DB=raged
# --- Ollama ---
OLLAMA_URL=http://localhost:11434
# NOTE: OLLAMA_HOST_PORT is already set in the "Docker Compose Host Ports"
# section at the top of this file — do not define it twice.
# ollama (default) or openai
EMBED_PROVIDER=ollama
EMBED_MODEL=nomic-embed-text
# --- Query Router (LLM Classification) ---
# The router uses a GENERATIVE model — NOT the same as EMBED_MODEL.
# nomic-embed-text cannot generate text; use llama3, mistral, etc.
ROUTER_LLM_ENABLED=true
ROUTER_LLM_MODEL=llama3
# Call timeout in milliseconds (default: 2000)
ROUTER_LLM_TIMEOUT_MS=2000
# Circuit-breaker cooldown after 5 consecutive failures (default: 30000ms)
ROUTER_LLM_CIRCUIT_BREAK_MS=30000
# --- Enrichment (optional) ---
ENRICHMENT_ENABLED=false
# --- Blob Storage (optional, SeaweedFS S3) ---
BLOB_STORE_URL=http://localhost:8333
# NOTE: SEAWEEDFS_S3_HOST_PORT and SEAWEEDFS_MASTER_HOST_PORT are already set
# in the "Docker Compose Host Ports" section at the top of this file.
BLOB_STORE_ACCESS_KEY=seaweedfs
BLOB_STORE_SECRET_KEY=seaweedfs-secret
BLOB_STORE_BUCKET=raged-raw
# 1MB - files larger than this are stored in blob storage
BLOB_STORE_THRESHOLD_BYTES=1048576
# NOTE: For production, replace with strong unique credentials.
# --- Worker (Enrichment) ---
WORKER_CONCURRENCY=4
# auto (detects from API keys), ollama, anthropic, or openai
EXTRACTOR_PROVIDER=auto
EXTRACTOR_MODEL_FAST=gpt-4.1-mini
EXTRACTOR_MODEL_CAPABLE=gpt-4.1-mini
EXTRACTOR_MODEL_VISION=gpt-4.1-mini
EXTRACTOR_MAX_OUTPUT_TOKENS=16384
OPENAI_BASE_URL=https://api.openai.com/v1
ANTHROPIC_API_KEY=
OPENAI_API_KEY=
OLLAMA_API_KEY=
# Anthropic-specific model overrides (defaults: claude-3-5-haiku/sonnet)
# ANTHROPIC_MODEL_FAST=claude-3-5-haiku-20241022
# ANTHROPIC_MODEL_CAPABLE=claude-3-5-sonnet-20241022