forked from neo4j-labs/llm-graph-builder
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathdocker-compose.yml
95 lines (93 loc) · 3.5 KB
/
docker-compose.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
services:
backend:
build:
context: ./backend
dockerfile: Dockerfile
volumes:
- ./backend:/app
environment:
- NEO4J_URI=${NEO4J_URI-neo4j://database:7687}
- NEO4J_PASSWORD=${NEO4J_PASSWORD-password}
- NEO4J_USERNAME=${NEO4J_USERNAME-neo4j}
- OPENAI_API_KEY=${OPENAI_API_KEY-}
- DIFFBOT_API_KEY=${DIFFBOT_API_KEY-}
- EMBEDDING_MODEL=${EMBEDDING_MODEL-all-MiniLM-L6-v2}
- LANGCHAIN_ENDPOINT=${LANGCHAIN_ENDPOINT-}
- LANGCHAIN_TRACING_V2=${LANGCHAIN_TRACING_V2-}
- LANGCHAIN_PROJECT=${LANGCHAIN_PROJECT-}
- LANGCHAIN_API_KEY=${LANGCHAIN_API_KEY-}
- KNN_MIN_SCORE=${KNN_MIN_SCORE-0.94}
- IS_EMBEDDING=${IS_EMBEDDING-true}
- GEMINI_ENABLED=${GEMINI_ENABLED-False}
- GCP_LOG_METRICS_ENABLED=${GCP_LOG_METRICS_ENABLED-False}
- UPDATE_GRAPH_CHUNKS_PROCESSED=${UPDATE_GRAPH_CHUNKS_PROCESSED-20}
- NUMBER_OF_CHUNKS_TO_COMBINE=${NUMBER_OF_CHUNKS_TO_COMBINE-6}
- ENTITY_EMBEDDING=${ENTITY_EMBEDDING-False}
- GCS_FILE_CACHE=${GCS_FILE_CACHE-False}
# - LLM_MODEL_CONFIG_anthropic_claude_35_sonnet=${LLM_MODEL_CONFIG_anthropic_claude_35_sonnet-}
# - LLM_MODEL_CONFIG_fireworks_llama_v3_70b=${LLM_MODEL_CONFIG_fireworks_llama_v3_70b-}
# - LLM_MODEL_CONFIG_azure_ai_gpt_4o=${LLM_MODEL_CONFIG_azure_ai_gpt_4o-}
# - LLM_MODEL_CONFIG_azure_ai_gpt_35=${LLM_MODEL_CONFIG_azure_ai_gpt_35-}
# - LLM_MODEL_CONFIG_groq_llama3_70b=${LLM_MODEL_CONFIG_groq_llama3_70b-}
# - LLM_MODEL_CONFIG_bedrock_claude_3_5_sonnet=${LLM_MODEL_CONFIG_bedrock_claude_3_5_sonnet-}
# - LLM_MODEL_CONFIG_fireworks_qwen_72b=${LLM_MODEL_CONFIG_fireworks_qwen_72b-}
- LLM_MODEL_CONFIG_ollama_llama3=${LLM_MODEL_CONFIG_ollama_llama3-}
container_name: backend
extra_hosts:
- host.docker.internal:host-gateway
ports:
- "8000:8000"
networks:
- net
frontend:
depends_on:
- backend
build:
context: ./frontend
dockerfile: Dockerfile
args:
- BACKEND_API_URL=${BACKEND_API_URL-http://localhost:8000}
- REACT_APP_SOURCES=${REACT_APP_SOURCES-local,youtube,wiki,s3}
- LLM_MODELS=${LLM_MODELS-diffbot,openai-gpt-3.5,openai-gpt-4o}
- GOOGLE_CLIENT_ID=${GOOGLE_CLIENT_ID-""}
- BLOOM_URL=${BLOOM_URL-https://workspace-preview.neo4j.io/workspace/explore?connectURL={CONNECT_URL}&search=Show+me+a+graph&featureGenAISuggestions=true&featureGenAISuggestionsInternal=true}
- TIME_PER_CHUNK=${TIME_PER_CHUNK-4}
- TIME_PER_PAGE=${TIME_PER_PAGE-50}
- CHUNK_SIZE=${CHUNK_SIZE-5242880}
- ENV=${ENV-DEV}
- CHAT_MODES=${CHAT_MODES-""}
volumes:
- ./frontend:/app
- /app/node_modules
container_name: frontend
ports:
- "8080:8080"
networks:
- net
database:
restart: always
image: neo4j:5.21.0
ports:
- "7474:7474"
- "7687:7687"
volumes:
- graphragneo4jdata:/data
- graphragneo4jplugins:/plugins
environment:
NEO4J_AUTH: neo4j/password
NEO4J_apoc_export_file_enabled: true
NEO4J_apoc_import_file_enabled: true
NEO4J_apoc_import_file_use__neo4j__config: true
NEO4J_PLUGINS: '["apoc"]'
NEO4J_dbms_security_procedures_unrestricted: apoc.*
healthcheck:
test: ["CMD-SHELL", "neo4j status"]
# test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://neo4j:7474 || exit 1"]
interval: 8s
timeout: 5s
retries: 10
# Named volumes persisting Neo4j data and plugins across container recreation.
volumes:
  graphragneo4jdata:
  graphragneo4jplugins:
# User-defined bridge network shared by the application services.
networks:
  net: