quivr
[Bug]: Docker compose file errors
What happened?
The Docker Compose file does not work; it produces errors. Here is a fixed one:
version: "2.3"

services:
  frontend:
    image: quivr-frontend-prebuilt
    build:
      context: frontend
      dockerfile: Dockerfile
      args:
        - NEXT_PUBLIC_ENV=local
        - NEXT_PUBLIC_BACKEND_URL=${NEXT_PUBLIC_BACKEND_URL}
        - NEXT_PUBLIC_SUPABASE_URL=${NEXT_PUBLIC_SUPABASE_URL}
        - NEXT_PUBLIC_SUPABASE_ANON_KEY=${NEXT_PUBLIC_SUPABASE_ANON_KEY}
        - NEXT_PUBLIC_CMS_URL=${NEXT_PUBLIC_CMS_URL}
        - NEXT_PUBLIC_FRONTEND_URL=${NEXT_PUBLIC_FRONTEND_URL}
    container_name: web
    depends_on:
      - backend-core
    restart: always
    ports:
      - 13000:3000

  backend-core:
    image: stangirard/quivr-backend-prebuilt:latest
    env_file:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile
    container_name: backend-core
    healthcheck:
      test: [ "CMD", "curl", "http://localhost:5050/healthz" ]
    command:
      - "uvicorn"
      - "main:app"
      - "--reload"
      - "--host"
      - "0.0.0.0"
      - "--port"
      - "5050"
      - "--workers"
      - "1"
    restart: always
    volumes:
      - ./backend/:/code/
    depends_on:
      db:
        condition: service_healthy
      kong:
        condition: service_healthy
    ports:
      - 15050:5050

  redis:
    image: redis:latest@sha256:a7cee7c8178ff9b5297cb109e6240f5072cdaaafd775ce6b586c3c704b06458e
    container_name: quivr-redis
    restart: always
    depends_on:
      db:
        condition: service_healthy

  worker:
    image: stangirard/quivr-backend-prebuilt:latest
    env_file:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile
    container_name: worker
    command: celery -A celery_worker worker -l info
    restart: always
    depends_on:
      - redis
      - db

  beat:
    image: stangirard/quivr-backend-prebuilt:latest
    env_file:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile
    container_name: beat
    command: celery -A celery_worker beat -l info
    restart: always
    depends_on:
      - redis

  flower:
    image: stangirard/quivr-backend-prebuilt:latest
    env_file:
      - .env
    build:
      context: backend
      dockerfile: Dockerfile
    container_name: flower
    command: celery -A celery_worker flower -l info --port=5555
    restart: always
    depends_on:
      - redis
      - worker
      - beat
    ports:
      - 5555:5555

  studio:
    container_name: supabase-studio
    image: supabase/studio:20231123-64a766a@sha256:393669f03b739fa5f196a5a0254442d224c439acff3ebe738cb27847d5fc207d
    restart: unless-stopped
    healthcheck:
      test:
        [
          "CMD",
          "node",
          "-e",
          "require('http').get('http://localhost:3000/api/profile', (r) => {if (r.statusCode !== 200) throw new Error(r.statusCode)})"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      STUDIO_PG_META_URL: http://meta:8080
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      DEFAULT_ORGANIZATION_NAME: ${STUDIO_DEFAULT_ORGANIZATION}
      DEFAULT_PROJECT_NAME: ${STUDIO_DEFAULT_PROJECT}
      SUPABASE_URL: http://kong:8000
      SUPABASE_PUBLIC_URL: ${SUPABASE_PUBLIC_URL}
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_URL: http://analytics:4000
      NEXT_PUBLIC_ENABLE_LOGS: "true"
      # Comment to use Big Query backend for analytics
      NEXT_ANALYTICS_BACKEND_PROVIDER: postgres
      # Uncomment to use Big Query backend for analytics
      # NEXT_ANALYTICS_BACKEND_PROVIDER: bigquery

  kong:
    container_name: supabase-kong
    image: kong:2.8.1@sha256:1b53405d8680a09d6f44494b7990bf7da2ea43f84a258c59717d4539abf09f6d
    restart: unless-stopped
    # https://unix.stackexchange.com/a/294837
    entrypoint: bash -c 'eval "echo \"$$(cat ~/temp.yml)\"" > ~/kong.yml && /docker-entrypoint.sh kong docker-start'
    ports:
      - ${KONG_HTTP_PORT}:8000/tcp
      - ${KONG_HTTPS_PORT}:8443/tcp
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      KONG_DATABASE: "off"
      KONG_DECLARATIVE_CONFIG: /home/kong/kong.yml
      # https://github.com/supabase/cli/issues/14
      KONG_DNS_ORDER: LAST,A,CNAME
      KONG_PLUGINS: request-transformer,cors,key-auth,acl,basic-auth
      KONG_NGINX_PROXY_PROXY_BUFFER_SIZE: 160k
      KONG_NGINX_PROXY_PROXY_BUFFERS: 64 160k
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_KEY: ${SERVICE_ROLE_KEY}
      DASHBOARD_USERNAME: ${DASHBOARD_USERNAME}
      DASHBOARD_PASSWORD: ${DASHBOARD_PASSWORD}
    volumes:
      # https://github.com/supabase/supabase/issues/12661
      - ./volumes/api/kong.yml:/home/kong/temp.yml:ro

  auth:
    container_name: supabase-auth
    image: supabase/gotrue:v2.99.0@sha256:b503f1fac70544bb5a43d4507e6a0842dc119e4937647b1e7ade34eae7dcdbf0
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:9999/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      GOTRUE_API_HOST: 0.0.0.0
      GOTRUE_API_PORT: 9999
      API_EXTERNAL_URL: ${API_EXTERNAL_URL}
      GOTRUE_DB_DRIVER: postgres
      GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      GOTRUE_SITE_URL: ${SITE_URL}
      GOTRUE_URI_ALLOW_LIST: ${ADDITIONAL_REDIRECT_URLS}
      GOTRUE_DISABLE_SIGNUP: ${DISABLE_SIGNUP}
      GOTRUE_JWT_ADMIN_ROLES: service_role
      GOTRUE_JWT_AUD: authenticated
      GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
      GOTRUE_JWT_EXP: ${JWT_EXPIRY}
      GOTRUE_JWT_SECRET: ${JWT_SECRET}
      GOTRUE_EXTERNAL_EMAIL_ENABLED: ${ENABLE_EMAIL_SIGNUP}
      GOTRUE_MAILER_AUTOCONFIRM: ${ENABLE_EMAIL_AUTOCONFIRM}
      # GOTRUE_MAILER_SECURE_EMAIL_CHANGE_ENABLED: true
      # GOTRUE_SMTP_MAX_FREQUENCY: 1s
      GOTRUE_SMTP_ADMIN_EMAIL: ${SMTP_ADMIN_EMAIL}
      GOTRUE_SMTP_HOST: ${SMTP_HOST}
      GOTRUE_SMTP_PORT: ${SMTP_PORT}
      GOTRUE_SMTP_USER: ${SMTP_USER}
      GOTRUE_SMTP_PASS: ${SMTP_PASS}
      GOTRUE_SMTP_SENDER_NAME: ${SMTP_SENDER_NAME}
      GOTRUE_MAILER_URLPATHS_INVITE: ${MAILER_URLPATHS_INVITE}
      GOTRUE_MAILER_URLPATHS_CONFIRMATION: ${MAILER_URLPATHS_CONFIRMATION}
      GOTRUE_MAILER_URLPATHS_RECOVERY: ${MAILER_URLPATHS_RECOVERY}
      GOTRUE_MAILER_URLPATHS_EMAIL_CHANGE: ${MAILER_URLPATHS_EMAIL_CHANGE}
      GOTRUE_EXTERNAL_PHONE_ENABLED: ${ENABLE_PHONE_SIGNUP}
      GOTRUE_SMS_AUTOCONFIRM: ${ENABLE_PHONE_AUTOCONFIRM}

  rest:
    container_name: supabase-rest
    image: postgrest/postgrest:v11.2.2@sha256:23b2dabfc7f3f1a6c11b71f2ce277191659da38ab76042a30c3c8d6c4e07b446
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    restart: unless-stopped
    environment:
      PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      PGRST_DB_SCHEMAS: ${PGRST_DB_SCHEMAS}
      PGRST_DB_ANON_ROLE: anon
      PGRST_JWT_SECRET: ${JWT_SECRET}
      PGRST_DB_USE_LEGACY_GUCS: "false"
      PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET}
      PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXPIRY}
    command: "postgrest"

  realtime:
    # This container name looks inconsistent but is correct because realtime constructs tenant id by parsing the subdomain
    container_name: realtime-dev.supabase-realtime
    image: supabase/realtime:v2.25.35@sha256:634a59e298fbd012523fbc874f74a18f2ae8523b11c646a2b4052b9b943c2939
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    healthcheck:
      test:
        [
          "CMD",
          "bash",
          "-c",
          "printf \\0 > /dev/tcp/localhost/4000"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      PORT: 4000
      DB_HOST: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_USER: supabase_admin
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_NAME: ${POSTGRES_DB}
      DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
      DB_ENC_KEY: supabaserealtime
      API_JWT_SECRET: ${JWT_SECRET}
      FLY_ALLOC_ID: fly123
      FLY_APP_NAME: realtime
      SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
      ERL_AFLAGS: -proto_dist inet_tcp
      ENABLE_TAILSCALE: "false"
      DNS_NODES: "''"
    command: >
      sh -c "/app/bin/migrate && /app/bin/realtime eval 'Realtime.Release.seeds(Realtime.Repo)' && /app/bin/server"

  storage:
    container_name: supabase-storage
    image: supabase/storage-api:v0.43.11@sha256:2cd146f1af313019f0d03e18383e46578e7cb118bc5baa1fb7551ab56c0cb60e
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      rest:
        condition: service_started
      imgproxy:
        condition: service_started
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://localhost:5000/status"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    restart: unless-stopped
    environment:
      ANON_KEY: ${ANON_KEY}
      SERVICE_KEY: ${SERVICE_ROLE_KEY}
      POSTGREST_URL: http://rest:3000
      PGRST_JWT_SECRET: ${JWT_SECRET}
      DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      FILE_SIZE_LIMIT: 52428800
      STORAGE_BACKEND: file
      FILE_STORAGE_BACKEND_PATH: /var/lib/storage
      TENANT_ID: stub
      # TODO: https://github.com/supabase/storage-api/issues/55
      REGION: stub
      GLOBAL_S3_BUCKET: stub
      ENABLE_IMAGE_TRANSFORMATION: "true"
      IMGPROXY_URL: http://imgproxy:5001
    volumes:
      - ./volumes/storage:/var/lib/storage:z

  imgproxy:
    container_name: supabase-imgproxy
    image: darthsim/imgproxy:v3.8.0@sha256:0facd355d50f3be665ebe674486f2b2e9cdaebd3f74404acd9b7fece2f661435
    healthcheck:
      test: [ "CMD", "imgproxy", "health" ]
      timeout: 5s
      interval: 5s
      retries: 3
    environment:
      IMGPROXY_BIND: ":5001"
      IMGPROXY_LOCAL_FILESYSTEM_ROOT: /
      IMGPROXY_USE_ETAG: "true"
      IMGPROXY_ENABLE_WEBP_DETECTION: ${IMGPROXY_ENABLE_WEBP_DETECTION}
    volumes:
      - ./volumes/storage:/var/lib/storage:z

  meta:
    container_name: supabase-meta
    image: supabase/postgres-meta:v0.68.0@sha256:31a107dcfe9257792b49f560a5527d5fbd7128b986acad5431b269bac4d17f12
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
      analytics:
        condition: service_healthy
    restart: unless-stopped
    environment:
      PG_META_PORT: 8080
      PG_META_DB_HOST: ${POSTGRES_HOST}
      PG_META_DB_PORT: ${POSTGRES_PORT}
      PG_META_DB_NAME: ${POSTGRES_DB}
      PG_META_DB_USER: supabase_admin
      PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD}

  functions:
    container_name: supabase-edge-functions
    image: supabase/edge-runtime:v1.22.4@sha256:4e02aacd0c8fa7ab103d718e4a0ece11d7d8a9446299f523d8ac5d092fec4908
    restart: unless-stopped
    depends_on:
      analytics:
        condition: service_healthy
    environment:
      JWT_SECRET: ${JWT_SECRET}
      SUPABASE_URL: http://kong:8000
      SUPABASE_ANON_KEY: ${ANON_KEY}
      SUPABASE_SERVICE_ROLE_KEY: ${SERVICE_ROLE_KEY}
      SUPABASE_DB_URL: postgresql://postgres:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      # TODO: Allow configuring VERIFY_JWT per function. This PR might help: https://github.com/supabase/cli/pull/786
      VERIFY_JWT: "${FUNCTIONS_VERIFY_JWT}"
    volumes:
      - ./volumes/functions:/home/deno/functions:Z
    command:
      - start
      - --main-service
      - /home/deno/functions/main

  analytics:
    container_name: supabase-analytics
    image: supabase/logflare:1.4.0@sha256:e693c787ffe1ae17b6e4e920a3cdd212416d3e1f97e1bd7cb5b67de0abbb0264
    healthcheck:
      test: [ "CMD", "curl", "http://localhost:4000/health" ]
      timeout: 5s
      interval: 5s
      retries: 10
    restart: unless-stopped
    depends_on:
      db:
        # Disable this if you are using an external Postgres database
        condition: service_healthy
    # Uncomment to use Big Query backend for analytics
    # volumes:
    #   - type: bind
    #     source: ${PWD}/gcloud.json
    #     target: /opt/app/rel/logflare/bin/gcloud.json
    #     read_only: true
    environment:
      LOGFLARE_NODE_HOST: 127.0.0.1
      DB_USERNAME: supabase_admin
      DB_DATABASE: ${POSTGRES_DB}
      DB_HOSTNAME: ${POSTGRES_HOST}
      DB_PORT: ${POSTGRES_PORT}
      DB_PASSWORD: ${POSTGRES_PASSWORD}
      DB_SCHEMA: _analytics
      LOGFLARE_API_KEY: ${LOGFLARE_API_KEY}
      LOGFLARE_SINGLE_TENANT: "true"
      LOGFLARE_SUPABASE_MODE: "true"
      LOGFLARE_MIN_CLUSTER_SIZE: 1
      RELEASE_COOKIE: cookie
      # Comment variables to use Big Query backend for analytics
      POSTGRES_BACKEND_URL: postgresql://supabase_admin:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/${POSTGRES_DB}
      POSTGRES_BACKEND_SCHEMA: _analytics
      LOGFLARE_FEATURE_FLAG_OVERRIDE: multibackend=true
      # Uncomment to use Big Query backend for analytics
      # GOOGLE_PROJECT_ID: ${GOOGLE_PROJECT_ID}
      # GOOGLE_PROJECT_NUMBER: ${GOOGLE_PROJECT_NUMBER}
    ports:
      - 4000:4000
    entrypoint: |
      sh -c `cat <<'EOF' > run.sh && sh run.sh
      ./logflare eval Logflare.Release.migrate
      ./logflare start --sname logflare
      EOF
      `

  # Comment out everything below this point if you are using an external Postgres database
  db:
    container_name: supabase-db
    image: supabase/postgres:15.1.0.136@sha256:fb8387fddbb6dd2fc8af512bfb721a02786e0d71ded364ed66d096574e70d81e
    healthcheck:
      test: pg_isready -U postgres -h localhost
      interval: 5s
      timeout: 5s
      retries: 10
    depends_on:
      vector:
        condition: service_healthy
    command:
      - postgres
      - -c
      - config_file=/etc/postgresql/postgresql.conf
      - -c
      - log_min_messages=fatal # prevents Realtime polling queries from appearing in logs
    restart: unless-stopped
    ports:
      # Pass down internal port because it's set dynamically by other services
      - ${POSTGRES_PORT}:${POSTGRES_PORT}
    environment:
      POSTGRES_HOST: /var/run/postgresql
      PGPORT: ${POSTGRES_PORT}
      POSTGRES_PORT: ${POSTGRES_PORT}
      PGPASSWORD: ${POSTGRES_PASSWORD}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
      PGDATABASE: ${POSTGRES_DB}
      POSTGRES_DB: ${POSTGRES_DB}
      JWT_SECRET: ${JWT_SECRET}
      JWT_EXP: ${JWT_EXPIRY}
    volumes:
      - ./volumes/db/realtime.sql:/docker-entrypoint-initdb.d/migrations/99-realtime.sql:Z
      # Must be superuser to create event trigger
      - ./volumes/db/webhooks.sql:/docker-entrypoint-initdb.d/init-scripts/98-webhooks.sql:Z
      # Must be superuser to alter reserved role
      - ./volumes/db/roles.sql:/docker-entrypoint-initdb.d/init-scripts/99-roles.sql:Z
      # Initialize the database settings with JWT_SECRET and JWT_EXP
      - ./volumes/db/jwt.sql:/docker-entrypoint-initdb.d/init-scripts/99-jwt.sql:Z
      # PGDATA directory is persisted between restarts
      - ./volumes/db/data:/var/lib/postgresql/data:Z
      # Changes required for Analytics support
      - ./volumes/db/logs.sql:/docker-entrypoint-initdb.d/migrations/99-logs.sql:Z
      - ./scripts/tables.sql:/docker-entrypoint-initdb.d/seed.sql

  vector:
    container_name: supabase-vector
    image: timberio/vector:0.28.1-alpine@sha256:4bc04aca94a44f04b427a490f346e7397ef7ce61fe589d718f744f7d92cb5c80
    healthcheck:
      test:
        [
          "CMD",
          "wget",
          "--no-verbose",
          "--tries=1",
          "--spider",
          "http://vector:9001/health"
        ]
      timeout: 5s
      interval: 5s
      retries: 3
    volumes:
      - ./volumes/logs/vector.yml:/etc/vector/vector.yml:ro
      - ${DOCKER_SOCKET_LOCATION}:/var/run/docker.sock:ro
    command: [ "--config", "etc/vector/vector.yml" ]
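
To sanity-check the file before bringing everything up, you can let Compose render it first. A minimal sketch; the docker-compose.yml name and .env path are assumptions about your checkout layout:

# Render the merged config with ${VARS} interpolated; unset variables
# produce warnings and YAML/indentation errors fail immediately.
docker compose -f docker-compose.yml --env-file .env config > /dev/null

# Start the stack in the background and follow the backend logs.
docker compose -f docker-compose.yml up -d
docker compose -f docker-compose.yml logs -f backend-core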
🤖
Hey @vontainment! 👋 I'm Dosu, a bot here to help out with bugs, answer queries, or assist with contributions while the human maintainers are away. How's your day going?
Based on the Docker Compose file you've provided, it's difficult to pinpoint the exact issue without knowing the specific error message you're encountering. However, I can provide some general advice based on similar issues found in the Quivr repository.
- Ensure your network connection is stable and that your Docker containers can access the internet. If you're behind a proxy, configure Docker to use the proxy settings (first sketch after this list). This was the suggested solution in the "[Bug]: Issue with yarn.lock" issue.
- Check the line endings in your files. If they're in CRLF format, convert them to LF (second sketch after this list). This was the solution provided in the "[Issue] start.sh script file" issue.
- Make sure all your environment variables are correctly set. The Docker Compose file references several environment variables, and if any of them is unset or wrong, it could cause issues (third sketch after this list). This was a point raised in the issue about missing environment variables.
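
For the proxy case, the Docker CLI reads client-side proxy settings from ~/.docker/config.json. A minimal sketch; the proxy host and port are placeholders, and if you already have a config.json you should merge this in by hand rather than overwrite it:

# Placeholder proxy address -- replace with your real proxy.
# Caution: this overwrites ~/.docker/config.json if it already exists.
cat > ~/.docker/config.json <<'EOF'
{
  "proxies": {
    "default": {
      "httpProxy": "http://proxy.example.com:3128",
      "httpsProxy": "http://proxy.example.com:3128",
      "noProxy": "localhost,127.0.0.1"
    }
  }
}
EOF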
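
For line endings, one way to convert in place (the file list is illustrative; shell scripts and .env files are the usual culprits, and dos2unix does the same job):

# Strip trailing carriage returns (GNU sed; on macOS use: sed -i '' ...).
sed -i 's/\r$//' .env scripts/start.sh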
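
And for the environment variables, a quick sketch to enumerate everything the compose file expects and compare it against what .env actually defines (assumes the file is saved as docker-compose.yml):

# Every ${VAR} referenced by the compose file...
grep -oE '\$\{[A-Z_]+\}' docker-compose.yml | tr -d '${}' | sort -u
# ...versus every variable defined in .env; eyeball the difference.
grep -oE '^[A-Z_]+' .env | sort -u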
If these suggestions don't resolve your issue, could you please provide the exact error message you're receiving when trying to run the Docker Compose file? This will help in diagnosing the problem more accurately.
Sources
- [Bug]: Issue with yarn.lock
- [Issue] start.sh script file
- When using docker compose I get " either NEXT_PUBLIC_SUPABASE_URL and NEXT_PUBLIC_SUPABASE_ANON_KEY env variables or supabaseUrl and supabaseKey are required!"
- docker-compose-ollama.yml
This response is meant to be useful and save you time. It is not meant to be a precise solution, but rather a starting point for your own research.
Help me be more useful! Please leave a 👍 if this is helpful and 👎 if it is irrelevant.
If you want to continue the conversation, start your reply with @dosu-bot.
docker-compose-ollama.yml does not exist anymore, FYI.
Thanks for your contributions; we'll be closing this issue as it has gone stale. Feel free to reopen if you'd like to continue the discussion.