@drmikecrowe
Created January 14, 2026 13:35
NAS Docker Compose files

# ============================================================
# LLM stack: vLLM + Faster-Whisper behind jwilder/nginx-proxy
# ============================================================
volumes:
  hf_cache:
  asr_cache:

services:
  # === vLLM (OpenAI-compatible server for Cursor) ===
  vllm:
    image: vllm/vllm-openai:latest
    container_name: vllm
    restart: unless-stopped
    environment:
      # nginx-proxy routing (use your wildcard/SAN cert name)
      VIRTUAL_HOST: llm.drmikecrowe.net,llm.local
      VIRTUAL_PORT: "8000"
      CERT_NAME: drmikecrowe.net
    # No host port exposure; TLS terminates at jwilder/nginx-proxy
    command: >
      --model Qwen/Qwen2.5-Coder-7B-Instruct-AWQ
      --served-model-name coder7b
      --max-model-len 16384
      --kv-cache-dtype fp8
      --gpu-memory-utilization 0.90
      --api-key ${VLLM_API_KEY}
    gpus: all
    volumes:
      - hf_cache:/root/.cache/huggingface
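    # Client sketch (not from the gist; values mirror the flags above): point any
    # OpenAI-compatible client at https://llm.drmikecrowe.net/v1 with ${VLLM_API_KEY}
    # as the API key and "coder7b" as the model name.
    # Optional healthcheck sketch, assuming curl exists in the image and the
    # default /v1/models route is enabled:
    # healthcheck:
    #   test: ["CMD-SHELL", "curl -fsS -H 'Authorization: Bearer ${VLLM_API_KEY}' http://localhost:8000/v1/models || exit 1"]
    #   interval: 30s
    #   timeout: 5s
    #   retries: 3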

  # Optional: 14B “analyst” sidecar (uncomment to enable)
  # vllm14:
  #   image: vllm/vllm-openai:latest
  #   container_name: vllm14
  #   restart: unless-stopped
  #   environment:
  #     VIRTUAL_HOST: llm14.drmikecrowe.net,llm14.local
  #     VIRTUAL_PORT: "8000"
  #     CERT_NAME: drmikecrowe.net
  #   command: >
  #     --model Qwen/Qwen2.5-Coder-14B-Instruct-AWQ
  #     --served-model-name coder14b
  #     --max-model-len 12000
  #     --kv-cache-dtype fp8
  #     --gpu-memory-utilization 0.92
  #     --api-key ${VLLM_API_KEY}
  #   gpus: all
  #   volumes:
  #     - hf_cache:/root/.cache/huggingface

  # === Faster-Whisper (OpenAI-style /v1/audio/transcriptions) ===
  asr:
    container_name: asr
    restart: unless-stopped
    environment:
      VIRTUAL_HOST: asr.drmikecrowe.net,asr.local
      VIRTUAL_PORT: "8000" # container listens on 8000
      CERT_NAME: drmikecrowe.net
      SERVER__API_KEY: ${ASR_API_KEY}
    image: fedirz/faster-whisper-server:latest-cuda
    build:
      dockerfile: Dockerfile.cuda
      context: .
      platforms:
        - linux/amd64
    ports:
      - 8000:8000
    volumes:
      - hf_cache:/root/.cache/huggingface
    deploy:
      resources:
        reservations:
          devices:
            - capabilities: ["gpu"]
            # If you have the CDI feature enabled, use the following instead:
            # https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/cdi-support.html
            # https://docs.docker.com/reference/cli/dockerd/#enable-cdi-devices
            # - driver: cdi
            #   device_ids:
            #     - nvidia.com/gpu=all
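    # Usage sketch (not from the gist; mirrors the settings above): the server speaks
    # the OpenAI-style transcription API, so a client POSTs multipart audio to
    # https://asr.drmikecrowe.net/v1/audio/transcriptions with
    # "Authorization: Bearer ${ASR_API_KEY}" and the usual OpenAI audio form fields
    # (file + model). The model id below is a hypothetical example:
    #   curl -fsS -H "Authorization: Bearer $ASR_API_KEY" \
    #     -F "file=@clip.wav" -F "model=Systran/faster-whisper-small" \
    #     https://asr.drmikecrowe.net/v1/audio/transcriptions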

networks:
  proxy:
    name: proxy
    external: true

# ============================================================
# Home Assistant
# ============================================================
services:
  homeassistant:
    container_name: homeassistant
    image: "ghcr.io/home-assistant/home-assistant:stable"
    volumes:
      - ${DOCKERDIR}/home-assistant:/config
      - /etc/localtime:/etc/localtime:ro
      - /run/dbus:/run/dbus:ro
    restart: unless-stopped
    privileged: true
    ports:
      - "8123:8123"
    networks:
      - proxy
    env_file: .env
    environment:
      TZ: ${TZ}
      UMASK_SET: "022"
      VIRTUAL_HOST: ha.drmikecrowe.net,ha.local
      VIRTUAL_PORT: 8123
      CERT_NAME: drmikecrowe.net

networks:
  proxy:
    name: proxy
    external: true
  backend:
    name: backend
    external: true

# ============================================================
# Immich (photos)
# ============================================================
services:
  immich-server:
    container_name: immich_server
    image: ghcr.io/immich-app/immich-server:${IMMICH_VERSION:-release}
    env_file: .env
    environment:
      # Proxy settings
      VIRTUAL_HOST: photos.drmikecrowe.net,photos.local
      VIRTUAL_PORT: 2283
      CERT_NAME: drmikecrowe.net
      # Immich settings
      DB_PASSWORD: ${IMMICH_DB_PASSWORD}
      DB_USERNAME: ${IMMICH_DB_USERNAME}
      DB_DATABASE_NAME: ${IMMICH_DB_DATABASE_NAME}
      DB_HOSTNAME: immich_postgres # Service name for the postgres container
      REDIS_HOSTNAME: immich_redis # Service name for the redis container
      UPLOAD_LOCATION: /data # Path inside the container
      TZ: ${TZ}
    volumes:
      - ${IMMICH_UPLOAD_LOCATION}:/data
      - /etc/localtime:/etc/localtime:ro
    networks:
      - proxy
      - backend
    depends_on:
      - immich_redis
      - immich_postgres
    restart: always

  immich-machine-learning:
    container_name: immich_machine_learning
    image: ghcr.io/immich-app/immich-machine-learning:${IMMICH_VERSION:-release}
    env_file: .env
    environment:
      # Immich settings
      DB_PASSWORD: ${IMMICH_DB_PASSWORD}
      DB_USERNAME: ${IMMICH_DB_USERNAME}
      DB_DATABASE_NAME: ${IMMICH_DB_DATABASE_NAME}
      DB_HOSTNAME: immich_postgres # Service name for the postgres container
      REDIS_HOSTNAME: immich_redis # Service name for the redis container
      TZ: ${TZ}
    volumes:
      - ${IMMICH_MODEL_CACHE_LOCATION}:/cache
    networks:
      - backend
    restart: always

  immich_redis:
    container_name: immich_redis
    image: docker.io/valkey/valkey:9@sha256:fb8d272e529ea567b9bf1302245796f21a2672b8368ca3fcb938ac334e613c8f
    networks:
      - backend
    restart: always

  immich_postgres:
    container_name: immich_postgres
    image: ghcr.io/immich-app/postgres:14-vectorchord0.4.3-pgvectors0.2.0@sha256:bcf63357191b76a916ae5eb93464d65c07511da41e3bf7a8416db519b40b1c23
    environment:
      POSTGRES_PASSWORD: ${IMMICH_DB_PASSWORD}
      POSTGRES_USER: ${IMMICH_DB_USERNAME}
      POSTGRES_DB: ${IMMICH_DB_DATABASE_NAME}
      POSTGRES_INITDB_ARGS: "--data-checksums"
    volumes:
      - ${IMMICH_DB_DATA_LOCATION}:/var/lib/postgresql/data
    networks:
      - backend
    restart: always

  # Reuse your .env (paths/ports already defined there)
  # env_file: .env  # each service references it directly
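  # .env sketch (illustration only; the variable names are taken from this file,
  # the values are placeholders you must supply for your own NAS paths):
  #   IMMICH_VERSION=release
  #   IMMICH_DB_USERNAME=immich
  #   IMMICH_DB_PASSWORD=<strong password>
  #   IMMICH_DB_DATABASE_NAME=immich
  #   IMMICH_UPLOAD_LOCATION=/path/to/photos/immich
  #   IMMICH_MODEL_CACHE_LOCATION=/path/to/docker/immich/model-cache
  #   IMMICH_DB_DATA_LOCATION=/path/to/docker/immich/postgres
  #   TZ=America/New_York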

networks:
  proxy:
    name: proxy
    external: true
  backend:
    name: backend
    external: true

# ============================================================
# Media stack: *arr apps, premiumizer, Jellyfin
# ============================================================
services:
  prowlarr:
    image: lscr.io/linuxserver/prowlarr:latest
    container_name: prowlarr
    restart: on-failure
    env_file: .env
    environment:
      TZ: ${TZ}
      VIRTUAL_HOST: prowlarr.drmikecrowe.net,prowlarr.local
      VIRTUAL_PATH: /
      VIRTUAL_PORT: 9696
      CERT_NAME: drmikecrowe.net
    volumes:
      - ${DOCKERDIR}/prowlarr:/config
      - ${DOWNLOADDIR}/downloads/blackhole:/blackhole
    networks: [proxy]
    # If you want direct LAN access too, uncomment:
    # ports: ["${PROWLARR_PORT}:9696"]

  premiumizer:
    image: plaquette/premiumizer:pi
    container_name: premiumizer
    restart: unless-stopped
    env_file: .env
    environment:
      TZ: ${TZ}
      VIRTUAL_HOST: premium.drmikecrowe.net,premium.local
      VIRTUAL_PATH: /
      VIRTUAL_PORT: 5000
      CERT_NAME: drmikecrowe.net
    volumes:
      - ${DOCKERDIR}/premiumizer2:/conf
      - ${DOWNLOADDIR}/downloads:/downloads
      - ${DOWNLOADDIR}/downloads/blackhole:/blackhole
    networks: [proxy]
    # ports: ["${PREMIUM_PORT}:5000"]

  radarr:
    image: lscr.io/linuxserver/radarr:latest
    container_name: radarr
    restart: on-failure
    env_file: .env
    environment:
      TZ: ${TZ}
      VIRTUAL_HOST: movies.drmikecrowe.net,movies.local
      VIRTUAL_PATH: /
      VIRTUAL_PORT: 7878
      CERT_NAME: drmikecrowe.net
    volumes:
      - ${DOCKERDIR}/radarr:/config
      - ${DOWNLOADDIR}:/data
      - ${MOVIEDIR}:/movies
      - /etc/localtime:/etc/localtime:ro
    networks: [proxy]
    # ports: ["${RADARR_PORT}:7878"]

  sonarr:
    image: lscr.io/linuxserver/sonarr:latest
    container_name: sonarr
    restart: on-failure
    env_file: .env
    environment:
      TZ: ${TZ}
      VIRTUAL_HOST: tv.drmikecrowe.net,tv.local
      VIRTUAL_PATH: /
      VIRTUAL_PORT: 8989
      CERT_NAME: drmikecrowe.net
    volumes:
      - ${DOCKERDIR}/sonarr:/config
      - ${DOWNLOADDIR}:/data
      - ${TVDIR}:/tv
      - /etc/localtime:/etc/localtime:ro
    networks: [proxy]
    # ports: ["${SONARR_PORT}:8989"]

  lidarr:
    image: lscr.io/linuxserver/lidarr:latest
    container_name: lidarr
    restart: on-failure
    env_file: .env
    environment:
      TZ: ${TZ}
      VIRTUAL_HOST: musicdownload.drmikecrowe.net,musicdownload.local
      VIRTUAL_PATH: /
      VIRTUAL_PORT: 8686
      CERT_NAME: drmikecrowe.net
    volumes:
      - ${DOCKERDIR}/lidarr:/config
      - ${DOWNLOADDIR}:/data
      - ${MUSICDIR}:/music
    networks: [proxy]
    # ports: ["${LIDARR_PORT}:8686"]

  # readarr:
  #   image: lscr.io/linuxserver/readarr:nightly
  #   container_name: readarr
  #   restart: on-failure
  #   env_file: .env
  #   environment:
  #     TZ: ${TZ}
  #     VIRTUAL_HOST: readarr.drmikecrowe.net,readarr.local
  #     VIRTUAL_PATH: /
  #     VIRTUAL_PORT: 8787
  #     CERT_NAME: drmikecrowe.net
  #   volumes:
  #     - ${DOCKERDIR}/readarr:/config
  #     - ${DOWNLOADDIR}:/data
  #     - ${BOOKDIR}:/books
  #   networks: [proxy]
  #   # ports: ["${READARR_PORT}:8787"]

  bazarr:
    image: lscr.io/linuxserver/bazarr:latest
    container_name: bazarr
    restart: on-failure
    env_file: .env
    environment:
      TZ: ${TZ}
      VIRTUAL_HOST: bazarr.drmikecrowe.net,bazarr.local
      VIRTUAL_PATH: /
      VIRTUAL_PORT: 6767
      CERT_NAME: drmikecrowe.net
    volumes:
      - ${DOCKERDIR}/bazarr:/config
      - ${MOVIEDIR}:/movies
      - ${TVDIR}:/tv
    networks: [proxy]
    # ports: ["${BAZARR_PORT}:6767"]

  # Jellyfin often benefits from host networking for hardware accel and DLNA.
  # Keep your original host mode and DO NOT proxy it (remove VIRTUAL_*);
  # a host-mode sketch follows this service.
  jellyfin:
    image: jellyfin/jellyfin
    container_name: jellyfin
    restart: unless-stopped
    env_file: .env
    environment:
      TZ: ${TZ}
      UMASK_SET: "022"
    volumes:
      - ${DOCKERDIR}/jellyfin:/config
      - ${MOVIEDIR}:/data/movies
      - ${TVDIR}:/data/tv
      - ${MUSICDIR}:/data/music
      - ${DOWNLOADDIR}:/data/downloads
    ports:
      - "8096:8096"
    extra_hosts:
      - "host.docker.internal:host-gateway"
    networks: [backend]
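  # Host-networking sketch (assumption, per the note above; not what this file runs):
  # swap the ports/networks/extra_hosts blocks for host mode so DLNA discovery and
  # hardware transcoding work without extra mappings.
  # jellyfin:
  #   image: jellyfin/jellyfin
  #   network_mode: host        # Jellyfin then listens on the host's 8096 directly
  #   restart: unless-stopped
  #   env_file: .env
  #   environment:
  #     TZ: ${TZ}
  #   volumes:
  #     - ${DOCKERDIR}/jellyfin:/config
  #     - ${MOVIEDIR}:/data/movies
  #     - ${TVDIR}:/data/tv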

# ============================================================
# Pi-hole + Avahi
# More info at https://github.com/pi-hole/docker-pi-hole/
# ============================================================
services:
  pihole:
    container_name: pihole
    image: pihole/pihole:latest
    # For DHCP it is recommended to remove these ports and instead add
    # network_mode: "host" (sketch after this service).
    ports:
      - "53:53/tcp"
      - "53:53/udp"
      - "8080:80/tcp"
    env_file: .env
    environment:
      TZ: "${TZ}"
      WEBPASSWORD: ${PIHOLE_WEBPASSWORD}
      VIRTUAL_HOST: pihole.drmikecrowe.net,pihole.local
      VIRTUAL_PORT: "80"
    # Volumes are optional but recommended for persistence
    volumes:
      - "${DOCKERDIR}/etc-pihole:/etc/pihole"
      - "${DOCKERDIR}/etc-dnsmasq.d:/etc/dnsmasq.d"
    # Recommended but not required (DHCP needs NET_ADMIN)
    # https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
    cap_add:
      - NET_ADMIN
    restart: unless-stopped
    networks:
      - proxy
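  # DHCP variant sketch (assumption, following the note above; not enabled here):
  # drop the ports block and run on the host network so DHCP broadcasts reach Pi-hole.
  # pihole:
  #   image: pihole/pihole:latest
  #   network_mode: "host"
  #   cap_add:
  #     - NET_ADMIN           # required for the DHCP server
  #   environment:
  #     TZ: "${TZ}"
  #     WEBPASSWORD: ${PIHOLE_WEBPASSWORD}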

  avahi:
    container_name: avahi
    image: flungo/avahi
    network_mode: host
    restart: unless-stopped
    environment:
      - REFLECTOR_ENABLE_REFLECTOR=yes
    volumes:
      - /var/run/dbus:/var/run/dbus:ro
      - /var/run/avahi-daemon/socket:/var/run/avahi-daemon/socket:ro

networks:
  proxy:
    name: proxy
    external: true

# ============================================================
# jwilder/nginx-proxy (TLS termination for the stacks above)
# ============================================================
volumes:
  # existing volumes stay as-is; these are just for nginx-proxy
  nginx_vhostd:
  nginx_html:

networks:
  proxy:
    name: proxy
    external: true

services:
  jwilder-proxy:
    container_name: nginx
    image: jwilder/nginx-proxy
    env_file: .env
    environment:
      TZ: ${TZ}
      SSL_POLICY: Mozilla-Modern
    ports:
      - "443:443"
      # Optional: expose 80 only if you want HTTP->HTTPS redirect
      - "80:80"
    volumes:
      - "/var/run/docker.sock:/tmp/docker.sock:ro"
      # Your purchased cert (full chain + key)
      - "/etc/letsencrypt/live/drmikecrowe.net/fullchain.pem:/etc/nginx/certs/drmikecrowe.net.crt:ro"
      - "/etc/letsencrypt/live/drmikecrowe.net/privkey.pem:/etc/nginx/certs/drmikecrowe.net.key:ro"
      # Per-host snippets for streaming/bigger uploads (example after this service)
      - "nginx_vhostd:/etc/nginx/vhost.d"
      - "nginx_html:/usr/share/nginx/html"
    restart: unless-stopped
    networks: [proxy]
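    # vhost.d sketch (illustration; the host name and limits are assumptions):
    # nginx-proxy includes a file named after the virtual host from the
    # nginx_vhostd volume, e.g. a file called photos.drmikecrowe.net containing:
    #   client_max_body_size 2g;      # allow large Immich uploads
    #   proxy_read_timeout 300s;      # tolerate slow streaming/transcode responses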

# ============================================================
# ZoneMinder + MariaDB + nightly MySQL backups
# Reuse the same reverse-proxy network so jwilder/nginx-proxy can see ZM
# ============================================================
networks:
  proxy:
    external: true
    name: proxy
  backend:
    name: backend

services:
  mariadb:
    image: mariadb:11.1-jammy
    container_name: mariadb
    restart: unless-stopped
    env_file: .env
    environment:
      TZ: ${TZ}
      # You can also define MYSQL_DATABASE / MYSQL_USER / MYSQL_PASSWORD in .env
      # if you prefer non-root usage
      # MYSQL_DATABASE: zm
      # MYSQL_USER: zmuser
      MYSQL_PASSWORD: ${MYSQL_PASSWORD}
      MARIADB_HEALTHCHECK_PASSWORD: ${MYSQL_PASSWORD}
      MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
    ports:
      - "3306:3306"
    volumes:
      - ${DOCKERDIR}/Zoneminder/db:/var/lib/mysql
    networks: [backend]
    healthcheck:
      test:
        [
          "CMD",
          "healthcheck.sh",
          "--su-mysql",
          "--connect",
          "--innodb_initialized",
        ]
      interval: 10s
      timeout: 5s
      retries: 3
      start_period: 30s

  zoneminder:
    image: ghcr.io/jantman/docker-zoneminder:latest
    container_name: zoneminder
    restart: unless-stopped
    shm_size: 8G
    env_file: .env
    tmpfs:
      - /run
      - /tmp
    environment:
      TZ: ${TZ}
      UMASK_SET: "22"
      MULTI_PORT_START: 0
      MULTI_PORT_END: 0
      # Reverse proxy (paid cert via jwilder/nginx-proxy)
      VIRTUAL_HOST: zoneminder.drmikecrowe.net,zoneminder.local
      VIRTUAL_PATH: /
      VIRTUAL_PORT: 80
      CERT_NAME: drmikecrowe.net
      # If the image supports DB env vars, add them here; otherwise your existing
      # config files below apply.
      # ZM_DB_HOST: mariadb
      # ZM_DB_NAME: zm
      # ZM_DB_USER: zmuser
      # ZM_DB_PASS: ${MYSQL_PASSWORD}
    volumes:
      - ${DOCKERDIR}/Zoneminder/cache:/var/cache/zoneminder
      - ${DOCKERDIR}/Zoneminder/zm-log:/var/log/zm
      - ${DOCKERDIR}/Zoneminder/apache2-log:/var/log/apache2
      - ${DOCKERDIR}/Zoneminder/es_rules.json:/etc/zm/es_rules.json
      - ${DOCKERDIR}/Zoneminder/secrets.ini:/etc/zm/secrets.ini
      - ${DOCKERDIR}/Zoneminder/zmeventnotification.ini:/etc/zm/zmeventnotification.ini
      - ${DOCKERDIR}/Zoneminder/objectconfig.ini:/etc/zm/objectconfig.ini
    depends_on:
      mariadb:
        condition: service_healthy
    networks: [proxy, backend]
    # If you still want direct LAN access, keep these ports; else rely on HTTPS
    # via the proxy and remove this block.
    ports:
      - "9000:9000/tcp"
      - "${ZONE_PORT}:80/tcp"

  mysqlbackup:
    image: zelejs/mysqlbackup
    container_name: mysqlbackup
    restart: unless-stopped
    env_file: .env
    environment:
      TZ: ${TZ}
      MYSQL_HOST: mariadb
      MYSQL_PORT: 3306
      MYSQL_USER: root
      MYSQL_PASSWORD: ${MYSQL_ROOT_PASSWORD}
      # Or point MYSQL_USER / MYSQL_PASSWORD at a non-root backup user with
      # RELOAD/LOCK TABLES privileges (grant sketch after this service)
      CRONTAB_DAILY_HOUR: 2
      CRONTAB_DAILY_MIN: 15
      # Optional: include schemas; leave empty to dump all
      # MYSQL_DATABASES: "zm"
      # Optional: gzip
      # COMPRESS: "gzip"
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - ./mysqlbackup/data:/var/mysqlbackup/data
      - /var/log/mysqlbackup:/var/mysqlbackup/log
    depends_on:
      mariadb:
        condition: service_healthy
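  # Non-root backup user sketch (assumption; the user name and password are
  # placeholders). Run once against MariaDB, then set MYSQL_USER / MYSQL_PASSWORD
  # above accordingly:
  #   CREATE USER 'backup'@'%' IDENTIFIED BY '<strong password>';
  #   GRANT SELECT, SHOW VIEW, TRIGGER, EVENT, RELOAD, LOCK TABLES, PROCESS
  #     ON *.* TO 'backup'@'%';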