Convert compose files to YAML

The TOML conversion stopped working, and I don't want to fix it.
Marko Korhonen 2024-11-04 10:27:12 +02:00
parent 9014f87dfd
commit 037967efd7
49 changed files with 904 additions and 773 deletions
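For reference, the new YAML files below could have been produced from the old TOML files with a one-off loop along these lines. This is only a sketch: it reuses the yj -ty invocation from the old dct helper (see the shell changes at the bottom of this diff) and the stack layout under $HOME/git/dotfiles/docker/, but the exact command used for the conversion is not recorded in the commit.

for dir in $HOME/git/dotfiles/docker/*/; do
  if [[ -f "$dir/docker-compose.toml" ]]; then
    # yj -ty reads TOML on stdin and writes the equivalent YAML to stdout
    yj -ty < "$dir/docker-compose.toml" > "$dir/docker-compose.yaml"
    # drop the TOML original, matching the deletions in this commit
    rm "$dir/docker-compose.toml"
  fi
done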

@@ -1,78 +0,0 @@
[volumes]
redis = {}
media = {}
custom_templates = {}
geoip = {}
backups = {}
[services.redis]
image = "redis:alpine"
container_name = "authentik-redis"
networks = ["authentik"]
restart = "unless-stopped"
volumes = ["redis:/data"]
[services.redis.healthcheck]
test = ["CMD-SHELL", "redis-cli ping | grep PONG"]
start_period = "20s"
interval = "30s"
retries = 5
timeout = "3s"
[services.server]
image = "ghcr.io/goauthentik/server"
container_name = "authentik"
restart = "unless-stopped"
command = "server"
volumes = [
"media:/media",
"custom_templates:/templates",
"geoip:/geoip",
]
env_file = [".env"]
networks = ["authentik", "postgres", "proxy"]
[services.worker]
image = "ghcr.io/goauthentik/server"
container_name = "authentik-worker"
restart = "unless-stopped"
command = "worker"
user = "root"
volumes = [
"backups:/backups",
"custom_templates:/templates",
"geoip:/geoip",
"media:/media",
"/var/run/docker.sock:/var/run/docker.sock",
]
env_file = [".env"]
networks = ["authentik", "postgres"]
[services.geoipupdate]
image = "maxmindinc/geoipupdate"
container_name = "authentik-geoipupdate"
restart = "unless-stopped"
networks = ["authentik"]
volumes = ["geoip:/usr/share/GeoIP"]
env_file = [".env"]
[services.geoipupdate.environment]
GEOIPUPDATE_EDITION_IDS = "GeoLite2-City"
GEOIPUPDATE_FREQUENCY = "8"
[services.whoami-test]
image = "traefik/whoami"
container_name = "whoami-test"
restart = "unless-stopped"
security_opt = ["no-new-privileges:true"]
networks = ["proxy"]
environment = ["TZ"]
[networks.authentik]
external = true
[networks.postgres]
external = true
[networks.proxy]
external = true

@@ -0,0 +1,85 @@
volumes:
redis: {}
media: {}
custom_templates: {}
geoip: {}
backups: {}
services:
redis:
image: redis:alpine
container_name: authentik-redis
networks:
- authentik
restart: unless-stopped
volumes:
- redis:/data
healthcheck:
test:
- CMD-SHELL
- redis-cli ping | grep PONG
start_period: 20s
interval: 30s
retries: 5
timeout: 3s
server:
image: ghcr.io/goauthentik/server
container_name: authentik
restart: unless-stopped
command: server
volumes:
- media:/media
- custom_templates:/templates
- geoip:/geoip
env_file:
- .env
networks:
- authentik
- postgres
- proxy
worker:
image: ghcr.io/goauthentik/server
container_name: authentik-worker
restart: unless-stopped
command: worker
user: root
volumes:
- backups:/backups
- custom_templates:/templates
- geoip:/geoip
- media:/media
- /var/run/docker.sock:/var/run/docker.sock
env_file:
- .env
networks:
- authentik
- postgres
geoipupdate:
image: maxmindinc/geoipupdate
container_name: authentik-geoipupdate
restart: unless-stopped
networks:
- authentik
volumes:
- geoip:/usr/share/GeoIP
env_file:
- .env
environment:
GEOIPUPDATE_EDITION_IDS: GeoLite2-City
GEOIPUPDATE_FREQUENCY: "8"
whoami-test:
image: traefik/whoami
container_name: whoami-test
restart: unless-stopped
security_opt:
- no-new-privileges:true
networks:
- proxy
environment:
- TZ
networks:
authentik:
external: true
postgres:
external: true
proxy:
external: true

@@ -1,37 +0,0 @@
[volumes.caddy_data]
external = true
[volumes.caddy_config]
external = true
[volumes.caddy_wkd]
external = true
[volumes.homeautomation_hass]
external = true
[services.backup]
image = "offen/docker-volume-backup:v2"
container_name = "volume-backup"
restart = "unless-stopped"
environment = [
"AWS_ENDPOINT",
"AWS_S3_BUCKET_NAME",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"GPG_PASSPHRASE",
"EMAIL_SMTP_HOST",
"EMAIL_SMTP_PASSWORD",
"EMAIL_SMTP_USERNAME",
"EMAIL_SMTP_PORT",
"BACKUP_COMPRESSION=zst",
"BACKUP_RETENTION_DAYS=7",
]
volumes = [
"/var/run/docker.sock:/var/run/docker.sock:ro",
"/etc/localtime:/etc/localtime:ro",
"caddy_data:/backup/caddy_data:ro",
"caddy_config:/backup/caddy_config:ro",
"caddy_wkd:/backup/caddy_wkd:ro",
"homeautomation_hass:/backup/homeautomation_hass:ro",
]

@@ -0,0 +1,33 @@
volumes:
caddy_data:
external: true
caddy_config:
external: true
caddy_wkd:
external: true
homeautomation_hass:
external: true
services:
backup:
image: offen/docker-volume-backup:v2
container_name: volume-backup
restart: unless-stopped
environment:
- AWS_ENDPOINT
- AWS_S3_BUCKET_NAME
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
- GPG_PASSPHRASE
- EMAIL_SMTP_HOST
- EMAIL_SMTP_PASSWORD
- EMAIL_SMTP_USERNAME
- EMAIL_SMTP_PORT
- BACKUP_COMPRESSION=zst
- BACKUP_RETENTION_DAYS=7
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- /etc/localtime:/etc/localtime:ro
- caddy_data:/backup/caddy_data:ro
- caddy_config:/backup/caddy_config:ro
- caddy_wkd:/backup/caddy_wkd:ro
- homeautomation_hass:/backup/homeautomation_hass:ro

@@ -1,29 +0,0 @@
[volumes]
data = {}
config = {}
wkd = {}
korhonen_cc = {}
[volumes.nextcloud_config]
external = true
[services.caddy]
image = "git.korhonen.cc/functionalhacker/caddy"
container_name = "caddy"
restart = "unless-stopped"
ports = ["80:80", "443:443/tcp", "443:443/udp"]
networks = ["proxy"]
volumes = [
"./Caddyfile:/etc/caddy/Caddyfile",
"data:/data",
"config:/config",
"wkd:/var/www/wkd",
"korhonen_cc:/var/www/korhonen.cc",
"nextcloud_config:/var/www/nextcloud",
"/var/www/index.korhonen.cc:/var/www/index.korhonen.cc",
]
environment = ["CLOUDFLARE_EMAIL", "CLOUDFLARE_API_TOKEN", "ACME_AGREE=true"]
cap_add = ["NET_ADMIN"]
[networks.proxy]
external = true

@@ -0,0 +1,35 @@
volumes:
data: {}
config: {}
wkd: {}
korhonen_cc: {}
nextcloud_config:
external: true
services:
caddy:
image: git.korhonen.cc/functionalhacker/caddy
container_name: caddy
restart: unless-stopped
ports:
- 80:80
- 443:443/tcp
- 443:443/udp
networks:
- proxy
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile
- data:/data
- config:/config
- wkd:/var/www/wkd
- korhonen_cc:/var/www/korhonen.cc
- nextcloud_config:/var/www/nextcloud
- /var/www/index.korhonen.cc:/var/www/index.korhonen.cc
environment:
- CLOUDFLARE_EMAIL
- CLOUDFLARE_API_TOKEN
- ACME_AGREE=true
cap_add:
- NET_ADMIN
networks:
proxy:
external: true

@@ -1,8 +0,0 @@
[services.cloudflare-ddns]
image = "timothyjmiller/cloudflare-ddns"
container_name = "cloudflare-ddns"
security_opt = ["no-new-privileges:true"]
network_mode = "host"
environment = ["PUID=1000", "PGID=1000"]
volumes = ["/mnt/Storage/docker/ddns/config.json:/config.json"]
restart = "unless-stopped"

@@ -0,0 +1,13 @@
services:
cloudflare-ddns:
image: timothyjmiller/cloudflare-ddns
container_name: cloudflare-ddns
security_opt:
- no-new-privileges:true
network_mode: host
environment:
- PUID=1000
- PGID=1000
volumes:
- /mnt/Storage/docker/ddns/config.json:/config.json
restart: unless-stopped

@@ -1,9 +0,0 @@
[services.drop]
image = "lscr.io/linuxserver/pairdrop"
container_name = "drop"
restart = "unless-stopped"
networks = ["proxy"]
environment = ["TZ=Europe/Helsinki", "IPV6_LOCALIZE=4"]
[networks.proxy]
external = true

@@ -0,0 +1,13 @@
services:
drop:
image: lscr.io/linuxserver/pairdrop
container_name: drop
restart: unless-stopped
networks:
- proxy
environment:
- TZ=Europe/Helsinki
- IPV6_LOCALIZE=4
networks:
proxy:
external: true

@@ -1,41 +0,0 @@
[volumes]
files = {}
config = {}
redis = {}
db = {}
[services.firefish]
image = "registry.firefish.dev/firefish/firefish"
container_name = "firefish"
restart = "unless-stopped"
depends_on = ["redis"]
ports = ["3084:3000"]
networks = ["firefish", "proxy"]
environment = { NODE_ENV = "production" }
volumes = ["files:/firefish/files", "config:/firefish/.config:ro"]
[services.redis]
image = "redis"
container_name = "redis-firefish"
restart = "unless-stopped"
networks = ["firefish"]
volumes = ["redis:/data"]
[services.db]
image = "groonga/pgroonga:3.1.8-alpine-16-slim"
container_name = "firefish-db"
restart = "unless-stopped"
networks = ["firefish"]
volumes = ["db:/var/lib/postgresql/data"]
[services.db.healthcheck]
test = "pg_isready --user=\"firefish\" --dbname=\"firefish\""
interval = "5s"
timeout = "5s"
retries = 5
[networks.firefish]
internal = true
[networks.proxy]
external = true

@@ -0,0 +1,48 @@
volumes:
files: {}
config: {}
redis: {}
db: {}
services:
firefish:
image: registry.firefish.dev/firefish/firefish
container_name: firefish
restart: unless-stopped
depends_on:
- redis
ports:
- 3084:3000
networks:
- firefish
- proxy
environment:
NODE_ENV: production
volumes:
- files:/firefish/files
- config:/firefish/.config:ro
redis:
image: redis
container_name: redis-firefish
restart: unless-stopped
networks:
- firefish
volumes:
- redis:/data
db:
image: groonga/pgroonga:3.1.8-alpine-16-slim
container_name: firefish-db
restart: unless-stopped
networks:
- firefish
volumes:
- db:/var/lib/postgresql/data
healthcheck:
test: pg_isready --user="firefish" --dbname="firefish"
interval: 5s
timeout: 5s
retries: 5
networks:
firefish:
internal: true
proxy:
external: true

@@ -1,32 +0,0 @@
[volumes]
data = {}
runner = {}
[services.forgejo]
image = "git.korhonen.cc/functionalhacker/forgejo-asciidoc"
container_name = "forgejo"
environment = ["TZ=Europe/Helsinki", "USER_UID=1000", "USER_GID=1000"]
restart = "unless-stopped"
networks = ["postgres", "proxy"]
ports = ["2882:22"]
volumes = ["data:/data", "/etc/localtime:/etc/localtime:ro"]
[services.runner]
image = "code.forgejo.org/forgejo/runner:3.5.1"
container_name = "forgejo-runner"
user = "root:root"
volumes = [
"runner:/data",
"./runner.yaml:/data/config.yaml",
"/var/run/docker.sock:/var/run/docker.sock",
"/etc/localtime:/etc/localtime:ro",
]
environment = ["SOCKFILE=/var/run/docker.sock"]
restart = "unless-stopped"
command = "forgejo-runner daemon -c /data/config.yaml"
[networks.postgres]
external = true
[networks.proxy]
external = true

@@ -0,0 +1,38 @@
volumes:
data: {}
runner: {}
services:
forgejo:
image: git.korhonen.cc/functionalhacker/forgejo-asciidoc
container_name: forgejo
environment:
- TZ=Europe/Helsinki
- USER_UID=1000
- USER_GID=1000
restart: unless-stopped
networks:
- postgres
- proxy
ports:
- "2882:22"
volumes:
- data:/data
- /etc/localtime:/etc/localtime:ro
runner:
image: code.forgejo.org/forgejo/runner:3.5.1
container_name: forgejo-runner
user: root:root
volumes:
- runner:/data
- ./runner.yaml:/data/config.yaml
- /var/run/docker.sock:/var/run/docker.sock
- /etc/localtime:/etc/localtime:ro
environment:
- SOCKFILE=/var/run/docker.sock
restart: unless-stopped
command: forgejo-runner daemon -c /data/config.yaml
networks:
postgres:
external: true
proxy:
external: true

@@ -1,19 +0,0 @@
[volumes]
config = {}
[services.freshrss]
image = "linuxserver/freshrss"
container_name = "freshrss"
restart = "unless-stopped"
networks = ["freshrss", "postgres", "proxy"]
environment = ["PUID=1000", "PGID=985", "TZ=Europe/Helsinki"]
volumes = ["config:/config", "/etc/localtime:/etc/localtime:ro"]
[networks.freshrss]
external = false
[networks.postgres]
external = true
[networks.proxy]
external = true

@@ -0,0 +1,25 @@
volumes:
config: {}
services:
freshrss:
image: linuxserver/freshrss
container_name: freshrss
restart: unless-stopped
networks:
- freshrss
- postgres
- proxy
environment:
- PUID=1000
- PGID=985
- TZ=Europe/Helsinki
volumes:
- config:/config
- /etc/localtime:/etc/localtime:ro
networks:
freshrss:
external: false
postgres:
external: true
proxy:
external: true

@@ -1,20 +0,0 @@
[volumes]
config = {}
data = {}
[services.headscale]
image = "headscale/headscale:0"
container_name = "headscale"
volumes = ["config:/etc/headscale", "data:/var/lib/headscale"]
ports = ["3478:3478/udp"]
command = "headscale serve"
restart = "unless-stopped"
networks = ["postgres"]
[services.headscale-ui]
image = "ghcr.io/gurucomputing/headscale-ui"
container_name = "headscale-ui"
restart = "unless-stopped"
[networks.postgres]
external = true

@@ -0,0 +1,23 @@
volumes:
config: {}
data: {}
services:
headscale:
image: headscale/headscale:0
container_name: headscale
volumes:
- config:/etc/headscale
- data:/var/lib/headscale
ports:
- 3478:3478/udp
command: headscale serve
restart: unless-stopped
networks:
- postgres
headscale-ui:
image: ghcr.io/gurucomputing/headscale-ui
container_name: headscale-ui
restart: unless-stopped
networks:
postgres:
external: true

@@ -1,111 +0,0 @@
[volumes]
hass = {}
mosquitto = {}
piper_english = {}
whisper_english = {}
openwakeword_english = {}
[services.home-assistant]
container_name = "home-assistant"
image = "homeassistant/home-assistant"
restart = "unless-stopped"
environment = ["TZ=Europe/Helsinki"]
devices = ["/dev/ttyACM0"]
volumes = ["hass:/config", "/etc/localtime:/etc/localtime:ro"]
networks = ["homeautomation", "postgres", "proxy"]
ports = ["8123:8123", "8300:8300"]
extra_hosts = ["host.docker.internal:host-gateway"]
[services.mosquitto]
container_name = "mosquitto"
image = "eclipse-mosquitto"
restart = "unless-stopped"
environment = ["TZ=Europe/Helsinki"]
networks = ["homeautomation"]
ports = ["1883:1883", "8866:8866"]
volumes = ["mosquitto:/mosquitto", "/etc/localtime:/etc/localtime:ro"]
[services.piper_english]
container_name = "piper_english"
image = "rhasspy/wyoming-piper"
restart = "unless-stopped"
environment = ["TZ=Europe/Helsinki"]
ports = ["10200:10200"]
networks = ["homeautomation"]
command = ["--voice", "en_US-hfc_male-medium"]
volumes = ["piper_english:/data", "/etc/localtime:/etc/localtime:ro"]
[services.whisper_english]
container_name = "whisper_english"
image = "rhasspy/wyoming-whisper"
restart = "unless-stopped"
environment = ["TZ=Europe/Helsinki"]
ports = ["10300:10300"]
networks = ["homeautomation"]
depends_on = ["home-assistant"]
command = ["--model", "tiny-int8", "--language", "en"]
volumes = ["whisper_english:/data", "/etc/localtime:/etc/localtime:ro"]
[services.openwakeword_english]
container_name = "openwakeword_english"
image = "rhasspy/wyoming-openwakeword"
restart = "unless-stopped"
environment = ["TZ=Europe/Helsinki"]
networks = ["homeautomation"]
command = ["--preload-model", "ok_nabu"]
depends_on = ["home-assistant"]
volumes = ["openwakeword_english:/data", "/etc/localtime:/etc/localtime:ro"]
[services.microphone]
build = "https://github.com/rhasspy/wyoming-mic-external.git"
image = "rhasspy/wyoming-mic-external"
container_name = "microphone"
restart = "unless-stopped"
devices = ["/dev/snd:/dev/snd"]
ports = ["10600:10600"]
group_add = ["audio"]
networks = ["homeautomation"]
command = ["--device", "plughw:CARD=USB,DEV=0", "--debug"]
volumes = ["/etc/localtime:/etc/localtime:ro"]
[services.speaker]
build = "https://github.com/rhasspy/wyoming-snd-external.git"
image = "rhasspy/wyoming-snd-external"
container_name = "speaker"
restart = "unless-stopped"
devices = ["/dev/snd:/dev/snd"]
ports = ["10601:10601"]
group_add = ["audio"]
networks = ["homeautomation"]
command = ["--device", "iec958:CARD=USB,DEV=0", "--debug"]
volumes = ["/etc/localtime:/etc/localtime:ro"]
[services.satellite]
build = "https://github.com/rhasspy/wyoming-satellite.git"
image = "rhasspy/wyoming-satellite"
container_name = "satellite"
restart = "unless-stopped"
ports = ["10700:10700"]
networks = ["homeautomation"]
depends_on = ["speaker", "microphone", "whisper_english"]
command = [
"--name",
"ha-server-satellite",
"--mic-uri",
"tcp://microphone:10600",
"--snd-uri",
"tcp://speaker:10601",
"--wake-uri",
"tcp://openwakeword_english:10400",
"--debug",
]
volumes = ["/etc/localtime:/etc/localtime:ro"]
[networks.homeautomation]
external = false
[networks.postgres]
external = true
[networks.proxy]
external = true

@@ -0,0 +1,163 @@
volumes:
hass: {}
mosquitto: {}
piper_english: {}
whisper_english: {}
openwakeword_english: {}
services:
home-assistant:
container_name: home-assistant
image: homeassistant/home-assistant
restart: unless-stopped
environment:
- TZ=Europe/Helsinki
devices:
- /dev/ttyACM0
volumes:
- hass:/config
- /etc/localtime:/etc/localtime:ro
networks:
- homeautomation
- postgres
- proxy
ports:
- 8123:8123
- 8300:8300
extra_hosts:
- host.docker.internal:host-gateway
mosquitto:
container_name: mosquitto
image: eclipse-mosquitto
restart: unless-stopped
environment:
- TZ=Europe/Helsinki
networks:
- homeautomation
ports:
- 1883:1883
- 8866:8866
volumes:
- mosquitto:/mosquitto
- /etc/localtime:/etc/localtime:ro
piper_english:
container_name: piper_english
image: rhasspy/wyoming-piper
restart: unless-stopped
environment:
- TZ=Europe/Helsinki
ports:
- 10200:10200
networks:
- homeautomation
command:
- --voice
- en_US-hfc_male-medium
volumes:
- piper_english:/data
- /etc/localtime:/etc/localtime:ro
whisper_english:
container_name: whisper_english
image: rhasspy/wyoming-whisper
restart: unless-stopped
environment:
- TZ=Europe/Helsinki
ports:
- 10300:10300
networks:
- homeautomation
depends_on:
- home-assistant
command:
- --model
- tiny-int8
- --language
- en
volumes:
- whisper_english:/data
- /etc/localtime:/etc/localtime:ro
openwakeword_english:
container_name: openwakeword_english
image: rhasspy/wyoming-openwakeword
restart: unless-stopped
environment:
- TZ=Europe/Helsinki
networks:
- homeautomation
command:
- --preload-model
- ok_nabu
depends_on:
- home-assistant
volumes:
- openwakeword_english:/data
- /etc/localtime:/etc/localtime:ro
microphone:
build: https://github.com/rhasspy/wyoming-mic-external.git
image: rhasspy/wyoming-mic-external
container_name: microphone
restart: unless-stopped
devices:
- /dev/snd:/dev/snd
ports:
- 10600:10600
group_add:
- audio
networks:
- homeautomation
command:
- --device
- plughw:CARD=USB,DEV=0
- --debug
volumes:
- /etc/localtime:/etc/localtime:ro
speaker:
build: https://github.com/rhasspy/wyoming-snd-external.git
image: rhasspy/wyoming-snd-external
container_name: speaker
restart: unless-stopped
devices:
- /dev/snd:/dev/snd
ports:
- 10601:10601
group_add:
- audio
networks:
- homeautomation
command:
- --device
- iec958:CARD=USB,DEV=0
- --debug
volumes:
- /etc/localtime:/etc/localtime:ro
satellite:
build: https://github.com/rhasspy/wyoming-satellite.git
image: rhasspy/wyoming-satellite
container_name: satellite
restart: unless-stopped
ports:
- 10700:10700
networks:
- homeautomation
depends_on:
- speaker
- microphone
- whisper_english
command:
- --name
- ha-server-satellite
- --mic-uri
- tcp://microphone:10600
- --snd-uri
- tcp://speaker:10601
- --wake-uri
- tcp://openwakeword_english:10400
- --debug
volumes:
- /etc/localtime:/etc/localtime:ro
networks:
homeautomation:
external: false
postgres:
external: true
proxy:
external: true

@@ -1,34 +0,0 @@
[volumes]
config = {}
cache = {}
[volumes.media]
external = true
[volumes.nextcloud_data]
external = true
[services.jellyfin]
image = "jellyfin/jellyfin"
container_name = "jellyfin"
environment = ["TZ=Europe/Helsinki"]
ports = ["8096:8096"]
networks = ["proxy", "authentik"]
restart = "unless-stopped"
volumes = [
"config:/config",
"cache:/cache",
"media:/media",
"nextcloud_data:/nextcloud_data",
"/etc/localtime:/etc/localtime:ro",
]
devices = [
"/dev/dri/renderD128:/dev/dri/renderD128",
"/dev/dri/card1:/dev/dri/card0",
]
[networks.proxy]
external = true
[networks.authentik]
external = true

@@ -0,0 +1,33 @@
volumes:
config: {}
cache: {}
media:
external: true
nextcloud_data:
external: true
services:
jellyfin:
image: jellyfin/jellyfin
container_name: jellyfin
environment:
- TZ=Europe/Helsinki
ports:
- 8096:8096
networks:
- proxy
- authentik
restart: unless-stopped
volumes:
- config:/config
- cache:/cache
- media:/media
- nextcloud_data:/nextcloud_data
- /etc/localtime:/etc/localtime:ro
devices:
- /dev/dri/renderD128:/dev/dri/renderD128
- /dev/dri/card1:/dev/dri/card0
networks:
proxy:
external: true
authentik:
external: true

@@ -1,10 +0,0 @@
[volumes]
config = {}
[services.mumble]
container_name = "mumble"
image = "phlak/mumble"
environment = ["TZ=Europe/Helsinki"]
network_mode = "host"
volumes = ["config:/etc/mumble", "/etc/localtime:/etc/localtime:ro"]
restart = "unless-stopped"

@@ -0,0 +1,13 @@
volumes:
config: {}
services:
mumble:
container_name: mumble
image: phlak/mumble
environment:
- TZ=Europe/Helsinki
network_mode: host
volumes:
- config:/etc/mumble
- /etc/localtime:/etc/localtime:ro
restart: unless-stopped

@@ -1,20 +0,0 @@
[volumes]
config = {}
assets = {}
[services.netbootxyz]
image = "lscr.io/linuxserver/netbootxyz"
container_name = "netbootxyz"
restart = "unless-stopped"
environment = [
"PUID=1000",
"PGID=1000",
"TZ=Etc/UTC",
"MENU_VERSION=1.9.9",
"PORT_RANGE=30000:30010",
"SUBFOLDER=/",
"NGINX_PORT=80",
"WEB_APP_PORT=3000",
]
ports = ["3000:3000", "69:69/udp", "8081:80"]
volumes = ["config:/config", "assets:/assets"]

@@ -0,0 +1,24 @@
volumes:
config: {}
assets: {}
services:
netbootxyz:
image: lscr.io/linuxserver/netbootxyz
container_name: netbootxyz
restart: unless-stopped
environment:
- PUID=1000
- PGID=1000
- TZ=Etc/UTC
- MENU_VERSION=1.9.9
- PORT_RANGE=30000:30010
- SUBFOLDER=/
- NGINX_PORT=80
- WEB_APP_PORT=3000
ports:
- 3000:3000
- 69:69/udp
- 8081:80
volumes:
- config:/config
- assets:/assets

@@ -1,84 +0,0 @@
[volumes.nextcloud_config]
external = true
[volumes.nextcloud_data]
external = true
[services.nextcloud]
image = "nextcloud:fpm-alpine"
container_name = "nextcloud"
restart = "unless-stopped"
networks = ["nextcloud", "postgres", "proxy"]
volumes = [
"nextcloud_config:/var/www/html",
"nextcloud_data:/var/www/html/data",
"/etc/localtime:/etc/localtime:ro",
]
environment = [
"REDIS_HOST=redis",
"REDIS_HOST_PASSWORD=123",
"TRUSTED_PROXIES=caddy",
"NEXTCLOUD_TRUSTED_DOMAINS=cloud.korhonen.cc",
"OVERWRITEHOST=cloud.korhonen.cc",
"OVERWRITEPROTOCOL=https",
]
depends_on = ["redis"]
[services.nextcloud.labels]
"ofelia.enabled" = true
"ofelia.job-exec.nextcloud.schedule" = "0 */5 * * * *"
"ofelia.job-exec.nextcloud.command" = "php /var/www/html/cron.php"
"ofelia.job-exec.nextcloud.user" = "www-data"
"ofelia.smtp-host" = "${SMTP_HOST}"
"ofelia.smtp-port" = "${SMTP_PORT}"
"ofelia.smtp-user" = "${SMTP_USER}"
"ofelia.smtp-password" = "${SMTP_PASSWORD}"
"ofelia.email-to" = "${EMAIL_TO}"
"ofelia.email-from" = "${EMAIL_FROM}"
"ofelia.mail-only-on-error" = true
[services.redis]
image = "redis:alpine"
container_name = "redis-nextcloud"
networks = ["nextcloud"]
restart = "unless-stopped"
command = "redis-server --requirepass 123"
[services.coturn]
image = "instrumentisto/coturn"
container_name = "coturn"
restart = "unless-stopped"
env_file = ".env"
ports = ["3478:3478/tcp", "3478:3478/udp", "49160-49200:49160-49200/udp"]
networks = ["nextcloud"]
command = [
"-n",
"--log-file=stdout",
"--min-port=49160",
"--max-port=49200",
"--realm=cloud.korhonen.cc",
"--use-auth-secret",
"--static-auth-secret=${STATIC_AUTH_SECRET}",
]
[services.collabora]
image = "collabora/code"
container_name = "collabora"
restart = "unless-stopped"
env_file = ".env"
environment = [
"username=${COLLABORA_USERNAME}",
"password=${COLLABORA_PASSWORD}",
"domain=cloud.korhonen.cc",
'extra_params=--o:ssl.enable=false --o:ssl.termination=true',
]
networks = ["proxy"]
[networks.nextcloud]
external = false
[networks.postgres]
external = true
[networks.proxy]
external = true

@@ -0,0 +1,84 @@
volumes:
nextcloud_config:
external: true
nextcloud_data:
external: true
services:
nextcloud:
image: nextcloud:fpm-alpine
container_name: nextcloud
restart: unless-stopped
networks:
- nextcloud
- postgres
- proxy
volumes:
- nextcloud_config:/var/www/html
- nextcloud_data:/var/www/html/data
- /etc/localtime:/etc/localtime:ro
environment:
- REDIS_HOST=redis
- REDIS_HOST_PASSWORD=123
- TRUSTED_PROXIES=caddy
- NEXTCLOUD_TRUSTED_DOMAINS=cloud.korhonen.cc
- OVERWRITEHOST=cloud.korhonen.cc
- OVERWRITEPROTOCOL=https
depends_on:
- redis
labels:
ofelia.enabled: true
ofelia.job-exec.nextcloud.schedule: 0 */5 * * * *
ofelia.job-exec.nextcloud.command: php /var/www/html/cron.php
ofelia.job-exec.nextcloud.user: www-data
ofelia.smtp-host: ${SMTP_HOST}
ofelia.smtp-port: ${SMTP_PORT}
ofelia.smtp-user: ${SMTP_USER}
ofelia.smtp-password: ${SMTP_PASSWORD}
ofelia.email-to: ${EMAIL_TO}
ofelia.email-from: ${EMAIL_FROM}
ofelia.mail-only-on-error: true
redis:
image: redis:alpine
container_name: redis-nextcloud
networks:
- nextcloud
restart: unless-stopped
command: redis-server --requirepass 123
coturn:
image: instrumentisto/coturn
container_name: coturn
restart: unless-stopped
env_file: .env
ports:
- 3478:3478/tcp
- 3478:3478/udp
- 49160-49200:49160-49200/udp
networks:
- nextcloud
command:
- -n
- --log-file=stdout
- --min-port=49160
- --max-port=49200
- --realm=cloud.korhonen.cc
- --use-auth-secret
- --static-auth-secret=${STATIC_AUTH_SECRET}
collabora:
image: collabora/code
container_name: collabora
restart: unless-stopped
env_file: .env
environment:
- username=${COLLABORA_USERNAME}
- password=${COLLABORA_PASSWORD}
- domain=cloud.korhonen.cc
- extra_params=--o:ssl.enable=false --o:ssl.termination=true
networks:
- proxy
networks:
nextcloud:
external: false
postgres:
external: true
proxy:
external: true

@@ -1,9 +0,0 @@
[services.ofelia]
image = "mcuadros/ofelia"
container_name = "ofelia"
restart = "unless-stopped"
volumes = [
"/etc/localtime:/etc/localtime:ro",
"/var/run/docker.sock:/var/run/docker.sock:ro",
]
command = "daemon --docker"

@@ -0,0 +1,9 @@
services:
ofelia:
image: mcuadros/ofelia
container_name: ofelia
restart: unless-stopped
volumes:
- /etc/localtime:/etc/localtime:ro
- /var/run/docker.sock:/var/run/docker.sock:ro
command: daemon --docker

@@ -1,38 +0,0 @@
[services.opentogethertube]
image = "dyc3/opentogethertube"
container_name = "opentogethertube"
restart = "unless-stopped"
environment = [
"PORT=8080",
"REDIS_HOST=redis",
"REDIS_PORT=6379",
"FFPROBE_PATH=/usr/bin/ffprobe",
"DB_MODE=postgres",
"POSTGRES_USER=opentogethertube",
"POSTGRES_DB=opentogethertube",
"POSTGRES_HOST=postgres",
"POSTGRES_PASSWORD",
"DOCKER=1",
"OTT_HOSTNAME=ott.korhonen.cc",
"YOUTUBE_API_KEY",
"OPENTOGETHERTUBE_API_KEY",
"SESSION_SECRET",
]
ports = ["8080:8080", "3002:3002"]
networks = ["default", "postgres", "proxy"]
depends_on = ["redis"]
volumes = ["./production.toml:/app/env/production.toml"]
[services.redis]
container_name = "redis-opentogethertube"
image = "redis:alpine"
restart = "unless-stopped"
[services.redis.healthcheck]
test = "redis-cli ping"
[networks.postgres]
external = true
[networks.proxy]
external = true

@@ -0,0 +1,42 @@
services:
opentogethertube:
image: dyc3/opentogethertube
container_name: opentogethertube
restart: unless-stopped
environment:
- PORT=8080
- REDIS_HOST=redis
- REDIS_PORT=6379
- FFPROBE_PATH=/usr/bin/ffprobe
- DB_MODE=postgres
- POSTGRES_USER=opentogethertube
- POSTGRES_DB=opentogethertube
- POSTGRES_HOST=postgres
- POSTGRES_PASSWORD
- DOCKER=1
- OTT_HOSTNAME=ott.korhonen.cc
- YOUTUBE_API_KEY
- OPENTOGETHERTUBE_API_KEY
- SESSION_SECRET
ports:
- 8080:8080
- 3002:3002
networks:
- default
- postgres
- proxy
depends_on:
- redis
volumes:
- ./production.toml:/app/env/production.toml
redis:
container_name: redis-opentogethertube
image: redis:alpine
restart: unless-stopped
healthcheck:
test: redis-cli ping
networks:
postgres:
external: true
proxy:
external: true

@@ -1,23 +0,0 @@
[volumes]
config = {}
dnsmasq = {}
[services.pihole]
container_name = "pihole"
image = "pihole/pihole"
ports = ["53:53/tcp", "53:53/udp", "67:67/udp", "8069:80/tcp"]
networks = ["proxy"]
volumes = [
"config:/etc/pihole/",
"dnsmasq:/etc/dnsmasq.d/",
]
dns = ["127.0.0.1", "1.1.1.1"]
cap_add = ["NET_ADMIN"]
restart = "unless-stopped"
[services.pihole.environment]
TZ = "Europe/Helsinki"
WEBPASSWORD = "${WEBPASSWORD}"
[networks.proxy]
external = true

@@ -0,0 +1,29 @@
volumes:
config: {}
dnsmasq: {}
services:
pihole:
container_name: pihole
image: pihole/pihole
ports:
- 53:53/tcp
- 53:53/udp
- 67:67/udp
- 8069:80/tcp
networks:
- proxy
volumes:
- config:/etc/pihole/
- dnsmasq:/etc/dnsmasq.d/
dns:
- 127.0.0.1
- 1.1.1.1
cap_add:
- NET_ADMIN
restart: unless-stopped
environment:
TZ: Europe/Helsinki
WEBPASSWORD: ${WEBPASSWORD}
networks:
proxy:
external: true

@@ -1,17 +0,0 @@
[volumes]
data = {}
[services.postgres]
container_name = "postgres"
image = "postgres:16"
environment = ["TZ=Europe/Helsinki"]
ports = ["127.0.0.1:5432:5432"]
networks = ["postgres"]
volumes = [
"data:/var/lib/postgresql/data",
"/etc/localtime:/etc/localtime:ro",
]
restart = "unless-stopped"
[networks.postgres]
external = true

@@ -0,0 +1,19 @@
volumes:
data: {}
services:
postgres:
container_name: postgres
image: postgres:16
environment:
- TZ=Europe/Helsinki
ports:
- 127.0.0.1:5432:5432
networks:
- postgres
volumes:
- data:/var/lib/postgresql/data
- /etc/localtime:/etc/localtime:ro
restart: unless-stopped
networks:
postgres:
external: true

@@ -1,37 +0,0 @@
[volumes]
config = {}
[services.searx]
container_name = "searx"
image = "searxng/searxng"
restart = "unless-stopped"
networks = ["searx", "proxy"]
volumes = [
"config:/etc/searxng",
#"/docker/searx/logo.png:/usr/local/searxng/searx/static/themes/simple/img/searxng.png:ro"
]
environment = ["SEARXNG_BASE_URL=https://search.korhonen.cc/"]
cap_drop = ["ALL"]
cap_add = ["CHOWN", "SETGID", "SETUID", "DAC_OVERRIDE"]
[services.searx.logging]
driver = "json-file"
[services.searx.logging.options]
max-size = "1m"
max-file = "1"
[services.redis]
container_name = "redis-searx"
image = "redis:alpine"
command = "redis-server --save \"\" --appendonly \"no\""
networks = ["searx"]
tmpfs = ["/var/lib/redis"]
cap_drop = ["ALL"]
cap_add = ["SETGID", "SETUID", "DAC_OVERRIDE"]
[networks.searx.ipam]
driver = "default"
[networks.proxy]
external = true

@@ -0,0 +1,46 @@
volumes:
config: {}
services:
searx:
container_name: searx
image: searxng/searxng
restart: unless-stopped
networks:
- searx
- proxy
volumes:
- config:/etc/searxng
environment:
- SEARXNG_BASE_URL=https://search.korhonen.cc/
cap_drop:
- ALL
cap_add:
- CHOWN
- SETGID
- SETUID
- DAC_OVERRIDE
logging:
driver: json-file
options:
max-size: 1m
max-file: "1"
redis:
container_name: redis-searx
image: redis:alpine
command: redis-server --save "" --appendonly "no"
networks:
- searx
tmpfs:
- /var/lib/redis
cap_drop:
- ALL
cap_add:
- SETGID
- SETUID
- DAC_OVERRIDE
networks:
searx:
ipam:
driver: default
proxy:
external: true

@@ -1,27 +0,0 @@
[services.grafana]
image = "grafana/grafana"
container_name = "grafana"
volumes = ["grafana:/var/lib/grafana"]
networks = ["stats", "proxy"]
user = "1000:984"
env_file = [".env"]
environment = [
"GF_AUTH_GENERIC_OAUTH_CLIENT_ID",
"GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET",
"GF_AUTH_GENERIC_OAUTH_ENABLED=true",
"GF_AUTH_GENERIC_OAUTH_NAME=authentik",
"GF_AUTH_GENERIC_OAUTH_SCOPES=openid profile email",
"GF_AUTH_GENERIC_OAUTH_AUTH_URL=https://sso.korhonen.cc/application/o/authorize/",
"GF_AUTH_GENERIC_OAUTH_TOKEN_URL=https://sso.korhonen.cc/application/o/token/",
"GF_AUTH_GENERIC_OAUTH_API_URL=https://sso.korhonen.cc/application/o/userinfo/",
"GF_AUTH_SIGNOUT_REDIRECT_URL=https://sso.korhonen.cc/application/o/grafana/end-session/",
"GF_AUTH_OAUTH_AUTO_LOGIN=true",
"GF_SERVER_ROOT_URL=https://grafana.korhonen.cc",
"GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH=contains(groups[*], 'Administrators') && 'Admin' || 'Viewer'",
]
[networks.stats]
external = false
[networks.proxy]
external = true

@@ -0,0 +1,30 @@
services:
grafana:
image: grafana/grafana
container_name: grafana
volumes:
- grafana:/var/lib/grafana
networks:
- stats
- proxy
user: 1000:984
env_file:
- .env
environment:
- GF_AUTH_GENERIC_OAUTH_CLIENT_ID
- GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET
- GF_AUTH_GENERIC_OAUTH_ENABLED=true
- GF_AUTH_GENERIC_OAUTH_NAME=authentik
- GF_AUTH_GENERIC_OAUTH_SCOPES=openid profile email
- GF_AUTH_GENERIC_OAUTH_AUTH_URL=https://sso.korhonen.cc/application/o/authorize/
- GF_AUTH_GENERIC_OAUTH_TOKEN_URL=https://sso.korhonen.cc/application/o/token/
- GF_AUTH_GENERIC_OAUTH_API_URL=https://sso.korhonen.cc/application/o/userinfo/
- GF_AUTH_SIGNOUT_REDIRECT_URL=https://sso.korhonen.cc/application/o/grafana/end-session/
- GF_AUTH_OAUTH_AUTO_LOGIN=true
- GF_SERVER_ROOT_URL=https://grafana.korhonen.cc
- GF_AUTH_GENERIC_OAUTH_ROLE_ATTRIBUTE_PATH=contains(groups[*], 'Administrators') && 'Admin' || 'Viewer'
networks:
stats:
external: false
proxy:
external: true

@@ -1,9 +0,0 @@
[services]
[services.tftp]
container_name = "tftp"
image = "pghalliday/tftp"
environment = ["TZ=Europe/Helsinki"]
restart = "unless-stopped"
ports = ["69:69/udp"]
volumes = ["/etc/localtime:/etc/localtime:ro", "/docker/tftp:/var/tftpboot"]

@@ -0,0 +1,12 @@
services:
tftp:
container_name: tftp
image: pghalliday/tftp
environment:
- TZ=Europe/Helsinki
restart: unless-stopped
ports:
- 69:69/udp
volumes:
- /etc/localtime:/etc/localtime:ro
- /docker/tftp:/var/tftpboot

@@ -1,21 +0,0 @@
[volumes]
config = {}
picons = {}
[services.tvheadend]
image = "linuxserver/tvheadend"
container_name = "tvheadend"
environment = ["TZ=Europe/Helsinki", "PUID=1000", "PGID=985"]
volumes = [
"config:/config",
"picons:/picons",
"/mnt/Storage/Media/PVR:/recordings",
"/etc/localtime:/etc/localtime:ro",
]
ports = ["9981:9981", "9982:9982"]
devices = ["/dev/dvb:/dev/dvb"]
restart = "unless-stopped"
networks = ["proxy"]
[networks.proxy]
external = true

@@ -0,0 +1,27 @@
volumes:
config: {}
picons: {}
services:
tvheadend:
image: linuxserver/tvheadend
container_name: tvheadend
environment:
- TZ=Europe/Helsinki
- PUID=1000
- PGID=985
volumes:
- config:/config
- picons:/picons
- /mnt/Storage/Media/PVR:/recordings
- /etc/localtime:/etc/localtime:ro
ports:
- 9981:9981
- 9982:9982
devices:
- /dev/dvb:/dev/dvb
restart: unless-stopped
networks:
- proxy
networks:
proxy:
external: true

@@ -1,20 +0,0 @@
[services.umami]
image = "ghcr.io/umami-software/umami:postgresql-latest"
container_name = "umami"
restart = "unless-stopped"
networks = ["postgres", "proxy"]
env_file = ".env"
[services.umami.environment]
DATABASE_URL = "postgresql://umami:${POSTGRES_PASS}@postgres:5432/umami"
HASH_SALT = "${HASH_SALT}"
[services.umami.logging.options]
max-size = "10m"
max-file = "10"
[networks.postgres]
external = true
[networks.proxy]
external = true

@@ -0,0 +1,21 @@
services:
umami:
image: ghcr.io/umami-software/umami:postgresql-latest
container_name: umami
restart: unless-stopped
networks:
- postgres
- proxy
env_file: .env
environment:
DATABASE_URL: postgresql://umami:${POSTGRES_PASS}@postgres:5432/umami
HASH_SALT: ${HASH_SALT}
logging:
options:
max-size: 10m
max-file: "10"
networks:
postgres:
external: true
proxy:
external: true

@@ -1,19 +0,0 @@
[services.watchtower]
image = "containrrr/watchtower"
container_name = "watchtower"
restart = "unless-stopped"
environment = [
"WATCHTOWER_CLEANUP=true",
"WATCHTOWER_NOTIFICATION_EMAIL_FROM=watchtower@korhonen.cc",
"WATCHTOWER_NOTIFICATION_EMAIL_TO=admin@korhonen.cc",
"WATCHTOWER_NOTIFICATION_EMAIL_SERVER=smtp.migadu.com",
"WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PORT=587",
"WATCHTOWER_NOTIFICATION_EMAIL_SERVER_USER=${WATCHTOWER_NOTIFICATION_EMAIL_SERVER_USER}",
"WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PASSWORD=${WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PASSWORD}",
"WATCHTOWER_NOTIFICATION_EMAIL_DELAY=30",
"WATCHTOWER_NOTIFICATIONS=email",
]
volumes = [
"/var/run/docker.sock:/var/run/docker.sock",
"/etc/localtime:/etc/localtime:ro",
]

View file

@ -0,0 +1,18 @@
services:
watchtower:
image: containrrr/watchtower
container_name: watchtower
restart: unless-stopped
environment:
- WATCHTOWER_CLEANUP=true
- WATCHTOWER_NOTIFICATION_EMAIL_FROM=watchtower@korhonen.cc
- WATCHTOWER_NOTIFICATION_EMAIL_TO=admin@korhonen.cc
- WATCHTOWER_NOTIFICATION_EMAIL_SERVER=smtp.migadu.com
- WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PORT=587
- WATCHTOWER_NOTIFICATION_EMAIL_SERVER_USER=${WATCHTOWER_NOTIFICATION_EMAIL_SERVER_USER}
- WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PASSWORD=${WATCHTOWER_NOTIFICATION_EMAIL_SERVER_PASSWORD}
- WATCHTOWER_NOTIFICATION_EMAIL_DELAY=30
- WATCHTOWER_NOTIFICATIONS=email
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /etc/localtime:/etc/localtime:ro

@@ -232,13 +232,13 @@ update() {
 local docker-update() {
 prevpwddocker=$PWD
-for dir in $HOME/git/dotfiles/docker/*; do
+for dir in $HOME/git/dotfiles/docker/*/; do
 cd $dir
-if [[ -f "$dir/DISABLED" ]]; then
+if [[ -f ./DISABLED ]]; then
 echo "$(basename $dir) stack is disabled, skipping..."
 else
-dct -f $dir/docker-compose.toml pull
-dct -f $dir/docker-compose.toml up -d
+docker compose pull
+docker compose up -d
 fi
 cd ..
 done
@@ -351,23 +351,23 @@ btw, () {
 }
 # docker-compose with TOML
-dct() {
-local file_path=('./docker-compose.toml')
-zmodload zsh/zutil
-zparseopts -D -K -- \
-f:=file_path ||
-return 1
-file_path=${file_path[-1]}
-if [[ ! -a "$file_path" ]]; then
-echo "File $file_path does not exist!"
-return 1
-fi
-yj -ty < $file_path | docker compose -f - $@
-}
+# dct() {
+# local file_path=('./docker-compose.toml')
+#
+# zmodload zsh/zutil
+# zparseopts -D -K -- \
+# f:=file_path ||
+# return 1
+#
+# file_path=${file_path[-1]}
+#
+# if [[ ! -a "$file_path" ]]; then
+# echo "File $file_path does not exist!"
+# return 1
+# fi
+#
+# yj -ty < $file_path | docker compose -f /dev/stdin --env-file ./.env $@
+# }
alias dslr-webcam='pkill -f gphoto2; gphoto2 --stdout --capture-movie | ffmpeg -i - -vcodec rawvideo -pix_fmt yuv420p -threads 0 -f v4l2 /dev/video0'