Compare commits

..

45 commits

Author SHA1 Message Date
4dd1cefb30
Merge branch 'main' of ssh://git.ghoscht.com:2222/ghoscht/nix-config 2024-08-09 16:11:57 +02:00
dd03bd6090
Git: Enable merge by default 2024-08-09 16:11:54 +02:00
71393d8ec5
Add diun to media containers 2024-08-09 16:05:52 +02:00
8e4fc00255
Change windscribe location
port forwarding stopped working after a certain amount of time
2024-08-09 16:04:40 +02:00
c1e8037290
Add OIDC to grafana 2024-08-09 16:03:31 +02:00
bfd1b01d65
Arion: Install crowdsec 2024-08-09 16:00:58 +02:00
925a7948a5
Fix sops env file name conflict 2024-08-09 16:00:07 +02:00
4545dd5efe
Arion: Add outline wiki 2024-08-09 15:59:15 +02:00
b5b149474b
Arion: Publicly expose navidrome 2024-08-09 15:17:23 +02:00
e91191eacd
Autorestic: Enable eustachius location 2024-08-09 15:16:10 +02:00
b9e66cbecb
Arion: Change vaultwarden domain 2024-08-09 15:15:07 +02:00
b7dcc5e579
Enable zfs autoscrub 2024-07-28 00:35:01 +02:00
9e32507665
Add comics volume to kavita 2024-07-09 08:19:59 +02:00
9367167e92
Arion: Bump Kavita to 0.8.1 2024-07-09 08:19:40 +02:00
5ed372d1f4
Arion: Bump vaultwarden to 1.31.0 2024-07-09 08:19:02 +02:00
28be439381
Arion: Add windscribe port refresh
https://github.com/dumbasPL/deluge-windscribe-ephemeral-port v3

add windscribe port refresh
2024-07-05 15:52:43 +02:00
c6295daafe
Arion: Bump autobrr to 1.43.0 2024-07-05 15:50:54 +02:00
576cf1f077
Arion: Bump radarr to 5.7.0 2024-07-05 15:50:37 +02:00
29fca3dd76
Arion: Bump sonarr to 4.0.6 2024-07-05 15:50:22 +02:00
d83bf9d4ab
Arion: Bump prowlarr to 1.19.0 2024-07-05 15:50:09 +02:00
f4725f1030
Re-enable eustachius restic backup & fix notifications 2024-07-05 15:42:02 +02:00
e5d4bffe8d
Arion: Expose vaultwarden publicly 2024-07-05 15:41:20 +02:00
4fd9643425
Arion: Add diun monitoring to forgejo 2024-07-05 15:40:39 +02:00
8a97ba6c49
Arion: Bump Forgejo to 7.0.5 2024-07-05 15:36:59 +02:00
3572685386
Arion: Bump Authentik to 2024.6 2024-07-05 15:36:27 +02:00
5a9cf4c15c
Arion: Add wildcard certs to traefik 2024-06-29 20:27:08 +02:00
099959a30a
Arion: Make Scrutiny data actually persistent 2024-06-25 22:11:34 +02:00
255847b6e1
Arion: Modify Diun tracking to notify for newly created images
Normal Diun behavior is to track the current tag, e.g. 'latest', and check whether that tag has changed
2024-06-25 22:11:12 +02:00
76a885e0d3
Arion: Add Jellyfin to Diun tracking 2024-06-25 22:09:19 +02:00
470f738403
Arion: Bump Sonarr to 4.0.5
Sonarr: 4.0.4 -> 4.0.5
2024-06-25 22:08:02 +02:00
49217e1bcc
Arion: Bump Jellyfin to 10.9.7
Jellyfin: 10.9.6 -> 10.9.7
2024-06-25 22:07:00 +02:00
a932b4598d
Arion: Add Grafana+Loki log aggregation 2024-06-25 22:06:01 +02:00
13b0e7593e
Arion: Fix nvme drive not being recognized by smartctl --scan
When attaching NVMe devices using `--device=/dev/nvme..`, make sure to provide the device controller (`/dev/nvme0`)
instead of the block device (`/dev/nvme0n1`)
2024-06-15 10:08:52 +02:00
d1b633b086
Add autorestic ntfy notifications 2024-06-14 20:17:21 +02:00
77a765e332
Add ntfy user auth 2024-06-14 13:05:19 +02:00
6043b73a04
Arion: Add diun docker update notifications 2024-06-14 11:57:42 +02:00
c213949f6f
Autorestic: Disable eustachius backup
eustachius currently isn't running
2024-06-13 23:31:39 +02:00
1debdb71d8
Franz: Enable tmux 2024-06-13 23:30:48 +02:00
26591d2175
Franz: Disable FTP
only SFTP is still supported; plain FTP is too unsafe
2024-06-13 23:30:20 +02:00
2bf237f1da
Add initial native smb share
doesn't really work from the Windows side yet, but Linux works
2024-06-13 23:29:40 +02:00
b0e2738905
Arion: Add s3-compatible minio file hosting 2024-06-13 23:27:38 +02:00
20c587bb5e
Arion: Remove docker samba
prepares conversion to natively installed samba
2024-06-13 23:19:26 +02:00
3c7751feda
Arion: Bump Lidarr to 2.3.3
Lidarr: 2.2.5 -> 2.3.3
2024-06-13 23:18:12 +02:00
93fde832ea
Arion: Bump Jellyfin to 10.9.6
Jellyfin: 10.9.1 -> 10.9.6
2024-06-13 23:17:35 +02:00
e6b3ee395c
Arion: Bump Forgejo to 7.0.4
forgejo: 7.0.3 -> 7.0.4
2024-06-13 23:16:19 +02:00
31 changed files with 794 additions and 619 deletions

View file

@ -12,6 +12,7 @@
commit.gpgsign = true;
user.signingkey = "0x2C2C1C62A5388E82";
init.defaultBranch = "main";
pull.rebase = false; # merge by default
};
lfs.enable = true;
aliases = {

View file

@ -7,6 +7,7 @@ in {
imports = [
./global
./features/coding/nvim
./features/coding/tmux.nix
inputs.nix-colors.homeManagerModules.default
];

View file

@ -1,5 +1,5 @@
let
authentikImage = "ghcr.io/goauthentik/server:2024.4.2";
authentikImage = "ghcr.io/goauthentik/server:2024.6";
in {
project.name = "auth";

View file

@ -20,7 +20,7 @@ in {
owner = vars.user;
};
sops.templates."postgres.env" = {
sops.templates."auth-postgres.env" = {
path = "/home/${vars.user}/.docker/auth/postgres.env";
owner = vars.user;
mode = "0775";
@ -31,7 +31,7 @@ in {
'';
};
sops.templates."authentik.env" = {
sops.templates."auth-authentik.env" = {
path = "/home/${vars.user}/.docker/auth/authentik.env";
owner = vars.user;
mode = "0775";

View file

@ -8,7 +8,6 @@
inputs.arion.nixosModules.arion
./dns
./infrastructure
./nas
./nextcloud
./push
./git
@ -21,6 +20,9 @@
./matrix
./headscale
./auth
./minio
./stats
./wiki
];
environment.systemPackages = with pkgs; [arion];

View file

@ -10,7 +10,7 @@
services = {
forgejo.service = {
image = "codeberg.org/forgejo/forgejo:7.0.3";
image = "codeberg.org/forgejo/forgejo:7.0.5";
container_name = "forgejo";
useHostStore = true;
labels = {
@ -30,6 +30,12 @@
"traefik.http.routers.forgejo-external.entrypoints" = "websecure-external";
"traefik.http.routers.forgejo-external.tls" = "true";
"traefik.http.routers.forgejo-external.tls.certresolver" = "letsencrypt";
"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.sort_tags" = "semver";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
"diun.exclude_tags" = "\\b\\d{4,}\\b";
};
volumes = [
"/storage/dataset/docker/git/forgejo_data:/data"

View file

@ -7,9 +7,7 @@
};
docker-compose.volumes = {
traefik_letsencrypt = null;
scrutiny_data = null;
scrutiny_db = null;
traefik-logs = null;
};
services = {
@ -31,11 +29,19 @@
"traefik.http.services.dashboard.loadbalancer.server.port" = "8080";
"traefik.http.routers.dashboard.tls" = "true";
"traefik.http.routers.dashboard.tls.certresolver" = "letsencrypt";
"traefik.http.routers.dashboard.tls.domains[0].main" = "ghoscht.com";
"traefik.http.routers.dashboard.tls.domains[0].sans" = "*.ghoscht.com";
"traefik.http.middlewares.traefik-https-redirect.redirectscheme.scheme" = "https";
"traefik.http.middlewares.sslheader.headers.customrequestheaders.X-Forwarded-Proto" = "https";
};
volumes = [
"traefik_letsencrypt:/letsencrypt"
"/home/ghoscht/.docker/infrastructure/traefik_data:/etc/traefik"
"/home/ghoscht/.docker/infrastructure/traefik_config/traefik.yml:/traefik.yml:ro"
"/home/ghoscht/.docker/infrastructure/traefik_data/config.yml:/config.yml:ro"
"/storage/dataset/docker/infrastructure/traefik_data/acme.json:/acme.json"
"/var/run/docker.sock:/var/run/docker.sock:ro"
"traefik-logs:/var/log/traefik"
];
env_file = [
"/home/ghoscht/.docker/infrastructure/traefik.env"
@ -45,18 +51,46 @@
"dmz"
];
};
# cloudflared.service = {
# image = "cloudflare/cloudflared:2024.2.1";
# container_name = "cloudflared";
# env_file = [
# "/home/ghoscht/.docker/infrastructure/cloudflared.env"
# ];
# restart = "always";
# command = "tunnel --no-autoupdate --protocol http2 run";
# networks = [
# "dmz"
# ];
# };
crowdsec.service = {
image = "crowdsecurity/crowdsec:v1.6.2";
container_name = "crowdsec";
environment = {
GID = "1000";
COLLECTIONS = "crowdsecurity/linux crowdsecurity/traefik firix/authentik LePresidente/gitea Dominic-Wagner/vaultwarden";
};
volumes = [
"/storage/dataset/docker/infrastructure/crowdsec_config/acquis.yaml:/etc/crowdsec/acquis.yaml"
"/storage/dataset/docker/infrastructure/crowdsec_config/profiles.yaml:/etc/crowdsec/profiles.yaml"
"/storage/dataset/docker/infrastructure/crowdsec_config/ntfy.yaml:/etc/crowdsec/notifications/ntfy.yaml"
"/storage/dataset/docker/infrastructure/crowdsec_db:/var/lib/crowdsec/data/"
"/storage/dataset/docker/infrastructure/crowdsec_data:/etc/crowdsec/"
"traefik-logs:/var/log/traefik/:ro"
"/var/run/docker.sock:/var/run/docker.sock:ro"
];
depends_on = [
"traefik"
];
networks = [
"dmz"
];
restart = "always";
};
bouncer-traefik.service = {
image = "fbonalair/traefik-crowdsec-bouncer:0.5.0";
environment = {
CROWDSEC_AGENT_HOST = "crowdsec:8080";
};
env_file = [
"/home/ghoscht/.docker/infrastructure/traefik-bouncer.env"
];
depends_on = [
"crowdsec"
];
networks = [
"dmz"
];
restart = "always";
};
scrutiny.service = {
image = "ghcr.io/analogj/scrutiny:v0.8.0-omnibus";
container_name = "scrutiny";
@ -75,11 +109,11 @@
};
volumes = [
"/run/udev:/run/udev:ro"
"scrutiny_data:/opt/scrutiny/config"
"scrutiny_db:/opt/scrutiny/influxdb"
"/storage/dataset/docker/infrastructure/scrutiny_data:/opt/scrutiny/config"
"/storage/dataset/docker/infrastructure/scrutiny_influxdb_data:/opt/scrutiny/influxdb"
];
devices = [
"/dev/nvme0n1"
"/dev/nvme0"
"/dev/sda"
"/dev/sdb"
"/dev/sdc"
@ -91,16 +125,36 @@
"dmz"
];
};
dyndns.service = {
image = "ghcr.io/cromefire/fritzbox-cloudflare-dyndns:1.2.1";
container_name = "dyndns";
diun.service = {
image = "crazymax/diun:4.28";
container_name = "diun";
restart = "always";
ports = ["8888:8080"];
command = "serve";
volumes = [
"/storage/dataset/docker/infrastructure/diun_data:/data"
"/var/run/docker.sock:/var/run/docker.sock"
];
environment = {
CLOUDFLARE_ZONES_IPV4 = "ghoscht.com";
TZ = "Europe/Berlin";
LOG_LEVEL = "info";
# Sorting can only actually be observed when setting workers=1
DIUN_WATCH_WORKERS = "20";
DIUN_WATCH_SCHEDULE = "0 */6 * * *";
DIUN_WATCH_JITTER = "30s";
DIUN_WATCH_RUNONSTARTUP = "true";
DIUN_PROVIDERS_DOCKER = "true";
DIUN_DEFAULTS_MAXTAGS = 1;
DIUN_DEFAULTS_NOTIFYON = "new";
DIUN_NOTIF_NTFY_ENDPOINT = "http://ntfy";
DIUN_NOTIF_NTFY_TOPIC = "docker-updates";
};
env_file = [
"/home/ghoscht/.docker/infrastructure/dyndns.env"
"/home/ghoscht/.docker/infrastructure/diun.env"
];
networks = [
"dmz"
];
};
};

View file

@ -21,7 +21,11 @@ in {
owner = vars.user;
};
sops.secrets."dyndns/cloudflare_api_key" = {
sops.secrets."crowdsec/traefik_bouncer_api_key" = {
owner = vars.user;
};
sops.secrets."diun/ntfy_access_token" = {
owner = vars.user;
};
@ -44,47 +48,84 @@ in {
'';
};
sops.templates."dyndns.env" = {
path = "/home/${vars.user}/.docker/infrastructure/dyndns.env";
sops.templates."traefik-bouncer.env" = {
path = "/home/${vars.user}/.docker/infrastructure/traefik-bouncer.env";
owner = vars.user;
mode = "0775";
content = ''
CLOUDFLARE_API_TOKEN="${config.sops.placeholder."dyndns/cloudflare_api_key"}"
CROWDSEC_BOUNCER_API_KEY="${config.sops.placeholder."crowdsec/traefik_bouncer_api_key"}"
'';
};
sops.templates."traefik.toml" = {
path = "/home/${vars.user}/.docker/infrastructure/traefik_data/traefik.toml";
sops.templates."traefik.yml" = {
path = "/home/${vars.user}/.docker/infrastructure/traefik_config/traefik.yml";
owner = vars.user;
mode = "0775";
content = ''
[entryPoints]
[entryPoints.web]
address = ":80"
[entryPoints.web-external]
address = ":81"
[entryPoints.websecure]
address = ":443"
[entryPoints.websecure-external]
address = ":444"
[api]
dashboard = true
insecure = true
[certificatesResolvers.letsencrypt.acme]
email = "${config.sops.placeholder."traefik/acme_email"}"
storage = "/letsencrypt/acme.json"
[certificatesResolvers.letsencrypt.acme.dnsChallenge]
provider = "cloudflare"
resolvers = ["1.1.1.1:53", "1.0.0.1:53"]
[serversTransport]
insecureSkipVerify = true
[providers.docker]
watch = true
network = "dmz"
exposedByDefault = false # overriden by traefik.enable=true
api:
dashboard: true
debug: true
insecure: true
entryPoints:
web:
address: ":80"
http:
redirections:
entrypoint:
to: websecure
scheme: https
websecure:
address: ":443"
web-external:
address: ":81"
http:
redirections:
entrypoint:
to: websecure-external
scheme: https
middlewares:
- crowdsec-bouncer@file
websecure-external:
address: ":444"
http:
middlewares:
- crowdsec-bouncer@file
providers:
docker:
watch: true
exposedByDefault: false
network: dmz
file:
filename: /config.yml
certificatesResolvers:
letsencrypt:
acme:
email: ${config.sops.placeholder."traefik/acme_email"}
storage: acme.json
dnsChallenge:
provider: cloudflare
resolvers:
- "1.1.1.1:53"
- "1.0.0.1:53"
log:
level: "INFO"
filePath: "/var/log/traefik/traefik.log"
accessLog:
filePath: "/var/log/traefik/access.log"
'';
};
sops.templates."diun.env" = {
path = "/home/${vars.user}/.docker/infrastructure/diun.env";
owner = vars.user;
mode = "0775";
content = ''
DIUN_NOTIF_NTFY_TOKEN="${config.sops.placeholder."diun/ntfy_access_token"}"
'';
};
services.cron = {
enable = true;
systemCronJobs = [
"0 * * * * root . /etc/profile; docker exec crowdsec cscli hub update && docker exec crowdsec cscli hub upgrade >> /var/log/crowdsec-update.log"
];
};
}
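For context on the crowdsec wiring above: Traefik now writes its logs to /var/log/traefik on the shared traefik-logs volume, and the crowdsec container mounts an acquis.yaml from crowdsec_config that is not shown in this diff. Under those assumptions, a minimal acquisition file for this setup might look roughly like:

# illustrative acquis.yaml, not part of this diff; paths follow the volume mounts above
filenames:
  - /var/log/traefik/access.log
labels:
  type: traefik

The `type: traefik` label is what the parser from the crowdsecurity/traefik collection installed above keys on.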

View file

@ -5,10 +5,11 @@
name = "dmz";
external = true;
};
networks.internal = {};
services = {
jellyfin.service = {
image = "linuxserver/jellyfin:10.9.1";
image = "linuxserver/jellyfin:10.9.7";
container_name = "jellyfin";
ports = [
"8096:8096"
@ -21,6 +22,12 @@
"traefik.http.services.jellyfin.loadbalancer.passHostHeader" = "true";
"traefik.http.routers.jellyfin.tls" = "true";
"traefik.http.routers.jellyfin.tls.certresolver" = "letsencrypt";
"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.sort_tags" = "semver";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
"diun.exclude_tags" = "\\b\\d{4,}\\b";
};
volumes = [
"/storage/dataset/docker/media/jellyfin_data:/config"
@ -43,11 +50,27 @@
container_name = "navidrome";
labels = {
"traefik.enable" = "true";
"traefik.http.routers.navidrome.entrypoints" = "websecure";
"traefik.http.routers.navidrome.rule" = "Host(`navidrome.ghoscht.com`)";
"traefik.docker.network" = "dmz";
"traefik.http.services.navidrome.loadbalancer.server.port" = "4533";
"traefik.http.routers.navidrome.service" = "navidrome";
"traefik.http.routers.navidrome.entrypoints" = "websecure";
"traefik.http.routers.navidrome.rule" = "Host(`music.ghoscht.com`)";
"traefik.http.routers.navidrome.tls" = "true";
"traefik.http.routers.navidrome.tls.certresolver" = "letsencrypt";
"traefik.http.services.navidrome-external.loadbalancer.server.port" = "4533";
"traefik.http.routers.navidrome-external.service" = "navidrome-external";
"traefik.http.routers.navidrome-external.rule" = "Host(`music.ghoscht.com`)";
"traefik.http.routers.navidrome-external.entrypoints" = "websecure-external";
"traefik.http.routers.navidrome-external.tls" = "true";
"traefik.http.routers.navidrome-external.tls.certresolver" = "letsencrypt";
"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.sort_tags" = "semver";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
"diun.exclude_tags" = "\\b\\d{4,}\\b";
};
volumes = [
"/storage/dataset/docker/media/navidrome_data:/data"
@ -65,7 +88,7 @@
];
};
kavita.service = {
image = "jvmilazz0/kavita:0.7.14";
image = "jvmilazz0/kavita:0.8.1";
container_name = "kavita";
labels = {
"traefik.enable" = "true";
@ -78,6 +101,7 @@
volumes = [
"/storage/dataset/docker/media/kavita_data:/kavita/config"
"/storage/dataset/data/media/manga:/manga"
"/storage/dataset/data/media/comics:/comics"
];
restart = "always";
networks = [
@ -105,9 +129,9 @@
PGID = 1000;
TZ = "Europe/Berlin";
OPENVPN_PROVIDER = "WINDSCRIBE";
OPENVPN_CONFIG = "Vienna-Hofburg-udp";
OPENVPN_CONFIG = "Amsterdam-Tulip-udp";
OVPN_PROTOCOL = "udp";
OPENVPN_OPTS = "--pull-filter ignore ping --ping 10 --ping-restart 120";
OPENVPN_OPTS = "--reneg-sec 0 --verb 4";
LOCAL_NETWORK = "192.168.0.0/16";
TRANSMISSION_DOWNLOAD_DIR = "/data/torrents";
TRANSMISSION_INCOMPLETE_DIR = "/data/torrents/incomplete";
@ -124,10 +148,11 @@
restart = "always";
networks = [
"dmz"
"internal"
];
};
prowlarr.service = {
image = "linuxserver/prowlarr:1.16.2";
image = "linuxserver/prowlarr:1.19.0";
container_name = "prowlarr";
labels = {
"traefik.enable" = "true";
@ -137,6 +162,10 @@
"traefik.docker.network" = "dmz";
"traefik.http.routers.prowlarr.tls" = "true";
"traefik.http.routers.prowlarr.tls.certresolver" = "letsencrypt";
"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
};
volumes = [
"/storage/dataset/docker/media/prowlarr_data:/config"
@ -153,7 +182,7 @@
restart = "always";
};
sonarr.service = {
image = "linuxserver/sonarr:4.0.4";
image = "linuxserver/sonarr:4.0.6";
container_name = "sonarr";
labels = {
"traefik.enable" = "true";
@ -163,6 +192,10 @@
"traefik.docker.network" = "dmz";
"traefik.http.routers.sonarr.tls" = "true";
"traefik.http.routers.sonarr.tls.certresolver" = "letsencrypt";
"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
};
volumes = [
"/storage/dataset/docker/media/sonarr_data:/config"
@ -181,7 +214,7 @@
restart = "always";
};
radarr.service = {
image = "linuxserver/radarr:5.4.6";
image = "linuxserver/radarr:5.7.0";
container_name = "radarr";
labels = {
"traefik.enable" = "true";
@ -191,6 +224,10 @@
"traefik.docker.network" = "dmz";
"traefik.http.routers.radarr.tls" = "true";
"traefik.http.routers.radarr.tls.certresolver" = "letsencrypt";
"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
};
volumes = [
"/storage/dataset/docker/media/radarr_data:/config"
@ -209,7 +246,7 @@
restart = "always";
};
lidarr.service = {
image = "linuxserver/lidarr:2.2.5";
image = "linuxserver/lidarr:2.3.3";
container_name = "lidarr";
labels = {
"traefik.enable" = "true";
@ -220,6 +257,12 @@
"traefik.docker.network" = "dmz";
"traefik.http.routers.lidarr.tls" = "true";
"traefik.http.routers.lidarr.tls.certresolver" = "letsencrypt";
"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
# "diun.max_tags" = "10";
};
volumes = [
"/storage/dataset/docker/media/lidarr_data:/config"
@ -287,7 +330,7 @@
restart = "always";
};
autobrr.service = {
image = "ghcr.io/autobrr/autobrr:v1.41.0";
image = "ghcr.io/autobrr/autobrr:v1.43.0";
container_name = "autobrr";
labels = {
"traefik.enable" = "true";
@ -297,6 +340,10 @@
"traefik.docker.network" = "dmz";
"traefik.http.routers.autobrr.tls" = "true";
"traefik.http.routers.autobrr.tls.certresolver" = "letsencrypt";
"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.include_tags" = "^v\\d+\\.\\d+\\.\\d+$$";
};
volumes = [
"/storage/dataset/docker/media/autobrr_data:/config"
@ -394,5 +441,18 @@
};
restart = "always";
};
port-refresh.service = {
image = "ghoscht/windscribe-ephemeral-port:latest";
container_name = "port-refresh";
volumes = [
"/storage/dataset/docker/media/port-refresh_config/config.yml:/config/config.yaml"
];
networks = [
"internal"
];
depends_on = {
vpn = {condition = "service_healthy";};
};
};
};
}

View file

@ -0,0 +1,48 @@
{
project.name = "minio";
networks.dmz = {
name = "dmz";
external = true;
};
services = {
minio.service = {
image = "bitnami/minio:2024.5.10";
container_name = "minio";
labels = {
"traefik.enable" = "true";
# API
"traefik.http.routers.minio.rule" = "Host(`files.ghoscht.com`)";
"traefik.http.routers.minio.service" = "minio";
"traefik.http.routers.minio.entrypoints" = "websecure";
"traefik.http.services.minio.loadbalancer.server.port" = "9000";
"traefik.http.routers.minio.tls" = "true";
"traefik.http.routers.minio.tls.certresolver" = "letsencrypt";
# Dashboard
"traefik.http.routers.minio-dash.rule" = "Host(`minio.ghoscht.com`)";
"traefik.http.routers.minio-dash.service" = "minio-dash";
"traefik.http.routers.minio-dash.entrypoints" = "websecure";
"traefik.http.services.minio-dash.loadbalancer.server.port" = "9001";
"traefik.http.routers.minio-dash.tls" = "true";
"traefik.http.routers.minio-dash.tls.certresolver" = "letsencrypt";
};
volumes = [
"/storage/dataset/docker/minio/minio_data:/data"
];
environment = {
MINIO_DATA_DIR = "/data";
MINIO_BROWSER_REDIRECT_URL = "https://minio.ghoscht.com";
};
env_file = [
"/home/ghoscht/.docker/minio/minio.env"
];
restart = "unless-stopped";
networks = [
"dmz"
];
};
};
}

View file

@ -0,0 +1,25 @@
{config, ...}: let
vars = import ../../../../vars.nix;
in {
virtualisation.arion = {
projects.minio.settings = {
imports = [./arion-compose.nix];
};
};
sops.secrets."minio/root_user" = {
owner = vars.user;
};
sops.secrets."minio/root_password" = {
owner = vars.user;
};
sops.templates."minio.env" = {
path = "/home/${vars.user}/.docker/minio/minio.env";
owner = vars.user;
mode = "0775";
content = ''
MINIO_ROOT_USER="${config.sops.placeholder."minio/root_user"}"
MINIO_ROOT_PASSWORD="${config.sops.placeholder."minio/root_password"}"
'';
};
}

View file

@ -1,34 +0,0 @@
{pkgs, ...}: {
project.name = "nas";
networks.dmz = {
name = "dmz";
external = true;
};
services = {
samba.service = {
image = "dperson/samba";
container_name = "samba";
ports = [
"137:137/udp"
"138:138/udp"
"139:139/tcp"
"445:445/tcp"
];
environment = {
USERID = 1000;
GROUPID = 1000;
TZ = "Europe/Berlin";
};
command = "-s 'public;/mount;yes;no;yes' -p";
volumes = [
"/storage/dataset/nas:/mount"
];
restart = "always";
networks = [
"dmz"
];
};
};
}

View file

@ -1,12 +0,0 @@
{
networking.firewall = {
allowedUDPPorts = [137 138];
allowedTCPPorts = [139 445];
};
virtualisation.arion = {
projects.nas.settings = {
imports = [./arion-compose.nix];
};
};
}

View file

@ -8,14 +8,31 @@
services = {
vaultwarden.service = {
image = "vaultwarden/server:1.30.5";
image = "vaultwarden/server:1.31.0";
container_name = "vaultwarden";
labels = {
"traefik.enable" = "true";
"traefik.docker.network" = "dmz";
"traefik.http.services.vaultwarden.loadbalancer.server.port" = "80";
"traefik.http.routers.vaultwarden.service" = "vaultwarden";
"traefik.http.routers.vaultwarden.entrypoints" = "websecure";
"traefik.http.routers.vaultwarden.rule" = "Host(`vaultwarden.ghoscht.com`)";
"traefik.http.routers.vaultwarden.rule" = "Host(`vault.ghoscht.com`)";
"traefik.http.routers.vaultwarden.tls" = "true";
"traefik.http.routers.vaultwarden.tls.certresolver" = "letsencrypt";
"traefik.http.services.vaultwarden-external.loadbalancer.server.port" = "80";
"traefik.http.routers.vaultwarden-external.service" = "vaultwarden-external";
"traefik.http.routers.vaultwarden-external.rule" = "Host(`vault.ghoscht.com`)";
"traefik.http.routers.vaultwarden-external.entrypoints" = "websecure-external";
"traefik.http.routers.vaultwarden-external.tls" = "true";
"traefik.http.routers.vaultwarden-external.tls.certresolver" = "letsencrypt";
"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.sort_tags" = "semver";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
"diun.exclude_tags" = "\\b\\d{4,}\\b";
};
volumes = [
"/storage/dataset/docker/passwords/vaultwarden_data/:/data"

View file

@ -32,6 +32,7 @@
};
volumes = [
"/home/ghoscht/.docker/push/ntfy_data/server.yml:/etc/ntfy/server.yml"
"/storage/dataset/docker/push/ntfy_data:/etc/ntfy/data"
];
environment = {
TZ = "Europe/Berlin";

View file

@ -0,0 +1,120 @@
{
project.name = "stats";
networks.dmz = {
name = "dmz";
external = true;
};
networks.internal = {};
services = {
grafana.service = {
image = "grafana/grafana:10.4.4";
user = "1000";
container_name = "grafana";
labels = {
"traefik.enable" = "true";
"traefik.http.services.grafana.loadbalancer.server.port" = "3000";
"traefik.http.routers.grafana.service" = "grafana";
"traefik.http.routers.grafana.rule" = "Host(`grafana.ghoscht.com`)";
"traefik.http.routers.grafana.entrypoints" = "websecure";
"traefik.http.routers.grafana.tls" = "true";
"traefik.http.routers.grafana.tls.certresolver" = "letsencrypt";
};
environment = {
GF_SERVER_ROOT_URL = "https://grafana.ghoscht.com";
GF_AUTH_GENERIC_OAUTH_NAME = "authentik";
GF_AUTH_GENERIC_OAUTH_ENABLED = "true";
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP = "true";
GF_AUTH_GENERIC_OAUTH_SCOPES = "openid profile email";
GF_AUTH_GENERIC_OAUTH_AUTH_URL = "https://auth.ghoscht.com/application/o/authorize/";
GF_AUTH_GENERIC_OAUTH_TOKEN_URL = "https://auth.ghoscht.com/application/o/token/";
GF_AUTH_GENERIC_OAUTH_API_URL = "https://auth.ghoscht.com/application/o/userinfo/";
# GF_AUTH_OAUTH_AUTO_LOGIN = "true";
};
env_file = [
"/home/ghoscht/.docker/stats/grafana.env"
];
volumes = [
"/storage/dataset/docker/stats/grafana_data:/var/lib/grafana"
];
networks = [
"dmz"
"internal"
];
};
loki.service = {
image = "grafana/loki:3.0.0";
volumes = [
"/storage/dataset/docker/stats/loki_data:/etc/loki"
];
ports = [
"3100:3100"
];
command = "-config.file=/etc/loki/loki-config.yml";
networks = [
"internal"
];
};
promtail.service = {
image = "grafana/promtail:3.0.0";
volumes = [
"/var/log:/var/log"
"/storage/dataset/docker/stats/promtail_data/promtail-config.yml:/etc/promtail/promtail-config.yml"
];
command = "-config.file=/etc/promtail/promtail-config.yml";
networks = [
"internal"
];
};
prometheus.service = {
image = "prom/prometheus:v2.53.0";
volumes = [
"/storage/dataset/docker/stats/prometheus_config/prometheus.yml:/etc/prometheus/prometheus.yml"
"/storage/dataset/docker/stats/prometheus_data:/prometheus"
];
command = [
"--config.file=/etc/prometheus/prometheus.yml"
"--web.console.libraries=/etc/prometheus/console_libraries"
"--web.console.templates=/etc/prometheus/consoles"
];
networks = [
"internal"
];
};
node-exporter.service = {
image = "prom/node-exporter:v1.8.1";
volumes = [
"/proc:/host/proc:ro"
"/sys:/host/sys:ro"
"/:/rootfs:ro"
];
command = [
"--path.procfs=/host/proc"
"--path.rootfs=/rootfs"
"--path.sysfs=/host/sys"
"--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)"
];
networks = [
"internal"
];
};
# cadvisor.service = {
# image = "gcr.io/cadvisor/cadvisor:v0.49.1";
# volumes = [
# "/:/rootfs:ro"
# "/var/run:/var/run:ro"
# "/sys:/sys:ro"
# "/var/lib/docker:/var/lib/docker:ro"
# "/dev/disk:/dev/disk:ro"
# ];
# devices = ["/dev/kmsg"];
# networks = [
# "internal"
# ];
# };
};
}
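The promtail-config.yml mounted above is not part of this diff. As a rough illustration of what such a file usually contains (the service name loki and the paths here are assumptions based on the compose definitions above):

# illustrative promtail-config.yml, not included in the diff
server:
  http_listen_port: 9080
positions:
  filename: /tmp/positions.yaml
clients:
  - url: http://loki:3100/loki/api/v1/push   # loki service on the shared internal network
scrape_configs:
  - job_name: system
    static_configs:
      - targets:
          - localhost
        labels:
          job: varlogs
          __path__: /var/log/*log            # matches the /var/log bind mount above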

View file

@ -0,0 +1,6 @@
# Instead of pinning Nixpkgs, we can opt to use the one in NIX_PATH
import <nixpkgs> {
# We specify the architecture explicitly. Use a Linux remote builder when
# calling arion from other platforms.
system = "x86_64-linux";
}

View file

@ -0,0 +1,55 @@
{config, ...}: let
vars = import ../../../../vars.nix;
in {
virtualisation.arion = {
projects.stats.settings = {
imports = [./arion-compose.nix];
};
};
sops.secrets."stats/oidc_client_id" = {
owner = vars.user;
};
sops.secrets."stats/oidc_client_secret" = {
owner = vars.user;
};
sops.templates."grafana.env" = {
path = "/home/${vars.user}/.docker/stats/grafana.env";
owner = vars.user;
mode = "0775";
content = ''
GF_AUTH_GENERIC_OAUTH_CLIENT_ID="${config.sops.placeholder."stats/oidc_client_id"}"
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET="${config.sops.placeholder."stats/oidc_client_secret"}"
'';
};
systemd.services.add-loki-logging-driver = {
description = "Add grafana loki docker driver";
after = ["network.target"];
wantedBy = ["multi-user.target"];
serviceConfig.Type = "oneshot";
script = let
dockercli = "${config.virtualisation.docker.package}/bin/docker";
in ''
# Put a `|| true` at the end to prevent getting a non-zero return code, which would
# crash the whole service.
check=$(${dockercli} plugin ls | grep "loki" || true)
if [ -z "$check" ]; then
${dockercli} plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
else
echo "loki docker driver already exists in docker"
fi
'';
};
virtualisation.docker.daemon.settings = {
debug = true;
log-driver = "loki";
log-opts = {
loki-url = "http://localhost:3100/loki/api/v1/push";
# loki-url = "http://host.docker.internal:3100/loki/api/v1/push";
};
};
}

View file

@ -0,0 +1,82 @@
{
project.name = "wiki";
networks.dmz = {
name = "dmz";
external = true;
};
networks.internal = {};
services = {
outline.service = {
image = "docker.getoutline.com/outlinewiki/outline:0.77.2";
container_name = "outline";
labels = {
"traefik.enable" = "true";
"traefik.http.services.outline.loadbalancer.server.port" = "3000";
"traefik.http.routers.outline.service" = "outline";
"traefik.http.routers.outline.rule" = "Host(`wiki.ghoscht.com`)";
"traefik.http.routers.outline.entrypoints" = "websecure";
"traefik.http.routers.outline.tls" = "true";
"traefik.http.routers.outline.tls.certresolver" = "letsencrypt";
};
environment = {
NODE_ENV = "production";
PGSSLMODE = "disable";
REDIS_URL = "redis://redis:6379";
URL = "https://wiki.ghoscht.com";
PORT = 3000;
OIDC_AUTH_URI = "https://auth.ghoscht.com/application/o/authorize/";
OIDC_TOKEN_URI = "https://auth.ghoscht.com/application/o/token/";
OIDC_USERINFO_URI = "https://auth.ghoscht.com/application/o/userinfo/";
AWS_REGION = "local";
AWS_S3_UPLOAD_BUCKET_URL = "https://files.ghoscht.com";
AWS_S3_UPLOAD_BUCKET_NAME = "outline";
AWS_S3_UPLOAD_MAX_SIZE = 26214400;
AWS_S3_FORCE_PATH_STYLE = "true";
AWS_S3_ACL = "private";
};
env_file = [
"/home/ghoscht/.docker/wiki/outline.env"
];
restart = "always";
depends_on = {
redis = {condition = "service_healthy";};
postgres = {condition = "service_healthy";};
};
networks = [
"dmz"
"internal"
];
};
redis.service = {
image = "redis:7.2.4";
command = "--save 60 1 --loglevel warning";
restart = "always";
volumes = [
"/storage/dataset/docker/wiki/redis_data:/data"
];
networks = [
"internal"
];
};
postgres.service = {
image = "postgres:12.18";
restart = "always";
volumes = [
"/storage/dataset/docker/wiki/postgres_data:/var/lib/postgresql/data"
];
networks = [
"internal"
];
env_file = [
"/home/ghoscht/.docker/wiki/postgres.env"
];
};
};
}

View file

@ -0,0 +1,6 @@
# Instead of pinning Nixpkgs, we can opt to use the one in NIX_PATH
import <nixpkgs> {
# We specify the architecture explicitly. Use a Linux remote builder when
# calling arion from other platforms.
system = "x86_64-linux";
}

View file

@ -0,0 +1,67 @@
{config, ...}: let
vars = import ../../../../vars.nix;
in {
virtualisation.arion = {
projects.wiki.settings = {
imports = [./arion-compose.nix];
};
};
sops.secrets."wiki/aws_access_key_id" = {
owner = vars.user;
};
sops.secrets."wiki/aws_secret_access_key" = {
owner = vars.user;
};
sops.secrets."wiki/oidc_client_id" = {
owner = vars.user;
};
sops.secrets."wiki/oidc_client_secret" = {
owner = vars.user;
};
sops.secrets."wiki/secret_key" = {
owner = vars.user;
};
sops.secrets."wiki/utils_secret" = {
owner = vars.user;
};
sops.secrets."wiki/db_user" = {
owner = vars.user;
};
sops.secrets."wiki/db_pass" = {
owner = vars.user;
};
sops.secrets."wiki/db_name" = {
owner = vars.user;
};
sops.templates."wiki-postgres.env" = {
path = "/home/${vars.user}/.docker/wiki/postgres.env";
owner = vars.user;
mode = "0775";
content = ''
POSTGRES_PASSWORD="${config.sops.placeholder."wiki/db_pass"}"
POSTGRES_USER="${config.sops.placeholder."wiki/db_user"}"
POSTGRES_DB="${config.sops.placeholder."wiki/db_name"}"
'';
};
sops.templates."wiki-outline.env" = {
path = "/home/${vars.user}/.docker/wiki/outline.env";
owner = vars.user;
mode = "0775";
content = ''
SECRET_KEY="${config.sops.placeholder."wiki/secret_key"}"
UTILS_SECRET="${config.sops.placeholder."wiki/utils_secret"}"
OIDC_CLIENT_ID="${config.sops.placeholder."wiki/oidc_client_id"}"
OIDC_CLIENT_SECRET="${config.sops.placeholder."wiki/oidc_client_secret"}"
AWS_ACCESS_KEY_ID="${config.sops.placeholder."wiki/aws_access_key_id"}"
AWS_SECRET_ACCESS_KEY="${config.sops.placeholder."wiki/aws_secret_access_key"}"
DATABASE_URL="postgres://${config.sops.placeholder."wiki/db_user"}:${config.sops.placeholder."wiki/db_pass"}@postgres:5432/${config.sops.placeholder."wiki/db_name"}"
DATABASE_URL_TEST="postgres://${config.sops.placeholder."wiki/db_user"}:${config.sops.placeholder."wiki/db_pass"}@postgres:5432/${config.sops.placeholder."wiki/db_name"}"
'';
};
}

View file

@ -21,16 +21,17 @@ in {
../common/optional/systemd-boot.nix
../common/optional/gnome-keyring.nix
../common/optional/docker.nix
../common/optional/vsftpd.nix
./sops.nix
./restic.nix
./arion
./hydra.nix
./samba.nix
];
# Enable ZFS
boot.supportedFilesystems = ["zfs"];
networking.hostId = "f014fc43";
services.zfs.autoScrub.enable = true;
systemd.enableEmergencyMode = false;
networking.firewall.enable = true;

View file

@ -37,7 +37,7 @@ in {
services.cron = {
enable = true;
systemCronJobs = [
"*/5 * * * * root . /etc/profile; autorestic -c /home/ghoscht/.autorestic.yml --ci cron"
"*/5 * * * * root . /etc/profile; autorestic -c /home/ghoscht/.autorestic.yml --ci cron > /var/log/autorestic-bin.log"
];
};
@ -52,6 +52,9 @@ in {
sops.secrets."autorestic/eustachius_key" = {
owner = vars.user;
};
sops.secrets."autorestic/ntfy_access_token" = {
owner = vars.user;
};
sops.templates.".autorestic.yml" = {
path = "/home/${vars.user}/.autorestic.yml";
@ -63,6 +66,15 @@ in {
forget:
keep-weekly: 7
keep-monthly: 12
extras:
default_hooks: &default_hooks
success:
- echo "Backup of $AUTORESTIC_LOCATION successful! Added $AUTORESTIC_FILES_ADDED_0 files and changed $AUTORESTIC_FILES_CHANGED_0 files with a total size of $AUTORESTIC_ADDED_SIZE_0. Processed $AUTORESTIC_PROCESSED_FILES_0 files with total size $AUTORESTIC_PROCESSED_SIZE_0 in $AUTORESTIC_PROCESSED_DURATION_0. Snapshot $AUTORESTIC_SNAPSHOT_ID_0" >> /var/log/autorestic-backup.log
failure:
- echo "Backup of $AUTORESTIC_LOCATION failed" >> /var/log/autorestic.log
- 'curl -H "Authorization: Bearer ${config.sops.placeholder."autorestic/ntfy_access_token"}" -H "X-Tags: warning" -H "X-Title: Backup Failure" -d "Backup of location $AUTORESTIC_LOCATION failed" https://push.ghoscht.com/autorestic'
locations:
dashboard:
from: /storage/dataset/docker/dashboard
@ -71,6 +83,7 @@ in {
- eustachius
cron: '0 4 * * 0' # Every Sunday at 4:00
hooks:
<<: *default_hooks
before:
- arion -f ${arionPath}/dashboard/arion-compose.nix -p ${arionPath}/dashboard/arion-pkgs.nix stop
after:
@ -79,9 +92,11 @@ in {
from: /storage/dataset/docker/dns
to:
- zfs
- ssd
- eustachius
cron: '0 4 * * 0' # Every Sunday at 4:00
hooks:
<<: *default_hooks
before:
- arion -f ${arionPath}/dns/arion-compose.nix -p ${arionPath}/dns/arion-pkgs.nix stop
after:
@ -93,6 +108,7 @@ in {
- eustachius
cron: '0 4 * * 0' # Every Sunday at 4:00
hooks:
<<: *default_hooks
before:
- arion -f ${arionPath}/feed/arion-compose.nix -p ${arionPath}/feed/arion-pkgs.nix stop
after:
@ -105,6 +121,7 @@ in {
- eustachius
cron: '0 4 * * *' # Every Day at 4:00
hooks:
<<: *default_hooks
before:
- arion -f ${arionPath}/git/arion-compose.nix -p ${arionPath}/git/arion-pkgs.nix stop
after:
@ -113,9 +130,11 @@ in {
from: /storage/dataset/docker/media
to:
- zfs
- ssd
- eustachius
cron: '0 4 * * *' # Every Day at 4:00
hooks:
<<: *default_hooks
before:
- arion -f ${arionPath}/media/arion-compose.nix -p ${arionPath}/media/arion-pkgs.nix stop
after:
@ -124,9 +143,11 @@ in {
from: /storage/dataset/docker/nextcloud
to:
- zfs
- ssd
- eustachius
cron: '0 4 * * *' # Every Day at 4:00
hooks:
<<: *default_hooks
before:
- arion -f ${arionPath}/nextcloud/arion-compose.nix -p ${arionPath}/nextcloud/arion-pkgs.nix stop
after:
@ -138,6 +159,7 @@ in {
- eustachius
cron: '0 4 * * 0' # Every Sunday at 4:00
hooks:
<<: *default_hooks
before:
- arion -f ${arionPath}/smarthome/arion-compose.nix -p ${arionPath}/smarthome/arion-pkgs.nix stop
after:
@ -150,6 +172,7 @@ in {
- eustachius
cron: '0 4 * * *' # Every Day at 4:00
hooks:
<<: *default_hooks
before:
- arion -f ${arionPath}/passwords/arion-compose.nix -p ${arionPath}/passwords/arion-pkgs.nix stop
after:
@ -161,6 +184,7 @@ in {
- eustachius
cron: '0 4 * * 0' # Every Sunday at 4:00
hooks:
<<: *default_hooks
before:
- arion -f ${arionPath}/matrix/arion-compose.nix -p ${arionPath}/matrix/arion-pkgs.nix stop
after:
@ -171,6 +195,7 @@ in {
- eustachius
cron: '0 4 * * 0' # Every Sunday at 4:00
hooks:
<<: *default_hooks
before:
- arion -f ${arionPath}/media/arion-compose.nix -p ${arionPath}/media/arion-pkgs.nix stop
after:
@ -181,6 +206,7 @@ in {
- zfs
cron: '55 3 * * *' # Every Day at 3:55
hooks:
<<: *default_hooks
before:
- arion -f ${arionPath}/headscale/arion-compose.nix -p ${arionPath}/headscale/arion-pkgs.nix stop
after:
@ -189,9 +215,11 @@ in {
from: /storage/dataset/docker/auth
to:
- zfs
- ssd
- eustachius
cron: '55 3 * * *' # Every Day at 3:55
hooks:
<<: *default_hooks
before:
- arion -f ${arionPath}/auth/arion-compose.nix -p ${arionPath}/auth/arion-pkgs.nix stop
after:
@ -207,7 +235,7 @@ in {
key: '${config.sops.placeholder."autorestic/ssd_key"}'
eustachius:
type: rest
path: http://100.64.0.3:8000/Backups
path: http://100.64.0.3:8000/franz
key: '${config.sops.placeholder."autorestic/eustachius_key"}'
'';
};
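A note on the `&default_hooks` / `<<: *default_hooks` lines above: this is a plain YAML anchor plus merge key, so the shared hooks are defined once under extras and merged into each location's hooks mapping. A stripped-down illustration (the location and paths here are made up, not part of the config above):

extras:
  default_hooks: &default_hooks   # anchor: define the shared hooks once
    failure:
      - echo "backup failed" >> /var/log/autorestic.log
locations:
  example:
    from: /srv/example
    to:
      - zfs
    hooks:
      <<: *default_hooks          # merge key: pull the shared hooks into this mapping
      before:
        - echo "stopping containers"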

hosts/franz/samba.nix (new file, 57 additions)
View file

@ -0,0 +1,57 @@
{
services.samba = {
enable = true;
securityType = "user";
openFirewall = true;
extraConfig = ''
workgroup = WORKGROUP
server string = franz
netbios name = franz
security = user
#use sendfile = yes
#max protocol = smb2
# note: localhost is the ipv6 localhost ::1
hosts allow = 192.168.178. 127.0.0.1 localhost
hosts deny = 0.0.0.0/0
guest account = nobody
map to guest = bad user
# debugging
# log file = /var/log/samba/log.%m
# max log size = 1000
# logging = file
'';
# Run sudo smbpasswd -a <username> to set the smb password for an EXISTING linux user
shares = {
software = {
path = "/storage/dataset/data/torrents/misc";
browseable = "yes";
"read only" = "yes";
"guest ok" = "no";
"create mask" = "0644";
"directory mask" = "0755";
"force user" = "ghoscht";
"force group" = "users";
};
max = {
path = "/storage/dataset/nas/max";
browseable = "yes";
"read only" = "no";
"guest ok" = "no";
"valid users" = "max";
"create mask" = "0644";
"directory mask" = "0755";
"force user" = "ghoscht";
"force group" = "users";
};
};
};
services.samba-wsdd = {
enable = true;
openFirewall = true;
};
networking.firewall.allowPing = true;
}

View file

@ -1,114 +0,0 @@
version: '3'
services:
traefik:
image: traefik
container_name: traefik
restart: always
ports:
- "80:80"
- "443:443"
- "6666:8080"
volumes:
- ./traefik_data:/etc/traefik
- /var/run/docker.sock:/var/run/docker.sock:ro
networks:
traefik_net:
labels:
- traefik.enable=true
- traefik.http.routers.dashboard.rule=Host(`traefik.ghoscht.com`)
- traefik.http.routers.dashboard.entrypoints=websecure
- traefik.http.services.dashboard.loadbalancer.server.port=8080
- traefik.http.routers.dashboard.tls=true
- traefik.http.routers.dashboard.tls.certresolver=lencrypt
env_file:
- traefik.env
dns:
- 1.1.1.1
homarr:
container_name: homarr
image: ghcr.io/ajnart/homarr:latest
restart: always
volumes:
- ./homarr_data:/app/data/configs
- ./homarr_icons:/app/public/imgs
networks:
traefik_net:
labels:
- traefik.enable=true
- traefik.http.routers.homarr.entrypoints=websecure
- traefik.http.routers.homarr.rule=Host(`dashboard.ghoscht.com`)
- traefik.http.routers.homarr.tls=true
- traefik.http.routers.homarr.tls.certresolver=lencrypt
dns:
- 1.1.1.1
scrutiny:
container_name: scrutiny
image: ghcr.io/analogj/scrutiny:master-omnibus
restart: always
cap_add:
- SYS_RAWIO
volumes:
- /run/udev:/run/udev:ro
- ./scrutiny_data:/opt/scrutiny/config
- ./scrutiny_db:/opt/scrutiny/influxdb
labels:
- traefik.enable=true
- traefik.http.routers.scrutiny.entrypoints=websecure
- traefik.http.routers.scrutiny.rule=Host(`scrutiny.ghoscht.com`)
- traefik.http.services.scrutiny.loadbalancer.server.port=8080
- traefik.http.routers.scrutiny.tls=true
- traefik.http.routers.scrutiny.tls.certresolver=lencrypt
networks:
traefik_net:
devices:
- "/dev/sda"
- "/dev/sdb"
ntfy:
image: binwiederhier/ntfy
container_name: ntfy
command:
- serve
environment:
- TZ=UTC # optional: set desired timezone
user: 1000:1000 # optional: replace with your own user/group or uid/gid
volumes:
- ./ntfy_data/server.yml:/etc/ntfy/server.yml
labels:
- traefik.enable=true
- traefik.http.routers.ntfy.entrypoints=websecure
- traefik.http.routers.ntfy.rule=Host(`ntfy.ghoscht.com`,`ntfy.local.ghoscht.com`)
- traefik.http.routers.ntfy.tls=true
- traefik.http.routers.ntfy.tls.certresolver=lencrypt
networks:
traefik_net:
homeassistant:
container_name: homeassistant
image: "ghcr.io/home-assistant/home-assistant:stable"
volumes:
- /mnt/hdd/docker/home-assistant_data:/config
- /etc/localtime:/etc/localtime:ro
- /run/dbus:/run/dbus:ro
restart: unless-stopped
privileged: true
labels:
- traefik.enable=true
- traefik.http.routers.homeassistant.entrypoints=websecure
- traefik.http.routers.homeassistant.rule=Host(`home.ghoscht.com`,`home.local.ghoscht.com`)
- traefik.http.routers.homeassistant.tls=true
- traefik.http.routers.homeassistant.tls.certresolver=lencrypt
- traefik.http.services.homeassistant.loadbalancer.server.port=8123
networks:
traefik_net:
cloudflared:
container_name: cloudflared
image: cloudflare/cloudflared:latest
restart: always
command: tunnel --no-autoupdate --protocol http2 run
env_file:
- cloudflared.env
networks:
traefik_net:
networks:
traefik_net:
name: traefik-net
external: true

View file

@ -0,0 +1,6 @@
http:
middlewares:
crowdsec-bouncer:
forwardauth:
address: http://bouncer-traefik:8080/api/v1/forwardAuth
trustForwardHeader: true

View file

@ -1,6 +0,0 @@
http:
middlewares:
httpsredirect:
redirectScheme:
scheme: https
permanent: true

View file

@ -1,9 +0,0 @@
http:
routers:
redirecttohttps:
entryPoints:
- "web"
middlewares:
- "httpsredirect"
rule: "HostRegexp(`{host:.+}`)"
service: "noop@internal"

View file

@ -1,363 +1,8 @@
# ntfy server config file
#
# Please refer to the documentation at https://ntfy.sh/docs/config/ for details.
# All options also support underscores (_) instead of dashes (-) to comply with the YAML spec.
# Public facing base URL of the service (e.g. https://ntfy.sh or https://ntfy.example.com)
#
# This setting is required for any of the following features:
# - attachments (to return a download URL)
# - e-mail sending (for the topic URL in the email footer)
# - iOS push notifications for self-hosted servers (to calculate the Firebase poll_request topic)
# - Matrix Push Gateway (to validate that the pushkey is correct)
#
#Basics
base-url: https://ntfy.ghoscht.com
# Listen address for the HTTP & HTTPS web server. If "listen-https" is set, you must also
# set "key-file" and "cert-file". Format: [<ip>]:<port>, e.g. "1.2.3.4:8080".
#
# To listen on all interfaces, you may omit the IP address, e.g. ":443".
# To disable HTTP, set "listen-http" to "-".
#
# listen-http: ":80"
# listen-https:
# Listen on a Unix socket, e.g. /var/lib/ntfy/ntfy.sock
# This can be useful to avoid port issues on local systems, and to simplify permissions.
#
# listen-unix: <socket-path>
# listen-unix-mode: <linux permissions, e.g. 0700>
# Path to the private key & cert file for the HTTPS web server. Not used if "listen-https" is not set.
#
# key-file: <filename>
# cert-file: <filename>
# If set, also publish messages to a Firebase Cloud Messaging (FCM) topic for your app.
# This is optional and only required to save battery when using the Android app.
#
# firebase-key-file: <filename>
# If "cache-file" is set, messages are cached in a local SQLite database instead of only in-memory.
# This allows for service restarts without losing messages in support of the since= parameter.
#
# The "cache-duration" parameter defines the duration for which messages will be buffered
# before they are deleted. This is required to support the "since=..." and "poll=1" parameter.
# To disable the cache entirely (on-disk/in-memory), set "cache-duration" to 0.
# The cache file is created automatically, provided that the correct permissions are set.
#
# The "cache-startup-queries" parameter allows you to run commands when the database is initialized,
# e.g. to enable WAL mode (see https://phiresky.github.io/blog/2020/sqlite-performance-tuning/)).
# Example:
# cache-startup-queries: |
# pragma journal_mode = WAL;
# pragma synchronous = normal;
# pragma temp_store = memory;
# pragma busy_timeout = 15000;
# vacuum;
#
# The "cache-batch-size" and "cache-batch-timeout" parameter allow enabling async batch writing
# of messages. If set, messages will be queued and written to the database in batches of the given
# size, or after the given timeout. This is only required for high volume servers.
#
# Debian/RPM package users:
# Use /var/cache/ntfy/cache.db as cache file to avoid permission issues. The package
# creates this folder for you.
#
# Check your permissions:
# If you are running ntfy with systemd, make sure this cache file is owned by the
# ntfy user and group by running: chown ntfy.ntfy <filename>.
#
# cache-file: <filename>
# cache-duration: "12h"
# cache-startup-queries:
# cache-batch-size: 0
# cache-batch-timeout: "0ms"
# If set, access to the ntfy server and API can be controlled on a granular level using
# the 'ntfy user' and 'ntfy access' commands. See the --help pages for details, or check the docs.
#
# - auth-file is the SQLite user/access database; it is created automatically if it doesn't already exist
# - auth-default-access defines the default/fallback access if no access control entry is found; it can be
# set to "read-write" (default), "read-only", "write-only" or "deny-all".
# - auth-startup-queries allows you to run commands when the database is initialized, e.g. to enable
# WAL mode. This is similar to cache-startup-queries. See above for details.
#
# Debian/RPM package users:
# Use /var/lib/ntfy/user.db as user database to avoid permission issues. The package
# creates this folder for you.
#
# Check your permissions:
# If you are running ntfy with systemd, make sure this user database file is owned by the
# ntfy user and group by running: chown ntfy.ntfy <filename>.
#
# auth-file: <filename>
# auth-default-access: "read-write"
# auth-startup-queries:
# If set, the X-Forwarded-For header is used to determine the visitor IP address
# instead of the remote address of the connection.
#
# WARNING: If you are behind a proxy, you must set this, otherwise all visitors are rate limited
# as if they are one.
#
# behind-proxy: false
# If enabled, clients can attach files to notifications as attachments. Minimum settings to enable attachments
# are "attachment-cache-dir" and "base-url".
#
# - attachment-cache-dir is the cache directory for attached files
# - attachment-total-size-limit is the limit of the on-disk attachment cache directory (total size)
# - attachment-file-size-limit is the per-file attachment size limit (e.g. 300k, 2M, 100M)
# - attachment-expiry-duration is the duration after which uploaded attachments will be deleted (e.g. 3h, 20h)
#
# attachment-cache-dir:
# attachment-total-size-limit: "5G"
# attachment-file-size-limit: "15M"
# attachment-expiry-duration: "3h"
# If enabled, allow outgoing e-mail notifications via the 'X-Email' header. If this header is set,
# messages will additionally be sent out as e-mail using an external SMTP server.
#
# As of today, only SMTP servers with plain text auth (or no auth at all), and STARTLS are supported.
# Please also refer to the rate limiting settings below (visitor-email-limit-burst & visitor-email-limit-burst).
#
# - smtp-sender-addr is the hostname:port of the SMTP server
# - smtp-sender-from is the e-mail address of the sender
# - smtp-sender-user/smtp-sender-pass are the username and password of the SMTP user (leave blank for no auth)
#
# smtp-sender-addr:
# smtp-sender-from:
# smtp-sender-user:
# smtp-sender-pass:
# If enabled, ntfy will launch a lightweight SMTP server for incoming messages. Once configured, users can send
# emails to a topic e-mail address to publish messages to a topic.
#
# - smtp-server-listen defines the IP address and port the SMTP server will listen on, e.g. :25 or 1.2.3.4:25
# - smtp-server-domain is the e-mail domain, e.g. ntfy.sh
# - smtp-server-addr-prefix is an optional prefix for the e-mail addresses to prevent spam. If set to "ntfy-",
# for instance, only e-mails to ntfy-$topic@ntfy.sh will be accepted. If this is not set, all emails to
# $topic@ntfy.sh will be accepted (which may obviously be a spam problem).
#
# smtp-server-listen:
# smtp-server-domain:
# smtp-server-addr-prefix:
# Web Push support (background notifications for browsers)
#
# If enabled, allows ntfy to receive push notifications, even when the ntfy web app is closed. When enabled, users
# can enable background notifications in the web app. Once enabled, ntfy will forward published messages to the push
# endpoint, which will then forward it to the browser.
#
# You must configure web-push-public/private key, web-push-file, and web-push-email-address below to enable Web Push.
# Run "ntfy webpush keys" to generate the keys.
#
# - web-push-public-key is the generated VAPID public key, e.g. AA1234BBCCddvveekaabcdfqwertyuiopasdfghjklzxcvbnm1234567890
# - web-push-private-key is the generated VAPID private key, e.g. AA2BB1234567890abcdefzxcvbnm1234567890
# - web-push-file is a database file to keep track of browser subscription endpoints, e.g. `/var/cache/ntfy/webpush.db`
# - web-push-email-address is the admin email address send to the push provider, e.g. `sysadmin@example.com`
# - web-push-startup-queries is an optional list of queries to run on startup`
#
# web-push-public-key:
# web-push-private-key:
# web-push-file:
# web-push-email-address:
# web-push-startup-queries:
# If enabled, ntfy can perform voice calls via Twilio via the "X-Call" header.
#
# - twilio-account is the Twilio account SID, e.g. AC12345beefbeef67890beefbeef122586
# - twilio-auth-token is the Twilio auth token, e.g. affebeef258625862586258625862586
# - twilio-phone-number is the outgoing phone number you purchased, e.g. +18775132586
# - twilio-verify-service is the Twilio Verify service SID, e.g. VA12345beefbeef67890beefbeef122586
#
# twilio-account:
# twilio-auth-token:
# twilio-phone-number:
# twilio-verify-service:
# Interval in which keepalive messages are sent to the client. This is to prevent
# intermediaries closing the connection for inactivity.
#
# Note that the Android app has a hardcoded timeout at 77s, so it should be less than that.
#
# keepalive-interval: "45s"
# Interval in which the manager prunes old messages, deletes topics
# and prints the stats.
#
# manager-interval: "1m"
# Defines topic names that are not allowed, because they are otherwise used. There are a few default topics
# that cannot be used (e.g. app, account, settings, ...). To extend the default list, define them here.
#
# Example:
# disallowed-topics:
# - about
# - pricing
# - contact
#
# disallowed-topics:
# Defines the root path of the web app, or disables the web app entirely.
#
# Can be any simple path, e.g. "/", "/app", or "/ntfy". For backwards-compatibility reasons,
# the values "app" (maps to "/"), "home" (maps to "/app"), or "disable" (maps to "") to disable
# the web app entirely.
#
# web-root: /
# Various feature flags used to control the web app, and API access, mainly around user and
# account management.
#
# - enable-signup allows users to sign up via the web app, or API
# - enable-login allows users to log in via the web app, or API
# - enable-reservations allows users to reserve topics (if their tier allows it)
#
# enable-signup: false
# enable-login: false
# enable-reservations: false
# Server URL of a Firebase/APNS-connected ntfy server (likely "https://ntfy.sh").
#
# iOS users:
# If you use the iOS ntfy app, you MUST configure this to receive timely notifications. You'll like want this:
# upstream-base-url: "https://ntfy.sh"
#
# If set, all incoming messages will publish a "poll_request" message to the configured upstream server, containing
# the message ID of the original message, instructing the iOS app to poll this server for the actual message contents.
# This is to prevent the upstream server and Firebase/APNS from being able to read the message.
#
# - upstream-base-url is the base URL of the upstream server. Should be "https://ntfy.sh".
# - upstream-access-token is the token used to authenticate with the upstream server. This is only required
# if you exceed the upstream rate limits, or the uptream server requires authentication.
#
# upstream-base-url:
# upstream-access-token:
# Rate limiting: Total number of topics before the server rejects new topics.
#
# global-topic-limit: 15000
# Rate limiting: Number of subscriptions per visitor (IP address)
#
# visitor-subscription-limit: 30
# Rate limiting: Allowed GET/PUT/POST requests per second, per visitor:
# - visitor-request-limit-burst is the initial bucket of requests each visitor has
# - visitor-request-limit-replenish is the rate at which the bucket is refilled
# - visitor-request-limit-exempt-hosts is a comma-separated list of hostnames, IPs or CIDRs to be
# exempt from request rate limiting. Hostnames are resolved at the time the server is started.
# Example: "1.2.3.4,ntfy.example.com,8.7.6.0/24"
#
# visitor-request-limit-burst: 60
# visitor-request-limit-replenish: "5s"
# visitor-request-limit-exempt-hosts: ""
# Rate limiting: Hard daily limit of messages per visitor and day. The limit is reset
# every day at midnight UTC. If the limit is not set (or set to zero), the request
# limit (see above) governs the upper limit.
#
# visitor-message-daily-limit: 0
# Rate limiting: Allowed emails per visitor:
# - visitor-email-limit-burst is the initial bucket of emails each visitor has
# - visitor-email-limit-replenish is the rate at which the bucket is refilled
#
# visitor-email-limit-burst: 16
# visitor-email-limit-replenish: "1h"
# Rate limiting: Attachment size and bandwidth limits per visitor:
# - visitor-attachment-total-size-limit is the total storage limit used for attachments per visitor
# - visitor-attachment-daily-bandwidth-limit is the total daily attachment download/upload traffic limit per visitor
#
# visitor-attachment-total-size-limit: "100M"
# visitor-attachment-daily-bandwidth-limit: "500M"
# Rate limiting: Enable subscriber-based rate limiting (mostly used for UnifiedPush)
#
# If enabled, subscribers may opt to have published messages counted against their own rate limits, as opposed
# to the publisher's rate limits. This is especially useful to increase the amount of messages that high-volume
# publishers (e.g. Matrix/Mastodon servers) are allowed to send.
#
# Once enabled, a client may send a "Rate-Topics: <topic1>,<topic2>,..." header when subscribing to topics via
# HTTP stream, or websockets, thereby registering itself as the "rate visitor", i.e. the visitor whose rate limits
# to use when publishing on this topic. Note: Setting the rate visitor requires READ-WRITE permission on the topic.
#
# UnifiedPush only: If this setting is enabled, publishing to UnifiedPush topics will lead to a HTTP 507 response if
# no "rate visitor" has been previously registered. This is to avoid burning the publisher's "visitor-message-daily-limit".
#
# visitor-subscriber-rate-limiting: false
# Payments integration via Stripe
#
# - stripe-secret-key is the key used for the Stripe API communication. Setting this values
# enables payments in the ntfy web app (e.g. Upgrade dialog). See https://dashboard.stripe.com/apikeys.
# - stripe-webhook-key is the key required to validate the authenticity of incoming webhooks from Stripe.
# Webhooks are essential up keep the local database in sync with the payment provider. See https://dashboard.stripe.com/webhooks.
# - billing-contact is an email address or website displayed in the "Upgrade tier" dialog to let people reach
# out with billing questions. If unset, nothing will be displayed.
#
# stripe-secret-key:
# stripe-webhook-key:
# billing-contact:
# Metrics
#
# ntfy can expose Prometheus-style metrics via a /metrics endpoint, or on a dedicated listen IP/port.
# Metrics may be considered sensitive information, so before you enable them, be sure you know what you are
# doing, and/or secure access to the endpoint in your reverse proxy.
#
# - enable-metrics enables the /metrics endpoint for the default ntfy server (i.e. HTTP, HTTPS and/or Unix socket)
# - metrics-listen-http exposes the metrics endpoint via a dedicated [IP]:port. If set, this option implicitly
# enables metrics as well, e.g. "10.0.1.1:9090" or ":9090"
#
# enable-metrics: false
# metrics-listen-http:
# Profiling
#
# ntfy can expose Go's net/http/pprof endpoints to support profiling of the ntfy server. If enabled, ntfy will listen
# on a dedicated listen IP/port, which can be accessed via the web browser on http://<ip>:<port>/debug/pprof/.
# This can be helpful to expose bottlenecks, and visualize call flows. See https://pkg.go.dev/net/http/pprof for details.
#
# profile-listen-http:
# Logging options
#
# By default, ntfy logs to the console (stderr), with an "info" log level, and in a human-readable text format.
# ntfy supports five different log levels, can also write to a file, log as JSON, and even supports granular
# log level overrides for easier debugging. Some options (log-level and log-level-overrides) can be hot reloaded
# by calling "kill -HUP $pid" or "systemctl reload ntfy".
#
# - log-format defines the output format, can be "text" (default) or "json"
# - log-file is a filename to write logs to. If this is not set, ntfy logs to stderr.
# - log-level defines the default log level, can be one of "trace", "debug", "info" (default), "warn" or "error".
# Be aware that "debug" (and particularly "trace") can be VERY CHATTY. Only turn them on briefly for debugging purposes.
# - log-level-overrides lets you override the log level if certain fields match. This is incredibly powerful
# for debugging certain parts of the system (e.g. only the account management, or only a certain visitor).
# This is an array of strings in the format:
# - "field=value -> level" to match a value exactly, e.g. "tag=manager -> trace"
# - "field -> level" to match any value, e.g. "time_taken_ms -> debug"
# Warning: Using log-level-overrides has a performance penalty. Only use it for temporary debugging.
#
# Check your permissions:
# If you are running ntfy with systemd, make sure this log file is owned by the
# ntfy user and group by running: chown ntfy.ntfy <filename>.
#
# Example (good for production):
# log-level: info
# log-format: json
# log-file: /var/log/ntfy.log
#
# Example level overrides (for debugging, only use temporarily):
# log-level-overrides:
# - "tag=manager -> trace"
# - "visitor_ip=1.2.3.4 -> debug"
# - "time_taken_ms -> debug"
#
# log-level: info
# log-level-overrides:
# log-format: text
# log-file:
behind-proxy: true
#Auth
auth-file: /etc/ntfy/data/user.db
# auth-default-access: "read-only"
enable-signup: false
enable-login: true

View file

@ -31,6 +31,7 @@ autorestic:
zfs_key: ENC[AES256_GCM,data:HyZBD202BoG6ncw37Tg9LPvfvQPnOaLJKk+gMvdZflt+XZ/7lx6TZOp/loiDhSSBTMusAXaI/aDkAFx2a7yDUQ==,iv:nQAHi9TyUXamSlFq99NYvWLOBSuZstuYNJLgVpxF1JU=,tag:mIS/E4Wr6IdWsZtehNY7UA==,type:str]
ssd_key: ENC[AES256_GCM,data:xgJCpNkmIn8VU+jG++0kLW8WM9RbTBmsZeOuOz1WWmc4sOdN4lWfPvLjcTAHZDIXFvX7NodEcGAYDmcWNw7QBw==,iv:wGJcz7CEjhwsUlVEyuHOBcayzE97PfWi2f0TvITzafg=,tag:wpaJFcQBd/kAmExfD6fwJQ==,type:str]
eustachius_key: ENC[AES256_GCM,data:qiq6Y05bV7mf0OOBDzR09MrW5g01WxmWVHB3vJ04XQaOVMGzl7hZq0ewcLOxitbFw3VcN5GQBpA8smlmahz8VA==,iv:epq7+tXG9QYAjNu8qHI2gjBYUuoPNdZg8+2XCLOwu1Q=,tag:qM8YdSZhwwM3GDrNPfo/Jg==,type:str]
ntfy_access_token: ENC[AES256_GCM,data:BH1/tNYDj2ggzdNByQDYT0cu9hGTgaGEjXUv4HdqO1M=,iv:nq+frAIoNr8uCwGadOqdAP90kjukVTSq3Tc8hWbCi3Q=,tag:r92qKR78keQVgI/VMXipvw==,type:str]
matrix:
postgres_database: ENC[AES256_GCM,data:9O0vYjbTuQ==,iv:L5QCwhFSjPW0OiUMjCQo6BcLktUXJcqTsTXEi5JdaWo=,tag:LUPRSZl0pza5WOWI8RrAmw==,type:str]
postgres_user: ENC[AES256_GCM,data:S9ksmTOAbBg=,iv:q/6Oo9JhiSAqQq3ZKa0dbQGtfYAuD0oeiDLR4YwV0nk=,tag:RIc/1UVs88Jg8+4zGnW6vQ==,type:str]
@ -45,6 +46,26 @@ auth:
homarr:
oidc_client_secret: ENC[AES256_GCM,data:ykaMgcS1x/sMFPmi9vF8RdS7Dj8tTpNFybqwJ5MkK3OCIqYt5FtY8si7ZbKC4IMquOA4w3fWpHdygvFJwJOyNNvznWuasR1afhaAHIHb85J41GWCpMLWWZub+NUuU2pSudvUYk9LeDUBTKwtfHgr4DUzoQeBocG0httGFKBAXbo=,iv:vThB7ZCgEB5yQoiOYhDcHiGm0lYXy1LCJWunH5HwFq0=,tag:68jkMBnCc2e3bKWR/Hnnww==,type:str]
oidc_client_id: ENC[AES256_GCM,data:2KxgJ7rFNru7rf8P9v/LOcA7TjH2ZFerc4PBmetrkB7hre9fHTa+TQ==,iv:9k0YuPNzEjTTBN0l/oyT5mtZKLCGWZ7ZJpE8g2SBu3E=,tag:C/hzffeOVgke1SQZHPjyrA==,type:str]
minio:
root_user: ENC[AES256_GCM,data:Q5yRACtvoQ==,iv:GTLtwwQ5W50w6eDO+PuihNAHWm6xyM9uNa8mbGG3tWI=,tag:O3MUlh2d8iuFTPRq1PvTWw==,type:str]
root_password: ENC[AES256_GCM,data:0//dfGYkV80=,iv:h1b0R2QRpN/RI9kUBU0fiKLOI3PUYmisa7RH1ibSF4c=,tag:ln1cv5LQpb76vK5+eTvSuA==,type:str]
diun:
ntfy_access_token: ENC[AES256_GCM,data:37UYgaMlmpoMW74LqtxkuMqGQmCvLpVdJAgEmVxSULY=,iv:tZPlfIgo1vWvMPlQzCBPXj5xYDiTWJOsVwkxBjGNMDk=,tag:882g2UxFfg5VSKqAtEMk2Q==,type:str]
crowdsec:
traefik_bouncer_api_key: ENC[AES256_GCM,data:qNY3cWNxG2pyrTN1UnYCGWCmx1Yue1WAJZ8DEsLqnZ+RDoaJfvqqJazJUg==,iv:x0K9Vq+ZuojmeHSbS/0PoOQdLIRDMtGdmU+msv4PWzI=,tag:qgxQIBHtARTNv17x7N6zyw==,type:str]
stats:
oidc_client_id: ENC[AES256_GCM,data:/0Y/qLyxGTKskcoQVdlQkEYHa1P7+0PYwv1GoXV5r48btzpPHYysLA==,iv:QT6GM3I38/kSDrzm5phPWnGQxjds0qamduYuIvj4dig=,tag:yGnM4jOwDtC81jrXUG6r+w==,type:str]
oidc_client_secret: ENC[AES256_GCM,data:ETl5Lm8GSk/xwD9+TZZlPwNA8CxdQ2teyjWVWShXrx0o0qdE72lIBnW7mW9bklx1RMhSBvhArZPMA9fFN29nCJ4E9zXNTxFFviHUZTr+8mdm5g9TYu4WJxiJ3rzIavgx4DQR0FIQyXzXXMSoLDpOl+u4oT8vfb3ef4bKIDktBGU=,iv:KMy70+IA8KKj4mjB4sV3uXg8iDjponO+AzYlNYvv3pE=,tag:WMsUg0PNILBz1jNyV6PggQ==,type:str]
wiki:
aws_access_key_id: ENC[AES256_GCM,data:Fqfa6XcDDpQ0l+/entQh6sxobBM=,iv:gbfHxTy0Oj9xYlucpN98CjNIURDrx9BuFF4Pfo90V0M=,tag:df8Z3J2ovO1MHPnzOsCtpg==,type:str]
aws_secret_access_key: ENC[AES256_GCM,data:sbgzvlN5dP4jZIGKtDsMn5o2RqWTl+XNi80ydnOgrQkgnQ/HxluWWA==,iv:xyCKfbf/UF9cFunCYHwVBw4eVvOeZQtfPtrz2s6zIII=,tag:S0wzL8d5iEn20VbOVfrZBw==,type:str]
oidc_client_id: ENC[AES256_GCM,data:SSuRQJfgzeb641U2eeVE8wYZAbEWHYSSx0b8n4687FHLslFPGCAWeA==,iv:khCwIE50KVEtHJoDJBdCBJIDVZiDjkCS2D4yUt3AEOQ=,tag:JjVil9C2HHdTH1fDzDAJkg==,type:str]
oidc_client_secret: ENC[AES256_GCM,data:6TgTZsfaBdsismhK/lAiayMU8uIFOCmumV9tzmqNSocbqQgKAuEwgXTisMtndsk64JA2NYCS2DXhe+NSBO++aBscZ/hbqxBNWqw7c84YugqXMRFeqidb+RSJKdJ6WDmwGBfGm6/kjGJ+FSGuiu1S4sfOlfp2bXM5mhvgAXUygfg=,iv:Q9QoWp2V6uwFJidsL7QzB67TO4uFsqmun8zdZgRXbNI=,tag:wphiixaGZgsmk4sQFyvqbg==,type:str]
secret_key: ENC[AES256_GCM,data:Xr4iRj2oYJYVBBzIOsT6d4LjQo/M+qy7XVoNoM9vQWeWZuZlCnrdy7cfsa6VfHVnPNfdMIccmvBk3VzdDH1ukA==,iv:62LiqANdqrSMGzgaxL3uxgwyZtZd1XrsYEMF/ixt+lM=,tag:OH4xth89KDKONnbniM3itg==,type:str]
utils_secret: ENC[AES256_GCM,data:M062FvE0kFVyjkxIlolLtR/NwIya1Si7r/im1SDLvGNIHn4kDgat5KTHitjjMFMOKeSKT7ipgHc/lWCQYbi6IA==,iv:LWLOt+vZF0xK68LJTw1xWIWG65pkGiMnx/oMRBzeyyQ=,tag:SXKu3UO6IKupBVfvAwCtHw==,type:str]
db_user: ENC[AES256_GCM,data:g2+KPA==,iv:0I7EoGNlnnKf5H0UnmJ++9XDHEqZpXgZkyaW9flxN8c=,tag:b3WrfHGkxIJ1nNFp3FHAjA==,type:str]
db_pass: ENC[AES256_GCM,data:rYmNXQ==,iv:ZnImkMdIkp92jkojLVBSGSN06my3xFwr3AFfENNXgfQ=,tag:AZHqXRLfJ0lFrGyut+Sdug==,type:str]
db_name: ENC[AES256_GCM,data:Ns7vKJxeTw==,iv:GREMMRicS+1n/uk+KOeplqHn/ZdjjOjQ4d0qV5FICy8=,tag:CSeDTNjBiJ4G2VnytpNXiw==,type:str]
sops:
kms: []
gcp_kms: []
@ -60,8 +81,8 @@ sops:
VUUxcEhvYi8zeXlCUUViUTl0eWdhcU0KXOfbnDc+zc8lnBcyEAV5EiJSjcSU6AgI
EfeRw8qVqwChrYn1agslcNnDbE0WQsOCBuA6cE4V3kRofp9HU949ig==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-05-18T21:12:01Z"
mac: ENC[AES256_GCM,data:kBGP7V4f8d8JWdMdwPEYM1L2zZ4p6eHfwiepfLpBAr0VyhE9YOpPIdt9Tl+ky3mRyfn/DnX03ThiAKQtTrls3/lJEmJRd1dswRd+Mtls3j1QlxhorHYb8g6QvlmyepNf5j5Egqm9hNX+L3aV29mKoO42VxvfaopKduNGt1BrSFo=,iv:Uq+hQUMF+PBV5f6V9AsnxIxX0fKn84MAPEfTFtOtsus=,tag:6LtblCK7FLnhfS0dHsrcnQ==,type:str]
lastmodified: "2024-08-09T13:53:16Z"
mac: ENC[AES256_GCM,data:5pANdrfnPuDf2mai0UgcFbwr4OzjLzLWraKOt38fX2MySYH2EryMzsk4prhehXPTkD3soMFwaVbuuqZUbkWCWM3CtjuyCisQH4uiZZw+slw6g8atr4h3tpHtD2SwgGVESMJouVQyfb9ko4O1ArBvml/0a6DAGmwoxlQwGboZR5M=,iv:oiZx4BsRBNAn+hjhzhV6oVZrYQJ32DAQlyNNsevaLpc=,tag:A0EsGeaP5vy9vA8WZjbxIQ==,type:str]
pgp: []
unencrypted_suffix: _unencrypted
version: 3.8.1