Compare commits
No commits in common. "4dd1cefb30ac2fabc61e06da7a44759c58e2e6f5" and "c0769b53c88e02636a22a9c709ad100a936ea2b4" have entirely different histories.
4dd1cefb30 ... c0769b53c8
31 changed files with 619 additions and 794 deletions

@@ -12,7 +12,6 @@
commit.gpgsign = true;
user.signingkey = "0x2C2C1C62A5388E82";
init.defaultBranch = "main";
pull.rebase = false; # merge by default
};
lfs.enable = true;
aliases = {

@@ -7,7 +7,6 @@ in {
imports = [
./global
./features/coding/nvim
./features/coding/tmux.nix
inputs.nix-colors.homeManagerModules.default
];

@@ -1,5 +1,5 @@
let
authentikImage = "ghcr.io/goauthentik/server:2024.6";
authentikImage = "ghcr.io/goauthentik/server:2024.4.2";
in {
project.name = "auth";

@@ -20,7 +20,7 @@ in {
owner = vars.user;
};

sops.templates."auth-postgres.env" = {
sops.templates."postgres.env" = {
path = "/home/${vars.user}/.docker/auth/postgres.env";
owner = vars.user;
mode = "0775";

@@ -31,7 +31,7 @@ in {
'';
};

sops.templates."auth-authentik.env" = {
sops.templates."authentik.env" = {
path = "/home/${vars.user}/.docker/auth/authentik.env";
owner = vars.user;
mode = "0775";

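Note: the renamed templates above ("auth-postgres.env" vs. "postgres.env", "auth-authentik.env" vs. "authentik.env") both follow the sops-nix template pattern used throughout this repo, where secret placeholders are substituted into a file at activation time. A minimal sketch of that pattern, with an illustrative secret name that is not taken from this diff:

sops.secrets."example/db_password" = {owner = vars.user;};

sops.templates."example.env" = {
  path = "/home/${vars.user}/.docker/example/example.env";
  owner = vars.user;
  mode = "0775";
  content = ''
    DB_PASSWORD="${config.sops.placeholder."example/db_password"}"
  '';
};
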
@@ -8,6 +8,7 @@
inputs.arion.nixosModules.arion
./dns
./infrastructure
./nas
./nextcloud
./push
./git

@@ -20,9 +21,6 @@
./matrix
./headscale
./auth
./minio
./stats
./wiki
];

environment.systemPackages = with pkgs; [arion];

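Each directory listed in the imports above wires one arion project into the host in the same way; the nas module added later in this diff is representative of the pattern:

virtualisation.arion = {
  projects.nas.settings = {
    imports = [./arion-compose.nix];
  };
};

(Replace nas with the respective project name; the per-project arion-compose.nix then defines project.name, networks, and services.)
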
@@ -10,7 +10,7 @@

services = {
forgejo.service = {
image = "codeberg.org/forgejo/forgejo:7.0.5";
image = "codeberg.org/forgejo/forgejo:7.0.3";
container_name = "forgejo";
useHostStore = true;
labels = {

@@ -30,12 +30,6 @@
"traefik.http.routers.forgejo-external.entrypoints" = "websecure-external";
"traefik.http.routers.forgejo-external.tls" = "true";
"traefik.http.routers.forgejo-external.tls.certresolver" = "letsencrypt";

"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.sort_tags" = "semver";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
"diun.exclude_tags" = "\\b\\d{4,}\\b";
};
volumes = [
"/storage/dataset/docker/git/forgejo_data:/data"

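The diun labels in this hunk recur on most services below; for reference, the include/exclude patterns pin plain semver tags and skip long numeric (e.g. date-stamped) tags, and the doubled dollar sign is presumably there so that variable interpolation in the generated compose file leaves the regex anchor intact:

"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";  # only x.y.z tags; "$$" escapes "$"
"diun.exclude_tags" = "\\b\\d{4,}\\b";          # skip tags containing 4+ consecutive digits
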
@@ -7,7 +7,9 @@
};

docker-compose.volumes = {
traefik-logs = null;
traefik_letsencrypt = null;
scrutiny_data = null;
scrutiny_db = null;
};

services = {

@@ -29,19 +31,11 @@
"traefik.http.services.dashboard.loadbalancer.server.port" = "8080";
"traefik.http.routers.dashboard.tls" = "true";
"traefik.http.routers.dashboard.tls.certresolver" = "letsencrypt";

"traefik.http.routers.dashboard.tls.domains[0].main" = "ghoscht.com";
"traefik.http.routers.dashboard.tls.domains[0].sans" = "*.ghoscht.com";

"traefik.http.middlewares.traefik-https-redirect.redirectscheme.scheme" = "https";
"traefik.http.middlewares.sslheader.headers.customrequestheaders.X-Forwarded-Proto" = "https";
};
volumes = [
"/home/ghoscht/.docker/infrastructure/traefik_config/traefik.yml:/traefik.yml:ro"
"/home/ghoscht/.docker/infrastructure/traefik_data/config.yml:/config.yml:ro"
"/storage/dataset/docker/infrastructure/traefik_data/acme.json:/acme.json"
"traefik_letsencrypt:/letsencrypt"
"/home/ghoscht/.docker/infrastructure/traefik_data:/etc/traefik"
"/var/run/docker.sock:/var/run/docker.sock:ro"
"traefik-logs:/var/log/traefik"
];
env_file = [
"/home/ghoscht/.docker/infrastructure/traefik.env"

@@ -51,46 +45,18 @@
"dmz"
];
};
crowdsec.service = {
image = "crowdsecurity/crowdsec:v1.6.2";
container_name = "crowdsec";
environment = {
GID = "1000";
COLLECTIONS = "crowdsecurity/linux crowdsecurity/traefik firix/authentik LePresidente/gitea Dominic-Wagner/vaultwarden";
};
volumes = [
"/storage/dataset/docker/infrastructure/crowdsec_config/acquis.yaml:/etc/crowdsec/acquis.yaml"
"/storage/dataset/docker/infrastructure/crowdsec_config/profiles.yaml:/etc/crowdsec/profiles.yaml"
"/storage/dataset/docker/infrastructure/crowdsec_config/ntfy.yaml:/etc/crowdsec/notifications/ntfy.yaml"
"/storage/dataset/docker/infrastructure/crowdsec_db:/var/lib/crowdsec/data/"
"/storage/dataset/docker/infrastructure/crowdsec_data:/etc/crowdsec/"
"traefik-logs:/var/log/traefik/:ro"
"/var/run/docker.sock:/var/run/docker.sock:ro"
];
depends_on = [
"traefik"
];
networks = [
"dmz"
];
restart = "always";
};
bouncer-traefik.service = {
image = "fbonalair/traefik-crowdsec-bouncer:0.5.0";
environment = {
CROWDSEC_AGENT_HOST = "crowdsec:8080";
};
env_file = [
"/home/ghoscht/.docker/infrastructure/traefik-bouncer.env"
];
depends_on = [
"crowdsec"
];
networks = [
"dmz"
];
restart = "always";
};
# cloudflared.service = {
# image = "cloudflare/cloudflared:2024.2.1";
# container_name = "cloudflared";
# env_file = [
# "/home/ghoscht/.docker/infrastructure/cloudflared.env"
# ];
# restart = "always";
# command = "tunnel --no-autoupdate --protocol http2 run";
# networks = [
# "dmz"
# ];
# };
scrutiny.service = {
image = "ghcr.io/analogj/scrutiny:v0.8.0-omnibus";
container_name = "scrutiny";

@@ -109,11 +75,11 @@
};
volumes = [
"/run/udev:/run/udev:ro"
"/storage/dataset/docker/infrastructure/scrutiny_data:/opt/scrutiny/config"
"/storage/dataset/docker/infrastructure/scrutiny_influxdb_data:/opt/scrutiny/influxdb"
"scrutiny_data:/opt/scrutiny/config"
"scrutiny_db:/opt/scrutiny/influxdb"
];
devices = [
"/dev/nvme0"
"/dev/nvme0n1"
"/dev/sda"
"/dev/sdb"
"/dev/sdc"

@@ -125,36 +91,16 @@
"dmz"
];
};
diun.service = {
image = "crazymax/diun:4.28";
container_name = "diun";
dyndns.service = {
image = "ghcr.io/cromefire/fritzbox-cloudflare-dyndns:1.2.1";
container_name = "dyndns";
restart = "always";
command = "serve";
volumes = [
"/storage/dataset/docker/infrastructure/diun_data:/data"
"/var/run/docker.sock:/var/run/docker.sock"
];
ports = ["8888:8080"];
environment = {
TZ = "Europe/Berlin";
LOG_LEVEL = "info";
#Only when setting workers=1 sorting can be actually observed
DIUN_WATCH_WORKERS = "20";
DIUN_WATCH_SCHEDULE = "0 */6 * * *";
DIUN_WATCH_JITTER = "30s";
DIUN_WATCH_RUNONSTARTUP = "true";
DIUN_PROVIDERS_DOCKER = "true";

DIUN_DEFAULTS_MAXTAGS = 1;
DIUN_DEFAULTS_NOTIFYON = "new";

DIUN_NOTIF_NTFY_ENDPOINT = "http://ntfy";
DIUN_NOTIF_NTFY_TOPIC = "docker-updates";
CLOUDFLARE_ZONES_IPV4 = "ghoscht.com";
};
env_file = [
"/home/ghoscht/.docker/infrastructure/diun.env"
];
networks = [
"dmz"
"/home/ghoscht/.docker/infrastructure/dyndns.env"
];
};
};

@@ -21,11 +21,7 @@ in {
owner = vars.user;
};

sops.secrets."crowdsec/traefik_bouncer_api_key" = {
owner = vars.user;
};

sops.secrets."diun/ntfy_access_token" = {
sops.secrets."dyndns/cloudflare_api_key" = {
owner = vars.user;
};

@@ -48,84 +44,47 @@ in {
'';
};

sops.templates."traefik-bouncer.env" = {
path = "/home/${vars.user}/.docker/infrastructure/traefik-bouncer.env";
sops.templates."dyndns.env" = {
path = "/home/${vars.user}/.docker/infrastructure/dyndns.env";
owner = vars.user;
mode = "0775";
content = ''
CROWDSEC_BOUNCER_API_KEY="${config.sops.placeholder."crowdsec/traefik_bouncer_api_key"}"
CLOUDFLARE_API_TOKEN="${config.sops.placeholder."dyndns/cloudflare_api_key"}"
'';
};

sops.templates."traefik.yml" = {
path = "/home/${vars.user}/.docker/infrastructure/traefik_config/traefik.yml";
sops.templates."traefik.toml" = {
path = "/home/${vars.user}/.docker/infrastructure/traefik_data/traefik.toml";
owner = vars.user;
mode = "0775";
content = ''
api:
  dashboard: true
  debug: true
  insecure: true
entryPoints:
  web:
    address: ":80"
    http:
      redirections:
        entrypoint:
          to: websecure
          scheme: https
  websecure:
    address: ":443"
  web-external:
    address: ":81"
    http:
      redirections:
        entrypoint:
          to: websecure-external
          scheme: https
      middlewares:
        - crowdsec-bouncer@file
  websecure-external:
    address: ":444"
    http:
      middlewares:
        - crowdsec-bouncer@file
providers:
  docker:
    watch: true
    exposedByDefault: false
    network: dmz
  file:
    filename: /config.yml
certificatesResolvers:
  letsencrypt:
    acme:
      email: ${config.sops.placeholder."traefik/acme_email"}
      storage: acme.json
      dnsChallenge:
        provider: cloudflare
        resolvers:
          - "1.1.1.1:53"
          - "1.0.0.1:53"
log:
  level: "INFO"
  filePath: "/var/log/traefik/traefik.log"
accessLog:
  filePath: "/var/log/traefik/access.log"
[entryPoints]
[entryPoints.web]
address = ":80"
[entryPoints.web-external]
address = ":81"
[entryPoints.websecure]
address = ":443"
[entryPoints.websecure-external]
address = ":444"
[api]
dashboard = true
insecure = true

[certificatesResolvers.letsencrypt.acme]
email = "${config.sops.placeholder."traefik/acme_email"}"
storage = "/letsencrypt/acme.json"
[certificatesResolvers.letsencrypt.acme.dnsChallenge]
provider = "cloudflare"
resolvers = ["1.1.1.1:53", "1.0.0.1:53"]

[serversTransport]
insecureSkipVerify = true

[providers.docker]
watch = true
network = "dmz"
exposedByDefault = false # overriden by traefik.enable=true
'';
};
sops.templates."diun.env" = {
path = "/home/${vars.user}/.docker/infrastructure/diun.env";
owner = vars.user;
mode = "0775";
content = ''
DIUN_NOTIF_NTFY_TOKEN="${config.sops.placeholder."diun/ntfy_access_token"}"
'';
};
services.cron = {
enable = true;
systemCronJobs = [
"0 * * * * root . /etc/profile; docker exec crowdsec cscli hub update && docker exec crowdsec cscli hub upgrade >> /var/log/crowdsec-update.log"
];
};
}

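Both the old YAML and the new TOML provider block above keep exposedByDefault off, so nothing is routed unless a container opts in through labels, which is exactly what the per-service label blocks elsewhere in this diff do. A minimal hedged sketch of the opt-in shape (service name and host are illustrative, not from this diff):

services.example.service.labels = {
  "traefik.enable" = "true";
  "traefik.docker.network" = "dmz";
  "traefik.http.routers.example.rule" = "Host(`example.ghoscht.com`)";
  "traefik.http.routers.example.entrypoints" = "websecure";
  "traefik.http.services.example.loadbalancer.server.port" = "8080";
};
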
@@ -5,11 +5,10 @@
name = "dmz";
external = true;
};
networks.internal = {};

services = {
jellyfin.service = {
image = "linuxserver/jellyfin:10.9.7";
image = "linuxserver/jellyfin:10.9.1";
container_name = "jellyfin";
ports = [
"8096:8096"

@@ -22,12 +21,6 @@
"traefik.http.services.jellyfin.loadbalancer.passHostHeader" = "true";
"traefik.http.routers.jellyfin.tls" = "true";
"traefik.http.routers.jellyfin.tls.certresolver" = "letsencrypt";

"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.sort_tags" = "semver";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
"diun.exclude_tags" = "\\b\\d{4,}\\b";
};
volumes = [
"/storage/dataset/docker/media/jellyfin_data:/config"

@@ -50,27 +43,11 @@
container_name = "navidrome";
labels = {
"traefik.enable" = "true";
"traefik.docker.network" = "dmz";

"traefik.http.services.navidrome.loadbalancer.server.port" = "4533";
"traefik.http.routers.navidrome.service" = "navidrome";
"traefik.http.routers.navidrome.entrypoints" = "websecure";
"traefik.http.routers.navidrome.rule" = "Host(`music.ghoscht.com`)";
"traefik.http.routers.navidrome.rule" = "Host(`navidrome.ghoscht.com`)";
"traefik.http.services.navidrome.loadbalancer.server.port" = "4533";
"traefik.http.routers.navidrome.tls" = "true";
"traefik.http.routers.navidrome.tls.certresolver" = "letsencrypt";

"traefik.http.services.navidrome-external.loadbalancer.server.port" = "4533";
"traefik.http.routers.navidrome-external.service" = "navidrome-external";
"traefik.http.routers.navidrome-external.rule" = "Host(`music.ghoscht.com`)";
"traefik.http.routers.navidrome-external.entrypoints" = "websecure-external";
"traefik.http.routers.navidrome-external.tls" = "true";
"traefik.http.routers.navidrome-external.tls.certresolver" = "letsencrypt";

"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.sort_tags" = "semver";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
"diun.exclude_tags" = "\\b\\d{4,}\\b";
};
volumes = [
"/storage/dataset/docker/media/navidrome_data:/data"

@@ -88,7 +65,7 @@
];
};
kavita.service = {
image = "jvmilazz0/kavita:0.8.1";
image = "jvmilazz0/kavita:0.7.14";
container_name = "kavita";
labels = {
"traefik.enable" = "true";

@@ -101,7 +78,6 @@
volumes = [
"/storage/dataset/docker/media/kavita_data:/kavita/config"
"/storage/dataset/data/media/manga:/manga"
"/storage/dataset/data/media/comics:/comics"
];
restart = "always";
networks = [

@@ -129,9 +105,9 @@
PGID = 1000;
TZ = "Europe/Berlin";
OPENVPN_PROVIDER = "WINDSCRIBE";
OPENVPN_CONFIG = "Amsterdam-Tulip-udp";
OPENVPN_CONFIG = "Vienna-Hofburg-udp";
OVPN_PROTOCOL = "udp";
OPENVPN_OPTS = "--reneg-sec 0 --verb 4";
OPENVPN_OPTS = "--pull-filter ignore ping --ping 10 --ping-restart 120";
LOCAL_NETWORK = "192.168.0.0/16";
TRANSMISSION_DOWNLOAD_DIR = "/data/torrents";
TRANSMISSION_INCOMPLETE_DIR = "/data/torrents/incomplete";

@@ -148,11 +124,10 @@
restart = "always";
networks = [
"dmz"
"internal"
];
};
prowlarr.service = {
image = "linuxserver/prowlarr:1.19.0";
image = "linuxserver/prowlarr:1.16.2";
container_name = "prowlarr";
labels = {
"traefik.enable" = "true";

@@ -162,10 +137,6 @@
"traefik.docker.network" = "dmz";
"traefik.http.routers.prowlarr.tls" = "true";
"traefik.http.routers.prowlarr.tls.certresolver" = "letsencrypt";

"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
};
volumes = [
"/storage/dataset/docker/media/prowlarr_data:/config"

@@ -182,7 +153,7 @@
restart = "always";
};
sonarr.service = {
image = "linuxserver/sonarr:4.0.6";
image = "linuxserver/sonarr:4.0.4";
container_name = "sonarr";
labels = {
"traefik.enable" = "true";

@@ -192,10 +163,6 @@
"traefik.docker.network" = "dmz";
"traefik.http.routers.sonarr.tls" = "true";
"traefik.http.routers.sonarr.tls.certresolver" = "letsencrypt";

"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
};
volumes = [
"/storage/dataset/docker/media/sonarr_data:/config"

@@ -214,7 +181,7 @@
restart = "always";
};
radarr.service = {
image = "linuxserver/radarr:5.7.0";
image = "linuxserver/radarr:5.4.6";
container_name = "radarr";
labels = {
"traefik.enable" = "true";

@@ -224,10 +191,6 @@
"traefik.docker.network" = "dmz";
"traefik.http.routers.radarr.tls" = "true";
"traefik.http.routers.radarr.tls.certresolver" = "letsencrypt";

"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
};
volumes = [
"/storage/dataset/docker/media/radarr_data:/config"

@@ -246,7 +209,7 @@
restart = "always";
};
lidarr.service = {
image = "linuxserver/lidarr:2.3.3";
image = "linuxserver/lidarr:2.2.5";
container_name = "lidarr";
labels = {
"traefik.enable" = "true";

@@ -257,12 +220,6 @@
"traefik.docker.network" = "dmz";
"traefik.http.routers.lidarr.tls" = "true";
"traefik.http.routers.lidarr.tls.certresolver" = "letsencrypt";

"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";

# "diun.max_tags" = "10";
};
volumes = [
"/storage/dataset/docker/media/lidarr_data:/config"

@@ -330,7 +287,7 @@
restart = "always";
};
autobrr.service = {
image = "ghcr.io/autobrr/autobrr:v1.43.0";
image = "ghcr.io/autobrr/autobrr:v1.41.0";
container_name = "autobrr";
labels = {
"traefik.enable" = "true";

@@ -340,10 +297,6 @@
"traefik.docker.network" = "dmz";
"traefik.http.routers.autobrr.tls" = "true";
"traefik.http.routers.autobrr.tls.certresolver" = "letsencrypt";

"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.include_tags" = "^v\\d+\\.\\d+\\.\\d+$$";
};
volumes = [
"/storage/dataset/docker/media/autobrr_data:/config"

@@ -441,18 +394,5 @@
};
restart = "always";
};
port-refresh.service = {
image = "ghoscht/windscribe-ephemeral-port:latest";
container_name = "port-refresh";
volumes = [
"/storage/dataset/docker/media/port-refresh_config/config.yml:/config/config.yaml"
];
networks = [
"internal"
];
depends_on = {
vpn = {condition = "service_healthy";};
};
};
};
}

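The port-refresh service at the end of this file only starts once the vpn container reports healthy; the compose-level dependency, as arion expresses it here, is just:

depends_on = {
  vpn = {condition = "service_healthy";};
};

For service_healthy to ever be reached, the vpn service needs a healthcheck defined (not shown in this hunk).
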
@@ -1,48 +0,0 @@
{
project.name = "minio";

networks.dmz = {
name = "dmz";
external = true;
};

services = {
minio.service = {
image = "bitnami/minio:2024.5.10";
container_name = "minio";
labels = {
"traefik.enable" = "true";

# API
"traefik.http.routers.minio.rule" = "Host(`files.ghoscht.com`)";
"traefik.http.routers.minio.service" = "minio";
"traefik.http.routers.minio.entrypoints" = "websecure";
"traefik.http.services.minio.loadbalancer.server.port" = "9000";
"traefik.http.routers.minio.tls" = "true";
"traefik.http.routers.minio.tls.certresolver" = "letsencrypt";

# Dashboard
"traefik.http.routers.minio-dash.rule" = "Host(`minio.ghoscht.com`)";
"traefik.http.routers.minio-dash.service" = "minio-dash";
"traefik.http.routers.minio-dash.entrypoints" = "websecure";
"traefik.http.services.minio-dash.loadbalancer.server.port" = "9001";
"traefik.http.routers.minio-dash.tls" = "true";
"traefik.http.routers.minio-dash.tls.certresolver" = "letsencrypt";
};
volumes = [
"/storage/dataset/docker/minio/minio_data:/data"
];
environment = {
MINIO_DATA_DIR = "/data";
MINIO_BROWSER_REDIRECT_URL = "https://minio.ghoscht.com";
};
env_file = [
"/home/ghoscht/.docker/minio/minio.env"
];
restart = "unless-stopped";
networks = [
"dmz"
];
};
};
}

@@ -1,25 +0,0 @@
{config, ...}: let
vars = import ../../../../vars.nix;
in {
virtualisation.arion = {
projects.minio.settings = {
imports = [./arion-compose.nix];
};
};
sops.secrets."minio/root_user" = {
owner = vars.user;
};
sops.secrets."minio/root_password" = {
owner = vars.user;
};

sops.templates."minio.env" = {
path = "/home/${vars.user}/.docker/minio/minio.env";
owner = vars.user;
mode = "0775";
content = ''
MINIO_ROOT_USER="${config.sops.placeholder."minio/root_user"}"
MINIO_ROOT_PASSWORD="${config.sops.placeholder."minio/root_password"}"
'';
};
}

34 hosts/franz/arion/nas/arion-compose.nix Normal file

@@ -0,0 +1,34 @@
{pkgs, ...}: {
project.name = "nas";

networks.dmz = {
name = "dmz";
external = true;
};

services = {
samba.service = {
image = "dperson/samba";
container_name = "samba";
ports = [
"137:137/udp"
"138:138/udp"
"139:139/tcp"
"445:445/tcp"
];
environment = {
USERID = 1000;
GROUPID = 1000;
TZ = "Europe/Berlin";
};
command = "-s 'public;/mount;yes;no;yes' -p";
volumes = [
"/storage/dataset/nas:/mount"
];
restart = "always";
networks = [
"dmz"
];
};
};
}

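As in the other projects in this diff, networks.dmz is declared with external = true, so arion expects a docker network named dmz to already exist instead of creating one per project. How that network is created is not shown in this diff; one hedged sketch, mirroring the oneshot systemd-service style used elsewhere in this repo, could look like:

systemd.services.create-dmz-network = {
  wantedBy = ["multi-user.target"];
  after = ["docker.service"];
  serviceConfig.Type = "oneshot";
  script = ''
    # create the shared "dmz" network once if it does not exist yet (illustrative, not from this diff)
    ${config.virtualisation.docker.package}/bin/docker network inspect dmz >/dev/null 2>&1 || \
      ${config.virtualisation.docker.package}/bin/docker network create dmz
  '';
};
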
12 hosts/franz/arion/nas/default.nix Normal file

@@ -0,0 +1,12 @@
{
networking.firewall = {
allowedUDPPorts = [137 138];
allowedTCPPorts = [139 445];
};

virtualisation.arion = {
projects.nas.settings = {
imports = [./arion-compose.nix];
};
};
}

@@ -8,31 +8,14 @@

services = {
vaultwarden.service = {
image = "vaultwarden/server:1.31.0";
image = "vaultwarden/server:1.30.5";
container_name = "vaultwarden";
labels = {
"traefik.enable" = "true";
"traefik.docker.network" = "dmz";

"traefik.http.services.vaultwarden.loadbalancer.server.port" = "80";
"traefik.http.routers.vaultwarden.service" = "vaultwarden";
"traefik.http.routers.vaultwarden.entrypoints" = "websecure";
"traefik.http.routers.vaultwarden.rule" = "Host(`vault.ghoscht.com`)";
"traefik.http.routers.vaultwarden.rule" = "Host(`vaultwarden.ghoscht.com`)";
"traefik.http.routers.vaultwarden.tls" = "true";
"traefik.http.routers.vaultwarden.tls.certresolver" = "letsencrypt";

"traefik.http.services.vaultwarden-external.loadbalancer.server.port" = "80";
"traefik.http.routers.vaultwarden-external.service" = "vaultwarden-external";
"traefik.http.routers.vaultwarden-external.rule" = "Host(`vault.ghoscht.com`)";
"traefik.http.routers.vaultwarden-external.entrypoints" = "websecure-external";
"traefik.http.routers.vaultwarden-external.tls" = "true";
"traefik.http.routers.vaultwarden-external.tls.certresolver" = "letsencrypt";

"diun.enable" = "true";
"diun.watch_repo" = "true";
"diun.sort_tags" = "semver";
"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
"diun.exclude_tags" = "\\b\\d{4,}\\b";
};
volumes = [
"/storage/dataset/docker/passwords/vaultwarden_data/:/data"

@@ -32,7 +32,6 @@
};
volumes = [
"/home/ghoscht/.docker/push/ntfy_data/server.yml:/etc/ntfy/server.yml"
"/storage/dataset/docker/push/ntfy_data:/etc/ntfy/data"
];
environment = {
TZ = "Europe/Berlin";

@ -1,120 +0,0 @@
|
|||
{
|
||||
project.name = "stats";
|
||||
|
||||
networks.dmz = {
|
||||
name = "dmz";
|
||||
external = true;
|
||||
};
|
||||
networks.internal = {};
|
||||
|
||||
services = {
|
||||
grafana.service = {
|
||||
image = "grafana/grafana:10.4.4";
|
||||
user = "1000";
|
||||
container_name = "grafana";
|
||||
labels = {
|
||||
"traefik.enable" = "true";
|
||||
|
||||
"traefik.http.services.grafana.loadbalancer.server.port" = "3000";
|
||||
"traefik.http.routers.grafana.service" = "grafana";
|
||||
"traefik.http.routers.grafana.rule" = "Host(`grafana.ghoscht.com`)";
|
||||
"traefik.http.routers.grafana.entrypoints" = "websecure";
|
||||
"traefik.http.routers.grafana.tls" = "true";
|
||||
"traefik.http.routers.grafana.tls.certresolver" = "letsencrypt";
|
||||
};
|
||||
environment = {
|
||||
GF_SERVER_ROOT_URL = "https://grafana.ghoscht.com";
|
||||
|
||||
GF_AUTH_GENERIC_OAUTH_NAME = "authentik";
|
||||
GF_AUTH_GENERIC_OAUTH_ENABLED = "true";
|
||||
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP = "true";
|
||||
GF_AUTH_GENERIC_OAUTH_SCOPES = "openid profile email";
|
||||
GF_AUTH_GENERIC_OAUTH_AUTH_URL = "https://auth.ghoscht.com/application/o/authorize/";
|
||||
GF_AUTH_GENERIC_OAUTH_TOKEN_URL = "https://auth.ghoscht.com/application/o/token/";
|
||||
GF_AUTH_GENERIC_OAUTH_API_URL = "https://auth.ghoscht.com/application/o/userinfo/";
|
||||
|
||||
# GF_AUTH_OAUTH_AUTO_LOGIN = "true";
|
||||
};
|
||||
env_file = [
|
||||
"/home/ghoscht/.docker/stats/grafana.env"
|
||||
];
|
||||
volumes = [
|
||||
"/storage/dataset/docker/stats/grafana_data:/var/lib/grafana"
|
||||
];
|
||||
networks = [
|
||||
"dmz"
|
||||
"internal"
|
||||
];
|
||||
};
|
||||
loki.service = {
|
||||
image = "grafana/loki:3.0.0";
|
||||
volumes = [
|
||||
"/storage/dataset/docker/stats/loki_data:/etc/loki"
|
||||
];
|
||||
ports = [
|
||||
"3100:3100"
|
||||
];
|
||||
command = "-config.file=/etc/loki/loki-config.yml";
|
||||
networks = [
|
||||
"internal"
|
||||
];
|
||||
};
|
||||
promtail.service = {
|
||||
image = "grafana/promtail:3.0.0";
|
||||
volumes = [
|
||||
"/var/log:/var/log"
|
||||
"/storage/dataset/docker/stats/promtail_data/promtail-config.yml:/etc/promtail/promtail-config.yml"
|
||||
];
|
||||
command = "-config.file=/etc/promtail/promtail-config.yml";
|
||||
networks = [
|
||||
"internal"
|
||||
];
|
||||
};
|
||||
prometheus.service = {
|
||||
image = "prom/prometheus:v2.53.0";
|
||||
volumes = [
|
||||
"/storage/dataset/docker/stats/prometheus_config/prometheus.yml:/etc/prometheus/prometheus.yml"
|
||||
"/storage/dataset/docker/stats/prometheus_data:/prometheus"
|
||||
];
|
||||
command = [
|
||||
"--config.file=/etc/prometheus/prometheus.yml"
|
||||
"--web.console.libraries=/etc/prometheus/console_libraries"
|
||||
"--web.console.templates=/etc/prometheus/consoles"
|
||||
];
|
||||
networks = [
|
||||
"internal"
|
||||
];
|
||||
};
|
||||
node-exporter.service = {
|
||||
image = "prom/node-exporter:v1.8.1";
|
||||
volumes = [
|
||||
"/proc:/host/proc:ro"
|
||||
"/sys:/host/sys:ro"
|
||||
"/:/rootfs:ro"
|
||||
];
|
||||
command = [
|
||||
"--path.procfs=/host/proc"
|
||||
"--path.rootfs=/rootfs"
|
||||
"--path.sysfs=/host/sys"
|
||||
"--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)"
|
||||
];
|
||||
networks = [
|
||||
"internal"
|
||||
];
|
||||
};
|
||||
# cadvisor.service = {
|
||||
# image = "gcr.io/cadvisor/cadvisor:v0.49.1";
|
||||
# volumes = [
|
||||
# "/:/rootfs:ro"
|
||||
# "/var/run:/var/run:ro"
|
||||
# "/sys:/sys:ro"
|
||||
# "/var/lib/docker:/var/lib/docker:ro"
|
||||
# "/dev/disk:/dev/disk:ro"
|
||||
# ];
|
||||
# devices = ["/dev/kmsg"];
|
||||
# networks = [
|
||||
# "internal"
|
||||
# ];
|
||||
# };
|
||||
};
|
||||
}
|
|
@ -1,6 +0,0 @@
|
|||
# Instead of pinning Nixpkgs, we can opt to use the one in NIX_PATH
|
||||
import <nixpkgs> {
|
||||
# We specify the architecture explicitly. Use a Linux remote builder when
|
||||
# calling arion from other platforms.
|
||||
system = "x86_64-linux";
|
||||
}
|
|
@ -1,55 +0,0 @@
|
|||
{config, ...}: let
|
||||
vars = import ../../../../vars.nix;
|
||||
in {
|
||||
virtualisation.arion = {
|
||||
projects.stats.settings = {
|
||||
imports = [./arion-compose.nix];
|
||||
};
|
||||
};
|
||||
|
||||
sops.secrets."stats/oidc_client_id" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
sops.secrets."stats/oidc_client_secret" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
|
||||
sops.templates."grafana.env" = {
|
||||
path = "/home/${vars.user}/.docker/stats/grafana.env";
|
||||
owner = vars.user;
|
||||
mode = "0775";
|
||||
content = ''
|
||||
GF_AUTH_GENERIC_OAUTH_CLIENT_ID="${config.sops.placeholder."stats/oidc_client_id"}"
|
||||
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET="${config.sops.placeholder."stats/oidc_client_secret"}"
|
||||
'';
|
||||
};
|
||||
|
||||
systemd.services.add-loki-logging-driver = {
|
||||
description = "Add grafana loki docker driver";
|
||||
after = ["network.target"];
|
||||
wantedBy = ["multi-user.target"];
|
||||
|
||||
serviceConfig.Type = "oneshot";
|
||||
script = let
|
||||
dockercli = "${config.virtualisation.docker.package}/bin/docker";
|
||||
in ''
|
||||
# Put a true at the end to prevent getting non-zero return code, which will
|
||||
# crash the whole service.
|
||||
check=$(${dockercli} plugin ls | grep "loki" || true)
|
||||
if [ -z "$check" ]; then
|
||||
${dockercli} plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
|
||||
else
|
||||
echo "loki docker driver already exists in docker"
|
||||
fi
|
||||
'';
|
||||
};
|
||||
|
||||
virtualisation.docker.daemon.settings = {
|
||||
debug = true;
|
||||
log-driver = "loki";
|
||||
log-opts = {
|
||||
loki-url = "http://localhost:3100/loki/api/v1/push";
|
||||
# loki-url = "http://host.docker.internal:3100/loki/api/v1/push";
|
||||
};
|
||||
};
|
||||
}
|
|
@ -1,82 +0,0 @@
|
|||
{
|
||||
project.name = "wiki";
|
||||
|
||||
networks.dmz = {
|
||||
name = "dmz";
|
||||
external = true;
|
||||
};
|
||||
networks.internal = {};
|
||||
|
||||
services = {
|
||||
outline.service = {
|
||||
image = "docker.getoutline.com/outlinewiki/outline:0.77.2";
|
||||
container_name = "outline";
|
||||
labels = {
|
||||
"traefik.enable" = "true";
|
||||
|
||||
"traefik.http.services.outline.loadbalancer.server.port" = "3000";
|
||||
"traefik.http.routers.outline.service" = "outline";
|
||||
"traefik.http.routers.outline.rule" = "Host(`wiki.ghoscht.com`)";
|
||||
"traefik.http.routers.outline.entrypoints" = "websecure";
|
||||
"traefik.http.routers.outline.tls" = "true";
|
||||
"traefik.http.routers.outline.tls.certresolver" = "letsencrypt";
|
||||
};
|
||||
environment = {
|
||||
NODE_ENV = "production";
|
||||
|
||||
PGSSLMODE = "disable";
|
||||
REDIS_URL = "redis://redis:6379";
|
||||
|
||||
URL = "https://wiki.ghoscht.com";
|
||||
PORT = 3000;
|
||||
|
||||
OIDC_AUTH_URI = "https://auth.ghoscht.com/application/o/authorize/";
|
||||
OIDC_TOKEN_URI = "https://auth.ghoscht.com/application/o/token/";
|
||||
OIDC_USERINFO_URI = "https://auth.ghoscht.com/application/o/userinfo/";
|
||||
|
||||
AWS_REGION = "local";
|
||||
AWS_S3_UPLOAD_BUCKET_URL = "https://files.ghoscht.com";
|
||||
AWS_S3_UPLOAD_BUCKET_NAME = "outline";
|
||||
AWS_S3_UPLOAD_MAX_SIZE = 26214400;
|
||||
AWS_S3_FORCE_PATH_STYLE = "true";
|
||||
AWS_S3_ACL = "private";
|
||||
};
|
||||
env_file = [
|
||||
"/home/ghoscht/.docker/wiki/outline.env"
|
||||
];
|
||||
restart = "always";
|
||||
depends_on = {
|
||||
redis = {condition = "service_healthy";};
|
||||
postgres = {condition = "service_healthy";};
|
||||
};
|
||||
networks = [
|
||||
"dmz"
|
||||
"internal"
|
||||
];
|
||||
};
|
||||
redis.service = {
|
||||
image = "redis:7.2.4";
|
||||
command = "--save 60 1 --loglevel warning";
|
||||
restart = "always";
|
||||
volumes = [
|
||||
"/storage/dataset/docker/wiki/redis_data:/data"
|
||||
];
|
||||
networks = [
|
||||
"internal"
|
||||
];
|
||||
};
|
||||
postgres.service = {
|
||||
image = "postgres:12.18";
|
||||
restart = "always";
|
||||
volumes = [
|
||||
"/storage/dataset/docker/wiki/postgres_data:/var/lib/postgresql/data"
|
||||
];
|
||||
networks = [
|
||||
"internal"
|
||||
];
|
||||
env_file = [
|
||||
"/home/ghoscht/.docker/wiki/postgres.env"
|
||||
];
|
||||
};
|
||||
};
|
||||
}
|
|
@ -1,6 +0,0 @@
|
|||
# Instead of pinning Nixpkgs, we can opt to use the one in NIX_PATH
|
||||
import <nixpkgs> {
|
||||
# We specify the architecture explicitly. Use a Linux remote builder when
|
||||
# calling arion from other platforms.
|
||||
system = "x86_64-linux";
|
||||
}
|
|
@ -1,67 +0,0 @@
|
|||
{config, ...}: let
|
||||
vars = import ../../../../vars.nix;
|
||||
in {
|
||||
virtualisation.arion = {
|
||||
projects.wiki.settings = {
|
||||
imports = [./arion-compose.nix];
|
||||
};
|
||||
};
|
||||
|
||||
sops.secrets."wiki/aws_access_key_id" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
sops.secrets."wiki/aws_secret_access_key" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
sops.secrets."wiki/oidc_client_id" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
sops.secrets."wiki/oidc_client_secret" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
sops.secrets."wiki/secret_key" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
sops.secrets."wiki/utils_secret" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
sops.secrets."wiki/db_user" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
sops.secrets."wiki/db_pass" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
sops.secrets."wiki/db_name" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
|
||||
sops.templates."wiki-postgres.env" = {
|
||||
path = "/home/${vars.user}/.docker/wiki/postgres.env";
|
||||
owner = vars.user;
|
||||
mode = "0775";
|
||||
content = ''
|
||||
POSTGRES_PASSWORD="${config.sops.placeholder."wiki/db_pass"}"
|
||||
POSTGRES_USER="${config.sops.placeholder."wiki/db_user"}"
|
||||
POSTGRES_DB="${config.sops.placeholder."wiki/db_name"}"
|
||||
'';
|
||||
};
|
||||
|
||||
sops.templates."wiki-outline.env" = {
|
||||
path = "/home/${vars.user}/.docker/wiki/outline.env";
|
||||
owner = vars.user;
|
||||
mode = "0775";
|
||||
content = ''
|
||||
SECRET_KEY="${config.sops.placeholder."wiki/secret_key"}"
|
||||
UTILS_SECRET="${config.sops.placeholder."wiki/utils_secret"}"
|
||||
|
||||
OIDC_CLIENT_ID="${config.sops.placeholder."wiki/oidc_client_id"}"
|
||||
OIDC_CLIENT_SECRET="${config.sops.placeholder."wiki/oidc_client_secret"}"
|
||||
|
||||
AWS_ACCESS_KEY_ID="${config.sops.placeholder."wiki/aws_access_key_id"}"
|
||||
AWS_SECRET_ACCESS_KEY="${config.sops.placeholder."wiki/aws_secret_access_key"}"
|
||||
|
||||
DATABASE_URL="postgres://${config.sops.placeholder."wiki/db_user"}:${config.sops.placeholder."wiki/db_pass"}@postgres:5432/${config.sops.placeholder."wiki/db_name"}"
|
||||
DATABASE_URL_TEST="postgres://${config.sops.placeholder."wiki/db_user"}:${config.sops.placeholder."wiki/db_pass"}@postgres:5432/${config.sops.placeholder."wiki/db_name"}"
|
||||
'';
|
||||
};
|
||||
}
|
|
@@ -21,17 +21,16 @@ in {
../common/optional/systemd-boot.nix
../common/optional/gnome-keyring.nix
../common/optional/docker.nix
../common/optional/vsftpd.nix
./sops.nix
./restic.nix
./arion
./hydra.nix
./samba.nix
];

# Enable ZFS
boot.supportedFilesystems = ["zfs"];
networking.hostId = "f014fc43";
services.zfs.autoScrub.enable = true;

systemd.enableEmergencyMode = false;
networking.firewall.enable = true;

@ -37,7 +37,7 @@ in {
|
|||
services.cron = {
|
||||
enable = true;
|
||||
systemCronJobs = [
|
||||
"*/5 * * * * root . /etc/profile; autorestic -c /home/ghoscht/.autorestic.yml --ci cron > /var/log/autorestic-bin.log"
|
||||
"*/5 * * * * root . /etc/profile; autorestic -c /home/ghoscht/.autorestic.yml --ci cron"
|
||||
];
|
||||
};
|
||||
|
||||
|
@ -52,9 +52,6 @@ in {
|
|||
sops.secrets."autorestic/eustachius_key" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
sops.secrets."autorestic/ntfy_access_token" = {
|
||||
owner = vars.user;
|
||||
};
|
||||
|
||||
sops.templates.".autorestic.yml" = {
|
||||
path = "/home/${vars.user}/.autorestic.yml";
|
||||
|
@ -66,15 +63,6 @@ in {
|
|||
forget:
|
||||
keep-weekly: 7
|
||||
keep-monthly: 12
|
||||
|
||||
extras:
|
||||
default_hooks: &default_hooks
|
||||
success:
|
||||
- echo "Backup of $AUTORESTIC_LOCATION successful! Added $AUTORESTIC_FILES_ADDED_0 files and changed $AUTORESTIC_FILES_CHANGED_0 files with a total size of $AUTORESTIC_ADDED_SIZE_0. Processed $AUTORESTIC_PROCESSED_FILES_0 files with total size $AUTORESTIC_PROCESSED_SIZE_0 in $AUTORESTIC_PROCESSED_DURATION_0. Snapshot $AUTORESTIC_SNAPSHOT_ID_0" >> /var/log/autorestic-backup.log
|
||||
failure:
|
||||
- echo "Backup of $AUTORESTIC_LOCATION failed" >> /var/log/autorestic.log
|
||||
- 'curl -H "Authorization: Bearer ${config.sops.placeholder."autorestic/ntfy_access_token"}" -H "X-Tags: warning" -H "X-Title: Backup Failure" -d "Backup of location $AUTORESTIC_LOCATION failed" https://push.ghoscht.com/autorestic'
|
||||
|
||||
locations:
|
||||
dashboard:
|
||||
from: /storage/dataset/docker/dashboard
|
||||
|
@ -83,7 +71,6 @@ in {
|
|||
- eustachius
|
||||
cron: '0 4 * * 0' # Every Sunday at 4:00
|
||||
hooks:
|
||||
<<: *default_hooks
|
||||
before:
|
||||
- arion -f ${arionPath}/dashboard/arion-compose.nix -p ${arionPath}/dashboard/arion-pkgs.nix stop
|
||||
after:
|
||||
|
@ -92,11 +79,9 @@ in {
|
|||
from: /storage/dataset/docker/dns
|
||||
to:
|
||||
- zfs
|
||||
- ssd
|
||||
- eustachius
|
||||
cron: '0 4 * * 0' # Every Sunday at 4:00
|
||||
hooks:
|
||||
<<: *default_hooks
|
||||
before:
|
||||
- arion -f ${arionPath}/dns/arion-compose.nix -p ${arionPath}/dns/arion-pkgs.nix stop
|
||||
after:
|
||||
|
@ -108,7 +93,6 @@ in {
|
|||
- eustachius
|
||||
cron: '0 4 * * 0' # Every Sunday at 4:00
|
||||
hooks:
|
||||
<<: *default_hooks
|
||||
before:
|
||||
- arion -f ${arionPath}/feed/arion-compose.nix -p ${arionPath}/feed/arion-pkgs.nix stop
|
||||
after:
|
||||
|
@ -121,7 +105,6 @@ in {
|
|||
- eustachius
|
||||
cron: '0 4 * * *' # Every Day at 4:00
|
||||
hooks:
|
||||
<<: *default_hooks
|
||||
before:
|
||||
- arion -f ${arionPath}/git/arion-compose.nix -p ${arionPath}/git/arion-pkgs.nix stop
|
||||
after:
|
||||
|
@ -130,11 +113,9 @@ in {
|
|||
from: /storage/dataset/docker/media
|
||||
to:
|
||||
- zfs
|
||||
- ssd
|
||||
- eustachius
|
||||
cron: '0 4 * * *' # Every Day at 4:00
|
||||
hooks:
|
||||
<<: *default_hooks
|
||||
before:
|
||||
- arion -f ${arionPath}/media/arion-compose.nix -p ${arionPath}/media/arion-pkgs.nix stop
|
||||
after:
|
||||
|
@ -143,11 +124,9 @@ in {
|
|||
from: /storage/dataset/docker/nextcloud
|
||||
to:
|
||||
- zfs
|
||||
- ssd
|
||||
- eustachius
|
||||
cron: '0 4 * * *' # Every Day at 4:00
|
||||
hooks:
|
||||
<<: *default_hooks
|
||||
before:
|
||||
- arion -f ${arionPath}/nextcloud/arion-compose.nix -p ${arionPath}/nextcloud/arion-pkgs.nix stop
|
||||
after:
|
||||
|
@ -159,7 +138,6 @@ in {
|
|||
- eustachius
|
||||
cron: '0 4 * * 0' # Every Sunday at 4:00
|
||||
hooks:
|
||||
<<: *default_hooks
|
||||
before:
|
||||
- arion -f ${arionPath}/smarthome/arion-compose.nix -p ${arionPath}/smarthome/arion-pkgs.nix stop
|
||||
after:
|
||||
|
@ -172,7 +150,6 @@ in {
|
|||
- eustachius
|
||||
cron: '0 4 * * *' # Every Day at 4:00
|
||||
hooks:
|
||||
<<: *default_hooks
|
||||
before:
|
||||
- arion -f ${arionPath}/passwords/arion-compose.nix -p ${arionPath}/passwords/arion-pkgs.nix stop
|
||||
after:
|
||||
|
@ -184,7 +161,6 @@ in {
|
|||
- eustachius
|
||||
cron: '0 4 * * 0' # Every Sunday at 4:00
|
||||
hooks:
|
||||
<<: *default_hooks
|
||||
before:
|
||||
- arion -f ${arionPath}/matrix/arion-compose.nix -p ${arionPath}/matrix/arion-pkgs.nix stop
|
||||
after:
|
||||
|
@ -195,7 +171,6 @@ in {
|
|||
- eustachius
|
||||
cron: '0 4 * * 0' # Every Sunday at 4:00
|
||||
hooks:
|
||||
<<: *default_hooks
|
||||
before:
|
||||
- arion -f ${arionPath}/media/arion-compose.nix -p ${arionPath}/media/arion-pkgs.nix stop
|
||||
after:
|
||||
|
@ -206,7 +181,6 @@ in {
|
|||
- zfs
|
||||
cron: '55 3 * * *' # Every Day at 3:55
|
||||
hooks:
|
||||
<<: *default_hooks
|
||||
before:
|
||||
- arion -f ${arionPath}/headscale/arion-compose.nix -p ${arionPath}/headscale/arion-pkgs.nix stop
|
||||
after:
|
||||
|
@ -215,11 +189,9 @@ in {
|
|||
from: /storage/dataset/docker/auth
|
||||
to:
|
||||
- zfs
|
||||
- ssd
|
||||
- eustachius
|
||||
cron: '55 3 * * *' # Every Day at 3:55
|
||||
hooks:
|
||||
<<: *default_hooks
|
||||
before:
|
||||
- arion -f ${arionPath}/auth/arion-compose.nix -p ${arionPath}/auth/arion-pkgs.nix stop
|
||||
after:
|
||||
|
@ -235,7 +207,7 @@ in {
|
|||
key: '${config.sops.placeholder."autorestic/ssd_key"}'
|
||||
eustachius:
|
||||
type: rest
|
||||
path: http://100.64.0.3:8000/franz
|
||||
path: http://100.64.0.3:8000/Backups
|
||||
key: '${config.sops.placeholder."autorestic/eustachius_key"}'
|
||||
'';
|
||||
};
|
||||
|
|
|
@ -1,57 +0,0 @@
|
|||
{
|
||||
services.samba = {
|
||||
enable = true;
|
||||
securityType = "user";
|
||||
openFirewall = true;
|
||||
extraConfig = ''
|
||||
workgroup = WORKGROUP
|
||||
server string = franz
|
||||
netbios name = franz
|
||||
security = user
|
||||
#use sendfile = yes
|
||||
#max protocol = smb2
|
||||
# note: localhost is the ipv6 localhost ::1
|
||||
hosts allow = 192.168.178. 127.0.0.1 localhost
|
||||
hosts deny = 0.0.0.0/0
|
||||
guest account = nobody
|
||||
map to guest = bad user
|
||||
|
||||
# debugging
|
||||
# log file = /var/log/samba/log.%m
|
||||
# max log size = 1000
|
||||
# logging = file
|
||||
'';
|
||||
|
||||
# Run sudo smbpasswd -a <username> to set the smb password for an EXISTING linux user
|
||||
shares = {
|
||||
software = {
|
||||
path = "/storage/dataset/data/torrents/misc";
|
||||
browseable = "yes";
|
||||
"read only" = "yes";
|
||||
"guest ok" = "no";
|
||||
"create mask" = "0644";
|
||||
"directory mask" = "0755";
|
||||
"force user" = "ghoscht";
|
||||
"force group" = "users";
|
||||
};
|
||||
max = {
|
||||
path = "/storage/dataset/nas/max";
|
||||
browseable = "yes";
|
||||
"read only" = "no";
|
||||
"guest ok" = "no";
|
||||
"valid users" = "max";
|
||||
"create mask" = "0644";
|
||||
"directory mask" = "0755";
|
||||
"force user" = "ghoscht";
|
||||
"force group" = "users";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.samba-wsdd = {
|
||||
enable = true;
|
||||
openFirewall = true;
|
||||
};
|
||||
|
||||
networking.firewall.allowPing = true;
|
||||
}
|
114
rsc/docker/franz/infrastructure/docker-compose.yml
Normal file
114
rsc/docker/franz/infrastructure/docker-compose.yml
Normal file
|
@ -0,0 +1,114 @@
|
|||
version: '3'
|
||||
services:
|
||||
traefik:
|
||||
image: traefik
|
||||
container_name: traefik
|
||||
restart: always
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
- "6666:8080"
|
||||
volumes:
|
||||
- ./traefik_data:/etc/traefik
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
networks:
|
||||
traefik_net:
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.http.routers.dashboard.rule=Host(`traefik.ghoscht.com`)
|
||||
- traefik.http.routers.dashboard.entrypoints=websecure
|
||||
- traefik.http.services.dashboard.loadbalancer.server.port=8080
|
||||
- traefik.http.routers.dashboard.tls=true
|
||||
- traefik.http.routers.dashboard.tls.certresolver=lencrypt
|
||||
env_file:
|
||||
- traefik.env
|
||||
dns:
|
||||
- 1.1.1.1
|
||||
homarr:
|
||||
container_name: homarr
|
||||
image: ghcr.io/ajnart/homarr:latest
|
||||
restart: always
|
||||
volumes:
|
||||
- ./homarr_data:/app/data/configs
|
||||
- ./homarr_icons:/app/public/imgs
|
||||
networks:
|
||||
traefik_net:
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.http.routers.homarr.entrypoints=websecure
|
||||
- traefik.http.routers.homarr.rule=Host(`dashboard.ghoscht.com`)
|
||||
- traefik.http.routers.homarr.tls=true
|
||||
- traefik.http.routers.homarr.tls.certresolver=lencrypt
|
||||
dns:
|
||||
- 1.1.1.1
|
||||
scrutiny:
|
||||
container_name: scrutiny
|
||||
image: ghcr.io/analogj/scrutiny:master-omnibus
|
||||
restart: always
|
||||
cap_add:
|
||||
- SYS_RAWIO
|
||||
volumes:
|
||||
- /run/udev:/run/udev:ro
|
||||
- ./scrutiny_data:/opt/scrutiny/config
|
||||
- ./scrutiny_db:/opt/scrutiny/influxdb
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.http.routers.scrutiny.entrypoints=websecure
|
||||
- traefik.http.routers.scrutiny.rule=Host(`scrutiny.ghoscht.com`)
|
||||
- traefik.http.services.scrutiny.loadbalancer.server.port=8080
|
||||
- traefik.http.routers.scrutiny.tls=true
|
||||
- traefik.http.routers.scrutiny.tls.certresolver=lencrypt
|
||||
networks:
|
||||
traefik_net:
|
||||
devices:
|
||||
- "/dev/sda"
|
||||
- "/dev/sdb"
|
||||
ntfy:
|
||||
image: binwiederhier/ntfy
|
||||
container_name: ntfy
|
||||
command:
|
||||
- serve
|
||||
environment:
|
||||
- TZ=UTC # optional: set desired timezone
|
||||
user: 1000:1000 # optional: replace with your own user/group or uid/gid
|
||||
volumes:
|
||||
- ./ntfy_data/server.yml:/etc/ntfy/server.yml
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.http.routers.ntfy.entrypoints=websecure
|
||||
- traefik.http.routers.ntfy.rule=Host(`ntfy.ghoscht.com`,`ntfy.local.ghoscht.com`)
|
||||
- traefik.http.routers.ntfy.tls=true
|
||||
- traefik.http.routers.ntfy.tls.certresolver=lencrypt
|
||||
networks:
|
||||
traefik_net:
|
||||
homeassistant:
|
||||
container_name: homeassistant
|
||||
image: "ghcr.io/home-assistant/home-assistant:stable"
|
||||
volumes:
|
||||
- /mnt/hdd/docker/home-assistant_data:/config
|
||||
- /etc/localtime:/etc/localtime:ro
|
||||
- /run/dbus:/run/dbus:ro
|
||||
restart: unless-stopped
|
||||
privileged: true
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.http.routers.homeassistant.entrypoints=websecure
|
||||
- traefik.http.routers.homeassistant.rule=Host(`home.ghoscht.com`,`home.local.ghoscht.com`)
|
||||
- traefik.http.routers.homeassistant.tls=true
|
||||
- traefik.http.routers.homeassistant.tls.certresolver=lencrypt
|
||||
- traefik.http.services.homeassistant.loadbalancer.server.port=8123
|
||||
networks:
|
||||
traefik_net:
|
||||
cloudflared:
|
||||
container_name: cloudflared
|
||||
image: cloudflare/cloudflared:latest
|
||||
restart: always
|
||||
command: tunnel --no-autoupdate --protocol http2 run
|
||||
env_file:
|
||||
- cloudflared.env
|
||||
networks:
|
||||
traefik_net:
|
||||
networks:
|
||||
traefik_net:
|
||||
name: traefik-net
|
||||
external: true
|
|
@@ -1,6 +0,0 @@
http:
  middlewares:
    crowdsec-bouncer:
      forwardauth:
        address: http://bouncer-traefik:8080/api/v1/forwardAuth
        trustForwardHeader: true

@@ -0,0 +1,6 @@
http:
  middlewares:
    httpsredirect:
      redirectScheme:
        scheme: https
        permanent: true

@@ -0,0 +1,9 @@
http:
  routers:
    redirecttohttps:
      entryPoints:
        - "web"
      middlewares:
        - "httpsredirect"
      rule: "HostRegexp(`{host:.+}`)"
      service: "noop@internal"

@ -1,8 +1,363 @@
|
|||
#Basics
|
||||
# ntfy server config file
|
||||
#
|
||||
# Please refer to the documentation at https://ntfy.sh/docs/config/ for details.
|
||||
# All options also support underscores (_) instead of dashes (-) to comply with the YAML spec.
|
||||
|
||||
# Public facing base URL of the service (e.g. https://ntfy.sh or https://ntfy.example.com)
|
||||
#
|
||||
# This setting is required for any of the following features:
|
||||
# - attachments (to return a download URL)
|
||||
# - e-mail sending (for the topic URL in the email footer)
|
||||
# - iOS push notifications for self-hosted servers (to calculate the Firebase poll_request topic)
|
||||
# - Matrix Push Gateway (to validate that the pushkey is correct)
|
||||
#
|
||||
base-url: https://ntfy.ghoscht.com
|
||||
behind-proxy: true
|
||||
#Auth
|
||||
auth-file: /etc/ntfy/data/user.db
|
||||
# auth-default-access: "read-only"
|
||||
enable-signup: false
|
||||
enable-login: true
|
||||
|
||||
# Listen address for the HTTP & HTTPS web server. If "listen-https" is set, you must also
|
||||
# set "key-file" and "cert-file". Format: [<ip>]:<port>, e.g. "1.2.3.4:8080".
|
||||
#
|
||||
# To listen on all interfaces, you may omit the IP address, e.g. ":443".
|
||||
# To disable HTTP, set "listen-http" to "-".
|
||||
#
|
||||
# listen-http: ":80"
|
||||
# listen-https:
|
||||
|
||||
# Listen on a Unix socket, e.g. /var/lib/ntfy/ntfy.sock
|
||||
# This can be useful to avoid port issues on local systems, and to simplify permissions.
|
||||
#
|
||||
# listen-unix: <socket-path>
|
||||
# listen-unix-mode: <linux permissions, e.g. 0700>
|
||||
|
||||
# Path to the private key & cert file for the HTTPS web server. Not used if "listen-https" is not set.
|
||||
#
|
||||
# key-file: <filename>
|
||||
# cert-file: <filename>
|
||||
|
||||
# If set, also publish messages to a Firebase Cloud Messaging (FCM) topic for your app.
|
||||
# This is optional and only required to save battery when using the Android app.
|
||||
#
|
||||
# firebase-key-file: <filename>
|
||||
|
||||
# If "cache-file" is set, messages are cached in a local SQLite database instead of only in-memory.
|
||||
# This allows for service restarts without losing messages in support of the since= parameter.
|
||||
#
|
||||
# The "cache-duration" parameter defines the duration for which messages will be buffered
|
||||
# before they are deleted. This is required to support the "since=..." and "poll=1" parameter.
|
||||
# To disable the cache entirely (on-disk/in-memory), set "cache-duration" to 0.
|
||||
# The cache file is created automatically, provided that the correct permissions are set.
|
||||
#
|
||||
# The "cache-startup-queries" parameter allows you to run commands when the database is initialized,
|
||||
# e.g. to enable WAL mode (see https://phiresky.github.io/blog/2020/sqlite-performance-tuning/)).
|
||||
# Example:
|
||||
# cache-startup-queries: |
|
||||
# pragma journal_mode = WAL;
|
||||
# pragma synchronous = normal;
|
||||
# pragma temp_store = memory;
|
||||
# pragma busy_timeout = 15000;
|
||||
# vacuum;
|
||||
#
|
||||
# The "cache-batch-size" and "cache-batch-timeout" parameter allow enabling async batch writing
|
||||
# of messages. If set, messages will be queued and written to the database in batches of the given
|
||||
# size, or after the given timeout. This is only required for high volume servers.
|
||||
#
|
||||
# Debian/RPM package users:
|
||||
# Use /var/cache/ntfy/cache.db as cache file to avoid permission issues. The package
|
||||
# creates this folder for you.
|
||||
#
|
||||
# Check your permissions:
|
||||
# If you are running ntfy with systemd, make sure this cache file is owned by the
|
||||
# ntfy user and group by running: chown ntfy.ntfy <filename>.
|
||||
#
|
||||
# cache-file: <filename>
|
||||
# cache-duration: "12h"
|
||||
# cache-startup-queries:
|
||||
# cache-batch-size: 0
|
||||
# cache-batch-timeout: "0ms"
|
||||
|
||||
# If set, access to the ntfy server and API can be controlled on a granular level using
|
||||
# the 'ntfy user' and 'ntfy access' commands. See the --help pages for details, or check the docs.
|
||||
#
|
||||
# - auth-file is the SQLite user/access database; it is created automatically if it doesn't already exist
|
||||
# - auth-default-access defines the default/fallback access if no access control entry is found; it can be
|
||||
# set to "read-write" (default), "read-only", "write-only" or "deny-all".
|
||||
# - auth-startup-queries allows you to run commands when the database is initialized, e.g. to enable
|
||||
# WAL mode. This is similar to cache-startup-queries. See above for details.
|
||||
#
|
||||
# Debian/RPM package users:
|
||||
# Use /var/lib/ntfy/user.db as user database to avoid permission issues. The package
|
||||
# creates this folder for you.
|
||||
#
|
||||
# Check your permissions:
|
||||
# If you are running ntfy with systemd, make sure this user database file is owned by the
|
||||
# ntfy user and group by running: chown ntfy.ntfy <filename>.
|
||||
#
|
||||
# auth-file: <filename>
|
||||
# auth-default-access: "read-write"
|
||||
# auth-startup-queries:
|
||||
|
||||
# If set, the X-Forwarded-For header is used to determine the visitor IP address
|
||||
# instead of the remote address of the connection.
|
||||
#
|
||||
# WARNING: If you are behind a proxy, you must set this, otherwise all visitors are rate limited
|
||||
# as if they are one.
|
||||
#
|
||||
# behind-proxy: false
|
||||
|
||||
# If enabled, clients can attach files to notifications as attachments. Minimum settings to enable attachments
|
||||
# are "attachment-cache-dir" and "base-url".
|
||||
#
|
||||
# - attachment-cache-dir is the cache directory for attached files
|
||||
# - attachment-total-size-limit is the limit of the on-disk attachment cache directory (total size)
|
||||
# - attachment-file-size-limit is the per-file attachment size limit (e.g. 300k, 2M, 100M)
|
||||
# - attachment-expiry-duration is the duration after which uploaded attachments will be deleted (e.g. 3h, 20h)
|
||||
#
|
||||
# attachment-cache-dir:
|
||||
# attachment-total-size-limit: "5G"
|
||||
# attachment-file-size-limit: "15M"
|
||||
# attachment-expiry-duration: "3h"
|
||||
|
||||
# If enabled, allow outgoing e-mail notifications via the 'X-Email' header. If this header is set,
|
||||
# messages will additionally be sent out as e-mail using an external SMTP server.
|
||||
#
|
||||
# As of today, only SMTP servers with plain text auth (or no auth at all), and STARTLS are supported.
|
||||
# Please also refer to the rate limiting settings below (visitor-email-limit-burst & visitor-email-limit-burst).
|
||||
#
|
||||
# - smtp-sender-addr is the hostname:port of the SMTP server
|
||||
# - smtp-sender-from is the e-mail address of the sender
|
||||
# - smtp-sender-user/smtp-sender-pass are the username and password of the SMTP user (leave blank for no auth)
|
||||
#
|
||||
# smtp-sender-addr:
|
||||
# smtp-sender-from:
|
||||
# smtp-sender-user:
|
||||
# smtp-sender-pass:
|
||||
|
||||
# If enabled, ntfy will launch a lightweight SMTP server for incoming messages. Once configured, users can send
|
||||
# emails to a topic e-mail address to publish messages to a topic.
|
||||
#
|
||||
# - smtp-server-listen defines the IP address and port the SMTP server will listen on, e.g. :25 or 1.2.3.4:25
|
||||
# - smtp-server-domain is the e-mail domain, e.g. ntfy.sh
|
||||
# - smtp-server-addr-prefix is an optional prefix for the e-mail addresses to prevent spam. If set to "ntfy-",
|
||||
# for instance, only e-mails to ntfy-$topic@ntfy.sh will be accepted. If this is not set, all emails to
|
||||
# $topic@ntfy.sh will be accepted (which may obviously be a spam problem).
|
||||
#
|
||||
# smtp-server-listen:
|
||||
# smtp-server-domain:
|
||||
# smtp-server-addr-prefix:
|
||||
|
||||
# Web Push support (background notifications for browsers)
|
||||
#
|
||||
# If enabled, allows ntfy to receive push notifications, even when the ntfy web app is closed. When enabled, users
|
||||
# can enable background notifications in the web app. Once enabled, ntfy will forward published messages to the push
|
||||
# endpoint, which will then forward it to the browser.
|
||||
#
|
||||
# You must configure web-push-public/private key, web-push-file, and web-push-email-address below to enable Web Push.
|
||||
# Run "ntfy webpush keys" to generate the keys.
|
||||
#
|
||||
# - web-push-public-key is the generated VAPID public key, e.g. AA1234BBCCddvveekaabcdfqwertyuiopasdfghjklzxcvbnm1234567890
|
||||
# - web-push-private-key is the generated VAPID private key, e.g. AA2BB1234567890abcdefzxcvbnm1234567890
|
||||
# - web-push-file is a database file to keep track of browser subscription endpoints, e.g. `/var/cache/ntfy/webpush.db`
|
||||
# - web-push-email-address is the admin email address send to the push provider, e.g. `sysadmin@example.com`
|
||||
# - web-push-startup-queries is an optional list of queries to run on startup`
|
||||
#
|
||||
# web-push-public-key:
|
||||
# web-push-private-key:
|
||||
# web-push-file:
|
||||
# web-push-email-address:
|
||||
# web-push-startup-queries:
|
||||
|
||||
# If enabled, ntfy can perform voice calls via Twilio via the "X-Call" header.
#
# - twilio-account is the Twilio account SID, e.g. AC12345beefbeef67890beefbeef122586
# - twilio-auth-token is the Twilio auth token, e.g. affebeef258625862586258625862586
# - twilio-phone-number is the outgoing phone number you purchased, e.g. +18775132586
# - twilio-verify-service is the Twilio Verify service SID, e.g. VA12345beefbeef67890beefbeef122586
#
# twilio-account:
# twilio-auth-token:
# twilio-phone-number:
# twilio-verify-service:

# Interval in which keepalive messages are sent to the client. This is to prevent
# intermediaries closing the connection for inactivity.
#
# Note that the Android app has a hardcoded timeout at 77s, so it should be less than that.
#
# keepalive-interval: "45s"

# Interval in which the manager prunes old messages, deletes topics
# and prints the stats.
#
# manager-interval: "1m"

# Defines topic names that are not allowed, because they are otherwise used. There are a few default topics
# that cannot be used (e.g. app, account, settings, ...). To extend the default list, define them here.
#
# Example:
#   disallowed-topics:
#     - about
#     - pricing
#     - contact
#
# disallowed-topics:

# Defines the root path of the web app, or disables the web app entirely.
#
# Can be any simple path, e.g. "/", "/app", or "/ntfy". For backwards-compatibility reasons,
# the values "app" (maps to "/"), "home" (maps to "/app") and "disable" (maps to "") are also
# accepted; "disable" turns the web app off entirely.
#
# web-root: /

# Various feature flags used to control the web app, and API access, mainly around user and
# account management.
#
# - enable-signup allows users to sign up via the web app, or API
# - enable-login allows users to log in via the web app, or API
# - enable-reservations allows users to reserve topics (if their tier allows it)
#
# enable-signup: false
# enable-login: false
# enable-reservations: false

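# An illustrative sketch for a private, self-hosted instance (values are assumptions, not this
# deployment's settings): existing users can log in and reserve topics, but nobody can self-register.
#
# enable-signup: false
# enable-login: true
# enable-reservations: true
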
# Server URL of a Firebase/APNS-connected ntfy server (likely "https://ntfy.sh").
#
# iOS users:
# If you use the iOS ntfy app, you MUST configure this to receive timely notifications. You'll likely want this:
# upstream-base-url: "https://ntfy.sh"
#
# If set, all incoming messages will publish a "poll_request" message to the configured upstream server, containing
# the message ID of the original message, instructing the iOS app to poll this server for the actual message contents.
# This is to prevent the upstream server and Firebase/APNS from being able to read the message.
#
# - upstream-base-url is the base URL of the upstream server. Should be "https://ntfy.sh".
# - upstream-access-token is the token used to authenticate with the upstream server. This is only required
#   if you exceed the upstream rate limits, or the upstream server requires authentication.
#
# upstream-base-url:
# upstream-access-token:

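# A minimal illustrative sketch for a self-hosted server with iOS clients; the access token line is
# only an assumption for the case where the upstream rate limits are exceeded, and its value is a placeholder:
#
# upstream-base-url: "https://ntfy.sh"
# upstream-access-token: "<token issued by the upstream server>"
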
# Rate limiting: Total number of topics before the server rejects new topics.
#
# global-topic-limit: 15000

# Rate limiting: Number of subscriptions per visitor (IP address)
#
# visitor-subscription-limit: 30

# Rate limiting: Allowed GET/PUT/POST requests per second, per visitor:
# - visitor-request-limit-burst is the initial bucket of requests each visitor has
# - visitor-request-limit-replenish is the rate at which the bucket is refilled
# - visitor-request-limit-exempt-hosts is a comma-separated list of hostnames, IPs or CIDRs to be
#   exempt from request rate limiting. Hostnames are resolved at the time the server is started.
#   Example: "1.2.3.4,ntfy.example.com,8.7.6.0/24"
#
# visitor-request-limit-burst: 60
# visitor-request-limit-replenish: "5s"
# visitor-request-limit-exempt-hosts: ""

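# Illustrative arithmetic with the defaults above (not a setting): a fresh visitor can fire 60
# requests back-to-back (the burst bucket), after which the bucket refills at one request per 5s,
# i.e. roughly 720 additional requests per hour.
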
# Rate limiting: Hard daily limit of messages per visitor and day. The limit is reset
# every day at midnight UTC. If the limit is not set (or set to zero), the request
# limit (see above) governs the upper limit.
#
# visitor-message-daily-limit: 0

# Rate limiting: Allowed emails per visitor:
# - visitor-email-limit-burst is the initial bucket of emails each visitor has
# - visitor-email-limit-replenish is the rate at which the bucket is refilled
#
# visitor-email-limit-burst: 16
# visitor-email-limit-replenish: "1h"

# Rate limiting: Attachment size and bandwidth limits per visitor:
# - visitor-attachment-total-size-limit is the total storage limit used for attachments per visitor
# - visitor-attachment-daily-bandwidth-limit is the total daily attachment download/upload traffic limit per visitor
#
# visitor-attachment-total-size-limit: "100M"
# visitor-attachment-daily-bandwidth-limit: "500M"

# Rate limiting: Enable subscriber-based rate limiting (mostly used for UnifiedPush)
#
# If enabled, subscribers may opt to have published messages counted against their own rate limits, as opposed
# to the publisher's rate limits. This is especially useful to increase the amount of messages that high-volume
# publishers (e.g. Matrix/Mastodon servers) are allowed to send.
#
# Once enabled, a client may send a "Rate-Topics: <topic1>,<topic2>,..." header when subscribing to topics via
# HTTP stream, or websockets, thereby registering itself as the "rate visitor", i.e. the visitor whose rate limits
# to use when publishing on this topic. Note: Setting the rate visitor requires READ-WRITE permission on the topic.
#
# UnifiedPush only: If this setting is enabled, publishing to UnifiedPush topics will lead to an HTTP 507 response if
# no "rate visitor" has been previously registered. This is to avoid burning the publisher's "visitor-message-daily-limit".
#
# visitor-subscriber-rate-limiting: false

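# An illustrative sketch of how a subscriber registers itself as the "rate visitor"; the topic name and
# server URL are placeholders, and curl is just one way to open the JSON subscription stream:
#
#   curl -s -H "Rate-Topics: upSomeRandomTopic" "https://ntfy.example.com/upSomeRandomTopic/json"
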
# Payments integration via Stripe
#
# - stripe-secret-key is the key used for the Stripe API communication. Setting this value
#   enables payments in the ntfy web app (e.g. Upgrade dialog). See https://dashboard.stripe.com/apikeys.
# - stripe-webhook-key is the key required to validate the authenticity of incoming webhooks from Stripe.
#   Webhooks are essential to keep the local database in sync with the payment provider. See https://dashboard.stripe.com/webhooks.
# - billing-contact is an email address or website displayed in the "Upgrade tier" dialog to let people reach
#   out with billing questions. If unset, nothing will be displayed.
#
# stripe-secret-key:
# stripe-webhook-key:
# billing-contact:

# Metrics
#
# ntfy can expose Prometheus-style metrics via a /metrics endpoint, or on a dedicated listen IP/port.
# Metrics may be considered sensitive information, so before you enable them, be sure you know what you are
# doing, and/or secure access to the endpoint in your reverse proxy.
#
# - enable-metrics enables the /metrics endpoint for the default ntfy server (i.e. HTTP, HTTPS and/or Unix socket)
# - metrics-listen-http exposes the metrics endpoint via a dedicated [IP]:port. If set, this option implicitly
#   enables metrics as well, e.g. "10.0.1.1:9090" or ":9090"
#
# enable-metrics: false
# metrics-listen-http:

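# An illustrative sketch that keeps metrics off the public listeners and serves them on a dedicated
# port instead (the port is an assumption; a Prometheus scrape job would then target <host>:9090/metrics):
#
# metrics-listen-http: ":9090"
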
# Profiling
#
# ntfy can expose Go's net/http/pprof endpoints to support profiling of the ntfy server. If enabled, ntfy will listen
# on a dedicated listen IP/port, which can be accessed via the web browser on http://<ip>:<port>/debug/pprof/.
# This can be helpful to expose bottlenecks, and visualize call flows. See https://pkg.go.dev/net/http/pprof for details.
#
# profile-listen-http:

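# An illustrative sketch (the port is an assumption); with this set, a heap profile can be pulled
# with standard Go tooling, e.g. "go tool pprof http://localhost:6060/debug/pprof/heap":
#
# profile-listen-http: ":6060"
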
# Logging options
#
# By default, ntfy logs to the console (stderr), with an "info" log level, and in a human-readable text format.
# ntfy supports five different log levels, can also write to a file, log as JSON, and even supports granular
# log level overrides for easier debugging. Some options (log-level and log-level-overrides) can be hot reloaded
# by calling "kill -HUP $pid" or "systemctl reload ntfy".
#
# - log-format defines the output format, can be "text" (default) or "json"
# - log-file is a filename to write logs to. If this is not set, ntfy logs to stderr.
# - log-level defines the default log level, can be one of "trace", "debug", "info" (default), "warn" or "error".
#   Be aware that "debug" (and particularly "trace") can be VERY CHATTY. Only turn them on briefly for debugging purposes.
# - log-level-overrides lets you override the log level if certain fields match. This is incredibly powerful
#   for debugging certain parts of the system (e.g. only the account management, or only a certain visitor).
#   This is an array of strings in the format:
#   - "field=value -> level" to match a value exactly, e.g. "tag=manager -> trace"
#   - "field -> level" to match any value, e.g. "time_taken_ms -> debug"
#   Warning: Using log-level-overrides has a performance penalty. Only use it for temporary debugging.
#
# Check your permissions:
# If you are running ntfy with systemd, make sure this log file is owned by the
# ntfy user and group by running: chown ntfy.ntfy <filename>.
#
# Example (good for production):
#   log-level: info
#   log-format: json
#   log-file: /var/log/ntfy.log
#
# Example level overrides (for debugging, only use temporarily):
#   log-level-overrides:
#     - "tag=manager -> trace"
#     - "visitor_ip=1.2.3.4 -> debug"
#     - "time_taken_ms -> debug"
#
# log-level: info
# log-level-overrides:
# log-format: text
# log-file:

@@ -31,7 +31,6 @@ autorestic:
zfs_key: ENC[AES256_GCM,data:HyZBD202BoG6ncw37Tg9LPvfvQPnOaLJKk+gMvdZflt+XZ/7lx6TZOp/loiDhSSBTMusAXaI/aDkAFx2a7yDUQ==,iv:nQAHi9TyUXamSlFq99NYvWLOBSuZstuYNJLgVpxF1JU=,tag:mIS/E4Wr6IdWsZtehNY7UA==,type:str]
ssd_key: ENC[AES256_GCM,data:xgJCpNkmIn8VU+jG++0kLW8WM9RbTBmsZeOuOz1WWmc4sOdN4lWfPvLjcTAHZDIXFvX7NodEcGAYDmcWNw7QBw==,iv:wGJcz7CEjhwsUlVEyuHOBcayzE97PfWi2f0TvITzafg=,tag:wpaJFcQBd/kAmExfD6fwJQ==,type:str]
eustachius_key: ENC[AES256_GCM,data:qiq6Y05bV7mf0OOBDzR09MrW5g01WxmWVHB3vJ04XQaOVMGzl7hZq0ewcLOxitbFw3VcN5GQBpA8smlmahz8VA==,iv:epq7+tXG9QYAjNu8qHI2gjBYUuoPNdZg8+2XCLOwu1Q=,tag:qM8YdSZhwwM3GDrNPfo/Jg==,type:str]
ntfy_access_token: ENC[AES256_GCM,data:BH1/tNYDj2ggzdNByQDYT0cu9hGTgaGEjXUv4HdqO1M=,iv:nq+frAIoNr8uCwGadOqdAP90kjukVTSq3Tc8hWbCi3Q=,tag:r92qKR78keQVgI/VMXipvw==,type:str]
matrix:
postgres_database: ENC[AES256_GCM,data:9O0vYjbTuQ==,iv:L5QCwhFSjPW0OiUMjCQo6BcLktUXJcqTsTXEi5JdaWo=,tag:LUPRSZl0pza5WOWI8RrAmw==,type:str]
postgres_user: ENC[AES256_GCM,data:S9ksmTOAbBg=,iv:q/6Oo9JhiSAqQq3ZKa0dbQGtfYAuD0oeiDLR4YwV0nk=,tag:RIc/1UVs88Jg8+4zGnW6vQ==,type:str]
@@ -46,26 +45,6 @@ auth:
homarr:
oidc_client_secret: ENC[AES256_GCM,data:ykaMgcS1x/sMFPmi9vF8RdS7Dj8tTpNFybqwJ5MkK3OCIqYt5FtY8si7ZbKC4IMquOA4w3fWpHdygvFJwJOyNNvznWuasR1afhaAHIHb85J41GWCpMLWWZub+NUuU2pSudvUYk9LeDUBTKwtfHgr4DUzoQeBocG0httGFKBAXbo=,iv:vThB7ZCgEB5yQoiOYhDcHiGm0lYXy1LCJWunH5HwFq0=,tag:68jkMBnCc2e3bKWR/Hnnww==,type:str]
oidc_client_id: ENC[AES256_GCM,data:2KxgJ7rFNru7rf8P9v/LOcA7TjH2ZFerc4PBmetrkB7hre9fHTa+TQ==,iv:9k0YuPNzEjTTBN0l/oyT5mtZKLCGWZ7ZJpE8g2SBu3E=,tag:C/hzffeOVgke1SQZHPjyrA==,type:str]
minio:
root_user: ENC[AES256_GCM,data:Q5yRACtvoQ==,iv:GTLtwwQ5W50w6eDO+PuihNAHWm6xyM9uNa8mbGG3tWI=,tag:O3MUlh2d8iuFTPRq1PvTWw==,type:str]
root_password: ENC[AES256_GCM,data:0//dfGYkV80=,iv:h1b0R2QRpN/RI9kUBU0fiKLOI3PUYmisa7RH1ibSF4c=,tag:ln1cv5LQpb76vK5+eTvSuA==,type:str]
diun:
ntfy_access_token: ENC[AES256_GCM,data:37UYgaMlmpoMW74LqtxkuMqGQmCvLpVdJAgEmVxSULY=,iv:tZPlfIgo1vWvMPlQzCBPXj5xYDiTWJOsVwkxBjGNMDk=,tag:882g2UxFfg5VSKqAtEMk2Q==,type:str]
crowdsec:
traefik_bouncer_api_key: ENC[AES256_GCM,data:qNY3cWNxG2pyrTN1UnYCGWCmx1Yue1WAJZ8DEsLqnZ+RDoaJfvqqJazJUg==,iv:x0K9Vq+ZuojmeHSbS/0PoOQdLIRDMtGdmU+msv4PWzI=,tag:qgxQIBHtARTNv17x7N6zyw==,type:str]
stats:
oidc_client_id: ENC[AES256_GCM,data:/0Y/qLyxGTKskcoQVdlQkEYHa1P7+0PYwv1GoXV5r48btzpPHYysLA==,iv:QT6GM3I38/kSDrzm5phPWnGQxjds0qamduYuIvj4dig=,tag:yGnM4jOwDtC81jrXUG6r+w==,type:str]
oidc_client_secret: ENC[AES256_GCM,data:ETl5Lm8GSk/xwD9+TZZlPwNA8CxdQ2teyjWVWShXrx0o0qdE72lIBnW7mW9bklx1RMhSBvhArZPMA9fFN29nCJ4E9zXNTxFFviHUZTr+8mdm5g9TYu4WJxiJ3rzIavgx4DQR0FIQyXzXXMSoLDpOl+u4oT8vfb3ef4bKIDktBGU=,iv:KMy70+IA8KKj4mjB4sV3uXg8iDjponO+AzYlNYvv3pE=,tag:WMsUg0PNILBz1jNyV6PggQ==,type:str]
wiki:
aws_access_key_id: ENC[AES256_GCM,data:Fqfa6XcDDpQ0l+/entQh6sxobBM=,iv:gbfHxTy0Oj9xYlucpN98CjNIURDrx9BuFF4Pfo90V0M=,tag:df8Z3J2ovO1MHPnzOsCtpg==,type:str]
aws_secret_access_key: ENC[AES256_GCM,data:sbgzvlN5dP4jZIGKtDsMn5o2RqWTl+XNi80ydnOgrQkgnQ/HxluWWA==,iv:xyCKfbf/UF9cFunCYHwVBw4eVvOeZQtfPtrz2s6zIII=,tag:S0wzL8d5iEn20VbOVfrZBw==,type:str]
oidc_client_id: ENC[AES256_GCM,data:SSuRQJfgzeb641U2eeVE8wYZAbEWHYSSx0b8n4687FHLslFPGCAWeA==,iv:khCwIE50KVEtHJoDJBdCBJIDVZiDjkCS2D4yUt3AEOQ=,tag:JjVil9C2HHdTH1fDzDAJkg==,type:str]
oidc_client_secret: ENC[AES256_GCM,data:6TgTZsfaBdsismhK/lAiayMU8uIFOCmumV9tzmqNSocbqQgKAuEwgXTisMtndsk64JA2NYCS2DXhe+NSBO++aBscZ/hbqxBNWqw7c84YugqXMRFeqidb+RSJKdJ6WDmwGBfGm6/kjGJ+FSGuiu1S4sfOlfp2bXM5mhvgAXUygfg=,iv:Q9QoWp2V6uwFJidsL7QzB67TO4uFsqmun8zdZgRXbNI=,tag:wphiixaGZgsmk4sQFyvqbg==,type:str]
secret_key: ENC[AES256_GCM,data:Xr4iRj2oYJYVBBzIOsT6d4LjQo/M+qy7XVoNoM9vQWeWZuZlCnrdy7cfsa6VfHVnPNfdMIccmvBk3VzdDH1ukA==,iv:62LiqANdqrSMGzgaxL3uxgwyZtZd1XrsYEMF/ixt+lM=,tag:OH4xth89KDKONnbniM3itg==,type:str]
utils_secret: ENC[AES256_GCM,data:M062FvE0kFVyjkxIlolLtR/NwIya1Si7r/im1SDLvGNIHn4kDgat5KTHitjjMFMOKeSKT7ipgHc/lWCQYbi6IA==,iv:LWLOt+vZF0xK68LJTw1xWIWG65pkGiMnx/oMRBzeyyQ=,tag:SXKu3UO6IKupBVfvAwCtHw==,type:str]
db_user: ENC[AES256_GCM,data:g2+KPA==,iv:0I7EoGNlnnKf5H0UnmJ++9XDHEqZpXgZkyaW9flxN8c=,tag:b3WrfHGkxIJ1nNFp3FHAjA==,type:str]
db_pass: ENC[AES256_GCM,data:rYmNXQ==,iv:ZnImkMdIkp92jkojLVBSGSN06my3xFwr3AFfENNXgfQ=,tag:AZHqXRLfJ0lFrGyut+Sdug==,type:str]
db_name: ENC[AES256_GCM,data:Ns7vKJxeTw==,iv:GREMMRicS+1n/uk+KOeplqHn/ZdjjOjQ4d0qV5FICy8=,tag:CSeDTNjBiJ4G2VnytpNXiw==,type:str]
sops:
kms: []
gcp_kms: []
@@ -81,8 +60,8 @@ sops:
VUUxcEhvYi8zeXlCUUViUTl0eWdhcU0KXOfbnDc+zc8lnBcyEAV5EiJSjcSU6AgI
EfeRw8qVqwChrYn1agslcNnDbE0WQsOCBuA6cE4V3kRofp9HU949ig==
-----END AGE ENCRYPTED FILE-----
lastmodified: "2024-08-09T13:53:16Z"
mac: ENC[AES256_GCM,data:5pANdrfnPuDf2mai0UgcFbwr4OzjLzLWraKOt38fX2MySYH2EryMzsk4prhehXPTkD3soMFwaVbuuqZUbkWCWM3CtjuyCisQH4uiZZw+slw6g8atr4h3tpHtD2SwgGVESMJouVQyfb9ko4O1ArBvml/0a6DAGmwoxlQwGboZR5M=,iv:oiZx4BsRBNAn+hjhzhV6oVZrYQJ32DAQlyNNsevaLpc=,tag:A0EsGeaP5vy9vA8WZjbxIQ==,type:str]
lastmodified: "2024-05-18T21:12:01Z"
mac: ENC[AES256_GCM,data:kBGP7V4f8d8JWdMdwPEYM1L2zZ4p6eHfwiepfLpBAr0VyhE9YOpPIdt9Tl+ky3mRyfn/DnX03ThiAKQtTrls3/lJEmJRd1dswRd+Mtls3j1QlxhorHYb8g6QvlmyepNf5j5Egqm9hNX+L3aV29mKoO42VxvfaopKduNGt1BrSFo=,iv:Uq+hQUMF+PBV5f6V9AsnxIxX0fKn84MAPEfTFtOtsus=,tag:6LtblCK7FLnhfS0dHsrcnQ==,type:str]
pgp: []
unencrypted_suffix: _unencrypted
version: 3.8.1