Compare commits
45 commits: c0769b53c8 ... 4dd1cefb30
Commits:
4dd1cefb30
dd03bd6090
71393d8ec5
8e4fc00255
c1e8037290
bfd1b01d65
925a7948a5
4545dd5efe
b5b149474b
e91191eacd
b9e66cbecb
b7dcc5e579
9e32507665
9367167e92
5ed372d1f4
28be439381
c6295daafe
576cf1f077
29fca3dd76
d83bf9d4ab
f4725f1030
e5d4bffe8d
4fd9643425
8a97ba6c49
3572685386
5a9cf4c15c
099959a30a
255847b6e1
76a885e0d3
470f738403
49217e1bcc
a932b4598d
13b0e7593e
d1b633b086
77a765e332
6043b73a04
c213949f6f
1debdb71d8
26591d2175
2bf237f1da
b0e2738905
20c587bb5e
3c7751feda
93fde832ea
e6b3ee395c
31 changed files with 794 additions and 619 deletions
@@ -12,6 +12,7 @@
 commit.gpgsign = true;
 user.signingkey = "0x2C2C1C62A5388E82";
 init.defaultBranch = "main";
+pull.rebase = false; # merge by default
 };
 lfs.enable = true;
 aliases = {

@@ -7,6 +7,7 @@ in {
 imports = [
 ./global
 ./features/coding/nvim
+./features/coding/tmux.nix
 inputs.nix-colors.homeManagerModules.default
 ];

@@ -1,5 +1,5 @@
 let
-authentikImage = "ghcr.io/goauthentik/server:2024.4.2";
+authentikImage = "ghcr.io/goauthentik/server:2024.6";
 in {
 project.name = "auth";

@@ -20,7 +20,7 @@ in {
 owner = vars.user;
 };

-sops.templates."postgres.env" = {
+sops.templates."auth-postgres.env" = {
 path = "/home/${vars.user}/.docker/auth/postgres.env";
 owner = vars.user;
 mode = "0775";
@@ -31,7 +31,7 @@ in {
 '';
 };

-sops.templates."authentik.env" = {
+sops.templates."auth-authentik.env" = {
 path = "/home/${vars.user}/.docker/auth/authentik.env";
 owner = vars.user;
 mode = "0775";

@@ -8,7 +8,6 @@
 inputs.arion.nixosModules.arion
 ./dns
 ./infrastructure
-./nas
 ./nextcloud
 ./push
 ./git
@@ -21,6 +20,9 @@
 ./matrix
 ./headscale
 ./auth
+./minio
+./stats
+./wiki
 ];

 environment.systemPackages = with pkgs; [arion];

@@ -10,7 +10,7 @@

 services = {
 forgejo.service = {
-image = "codeberg.org/forgejo/forgejo:7.0.3";
+image = "codeberg.org/forgejo/forgejo:7.0.5";
 container_name = "forgejo";
 useHostStore = true;
 labels = {
@@ -30,6 +30,12 @@
 "traefik.http.routers.forgejo-external.entrypoints" = "websecure-external";
 "traefik.http.routers.forgejo-external.tls" = "true";
 "traefik.http.routers.forgejo-external.tls.certresolver" = "letsencrypt";
+
+"diun.enable" = "true";
+"diun.watch_repo" = "true";
+"diun.sort_tags" = "semver";
+"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
+"diun.exclude_tags" = "\\b\\d{4,}\\b";
 };
 volumes = [
 "/storage/dataset/docker/git/forgejo_data:/data"

@@ -7,9 +7,7 @@
 };

 docker-compose.volumes = {
-traefik_letsencrypt = null;
-scrutiny_data = null;
-scrutiny_db = null;
+traefik-logs = null;
 };

 services = {
@@ -31,11 +29,19 @@
 "traefik.http.services.dashboard.loadbalancer.server.port" = "8080";
 "traefik.http.routers.dashboard.tls" = "true";
 "traefik.http.routers.dashboard.tls.certresolver" = "letsencrypt";
+
+"traefik.http.routers.dashboard.tls.domains[0].main" = "ghoscht.com";
+"traefik.http.routers.dashboard.tls.domains[0].sans" = "*.ghoscht.com";
+
+"traefik.http.middlewares.traefik-https-redirect.redirectscheme.scheme" = "https";
+"traefik.http.middlewares.sslheader.headers.customrequestheaders.X-Forwarded-Proto" = "https";
 };
 volumes = [
-"traefik_letsencrypt:/letsencrypt"
-"/home/ghoscht/.docker/infrastructure/traefik_data:/etc/traefik"
+"/home/ghoscht/.docker/infrastructure/traefik_config/traefik.yml:/traefik.yml:ro"
+"/home/ghoscht/.docker/infrastructure/traefik_data/config.yml:/config.yml:ro"
+"/storage/dataset/docker/infrastructure/traefik_data/acme.json:/acme.json"
 "/var/run/docker.sock:/var/run/docker.sock:ro"
+"traefik-logs:/var/log/traefik"
 ];
 env_file = [
 "/home/ghoscht/.docker/infrastructure/traefik.env"
@@ -45,18 +51,46 @@
 "dmz"
 ];
 };
-# cloudflared.service = {
-# image = "cloudflare/cloudflared:2024.2.1";
-# container_name = "cloudflared";
-# env_file = [
-# "/home/ghoscht/.docker/infrastructure/cloudflared.env"
-# ];
-# restart = "always";
-# command = "tunnel --no-autoupdate --protocol http2 run";
-# networks = [
-# "dmz"
-# ];
-# };
+crowdsec.service = {
+image = "crowdsecurity/crowdsec:v1.6.2";
+container_name = "crowdsec";
+environment = {
+GID = "1000";
+COLLECTIONS = "crowdsecurity/linux crowdsecurity/traefik firix/authentik LePresidente/gitea Dominic-Wagner/vaultwarden";
+};
+volumes = [
+"/storage/dataset/docker/infrastructure/crowdsec_config/acquis.yaml:/etc/crowdsec/acquis.yaml"
+"/storage/dataset/docker/infrastructure/crowdsec_config/profiles.yaml:/etc/crowdsec/profiles.yaml"
+"/storage/dataset/docker/infrastructure/crowdsec_config/ntfy.yaml:/etc/crowdsec/notifications/ntfy.yaml"
+"/storage/dataset/docker/infrastructure/crowdsec_db:/var/lib/crowdsec/data/"
+"/storage/dataset/docker/infrastructure/crowdsec_data:/etc/crowdsec/"
+"traefik-logs:/var/log/traefik/:ro"
+"/var/run/docker.sock:/var/run/docker.sock:ro"
+];
+depends_on = [
+"traefik"
+];
+networks = [
+"dmz"
+];
+restart = "always";
+};
+bouncer-traefik.service = {
+image = "fbonalair/traefik-crowdsec-bouncer:0.5.0";
+environment = {
+CROWDSEC_AGENT_HOST = "crowdsec:8080";
+};
+env_file = [
+"/home/ghoscht/.docker/infrastructure/traefik-bouncer.env"
+];
+depends_on = [
+"crowdsec"
+];
+networks = [
+"dmz"
+];
+restart = "always";
+};
 scrutiny.service = {
 image = "ghcr.io/analogj/scrutiny:v0.8.0-omnibus";
 container_name = "scrutiny";
@@ -75,11 +109,11 @@
 };
 volumes = [
 "/run/udev:/run/udev:ro"
-"scrutiny_data:/opt/scrutiny/config"
-"scrutiny_db:/opt/scrutiny/influxdb"
+"/storage/dataset/docker/infrastructure/scrutiny_data:/opt/scrutiny/config"
+"/storage/dataset/docker/infrastructure/scrutiny_influxdb_data:/opt/scrutiny/influxdb"
 ];
 devices = [
-"/dev/nvme0n1"
+"/dev/nvme0"
 "/dev/sda"
 "/dev/sdb"
 "/dev/sdc"
@@ -91,16 +125,36 @@
 "dmz"
 ];
 };
-dyndns.service = {
-image = "ghcr.io/cromefire/fritzbox-cloudflare-dyndns:1.2.1";
-container_name = "dyndns";
+diun.service = {
+image = "crazymax/diun:4.28";
+container_name = "diun";
 restart = "always";
-ports = ["8888:8080"];
+command = "serve";
+volumes = [
+"/storage/dataset/docker/infrastructure/diun_data:/data"
+"/var/run/docker.sock:/var/run/docker.sock"
+];
 environment = {
-CLOUDFLARE_ZONES_IPV4 = "ghoscht.com";
+TZ = "Europe/Berlin";
+LOG_LEVEL = "info";
+#Only when setting workers=1 sorting can be actually observed
+DIUN_WATCH_WORKERS = "20";
+DIUN_WATCH_SCHEDULE = "0 */6 * * *";
+DIUN_WATCH_JITTER = "30s";
+DIUN_WATCH_RUNONSTARTUP = "true";
+DIUN_PROVIDERS_DOCKER = "true";
+
+DIUN_DEFAULTS_MAXTAGS = 1;
+DIUN_DEFAULTS_NOTIFYON = "new";
+
+DIUN_NOTIF_NTFY_ENDPOINT = "http://ntfy";
+DIUN_NOTIF_NTFY_TOPIC = "docker-updates";
 };
 env_file = [
-"/home/ghoscht/.docker/infrastructure/dyndns.env"
+"/home/ghoscht/.docker/infrastructure/diun.env"
+];
+networks = [
+"dmz"
 ];
 };
 };

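The crowdsec container added above bind-mounts an acquis.yaml, a profiles.yaml and an ntfy notification file from the host; none of those files are part of this diff. As a rough sketch only, an acquisition file matching the shared traefik-logs volume would typically look like this (the log path comes from the volume mounts above, everything else is an assumption):

filenames:
  - /var/log/traefik/access.log
labels:
  type: traefik
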
@@ -21,7 +21,11 @@ in {
 owner = vars.user;
 };

-sops.secrets."dyndns/cloudflare_api_key" = {
+sops.secrets."crowdsec/traefik_bouncer_api_key" = {
+owner = vars.user;
+};
+
+sops.secrets."diun/ntfy_access_token" = {
 owner = vars.user;
 };

@@ -44,47 +48,84 @@ in {
 '';
 };

-sops.templates."dyndns.env" = {
-path = "/home/${vars.user}/.docker/infrastructure/dyndns.env";
+sops.templates."traefik-bouncer.env" = {
+path = "/home/${vars.user}/.docker/infrastructure/traefik-bouncer.env";
 owner = vars.user;
 mode = "0775";
 content = ''
-CLOUDFLARE_API_TOKEN="${config.sops.placeholder."dyndns/cloudflare_api_key"}"
+CROWDSEC_BOUNCER_API_KEY="${config.sops.placeholder."crowdsec/traefik_bouncer_api_key"}"
 '';
 };

-sops.templates."traefik.toml" = {
-path = "/home/${vars.user}/.docker/infrastructure/traefik_data/traefik.toml";
+sops.templates."traefik.yml" = {
+path = "/home/${vars.user}/.docker/infrastructure/traefik_config/traefik.yml";
 owner = vars.user;
 mode = "0775";
 content = ''
-[entryPoints]
-[entryPoints.web]
-address = ":80"
-[entryPoints.web-external]
-address = ":81"
-[entryPoints.websecure]
-address = ":443"
-[entryPoints.websecure-external]
-address = ":444"
-[api]
-dashboard = true
-insecure = true
-[certificatesResolvers.letsencrypt.acme]
-email = "${config.sops.placeholder."traefik/acme_email"}"
-storage = "/letsencrypt/acme.json"
-[certificatesResolvers.letsencrypt.acme.dnsChallenge]
-provider = "cloudflare"
-resolvers = ["1.1.1.1:53", "1.0.0.1:53"]
-[serversTransport]
-insecureSkipVerify = true
-[providers.docker]
-watch = true
-network = "dmz"
-exposedByDefault = false # overriden by traefik.enable=true
+api:
+  dashboard: true
+  debug: true
+  insecure: true
+entryPoints:
+  web:
+    address: ":80"
+    http:
+      redirections:
+        entrypoint:
+          to: websecure
+          scheme: https
+  websecure:
+    address: ":443"
+  web-external:
+    address: ":81"
+    http:
+      redirections:
+        entrypoint:
+          to: websecure-external
+          scheme: https
+      middlewares:
+        - crowdsec-bouncer@file
+  websecure-external:
+    address: ":444"
+    http:
+      middlewares:
+        - crowdsec-bouncer@file
+providers:
+  docker:
+    watch: true
+    exposedByDefault: false
+    network: dmz
+  file:
+    filename: /config.yml
+certificatesResolvers:
+  letsencrypt:
+    acme:
+      email: ${config.sops.placeholder."traefik/acme_email"}
+      storage: acme.json
+      dnsChallenge:
+        provider: cloudflare
+        resolvers:
+          - "1.1.1.1:53"
+          - "1.0.0.1:53"
+log:
+  level: "INFO"
+  filePath: "/var/log/traefik/traefik.log"
+accessLog:
+  filePath: "/var/log/traefik/access.log"
 '';
 };
+sops.templates."diun.env" = {
+path = "/home/${vars.user}/.docker/infrastructure/diun.env";
+owner = vars.user;
+mode = "0775";
+content = ''
+DIUN_NOTIF_NTFY_TOKEN="${config.sops.placeholder."diun/ntfy_access_token"}"
+'';
+};
+services.cron = {
+enable = true;
+systemCronJobs = [
+"0 * * * * root . /etc/profile; docker exec crowdsec cscli hub update && docker exec crowdsec cscli hub upgrade >> /var/log/crowdsec-update.log"
+];
+};
 }

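The static configuration above enables Traefik's file provider (filename: /config.yml) and attaches a crowdsec-bouncer@file middleware to the external entrypoints, but the dynamic config.yml itself is not included in this diff. A minimal sketch of how that middleware is usually wired to the bouncer-traefik container via forwardAuth; the address is an assumption based on the container name and port used above, not taken from the repository:

http:
  middlewares:
    crowdsec-bouncer:
      forwardAuth:
        address: http://bouncer-traefik:8080/api/v1/forwardAuth
        trustForwardHeader: true
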
@@ -5,10 +5,11 @@
 name = "dmz";
 external = true;
 };
+networks.internal = {};

 services = {
 jellyfin.service = {
-image = "linuxserver/jellyfin:10.9.1";
+image = "linuxserver/jellyfin:10.9.7";
 container_name = "jellyfin";
 ports = [
 "8096:8096"
@@ -21,6 +22,12 @@
 "traefik.http.services.jellyfin.loadbalancer.passHostHeader" = "true";
 "traefik.http.routers.jellyfin.tls" = "true";
 "traefik.http.routers.jellyfin.tls.certresolver" = "letsencrypt";
+
+"diun.enable" = "true";
+"diun.watch_repo" = "true";
+"diun.sort_tags" = "semver";
+"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
+"diun.exclude_tags" = "\\b\\d{4,}\\b";
 };
 volumes = [
 "/storage/dataset/docker/media/jellyfin_data:/config"
@@ -43,11 +50,27 @@
 container_name = "navidrome";
 labels = {
 "traefik.enable" = "true";
-"traefik.http.routers.navidrome.entrypoints" = "websecure";
-"traefik.http.routers.navidrome.rule" = "Host(`navidrome.ghoscht.com`)";
+"traefik.docker.network" = "dmz";
+
 "traefik.http.services.navidrome.loadbalancer.server.port" = "4533";
+"traefik.http.routers.navidrome.service" = "navidrome";
+"traefik.http.routers.navidrome.entrypoints" = "websecure";
+"traefik.http.routers.navidrome.rule" = "Host(`music.ghoscht.com`)";
 "traefik.http.routers.navidrome.tls" = "true";
 "traefik.http.routers.navidrome.tls.certresolver" = "letsencrypt";
+
+"traefik.http.services.navidrome-external.loadbalancer.server.port" = "4533";
+"traefik.http.routers.navidrome-external.service" = "navidrome-external";
+"traefik.http.routers.navidrome-external.rule" = "Host(`music.ghoscht.com`)";
+"traefik.http.routers.navidrome-external.entrypoints" = "websecure-external";
+"traefik.http.routers.navidrome-external.tls" = "true";
+"traefik.http.routers.navidrome-external.tls.certresolver" = "letsencrypt";
+
+"diun.enable" = "true";
+"diun.watch_repo" = "true";
+"diun.sort_tags" = "semver";
+"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
+"diun.exclude_tags" = "\\b\\d{4,}\\b";
 };
 volumes = [
 "/storage/dataset/docker/media/navidrome_data:/data"
@@ -65,7 +88,7 @@
 ];
 };
 kavita.service = {
-image = "jvmilazz0/kavita:0.7.14";
+image = "jvmilazz0/kavita:0.8.1";
 container_name = "kavita";
 labels = {
 "traefik.enable" = "true";
@@ -78,6 +101,7 @@
 volumes = [
 "/storage/dataset/docker/media/kavita_data:/kavita/config"
 "/storage/dataset/data/media/manga:/manga"
+"/storage/dataset/data/media/comics:/comics"
 ];
 restart = "always";
 networks = [
@@ -105,9 +129,9 @@
 PGID = 1000;
 TZ = "Europe/Berlin";
 OPENVPN_PROVIDER = "WINDSCRIBE";
-OPENVPN_CONFIG = "Vienna-Hofburg-udp";
+OPENVPN_CONFIG = "Amsterdam-Tulip-udp";
 OVPN_PROTOCOL = "udp";
-OPENVPN_OPTS = "--pull-filter ignore ping --ping 10 --ping-restart 120";
+OPENVPN_OPTS = "--reneg-sec 0 --verb 4";
 LOCAL_NETWORK = "192.168.0.0/16";
 TRANSMISSION_DOWNLOAD_DIR = "/data/torrents";
 TRANSMISSION_INCOMPLETE_DIR = "/data/torrents/incomplete";
@@ -124,10 +148,11 @@
 restart = "always";
 networks = [
 "dmz"
+"internal"
 ];
 };
 prowlarr.service = {
-image = "linuxserver/prowlarr:1.16.2";
+image = "linuxserver/prowlarr:1.19.0";
 container_name = "prowlarr";
 labels = {
 "traefik.enable" = "true";
@@ -137,6 +162,10 @@
 "traefik.docker.network" = "dmz";
 "traefik.http.routers.prowlarr.tls" = "true";
 "traefik.http.routers.prowlarr.tls.certresolver" = "letsencrypt";
+
+"diun.enable" = "true";
+"diun.watch_repo" = "true";
+"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
 };
 volumes = [
 "/storage/dataset/docker/media/prowlarr_data:/config"
@@ -153,7 +182,7 @@
 restart = "always";
 };
 sonarr.service = {
-image = "linuxserver/sonarr:4.0.4";
+image = "linuxserver/sonarr:4.0.6";
 container_name = "sonarr";
 labels = {
 "traefik.enable" = "true";
@@ -163,6 +192,10 @@
 "traefik.docker.network" = "dmz";
 "traefik.http.routers.sonarr.tls" = "true";
 "traefik.http.routers.sonarr.tls.certresolver" = "letsencrypt";
+
+"diun.enable" = "true";
+"diun.watch_repo" = "true";
+"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
 };
 volumes = [
 "/storage/dataset/docker/media/sonarr_data:/config"
@@ -181,7 +214,7 @@
 restart = "always";
 };
 radarr.service = {
-image = "linuxserver/radarr:5.4.6";
+image = "linuxserver/radarr:5.7.0";
 container_name = "radarr";
 labels = {
 "traefik.enable" = "true";
@@ -191,6 +224,10 @@
 "traefik.docker.network" = "dmz";
 "traefik.http.routers.radarr.tls" = "true";
 "traefik.http.routers.radarr.tls.certresolver" = "letsencrypt";
+
+"diun.enable" = "true";
+"diun.watch_repo" = "true";
+"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
 };
 volumes = [
 "/storage/dataset/docker/media/radarr_data:/config"
@@ -209,7 +246,7 @@
 restart = "always";
 };
 lidarr.service = {
-image = "linuxserver/lidarr:2.2.5";
+image = "linuxserver/lidarr:2.3.3";
 container_name = "lidarr";
 labels = {
 "traefik.enable" = "true";
@@ -220,6 +257,12 @@
 "traefik.docker.network" = "dmz";
 "traefik.http.routers.lidarr.tls" = "true";
 "traefik.http.routers.lidarr.tls.certresolver" = "letsencrypt";
+
+"diun.enable" = "true";
+"diun.watch_repo" = "true";
+"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
+
+# "diun.max_tags" = "10";
 };
 volumes = [
 "/storage/dataset/docker/media/lidarr_data:/config"
@@ -287,7 +330,7 @@
 restart = "always";
 };
 autobrr.service = {
-image = "ghcr.io/autobrr/autobrr:v1.41.0";
+image = "ghcr.io/autobrr/autobrr:v1.43.0";
 container_name = "autobrr";
 labels = {
 "traefik.enable" = "true";
@@ -297,6 +340,10 @@
 "traefik.docker.network" = "dmz";
 "traefik.http.routers.autobrr.tls" = "true";
 "traefik.http.routers.autobrr.tls.certresolver" = "letsencrypt";
+
+"diun.enable" = "true";
+"diun.watch_repo" = "true";
+"diun.include_tags" = "^v\\d+\\.\\d+\\.\\d+$$";
 };
 volumes = [
 "/storage/dataset/docker/media/autobrr_data:/config"
@@ -394,5 +441,18 @@
 };
 restart = "always";
 };
+port-refresh.service = {
+image = "ghoscht/windscribe-ephemeral-port:latest";
+container_name = "port-refresh";
+volumes = [
+"/storage/dataset/docker/media/port-refresh_config/config.yml:/config/config.yaml"
+];
+networks = [
+"internal"
+];
+depends_on = {
+vpn = {condition = "service_healthy";};
+};
+};
 };
 }

hosts/franz/arion/minio/arion-compose.nix (new file)
@@ -0,0 +1,48 @@
{
project.name = "minio";

networks.dmz = {
name = "dmz";
external = true;
};

services = {
minio.service = {
image = "bitnami/minio:2024.5.10";
container_name = "minio";
labels = {
"traefik.enable" = "true";

# API
"traefik.http.routers.minio.rule" = "Host(`files.ghoscht.com`)";
"traefik.http.routers.minio.service" = "minio";
"traefik.http.routers.minio.entrypoints" = "websecure";
"traefik.http.services.minio.loadbalancer.server.port" = "9000";
"traefik.http.routers.minio.tls" = "true";
"traefik.http.routers.minio.tls.certresolver" = "letsencrypt";

# Dashboard
"traefik.http.routers.minio-dash.rule" = "Host(`minio.ghoscht.com`)";
"traefik.http.routers.minio-dash.service" = "minio-dash";
"traefik.http.routers.minio-dash.entrypoints" = "websecure";
"traefik.http.services.minio-dash.loadbalancer.server.port" = "9001";
"traefik.http.routers.minio-dash.tls" = "true";
"traefik.http.routers.minio-dash.tls.certresolver" = "letsencrypt";
};
volumes = [
"/storage/dataset/docker/minio/minio_data:/data"
];
environment = {
MINIO_DATA_DIR = "/data";
MINIO_BROWSER_REDIRECT_URL = "https://minio.ghoscht.com";
};
env_file = [
"/home/ghoscht/.docker/minio/minio.env"
];
restart = "unless-stopped";
networks = [
"dmz"
];
};
};
}

hosts/franz/arion/minio/default.nix (new file)
@@ -0,0 +1,25 @@
{config, ...}: let
vars = import ../../../../vars.nix;
in {
virtualisation.arion = {
projects.minio.settings = {
imports = [./arion-compose.nix];
};
};
sops.secrets."minio/root_user" = {
owner = vars.user;
};
sops.secrets."minio/root_password" = {
owner = vars.user;
};

sops.templates."minio.env" = {
path = "/home/${vars.user}/.docker/minio/minio.env";
owner = vars.user;
mode = "0775";
content = ''
MINIO_ROOT_USER="${config.sops.placeholder."minio/root_user"}"
MINIO_ROOT_PASSWORD="${config.sops.placeholder."minio/root_password"}"
'';
};
}

@@ -1,34 +0,0 @@ (file removed)
-{pkgs, ...}: {
-project.name = "nas";
-
-networks.dmz = {
-name = "dmz";
-external = true;
-};
-
-services = {
-samba.service = {
-image = "dperson/samba";
-container_name = "samba";
-ports = [
-"137:137/udp"
-"138:138/udp"
-"139:139/tcp"
-"445:445/tcp"
-];
-environment = {
-USERID = 1000;
-GROUPID = 1000;
-TZ = "Europe/Berlin";
-};
-command = "-s 'public;/mount;yes;no;yes' -p";
-volumes = [
-"/storage/dataset/nas:/mount"
-];
-restart = "always";
-networks = [
-"dmz"
-];
-};
-};
-}

@@ -1,12 +0,0 @@ (file removed)
-{
-networking.firewall = {
-allowedUDPPorts = [137 138];
-allowedTCPPorts = [139 445];
-};
-
-virtualisation.arion = {
-projects.nas.settings = {
-imports = [./arion-compose.nix];
-};
-};
-}

@@ -8,14 +8,31 @@

 services = {
 vaultwarden.service = {
-image = "vaultwarden/server:1.30.5";
+image = "vaultwarden/server:1.31.0";
 container_name = "vaultwarden";
 labels = {
 "traefik.enable" = "true";
+"traefik.docker.network" = "dmz";
+
+"traefik.http.services.vaultwarden.loadbalancer.server.port" = "80";
+"traefik.http.routers.vaultwarden.service" = "vaultwarden";
 "traefik.http.routers.vaultwarden.entrypoints" = "websecure";
-"traefik.http.routers.vaultwarden.rule" = "Host(`vaultwarden.ghoscht.com`)";
+"traefik.http.routers.vaultwarden.rule" = "Host(`vault.ghoscht.com`)";
 "traefik.http.routers.vaultwarden.tls" = "true";
 "traefik.http.routers.vaultwarden.tls.certresolver" = "letsencrypt";
+
+"traefik.http.services.vaultwarden-external.loadbalancer.server.port" = "80";
+"traefik.http.routers.vaultwarden-external.service" = "vaultwarden-external";
+"traefik.http.routers.vaultwarden-external.rule" = "Host(`vault.ghoscht.com`)";
+"traefik.http.routers.vaultwarden-external.entrypoints" = "websecure-external";
+"traefik.http.routers.vaultwarden-external.tls" = "true";
+"traefik.http.routers.vaultwarden-external.tls.certresolver" = "letsencrypt";
+
+"diun.enable" = "true";
+"diun.watch_repo" = "true";
+"diun.sort_tags" = "semver";
+"diun.include_tags" = "^\\d+\\.\\d+\\.\\d+$$";
+"diun.exclude_tags" = "\\b\\d{4,}\\b";
 };
 volumes = [
 "/storage/dataset/docker/passwords/vaultwarden_data/:/data"

@@ -32,6 +32,7 @@
 };
 volumes = [
 "/home/ghoscht/.docker/push/ntfy_data/server.yml:/etc/ntfy/server.yml"
+"/storage/dataset/docker/push/ntfy_data:/etc/ntfy/data"
 ];
 environment = {
 TZ = "Europe/Berlin";

hosts/franz/arion/stats/arion-compose.nix (new file)
@@ -0,0 +1,120 @@
{
project.name = "stats";

networks.dmz = {
name = "dmz";
external = true;
};
networks.internal = {};

services = {
grafana.service = {
image = "grafana/grafana:10.4.4";
user = "1000";
container_name = "grafana";
labels = {
"traefik.enable" = "true";

"traefik.http.services.grafana.loadbalancer.server.port" = "3000";
"traefik.http.routers.grafana.service" = "grafana";
"traefik.http.routers.grafana.rule" = "Host(`grafana.ghoscht.com`)";
"traefik.http.routers.grafana.entrypoints" = "websecure";
"traefik.http.routers.grafana.tls" = "true";
"traefik.http.routers.grafana.tls.certresolver" = "letsencrypt";
};
environment = {
GF_SERVER_ROOT_URL = "https://grafana.ghoscht.com";

GF_AUTH_GENERIC_OAUTH_NAME = "authentik";
GF_AUTH_GENERIC_OAUTH_ENABLED = "true";
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP = "true";
GF_AUTH_GENERIC_OAUTH_SCOPES = "openid profile email";
GF_AUTH_GENERIC_OAUTH_AUTH_URL = "https://auth.ghoscht.com/application/o/authorize/";
GF_AUTH_GENERIC_OAUTH_TOKEN_URL = "https://auth.ghoscht.com/application/o/token/";
GF_AUTH_GENERIC_OAUTH_API_URL = "https://auth.ghoscht.com/application/o/userinfo/";

# GF_AUTH_OAUTH_AUTO_LOGIN = "true";
};
env_file = [
"/home/ghoscht/.docker/stats/grafana.env"
];
volumes = [
"/storage/dataset/docker/stats/grafana_data:/var/lib/grafana"
];
networks = [
"dmz"
"internal"
];
};
loki.service = {
image = "grafana/loki:3.0.0";
volumes = [
"/storage/dataset/docker/stats/loki_data:/etc/loki"
];
ports = [
"3100:3100"
];
command = "-config.file=/etc/loki/loki-config.yml";
networks = [
"internal"
];
};
promtail.service = {
image = "grafana/promtail:3.0.0";
volumes = [
"/var/log:/var/log"
"/storage/dataset/docker/stats/promtail_data/promtail-config.yml:/etc/promtail/promtail-config.yml"
];
command = "-config.file=/etc/promtail/promtail-config.yml";
networks = [
"internal"
];
};
prometheus.service = {
image = "prom/prometheus:v2.53.0";
volumes = [
"/storage/dataset/docker/stats/prometheus_config/prometheus.yml:/etc/prometheus/prometheus.yml"
"/storage/dataset/docker/stats/prometheus_data:/prometheus"
];
command = [
"--config.file=/etc/prometheus/prometheus.yml"
"--web.console.libraries=/etc/prometheus/console_libraries"
"--web.console.templates=/etc/prometheus/consoles"
];
networks = [
"internal"
];
};
node-exporter.service = {
image = "prom/node-exporter:v1.8.1";
volumes = [
"/proc:/host/proc:ro"
"/sys:/host/sys:ro"
"/:/rootfs:ro"
];
command = [
"--path.procfs=/host/proc"
"--path.rootfs=/rootfs"
"--path.sysfs=/host/sys"
"--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)"
];
networks = [
"internal"
];
};
# cadvisor.service = {
# image = "gcr.io/cadvisor/cadvisor:v0.49.1";
# volumes = [
# "/:/rootfs:ro"
# "/var/run:/var/run:ro"
# "/sys:/sys:ro"
# "/var/lib/docker:/var/lib/docker:ro"
# "/dev/disk:/dev/disk:ro"
# ];
# devices = ["/dev/kmsg"];
# networks = [
# "internal"
# ];
# };
};
}

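The prometheus service above only bind-mounts a prometheus.yml from the host, which is not part of this diff. A minimal sketch of a scrape configuration that would cover the node-exporter container on the shared internal network (job name and interval are assumptions, not taken from the repository):

global:
  scrape_interval: 30s
scrape_configs:
  - job_name: node
    static_configs:
      - targets: ["node-exporter:9100"]
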
hosts/franz/arion/stats/arion-pkgs.nix (new file)
@@ -0,0 +1,6 @@
# Instead of pinning Nixpkgs, we can opt to use the one in NIX_PATH
import <nixpkgs> {
# We specify the architecture explicitly. Use a Linux remote builder when
# calling arion from other platforms.
system = "x86_64-linux";
}

hosts/franz/arion/stats/default.nix (new file)
@@ -0,0 +1,55 @@
{config, ...}: let
vars = import ../../../../vars.nix;
in {
virtualisation.arion = {
projects.stats.settings = {
imports = [./arion-compose.nix];
};
};

sops.secrets."stats/oidc_client_id" = {
owner = vars.user;
};
sops.secrets."stats/oidc_client_secret" = {
owner = vars.user;
};

sops.templates."grafana.env" = {
path = "/home/${vars.user}/.docker/stats/grafana.env";
owner = vars.user;
mode = "0775";
content = ''
GF_AUTH_GENERIC_OAUTH_CLIENT_ID="${config.sops.placeholder."stats/oidc_client_id"}"
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET="${config.sops.placeholder."stats/oidc_client_secret"}"
'';
};

systemd.services.add-loki-logging-driver = {
description = "Add grafana loki docker driver";
after = ["network.target"];
wantedBy = ["multi-user.target"];

serviceConfig.Type = "oneshot";
script = let
dockercli = "${config.virtualisation.docker.package}/bin/docker";
in ''
# Put a true at the end to prevent getting non-zero return code, which will
# crash the whole service.
check=$(${dockercli} plugin ls | grep "loki" || true)
if [ -z "$check" ]; then
${dockercli} plugin install grafana/loki-docker-driver:latest --alias loki --grant-all-permissions
else
echo "loki docker driver already exists in docker"
fi
'';
};

virtualisation.docker.daemon.settings = {
debug = true;
log-driver = "loki";
log-opts = {
loki-url = "http://localhost:3100/loki/api/v1/push";
# loki-url = "http://host.docker.internal:3100/loki/api/v1/push";
};
};
}

hosts/franz/arion/wiki/arion-compose.nix (new file)
@@ -0,0 +1,82 @@
{
project.name = "wiki";

networks.dmz = {
name = "dmz";
external = true;
};
networks.internal = {};

services = {
outline.service = {
image = "docker.getoutline.com/outlinewiki/outline:0.77.2";
container_name = "outline";
labels = {
"traefik.enable" = "true";

"traefik.http.services.outline.loadbalancer.server.port" = "3000";
"traefik.http.routers.outline.service" = "outline";
"traefik.http.routers.outline.rule" = "Host(`wiki.ghoscht.com`)";
"traefik.http.routers.outline.entrypoints" = "websecure";
"traefik.http.routers.outline.tls" = "true";
"traefik.http.routers.outline.tls.certresolver" = "letsencrypt";
};
environment = {
NODE_ENV = "production";

PGSSLMODE = "disable";
REDIS_URL = "redis://redis:6379";

URL = "https://wiki.ghoscht.com";
PORT = 3000;

OIDC_AUTH_URI = "https://auth.ghoscht.com/application/o/authorize/";
OIDC_TOKEN_URI = "https://auth.ghoscht.com/application/o/token/";
OIDC_USERINFO_URI = "https://auth.ghoscht.com/application/o/userinfo/";

AWS_REGION = "local";
AWS_S3_UPLOAD_BUCKET_URL = "https://files.ghoscht.com";
AWS_S3_UPLOAD_BUCKET_NAME = "outline";
AWS_S3_UPLOAD_MAX_SIZE = 26214400;
AWS_S3_FORCE_PATH_STYLE = "true";
AWS_S3_ACL = "private";
};
env_file = [
"/home/ghoscht/.docker/wiki/outline.env"
];
restart = "always";
depends_on = {
redis = {condition = "service_healthy";};
postgres = {condition = "service_healthy";};
};
networks = [
"dmz"
"internal"
];
};
redis.service = {
image = "redis:7.2.4";
command = "--save 60 1 --loglevel warning";
restart = "always";
volumes = [
"/storage/dataset/docker/wiki/redis_data:/data"
];
networks = [
"internal"
];
};
postgres.service = {
image = "postgres:12.18";
restart = "always";
volumes = [
"/storage/dataset/docker/wiki/postgres_data:/var/lib/postgresql/data"
];
networks = [
"internal"
];
env_file = [
"/home/ghoscht/.docker/wiki/postgres.env"
];
};
};
}

hosts/franz/arion/wiki/arion-pkgs.nix (new file)
@@ -0,0 +1,6 @@
# Instead of pinning Nixpkgs, we can opt to use the one in NIX_PATH
import <nixpkgs> {
# We specify the architecture explicitly. Use a Linux remote builder when
# calling arion from other platforms.
system = "x86_64-linux";
}

hosts/franz/arion/wiki/default.nix (new file)
@@ -0,0 +1,67 @@
{config, ...}: let
vars = import ../../../../vars.nix;
in {
virtualisation.arion = {
projects.wiki.settings = {
imports = [./arion-compose.nix];
};
};

sops.secrets."wiki/aws_access_key_id" = {
owner = vars.user;
};
sops.secrets."wiki/aws_secret_access_key" = {
owner = vars.user;
};
sops.secrets."wiki/oidc_client_id" = {
owner = vars.user;
};
sops.secrets."wiki/oidc_client_secret" = {
owner = vars.user;
};
sops.secrets."wiki/secret_key" = {
owner = vars.user;
};
sops.secrets."wiki/utils_secret" = {
owner = vars.user;
};
sops.secrets."wiki/db_user" = {
owner = vars.user;
};
sops.secrets."wiki/db_pass" = {
owner = vars.user;
};
sops.secrets."wiki/db_name" = {
owner = vars.user;
};

sops.templates."wiki-postgres.env" = {
path = "/home/${vars.user}/.docker/wiki/postgres.env";
owner = vars.user;
mode = "0775";
content = ''
POSTGRES_PASSWORD="${config.sops.placeholder."wiki/db_pass"}"
POSTGRES_USER="${config.sops.placeholder."wiki/db_user"}"
POSTGRES_DB="${config.sops.placeholder."wiki/db_name"}"
'';
};

sops.templates."wiki-outline.env" = {
path = "/home/${vars.user}/.docker/wiki/outline.env";
owner = vars.user;
mode = "0775";
content = ''
SECRET_KEY="${config.sops.placeholder."wiki/secret_key"}"
UTILS_SECRET="${config.sops.placeholder."wiki/utils_secret"}"

OIDC_CLIENT_ID="${config.sops.placeholder."wiki/oidc_client_id"}"
OIDC_CLIENT_SECRET="${config.sops.placeholder."wiki/oidc_client_secret"}"

AWS_ACCESS_KEY_ID="${config.sops.placeholder."wiki/aws_access_key_id"}"
AWS_SECRET_ACCESS_KEY="${config.sops.placeholder."wiki/aws_secret_access_key"}"

DATABASE_URL="postgres://${config.sops.placeholder."wiki/db_user"}:${config.sops.placeholder."wiki/db_pass"}@postgres:5432/${config.sops.placeholder."wiki/db_name"}"
DATABASE_URL_TEST="postgres://${config.sops.placeholder."wiki/db_user"}:${config.sops.placeholder."wiki/db_pass"}@postgres:5432/${config.sops.placeholder."wiki/db_name"}"
'';
};
}

@@ -21,16 +21,17 @@ in {
 ../common/optional/systemd-boot.nix
 ../common/optional/gnome-keyring.nix
 ../common/optional/docker.nix
-../common/optional/vsftpd.nix
 ./sops.nix
 ./restic.nix
 ./arion
 ./hydra.nix
+./samba.nix
 ];

 # Enable ZFS
 boot.supportedFilesystems = ["zfs"];
 networking.hostId = "f014fc43";
+services.zfs.autoScrub.enable = true;

 systemd.enableEmergencyMode = false;
 networking.firewall.enable = true;

@@ -37,7 +37,7 @@ in {
 services.cron = {
 enable = true;
 systemCronJobs = [
-"*/5 * * * * root . /etc/profile; autorestic -c /home/ghoscht/.autorestic.yml --ci cron"
+"*/5 * * * * root . /etc/profile; autorestic -c /home/ghoscht/.autorestic.yml --ci cron > /var/log/autorestic-bin.log"
 ];
 };

@@ -52,6 +52,9 @@ in {
 sops.secrets."autorestic/eustachius_key" = {
 owner = vars.user;
 };
+sops.secrets."autorestic/ntfy_access_token" = {
+owner = vars.user;
+};

 sops.templates.".autorestic.yml" = {
 path = "/home/${vars.user}/.autorestic.yml";
@@ -63,6 +66,15 @@ in {
 forget:
 keep-weekly: 7
 keep-monthly: 12
+
+extras:
+  default_hooks: &default_hooks
+    success:
+      - echo "Backup of $AUTORESTIC_LOCATION successful! Added $AUTORESTIC_FILES_ADDED_0 files and changed $AUTORESTIC_FILES_CHANGED_0 files with a total size of $AUTORESTIC_ADDED_SIZE_0. Processed $AUTORESTIC_PROCESSED_FILES_0 files with total size $AUTORESTIC_PROCESSED_SIZE_0 in $AUTORESTIC_PROCESSED_DURATION_0. Snapshot $AUTORESTIC_SNAPSHOT_ID_0" >> /var/log/autorestic-backup.log
+    failure:
+      - echo "Backup of $AUTORESTIC_LOCATION failed" >> /var/log/autorestic.log
+      - 'curl -H "Authorization: Bearer ${config.sops.placeholder."autorestic/ntfy_access_token"}" -H "X-Tags: warning" -H "X-Title: Backup Failure" -d "Backup of location $AUTORESTIC_LOCATION failed" https://push.ghoscht.com/autorestic'
+
 locations:
 dashboard:
 from: /storage/dataset/docker/dashboard
@@ -71,6 +83,7 @@ in {
 - eustachius
 cron: '0 4 * * 0' # Every Sunday at 4:00
 hooks:
+<<: *default_hooks
 before:
 - arion -f ${arionPath}/dashboard/arion-compose.nix -p ${arionPath}/dashboard/arion-pkgs.nix stop
 after:
@@ -79,9 +92,11 @@ in {
 from: /storage/dataset/docker/dns
 to:
 - zfs
+- ssd
 - eustachius
 cron: '0 4 * * 0' # Every Sunday at 4:00
 hooks:
+<<: *default_hooks
 before:
 - arion -f ${arionPath}/dns/arion-compose.nix -p ${arionPath}/dns/arion-pkgs.nix stop
 after:
@@ -93,6 +108,7 @@ in {
 - eustachius
 cron: '0 4 * * 0' # Every Sunday at 4:00
 hooks:
+<<: *default_hooks
 before:
 - arion -f ${arionPath}/feed/arion-compose.nix -p ${arionPath}/feed/arion-pkgs.nix stop
 after:
@@ -105,6 +121,7 @@ in {
 - eustachius
 cron: '0 4 * * *' # Every Day at 4:00
 hooks:
+<<: *default_hooks
 before:
 - arion -f ${arionPath}/git/arion-compose.nix -p ${arionPath}/git/arion-pkgs.nix stop
 after:
@@ -113,9 +130,11 @@ in {
 from: /storage/dataset/docker/media
 to:
 - zfs
+- ssd
 - eustachius
 cron: '0 4 * * *' # Every Day at 4:00
 hooks:
+<<: *default_hooks
 before:
 - arion -f ${arionPath}/media/arion-compose.nix -p ${arionPath}/media/arion-pkgs.nix stop
 after:
@@ -124,9 +143,11 @@ in {
 from: /storage/dataset/docker/nextcloud
 to:
 - zfs
+- ssd
 - eustachius
 cron: '0 4 * * *' # Every Day at 4:00
 hooks:
+<<: *default_hooks
 before:
 - arion -f ${arionPath}/nextcloud/arion-compose.nix -p ${arionPath}/nextcloud/arion-pkgs.nix stop
 after:
@@ -138,6 +159,7 @@ in {
 - eustachius
 cron: '0 4 * * 0' # Every Sunday at 4:00
 hooks:
+<<: *default_hooks
 before:
 - arion -f ${arionPath}/smarthome/arion-compose.nix -p ${arionPath}/smarthome/arion-pkgs.nix stop
 after:
@@ -150,6 +172,7 @@ in {
 - eustachius
 cron: '0 4 * * *' # Every Day at 4:00
 hooks:
+<<: *default_hooks
 before:
 - arion -f ${arionPath}/passwords/arion-compose.nix -p ${arionPath}/passwords/arion-pkgs.nix stop
 after:
@@ -161,6 +184,7 @@ in {
 - eustachius
 cron: '0 4 * * 0' # Every Sunday at 4:00
 hooks:
+<<: *default_hooks
 before:
 - arion -f ${arionPath}/matrix/arion-compose.nix -p ${arionPath}/matrix/arion-pkgs.nix stop
 after:
@@ -171,6 +195,7 @@ in {
 - eustachius
 cron: '0 4 * * 0' # Every Sunday at 4:00
 hooks:
+<<: *default_hooks
 before:
 - arion -f ${arionPath}/media/arion-compose.nix -p ${arionPath}/media/arion-pkgs.nix stop
 after:
@@ -181,6 +206,7 @@ in {
 - zfs
 cron: '55 3 * * *' # Every Day at 3:55
 hooks:
+<<: *default_hooks
 before:
 - arion -f ${arionPath}/headscale/arion-compose.nix -p ${arionPath}/headscale/arion-pkgs.nix stop
 after:
@@ -189,9 +215,11 @@ in {
 from: /storage/dataset/docker/auth
 to:
 - zfs
+- ssd
 - eustachius
 cron: '55 3 * * *' # Every Day at 3:55
 hooks:
+<<: *default_hooks
 before:
 - arion -f ${arionPath}/auth/arion-compose.nix -p ${arionPath}/auth/arion-pkgs.nix stop
 after:
@@ -207,7 +235,7 @@ in {
 key: '${config.sops.placeholder."autorestic/ssd_key"}'
 eustachius:
 type: rest
-path: http://100.64.0.3:8000/Backups
+path: http://100.64.0.3:8000/franz
 key: '${config.sops.placeholder."autorestic/eustachius_key"}'
 '';
 };

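The &default_hooks anchor and the <<: *default_hooks lines added above are plain YAML anchors and merge keys: the merge key copies the anchored mapping (the success/failure hooks) into each location's hooks while the location keeps its own before/after entries. A standalone illustration of the mechanism, with purely illustrative names:

defaults: &defaults
  success:
    - echo "ok"
hooks:
  <<: *defaults
  before:
    - echo "starting"
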
hosts/franz/samba.nix (new file)
@@ -0,0 +1,57 @@
{
services.samba = {
enable = true;
securityType = "user";
openFirewall = true;
extraConfig = ''
workgroup = WORKGROUP
server string = franz
netbios name = franz
security = user
#use sendfile = yes
#max protocol = smb2
# note: localhost is the ipv6 localhost ::1
hosts allow = 192.168.178. 127.0.0.1 localhost
hosts deny = 0.0.0.0/0
guest account = nobody
map to guest = bad user

# debugging
# log file = /var/log/samba/log.%m
# max log size = 1000
# logging = file
'';

# Run sudo smbpasswd -a <username> to set the smb password for an EXISTING linux user
shares = {
software = {
path = "/storage/dataset/data/torrents/misc";
browseable = "yes";
"read only" = "yes";
"guest ok" = "no";
"create mask" = "0644";
"directory mask" = "0755";
"force user" = "ghoscht";
"force group" = "users";
};
max = {
path = "/storage/dataset/nas/max";
browseable = "yes";
"read only" = "no";
"guest ok" = "no";
"valid users" = "max";
"create mask" = "0644";
"directory mask" = "0755";
"force user" = "ghoscht";
"force group" = "users";
};
};
};

services.samba-wsdd = {
enable = true;
openFirewall = true;
};

networking.firewall.allowPing = true;
}

@@ -1,114 +0,0 @@ (entire file removed)
version: '3'
services:
  traefik:
    image: traefik
    container_name: traefik
    restart: always
    ports:
      - "80:80"
      - "443:443"
      - "6666:8080"
    volumes:
      - ./traefik_data:/etc/traefik
      - /var/run/docker.sock:/var/run/docker.sock:ro
    networks:
      traefik_net:
    labels:
      - traefik.enable=true
      - traefik.http.routers.dashboard.rule=Host(`traefik.ghoscht.com`)
      - traefik.http.routers.dashboard.entrypoints=websecure
      - traefik.http.services.dashboard.loadbalancer.server.port=8080
      - traefik.http.routers.dashboard.tls=true
      - traefik.http.routers.dashboard.tls.certresolver=lencrypt
    env_file:
      - traefik.env
    dns:
      - 1.1.1.1
  homarr:
    container_name: homarr
    image: ghcr.io/ajnart/homarr:latest
    restart: always
    volumes:
      - ./homarr_data:/app/data/configs
      - ./homarr_icons:/app/public/imgs
    networks:
      traefik_net:
    labels:
      - traefik.enable=true
      - traefik.http.routers.homarr.entrypoints=websecure
      - traefik.http.routers.homarr.rule=Host(`dashboard.ghoscht.com`)
      - traefik.http.routers.homarr.tls=true
      - traefik.http.routers.homarr.tls.certresolver=lencrypt
    dns:
      - 1.1.1.1
  scrutiny:
    container_name: scrutiny
    image: ghcr.io/analogj/scrutiny:master-omnibus
    restart: always
    cap_add:
      - SYS_RAWIO
    volumes:
      - /run/udev:/run/udev:ro
      - ./scrutiny_data:/opt/scrutiny/config
      - ./scrutiny_db:/opt/scrutiny/influxdb
    labels:
      - traefik.enable=true
      - traefik.http.routers.scrutiny.entrypoints=websecure
      - traefik.http.routers.scrutiny.rule=Host(`scrutiny.ghoscht.com`)
      - traefik.http.services.scrutiny.loadbalancer.server.port=8080
      - traefik.http.routers.scrutiny.tls=true
      - traefik.http.routers.scrutiny.tls.certresolver=lencrypt
    networks:
      traefik_net:
    devices:
      - "/dev/sda"
      - "/dev/sdb"
  ntfy:
    image: binwiederhier/ntfy
    container_name: ntfy
    command:
      - serve
    environment:
      - TZ=UTC # optional: set desired timezone
    user: 1000:1000 # optional: replace with your own user/group or uid/gid
    volumes:
      - ./ntfy_data/server.yml:/etc/ntfy/server.yml
    labels:
      - traefik.enable=true
      - traefik.http.routers.ntfy.entrypoints=websecure
      - traefik.http.routers.ntfy.rule=Host(`ntfy.ghoscht.com`,`ntfy.local.ghoscht.com`)
      - traefik.http.routers.ntfy.tls=true
      - traefik.http.routers.ntfy.tls.certresolver=lencrypt
    networks:
      traefik_net:
  homeassistant:
    container_name: homeassistant
    image: "ghcr.io/home-assistant/home-assistant:stable"
    volumes:
      - /mnt/hdd/docker/home-assistant_data:/config
      - /etc/localtime:/etc/localtime:ro
      - /run/dbus:/run/dbus:ro
    restart: unless-stopped
    privileged: true
    labels:
      - traefik.enable=true
      - traefik.http.routers.homeassistant.entrypoints=websecure
      - traefik.http.routers.homeassistant.rule=Host(`home.ghoscht.com`,`home.local.ghoscht.com`)
      - traefik.http.routers.homeassistant.tls=true
      - traefik.http.routers.homeassistant.tls.certresolver=lencrypt
      - traefik.http.services.homeassistant.loadbalancer.server.port=8123
    networks:
      traefik_net:
  cloudflared:
    container_name: cloudflared
    image: cloudflare/cloudflared:latest
    restart: always
    command: tunnel --no-autoupdate --protocol http2 run
    env_file:
      - cloudflared.env
    networks:
      traefik_net:
networks:
  traefik_net:
    name: traefik-net
    external: true
rsc/docker/franz/infrastructure/traefik_data/config.yml (new file, 6 lines)
@@ -0,0 +1,6 @@
http:
  middlewares:
    crowdsec-bouncer:
      forwardauth:
        address: http://bouncer-traefik:8080/api/v1/forwardAuth
        trustForwardHeader: true
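The middleware defined above only takes effect once a router references it. Below is a minimal sketch of opting a service into the bouncer from the Docker provider, assuming an arion-style service definition with compose labels given as an attribute set; the whoami service, image, and hostname are placeholders and not part of this change. The @file suffix is needed because the middleware is declared in Traefik's file provider rather than via Docker labels:

  # Hypothetical arion service sketch; only the middlewares label is the point here.
  services.whoami.service = {
    image = "traefik/whoami";
    labels = {
      "traefik.enable" = "true";
      "traefik.http.routers.whoami.rule" = "Host(`whoami.ghoscht.com`)";
      "traefik.http.routers.whoami.entrypoints" = "websecure";
      # reference the crowdsec-bouncer middleware from traefik_data/config.yml
      "traefik.http.routers.whoami.middlewares" = "crowdsec-bouncer@file";
    };
  };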
@@ -1,6 +0,0 @@ (entire file removed)
http:
  middlewares:
    httpsredirect:
      redirectScheme:
        scheme: https
        permanent: true
@@ -1,9 +0,0 @@ (entire file removed)
http:
  routers:
    redirecttohttps:
      entryPoints:
        - "web"
      middlewares:
        - "httpsredirect"
      rule: "HostRegexp(`{host:.+}`)"
      service: "noop@internal"
@@ -1,363 +1,8 @@
The stock, fully commented ntfy server.yml template (363 lines of upstream option documentation covering listen addresses and TLS, unix sockets, Firebase, the message cache, the auth database, proxy handling, attachments, outgoing and incoming e-mail, web push, Twilio calls, keepalive and manager intervals, disallowed topics, the web root, signup/login/reservation flags, upstream poll requests, rate limiting, Stripe payments, metrics, profiling, and logging) is cut down to a minimal 8-line configuration; only base-url was already set before:

#Basics
base-url: https://ntfy.ghoscht.com
behind-proxy: true
#Auth
auth-file: /etc/ntfy/data/user.db
# auth-default-access: "read-only"
enable-signup: false
enable-login: true
@@ -31,6 +31,7 @@ autorestic:
    zfs_key: ENC[AES256_GCM,data:HyZBD202BoG6ncw37Tg9LPvfvQPnOaLJKk+gMvdZflt+XZ/7lx6TZOp/loiDhSSBTMusAXaI/aDkAFx2a7yDUQ==,iv:nQAHi9TyUXamSlFq99NYvWLOBSuZstuYNJLgVpxF1JU=,tag:mIS/E4Wr6IdWsZtehNY7UA==,type:str]
    ssd_key: ENC[AES256_GCM,data:xgJCpNkmIn8VU+jG++0kLW8WM9RbTBmsZeOuOz1WWmc4sOdN4lWfPvLjcTAHZDIXFvX7NodEcGAYDmcWNw7QBw==,iv:wGJcz7CEjhwsUlVEyuHOBcayzE97PfWi2f0TvITzafg=,tag:wpaJFcQBd/kAmExfD6fwJQ==,type:str]
    eustachius_key: ENC[AES256_GCM,data:qiq6Y05bV7mf0OOBDzR09MrW5g01WxmWVHB3vJ04XQaOVMGzl7hZq0ewcLOxitbFw3VcN5GQBpA8smlmahz8VA==,iv:epq7+tXG9QYAjNu8qHI2gjBYUuoPNdZg8+2XCLOwu1Q=,tag:qM8YdSZhwwM3GDrNPfo/Jg==,type:str]
+   ntfy_access_token: ENC[AES256_GCM,data:BH1/tNYDj2ggzdNByQDYT0cu9hGTgaGEjXUv4HdqO1M=,iv:nq+frAIoNr8uCwGadOqdAP90kjukVTSq3Tc8hWbCi3Q=,tag:r92qKR78keQVgI/VMXipvw==,type:str]
matrix:
    postgres_database: ENC[AES256_GCM,data:9O0vYjbTuQ==,iv:L5QCwhFSjPW0OiUMjCQo6BcLktUXJcqTsTXEi5JdaWo=,tag:LUPRSZl0pza5WOWI8RrAmw==,type:str]
    postgres_user: ENC[AES256_GCM,data:S9ksmTOAbBg=,iv:q/6Oo9JhiSAqQq3ZKa0dbQGtfYAuD0oeiDLR4YwV0nk=,tag:RIc/1UVs88Jg8+4zGnW6vQ==,type:str]

@@ -45,6 +46,26 @@ auth:
homarr:
    oidc_client_secret: ENC[AES256_GCM,data:ykaMgcS1x/sMFPmi9vF8RdS7Dj8tTpNFybqwJ5MkK3OCIqYt5FtY8si7ZbKC4IMquOA4w3fWpHdygvFJwJOyNNvznWuasR1afhaAHIHb85J41GWCpMLWWZub+NUuU2pSudvUYk9LeDUBTKwtfHgr4DUzoQeBocG0httGFKBAXbo=,iv:vThB7ZCgEB5yQoiOYhDcHiGm0lYXy1LCJWunH5HwFq0=,tag:68jkMBnCc2e3bKWR/Hnnww==,type:str]
    oidc_client_id: ENC[AES256_GCM,data:2KxgJ7rFNru7rf8P9v/LOcA7TjH2ZFerc4PBmetrkB7hre9fHTa+TQ==,iv:9k0YuPNzEjTTBN0l/oyT5mtZKLCGWZ7ZJpE8g2SBu3E=,tag:C/hzffeOVgke1SQZHPjyrA==,type:str]
+minio:
+   root_user: ENC[AES256_GCM,data:Q5yRACtvoQ==,iv:GTLtwwQ5W50w6eDO+PuihNAHWm6xyM9uNa8mbGG3tWI=,tag:O3MUlh2d8iuFTPRq1PvTWw==,type:str]
+   root_password: ENC[AES256_GCM,data:0//dfGYkV80=,iv:h1b0R2QRpN/RI9kUBU0fiKLOI3PUYmisa7RH1ibSF4c=,tag:ln1cv5LQpb76vK5+eTvSuA==,type:str]
+diun:
+   ntfy_access_token: ENC[AES256_GCM,data:37UYgaMlmpoMW74LqtxkuMqGQmCvLpVdJAgEmVxSULY=,iv:tZPlfIgo1vWvMPlQzCBPXj5xYDiTWJOsVwkxBjGNMDk=,tag:882g2UxFfg5VSKqAtEMk2Q==,type:str]
+crowdsec:
+   traefik_bouncer_api_key: ENC[AES256_GCM,data:qNY3cWNxG2pyrTN1UnYCGWCmx1Yue1WAJZ8DEsLqnZ+RDoaJfvqqJazJUg==,iv:x0K9Vq+ZuojmeHSbS/0PoOQdLIRDMtGdmU+msv4PWzI=,tag:qgxQIBHtARTNv17x7N6zyw==,type:str]
+stats:
+   oidc_client_id: ENC[AES256_GCM,data:/0Y/qLyxGTKskcoQVdlQkEYHa1P7+0PYwv1GoXV5r48btzpPHYysLA==,iv:QT6GM3I38/kSDrzm5phPWnGQxjds0qamduYuIvj4dig=,tag:yGnM4jOwDtC81jrXUG6r+w==,type:str]
+   oidc_client_secret: ENC[AES256_GCM,data:ETl5Lm8GSk/xwD9+TZZlPwNA8CxdQ2teyjWVWShXrx0o0qdE72lIBnW7mW9bklx1RMhSBvhArZPMA9fFN29nCJ4E9zXNTxFFviHUZTr+8mdm5g9TYu4WJxiJ3rzIavgx4DQR0FIQyXzXXMSoLDpOl+u4oT8vfb3ef4bKIDktBGU=,iv:KMy70+IA8KKj4mjB4sV3uXg8iDjponO+AzYlNYvv3pE=,tag:WMsUg0PNILBz1jNyV6PggQ==,type:str]
+wiki:
+   aws_access_key_id: ENC[AES256_GCM,data:Fqfa6XcDDpQ0l+/entQh6sxobBM=,iv:gbfHxTy0Oj9xYlucpN98CjNIURDrx9BuFF4Pfo90V0M=,tag:df8Z3J2ovO1MHPnzOsCtpg==,type:str]
+   aws_secret_access_key: ENC[AES256_GCM,data:sbgzvlN5dP4jZIGKtDsMn5o2RqWTl+XNi80ydnOgrQkgnQ/HxluWWA==,iv:xyCKfbf/UF9cFunCYHwVBw4eVvOeZQtfPtrz2s6zIII=,tag:S0wzL8d5iEn20VbOVfrZBw==,type:str]
+   oidc_client_id: ENC[AES256_GCM,data:SSuRQJfgzeb641U2eeVE8wYZAbEWHYSSx0b8n4687FHLslFPGCAWeA==,iv:khCwIE50KVEtHJoDJBdCBJIDVZiDjkCS2D4yUt3AEOQ=,tag:JjVil9C2HHdTH1fDzDAJkg==,type:str]
+   oidc_client_secret: ENC[AES256_GCM,data:6TgTZsfaBdsismhK/lAiayMU8uIFOCmumV9tzmqNSocbqQgKAuEwgXTisMtndsk64JA2NYCS2DXhe+NSBO++aBscZ/hbqxBNWqw7c84YugqXMRFeqidb+RSJKdJ6WDmwGBfGm6/kjGJ+FSGuiu1S4sfOlfp2bXM5mhvgAXUygfg=,iv:Q9QoWp2V6uwFJidsL7QzB67TO4uFsqmun8zdZgRXbNI=,tag:wphiixaGZgsmk4sQFyvqbg==,type:str]
+   secret_key: ENC[AES256_GCM,data:Xr4iRj2oYJYVBBzIOsT6d4LjQo/M+qy7XVoNoM9vQWeWZuZlCnrdy7cfsa6VfHVnPNfdMIccmvBk3VzdDH1ukA==,iv:62LiqANdqrSMGzgaxL3uxgwyZtZd1XrsYEMF/ixt+lM=,tag:OH4xth89KDKONnbniM3itg==,type:str]
+   utils_secret: ENC[AES256_GCM,data:M062FvE0kFVyjkxIlolLtR/NwIya1Si7r/im1SDLvGNIHn4kDgat5KTHitjjMFMOKeSKT7ipgHc/lWCQYbi6IA==,iv:LWLOt+vZF0xK68LJTw1xWIWG65pkGiMnx/oMRBzeyyQ=,tag:SXKu3UO6IKupBVfvAwCtHw==,type:str]
+   db_user: ENC[AES256_GCM,data:g2+KPA==,iv:0I7EoGNlnnKf5H0UnmJ++9XDHEqZpXgZkyaW9flxN8c=,tag:b3WrfHGkxIJ1nNFp3FHAjA==,type:str]
+   db_pass: ENC[AES256_GCM,data:rYmNXQ==,iv:ZnImkMdIkp92jkojLVBSGSN06my3xFwr3AFfENNXgfQ=,tag:AZHqXRLfJ0lFrGyut+Sdug==,type:str]
+   db_name: ENC[AES256_GCM,data:Ns7vKJxeTw==,iv:GREMMRicS+1n/uk+KOeplqHn/ZdjjOjQ4d0qV5FICy8=,tag:CSeDTNjBiJ4G2VnytpNXiw==,type:str]
sops:
    kms: []
    gcp_kms: []

@@ -60,8 +81,8 @@ sops:
            VUUxcEhvYi8zeXlCUUViUTl0eWdhcU0KXOfbnDc+zc8lnBcyEAV5EiJSjcSU6AgI
            EfeRw8qVqwChrYn1agslcNnDbE0WQsOCBuA6cE4V3kRofp9HU949ig==
            -----END AGE ENCRYPTED FILE-----
-   lastmodified: "2024-05-18T21:12:01Z"
+   lastmodified: "2024-08-09T13:53:16Z"
-   mac: ENC[AES256_GCM,data:kBGP7V4f8d8JWdMdwPEYM1L2zZ4p6eHfwiepfLpBAr0VyhE9YOpPIdt9Tl+ky3mRyfn/DnX03ThiAKQtTrls3/lJEmJRd1dswRd+Mtls3j1QlxhorHYb8g6QvlmyepNf5j5Egqm9hNX+L3aV29mKoO42VxvfaopKduNGt1BrSFo=,iv:Uq+hQUMF+PBV5f6V9AsnxIxX0fKn84MAPEfTFtOtsus=,tag:6LtblCK7FLnhfS0dHsrcnQ==,type:str]
+   mac: ENC[AES256_GCM,data:5pANdrfnPuDf2mai0UgcFbwr4OzjLzLWraKOt38fX2MySYH2EryMzsk4prhehXPTkD3soMFwaVbuuqZUbkWCWM3CtjuyCisQH4uiZZw+slw6g8atr4h3tpHtD2SwgGVESMJouVQyfb9ko4O1ArBvml/0a6DAGmwoxlQwGboZR5M=,iv:oiZx4BsRBNAn+hjhzhV6oVZrYQJ32DAQlyNNsevaLpc=,tag:A0EsGeaP5vy9vA8WZjbxIQ==,type:str]
    pgp: []
    unencrypted_suffix: _unencrypted
    version: 3.8.1
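The new encrypted entries only become useful once a host renders them into files the containers can read. Below is a minimal sketch for the diun token using the sops-nix template mechanism this configuration already relies on; the secret path, template path, ownership, and the environment variable name are all assumptions, and the exact variable diun expects for its ntfy notifier should be taken from diun's own documentation:

  # Hypothetical consumer of the new diun/ntfy_access_token secret.
  sops.secrets."diun/ntfy_access_token" = {
    owner = vars.user; # assumes the same vars.user convention used for other services
  };

  sops.templates."diun.env" = {
    path = "/home/${vars.user}/.docker/diun/diun.env"; # assumed location
    owner = vars.user;
    content = ''
      # the variable name depends on how diun's ntfy notifier is wired up
      NTFY_ACCESS_TOKEN=${config.sops.placeholder."diun/ntfy_access_token"}
    '';
  };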