commit db6d4d7490
16 changed files with 552 additions and 8 deletions
@@ -1,2 +1,3 @@
 * xref:index.adoc[Getting Started]
 * xref:options.adoc[Arion Options]
+* xref:deployment.adoc[Deployment]
docs/modules/ROOT/pages/deployment.adoc (new file, 68 lines)
@@ -0,0 +1,68 @@
= Deployment with Arion

Arion projects can be deployed in Nix-like or Docker-like ways.

== Docker images

When you disable `useHostStore`, arion will build images, which can be deployed
to any Docker host, including non-NixOS hosts.
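For illustration, a minimal composition that builds such an image — a sketch only: the option names `service.useHostStore`, `image.contents`, and `image.command` are used elsewhere in this commit, but the service itself is hypothetical.

```nix
{ pkgs, ... }: {
  project.name = "example";
  services.hello = {
    # Build a self-contained image instead of bind-mounting the host's /nix/store.
    service.useHostStore = false;
    image.contents = [ pkgs.busybox ];
    image.command = [ "sh" "-c" "while true; do echo hello; sleep 10; done" ];
  };
}
```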
=== Remote Docker socket

NOTE: Access to a Docker socket is equivalent to root access on the host.

Docker supports authentication via TLS client certificates.

The xref:hercules-ci-effects:ROOT:reference/nix-functions/runArion.adoc[runArion Effect] uses this technique.

Because this technique works with a single Docker host, it does not need a registry.
=== Upload to registry

You can either use `arion push` or write custom push logic using the `arion cat`
command, the `eval` function on the `arion` package, or the `lib.eval` function
on the flake to retrieve the images defined in a project. A sketch of the
custom route follows.
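A minimal sketch of the `eval` route, assuming `pkgs` provides the `arion` package; only `out.dockerComposeYaml` is taken from this commit's code, so treat other attribute paths as something to discover via `arion cat`:

```nix
let
  pkgs = import ./arion-pkgs.nix;
  # Evaluate the project the same way the NixOS module below does.
  composition = pkgs.arion.eval { modules = [ ./arion-compose.nix ]; };
in
# The generated docker-compose file; image derivations are reachable
# through composition.config in the same way.
composition.config.out.dockerComposeYaml
```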
== NixOS module

Arion projects can be deployed as part of a NixOS configuration. This ties the
project revision to the system configuration revision, which can be a good or a
bad thing, depending on your deployment strategy. At a low level, a benefit is
that no store paths need to be copied locally; remote NixOS deployments can use
Nix's copy-closure algorithm for efficient transfers, and transparent binary
caches rather than an inherently stateful Docker registry solution.

Extend your NixOS configuration by adding the configuration elements to an
existing configuration. You could create a new module file for it, if your
choice of `imports` allows it.

NOTE: This deployment method does NOT use an `arion-pkgs.nix` file, but reuses
the host `pkgs`.
```nix
{
  imports = [
    # Pick one of:
    # - niv
    ((import ./nix/sources.nix).arion + "/nixos-module.nix")
    # - flakes (where arion is a flake input)
    arion.nixosModules.arion
    # - other
    (arionPath + "/nixos-module.nix")
  ];

  virtualisation.arion = {
    backend = "podman-socket"; # or "docker"
    projects.example.settings = {
      # Specify your project here, or import it from a file.
      # NOTE: This does NOT use ./arion-pkgs.nix, but defaults to NixOS' pkgs.
      imports = [ ./arion-compose.nix ];
    };
  };
}
```
See also:

- xref:hercules-ci-effects:ROOT:reference/nix-functions/runNixOS.adoc[runNixOS Effect]
- xref:hercules-ci-effects:ROOT:reference/nix-functions/runNixOps2.adoc[runNixOps2 Effect]
@@ -233,7 +233,7 @@ Type:: string
 Default::
 +
 ----
-{"_type":"literalExample","text":"config.service.name"}
+{"_type":"literalExpression","text":"config.service.name"}
 ----
 
 
@@ -38,5 +38,7 @@
     in composition.config.out.dockerComposeYaml;
   };
+
+  nixosModules.arion = ./nixos-module.nix;
 
   };
 }
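A sketch of consuming the new `nixosModules.arion` output from a downstream flake; the `example` host and the `nixpkgs` input are illustrative, while `nixosSystem` is the standard Nixpkgs entry point and the `virtualisation.arion` options are defined in `nixos-module.nix` below:

```nix
{
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
  inputs.arion.url = "github:hercules-ci/arion";

  outputs = { nixpkgs, arion, ... }: {
    nixosConfigurations.example = nixpkgs.lib.nixosSystem {
      system = "x86_64-linux";
      modules = [
        arion.nixosModules.arion
        {
          virtualisation.arion.backend = "docker";
          virtualisation.arion.projects.example.settings.imports = [ ./arion-compose.nix ];
        }
      ];
    };
  };
}
```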
@@ -36,15 +36,15 @@
     "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
   },
   "nixos-unstable": {
-    "branch": "nixos-unstable",
+    "branch": "master",
     "description": "A read-only mirror of NixOS/nixpkgs tracking the released channels. Send issues and PRs to",
     "homepage": "https://github.com/NixOS/nixpkgs",
     "owner": "NixOS",
     "repo": "nixpkgs",
-    "rev": "c6c4a3d45ab200f17805d2d86a1ff1cc7ca2b186",
-    "sha256": "1f6q98vx3sqxcn6qp5vpy00223r9hy93w9pxq65h9gdwzy3w4qxv",
+    "rev": "f8232491252438cd70b93554e31fe8238a573636",
+    "sha256": "019bz8dxp9d30ghmi9v0inn0p7mj3mia69lkh5cyivhhp1i0cs5i",
     "type": "tarball",
-    "url": "https://github.com/NixOS/nixpkgs/archive/c6c4a3d45ab200f17805d2d86a1ff1cc7ca2b186.tar.gz",
+    "url": "https://github.com/NixOS/nixpkgs/archive/f8232491252438cd70b93554e31fe8238a573636.tar.gz",
     "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz",
     "version": ""
   },
nixos-module.nix (new file, 110 lines)
@@ -0,0 +1,110 @@
{ config, lib, pkgs, ... }:
let
  inherit (lib)
    attrValues
    mkIf
    mkOption
    mkMerge
    types
    ;

  cfg = config.virtualisation.arion;

  projectType = types.submoduleWith {
    modules = [ projectModule ];
  };

  projectModule = { config, name, ... }: {
    options = {
      settings = mkOption {
        description = ''
          Arion project definition, otherwise known as arion-compose.nix contents.

          See <link xlink:href="https://docs.hercules-ci.com/arion/options/">https://docs.hercules-ci.com/arion/options/</link>.
        '';
        type = arionSettingsType;
        visible = "shallow";
      };
      _systemd = mkOption { internal = true; };
    };
    config = {
      _systemd.services."arion-${name}" = {
        wantedBy = [ "multi-user.target" ];
        after = [ "sockets.target" ];

        path = [
          cfg.package
          cfg.docker.client.package
        ];
        environment.ARION_PREBUILT = config.settings.out.dockerComposeYaml;
        script = ''
          echo 1>&2 "docker compose file: $ARION_PREBUILT"
          arion --prebuilt-file "$ARION_PREBUILT" up
        '';
      };
    };
  };

  arionSettingsType =
    (cfg.package.eval { modules = [ ]; }).type or (
      throw "lib.evalModules did not produce a type. Please upgrade Nixpkgs to nixos-unstable or >=nixos-21.11"
    );

in
{
  disabledModules = [ "virtualisation/arion.nix" ];

  options = {
    virtualisation.arion = {
      backend = mkOption {
        type = types.enum [ "podman-socket" "docker" ];
        description = ''
          Which container implementation to use.
        '';
      };
      package = mkOption {
        type = types.package;

        default = (import ./. { inherit pkgs; }).arion;
        description = ''
          Arion package to use. This will provide the <literal>arion</literal>
          executable that starts the project.

          It also must provide the arion <literal>eval</literal> function as
          an attribute.
        '';
      };
      docker.client.package = mkOption {
        type = types.package;
        internal = true;
      };
      projects = mkOption {
        type = types.attrsOf projectType;
        default = { };
        description = ''
          Arion projects to be run as a service.
        '';
      };
    };
  };

  config = mkIf (cfg.projects != { }) (
    mkMerge [
      {
        systemd = mkMerge (map (p: p._systemd) (attrValues cfg.projects));
      }
      (mkIf (cfg.backend == "podman-socket") {
        virtualisation.docker.enable = false;
        virtualisation.podman.enable = true;
        virtualisation.podman.dockerSocket.enable = true;
        virtualisation.podman.defaultNetwork.dnsname.enable = true;

        virtualisation.arion.docker.client.package = pkgs.docker-client;
      })
      (mkIf (cfg.backend == "docker") {
        virtualisation.docker.enable = true;
        virtualisation.arion.docker.client.package = pkgs.docker;
      })
    ]
  );
}
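The `settings` option above leans on the fact that `lib.evalModules` exposes a reusable submodule `type` on sufficiently new Nixpkgs. A toy sketch of that pattern in isolation — the `greeting` option is invented for the example and has nothing to do with arion's actual options:

```nix
let
  lib = (import <nixpkgs> { }).lib;

  # Evaluate a small module set; newer Nixpkgs exposes a reusable `type`.
  inner = lib.evalModules {
    modules = [
      { options.greeting = lib.mkOption { type = lib.types.str; default = "hi"; }; }
    ];
  };

  settingsType = inner.type or (throw "evalModules did not produce a type");
in
(lib.evalModules {
  modules = [
    { options.settings = lib.mkOption { type = settingsType; default = { }; }; }
    { settings.greeting = "hello"; }
  ];
}).config.settings.greeting
# => "hello"
```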
@@ -105,7 +105,7 @@ in
     image.name = mkOption {
       type = str;
       default = config.service.name;
-      defaultText = lib.literalExample "config.service.name";
+      defaultText = lib.literalExpression or lib.literalExample "config.service.name";
       description = ''
         A human readable name for the docker image.
 
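The changed line relies on Nix's `or` fallback on attribute selection, so it keeps working on a Nixpkgs that predates `literalExpression`. A toy sketch of the mechanism — the stand-in `lib` here is hypothetical:

```nix
let
  # Stand-in for an old Nixpkgs lib that has no literalExpression.
  lib = { literalExample = text: { _type = "literalExample"; inherit text; }; };
in
# `a.b or c` selects a.b when present and falls back to c otherwise,
# and it binds tighter than function application.
lib.literalExpression or lib.literalExample "config.service.name"
```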
@@ -8,12 +8,15 @@ let
   };

   inherit (lib)
+    concatMapStringsSep
     optionalAttrs
     optionalString
     ;

   haveSystemd = usePodman || pkgs.arionTestingFlags.dockerSupportsSystemd;

+  concatPathLines = paths: concatMapStringsSep "\n" (x: "${x}") paths;
+
 in
 {
   name = "arion-test";

@@ -39,7 +42,8 @@ in
       nix.useSandbox = false;

       virtualisation.writableStore = true;
-      virtualisation.pathsInNixDB = [
+      # Switch to virtualisation.additionalPaths when dropping all NixOS <= 21.05.
+      environment.etc."extra-paths-for-test".text = concatPathLines [
         # Pre-build the image because we don't want to build the world
         # in the vm.
         (preEval [ ../../examples/minimal/arion-compose.nix ]).config.out.dockerComposeYaml
@@ -1,12 +1,33 @@
 { pkgs ? import ../pkgs.nix, arionTestingFlags ? {} }:
 let
-  inherit (pkgs) nixosTest recurseIntoAttrs arion;
+  inherit (pkgs) nixosTest recurseIntoAttrs arion lib;
+
+  hasEvalModulesType = (lib.evalModules { modules = [ {} ]; })?type;
+
 in

 recurseIntoAttrs {

   test = nixosTest ./arion-test;

+  nixosModuleWithDocker =
+    lib.optionalAttrs
+      hasEvalModulesType
+      (
+        import ./nixos-virtualization-arion-test/test.nix pkgs {
+          virtualisation.arion.backend = "docker";
+        }
+      );
+
+  nixosModuleWithPodman =
+    lib.optionalAttrs
+      (hasEvalModulesType && arionTestingFlags.nixosHasPodmanDockerSocket)
+      (
+        import ./nixos-virtualization-arion-test/test.nix pkgs {
+          virtualisation.arion.backend = "podman-socket";
+        }
+      );
+
   testWithPodman =
     if arionTestingFlags.nixosHasPodmanDockerSocket
     then nixosTest (pkgs.callPackage ./arion-test { usePodman = true; })
tests/nixos-virtualization-arion-test/README.md (new file, 6 lines)
@@ -0,0 +1,6 @@
# NixOS module test

This tests the NixOS module.

The images used here are experimental and not meant for production.
tests/nixos-virtualization-arion-test/arion-compose.nix (new file, 60 lines)
@@ -0,0 +1,60 @@
{ pkgs, ... }: {
  project.name = "whale";

  docker-compose.raw = {
    volumes.zookeeper = { };
    volumes.kafka = { };
  };

  services.kafka = {
    service.useHostStore = true;
    # service.volumes = [
    #   {
    #     type = "volume";
    #     source = "kafka";
    #     target = "/data";
    #     # volume.nocopy = true;
    #   }
    # ];
    service.ports = [ "9092:9092" ];
    service.depends_on = [ "zookeeper" ];
    image.contents = [
      (pkgs.runCommand "root" { } ''
        mkdir -p $out/bin
        ln -s ${pkgs.runtimeShell} $out/bin/sh
      '')
    ];
    image.command = [
      "${pkgs.apacheKafka}/bin/kafka-server-start.sh"
      "${./kafka/server.properties}"
    ];
  };

  services.zookeeper = {
    service.useHostStore = true;
    service.ports = [ "2181:2181" ];
    # service.volumes = [
    #   {
    #     type = "volume";
    #     source = "zookeeper";
    #     target = "/data";
    #     # volume.nocopy = true;
    #   }
    # ];
    image.contents = [
      (pkgs.buildEnv {
        name = "root";
        paths = [
          # pkgs.sed
          pkgs.busybox
        ];
      })
    ];
    image.command = [
      "${pkgs.zookeeper.override { jre = pkgs.jdk8_headless; }}/bin/zkServer.sh"
      "--config"
      "${./zookeeper}"
      "start-foreground"
    ];
  };
}
tests/nixos-virtualization-arion-test/arion-pkgs.nix (new file, 6 lines)
@@ -0,0 +1,6 @@
# NOTE: This isn't used in the module!
import <nixpkgs> {
  # We specify the architecture explicitly. Use a Linux remote builder when
  # calling arion from other platforms.
  system = "x86_64-linux";
}
tests/nixos-virtualization-arion-test/kafka/server.properties (new file, 141 lines)
@@ -0,0 +1,141 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# see kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0

############################# Socket Server Settings #############################

# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
# FORMAT:
#   listeners = listener_name://host_name:port
# EXAMPLE:
#   listeners = PLAINTEXT://your.host.name:9092
listeners=LOCALHOST://0.0.0.0:9092,SERVICE://kafka:9093

# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
# advertised.listeners=PLAINTEXT://whale_kafka_1:9092
advertised.listeners=LOCALHOST://localhost:9092,SERVICE://kafka:9093

# ???
inter.broker.listener.name=LOCALHOST

# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
listener.security.protocol.map=LOCALHOST:PLAINTEXT,SERVICE:PLAINTEXT

# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3

# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600


############################# Log Basics #############################

# A comma separated list of directories under which to store log files
log.dirs=/data/kafka

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1

############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168

# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000

############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=zookeeper:2181

# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=18000


############################# Group Coordinator Settings #############################

# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
tests/nixos-virtualization-arion-test/test.nix (new file, 40 lines)
@@ -0,0 +1,40 @@
pkgs: module:

pkgs.nixosTest {
  name = "test-basic-arion-kafka";
  nodes = {
    machine = { ... }: {
      virtualisation.memorySize = 3000;
      virtualisation.diskSize = 10000;
      imports = [
        ../../nixos-module.nix
        module
      ];

      virtualisation.arion.projects.whale.settings = {
        imports = [ ./arion-compose.nix ];
      };
    };
  };
  testScript = ''
    machine.wait_for_unit("sockets.target")
    machine.wait_for_unit("arion-whale.service")

    machine.succeed("""
      (echo "hello"; echo "world") \
        | ${pkgs.apacheKafka}/bin/kafka-console-producer.sh \
            --topic thetopic --bootstrap-server localhost:9092
    """)

    machine.succeed("""
      (
        set +o pipefail # we only care for head's exit code
        ( ${pkgs.apacheKafka}/bin/kafka-console-consumer.sh \
            --topic thetopic --from-beginning --bootstrap-server localhost:9092 & \
          echo $! >pid
        ) | grep --line-buffered hello | { read; kill $(<pid); rm pid; }
      ) 2>/dev/console
    """)
  '';
}
@@ -0,0 +1,82 @@
# Copyright 2012 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Define some default values that can be overridden by system properties
zookeeper.root.logger=INFO, CONSOLE

zookeeper.console.threshold=INFO

zookeeper.log.dir=.
zookeeper.log.file=zookeeper.log
zookeeper.log.threshold=INFO
zookeeper.log.maxfilesize=256MB
zookeeper.log.maxbackupindex=20

# zookeeper.tracelog.dir=${zookeeper.log.dir}
# zookeeper.tracelog.file=zookeeper_trace.log

log4j.rootLogger=${zookeeper.root.logger}

#
# console
# Add "console" to rootlogger above if you want to use this
#
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n

# #
# # Add ROLLINGFILE to rootLogger to get log file output
# #
# log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
# log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
# log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}
# log4j.appender.ROLLINGFILE.MaxFileSize=${zookeeper.log.maxfilesize}
# log4j.appender.ROLLINGFILE.MaxBackupIndex=${zookeeper.log.maxbackupindex}
# log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
# log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n

# #
# # Add TRACEFILE to rootLogger to get log file output
# # Log TRACE level and above messages to a log file
# #
# log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
# log4j.appender.TRACEFILE.Threshold=TRACE
# log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}

# log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
# ### Notice we are including log4j's NDC here (%x)
# log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n
# #
# # zk audit logging
# #
# zookeeper.auditlog.file=zookeeper_audit.log
# zookeeper.auditlog.threshold=INFO
# audit.logger=INFO, CONSOLE
# log4j.logger.org.apache.zookeeper.audit.Log4jAuditLogger=${audit.logger}
# log4j.additivity.org.apache.zookeeper.audit.Log4jAuditLogger=false
# log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
# log4j.appender.RFAAUDIT.File=${zookeeper.log.dir}/${zookeeper.auditlog.file}
# log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
# log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
# log4j.appender.RFAAUDIT.Threshold=${zookeeper.auditlog.threshold}

# # Max log file size of 10MB
# log4j.appender.RFAAUDIT.MaxFileSize=10MB
# log4j.appender.RFAAUDIT.MaxBackupIndex=10
tests/nixos-virtualization-arion-test/zookeeper/zoo.cfg (new file, 3 lines)
@@ -0,0 +1,3 @@
tickTime=2000
dataDir=/data
clientPort=2181