Compare commits


No commits in common. "main" and "fix-warnings" have entirely different histories.

87 changed files with 1669 additions and 2422 deletions

.envrc

@@ -1,6 +1,4 @@
HOST_XDG_DATA_DIRS="${XDG_DATA_DIRS:-}"
eval "$(lorri direnv)"
export XDG_DATA_DIRS="${XDG_DATA_DIRS}:${HOST_XDG_DATA_DIRS}"
# Use system PKI
unset SSL_CERT_FILE

.gitignore

@@ -5,5 +5,3 @@ dist/
dist-newstyle/
cabal.project.local
*.swp

@@ -1,100 +1,9 @@
# Revision history for Arion
## 0.2.1.0 -- 2023-07-26
## Unreleased
### Added
* `service.networks` now supports attribute set values with various options, thanks to @pedorich-n.
* `docker-compose.volumes` can now be specified in multiple modules, thanks to @qaifshaikh.
* `image.fakeRootCommands` for making modifications to the image that aren't "add a link farm".
### Fixed
* Regular maintenance fixes, including one by olebedev
## 0.2.0.0 -- 2022-12-02
### BREAKING
* The `project.name` option is now mandatory for projects that aren't deployed with the NixOS module.
* The NixOS module now sets the default network name to the project name (commonly referred to as `<name>` in the option path).
If this is not desired, for instance if you need the projects to be on the same network, set `networks.default.name` in each of them.
* The NixOS module now sets the default project name. You can still set your own value with the `project.name` option.
If you did not set one, docker compose heuristically determined the name to be `store`, so you may want to set `project.name = "store"` or prepare to rename the network manually.
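For illustration, a project affected by these changes might set the values explicitly; a minimal sketch (the names below are hypothetical):

```nix
{
  # Now mandatory outside the NixOS module:
  project.name = "store";

  # Only needed if several projects must share one network:
  networks.default.name = "shared-net";
}
```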
### Removed
- NixOS 20.09 support. Its docker-compose does not support the
`networks.<name>.name` option, which is important in later versions.
A newer, bundled docker compose may work there, but for now the decision
is to drop this legacy version.
### Changed
* Healthcheck-based dependencies in `service.depends_on`.
### Added
* Support `service.healthcheck` for defining custom healthchecks.
* Arion now declares a `networks.default` by default, with `name` set to
`project.name`. This improves compatibility with container runtimes by
copying pre-existing behavior. Most users will want to keep using this
behavior, but it can be disabled with `enableDefaultNetwork`.
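A rough sketch of these two features, assuming the options accept the usual Compose-style keys (not taken from the manual):

```nix
{
  services.db.service = {
    image = "postgres";
    healthcheck = {
      test = [ "CMD-SHELL" "pg_isready -U postgres" ];
      interval = "10s";
      retries = 5;
    };
  };
  services.webserver.service = {
    image = "nginx";
    # Healthcheck-based dependency, Compose long form:
    depends_on.db.condition = "service_healthy";
  };
}
```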
## 0.1.3.0 -- 2020-05-03
### Changed
* `useHostStore` now uses an image derived from the `image.*` options. You may
need to enable `enableRecommendedContents` because with this change, files
like `/bin/sh` aren't added by default anymore.
* Drop obsolete NixOS 19.03, 19.09 and 20.03 from CI.
### Added
* NixOS-based containers can now run on Podman when it is configured to provide a docker socket. See the [installation docs](https://docs.hercules-ci.com/arion/#_nixos).
* Support `service.dns`, for overriding the DNS servers used by containers.
* Support `service.labels`, which is useful for autodiscovery among other things.
* Add a tested example for Traefik with label-based routing.
* Add a `flake.nix` and an experimental flake example
* Add a warning when systemd `DynamicUser` is used but not available to the
container.
* CI with NixOS 21.05
## 0.1.2.0 -- 2020-03-05
* Support use of prebuilt `docker-compose.yaml`.
Separates build and execution without duplicating evaluation.
* Avoid storing tarballs (wasting store space) by using
`dockerTools.streamLayeredImage` if available.
* Project name is now configurable via the `project.name` option
* Support --no-ansi, --compatibility, --log-level options
## 0.1.1.1 -- 2020-03-20
* Fix ambiguous import of `lines`
* Improve base version constraint
* Fix warnings
## 0.1.1.0 -- 2020-03-19
* Support Nixpkgs 20.03
* Fixes for macOS
<!-- TODO: use better template -->
## 0.1.0.0 -- 2019-10-04
* First released version. Released on an unsuspecting world.

@@ -1,7 +1,7 @@
cabal-version: 2.4
name: arion-compose
version: 0.2.1.0
version: 0.1.0.0
synopsis: Run docker-compose with help from Nix/NixOS
description: Arion is a tool for building and running applications that consist of multiple docker containers using NixOS modules. It has special support for docker images that are built with Nix, for a smooth development experience and improved performance.
homepage: https://github.com/hercules-ci/arion#readme
@@ -17,7 +17,6 @@ extra-source-files: CHANGELOG.md, README.asciidoc,
src/haskell/testdata/**/*.json
data-files: nix/*.nix
, nix/modules/composition/*.nix
, nix/modules/networks/*.nix
, nix/modules/nixos/*.nix
, nix/modules/service/*.nix
, nix/modules/lib/*.nix
@@ -25,13 +24,9 @@ data-files: nix/*.nix
-- all data is verbatim from some sources
data-dir: src
source-repository head
type: git
location: https://github.com/hercules-ci/arion
common common
build-depends: base >=4.12.0.0 && <4.99
, aeson >=2
build-depends: base ^>=4.12.0.0
, aeson
, aeson-pretty
, async
, bytestring
@@ -41,7 +36,7 @@ common common
, process
, temporary
, text
, protolude >= 0.2
, protolude
, unix
ghc-options: -Wall
@@ -54,7 +49,6 @@ library
exposed-modules: Arion.Nix
Arion.Aeson
Arion.DockerCompose
Arion.ExtendedInfo
Arion.Images
Arion.Services
other-modules: Paths_arion_compose

@@ -1,5 +1,5 @@
status = [
"ci/hercules/onPush/default",
"ci/hercules/derivations",
"ci/hercules/evaluation",
]
delete_merged_branches = true

@@ -1,11 +1,6 @@
let flake = import ./nix/compat.nix;
in
{ pkgs ? import flake.inputs.nixpkgs { }
{ pkgs ? import ./nix {}
, haskellPackages ? pkgs.haskellPackages
}:
let
pkgsWithArion = pkgs.extend flake.overlays.default;
in
{
inherit (pkgsWithArion) arion;
arion = import ./nix/arion.nix { inherit pkgs haskellPackages; };
}

@@ -1,4 +0,0 @@
# Documentation
Please refer to the [**rendered documentation**](https://docs.hercules-ci.com/arion), which includes the [**options**](https://docs.hercules-ci.com/arion/options/).

@@ -4,4 +4,3 @@ version: 'master'
nav:
- modules/ROOT/nav.adoc
- modules/reference/nav.adoc
nix: true

@@ -1,31 +0,0 @@
{
perSystem = { config, pkgs, lib, ... }: {
packages.generated-option-doc-arion =
# TODO: use the render pipeline in flake-parts,
# which has support for things like {options}`foo`.
let
eval = lib.evalModules {
modules = import ../src/nix/modules.nix;
};
in
(pkgs.nixosOptionsDoc
{
options = eval.options;
}).optionsCommonMark;
packages.generated-antora-files =
pkgs.runCommand "generated-antora-files"
{
nativeBuildInputs = [ pkgs.pandoc ];
doc_arion = config.packages.generated-option-doc-arion;
}
# TODO: use the render pipeline in flake-parts,
# which has support for things like {options}`foo`.
''
mkdir -p $out/modules/ROOT/partials
pandoc --from=markdown --to=asciidoc \
< $doc_arion \
> $out/modules/ROOT/partials/arion-options.adoc
'';
};
}

@@ -1 +0,0 @@
../../../../../examples/full-nixos/arion-compose.nix

@@ -1 +0,0 @@
../../../../../examples/minimal/arion-compose.nix

@@ -1 +0,0 @@
../../../../../examples/nixos-unit/arion-compose.nix

@@ -1,3 +1,2 @@
* xref:index.adoc[Getting Started]
* xref:options.adoc[Arion Options]
* xref:deployment.adoc[Deployment]

@@ -1,71 +0,0 @@
= Deployment with Arion
Arion projects can be deployed in Nix-like or Docker-like ways.
== Docker images
When you disable `useHostStore`, arion will build images, which can be deployed
to any Docker host, including non-NixOS hosts.
=== Remote Docker socket
NOTE: Access to a Docker socket is equivalent to root access on the host.
Docker supports authentication via TLS client certificates.
The xref:hercules-ci-effects:ROOT:reference/nix-functions/runArion.adoc[runArion Effect] uses this technique.
Because this technique works with a single Docker host, it does not need a registry.
=== Upload to registry
You can either use `arion push` or write custom push logic using the `arion cat`
command, the `eval` function on the `arion` package, or the `lib.eval` function
on the flake to retrieve the images defined in a project.
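For example, the flake's `lib.eval` function can evaluate a project and expose its generated Compose file, from which custom push logic can read image details. A rough sketch; the input names and module path are illustrative:
```nix
{
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
  inputs.arion.url = "github:hercules-ci/arion";

  outputs = { self, nixpkgs, arion, ... }:
    let
      pkgs = nixpkgs.legacyPackages.x86_64-linux;
      composition = arion.lib.eval {
        modules = [ ./arion-compose.nix ];
        inherit pkgs;
      };
    in {
      # The generated docker-compose.yaml derivation for this project.
      packages.x86_64-linux.composeFile =
        composition.config.out.dockerComposeYaml;
    };
}
```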
== NixOS module
Arion projects can be deployed as part of a NixOS configuration. This ties the
project revision to the system configuration revision, which can be a good or a
bad thing, depending on your deployment strategy. At a low level, a benefit is
that no store paths need to be copied locally, and remote NixOS deployments can
use Nix's copy-closure algorithm for efficient transfers and transparent binary
caches, rather than an inherently stateful Docker registry solution.
Extend an existing NixOS configuration by adding the configuration elements
below. You could create a new module file for them, if your choice of
`imports` allows it.
NOTE: This deployment method does NOT use an `arion-pkgs.nix` file, but reuses
the host `pkgs`.
```nix
{
imports = [
# Pick one of:
# - niv
((import ./nix/sources.nix).arion + "/nixos-module.nix")
# - or flakes (where arion is a flake input)
arion.nixosModules.arion
# - or other: copy commit hash of arion and replace HASH in:
((builtins.fetchTarball "https://github.com/hercules-ci/arion/archive/HASH.tar.gz") + "/nixos-module.nix")
];
virtualisation.arion = {
backend = "podman-socket"; # or "docker"
projects.example = {
serviceName = "example"; # optional systemd service name, defaults to arion-example in this case
settings = {
# Specify your project here, or import it from a file.
# NOTE: This does NOT use ./arion-pkgs.nix, but defaults to NixOS' pkgs.
imports = [ ./arion-compose.nix ];
};
};
};
}
```
See also:
- xref:hercules-ci-effects:ROOT:reference/nix-functions/runNixOS.adoc[runNixOS Effect]
- xref:hercules-ci-effects:ROOT:reference/nix-functions/runNixOps2.adoc[runNixOps2 Effect]

@@ -39,13 +39,6 @@ Arion allows composing containers with different granularity:
* <<NixOS: run full OS>>
* <<Docker image from DockerHub>>
Full NixOS is supported on
* docker-compose + podman with docker socket (NixOS >= 21.05)
* docker-compose + docker, before cgroupsv2 (NixOS < 21.05)
`podman-compose` support is currently WIP on a separate branch.
== Installation
=== Nix
@@ -59,24 +52,10 @@ $ nix-env -iA arion -f https://github.com/hercules-ci/arion/tarball/master
Add this module to your NixOS configuration:
```nix
{ pkgs, ... }: {
environment.systemPackages = [
pkgs.arion
# Do install the docker CLI to talk to podman.
# Not needed when virtualisation.docker.enable = true;
pkgs.docker-client
];
# Arion works with Docker, but for NixOS-based containers, you need Podman
# since NixOS 21.05.
virtualisation.docker.enable = false;
virtualisation.podman.enable = true;
virtualisation.podman.dockerSocket.enable = true;
virtualisation.podman.defaultNetwork.dnsname.enable = true;
# Use your username instead of `myuser`
users.extraUsers.myuser.extraGroups = ["podman"];
{ ... }: {
environment.systemPackages = [ (import (builtins.fetchTarball https://github.com/hercules-ci/arion/tarball/master) {}).arion ];
virtualisation.docker.enable = true;
users.extraUsers.myuser.extraGroups = ["docker"];
}
```
@@ -113,16 +92,14 @@ Describe containers using NixOS-style modules. There are a few options:
==== Minimal: Plain command using nixpkgs
`examples/minimal/arion-compose.nix`
[,nix]
----
`examples/minimal/arion-compose.nix`:
```nix
{ pkgs, ... }:
{
project.name = "webapp";
services = {
config.services = {
webserver = {
image.enableRecommendedContents = true;
service.useHostStore = true;
service.command = [ "sh" "-c" ''
cd "$$WEB_ROOT"
@@ -132,36 +109,62 @@ Describe containers using NixOS-style modules. There are a few options:
"8000:8000" # host:container
];
service.environment.WEB_ROOT = "${pkgs.nix.doc}/share/doc/nix/manual";
service.stop_signal = "SIGINT";
};
};
}
----
==== NixOS: run full OS
`examples/full-nixos/arion-compose.nix`:
```
==== NixOS: run only one systemd service
`examples/nixos-unit/arion-compose.nix`:
```nix
[,nix]
----
{
project.name = "full-nixos";
services.webserver = { pkgs, lib, ... }: {
nixos.useSystemd = true;
nixos.configuration.boot.tmp.useTmpfs = true;
nixos.configuration.services.nginx.enable = true;
nixos.configuration.services.nginx.virtualHosts.localhost.root = "${pkgs.nix.doc}/share/doc/nix/manual";
nixos.configuration.services.nscd.enable = false;
nixos.configuration.system.nssModules = lib.mkForce [];
nixos.configuration.systemd.services.nginx.serviceConfig.AmbientCapabilities =
lib.mkForce [ "CAP_NET_BIND_SERVICE" ];
services.webserver = { config, pkgs, ... }: {
nixos.configuration = {config, pkgs, ...}: {
boot.isContainer = true;
services.nginx.enable = true;
services.nginx.virtualHosts.localhost.root = "${pkgs.nix.doc}/share/doc/nix/manual";
system.build.run-nginx = pkgs.writeScript "run-nginx" ''
#!${pkgs.bash}/bin/bash
PATH='${config.systemd.services.nginx.environment.PATH}'
echo nginx:x:${toString config.users.users.nginx.uid}:${toString config.users.groups.nginx.gid}:nginx web server user:/var/empty:/bin/sh >>/etc/passwd
echo nginx:x:${toString config.users.groups.nginx.gid}:nginx >>/etc/group
${config.systemd.services.nginx.runner}
'';
};
service.command = [ config.nixos.build.run-nginx ];
service.useHostStore = true;
service.ports = [
"8000:80" # host:container
];
};
}
----
```
==== NixOS: run full OS
`examples/full-nixos/arion-compose.nix`:
```nix
{
services.webserver = { pkgs, ... }: {
nixos.useSystemd = true;
nixos.configuration.boot.tmpOnTmpfs = true;
nixos.configuration.services.nginx.enable = true;
nixos.configuration.services.nginx.virtualHosts.localhost.root = "${pkgs.nix.doc}/share/doc/nix/manual";
service.useHostStore = true;
service.ports = [
"8000:80" # host:container
];
};
}
```
==== Docker image from DockerHub
@@ -175,11 +178,6 @@ Describe containers using NixOS-style modules. There are a few options:
}
```
==== NixOS: run only one systemd service
Running individual units from NixOS is possible using an experimental script.
See `examples/nixos-unit/arion-compose.nix`.
=== Run
Start containers and watch their logs:
@@ -191,47 +189,15 @@ $ arion logs -f
You can go to `examples/*/` and run these commands to give it a quick try.
=== Inspect the config
== A full featured Nix command example
While developing an arion project, you can make use of `arion repl`, which launches
a `nix repl` on the project configuration.
To see how Arion can be used in a project, have a look at
https://github.com/nix-community/todomvc-nix/tree/master/deploy/arion[todomvc-nix].
```
$ arion repl
Launching a repl for you. To get started:
To see deployment-wide configuration
type config. and use tab completion
To bring the top-level Nixpkgs attributes into scope
type :a (config._module.args.pkgs) // { inherit config; }
Welcome to Nix. Type :? for help.
Loading '../../src/nix/eval-composition.nix'...
Added 5 variables.
nix-repl> config.services.webserver.service.command
[ "sh" "-c" "cd \"$$WEB_ROOT\"\n/nix/store/66fbv9mmx1j4hrn9y06kcp73c3yb196r-python3-3.8.9/bin/python -m http.server\n" ]
nix-repl>
```
== Build with Nix
You can build a project with `nix-build` using an expression like
```nix
arion.build { modules = [ ./arion-compose.nix ]; pkgs = import ./arion-pkgs.nix; }
```
If you deploy with xref:hercules-ci-effects:ROOT:reference/nix-functions/runArion.adoc[runArion],
and your `pkgs` variable is equivalent to `import ./arion-pkgs.nix`, you can use:
```nix
let
deployment = pkgs.effects.runArion { /* ... */ };
in deployment.prebuilt
```bash
$ git clone https://github.com/nix-community/todomvc-nix
$ cd todomvc-nix/deploy/arion
$ arion up
```
== Project Status
@@ -241,10 +207,10 @@ development environments while working on
https://www.hercules-ci.com[Hercules CI]. (It was also born out of
ancient Greek deities disguised as horses. More on that later.)
Arion can be used for simple single host deployments, using Docker's TLS
client verification, or https://search.nixos.org/options?channel=unstable&show=virtualisation.podman.networkSocket.enable&query=virtualisation.podman[`virtualisation.podman.networkSocket` options].
Remote deployments do not support `useHostStore`, although an SSH-based deployment method could support this.
Docker Swarm is not currently supported.
If you do want to use Arion for production environments, you'll probably
want to either build normal container images or manage garbage
collection roots if you control the deployment host. Neither scenario is
made easier by arion at this time.
Arion has run successfully on Linux distributions other than NixOS, but we only perform CI for Arion on NixOS.
@@ -265,7 +231,7 @@ configuration that makes the Docker Compose file do the things it needs
to do.
One of the more interesting built-in modules is the
https://github.com/hercules-ci/arion/blob/master/src/nix/modules/service/host-store.nix[host-store.nix module] which
link:src/nix/modules/service/host-store.nix[host-store.nix module] which
performs the bind mounts to make the host Nix store available in the
container.
@@ -275,23 +241,21 @@ container.
Nope, it's just Nix and Docker Compose under the hood.
It does xref:hercules-ci-effects:ROOT:reference/nix-functions/runArion.adoc[integrate] nicely though.
=== What about garbage collection?
Arion removes the need for garbage collecting docker images, delegating
this task to Nix when using `service.useHostStore`.
this task to Nix.
Arion creates a garbage collection root that it cleans up after completing
the command. This means that `arion up -d` should not be used with `useHostStore`
in production. Instead, disable `useHostStore`, which will use `dockerTools` to
generate images that can be used in production.
Arion creates a garbage collection root and cleans it up after
completing the command. This means that `arion up` without `-d` is safe
with respect to garbage collection. A deployment that is more serious
than local development must leave a GC root on the deployment host. This
use case is not supported as of now.
=== Why is my container not running latest code?
Rebuild the image using `arion up -d --always-recreate-deps <name>` or simply `arion up -d`.
Like `docker-compose restart`, `arion restart` does not update the image before starting.
Restart it with `arion restart <name>` or if you've changed the image rebuild
them using `arion up -d --always-recreate-deps <name>`.
=== What is messing with my environment variables?
@@ -302,11 +266,11 @@ reference a script from `pkgs.writeScript` or escape the dollar sign as
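An illustrative sketch of both workarounds (escaping with `$$`, or referencing a Nix-built script); the service names are hypothetical:
```nix
{ pkgs, ... }: {
  services.webserver.service.command = [ "sh" "-c" ''
    # docker-compose expands $VAR itself; $$ makes it reach the shell as $VAR.
    cd "$$WEB_ROOT"
    ${pkgs.python3}/bin/python -m http.server
  '' ];

  services.other.image.command = [ "${pkgs.writeScript "entrypoint" ''
    #!${pkgs.bash}/bin/bash
    # Inside a Nix-built script, plain $PATH never passes through docker-compose.
    echo "$PATH"
  ''}" ];
}
```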
=== Why name it ``Arion``?
Arion comes from Greek mythology. Poseidon, the god of Docker -- I mean the seas --
Arion comes from Greek mythology. Poseidon, the god of ~Docker~ the seas
had his eye on Demeter. Demeter tried to trick him by disguising as a
horse, but Poseidon saw through the deception and they had Arion.
So Arion is a super fast divine horse; the result of some weird mixing.
Also it talks.
(And we felt morally obliged to name our stuff after Greek mythology)
(And we feel morally obliged to name our stuff after Greek mythology)

@@ -1,3 +1 @@
# Arion Options
include::partial$arion-options.adoc[]
include::partial$NixOSOptions.adoc[]

@@ -0,0 +1,941 @@
= Arion options
== docker-compose.extended
Attribute set that will be turned into the x-arion section of the docker-compose.yaml file.
[discrete]
=== details
Type:: attribute set
No Default:: {blank}
No Example:: {blank}
== docker-compose.raw
Attribute set that will be turned into the docker-compose.yaml file, using Nix's toJSON builtin.
[discrete]
=== details
Type:: attribute set
No Default:: {blank}
No Example:: {blank}
== host.nixStorePrefix
Prefixes store paths on the host, allowing the Nix store to be
stored at an alternate location without altering the format of
store paths.
For example: instead of mounting the host's /nix/store as the
container's /nix/store, this will mount /mnt/foo/nix/store
as the container's /nix/store.
[discrete]
=== details
Type:: string
Default::
+
----
""
----
Example::
+
----
"/mnt/foo"
----
== host.uid
The numeric user id (UID) of the user running arion.
This lets you to write modules that interact with the host
user's files, which is helpful for local development, but not
intended for production-like deployment scenarios.
[discrete]
=== details
Type:: signed integer
No Default:: {blank}
No Example:: {blank}
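An illustrative use, not part of the generated reference: run a service as the invoking user.
[source,nix]
----
{ config, ... }: {
  services.devtools = {
    service.image = "alpine";
    # host.uid is read from the composition-level option; purely illustrative.
    service.user = builtins.toString config.host.uid;
    service.command = [ "sleep" "1d" ];
  };
}
----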
== out.dockerComposeYaml
A derivation that produces a docker-compose.yaml file for this composition.
[discrete]
=== details
Type:: package
No Default:: {blank}
Read Only:: {blank}
No Example:: {blank}
== out.dockerComposeYamlAttrs
The attribute set from which out.dockerComposeYaml is generated.
[discrete]
=== details
Type:: attribute set of unspecifieds
No Default:: {blank}
Read Only:: {blank}
No Example:: {blank}
== out.dockerComposeYamlText
The text of out.dockerComposeYaml.
[discrete]
=== details
Type:: string
No Default:: {blank}
Read Only:: {blank}
No Example:: {blank}
== services
An attribute set of service configurations. A service specifies how to run an image as a container.
[discrete]
=== details
Type:: attribute set of submodules
No Default:: {blank}
No Example:: {blank}
== services.<name>.composition
The composition configuration.
[discrete]
=== details
Type:: attribute set
No Default:: {blank}
Read Only:: {blank}
No Example:: {blank}
== services.<name>.host
The composition-level host option values.
[discrete]
=== details
Type:: attribute set
No Default:: {blank}
Read Only:: {blank}
No Example:: {blank}
== services.<name>.image.command
[discrete]
=== details
Type:: list of strings
Default::
+
----
[]
----
No Example:: {blank}
== services.<name>.image.contents
Top level paths in the container.
[discrete]
=== details
Type:: list of packages
Default::
+
----
[]
----
No Example:: {blank}
== services.<name>.image.name
A human readable name for the docker image.
Shows up in the <code>docker ps</code> output in the
<code>IMAGE</code> column, among other places.
[discrete]
=== details
Type:: string
Default::
+
----
{"_type":"literalExample","text":"config.service.name"}
----
No Example:: {blank}
== services.<name>.image.nixBuild
Whether to build this image with Nixpkgs'
<code>dockerTools.buildLayeredImage</code>
and then load it with <code>docker load</code>.
By default, an image will be built with Nix unless <option>service.image</option>
is set. See also <option>image.name</option>, which defaults to
the service name.
[discrete]
=== details
Type:: boolean
No Default:: {blank}
No Example:: {blank}
== services.<name>.image.rawConfig
This is a low-level fallback for when a container option has not
been modeled in the Arion module system.
This attribute set does not have an appropriate merge function.
Please use the specific <code>image</code> options instead.
Run-time configuration of the container. A full list of the
options is available in the <link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions">Docker Image Specification
v1.2.0</link>.
[discrete]
=== details
Type:: attribute set of unspecifieds
Default::
+
----
{}
----
No Example:: {blank}
== services.<name>.nixos.build
NixOS build products from <code>config.system.build</code>, such as <code>toplevel</code> and <code>etc</code>.
This option is unused by default, because not all images use NixOS.
One way to use this is to enable <code>nixos.useSystemd</code>, but the
NixOS configuration can be used in other ways.
[discrete]
=== details
Type:: attribute set
No Default:: {blank}
Read Only:: {blank}
No Example:: {blank}
== services.<name>.nixos.configuration
Modules to add to the NixOS configuration.
This option is unused by default, because not all images use NixOS.
One way to use this is to enable <code>nixos.useSystemd</code>, but the
NixOS configuration can be used in other ways.
[discrete]
=== details
Type:: list of unspecifieds or unspecified convertible to it
Default::
+
----
{}
----
No Example:: {blank}
== services.<name>.nixos.evaluatedConfig
Evaluated NixOS configuration, to be read by service-level modules.
This option is unused by default, because not all images use NixOS.
One way to use this is to enable <code>nixos.useSystemd</code>, but the
NixOS configuration can be used in other ways.
[discrete]
=== details
Type:: attribute set
No Default:: {blank}
Read Only:: {blank}
No Example:: {blank}
== services.<name>.nixos.useSystemd
When enabled, call the NixOS systemd-based init system.
Configure NixOS with <code>nixos.configuration</code>.
[discrete]
=== details
Type:: boolean
Default::
+
----
false
----
No Example:: {blank}
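A sketch mirroring the full-nixos example elsewhere in this repository:
[source,nix]
----
{
  services.webserver = { pkgs, ... }: {
    nixos.useSystemd = true;
    nixos.configuration.services.nginx.enable = true;
    nixos.configuration.services.nginx.virtualHosts.localhost.root =
      "${pkgs.nix.doc}/share/doc/nix/manual";
    service.useHostStore = true;
    service.ports = [ "8000:80" ]; # host:container
  };
}
----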
== services.<name>.out.extendedInfo
Information about a service to include in the Docker Compose file,
but that will not be used by the <code>docker-compose</code> command
itself.
It will be inserted in <code>x-arion.serviceInfo.&lt;service.name></code>.
[discrete]
=== details
Type:: attribute set of unspecifieds
Default::
+
----
{}
----
No Example:: {blank}
== services.<name>.out.service
Raw input for the service in <code>docker-compose.yaml</code>.
You should not need to use this option. If anything is
missing, please contribute the missing option.
This option is user accessible because it may serve as an
escape hatch for some.
[discrete]
=== details
Type:: attribute set of unspecifieds
No Default:: {blank}
No Example:: {blank}
== services.<name>.service.build.context
Locates a Dockerfile to use for creating an image to use in this service.
See <link xlink:href="https://docs.docker.com/compose/compose-file/#context">Docker Compose#context</link>
[discrete]
=== details
Type:: null or string
Default::
+
----
null
----
No Example:: {blank}
== services.<name>.service.capabilities
Enable/disable linux capabilities, or pick Docker's default.
Setting a capability to <code>true</code> means that it will be
"added". Setting it to <code>false</code> means that it will be "dropped".
See <link xlink:href="https://docs.docker.com/compose/compose-file/#cap_add-cap_drop">Docker Compose#cap_add-cap_drop</link>
Omitted and <code>null</code> capabilities will therefore be set
according to Docker's <link xlink:href="https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities">default list of capabilities.</link>
[discrete]
=== details
Type:: attribute set of null or booleans
Default::
+
----
{}
----
Example::
+
----
{"ALL":true,"NET_ADMIN":false,"SYS_ADMIN":false}
----
== services.<name>.service.command
See <link xlink:href="https://docs.docker.com/compose/compose-file/#command">Docker Compose#command</link>
[discrete]
=== details
Type:: null or unspecified
Default::
+
----
null
----
No Example:: {blank}
== services.<name>.service.container_name
See <link xlink:href="https://docs.docker.com/compose/compose-file/#container_name">Docker Compose#container_name</link>
[discrete]
=== details
Type:: null or string
Default::
+
----
null
----
No Example:: {blank}
== services.<name>.service.defaultExec
Container program and arguments to invoke when calling
<code>arion exec &lt;service.name></code> without further arguments.
[discrete]
=== details
Type:: list of strings
Default::
+
----
["/bin/sh"]
----
No Example:: {blank}
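Illustrative sketch: make `arion exec webserver` open an interactive bash instead of `/bin/sh` (service name and image are hypothetical).
[source,nix]
----
{
  services.webserver = {
    service.image = "debian";
    service.defaultExec = [ "bash" "-l" ];
  };
}
----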
== services.<name>.service.depends_on
See <link xlink:href="https://docs.docker.com/compose/compose-file/#depends_on">Docker Compose#depends_on</link>
[discrete]
=== details
Type:: list of strings
Default::
+
----
[]
----
No Example:: {blank}
== services.<name>.service.devices
See <link xlink:href="https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities"><code>docker run --device</code> documentation</link>
See <link xlink:href="https://docs.docker.com/compose/compose-file/#devices">Docker Compose#devices</link>
[discrete]
=== details
Type:: list of strings
Default::
+
----
[]
----
No Example:: {blank}
== services.<name>.service.entrypoint
See <link xlink:href="https://docs.docker.com/compose/compose-file/#entrypoint">Docker Compose#entrypoint</link>
[discrete]
=== details
Type:: null or string
Default::
+
----
null
----
No Example:: {blank}
== services.<name>.service.env_file
See <link xlink:href="https://docs.docker.com/compose/compose-file/#env_file">Docker Compose#env_file</link>
[discrete]
=== details
Type:: list of strings
Default::
+
----
[]
----
No Example:: {blank}
== services.<name>.service.environment
See <link xlink:href="https://docs.docker.com/compose/compose-file/#environment">Docker Compose#environment</link>
[discrete]
=== details
Type:: attribute set of string or signed integers
Default::
+
----
{}
----
No Example:: {blank}
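Illustrative sketch; per the type above, values may be strings or integers.
[source,nix]
----
{
  services.webserver.service = {
    image = "nginx";
    environment = {
      WEB_ROOT = "/srv/www";
      HTTP_PORT = 8000;
    };
  };
}
----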
== services.<name>.service.expose
See <link xlink:href="https://docs.docker.com/compose/compose-file/#expose">Docker Compose#expose</link>
[discrete]
=== details
Type:: list of strings
Default::
+
----
[]
----
No Example:: {blank}
== services.<name>.service.external_links
See <link xlink:href="https://docs.docker.com/compose/compose-file/#external_links">Docker Compose#external_links</link>
[discrete]
=== details
Type:: list of strings
Default::
+
----
[]
----
No Example:: {blank}
== services.<name>.service.extra_hosts
See <link xlink:href="https://docs.docker.com/compose/compose-file/#extra_hosts">Docker Compose#extra_hosts</link>
[discrete]
=== details
Type:: list of strings
Default::
+
----
[]
----
No Example:: {blank}
== services.<name>.service.hostStoreAsReadOnly
Adds a ':ro' (read-only) access mode to the host nix store bind mount.
[discrete]
=== details
Type:: boolean
Default::
+
----
true
----
No Example:: {blank}
== services.<name>.service.hostname
Analogous to the <code>docker run</code> counterpart.
See <link xlink:href="https://docs.docker.com/compose/compose-file/#domainname-hostname-ipc-mac_address-privileged-read_only-shm_size-stdin_open-tty-user-working_dir">Docker Compose#domainname-hostname-ipc-mac_address-privileged-read_only-shm_size-stdin_open-tty-user-working_dir</link>
[discrete]
=== details
Type:: null or string
Default::
+
----
null
----
No Example:: {blank}
== services.<name>.service.image
See <link xlink:href="https://docs.docker.com/compose/compose-file/#image">Docker Compose#image</link>
[discrete]
=== details
Type:: string
No Default:: {blank}
No Example:: {blank}
== services.<name>.service.links
See <link xlink:href="https://docs.docker.com/compose/compose-file/#links">Docker Compose#links</link>
[discrete]
=== details
Type:: list of strings
Default::
+
----
[]
----
No Example:: {blank}
== services.<name>.service.name
The name of the service - <code>&lt;name></code> in the composition-level <code>services.&lt;name></code>
[discrete]
=== details
Type:: string
No Default:: {blank}
Read Only:: {blank}
No Example:: {blank}
== services.<name>.service.network_mode
See <link xlink:href="https://docs.docker.com/compose/compose-file/#network_mode">Docker Compose#network_mode</link>
[discrete]
=== details
Type:: null or string
Default::
+
----
null
----
No Example:: {blank}
== services.<name>.service.networks
See <link xlink:href="https://docs.docker.com/compose/compose-file/#networks">Docker Compose#networks</link>
[discrete]
=== details
Type:: null or list of strings
Default::
+
----
null
----
No Example:: {blank}
== services.<name>.service.ports
Expose ports on host. "host:container" or structured.
See <link xlink:href="https://docs.docker.com/compose/compose-file/#ports">Docker Compose#ports</link>
[discrete]
=== details
Type:: list of unspecifieds
Default::
+
----
[]
----
No Example:: {blank}
== services.<name>.service.privileged
Analogous to the <code>docker run</code> counterpart.
See <link xlink:href="https://docs.docker.com/compose/compose-file/#domainname-hostname-ipc-mac_address-privileged-read_only-shm_size-stdin_open-tty-user-working_dir">Docker Compose#domainname-hostname-ipc-mac_address-privileged-read_only-shm_size-stdin_open-tty-user-working_dir</link>
[discrete]
=== details
Type:: null or boolean
Default::
+
----
null
----
No Example:: {blank}
== services.<name>.service.restart
See <link xlink:href="https://docs.docker.com/compose/compose-file/#restart">Docker Compose#restart</link>
[discrete]
=== details
Type:: null or string
Default::
+
----
null
----
No Example:: {blank}
== services.<name>.service.stop_signal
See <link xlink:href="https://docs.docker.com/compose/compose-file/#stop_signal">Docker Compose#stop_signal</link>
[discrete]
=== details
Type:: null or string
Default::
+
----
null
----
No Example:: {blank}
== services.<name>.service.sysctls
See <link xlink:href="https://docs.docker.com/compose/compose-file/#sysctls">Docker Compose#sysctls</link>
[discrete]
=== details
Type:: attribute set of string or signed integers
Default::
+
----
{}
----
No Example:: {blank}
== services.<name>.service.tmpfs
See <link xlink:href="https://docs.docker.com/compose/compose-file/#tmpfs">Docker Compose#tmpfs</link>
[discrete]
=== details
Type:: list of strings
Default::
+
----
[]
----
No Example:: {blank}
== services.<name>.service.tty
Analogous to the <code>docker run</code> counterpart.
See <link xlink:href="https://docs.docker.com/compose/compose-file/#domainname-hostname-ipc-mac_address-privileged-read_only-shm_size-stdin_open-tty-user-working_dir">Docker Compose#domainname-hostname-ipc-mac_address-privileged-read_only-shm_size-stdin_open-tty-user-working_dir</link>
[discrete]
=== details
Type:: null or boolean
Default::
+
----
null
----
No Example:: {blank}
== services.<name>.service.useHostNixDaemon
Make the host Nix daemon available.
[discrete]
=== details
Type:: boolean
Default::
+
----
false
----
No Example:: {blank}
== services.<name>.service.useHostStore
Bind mounts the host store if enabled, avoiding copying.
[discrete]
=== details
Type:: boolean
Default::
+
----
false
----
No Example:: {blank}
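A sketch along the lines of the minimal example in this repository; the busybox contents line is an illustrative way to provide `/bin/sh` inside the image.
[source,nix]
----
{ pkgs, ... }: {
  services.webserver = {
    image.contents = [ pkgs.busybox ];   # provides /bin/sh inside the image
    service.useHostStore = true;         # bind-mount the host /nix/store
    service.command = [ "sh" "-c" "${pkgs.hello}/bin/hello" ];
  };
}
----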
== services.<name>.service.user
Analogous to the <code>docker run</code> counterpart.
See <link xlink:href="https://docs.docker.com/compose/compose-file/#domainname-hostname-ipc-mac_address-privileged-read_only-shm_size-stdin_open-tty-user-working_dir">Docker Compose#domainname-hostname-ipc-mac_address-privileged-read_only-shm_size-stdin_open-tty-user-working_dir</link>
[discrete]
=== details
Type:: null or string
Default::
+
----
null
----
No Example:: {blank}
== services.<name>.service.volumes
See <link xlink:href="https://docs.docker.com/compose/compose-file/#volumes">Docker Compose#volumes</link>
[discrete]
=== details
Type:: list of unspecifieds
Default::
+
----
[]
----
No Example:: {blank}
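Illustrative sketch with a read-only bind mount; the host path is hypothetical.
[source,nix]
----
{
  services.webserver = {
    service.image = "nginx";
    service.volumes = [ "/var/www/site:/usr/share/nginx/html:ro" ];
  };
}
----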
== services.<name>.service.working_dir
Analogous to the <code>docker run</code> counterpart.
See <link xlink:href="https://docs.docker.com/compose/compose-file/#domainname-hostname-ipc-mac_address-privileged-read_only-shm_size-stdin_open-tty-user-working_dir">Docker Compose#domainname-hostname-ipc-mac_address-privileged-read_only-shm_size-stdin_open-tty-user-working_dir</link>
[discrete]
=== details
Type:: null or string
Default::
+
----
null
----
No Example:: {blank}

@@ -8,12 +8,10 @@ let
options = eval.options;
};
in (pkgs.runCommand "agent-options.adoc" { } ''
cat >$out <<EOF
in (pkgs.writeText "agent-options" ''
= Arion options
EOF
cat ${options.optionsAsciiDoc} >>$out
${options.optionsAsciiDoc}
'').overrideAttrs (o: {
# Work around https://github.com/hercules-ci/hercules-ci-agent/issues/168
allowSubstitutes = true;

@@ -1,30 +0,0 @@
{ pkgs, ... }:
let
sh = pkgs.stdenv.mkDerivation {
name = "sh";
phases = [ "installPhase" ];
installPhase = ''
mkdir -p "$out"/bin
ln -s ${pkgs.bash}/bin/sh "$out"/bin/sh
'';
};
in{
config.project.name = "webapp";
config.services = {
webserver = {
image.contents = [ sh ];
service.useHostStore = true;
service.command = [ "sh" "-c" ''
cd "$$WEB_ROOT"
${pkgs.python3}/bin/python -m http.server
'' ];
service.ports = [
"8000:8000" # host:container
];
service.environment.WEB_ROOT = "${pkgs.nix.doc}/share/doc/nix/manual";
service.stop_signal = "SIGINT";
};
};
}

@@ -1,13 +0,0 @@
let
flake = if builtins ? getFlake
then (builtins.getFlake (toString ./.)).pkgs
else (import flake-compat { src = ./.; }).defaultNix;
# NB: this is lazy
lock = builtins.fromJSON (builtins.readFile ./flake.lock);
inherit (lock.nodes.flake-compat.locked) owner repo rev narHash;
flake-compat = builtins.fetchTarball {
url = "https://github.com/${owner}/${repo}/archive/${rev}.tar.gz";
sha256 = narHash;
};
in
flake.pkgs

@@ -1,44 +0,0 @@
{
"nodes": {
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1606424373,
"narHash": "sha256-oq8d4//CJOrVj+EcOaSXvMebvuTkmBJuT5tzlfewUnQ=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "99f1c2157fba4bfe6211a321fd0ee43199025dbf",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1618853290,
"narHash": "sha256-K4fddnrGOcKL+6CEchRrVmepiwvwvHxB87goqBTI5Bs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9a1672105db0eebe8ef59f310397435f2d0298d0",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-20.09",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-compat": "flake-compat",
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

@@ -1,19 +0,0 @@
{
description = "A very basic flake";
inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-20.09";
inputs.flake-compat.url = "github:edolstra/flake-compat";
inputs.flake-compat.flake = false;
outputs = { self, nixpkgs, ... }: {
pkgs = nixpkgs.legacyPackages.x86_64-linux;
# # alternative:
# pkgs = import nixpkgs { config = { }; overlays = [ ]; system = "x86_64-linux"; };
packages.x86_64-linux.hello = nixpkgs.legacyPackages.x86_64-linux.hello;
defaultPackage.x86_64-linux = self.packages.x86_64-linux.hello;
};
}

@@ -1,15 +1,9 @@
{
project.name = "full-nixos";
services.webserver = { pkgs, lib, ... }: {
services.webserver = { pkgs, ... }: {
nixos.useSystemd = true;
nixos.configuration.boot.tmp.useTmpfs = true;
nixos.configuration.networking.useDHCP = false;
nixos.configuration.boot.tmpOnTmpfs = true;
nixos.configuration.services.nginx.enable = true;
nixos.configuration.services.nginx.virtualHosts.localhost.root = "${pkgs.nix.doc}/share/doc/nix/manual";
nixos.configuration.services.nscd.enable = false;
nixos.configuration.system.nssModules = lib.mkForce [];
nixos.configuration.systemd.services.nginx.serviceConfig.AmbientCapabilities =
lib.mkForce [ "CAP_NET_BIND_SERVICE" ];
service.useHostStore = true;
service.ports = [
"8000:80" # host:container

@@ -1,10 +1,8 @@
{ pkgs, ... }:
{
project.name = "webapp";
services = {
config.services = {
webserver = {
image.enableRecommendedContents = true;
service.useHostStore = true;
service.command = [ "sh" "-c" ''
cd "$$WEB_ROOT"
@@ -14,7 +12,6 @@
"8000:8000" # host:container
];
service.environment.WEB_ROOT = "${pkgs.nix.doc}/share/doc/nix/manual";
service.stop_signal = "SIGINT";
};
};
}

@@ -17,27 +17,17 @@
*/
{
project.name = "nixos-unit";
services.webserver = { config, pkgs, ... }: {
nixos.configuration = {config, lib, options, pkgs, ...}: {
nixos.configuration = {config, pkgs, ...}: {
boot.isContainer = true;
services.nginx = {
enable = true;
virtualHosts.localhost.root = "${pkgs.nix.doc}/share/doc/nix/manual";
} // lib.optionalAttrs (options?services.nginx.stateDir) {
# Work around a problem in NixOS 20.03
stateDir = "/var/lib/nginx";
};
services.nginx.enable = true;
services.nginx.virtualHosts.localhost.root = "${pkgs.nix.doc}/share/doc/nix/manual";
system.build.run-nginx = pkgs.writeScript "run-nginx" ''
#!${pkgs.bash}/bin/bash
PATH='${config.systemd.services.nginx.environment.PATH}'
echo nginx:x:${toString config.users.users.nginx.uid}:${toString config.users.groups.nginx.gid}:nginx web server user:/var/empty:/bin/sh >>/etc/passwd
echo nginx:x:${toString config.users.groups.nginx.gid}:nginx >>/etc/group
echo 'nobody:x:65534:65534:Unprivileged account do not use:/var/empty:/run/current-system/sw/bin/nologin' >>/etc/passwd
echo 'nogroup:x:65534:' >>/etc/group
mkdir -p /var/log/nginx /run/nginx/ /var/cache/nginx /var/lib/nginx/{,logs,proxy_temp,client_body_temp,fastcgi_temp,scgi_temp,uwsgi_temp} /tmp/nginx_client_body
chown nginx /var/log/nginx /run/nginx/ /var/cache/nginx /var/lib/nginx/{,logs,proxy_temp,client_body_temp,fastcgi_temp,scgi_temp,uwsgi_temp} /tmp/nginx_client_body
${config.systemd.services.nginx.runner}
'';
};

@@ -1,64 +0,0 @@
/*
An example of
- traefik HTTP reverse proxy
- minimal images
- routing via docker labels
Run `arion up -d` and open http://nix-docs.localhost/
*/
{ lib, pkgs, ... }: {
config.project.name = "traefik";
config.networks = {
traefik-custom = {
name = "traefik-custom";
ipam = {
config = [{
subnet = "172.32.0.0/16";
gateway = "172.32.0.1";
}];
};
};
};
config.services = {
traefik = {
image.command = [
"${pkgs.traefik}/bin/traefik"
"--api.insecure=true"
"--providers.docker=true"
"--providers.docker.exposedbydefault=false"
"--entrypoints.web.address=:80"
];
service = {
container_name = "traefik";
stop_signal = "SIGINT";
ports = [ "80:80" "8080:8080" ];
volumes = [ "/var/run/docker.sock:/var/run/docker.sock:ro" ];
networks = [ "traefik-custom" ];
};
};
nix-docs = {
image.command = ["${pkgs.writeScript "entrypoint" ''
#!${pkgs.bash}/bin/bash
cd ${pkgs.nix.doc}/share/doc/nix/manual
${pkgs.python3}/bin/python -m http.server
''}"];
service.container_name = "simple-service";
service.stop_signal = "SIGINT";
service.labels = {
"traefik.enable" = "true";
"traefik.http.routers.nix-docs.rule" = "Host(`nix-docs.localhost`)";
"traefik.http.routers.nix-docs.entrypoints" = "web";
"traefik.http.services.nix-docs.loadBalancer.server.port" = "8000";
};
service.networks = {
traefik-custom = {
ipv4_address = "172.32.0.5";
};
};
};
};
}

@@ -1,6 +0,0 @@
# Instead of pinning Nixpkgs, we can opt to use the one in NIX_PATH
import <nixpkgs> {
# We specify the architecture explicitly. Use a Linux remote builder when
# calling arion from other platforms.
system = "x86_64-linux";
}

@@ -1,107 +0,0 @@
{
"nodes": {
"flake-parts": {
"inputs": {
"nixpkgs-lib": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1722555600,
"narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "8471fe90ad337a8074e957b69ca4d0089218391d",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-parts_2": {
"inputs": {
"nixpkgs-lib": [
"hercules-ci-effects",
"nixpkgs"
]
},
"locked": {
"lastModified": 1712014858,
"narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
"type": "github"
},
"original": {
"id": "flake-parts",
"type": "indirect"
}
},
"haskell-flake": {
"locked": {
"lastModified": 1675296942,
"narHash": "sha256-u1X1sblozi5qYEcLp1hxcyo8FfDHnRUVX3dJ/tW19jY=",
"owner": "srid",
"repo": "haskell-flake",
"rev": "c2cafce9d57bfca41794dc3b99c593155006c71e",
"type": "github"
},
"original": {
"owner": "srid",
"ref": "0.1.0",
"repo": "haskell-flake",
"type": "github"
}
},
"hercules-ci-effects": {
"inputs": {
"flake-parts": "flake-parts_2",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1719226092,
"narHash": "sha256-YNkUMcCUCpnULp40g+svYsaH1RbSEj6s4WdZY/SHe38=",
"owner": "hercules-ci",
"repo": "hercules-ci-effects",
"rev": "11e4b8dc112e2f485d7c97e1cee77f9958f498f5",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "hercules-ci-effects",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1722630782,
"narHash": "sha256-hMyG9/WlUi0Ho9VkRrrez7SeNlDzLxalm9FwY7n/Noo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "d04953086551086b44b6f3c6b7eeb26294f207da",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-parts": "flake-parts",
"haskell-flake": "haskell-flake",
"hercules-ci-effects": "hercules-ci-effects",
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

@@ -1,98 +0,0 @@
{
description = "Arion - use Docker Compose via Nix";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
haskell-flake.url = "github:srid/haskell-flake/0.1.0";
flake-parts.url = "github:hercules-ci/flake-parts";
flake-parts.inputs.nixpkgs-lib.follows = "nixpkgs";
hercules-ci-effects.url = "github:hercules-ci/hercules-ci-effects";
hercules-ci-effects.inputs.nixpkgs.follows = "nixpkgs";
};
outputs = inputs@{ self, flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } ({ config, lib, extendModules, ... }: {
imports = [
inputs.haskell-flake.flakeModule
inputs.hercules-ci-effects.flakeModule
inputs.flake-parts.flakeModules.easyOverlay
./docs/flake-module.nix
./tests/flake-module.nix
];
systems = inputs.nixpkgs.lib.systems.flakeExposed;
perSystem = { config, self', inputs', pkgs, system, final, ... }:
let h = pkgs.haskell.lib.compose; in
{
overlayAttrs = {
inherit (config.packages) arion;
arionTestingFlags = {
dockerSupportsSystemd = false;
};
};
packages.default = config.packages.arion;
packages.overlay-test = final.arion;
packages.arion = import ./nix/arion.nix { inherit pkgs; };
haskellProjects.haskell-package = {
# not autodetected: https://github.com/srid/haskell-flake/issues/49
packages.arion-compose.root = ./.;
overrides =
self: super: {
arion-compose =
lib.pipe super.arion-compose [
(h.addBuildTools [ pkgs.nix ])
(h.overrideCabal (o: {
src = pkgs.lib.sourceByRegex ./. [
".*[.]cabal"
"LICENSE"
"src/?.*"
"README.asciidoc"
"CHANGELOG.md"
];
preCheck = ''
export NIX_LOG_DIR=$TMPDIR
export NIX_STATE_DIR=$TMPDIR
export NIX_PATH=nixpkgs=${pkgs.path}
'';
}))
];
};
};
devShells.default = config.devShells.haskell-package.overrideAttrs (o: {
nativeBuildInputs = o.nativeBuildInputs or [ ] ++ [
pkgs.docker-compose
pkgs.nixpkgs-fmt
config.haskellProjects.haskell-package.haskellPackages.releaser
];
});
};
hercules-ci.flake-update = {
enable = true;
autoMergeMethod = "merge";
when = {
hour = [ 2 ];
dayOfMonth = [ 5 ];
};
};
herculesCI.ciSystems = [
# "aarch64-darwin"
# "aarch64-linux"
"x86_64-darwin"
"x86_64-linux"
];
flake = {
debug = { inherit inputs config lib; };
lib = {
eval = import ./src/nix/eval-composition.nix;
build = args@{ ... }:
let composition = self.lib.eval args;
in composition.config.out.dockerComposeYaml;
};
nixosModules.arion = ./nixos-module.nix;
};
});
}

live-check (new executable file)

@@ -0,0 +1,12 @@
#!/usr/bin/env nix-shell
#!nix-shell ./shell.nix
#!nix-shell -i bash
set -eux -o pipefail
cd "$(dirname "${BASH_SOURCE[0]}")"
ghcid \
--command 'ghci -isrc/haskell/exe src/haskell/exe/Main.hs' \
--reload=src/haskell \
--restart=arion-compose.cabal \
;

@@ -1,13 +1,42 @@
# Like the upstreamable expression but wired up for the local arion.
{ pkgs ? import ./. {}
, lib ? pkgs.lib
, haskell ? pkgs.haskell
, haskellPackages ? pkgs.haskellPackages
, arion-compose ? import ./haskell-arion-compose.nix { inherit pkgs haskellPackages; }
, runCommand ? pkgs.runCommand
}:
import ./upstreamable/default.nix {
inherit pkgs lib haskell runCommand;
haskellPackages = haskellPackages // { inherit arion-compose; };
evalSrc = ./..;
}
let
inherit (pkgs.haskell.lib) justStaticExecutables overrideCabal;
srcDir = ../src;
eval = import (srcDir + "/nix/eval-composition.nix");
build = args@{...}:
let composition = eval args;
in composition.config.out.dockerComposeYaml;
in
justStaticExecutables (overrideCabal arion-compose (o: {
buildTools = o.buildTools ++ [pkgs.makeWrapper];
passthru = o.passthru // {
inherit eval build;
};
pname = "arion"; # Cover up the needlessly long Haskell package name
# PYTHONPATH
#
# We close off the python module search path!
#
# Accepting directories from the environment into the search path
# tends to break things. Docker Compose does not have a plugin
# system as far as I can tell, so I don't expect this to break a
# feature, but rather to make the program more robustly self-
# contained.
postInstall = ''${o.postInstall or ""}
mkdir -p $out/libexec
mv $out/bin/arion $out/libexec
makeWrapper $out/libexec/arion $out/bin/arion \
--unset PYTHONPATH \
--prefix PATH : ${lib.makeBinPath [ pkgs.docker-compose ]} \
;
'';
}))

nix/ci.nix (new file)

@@ -0,0 +1,43 @@
let
sources = import ./sources.nix;
lib = import (sources."nixpkgs" + "/lib");
inherit (import sources."project.nix" { inherit lib; }) dimension;
in
dimension "Nixpkgs version" {
"nixos-19_03" = {
nixpkgsSource = "nixpkgs";
isReferenceNixpkgs = true;
enableDoc = false;
};
"nixos-19_09" = {
nixpkgsSource = "nixos-19.09";
enableDoc = true;
};
"nixos-unstable" = {
nixpkgsSource = "nixos-unstable";
enableDoc = true;
};
} (
_name: { nixpkgsSource, isReferenceNixpkgs ? false, enableDoc ? true }:
dimension "System" {
"x86_64-linux" = { isReferenceTarget = isReferenceNixpkgs; };
"x86_64-darwin" = { enableNixOSTests = false; };
} (
system: { isReferenceTarget ? false, enableNixOSTests ? true }:
let
pkgs = import ./. { inherit system; nixpkgsSrc = sources.${nixpkgsSource}; };
in
{
inherit (pkgs) arion;
} // lib.optionalAttrs enableNixOSTests {
inherit (pkgs) tests;
} // lib.optionalAttrs enableDoc {
inherit (pkgs) doc doc-options doc-options-check;
} // lib.optionalAttrs isReferenceTarget {
inherit (pkgs.arion-project.haskellPkgs) arion-compose-checked;
}
)
)

@@ -1,10 +0,0 @@
(import
(
let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
fetchTarball {
url = "https://github.com/edolstra/flake-compat/archive/009399224d5e398d03b22badca40a37ac85412a1.tar.gz";
sha256 = "sha256:0xcr9fibnapa12ywzcnlf54wrmbqqb96fmmv8043zhsycws7bpqy";
}
)
{ src = ../.; }
).defaultNix

nix/default.nix (new file)

@@ -0,0 +1,17 @@
{ sources ? import ./sources.nix
, nixpkgsName ? "nixos-19.09"
, nixpkgsSrc ? sources.${nixpkgsName}
, system ? builtins.currentSystem
, ...
}:
import nixpkgsSrc ({
# Makes the config pure as well. See <nixpkgs>/top-level/impure.nix:
config = {
};
overlays = [
# all the packages are defined there:
(import ./overlay.nix)
];
inherit system;
})

@@ -6,12 +6,6 @@ let
inherit (pkgs.haskell.lib) overrideCabal addBuildTools;
in
overrideCabal (addBuildTools (haskellPackages.callCabal2nix "arion-compose" ./.. {}) [pkgs.nix]) (o: o // {
src = pkgs.lib.sourceByRegex ../. [
".*[.]cabal"
"LICENSE"
"src/?.*"
"README.asciidoc"
];
preCheck = ''
export NIX_LOG_DIR=$TMPDIR
export NIX_STATE_DIR=$TMPDIR

nix/haskell-overlay.nix (new file)

@@ -0,0 +1,16 @@
self: super: hself: hsuper:
{
arion-compose = import ./haskell-arion-compose.nix { pkgs = self; haskellPackages = hself; };
arion-compose-checked =
let pkg = super.haskell.lib.buildStrictly hself.arion-compose;
checked = super.haskell.lib.overrideCabal pkg (o: {
postConfigure = ''${o.postConfigure or ""}
if ! ${hsuper.cabal-install}/bin/cabal check;
then
echo 1>&2 ERROR: cabal file is invalid. Above warnings were errors.
exit 1
fi
'';
});
in checked;
}

nix/overlay.nix (new file)

@@ -0,0 +1,53 @@
self: super:
let
inherit (self.arion-project) haskellPkgs;
inherit (super) lib;
sources = import ./sources.nix;
fakeRepo = src: super.runCommand "source" { inherit src; buildInputs = [super.git]; } ''
cp -r --no-preserve=mode $src $out
git init
cp -r .git $out
'';
in
{
inherit (import ./.. { pkgs = self; }) arion;
tests = super.callPackage ../tests {};
doc-options = import ../docs/options.nix {};
doc-options-check = self.runCommand "doc-options-check" {} ''
diff --color -u ${../docs/modules/ROOT/partials/NixOSOptions.adoc} ${self.doc-options}
touch $out
'';
doc = self.stdenv.mkDerivation {
name = "arion-documentation";
buildInputs = [super.antora];
src = fakeRepo ../.;
HOME = ".";
buildPhase = "antora antora-playbook";
installPhase = ''
mkdir $out
mv public/* $out/
'';
};
arion-project = super.recurseIntoAttrs {
haskellPkgs = super.haskellPackages.extend (import ./haskell-overlay.nix self super);
shell = haskellPkgs.shellFor {
packages = p: [p.arion-compose];
buildInputs = [
haskellPkgs.cabal-install
haskellPkgs.ghcid
super.docker-compose
self.niv
self.releaser
];
};
};
inherit (import (sources.niv) {}) niv;
releaser = self.haskellPackages.callCabal2nix "releaser" sources.releaser {};
}

nix/sources.json (new file)

@@ -0,0 +1,75 @@
{
"niv": {
"branch": "master",
"description": "Easy dependency management for Nix projects",
"homepage": "https://github.com/nmattia/niv",
"owner": "nmattia",
"repo": "niv",
"rev": "1dd094156b249586b66c16200ecfd365c7428dc0",
"sha256": "1b2vjnn8iac5iiqszjc2v1s1ygh0yri998c0k3s4x4kn0dsqik21",
"type": "tarball",
"url": "https://github.com/nmattia/niv/archive/1dd094156b249586b66c16200ecfd365c7428dc0.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"nixos-19.09": {
"branch": "nixos-19.09",
"description": "A read-only mirror of NixOS/nixpkgs tracking the released channels. Send issues and PRs to",
"homepage": "https://github.com/NixOS/nixpkgs",
"owner": "NixOS",
"repo": "nixpkgs-channels",
"rev": "3ba0d9f75ccffd41e32cfea4046805f8bbab12f5",
"sha256": "0w20drs4mwlq12k1sss1x8adyf5ph5jd52n8wdcgmn4sm60qjmki",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs-channels/archive/3ba0d9f75ccffd41e32cfea4046805f8bbab12f5.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"nixos-unstable": {
"branch": "nixos-unstable",
"description": "A read-only mirror of NixOS/nixpkgs tracking the released channels. Send issues and PRs to",
"homepage": "https://github.com/NixOS/nixpkgs",
"owner": "NixOS",
"repo": "nixpkgs-channels",
"rev": "cb4332e3eb6dfdb653f1fc7397a0292df228a533",
"sha256": "1722wphznqhpfny08rcy19l85r2l893ckjc3h1vfivj6aj64fwjr",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs-channels/archive/cb4332e3eb6dfdb653f1fc7397a0292df228a533.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz",
"version": ""
},
"nixpkgs": {
"branch": "nixos-19.03",
"description": "A read-only mirror of NixOS/nixpkgs tracking the released channels. Send issues and PRs to",
"homepage": "https://github.com/NixOS/nixpkgs",
"owner": "NixOS",
"repo": "nixpkgs-channels",
"rev": "d15a31f88a261281cd7c79038ae860c5ed95507d",
"sha256": "038iqfwmppnxq6aa89qm6k98lhwg686bmc9qjifibddm8pcp2wd0",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs-channels/archive/d15a31f88a261281cd7c79038ae860c5ed95507d.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"project.nix": {
"branch": "master",
"description": "A configuration manager for your projects",
"homepage": null,
"owner": "hercules-ci",
"repo": "project.nix",
"rev": "33e5f3cb25feff4ccd00f8c60a05976e2ee01802",
"sha256": "0c3q3il5h6q3ms8m6da51knvjsfvpz12sh3a3av4d2a5ikm5ncl1",
"type": "tarball",
"url": "https://github.com/hercules-ci/project.nix/archive/33e5f3cb25feff4ccd00f8c60a05976e2ee01802.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"releaser": {
"branch": "master",
"description": "Automation of Haskell package release process.",
"homepage": null,
"owner": "domenkozar",
"repo": "releaser",
"rev": "43a4e27654f388e8eacab631e24e26792ff88fe2",
"sha256": "072jlbw0hdc4nvs9frd7wdyzdv4mz2dc5ib35iaqi9rzdafq6822",
"type": "tarball",
"url": "https://github.com/domenkozar/releaser/archive/43a4e27654f388e8eacab631e24e26792ff88fe2.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
}
}

nix/sources.nix (new file)

@@ -0,0 +1,93 @@
# This file has been generated by Niv.
# A record, from name to path, of the third-party packages
with rec
{
pkgs =
if hasNixpkgsPath
then
if hasThisAsNixpkgsPath
then import (builtins_fetchTarball { inherit (sources_nixpkgs) url sha256; }) {}
else import <nixpkgs> {}
else
import (builtins_fetchTarball { inherit (sources_nixpkgs) url sha256; }) {};
sources_nixpkgs =
if builtins.hasAttr "nixpkgs" sources
then sources.nixpkgs
else abort
''
Please specify either <nixpkgs> (through -I or NIX_PATH=nixpkgs=...) or
add a package called "nixpkgs" to your sources.json.
'';
# fetchTarball version that is compatible between all the versions of Nix
builtins_fetchTarball =
{ url, sha256 }@attrs:
let
inherit (builtins) lessThan nixVersion fetchTarball;
in
if lessThan nixVersion "1.12" then
fetchTarball { inherit url; }
else
fetchTarball attrs;
# fetchurl version that is compatible between all the versions of Nix
builtins_fetchurl =
{ url, sha256 }@attrs:
let
inherit (builtins) lessThan nixVersion fetchurl;
in
if lessThan nixVersion "1.12" then
fetchurl { inherit url; }
else
fetchurl attrs;
# A wrapper around pkgs.fetchzip that has inspectable arguments,
# annoyingly this means we have to specify them
fetchzip = { url, sha256 }@attrs: pkgs.fetchzip attrs;
# A wrapper around pkgs.fetchurl that has inspectable arguments,
# annoyingly this means we have to specify them
fetchurl = { url, sha256 }@attrs: pkgs.fetchurl attrs;
hasNixpkgsPath = (builtins.tryEval <nixpkgs>).success;
hasThisAsNixpkgsPath =
(builtins.tryEval <nixpkgs>).success && <nixpkgs> == ./.;
sources = builtins.fromJSON (builtins.readFile ./sources.json);
mapAttrs = builtins.mapAttrs or
(f: set: with builtins;
listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set)));
# borrowed from nixpkgs
functionArgs = f: f.__functionArgs or (builtins.functionArgs f);
callFunctionWith = autoArgs: f: args:
let auto = builtins.intersectAttrs (functionArgs f) autoArgs;
in f (auto // args);
getFetcher = spec:
let fetcherName =
if builtins.hasAttr "type" spec
then builtins.getAttr "type" spec
else "builtin-tarball";
in builtins.getAttr fetcherName {
"tarball" = fetchzip;
"builtin-tarball" = builtins_fetchTarball;
"file" = fetchurl;
"builtin-url" = builtins_fetchurl;
};
};
# NOTE: spec must _not_ have an "outPath" attribute
mapAttrs (_: spec:
if builtins.hasAttr "outPath" spec
then abort
"The values in sources.json should not have an 'outPath' attribute"
else
if builtins.hasAttr "url" spec && builtins.hasAttr "sha256" spec
then
spec //
{ outPath = callFunctionWith spec (getFetcher spec) { }; }
else spec
) sources
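
For orientation: the file above maps each name in sources.json to a fetched path, so it is normally consumed from a small pinning entry point. A minimal sketch, assuming the "nixpkgs" entry shown in sources.json (the consumer file name is illustrative):

# nix/default.nix -- hypothetical consumer of the Niv-generated pin
let
  sources = import ./sources.nix;      # name -> { outPath, ... } as built above
  pkgs = import sources.nixpkgs { };   # instantiate the pinned nixpkgs
in
pkgs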

View file

@ -1,88 +0,0 @@
args@
{ pkgs
, lib
, haskellPackages
, haskell
, runCommand
# Allow this expression file to be used more efficiently in situations where
# the sources are more readily available. Unpacking haskellPackages.arion-compose.src
# is not always the best choice for arion.eval.
, evalSrc ? null
}:
let
/* This derivation builds the arion tool.
It is based on the arion-compose Haskell package, but adapted and extended to
- have the correct name
- have a smaller closure size
- have functions to use Arion from inside Nix: arion.eval and arion.build
- make it self-contained by including docker-compose
*/
arion =
justStaticExecutables (
overrideCabal
arion-compose
cabalOverrides
);
inherit (haskell.lib) justStaticExecutables overrideCabal;
inherit (haskellPackages) arion-compose;
cabalOverrides = o: {
buildTools = (o.buildTools or []) ++ [pkgs.makeWrapper];
passthru = (o.passthru or {}) // {
inherit eval build;
};
# Patch away the arion-compose name. Unlike the Haskell library, the program
# is called arion (arion was already taken on hackage).
pname = "arion";
src = arion-compose.src;
# PYTHONPATH
#
# We close off the python module search path!
#
# Accepting directories from the environment into the search path
# tends to break things. Docker Compose does not have a plugin
# system as far as I can tell, so I don't expect this to break a
# feature, but rather to make the program more robustly self-
# contained.
postInstall = ''${o.postInstall or ""}
mkdir -p $out/libexec
mv $out/bin/arion $out/libexec
makeWrapper $out/libexec/arion $out/bin/arion \
--unset PYTHONPATH \
--prefix PATH : ${lib.makeBinPath [ pkgs.docker-compose ]} \
;
'';
};
# Unpacked sources for evaluation by `eval`
evalSrc' = args.evalSrc or (runCommand "arion-src" {}
"mkdir $out; tar -C $out --strip-components=1 -xf ${arion-compose.src}");
/* Function for evaluating a composition
Re-uses this Nixpkgs evaluation instead of `arion-pkgs.nix`.
Returns the module system's `config` and `options` variables.
*/
eval = args@{...}:
import (evalSrc' + "/src/nix/eval-composition.nix")
({ inherit pkgs; } // args);
/* Function to derivation of the docker compose yaml file
NOTE: The output will change: https://github.com/hercules-ci/arion/issues/82
This function is particularly useful on CI.
*/
build = args@{...}:
let composition = eval args;
in composition.config.out.dockerComposeYaml;
in arion
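
The passthru eval and build functions defined above are meant to be called from Nix, for example to render the compose file on CI. A hedged sketch, using the same import style as the NixOS module below (paths and the composition module are illustrative):

let
  pkgs = import <nixpkgs> { };
  inherit (import ./. { inherit pkgs; }) arion;
in
# Derivation containing the rendered docker-compose.yaml
arion.build { modules = [ ./arion-compose.nix ]; }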

View file

@ -1,118 +0,0 @@
{ config, lib, options, pkgs, ... }:
let
inherit (lib)
attrValues
mkIf
mkOption
mkMerge
types
;
cfg = config.virtualisation.arion;
projectType = types.submoduleWith {
modules = [ projectModule ];
};
projectModule = { config, name, ... }: {
options = {
settings = mkOption {
description = ''
Arion project definition, otherwise known as arion-compose.nix contents.
See <link xlink:href="https://docs.hercules-ci.com/arion/options/">https://docs.hercules-ci.com/arion/options/</link>.
'';
type = arionSettingsType name;
visible = "shallow";
};
_systemd = mkOption { internal = true; };
serviceName = mkOption {
description = "The name of the Arion project's systemd service";
type = types.str;
default = "arion-${name}";
};
};
config = {
_systemd.services.${config.serviceName} = {
wantedBy = [ "multi-user.target" ];
after = [ "sockets.target" ];
path = [
cfg.package
cfg.docker.client.package
];
environment.ARION_PREBUILT = config.settings.out.dockerComposeYaml;
script = ''
echo 1>&2 "docker compose file: $ARION_PREBUILT"
arion --prebuilt-file "$ARION_PREBUILT" up
'';
};
};
};
arionSettingsType = name:
(cfg.package.eval { modules = [{ project.name = lib.mkDefault name; }]; }).type or (
throw "lib.evalModules did not produce a type. Please upgrade Nixpkgs to nixos-unstable or >=nixos-21.11"
);
in
{
disabledModules = [ "virtualisation/arion.nix" ];
options = {
virtualisation.arion = {
backend = mkOption {
type = types.enum [ "podman-socket" "docker" ];
description = ''
Which container implementation to use.
'';
};
package = mkOption {
type = types.package;
default = (import ./. { inherit pkgs; }).arion;
description = ''
Arion package to use. This will provide <literal>arion</literal>
executable that starts the project.
It also must provide the arion <literal>eval</literal> function as
an attribute.
'';
};
docker.client.package = mkOption {
type = types.package;
internal = true;
};
projects = mkOption {
type = types.attrsOf projectType;
default = { };
description = ''
Arion projects to be run as a service.
'';
};
};
};
config = mkIf (cfg.projects != { }) (
mkMerge [
{
systemd = mkMerge (map (p: p._systemd) (attrValues cfg.projects));
}
(mkIf (cfg.backend == "podman-socket") {
virtualisation.docker.enable = false;
virtualisation.podman.enable = true;
virtualisation.podman.dockerSocket.enable = true;
virtualisation.podman.defaultNetwork =
if options?virtualisation.podman.defaultNetwork.settings
then { settings.dns_enabled = true; } # since 2023-01 https://github.com/NixOS/nixpkgs/pull/199965
else { dnsname.enable = true; }; # compat <2023
virtualisation.arion.docker.client.package = pkgs.docker-client;
})
(mkIf (cfg.backend == "docker") {
virtualisation.docker.enable = true;
virtualisation.arion.docker.client.package = pkgs.docker;
})
]
);
}
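
Taken together, the module above is used from a host configuration roughly as follows; a sketch in which the project name "example" and the nginx image are purely illustrative:

{
  virtualisation.arion = {
    backend = "docker";   # or "podman-socket"
    projects.example.settings = {
      services.webserver.service = {
        image = "nginx";
        ports = [ "8080:80" ];
      };
    };
  };
}

With the serviceName default above, this yields a systemd unit named arion-example that runs "arion --prebuilt-file $ARION_PREBUILT up".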

View file

@ -3,4 +3,4 @@
# For manual testing of a hacked arion built via Nix.
# Works when called from outside the project directory.
exec nix run -f "$(dirname ${BASH_SOURCE[0]})" arion "$@"
exec nix run -f "$(dirname ${BASH_SOURCE[0]})" arion -c arion "$@"

View file

@ -1 +1 @@
(builtins.getFlake ("git+file://" + toString ./.)).devShells.${builtins.currentSystem}.default
args@{...}: (import ./nix args).arion-project.shell

View file

@ -10,7 +10,6 @@ import Arion.Aeson
import Arion.Images (loadImages)
import qualified Arion.DockerCompose as DockerCompose
import Arion.Services (getDefaultExec)
import Arion.ExtendedInfo (loadExtendedInfoFromPath, ExtendedInfo(images, projectName))
import Options.Applicative
import Control.Monad.Fail
@ -18,7 +17,7 @@ import Control.Monad.Fail
import qualified Data.Text as T
import qualified Data.Text.IO as T
import Data.Aeson(Value)
import Data.List.NonEmpty (NonEmpty(..))
import System.Posix.User (getRealUserID)
@ -27,10 +26,6 @@ data CommonOptions =
{ files :: NonEmpty FilePath
, pkgs :: Text
, nixArgs :: [Text]
, prebuiltComposeFile :: Maybe FilePath
, noAnsi :: Bool
, compatibility :: Bool
, logLevel :: Maybe Text
}
deriving (Show)
@ -60,18 +55,9 @@ parseOptions = do
<> help "Use Nix expression EXPR to get the Nixpkgs attrset used for bootstrapping \
\and evaluating the configuration." )
showTrace <- flag False True (long "show-trace"
<> help "Causes Nix to print out a stack trace in case of Nix expression evaluation errors. Specify before command.")
<> help "Causes Nix to print out a stack trace in case of Nix expression evaluation errors.")
-- TODO --option support (https://github.com/pcapriotti/optparse-applicative/issues/284)
userNixArgs <- many (T.pack <$> strOption (long "nix-arg" <> metavar "ARG" <> help "Pass an extra argument to nix. Example: --nix-arg --option --nix-arg substitute --nix-arg false"))
prebuiltComposeFile <- optional $ strOption
( long "prebuilt-file"
<> metavar "JSONFILE"
<> help "Do not evaluate and use the prebuilt JSONFILE instead. Causes other evaluation-related options to be ignored." )
noAnsi <- flag False True (long "no-ansi"
<> help "Avoid ANSI control sequences")
compatibility <- flag False True (long "compatibility"
<> help "If set, Docker Compose will attempt to convert deploy keys in v3 files to their non-Swarm equivalent")
logLevel <- optional $ fmap T.pack $ strOption (long "log-level" <> metavar "LEVEL" <> help "Set log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)")
pure $
let nixArgs = userNixArgs <|> "--show-trace" <$ guard showTrace
in CommonOptions{..}
@ -151,62 +137,22 @@ runDC cmd (DockerComposeArgs args) _opts = do
runBuildAndDC :: Text -> DockerComposeArgs -> CommonOptions -> IO ()
runBuildAndDC cmd dopts opts = do
withBuiltComposeFile opts $ callDC cmd dopts opts True
ea <- defaultEvaluationArgs opts
Arion.Nix.withBuiltComposition ea $ \path -> do
loadImages path
DockerCompose.run DockerCompose.Args
{ files = [path]
, otherArgs = [cmd] ++ unDockerComposeArgs dopts
}
runEvalAndDC :: Text -> DockerComposeArgs -> CommonOptions -> IO ()
runEvalAndDC cmd dopts opts = do
withComposeFile opts $ callDC cmd dopts opts False
callDC :: Text -> DockerComposeArgs -> CommonOptions -> Bool -> FilePath -> IO ()
callDC cmd dopts opts shouldLoadImages path = do
extendedInfo <- loadExtendedInfoFromPath path
when shouldLoadImages $ loadImages (images extendedInfo)
let firstOpts = projectArgs extendedInfo <> commonArgs opts
DockerCompose.run DockerCompose.Args
{ files = [path]
, otherArgs = firstOpts ++ [cmd] ++ unDockerComposeArgs dopts
}
projectArgs :: ExtendedInfo -> [Text]
projectArgs extendedInfo =
do
n <- toList (projectName extendedInfo)
["--project-name", n]
commonArgs :: CommonOptions -> [Text]
commonArgs opts = do
guard (noAnsi opts)
["--no-ansi"]
<> do
guard (compatibility opts)
["--compatibility"]
<> do
l <- toList (logLevel opts)
["--log-level", l]
withBuiltComposeFile :: CommonOptions -> (FilePath -> IO r) -> IO r
withBuiltComposeFile opts cont = case prebuiltComposeFile opts of
Just prebuilt -> do
cont prebuilt
Nothing -> do
args <- defaultEvaluationArgs opts
Arion.Nix.withBuiltComposition args cont
withComposeFile :: CommonOptions -> (FilePath -> IO r) -> IO r
withComposeFile opts cont = case prebuiltComposeFile opts of
Just prebuilt -> do
cont prebuilt
Nothing -> do
args <- defaultEvaluationArgs opts
Arion.Nix.withEvaluatedComposition args cont
getComposeValue :: CommonOptions -> IO Value
getComposeValue opts = case prebuiltComposeFile opts of
Just prebuilt -> do
decodeFile prebuilt
Nothing -> do
args <- defaultEvaluationArgs opts
Arion.Nix.evaluateComposition args
ea <- defaultEvaluationArgs opts
Arion.Nix.withEvaluatedComposition ea $ \path ->
DockerCompose.run DockerCompose.Args
{ files = [path]
, otherArgs = [cmd] ++ unDockerComposeArgs dopts
}
defaultEvaluationArgs :: CommonOptions -> IO EvaluationArgs
defaultEvaluationArgs co = do
@ -222,7 +168,7 @@ defaultEvaluationArgs co = do
runCat :: CommonOptions -> IO ()
runCat co = do
v <- getComposeValue co
v <- Arion.Nix.evaluateComposition =<< defaultEvaluationArgs co
T.hPutStrLn stdout (pretty v)
runRepl :: CommonOptions -> IO ()
@ -282,18 +228,13 @@ orEmpty' :: (Alternative f, Monoid a) => f a -> f a
orEmpty' m = fromMaybe mempty <$> optional m
runExec :: Bool -> Bool -> Maybe Text -> Bool -> Int -> [(Text, Text)] -> Maybe Text -> Text -> [Text] -> CommonOptions -> IO ()
runExec detach privileged user noTTY index envs workDir service commandAndArgs opts =
withComposeFile opts $ \path -> do
extendedInfo <- loadExtendedInfoFromPath path
runExec detach privileged user noTTY index envs workDir service commandAndArgs opts = do
putErrText $ "Service: " <> service
ea <- defaultEvaluationArgs opts
Arion.Nix.withEvaluatedComposition ea $ \path -> do
commandAndArgs'' <- case commandAndArgs of
[] -> do
cmd <- getDefaultExec path service
case cmd of
[] -> do
putErrText "You must provide a command via service.defaultExec or on the command line."
exitFailure
_ ->
pure cmd
[] -> getDefaultExec path service
x -> pure x
let commandAndArgs' = case commandAndArgs'' of
[] -> ["/bin/sh"]
@ -313,7 +254,7 @@ runExec detach privileged user noTTY index envs workDir service commandAndArgs o
]
DockerCompose.run DockerCompose.Args
{ files = [path]
, otherArgs = projectArgs extendedInfo <> commonArgs opts <> args
, otherArgs = args
}
main :: IO ()

View file

@ -1,37 +0,0 @@
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE OverloadedStrings #-}
{-
Parses the x-arion field in the generated compose file.
-}
module Arion.ExtendedInfo where
import Prelude()
import Protolude
import Data.Aeson as Aeson
import Arion.Aeson
import Control.Lens
import Data.Aeson.Lens
data Image = Image
{ image :: Maybe Text -- ^ image tar.gz file path
, imageExe :: Maybe Text -- ^ path to exe producing image tar
, imageName :: Text
, imageTag :: Text
} deriving (Eq, Show, Generic, Aeson.ToJSON, Aeson.FromJSON)
data ExtendedInfo = ExtendedInfo {
projectName :: Maybe Text,
images :: [Image]
} deriving (Eq, Show)
loadExtendedInfoFromPath :: FilePath -> IO ExtendedInfo
loadExtendedInfoFromPath fp = do
v <- decodeFile fp
pure ExtendedInfo {
-- TODO: use aeson derived instance?
projectName = v ^? key "x-arion" . key "project" . key "name" . _String,
images = (v :: Aeson.Value) ^.. key "x-arion" . key "images" . _Array . traverse . _JSON
}

View file

@ -1,72 +1,60 @@
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE OverloadedStrings #-}
module Arion.Images
module Arion.Images
( loadImages
) where
import Prelude()
import Protolude hiding (to)
import qualified Data.Aeson as Aeson
import Arion.Aeson (decodeFile)
import qualified System.Process as Process
import qualified Data.Text as T
import Arion.ExtendedInfo (Image(..))
import Control.Lens
import Data.Aeson.Lens
import Data.String
import System.IO (withFile, IOMode(ReadMode))
data Image = Image
{ image :: Text -- ^ file path
, imageName :: Text
, imageTag :: Text
} deriving (Generic, Aeson.ToJSON, Aeson.FromJSON, Show)
type TaggedImage = Text
-- | Subject to change
loadImages :: [Image] -> IO ()
loadImages requestedImages = do
loadImages :: FilePath -> IO ()
loadImages fp = do
loaded <- getDockerImages
v <- decodeFile fp
loaded <- dockerImages
let
isNew i =
-- On docker, the image name is unmodified
(imageName i <> ":" <> imageTag i) `notElem` loaded
-- On podman, you used to automatically get a localhost prefix
-- however, since NixOS 22.05, this is expected to be part of the name instead
&& ("localhost/" <> imageName i <> ":" <> imageTag i) `notElem` loaded
images :: [Image]
images = (v :: Aeson.Value) ^.. key "x-arion" . key "images" . _Array . traverse . _JSON
traverse_ loadImage . filter isNew $ requestedImages
isNew i = (imageName i <> ":" <> imageTag i) `notElem` loaded
loadImage :: Image -> IO ()
loadImage Image { image = Just imgPath, imageName = name } =
withFile (toS imgPath) ReadMode $ \fileHandle -> do
traverse_ loadImage . map (toS . image) . filter isNew $ images
loadImage :: FilePath -> IO ()
loadImage imgPath = withFile (imgPath) ReadMode $ \fileHandle -> do
let procSpec = (Process.proc "docker" [ "load" ]) {
Process.std_in = Process.UseHandle fileHandle
}
Process.withCreateProcess procSpec $ \_in _out _err procHandle -> do
e <- Process.waitForProcess procHandle
e <- Process.waitForProcess procHandle
case e of
ExitSuccess -> pass
ExitFailure code ->
panic $ "docker load failed with exit code " <> show code <> " for image " <> name <> " from path " <> imgPath
loadImage Image { imageExe = Just imgExe, imageName = name } = do
let loadSpec = (Process.proc "docker" [ "load" ]) { Process.std_in = Process.CreatePipe }
Process.withCreateProcess loadSpec $ \(Just inHandle) _out _err loadProcHandle -> do
let streamSpec = Process.proc (toS imgExe) []
Process.withCreateProcess streamSpec { Process.std_out = Process.UseHandle inHandle } $ \_ _ _ streamProcHandle ->
withAsync (Process.waitForProcess loadProcHandle) $ \loadExitAsync ->
withAsync (Process.waitForProcess streamProcHandle) $ \streamExitAsync -> do
r <- waitEither loadExitAsync streamExitAsync
case r of
Right (ExitFailure code) -> panic $ "image producer for image " <> name <> " failed with exit code " <> show code <> " from executable " <> imgExe
Right ExitSuccess -> pass
Left _ -> pass
loadExit <- wait loadExitAsync
case loadExit of
ExitFailure code -> panic $ "docker load failed with exit code " <> show code <> " for image " <> name <> " produced by executable " <> imgExe
_ -> pass
pass
loadImage Image { imageName = name } = do
panic $ "image " <> name <> " doesn't specify an image file or imageExe executable"
ExitFailure code -> panic $ "docker load (" <> show code <> ") failed for " <> toS imgPath
getDockerImages :: IO [TaggedImage]
getDockerImages = do
dockerImages :: IO [TaggedImage]
dockerImages = do
let procSpec = Process.proc "docker" [ "images", "--filter", "dangling=false", "--format", "{{.Repository}}:{{.Tag}}" ]
map toS . T.lines . toS <$> Process.readCreateProcess procSpec ""
(map toS . lines) <$> Process.readCreateProcess procSpec ""

View file

@ -22,6 +22,7 @@ import Paths_arion_compose
import qualified Data.Text.IO as T
import qualified Data.List.NonEmpty as NE
import Data.List.NonEmpty ( NonEmpty(..) )
import Control.Arrow ( (>>>) )
import System.IO.Temp ( withTempFile )

View file

@ -1,7 +1,6 @@
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE CPP #-}
module Arion.Services
( getDefaultExec
) where
@ -10,28 +9,15 @@ import Prelude()
import Protolude hiding (to)
import qualified Data.Aeson as Aeson
#if MIN_VERSION_lens_aeson(1,2,0)
import qualified Data.Aeson.Key as AK
#endif
import Arion.Aeson (decodeFile)
import Control.Lens
import Data.Aeson.Lens
#if MIN_VERSION_lens_aeson(1,2,0)
type Key = AK.Key
mkKey :: Text -> Key
mkKey = AK.fromText
#else
type Key = Text
mkKey :: Text -> Key
mkKey = identity
#endif
-- | Subject to change
getDefaultExec :: FilePath -> Text -> IO [Text]
getDefaultExec fp service = do
v <- decodeFile fp
pure ((v :: Aeson.Value) ^.. key "x-arion" . key "serviceInfo" . key (mkKey service) . key "defaultExec" . _Array . traverse . _String)
pure ((v :: Aeson.Value) ^.. key "x-arion" . key "serviceInfo" . key service . key "defaultExec" . _Array . traverse . _String)

View file

@ -13,34 +13,19 @@ import qualified Data.Text as T
import qualified Data.Text.IO as T
spec :: Spec
spec = describe "evaluateComposition" $ do
it "matches an example" $ do
x <- Arion.Nix.evaluateComposition EvaluationArgs
{ evalUid = 123
, evalModules = NEL.fromList
["src/haskell/testdata/Arion/NixSpec/arion-compose.nix"]
, evalPkgs = "import <nixpkgs> { system = \"x86_64-linux\"; }"
, evalWorkDir = Nothing
, evalMode = ReadOnly
, evalUserArgs = ["--show-trace"]
}
let actual = pretty x
expected <- T.readFile "src/haskell/testdata/Arion/NixSpec/arion-compose.json"
censorPaths actual `shouldBe` censorPaths expected
it "matches an build.context example" $ do
x <- Arion.Nix.evaluateComposition EvaluationArgs
{ evalUid = 1234
, evalModules = NEL.fromList
["src/haskell/testdata/Arion/NixSpec/arion-context-compose.nix"]
, evalPkgs = "import <nixpkgs> { system = \"x86_64-linux\"; }"
, evalWorkDir = Nothing
, evalMode = ReadOnly
, evalUserArgs = ["--show-trace"]
}
let actual = pretty x
expected <- T.readFile "src/haskell/testdata/Arion/NixSpec/arion-context-compose.json"
censorPaths actual `shouldBe` censorPaths expected
spec = describe "evaluateComposition" $ it "matches an example" $ do
x <- Arion.Nix.evaluateComposition EvaluationArgs
{ evalUid = 123
, evalModules = NEL.fromList
["src/haskell/testdata/Arion/NixSpec/arion-compose.nix"]
, evalPkgs = "import <nixpkgs> { system = \"x86_64-linux\"; }"
, evalWorkDir = Nothing
, evalMode = ReadOnly
, evalUserArgs = ["--show-trace"]
}
let actual = pretty x
expected <- T.readFile "src/haskell/testdata/Arion/NixSpec/arion-compose.json"
censorPaths actual `shouldBe` censorPaths expected
censorPaths :: Text -> Text
censorPaths = censorImages . censorStorePaths

View file

@ -9,4 +9,3 @@ import qualified Arion.NixSpec
spec :: Spec
spec = do
describe "Arion.Nix" Arion.NixSpec.spec

View file

@ -1,20 +1,15 @@
{
"networks": {
"default": {
"name": "unit-test-data"
}
},
"services": {
"webserver": {
"command": [
"/usr/sbin/init"
"/nix/store/b9w61w4g8sqgrm3rid6ca22krslqghb3-nixos-system-unnamed-19.03.173100.e726e8291b2/init"
],
"environment": {
"NIX_REMOTE": "",
"PATH": "/usr/bin:/run/current-system/sw/bin/",
"container": "docker"
},
"image": "localhost/webserver:<HASH>",
"image": "arion-base:<HASH>",
"ports": [
"8000:80"
],
@ -28,23 +23,20 @@
"tty": true,
"volumes": [
"/sys/fs/cgroup:/sys/fs/cgroup:ro",
"/nix/store:/nix/store:ro"
"/nix/store:/nix/store:ro",
"/nix/store/pssdmhzjnhflawv7rwk1yw39350iv40g-container-system-env:/run/system:ro"
]
}
},
"version": "3.4",
"volumes": {},
"x-arion": {
"images": [
{
"imageExe": "<STOREPATH>",
"imageName": "localhost/webserver",
"image": "<STOREPATH>",
"imageName": "arion-base",
"imageTag": "<HASH>"
}
],
"project": {
"name": "unit-test-data"
},
"serviceInfo": {
"webserver": {
"defaultExec": [

View file

@ -1,8 +1,7 @@
{
project.name = "unit-test-data";
services.webserver = { pkgs, ... }: {
nixos.useSystemd = true;
nixos.configuration.boot.tmp.useTmpfs = true;
nixos.configuration.boot.tmpOnTmpfs = true;
nixos.configuration.services.nginx.enable = true;
nixos.configuration.services.nginx.virtualHosts.localhost.root = "${pkgs.nix.doc}/share/doc/nix/manual";
service.useHostStore = true;

View file

@ -1,41 +0,0 @@
{
"networks": {
"default": {
"name": "unit-test-data"
}
},
"services": {
"webserver": {
"build": {
"context": "<STOREPATH>"
},
"environment": {},
"ports": [
"8080:80"
],
"sysctls": {},
"volumes": []
}
},
"version": "3.4",
"volumes": {},
"x-arion": {
"images": [
{
"imageExe": "<STOREPATH>",
"imageName": "localhost/webserver",
"imageTag": "<HASH>"
}
],
"project": {
"name": "unit-test-data"
},
"serviceInfo": {
"webserver": {
"defaultExec": [
"/bin/sh"
]
}
}
}
}

View file

@ -1,9 +0,0 @@
{
project.name = "unit-test-data";
services.webserver.service = {
build.context = "${./build-context}";
ports = [
"8080:80"
];
};
}

View file

@ -1,4 +0,0 @@
FROM nginx
RUN echo this is a dockerfile to be built

View file

@ -30,9 +30,6 @@
"imageTag": "xr4ljmz3qfcwlq9rl4mr4qdrzw93rl70"
}
],
"project": {
"name": null
},
"serviceInfo": {
"webserver": {
"defaultExec": [

View file

@ -12,6 +12,7 @@ let
inherit (pkgs) lib;
composition = lib.evalModules {
check = true;
modules = builtinModules ++ modules;
};
@ -23,7 +24,6 @@ let
_file = ./eval-composition.nix;
key = ./eval-composition.nix;
config._module.args.pkgs = lib.mkIf (pkgs != null) (lib.mkForce pkgs);
config._module.args.check = true;
config.host.nixStorePrefix = hostNixStorePrefix;
config.host.uid = lib.toInt uid;
};
@ -33,5 +33,5 @@ in
composition // {
# throw in lib and pkgs for repl convenience
inherit lib;
inherit (composition._module.args) pkgs;
inherit (composition.config._module.args) pkgs;
}

View file

@ -1,21 +0,0 @@
{ lib }:
let
link = url: text: ''[${text}](${url})'';
composeSpecRev = "55b450aee50799a2f33cc99e1d714518babe305e";
serviceRef = fragment:
''See ${link "https://github.com/compose-spec/compose-spec/blob/${composeSpecRev}/05-services.md#${fragment}" "Compose Spec Services #${fragment}"}'';
networkRef = fragment:
''See ${link "https://github.com/compose-spec/compose-spec/blob/${composeSpecRev}/06-networks.md#${fragment}" "Compose Spec Networks #${fragment}"}'';
in
{
inherit
link
networkRef
serviceRef
;
}

View file

@ -2,7 +2,6 @@
./modules/composition/docker-compose.nix
./modules/composition/host-environment.nix
./modules/composition/images.nix
./modules/composition/networks.nix
./modules/composition/service-info.nix
./modules/composition/composition.nix
./modules/composition/arion-base-image.nix
]

View file

@ -0,0 +1,41 @@
# This module is subject to change.
# In particular, arion-base should use a generic non-service image building system
{ config, lib, pkgs, ... }:
let
tag = lib.head (lib.strings.splitString "-" (baseNameOf builtImage.outPath));
name = "arion-base";
builtImage = pkgs.dockerTools.buildLayeredImage {
inherit name;
contents = pkgs.runCommand "minimal-contents" {} ''
mkdir -p $out/bin $out/usr/bin
ln -s /run/system/bin/sh $out/bin/sh
ln -s /run/system/usr/bin/env $out/usr/bin/env
'';
config = {};
};
in
{
options = {
arionBaseImage = lib.mkOption {
type = lib.types.str;
description = "Image to use when using useHostStore. Don't use this option yourself. It's going away.";
internal = true;
};
};
config = {
arionBaseImage = "${name}:${tag}";
build.imagesToLoad = lib.mkIf (lib.any (s: s.service.useHostStore) (lib.attrValues config.services)) [
{ image = builtImage; imageName = name; imageTag = tag; }
];
};
}
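
As an aside, the tag computed above is simply the hash prefix of the image's store path; a worked illustration with a hypothetical path:

# baseNameOf "/nix/store/9vg9mk3zzpgnrl2wg1h2j6a0a1cvv5kq-arion-base"
#   => "9vg9mk3zzpgnrl2wg1h2j6a0a1cvv5kq-arion-base"
# lib.strings.splitString "-" "9vg9mk3zzpgnrl2wg1h2j6a0a1cvv5kq-arion-base"
#   => [ "9vg9mk3zzpgnrl2wg1h2j6a0a1cvv5kq" "arion" "base" ]
# lib.head [ ... ]
#   => "9vg9mk3zzpgnrl2wg1h2j6a0a1cvv5kq"   (used as the image tag)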

View file

@ -1,28 +0,0 @@
{ config, lib, ... }:
let
inherit (lib) types mkOption;
link = url: text:
''[${text}](${url})'';
in
{
options = {
_module.args = mkOption {
internal = true;
};
project.name = mkOption {
description = ''
Name of the project.
See ${link "https://docs.docker.com/compose/reference/envvars/#compose_project_name" "COMPOSE_PROJECT_NAME"}
This is not optional, because deriving the project name from the directory name tends to produce different results depending on where the repository is checked out.
'';
type = types.str;
};
};
config = {
docker-compose.extended.project.name = config.project.name;
};
}

View file

@ -63,11 +63,6 @@ in
type = lib.types.attrsOf (lib.types.submodule service);
description = "An attribute set of service configurations. A service specifies how to run an image as a container.";
};
docker-compose.volumes = lib.mkOption {
type = lib.types.attrsOf lib.types.unspecified;
description = "A attribute set of volume configurations.";
default = {};
};
};
config = {
out.dockerComposeYaml = pkgs.writeText "docker-compose.yaml" config.out.dockerComposeYamlText;
@ -78,7 +73,6 @@ in
version = "3.4";
services = lib.mapAttrs (k: c: c.out.service) config.services;
x-arion = config.docker-compose.extended;
volumes = config.docker-compose.volumes;
};
};
}
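
For context, docker-compose.volumes above is copied verbatim into the top-level volumes key of the generated file. A minimal sketch of declaring a named volume in an arion-compose.nix module (the names are illustrative):

{
  docker-compose.volumes.postgres-data = { };   # empty attrset: engine-managed named volume
  services.db.service.volumes = [ "postgres-data:/var/lib/postgresql/data" ];
}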

View file

@ -23,9 +23,9 @@
stored at an alternate location without altering the format of
store paths.
For example: instead of mounting the host's `/nix/store` as the
container's `/nix/store`, this will mount `/mnt/foo/nix/store`
as the container's `/nix/store`.
For example: instead of mounting the host's /nix/store as the
container's /nix/store, this will mount /mnt/foo/nix/store
as the container's /nix/store.
'';
};

View file

@ -16,27 +16,20 @@ let
(let
inherit (service) build;
in {
image = build.image.outPath;
imageName = build.imageName or service.image.name;
imageTag =
if build.image.imageTag != ""
then build.image.imageTag
else lib.head (lib.strings.splitString "-" (baseNameOf build.image.outPath));
} // (if build.image.isExe or false
then {
imageExe = build.image.outPath;
}
else {
image = build.image.outPath;
}
)
);
});
in
{
options = {
build.imagesToLoad = lib.mkOption {
type = listOf unspecified;
internal = true;
description = "List of `dockerTools` image derivations.";
description = "List of dockerTools image derivations.";
};
};
config = {

View file

@ -1,53 +0,0 @@
{ config, lib, ... }:
let
inherit (lib)
mkOption
optionalAttrs
types
;
inherit (import ../../lib.nix { inherit lib; })
link
;
in
{
options = {
networks = mkOption {
type = types.lazyAttrsOf (types.submoduleWith {
modules = [
../networks/network.nix
];
});
description = ''
See ${link "https://docs.docker.com/compose/compose-file/06-networks/" "Docker Compose Networks"}
'';
};
enableDefaultNetwork = mkOption {
type = types.bool;
description = ''
Whether to define the default network:
```nix
networks.default = {
name = config.project.name;
};
```
'';
default = true;
};
};
config = {
networks = optionalAttrs config.enableDefaultNetwork {
default = {
name = config.project.name;
};
};
docker-compose.raw.networks =
lib.mapAttrs (k: v: v.out) config.networks;
};
}
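
Besides the implicit default network controlled by enableDefaultNetwork, further networks can be declared and attached per service; a sketch (the network and service names are illustrative):

{
  networks.backend = {
    internal = true;   # "external isolation": no outside connectivity
  };
  services.db.service.networks = [ "backend" ];
}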

View file

@ -3,15 +3,14 @@
# based on nixpkgs/nixos/modules/system/activation/top-level.nix
let
inherit (lib)
concatStringsSep
filter
mkOption
showWarnings
types
;
inherit (lib) filter concatStringsSep types mkOption;
# lib.showWarnings since 19.09
showWarnings = warnings: res: lib.fold (w: x: lib.warn w x) res warnings;
warn = msg: builtins.trace "warning: ${msg}";
# Handle assertions and warnings
failedAssertions = map (x: x.message) (filter (x: !x.assertion) config.assertions);
assertWarn = if failedAssertions != []

View file

@ -1,131 +0,0 @@
{ config, lib, options, ... }:
let
inherit (lib)
mkOption
optionalAttrs
types
;
inherit (import ../../lib.nix { inherit lib; })
networkRef
;
in
{
options = {
driver = mkOption {
description = ''
`"none"`, `"host"`, or a platform-specific value.
${networkRef "driver"}
'';
type = types.str;
};
driver_opts = mkOption {
description = ''
${networkRef "driver_opts"}
'';
type = types.lazyAttrsOf types.raw or types.unspecified;
};
attachable = mkOption {
description = ''
${networkRef "attachable"}
'';
type = types.bool;
example = true;
};
enable_ipv6 = mkOption {
description = ''
Whether we've entered the 21st century yet.
${networkRef "enable_ipv6"}
'';
type = types.bool;
};
ipam = mkOption {
# TODO model sub-options
description = ''
Manage IP addresses.
${networkRef "ipam"}
'';
type = types.raw or types.unspecified;
};
internal = mkOption {
description = ''
Achieves "external isolation".
${networkRef "internal"}
'';
defaultText = false;
type = types.bool;
};
labels = mkOption {
description = ''
Metadata.
${networkRef "labels"}
'';
# no list support, because less expressive wrt overriding
type = types.attrsOf types.str;
};
external = mkOption {
description = ''
When `true`, don't create or destroy the network, but assume that it
exists.
${networkRef "external"}
'';
type = types.bool;
};
name = mkOption {
description = ''
Set a custom name for the network.
It shares a namespace with other projects' networks. `name` is used as-is.
Note the `default` network's default `name` is set to `project.name` by Arion.
${networkRef "name"}
'';
type = types.str;
};
out = mkOption {
internal = true;
description = ''
This network's contribution to the docker compose yaml file
under the `networks.''${name}` key.
'';
type = lib.types.attrsOf lib.types.raw or lib.types.unspecified;
};
};
config = {
out =
lib.mapAttrs
(k: opt: opt.value)
(lib.filterAttrs
(k: opt: opt.isDefined)
{
inherit (options)
driver
driver_opts
attachable
enable_ipv6
ipam
internal
labels
external
name
;
}
);
};
}

View file

@ -5,9 +5,7 @@
./host-store.nix
./context.nix
./image.nix
./image-recommended.nix
./nixos.nix
./nixos-init.nix
../lib/assert.nix
./check-sys_admin.nix
]

View file

@ -1,30 +0,0 @@
{ config, lib, name, ... }:
let
inherit (lib)
concatStringsSep
optional
;
dynamicUserServices = lib.attrNames (
lib.filterAttrs
(k: v:
v.enable &&
v.serviceConfig.DynamicUser or false)
config.nixos.evaluatedConfig.systemd.services
);
in
{
config = {
warnings =
optional (config.nixos.useSystemd && !(config.service.capabilities.SYS_ADMIN or false) && dynamicUserServices != []) (
''In service ${name}, the following units require `SYS_ADMIN` capability
because of DynamicUser.
${concatStringsSep "\n" (map (srv: " - services.${name}.nixos.configuration.systemd.services.${srv}") dynamicUserServices)}
You can avoid DynamicUser or use
services.${name}.service.capabilities.SYS_ADMIN = true;
''
);
};
}

View file

@ -9,7 +9,7 @@ in
default = ["/bin/sh"];
description = ''
Container program and arguments to invoke when calling
`arion exec <service.name>` without further arguments.
<code>arion exec &lt;service.name></code> without further arguments.
'';
};
};
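
The hunk above hides the option name, but this is presumably service.defaultExec, which the serviceInfo/getDefaultExec code earlier in this diff reads when arion exec is called without a command. A sketch of overriding it (assumes bash exists in the image):

{
  services.webserver.service.defaultExec = [ "bash" "-l" ];
}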

View file

@ -4,16 +4,21 @@
the user-facing options service.image, service.volumes, etc.
*/
{ pkgs, lib, config, options, ... }:
{ pkgs, lib, config, ... }:
let
inherit (lib) mkOption types;
inherit (types) listOf nullOr attrsOf str either int bool submodule enum;
inherit (types) listOf nullOr attrsOf str either int bool;
inherit (import ../../lib.nix { inherit lib; })
link
serviceRef
;
link = url: text:
''<link xlink:href="${url}">${text}</link>'';
dockerComposeRef = fragment:
''See <link xlink:href="https://docs.docker.com/compose/compose-file/#${fragment}">Docker Compose#${fragment}</link>'';
dockerComposeKitchenSink = ''
Analogous to the <code>docker run</code> counterpart.
${dockerComposeRef "domainname-hostname-ipc-mac_address-privileged-read_only-shm_size-stdin_open-tty-user-working_dir"}
'';
cap_add = lib.attrNames (lib.filterAttrs (name: value: value == true) config.service.capabilities);
cap_drop = lib.attrNames (lib.filterAttrs (name: value: value == false) config.service.capabilities);
@ -28,7 +33,7 @@ in
out.service = mkOption {
type = attrsOf types.unspecified;
description = ''
Raw input for the service in `docker-compose.yaml`.
Raw input for the service in <code>docker-compose.yaml</code>.
You should not need to use this option. If anything is
missing, please contribute the missing option.
@ -42,7 +47,7 @@ in
service.name = mkOption {
type = str;
description = ''
The name of the service - `<name>` in the composition-level `services.<name>`
The name of the service - <code>&lt;name></code> in the composition-level <code>services.&lt;name></code>
'';
readOnly = true;
};
@ -50,12 +55,12 @@ in
service.volumes = mkOption {
type = listOf types.unspecified;
default = [];
description = serviceRef "volumes";
description = dockerComposeRef "volumes";
};
service.tmpfs = mkOption {
type = listOf types.str;
default = [];
description = serviceRef "tmpfs";
description = dockerComposeRef "tmpfs";
};
service.build.context = mkOption {
type = nullOr str;
@ -63,193 +68,92 @@ in
description = ''
Locates a Dockerfile to use for creating an image to use in this service.
https://docs.docker.com/compose/compose-file/build/#context
'';
};
service.build.dockerfile = mkOption {
type = nullOr str;
default = null;
description = ''
Sets an alternate Dockerfile. A relative path is resolved from the build context.
https://docs.docker.com/compose/compose-file/build/#dockerfile
'';
};
service.build.target = mkOption {
type = nullOr str;
default = null;
description = ''
Defines the stage to build as defined inside a multi-stage Dockerfile.
https://docs.docker.com/compose/compose-file/build/#target
${dockerComposeRef "context"}
'';
};
service.hostname = mkOption {
type = nullOr str;
default = null;
description = ''
${serviceRef "hostname"}
'';
description = dockerComposeKitchenSink;
};
service.tty = mkOption {
type = nullOr bool;
default = null;
description = ''
${serviceRef "tty"}
'';
description = dockerComposeKitchenSink;
};
service.environment = mkOption {
type = attrsOf (either str int);
default = {};
description = serviceRef "environment";
description = dockerComposeRef "environment";
};
service.image = mkOption {
type = nullOr str;
default = null;
description = serviceRef "image";
type = str;
description = dockerComposeRef "image";
};
service.command = mkOption {
type = nullOr types.unspecified;
default = null;
description = serviceRef "command";
description = dockerComposeRef "command";
};
service.container_name = mkOption {
type = nullOr types.str;
default = null;
description = serviceRef "container_name";
description = dockerComposeRef "container_name";
};
service.depends_on =
let conditionsModule = {
options = {
condition = mkOption {
type = enum ["service_started" "service_healthy" "service_completed_successfully"];
description = serviceRef "depends_on";
default = "service_started";
};
};
};
in mkOption {
type = either (listOf str) (attrsOf (submodule conditionsModule));
default = [];
description = serviceRef "depends_on";
};
service.healthcheck = mkOption {
description = serviceRef "healthcheck";
type = submodule ({ config, options, ...}: {
options = {
_out = mkOption {
internal = true;
default = lib.optionalAttrs (options.test.highestPrio < 1500) {
inherit (config) test interval timeout start_period retries;
};
};
test = mkOption {
type = nullOr (listOf str);
default = null;
example = [ "CMD" "pg_isready" ];
description = serviceRef "healthcheck";
};
interval = mkOption {
type = str;
default = "30s";
example = "1m";
description = serviceRef "healthcheck";
};
timeout = mkOption {
type = str;
default = "30s";
example = "10s";
description = serviceRef "healthcheck";
};
start_period = mkOption {
type = str;
default = "0s";
example = "30s";
description = serviceRef "healthcheck";
};
retries = mkOption {
type = int;
default = 3;
description = serviceRef "healthcheck";
};
};
});
service.depends_on = mkOption {
type = listOf str;
default = [];
description = dockerComposeRef "depends_on";
};
service.devices = mkOption {
type = listOf str;
default = [];
description = ''
See ${link "https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities"
"`docker run --device` documentation"}
"<code>docker run --device</code> documentation"}
${serviceRef "devices"}
${dockerComposeRef "devices"}
'';
};
service.dns = mkOption {
type = listOf str;
default = [];
example = [ "8.8.8.8" "8.8.4.4" ];
description = serviceRef "dns";
};
service.labels = mkOption {
type = attrsOf str;
default = {};
example = {
"com.example.foo" = "bar";
"traefik.enable" = "true";
"traefik.http.routers.my-service.rule" = "Host(`my-service.localhost`)";
"traefik.http.routers.my-service.entrypoints" = "web";
};
description = serviceRef "labels";
};
service.links = mkOption {
type = listOf str;
default = [];
description = serviceRef "links";
description = dockerComposeRef "links";
};
service.external_links = mkOption {
type = listOf str;
default = [];
description = serviceRef "external_links";
};
service.profiles = mkOption {
type = listOf str;
default = [];
description = serviceRef "profiles";
description = dockerComposeRef "external_links";
};
service.extra_hosts = mkOption {
type = listOf str;
default = [];
description = serviceRef "extra_hosts";
description = dockerComposeRef "extra_hosts";
};
service.working_dir = mkOption {
type = nullOr str;
default = null;
description = ''
${serviceRef "working_dir"}
'';
description = dockerComposeKitchenSink;
};
service.privileged = mkOption {
type = nullOr bool;
default = null;
description = ''
${serviceRef "privileged"}
'';
description = dockerComposeKitchenSink;
};
service.entrypoint = mkOption {
type = nullOr str;
default = null;
description = serviceRef "entrypoint";
description = dockerComposeRef "entrypoint";
};
service.restart = mkOption {
type = nullOr str;
default = null;
description = serviceRef "restart";
description = dockerComposeRef "restart";
};
service.user = mkOption {
type = nullOr str;
default = null;
description = ''
${serviceRef "user"}
'';
description = dockerComposeKitchenSink;
};
service.ports = mkOption {
type = listOf types.unspecified;
@ -257,76 +161,38 @@ in
description = ''
Expose ports on host. "host:container" or structured.
${serviceRef "ports"}
${dockerComposeRef "ports"}
'';
};
service.expose = mkOption {
type = listOf str;
default = [];
description = serviceRef "expose";
description = dockerComposeRef "expose";
};
service.env_file = mkOption {
type = listOf str;
default = [];
description = serviceRef "env_file";
description = dockerComposeRef "env_file";
};
service.network_mode = mkOption {
type = nullOr str;
default = null;
description = serviceRef "network_mode";
description = dockerComposeRef "network_mode";
};
service.networks = mkOption {
type = nullOr (listOf types.str);
default = null;
description = dockerComposeRef "networks";
};
service.networks =
let
networksModule = submodule ({ config, options, ...}: {
options = {
_out = mkOption {
internal = true;
readOnly = true;
default = lib.mapAttrs (k: opt: opt.value) (lib.filterAttrs (_: opt: opt.isDefined) { inherit (options) aliases ipv4_address ipv6_address link_local_ips priority; });
};
aliases = mkOption {
type = listOf str;
description = serviceRef "aliases";
default = [ ];
};
ipv4_address = mkOption {
type = str;
description = serviceRef "ipv4_address-ipv6_address";
};
ipv6_address = mkOption {
type = str;
description = serviceRef "ipv4_address-ipv6_address";
};
link_local_ips = mkOption {
type = listOf str;
description = serviceRef "link_local_ips";
};
priority = mkOption {
type = int;
description = serviceRef "priority";
};
};
});
in
mkOption {
type = either (listOf str) (attrsOf networksModule);
default = [];
description = serviceRef "networks";
};
service.stop_signal = mkOption {
type = nullOr str;
default = null;
description = serviceRef "stop_signal";
};
service.stop_grace_period = mkOption {
type = nullOr str;
default = null;
description = serviceRef "stop_grace_period";
description = dockerComposeRef "stop_signal";
};
service.sysctls = mkOption {
type = attrsOf (either str int);
default = {};
description = serviceRef "sysctls";
description = dockerComposeRef "sysctls";
};
service.capabilities = mkOption {
type = attrsOf (nullOr bool);
@ -335,17 +201,15 @@ in
description = ''
Enable/disable linux capabilities, or pick Docker's default.
Setting a capability to `true` means that it will be
"added". Setting it to `false` means that it will be "dropped".
Setting a capability to <code>true</code> means that it will be
"added". Setting it to <code>false</code> means that it will be "dropped".
${dockerComposeRef "cap_add-cap_drop"}
Omitted and `null` capabilities will therefore be set
Omitted and <code>null</code> capabilities will therefore be set
according to Docker's ${
link "https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities"
"default list of capabilities."
}
${serviceRef "cap_add"}
${serviceRef "cap_drop"}
'';
};
};
@ -355,11 +219,10 @@ in
volumes
environment
sysctls
image
;
} // lib.optionalAttrs (config.service.image != null) {
inherit (config.service) image;
} // lib.optionalAttrs (config.service.build.context != null ) {
build = lib.filterAttrs (n: v: v != null) config.service.build;
} // lib.optionalAttrs (config.service.build.context != null) {
inherit (config.service) build;
} // lib.optionalAttrs (cap_add != []) {
inherit cap_add;
} // lib.optionalAttrs (cap_drop != []) {
@ -370,8 +233,6 @@ in
inherit (config.service) container_name;
} // lib.optionalAttrs (config.service.depends_on != []) {
inherit (config.service) depends_on;
} // lib.optionalAttrs (options.service.healthcheck.highestPrio < 1500) {
healthcheck = config.service.healthcheck._out;
} // lib.optionalAttrs (config.service.devices != []) {
inherit (config.service) devices;
} // lib.optionalAttrs (config.service.entrypoint != null) {
@ -386,10 +247,6 @@ in
inherit (config.service) extra_hosts;
} // lib.optionalAttrs (config.service.hostname != null) {
inherit (config.service) hostname;
} // lib.optionalAttrs (config.service.dns != []) {
inherit (config.service) dns;
} // lib.optionalAttrs (config.service.labels != {}) {
inherit (config.service) labels;
} // lib.optionalAttrs (config.service.links != []) {
inherit (config.service) links;
} // lib.optionalAttrs (config.service.ports != []) {
@ -398,16 +255,12 @@ in
inherit (config.service) privileged;
} // lib.optionalAttrs (config.service.network_mode != null) {
inherit (config.service) network_mode;
} // lib.optionalAttrs (config.service.networks != [] && config.service.networks != {}) {
networks =
if (builtins.isAttrs config.service.networks) then builtins.mapAttrs (_: v: v._out) config.service.networks
else config.service.networks;
} // lib.optionalAttrs (config.service.networks != null) {
inherit (config.service) networks;
} // lib.optionalAttrs (config.service.restart != null) {
inherit (config.service) restart;
} // lib.optionalAttrs (config.service.stop_signal != null) {
inherit (config.service) stop_signal;
} // lib.optionalAttrs (config.service.stop_grace_period != null) {
inherit (config.service) stop_grace_period;
} // lib.optionalAttrs (config.service.tmpfs != []) {
inherit (config.service) tmpfs;
} // lib.optionalAttrs (config.service.tty != null) {
@ -416,7 +269,5 @@ in
inherit (config.service) working_dir;
} // lib.optionalAttrs (config.service.user != null) {
inherit (config.service) user;
} // lib.optionalAttrs (config.service.profiles != []) {
inherit (config.service) profiles;
};
}
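
Putting several of the options above together, a typical arion-compose.nix might look like the following sketch; image names, ports and the healthcheck are illustrative:

{
  project.name = "demo";
  services.db.service = {
    image = "postgres:15";
    environment.POSTGRES_PASSWORD = "dev-only";
    healthcheck.test = [ "CMD" "pg_isready" ];
  };
  services.web.service = {
    image = "nginx";
    ports = [ "8080:80" ];
    depends_on.db.condition = "service_healthy";
  };
}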

View file

@ -12,10 +12,10 @@ in
type = attrsOf unspecified;
description = ''
Information about a service to include in the Docker Compose file,
but that will not be used by the `docker-compose` command
but that will not be used by the <code>docker-compose</code> command
itself.
It will be inserted in `x-arion.serviceInfo.<service.name>`.
It will be inserted in <code>x-arion.serviceInfo.&lt;service.name></code>.
'';
default = {};
};

View file

@ -20,7 +20,7 @@ in
service.hostStoreAsReadOnly = mkOption {
type = types.bool;
default = true;
description = "Adds a `:ro` (read-only) access mode to the host nix store bind mount.";
description = "Adds a ':ro' (read-only) access mode to the host nix store bind mount.";
};
service.useHostNixDaemon = mkOption {
type = types.bool;
@ -29,10 +29,12 @@ in
};
};
config = mkIf config.service.useHostStore {
image.includeStorePaths = false;
image.nixBuild = false; # no need to build and load
service.image = config.composition.arionBaseImage;
service.environment.NIX_REMOTE = lib.optionalString config.service.useHostNixDaemon "daemon";
service.volumes = [
"${config.host.nixStorePrefix}/nix/store:/nix/store${lib.optionalString config.service.hostStoreAsReadOnly ":ro"}"
"${config.host.nixStorePrefix}${pkgs.buildEnv { name = "container-system-env"; paths = [ pkgs.bashInteractive pkgs.coreutils ]; }}:/run/system${lib.optionalString config.service.hostStoreAsReadOnly ":ro"}"
] ++ lib.optional config.service.useHostNixDaemon "/nix/var/nix/daemon-socket:/nix/var/nix/daemon-socket";
service.command = lib.mkDefault (map escape (config.image.rawConfig.Cmd or []));
};
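
This option is what the minimal example relies on; a sketch of a service that reuses the host's /nix/store instead of building and loading an image, loosely following examples/minimal (project name, port and command are illustrative):

{ pkgs, ... }:
{
  project.name = "webapp";
  services.webserver.service = {
    useHostStore = true;
    command = [ "sh" "-c" "cd \"$$WEB_ROOT\" && ${pkgs.python3}/bin/python -m http.server" ];
    ports = [ "8000:8000" ];
    environment.WEB_ROOT = "${pkgs.nix.doc}/share/doc/nix/manual";
  };
}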

View file

@ -1,36 +0,0 @@
{ config, lib, pkgs, ... }:
let
inherit (lib)
mkIf
mkOption
types
;
inherit (types)
bool
;
recommendedContents = { runCommand, bash, coreutils }:
runCommand "recommended-contents" {} ''
mkdir -p $out/bin $out/usr/bin $out/var/empty
ln -s ${bash}/bin/sh $out/bin/sh
ln -s ${coreutils}/bin/env $out/usr/bin/env
'';
in
{
options = {
image.enableRecommendedContents = mkOption {
type = bool;
default = false;
description = ''
Add the `/bin/sh` and `/usr/bin/env` symlinks and some lightweight
files.
'';
};
};
config = {
image.contents = mkIf config.image.enableRecommendedContents [
(pkgs.callPackage recommendedContents {})
];
};
}
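
A short sketch of the intended use, giving a from-scratch Nix-built image a shell without pulling in a full base image (service name and command illustrative):

{
  services.worker = {
    image.enableRecommendedContents = true;   # provides /bin/sh and /usr/bin/env
    image.command = [ "/bin/sh" "-c" "echo hello from the container" ];
  };
}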

View file

@ -1,15 +1,6 @@
{ pkgs, lib, config, options, ... }:
let
inherit (lib)
functionArgs
mkOption
optionalAttrs
types
warn
;
inherit (pkgs)
dockerTools
;
inherit (lib) types mkOption;
inherit (types) attrsOf listOf nullOr package str unspecified bool;
# TODO: dummy-config is a useless layer. Nix 2.3 will let us inspect
@ -18,38 +9,10 @@ let
(pkgs.writeText "dummy-config.json" (builtins.toJSON config.image.rawConfig))
];
includeStorePathsWarningAndDefault = lib.warn ''
You're using a version of Nixpkgs that doesn't support the includeStorePaths
parameter in dockerTools.streamLayeredImage. Without this, Arion's
useHostStore does not achieve the intended speedup.
'' {};
buildOrStreamLayeredImage = args:
let
args_base = builtins.intersectAttrs
{
name = null; tag = null; contents = null; config = null;
created = null; extraCommands = null; maxLayers = null;
fakeRootCommands = null;
}
args;
acceptedArgs = functionArgs dockerTools.streamLayeredImage;
args_no_store = lib.optionalAttrs (!(args.includeStorePaths or true)) (
if acceptedArgs ? includeStorePaths
then { inherit (args) includeStorePaths; }
else includeStorePathsWarningAndDefault
);
args_streamLayered = args_base // args_no_store;
in
if dockerTools?streamLayeredImage
then dockerTools.streamLayeredImage args_streamLayered // { isExe = true; }
else dockerTools.buildLayeredImage args_base;
builtImage = buildOrStreamLayeredImage {
builtImage = pkgs.dockerTools.buildLayeredImage {
inherit (config.image)
name
contents
includeStorePaths
;
config = config.image.rawConfig;
maxLayers = 100;
@ -68,8 +31,6 @@ let
ln -s $i nix/var/nix/gcroots/docker/$(basename $i)
done;
'';
fakeRootCommands = config.image.fakeRootCommands;
};
priorityIsDefault = option: option.highestPrio >= (lib.mkDefault true).priority;
@ -79,41 +40,41 @@ in
build.image = mkOption {
type = nullOr package;
description = ''
Docker image derivation to be `docker load`-ed.
Docker image derivation to be <code>docker load</code>ed.
'';
internal = true;
};
build.imageName = mkOption {
type = str;
description = "Derived from `build.image`";
description = "Derived from build.image";
internal = true;
};
build.imageTag = mkOption {
type = str;
description = "Derived from `build.image`";
description = "Derived from build.image";
internal = true;
};
image.nixBuild = mkOption {
type = bool;
description = ''
Whether to build this image with Nixpkgs'
`dockerTools.buildLayeredImage`
and then load it with `docker load`.
<code>dockerTools.buildLayeredImage</code>
and then load it with <code>docker load</code>.
By default, an image will be built with Nix unless `service.image`
is set. See also `image.name`, which defaults to
By default, an image will be built with Nix unless <option>service.image</option>
is set. See also <option>image.name</option>, which defaults to
the service name.
'';
};
image.name = mkOption {
type = str;
default = "localhost/" + config.service.name;
defaultText = lib.literalExpression or lib.literalExample ''"localhost/" + config.service.name'';
default = config.service.name;
defaultText = lib.literalExample "config.service.name";
description = ''
A human readable name for the docker image.
Shows up in the `docker ps` output in the
`IMAGE` column, among other places.
Shows up in the <code>docker ps</code> output in the
<code>IMAGE</code> column, among other places.
'';
};
image.contents = mkOption {
@ -123,24 +84,6 @@ in
Top level paths in the container.
'';
};
image.fakeRootCommands = mkOption {
type = types.lines;
default = "";
description = ''
Commands that build the root of the container in the current working directory.
See [`dockerTools.buildLayeredImage`](https://nixos.org/manual/nixpkgs/stable/#ssec-pkgs-dockerTools-buildLayeredImage).
'';
};
image.includeStorePaths = mkOption {
type = bool;
default = true;
internal = true;
description = ''
Include all referenced store paths. You generally want this in your
image, unless you load store paths via some other means, like `useHostStore = true`;
'';
};
image.rawConfig = mkOption {
type = attrsOf unspecified;
default = {};
@ -149,11 +92,11 @@ in
been modeled in the Arion module system.
This attribute set does not have an appropriate merge function.
Please use the specific `image` options instead.
Please use the specific <code>image</code> options instead.
Run-time configuration of the container. A full list of the
options is available in the [Docker Image Specification
v1.2.0](https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions).
options are available in the <link xlink:href="https://github.com/moby/moby/blob/master/image/spec/v1.2.md#image-json-field-descriptions">Docker Image Specification
v1.2.0</link>.
'';
};
image.command = mkOption {
@ -163,19 +106,17 @@ in
'';
};
};
config = lib.mkMerge [{
build.image = builtImage;
build.imageName = config.build.image.imageName;
build.imageTag =
if config.build.image.imageTag != ""
then config.build.image.imageTag
else lib.head (lib.strings.splitString "-" (baseNameOf config.build.image.outPath));
image.rawConfig.Cmd = config.image.command;
image.nixBuild = lib.mkDefault (priorityIsDefault options.service.image);
}
( lib.mkIf (config.service.build.context == null)
{
service.image = lib.mkDefault "${config.build.imageName}:${config.build.imageTag}";
})
];
config = {
build.image = builtImage;
build.imageName = config.build.image.imageName;
build.imageTag =
if config.build.image.imageTag != ""
then config.build.image.imageTag
else lib.head (lib.strings.splitString "-" (baseNameOf config.build.image.outPath));
service.image = lib.mkDefault "${config.build.imageName}:${config.build.imageTag}";
image.rawConfig.Cmd = config.image.command;
image.nixBuild = lib.mkDefault (priorityIsDefault options.service.image);
};
}
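
For a service whose image is built by Nix, the image.* options above combine roughly as follows; a sketch with illustrative name, contents and command:

{ pkgs, ... }:
{
  services.cache.image = {
    name = "localhost/cache";            # otherwise derived from the service name
    contents = [ pkgs.redis ];           # store paths linked into the image root
    command = [ "/bin/redis-server" ];   # ends up in rawConfig.Cmd
  };
}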

View file

@ -13,7 +13,7 @@ in
description = ''
When enabled, call the NixOS systemd-based init system.
Configure NixOS with the `nixos.configuration` option.
Configure NixOS with <code>nixos.configuration</code>.
'';
};
};
@ -24,13 +24,7 @@ in
../nixos/default-shell.nix
(pkgs.path + "/nixos/modules/profiles/minimal.nix")
];
image.command = [ "/usr/sbin/init" ];
image.contents = [
(pkgs.runCommand "root-init" {} ''
mkdir -p $out/usr/sbin
ln -s ${config.nixos.build.toplevel}/init $out/usr/sbin/init
'')
];
image.command = [ "${config.nixos.build.toplevel}/init" ];
service.environment.container = "docker";
service.environment.PATH = "/usr/bin:/run/current-system/sw/bin/";
service.volumes = [
@ -39,7 +33,7 @@ in
service.tmpfs = [
"/run" # noexec is fine because exes should be symlinked from elsewhere anyway
"/run/wrappers" # noexec breaks this intentionally
] ++ lib.optional (config.nixos.evaluatedConfig.boot.tmp.useTmpfs) "/tmp:exec,mode=777";
] ++ lib.optional (config.nixos.evaluatedConfig.boot.tmpOnTmpfs) "/tmp:exec,mode=777";
service.stop_signal = "SIGRTMIN+3";
service.tty = true;
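
This is the machinery behind the full-nixos example; a condensed sketch of a systemd-based service, in line with the arion-compose.nix test fixture earlier in this diff (project name and port are illustrative):

{
  project.name = "full-nixos";
  services.webserver = { pkgs, ... }: {
    nixos.useSystemd = true;
    nixos.configuration.services.nginx.enable = true;
    nixos.configuration.services.nginx.virtualHosts.localhost.root = "${pkgs.nix.doc}/share/doc/nix/manual";
    service.useHostStore = true;
    service.ports = [ "8000:80" ];
  };
}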

View file

@ -18,7 +18,7 @@ in
This option is unused by default, because not all images use NixOS.
One way to use this is to enable `nixos.useSystemd`, but the
One way to use this is to enable <code>nixos.useSystemd</code>, but the
NixOS configuration can be used in other ways.
'';
};
@ -27,11 +27,11 @@ in
type = attrs;
readOnly = true;
description = ''
NixOS build products from `config.system.build`, such as `toplevel` and `etc`.
NixOS build products from <code>config.system.build</code>, such as <code>toplevel</code> and <code>etc</code>.
This option is unused by default, because not all images use NixOS.
One way to use this is to enable `nixos.useSystemd`, but the
One way to use this is to enable <code>nixos.useSystemd</code>, but the
NixOS configuration can be used in other ways.
'';
};
@ -44,7 +44,7 @@ in
This option is unused by default, because not all images use NixOS.
One way to use this is to enable `nixos.useSystemd`, but the
One way to use this is to enable <code>nixos.useSystemd</code>, but the
NixOS configuration can be used in other ways.
'';
};

View file

@ -1,4 +1,4 @@
{ usePodman ? false, pkgs, lib ? pkgs.lib, ... }:
{ pkgs, ... }:
let
# To make some prebuilt derivations available in the vm
@ -6,168 +6,61 @@ let
inherit modules;
inherit pkgs;
};
inherit (lib)
concatMapStringsSep
optionalAttrs
optionalString
;
haveSystemd = usePodman || pkgs.arionTestingFlags.dockerSupportsSystemd;
concatPathLines = paths: concatMapStringsSep "\n" (x: "${x}") paths;
in
{
name = "arion-test";
nodes.machine = { pkgs, lib, ... }: {
machine = { pkgs, lib, ... }: {
environment.systemPackages = [
pkgs.arion
] ++ lib.optional usePodman pkgs.docker;
virtualisation.docker.enable = !usePodman;
virtualisation.podman = optionalAttrs usePodman {
enable = true;
dockerSocket.enable = true;
};
];
virtualisation.docker.enable = true;
# no caches, because no internet
nix.settings.substituters = lib.mkForce [];
nix.binaryCaches = lib.mkForce [];
# FIXME: Sandbox seems broken with current version of NixOS test
# w/ writable store. Error:
# machine# error: linking '/nix/store/7r8z2zvhwda85pgpdn5hzzz6hs1njklc-stdenv-linux.drv.chroot/nix/store/6v3y7s4q4wd16hsw393gjpxvcf9159bv-patch-shebangs.sh' to '/nix/store/6v3y7s4q4wd16hsw393gjpxvcf9159bv-patch-shebangs.sh': Operation not permitted
#
# There should be no reason why arion can't run without
# sandboxing, so please re-enable.
nix.useSandbox = false;
virtualisation.writableStore = true;
# Switch to virtualisation.additionalPaths when dropping all NixOS <= 21.05.
environment.etc."extra-paths-for-test".text = concatPathLines [
virtualisation.pathsInNixDB = [
# Pre-build the image because we don't want to build the world
# in the vm.
(preEval [ ../../examples/minimal/arion-compose.nix ]).config.out.dockerComposeYaml
(preEval [ ../../examples/full-nixos/arion-compose.nix ]).config.out.dockerComposeYaml
(preEval [ ../../examples/nixos-unit/arion-compose.nix ]).config.out.dockerComposeYaml
(preEval [ ../../examples/traefik/arion-compose.nix ]).config.out.dockerComposeYaml
pkgs.stdenv
];
virtualisation.memorySize = 2048;
virtualisation.diskSize = 8000;
};
testScript = ''
machine.fail("curl --fail localhost:8000")
machine.succeed("docker --version")
$machine->fail("curl localhost:8000");
$machine->succeed("docker --version");
# Tests
# - arion up
# - arion down
# - examples/minimal
with subtest("minimal"):
machine.succeed(
"rm -rf work && cp -frT ${../../examples/minimal} work && cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion up -d"
)
machine.wait_until_succeeds("curl --fail localhost:8000")
machine.succeed(
"cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion down"
)
machine.wait_until_fails("curl --fail localhost:8000")
subtest "minimal", sub {
$machine->succeed("cp -r ${../../examples/minimal} work && cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion up -d");
$machine->waitUntilSucceeds("curl localhost:8000");
$machine->succeed("cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion down && rm -rf work");
$machine->waitUntilFails("curl localhost:8000");
};
# Tests
# - running same image again doesn't require a `docker load`
with subtest("docker load only once"):
# We assume image loading relies on the `docker images` and `docker load` commands, so this should fail
machine.fail(
"export REAL_DOCKER=$(which docker); rm -rf work && cp -frT ${../../examples/minimal} work && cd work && NIX_PATH=nixpkgs='${pkgs.path}' PATH=\"${pkgs.writeScriptBin "docker" ''
#!${pkgs.runtimeShell} -eu
echo 1>&2 "This failure is expected. Args were" "$@"
echo "$@" >/tmp/docker-args
exit 1
''}/bin:$PATH\" arion up -d"
)
machine.succeed(
"export REAL_DOCKER=$(which docker); rm -rf work && cp -frT ${../../examples/minimal} work && cd work && NIX_PATH=nixpkgs='${pkgs.path}' PATH=\"${pkgs.writeScriptBin "docker" ''
#!${pkgs.runtimeShell} -eu
case $1 in
load)
echo 1>&2 "arion must not docker load when upping the same deployment for the second time"
exit 1
;;
images)
echo 1>&2 "execing docker to list images"
exec $REAL_DOCKER "$@"
;;
*)
echo 1>&2 "Unknown docker invocation. This may be a shortcoming of this docker mock."
echo 1>&2 "Invocation: docker" "$@"
;;
esac
''}/bin:$PATH\" arion up -d"
)
machine.wait_until_succeeds("curl --fail localhost:8000")
machine.succeed(
"cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion down"
)
machine.wait_until_fails("curl --fail localhost:8000")
subtest "full-nixos", sub {
$machine->succeed("cp -r ${../../examples/full-nixos} work && cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion up -d");
$machine->waitUntilSucceeds("curl localhost:8000");
# Also test exec with defaultExec
$machine->succeed("cd work && export NIX_PATH=nixpkgs='${pkgs.path}' && (echo 'nix run -f ~/h/arion arion -c arion exec webserver'; echo 'target=world; echo Hello \$target'; echo exit) | script /dev/null | grep 'Hello world'");
$machine->succeed("cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion down && rm -rf work");
$machine->waitUntilFails("curl localhost:8000");
};
# Tests
# - examples/flake
# This _test_ doesn't work because flake-compat fetches the github
# tarballs without sha256 and/or Nix doesn't consult the store before
# downloading.
# See https://github.com/edolstra/flake-compat/pull/12
# with subtest("flake"):
# machine.succeed(
# "rm -rf work && cp -frT ''${../../examples/flake} work && cd work && NIX_PATH= arion up -d"
# )
# machine.wait_until_succeeds("curl --fail localhost:8000")
# machine.succeed("cd work && NIX_PATH= arion down")
# machine.wait_until_fails("curl --fail localhost:8000")
${optionalString haveSystemd ''
# Tests
# - arion exec
# - examples/full-nixos
with subtest("full-nixos"):
machine.succeed(
"rm -rf work && cp -frT ${../../examples/full-nixos} work && cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion up -d"
)
machine.wait_until_succeeds("curl --fail localhost:8000")
machine.succeed(
"""
set -eux -o pipefail
cd work
export NIX_PATH=nixpkgs='${pkgs.path}'
echo 'target=world; echo Hello $target; exit' \
| script 'arion exec webserver' \
| grep 'Hello world'
"""
)
machine.succeed(
"cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion down"
)
machine.wait_until_fails("curl --fail localhost:8000")
''}
# Tests
# - examples/nixos-unit
with subtest("nixos-unit"):
machine.succeed(
"rm -rf work && cp -frT ${../../examples/nixos-unit} work && cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion up -d"
)
machine.wait_until_succeeds("curl --fail localhost:8000")
machine.succeed(
"cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion down"
)
machine.wait_until_fails("curl --fail localhost:8000")
# Tests
# - examples/traefik
# - labels
with subtest("traefik"):
machine.succeed(
"rm -rf work && cp -frT ${../../examples/traefik} work && cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion up -d"
)
machine.wait_until_succeeds("curl --fail nix-docs.localhost")
machine.succeed(
"cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion down"
)
machine.wait_until_fails("curl --fail nix-docs.localhost")
subtest "nixos-unit", sub {
$machine->succeed("cp -r ${../../examples/nixos-unit} work && cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion up -d");
$machine->waitUntilSucceeds("curl localhost:8000");
$machine->succeed("cd work && NIX_PATH=nixpkgs='${pkgs.path}' arion down && rm -rf work");
$machine->waitUntilFails("curl localhost:8000");
};
'';
}

tests/default.nix Normal file

@ -0,0 +1,20 @@
{ pkgs ? import ../pkgs.nix }:
let
inherit (pkgs) nixosTest recurseIntoAttrs arion;
in
recurseIntoAttrs {
test = nixosTest ./arion-test;
testBuild = arion.build {
# To be more accurate, you could do
#   pkgs = import ../examples/minimal/arion-pkgs.nix;
# but reusing the test's pkgs avoids re-evaluating Nixpkgs:
inherit pkgs;
modules = [ ../examples/minimal/arion-compose.nix ];
};
}
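The comment above trades accuracy for evaluation speed. For comparison, a sketch of the stricter variant that evaluates the example's own package set (the attribute name testBuildStrict is made up for illustration, and it evaluates Nixpkgs a second time):

  testBuildStrict = arion.build {
    pkgs = import ../examples/minimal/arion-pkgs.nix;
    modules = [ ../examples/minimal/arion-compose.nix ];
  };

Either attribute can then be built on its own, e.g. nix-build tests -A testBuild from the repository root.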


@ -1,36 +0,0 @@
{
perSystem = { pkgs, final, ... }:
let
inherit (final) nixosTest arion lib;
in
{
checks = lib.optionalAttrs pkgs.stdenv.isLinux {
test = nixosTest ./arion-test;
nixosModuleWithDocker =
import ./nixos-virtualization-arion-test/test.nix final {
virtualisation.arion.backend = "docker";
};
# Currently broken; kafka can't reach zookeeper
# nixosModuleWithPodman =
# import ./nixos-virtualization-arion-test/test.nix final {
# virtualisation.arion.backend = "podman-socket";
# };
testWithPodman =
nixosTest (import ./arion-test { usePodman = true; pkgs = final; });
testBuild = arion.build {
# To be more accurate, we could do
# pkgs = import ../examples/minimal/arion-pkgs.nix;
# But let's avoid re-evaluating Nixpkgs
pkgs = final;
modules = [ ../examples/minimal/arion-compose.nix ];
};
};
};
}


@ -1,6 +0,0 @@
# NixOS module test
This tests the NixOS module.
The images used here are experimental and not meant for production.


@ -1,62 +0,0 @@
{ pkgs, ... }: {
project.name = "whale";
docker-compose.raw = {
volumes.zookeeper = { };
volumes.kafka = { };
};
services.kafka = {
service.useHostStore = true;
# service.volumes = [
# {
# type = "volume";
# source = "kafka";
# target = "/data";
# # volume.nocopy = true;
# }
# ];
service.ports = [ "9092:9092" ];
service.depends_on = [ "zookeeper" ];
image.name = "localhost/kafka";
image.contents = [
(pkgs.runCommand "root" { } ''
mkdir -p $out/bin
ln -s ${pkgs.runtimeShell} $out/bin/sh
'')
];
image.command = [
"${pkgs.apacheKafka}/bin/kafka-server-start.sh"
"${./kafka/server.properties}"
];
};
services.zookeeper = {
service.useHostStore = true;
service.ports = [ "2181:2181" ];
# service.volumes = [
# {
# type = "volume";
# source = "zookeeper";
# target = "/data";
# # volume.nocopy = true;
# }
# ];
image.name = "localhost/zookeeper";
image.contents = [
(pkgs.buildEnv {
name = "root";
paths = [
# pkgs.sed
pkgs.busybox
];
})
];
image.command = [
"${pkgs.zookeeper}/bin/zkServer.sh"
"--config"
"${./zookeeper}"
"start-foreground"
];
};
}


@ -1,6 +0,0 @@
# NOTE: This isn't used in the module!
import <nixpkgs> {
# We specify the architecture explicitly. Use a Linux remote builder when
# calling arion from other platforms.
system = "x86_64-linux";
}
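A sketch of what the remote-builder setup mentioned above can look like, expressed as NixOS-style options on the machine that invokes arion; the host name, user and key path are placeholders, and on other platforms the equivalent is a builders = ... entry in nix.conf:

{
  # Forward x86_64-linux builds to a Linux machine over SSH.
  nix.distributedBuilds = true;
  nix.buildMachines = [
    {
      hostName = "linux-builder.example.com";
      system = "x86_64-linux";
      sshUser = "builder";
      sshKey = "/etc/nix/builder_ed25519";
      maxJobs = 4;
    }
  ];
}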


@ -1,141 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# see kafka.server.KafkaConfig for additional details and defaults
############################# Server Basics #############################
# The id of the broker. This must be set to a unique integer for each broker.
broker.id=0
############################# Socket Server Settings #############################
# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
# FORMAT:
# listeners = listener_name://host_name:port
# EXAMPLE:
# listeners = PLAINTEXT://your.host.name:9092
listeners=LOCALHOST://0.0.0.0:9092,SERVICE://kafka:9093
# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
# advertised.listeners=PLAINTEXT://whale_kafka_1:9092
advertised.listeners=LOCALHOST://localhost:9092,SERVICE://kafka:9093
# Name of the listener used for communication between brokers.
inter.broker.listener.name=LOCALHOST
# Maps listener names to security protocols; the default is for them to be the same. See the config documentation for more details
#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
listener.security.protocol.map=LOCALHOST:PLAINTEXT,SERVICE:PLAINTEXT
# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3
# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8
# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400
# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400
# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600
############################# Log Basics #############################
# A comma separated list of directories under which to store log files
log.dirs=/data/kafka
# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions=1
# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1
############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
############################# Log Flush Policy #############################
# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
# 1. Durability: Unflushed data may be lost if you are not using replication.
# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.
# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000
# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000
############################# Log Retention Policy #############################
# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.
# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168
# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824
# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824
# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000
############################# Zookeeper #############################
# Zookeeper connection string (see zookeeper docs for details).
# This is a comma-separated list of host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect=zookeeper:2181
# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=18000
############################# Group Coordinator Settings #############################
# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0


@ -1,40 +0,0 @@
pkgs: module:
pkgs.nixosTest {
name = "test-basic-arion-kafka";
nodes = {
machine = { ... }: {
virtualisation.memorySize = 4096;
virtualisation.diskSize = 10000;
imports = [
../../nixos-module.nix
module
];
virtualisation.arion.projects.whale.settings = {
imports = [ ./arion-compose.nix ];
};
};
};
testScript = ''
machine.wait_for_unit("sockets.target")
machine.wait_for_unit("arion-whale.service")
machine.succeed("""
(echo "hello"; echo "world") \
| ${pkgs.apacheKafka}/bin/kafka-console-producer.sh \
--topic thetopic --bootstrap-server localhost:9092
""")
machine.succeed("""
(
set +o pipefail # the consumer below is killed on purpose, so ignore its exit status
( ${pkgs.apacheKafka}/bin/kafka-console-consumer.sh \
--topic thetopic --from-beginning --bootstrap-server localhost:9092 & \
echo $! >pid
) | grep --line-buffered hello | { read; kill $(<pid); rm pid; }
) 2>/dev/console
""")
'';
}


@ -1,82 +0,0 @@
# Copyright 2012 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Define some default values that can be overridden by system properties
zookeeper.root.logger=INFO, CONSOLE
zookeeper.console.threshold=INFO
zookeeper.log.dir=.
zookeeper.log.file=zookeeper.log
zookeeper.log.threshold=INFO
zookeeper.log.maxfilesize=256MB
zookeeper.log.maxbackupindex=20
# zookeeper.tracelog.dir=${zookeeper.log.dir}
# zookeeper.tracelog.file=zookeeper_trace.log
log4j.rootLogger=${zookeeper.root.logger}
#
# console
# Add "console" to rootlogger above if you want to use this
#
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold}
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
# #
# # Add ROLLINGFILE to rootLogger to get log file output
# #
# log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
# log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold}
# log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file}
# log4j.appender.ROLLINGFILE.MaxFileSize=${zookeeper.log.maxfilesize}
# log4j.appender.ROLLINGFILE.MaxBackupIndex=${zookeeper.log.maxbackupindex}
# log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
# log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
# #
# # Add TRACEFILE to rootLogger to get log file output
# # Log TRACE level and above messages to a log file
# #
# log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
# log4j.appender.TRACEFILE.Threshold=TRACE
# log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file}
# log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
# ### Notice we are including log4j's NDC here (%x)
# log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n
# #
# # zk audit logging
# #
# zookeeper.auditlog.file=zookeeper_audit.log
# zookeeper.auditlog.threshold=INFO
# audit.logger=INFO, CONSOLE
# log4j.logger.org.apache.zookeeper.audit.Log4jAuditLogger=${audit.logger}
# log4j.additivity.org.apache.zookeeper.audit.Log4jAuditLogger=false
# log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
# log4j.appender.RFAAUDIT.File=${zookeeper.log.dir}/${zookeeper.auditlog.file}
# log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
# log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
# log4j.appender.RFAAUDIT.Threshold=${zookeeper.auditlog.threshold}
# # Max log file size of 10MB
# log4j.appender.RFAAUDIT.MaxFileSize=10MB
# log4j.appender.RFAAUDIT.MaxBackupIndex=10


@ -1,3 +0,0 @@
tickTime=2000
dataDir=/data
clientPort=2181