Compare commits

1 commit

19cd0e1422  add note on scoping secrets  (2025-07-11 10:56:31 +02:00)

18 changed files with 54 additions and 111 deletions


@@ -4,7 +4,7 @@ on:
workflow_dispatch: # allows manual triggering
push:
branches:
- main
# - main
jobs:
deploy:
@@ -13,7 +13,7 @@ jobs:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up SSH key for age secrets and SSH
- name: Set up SSH key to access age secrets
run: |
env
mkdir -p ~/.ssh
@@ -21,4 +21,4 @@ jobs:
chmod 600 ~/.ssh/id_ed25519
- name: Deploy
run: nix-shell --run 'eval "$(ssh-agent -s)" && ssh-add ~/.ssh/id_ed25519 && SHELL=$(which bash) nixops4 apply -v default'
run: nix-shell --run 'nixops4 deploy'


@@ -21,23 +21,17 @@ jobs:
- uses: actions/checkout@v4
- run: nix-shell --run 'nix-unit ./deployment/data-model-test.nix'
check-mastodon:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.test-mastodon-service -L
check-peertube:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix build .#checks.x86_64-linux.test-peertube-service -L
- run: nix-build services -A tests.peertube
check-panel:
runs-on: native
steps:
- uses: actions/checkout@v4
- run: nix-build -A tests.panel
- run: nix-build panel -A tests
check-deployment-basic:
runs-on: native
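
The updated check jobs build plain Nix derivations instead of flake checks, so they can be reproduced locally with the same commands; a minimal sketch, assuming it is run from the repository root (commands taken from the hunk above):

```
nix-shell --run 'nix-unit ./deployment/data-model-test.nix'
nix-build services -A tests.peertube
nix-build panel -A tests
```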


@@ -12,7 +12,6 @@ let
inherit (pkgs) lib;
inherit (import sources.flake-inputs) import-flake;
inherit ((import-flake { src = ./.; }).inputs) nixops4;
panel = import ./panel { inherit sources system; };
pre-commit-check =
(import "${git-hooks}/nix" {
inherit nixpkgs system;
@@ -72,7 +71,6 @@ in
tests = {
inherit pre-commit-check;
panel = panel.tests;
};
# re-export inputs so they can be overridden granularly


@@ -17,20 +17,15 @@
./infra/flake-part.nix
./keys/flake-part.nix
./secrets/flake-part.nix
./services/tests/flake-part.nix
];
perSystem =
{
pkgs,
lib,
system,
...
}:
{
checks = {
panel = (import ./. { inherit sources system; }).tests.panel.basic;
};
formatter = pkgs.nixfmt-rfc-style;
pre-commit.settings.hooks =


@@ -1,13 +1,14 @@
# Infra
This directory contains the definition of [the VMs](../machines/machines.md) that host our
This directory contains the definition of [the VMs](machines.md) that host our
infrastructure.
## Provisioning VMs with an initial configuration
> NOTE[Niols]: This is still very manual and clunky. Two things will happen:
> 1. In the near future, I will improve the provisioning script to make this a bit less clunky.
> 2. In the far future, NixOps4 will be able to communicate with Proxmox directly and everything will become much cleaner.
NOTE[Niols]: This is very manual and clunky. Two things will happen. In the near
future, I will improve the provisioning script to make this a bit less clunky.
In the far future, NixOps4 will be able to communicate with Proxmox directly and
everything will become much cleaner.
1. Choose names for your VMs. It is recommended to choose `fediXXX`, with `XXX`
above 100. For instance, `fedi117`.
@@ -24,7 +25,8 @@ infrastructure.
Those files need to exist during provisioning, but their content matters only
when updating the machines' configuration.
> FIXME: Remove this step by making the provisioning script not fail with the public key does not exist yet.
FIXME: Remove this step by making the provisioning script not fail when the
public key does not exist yet.
3. Run the provisioning script:
```
@@ -42,7 +44,7 @@ infrastructure.
ssh fedi117.abundos.eu 'sudo cat /etc/ssh/ssh_host_ed25519_key.pub' > keys/systems/fedi117.pub
```
> FIXME: Make the provisioning script do that for us.
FIXME: Make the provisioning script do that for us.
7. Regenerate the list of machines:
```
@@ -54,7 +56,7 @@ infrastructure.
just enough for it to boot and be reachable. Go on to the next section to
update the machine and put an actual configuration.
> FIXME: Figure out why the full configuration isn't on the machine at this
FIXME: Figure out why the full configuration isn't on the machine at this
point and fix it.
## Updating existing VM configurations


@@ -1,13 +1,7 @@
{
config,
...
}:
{
_class = "nixos";
users.users = {
root.openssh.authorizedKeys.keys = config.users.users.procolix.openssh.authorizedKeys.keys;
procolix = {
isNormalUser = true;
extraGroups = [ "wheel" ];


@@ -1,10 +1,9 @@
{ sources, ... }:
{ modulesPath, ... }:
{
_class = "nixos";
imports = [
"${sources.nixpkgs}/nixos/modules/profiles/qemu-guest.nix"
];
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
boot = {
initrd = {


@@ -58,8 +58,6 @@ in
users.users.root.openssh.authorizedKeys.keys = attrValues keys.contributors ++ [
# allow our panel vm access to the test machines
keys.panel
# allow continuous deployment access
keys.cd
];
};


@@ -27,19 +27,12 @@ let
_module.args = {
inherit
inputs
sources
keys
secrets
;
};
nixos.module.imports = [
./common/proxmox-qemu-vm.nix
];
nixos.specialArgs = {
inherit sources;
};
imports =
[
./common/resource.nix
@@ -47,6 +40,7 @@ let
++ (
if isTestVm then
[
./common/proxmox-qemu-vm.nix
../machines/operator/${vmName}
{
nixos.module.users.users.root.openssh.authorizedKeys.keys = [
@@ -69,33 +63,17 @@ let
vmNames:
{ providers, ... }:
{
# XXX: this type merge is for adding `specialArgs` to resource modules
options.resources = mkOption {
type =
with lib.types;
lazyAttrsOf (submoduleWith {
class = "nixops4Resource";
modules = [ ];
# TODO(@fricklerhandwerk): we may want to pass through all of `specialArgs`
# once we're sure it's sane. leaving it here for better control during refactoring.
specialArgs = {
inherit sources;
};
});
};
config = {
providers.local = inputs.nixops4.modules.nixops4Provider.local;
resources = genAttrs vmNames (vmName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
(makeResourceModule {
inherit vmName;
isTestVm = false;
})
];
});
};
providers.local = inputs.nixops4.modules.nixops4Provider.local;
resources = genAttrs vmNames (vmName: {
type = providers.local.exec;
imports = [
inputs.nixops4-nixos.modules.nixops4Resource.nixos
(makeResourceModule {
inherit vmName;
isTestVm = false;
})
];
});
};
makeDeployment' = vmName: makeDeployment [ vmName ];


@@ -11,7 +11,7 @@ in
imports = [
(import ../../../panel { }).module
"${sources.home-manager}/nixos"
(import "${sources.home-manager}/nixos")
];
security.acme = {


@@ -48,7 +48,7 @@ in
};
## NOTE: This is a physical machine, so is not covered by disko
fileSystems."/" = lib.mkForce {
fileSystems."/" = {
device = "rpool/root";
fsType = "zfs";
};
@@ -58,7 +58,7 @@ in
fsType = "zfs";
};
fileSystems."/boot" = lib.mkForce {
fileSystems."/boot" = {
device = "/dev/disk/by-uuid/50B2-DD3F";
fsType = "vfat";
options = [


@@ -17,6 +17,7 @@ in
lfs.enable = true;
settings = {
service = {
# FIXME: enable sign-up after scoping secrets, see #335
DISABLE_REGISTRATION = true;
};
server = {


@@ -45,7 +45,7 @@ in
'';
};
module = ./nix/configuration.nix;
module = import ./nix/configuration.nix;
tests = pkgs.callPackage ./nix/tests.nix { };
# re-export inputs so they can be overridden granularly

services/default.nix (new file, 13 lines added)

@@ -0,0 +1,13 @@
{
system ? builtins.currentSystem,
sources ? import ../npins,
pkgs ? import sources.nixpkgs { inherit system; },
...
}:
{
tests = {
mastodon = pkgs.nixosTest ./tests/mastodon.nix;
pixelfed-garage = pkgs.nixosTest ./tests/pixelfed-garage.nix;
peertube = pkgs.nixosTest ./tests/peertube.nix;
};
}
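
This new file exposes the service VM tests as ordinary attributes of `services/default.nix` rather than flake checks; a usage sketch, with the attribute names taken from the file above and the sources defaulting to the pinned npins inputs:

```
nix-build services -A tests.mastodon
nix-build services -A tests.pixelfed-garage
```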


@@ -49,7 +49,7 @@ in
displayName = mkOption {
type = types.str;
description = "Name of the initial user, for humans";
default = config.fediversity.temp.initialUser.username;
default = config.fediversity.temp.initialUser.name;
};
email = mkOption {
type = types.str;


@@ -1,14 +0,0 @@
{ ... }:
{
_class = "flake";
perSystem =
{ pkgs, ... }:
{
checks = {
test-mastodon-service = pkgs.testers.runNixOSTest ./mastodon.nix;
test-pixelfed-garage-service = pkgs.testers.runNixOSTest ./pixelfed-garage.nix;
test-peertube-service = pkgs.testers.runNixOSTest ./peertube.nix;
};
};
}


@@ -6,7 +6,7 @@
{ pkgs, ... }:
let
inherit (pkgs) lib writeText;
lib = pkgs.lib;
## FIXME: this binding was not used, but maybe we want a side-effect or something?
# rebuildableTest = import ./rebuildableTest.nix pkgs;
@@ -69,17 +69,9 @@ in
expect
];
environment.variables = {
AWS_ACCESS_KEY_ID = "$(cat ${config.fediversity.mastodon.s3AccessKeyFile})";
AWS_SECRET_ACCESS_KEY = "$(cat ${config.fediversity.mastodon.s3SecretKeyFile})";
AWS_ACCESS_KEY_ID = config.fediversity.garage.ensureKeys.mastodon.id;
AWS_SECRET_ACCESS_KEY = config.fediversity.garage.ensureKeys.mastodon.secret;
};
services.mastodon.extraEnvFiles = [
# generate as: cd ${pkgs.mastodon}; IGNORE_ALREADY_SET_SECRETS=true RAILS_ENV=development ${pkgs.mastodon}/bin/rails db:encryption:init
(writeText "rest" ''
ACTIVE_RECORD_ENCRYPTION_DETERMINISTIC_KEY=naGoEzeyjUmwIlmgZZmGQDWJrlWud5eX
ACTIVE_RECORD_ENCRYPTION_KEY_DERIVATION_SALT=A0tE1VJ7S3cjaOQ58mNkhrVFY7o5NKDB
ACTIVE_RECORD_ENCRYPTION_PRIMARY_KEY=tGHhd5Os7hLxa8QTzWwjyVLrvsj5VsCw
'')
];
};
};


@@ -113,7 +113,6 @@ let
${seleniumQuit}'';
dummyFile = pkgs.writeText "dummy" "dummy";
in
{
name = "test-pixelfed-garage";
@@ -162,8 +161,8 @@ in
];
environment.variables = {
POST_MEDIA = ./fediversity.png;
AWS_ACCESS_KEY_ID = "$(cat ${config.fediversity.pixelfed.s3AccessKeyFile})";
AWS_SECRET_ACCESS_KEY = "$(cat ${config.fediversity.pixelfed.s3SecretKeyFile})";
AWS_ACCESS_KEY_ID = config.fediversity.garage.ensureKeys.pixelfed.id;
AWS_SECRET_ACCESS_KEY = config.fediversity.garage.ensureKeys.pixelfed.secret;
## without this we get frivolous errors in the logs
MC_REGION = "garage";
};
@@ -171,12 +170,6 @@ in
users.users.selenium = {
isNormalUser = true;
};
fediversity.temp.initialUser = {
username = "dummy";
displayName = "dummy";
email = "dummy";
passwordFile = dummyFile;
};
};
};