feat(servers): use my custom pacemaker module

Author: matt1432
Date: 2024-01-26 14:28:42 -05:00
Parent: bb16ead82d
Commit: 141299e3ad
10 changed files with 106 additions and 340 deletions
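
The hosts below stop wiring up corosync and the CIB XML by hand and instead consume the options exposed by the custom pacemaker module. On the consumer side the most visible change is how the Caddy virtual IP is looked up (both forms are taken verbatim from the hunks that follow):

    # old module: index into a per-resource list of virtual IPs
    clusterIP = (builtins.elemAt config.services.pacemaker.resources.caddy.virtualIps 0).ip;

    # new module: virtual IPs are named entries in an attribute set
    clusterIP = config.services.pacemaker.virtualIps.caddy-vip.ip;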

@@ -1,7 +1,7 @@
deviceName: {config, ...}: let
inherit (config.vars) mainUser hostName;
-  clusterIP = (builtins.elemAt config.services.pacemaker.resources.caddy.virtualIps 0).ip;
+  clusterIP = config.services.pacemaker.virtualIps.caddy-vip.ip;
in {
imports = [
./hardware-configuration.nix
@@ -10,7 +10,7 @@ in {
../../modules/sshd.nix
../../modules/tailscale.nix
-    ./modules/pacemaker
+    ./modules/pacemaker.nix
];
vars = {

@@ -9,7 +9,7 @@
caddy = caddy-plugins.packages.${pkgs.system}.default;
-  clusterIP = (builtins.elemAt config.services.pacemaker.resources.caddy.virtualIps 0).ip;
+  clusterIP = config.services.pacemaker.virtualIps.caddy-vip.ip;
in {
imports = [caddy-plugins.nixosModules.default];

@@ -1,23 +0,0 @@
{config, ...}: {
environment.etc."corosync/authkey" = {
source = config.sops.secrets.corosync.path;
};
services.corosync = {
enable = true;
clusterName = "thingies";
nodelist = [
{
nodeid = 1;
name = "thingone";
ring_addrs = ["10.0.0.244"];
}
{
nodeid = 2;
name = "thingtwo";
ring_addrs = ["10.0.0.159"];
}
];
};
}

@@ -9,7 +9,7 @@
inherit (config.vars) mainUser hostName;
headscale-flake = headscale.packages.${pkgs.system}.headscale;
-  clusterIP = (builtins.elemAt config.services.pacemaker.resources.caddy.virtualIps 0).ip;
+  clusterIP = config.services.pacemaker.virtualIps.caddy-vip.ip;
in {
environment.systemPackages = [headscale-flake];
users.users.${mainUser}.extraGroups = ["headscale"];

@@ -0,0 +1,30 @@
{pkgs, ...}: {
# NFS client setup
services.rpcbind.enable = true;
boot.supportedFilesystems = ["nfs"];
environment.systemPackages = with pkgs; [nfs-utils];
systemd.mounts = let
host = "10.0.0.249";
in [
{
type = "nfs";
mountConfig = {
Options = "noatime";
};
what = "${host}:/caddy";
where = "/var/lib/caddy";
requiredBy = ["caddy.service"];
}
{
type = "nfs";
mountConfig = {
Options = "noatime";
};
what = "${host}:/headscale";
where = "/var/lib/headscale";
requiredBy = ["headscale.service"];
}
];
}
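
These mount units keep /var/lib/caddy and /var/lib/headscale on the shared NFS export, presumably so that whichever node pacemaker places the services on sees the same state; declaring them via systemd.mounts makes the requiredBy ordering explicit. A roughly equivalent sketch using the standard fileSystems option (an alternative, not what this commit does) would push that ordering into fstab-style x-systemd options:

    # hypothetical alternative; this commit uses systemd.mounts as shown above
    fileSystems."/var/lib/caddy" = {
      device = "10.0.0.249:/caddy";
      fsType = "nfs";
      options = ["noatime" "x-systemd.required-by=caddy.service"];
    };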

@@ -0,0 +1,72 @@
{
config,
pacemaker,
...
}: let
inherit (config.sops) secrets;
in {
imports = [
pacemaker.nixosModules.default
./blocky.nix
./caddy.nix
./headscale
./nfs-client.nix
./unbound.nix
];
services.pacemaker = {
enable = true;
clusterName = "thingies";
corosyncKeyFile = secrets.corosync.path;
clusterUserPasswordFile = secrets.PASSWORD.path;
virtualIps = {
"caddy-vip" = {
ip = "10.0.0.130";
interface = "eno1";
group = "caddy";
};
};
systemdResources = {
"caddy" = {
enable = true;
group = "caddy";
startAfter = ["caddy-vip"];
};
"unbound" = {
enable = true;
group = "caddy";
startAfter = ["caddy"];
};
"blocky" = {
enable = true;
group = "caddy";
startAfter = ["unbound"];
};
"headscale" = {
enable = true;
group = "caddy";
startAfter = ["blocky"];
};
};
nodes = [
{
nodeid = 1;
name = "thingone";
ring_addrs = ["10.0.0.244"];
}
{
nodeid = 2;
name = "thingtwo";
ring_addrs = ["10.0.0.159"];
}
];
};
}
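
This file replaces the hand-rolled module deleted below; the startAfter (ordering) and group (colocation) options appear to stand in for the rsc_order/rsc_colocation constraints the old options.nix generated. Based only on the option names used here — the module's full interface is not part of this commit — adding one more service to the same failover group would presumably look like:

    services.pacemaker.systemdResources."my-extra-service" = {
      # "my-extra-service" is a hypothetical systemd unit name
      enable = true;
      group = "caddy";            # colocate with caddy-vip and the other resources
      startAfter = ["headscale"]; # start last in the existing chain
    };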

@@ -1,73 +0,0 @@
{pkgs, ...}: {
imports = [
./options.nix
../corosync.nix
../blocky.nix
../caddy.nix
../headscale
../unbound.nix
];
# TODO: update script
services.pacemaker = {
enable = true;
resources = {
"blocky" = {
enable = true;
dependsOn = ["unbound"];
};
"caddy" = {
enable = true;
virtualIps = [
{
id = "main";
interface = "eno1";
ip = "10.0.0.130";
}
];
};
"headscale" = {
enable = true;
dependsOn = ["caddy"];
};
"unbound" = {
enable = true;
dependsOn = ["caddy"];
};
};
};
# NFS client setup
services.rpcbind.enable = true;
boot.supportedFilesystems = ["nfs"];
environment.systemPackages = with pkgs; [nfs-utils];
systemd.mounts = let
host = "10.0.0.249";
in [
{
type = "nfs";
mountConfig = {
Options = "noatime";
};
what = "${host}:/caddy";
where = "/var/lib/caddy";
requiredBy = ["caddy.service"];
}
{
type = "nfs";
mountConfig = {
Options = "noatime";
};
what = "${host}:/headscale";
where = "/var/lib/headscale";
requiredBy = ["headscale.service"];
}
];
}

@@ -1,240 +0,0 @@
{
config,
lib,
nixpkgs-pacemaker,
pkgs,
...
}: let
inherit
(lib)
attrNames
attrValues
concatMapStringsSep
elemAt
filterAttrs
isAttrs
mkIf
mkOption
types
;
inherit (builtins) toFile map listToAttrs;
pacemakerPath = "services/cluster/pacemaker/default.nix";
cfg = config.services.pacemaker;
in {
disabledModules = [pacemakerPath];
imports = ["${nixpkgs-pacemaker}/nixos/modules/${pacemakerPath}"];
options.services.pacemaker = {
resources = mkOption {
default = {};
type = with types;
attrsOf (submodule ({name, ...}: {
options = {
enable = mkOption {
default = true;
type = types.bool;
};
systemdName = mkOption {
default = name;
type = types.str;
};
# TODO: add assertion to not have same id
virtualIps = mkOption {
default = [];
type = with types;
listOf (submodule {
options = {
id = mkOption {
type = types.str;
};
interface = mkOption {
default = "eno1";
type = types.str;
};
ip = mkOption {
type = types.str;
};
cidr = mkOption {
default = 24;
type = types.int;
};
};
});
};
# TODO: add assertion, needs to be an existing systemdName
dependsOn = mkOption {
default = [];
type = types.listOf types.str;
};
# TODO: Add extraResources, extraConstraints ...
};
}));
};
};
config = mkIf cfg.enable {
systemd.services = let
mkVirtIps = res:
concatMapStringsSep "\n" (vip: ''
<primitive class="ocf" id="${res.systemdName}-${vip.id}-vip" provider="heartbeat" type="IPaddr2">
<instance_attributes id="${res.systemdName}-${vip.id}-vip-attrs">
<nvpair
id="${res.systemdName}-${vip.id}-vip-attrs-cidr_netmask"
name="cidr_netmask"
value="${toString vip.cidr}"
/>
<nvpair
id="${res.systemdName}-${vip.id}-vip-attrs-ip"
name="ip"
value="${vip.ip}"
/>
<nvpair
id="${res.systemdName}-${vip.id}-vip-attrs-nic"
name="nic"
value="${vip.interface}"
/>
</instance_attributes>
<operations>
<op
id="${res.systemdName}-${vip.id}-vip-monitor-interval-30s"
interval="30s"
name="monitor"
/>
<op
id="${res.systemdName}-${vip.id}-vip-start-interval-0s"
interval="0s"
name="start"
timeout="20s"
/>
<op
id="${res.systemdName}-${vip.id}-vip-stop-interval-0s"
interval="0s"
name="stop"
timeout="20s"
/>
</operations>
</primitive>
'')
res.virtualIps;
mkSystemdResource = res: ''
<primitive id="${res.systemdName}" class="systemd" type="${res.systemdName}">
<operations>
<op id="stop-${res.systemdName}" name="stop" interval="0" timeout="10s"/>
<op id="start-${res.systemdName}" name="start" interval="0" timeout="10s"/>
<op id="monitor-${res.systemdName}" name="monitor" interval="10s" timeout="20s"/>
</operations>
</primitive>
'';
mkConstraint = res: first: let
firstName =
if isAttrs first
then first.systemdName
else first;
in ''
<rsc_order
id="order-${res.systemdName}-${firstName}"
first="${firstName}"
then="${res.systemdName}"
kind="Mandatory"
/>
<rsc_colocation
id="colocate-${res.systemdName}-${firstName}"
rsc="${firstName}"
with-rsc="${res.systemdName}"
score="INFINITY"
/>
'';
mkDependsOn = res: let
mkConstraint' = first:
mkConstraint res first;
in
concatMapStringsSep "\n" mkConstraint' res.dependsOn;
mkVipConstraint = res:
concatMapStringsSep "\n" (
vip:
mkConstraint
res
"${res.systemdName}-${vip.id}-vip"
)
res.virtualIps;
# If we're updating resources we have to kill constraints to add new resources
constraintsEmpty = toFile "constraints.xml" ''
<constraints>
</constraints>
'';
resEnabled = filterAttrs (n: v: v.enable) cfg.resources;
resWithIp = filterAttrs (n: v: ! isNull v.virtualIps) resEnabled;
resources = toFile "resources.xml" ''
<resources>
${concatMapStringsSep "\n" mkVirtIps (attrValues resWithIp)}
${concatMapStringsSep "\n" mkSystemdResource (attrValues resEnabled)}
</resources>
'';
constraints = toFile "constraints.xml" ''
<constraints>
${concatMapStringsSep "\n" mkVipConstraint (attrValues resWithIp)}
${concatMapStringsSep "\n" mkDependsOn (attrValues resEnabled)}
</constraints>
'';
host1 = (elemAt config.services.corosync.nodelist 0).name;
in
{
"pacemaker-setup" = {
after = ["corosync.service" "pacemaker.service"];
path = with pkgs; [pacemaker];
script = ''
# The config needs to be installed from one node only
# TODO: add assertion, corosync must be enabled with at least one node
if [ "$(uname -n)" = ${host1} ]; then
# TODO: setup stonith / fencing
crm_attribute --type crm_config --name stonith-enabled --update false
crm_attribute --type crm_config --name no-quorum-policy --delete
# Install config
cibadmin --replace --scope constraints --xml-file ${constraintsEmpty}
cibadmin --replace --scope resources --xml-file ${resources}
cibadmin --replace --scope constraints --xml-file ${constraints}
fi
'';
};
}
# Force all systemd units handled by pacemaker to not start automatically
// listToAttrs (map (x: {
name = x;
value = {
wantedBy = lib.mkForce [];
};
}) (attrNames cfg.resources));
# FIXME: https://github.com/NixOS/nixpkgs/pull/208298
nixpkgs.overlays = [
(final: prev: {
inherit
(nixpkgs-pacemaker.legacyPackages.x86_64-linux)
pacemaker
ocf-resource-agents
;
})
];
};
}

Remaining changed files: flake.nix and one binary file (contents not shown).