get rid of hainich. migration done.

hexchen 2021-08-23 21:26:11 +02:00
parent 95a0e9f04a
commit fd9e8941c7
15 changed files with 0 additions and 1212 deletions

@@ -1,130 +0,0 @@
{ config, lib, pkgs, ... }:
{
  imports = [
    ../../common
    ./encboot.nix
    ./hardware.nix
    ./services/nginx.nix
    ./services/ghost_waszumfff.nix
    ./services/gitlab-runner.nix
    ./services/lantifa.nix
  ];

  boot.loader.grub.enable = true;
  boot.loader.grub.version = 2;
  boot.loader.grub.device = "/dev/sda";
  boot.supportedFilesystems = [ "zfs" ];
  # stop *something* from loading ip_tables and breaking nftables
  boot.blacklistedKernelModules = [ "ip_tables" "ip6_tables" "x_tables" ];

  # networking
  networking.hostName = "hainich";
  networking.hostId = "8a58cb2f";
  networking.useDHCP = true;
  networking.interfaces.enp6s0.ipv4.addresses = [
    {
      address = "46.4.63.148";
      prefixLength = 27;
    }
    {
      address = "46.4.63.158";
      prefixLength = 27;
    }
  ];
  networking.interfaces.enp6s0.ipv6.addresses = [ {
    address = "2a01:4f8:140:84c9::1";
    prefixLength = 64;
  } ];
  networking.defaultGateway = "46.4.63.129";
  networking.nameservers = [
    "1.1.1.1" "1.0.0.1"
    "2606:4700:4700::1111" "2606:4700:4700::1001"
  ];
  networking.defaultGateway6 = {
    address = "fe80::1";
    interface = "enp6s0";
  };
  hacc.nftables.nat.enable = true;
  networking.nat.internalInterfaces = [ "ve-+" ];
  networking.nat.internalIPs = [ "192.168.100.0/24" "172.17.0.0/16" ];
  networking.nat.externalInterface = "enp6s0";
  networking.firewall.allowedTCPPorts = [ 22 80 443 ];
  # networking.firewall.allowedUDPPorts = [ ... ];
  # networking.firewall.enable = false;

  # misc
  time.timeZone = "UTC";
  environment.systemPackages = with pkgs; [
    wget vim git
  ];
  services.openssh.enable = true;
  services.openssh.ports = [ 22 62954 ];
  users.users.root = {
    openssh.authorizedKeys.keys = [
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL6JWi0MBDz0Zy4zjauQv28xYmHyapb8D4zeesq91LLE schweby@txsbcct"
      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCvmrk3i04tXfrSlZtHFbG3o6lQgh3ODMWmGDING4TJ4ctidexmMNY15IjVjzXZgQSET1uKLDLITiaPsii8vaWERZfjm3jjub845mpKkKv48nYdM0eCbv7n604CA3lwoB5ebRgULg4oGTi60rQ4trFf3iTkJfmiLsieFBZz7l+DfgeDEjDNSJcrkOggGBrjE5vBXoDimdkNh8kBNwgMDj1kPR/FHDqybSd5hohCJ5FzQg9vzl/x/H1rzJJKYPO4svSgHkYNkeoL84IZNeHom+UEHX0rw2qAIEN6AiHvNUJR38relvQYxbVdDSlaGN3g26H2ehsmolf+U0uQlRAXTHo0NbXNVYOfijFKL/jWxNfH0aRycf09Lu60oY54gkqS/J0GoQe/OGNq1Zy72DI+zAwEzyCGfSDbAgVF7Y3mU2HqcqGqNzu7Ade5oCbLmkT7yzDM3x6IsmT1tO8dYiT8Qv+zFAECkRpw3yDkJkPOxNKg10oM318whMTtM3yqntE90hk= schweby@taxusbaccata"
    ];
    initialHashedPassword = "$6$F316njEF2$GMF4OmPSF6QgZ3P/DblQ/UFMgoo98bztbdw7X0ygvBGC1UMMIc13Vtxjd/ZGRYW/pEHACZZ7sbRZ48t6xhvO7/";
    # shell = pkgs.fish;
  };

  # storage stuffs!
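  # snapshot retention: keep 12 quarter-hourly, 18 hourly and 3 daily
  # snapshots, and no weekly or monthly ones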
  services.zfs = {
    autoSnapshot = {
      enable = true;
      frequent = 12;
      hourly = 18;
      daily = 3;
      weekly = 0;
      monthly = 0;
    };
    autoScrub = {
      enable = true;
    };
  };
  boot.kernelPackages = pkgs.linuxPackages;
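  # off-site backups with restic; the b2: prefix means the repository lives
  # in a Backblaze B2 bucket. Password and account credentials are only read
  # from /etc/restic at runtime and never enter the nix store.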
  services.restic.backups.tardis = {
    passwordFile = "/etc/restic/system";
    s3CredentialsFile = "/etc/restic/system.s3creds";
    paths = [
      "/data"
      "/home"
      "/run/florinori"
      "/var/lib/containers/codimd/var/lib/codimd"
      "/var/lib/containers/codimd/var/backup/postgresql"
      "/var/lib/containers/hedgedoc-i4f/var/lib/codimd"
      "/var/lib/containers/hedgedoc-i4f/var/backup/postgresql"
      "/var/lib/containers/lantifa/var/lib/mediawiki"
      "/var/lib/containers/lantifa/var/backup/mysql"
      "/var/lib/murmur"
      "/var/lib/syncthing"
    ];
    pruneOpts = [
      "--keep-daily 7"
      "--keep-weekly 5"
      "--keep-monthly 3"
    ];
    repository = "b2:tardis-hainich:system";
  };

  # This value determines the NixOS release from which the default
  # settings for stateful data, like file locations and database versions
  # on your system were taken. It's perfectly fine and recommended to leave
  # this value at the release version of the first install of this system.
  # Before changing this value read the documentation for this option
  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
  system.stateVersion = "20.03"; # Did you read the comment?
}

@@ -1,28 +0,0 @@
{ config, lib, pkgs, ... }:
{
  boot.initrd.kernelModules = [ "r8169" ]; # add network card driver
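  # kernel ip= syntax: <client-ip>:<server-ip>:<gw-ip>:<netmask>:<hostname>:<device>:<autoconf>;
  # leaving all fields but <device> and <autoconf> empty lets DHCP fill them in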
  boot.kernelParams = [ "ip=:::::enp6s0:dhcp" ]; # enable DHCP on the primary network interface
  boot.initrd.network = {
    enable = true;
    ssh = {
      enable = true;
      port = 2222;
      # TODO: modify the system config so that this works
      # authorizedKeys = with lib; concatLists (mapAttrsToList (name: user: if elem "wheel" user.extraGroups then user.openssh.authorizedKeys.keys else []) config.users.users);
      authorizedKeys = config.users.users.root.openssh.authorizedKeys.keys;
      hostKeys = [ /run/keys/ecdsa_host ];
    };
    # TODO: curl some webhook here to alert?
    # possibly quite hard to do, we only have limited wget or netcat available
    # how this all works:
    # when someone logs in via ssh, they are prompted to unlock the zfs volume;
    # afterwards, zfs is killed so that the boot can progress
    # (the timeout of 120s still applies afaik)
    postCommands = ''
      zpool import zroot
      zpool import dpool
      echo "zfs load-key -a; killall zfs && exit" >> /root/.profile
    '';
  };
}

@@ -1,52 +0,0 @@
{ config, lib, pkgs, ... }:
{
  boot.initrd.availableKernelModules = [ "uhci_hcd" "ahci" "sd_mod" ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];

  fileSystems."/" =
    { device = "zroot/root/nixos";
      fsType = "zfs";
    };
  fileSystems."/nix" =
    { device = "zroot/root/nixos/nix";
      fsType = "zfs";
    };
  fileSystems."/home" =
    { device = "dpool/home";
      fsType = "zfs";
    };
  fileSystems."/var/lib/containers" =
    { device = "dpool/containers";
      fsType = "zfs";
    };
  fileSystems."/var/lib/docker" =
    { device = "dpool/docker";
      fsType = "zfs";
    };
  fileSystems."/var/lib/gitlab-runner" =
    { device = "dpool/gitlab-runner";
      fsType = "zfs";
    };
  fileSystems."/data" =
    { device = "dpool/data";
      fsType = "zfs";
    };
  fileSystems."/boot" =
    { device = "/dev/disk/by-uuid/40125f55-7fe8-4850-902e-b4d6e22f0335";
      fsType = "ext2";
    };

  swapDevices = [ ];
  nix.maxJobs = lib.mkDefault 12;
  powerManagement.cpuFreqGovernor = lib.mkDefault "ondemand";
}

@@ -1,125 +0,0 @@
{ config, pkgs, ... }:
{
  services.etcd = {
    advertiseClientUrls = [
      "https://[2a0d:eb04:8:10::1]:2379"
    ];
    listenClientUrls = [
      "https://[2a0d:eb04:8:10::1]:2379"
    ];
    listenPeerUrls = [
      "https://[::1]:2380"
    ];
  };
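  # a single-node cluster: this host is both master and node. Services get
  # IPv6-only addresses (flannel is disabled); the tayga service below does
  # NAT64 so that they remain reachable from the IPv4 internet.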
  services.kubernetes = {
    roles = [ "master" "node" ];
    flannel.enable = false;
    addons.dns = {
      enable = true;
      clusterIp = "2a0d:eb04:8:11::53";
      reconcileMode = "EnsureExists";
    };
    pki.cfsslAPIExtraSANs = [ "hainich.hacc.space" ];
    apiserver = {
      advertiseAddress = "2a0d:eb04:8:10::1";
      extraSANs = [
        "2a0d:eb04:8:10::1" "2a0d:eb04:8:11::1" "hainich.hacc.space"
      ];
      bindAddress = "::";
      insecureBindAddress = "::1";
      etcd = {
        servers = [ "https://[2a0d:eb04:8:10::1]:2379" ];
      };
      serviceClusterIpRange = "2a0d:eb04:8:11::/120";
      extraOpts = "--allow-privileged=true";
    };
    controllerManager = {
      bindAddress = "::";
      clusterCidr = "2a0d:eb04:8:12::/64";
    };
    kubelet = {
      address = "::";
      clusterDns = "2a0d:eb04:8:11::53";
    };
    proxy = {
      bindAddress = "::";
    };
    scheduler = {
      address = "::1";
    };
    apiserverAddress = "https://[2a0d:eb04:8:10::1]:6443";
    clusterCidr = "2a0d:eb04:8:12::/64";
    easyCerts = true;
    masterAddress = "hainich.hacc.space";
  };
  networking.firewall = {
    allowedTCPPorts = [ 80 443 6443 ];
    trustedInterfaces = [
      "cbr0" "tunnat64"
    ];
    extraCommands = ''
      iptables -t nat -A POSTROUTING -o enp6s0 -j SNAT --to 46.4.63.158
      iptables -A FORWARD -i tunnat64 -j ACCEPT
      iptables -t nat -A PREROUTING -p tcp -d 46.4.63.158 --dport 80 -j DNAT --to-destination 10.255.255.2:80
      iptables -t nat -A PREROUTING -p tcp -d 46.4.63.158 --dport 443 -j DNAT --to-destination 10.255.255.2:443
      iptables -t nat -A PREROUTING -p tcp -d 46.4.63.158 --dport 6443 -j DNAT --to-destination 10.255.255.1:443
      ip6tables -A FORWARD -i tunnat64 -j ACCEPT
      ip6tables -A INPUT -i tunnat64 -j ACCEPT
    '';
    extraStopCommands = ''
      iptables -t nat -D POSTROUTING -o enp6s0 -j SNAT --to 46.4.63.158
      iptables -D FORWARD -i tunnat64 -j ACCEPT
      iptables -t nat -D PREROUTING -p tcp -d 46.4.63.158 --dport 80 -j DNAT --to-destination 10.255.255.2:80
      iptables -t nat -D PREROUTING -p tcp -d 46.4.63.158 --dport 443 -j DNAT --to-destination 10.255.255.2:443
      iptables -t nat -D PREROUTING -p tcp -d 46.4.63.158 --dport 6443 -j DNAT --to-destination 10.255.255.1:443
      ip6tables -D FORWARD -i tunnat64 -j ACCEPT
      ip6tables -D INPUT -i tunnat64 -j ACCEPT
    '';
  };
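  # tayga provides the NAT64 mentioned above: each map line pairs one of the
  # IPv4 addresses used in the DNAT rules with the IPv6 address of a cluster
  # service, so forwarded v4 traffic ends up inside the v6-only cluster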
  systemd.services.tayga = (let
    config = pkgs.writeText "tayga.conf" ''
      tun-device tunnat64
      ipv4-addr 10.255.255.254
      prefix 2a0d:eb04:8:10:64::/96
      dynamic-pool 10.255.255.0/24
      map 10.255.255.1 2a0d:eb04:8:10::1
      map 10.255.255.2 2a0d:eb04:8:11::2
      strict-frag-hdr 1
    '';
    startScript = pkgs.writeScriptBin "tayga-start" ''
      #! ${pkgs.runtimeShell} -e
      ${pkgs.iproute}/bin/ip link set up tunnat64 || true
      ${pkgs.iproute}/bin/ip route add 10.255.255.0/24 dev tunnat64 || true
      ${pkgs.iproute}/bin/ip -6 route add 2a0d:eb04:8:10:64::/96 dev tunnat64 || true
      ${pkgs.tayga}/bin/tayga -d --config ${config}
    '';
  in {
    wantedBy = [ "multi-user.target" ];
    after = [ "network.target" ];
    serviceConfig = {
      ExecStart = ''${startScript}/bin/tayga-start'';
    };
  });
  networking.interfaces.cbr0.ipv6.routes = [{
    address = "2a0d:eb04:8:10::";
    prefixLength = 60;
  }];
  networking.interfaces.tunnat64 = {
    virtual = true;
  };
  # openebs expects some stuff to be there.
  system.activationScripts.openebs = ''
    mkdir -p /usr/lib /usr/sbin
    ln -sf ${pkgs.zfs.lib}/lib/* /usr/lib/
    ln -sf ${pkgs.zfs}/bin/zfs /usr/sbin/
  '';
}

@@ -1,32 +0,0 @@
{ config, lib, pkgs, ... }:
{
  virtualisation.oci-containers.containers."ghost-waszumfff" = {
    autoStart = true;
    environment = {
      url = "https://waszumfff.4future.dev";
    };
    image = "ghost:alpine";
    ports = [ "127.0.0.1:2368:2368" ];
    volumes = [ "/run/florinori:/var/lib/ghost/content" ];
  };
  fileSystems."/run/florinori" =
    { device = "dpool/k8s/florinori";
      fsType = "zfs";
    };
  services.nginx.virtualHosts."waszumfff.4future.dev" = {
    enableACME = true;
    forceSSL = true;
    locations."/" = {
      proxyPass = "http://127.0.0.1:2368";
      extraConfig = ''
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $http_host;
      '';
    };
  };
}

@@ -1,63 +0,0 @@
{ config, pkgs, lib, ... }:
{
  services.gitlab-runner = {
    enable = true;
    concurrent = 4;
    services = {
      infra4future = {
        buildsDir = "/var/lib/gitlab-runner/builds";
        dockerImage = "nixos/nix";
        executor = "docker";
        registrationConfigFile = "/etc/gitlab-runner/gitlab-runner.env";
      };
      nix = {
        limit = 1; # don't run multiple jobs
        registrationConfigFile = "/etc/gitlab-runner/gitlab-runner.env";
        dockerImage = "alpine";
        dockerVolumes = [
          "/nix/store:/nix/store:ro"
          "/nix/var/nix/db:/nix/var/nix/db:ro"
          "/nix/var/nix/daemon-socket:/nix/var/nix/daemon-socket:ro"
        ];
        dockerDisableCache = true;
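        # the host's store and daemon socket are mounted read-only into the
        # alpine container (see dockerVolumes above); this script bootstraps
        # the profile directories and installs nix, certs, git and openssh
        # through the host's nix daemon before each job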
        preBuildScript = pkgs.writeScript "setup-container" ''
          mkdir -p -m 0755 /nix/var/log/nix/drvs
          mkdir -p -m 0755 /nix/var/nix/gcroots
          mkdir -p -m 0755 /nix/var/nix/profiles
          mkdir -p -m 0755 /nix/var/nix/temproots
          mkdir -p -m 0755 /nix/var/nix/userpool
          mkdir -p -m 1777 /nix/var/nix/gcroots/per-user
          mkdir -p -m 1777 /nix/var/nix/profiles/per-user
          mkdir -p -m 0755 /nix/var/nix/profiles/per-user/root
          mkdir -p -m 0700 "$HOME/.nix-defexpr"
          . ${pkgs.nix}/etc/profile.d/nix.sh
          ${pkgs.nix}/bin/nix-env -i ${lib.concatStringsSep " " (with pkgs; [ nix cacert git openssh ])}
          ${pkgs.nix}/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable
          ${pkgs.nix}/bin/nix-channel --update nixpkgs
        '';
        environmentVariables = {
          ENV = "/etc/profile";
          USER = "root";
          NIX_REMOTE = "daemon";
          PATH = "/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:/bin:/sbin:/usr/bin:/usr/sbin";
          NIX_SSL_CERT_FILE = "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt";
        };
        tagList = [ "nix" ];
      };
    };
  };
  systemd.services.gitlab-runner.serviceConfig = {
    DynamicUser = lib.mkForce false;
    User = "gitlab-runner";
  };
  users.users.gitlab-runner = {
    home = "/var/lib/gitlab-runner";
    extraGroups = [ "docker" ];
    isSystemUser = true;
  };
  virtualisation.docker.storageDriver = "zfs";
}

@@ -1,91 +0,0 @@
{ config, lib, pkgs, ... }:
{
  containers.codimd = {
    privateNetwork = true;
    hostAddress = "192.168.100.1";
    localAddress = "192.168.100.3";
    autoStart = true;
    config = { config, lib, pkgs, ... }: {
      networking.firewall.enable = false;
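      # run a local DNS forwarder; the container's private veth network does
      # not provide a resolver by itself (assumption: kept because hedgedoc
      # needs to reach the oauth2 endpoints by name)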
      services.coredns = {
        enable = true;
        config = ''
          .:53 {
            forward . 1.1.1.1
          }
        '';
      };
      services.hedgedoc = {
        enable = true;
        configuration = {
          allowAnonymous = true;
          allowFreeURL = true;
          allowGravatar = false;
          allowOrigin = [ "localhost" "pad.hacc.space" "fff-muc.de" ];
          dbURL = "postgres://codimd:codimd@localhost:5432/codimd";
          defaultPermission = "limited";
          domain = "pad.hacc.space";
          host = "0.0.0.0";
          protocolUseSSL = true;
          hsts.preload = false;
          email = false;
          oauth2 = {
            authorizationURL = "https://auth.infra4future.de/auth/realms/forfuture/protocol/openid-connect/auth";
            tokenURL = "https://auth.infra4future.de/auth/realms/forfuture/protocol/openid-connect/token";
            clientID = "codimd";
            clientSecret = "1a730af1-4d6e-4c1d-8f7e-72375c9b8d62";
          };
        };
      };
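      # oauth2 settings that the NixOS module does not expose are passed to
      # hedgedoc as raw CMD_* environment variables instead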
      systemd.services.hedgedoc.environment = {
        "CMD_OAUTH2_USER_PROFILE_URL" = "https://auth.infra4future.de/auth/realms/forfuture/protocol/openid-connect/userinfo";
        "CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR" = "name";
        "CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR" = "display-name";
        "CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR" = "email";
        "CMD_OAUTH2_PROVIDERNAME" = "Infra4Future";
      };
      services.postgresql = {
        enable = true;
        ensureDatabases = [ "codimd" ];
        ensureUsers = [{
          name = "codimd";
          ensurePermissions = {
            "DATABASE codimd" = "ALL PRIVILEGES";
          };
        }];
      };
      services.postgresqlBackup = {
        enable = true;
        databases = [ "codimd" ];
        startAt = "*-*-* 23:45:00";
      };
    };
  };
  services.nginx.virtualHosts."pad.hacc.earth" = {
    enableACME = true;
    forceSSL = true;
    globalRedirect = "pad.hacc.space";
  };
  services.nginx.virtualHosts."pad.hacc.space" = {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://192.168.100.3:3000";
      extraConfig = ''
        proxy_pass_request_headers on;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $http_connection;
        add_header Access-Control-Allow-Origin "*";
        proxy_buffering off;
      '';
    };
  };
}

@@ -1,76 +0,0 @@
{ config, lib, pkgs, ... }:
{
  containers.pad-i4f = {
    privateNetwork = true;
    hostAddress = "192.168.100.1";
    localAddress = "192.168.100.41";
    autoStart = true;
    config = { config, lib, pkgs, ... }: {
      networking.firewall.enable = false;
      services.coredns = {
        enable = true;
        config = ''
          .:53 {
            forward . 1.1.1.1
          }
        '';
      };
      services.hedgedoc = {
        enable = true;
        configuration = {
          allowAnonymous = true;
          allowFreeURL = true;
          allowGravatar = false;
          allowOrigin = [ "localhost" "pad.infra4future.de" "fff-muc.de" ];
          dbURL = "postgres://hedgedoc:hedgedoc@localhost:5432/hedgedoc";
          defaultPermission = "freely";
          domain = "pad.infra4future.de";
          host = "0.0.0.0";
          protocolUseSSL = true;
          hsts.preload = false;
          email = false;
        };
      };
      services.postgresql = {
        enable = true;
        authentication = ''
          local all all trust
          host hedgedoc hedgedoc 127.0.0.1/32 trust
        '';
        ensureDatabases = [ "hedgedoc" ];
        ensureUsers = [{
          name = "hedgedoc";
          ensurePermissions = {
            "DATABASE hedgedoc" = "ALL PRIVILEGES";
          };
        }];
      };
      services.postgresqlBackup = {
        enable = true;
        databases = [ "hedgedoc" ];
        startAt = "*-*-* 23:45:00";
      };
    };
  };
  services.nginx.virtualHosts."pad.infra4future.de" = {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://192.168.100.41:3000";
      extraConfig = ''
        proxy_pass_request_headers on;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $http_connection;
        add_header Access-Control-Allow-Origin "*";
        proxy_buffering off;
      '';
    };
  };
}

@@ -1,97 +0,0 @@
{ config, lib, pkgs, ... }:
let
  unstable = import (import ../../../nix/sources.nix).nixpkgs-unstable {};
in {
  containers.lantifa = {
    autoStart = true;
    privateNetwork = true;
    hostAddress6 = "fd00::42:14";
    localAddress6 = "fd00::42:15";
    config = { config, pkgs, ... }: {
      networking.hosts."::1" = [ "wiki.lantifa.org" ];
      networking.firewall.enable = false;
      users.users.mediawiki.extraGroups = [ "keys" ];
      services.mediawiki = {
        enable = true;
        name = "LANtifa";
        package = unstable.mediawiki;
        database.createLocally = true;
        passwordFile = "/var/lib/mediawiki/mediawiki-password";
        extraConfig = let
          wikidb = pkgs.fetchzip {
            url = "http://www.kennel17.co.uk/uploads/testwiki/archive/e/e9/20210407232657%21WikiDB.zip";
            sha256 = "0d4f2ygglz4w515a7lgw59500q3xmr92xxhsmh8p204yaa769x8v";
          };
        in ''
          // Configure short URLs
          $wgScriptPath = "";
          $wgArticlePath = "/wiki/$1";
          $wgUsePathInfo = true;
          require_once('${wikidb}/WikiDB.php');
          $wgExtraNamespaces = array( 100 => "Table", 101 => "Table_Talk",);
          $wgWikiDBNamespaces = 100;
          $wgGroupPermissions['user']['writeapi'] = true;
          $wgDefaultUserOptions['visualeditor-enable'] = 1;
          $wgLogo = "images/c/c5/LantifaLogoFem0.3.png";
          // PageForms config
          $wgGroupPermissions['*']['viewedittab'] = false;
          $wgGroupPermissions['user']['viewedittab'] = true;
          // Moderation settings
          $wgModerationNotificationEnable = true;
          $wgModerationEmail = "wiki_mod@lantifa.org";
          $wgLogRestrictions["newusers"] = 'moderation';
          // intersection / DynamicPageList config
          $wgDLPMaxCacheTime = 5 * 60;
        '';
        extensions = {
          TemplateData = null;
          VisualEditor = null;
          InputBox = null;
          Moderation = pkgs.fetchzip {
            url = "https://github.com/edwardspec/mediawiki-moderation/archive/v1.4.20.tar.gz";
            sha256 = "1k0z44jfqsxzwy6jjz3yfibiq8wi845d5iwwh8j3yijn2854fj0i";
          };
          intersection = pkgs.fetchzip { # This is the DynamicPageList extension
            url = "https://extdist.wmflabs.org/dist/extensions/intersection-REL1_35-1adb683.tar.gz";
            sha256 = "0jh3b22vq1ml3kdj0hhhbfjsilpw39bcjbnkajgx1pcvr7haxld7";
          };
          PageForms = pkgs.fetchzip {
            url = "https://github.com/wikimedia/mediawiki-extensions-PageForms/archive/5.0.1.zip";
            sha256 = "172m7p941fbkl29h5bhanx3dn42jfmzgyvgmgm2lgdbmkawwly96";
          };
        };
        virtualHost = {
          hostName = "wiki.lantifa.org";
          listen = [ { port = 80; } ];
          adminAddr = "admin@hacc.space";
          extraConfig = ''
            RewriteEngine On
            RewriteRule ^/?wiki(/.*)?$ %{DOCUMENT_ROOT}/index.php [L]
            RewriteRule ^/*$ %{DOCUMENT_ROOT}/index.php [L]
          '';
        };
      };
      services.mysqlBackup = {
        enable = true;
        databases = [ "mediawiki" ];
        calendar = "*-*-* 23:45:00";
      };
    };
  };
  services.nginx.virtualHosts."wiki.lantifa.org" = {
    locations."/".proxyPass = "http://[" + config.containers.lantifa.localAddress6 + "]";
    forceSSL = true;
    enableACME = true;
  };
}

@@ -1,231 +0,0 @@
{ config, pkgs, lib, ... }:
{
  containers.mattermost = {
    autoStart = true;
    privateNetwork = true;
    hostAddress = "192.168.100.30";
    localAddress = "192.168.100.31";
    bindMounts."/secrets" = {
      hostPath = "/var/lib/mattermost/";
      isReadOnly = true;
    };
    config = { pkgs, config, ... }: {
      # these have to be imported here, since containers don't inherit the
      # imports of their environment
      imports = [ ../../../modules/mattermost.nix ];
      networking.firewall.enable = false;
      # couldn't figure out how to actually override modules, so for now
      # there are two mattermost modules …
      services.mattermost-patched = {
        enable = true;
        siteUrl = "https://mattermost-beta.infra4future.de";
        siteName = "Mattermost - Blabla for Future";
        listenAddress = "0.0.0.0:3000";
        mutableConfig = false;
        secretConfig = "/secrets/secrets.json";
        extraConfig = {
          ServiceSettings = {
            TrustedProxyIPHeader = [ "X-Forwarded-For" "X-Real-Ip" ];
            ReadTimeout = 300;
            WriteTimeout = 600;
            IdleTimeout = 60;
            MaximumLoginAttempts = 10;
            AllowCorsFrom = "*.infra4future.de/*";
            WebserverMode = "gzip";
            EnableCustomEmoji = true;
            EnableEmojiPicker = true;
            EnableGifPicker = false;
            RestrictCustomEmojiCreation = "all";
            RestrictPostDelete = "all";
            AllowEditPost = "always";
            PostEditTimeout = -1;
            EnableTutorial = false;
            ExperimentalChannelSidebarOrganization = "default_on";
            ExperimentalChannelOrganization = true;
            ExperimentalDataPrefetch = true;
            EnableEmailInvitations = true;
            DisableLegacyMFA = true;
            EnableSVGs = true;
            EnableLaTeX = true;
            ThreadAutoFollow = true;
            EnableSecurityFixAlert = false;
          };
          TeamSettings = {
            EnableTeamCreation = true;
            EnableUserCreation = true;
            EnableOpenServer = false;
            EnableUserDeactivation = true;
            ExperimentalViewArchivedChannels = true;
            ExperimentalEnableAutomaticReplies = true;
          };
          LogSettings = {
            EnableConsole = true;
            ConsoleLevel = "ERROR";
            EnableDiagnostics = false;
            EnableWebhookDebugging = false;
          };
          NotificationLogSettings = {
            EnableConsole = true;
            ConsoleLevel = "INFO";
          };
          PasswordSettings = {
            MinimumLength = 10;
            # turn off all the bullshit requirements
            Lowercase = false;
            Number = false;
            Uppercase = false;
            Symbol = false;
          };
          FileSettings = {
            EnableFileAttachments = true;
            MaxFileSize = 52428800;
            DriverName = "local";
            Directory = "/var/lib/mattermost/uploads-storage";
            EnablePublicLink = true;
            PublicLinkSalt = "3k7p3yxdhz6798b3b9openfr9rn3ymwu";
          };
          EmailSettings = {
            EnableSignUpWithEmail = false;
            EnableSignInWithEmail = false;
            EnableSignInWithUsername = false;
            SendEmailNotifications = true;
            FeedbackName = "mattermost";
            FeedbackEmail = "mattermost@infra4future.de";
            ReplyToAddress = "mattermost@infra4future.de";
            FeedbackOrganization = "infra4future.de";
            EnableSMTPAuth = true;
            SMTPUsername = "noreply@infra4future.de";
            SMTPServer = "mail.hacc.space";
          };
          RateLimitSettings.Enable = false;
          PrivacySettings = {
            ShowEmailAddress = false;
            ShowFullName = true;
          };
          SupportSettings = {
            TermsOfServiceLink = "https://infra4future.de/nutzungsbedingungen.html";
            PrivacyPolicyLink = "https://infra4future.de/nutzungsbedingungen.html";
            AboutLink = "https://infra4future.de";
            SupportEmail = "info@infra4future.de";
            CustomTermsOfServiceEnabled = false;
            EnableAskCommunityLink = true;
          };
          AnnouncementSettings.EnableBanner = false;
          GitLabSettings = {
            Enable = true;
            Id = "mattermost-beta";
            Scope = "";
            AuthEndpoint = "https://auth.infra4future.de/auth/realms/forfuture/protocol/openid-connect/auth";
            TokenEndpoint = "https://auth.infra4future.de/auth/realms/forfuture/protocol/openid-connect/token";
            UserApiEndpoint = "https://auth.infra4future.de/auth/realms/forfuture/protocol/openid-connect/userinfo";
          };
          # for some reason these don't appear to be working; the startup
          # process complains and sets them back to en
          LocalizationSettings = {
            DefaultServerLocale = "de";
            DefaultClientLocale = "de";
            AvailableLocales = "de,en";
          };
          MessageExportSettings.EnableExport = false;
          # plugins appear to have trouble with the read-only filesystem; it may
          # be necessary to manually change their paths etc.
          PluginSettings = {
            Enable = true;
            EnableUploads = true;
            Plugins = {
              bigbluebutton = {
                adminonly = false;
                base_url = "https://bbb.infra4future.de/bigbluebutton/api";
                salt = "zKCsNeaEniC115ynHOsZopgA4iTiJjzgeiPNoCEc";
              };
              "com.github.matterpoll.matterpoll" = {
                experimentalui = true;
                trigger = "poll";
              };
            };
            PluginStates = {
              bigbluebutton.Enable = true;
              "com.github.matterpoll.matterpoll".Enable = true;
            };
          };
          ComplianceSettings.Enable = false;
          ClusterSettings.Enable = false;
          MetricsSettings.Enable = false;
          GuestAccountsSettings.Enable = false;
          # this is just the general allow-this-at-all switch; users
          # still have to turn it on for themselves
          FeatureFlags.CollapsedThreads = true;
        };
        # turn off the weirder parts of this module (which insist on passwords
        # in nix files, instead of just using socket-based authentication)
        #
        # it will still attempt to use its default password, but postgres will
        # just let it in regardless of that
        localDatabaseCreate = false;
      };
      services.postgresql = {
        enable = lib.mkForce true; # mattermost sets this to false. wtf.
        ensureDatabases = [ "mattermost" ];
        ensureUsers = [ {
          name = "mattermost";
          ensurePermissions = { "DATABASE mattermost" = "ALL PRIVILEGES"; };
        } ];
        authentication = lib.mkForce ''
          # Generated file; do not edit!
          local all all trust
          host mattermost mattermost ::1/128 trust
        '';
      };
      networking.firewall.allowedTCPPorts = [ 3000 ];
      services.coredns = {
        enable = true;
        config = ''
          .:53 {
            forward . 1.1.1.1
          }
        '';
      };
    };
  };
  services.nginx.virtualHosts."mattermost-beta.infra4future.de" = {
    locations."/" = {
      proxyPass = "http://${config.containers.mattermost.localAddress}:3000";
      proxyWebsockets = true;
      extraConfig = ''
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Server $host;
        # Mattermost CSP patch
        proxy_hide_header Content-Security-Policy;
        proxy_hide_header X-Frame-Options;
        proxy_redirect off;
      '';
    };
    forceSSL = true;
    enableACME = true;
  };
  networking.nat = {
    enable = true;
    internalInterfaces = [ "ve-mattermost" ];
    externalInterface = "enp6s0";
  };
}

@@ -1,42 +0,0 @@
{ config, lib, pkgs, ... }:
{
  services.prometheus = {
    enable = true;
    webExternalUrl = "https://stats.hacc.space";
    exporters = {
      dovecot = {
        enable = true;
        scopes = [ "user" "global" ];
        socketPath = "/var/run/dovecot2/old-stats";
      };
      nginx.enable = true;
      node.enable = true;
      postfix = {
        enable = true;
        systemd.enable = true;
      };
      rspamd.enable = true;
    };
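    # generate one scrape job per exporter enabled above: filter the exporter
    # set down to those with enable = true, then point each job at the
    # exporter's port on localhost and label it with this host's name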
    scrapeConfigs = (lib.mapAttrsToList (name: val:
      {
        job_name = "${name}-${config.networking.hostName}";
        static_configs = [{
          targets = [ "localhost:${toString val.port}" ];
          labels.host = config.networking.hostName;
        }];
      }
    ) (lib.filterAttrs (_: val: val.enable) config.services.prometheus.exporters));
  };
  services.dovecot2.extraConfig = ''
    mail_plugins = $mail_plugins old_stats
    service old-stats {
      unix_listener old-stats {
        user = dovecot-exporter
        group = dovecot-exporter
      }
    }
  '';
  services.nginx.statusPage = true;
}

@@ -1,56 +0,0 @@
{ config, lib, pkgs, ... }:
{
  security.acme.acceptTerms = true;
  security.acme.email = "info+acme@hacc.space";
  services.nginx.enable = true;
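  # the stock nginx package does not include the rtmp module (needed for the
  # streaming setup below), so compile it in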
  services.nginx.package = pkgs.nginx.override {
    modules = [ pkgs.nginxModules.rtmp ];
  };
  # services.nginx.recommendedProxySettings = true;
  services.nginx.virtualHosts = {
    # let all empty subdomains pointing to hainich return 404
    "hainich.hacc.space" = {
      default = true;
      locations."/".return = "404";
    };
    "hacc.space" = {
      enableACME = true;
      forceSSL = true;
      locations."/".return = "301 https://hacc.earth";
    };
  };
  networking.firewall.allowedTCPPorts = [ 1935 ];
  services.nginx = {
    appendHttpConfig = ''
      add_header Permissions-Policy "interest-cohort=()";
    '';
    appendConfig = ''
      rtmp {
        server {
          listen 1935;
          application cutiestream {
            live on;
            allow publish all;
            allow play all;
          }
          application ingest {
            live on;
            record all;
            record_path /data/ingest;
            record_unique on;
            # include /var/secrets/ingest.conf;
          }
        }
      }
    '';
  };
  systemd.services.nginx.serviceConfig.ReadWriteDirectories = "/data/ingest /var/secrets";
}

@@ -1,53 +0,0 @@
{ config, lib, pkgs, ... }:
{
  services.syncthing = {
    enable = true;
    relay.enable = false;
    openDefaultPorts = true;
    declarative = {
      devices = {
        # schweby
        txsbcct = {
          addresses = []; # empty = dynamic
          id = "AQHOPTO-X3LWJXZ-2SPLSEW-MCVMX3R-VSLPPYE-NIOTDMW-QOYRSDZ-2LR7RAD";
        };
        octycs = {
          addresses = []; # empty = dynamic
          id = "KIJVGWZ-GRXPAUX-ZOTZDLS-KUKANCC-A2IBZRM-BT3RZK7-5M43O6R-OZD5IQE";
        };
        stuebinm-desktop = {
          addresses = []; # empty = dynamic
          id = "CWZTKG7-F45LE2O-TIT6IBC-RQD6MLH-K5ECUGJ-LOHJXF3-I2F4R6I-JVMRLAJ";
        };
        raphael-laptop = {
          addresses = []; # empty = dynamic
          id = "72B3T74-NOMJV3X-EVJXTJF-5GGAEZB-ZDKBHXQ-VQNRYEU-YCPA2JP-L6NGAAG";
        };
        # zauberberg
        conway = {
          addresses = []; # empty = dynamic
          id = "HV7IU2N-Q4W3A7F-BSASR43-OB575SM-47FY2UW-7N5GMFM-PX3LWRN-HXBXMQF";
        };
        # hexchen
        storah = {
          addresses = [ "tcp://46.4.62.95:22000" "quic://46.4.62.95:22000" ];
          id = "SGHQ2JA-7FJ6CKM-N3I54R4-UOJC5KO-7W22O62-YLTF26F-S7DLZG4-ZLP7HAM";
        };
      };
      folders = {
        "/var/lib/syncthing/hacc" = {
          id = "qt2ly-xvvvs";
          devices = [ "txsbcct" "octycs" "stuebinm-desktop" "conway" "raphael-laptop" "storah" ];
          type = "receiveonly";
          versioning = {
            type = "simple";
            params.keep = "10";
          };
        };
      };
    };
  };
}

@@ -1,102 +0,0 @@
{ pkgs, lib, config, ... }:
let
  sources = import ../../../nix/sources.nix {};
  # why the double outPath? Dunno, just niv things …
  workadventure-nix = sources.workadventure.outPath.outPath;
  haccmap = sources.haccmap.outPath.outPath;
in
{
  # not the most intuitive of container names, but "workadventure" is too long
  containers.wa-void = {
    # we'll need the outer config to get the turn secret inside the container,
    # and I'm feeling haskelly, so config' it is!
    config = let config' = config; in { config, pkgs, ... }: {
      imports = [ workadventure-nix ];
      networking.firewall.allowedTCPPorts = [ 80 ];
      services.workadventure."void.hacc.space" = {
        packageset = (
          import "${workadventure-nix}/wapkgs.nix" {
            inherit pkgs lib;
          }
        ).workadventure-xce;
        nginx = {
          default = true;
          domain = "void.hacc.space";
          maps = {
            serve = true;
            path = "${haccmap}/";
          };
        };
        frontend.startRoomUrl = "/_/global/void.hacc.space/maps/main.json";
        commonConfig = {
          webrtc.stun.url = "stun:turn.hacc.space:3478";
          webrtc.turn = {
            url = "turn:46.4.63.148";
            user = "turn";
            password = config'.services.coturn.static-auth-secret;
          };
          jitsi.url = "meet.ffmuc.net";
        };
      };
    };
    privateNetwork = true;
    hostAddress6 = "fd00::42:14";
    localAddress6 = "fd00::42:16";
    autoStart = true;
  };
  services.coturn = {
    enable = true;
    realm = "turn.hacc.space";
    # this is a static "secret" that is also compiled into workadventure,
    # so it seems ok to put it into the nix store
    static-auth-secret = "990bc6fc68c720a9159f9c7613b2dcc3cc9ffb4f";
    use-auth-secret = true;
    no-cli = true;
    no-tcp-relay = true;
    cert = config.security.acme.certs."turn.hacc.space".directory + "/full.pem";
    pkey = config.security.acme.certs."turn.hacc.space".directory + "/key.pem";
  };
  services.nginx = {
    virtualHosts."void.hacc.space" = {
      forceSSL = true;
      enableACME = true;
      locations."/" = {
        proxyPass = "http://[${config.containers.wa-void.localAddress6}]";
        proxyWebsockets = true;
      };
    };
    # this isn't actually needed, but acme requires a webserver to serve
    # challenges, so I guess it's easier to just define a virtualHost here
    virtualHosts."turn.hacc.space" = {
      enableACME = true;
      forceSSL = true;
    };
  };
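  # open exactly the ports coturn is configured to use: its listening and TLS
  # listening ports, plus the UDP range used for relaying media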
  networking.firewall = with config.services.coturn;
    let
      ports = [ listening-port tls-listening-port ];
    in {
      allowedTCPPorts = [ 80 ] ++ ports;
      allowedUDPPorts = ports;
      allowedUDPPortRanges = [
        { from = min-port; to = max-port; }
      ];
    };
}

@@ -1,34 +0,0 @@
{ config, lib, pkgs, ... }:
{
  systemd.services.wireguard-upstream = {
    wants = [ "wg-upstream-key.service" ];
    after = [ "wg-upstream-key.service" ];
  };
  networking.wireguard.interfaces.upstream = {
    ips = [ "2a0d:eb04:8:ffff:2::2/128" ];
    generatePrivateKeyFile = true;
    privateKeyFile = "/etc/wireguard/upstream.key";
    listenPort = 51820;
    peers = [
      {
        allowedIPs = [ "::/0" ];
        endpoint = "103.105.50.220:51823";
        publicKey = "qL5xKnQ7xLbtTvu0VmLBwHExteJBhmCe5S/0ZoXBeXY=";
      }
    ];
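    # replace the plain /128 with one that has an explicit peer address, so
    # there is a point-to-point route to the upstream gateway via the tunnel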
    postSetup = ''
      ${pkgs.iproute}/bin/ip addr del dev upstream 2a0d:eb04:8:ffff:2::2/128
      ${pkgs.iproute}/bin/ip addr add dev upstream 2a0d:eb04:8:ffff:2::2/128 peer 2a0d:eb04:8:ffff:2::1/128
    '';
  };
  networking.interfaces.lo.ipv6 = {
    addresses = [{
      address = "2a0d:eb04:8:10::1";
      prefixLength = 128;
    }];
  };
  networking.defaultGateway6 = {
    address = "2a0d:eb04:8:ffff:2::1";
    interface = "upstream";
  };
}