Compare commits

1 commit: 05c4fe4823

322 changed files with 26142 additions and 19515 deletions

.gitignore (vendored), 1 line changed

@@ -2,4 +2,3 @@ result
ecdsa_host
secrets/
.*.swp
.deploy-gc/*

.gitlab-ci.yml (new file, 31 lines)

@@ -0,0 +1,31 @@
stages:
  - instantiate
  - build

instantiate 20.09:
  tags:
    - nix
  stage: instantiate
  script:
    - nix-instantiate -I nixpkgs=https://github.com/hexchen/nixpkgs/archive/hexchen-20.09.tar.gz -A deploy.all

instantiate main:
  tags:
    - nix
  stage: instantiate
  script:
    - nix-instantiate -I nixpkgs=https://github.com/hexchen/nixpkgs/archive/hexchen-main.tar.gz -A deploy.all

build 20.09:
  tags:
    - nix
  stage: build
  script:
    - nix-build -A deploy.all -I nixpkgs=https://github.com/hexchen/nixpkgs/archive/hexchen-20.09.tar.gz

build main:
  tags:
    - nix
  stage: build
  script:
    - nix-build -A deploy.all -I nixpkgs=https://github.com/hexchen/nixpkgs/archive/hexchen-main.tar.gz

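Editor's note: the CI jobs above evaluate and build a `deploy.all` attribute against two pinned nixpkgs branches. Only the attribute path `deploy.all` (and `deploy.<host>`, used later in this diff) comes from the repository; the sketch below is an assumption about how such an attribute could be exposed from a top-level `default.nix`, not the repo's actual code.

```nix
# Hypothetical default.nix exposing a `deploy.all` attribute like the one the
# CI jobs build; names other than `deploy.all` are illustrative assumptions.
let
  pkgs = import <nixpkgs> { };
  # assumed entry point: an attrset mapping host names to { config, ... }
  hosts = import ./configuration/hosts;
  toplevel = host: host.config.system.build.toplevel;
in {
  deploy = {
    # one symlink per host, so `nix-build -A deploy.all` builds every system closure
    all = pkgs.linkFarm "deploy-all" (pkgs.lib.mapAttrsToList
      (name: host: { inherit name; path = toplevel host; }) hosts);
  } // pkgs.lib.mapAttrs (_: toplevel) hosts;   # also deploy.hainich, deploy."cdn/master", ...
}
```
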
@@ -1 +0,0 @@
websites/*

.sops.yaml (deleted, 23 lines)

@@ -1,23 +0,0 @@
keys:
  - &parsons age1yql8qaf7upraqy4cq397tt4vgs046hq0v59qymla8t3x0ujqvu4sesgsvw
  - &hexchen-backup age1zgdegurzlr8cw9948wgf4q5qh3efltwhhzus5tt6az5xvvsux9us2v4tyd
  - &stuebinm-ilex age18wkr3kjalalzrq9l05q32gnlaqr7t6rqqzde307m83rs9fp4xcfsdtj9gt
  - &stuebinm-surltesh-echer age1q88az2y5hnx8naqsvrurllqj6y5gtehrpa9emmrxy5ghwsr7pvnqf7tfpx
  - &stuebinm-abbenay age18nkru4pwvvapdw76nauv2xdtlj8cvyv3ugahe9kcxtvtsptx2eyqw7p0m6
  - &octycs-m age1fm3e99tdyrsvztdchxxllt9nat35xzvd68d09y8scu9jfc7kvvuquhr49c
  - &zauberberg-conway age16fk0m26n0fr2vmuxm2mjsmrawclde2mlyj6wg3ee9jvzmu5ru3ustgs5jq
  - &moira-2022-06 age1l694a4xht7r0eza9r2vjncupmp6cxyk3k9x2ljwynnur4m2lc5jqmy3jut
  - &moira-openpgp age1m374x78q9eykua32ldrqxh8rh36kz6jyre69a263krf28hcycsqsrmshl0
creation_rules:
  - path_regex: secrets.yaml
    key_groups:
      - age:
          - *parsons
          - *hexchen-backup
          - *stuebinm-ilex
          - *stuebinm-surltesh-echer
          - *stuebinm-abbenay
          - *octycs-m
          - *zauberberg-conway
          - *moira-2022-06
          - *moira-openpgp

LICENSE (deleted, 24 lines)

@@ -1,24 +0,0 @@
This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>

README.md (deleted, 81 lines)

@@ -1,81 +0,0 @@
# hacc nixfiles

Welcome to the hacc nixfiles (haccfiles). This is how we configure (most of)
our infrastructure.

## General layout

- `flake.nix`: Entrypoint & dependencies
- `modules/`: home-grown modules for hacc-specific services
- `pkgs/`: packages we need which aren't in nixpkgs
- `websites/`: static websites hosted by us
- `common/`: meta-level config, reusable across machines
- `parsons/`: our sole server, its config & the services it runs

Right now, we only have a single host. We might add more again in the future.

## Working with this repo

You will need a flake-enabled nix installation, and have your ssh config set up
so that `ssh parsons` will connect to `parsons.hacc.space`.

### Deploying remotely

It's recommended to use [deploy-rs](https://github.com/serokell/deploy-rs):

~~~shell
deploy .#parsons -k [--dry-activate]
~~~

Alternatively, using just `nixos-rebuild`:

~~~shell
nixos-rebuild --flake .#parsons --target-host parsons \
    --use-remote-sudo --use-substitutes [test|switch|dry-activate]
~~~

### Re-deploying on parsons itself

Simply do:

~~~shell
nixos-rebuild --flake .#parsons [test|switch|dry-activate]
~~~

## Working on websites

Websites are exposed as flake outputs: if you're working on a website & want to
check it in a browser, do e.g.

~~~shell
nix run .#\"muc.hacc.earth\"
~~~

to start a local http server (note that some of our websites need a directory
to be built in; these use `/tmp/hacc-website`).

To add a new website, add a new subdirectory to `websites`; nix will generate a
vhost config based on that directory's name. Add a `default.nix` in your directory
describing how to build the website, and give its derivation a `watch` attribute
to make the `nix run` setup work.

## I don't want to build this long dependency / want a cached version!

If it's still available on parsons from a previous deploy, do:

```shell
nix copy --from ssh://parsons /nix/store/...
```

Note: don't just copy the .drv file (which Nix complains about if it can't
build something), that's just the description of how to build it! If you
don't know the actual outpath, look in the .drv file (it should start with
`Derive([("out","[the path you want]"...`).

## committing to haccfiles

- Things on `main` should always reflect the config that's actually deployed on
  parsons, except during testing / debugging sessions
- split up commits, every commit is one atomic change
- follow the commit format: "place: $change"
  - place: e.g. `modules/$module`, `services/$service` ...
  - change: describe your change. Please wrap your lines sensibly (or configure
    your editor to do this for you)
  - Exception: autogenerated messages (merge commits, reverts, etc)
- don't overuse merge commits, try to rebase things if possible with reasonable
  effort

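Editor's note: the deleted README above only describes the per-site `default.nix` with its `watch` attribute in prose. Below is a minimal sketch of what such a file could look like; the `muc.hacc.earth` directory name, the plain copy-based build, and the port are assumptions for illustration, and only the `watch` convention itself comes from the README.

```nix
# websites/muc.hacc.earth/default.nix -- illustrative sketch, not the repo's actual code
{ pkgs ? import <nixpkgs> { } }:

pkgs.stdenvNoCC.mkDerivation {
  name = "muc.hacc.earth";
  src = ./.;

  # plain static site: just copy the source files into $out
  installPhase = ''
    mkdir -p $out
    cp -r ./* $out/
  '';

  # the flake's `nix run` wrapper is assumed to pick this up to serve the
  # site locally while working on it
  passthru.watch = pkgs.writeShellScriptBin "watch-muc-hacc-earth" ''
    exec ${pkgs.python3}/bin/python -m http.server 8000
  '';
}
```
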
(deleted file, 91 lines)

@@ -1,91 +0,0 @@
{ config, lib, pkgs, modules, sources, ... }:

{
  imports = [
    ../modules
    ./users.nix
  ];

  boot.kernelPackages = lib.mkDefault pkgs.linuxPackages;
  boot.kernelParams = [ "quiet" ];

  networking.domain = lib.mkDefault "hacc.space";

  services.journald.extraConfig = ''
    SystemMaxUse=512M
    MaxRetentionSec=48h
  '';
  nix.package = pkgs.lix;
  nix.gc.automatic = lib.mkDefault true;
  nix.gc.options = lib.mkDefault "--delete-older-than 7d";
  nix.settings.trusted-users = [ "root" "@wheel" ];
  nix.extraOptions = ''
    experimental-features = nix-command flakes
  '';
  environment.variables.EDITOR = "vim";

  services.openssh = {
    enable = true;
    ports = lib.mkDefault [ 62954 ];
    settings = {
      X11Forwarding = true;
      PermitRootLogin = "prohibit-password";
      PasswordAuthentication = false;
      KbdInteractiveAuthentication = false;
      StreamLocalBindUnlink = true;
    };
  };
  programs.mosh.enable = true;
  programs.fish.enable = true;
  security.sudo.wheelNeedsPassword = lib.mkDefault false;

  i18n.defaultLocale = "en_IE.UTF-8";
  console = {
    font = "Lat2-Terminus16";
    keyMap = "de";
  };
  programs.mtr.enable = true;

  environment.systemPackages = with pkgs; [
    smartmontools lm_sensors htop tcpdump nload iftop
    bottom
    ripgrep vgrep
    git wget
    kitty.terminfo
    rsync pv progress
    parallel bc
    usbutils pciutils
    cryptsetup gptfdisk
    zstd p7zip
    file
    whois
    iperf
    fd
    eza
    socat
    tmux
    gnupg
    vim neovim
    patchelf
    binutils
    dnsutils
    flashrom ifdtool cbfstool nvramtool
    nmap
    s-tui stress
    ffmpeg-full
    bat
    niv
    sqlite-interactive
    hacc-scripts
  ];

  security.acme.defaults.email = "info+acme@hacc.space";
  security.acme.acceptTerms = true;

  services.nginx.appendHttpConfig = ''
    access_log off;
    add_header Permissions-Policy "interest-cohort=()";
  '';

  networking.nftables.enable = true;
}

(deleted file, 86 lines)

@@ -1,86 +0,0 @@
{ config, pkgs, lib, ... }:

{

  users.users = {
    root = {
      openssh.authorizedKeys.keys = with pkgs.lib; concatLists (mapAttrsToList (name: user: if elem "wheel" user.extraGroups then user.openssh.authorizedKeys.keys else []) config.users.users);
    };

    # all the actual config is imported from hexchen's nixfiles
    hexchen = {
      uid = lib.mkForce 1000;
      isNormalUser = true;
      extraGroups = [ "wheel" ];
      openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINJ0tCxsEilAzV6LaNpUpcjzyEn4ptw8kFz3R+Z3YjEF hexchen@backup"
        "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI3T1eFS77URHZ/HVWkMOqx7W1U54zJtn9C7QWsHOtyH72i/4EVj8SxYqLllElh1kuKUXSUipPeEzVsipFVvfH0wEuTDgFffiSQ3a8lfUgdEBuoySwceEoPgc5deapkOmiDIDeeWlrRe3nqspLRrSWU1DirMxoFPbwqJXRvpl6qJPxRg+2IolDcXlZ6yxB4Vv48vzRfVzZNUz7Pjmy2ebU8PbDoFWL/S3m7yOzQpv3L7KYBz7+rkjuF3AU2vy6CAfIySkVpspZZLtkTGCIJF228ev0e8NvhuN6ZnjzXxVTQOy32HCdPdbBbicu0uHfZ5O7JX9DjGd8kk1r2dnZwwy/ hexchen@yubi5"
        "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4CLJ+mFfq5XiBXROKewmN9WYmj+79bj/AoaR6Iud2pirulot3tkrrLe2cMjiNWFX8CGVqrsAELKUA8EyUTJfStlcTE0/QNESTRmdDaC+lZL41pWUO9KOiD6/0axAhHXrSJ0ScvbqtD0CtpnCKKxtuOflVPoUGZsH9cLKJNRKfEka0H0GgeKb5Tp618R/WNAQOwaCcXzg/nG4Bgv3gJW4Nm9IKy/MwRZqtILi8Mtd+2diTqpMwyNRmbenmRHCQ1vRw46joYkledVqrmSlfSMFgIHI1zRSBXb/JkG2IvIyB5TGbTkC4N2fqJNpH8wnCKuOvs46xmgdiRA26P48C2em3 hexchen@yubi5c"
      ];
    };

    octycs = {
      uid = 1002;
      isNormalUser = true;
      extraGroups = [ "wheel" ];
      openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIQqFXSlqW+D4ZtVdCiN9IT461iwyqy2taBRD3qkvXqn m@octycs.eu"
      ];
      hashedPassword = "$6$qQEbD8Ejx/y$6/nkX8CmFBtAlUP/UbFKVMVlA.ZvVbjQZRABqXQjU11tKpY25ww.MCGGMEKFv.7I/UH/126/q0S3ROTqePUEc.";
    };

    zauberberg = {
      uid = 1003;
      isNormalUser = true;
      extraGroups = [ "wheel" "cdrom" ];
      openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOfxXSy22k2EZwz1EtvIMwQKGWsswEBeLn5ClhuiI4Ma lukas@Conway.lan"
      ];
      packages = with pkgs; [ ffmpeg ];
    };

    moira = {
      uid = 1004;
      shell = pkgs.fish;
      isNormalUser = true;
      extraGroups = [ "wheel" "cdrom" ];
      openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJrcJRF71+XM5YZj+SaSiGcdVZ0IDxGBXIWssDtHiTtr moira_2022_06"
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINUa7NLrRqQ3j4KSGIw0vSvLMTO0gSZeCypQnJ/Viqm8 openpgp:0xBE0BE8A3"
      ];
      hashedPassword = "$6$zkAsaVdmIduqZxez$GY9aBlYeP41F0it/VbbZzLLLRQhHAbDdFsa3e/1GS9McTuSimMHODg6HqNVEH1zSqD3afhK/0UHfqbtF5qpi90";
    };

    stuebinm = {
      uid = 1005;
      isNormalUser = true;
      extraGroups = [ "wheel" ];
      shell = pkgs.fish;
      openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG7J3peZGB4XGJKI1dV5PdpQS+TzmoJ7qL//ipCG7G5K stuebinm@surltesh-echer"
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKPB74xA2GBXnDwPEEaxWLONdQyBwjDoJHYagKRQXwO2 stuebinm@abbenay"
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIH8e9WrHsknoFwBm/YaigOSz9VI8dXRRR5G9BX4kKt9/ stuebinm@ilex"
      ];
    };

    leah2 = {
      uid = 1006;
      shell = pkgs.fish;
      isNormalUser = true;
      extraGroups = [ "wheel" "cdrom" ];
      openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK4o/ncaQUorp/BeZesPnVhzvfoqLJW3WZHtz+CWQvFU"
      ];
    };

    floppy = {
      uid = 1007;
      shell = pkgs.fish;
      isNormalUser = true;
      extraGroups = [ "wheel" "cdrom" ];
      openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDyVQhFDcoMnoYivQu1h8NCTWa+2WriZ1m5BilkuUk4u"
      ];
    };
  };
}

configuration/common/default.nix (new file, 83 lines)

@@ -0,0 +1,83 @@
{ config, lib, pkgs, ... }:

{
  imports = [
    ../../modules
    ./external.nix
    ./users.nix
  ];

  nixpkgs.overlays = [
    (self: super: import ../../pkgs { nixpkgs = super.path; })
  ];

  boot.kernelPackages = lib.mkDefault pkgs.linuxPackages_latest;
  boot.kernelParams = [ "quiet" ];

  networking.domain = lib.mkDefault "hacc.space";
  petabyte.nftables = {
    enable = lib.mkDefault true;
  };

  services.journald.extraConfig = "SystemMaxUse=512M";
  nix.gc.automatic = lib.mkDefault true;
  nix.gc.options = lib.mkDefault "--delete-older-than 1w";
  nix.trustedUsers = [ "root" "@wheel" ];
  environment.variables.EDITOR = "vim";

  services.openssh = {
    enable = true;
    ports = lib.mkDefault [ 62954 ];
    passwordAuthentication = false;
    challengeResponseAuthentication = false;
    permitRootLogin = lib.mkDefault "prohibit-password";
    extraConfig = "StreamLocalBindUnlink yes";
    forwardX11 = true;
  };
  security.sudo.wheelNeedsPassword = lib.mkDefault false;

  i18n.defaultLocale = "en_IE.UTF-8";
  time.timeZone = "UTC";
  console = {
    font = "Lat2-Terminus16";
    keyMap = "de";
  };
  programs.mtr.enable = true;

  environment.systemPackages = with pkgs; [
    smartmontools lm_sensors htop tcpdump nload iftop
    # bottom
    ripgrep
    git wget
    kitty.terminfo
    rsync pv progress
    parallel bc
    usbutils pciutils
    cryptsetup gptfdisk
    zstd p7zip
    file
    whois
    iperf
    fd
    exa
    socat
    tmux
    gnupg
    vim
    patchelf
    binutils
    dnsutils
    flashrom ifdtool cbfstool nvramtool
    nmap
    s-tui stress
    ffmpeg-full
  ];

  petabyte.vnstat = {
    enable = true;
    nginx.enable = true;
  };

  security.acme.email = "info+acme@hacc.space";
  security.acme.acceptTerms = true;
}

configuration/common/external.nix (new file, 32 lines)

@@ -0,0 +1,32 @@
let
  pbb-nixfiles = fetchGit {
    url = "https://git.petabyte.dev/petabyteboy/nixfiles";
    rev = "b15d29e0440716fc37414547d55839717c9ed2f9";
  };

  qyliss-nixlib = fetchTarball {
    url = "https://git.qyliss.net/nixlib/snapshot/nixlib-e14330c5be9b005d4310cd4dc0d384cff882aedc.tar.zst";
    sha256 = "0nan14ixhdzxxddnckqqhaxhr96yw08rgcmxssddhji6aq5a445j";
  };

  home-manager = fetchGit {
    url = "https://github.com/nix-community/home-manager";
    rev = "a98ec6ec158686387d66654ea96153ec06be33d7";
  };
in {
  imports = [
    (pbb-nixfiles + "/modules")
    (home-manager + "/nixos")
  ];

  nixpkgs.overlays = [
    (self: super: {
      pleroma = self.callPackage (pbb-nixfiles + "/pkgs/pleroma") {
        elixir_1_10 = super.elixir;
      };
      dino = self.callPackage (qyliss-nixlib + "/overlays/patches/dino") {
        inherit (super) dino;
      };
    })
  ];
}

configuration/common/hexchen.nix (new file, 79 lines)

@@ -0,0 +1,79 @@
{config, lib, pkgs, ...}:

{
  home-manager.users.hexchen = {
    programs.direnv = {
      enable = true;
      enableFishIntegration = true;
      enableNixDirenvIntegration = true;
    };
    programs.fish = {
      enable = true;
      shellAliases = {
        icat = "${pkgs.kitty}/bin/kitty +kitten icat";
      };
      plugins = [
        {
          name = "bass";
          src = pkgs.fetchFromGitHub {
            owner = "edc";
            repo = "bass";
            rev = "d63054b24c2f63aaa3a08fb9ec9d0da4c70ab922";
            sha256 = "0pwci5xxm8308nrb52s5nyxijk0svar8nqrdfvkk2y34z1cg319b";
          };
        }
      ];
    };
    programs.vim = {
      enable = true;
      extraConfig = ''
        set viminfo='20,<1000
        set mouse=a
      ''; /*
        set tabstop=2
        set shiftwidth=2
        set expandtab
      '';*/
    };

    programs.git = {
      enable = true;
      userName = "hexchen";
      userEmail = "hexchen@lilwit.ch";
      signing = {
        key = "B1DF5EAD";
        signByDefault = false;
      };
      extraConfig = {
        pull.rebase = true;
      };
    };

    programs.bat.enable = true;
    programs.jq.enable = true;
    programs.tmux.enable = true;
    programs.ssh = {
      enable = true;
      controlMaster = "auto";
      controlPersist = "10m";
      hashKnownHosts = true;
      matchBlocks = let
        hexchen = {
          forwardAgent = true;
          extraOptions = {
            RemoteForward = "/run/user/1000/gnupg/S.gpg-agent /run/user/1000/gnupg/S.gpg-agent.extra";
          };
          port = 62954;
        };
      in {
        "*.chaoswit.ch" = hexchen;
        "*.copyonwit.ch" = hexchen;
        "*.lilwit.ch" = hexchen;
        "*.hxchn.de" = hexchen;
        "*.hacc.space" = hexchen;
        "*.hacc.media" = hexchen;
      };
    };
  };

}

configuration/common/users.nix (new file, 67 lines)

@@ -0,0 +1,67 @@
{ config, pkgs, ... }:

{
  imports = [
    ./hexchen.nix
  ];

  home-manager.useGlobalPkgs = true;

  users.users = {
    root = {
      openssh.authorizedKeys.keys = with pkgs.lib; concatLists (mapAttrsToList (name: user: if elem "wheel" user.extraGroups then user.openssh.authorizedKeys.keys else []) config.users.users);
    };

    hexchen = {
      uid = 1000;
      isNormalUser = true;
      extraGroups = [ "wheel" ];
      openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDNVUDKx9sukRkb6INny432+2HZBWx/qIEAOvngF1qcj hexchen@montasch"
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEvzA8w0/th/EJcqwogd5LIyTV4lcK6iSbkRYUtKli/V hexchen@mobile"
        "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDI3T1eFS77URHZ/HVWkMOqx7W1U54zJtn9C7QWsHOtyH72i/4EVj8SxYqLllElh1kuKUXSUipPeEzVsipFVvfH0wEuTDgFffiSQ3a8lfUgdEBuoySwceEoPgc5deapkOmiDIDeeWlrRe3nqspLRrSWU1DirMxoFPbwqJXRvpl6qJPxRg+2IolDcXlZ6yxB4Vv48vzRfVzZNUz7Pjmy2ebU8PbDoFWL/S3m7yOzQpv3L7KYBz7+rkjuF3AU2vy6CAfIySkVpspZZLtkTGCIJF228ev0e8NvhuN6ZnjzXxVTQOy32HCdPdbBbicu0uHfZ5O7JX9DjGd8kk1r2dnZwwy/ hexchen@yubi5"
      ];
      shell = pkgs.fish;
      packages = with pkgs; [ python38 go ];
    };

    stuebinm = {
      uid = 1005;
      isNormalUser = true;
      extraGroups = [ "wheel" ];
      openssh.authorizedKeys.keys = [
        "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDQtJQ8fUfwsC9Q39sNpZ41RRbW91QXDLKltsYK+TLidQ5IJj2KsG/lkd433Tod6PzSvB2PcfIfnvUz7GQuS1UwXHMdLEy0/kqeYrSi6QlAxFyFBSTsUZ4d+HHwBBoXhu1Iaoch/FJNI0FhfBciIii05UyYuPj5zGgvWhnfD53Ll8HA6XVXhSK09+9GRGq57Mix5N9AkzfEF83aRUF9Qfl7Jl16rOjIgtS8hbL0kXIKUeCxZA2xi/lNHEQRriCiriPmPGOhiPcNXzbekw7IbFfE3If1CHnj7KA4KnafHAd+uHvQAce5Y4v2vMOPfGVh1cm84VTzdSPEW5V1hFjOlSnnuCQtAzkQLv8zed2NLj73GgFlcUrYKERcH84wydD0gEednNKsW8T2NzgO2eNCBf0LrcFp17qmWLv51A3jofEX5tQ3PZ7zbtR4DMUmrizrsBWDYiHJOMVeMs/9TnmIc3PL17qvVvFI7OcYxl+SPPpPtaBzxXZAMIvGFppzYxRylBcBhNvE+bXXgLFXh5cbUcwgXjvrX0y8Gv/5S4E55+i2rQMqC55+O48snoSeNlQDZV+B9setXoC93K9fBurmCX8ObnNRvvghcwUl9OBSW5K9TBdl6FF3+Z3gOCIxOMGQQKJUS5/g/eLFJ+13Y5qAPS49XJzaBiTmDrRi8x22p7sU1Q== stuebinm@in.tum.de"
      ];
    };

    octycs = {
      uid = 1002;
      isNormalUser = true;
      extraGroups = [ "wheel" ];
      openssh.authorizedKeys.keys = [
        "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDobGLrA6YQAKdJkZMpAsqjlk744G/pCJEvAUNJDuT1Sr59BFKDchPT03exb0o39mjH4iqvw4JDI10RfylKbR1736Ji2yRLlbCzUdgv2CfZc28TAO0rscyT49RHJmzEEE5QD4Ge7MgvFBEmZKXAxntA8M8EbxxEVfzhWp3751BYkzrCbJiHMXcTb+BG9P6rmrraINmgUJxywym5PsMYt2sfHlVus3hSpWnCR/cu0nxmW9E6Tm6CzSkWOXOTdjVuc0Kgh5GXaKDROzJ9K7cJAhd5t8Yzqtpm2xfSU5FVVUH9i7PbXOo8FL82Xi6kWMgdFNLvKimxGqW+bCv3ROlyKWF4I+HQdfdL181KaOQ40jAvjmldrB/ZiEbuWYSBZ/XhxFkKrtBYPDFHq/a5lnH3OvcDm7+/LhwIKUnyZyQ2dXOLOTOEDsO/69xwNveCB8of9o/erDbOeb+d44cXUFpPMUTz4bHXEP6y+zz8TB8/aleGbLQCPUzRZfvazN95jGUDqkumi9B3Lf+W/KpjVUgu3NQsUuJn6khMYW9VefnJvHwzbWpqIzbzNePL4iZFECv4NHPQHO/katajnMbkCie9rfnLk1EjJnrSnZUInEygkW/7Eu4EQM2h7lU4HYfwP1c4ubCFdES0ELGqSuJRwd/ORDbgxbuKOQ7gZ3/lgHdr9KGqJQ== markus.amaseder@amaseder.de"
      ];
      hashedPassword = "$6$qQEbD8Ejx/y$6/nkX8CmFBtAlUP/UbFKVMVlA.ZvVbjQZRABqXQjU11tKpY25ww.MCGGMEKFv.7I/UH/126/q0S3ROTqePUEc.";
    };

    zauberberg = {
      uid = 1003;
      isNormalUser = true;
      extraGroups = [ "wheel" "cdrom" ];
      openssh.authorizedKeys.keys = [
        "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCt34ou3NYWoUayWrJa5ISzihAAhFiwolJPmm2fF9llPUUA8DP3BQRiKeqDlkDzhWLwztb+dNIUuregiFJdRN5Q2JZBKlM7Gqb1QtPhtK+xe2pyZPX2SWKIsKA6j3VAThhXsQdj3slXu3dG8FF7j+IFg/eTgpeQIFQQkMIc204ha8OP2ASYAJqgJVbXq8Xh3KkAc1HSrjYJLntryvK10wyU8p3ug370dMu3vRUn44FEyDzXFM9rfsgysQTzVgp+sXdRfMLeyvf+SUrE8hiPjzevF2nsUP0Xf/rIaK5VayChPLXJkulognINzvuVWAdwNPDLpgGwkjglF2681Ag88bLX allesmoeglicheundvielmehr@hotmail.de"
      ];
      packages = with pkgs; [ ffmpeg ];
    };

    schweby = {
      uid = 1004;
      isNormalUser = true;
      extraGroups = [ "wheel" "cdrom" ];
      openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL6JWi0MBDz0Zy4zjauQv28xYmHyapb8D4zeesq91LLE schweby@txsbcct"
      ];
      hashedPassword = "$6$zkAsaVdmIduqZxez$GY9aBlYeP41F0it/VbbZzLLLRQhHAbDdFsa3e/1GS9McTuSimMHODg6HqNVEH1zSqD3afhK/0UHfqbtF5qpi90";
    };
  };
}

configuration/desktop/default.nix (new file, 51 lines)

@@ -0,0 +1,51 @@
{ config, lib, pkgs, ... }:

{
  boot.plymouth.enable = true;
  nixpkgs.config = {
    mumble.speechdSupport = true;
    allowUnfree = true;
  };
  # boot.plymouth.splashBeforeUnlock = true;
  users.users.schweby.packages = config.users.users.hexchen.packages
    ++ (with pkgs; [ alacritty picom feh copyq polybar cinnamon.nemo rofi arandr notepadqq nomacs bat ]);
  users.users.hexchen = {
    packages = with pkgs; [
      pulsemixer pavucontrol
      firefox git kitty j4-dmenu-desktop bemenu
      breeze-qt5 mako
      mpv youtube-dl
      wl-clipboard mumble
      xdg_utils
      slurp grim libnotify
      _1password-gui
      # gnome3.nautilus
    ];
    extraGroups = [ "video" "audio" ];
  };
  home-manager.users.hexchen = {
    gtk = {
      enable = true;
      iconTheme = {
        name = "Adwaita";
        package = pkgs.gnome3.adwaita-icon-theme;
      };
      theme = {
        name = "Adwaita";
        package = pkgs.gnome3.adwaita-icon-theme;
      };
    };
  };

  sound.enable = true;
  hardware.pulseaudio = {
    enable = true;
    package = pkgs.pulseaudioFull;
  };
  networking.useDHCP = lib.mkDefault true;
  hardware.opengl.enable = true;
  services.xserver = {
    windowManager.bspwm.enable = true;
    layout = "de";
  };
}

configuration/desktop/gnome.nix (new file, 14 lines)

@@ -0,0 +1,14 @@
{config, lib, pkgs, ...}:

{
  services.xserver.displayManager.lightdm = {
    enable = true;
  };
  services.xserver = {
    enable = true;
    # videoDrivers = [ "nvidia" ];
  };
  # hardware.nvidia.modesetting.enable = true;

  services.xserver.desktopManager.plasma5.enable = true;
}

configuration/desktop/streaming.nix (new file, 19 lines)

@@ -0,0 +1,19 @@
{ config, pkgs, ...}:

{
  boot = {
    extraModulePackages = with config.boot.kernelPackages; [ v4l2loopback ];
    kernelModules = [ "v4l2loopback" ];
  };

  environment.systemPackages = with pkgs; [ obs-studio ];

  home-manager.users.hexchen = {
    programs.obs-studio = {
      enable = true;
      plugins = with pkgs; [
        obs-wlrobs obs-v4l2sink
      ];
    };
  };
}

configuration/desktop/sway.nix (new file, 42 lines)

@@ -0,0 +1,42 @@
{ config, pkgs, lib, ... }:

{
  fonts.fonts = with pkgs; [ font-awesome nerdfonts ];
  users.users.hexchen.packages = with pkgs; [ ];
  home-manager.users.hexchen = {
    programs.waybar = {
      enable = true;

      settings = [{
        modules-left = [ "sway/workspaces" "sway/mode" ];
        modules-center = [ "sway/window" ];
        modules-right = [ "pulseaudio" "network" "cpu" "memory" "temperature" "battery" "clock" "tray" ];

        modules = {
          battery = {
            states = {
              good = 95;
              warning = 30;
              critical = 15;
            };
            format = "{capacity}% {icon}";
            format-charging = "{capacity}% ";
            format-plugged = "{capacity}% ";
            format-alt = "{time} {icon}";
            format-icons = ["" "" "" "" ""];
          };
          network = {
            format-wifi = "{essid} ({signalStrength}%) ";
            format-ethernet = "{ifname}: {ipaddr}/{cidr} ";
            format-linked = "{ifname} (No IP) ";
            format-disconnected = "Disconnected ⚠";
            format-alt = "{ifname}: {ipaddr}/{cidr}";
          };
        };
      }];
    };
  };

  programs.sway.enable = true;

}

configuration/hosts/README.md (new file, 22 lines)

@@ -0,0 +1,22 @@
# deploy on hainich

**NOT** `nixos-rebuild switch`.

Connect to hainich via ssh.
The haccfiles repo (this one) should be in root's home directory.
If it is, simply pull:

``git pull origin main;``

otherwise clone it again:

``git clone https://gitlab.infra4future.de/infra/haccfiles.git``

then change into the repo:

``cd /root/haccfiles``

and build everything:

``nix build -f . -I nixpkgs=https://github.com/hexchen/nixpkgs/archive/hexchen-20.09.tar.gz deploy.hainich``

Then use ``./result`` to set the new configuration as a boot option and switch to it,
or ``./result test`` to keep it only until the next reboot.

configuration/hosts/cdn/loadbalancer/configuration.nix (new file, 26 lines)

@@ -0,0 +1,26 @@
{ config, pkgs, ... }:

{
  imports =
    [ # Include the results of the hardware scan.
      ./hardware-config.nix
      ../../../common
      ../../../server/cdn/cdn-lb.nix
    ];

  boot.loader.grub.enable = true;
  boot.loader.grub.version = 2;
  boot.loader.grub.devices = [ "/dev/sda" ];

  networking.interfaces.ens3.useDHCP = true;
  networking.hostName = "cdn-loadbalancer";


  # This value determines the NixOS release from which the default
  # settings for stateful data, like file locations and database versions
  # on your system were taken. It‘s perfectly fine and recommended to leave
  # this value at the release version of the first install of this system.
  # Before changing this value read the documentation for this option
  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
  system.stateVersion = "20.03"; # Did you read the comment?
}

configuration/hosts/cdn/loadbalancer/hardware-config.nix (new file, 26 lines)

@@ -0,0 +1,26 @@
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, ... }:

{
  imports =
    [ <nixpkgs/nixos/modules/profiles/qemu-guest.nix>
    ];

  boot.initrd.availableKernelModules = [ "ata_piix" "virtio_pci" "xhci_pci" "sd_mod" "sr_mod" ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ ];
  boot.extraModulePackages = [ ];

  fileSystems."/" =
    { device = "/dev/disk/by-uuid/e371ee1d-a03f-4964-b03d-4a5c59ff5911";
      fsType = "ext4";
    };

  swapDevices = [ ];

  nix.maxJobs = lib.mkDefault 1;
}

configuration/hosts/cdn/master/configuration.nix (new file, 26 lines)

@@ -0,0 +1,26 @@
{ config, pkgs, ... }:

{
  imports =
    [ # Include the results of the hardware scan.
      ./hardware-config.nix
      ../../../common
      ../../../server/cdn/cdn-master.nix
    ];

  boot.loader.grub.enable = true;
  boot.loader.grub.version = 2;
  boot.loader.grub.devices = [ "/dev/sda" ];

  networking.interfaces.ens3.useDHCP = true;
  networking.hostName = "cdn-master";


  # This value determines the NixOS release from which the default
  # settings for stateful data, like file locations and database versions
  # on your system were taken. It‘s perfectly fine and recommended to leave
  # this value at the release version of the first install of this system.
  # Before changing this value read the documentation for this option
  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
  system.stateVersion = "20.03"; # Did you read the comment?
}

configuration/hosts/cdn/master/hardware-config.nix (new file, 25 lines)

@@ -0,0 +1,25 @@
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, ... }:

{
  imports =
    [ <nixpkgs/nixos/modules/profiles/qemu-guest.nix>
    ];

  boot.initrd.availableKernelModules = [ "ata_piix" "virtio_pci" "xhci_pci" "sd_mod" "sr_mod" ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ ];
  boot.extraModulePackages = [ ];

  fileSystems."/" =
    { device = "/dev/disk/by-uuid/14cc7936-f928-41e3-8f72-ee6bf18d6c19";
      fsType = "ext4";
    };

  swapDevices = [ ];

  nix.maxJobs = lib.mkDefault 1;
}

configuration/hosts/cdn/node-1/configuration.nix (new file, 26 lines)

@@ -0,0 +1,26 @@
{ config, pkgs, ... }:

{
  imports =
    [ # Include the results of the hardware scan.
      ./hardware-config.nix
      ../../../common
      ../../../server/cdn/cdn-node.nix
    ];

  boot.loader.grub.enable = true;
  boot.loader.grub.version = 2;
  boot.loader.grub.devices = [ "/dev/sda" ];

  networking.interfaces.ens3.useDHCP = true;
  networking.hostName = "cdn-node-1";


  # This value determines the NixOS release from which the default
  # settings for stateful data, like file locations and database versions
  # on your system were taken. It‘s perfectly fine and recommended to leave
  # this value at the release version of the first install of this system.
  # Before changing this value read the documentation for this option
  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
  system.stateVersion = "20.03"; # Did you read the comment?
}

configuration/hosts/cdn/node-1/hardware-config.nix (new file, 25 lines)

@@ -0,0 +1,25 @@
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, ... }:

{
  imports =
    [ <nixpkgs/nixos/modules/profiles/qemu-guest.nix>
    ];

  boot.initrd.availableKernelModules = [ "ata_piix" "virtio_pci" "xhci_pci" "sd_mod" "sr_mod" ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ ];
  boot.extraModulePackages = [ ];

  fileSystems."/" =
    { device = "/dev/disk/by-uuid/52dddb3d-9294-4105-9157-bf003dc7bdf9";
      fsType = "ext4";
    };

  swapDevices = [ ];

  nix.maxJobs = lib.mkDefault 1;
}

configuration/hosts/cdn/node-2/configuration.nix (new file, 26 lines)

@@ -0,0 +1,26 @@
{ config, pkgs, ... }:

{
  imports =
    [ # Include the results of the hardware scan.
      ./hardware-config.nix
      ../../../common
      ../../../server/cdn/cdn-node.nix
    ];

  boot.loader.grub.enable = true;
  boot.loader.grub.version = 2;
  boot.loader.grub.devices = [ "/dev/sda" ];

  networking.interfaces.ens3.useDHCP = true;
  networking.hostName = "cdn-node-2";


  # This value determines the NixOS release from which the default
  # settings for stateful data, like file locations and database versions
  # on your system were taken. It‘s perfectly fine and recommended to leave
  # this value at the release version of the first install of this system.
  # Before changing this value read the documentation for this option
  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
  system.stateVersion = "20.03"; # Did you read the comment?
}

configuration/hosts/cdn/node-2/hardware-config.nix (new file, 25 lines)

@@ -0,0 +1,25 @@
# Do not modify this file! It was generated by ‘nixos-generate-config’
# and may be overwritten by future invocations. Please make changes
# to /etc/nixos/configuration.nix instead.
{ config, lib, pkgs, ... }:

{
  imports =
    [ <nixpkgs/nixos/modules/profiles/qemu-guest.nix>
    ];

  boot.initrd.availableKernelModules = [ "ata_piix" "virtio_pci" "xhci_pci" "sd_mod" "sr_mod" ];
  boot.initrd.kernelModules = [ ];
  boot.kernelModules = [ ];
  boot.extraModulePackages = [ ];

  fileSystems."/" =
    { device = "/dev/disk/by-uuid/a92ff89e-e1c2-4fda-8711-1be7257f6470";
      fsType = "ext4";
    };

  swapDevices = [ ];

  nix.maxJobs = lib.mkDefault 1;
}

configuration/hosts/default.nix (new file, 37 lines)

@@ -0,0 +1,37 @@
let
  hosts = {
    hainich = {
      ssh.host = "hainich.hacc.space";
      groups = [ "server" "hacc" ];
    };
    nixda = {
      ssh.host = "nixda.hacc.space";
      groups = [ "server" "desktop" "hacc" "live" ];
    };
    "cdn/node-2" = {
      ssh.host = "cdn-node-2.live.hacc.media";
      groups = [ "server" "hacc" "live" "livecdn" "livecdn-node" ];
    };
    "cdn/node-1" = {
      ssh.host = "cdn-node-1.live.hacc.media";
      groups = [ "server" "hacc" "live" "livecdn" "livecdn-node" ];
    };
    "cdn/master" = {
      ssh.host = "cdn-master.live.hacc.media";
      groups = [ "server" "hacc" "live" "livecdn" "livecdn-master" ];
    };
    "cdn/loadbalancer" = {
      ssh.host = "cdn-loadbalancer.live.hacc.media";
      groups = [ "server" "hacc" "live" "livecdn" "livecdn-lb" ];
    };
  };
  pkgs = import <nixpkgs> {};
  evalConfig = import <nixpkgs/nixos/lib/eval-config.nix>;
  lib = pkgs.lib;
in lib.mapAttrs (name: host: host // {
  config = if (host ? config) then host.config else (evalConfig {
    modules = [
      (import "${toString ./.}/${name}/configuration.nix")
    ];
  }).config;
}) hosts

configuration/hosts/hainich/configuration.nix (new file, 107 lines)

@@ -0,0 +1,107 @@
{ config, lib, pkgs, ... }:

{
  imports = [
    ../../common
    ./encboot.nix
    ./hardware.nix
    ./services/murmur.nix
    ./services/mail.nix
    # ./services/engelsystem.nix
    ./services/codimd.nix
    ../../common
    # ./wireguard.nix
    ./services/nginx.nix
    # ./k8s.nix
    ./services/docker.nix
    ./services/gitlab-runner.nix
    ./services/funkwhale.nix
  ];
  boot.loader.grub.enable = true;
  boot.loader.grub.version = 2;
  boot.loader.grub.device = "/dev/sda";
  boot.supportedFilesystems = [ "zfs" ];

  # networking
  networking.hostName = "hainich";
  networking.hostId = "8a58cb2f";
  networking.useDHCP = true;
  networking.interfaces.enp6s0.ipv4.addresses = [
    {
      address = "46.4.63.148";
      prefixLength = 27;
    }
    {
      address = "46.4.63.158";
      prefixLength = 27;
    }
  ];
  networking.interfaces.enp6s0.ipv6.addresses = [ {
    address = "2a01:4f8:140:84c9::1";
    prefixLength = 64;
  } ];
  networking.defaultGateway = "46.4.63.129";
  networking.nameservers = [
    "1.1.1.1" "1.0.0.1"
    "2606:4700:4700::1111" "2606:4700:4700::1001"
  ];
  networking.defaultGateway6 = {
    address = "fe80::1";
    interface = "enp6s0";
  };

  networking.nat.enable = true;
  networking.nat.internalInterfaces = ["ve-+"];
  networking.nat.externalInterface = "enp6s0";


  networking.firewall.allowedTCPPorts = [ 22 80 443 ];
  # networking.firewall.allowedUDPPorts = [ ... ];
  # networking.firewall.enable = false;

  # misc
  time.timeZone = "UTC";

  environment.systemPackages = with pkgs; [
    wget vim git
  ];

  services.openssh.enable = true;
  services.openssh.ports = [ 22 62954 ];

  users.users.root = {
    openssh.authorizedKeys.keys = [
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDNVUDKx9sukRkb6INny432+2HZBWx/qIEAOvngF1qcj hexchen@montasch"
      "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIL6JWi0MBDz0Zy4zjauQv28xYmHyapb8D4zeesq91LLE schweby@txsbcct"
      "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCvmrk3i04tXfrSlZtHFbG3o6lQgh3ODMWmGDING4TJ4ctidexmMNY15IjVjzXZgQSET1uKLDLITiaPsii8vaWERZfjm3jjub845mpKkKv48nYdM0eCbv7n604CA3lwoB5ebRgULg4oGTi60rQ4trFf3iTkJfmiLsieFBZz7l+DfgeDEjDNSJcrkOggGBrjE5vBXoDimdkNh8kBNwgMDj1kPR/FHDqybSd5hohCJ5FzQg9vzl/x/H1rzJJKYPO4svSgHkYNkeoL84IZNeHom+UEHX0rw2qAIEN6AiHvNUJR38relvQYxbVdDSlaGN3g26H2ehsmolf+U0uQlRAXTHo0NbXNVYOfijFKL/jWxNfH0aRycf09Lu60oY54gkqS/J0GoQe/OGNq1Zy72DI+zAwEzyCGfSDbAgVF7Y3mU2HqcqGqNzu7Ade5oCbLmkT7yzDM3x6IsmT1tO8dYiT8Qv+zFAECkRpw3yDkJkPOxNKg10oM318whMTtM3yqntE90hk= schweby@taxusbaccata"
    ];
    initialHashedPassword = "$6$F316njEF2$GMF4OmPSF6QgZ3P/DblQ/UFMgoo98bztbdw7X0ygvBGC1UMMIc13Vtxjd/ZGRYW/pEHACZZ7sbRZ48t6xhvO7/";
    # shell = pkgs.fish;
  };

  # storage stuffs!
  services.zfs = {
    autoSnapshot = {
      enable = true;
    };
    autoScrub = {
      enable = true;
    };
  };

  services.journald.extraConfig = ''
    MaxFileSec=6h
    MaxRetentionSec=72h
  '';

  boot.kernelPackages = pkgs.linuxPackages;

  # This value determines the NixOS release from which the default
  # settings for stateful data, like file locations and database versions
  # on your system were taken. It‘s perfectly fine and recommended to leave
  # this value at the release version of the first install of this system.
  # Before changing this value read the documentation for this option
  # (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
  system.stateVersion = "20.03"; # Did you read the comment?
}

configuration/hosts/hainich/encboot.nix (new file, 28 lines)

@@ -0,0 +1,28 @@
{ config, lib, pkgs, ... }:

{
  boot.initrd.kernelModules = [ "r8169" ]; # add network card driver
  boot.kernelParams = ["ip=:::::enp6s0:dhcp"]; # enable dhcp on primary network interface
  boot.initrd.network = {
    enable = true;
    ssh = {
      enable = true;
      port = 2222;
      # TODO: Modify system config so that this works
      # authorizedKeys = with lib; concatLists (mapAttrsToList (name: user: if elem "wheel" user.extraGroups then user.openssh.authorizedKeys.keys else []) config.users.users);
      authorizedKeys = config.users.users.root.openssh.authorizedKeys.keys;
      hostKeys = [ /run/keys/ecdsa_host ];
    };
    # TODO: curl some webhook here to alert?
    # possibly quite hard to do, we only have limited wget or netcat available
    # how this all works:
    # when someone logs in via ssh, they are prompted to unlock the zfs volume
    # afterwards zfs is killed in order for the boot to progress
    # timeout of 120s still applies afaik
    postCommands = ''
      zpool import zroot
      zpool import dpool
      echo "zfs load-key -a; killall zfs && exit" >> /root/.profile
    '';
  };
}

configuration/hosts/hainich/hardware.nix (new file, 52 lines)

@@ -0,0 +1,52 @@
{ config, lib, pkgs, ... }:

{
  boot.initrd.availableKernelModules = [ "uhci_hcd" "ahci" "sd_mod" ];
  boot.kernelModules = [ "kvm-intel" ];
  boot.extraModulePackages = [ ];

  fileSystems."/" =
    { device = "zroot/root/nixos";
      fsType = "zfs";
    };

  fileSystems."/nix" =
    { device = "zroot/root/nixos/nix";
      fsType = "zfs";
    };

  fileSystems."/home" =
    { device = "dpool/home";
      fsType = "zfs";
    };

  fileSystems."/var/lib/containers" =
    { device = "dpool/containers";
      fsType = "zfs";
    };

  fileSystems."/var/lib/docker" =
    { device = "dpool/docker";
      fsType = "zfs";
    };

  fileSystems."/var/lib/gitlab-runner" =
    { device = "dpool/gitlab-runner";
      fsType = "zfs";
    };

  fileSystems."/data" =
    { device = "dpool/data";
      fsType = "zfs";
    };

  fileSystems."/boot" =
    { device = "/dev/disk/by-uuid/40125f55-7fe8-4850-902e-b4d6e22f0335";
      fsType = "ext2";
    };

  swapDevices = [ ];

  nix.maxJobs = lib.mkDefault 12;
  powerManagement.cpuFreqGovernor = lib.mkDefault "ondemand";
}

configuration/hosts/hainich/k8s.nix (new file, 125 lines)

@@ -0,0 +1,125 @@
{ config, pkgs, ... }:
{
  services.etcd = {
    advertiseClientUrls = [
      "https://[2a0d:eb04:8:10::1]:2379"
    ];
    listenClientUrls = [
      "https://[2a0d:eb04:8:10::1]:2379"
    ];
    listenPeerUrls = [
      "https://[::1]:2380"
    ];
  };
  services.kubernetes = {
    roles = [ "master" "node" ];
    flannel.enable = false;
    addons.dns = {
      enable = true;
      clusterIp = "2a0d:eb04:8:11::53";
      reconcileMode = "EnsureExists";
    };
    pki.cfsslAPIExtraSANs = [ "hainich.hacc.space" ];
    apiserver = {
      advertiseAddress = "2a0d:eb04:8:10::1";
      extraSANs = [
        "2a0d:eb04:8:10::1" "2a0d:eb04:8:11::1" "hainich.hacc.space"
      ];
      bindAddress = "::";
      insecureBindAddress = "::1";
      etcd = {
        servers = [ "https://[2a0d:eb04:8:10::1]:2379" ];
      };
      serviceClusterIpRange = "2a0d:eb04:8:11::/120";
      extraOpts = "--allow-privileged=true";
    };
    controllerManager = {
      bindAddress = "::";
      clusterCidr = "2a0d:eb04:8:12::/64";
    };
    kubelet = {
      address = "::";
      clusterDns = "2a0d:eb04:8:11::53";
    };
    proxy = {
      bindAddress = "::";
    };
    scheduler = {
      address = "::1" ;
    };
    apiserverAddress = "https://[2a0d:eb04:8:10::1]:6443";
    clusterCidr = "2a0d:eb04:8:12::/64";
    easyCerts = true;
    masterAddress = "hainich.hacc.space";
  };

  networking.firewall = {
    allowedTCPPorts = [ 80 443 6443 ];
    trustedInterfaces = [
      "cbr0" "tunnat64"
    ];
    extraCommands = ''
      iptables -t nat -A POSTROUTING -o enp6s0 -j SNAT --to 46.4.63.158
      iptables -A FORWARD -i tunnat64 -j ACCEPT

      iptables -t nat -A PREROUTING -p tcp -d 46.4.63.158 --dport 80 -j DNAT --to-destination 10.255.255.2:80
      iptables -t nat -A PREROUTING -p tcp -d 46.4.63.158 --dport 443 -j DNAT --to-destination 10.255.255.2:443
      iptables -t nat -A PREROUTING -p tcp -d 46.4.63.158 --dport 6443 -j DNAT --to-destination 10.255.255.1:443

      ip6tables -A FORWARD -i tunnat64 -j ACCEPT
      ip6tables -A INPUT -i tunnat64 -j ACCEPT
    '';
    extraStopCommands = ''
      iptables -t nat -D POSTROUTING -o enp6s0 -j SNAT --to 46.4.63.158
      iptables -D FORWARD -i tunnat64 -j ACCEPT

      iptables -t nat -D PREROUTING -p tcp -d 46.4.63.158 --dport 80 -j DNAT --to-destination 10.255.255.2:80
      iptables -t nat -D PREROUTING -p tcp -d 46.4.63.158 --dport 443 -j DNAT --to-destination 10.255.255.2:443
      iptables -t nat -D PREROUTING -p tcp -d 46.4.63.158 --dport 6443 -j DNAT --to-destination 10.255.255.1:443

      ip6tables -A FORWARD -i tunnat64 -j ACCEPT
      ip6tables -A INPUT -i tunnat64 -j ACCEPT
    '';
  };

  systemd.services.tayga = (let
    config = pkgs.writeText "tayga.conf" ''
      tun-device tunnat64
      ipv4-addr 10.255.255.254
      prefix 2a0d:eb04:8:10:64::/96
      dynamic-pool 10.255.255.0/24
      map 10.255.255.1 2a0d:eb04:8:10::1
      map 10.255.255.2 2a0d:eb04:8:11::2
      strict-frag-hdr 1
    '';
    startScript = pkgs.writeScriptBin "tayga-start" ''
      #! ${pkgs.runtimeShell} -e
      ${pkgs.iproute}/bin/ip link set up tunnat64 || true
      ${pkgs.iproute}/bin/ip route add 10.255.255.0/24 dev tunnat64 || true
      ${pkgs.iproute}/bin/ip -6 route add 2a0d:eb04:8:10:64::/96 dev tunnat64 || true
      ${pkgs.tayga}/bin/tayga -d --config ${config}
    '';
  in {
    wantedBy = [ "multi-user.target" ];
    after = [ "network.target" ];
    serviceConfig = {
      ExecStart = ''${startScript}/bin/tayga-start'';
    };
  });

  networking.interfaces.cbr0.ipv6.routes = [{
    address = "2a0d:eb04:8:10::";
    prefixLength = 60;
  }];

  networking.interfaces.tunnat64 = {
    virtual = true;
  };

  # openebs expects some stuff to be there.
  system.activationScripts.openebs = ''
    mkdir -p /usr/lib /usr/sbin
    ln -sf ${pkgs.zfs.lib}/lib/* /usr/lib/
    ln -sf ${pkgs.zfs}/bin/zfs /usr/sbin/
  '';
}

configuration/hosts/hainich/services/codimd.nix (new file, 80 lines)

@@ -0,0 +1,80 @@
{ config, lib, pkgs, ... }:

{
  containers.codimd = {
    privateNetwork = true;
    hostAddress = "192.168.100.1";
    localAddress = "192.168.100.3";
    autoStart = true;
    config = { config, lib, pkgs, ... }: {
      networking.firewall.allowedTCPPorts = [ 3000 ];
      services.coredns = {
        enable = true;
        config = ''
          .:53 {
            forward . 1.1.1.1
          }
        '';
      };
      services.hedgedoc = {
        enable = true;
        configuration = {
          allowAnonymous = true;
          allowFreeURL = true;
          allowGravatar = false;
          allowOrigin = [ "localhost" "pad.hacc.space" "fff-muc.de" ];
          dbURL = "postgres://codimd:codimd@localhost:5432/codimd";
          defaultPermission = "limited";
          domain = "pad.hacc.space";
          host = "0.0.0.0";
          protocolUseSSL = true;
          hsts.preload = false;
          email = false;
          oauth2 = {
            authorizationURL = "https://auth.infra4future.de/auth/realms/forfuture/protocol/openid-connect/auth";
            tokenURL = "https://auth.infra4future.de/auth/realms/forfuture/protocol/openid-connect/token";
            clientID = "codimd";
            clientSecret = "1a730af1-4d6e-4c1d-8f7e-72375c9b8d62";
          };
        };
      };
      systemd.services.hedgedoc.environment = {
        "CMD_OAUTH2_USER_PROFILE_URL" = "https://auth.infra4future.de/auth/realms/forfuture/protocol/openid-connect/userinfo";
        "CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR" = "name";
        "CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR" = "display-name";
        "CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR" = "email";
        "CMD_OAUTH2_PROVIDERNAME" = "Infra4Future";
      };
      services.postgresql = {
        enable = true;
        ensureDatabases = [ "codimd" ];
        ensureUsers = [{
          name = "codimd";
          ensurePermissions = {
            "DATABASE codimd" = "ALL PRIVILEGES";
          };
        }];
      };
    };
  };

  services.nginx.virtualHosts."pad.hacc.space" = {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://192.168.100.3:3000";
      extraConfig = ''
        proxy_pass_request_headers on;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $http_connection;
        add_header Access-Control-Allow-Origin "*";
        proxy_buffering off;
      '';
    };
  };
}

configuration/hosts/hainich/services/docker.nix (new file, 34 lines)

@@ -0,0 +1,34 @@
{ config, lib, pkgs, ... }:

{
  # disable nftables since it breaks shit
  petabyte.nftables.enable = false;
  virtualisation.oci-containers.containers."ghost-waszumfff" = {
    autoStart = true;
    environment = {
      url = "https://waszumfff.4future.dev";
    };
    image = "ghost:alpine";
    ports = [ "127.0.0.1:2368:2368" ];
    volumes = [ "/run/florinori:/var/lib/ghost/content" ];
  };

  fileSystems."/run/florinori" =
    { device = "dpool/k8s/florinori";
      fsType = "zfs";
    };

  services.nginx.virtualHosts."waszumfff.4future.dev" = {
    enableACME = true;
    forceSSL = true;
    locations."/" = {
      proxyPass = "http://127.0.0.1:2368";
      extraConfig = "
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $http_host;
      ";
    };
  };
}

configuration/hosts/hainich/services/engelsystem.nix (new file, 93 lines)

@@ -0,0 +1,93 @@
{ config, lib, pkgs, ... }:

# TODO: Make this confix nix-y, so it doesn't require a metric shitton of
# manual intervention to install
{
  containers.engelsystem = {
    config = { pkgs, lib, config, ... }:
      let
        app = "engelsystem";
        domain = "himmel.hacc.earth";
        dataDir = "/srv/http/${domain}/public";
        engelport-py-pack = python-packages: with pkgs.python38Packages; [
          mysqlclient
        ];
        engelport-py = pkgs.python38.withPackages engelport-py-pack;
      in {
        networking.firewall.enable = false;
        networking.nameservers = ["1.1.1.1" "1.0.0.1"];
        networking.hosts."192.168.100.1" = [ "mail.hacc.space" ];
        services.phpfpm.pools.${app} = {
          user = app;
          settings = {
            "listen.owner" = config.services.nginx.user;
            "pm" = "dynamic";
            "pm.max_children" = 32;
            "pm.max_requests" = 500;
            "pm.start_servers" = 2;
            "pm.min_spare_servers" = 2;
            "pm.max_spare_servers" = 5;
            "php_admin_value[error_log]" = "stderr";
            "php_admin_flag[log_errors]" = true;
            "catch_workers_output" = true;
          };
          phpEnv."PATH" = lib.makeBinPath [ pkgs.php ];
        };
        services.nginx = {
          enable = true;
          virtualHosts.${domain}.locations = {
            "/" = {
              extraConfig = "rewrite ^ /index.php;";
            };
            "/assets" = {
              root = dataDir;
            };
            "/index.php" = {
              root = dataDir;
              extraConfig = ''
                include ${pkgs.nginx}/conf/fastcgi.conf;
                fastcgi_split_path_info ^(.+\.php)(\\/.*)$;
                try_files $fastcgi_script_name =404;
                fastcgi_param PATH_INFO $fastcgi_path_info;
                fastcgi_pass unix:${config.services.phpfpm.pools.${app}.socket};
                fastcgi_intercept_errors on;
              '';
            };
          };
        };
        users.users.${app} = {
          isSystemUser = true;
          createHome = true;
          home = dataDir;
          group = app;
        };
        users.groups.${app} = {};

        services.mysql = {
          enable = true;
          ensureDatabases = [ "engelsystem" ];
          ensureUsers = [{
            name = "engelsystem";
            ensurePermissions."engelsystem.*" = "ALL PRIVILEGES";
          }];
          package = pkgs.mariadb;
        };

        environment.systemPackages = [
          pkgs.php pkgs.php74Packages.composer pkgs.yarn engelport-py
        ];
      };
    privateNetwork = true;
    hostAddress = "192.168.100.1";
    localAddress = "192.168.100.2";
    autoStart = true;
  };
  services.nginx.virtualHosts."himmel.hacc.earth" = {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://192.168.100.2";
      extraConfig = "add_header Host himmel.hacc.earth;";
    };
  };
}

configuration/hosts/hainich/services/funkwhale.nix (new file, 55 lines)

@@ -0,0 +1,55 @@
{ config, lib, pkgs, ... }:

{
  containers.funkwhale = {
    inherit pkgs;
    privateNetwork = true;
    hostAddress = "192.168.100.1";
    localAddress = "192.168.100.4";
    autoStart = true;
    config = { config, lib, pkgs, ... }: {
      imports = [
        ../../../../modules
      ];
      services.coredns = {
        enable = true;
        config = ''
          .:53 {
            forward . 1.1.1.1
          }
        '';
      };
      networking.firewall.enable = false;
      services.funkwhale = {
        enable = true;
        apiIp = "192.168.100.4";
        hostname = "funkwhale.hacc.media";
        protocol = "https";
        defaultFromEmail = "funkwhale@hacc.media";
        api.djangoSecretKey = "TwsgANNKid+HZ0HwhR/FgTcxFIW6sZ8s4n7HxV6zPdU=";
      };
      services.nginx.virtualHosts."funkwhale.hacc.media" = {
        enableACME = lib.mkForce false;
        forceSSL = lib.mkForce false;
      };
    };
  };
  services.nginx.virtualHosts."funkwhale.hacc.media" = {
    forceSSL = true;
    enableACME = true;
    locations."/" = {
      proxyPass = "http://192.168.100.4";
      extraConfig = ''
        proxy_pass_request_headers on;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $http_host;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $http_connection;
        proxy_buffering off;
      '';
    };
  };
}

62
configuration/hosts/hainich/services/gitlab-runner.nix
Normal file
|
@ -0,0 +1,62 @@
|
|||
{config, pkgs, lib, ...}:
|
||||
|
||||
{
|
||||
services.gitlab-runner = {
|
||||
enable = true;
|
||||
concurrent = 4;
|
||||
services = {
|
||||
infra4future = {
|
||||
buildsDir = "/var/lib/gitlab-runner/builds";
|
||||
dockerImage = "nixos/nix";
|
||||
executor = "docker";
|
||||
registrationConfigFile = "/run/gitlab-runner.env";
|
||||
};
|
||||
nix = {
|
||||
registrationConfigFile = "/run/gitlab-runner.env";
|
||||
dockerImage = "alpine";
|
||||
dockerVolumes = [
|
||||
"/nix/store:/nix/store:ro"
|
||||
"/nix/var/nix/db:/nix/var/nix/db:ro"
|
||||
"/nix/var/nix/daemon-socket:/nix/var/nix/daemon-socket:ro"
|
||||
];
|
||||
dockerDisableCache = true;
|
||||
preBuildScript = pkgs.writeScript "setup-container" ''
|
||||
mkdir -p -m 0755 /nix/var/log/nix/drvs
|
||||
mkdir -p -m 0755 /nix/var/nix/gcroots
|
||||
mkdir -p -m 0755 /nix/var/nix/profiles
|
||||
mkdir -p -m 0755 /nix/var/nix/temproots
|
||||
mkdir -p -m 0755 /nix/var/nix/userpool
|
||||
mkdir -p -m 1777 /nix/var/nix/gcroots/per-user
|
||||
mkdir -p -m 1777 /nix/var/nix/profiles/per-user
|
||||
mkdir -p -m 0755 /nix/var/nix/profiles/per-user/root
|
||||
mkdir -p -m 0700 "$HOME/.nix-defexpr"
|
||||
. ${pkgs.nix}/etc/profile.d/nix.sh
|
||||
${pkgs.nix}/bin/nix-env -i ${lib.concatStringsSep " " (with pkgs; [ nix cacert git openssh ])}
|
||||
${pkgs.nix}/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable
|
||||
${pkgs.nix}/bin/nix-channel --update nixpkgs
|
||||
'';
|
||||
environmentVariables = {
|
||||
ENV = "/etc/profile";
|
||||
USER = "root";
|
||||
NIX_REMOTE = "daemon";
|
||||
PATH = "/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:/bin:/sbin:/usr/bin:/usr/sbin";
|
||||
NIX_SSL_CERT_FILE = "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt";
|
||||
};
|
||||
tagList = [ "nix" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.gitlab-runner.serviceConfig = {
|
||||
DynamicUser = lib.mkForce false;
|
||||
User = "gitlab-runner";
|
||||
};
|
||||
|
||||
users.users.gitlab-runner = {
|
||||
home = "/var/lib/gitlab-runner";
|
||||
extraGroups = [ "docker" ];
|
||||
isSystemUser = true;
|
||||
};
|
||||
|
||||
virtualisation.docker.storageDriver = "zfs";
|
||||
}
|
154
configuration/hosts/hainich/services/mail.nix
Normal file
|
@ -0,0 +1,154 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
{
|
||||
imports = let commit = "02a45d9965133434c7b816cab2f47c8a7505e764"; in [
|
||||
(builtins.fetchTarball {
|
||||
url = "https://gitlab.com/simple-nixos-mailserver/nixos-mailserver/-/archive/${commit}/nixos-mailserver-${commit}.tar.gz";
|
||||
sha256 = "04v66z0ijjm8bqpiqmq1aqrqj6r6jjz591lgijmk4frz7lksnz8k";
|
||||
})
|
||||
];
|
||||
|
||||
mailserver = {
|
||||
mailDirectory = "/data/mail";
|
||||
enable = true;
|
||||
fqdn = "mail.hacc.space";
|
||||
domains = [ "hacc.space" "hacc.earth" "4future.dev" "4futu.re" ];
|
||||
|
||||
loginAccounts = {
|
||||
"hexchen@hacc.space" = {
|
||||
hashedPassword = "$6$x9skYtRp4dgxC$1y8gPC2BuVqG3kJVSMGgzZv0Bg1T9qxcnBWLIDbANy1d//SQ23Y7s3IMYcEPd1/l/MYWD9Y/Qse6HbT5w5Xwq/";
|
||||
|
||||
aliases = [
|
||||
"postmaster@hacc.space"
|
||||
"abuse@hacc.space"
|
||||
];
|
||||
};
|
||||
|
||||
"octycs@hacc.space" = {
|
||||
hashedPassword = "$6$KceTivtJ$58jxhYF6ULfivNsb3Z0J7PnGea0Hs2wTWh3c9FrKRIAmuOD96u2IDgZRCn6P5NrXA0BL.n6HC2RS3r.4JnOmg.";
|
||||
|
||||
aliases = [
|
||||
"markus@hacc.space"
|
||||
];
|
||||
};
|
||||
|
||||
"raphael@hacc.space" = {
|
||||
hashedPassword = "$6$QveHpwMcp9mkFVAU$EFuahOrJIxPg.c.WGFHtrP3.onwJYwvP7fiBHHGb9jhosewZ2tEUP.2D3uyDLhd9Cfny6Yp4jDk/Hkjk7/ME1/";
|
||||
};
|
||||
|
||||
"engelsystem@hacc.space" = {
|
||||
hashedPassword = "$6$5cIAEhJ7af7M$eJBPQc3ONd.N3HKPFpxfG7liZbUXPvWuSpWVgeG7rmsG7f7.Zdxtodvt5VaXoA3AEiv3GqcY.gKHISK/Gg0ib/";
|
||||
};
|
||||
|
||||
"schweby@hacc.space" = {
|
||||
hashedPassword = "$6$BpYhwcZNrkLhVqK$6FMqA/vUkdV4GBlHLSqS5DRCb/CaLDNeIsBcZ8G30heytS/tJj2Ag7b1ovSltTA4PUfhee3pJrz1BkwkA93vN1";
|
||||
};
|
||||
|
||||
"zauberberg@hacc.space" = {
|
||||
hashedPassword = "$6$ISAaU8X6D$oGKe9WXDWrRpGzHUTdxrxdtg9zuGOlBMuDc82IZhegpsv1bqd550FhZZrI40IjZTA5Hy2MZ8j/0efpnQ4fOQH0";
|
||||
aliases = [
|
||||
"lukas@hacc.space"
|
||||
];
|
||||
};
|
||||
|
||||
"talx@hacc.space" = {
|
||||
hashedPassword = "$6$0hIKRoMJS./JSE$tXizRgphhNM3ZYx216VdRv1OiyZoYXsjGqSudTDu8vB8eZb03Axi31VKV87RXiEGGixdvTsHEKpx032aOzzt31";
|
||||
};
|
||||
|
||||
"unms@hacc.space" = {
|
||||
hashedPassword = "$6$pYlNP37913$sGE3L722ceP.1Qm5lsffYUN919hPP1xRTrzco3ic3Op21iiknBkOY04eY2l3Um/Bpk/yV89aJD0eaB/5RCbWR1";
|
||||
};
|
||||
|
||||
"noreply@hacc.space" = {
|
||||
hashedPassword = "$6$YsqMoItITZUzI5wo$5Lejf8XBHRx4LW4VuZ9wJCiBbT4kOV/EZaCdWQ07eVIrkRTZwXWZ5zfsh.olXEFwvpNWN.DBnU.dQc.cC0/ra/";
|
||||
};
|
||||
"stuebinm@hacc.space" = {
|
||||
hashedPassword = "$6$mjrMQG5smqLRlm$WzmbiZnGlEXGT7hj/n2qz0nvVzGyZfMToCyLRi0wErfVEHI7y7jtWoHqIWnpcHAM29UocsIFFsUCb3XqQCwwB.";
|
||||
};
|
||||
"newsletter@hacc.space" = {
|
||||
hashedPassword = "$6$f0xKnQxBInd$zbVIi1lTKWauqW.c8sMNLHNwzn81oQrVOiIfJwPa98n9xWz/NkjuWLYuFpK.MSZwNwP7Yv/a/qaOb9v8qv/.N1";
|
||||
};
|
||||
"lenny@hacc.space" = {
|
||||
hashedPassword = "$6$dR.lhYiJDpsR4.dw$n7bCbyTm97v/O8Ue44n58YwOmmct..Gt5TeAmen8C5FWyPTwTh65XCjwc27gNFVGnZLwsRJwMJ.E9D0oJEzUh0";
|
||||
};
|
||||
};
|
||||
|
||||
extraVirtualAliases = {
|
||||
# address = forward address;
|
||||
"info@hacc.space" = [
|
||||
"hexchen@hacc.space"
|
||||
"octycs@hacc.space"
|
||||
"raphael@hacc.space"
|
||||
"schweby@hacc.space"
|
||||
"zauberberg@hacc.space"
|
||||
"stuebinm@hacc.space"
|
||||
"lenny@hacc.space"
|
||||
];
|
||||
"himmel@hacc.space" = [
|
||||
"hexchen@hacc.space"
|
||||
"schweby@hacc.space"
|
||||
"zauberberg@hacc.space"
|
||||
];
|
||||
"admin@hacc.space" = [
|
||||
"hexchen@hacc.space"
|
||||
"schweby@hacc.space"
|
||||
];
|
||||
"voc@hacc.space" = [
|
||||
"hexchen@hacc.space"
|
||||
"schweby@hacc.space"
|
||||
"octycs@hacc.space"
|
||||
"stuebinm@hacc.space"
|
||||
"zauberberg@hacc.space"
|
||||
"lenny@hacc.space"
|
||||
];
|
||||
"vorstand@hacc.space" = [
|
||||
"raphael@hacc.space"
|
||||
"schweby@hacc.space"
|
||||
"zauberberg@hacc.space"
|
||||
];
|
||||
"mitglieder@hacc.space" = [
|
||||
"raphael@hacc.space"
|
||||
"schweby@hacc.space"
|
||||
"zauberberg@hacc.space"
|
||||
"lenny@hacc.space"
|
||||
"octycs@hacc.space"
|
||||
];
|
||||
};
|
||||
|
||||
# Use Let's Encrypt certificates. Note that this needs to set up a stripped
|
||||
# down nginx and opens port 80.
|
||||
certificateScheme = 3;
|
||||
|
||||
# Enable IMAP and POP3
|
||||
enableImap = true;
|
||||
enablePop3 = true;
|
||||
enableImapSsl = true;
|
||||
enablePop3Ssl = true;
|
||||
|
||||
# Enable the ManageSieve protocol
|
||||
enableManageSieve = true;
|
||||
|
||||
# whether to scan inbound emails for viruses (note that this requires at least
|
||||
# 1 Gb RAM for the server. Without virus scanning 256 MB RAM should be plenty)
|
||||
virusScanning = false;
|
||||
};
|
||||
services.postfix.submissionOptions.smtpd_sender_restrictions = "reject_non_fqdn_sender,reject_unknown_sender_domain,permit";
|
||||
services.postfix.virtual = ''
|
||||
@4future.dev @hacc.space
|
||||
@4futu.re @hacc.space
|
||||
@hacc.earth @hacc.space
|
||||
contact@hacc.space info@hacc.space
|
||||
'';
|
||||
|
||||
systemd.services.alps = {
|
||||
enable = true;
|
||||
script = "${pkgs.alps}/bin/alps -theme alps imaps://mail.hacc.space:993 smtps://mail.hacc.space:465";
|
||||
serviceConfig.WorkingDirectory = "${pkgs.alps}/share/alps";
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."mail.hacc.space" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://[::1]:1323";
|
||||
};
|
||||
}
|
26
configuration/hosts/hainich/services/murmur.nix
Normal file
|
@ -0,0 +1,26 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
services.murmur = {
|
||||
enable = true;
|
||||
logDays = -1;
|
||||
welcometext = "Welcome to mumble4future! Brought to you by infra4future";
|
||||
sslKey = "/var/lib/acme/mumble.hacc.space/key.pem";
|
||||
sslCert = "/var/lib/acme/mumble.hacc.space/fullchain.pem";
|
||||
bandwidth = 128000;
|
||||
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ config.services.murmur.port ];
|
||||
networking.firewall.allowedUDPPorts = [ config.services.murmur.port ];
|
||||
|
||||
services.nginx.virtualHosts."mumble.hacc.space" = {
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
locations."/" = {
|
||||
proxyPass = "https://stuebinm.4future.dev/mumble.infra4future.de/";
|
||||
};
|
||||
};
|
||||
# set ACLs so that the murmur user can read the certificates
|
||||
security.acme.certs."mumble.hacc.space".postRun = "setfacl -Rm u:murmur:rX /var/lib/acme/mumble.hacc.space";
|
||||
}
|
61
configuration/hosts/hainich/services/nginx.nix
Normal file
|
@ -0,0 +1,61 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
security.acme.acceptTerms = true;
|
||||
security.acme.email = "info+acme@hacc.space";
|
||||
services.nginx.enable = true;
|
||||
services.nginx.package = pkgs.nginx.override {
|
||||
modules = [ pkgs.nginxModules.rtmp ];
|
||||
};
|
||||
|
||||
# services.nginx.recommendedProxySettings = true;
|
||||
|
||||
services.nginx.virtualHosts = let
|
||||
rc3clustersite = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "https://stuebinm.4future.dev/about-future-website/";
|
||||
};
|
||||
in {
|
||||
"hainich.chaoswit.ch" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
};
|
||||
"hainich.hacc.space" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations = {
|
||||
"/" = {
|
||||
return = "404";
|
||||
};
|
||||
};
|
||||
};
|
||||
"freedom.rc3.io" = rc3clustersite;
|
||||
"future.rc3.io" = rc3clustersite;
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ 1935 ];
|
||||
services.nginx.appendConfig = ''
|
||||
rtmp {
|
||||
server {
|
||||
listen 1935;
|
||||
application cutiestream {
|
||||
live on;
|
||||
allow publish all;
|
||||
allow play all;
|
||||
}
|
||||
application ingest {
|
||||
live on;
|
||||
|
||||
record all;
|
||||
record_path /data/ingest;
|
||||
record_unique on;
|
||||
|
||||
include /var/secrets/ingest.conf;
|
||||
}
|
||||
}
|
||||
}
|
||||
'';
|
||||
|
||||
systemd.services.nginx.serviceConfig.ReadWriteDirectories = "/data/ingest /var/secrets";
|
||||
}
|
34
configuration/hosts/hainich/wireguard.nix
Normal file
|
@ -0,0 +1,34 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
{
|
||||
systemd.services.wireguard-upstream = {
|
||||
wants = [ "wg-upstream-key.service" ];
|
||||
after = [ "wg-upstream-key.service" ];
|
||||
};
|
||||
networking.wireguard.interfaces.upstream = {
|
||||
ips = [ "2a0d:eb04:8:ffff:2::2/128" ];
|
||||
generatePrivateKeyFile = true;
|
||||
privateKeyFile = "/etc/wireguard/upstream.key";
|
||||
listenPort = 51820;
|
||||
peers = [
|
||||
{
|
||||
allowedIPs = [ "::/0" ];
|
||||
endpoint = "103.105.50.220:51823";
|
||||
publicKey = "qL5xKnQ7xLbtTvu0VmLBwHExteJBhmCe5S/0ZoXBeXY=";
|
||||
}
|
||||
];
|
||||
postSetup = ''
|
||||
${pkgs.iproute}/bin/ip addr del dev upstream 2a0d:eb04:8:ffff:2::2/128
|
||||
${pkgs.iproute}/bin/ip addr add dev upstream 2a0d:eb04:8:ffff:2::2/128 peer 2a0d:eb04:8:ffff:2::1/128
|
||||
'';
|
||||
};
|
||||
networking.interfaces.lo.ipv6 = {
|
||||
addresses = [{
|
||||
address = "2a0d:eb04:8:10::1";
|
||||
prefixLength = 128;
|
||||
}];
|
||||
};
|
||||
networking.defaultGateway6 = {
|
||||
address = "2a0d:eb04:8:ffff:2::1";
|
||||
interface = "upstream";
|
||||
};
|
||||
}
|
52
configuration/hosts/nixda/configuration.nix
Normal file
|
@ -0,0 +1,52 @@
|
|||
{ config, pkgs, ... }:
|
||||
|
||||
{
|
||||
imports =
|
||||
[ # Include the results of the hardware scan.
|
||||
./hardware-config.nix
|
||||
../../common
|
||||
../../desktop
|
||||
../../desktop/streaming.nix
|
||||
../../desktop/sway.nix
|
||||
../../desktop/gnome.nix
|
||||
];
|
||||
|
||||
boot.loader.grub ={
|
||||
enable = true;
|
||||
version = 2;
|
||||
efiSupport = true;
|
||||
device = "nodev";
|
||||
};
|
||||
boot.loader.efi = {
|
||||
canTouchEfiVariables = true;
|
||||
efiSysMountPoint = "/boot";
|
||||
};
|
||||
|
||||
hardware.decklink.enable = true;
|
||||
|
||||
networking.hostName = "nixda"; # Define your hostname.
|
||||
|
||||
environment.systemPackages = with pkgs; [ blackmagicDesktopVideo makemkv blender ];
|
||||
|
||||
networking.wg-quick.interfaces.cornbox = {
|
||||
privateKeyFile = "/etc/wireguard/cornbox.key";
|
||||
address = [ "195.39.247.67/28" "2a0f:4ac0:1337::12/64" ];
|
||||
postUp = "ip link set dev cornbox mtu 1400";
|
||||
peers = [
|
||||
{
|
||||
allowedIPs = [ "2a0f:4ac0:1337::/48" "195.39.247.64/27" ];
|
||||
publicKey = "8IWyiQL3wKP9CD/4UdS9b8mcbL67mkUyeSPORgEPvV0=";
|
||||
endpoint = "cornbox.hetzner.chaoswit.ch:51821";
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
# This value determines the NixOS release from which the default
|
||||
# settings for stateful data, like file locations and database versions
|
||||
# on your system were taken. It‘s perfectly fine and recommended to leave
|
||||
# this value at the release version of the first install of this system.
|
||||
# Before changing this value read the documentation for this option
|
||||
# (e.g. man configuration.nix or on https://nixos.org/nixos/options.html).
|
||||
system.stateVersion = "20.09"; # Did you read the comment?
|
||||
|
||||
}
|
31
configuration/hosts/nixda/hardware-config.nix
Normal file
|
@ -0,0 +1,31 @@
|
|||
{ config, lib, pkgs, modulesPath, ... }:
|
||||
|
||||
{
|
||||
imports =
|
||||
[ (modulesPath + "/installer/scan/not-detected.nix")
|
||||
];
|
||||
|
||||
boot.initrd.availableKernelModules = [ "nvme" "ehci_pci" "xhci_pci" "ahci" "usb_storage" "usbhid" "sd_mod" "sr_mod" ];
|
||||
boot.initrd.kernelModules = [ ];
|
||||
boot.kernelModules = [ "kvm-amd" ];
|
||||
boot.extraModulePackages = [ ];
|
||||
|
||||
fileSystems."/" =
|
||||
{ device = "/dev/disk/by-uuid/dfbfee26-c2c0-4c0c-b145-6362c7650ac9";
|
||||
fsType = "btrfs";
|
||||
options = [ "subvol=@nix" ];
|
||||
};
|
||||
|
||||
fileSystems."/home" =
|
||||
{ device = "/dev/disk/by-uuid/dfbfee26-c2c0-4c0c-b145-6362c7650ac9";
|
||||
fsType = "btrfs";
|
||||
options = [ "subvol=@home" ];
|
||||
};
|
||||
|
||||
fileSystems."/boot" =
|
||||
{ device = "/dev/disk/by-uuid/A358-97BC";
|
||||
fsType = "vfat";
|
||||
};
|
||||
|
||||
swapDevices = [ ];
|
||||
}
|
26
configuration/server/cdn/cdn-lb.nix
Normal file
|
@ -0,0 +1,26 @@
|
|||
{config, lib, pkgs, ...}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
./common.nix
|
||||
];
|
||||
services.nginx = {
|
||||
virtualHosts."${config.networking.hostName}.live.hacc.media" = {
|
||||
locations = {
|
||||
"/" = {
|
||||
return = "301 \"https://$cdnhosts$request_uri\"";
|
||||
extraConfig = ''
|
||||
auth_basic off;
|
||||
add_header 'Access-Control-Allow-Origin' '*';
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
appendHttpConfig = ''
|
||||
split_clients "$remote_addr" $cdnhosts {
|
||||
50% "cdn-node-1.live.hacc.media";
|
||||
50% "cdn-node-2.live.hacc.media";
|
||||
}
|
||||
'';
|
||||
};
|
||||
}
|
42
configuration/server/cdn/cdn-master.nix
Normal file
|
@ -0,0 +1,42 @@
|
|||
{config, lib, pkgs, ...}:
|
||||
|
||||
let
|
||||
host-server = "https://rosenbaum.lukas.studio";
|
||||
in {
|
||||
imports = [
|
||||
./common.nix
|
||||
];
|
||||
services.nginx = {
|
||||
virtualHosts."${config.networking.hostName}.live.hacc.media" = {
|
||||
locations = {
|
||||
"~* \\.(m3u8)$" = {
|
||||
|
||||
proxyPass = "${host-server}$request_uri";
|
||||
extraConfig = ''
|
||||
#proxy_cache = off;
|
||||
expires 2s;
|
||||
auth_basic off;
|
||||
'';
|
||||
};
|
||||
"/hls" = {
|
||||
|
||||
proxyPass = "${host-server}$request_uri";
|
||||
extraConfig = ''
|
||||
types {
|
||||
application/vnd.apple.mpegurl m3u8;
|
||||
video/mp2t ts;
|
||||
}
|
||||
proxy_cache hls;
|
||||
proxy_ignore_headers Cache-Control;
|
||||
proxy_cache_valid any 30m;
|
||||
auth_basic off;
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
appendHttpConfig = ''
|
||||
proxy_cache_path /tmp keys_zone=hls:10m max_size=10g inactive=60m use_temp_path=on;
|
||||
resolver 1.1.1.1;
|
||||
'';
|
||||
};
|
||||
}
|
43
configuration/server/cdn/cdn-node.nix
Normal file
|
@ -0,0 +1,43 @@
|
|||
{config, lib, pkgs, ...}:
|
||||
|
||||
{
|
||||
imports = [
|
||||
./common.nix
|
||||
];
|
||||
# Enable nginx service
|
||||
services.nginx = {
|
||||
virtualHosts."${config.networking.hostName}.live.hacc.media" = {
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
# basicAuth = basicAuthLogin;
|
||||
locations = {
|
||||
"~* \\.(m3u8)$" = {
|
||||
proxyPass = "https://cdn-master.live.hacc.media$request_uri";
|
||||
extraConfig = ''
|
||||
#proxy_cache = off;
|
||||
expires 3s;
|
||||
auth_basic off;
|
||||
'';
|
||||
};
|
||||
"/hls" = {
|
||||
proxyPass = "https://cdn-master.live.hacc.media$request_uri";
|
||||
extraConfig = ''
|
||||
types {
|
||||
application/vnd.apple.mpegurl m3u8;
|
||||
video/mp2t ts;
|
||||
}
|
||||
proxy_cache hls;
|
||||
proxy_ignore_headers Cache-Control;
|
||||
proxy_cache_valid any 30m;
|
||||
auth_basic off;
|
||||
'';
|
||||
|
||||
};
|
||||
};
|
||||
};
|
||||
appendHttpConfig = ''
|
||||
proxy_cache_path /tmp keys_zone=hls:10m max_size=10g inactive=60m use_temp_path=on;
|
||||
resolver 1.1.1.1;
|
||||
'';
|
||||
};
|
||||
}
|
61
configuration/server/cdn/common.nix
Normal file
|
@ -0,0 +1,61 @@
|
|||
{config, lib, pkgs, ...}:
|
||||
|
||||
{
|
||||
networking.firewall.allowedTCPPorts = [
|
||||
80 # HTTP
|
||||
443 # HTTPs
|
||||
];
|
||||
|
||||
services.netdata = {
|
||||
enable = true;
|
||||
configText = ''
|
||||
[global]
|
||||
dbengine multihost disk space = 2307
|
||||
'';
|
||||
};
|
||||
|
||||
# Enable nginx service
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
# Use recommended settings
|
||||
# Don't use recommended Proxy settings because it does funky things with the setup
|
||||
recommendedGzipSettings = true;
|
||||
recommendedOptimisation = true;
|
||||
recommendedTlsSettings = true;
|
||||
virtualHosts."${config.networking.hostName}.live.hacc.media" = {
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
# basicAuth = basicAuthLogin;
|
||||
locations = {
|
||||
"/stats" = {
|
||||
return = "301 /stats/";
|
||||
};
|
||||
"~ /stats/(?<ndpath>.*)" = {
|
||||
proxyPass = "http://127.0.0.1:19999/$ndpath$is_args$args";
|
||||
extraConfig = ''
|
||||
proxy_redirect off;
|
||||
proxy_set_header Host $host;
|
||||
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
proxy_set_header X-Forwarded-Server $host;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_http_version 1.1;
|
||||
proxy_pass_request_headers on;
|
||||
proxy_set_header Connection "keep-alive";
|
||||
proxy_store off;
|
||||
|
||||
gzip on;
|
||||
gzip_proxied any;
|
||||
gzip_types *;
|
||||
'';
|
||||
};
|
||||
"/nginx_status" = {
|
||||
extraConfig = ''
|
||||
stub_status;
|
||||
auth_basic off;
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
4
default.nix
Normal file
|
@ -0,0 +1,4 @@
|
|||
{
|
||||
inherit (import ./lib/deploy.nix) deploy;
|
||||
pkgs = import ./pkgs;
|
||||
}
|
|
@ -1,7 +0,0 @@
+++
title = "hacc infra documentation"
page_template = "doc-page.html"
sort_by="title"
+++

10
docs/auth.md
|
@ -1,10 +0,0 @@
+++
title = "Authentication"
categories = [ "services", "uffd" ]
+++

We use [uffd](https://git.cccv.de/uffd/uffd) for our SSO, for better or worse.
Mostly for worse.

@ -1,20 +0,0 @@
+++
title = "Domains"
categories = [ "domains", "meta" ]
+++

Perhaps too many of them.

## Domains

| domain          | mc  | status | date | reseller | owner | custody   |
| :-------------- | :-: | :----: | :--: | :------- | :---- | :-------: |
| 4future.dev     | yes |        |      |          |       | hacc e.V. |
| infra4future.de | yes |        |      |          |       | hacc e.V. |
| hacc.space      | yes |        |      |          |       | hacc e.V. |
| hacc.earth      | yes |        |      |          |       | hacc e.V. |
| hacc.media      | yes |        |      |          |       | hacc e.V. |
| hacc.wiki       | no  |        |      |          |       |           |

mc = managed by Cloudflare
status = (renewal | auto-renewal | expires)

@ -1,16 +0,0 @@
+++
title = "Hostname schema"
+++

[Badass Anarchist Women](https://listverse.com/2018/09/27/10-absolutely-badass-anarchist-women-who-challenged-the-system/)
- keller
- deCleyre
- davidNeel
- leGuin
- [parsons](../parsons)
- ohair
- berneri
- michel
- sanger
- goldman

17
docs/lxc.md
|
@ -1,17 +0,0 @@
+++
title = "LXC"
categories = [ "lxc" ]
+++

Some things don't easily run on NixOS. For these we have LXC containers running
Debian.

Right now, only onlyoffice is left.

## Useful commands
- log in to a container as root with a usable shell:
  `lxc-attach -n <name> -- /usr/bin/sudo -i`
- restarting the keycloak and ldap containers:
  `lxc-stop -n <name> && lxc-start -n <name>`
- restarting their network bridge:
  `systemctl restart lxcbr0-netdev.service`

@ -1,18 +0,0 @@
+++
title = "Rebooting Parsons"
categories = [ "nix" ]
+++

## Check integrity after unexpected shutdown
These steps are only required if the server shut down unexpectedly or you suspect tampering.

TODO

## Unlock full disk encryption
Connect to the server using the command listed in the shared password manager.
Only the Vorstand has access to it!

Enter the passwords for dpool and zroot.

If both are correct, you will be disconnected and the server continues the boot sequence.
The server should be up after about a minute. Please check all services for availability.

@ -1,21 +0,0 @@
+++
title = "Secrets"
categories = [ "services", "sops" ]
+++

## Secret management

We use [sops-nix](https://github.com/Mic92/sops-nix) to manage secrets which we'd
like to have in Git but don't want to be public. Entries in `secrets.yaml` are
encrypted for each of the age keys listed in `.sops.yaml`, which are themselves
derived from ssh keys.

For the initial setup, please take a look at the sops-nix README file.

To edit the secrets file, run `sops secrets.yaml`, which will decrypt the
file & open it in your $EDITOR, then re-encrypt it when you're done.

To add a new key, use `ssh-to-age` to convert your ssh key to age, and add it to
`.sops.yaml`. Then run `sops updatekeys secrets.yaml` to re-encrypt the file for
the new set of keys.
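
A minimal sketch of that key-adding workflow (the key path is an assumption; any
ed25519 ssh key works):

~~~shell
ssh-to-age < ~/.ssh/id_ed25519.pub   # prints the age public key; add it to .sops.yaml
sops updatekeys secrets.yaml         # re-encrypt for the new set of keys
sops secrets.yaml                    # check that you can still decrypt and edit
~~~
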
@ -1,5 +0,0 @@
+++
title = "Services"
sort_by = "title"
page_template = "doc-page.html"
+++
@ -1,19 +0,0 @@
+++
title = "ACME / letsencrypt"
categories = [ "domain", "https", "ssl", "tls", "Certificates" ]
+++

# Usage

We use the ACME module's nginx integration for basically everything. Beware of
rate limits when redeploying lots of things at once! Let's Encrypt is a little
picky about those.

## Workarounds & peculiar configuration choices

Certs live under `/var/lib/acme/`.

If you need to remove a cert for whatever reason, be aware that there is a
hidden `.lego` folder that contains state as well.
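
A hedged sketch of what cleaning up a cert could look like (the domain is a
placeholder and the exact layout under `.lego` differs between NixOS releases,
so double-check the paths on the host before deleting anything):

~~~shell
ls -a /var/lib/acme/                      # note the hidden .lego state directory
sudo rm -r /var/lib/acme/example.hacc.space \
           /var/lib/acme/.lego/example.hacc.space
~~~
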
@ -1,68 +0,0 @@
+++
title = "hedgedoc"
taxonomies.categories = [ "services" ]
+++

hedgedoc was once called codiMD, so container, config and users are still called codimd.

**Do NOT change this** unless you're sure what you're doing.

We have two instances:
- `pad-hacc`/pad.hacc.space is connected to our SSO/uffd
- `pad-i4f`/pad.infra4future.de is not connected to our SSO and meant to be more public

## Basic Troubleshooting

Usually if hedgedoc dies, it's because postgresql wasn't there yet. Just restart
hedgedoc.
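
A minimal sketch of that restart (assuming the systemd unit inside the container
is still named `codimd`; check with `systemctl list-units` if unsure):

~~~shell
sudo nixos-container run codimd -- systemctl restart codimd.service
~~~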

## More Troubles
Log into the container and take a look at the logs:

~~~shell
sudo nixos-container root-login codimd
journalctl -e
~~~

### fixing failed database upgrades

See https://docs.hedgedoc.org/guides/migration-troubleshooting/ (copied below
for convenience):

In some cases, HedgeDoc might apply migrations without correctly saving the
progress. It will then refuse to start with "already exists"-errors like
`ERROR: type "enum_Notes_permission" already exists`.

Get the name of the failing migration and append .js to it. For example, if
you encounter this error:

~~~
== 20180306150303-fix-enum: migrating =======

ERROR: type "enum_Notes_permission" already exists
~~~

the name of the failed migration would be 20180306150303-fix-enum.js.

The SQL statement may look like this:

~~~
INSERT INTO "SequelizeMeta" (name) VALUES ('20180306150303-fix-enum.js');
~~~

Make sure HedgeDoc does not run, and insert the name into the SequelizeMeta table.
Enter the container, switch to the postgres user, open psql and connect to the
codimd database:

~~~shell
su postgres
psql
\l
\c codimd
[ adjusted SQL statement from above ]
\q
~~~

Start HedgeDoc again and observe if it starts correctly. It may be necessary to
repeat this process and insert multiple migrations into the SequelizeMeta table.

@ -1,65 +0,0 @@
+++
title = "mail"
taxonomies.categories = [ "services" ]
+++

Mail is not connected to our SSO!

## adding a mail account
- We use `@hacc.space` for our mails
- `@infra4future.de` is reserved for services; old user accounts will be
  forwarded & logins disabled
- choose a name (no aliases or other names can be the same)
- generate a sha-512 password hash with `mkpasswd -m sha-512` - **never add an
  unhashed password!**
- add your account to `loginAccounts =` in `//parsons/mail.nix`
- build and redeploy parsons

**example:**
```
"zwoelfontheshelf@hacc.space" = {
  hashedPassword = "$6$ISAaU8X6D$oGKe9WXDWrRpGzHUEdxrxdtgvzuGOkBMuDc82IZhegpsv1bqd550FhZZrI40IjZTA5Hy2MZ8j/0efpnQ4fOQH0";
};
```

## adding to a forward address
- add the mail address to the corresponding `extraVirtualAliases =`
- build and redeploy parsons

## adding a forward address
- add the address to `extraVirtualAliases =`
- add the addresses it should forward to
- build and redeploy parsons

**example:**
```
"himmel@hacc.space" = [
  "hexchen@hacc.space"
  "zauberberg@hacc.space"
];
```

## sending & receiving mail

### as a user
- Your mail client should auto-configure correctly

~~~
mailserver: mail.hacc.space (everywhere)
username: $your_mail_address
sending via smtp: port 587 or 465
receiving
  imap: port 993
TLS and STARTTLS are supported
~~~

- You can send mail as yourself and as any alias you receive mail from. Set up a second identity in your e-mail client.

### as an application
- mailserver: `mail.hacc.space`
- Do **not** use port 25. It's for server-to-server communication only.
- Use smtp ports `587` or `465` (see the sketch below)
- enable TLS if possible
- only send mail from `noreply@infra4future.de`
- Password is somewhere (TODO!)
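
A hedged sketch of sending via the submission port from an application, here
with curl (the message file and the password variable are placeholders):

~~~shell
# mail.txt must contain the full message, including From:/To:/Subject: headers
curl --ssl-reqd --url 'smtp://mail.hacc.space:587' \
     --user "noreply@infra4future.de:$SMTP_PASSWORD" \
     --mail-from noreply@infra4future.de \
     --mail-rcpt someone@example.org \
     --upload-file mail.txt
~~~
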
@ -1,40 +0,0 @@
+++
title = "mumble"
taxonomies.categories = [ "mumble" ]
+++

[official documentation](https://wiki.mumble.info/wiki/Main_Page)

Mumble's server is called murmur, but the naming is inconsistent. Sometimes
it's also just called mumble server.

# Usage

## registration
Users need to be registered to join any other channel than public.
An already registered user has to register them with the server.
1. right click on the username
2. choose register in the menu. Done.

## restricted channels
Every channel in the hacc category except for plenum can only be accessed by
members of the hacc group.

## adding users to a group
Only admins can edit groups, and only registered users can be added to groups.
1. right click on the Root channel
2. select Edit...
3. in Groups, select $groupname
4. make the change you want to make
5. click "OK"

# Config details
- the server is not registered with mumble & not on the public server list
- the bitrate is set to 128kb/s; otherwise the client would complain that the
  server bitrate is less than the (default) bitrate configured in its local settings

# Hacks
- murmur needs a TLS cert, which we get via the ACME module
- there's a funny group setup so that hopefully murmurd can read the cert
- this seems to work fine now, but was some source of trouble in the past

@ -1,18 +0,0 @@
+++
title = "$Service Name"
draft = true ## Remove this line to make file appear on website
+++

<general information & pointers to official documentation>

# Usage
<usage from an admin's perspective>

# Config Notes
<what should one keep in mind when reading the nix file?>

## Updating
<anything to keep in mind?>

# Hacks
<ugly things which might break or cause general ???? states>

@ -1,24 +0,0 @@
+++
title = "Use ZFS snapshot"
taxonomies.categories = [ "zfs", "snapshot", "filesystem", "backup", "update", "upgrade" ]
+++

## Make a ZFS snapshot
~~~shell
sudo zfs snapshot zroot/safe/persist@<name>
~~~

## Rollback

### single files
The snapshots can be accessed under `<mountpoint>/.zfs/snapshot/...`
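
For example, to copy back a single file (a sketch; `/persist` is assumed to be
the mountpoint of the dataset above, so adjust the paths to your case):

~~~shell
cp -a /persist/.zfs/snapshot/<name>/path/to/file /persist/path/to/file
~~~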

### fully
~~~shell
sudo zfs rollback zroot/safe/persist@<name>
~~~

## Delete a ZFS snapshot
~~~shell
sudo zfs destroy zroot/safe/persist@<name>
~~~

223
flake.lock
|
@ -1,223 +0,0 @@
|
|||
{
|
||||
"nodes": {
|
||||
"blobs": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1604995301,
|
||||
"narHash": "sha256-wcLzgLec6SGJA8fx1OEN1yV/Py5b+U5iyYpksUY/yLw=",
|
||||
"owner": "simple-nixos-mailserver",
|
||||
"repo": "blobs",
|
||||
"rev": "2cccdf1ca48316f2cfd1c9a0017e8de5a7156265",
|
||||
"type": "gitlab"
|
||||
},
|
||||
"original": {
|
||||
"owner": "simple-nixos-mailserver",
|
||||
"repo": "blobs",
|
||||
"type": "gitlab"
|
||||
}
|
||||
},
|
||||
"deploy-rs": {
|
||||
"inputs": {
|
||||
"flake-compat": "flake-compat",
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"utils": "utils"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1727447169,
|
||||
"narHash": "sha256-3KyjMPUKHkiWhwR91J1YchF6zb6gvckCAY1jOE+ne0U=",
|
||||
"owner": "serokell",
|
||||
"repo": "deploy-rs",
|
||||
"rev": "aa07eb05537d4cd025e2310397a6adcedfe72c76",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "serokell",
|
||||
"repo": "deploy-rs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1696426674,
|
||||
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixos-mailserver": {
|
||||
"inputs": {
|
||||
"blobs": "blobs",
|
||||
"flake-compat": [
|
||||
"deploy-rs",
|
||||
"flake-compat"
|
||||
],
|
||||
"nixpkgs": [
|
||||
"nixpkgs-unstable"
|
||||
],
|
||||
"nixpkgs-24_05": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"utils": [
|
||||
"deploy-rs",
|
||||
"utils"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1718084203,
|
||||
"narHash": "sha256-Cx1xoVfSMv1XDLgKg08CUd1EoTYWB45VmB9XIQzhmzI=",
|
||||
"owner": "simple-nixos-mailserver",
|
||||
"repo": "nixos-mailserver",
|
||||
"rev": "29916981e7b3b5782dc5085ad18490113f8ff63b",
|
||||
"type": "gitlab"
|
||||
},
|
||||
"original": {
|
||||
"owner": "simple-nixos-mailserver",
|
||||
"ref": "nixos-24.05",
|
||||
"repo": "nixos-mailserver",
|
||||
"type": "gitlab"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1730066105,
|
||||
"narHash": "sha256-Amh10U62W2wUdJ+5B5uZlqOmz+McBEBgou11Q0ki+WI=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "ef498e16f8a10e92d559e1f6e01412444acefaff",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nixpkgs",
|
||||
"ref": "nixos-24.05-small",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"nixpkgs-oldstable": {
|
||||
"locked": {
|
||||
"lastModified": 1678761643,
|
||||
"narHash": "sha256-tapXZvg6Kg5Fm7Fm6i+7cRC5Exp2lX7cgMrqsfrGhuc=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "c4aec3c021620d98861639946123214207e98344",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "c4aec3c021620d98861639946123214207e98344",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-unstable": {
|
||||
"locked": {
|
||||
"lastModified": 1730069753,
|
||||
"narHash": "sha256-ekaRUJhg5cnsJCwHTEGXnuAU9eD0NP2d85AYJh3cy8I=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "e4735dbdda8288aef24141f3ae8848a14f06fe08",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "nixpkgs",
|
||||
"ref": "nixos-unstable-small",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"deploy-rs": "deploy-rs",
|
||||
"nixos-mailserver": "nixos-mailserver",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"nixpkgs-oldstable": "nixpkgs-oldstable",
|
||||
"nixpkgs-unstable": "nixpkgs-unstable",
|
||||
"sops-nix": "sops-nix",
|
||||
"tracktrain": "tracktrain"
|
||||
}
|
||||
},
|
||||
"sops-nix": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs-unstable"
|
||||
],
|
||||
"nixpkgs-stable": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1729999681,
|
||||
"narHash": "sha256-qm0uCtM9bg97LeJTKQ8dqV/FvqRN+ompyW4GIJruLuw=",
|
||||
"owner": "Mic92",
|
||||
"repo": "sops-nix",
|
||||
"rev": "1666d16426abe79af5c47b7c0efa82fd31bf4c56",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "Mic92",
|
||||
"repo": "sops-nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"tracktrain": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1720213096,
|
||||
"narHash": "sha256-GrSXD6WvyiXcHx1s+48PEZVn/MTtBJAXpgds+NdEL2g=",
|
||||
"ref": "main",
|
||||
"rev": "2943327863bfe5c6e793e5c40e473a2755d45642",
|
||||
"revCount": 126,
|
||||
"type": "git",
|
||||
"url": "https://stuebinm.eu/git/tracktrain"
|
||||
},
|
||||
"original": {
|
||||
"ref": "main",
|
||||
"type": "git",
|
||||
"url": "https://stuebinm.eu/git/tracktrain"
|
||||
}
|
||||
},
|
||||
"utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1701680307,
|
||||
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
92
flake.nix
|
@ -1,92 +0,0 @@
|
|||
{
|
||||
description = "hacc infra stuff";
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "nixpkgs/nixos-24.05-small";
|
||||
nixpkgs-unstable.url = "nixpkgs/nixos-unstable-small";
|
||||
nixpkgs-oldstable.url = "github:/NixOS/nixpkgs?rev=c4aec3c021620d98861639946123214207e98344";
|
||||
|
||||
nixos-mailserver.url = "gitlab:simple-nixos-mailserver/nixos-mailserver/nixos-24.05";
|
||||
tracktrain.url = "git+https://stuebinm.eu/git/tracktrain?ref=main";
|
||||
tracktrain.flake = false;
|
||||
|
||||
deploy-rs.url = "github:serokell/deploy-rs";
|
||||
deploy-rs.inputs.nixpkgs.follows = "nixpkgs";
|
||||
sops-nix.url = "github:Mic92/sops-nix";
|
||||
sops-nix.inputs.nixpkgs-stable.follows = "nixpkgs";
|
||||
sops-nix.inputs.nixpkgs.follows = "nixpkgs-unstable";
|
||||
|
||||
# these exist mostly to make the flake.lock somewhat more human-friendly
|
||||
# note that in theory doing this might break things, but it seems fairly unlikely
|
||||
nixos-mailserver.inputs = {
|
||||
"nixpkgs-24_05".follows = "nixpkgs";
|
||||
nixpkgs.follows = "nixpkgs-unstable";
|
||||
utils.follows = "/deploy-rs/utils";
|
||||
flake-compat.follows = "/deploy-rs/flake-compat";
|
||||
};
|
||||
};
|
||||
|
||||
outputs = { self, nixpkgs, deploy-rs, sops-nix, ... }@inputs:
|
||||
let modules = {
|
||||
bindMounts = import ./modules/bindmounts.nix;
|
||||
nopersist = import ./modules/nopersist.nix;
|
||||
encboot = import ./modules/encboot.nix;
|
||||
};
|
||||
profiles = {
|
||||
container = import ./modules/container-profile.nix;
|
||||
};
|
||||
pkgs = import ./pkgs {
|
||||
sources = inputs;
|
||||
system = "x86_64-linux";
|
||||
config.allowUnfree = true;
|
||||
config.permittedInsecurePackages = [ "nextcloud-27.1.11" ];
|
||||
};
|
||||
in {
|
||||
nixosConfigurations.parsons = nixpkgs.lib.nixosSystem {
|
||||
system = "x86_64-linux";
|
||||
modules = [
|
||||
./parsons/configuration.nix
|
||||
./modules/buildinfo.nix
|
||||
./modules/containers.nix
|
||||
sops-nix.nixosModules.sops
|
||||
{ nixpkgs.pkgs = pkgs; }
|
||||
];
|
||||
specialArgs = {
|
||||
sources = inputs;
|
||||
inherit modules profiles;
|
||||
inherit (nixpkgs.lib) nixosSystem;
|
||||
};
|
||||
};
|
||||
|
||||
deploy.nodes.parsons = {
|
||||
hostname = "parsons";
|
||||
profiles.system = {
|
||||
user = "root";
|
||||
autoRollback = false;
|
||||
path = deploy-rs.lib.x86_64-linux.activate.nixos
|
||||
self.nixosConfigurations.parsons;
|
||||
};
|
||||
};
|
||||
|
||||
# This is highly advised, and will prevent many possible mistakes
|
||||
checks = builtins.mapAttrs
|
||||
(system: deployLib: deployLib.deployChecks self.deploy)
|
||||
deploy-rs.lib;
|
||||
|
||||
apps.x86_64-linux =
|
||||
let
|
||||
mkApp = pkg: {
|
||||
type = "app";
|
||||
program = pkgs.lib.getExe pkg;
|
||||
};
|
||||
websites = pkgs.lib.mapAttrs (name: mkApp)
|
||||
self.nixosConfigurations.parsons.config.hacc.websites.builders;
|
||||
in
|
||||
{ docs = websites."docs.hacc.space"; } // websites;
|
||||
|
||||
packages.x86_64-linux = {
|
||||
inherit (pkgs) mattermost;
|
||||
};
|
||||
};
|
||||
|
||||
}
|
76
lib/deploy.nix
Normal file
|
@ -0,0 +1,76 @@
|
|||
let
|
||||
pkgs = import <nixpkgs> {};
|
||||
lib = pkgs.lib;
|
||||
|
||||
hosts = import ../configuration/hosts;
|
||||
nixosHosts = lib.filterAttrs (name: host: host ? ssh) hosts;
|
||||
|
||||
allGroups = lib.unique (
|
||||
lib.flatten (
|
||||
lib.mapAttrsToList (
|
||||
name: host: host.groups
|
||||
) hosts
|
||||
)
|
||||
);
|
||||
|
||||
hostsInGroup = group:
|
||||
lib.filterAttrs (
|
||||
k: v: builtins.elem group v.groups
|
||||
) hosts;
|
||||
|
||||
hostsInAllGroups = lib.listToAttrs (
|
||||
map (
|
||||
group: lib.nameValuePair group (
|
||||
lib.attrNames (hostsInGroup group)
|
||||
)
|
||||
) allGroups );
|
||||
|
||||
mkDeploy = hostnames: pkgs.writeScript "deploy-${lib.concatStringsSep "-" hostnames}" ''
|
||||
#!${pkgs.stdenv.shell}
|
||||
set -e -o pipefail
|
||||
export PATH=/run/wrappers/bin/:${with pkgs; lib.makeBinPath [
|
||||
coreutils
|
||||
openssh
|
||||
nix
|
||||
gnutar
|
||||
findutils
|
||||
nettools
|
||||
gzip
|
||||
git
|
||||
]}
|
||||
|
||||
MODE=$1
|
||||
shift || true
|
||||
ARGS=$@
|
||||
|
||||
[ "$MODE" == "" ] && MODE="switch"
|
||||
|
||||
${lib.concatMapStrings (hostname: let
|
||||
hostAttrs = nixosHosts.${hostname};
|
||||
nixosSystem = (import <nixpkgs/nixos/lib/eval-config.nix> {
|
||||
modules = [
|
||||
"${toString ../configuration}/hosts/${hostname}/configuration.nix"
|
||||
];
|
||||
system = if hostAttrs ? system then hostAttrs.system else "x86_64-linux";
|
||||
}).config.system.build.toplevel;
|
||||
in ''
|
||||
(
|
||||
echo "deploying ${hostname}..."
|
||||
nix copy --no-check-sigs -s --to ssh://${hostAttrs.ssh.host} ${nixosSystem}
|
||||
ssh $NIX_SSHOPTS ${hostAttrs.ssh.host} "sudo nix-env -p /nix/var/nix/profiles/system -i ${nixosSystem}"
|
||||
ssh $NIX_SSHOPTS ${hostAttrs.ssh.host} "sudo /nix/var/nix/profiles/system/bin/switch-to-configuration $MODE $ARGS"
|
||||
) &
|
||||
PID_LIST+=" $!"
|
||||
'') hostnames}
|
||||
|
||||
echo "deploys started, waiting for them to finish..."
|
||||
|
||||
trap "kill $PID_LIST" SIGINT
|
||||
wait $PID_LIST
|
||||
'';
|
||||
|
||||
in {
|
||||
deploy = (lib.mapAttrs (hostname: hostAttrs: mkDeploy [ hostname ]) nixosHosts)
|
||||
// (lib.mapAttrs (group: hosts: mkDeploy hosts) hostsInAllGroups)
|
||||
// { all = mkDeploy (lib.attrNames nixosHosts); };
|
||||
}
|
|
@ -1,28 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let cfg = config.hacc;
|
||||
|
||||
in {
|
||||
|
||||
options.hacc.bindMounts = mkOption {
|
||||
type = types.attrsOf types.str;
|
||||
default = { };
|
||||
example = { "/etc/asdf" = "/persist/asdf"; };
|
||||
};
|
||||
options.hacc.bindToPersist = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
example = [ "postgres" ];
|
||||
};
|
||||
|
||||
config.fileSystems = mapAttrs (_: device: {
|
||||
inherit device;
|
||||
options = [ "bind" ];
|
||||
}) cfg.bindMounts;
|
||||
|
||||
config.hacc.bindMounts = listToAttrs
|
||||
(map (name: { inherit name; value = "/persist${name}"; })
|
||||
cfg.bindToPersist);
|
||||
}
|
|
@ -1,29 +0,0 @@
|
|||
{ config, lib, pkgs, sources, ... }:
|
||||
|
||||
let
|
||||
self = sources.self;
|
||||
|
||||
formatDate = date: with lib.strings;
|
||||
let
|
||||
year = substring 0 4 date;
|
||||
month = substring 4 2 date;
|
||||
day = substring 6 2 date;
|
||||
hour = substring 8 2 date;
|
||||
minute = substring 10 2 date;
|
||||
second = substring 12 2 date;
|
||||
in
|
||||
"${year}-${month}-${day} ${hour}:${minute}:${second} UTC";
|
||||
in
|
||||
{
|
||||
system.nixos.label = "${config.system.nixos.release}-haccfiles-${self.shortRev or self.dirtyShortRev}";
|
||||
users.motd = ''
|
||||
Welcome to ${config.networking.hostName}, running NixOS ${config.system.nixos.release}!
|
||||
Built from haccfiles ${self.rev or self.dirtyRev}.
|
||||
Last commit was at ${formatDate self.lastModifiedDate}.
|
||||
${if self ? dirtyRev then "\nPlease remember to commit your changes.\n" else ""}
|
||||
'';
|
||||
|
||||
# used by monit
|
||||
environment.etc."haccfiles-commit".text = self.rev or self.dirtyRev;
|
||||
environment.etc."haccfiles-timestamp".text = builtins.toString self.lastModified;
|
||||
}
|
|
@ -1,18 +0,0 @@
|
|||
{ lib, ...}:
|
||||
|
||||
{
|
||||
boot.isContainer = true;
|
||||
networking.useDHCP = false;
|
||||
users.users.root.hashedPassword = "";
|
||||
networking.firewall.enable = false;
|
||||
services.coredns = {
|
||||
enable = true;
|
||||
config = ''
|
||||
.:53 {
|
||||
forward . 1.1.1.1
|
||||
}
|
||||
'';
|
||||
};
|
||||
|
||||
system.stateVersion = lib.mkDefault "21.05";
|
||||
}
|
|
@ -1,95 +0,0 @@
|
|||
{ config, lib, pkgs, modules, profiles, sources, nixosSystem, ... }:
|
||||
|
||||
let
|
||||
mkIPv4 = index: local:
|
||||
"192.168.${if local then "100" else "101"}.${toString index}";
|
||||
mkIPv6 = index: local:
|
||||
"fd00::${if local then "100" else "101"}:${toString index}";
|
||||
|
||||
evalConfig = nixosConfig: (nixosSystem {
|
||||
inherit (config.nixpkgs) system;
|
||||
modules = [
|
||||
nixosConfig
|
||||
modules.nopersist
|
||||
profiles.container
|
||||
{ nixpkgs.pkgs = lib.mkForce pkgs; }
|
||||
];
|
||||
specialArgs = {
|
||||
inherit modules sources;
|
||||
};
|
||||
}).config.system.build.toplevel;
|
||||
|
||||
in {
|
||||
options.hacc.containers = with lib.options;
|
||||
mkOption {
|
||||
description = ''
|
||||
hacc-specific containers. These are a thin wrapper around "normal" nixos containers:
|
||||
- they automatically get an IPv4/IPv6 address assigned
|
||||
(note that these are not guaranteed to be stable across config changes,
|
||||
so please use {option}`containers.<name>.hostAddress` & friends to
|
||||
reference them elsewhere)
|
||||
- they set a couple default options (e.g. ephemeral, autoStart, privateNetwork)
|
||||
- they are evaluated with our own version of {nix}`evalConfig`, which includes a
|
||||
couple more modules by default, uses our version of `nixpkgs`, and includes the
|
||||
{nix}`profiles.containers` profile setting sane defaults for containers.
|
||||
'';
|
||||
default = { };
|
||||
type = with lib.types;
|
||||
types.attrsOf (types.submodule {
|
||||
options = {
|
||||
bindToPersist = mkOption {
|
||||
default = true;
|
||||
type = types.bool;
|
||||
description =
|
||||
"Whether to mount /persist/containers/<name> at /persist into this container.";
|
||||
};
|
||||
|
||||
bindSecrets = mkOption {
|
||||
default = false;
|
||||
type = types.bool;
|
||||
description =
|
||||
"Whether to mount /run/secrets/<name> at /secrets into this container.";
|
||||
};
|
||||
|
||||
config = mkOption {
|
||||
type = types.unspecified;
|
||||
description =
|
||||
"The container's config, to be evaluated with our own {nix}`evalConfig`.";
|
||||
};
|
||||
};
|
||||
});
|
||||
};
|
||||
|
||||
# wrapped into imap1, which enumerates the containers; IP addresses are then
|
||||
# simply assigned based on the order the containers are in the list.
|
||||
config.containers = lib.mkMerge (lib.imap1
|
||||
(index: { name, value }: let container = value; in {
|
||||
${name} = {
|
||||
hostAddress = mkIPv4 index false;
|
||||
localAddress = mkIPv4 index true;
|
||||
hostAddress6 = mkIPv6 index false;
|
||||
localAddress6 = mkIPv6 index true;
|
||||
|
||||
privateNetwork = true;
|
||||
autoStart = true;
|
||||
ephemeral = true;
|
||||
|
||||
bindMounts = lib.mkMerge [
|
||||
(lib.mkIf container.bindToPersist {
|
||||
"/persist" = {
|
||||
hostPath = "/persist/containers/${name}";
|
||||
isReadOnly = false;
|
||||
};
|
||||
})
|
||||
(lib.mkIf container.bindSecrets {
|
||||
"/secrets" = {
|
||||
hostPath = "/run/secrets/${name}";
|
||||
isReadOnly = true;
|
||||
};
|
||||
})
|
||||
];
|
||||
|
||||
path = evalConfig container.config;
|
||||
};
|
||||
}) (lib.attrsToList config.hacc.containers));
|
||||
}
|
|
@ -1,7 +1,13 @@
|
|||
{ ... }:
|
||||
|
||||
{
|
||||
let
|
||||
immaeNix = fetchGit {
|
||||
url = "https://git.immae.eu/perso/Immae/Config/Nix.git";
|
||||
rev = "7ad4966f41db0669a77c7a6ee7f87f0d4e586b0c";
|
||||
};
|
||||
in {
|
||||
imports = [
|
||||
./websites.nix
|
||||
"${immaeNix}/modules/webapps/peertube.nix"
|
||||
./funkwhale
|
||||
];
|
||||
}
|
||||
|
|
|
@ -1,45 +0,0 @@
|
|||
{ config, pkgs, lib, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let cfg = config.hacc.encboot;
|
||||
|
||||
in {
|
||||
options = {
|
||||
hacc.encboot = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
networkDrivers = mkOption { type = with types; listOf str; };
|
||||
dataset = mkOption {
|
||||
type = types.str;
|
||||
default = "zroot";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
boot.initrd.kernelModules = cfg.networkDrivers;
|
||||
|
||||
boot.initrd.network = {
|
||||
enable = true;
|
||||
ssh = {
|
||||
enable = true;
|
||||
port = 2222;
|
||||
authorizedKeys = with lib;
|
||||
concatLists (mapAttrsToList (name: user:
|
||||
if elem "wheel" user.extraGroups then
|
||||
user.openssh.authorizedKeys.keys
|
||||
else
|
||||
[ ]) config.users.users);
|
||||
hostKeys = [ /etc/ssh/encboot_host ];
|
||||
};
|
||||
|
||||
postCommands = ''
|
||||
zpool import ${cfg.dataset}
|
||||
echo "zfs load-key -a; killall zfs && exit" >> /root/.profile
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
563
modules/funkwhale/default.nix
Normal file
|
@ -0,0 +1,563 @@
|
|||
{config, lib, pkgs, ...}:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
django-cacheops = with final; with pkgs.python3.pkgs; ( buildPythonPackage rec {
|
||||
pname = "django-cacheops";
|
||||
version = "5.1";
|
||||
|
||||
src = fetchPypi {
|
||||
inherit pname version;
|
||||
sha256 = "sha256-1YUc178whzhKH87PqN3bj1UDDu39b98SciW3W8oPmd0=";
|
||||
};
|
||||
propagatedBuildInputs = [ django redis six funcy ];
|
||||
doCheck = false;
|
||||
});
|
||||
pythonEnv = (pkgs.python3.override {
|
||||
packageOverrides = self: super: rec {
|
||||
django = self.django_2_2;
|
||||
};
|
||||
}).withPackages (ps: [
|
||||
django-cacheops
|
||||
ps.aioredis
|
||||
ps.aiohttp
|
||||
ps.arrow
|
||||
ps.autobahn
|
||||
ps.av
|
||||
ps.bleach
|
||||
ps.boto3
|
||||
ps.celery
|
||||
ps.channels
|
||||
ps.channels-redis
|
||||
ps.click
|
||||
ps.django
|
||||
ps.django-allauth
|
||||
ps.django-auth-ldap
|
||||
ps.django-oauth-toolkit
|
||||
ps.django-cleanup
|
||||
ps.django-cors-headers
|
||||
ps.django-dynamic-preferences
|
||||
ps.django_environ
|
||||
ps.django-filter
|
||||
ps.django_redis
|
||||
ps.django-rest-auth
|
||||
ps.djangorestframework
|
||||
ps.djangorestframework-jwt
|
||||
ps.django-storages
|
||||
ps.django_taggit
|
||||
ps.django-versatileimagefield
|
||||
ps.feedparser
|
||||
ps.gunicorn
|
||||
ps.kombu
|
||||
ps.ldap
|
||||
ps.markdown
|
||||
ps.mutagen
|
||||
ps.musicbrainzngs
|
||||
ps.pillow
|
||||
ps.pendulum
|
||||
ps.persisting-theory
|
||||
ps.psycopg2
|
||||
ps.pyacoustid
|
||||
ps.pydub
|
||||
ps.PyLD
|
||||
ps.pymemoize
|
||||
ps.pyopenssl
|
||||
ps.python_magic
|
||||
ps.pytz
|
||||
ps.redis
|
||||
ps.requests
|
||||
ps.requests-http-signature
|
||||
ps.service-identity
|
||||
ps.unidecode
|
||||
ps.unicode-slugify
|
||||
ps.uvicorn
|
||||
ps.watchdog
|
||||
]);
|
||||
cfg = config.services.funkwhale;
|
||||
databasePassword = if (cfg.database.passwordFile != null)
|
||||
then builtins.readFile cfg.database.passwordFile
|
||||
else cfg.database.password;
|
||||
databaseUrl = if (cfg.database.createLocally && cfg.database.socket != null)
|
||||
then "postgresql:///${cfg.database.name}?host=${cfg.database.socket}"
|
||||
else "postgresql://${cfg.database.user}:${databasePassword}@${cfg.database.host}:${toString cfg.database.port}/${cfg.database.name}";
|
||||
|
||||
funkwhaleEnvironment = [
|
||||
"FUNKWHALE_URL=${cfg.hostname}"
|
||||
"FUNKWHALE_HOSTNAME=${cfg.hostname}"
|
||||
"FUNKWHALE_PROTOCOL=${cfg.protocol}"
|
||||
"EMAIL_CONFIG=${cfg.emailConfig}"
|
||||
"DEFAULT_FROM_EMAIL=${cfg.defaultFromEmail}"
|
||||
"REVERSE_PROXY_TYPE=nginx"
|
||||
"DATABASE_URL=${databaseUrl}"
|
||||
"CACHE_URL=redis://localhost:${toString config.services.redis.port}/0"
|
||||
"MEDIA_ROOT=${cfg.api.mediaRoot}"
|
||||
"STATIC_ROOT=${cfg.api.staticRoot}"
|
||||
"DJANGO_SECRET_KEY=${cfg.api.djangoSecretKey}"
|
||||
"RAVEN_ENABLED=${boolToString cfg.enableRaven}"
|
||||
"RAVEN_DSN=${cfg.ravenDsn}"
|
||||
"MUSIC_DIRECTORY_PATH=${cfg.musicDirectoryPath}"
|
||||
"MUSIC_DIRECTORY_SERVE_PATH=${cfg.musicDirectoryPath}"
|
||||
"FUNKWHALE_FRONTEND_PATH=${cfg.dataDir}/front/dist"
|
||||
];
|
||||
funkwhaleEnvFileData = builtins.concatStringsSep "\n" funkwhaleEnvironment;
|
||||
funkwhaleEnvScriptData = builtins.concatStringsSep " " funkwhaleEnvironment;
|
||||
|
||||
funkwhaleEnvFile = pkgs.writeText "funkwhale.env" funkwhaleEnvFileData;
|
||||
funkwhaleEnv = {
|
||||
ENV_FILE = "${funkwhaleEnvFile}";
|
||||
};
|
||||
in
|
||||
{
|
||||
|
||||
options = {
|
||||
services.funkwhale = {
|
||||
enable = mkEnableOption "funkwhale";
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "funkwhale";
|
||||
description = "User under which Funkwhale is ran.";
|
||||
};
|
||||
|
||||
group = mkOption {
|
||||
type = types.str;
|
||||
default = "funkwhale";
|
||||
description = "Group under which Funkwhale is ran.";
|
||||
};
|
||||
|
||||
database = {
|
||||
host = mkOption {
|
||||
type = types.str;
|
||||
default = "localhost";
|
||||
description = "Database host address.";
|
||||
};
|
||||
|
||||
port = mkOption {
|
||||
type = types.int;
|
||||
default = 5432;
|
||||
defaultText = "5432";
|
||||
description = "Database host port.";
|
||||
};
|
||||
|
||||
name = mkOption {
|
||||
type = types.str;
|
||||
default = "funkwhale";
|
||||
description = "Database name.";
|
||||
};
|
||||
|
||||
user = mkOption {
|
||||
type = types.str;
|
||||
default = "funkwhale";
|
||||
description = "Database user.";
|
||||
};
|
||||
|
||||
password = mkOption {
|
||||
type = types.str;
|
||||
default = "";
|
||||
description = ''
|
||||
The password corresponding to <option>database.user</option>.
|
||||
Warning: this is stored in cleartext in the Nix store!
|
||||
Use <option>database.passwordFile</option> instead.
|
||||
'';
|
||||
};
|
||||
|
||||
passwordFile = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = null;
|
||||
example = "/run/keys/funkwhale-dbpassword";
|
||||
description = ''
|
||||
A file containing the password corresponding to
|
||||
<option>database.user</option>.
|
||||
'';
|
||||
};
|
||||
|
||||
socket = mkOption {
|
||||
type = types.nullOr types.path;
|
||||
default = "/run/postgresql";
|
||||
defaultText = "/run/postgresql";
|
||||
example = "/run/postgresql";
|
||||
description = "Path to the unix socket file to use for authentication for local connections.";
|
||||
};
|
||||
|
||||
createLocally = mkOption {
|
||||
type = types.bool;
|
||||
default = true;
|
||||
description = "Create the database and database user locally.";
|
||||
};
|
||||
};
|
||||
|
||||
dataDir = mkOption {
|
||||
type = types.str;
|
||||
default = "/srv/funkwhale";
|
||||
description = ''
|
||||
Where to keep the funkwhale data.
|
||||
'';
|
||||
};
|
||||
|
||||
apiIp = mkOption {
|
||||
type = types.str;
|
||||
default = "127.0.0.1";
|
||||
description = ''
|
||||
Funkwhale API IP.
|
||||
'';
|
||||
};
|
||||
|
||||
webWorkers = mkOption {
|
||||
type = types.int;
|
||||
default = 1;
|
||||
description = ''
|
||||
Number of Funkwhale web workers.
|
||||
'';
|
||||
};
|
||||
|
||||
apiPort = mkOption {
|
||||
type = types.port;
|
||||
default = 5000;
|
||||
description = ''
|
||||
Funkwhale API Port.
|
||||
'';
|
||||
};
|
||||
|
||||
hostname = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
The definitive, public domain you will use for your instance.
|
||||
'';
|
||||
example = "funkwhale.yourdomain.net";
|
||||
};
|
||||
|
||||
protocol = mkOption {
|
||||
type = types.enum [ "http" "https" ];
|
||||
default = "https";
|
||||
description = ''
|
||||
Web server protocol.
|
||||
'';
|
||||
};
|
||||
|
||||
emailConfig = mkOption {
|
||||
type = types.str;
|
||||
default = "consolemail://";
|
||||
description = ''
|
||||
Configure email sending. By default, it outputs emails to console instead of sending them. See https://docs.funkwhale.audio/configuration.html#email-config for details.
|
||||
'';
|
||||
example = "smtp+ssl://user:password@youremail.host:465";
|
||||
};
|
||||
|
||||
defaultFromEmail = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
The email address to use to send system emails.
|
||||
'';
|
||||
example = "funkwhale@yourdomain.net";
|
||||
};
|
||||
|
||||
api = {
|
||||
mediaRoot = mkOption {
|
||||
type = types.str;
|
||||
default = "/srv/funkwhale/media";
|
||||
description = ''
|
||||
Where media files (such as album covers or audio tracks) are stored on your system. Ensure this directory actually exists.
|
||||
'';
|
||||
};
|
||||
|
||||
staticRoot = mkOption {
|
||||
type = types.str;
|
||||
default = "/srv/funkwhale/static";
|
||||
description = ''
|
||||
Where static files (such as API CSS or icons) are compiled on your system. Ensure this directory actually exists.
|
||||
'';
|
||||
};
|
||||
|
||||
djangoSecretKey = mkOption {
|
||||
type = types.str;
|
||||
description = ''
|
||||
Django secret key. Generate one using `openssl rand -base64 45` for example.
|
||||
'';
|
||||
example = "6VhAWVKlqu/dJSdz6TVgEJn/cbbAidwsFvg9ddOwuPRssEs0OtzAhJxLcLVC";
|
||||
};
|
||||
};
|
||||
|
||||
musicDirectoryPath = mkOption {
|
||||
type = types.str;
|
||||
default = "/srv/funkwhale/music";
|
||||
description = ''
|
||||
Directory of music files used for Funkwhale's in-place import.
|
||||
'';
|
||||
};
|
||||
|
||||
enableRaven = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
description = ''
|
||||
Sentry/Raven error reporting (server side).
|
||||
Enable Raven if you want to help improve funkwhale by
|
||||
automatically sending error reports to the Funkwhale developers' Sentry instance.
|
||||
This will help them detect and correct bugs.
|
||||
'';
|
||||
};
|
||||
|
||||
ravenDsn = mkOption {
|
||||
type = types.str;
|
||||
default = "https://44332e9fdd3d42879c7d35bf8562c6a4:0062dc16a22b41679cd5765e5342f716@sentry.eliotberriot.com/5";
|
||||
description = ''
|
||||
Sentry/Raven DSN.
|
||||
The default is the Funkwhale developers instance DSN.
|
||||
'';
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
|
||||
config = mkIf cfg.enable {
|
||||
assertions = [
|
||||
{ assertion = cfg.database.passwordFile != null || cfg.database.password != "" || cfg.database.socket != null;
|
||||
message = "one of services.funkwhale.database.socket, services.funkwhale.database.passwordFile, or services.funkwhale.database.password must be set";
|
||||
}
|
||||
{ assertion = cfg.database.createLocally -> cfg.database.user == cfg.user;
|
||||
message = "services.funkwhale.database.user must be set to ${cfg.user} if services.funkwhale.database.createLocally is set to true";
|
||||
}
|
||||
{ assertion = cfg.database.createLocally -> cfg.database.socket != null;
|
||||
message = "services.funkwhale.database.socket must be set if services.funkwhale.database.createLocally is set to true";
|
||||
}
|
||||
{ assertion = cfg.database.createLocally -> cfg.database.host == "localhost";
|
||||
message = "services.funkwhale.database.host must be set to localhost if services.funkwhale.database.createLocally is set to true";
|
||||
}
|
||||
];
|
||||
|
||||
users.users.funkwhale = mkIf (cfg.user == "funkwhale")
|
||||
{ name = "funkwhale";
|
||||
group = cfg.group;
|
||||
};
|
||||
|
||||
users.groups.funkwhale = mkIf (cfg.group == "funkwhale") { name = "funkwhale"; };
|
||||
|
||||
services.postgresql = mkIf cfg.database.createLocally {
|
||||
enable = true;
|
||||
ensureDatabases = [ cfg.database.name ];
|
||||
ensureUsers = [
|
||||
{ name = cfg.database.user;
|
||||
ensurePermissions = { "DATABASE ${cfg.database.name}" = "ALL PRIVILEGES"; };
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
services.redis.enable = true;
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
appendHttpConfig = ''
|
||||
upstream funkwhale-api {
|
||||
server ${cfg.apiIp}:${toString cfg.apiPort};
|
||||
}
|
||||
'';
|
||||
virtualHosts =
|
||||
let proxyConfig = ''
|
||||
# global proxy conf
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
proxy_set_header X-Forwarded-Host $host:$server_port;
|
||||
proxy_set_header X-Forwarded-Port $server_port;
|
||||
proxy_redirect off;
|
||||
|
||||
# websocket support
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
'';
|
||||
withSSL = cfg.protocol == "https";
|
||||
in {
|
||||
"${cfg.hostname}" = {
|
||||
enableACME = withSSL;
|
||||
forceSSL = withSSL;
|
||||
root = "${pkgs.funkwhale}/front";
|
||||
# gzip config is nixos nginx recommendedGzipSettings with gzip_types from funkwhale doc (https://docs.funkwhale.audio/changelog.html#id5)
|
||||
extraConfig = ''
|
||||
add_header Content-Security-Policy "default-src 'self'; script-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' data:; font-src 'self' data:; object-src 'none'; media-src 'self' data:";
|
||||
add_header Referrer-Policy "strict-origin-when-cross-origin";
|
||||
|
||||
gzip on;
|
||||
gzip_disable "msie6";
|
||||
gzip_proxied any;
|
||||
gzip_comp_level 5;
|
||||
gzip_types
|
||||
application/javascript
|
||||
application/vnd.geo+json
|
||||
application/vnd.ms-fontobject
|
||||
application/x-font-ttf
|
||||
application/x-web-app-manifest+json
|
||||
font/opentype
|
||||
image/bmp
|
||||
image/svg+xml
|
||||
image/x-icon
|
||||
text/cache-manifest
|
||||
text/css
|
||||
text/plain
|
||||
text/vcard
|
||||
text/vnd.rim.location.xloc
|
||||
text/vtt
|
||||
text/x-component
|
||||
text/x-cross-domain-policy;
|
||||
gzip_vary on;
|
||||
'';
|
||||
locations = {
|
||||
"/" = {
|
||||
extraConfig = proxyConfig;
|
||||
proxyPass = "http://funkwhale-api/";
|
||||
};
|
||||
"/front/" = {
|
||||
alias = "${pkgs.funkwhale}/front/";
|
||||
extraConfig = ''
|
||||
add_header Content-Security-Policy "default-src 'self'; script-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' data:; font-src 'self' data:; object-src 'none'; media-src 'self' data:";
|
||||
add_header Referrer-Policy "strict-origin-when-cross-origin";
|
||||
expires 30d;
|
||||
add_header Pragma public;
|
||||
add_header Cache-Control "public, must-revalidate, proxy-revalidate";
|
||||
'';
|
||||
};
|
||||
"= /front/embed.html" = {
|
||||
alias = "${pkgs.funkwhale}/front/embed.html";
|
||||
extraConfig = ''
|
||||
add_header Content-Security-Policy "default-src 'self'; script-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' data:; font-src 'self' data:; object-src 'none'; media-src 'self' data:";
|
||||
add_header Referrer-Policy "strict-origin-when-cross-origin";
|
||||
add_header X-Frame-Options "ALLOW";
|
||||
expires 30d;
|
||||
add_header Pragma public;
|
||||
add_header Cache-Control "public, must-revalidate, proxy-revalidate";
|
||||
'';
|
||||
};
|
||||
"/federation/" = {
|
||||
extraConfig = proxyConfig;
|
||||
proxyPass = "http://funkwhale-api/federation/";
|
||||
};
|
||||
"/rest/" = {
|
||||
extraConfig = proxyConfig;
|
||||
proxyPass = "http://funkwhale-api/api/subsonic/rest/";
|
||||
};
|
||||
"/.well-known/" = {
|
||||
extraConfig = proxyConfig;
|
||||
proxyPass = "http://funkwhale-api/.well-known/";
|
||||
};
|
||||
"/media/".alias = "${cfg.api.mediaRoot}/";
|
||||
"/_protected/media/" = {
|
||||
extraConfig = ''
|
||||
internal;
|
||||
'';
|
||||
alias = "${cfg.api.mediaRoot}/";
|
||||
};
|
||||
"/_protected/music/" = {
|
||||
extraConfig = ''
|
||||
internal;
|
||||
'';
|
||||
alias = "${cfg.musicDirectoryPath}/";
|
||||
};
|
||||
"/staticfiles/".alias = "${cfg.api.staticRoot}/";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d ${cfg.dataDir} 0755 ${cfg.user} ${cfg.group} - -"
|
||||
"d ${cfg.api.mediaRoot} 0755 ${cfg.user} ${cfg.group} - -"
|
||||
"d ${cfg.api.staticRoot} 0755 ${cfg.user} ${cfg.group} - -"
|
||||
"d ${cfg.musicDirectoryPath} 0755 ${cfg.user} ${cfg.group} - -"
|
||||
];
|
||||
|
||||
systemd.targets.funkwhale = {
|
||||
description = "Funkwhale";
|
||||
wants = ["funkwhale-server.service" "funkwhale-worker.service" "funkwhale-beat.service"];
|
||||
};
|
||||
systemd.services =
|
||||
let serviceConfig = {
|
||||
User = "${cfg.user}";
|
||||
WorkingDirectory = "${pkgs.funkwhale}";
|
||||
EnvironmentFile = "${funkwhaleEnvFile}";
|
||||
};
|
||||
in {
|
||||
funkwhale-psql-init = mkIf cfg.database.createLocally {
|
||||
description = "Funkwhale database preparation";
|
||||
after = [ "redis.service" "postgresql.service" ];
|
||||
wantedBy = [ "funkwhale-init.service" ];
|
||||
before = [ "funkwhale-init.service" ];
|
||||
serviceConfig = {
|
||||
User = "postgres";
|
||||
ExecStart = '' ${config.services.postgresql.package}/bin/psql -d ${cfg.database.name} -c 'CREATE EXTENSION IF NOT EXISTS "unaccent";CREATE EXTENSION IF NOT EXISTS "citext";' '';
|
||||
};
|
||||
};
|
||||
funkwhale-init = {
|
||||
description = "Funkwhale initialization";
|
||||
wantedBy = [ "funkwhale-server.service" "funkwhale-worker.service" "funkwhale-beat.service" ];
|
||||
before = [ "funkwhale-server.service" "funkwhale-worker.service" "funkwhale-beat.service" ];
|
||||
environment = funkwhaleEnv;
|
||||
serviceConfig = {
|
||||
User = "${cfg.user}";
|
||||
Group = "${cfg.group}";
|
||||
};
|
||||
script = ''
|
||||
${pythonEnv}/bin/python ${pkgs.funkwhale}/manage.py migrate
|
||||
${pythonEnv}/bin/python ${pkgs.funkwhale}/manage.py collectstatic --no-input
|
||||
if ! test -e ${cfg.dataDir}/createSuperUser.sh; then
|
||||
echo "#!/bin/sh
|
||||
|
||||
${funkwhaleEnvScriptData} ${pythonEnv}/bin/python ${pkgs.funkwhale}/manage.py createsuperuser" > ${cfg.dataDir}/createSuperUser.sh
|
||||
chmod u+x ${cfg.dataDir}/createSuperUser.sh
|
||||
chown -R ${cfg.user}:${cfg.group} ${cfg.dataDir}
|
||||
fi
|
||||
if ! test -e ${cfg.dataDir}/config; then
|
||||
mkdir -p ${cfg.dataDir}/config
|
||||
ln -s ${funkwhaleEnvFile} ${cfg.dataDir}/config/.env
|
||||
ln -s ${funkwhaleEnvFile} ${cfg.dataDir}/.env
|
||||
fi
|
||||
'';
|
||||
};
|
||||
|
||||
funkwhale-server = {
|
||||
description = "Funkwhale application server";
|
||||
partOf = [ "funkwhale.target" ];
|
||||
|
||||
serviceConfig = serviceConfig // {
|
||||
ExecStart = "${pythonEnv}/bin/gunicorn config.asgi:application -w ${toString cfg.webWorkers} -k uvicorn.workers.UvicornWorker -b ${cfg.apiIp}:${toString cfg.apiPort}";
|
||||
};
|
||||
environment = funkwhaleEnv;
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
};
|
||||
|
||||
funkwhale-worker = {
|
||||
description = "Funkwhale celery worker";
|
||||
partOf = [ "funkwhale.target" ];
|
||||
|
||||
serviceConfig = serviceConfig // {
|
||||
RuntimeDirectory = "funkwhaleworker";
|
||||
ExecStart = "${pythonEnv}/bin/celery -A funkwhale_api.taskapp worker -l INFO";
|
||||
};
|
||||
environment = funkwhaleEnv;
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
};
|
||||
|
||||
funkwhale-beat = {
|
||||
description = "Funkwhale celery beat process";
|
||||
partOf = [ "funkwhale.target" ];
|
||||
|
||||
serviceConfig = serviceConfig // {
|
||||
RuntimeDirectory = "funkwhalebeat";
|
||||
ExecStart = '' ${pythonEnv}/bin/celery -A funkwhale_api.taskapp beat -l INFO --schedule="/run/funkwhalebeat/celerybeat-schedule.db" --pidfile="/run/funkwhalebeat/celerybeat.pid" '';
|
||||
};
|
||||
environment = funkwhaleEnv;
|
||||
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
meta = {
|
||||
maintainers = with lib.maintainers; [ mmai ];
|
||||
};
|
||||
}
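For reference, a minimal sketch of how a host could enable the module above (the hostname, address and secret are placeholders, not values from this repo):

{
  services.funkwhale = {
    enable = true;
    hostname = "funkwhale.example.org";
    defaultFromEmail = "funkwhale@example.org";
    # e.g. generated with `openssl rand -base64 45`
    api.djangoSecretKey = "changeme";
  };
}

With database.createLocally left at its default of true, the module also sets up PostgreSQL, Redis, nginx and the funkwhale-* systemd units on the same host.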
|
||||
|
|
@ -1,52 +0,0 @@
|
|||
{ config, lib, pkgs, modules, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
{
|
||||
imports = [ modules.bindMounts ];
|
||||
|
||||
users.mutableUsers = false;
|
||||
|
||||
boot.initrd = mkIf (config.fileSystems."/".fsType == "zfs") {
|
||||
network.ssh.hostKeys = mkIf config.hacc.encboot.enable
|
||||
(mkForce [ /persist/ssh/encboot_host ]);
|
||||
|
||||
postDeviceCommands = mkIf (!config.boot.initrd.systemd.enable)
|
||||
(mkAfter ''
|
||||
zfs rollback -r ${config.fileSystems."/".device}@blank
|
||||
'');
|
||||
|
||||
systemd = mkIf config.boot.initrd.systemd.enable {
|
||||
storePaths = [ pkgs.zfs ];
|
||||
services.rollback = {
|
||||
description = "Rollback ZFS datasets to a pristine state";
|
||||
wantedBy = [ "initrd.target" ];
|
||||
after = [ "zfs-import-${head (splitString "/" config.fileSystems."/".device)}.service" ];
|
||||
before = [ "sysroot.mount" ];
|
||||
path = [ pkgs.zfs ];
|
||||
unitConfig.DefaultDependencies = "no";
|
||||
serviceConfig.Type = "oneshot";
|
||||
script = ''
|
||||
zfs rollback -r ${config.fileSystems."/".device}@blank && echo "rollback complete"
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.openssh = {
|
||||
hostKeys = [
|
||||
{
|
||||
path = "/persist/ssh/ssh_host_ed25519_key";
|
||||
type = "ed25519";
|
||||
}
|
||||
{
|
||||
path = "/persist/ssh/ssh_host_rsa_key";
|
||||
type = "rsa";
|
||||
bits = 4096;
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
services.postgresql.dataDir =
|
||||
"/persist/postgresql/${config.services.postgresql.package.psqlSchema}";
|
||||
}
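The rollback above only works if a pristine @blank snapshot of the root dataset was taken once at install time (e.g. `zfs snapshot zroot/local/root@blank`), and if anything worth keeping lives on a separate dataset mounted at /persist. A sketch of the latter, matching the hardware config further down in this diff:

{
  fileSystems."/persist" = {
    device = "zroot/safe/persist";
    fsType = "zfs";
    # needed early so the ssh host keys and sops secrets are available at boot
    neededForBoot = true;
  };
}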
|
|
@ -1,62 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
with lib;
|
||||
|
||||
let
|
||||
cfg = config.hacc.websites;
|
||||
in
|
||||
|
||||
{
|
||||
options.hacc.websites = {
|
||||
enable = mkOption {
|
||||
type = types.bool;
|
||||
default = false;
|
||||
};
|
||||
directory = mkOption {
|
||||
type = types.path;
|
||||
description = "all subdirectories of the given path are expected to contain a (static) website";
|
||||
};
|
||||
ignore = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
description = "subdirectories that shouldn't be published";
|
||||
};
|
||||
builders = mkOption {
|
||||
type = types.lazyAttrsOf types.package;
|
||||
default = {};
|
||||
description = "exposes website builders, for use with nix run";
|
||||
};
|
||||
};
|
||||
|
||||
config = let
|
||||
subdirs =
|
||||
let dirAttrs = filterAttrs
|
||||
(n: v: v == "directory" && !(lists.elem n cfg.ignore))
|
||||
(builtins.readDir cfg.directory);
|
||||
in mapAttrsToList (n: v: n) dirAttrs;
|
||||
mkWebsiteDrv = subdir:
|
||||
pkgs.callPackage "${cfg.directory}/${subdir}" {};
|
||||
mkWebsiteVHost = subdir: {
|
||||
name = subdir;
|
||||
# the nginx virtualhost config (for all sites) goes in here
|
||||
value = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
|
||||
locations."/".root =
|
||||
(mkWebsiteDrv subdir).outPath;
|
||||
};
|
||||
};
|
||||
in mkIf cfg.enable {
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
virtualHosts =
|
||||
listToAttrs (map mkWebsiteVHost subdirs);
|
||||
};
|
||||
hacc.websites.builders =
|
||||
listToAttrs (map (subdir: {
|
||||
name = subdir;
|
||||
value = (mkWebsiteDrv subdir).watch or null;
|
||||
}) subdirs);
|
||||
};
|
||||
}
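Since mkWebsiteDrv just runs pkgs.callPackage on each subdirectory, every published site needs a default.nix that evaluates to a derivation (optionally exposing a watch attribute, e.g. via passthru, for the builders output). A minimal sketch with hypothetical names:

{ stdenvNoCC }:

stdenvNoCC.mkDerivation {
  name = "example-website";
  src = ./.;
  # the module points the nginx document root for this vhost at $out
  installPhase = "cp -r . $out";
}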
|
|
@ -1,93 +0,0 @@
|
|||
{ config, lib, pkgs, sources, modules, ... }:
|
||||
|
||||
{
|
||||
imports = [
|
||||
../common
|
||||
./hardware.nix
|
||||
modules.encboot
|
||||
modules.nopersist
|
||||
./nftables.nix
|
||||
./nextcloud.nix
|
||||
./mattermost.nix
|
||||
./murmur.nix
|
||||
./hedgedoc-hacc.nix
|
||||
./hedgedoc-i4f.nix
|
||||
./mail.nix
|
||||
./forgejo.nix
|
||||
./nginx-pages.nix
|
||||
./vaultwarden.nix
|
||||
./tracktrain.nix
|
||||
./uffd.nix
|
||||
./lxc.nix
|
||||
./monit.nix
|
||||
];
|
||||
|
||||
hacc.bindToPersist = [ "/var/lib/acme" ];
|
||||
|
||||
hacc.encboot = {
|
||||
enable = true;
|
||||
dataset = "-a";
|
||||
networkDrivers = [ "igb" ];
|
||||
};
|
||||
|
||||
sops.defaultSopsFile = ../secrets.yaml;
|
||||
sops.age.sshKeyPaths = [ "/persist/ssh/ssh_host_ed25519_key" ];
|
||||
|
||||
boot.loader.grub.enable = true;
|
||||
boot.loader.grub.devices = [ "/dev/nvme0n1" "/dev/nvme1n1" ];
|
||||
boot.supportedFilesystems = [ "zfs" ];
|
||||
|
||||
networking.hostId = "b2867696";
|
||||
networking.useDHCP = true;
|
||||
networking.nftables.enable = true;
|
||||
|
||||
networking.hostName = "parsons";
|
||||
|
||||
networking.interfaces.enp35s0.ipv6.addresses = [{
|
||||
address = "2a01:4f9:3a:2ddb::1";
|
||||
prefixLength = 64;
|
||||
}];
|
||||
networking.defaultGateway6 = {
|
||||
address = "fe80::1";
|
||||
interface = "enp35s0";
|
||||
};
|
||||
|
||||
services.nginx = {
|
||||
enable = true;
|
||||
recommendedProxySettings = true;
|
||||
virtualHosts = {
|
||||
"parsons.hacc.space" = {
|
||||
default = true;
|
||||
locations."/".return = "404";
|
||||
};
|
||||
"hacc.space" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/".return = "302 https://hacc.earth";
|
||||
};
|
||||
};
|
||||
};
|
||||
networking.firewall.allowedTCPPorts = [ 80 443 ];
|
||||
|
||||
services.restic.backups.tardis = {
|
||||
passwordFile = "/run/secrets/restic/system";
|
||||
environmentFile = "/run/secrets/restic/s3creds.env";
|
||||
paths = [
|
||||
"/home"
|
||||
"/persist"
|
||||
];
|
||||
pruneOpts = [
|
||||
"--keep-daily 7"
|
||||
"--keep-weekly 5"
|
||||
"--keep-monthly 3"
|
||||
];
|
||||
repository = "b2:tardis-parsons:system";
|
||||
};
|
||||
|
||||
sops.secrets = {
|
||||
"restic/system" = {};
|
||||
"restic/s3creds.env" = {};
|
||||
};
|
||||
|
||||
system.stateVersion = "21.05";
|
||||
}
|
|
@ -1,85 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
hacc.containers.forgejo = {
|
||||
config = { lib, pkgs, ... }: {
|
||||
system.stateVersion = "21.11";
|
||||
|
||||
environment.systemPackages = [ pkgs.forgejo ];
|
||||
|
||||
hacc.bindMounts."/var/lib/forgejo" = "/persist/forgejo";
|
||||
|
||||
services.forgejo = {
|
||||
enable = true;
|
||||
lfs.enable = true;
|
||||
database.type = "postgres";
|
||||
settings = {
|
||||
repository = {
|
||||
DEFAULT_PRIVATE = "public";
|
||||
PREFERRED_LICENSES = "Unlicense";
|
||||
DEFAULT_BRANCH = "main";
|
||||
};
|
||||
oauth2_client = {
|
||||
ACCOUNT_LINKING = "auto";
|
||||
ENABLE_AUTO_REGISTRATION = true;
|
||||
};
|
||||
"repository.pull-requests" = {
|
||||
DEFAULT_MERGE_STYLE = "merge";
|
||||
DEFAULT_MERGE_MESSAGE_ALL_AUTHORS = true;
|
||||
};
|
||||
"repository.upload".FILE_MAX_SIZE = 1024;
|
||||
server = {
|
||||
LANDING_PAGE = "explore";
|
||||
OFFLINE_MODE = true;
|
||||
ROOT_URL = "https://git.infra4future.de";
|
||||
HTTP_PORT = 3000;
|
||||
HTTP_ADDR = "0.0.0.0";
|
||||
};
|
||||
security = { INSTALL_LOCK = true; };
|
||||
other = {
|
||||
SHOW_FOOTER_VERSION = false;
|
||||
SHOW_FOOTER_TEMPLATE_LOAD_TIME = false;
|
||||
};
|
||||
cron = {
|
||||
ENABLED = true;
|
||||
NOTICE_ON_SUCCESS = true;
|
||||
};
|
||||
"cron.update_mirrors" = {
|
||||
SCHEDULE = "@every 12h";
|
||||
PULL_LIMIT = "-1";
|
||||
PUSH_LIMIT = "-1";
|
||||
};
|
||||
"cron.git_gc_repos".ENABLED = true;
|
||||
"cron.delete_old_actions".ENABLED = true;
|
||||
log.LEVEL = "Info";
|
||||
service.DISABLE_REGISTRATION = true;
|
||||
session.COOKIE_SECURE = true;
|
||||
default.APP_NAME = "0x0: git for all creatures";
|
||||
};
|
||||
};
|
||||
services.postgresql.package = pkgs.postgresql_15;
|
||||
|
||||
services.postgresqlBackup = {
|
||||
enable = true;
|
||||
databases = [ "forgejo" ];
|
||||
startAt = "*-*-* 23:45:00";
|
||||
location = "/persist/backups/postgres";
|
||||
};
|
||||
services.openssh = {
|
||||
enable = true;
|
||||
settings = {
|
||||
PasswordAuthentication = false;
|
||||
AcceptEnv = "GIT_PROTOCOL";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."git.infra4future.de" = {
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://${config.containers.forgejo.localAddress}:3000";
|
||||
};
|
||||
};
|
||||
}
|
|
@ -1,61 +0,0 @@
|
|||
{ config, lib, pkgs, modulesPath, ... }:
|
||||
|
||||
{
|
||||
imports =
|
||||
[ (modulesPath + "/installer/scan/not-detected.nix")
|
||||
];
|
||||
|
||||
boot.initrd.availableKernelModules = [ "xhci_pci" "ahci" "nvme" "sd_mod" ];
|
||||
boot.initrd.kernelModules = [ ];
|
||||
boot.kernelModules = [ "kvm-amd" ];
|
||||
boot.extraModulePackages = [ ];
|
||||
|
||||
fileSystems."/" =
|
||||
{ device = "zroot/local/root";
|
||||
fsType = "zfs";
|
||||
};
|
||||
|
||||
fileSystems."/boot" =
|
||||
{ device = "/dev/disk/by-uuid/daf2a731-952f-45c7-9c25-49e1a2f56062";
|
||||
fsType = "ext4";
|
||||
};
|
||||
|
||||
fileSystems."/nix" =
|
||||
{ device = "zroot/local/nix";
|
||||
fsType = "zfs";
|
||||
};
|
||||
|
||||
fileSystems."/persist" =
|
||||
{ device = "zroot/safe/persist";
|
||||
fsType = "zfs";
|
||||
neededForBoot = true;
|
||||
};
|
||||
|
||||
fileSystems."/home" =
|
||||
{ device = "zroot/safe/home";
|
||||
fsType = "zfs";
|
||||
};
|
||||
|
||||
fileSystems."/root" =
|
||||
{ device = "zroot/safe/root";
|
||||
fsType = "zfs";
|
||||
};
|
||||
|
||||
fileSystems."/var/cache/restic-backups-tardis" =
|
||||
{ device = "zroot/safe/restic-cache";
|
||||
fsType = "zfs";
|
||||
};
|
||||
|
||||
fileSystems."/tmp" =
|
||||
{ device = "zroot/local/tmp";
|
||||
fsType = "zfs";
|
||||
};
|
||||
|
||||
fileSystems."/persist/data" =
|
||||
{ device = "dpool/safe/data";
|
||||
fsType = "zfs";
|
||||
};
|
||||
|
||||
swapDevices = [ ];
|
||||
|
||||
}
|
|
@ -1,91 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
|
||||
sops.secrets = {
|
||||
"hedgedoc-hacc/env" = {};
|
||||
};
|
||||
|
||||
containers.pad-hacc.bindMounts = {
|
||||
"/secrets".hostPath = "/run/secrets/hedgedoc-hacc";
|
||||
};
|
||||
hacc.containers.pad-hacc = {
|
||||
config = { config, lib, ... }: {
|
||||
services.hedgedoc = {
|
||||
enable = true;
|
||||
settings = {
|
||||
allowAnonymous = true;
|
||||
allowFreeURL = true;
|
||||
allowGravatar = false;
|
||||
allowOrigin = [ "localhost" "pad.hacc.space" "fff-muc.de" ];
|
||||
db = {
|
||||
host = "/run/postgresql";
|
||||
username = "codimd";
|
||||
dialect = "postgres";
|
||||
database = "codimd";
|
||||
};
|
||||
defaultPermission = "limited";
|
||||
domain = "pad.hacc.space";
|
||||
host = "0.0.0.0";
|
||||
protocolUseSSL = true;
|
||||
hsts.preload = false;
|
||||
email = false;
|
||||
oauth2 = {
|
||||
authorizationURL = "https://login.infra4future.de/oauth2/authorize";
|
||||
tokenURL = "https://login.infra4future.de/oauth2/token";
|
||||
clientID = "hedgedoc";
|
||||
# must be set to make the NixOS module happy, but env var takes precedence
|
||||
clientSecret = "lol nope";
|
||||
};
|
||||
};
|
||||
environmentFile = "/secrets/env";
|
||||
};
|
||||
systemd.services.hedgedoc.environment = {
|
||||
"CMD_LOGLEVEL" = "warn";
|
||||
"CMD_OAUTH2_USER_PROFILE_URL" = "https://login.infra4future.de/oauth2/userinfo";
|
||||
"CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR" = "nickname";
|
||||
"CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR" = "name";
|
||||
"CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR" = "email";
|
||||
"CMD_OAUTH2_PROVIDERNAME" = "Infra4Future";
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
enable = true;
|
||||
ensureDatabases = [ "codimd" ];
|
||||
ensureUsers = [{
|
||||
name = "codimd";
|
||||
ensureDBOwnership = true;
|
||||
}];
|
||||
authentication = ''
|
||||
local all all trust
|
||||
host codimd codimd 127.0.0.1/32 trust
|
||||
'';
|
||||
package = pkgs.postgresql_15;
|
||||
};
|
||||
services.postgresqlBackup = {
|
||||
enable = true;
|
||||
databases = [ "codimd" ];
|
||||
startAt = "*-*-* 23:45:00";
|
||||
location = "/persist/backups/postgres";
|
||||
};
|
||||
hacc.bindToPersist = [ "/var/lib/hedgedoc" ];
|
||||
};
|
||||
};
|
||||
services.nginx.virtualHosts."pad.hacc.earth" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
globalRedirect = "pad.hacc.space";
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."pad.hacc.space" = {
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://${config.containers.pad-hacc.localAddress}:3000";
|
||||
extraConfig = ''
|
||||
add_header Access-Control-Allow-Origin "*";
|
||||
proxy_buffering off;
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
|
@ -1,63 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
hacc.containers.pad-i4f = {
|
||||
config = { config, lib, ... }: {
|
||||
services.hedgedoc = {
|
||||
enable = true;
|
||||
settings = {
|
||||
allowAnonymous = true;
|
||||
allowFreeURL = true;
|
||||
allowGravatar = false;
|
||||
allowOrigin = [ "localhost" "pad.infra4future.de" "fff-muc.de" ];
|
||||
db = {
|
||||
host = "/run/postgresql";
|
||||
dialect = "postgres";
|
||||
database = "hedgedoc";
|
||||
};
|
||||
defaultPermission = "freely";
|
||||
domain = "pad.infra4future.de";
|
||||
host = "0.0.0.0";
|
||||
protocolUseSSL = true;
|
||||
hsts.preload = false;
|
||||
email = false;
|
||||
};
|
||||
};
|
||||
systemd.services.hedgedoc.environment = {
|
||||
"CMD_LOGLEVEL" = "warn";
|
||||
};
|
||||
services.postgresql = {
|
||||
enable = true;
|
||||
package = pkgs.postgresql_15;
|
||||
authentication = ''
|
||||
local all all trust
|
||||
host hedgedoc hedgedoc 127.0.0.1/32 trust
|
||||
'';
|
||||
ensureDatabases = [ "hedgedoc" ];
|
||||
ensureUsers = [{
|
||||
name = "hedgedoc";
|
||||
ensureDBOwnership = true;
|
||||
}];
|
||||
};
|
||||
services.postgresqlBackup = {
|
||||
enable = true;
|
||||
databases = [ "hedgedoc" ];
|
||||
startAt = "*-*-* 23:45:00";
|
||||
location = "/persist/backups/postgres";
|
||||
};
|
||||
hacc.bindToPersist = [ "/var/lib/hedgedoc" ];
|
||||
};
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."pad.infra4future.de" = {
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://${config.containers.pad-i4f.localAddress}:3000";
|
||||
extraConfig = ''
|
||||
add_header Access-Control-Allow-Origin "*";
|
||||
proxy_buffering off;
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
|
@ -1,29 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
networking.bridges.lxcbr0.interfaces = [];
|
||||
networking.interfaces.lxcbr0.ipv4.addresses = [
|
||||
{
|
||||
address = "10.1.2.1";
|
||||
prefixLength = 24;
|
||||
}
|
||||
];
|
||||
|
||||
virtualisation.lxc.enable = true;
|
||||
virtualisation.lxc.systemConfig = ''
|
||||
lxc.bdev.zfs.root = zroot/safe/containers/lxc
|
||||
lxc.lxcpath = /persist/lxc
|
||||
'';
|
||||
|
||||
users.users.root.subUidRanges = [{ count = 65536; startUid = 100000; }];
|
||||
users.users.root.subGidRanges = [{ count = 65536; startGid = 100000; }];
|
||||
|
||||
environment.etc."lxc/share".source = "${pkgs.lxc}/share/lxc";
|
||||
|
||||
|
||||
services.nginx.virtualHosts."onlyoffice.infra4future.de" = {
|
||||
locations."/".proxyPass = "http://10.1.2.233:80";
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
};
|
||||
}
|
208 parsons/mail.nix
|
@ -1,208 +0,0 @@
|
|||
{ config, options, pkgs, lib, sources, ... }:
|
||||
|
||||
{
|
||||
imports = [ sources.nixos-mailserver.outPath ];
|
||||
|
||||
# reduce log spam
|
||||
systemd.services.rspamd.serviceConfig.LogLevelMax =
|
||||
3; # this is set to error because rspamd regularly complains about not enough learns
|
||||
systemd.services.dovecot2.serviceConfig.LogLevelMax = 5; # = notice
|
||||
|
||||
# stop postfix from dying if rspamd hiccups
|
||||
systemd.services.postfix.unitConfig = {
|
||||
Requires = lib.mkForce "dovecot2.service opendkim.service";
|
||||
};
|
||||
|
||||
mailserver = {
|
||||
mailDirectory = "/persist/mail";
|
||||
enable = true;
|
||||
fqdn = "mail.hacc.space";
|
||||
monitoring = {
|
||||
enable = true;
|
||||
alertAddress = "admin@hacc.space";
|
||||
};
|
||||
domains = [
|
||||
"hacc.space"
|
||||
"muc.hacc.space"
|
||||
"hacc.earth"
|
||||
"4future.dev"
|
||||
"4futu.re"
|
||||
"infra4future.de"
|
||||
];
|
||||
|
||||
loginAccounts = {
|
||||
"hexchen@hacc.space".hashedPassword =
|
||||
"$6$x9skYtRp4dgxC$1y8gPC2BuVqG3kJVSMGgzZv0Bg1T9qxcnBWLIDbANy1d//SQ23Y7s3IMYcEPd1/l/MYWD9Y/Qse6HbT5w5Xwq/";
|
||||
|
||||
"octycs@hacc.space".hashedPassword =
|
||||
"$6$KceTivtJ$58jxhYF6ULfivNsb3Z0J7PnGea0Hs2wTWh3c9FrKRIAmuOD96u2IDgZRCn6P5NrXA0BL.n6HC2RS3r.4JnOmg.";
|
||||
"octycs@hacc.space".aliases = [ "markus@hacc.space" ];
|
||||
|
||||
"raphael@hacc.space".hashedPassword =
|
||||
"$6$QveHpwMcp9mkFVAU$EFuahOrJIxPg.c.WGFHtrP3.onwJYwvP7fiBHHGb9jhosewZ2tEUP.2D3uyDLhd9Cfny6Yp4jDk/Hkjk7/ME1/";
|
||||
|
||||
"moira@hacc.space".hashedPassword =
|
||||
"$6$BpYhwcZNrkLhVqK$6FMqA/vUkdV4GBlHLSqS5DRCb/CaLDNeIsBcZ8G30heytS/tJj2Ag7b1ovSltTA4PUfhee3pJrz1BkwkA93vN1";
|
||||
|
||||
"zauberberg@hacc.space".hashedPassword =
|
||||
"$6$ISAaU8X6D$oGKe9WXDWrRpGzHUTdxrxdtg9zuGOlBMuDc82IZhegpsv1bqd550FhZZrI40IjZTA5Hy2MZ8j/0efpnQ4fOQH0";
|
||||
"zauberberg@hacc.space".aliases = [ "lukas@hacc.space" ];
|
||||
|
||||
"stuebinm@hacc.space".hashedPassword =
|
||||
"$6$mjrMQG5smqLRlm$WzmbiZnGlEXGT7hj/n2qz0nvVzGyZfMToCyLRi0wErfVEHI7y7jtWoHqIWnpcHAM29UocsIFFsUCb3XqQCwwB.";
|
||||
|
||||
"lenny@hacc.space".hashedPassword =
|
||||
"$6$EZpv9XImv5F3$p2NSoo5gLxh6NnB3/C6wF8knRTuMHqDXYF3BEscaQuk7qok2Z13xKT/6mFvvSKKBnFCuYptgnfGswmoqIzm/1/";
|
||||
"lenny@hacc.space".aliases = [ "rinderhacc@hacc.space" ];
|
||||
|
||||
"peter@hacc.space".hashedPassword =
|
||||
"$6$yvpfTC.7DDpqpsYy$7TrfmLvz/fRl.k5mSHhI67CNquJa3yEFbLuTJvpyJ8Dj7SaD2eoOHWqef.CNo.T08kYzaqMcM73whAxjXVEmc.";
|
||||
"peter@hacc.space".aliases = [ "linmob@hacc.space" ];
|
||||
|
||||
"finance@muc.hacc.space".hashedPassword =
|
||||
"$6$R3GRmvXwqnMM6q.R$Y9mrUAmMnCScsM6pKjxo2a2XPM7lHrV8FIgK0PzhYvZbxWczo7.O4dk1onYeV1mRx/nXZfkZNjqNCruCn0S2m.";
|
||||
|
||||
"noreply@hacc.space" = {
|
||||
hashedPassword =
|
||||
"$6$YsqMoItITZUzI5wo$5Lejf8XBHRx4LW4VuZ9wJCiBbT4kOV/EZaCdWQ07eVIrkRTZwXWZ5zfsh.olXEFwvpNWN.DBnU.dQc.cC0/ra/";
|
||||
};
|
||||
|
||||
"noreply@infra4future.de" = {
|
||||
hashedPassword =
|
||||
"$6$uaD8bRcT1$gFqhFyu5RUsyUUOG5b.kN.JAJ1rVHvaYhpeRHoMvrERAMgBu1FHu2oDnjTsy.5NKoLc5xpI5uv4Gpy4YbmDmV.";
|
||||
};
|
||||
|
||||
"mattermost@hacc.space" = {
|
||||
hashedPassword =
|
||||
"$6$uaD8bRcT1$gFqhFyu5RUsyUUOG5b.kN.JAJ1rVHvaYhpeRHoMvrERAMgBu1FHu2oDnjTsy.5NKoLc5xpI5uv4Gpy4YbmDmV.";
|
||||
};
|
||||
};
|
||||
|
||||
extraVirtualAliases = {
|
||||
# address = forward address;
|
||||
|
||||
# -- International --
|
||||
# info/contact: main entrypoint, anyone can read or reply to this.
|
||||
"info@hacc.space" = [
|
||||
"hexchen@hacc.space"
|
||||
"octycs@hacc.space"
|
||||
"raphael@hacc.space"
|
||||
"moira@hacc.space"
|
||||
"zauberberg@hacc.space"
|
||||
"stuebinm@hacc.space"
|
||||
"lenny@hacc.space"
|
||||
"peter@hacc.space"
|
||||
];
|
||||
|
||||
# admin: current people with access to the mail server and knowledge on how to use it™
|
||||
"admin@hacc.space" = [
|
||||
"hexchen@hacc.space"
|
||||
"moira@hacc.space"
|
||||
"zauberberg@hacc.space"
|
||||
"stuebinm@hacc.space"
|
||||
];
|
||||
|
||||
# voc: hacc video operation center, various streaming-related things
|
||||
"voc@hacc.space" = [
|
||||
"hexchen@hacc.space"
|
||||
"moira@hacc.space"
|
||||
"octycs@hacc.space"
|
||||
"stuebinm@hacc.space"
|
||||
"zauberberg@hacc.space"
|
||||
"lenny@hacc.space"
|
||||
"raphael@hacc.space"
|
||||
];
|
||||
|
||||
# -- Regional: Germany --
|
||||
# board of hacc e.V.
|
||||
"vorstand@hacc.space" =
|
||||
[ "raphael@hacc.space" "moira@hacc.space" "peter@hacc.space" ];
|
||||
|
||||
# members of hacc e.V.
|
||||
"mitglieder@hacc.space" = [
|
||||
"hexchen@hacc.space"
|
||||
"raphael@hacc.space"
|
||||
"moira@hacc.space"
|
||||
"zauberberg@hacc.space"
|
||||
"lenny@hacc.space"
|
||||
"octycs@hacc.space"
|
||||
"stuebinm@hacc.space"
|
||||
"peter@hacc.space"
|
||||
];
|
||||
|
||||
# -- Regional: Munich --
|
||||
"muc@hacc.space" = [
|
||||
"hexchen@hacc.space"
|
||||
"octycs@hacc.space"
|
||||
"raphael@hacc.space"
|
||||
"moira@hacc.space"
|
||||
"zauberberg@hacc.space"
|
||||
"stuebinm@hacc.space"
|
||||
"lenny@hacc.space"
|
||||
"peter@hacc.space"
|
||||
];
|
||||
};
|
||||
|
||||
# Use Let's Encrypt certificates. Note that this sets up a stripped
|
||||
# down nginx and opens port 80.
|
||||
certificateScheme = "acme-nginx";
|
||||
|
||||
# Only allow implicit TLS
|
||||
enableImap = false;
|
||||
enablePop3 = false;
|
||||
|
||||
# Enable the ManageSieve protocol
|
||||
enableManageSieve = true;
|
||||
|
||||
};
|
||||
|
||||
services.postfix.submissionOptions.smtpd_sender_restrictions =
|
||||
lib.mkForce "reject_non_fqdn_sender,reject_unknown_sender_domain,permit";
|
||||
services.postfix.submissionsOptions.smtpd_sender_restrictions =
|
||||
lib.mkForce "reject_non_fqdn_sender,reject_unknown_sender_domain,permit";
|
||||
|
||||
services.postfix.virtual = ''
|
||||
postmaster@hacc.space admin@hacc.space
|
||||
abuse@hacc.space admin@hacc.space
|
||||
contact@hacc.space info@hacc.space
|
||||
hello@hacc.space info@hacc.space
|
||||
haccvoc@hacc.space voc@hacc.space
|
||||
@4future.dev @hacc.space
|
||||
@4futu.re @hacc.space
|
||||
@hacc.earth @hacc.space
|
||||
@infra4future.de @hacc.space
|
||||
'';
|
||||
|
||||
services.alps = {
|
||||
enable = true;
|
||||
theme = "alps";
|
||||
smtps = {
|
||||
port = 465;
|
||||
host = "mail.hacc.space";
|
||||
};
|
||||
imaps = {
|
||||
port = 993;
|
||||
host = "mail.hacc.space";
|
||||
};
|
||||
bindIP = "[::1]";
|
||||
};
|
||||
|
||||
systemd.services.alps.after = [ "dovecot2.service" "postfix.service" ];
|
||||
systemd.services.alps.bindsTo = [ "dovecot2.service" "postfix.service" ];
|
||||
|
||||
services.nginx.virtualHosts."mail.hacc.space" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/".proxyPass = "http://[::1]:1323";
|
||||
};
|
||||
|
||||
hacc.bindToPersist = [
|
||||
"/var/lib/rspamd"
|
||||
"/var/lib/opendkim"
|
||||
"/var/lib/postfix"
|
||||
"/var/lib/dovecot"
|
||||
"/var/sieve"
|
||||
"/var/lib/redis-rspamd"
|
||||
"/var/dkim"
|
||||
];
|
||||
}
|
|
@ -1,211 +0,0 @@
|
|||
{ config, pkgs, lib, ...}:
|
||||
|
||||
{
|
||||
sops.secrets = {
|
||||
"mattermost/env" = {};
|
||||
};
|
||||
|
||||
hacc.containers.mattermost = {
|
||||
bindSecrets = true;
|
||||
|
||||
config = { config, lib, pkgs, ... }: {
|
||||
environment.systemPackages = [ pkgs.morph pkgs.pgloader ];
|
||||
|
||||
systemd.services.mattermost.serviceConfig.EnvironmentFile =
|
||||
lib.mkForce "/secrets/env";
|
||||
|
||||
services.mattermost = {
|
||||
enable = true;
|
||||
siteUrl = "https://mattermost.infra4future.de";
|
||||
siteName = "Mattermost for Future";
|
||||
listenAddress = "0.0.0.0:3000";
|
||||
mutableConfig = false;
|
||||
|
||||
statePath = "/persist/mattermost";
|
||||
|
||||
extraConfig = {
|
||||
ServiceSettings = {
|
||||
TrustedProxyIPHeader = [ "X-Forwarded-For" "X-Real-Ip" ];
|
||||
ReadTimeout = 300;
|
||||
WriteTimeout = 600;
|
||||
IdleTimeout = 60;
|
||||
MaximumLoginAttempts = 10;
|
||||
AllowCorsFrom = "*.infra4future.de/*";
|
||||
WebserverMode = "gzip";
|
||||
EnableCustomEmoji = true;
|
||||
EnableEmojiPicker = true;
|
||||
EnableGifPicker = false;
|
||||
RestrictCustomEmojiCreation = "all";
|
||||
RestrictPostDelete = "all";
|
||||
AllowEditPost = "always";
|
||||
PostEditTimeout = -1;
|
||||
EnableTutorial = false;
|
||||
ExperimentalChannelSidebarOrganization = "default_on";
|
||||
ExperimentalChannelOrganization = true;
|
||||
ExperimentalDataPrefetch = true;
|
||||
EnableEmailInvitations = true;
|
||||
DisableLegacyMFA = true;
|
||||
EnableSVGs = true;
|
||||
EnableLaTeX = true;
|
||||
ThreadAutoFollow = true;
|
||||
EnableSecurityFixAlert = false;
|
||||
CollapsedThreads = "default_on";
|
||||
};
|
||||
TeamSettings = {
|
||||
EnableTeamCreation = true;
|
||||
EnableUserCreation = true;
|
||||
MaxUsersPerTeam = 250;
|
||||
EnableOpenServer = false;
|
||||
EnableUserDeactivation = true;
|
||||
ExperimentalViewArchivedChannels = true;
|
||||
ExperimentalEnableAutomaticReplies = true;
|
||||
};
|
||||
LogSettings = {
|
||||
EnableConsole = true;
|
||||
ConsoleLevel = "ERROR";
|
||||
EnableDiagnostics = false;
|
||||
EnableWebhookDebugging = false;
|
||||
};
|
||||
NotificationLogSettings = {
|
||||
EnableConsole = true;
|
||||
ConsoleLevel = "INFO";
|
||||
};
|
||||
PasswordSettings = {
|
||||
MinimumLength = 10;
|
||||
# turn off all the bullshit requirements
|
||||
Lowercase = false;
|
||||
Number = false;
|
||||
Uppercase = false;
|
||||
Symbol = false;
|
||||
};
|
||||
FileSettings = {
|
||||
EnableFileAttachments = true;
|
||||
MaxFileSize = 52428800;
|
||||
DriverName = "local";
|
||||
Directory = "/persist/mattermost/upload-storage";
|
||||
EnablePublicLink = true;
|
||||
PublicLinkSalt = "3k7p3yxdhz6798b3b9openfr9rn3ymwu";
|
||||
};
|
||||
EmailSettings = {
|
||||
EnableSignUpWithEmail = false;
|
||||
EnableSignInWithEmail = false;
|
||||
EnableSignInWithUsername = false;
|
||||
SendEmailNotifications = true;
|
||||
FeedbackName = "mattermost";
|
||||
FeedbackEmail = "mattermost@infra4future.de";
|
||||
ReplyToAddress = "mattermost@infra4future.de";
|
||||
FeedbackOrganization = "∆infra4future.de";
|
||||
EnableSMTPAuth = true;
|
||||
SMTPUsername = "noreply@infra4future.de";
|
||||
SMTPServer = "mail.hacc.space";
|
||||
SMTPPort = "465";
|
||||
SMTPServerTimeout = 10;
|
||||
ConnectionSecurity = "TLS";
|
||||
};
|
||||
RateLimitSettings.Enable = false;
|
||||
PrivacySettings = {
|
||||
ShowEmailAddress = false;
|
||||
ShowFullName = true;
|
||||
};
|
||||
# to disable the extra landing page advertising the app
|
||||
NativeAppSettings = {
|
||||
AppDownloadLink = "";
|
||||
AndroidAppDownloadLink = "";
|
||||
IosAppDownloadLink = "";
|
||||
};
|
||||
SupportSettings = {
|
||||
TermsOfServiceLink = "https://infra4future.de/nutzungsbedingungen.html";
|
||||
PrivacyPolicyLink = "https://infra4future.de/nutzungsbedingungen.html";
|
||||
AboutLink = "https://infra4future.de";
|
||||
SupportEmail = "info@infra4future.de";
|
||||
CustomTermsOfServiceEnabled = false;
|
||||
EnableAskCommunityLink = true;
|
||||
};
|
||||
AnnouncementSettings.EnableBanner = false;
|
||||
GitLabSettings = {
|
||||
Enable = true;
|
||||
Id = "mattermost";
|
||||
Scope = "";
|
||||
AuthEndpoint = "https://login.infra4future.de/oauth2/authorize";
|
||||
TokenEndpoint = "https://login.infra4future.de/oauth2/token";
|
||||
UserApiEndpoint = "https://login.infra4future.de/oauth2/userinfo";
|
||||
};
|
||||
# for some reason, these don't appear to be working; the startup
|
||||
# process complains and sets these back to en
|
||||
LocalizationSettings = {
|
||||
DefaultServerLocale = "de";
|
||||
DefaultClientLocale = "de";
|
||||
AvailableLocales = "de,en";
|
||||
};
|
||||
MessageExportSettings.EnableExport = false;
|
||||
# plugins appear to have trouble with the read-only filesystem; it may
|
||||
# be necessary to manually change their paths etc.
|
||||
PluginSettings = {
|
||||
Enable = true;
|
||||
EnableUploads = true;
|
||||
Plugins = {
|
||||
"com.github.matterpoll.matterpoll" = {
|
||||
experimentalui = true;
|
||||
trigger = "poll";
|
||||
};
|
||||
};
|
||||
PluginStates = {
|
||||
"com.github.matterpoll.matterpoll".Enable = true;
|
||||
};
|
||||
};
|
||||
ComplianceSettings.Enable = false;
|
||||
ClusterSettings.Enable = false;
|
||||
MetricsSettings.Enable = false;
|
||||
GuestAccountsSettings.Enable = false;
|
||||
FeatureFlags.CollapsedThreads = true;
|
||||
SqlSettings.DriverName = "postgres";
|
||||
SqlSettings.DataSource = "postgres:///mattermost?host=/run/postgresql";
|
||||
};
|
||||
|
||||
# turn off the weirder parts of this module (which insist on passwords
|
||||
# in nix files, instead of just using socket-based authentication)
|
||||
#
|
||||
# It will still attempt to use its default password, but postgres will
|
||||
# just let it in regardless of that.
|
||||
localDatabaseCreate = false;
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
enable = lib.mkForce true; # mattermost sets this to false. wtf.
|
||||
package = pkgs.postgresql_15;
|
||||
ensureDatabases = [ "mattermost" ];
|
||||
ensureUsers = [ {
|
||||
name = "mattermost";
|
||||
ensureDBOwnership = true;
|
||||
} ];
|
||||
|
||||
authentication = lib.mkForce ''
|
||||
# Generated file; do not edit!
|
||||
local all all trust
|
||||
'';
|
||||
};
|
||||
|
||||
services.postgresqlBackup = {
|
||||
enable = true;
|
||||
databases = [ "mattermost" ];
|
||||
startAt = "*-*-* 23:45:00";
|
||||
location = "/persist/backups/postgres";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."mattermost.infra4future.de" = {
|
||||
locations."/" = {
|
||||
proxyPass = "http://${config.containers.mattermost.localAddress}:3000";
|
||||
proxyWebsockets = true;
|
||||
extraConfig = ''
|
||||
# Mattermost CSR Patch
|
||||
proxy_hide_header Content-Security-Policy;
|
||||
proxy_hide_header X-Frame-Options;
|
||||
proxy_redirect off;
|
||||
'';
|
||||
};
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
};
|
||||
}
|
|
@ -1,64 +0,0 @@
|
|||
{ config, options, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
checkHash = pkgs.writeScriptBin "check-commit-hash" ''
|
||||
#!${lib.getExe pkgs.fish}
|
||||
set wanted (${lib.getExe pkgs.curl} -s https://git.infra4future.de/api/v1/repos/hacc/haccfiles/branches/main \
|
||||
-H 'accept: application/json' | jq -r .commit.id)
|
||||
|
||||
if test $status != 0
|
||||
echo "could not reach git.infra4future.de"
|
||||
exit 2
|
||||
end
|
||||
|
||||
set actual (cat /etc/haccfiles-commit)
|
||||
if test $status != 0
|
||||
echo "/etc/haccfiles-commit does not exist??"
|
||||
exit 2
|
||||
end
|
||||
|
||||
if test $actual != $wanted
|
||||
echo "parsons was built on $actual, but commit on main is $wanted"
|
||||
exit 1
|
||||
end
|
||||
'';
|
||||
|
||||
checkDeployAge = pkgs.writeScriptBin "check-deploy-age" ''
|
||||
#!${lib.getExe pkgs.fish}
|
||||
|
||||
set date (date +%s)
|
||||
# we do this indirection here so monit's config won't change on each deploy
|
||||
set deploytimestamp (cat /etc/haccfiles-timestamp)
|
||||
set age (expr $date - $deploytimestamp)
|
||||
|
||||
if test $age -ge (expr 3600 \* 24 \* 10)
|
||||
echo "${config.networking.hostName} has not been deployed in the last 10 days, perhaps someone should do updates?"
|
||||
exit 1
|
||||
end
|
||||
'';
|
||||
in
|
||||
{
|
||||
mailserver.monitoring = {
|
||||
enable = true;
|
||||
alertAddress = "admin@hacc.space";
|
||||
config = (lib.replaceStrings ["port 22"] ["port ${toString (lib.head config.services.openssh.ports)}"] options.mailserver.monitoring.config.default);
|
||||
};
|
||||
|
||||
services.monit.config = ''
|
||||
check host onlyoffice with address onlyoffice.infra4future.de
|
||||
start program "/run/current-system/sw/bin/lxc-start -n onlyoffice -f /persist/lxc/onlyoffice/config"
|
||||
stop program "/run/current-system/sw/bin/lxc-stop -n onlyoffice"
|
||||
if failed port 443 protocol https status = 302
|
||||
then restart
|
||||
|
||||
check program deployed-commit-on-main path ${lib.getExe checkHash}
|
||||
if status == 1 for 64 cycles then alert
|
||||
if status == 2 for 3 cycles then alert
|
||||
|
||||
check program is-system-running path ${pkgs.systemd}/bin/systemctl is-system-running
|
||||
if status != 0 then alert
|
||||
|
||||
check program check-deploy-age path ${lib.getExe checkDeployAge}
|
||||
if status == 1 then alert
|
||||
'';
|
||||
}
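The two custom checks above read /etc/haccfiles-commit and /etc/haccfiles-timestamp, which are not created in this file; they are expected to be written by the deployment tooling. Purely as an illustration of the shape of that data (not how this repo actually provides it):

{
  environment.etc."haccfiles-commit".text = "<git commit hash the system was built from>";
  environment.etc."haccfiles-timestamp".text = "<unix timestamp of the deployment>";
}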
|
|
@ -1,30 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
services.murmur = {
|
||||
enable = true;
|
||||
logDays = -1;
|
||||
registerName = "hackers against climate change";
|
||||
welcometext = ''
|
||||
<br>Welcome to <b>mumble4future</b>!<br>Brought to you by <b style="color:red">infra4future</b>.<br>On <a href=https://mumble.hacc.space>mumble.hacc.space</a><br>Not confusing at all!
|
||||
'';
|
||||
sslKey = "/var/lib/acme/mumble.hacc.space/key.pem";
|
||||
sslCert = "/var/lib/acme/mumble.hacc.space/fullchain.pem";
|
||||
bandwidth = 128000;
|
||||
};
|
||||
|
||||
networking.firewall.allowedTCPPorts = [ config.services.murmur.port ];
|
||||
networking.firewall.allowedUDPPorts = [ config.services.murmur.port ];
|
||||
|
||||
# the mumble cert has its own group so that both nginx and murmur can read it
|
||||
users.groups.mumblecert = { };
|
||||
security.acme.certs."mumble.hacc.space" = {
|
||||
group = "mumblecert";
|
||||
extraDomainNames = [ "mumble.infra4future.de" ];
|
||||
reloadServices = [ "murmur" ];
|
||||
};
|
||||
users.users.nginx.extraGroups = [ "mumblecert" ];
|
||||
users.users.murmur.extraGroups = [ "mumblecert" ];
|
||||
|
||||
hacc.bindToPersist = [ "/var/lib/murmur" ];
|
||||
}
|
|
@ -1,89 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
containers.nextcloud.timeoutStartSec = "10 min";
|
||||
hacc.containers.nextcloud = {
|
||||
config = { config, lib, pkgs, ... }: {
|
||||
environment.systemPackages = [ pkgs.htop ];
|
||||
|
||||
services.nextcloud = {
|
||||
enable = true;
|
||||
|
||||
# must be set manually; may not be incremented by more than one at
|
||||
# a time, otherwise nextcloud WILL break
|
||||
package = pkgs.nextcloud29;
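# (purely illustrative upgrade path, package names are assumptions:
#  pkgs.nextcloud29 -> pkgs.nextcloud30 -> pkgs.nextcloud31, deploying
#  once per step and letting each migration finish before the next bump)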
|
||||
|
||||
home = "/persist/nextcloud";
|
||||
https = true;
|
||||
|
||||
hostName = "cloud.infra4future.de";
|
||||
config = {
|
||||
dbtype = "pgsql";
|
||||
dbuser = "nextcloud";
|
||||
dbhost = "/run/postgresql"; # nextcloud will add /.s.PGSQL.5432 by itself
|
||||
dbname = "nextcloud";
|
||||
# socket auth does not need this, but the module insists it does
|
||||
adminpassFile = "/persist/adminpassfile";
|
||||
adminuser = "root";
|
||||
};
|
||||
|
||||
# multiple pools may be doable using services.phpfpm.pools,
|
||||
# but i have not tried this yet. The nextcloud module defines a
|
||||
# pool "nextcloud"
|
||||
poolSettings = {
|
||||
pm = "dynamic";
|
||||
"pm.max_children" = "32";
|
||||
"pm.max_requests" = "500";
|
||||
"pm.max_spare_servers" = "4";
|
||||
"pm.min_spare_servers" = "2";
|
||||
"pm.start_servers" = "2";
|
||||
};
|
||||
|
||||
settings = {
|
||||
instanceid = "ocxlphb7fbju";
|
||||
datadirectory = "/persist/nextcloud/data";
|
||||
loglevel = 0;
|
||||
"overwrite.cli.url" = "https://cloud.infra4future.de";
|
||||
};
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
enable = true;
|
||||
package = pkgs.postgresql_15;
|
||||
ensureDatabases = [ "nextcloud" ];
|
||||
ensureUsers = [
|
||||
{ # by default, postgres has unix sockets enabled, and allows a
|
||||
# system user `nextcloud` to log in without other authentication
|
||||
name = "nextcloud";
|
||||
ensureDBOwnership = true;
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
services.postgresqlBackup = {
|
||||
enable = true;
|
||||
databases = [ "nextcloud" ];
|
||||
startAt = "*-*-* 23:45:00";
|
||||
location = "/persist/backups/postgres";
|
||||
};
|
||||
|
||||
# ensure that postgres is running *before* running the setup
|
||||
systemd.services."nextcloud-setup" = {
|
||||
requires = ["postgresql.service"];
|
||||
after = ["postgresql.service"];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."cloud.infra4future.de" = {
|
||||
locations."/".proxyPass = "http://${config.containers.nextcloud.localAddress}:80";
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
extraConfig = ''
|
||||
proxy_buffering off;
|
||||
client_max_body_size 0;
|
||||
add_header Cache-Control "no-store, no-cache, must-revalidate";
|
||||
'';
|
||||
};
|
||||
|
||||
}
|
|
@ -1,23 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
networking.firewall.enable = true;
|
||||
networking.firewall.logRefusedConnections = false;
|
||||
networking.nat.enable = true;
|
||||
|
||||
networking.nftables.enable = true;
|
||||
networking.nftables.tables.nat = {
|
||||
family = "ip";
|
||||
content = ''
|
||||
chain prerouting {
|
||||
type nat hook prerouting priority -100
|
||||
iifname enp35s0 tcp dport { 22 } dnat ${config.containers.forgejo.localAddress}:22
|
||||
}
|
||||
chain postrouting {
|
||||
type nat hook postrouting priority 100
|
||||
iifname lxcbr0 oifname enp35s0 masquerade
|
||||
iifname ve-* oifname enp35s0 masquerade
|
||||
}
|
||||
'';
|
||||
};
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
{ config, pkgs, ... }:
|
||||
|
||||
|
||||
{
|
||||
hacc.websites = {
|
||||
enable = true;
|
||||
directory = "${../.}/websites";
|
||||
};
|
||||
|
||||
|
||||
services.nginx.virtualHosts."parsons.hacc.space" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/~stuebinm/".root = "/persist/www/";
|
||||
};
|
||||
}
|
|
@ -1,137 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
sops.secrets = {
|
||||
"s4f-conference/env" = {};
|
||||
};
|
||||
|
||||
hacc.containers.s4f-conference = {
|
||||
bindSecrets = true;
|
||||
|
||||
config = { config, lib, pkgs, ... }: {
|
||||
systemd.services.mattermost.serviceConfig.EnvironmentFile =
|
||||
lib.mkForce "/secrets/env";
|
||||
|
||||
services.mattermost = {
|
||||
enable = true;
|
||||
siteUrl = "https://s4f-conference.infra4future.de";
|
||||
siteName = "Scientists for Future Chat";
|
||||
listenAddress = "0.0.0.0:3000";
|
||||
mutableConfig = false;
|
||||
|
||||
statePath = "/persist/mattermost";
|
||||
|
||||
extraConfig = {
|
||||
ServiceSettings = {
|
||||
TrustedProxyIPHeader = [ "X-Forwarded-For" "X-Real-Ip" ];
|
||||
EnableEmailInvitations = true;
|
||||
};
|
||||
TeamSettings = {
|
||||
EnableUserCreation = true;
|
||||
MaxUsersPerTeam = 2500;
|
||||
EnableUserDeactivation = true;
|
||||
EnableOpenServer = false;
|
||||
};
|
||||
PasswordSettings = {
|
||||
MinimumLength = 10;
|
||||
};
|
||||
FileSettings = {
|
||||
EnableFileAttachments = true;
|
||||
MaxFileSize = 52428800;
|
||||
DriverName = "local";
|
||||
Directory = "/persist/upload-storage";
|
||||
EnablePublicLink = true;
|
||||
PublicLinkSalt = "3k7p3yxdhz6798b3b9openfr9rn3ymwu";
|
||||
};
|
||||
EmailSettings = {
|
||||
EnableSignUpWithEmail = true;
|
||||
EnableSignInWithEmail = true;
|
||||
EnableSignInWithUsername = true;
|
||||
SendEmailNotifications = true;
|
||||
FeedbackName = "mattermost";
|
||||
FeedbackEmail = "mattermost@infra4future.de";
|
||||
ReplyToAddress = "mattermost@infra4future.de";
|
||||
FeedbackOrganization = "∆infra4future.de";
|
||||
EnableSMTPAuth = true;
|
||||
SMTPUsername = "noreply@infra4future.de";
|
||||
SMTPServer = "mail.hacc.space";
|
||||
SMTPPort = "465";
|
||||
SMTPServerTimeout = 10;
|
||||
ConnectionSecurity = "TLS";
|
||||
};
|
||||
RateLimitSettings.Enable = false;
|
||||
PrivacySettings = {
|
||||
ShowEmailAddress = false;
|
||||
ShowFullName = true;
|
||||
};
|
||||
# to disable the extra landing page advertising the app
|
||||
NativeAppSettings = {
|
||||
AppDownloadLink = "";
|
||||
AndroidAppDownloadLink = "";
|
||||
IosAppDownloadLink = "";
|
||||
};
|
||||
LogSettings = {
|
||||
EnableConsole = true;
|
||||
ConsoleLevel = "ERROR";
|
||||
EnableDiagnostics = false;
|
||||
EnableWebhookDebugging = false;
|
||||
};
|
||||
SupportSettings = {
|
||||
TermsOfServiceLink = "https://infra4future.de/nutzungsbedingungen.html";
|
||||
PrivacyPolicyLink = "https://infra4future.de/nutzungsbedingungen.html";
|
||||
AboutLink = "https://infra4future.de";
|
||||
SupportEmail = "info@infra4future.de";
|
||||
CustomTermsOfServiceEnabled = false;
|
||||
EnableAskCommunityLink = true;
|
||||
};
|
||||
AnnouncementSettings.EnableBanner = false;
|
||||
ComplianceSettings.Enable = false;
|
||||
ClusterSettings.Enable = false;
|
||||
MetricsSettings.Enable = false;
|
||||
GuestAccountsSettings.Enable = true;
|
||||
};
|
||||
|
||||
localDatabaseCreate = false;
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
enable = lib.mkForce true; # mattermost sets this to false. wtf.
|
||||
package = pkgs.postgresql_15;
|
||||
ensureDatabases = [ "mattermost" ];
|
||||
ensureUsers = [ {
|
||||
name = "mattermost";
|
||||
ensureDBOwnership = true;
|
||||
} ];
|
||||
|
||||
authentication = lib.mkForce ''
|
||||
# Generated file; do not edit!
|
||||
local all all trust
|
||||
host mattermost mattermost ::1/128 trust
|
||||
'';
|
||||
};
|
||||
services.postgresqlBackup = {
|
||||
enable = true;
|
||||
databases = [ "mattermost" ];
|
||||
startAt = "*-*-* 23:45:00";
|
||||
location = "/persist/backups/postgres";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."s4f-conference.infra4future.de" = {
|
||||
locations."/" = {
|
||||
proxyPass = "http://${config.containers.s4f-conference.localAddress}:3000";
|
||||
proxyWebsockets = true;
|
||||
extraConfig = ''
|
||||
# Mattermost CSR Patch
|
||||
proxy_hide_header Content-Security-Policy;
|
||||
proxy_hide_header X-Frame-Options;
|
||||
proxy_redirect off;
|
||||
|
||||
client_max_body_size 100M;
|
||||
'';
|
||||
};
|
||||
forceSSL = true;
|
||||
enableACME = true;
|
||||
};
|
||||
}
|
|
@ -1,102 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
let
|
||||
tracktrain-config = ''
|
||||
dbstring: "dbname=tracktrain"
|
||||
gtfs: /persist/gtfs.zip
|
||||
assets: ${pkgs.tracktrain}/assets
|
||||
|
||||
warp:
|
||||
port: 4000
|
||||
|
||||
login:
|
||||
enable: true
|
||||
url: https://login.infra4future.de
|
||||
clientName: tracktrain
|
||||
# clientSecret defined in env file
|
||||
|
||||
logging:
|
||||
ntfyTopic: ping.stuebinm.eu/monit
|
||||
name: ilztalbahn
|
||||
'';
|
||||
in
|
||||
{
|
||||
sops.secrets = {
|
||||
"tracktrain/env" = {};
|
||||
};
|
||||
|
||||
services.nginx.virtualHosts."tracktrain.ilztalbahn.eu" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations."/" = {
|
||||
proxyPass = "http://${config.containers.tracktrain.localAddress}:4000";
|
||||
proxyWebsockets = true;
|
||||
};
|
||||
# note: this shadows the /metrics endpoint of tracktrain
|
||||
# in case you remove this, please consider putting something
|
||||
# else here to keep it from being publicly scrapable
|
||||
locations."/metrics/" = {
|
||||
proxyPass = "http://${config.containers.tracktrain.localAddress}:2342";
|
||||
proxyWebsockets = true;
|
||||
extraConfig = ''
|
||||
rewrite ^/metrics/(.*) /$1 break;
|
||||
'';
|
||||
};
|
||||
};
|
||||
|
||||
hacc.containers.tracktrain = {
|
||||
bindSecrets = true;
|
||||
|
||||
config = { config, lib, pkgs, ... }: {
|
||||
|
||||
systemd.services.tracktrain = {
|
||||
enable = true;
|
||||
|
||||
description = "tracks trains, hopefully";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
requires = [ "network.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
EnvironmentFile = "/secrets/env";
|
||||
DynamicUser = true;
|
||||
};
|
||||
path = [ pkgs.wget pkgs.ntfy-sh ];
|
||||
script = ''
|
||||
cd /tmp
|
||||
ln -sf ${pkgs.writeText "tracktrain-config.yaml" tracktrain-config} config.yaml
|
||||
${pkgs.tracktrain}/bin/tracktrain +RTS -T
|
||||
'';
|
||||
};
|
||||
|
||||
services.postgresql = {
|
||||
enable = true;
|
||||
package = pkgs.postgresql_15;
|
||||
ensureDatabases = [ "tracktrain" ];
|
||||
ensureUsers = [ {
|
||||
name = "tracktrain";
|
||||
ensureDBOwnership = true;
|
||||
} ];
|
||||
authentication = ''
|
||||
local all all trust
|
||||
'';
|
||||
};
|
||||
|
||||
services.prometheus = {
|
||||
enable = true;
|
||||
port = 9001;
|
||||
scrapeConfigs = [ {
|
||||
job_name = "tracktrain";
|
||||
static_configs = [{
|
||||
targets = [ "0.0.0.0:4000" ];
|
||||
}];
|
||||
} ];
|
||||
};
|
||||
|
||||
systemd.services.grafana.serviceConfig.EnvironmentFile =
|
||||
"/secrets/env";
|
||||
hacc.bindToPersist = [ "/var/lib/grafana" ];
|
||||
};
|
||||
};
|
||||
|
||||
}
|
120 parsons/uffd.nix
|
@ -1,120 +0,0 @@
|
|||
{ config, lib, pkgs, ... }:
|
||||
|
||||
{
|
||||
hacc.containers.uffd = {
|
||||
config = { config, lib, pkgs, ... }: {
|
||||
services.uwsgi = {
|
||||
enable = true;
|
||||
plugins = [ "python3" ];
|
||||
instance = {
|
||||
type = "normal";
|
||||
pythonPackages = _: [ pkgs.uffd ];
|
||||
module = "uffd:create_app()";
|
||||
# socket = "${config.services.uwsgi.runDir}/uwsgi.sock";
|
||||
http = ":8080";
|
||||
env = [
|
||||
"CONFIG_PATH=/persist/uffd/uffd.conf"
|
||||
];
|
||||
hook-pre-app = "exec:FLASK_APP=${pkgs.uffd}/lib/python3.10/site-packages/uffd flask db upgrade";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
services.nginx.virtualHosts."login.infra4future.de" = {
|
||||
enableACME = true;
|
||||
forceSSL = true;
|
||||
locations = {
|
||||
"/".proxyPass = "http://${config.containers.uffd.localAddress}:8080";
|
||||
"/static".root = "${pkgs.uffd}/lib/python3.10/site-packages/uffd";
|
||||
"/static/hacc.png".return = "302 https://infra4future.de/assets/img/logo_vernetzung.png";
|
||||
"/static/infra4future.svg".return = "302 https://infra4future.de/assets/img/infra4future.svg";
|
||||
"/static/hedgedoc.svg".return = "302 https://infra4future.de/assets/img/icons/hedgedoc.svg";
|
||||
"/static/mattermost.svg".return = "302 https://infra4future.de/assets/img/icons/mattermost.svg";
|
||||
"/static/nextcloud.svg".return = "302 https://infra4future.de/assets/img/icons/nextcloud.svg";
|
||||
"/static/hot_shit.svg".return = "302 https://infra4future.de/assets/img/icons/hot_shit.svg";
|
||||
};
|
||||
};
|
||||
|
||||
systemd.services.auamost = {
|
||||
enable = true;
|
||||
|
||||
description = "mattermost aua gruppensync";
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig.Type = "simple";
|
||||
path = [ pkgs.fish pkgs.curl pkgs.jq ];
|
||||
script = (pkgs.writeTextFile {
|
||||
name = "auamost.fish";
|
||||
executable = true;
|
||||
checkPhase = ''
|
||||
${lib.getExe pkgs.fish} -n $target
|
||||
'';
|
||||
text = ''
|
||||
#!${lib.getExe pkgs.fish}
|
||||
source /run/secrets/auamost/secrets.fish
|
||||
|
||||
for i in (seq 1 (count $groups))
|
||||
set team $teams[$i]
|
||||
set group $groups[$i]
|
||||
set users (curl -u $uffd_token --basic https://login.infra4future.de/api/v1/getusers -d group="$group")
|
||||
set usernames (echo "$users" | jq -c "[.[] | .loginname]")
|
||||
for user in (echo "$users" | jq -c ".[]")
|
||||
set id (echo "$user" | jq .id)
|
||||
set username (echo "$user" | jq .loginname)
|
||||
set email (echo "$user" | jq .email)
|
||||
curl -H $mattermost_token \
|
||||
-H "Content-Type: application/json" https://mattermost.infra4future.de/api/v4/users \
|
||||
-d '{"email": '"$email"', "username": '"$username"', "auth_service": "gitlab", "auth_data": "'"$id"'"}'
|
||||
end
|
||||
set userids (curl -H $mattermost_token \
|
||||
-H "Content-Type: application/json" https://mattermost.infra4future.de/api/v4/users/usernames \
|
||||
-d "$usernames" | jq '[.[] | {user_id: .id, team_id: "'$team'"} ]')
|
||||
curl -H $mattermost_token \
|
||||
-H "Content-Type: application/json" https://mattermost.infra4future.de/api/v4/teams/"$team"/members/batch \
|
||||
-d "$userids"
|
||||
|
||||
if test "$group" = "hacc"
|
||||
continue
|
||||
end
|
||||
|
||||
set current_members (curl -H $mattermost_token \
|
||||
-H "Content-Type: application/json" https://mattermost.infra4future.de/api/v4/teams/"$team"/members | jq '[.[] | .user_id]')
|
||||
|
||||
# membership relations don't contain e.g. usernames, so fetch those, too
|
||||
set current_users (curl -H $mattermost_token \
|
||||
-H "Content-Type: application/json" https://mattermost.infra4future.de/api/v4/users/ids \
|
||||
-d "$current_members" | jq -c '.[]')
|
||||
|
||||
set userids (echo "$userids" | jq -c ".[].user_id")
|
||||
for member in $current_users
|
||||
set id (echo $member | jq .id)
|
||||
if not contains -i $id $userids > /dev/null then
|
||||
set id_unquoted (echo $member | jq -r .id)
|
||||
echo removing $id_unquoted (echo $member | jq '.email') from $team \($group\)
|
||||
curl -X DELETE -H $mattermost_token \
|
||||
-H "Content-Type: application/json" https://mattermost.infra4future.de/api/v4/teams/"$team"/members/"$id_unquoted"
|
||||
end
|
||||
end
|
||||
end
|
||||
'';
|
||||
}).outPath;
|
||||
startAt = "*:0/15";
|
||||
};
|
||||
|
||||
systemd.services.uffd-account-expiry-notification = {
|
||||
enable = true;
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
after = [ "network.target" ];
|
||||
serviceConfig.Type = "simple";
|
||||
path = [ pkgs.hacc-scripts pkgs.sqlite-interactive pkgs.postfix ];
|
||||
script = ''
|
||||
uffd-unused-accounts-notification.scm -v admin
|
||||
'';
|
||||
startAt = "weekly";
|
||||
restartIfChanged = false;
|
||||
};
|
||||
|
||||
sops.secrets."auamost/secrets.fish" = { };
|
||||
|
||||
environment.systemPackages = with pkgs; [ curl jq ];
|
||||
}
|
@@ -1,55 +0,0 @@
{ config, lib, pkgs, ... }:

{
  sops.secrets = {
    "vaultwarden/env" = {};
  };

  services.vaultwarden = {
    enable = true;
    config = {
      DATA_FOLDER="/persist/var/lib/vaultwarden/data";
      LOG_LEVEL="error";
      SIGNUPS_ALLOWED=false;
      SIGNUPS_VERIFY=true;
      SIGNUPS_DOMAINS_WHITELIST="hacc.space";
      ORG_CREATION_USERS="admin@hacc.space";
      INVITATIONS_ALLOWED=true;
      INVITATION_ORG_NAME="haccwarden";

      TRASH_AUTO_DELETE_DAYS=90;

      DOMAIN="https://pw.hacc.space";
      ROCKET_ADDRESS="127.0.0.1";
      ROCKET_PORT=5354;
      ROCKET_WORKERS=2;

      SMTP_HOST="mail.hacc.space";
      SMTP_FROM="vaultwarden@hacc.space";
      SMTP_FROM_NAME="haccwarden";
      SMTP_PORT=587;
      SMTP_USERNAME="noreply@infra4future.de";

    };
    environmentFile = "/run/secrets/vaultwarden/env";
    dbBackend = "sqlite";
    backupDir = "/persist/data/vaultwarden_backups/";
  };

  #work around ProtectSystem=strict, cleanup
  systemd.services.vaultwarden.serviceConfig = {
    ReadWritePaths = [ "/persist/var/lib/vaultwarden" ];
    StateDirectory = lib.mkForce "";
  };
  systemd.services.backup-vaultwarden.environment.DATA_FOLDER =
    lib.mkForce "/persist/var/lib/vaultwarden/data";

  services.nginx.virtualHosts."pw.hacc.space" = {
    locations."/" = {
      proxyPass = "http://127.0.0.1:5354";
      proxyWebsockets = true;
    };
    forceSSL = true;
    enableACME = true;
  };
}
25 pkgs/alps/default.nix Normal file
@@ -0,0 +1,25 @@
{ lib, buildGoModule }:

buildGoModule rec {
  pname = "alps";
  version = "0.0.1";

  src = fetchGit {
    url = "https://git.sr.ht/~migadu/alps";
    rev = "51498a2dc37987f55b022efb961b68a282be17ed";
  };

  vendorSha256 = "0wc8fb03zlc1gl4nxlsh149gvpvrs3lc0smzrnam9smigg9gw4in";

  subPackages = [ "cmd/alps" ];

  postInstall = ''
    mkdir -p $out/share/alps
    cp -r $src/themes $out/share/alps/
  '';

  meta = with lib; {
    description = "A simple and extensible webmail.";
    homepage = "https://git.sr.ht/~migadu/alps";
  };
}
@@ -1,37 +1,25 @@
{ sources, ... }@args:
{ nixpkgs ? <nixpkgs>, ... }:

let
  pkgs = import sources.nixpkgs args;
  oldstable = import sources.nixpkgs-oldstable args;
  unstable = import sources.nixpkgs-unstable args;

  pkgs = import nixpkgs {};
  callPackage = pkgs.lib.callPackageWith (pkgs // newpkgs);

  wasiSrc = fetchGit {
    url = "https://gitlab.infra4future.de/wasi/wasi-rust";
    rev = "356dbc23a3683d134f13156af71baeaa06fb80d1";
  };

  immaeNix = fetchGit {
    url = "https://git.immae.eu/perso/Immae/Config/Nix.git";
    rev = "7ad4966f41db0669a77c7a6ee7f87f0d4e586b0c";
  };

  newpkgs = {

    mattermost = callPackage ./mattermost.nix {
      buildGoModule = unstable.buildGo122Module;
    # package = callPackage ./package {};
    wasi = import wasiSrc { inherit wasiSrc; pkgs = pkgs // newpkgs; };
    peertube = callPackage ./peertube { mylibs = import "${immaeNix}/lib" { inherit pkgs; }; };
    alps = callPackage ./alps {};
    funkwhale = callPackage ./funkwhale {};
    };

    morph = callPackage ./morph.nix {
      buildGoModule = unstable.buildGo122Module;
    };

    forgejo = unstable.forgejo;

    tracktrain = import sources.tracktrain {
      nixpkgs = unstable;
      compiler = "default";
    };

    uffd = oldstable.callPackage ./uffd { };

    hacc-scripts = callPackage ./scripts {};

    inherit (oldstable) uwsgi flask;

    # TODO: once on nixos 24.05, remove this inherit
    inherit (unstable) lix;
  };

in pkgs.extend(_: _: newpkgs)
in newpkgs
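For orientation, both variants of this file evaluate to an attribute set of the custom packages used elsewhere in the repository. A rough call-site sketch, where the import path, argument, and chosen attribute are assumptions rather than anything taken from this diff:

  # hypothetical call site
  let
    haccPkgs = import ./pkgs { };   # the sources-based variant instead expects { inherit sources; ... }
  in
    haccPkgs.mattermost             # pick whichever attribute the variant in use actually defines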
47 pkgs/funkwhale/default.nix Normal file
@@ -0,0 +1,47 @@
{ stdenv, fetchurl, unzip }:

# Look for the correct urls for build_front and build_api artifacts on the tags page of the project : https://dev.funkwhale.audio/funkwhale/funkwhale/pipelines?scope=tags
# Attention : do not use the url "https://dev.funkwhale.audio/funkwhale/funkwhale/-/jobs/artifacts/${release}/download?job=" : it is not guaranteed to be stable

let
  release = "1.0.1";
  srcs = {
    api = fetchurl {
      url = https://dev.funkwhale.audio/funkwhale/funkwhale/-/jobs/56793/artifacts/download;
      name = "api.zip";
      sha256 = "0p21r8kbn7sr33chp7404fi9pm4yz6qhfz4z7gxf3vamg9fbsbsc";
    };
    frontend = fetchurl {
      url = https://dev.funkwhale.audio/funkwhale/funkwhale/-/jobs/56790/artifacts/download;
      name = "frontend.zip";
      sha256 = "0hz4d59sva6zi5q53wj3f6yaw5didcl9z148s6rsy2m6gyr8566d";
    };
  };
in stdenv.mkDerivation {
  name = "funkwhale";
  version = "${release}";
  src = srcs.api;
  nativeBuildInputs = [ unzip ];
  postPatch = ''
    substituteInPlace requirements/base.txt \
      --replace "django-cleanup==3.2.0" django-cleanup
  '';

  installPhase = ''
    mkdir $out
    cp -R ./* $out
    unzip ${srcs.frontend} -d $out
    mv $out/front/ $out/front_tmp
    mv $out/front_tmp/dist $out/front
    rmdir $out/front_tmp
  '';

  meta = with stdenv.lib; {
    description = "A modern, convivial and free music server";
    homepage = https://funkwhale.audio/;
    license = licenses.agpl3;
    platforms = platforms.linux;
    maintainers = with maintainers; [ mmai ];
  };
}
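As the comment in this file notes, the artifact URLs have to be re-pinned to concrete job IDs on every release bump. One sketch of how the matching sha256 could then be obtained, using the usual placeholder-hash trick; the job ID below is hypothetical and not taken from this diff:

  # hypothetical: re-pin the job URL, build once with a fake hash,
  # then copy the real hash from the resulting hash-mismatch error
  api = fetchurl {
    url = "https://dev.funkwhale.audio/funkwhale/funkwhale/-/jobs/NEW_JOB_ID/artifacts/download";
    name = "api.zip";
    sha256 = stdenv.lib.fakeSha256;
  };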
@@ -1,85 +0,0 @@
{ lib
, buildGoModule
, fetchFromGitHub
, nix-update-script
, fetchurl
, nixosTests
}:

buildGoModule rec {
  pname = "mattermost";
  # ESR releases only.
  # See https://docs.mattermost.com/upgrade/extended-support-release.html
  # When a new ESR version is available (e.g. 8.1.x -> 9.5.x), update
  # the version regex in passthru.updateScript as well.
  version = "9.11.3";

  src = fetchFromGitHub {
    owner = "mattermost";
    repo = "mattermost";
    rev = "v${version}";
    hash = "sha256-CuFkydl1ZZUAWmrDIV1Jp9S6jIKYmglAe5XW2lTRgtQ=";
  };

  # Needed because buildGoModule does not support go workspaces yet.
  # We use go 1.22's workspace vendor command, which is not yet available
  # in the default version of go used in nixpkgs, nor is it used by upstream:
  # https://github.com/mattermost/mattermost/issues/26221#issuecomment-1945351597
  overrideModAttrs = (_: {
    buildPhase = ''
      make setup-go-work
      go work vendor -e
    '';
  });

  webapp = fetchurl {
    url = "https://releases.mattermost.com/${version}/mattermost-${version}-linux-amd64.tar.gz";
    hash = "sha256-4JzhL2+G3T98pNFgKugs/eoSrbm7QSk5grVlprrIKEI=";
  };

  vendorHash = "sha256-Gwv6clnq7ihoFC8ox8iEM5xp/us9jWUrcmqA9/XbxBE=";

  modRoot = "./server";
  preBuild = ''
    make setup-go-work
  '';

  subPackages = [ "cmd/mattermost" ];
  offlineCache = webapp;

  tags = [ "production" ];

  ldflags = [
    "-s"
    "-w"
    "-X github.com/mattermost/mattermost/server/public/model.Version=${version}"
    "-X github.com/mattermost/mattermost/server/public/model.BuildNumber=${version}-nixpkgs"
    "-X github.com/mattermost/mattermost/server/public/model.BuildDate=1970-01-01"
    "-X github.com/mattermost/mattermost/server/public/model.BuildHash=v${version}"
    "-X github.com/mattermost/mattermost/server/public/model.BuildHashEnterprise=none"
    "-X github.com/mattermost/mattermost/server/public/model.BuildEnterpriseReady=false"
  ];

  postInstall = ''
    tar --strip 1 --directory $out -xf $webapp \
      mattermost/{client,i18n,fonts,templates,config}

    # For some reason a bunch of these files are executable
    find $out/{client,i18n,fonts,templates,config} -type f -exec chmod -x {} \;
  '';

  passthru = {
    updateScript = nix-update-script {
      extraArgs = [ "--version-regex" "^v(9\.11\.([0-9.]+))" ];
    };
    tests.mattermost = nixosTests.mattermost;
  };

  meta = with lib; {
    description = "Mattermost is an open source platform for secure collaboration across the entire software development lifecycle";
    homepage = "https://www.mattermost.org";
    license = with licenses; [ agpl3Only asl20 ];
    maintainers = with maintainers; [ ryantm numinit kranzes mgdelacroix ];
    mainProgram = "mattermost";
  };
}
@@ -1,33 +0,0 @@
{ buildGoModule
, fetchFromGitHub
}:

buildGoModule rec {
  pname = "mattermost-morph";
  version = "1.1.0";

  src = fetchFromGitHub {
    owner = "mattermost";
    repo = "morph";
    rev = "v${version}";
    hash = "sha256-Orh/a9OlUVIlDdLXRpDAnHUmWRiM1N2oO+dijbuJzx8=";
  };

  vendorHash = null;

  subPackages = [ "cmd/morph" ];

  tags = [ "production" ];

  ldflags = [
    "-s"
    "-w"
    "-X github.com/mattermost/mattermost/server/public/model.Version=${version}"
    "-X github.com/mattermost/mattermost/server/public/model.BuildNumber=${version}-nixpkgs"
    "-X github.com/mattermost/mattermost/server/public/model.BuildDate=1970-01-01"
    "-X github.com/mattermost/mattermost/server/public/model.BuildHash=v${version}"
    "-X github.com/mattermost/mattermost/server/public/model.BuildHashEnterprise=none"
    "-X github.com/mattermost/mattermost/server/public/model.BuildEnterpriseReady=false"
  ];

}
13640 pkgs/peertube/client-yarn-packages.nix Normal file
File diff suppressed because it is too large
207 pkgs/peertube/default.nix Normal file
|
@ -0,0 +1,207 @@
|
|||
{ ldap ? false, sendmail ? false, light ? null, syden ? false, runCommand, libsass
|
||||
, lib, stdenv, rsync, fetchzip, youtube-dl, fetchurl, mylibs, python, nodejs, nodePackages, yarn2nix-moretea }:
|
||||
let
|
||||
nodeHeaders = fetchurl {
|
||||
url = "https://nodejs.org/download/release/v${nodejs.version}/node-v${nodejs.version}-headers.tar.gz";
|
||||
sha256 = "17kf05a92r4y4n1lj78265wr6zhkpzbr1k8nbwrl8sq71npd6n5j";
|
||||
};
|
||||
source = mylibs.fetchedGithub ./peertube.json;
|
||||
patchedSource = stdenv.mkDerivation (source // rec {
|
||||
phases = [ "unpackPhase" "patchPhase" "installPhase" ];
|
||||
patches = [ ./yarn_fix_http_node.patch ]
|
||||
++ lib.optionals ldap [ ./ldap.patch ]
|
||||
++ lib.optionals sendmail [ ./sendmail.patch ]
|
||||
++ lib.optionals syden [ ./syden.patch ];
|
||||
installPhase = ''
|
||||
mkdir $out
|
||||
cp -a . $out/
|
||||
'';
|
||||
});
|
||||
serverPatchedPackage = runCommand "server-package" {} ''
|
||||
mkdir -p $out
|
||||
cp ${patchedSource}/package.json $out/
|
||||
cp ${patchedSource}/yarn.lock $out/
|
||||
'';
|
||||
clientPatchedPackage = runCommand "client-package" {} ''
|
||||
mkdir -p $out
|
||||
cp ${patchedSource}/client/package.json $out/
|
||||
cp ${patchedSource}/client/yarn.lock $out/
|
||||
'';
|
||||
|
||||
yarnModulesConfig = {
|
||||
bcrypt = {
|
||||
buildInputs = [ nodePackages.node-pre-gyp ];
|
||||
postInstall = let
|
||||
node_module_version = "72";
|
||||
bcrypt_lib = fetchurl {
|
||||
url = "https://github.com/kelektiv/node.bcrypt.js/releases/download/v3.0.7/bcrypt_lib-v3.0.7-node-v${node_module_version}-linux-x64-glibc.tar.gz";
|
||||
sha256 = "0kpm9j0yc4lqsafldfsql3m72rr1fapljlb6ddxvy3zi13rb7ppx";
|
||||
};
|
||||
in
|
||||
''
|
||||
if [ "$(node -e "console.log(process.versions.modules)")" != "${node_module_version}" ]; then
|
||||
echo "mismatching version with nodejs please update bcrypt derivation"
|
||||
false
|
||||
fi
|
||||
mkdir lib && tar -C lib -xf ${bcrypt_lib}
|
||||
patchShebangs ../node-pre-gyp
|
||||
npm run install
|
||||
'';
|
||||
};
|
||||
dtrace-provider = {
|
||||
buildInputs = [ python nodePackages.node-gyp ];
|
||||
postInstall = ''
|
||||
npx node-gyp rebuild --tarball=${nodeHeaders}
|
||||
'';
|
||||
};
|
||||
node-sass = {
|
||||
buildInputs = [ libsass python ];
|
||||
postInstall =
|
||||
''
|
||||
node scripts/build.js --tarball=${nodeHeaders}
|
||||
'';
|
||||
};
|
||||
|
||||
sharp = {
|
||||
buildInputs = [ python nodePackages.node-gyp ];
|
||||
postInstall =
|
||||
let
|
||||
tarball = fetchurl {
|
||||
url = "https://github.com/lovell/sharp-libvips/releases/download/v8.8.1/libvips-8.8.1-linux-x64.tar.gz";
|
||||
sha256 = "0xqv61g6s6rkvc31zq9a3bf8rp56ijnpw0xhr91hc88asqprd5yh";
|
||||
};
|
||||
in
|
||||
''
|
||||
mkdir vendor
|
||||
tar -C vendor -xf ${tarball}
|
||||
patchShebangs ../prebuild-install
|
||||
npx node install/libvips
|
||||
npx node install/dll-copy
|
||||
npx prebuild-install || npx node-gyp rebuild --tarball=${nodeHeaders}
|
||||
'';
|
||||
};
|
||||
utf-8-validate = {
|
||||
buildInputs = [ nodePackages.node-gyp-build ];
|
||||
};
|
||||
youtube-dl = {
|
||||
postInstall = ''
|
||||
mkdir bin
|
||||
ln -s ${youtube-dl}/bin/youtube-dl bin/youtube-dl
|
||||
cat > bin/details <<EOF
|
||||
{"version":"${youtube-dl.version}","path":null,"exec":"youtube-dl"}
|
||||
EOF
|
||||
'';
|
||||
};
|
||||
};
|
||||
serverYarnModulesArg = rec {
|
||||
pname = "peertube-server-yarn-modules";
|
||||
version = source.version;
|
||||
name = "${pname}-${version}";
|
||||
packageJSON = "${serverPatchedPackage}/package.json";
|
||||
yarnLock = "${serverPatchedPackage}/yarn.lock";
|
||||
yarnNix = ./server-yarn-packages.nix;
|
||||
pkgConfig = yarnModulesConfig;
|
||||
};
|
||||
clientYarnModulesArg = rec {
|
||||
pname = "peertube-client-yarn-modules";
|
||||
version = source.version;
|
||||
name = "${pname}-${version}";
|
||||
packageJSON = "${clientPatchedPackage}/package.json";
|
||||
yarnLock = "${clientPatchedPackage}/yarn.lock";
|
||||
yarnNix = ./client-yarn-packages.nix;
|
||||
pkgConfig = yarnModulesConfig;
|
||||
};
|
||||
yarnModulesNoWorkspace = args: (yarn2nix-moretea.mkYarnModules args).overrideAttrs(old: {
|
||||
buildPhase = builtins.replaceStrings [" ./package.json"] [" /dev/null; cp deps/*/package.json ."] old.buildPhase;
|
||||
});
|
||||
|
||||
patchedPackages = stdenv.mkDerivation (source // rec {
|
||||
patches = if ldap then [ ./ldap.patch ] else [ ./yarn_fix_http_node.patch ];
|
||||
installPhase = ''
|
||||
mkdir $out
|
||||
cp package.json yarn.lock $out/
|
||||
'';
|
||||
});
|
||||
serverYarnModules = yarnModulesNoWorkspace serverYarnModulesArg;
|
||||
serverYarnModulesProd = yarnModulesNoWorkspace (serverYarnModulesArg // { yarnFlags = yarn2nix-moretea.defaultYarnFlags ++ [ "--production" ]; });
|
||||
clientYarnModules = yarnModulesNoWorkspace clientYarnModulesArg;
|
||||
|
||||
server = stdenv.mkDerivation ({
|
||||
pname = "peertube-server";
|
||||
version = source.version;
|
||||
src = patchedSource;
|
||||
buildPhase = ''
|
||||
ln -s ${serverYarnModules}/node_modules .
|
||||
npm run build:server
|
||||
'';
|
||||
installPhase = ''
|
||||
mkdir $out
|
||||
cp -a dist $out
|
||||
'';
|
||||
buildInputs = [ nodejs serverYarnModules ];
|
||||
});
|
||||
|
||||
client = stdenv.mkDerivation ({
|
||||
pname = "peertube-client";
|
||||
version = source.version;
|
||||
src = patchedSource;
|
||||
buildPhase = let
|
||||
lightArg = if light == null then "" else if light == true then "--light" else "--light-language";
|
||||
in ''
|
||||
ln -s ${serverYarnModules}/node_modules .
|
||||
cp -a ${clientYarnModules}/node_modules client/
|
||||
chmod +w client/node_modules
|
||||
patchShebangs .
|
||||
npm run build:client -- ${lightArg}
|
||||
'';
|
||||
installPhase = ''
|
||||
mkdir $out
|
||||
cp -a client/dist $out
|
||||
'';
|
||||
buildInputs = [ nodejs clientYarnModules ];
|
||||
});
|
||||
|
||||
package = stdenv.mkDerivation rec {
|
||||
version = source.version;
|
||||
pname = "peertube";
|
||||
src = patchedSource;
|
||||
buildPhase = ''
|
||||
ln -s ${serverYarnModulesProd}/node_modules .
|
||||
ln -s ${clientYarnModules}/node_modules client/
|
||||
rm -rf dist && cp -a ${server}/dist dist
|
||||
rm -rf client/dist && cp -a ${client}/dist client/
|
||||
'';
|
||||
installPhase = ''
|
||||
mkdir $out
|
||||
cp -a * $out
|
||||
ln -s /tmp $out/.cache
|
||||
'';
|
||||
|
||||
meta = {
|
||||
description = "A free software to take back control of your videos";
|
||||
|
||||
longDescription = ''
|
||||
PeerTube aspires to be a decentralized and free/libre alternative to video
|
||||
broadcasting services.
|
||||
PeerTube is not meant to become a huge platform that would centralize
|
||||
videos from all around the world. Rather, it is a network of
|
||||
inter-connected small videos hosters.
|
||||
Anyone with a modicum of technical skills can host a PeerTube server, aka
|
||||
an instance. Each instance hosts its users and their videos. In this way,
|
||||
every instance is created, moderated and maintained independently by
|
||||
various administrators.
|
||||
You can still watch from your account videos hosted by other instances
|
||||
though if the administrator of your instance had previously connected it
|
||||
with other instances.
|
||||
'';
|
||||
|
||||
license = stdenv.lib.licenses.agpl3Plus;
|
||||
|
||||
homepage = "https://joinpeertube.org/";
|
||||
|
||||
platforms = stdenv.lib.platforms.linux; # not sure here
|
||||
maintainers = with stdenv.lib.maintainers; [ matthiasbeyer immae ];
|
||||
};
|
||||
};
|
||||
in
|
||||
package
|
597 pkgs/peertube/ldap.patch Normal file
|
@ -0,0 +1,597 @@
|
|||
commit ffb4a59047a014d6bb050b67a2fc7bc116be4682
|
||||
Author: Ismaël Bouya <ismael.bouya@normalesup.org>
|
||||
Date: Tue Feb 12 18:47:53 2019 +0100
|
||||
|
||||
Add LDAP authentication
|
||||
|
||||
diff --git a/config/default.yaml b/config/default.yaml
|
||||
index 3260c62fc..dcce721b9 100644
|
||||
--- a/config/default.yaml
|
||||
+++ b/config/default.yaml
|
||||
@@ -51,6 +51,19 @@ redis:
|
||||
auth: null
|
||||
db: 0
|
||||
|
||||
+auth:
|
||||
+ local:
|
||||
+ enabled: true
|
||||
+ ldap:
|
||||
+ enabled: true
|
||||
+ url: ldap://localhost:389/dc=example,dc=com
|
||||
+ insecure_tls: false
|
||||
+ bind_dn: cn=admin,dc=example,dc=com
|
||||
+ bind_password: adminPass
|
||||
+ base: dc=example,dc=com
|
||||
+ mail_entry: "mail"
|
||||
+ user_filter: "(|(email=%username%)(uid=%username%))"
|
||||
+
|
||||
smtp:
|
||||
hostname: null
|
||||
port: 465
|
||||
diff --git a/config/production.yaml.example b/config/production.yaml.example
|
||||
index 30cd2ffe0..c56691bf4 100644
|
||||
--- a/config/production.yaml.example
|
||||
+++ b/config/production.yaml.example
|
||||
@@ -51,6 +51,19 @@ redis:
|
||||
auth: null
|
||||
db: 0
|
||||
|
||||
+auth:
|
||||
+ local:
|
||||
+ enabled: true
|
||||
+ ldap:
|
||||
+ enabled: true
|
||||
+ url: ldap://localhost:389/dc=example,dc=com
|
||||
+ insecure_tls: false
|
||||
+ bind_dn: cn=admin,dc=example,dc=com
|
||||
+ bind_password: adminPass
|
||||
+ base: dc=example,dc=com
|
||||
+ mail_entry: "mail"
|
||||
+ user_filter: "(|(email=%username%)(uid=%username%))"
|
||||
+
|
||||
# SMTP server to send emails
|
||||
smtp:
|
||||
hostname: null
|
||||
diff --git a/package.json b/package.json
|
||||
index 49d9faf97..31eccf797 100644
|
||||
--- a/package.json
|
||||
+++ b/package.json
|
||||
@@ -112,6 +112,7 @@
|
||||
"iso-639-3": "^1.0.1",
|
||||
"js-yaml": "^3.5.4",
|
||||
"jsonld": "~2.0.1",
|
||||
+ "ldapjs": "^1.0.2",
|
||||
"lodash": "^4.17.10",
|
||||
"lru-cache": "^5.1.1",
|
||||
"magnet-uri": "^5.1.4",
|
||||
diff --git a/server/initializers/config.ts b/server/initializers/config.ts
|
||||
index 7fd77f3e8..45a667826 100644
|
||||
--- a/server/initializers/config.ts
|
||||
+++ b/server/initializers/config.ts
|
||||
@@ -34,6 +34,21 @@ const CONFIG = {
|
||||
AUTH: config.has('redis.auth') ? config.get<string>('redis.auth') : null,
|
||||
DB: config.has('redis.db') ? config.get<number>('redis.db') : null
|
||||
},
|
||||
+ AUTH: {
|
||||
+ LOCAL: {
|
||||
+ ENABLED: config.has('auth.local.enabled') ? config.get<boolean>('auth.local.enabled') : true,
|
||||
+ },
|
||||
+ LDAP: {
|
||||
+ ENABLED: config.has('auth.ldap.enabled') ? config.get<boolean>('auth.ldap.enabled') : false,
|
||||
+ URL: config.has('auth.ldap.url') ? config.get<string>('auth.ldap.url') : null,
|
||||
+ INSECURE_TLS: config.has('auth.ldap.insecure_tls') ? config.get<boolean>('auth.ldap.insecure_tls') : false,
|
||||
+ BIND_DN: config.has('auth.ldap.bind_dn') ? config.get<string>('auth.ldap.bind_dn') : null,
|
||||
+ BIND_PASSWORD: config.has('auth.ldap.bind_password') ? config.get<string>('auth.ldap.bind_password') : null,
|
||||
+ BASE: config.has('auth.ldap.base') ? config.get<string>('auth.ldap.base') : null,
|
||||
+ MAIL_ENTRY: config.has('auth.ldap.mail_entry') ? config.get<string>('auth.ldap.mail_entry') : 'mail',
|
||||
+ USER_FILTER: config.has('auth.ldap.user_filter') ? config.get<string>('auth.ldap.user_filter') : null
|
||||
+ },
|
||||
+ },
|
||||
SMTP: {
|
||||
HOSTNAME: config.get<string>('smtp.hostname'),
|
||||
PORT: config.get<number>('smtp.port'),
|
||||
diff --git a/server/initializers/migrations/0375-user-ldap-dn.ts b/server/initializers/migrations/0375-user-ldap-dn.ts
|
||||
new file mode 100644
|
||||
index 000000000..a9d68124b
|
||||
--- /dev/null
|
||||
+++ b/server/initializers/migrations/0375-user-ldap-dn.ts
|
||||
@@ -0,0 +1,26 @@
|
||||
+import * as Sequelize from 'sequelize'
|
||||
+
|
||||
+async function up (utils: {
|
||||
+ transaction: Sequelize.Transaction,
|
||||
+ queryInterface: Sequelize.QueryInterface,
|
||||
+ sequelize: Sequelize.Sequelize
|
||||
+}): Promise<void> {
|
||||
+
|
||||
+ {
|
||||
+ const data = {
|
||||
+ type: Sequelize.STRING,
|
||||
+ allowNull: true,
|
||||
+ defaultValue: null
|
||||
+ }
|
||||
+ await utils.queryInterface.addColumn('user', 'ldapDn', data)
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+function down (options) {
|
||||
+ throw new Error('Not implemented.')
|
||||
+}
|
||||
+
|
||||
+export {
|
||||
+ up,
|
||||
+ down
|
||||
+}
|
||||
diff --git a/server/lib/ldap.ts b/server/lib/ldap.ts
|
||||
new file mode 100644
|
||||
index 000000000..e6601e5cb
|
||||
--- /dev/null
|
||||
+++ b/server/lib/ldap.ts
|
||||
@@ -0,0 +1,89 @@
|
||||
+import * as express from 'express'
|
||||
+import { createClient, Client, parseFilter } from 'ldapjs'
|
||||
+import { logger } from '../helpers/logger'
|
||||
+import { CONFIG } from '../initializers/config'
|
||||
+
|
||||
+class Ldap {
|
||||
+
|
||||
+ private static instance: Ldap
|
||||
+ private initialized = false
|
||||
+ private client: Client
|
||||
+ private prefix: string
|
||||
+
|
||||
+ private constructor () {}
|
||||
+
|
||||
+ init () {
|
||||
+ // Already initialized
|
||||
+ if (this.initialized === true) return
|
||||
+ this.initialized = true
|
||||
+
|
||||
+ this.client = createClient(Ldap.getLdapClientOptions())
|
||||
+ }
|
||||
+
|
||||
+ static getLdapClientOptions () {
|
||||
+ return Object.assign({}, {
|
||||
+ url: CONFIG.AUTH.LDAP.URL,
|
||||
+ reconnect: true,
|
||||
+ tlsOptions: { rejectUnauthorized: !CONFIG.AUTH.LDAP.INSECURE_TLS }
|
||||
+ })
|
||||
+ }
|
||||
+
|
||||
+ getClient () {
|
||||
+ this.init()
|
||||
+ return this.client
|
||||
+ }
|
||||
+
|
||||
+ findUser (username: string) {
|
||||
+ const filter = parseFilter(CONFIG.AUTH.LDAP.USER_FILTER)
|
||||
+ filter.forEach(function (element) {
|
||||
+ if (element.value === '%username%') element.value = username
|
||||
+ })
|
||||
+ const opts = {
|
||||
+ filter,
|
||||
+ scope: 'sub',
|
||||
+ attributes: [ CONFIG.AUTH.LDAP.MAIL_ENTRY, 'dn' ]
|
||||
+ }
|
||||
+
|
||||
+ const client = this.getClient()
|
||||
+
|
||||
+ return new Promise(function (resolve, reject) {
|
||||
+ client.bind(CONFIG.AUTH.LDAP.BIND_DN, CONFIG.AUTH.LDAP.BIND_PASSWORD, function (err) {
|
||||
+ if (err) reject(err)
|
||||
+ let entries = []
|
||||
+ client.search(CONFIG.AUTH.LDAP.BASE, opts, function (err, search) {
|
||||
+ if (err) reject(err)
|
||||
+ search.on('searchEntry', function (entry) {
|
||||
+ entries.push(entry.object)
|
||||
+ })
|
||||
+ search.on('end', function (result) {
|
||||
+ if (entries.length === 1) {
|
||||
+ resolve(entries[0])
|
||||
+ } else {
|
||||
+ reject("No user found corresponding to this username")
|
||||
+ }
|
||||
+ })
|
||||
+ })
|
||||
+ })
|
||||
+ })
|
||||
+ }
|
||||
+
|
||||
+ checkUser (dn: string, password: string) {
|
||||
+ const client = this.getClient()
|
||||
+ return new Promise(function (resolve, reject) {
|
||||
+ client.bind(dn, password, function (err) {
|
||||
+ resolve(!err)
|
||||
+ })
|
||||
+ })
|
||||
+ }
|
||||
+
|
||||
+
|
||||
+ static get Instance () {
|
||||
+ return this.instance || (this.instance = new this())
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+// ---------------------------------------------------------------------------
|
||||
+
|
||||
+export {
|
||||
+ Ldap
|
||||
+}
|
||||
diff --git a/server/lib/oauth-model.ts b/server/lib/oauth-model.ts
|
||||
index 086856f41..ab10effd0 100644
|
||||
--- a/server/lib/oauth-model.ts
|
||||
+++ b/server/lib/oauth-model.ts
|
||||
@@ -9,6 +9,7 @@ import { Transaction } from 'sequelize'
|
||||
import { CONFIG } from '../initializers/config'
|
||||
import * as LRUCache from 'lru-cache'
|
||||
import { MOAuthTokenUser } from '@server/typings/models/oauth/oauth-token'
|
||||
+import { MUserDefault } from '@server/typings/models'
|
||||
|
||||
type TokenInfo = { accessToken: string, refreshToken: string, accessTokenExpiresAt: Date, refreshTokenExpiresAt: Date }
|
||||
|
||||
@@ -74,7 +75,13 @@ function getRefreshToken (refreshToken: string) {
|
||||
async function getUser (usernameOrEmail: string, password: string) {
|
||||
logger.debug('Getting User (username/email: ' + usernameOrEmail + ', password: ******).')
|
||||
|
||||
- const user = await UserModel.loadByUsernameOrEmail(usernameOrEmail)
|
||||
+ let user : MUserDefault
|
||||
+ if (CONFIG.AUTH.LDAP.ENABLED) {
|
||||
+ user = await UserModel.findOrCreateLDAPUser(usernameOrEmail)
|
||||
+ }
|
||||
+ if (!user && CONFIG.AUTH.LOCAL.ENABLED) {
|
||||
+ user = await UserModel.loadByUsernameOrEmail(usernameOrEmail)
|
||||
+ }
|
||||
if (!user) return null
|
||||
|
||||
const passwordMatch = await user.isPasswordMatch(password)
|
||||
diff --git a/server/models/account/user.ts b/server/models/account/user.ts
|
||||
index 4c2c5e278..0b38f7cb2 100644
|
||||
--- a/server/models/account/user.ts
|
||||
+++ b/server/models/account/user.ts
|
||||
@@ -1,4 +1,5 @@
|
||||
import { FindOptions, literal, Op, QueryTypes, where, fn, col } from 'sequelize'
|
||||
+import { Ldap } from '../../lib/ldap'
|
||||
import {
|
||||
AfterDestroy,
|
||||
AfterUpdate,
|
||||
@@ -50,7 +51,9 @@ import { AccountModel } from './account'
|
||||
import { NSFWPolicyType } from '../../../shared/models/videos/nsfw-policy.type'
|
||||
import { values } from 'lodash'
|
||||
import { DEFAULT_THEME_NAME, DEFAULT_USER_THEME_NAME, NSFW_POLICY_TYPES } from '../../initializers/constants'
|
||||
+import { CONFIG } from '../../initializers/config'
|
||||
import { clearCacheByUserId } from '../../lib/oauth-model'
|
||||
+import { createUserAccountAndChannelAndPlaylist } from '../../lib/user'
|
||||
import { UserNotificationSettingModel } from './user-notification-setting'
|
||||
import { VideoModel } from '../video/video'
|
||||
import { ActorModel } from '../activitypub/actor'
|
||||
@@ -149,6 +152,11 @@ export class UserModel extends Model<UserModel> {
|
||||
@Column(DataType.STRING(400))
|
||||
pendingEmail: string
|
||||
|
||||
+ @AllowNull(true)
|
||||
+ @Default(null)
|
||||
+ @Column
|
||||
+ ldapDn: string
|
||||
+
|
||||
@AllowNull(true)
|
||||
@Default(null)
|
||||
@Is('UserEmailVerified', value => throwIfNotValid(value, isUserEmailVerifiedValid, 'email verified boolean', true))
|
||||
@@ -440,6 +448,48 @@ export class UserModel extends Model<UserModel> {
|
||||
return UserModel.findOne(query)
|
||||
}
|
||||
|
||||
+ static loadByLdapDn (ldapDn: string) {
|
||||
+ const query = {
|
||||
+ where: {
|
||||
+ ldapDn
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ return UserModel.findOne(query)
|
||||
+ }
|
||||
+
|
||||
+ static async findOrCreateLDAPUser (username: string) {
|
||||
+ try {
|
||||
+ const userInfos = await Ldap.Instance.findUser(username)
|
||||
+ const user = await UserModel.loadByLdapDn(userInfos['dn'])
|
||||
+ if (user) {
|
||||
+ return user
|
||||
+ } else {
|
||||
+ return await UserModel.createLDAPUser(username, userInfos)
|
||||
+ }
|
||||
+ } catch (e) {
|
||||
+ return null
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ static async createLDAPUser (username: string, userInfos: {}) {
|
||||
+ const userToCreate = new UserModel({
|
||||
+ username,
|
||||
+ password: 'SomeInvalidPassword',
|
||||
+ email: userInfos[CONFIG.AUTH.LDAP.MAIL_ENTRY],
|
||||
+ ldapDn: userInfos['dn'],
|
||||
+ nsfwPolicy: CONFIG.INSTANCE.DEFAULT_NSFW_POLICY,
|
||||
+ autoPlayVideo: true,
|
||||
+ role: UserRole.USER,
|
||||
+ videoQuota: CONFIG.USER.VIDEO_QUOTA,
|
||||
+ videoQuotaDaily: CONFIG.USER.VIDEO_QUOTA_DAILY,
|
||||
+ emailVerified: true,
|
||||
+ adminFlags: UserAdminFlag.NONE
|
||||
+ })
|
||||
+ const { user } = await createUserAccountAndChannelAndPlaylist({ userToCreate })
|
||||
+ return user
|
||||
+ }
|
||||
+
|
||||
static loadForMeAPI (username: string): Bluebird<MUserNotifSettingChannelDefault> {
|
||||
const query = {
|
||||
where: {
|
||||
@@ -627,7 +677,11 @@ export class UserModel extends Model<UserModel> {
|
||||
}
|
||||
|
||||
isPasswordMatch (password: string) {
|
||||
- return comparePassword(password, this.password)
|
||||
+ if (this.ldapDn === null) {
|
||||
+ return comparePassword(password, this.password)
|
||||
+ } else {
|
||||
+ return Ldap.Instance.checkUser(this.ldapDn, password)
|
||||
+ }
|
||||
}
|
||||
|
||||
toFormattedJSON (this: MUserFormattable, parameters: { withAdminFlags?: boolean } = {}): User {
|
||||
diff --git a/yarn.lock b/yarn.lock
|
||||
index 76ce7ed27..f087746df 100644
|
||||
--- a/yarn.lock
|
||||
+++ b/yarn.lock
|
||||
@@ -616,6 +616,11 @@ arraybuffer.slice@~0.0.7:
|
||||
resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.7.tgz#3bbc4275dd584cc1b10809b89d4e8b63a69e7675"
|
||||
integrity sha512-wGUIVQXuehL5TCqQun8OW81jGzAWycqzFF8lFp+GOM5BXLYj3bKNsYC4daB7n6XjCqxQA/qgTJ+8ANR3acjrog==
|
||||
|
||||
+asn1@0.2.3:
|
||||
+ version "0.2.3"
|
||||
+ resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
|
||||
+ integrity sha1-2sh4dxPJlmhJ/IGAd36+nB3fO4Y=
|
||||
+
|
||||
asn1@~0.2.3:
|
||||
version "0.2.4"
|
||||
resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.4.tgz#8d2475dfab553bb33e77b54e59e880bb8ce23136"
|
||||
@@ -623,6 +628,11 @@ asn1@~0.2.3:
|
||||
dependencies:
|
||||
safer-buffer "~2.1.0"
|
||||
|
||||
+assert-plus@0.1.5:
|
||||
+ version "0.1.5"
|
||||
+ resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.1.5.tgz#ee74009413002d84cec7219c6ac811812e723160"
|
||||
+ integrity sha1-7nQAlBMALYTOxyGcasgRgS5yMWA=
|
||||
+
|
||||
assert-plus@1.0.0, assert-plus@^1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
|
||||
@@ -692,6 +702,13 @@ backo2@1.0.2:
|
||||
resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
|
||||
integrity sha1-MasayLEpNjRj41s+u2n038+6eUc=
|
||||
|
||||
+backoff@^2.5.0:
|
||||
+ version "2.5.0"
|
||||
+ resolved "https://registry.yarnpkg.com/backoff/-/backoff-2.5.0.tgz#f616eda9d3e4b66b8ca7fca79f695722c5f8e26f"
|
||||
+ integrity sha1-9hbtqdPktmuMp/ynn2lXIsX44m8=
|
||||
+ dependencies:
|
||||
+ precond "0.2"
|
||||
+
|
||||
balanced-match@^1.0.0:
|
||||
version "1.0.0"
|
||||
resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767"
|
||||
@@ -1001,6 +1018,16 @@ bull@^3.4.2:
|
||||
util.promisify "^1.0.0"
|
||||
uuid "^3.3.3"
|
||||
|
||||
+bunyan@^1.8.3:
|
||||
+ version "1.8.12"
|
||||
+ resolved "https://registry.yarnpkg.com/bunyan/-/bunyan-1.8.12.tgz#f150f0f6748abdd72aeae84f04403be2ef113797"
|
||||
+ integrity sha1-8VDw9nSKvdcq6uhPBEA74u8RN5c=
|
||||
+ optionalDependencies:
|
||||
+ dtrace-provider "~0.8"
|
||||
+ moment "^2.10.6"
|
||||
+ mv "~2"
|
||||
+ safe-json-stringify "~1"
|
||||
+
|
||||
busboy@^0.2.11:
|
||||
version "0.2.14"
|
||||
resolved "https://registry.yarnpkg.com/busboy/-/busboy-0.2.14.tgz#6c2a622efcf47c57bbbe1e2a9c37ad36c7925453"
|
||||
@@ -1619,7 +1646,7 @@ d@1, d@^1.0.1:
|
||||
es5-ext "^0.10.50"
|
||||
type "^1.0.1"
|
||||
|
||||
-dashdash@^1.12.0:
|
||||
+dashdash@^1.12.0, dashdash@^1.14.0:
|
||||
version "1.14.1"
|
||||
resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
|
||||
integrity sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=
|
||||
@@ -1845,6 +1872,13 @@ double-ended-queue@^2.1.0-0:
|
||||
resolved "https://registry.yarnpkg.com/double-ended-queue/-/double-ended-queue-2.1.0-0.tgz#103d3527fd31528f40188130c841efdd78264e5c"
|
||||
integrity sha1-ED01J/0xUo9AGIEwyEHv3XgmTlw=
|
||||
|
||||
+dtrace-provider@~0.8:
|
||||
+ version "0.8.8"
|
||||
+ resolved "https://registry.yarnpkg.com/dtrace-provider/-/dtrace-provider-0.8.8.tgz#2996d5490c37e1347be263b423ed7b297fb0d97e"
|
||||
+ integrity sha512-b7Z7cNtHPhH9EJhNNbbeqTcXB8LGFFZhq1PGgEvpeHlzd36bhbdTWoE/Ba/YguqpBSlAPKnARWhVlhunCMwfxg==
|
||||
+ dependencies:
|
||||
+ nan "^2.14.0"
|
||||
+
|
||||
duplexer3@^0.1.4:
|
||||
version "0.1.4"
|
||||
resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2"
|
||||
@@ -2228,6 +2262,11 @@ extend@^3.0.0, extend@~3.0.0, extend@~3.0.2:
|
||||
resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa"
|
||||
integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==
|
||||
|
||||
+extsprintf@1.2.0:
|
||||
+ version "1.2.0"
|
||||
+ resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.2.0.tgz#5ad946c22f5b32ba7f8cd7426711c6e8a3fc2529"
|
||||
+ integrity sha1-WtlGwi9bMrp/jNdCZxHG6KP8JSk=
|
||||
+
|
||||
extsprintf@1.3.0:
|
||||
version "1.3.0"
|
||||
resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.3.0.tgz#96918440e3041a7a414f8c52e3c574eb3c3e1e05"
|
||||
@@ -2567,6 +2606,17 @@ glob@7.1.3:
|
||||
once "^1.3.0"
|
||||
path-is-absolute "^1.0.0"
|
||||
|
||||
+glob@^6.0.1:
|
||||
+ version "6.0.4"
|
||||
+ resolved "https://registry.yarnpkg.com/glob/-/glob-6.0.4.tgz#0f08860f6a155127b2fadd4f9ce24b1aab6e4d22"
|
||||
+ integrity sha1-DwiGD2oVUSey+t1PnOJLGqtuTSI=
|
||||
+ dependencies:
|
||||
+ inflight "^1.0.4"
|
||||
+ inherits "2"
|
||||
+ minimatch "2 || 3"
|
||||
+ once "^1.3.0"
|
||||
+ path-is-absolute "^1.0.0"
|
||||
+
|
||||
glob@^7.0.3, glob@^7.1.1, glob@^7.1.3:
|
||||
version "7.1.6"
|
||||
resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.6.tgz#141f33b81a7c2492e125594307480c46679278a6"
|
||||
@@ -3356,6 +3406,30 @@ latest-version@^3.0.0:
|
||||
dependencies:
|
||||
package-json "^4.0.0"
|
||||
|
||||
+ldap-filter@0.2.2:
|
||||
+ version "0.2.2"
|
||||
+ resolved "https://registry.yarnpkg.com/ldap-filter/-/ldap-filter-0.2.2.tgz#f2b842be0b86da3352798505b31ebcae590d77d0"
|
||||
+ integrity sha1-8rhCvguG2jNSeYUFsx68rlkNd9A=
|
||||
+ dependencies:
|
||||
+ assert-plus "0.1.5"
|
||||
+
|
||||
+ldapjs@^1.0.2:
|
||||
+ version "1.0.2"
|
||||
+ resolved "https://registry.yarnpkg.com/ldapjs/-/ldapjs-1.0.2.tgz#544ff7032b7b83c68f0701328d9297aa694340f9"
|
||||
+ integrity sha1-VE/3Ayt7g8aPBwEyjZKXqmlDQPk=
|
||||
+ dependencies:
|
||||
+ asn1 "0.2.3"
|
||||
+ assert-plus "^1.0.0"
|
||||
+ backoff "^2.5.0"
|
||||
+ bunyan "^1.8.3"
|
||||
+ dashdash "^1.14.0"
|
||||
+ ldap-filter "0.2.2"
|
||||
+ once "^1.4.0"
|
||||
+ vasync "^1.6.4"
|
||||
+ verror "^1.8.1"
|
||||
+ optionalDependencies:
|
||||
+ dtrace-provider "~0.8"
|
||||
+
|
||||
libxmljs@0.19.7:
|
||||
version "0.19.7"
|
||||
resolved "https://registry.yarnpkg.com/libxmljs/-/libxmljs-0.19.7.tgz#96c2151b0b73f33dd29917edec82902587004e5a"
|
||||
@@ -3724,7 +3798,7 @@ mimic-response@^2.0.0:
|
||||
resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-2.0.0.tgz#996a51c60adf12cb8a87d7fb8ef24c2f3d5ebb46"
|
||||
integrity sha512-8ilDoEapqA4uQ3TwS0jakGONKXVJqpy+RpM+3b7pLdOjghCrEiGp9SRkFbUHAmZW9vdnrENWHjaweIoTIJExSQ==
|
||||
|
||||
-minimatch@3.0.4, minimatch@^3.0.4:
|
||||
+"minimatch@2 || 3", minimatch@3.0.4, minimatch@^3.0.4:
|
||||
version "3.0.4"
|
||||
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083"
|
||||
integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==
|
||||
@@ -3825,7 +3899,7 @@ moment-timezone@^0.5.21, moment-timezone@^0.5.25:
|
||||
dependencies:
|
||||
moment ">= 2.9.0"
|
||||
|
||||
-"moment@>= 2.9.0", moment@^2.24.0:
|
||||
+"moment@>= 2.9.0", moment@^2.10.6, moment@^2.24.0:
|
||||
version "2.24.0"
|
||||
resolved "https://registry.yarnpkg.com/moment/-/moment-2.24.0.tgz#0d055d53f5052aa653c9f6eb68bb5d12bf5c2b5b"
|
||||
integrity sha512-bV7f+6l2QigeBBZSM/6yTNq4P2fNpSWj/0e7jQcy87A8e7o2nAfP/34/2ky5Vw4B9S446EtIhodAzkFCcR4dQg==
|
||||
@@ -3898,6 +3972,15 @@ mute-stream@~0.0.4:
|
||||
resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d"
|
||||
integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==
|
||||
|
||||
+mv@~2:
|
||||
+ version "2.1.1"
|
||||
+ resolved "https://registry.yarnpkg.com/mv/-/mv-2.1.1.tgz#ae6ce0d6f6d5e0a4f7d893798d03c1ea9559b6a2"
|
||||
+ integrity sha1-rmzg1vbV4KT32JN5jQPB6pVZtqI=
|
||||
+ dependencies:
|
||||
+ mkdirp "~0.5.1"
|
||||
+ ncp "~2.0.0"
|
||||
+ rimraf "~2.4.0"
|
||||
+
|
||||
nan@2.14.0, nan@^2.14.0, nan@~2.14.0:
|
||||
version "2.14.0"
|
||||
resolved "https://registry.yarnpkg.com/nan/-/nan-2.14.0.tgz#7818f722027b2459a86f0295d434d1fc2336c52c"
|
||||
@@ -3913,6 +3996,11 @@ ncp@1.0.x:
|
||||
resolved "https://registry.yarnpkg.com/ncp/-/ncp-1.0.1.tgz#d15367e5cb87432ba117d2bf80fdf45aecfb4246"
|
||||
integrity sha1-0VNn5cuHQyuhF9K/gP30Wuz7QkY=
|
||||
|
||||
+ncp@~2.0.0:
|
||||
+ version "2.0.0"
|
||||
+ resolved "https://registry.yarnpkg.com/ncp/-/ncp-2.0.0.tgz#195a21d6c46e361d2fb1281ba38b91e9df7bdbb3"
|
||||
+ integrity sha1-GVoh1sRuNh0vsSgbo4uR6d9727M=
|
||||
+
|
||||
needle@^2.2.1:
|
||||
version "2.4.0"
|
||||
resolved "https://registry.yarnpkg.com/needle/-/needle-2.4.0.tgz#6833e74975c444642590e15a750288c5f939b57c"
|
||||
@@ -4597,6 +4685,11 @@ prebuild-install@^5.3.3:
|
||||
tunnel-agent "^0.6.0"
|
||||
which-pm-runs "^1.0.0"
|
||||
|
||||
+precond@0.2:
|
||||
+ version "0.2.3"
|
||||
+ resolved "https://registry.yarnpkg.com/precond/-/precond-0.2.3.tgz#aa9591bcaa24923f1e0f4849d240f47efc1075ac"
|
||||
+ integrity sha1-qpWRvKokkj8eD0hJ0kD0fvwQdaw=
|
||||
+
|
||||
prepend-http@^1.0.1:
|
||||
version "1.0.4"
|
||||
resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-1.0.4.tgz#d4f4562b0ce3696e41ac52d0e002e57a635dc6dc"
|
||||
@@ -5032,6 +5125,13 @@ rimraf@^3.0.0:
|
||||
dependencies:
|
||||
glob "^7.1.3"
|
||||
|
||||
+rimraf@~2.4.0:
|
||||
+ version "2.4.5"
|
||||
+ resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.4.5.tgz#ee710ce5d93a8fdb856fb5ea8ff0e2d75934b2da"
|
||||
+ integrity sha1-7nEM5dk6j9uFb7Xqj/Di11k0sto=
|
||||
+ dependencies:
|
||||
+ glob "^6.0.1"
|
||||
+
|
||||
run-parallel-limit@^1.0.3:
|
||||
version "1.0.5"
|
||||
resolved "https://registry.yarnpkg.com/run-parallel-limit/-/run-parallel-limit-1.0.5.tgz#c29a4fd17b4df358cb52a8a697811a63c984f1b7"
|
||||
@@ -5069,6 +5169,11 @@ safe-buffer@^5.0.1, safe-buffer@^5.1.0, safe-buffer@^5.1.1, safe-buffer@^5.1.2,
|
||||
resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.0.tgz#b74daec49b1148f88c64b68d49b1e815c1f2f519"
|
||||
integrity sha512-fZEwUGbVl7kouZs1jCdMLdt95hdIv0ZeHg6L7qPeciMZhZ+/gdesW4wgTARkrFWEpspjEATAzUGPG8N2jJiwbg==
|
||||
|
||||
+safe-json-stringify@~1:
|
||||
+ version "1.2.0"
|
||||
+ resolved "https://registry.yarnpkg.com/safe-json-stringify/-/safe-json-stringify-1.2.0.tgz#356e44bc98f1f93ce45df14bcd7c01cda86e0afd"
|
||||
+ integrity sha512-gH8eh2nZudPQO6TytOvbxnuhYBOvDBBLW52tz5q6X58lJcd/tkmqFR+5Z9adS8aJtURSXWThWy/xJtJwixErvg==
|
||||
+
|
||||
"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0:
|
||||
version "2.1.2"
|
||||
resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a"
|
||||
@@ -6337,7 +6442,14 @@ vary@^1, vary@~1.1.2:
|
||||
resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc"
|
||||
integrity sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=
|
||||
|
||||
-verror@1.10.0:
|
||||
+vasync@^1.6.4:
|
||||
+ version "1.6.4"
|
||||
+ resolved "https://registry.yarnpkg.com/vasync/-/vasync-1.6.4.tgz#dfe93616ad0e7ae801b332a9d88bfc5cdc8e1d1f"
|
||||
+ integrity sha1-3+k2Fq0OeugBszKp2Iv8XNyOHR8=
|
||||
+ dependencies:
|
||||
+ verror "1.6.0"
|
||||
+
|
||||
+verror@1.10.0, verror@^1.8.1:
|
||||
version "1.10.0"
|
||||
resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400"
|
||||
integrity sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=
|
||||
@@ -6346,6 +6458,13 @@ verror@1.10.0:
|
||||
core-util-is "1.0.2"
|
||||
extsprintf "^1.2.0"
|
||||
|
||||
+verror@1.6.0:
|
||||
+ version "1.6.0"
|
||||
+ resolved "https://registry.yarnpkg.com/verror/-/verror-1.6.0.tgz#7d13b27b1facc2e2da90405eb5ea6e5bdd252ea5"
|
||||
+ integrity sha1-fROyex+swuLakEBetepuW90lLqU=
|
||||
+ dependencies:
|
||||
+ extsprintf "1.2.0"
|
||||
+
|
||||
videostream@^3.2.0:
|
||||
version "3.2.1"
|
||||
resolved "https://registry.yarnpkg.com/videostream/-/videostream-3.2.1.tgz#643688ad4bfbf37570d421e3196b7e0ad38eeebc"
|
15 pkgs/peertube/peertube.json Normal file
@@ -0,0 +1,15 @@
{
  "tag": "v2.1.1",
  "meta": {
    "name": "peertube",
    "url": "https://github.com/Chocobozzz/PeerTube",
    "branch": "refs/tags/v2.1.1"
  },
  "github": {
    "owner": "Chocobozzz",
    "repo": "PeerTube",
    "rev": "76f7b571c04c03ba422bd5790944fe80dbb24067",
    "sha256": "147gm1j657fkpv2ix1bmkhl7ld5h224q7hgdj9ffj3z14mqgk8hj",
    "fetchSubmodules": true
  }
}
121 pkgs/peertube/sendmail.patch Normal file
|
@ -0,0 +1,121 @@
|
|||
commit 486964fad93334a52fb05e7d0497ecac3eb684fe
|
||||
Author: Ismaël Bouya <ismael.bouya@normalesup.org>
|
||||
Date: Wed Feb 13 12:16:27 2019 +0100
|
||||
|
||||
Add sendmail
|
||||
|
||||
diff --git a/config/production.yaml.example b/config/production.yaml.example
|
||||
index c56691bf4..8abdfb2a7 100644
|
||||
--- a/config/production.yaml.example
|
||||
+++ b/config/production.yaml.example
|
||||
@@ -66,6 +66,8 @@ auth:
|
||||
|
||||
# SMTP server to send emails
|
||||
smtp:
|
||||
+ transport: smtp
|
||||
+ sendmail: null
|
||||
hostname: null
|
||||
port: 465 # If you use StartTLS: 587
|
||||
username: null
|
||||
diff --git a/server/initializers/config.ts b/server/initializers/config.ts
|
||||
index 45a667826..c1c15f05b 100644
|
||||
--- a/server/initializers/config.ts
|
||||
+++ b/server/initializers/config.ts
|
||||
@@ -50,6 +50,8 @@ const CONFIG = {
|
||||
},
|
||||
},
|
||||
SMTP: {
|
||||
+ TRANSPORT: config.has('smtp.transport') ? config.get<string>('smtp.transport') : 'smtp',
|
||||
+ SENDMAIL: config.has('smtp.sendmail') ? config.get<string>('smtp.sendmail') : null,
|
||||
HOSTNAME: config.get<string>('smtp.hostname'),
|
||||
PORT: config.get<number>('smtp.port'),
|
||||
USERNAME: config.get<string>('smtp.username'),
|
||||
diff --git a/server/lib/emailer.ts b/server/lib/emailer.ts
|
||||
index 7484524a4..512c5c068 100644
|
||||
--- a/server/lib/emailer.ts
|
||||
+++ b/server/lib/emailer.ts
|
||||
@@ -40,33 +40,41 @@ class Emailer {
|
||||
this.initialized = true
|
||||
|
||||
if (Emailer.isEnabled()) {
|
||||
- logger.info('Using %s:%s as SMTP server.', CONFIG.SMTP.HOSTNAME, CONFIG.SMTP.PORT)
|
||||
-
|
||||
- let tls
|
||||
- if (CONFIG.SMTP.CA_FILE) {
|
||||
- tls = {
|
||||
- ca: [ readFileSync(CONFIG.SMTP.CA_FILE) ]
|
||||
+ if (CONFIG.SMTP.TRANSPORT === 'smtp') {
|
||||
+ logger.info('Using %s:%s as SMTP server.', CONFIG.SMTP.HOSTNAME, CONFIG.SMTP.PORT)
|
||||
+
|
||||
+ let tls
|
||||
+ if (CONFIG.SMTP.CA_FILE) {
|
||||
+ tls = {
|
||||
+ ca: [ readFileSync(CONFIG.SMTP.CA_FILE) ]
|
||||
+ }
|
||||
}
|
||||
- }
|
||||
|
||||
- let auth
|
||||
- if (CONFIG.SMTP.USERNAME && CONFIG.SMTP.PASSWORD) {
|
||||
- auth = {
|
||||
- user: CONFIG.SMTP.USERNAME,
|
||||
- pass: CONFIG.SMTP.PASSWORD
|
||||
+ let auth
|
||||
+ if (CONFIG.SMTP.USERNAME && CONFIG.SMTP.PASSWORD) {
|
||||
+ auth = {
|
||||
+ user: CONFIG.SMTP.USERNAME,
|
||||
+ pass: CONFIG.SMTP.PASSWORD
|
||||
+ }
|
||||
}
|
||||
- }
|
||||
|
||||
- this.transporter = createTransport({
|
||||
- host: CONFIG.SMTP.HOSTNAME,
|
||||
- port: CONFIG.SMTP.PORT,
|
||||
- secure: CONFIG.SMTP.TLS,
|
||||
- debug: CONFIG.LOG.LEVEL === 'debug',
|
||||
- logger: bunyanLogger as any,
|
||||
- ignoreTLS: CONFIG.SMTP.DISABLE_STARTTLS,
|
||||
- tls,
|
||||
- auth
|
||||
- })
|
||||
+ this.transporter = createTransport({
|
||||
+ host: CONFIG.SMTP.HOSTNAME,
|
||||
+ port: CONFIG.SMTP.PORT,
|
||||
+ secure: CONFIG.SMTP.TLS,
|
||||
+ debug: CONFIG.LOG.LEVEL === 'debug',
|
||||
+ logger: bunyanLogger as any,
|
||||
+ ignoreTLS: CONFIG.SMTP.DISABLE_STARTTLS,
|
||||
+ tls,
|
||||
+ auth
|
||||
+ })
|
||||
+ } else { // sendmail
|
||||
+ this.transporter = createTransport({
|
||||
+ sendmail: true,
|
||||
+ newline: 'unix',
|
||||
+ path: CONFIG.SMTP.SENDMAIL,
|
||||
+ })
|
||||
+ }
|
||||
} else {
|
||||
if (!isTestInstance()) {
|
||||
logger.error('Cannot use SMTP server because of lack of configuration. PeerTube will not be able to send mails!')
|
||||
@@ -75,11 +83,17 @@ class Emailer {
|
||||
}
|
||||
|
||||
static isEnabled () {
|
||||
- return !!CONFIG.SMTP.HOSTNAME && !!CONFIG.SMTP.PORT
|
||||
+ if (CONFIG.SMTP.TRANSPORT === 'sendmail') {
|
||||
+ return !!CONFIG.SMTP.SENDMAIL
|
||||
+ } else if (CONFIG.SMTP.TRANSPORT === 'smtp') {
|
||||
+ return !!CONFIG.SMTP.HOSTNAME && !!CONFIG.SMTP.PORT
|
||||
+ } else {
|
||||
+ return false
|
||||
+ }
|
||||
}
|
||||
|
||||
async checkConnectionOrDie () {
|
||||
- if (!this.transporter) return
|
||||
+ if (!this.transporter || CONFIG.SMTP.TRANSPORT !== 'smtp') return
|
||||
|
||||
logger.info('Testing SMTP server...')
|
||||
|
8735 pkgs/peertube/server-yarn-packages.nix Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.