forked from hacc/haccfiles
stuebinm
a00e28d85a
Pluto [1] is one of these interactive notebook thingies that have become so unreasonably popular with people doing machine learning or data analysis, but – somewhat surprisingly – it's actually not shit (e.g. no global mutable state in the notebook, no weird unreadable file format that doesn't play well with version control, etc.). In particular, it can be used collaboratively (while it doesn't do real-time collaborative editing like a pad, it /does/ push out global updates each time someone executes a cell, so it's reasonably close), and I think it may be useful to have for julia-hacking sessions. It may also be useful for people running low-end laptops, since code is executed on the host, and I guess hainich has enough unused resources lying around that we can spare a few.

After deploying this, the notebook server should be reachable via:

    ssh hainich -L 9999:localhost:9999

and then visiting http://localhost:9999

Caveats: by design, Pluto allows a user to execute arbitrary code on the host. That is its main function, and not something we can prevent. I've tried to mitigate this as far as possible by:

- only allowing access via ssh port forwarding. In theory Pluto does have basic access control, but that works via a secret link that it'll spit to stdout on startup (i.e. the journal), which cannot be set in advance, nor regenerated without restarting the entire process. Unfortunately, this means we won't be able to use it at e.g. conference sessions with people who don't have access to our infra
- running it in a nixos-container as its own user, so it should never get any kind of access to the "main" directory tree apart from a single directory that we can keep notebooks in (which is currently a bind mount set to /data/pluto)
- limiting memory and cpu for that container via systemd (less out of worry for exploits, and more so that a few accidental while-true loops will never consume enough cpu time to noticeably slow down anything else). The current limits for both are chosen relatively low; we'll have to see if they become too limiting should anyone run an actual weather model on this.

Things we could also do:

- currently, the container does not have its own network (mostly since that would make it slightly less convenient to use with port forwarding); in theory, Pluto should even be able to run entirely without internet access of its own, but I'm not sure if this would break things like loading images / raw data into a notebook
- make the container ephemeral, and only keep the directory containing the notebooks. I haven't done this since it would require recompilation of Pluto each time the container is wiped, which makes for a potentially inconvenient startup time (though still < 3-5 mins)

Questions:

- have I missed anything important that should definitely also be sandboxed / limited in some way?
- in general, are we comfortable running something like this?
- would we (in principle) be comfortable opening this up to other people for congress sessions (assuming we figure out a reasonable access control)?

Notes to deployer:

- while I have not tested this on hainich, it works on my own server
- you will probably have to create the /data/pluto directory for the bind mount, and make it world-writable (or chown it to the pluto user inside the container)

[1] https://github.com/fonsp/Pluto.jl/
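For context, here is a rough sketch of what a services/pluto.nix along the lines described above could look like: a declarative nixos-container with the /data/pluto bind mount and systemd resource limits on the host-side container unit. The in-container mount point, the pluto user, the julia invocation, and the concrete limit values are made up for illustration; this is not the deployed file.

    { config, lib, pkgs, ... }:

    {
      containers.pluto = {
        autoStart = true;
        ephemeral = false;  # keep container state so Pluto isn't recompiled on every restart
        # the only piece of the host filesystem visible inside the container
        bindMounts."/notebooks" = {
          hostPath = "/data/pluto";
          isReadOnly = false;
        };
        # no privateNetwork: the container shares the host's loopback, so
        # `ssh hainich -L 9999:localhost:9999` reaches it directly

        config = { pkgs, ... }: {
          users.users.pluto = {
            isNormalUser = true;
            home = "/notebooks";
          };

          systemd.services.pluto = {
            wantedBy = [ "multi-user.target" ];
            serviceConfig = {
              User = "pluto";
              WorkingDirectory = "/notebooks";
              # assumes Pluto.jl is already available in this julia's depot;
              # how that depot gets provisioned is left out of this sketch
              ExecStart = "${pkgs.julia}/bin/julia -e 'using Pluto; Pluto.run(host=\"127.0.0.1\", port=9999)'";
            };
          };
        };
      };

      # resource limits go on the host-side unit for the container, so runaway
      # notebooks can't noticeably slow down anything else on the machine
      systemd.services."container@pluto".serviceConfig = {
        MemoryMax = "4G";   # placeholder values, deliberately low
        CPUQuota = "200%";
      };
    }

With something like this, the container comes up on boot via autoStart and can otherwise be managed with systemctl start/stop container@pluto on the host.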
96 lines
2.3 KiB
Nix
{ config, lib, pkgs, sources, modules, ... }:

{
  imports = [
    ../../common
    ./hardware.nix
    modules.encboot
    modules.network.nftables modules.nftnat
    ((import sources.nix-hexchen) {}).profiles.nopersist

    ../../services/nextcloud
    ../../services/mattermost.nix
    ../../services/thelounge.nix
    ../../services/murmur.nix
    ../../services/hedgedoc-hacc.nix
    ../../services/hedgedoc-i4f.nix
    ../../services/mail.nix
    ../../services/syncthing.nix
    ../../services/gitlab.nix
    ../../services/nginx-pages.nix
    ../../services/gitlab-runner.nix
    ../../services/unifi.nix
    ../../services/lantifa.nix
    ../../services/pluto.nix

    ./lxc.nix
  ];

  hexchen.encboot = {
    enable = true;
    dataset = "-a";
    networkDrivers = [ "igb" ];
  };

  boot.loader.grub.enable = true;
  boot.loader.grub.version = 2;
  boot.loader.grub.devices = [ "/dev/nvme0n1" "/dev/nvme1n1" ];
  boot.supportedFilesystems = [ "zfs" ];

  networking.hostId = "b2867696";
  networking.useDHCP = true;
  networking.nftables.enable = true;
  hexchen.nftables.nat.enable = true;
  networking.nat.internalInterfaces = ["ve-+"];
  networking.nat.externalInterface = "enp35s0";

  networking.interfaces.enp35s0.ipv6.addresses = [{
    address = "2a01:4f9:3a:2ddb::1";
    prefixLength = 64;
  }];
  networking.defaultGateway6 = {
    address = "fe80::1";
    interface = "enp35s0";
  };

  boot = {
    kernelModules = [ "nf_nat_ftp" ];
    kernel.sysctl = {
      "net.ipv4.conf.all.forwarding" = lib.mkOverride 90 true;
      "net.ipv4.conf.default.forwarding" = lib.mkOverride 90 true;
    };
  };

  services.nginx = {
    enable = true;
    recommendedProxySettings = true;
    virtualHosts = {
      "parsons.hacc.space" = {
        default = true;
        locations."/".return = "404";
      };
      "hacc.space" = {
        enableACME = true;
        forceSSL = true;
        locations."/".return = "302 https://hacc.earth";
      };
    };
  };
  networking.firewall.allowedTCPPorts = [ 80 443 ];

  services.restic.backups.tardis = {
    passwordFile = "/persist/restic/system";
    s3CredentialsFile = "/persist/restic/system.s3creds";
    paths = [
      "/home"
      "/persist"
    ];
    pruneOpts = [
      "--keep-daily 7"
      "--keep-weekly 5"
      "--keep-monthly 3"
    ];
    repository = "b2:tardis-parsons:system";
  };

  system.stateVersion = "21.05";
}