WIP: move nixos modules

This commit is contained in:
Filippo Berto 2023-05-14 23:40:45 +02:00
parent d725c3b788
commit 2a5983344d
No known key found for this signature in database
GPG key ID: FE98AE5EC52B1056
29 changed files with 65 additions and 115 deletions

View file

@ -0,0 +1,10 @@
{ lib, ... }:
let
  # Directory containing this default.nix; scanned for sibling modules.
  src = ./.;
  # Entry name -> entry type for everything in this directory.
  files = builtins.readDir src;
  # Keep regular *.nix files, excluding this default.nix itself.
  # FIX: the previous filter used `lib.hasSuffix "default.nix" name`, which
  # would also (incorrectly) skip any file merely ending in "default.nix",
  # e.g. "foo-default.nix".
  nixFiles = builtins.attrNames (lib.attrsets.filterAttrs
    (name: type:
      type != "directory"
      && lib.hasSuffix ".nix" name
      && name != "default.nix")
    files);
  # Paths of the sibling modules to import.
  imports = builtins.map (path: src + ("/" + path)) nixFiles;
in
{
  inherit imports;
}

View file

@ -0,0 +1,24 @@
{
  # Members of wheel may sudo without a password on these machines.
  security.sudo.wheelNeedsPassword = false;

  nix = {
    settings = {
      # Extra binary cache for Hyprland builds.
      substituters = [ "https://hyprland.cachix.org" ];
      # Users allowed to use privileged nix features and extra caches.
      trusted-users = [ "root" "@wheel" ];
      # Accepted signing keys: the personal hosts plus the Hyprland cache.
      trusted-public-keys = [
        "thor:yRx3HglIxjUYocp4/jAP9dPWxWBEpgP6hqj1ofEfn1A="
        "odin:ClRXzxmDZl2Y94SG4YlWXGiJDY4L9DgZq/3OLR5+i6k="
        "loki:HN1P2nXzIkqitl95MvjcSHxtDo7Ao+I8M8U/RqQLC5k="
        "hyprland.cachix.org-1:a7pgxzMz7+chwVL3/pzj6jIBMioiJM7ypFP8PwtkuGc="
      ];
      # Key used to sign store paths served from this host.
      secret-key-files = [ "/etc/nix/key" ];
      # substituters = [
      #   "ssh-ng://thor.local"
      #   "ssh-ng://odin.local"
      #   "ssh-ng://loki.local"
      # ];
    };
  };
}

View file

@ -0,0 +1,37 @@
{
  # Pin a few flake ids in the system flake registry so that e.g.
  # `nix run agenix` resolves to the intended GitHub repository.
  nix.registry =
    let
      # Map an indirect flake id onto a GitHub repository.
      github = id: owner: repo: {
        from = { inherit id; type = "indirect"; };
        to = { inherit owner repo; type = "github"; };
      };
    in
    {
      agenix = github "agenix" "ryantm" "agenix";
      my-templates = github "my-templates" "bertof" "flake-templates";
      tex2nix = github "tex2nix" "Mic92" "tex2nix";
    };
}

View file

@ -0,0 +1,15 @@
{ lib, ... }:
let
  # Hardened SSH daemon settings; every entry is wrapped in mkDefault
  # below so an individual host can still override them.
  hardenedSettings = {
    KbdInteractiveAuthentication = false;
    PermitRootLogin = "prohibit-password";
    PasswordAuthentication = false;
  };
in
{
  services.openssh = {
    enable = true;
    openFirewall = true;
    settings = builtins.mapAttrs (_name: lib.mkDefault) hardenedSettings;
  };
  # Personal key allowed to log in as root.
  users.users.root.openssh.authorizedKeys.keys = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKp1Rfb2acLM/5TDUahu+AdV/HVw+hoOTdQIeQIjV5p8"
  ];
}

View file

@ -0,0 +1,6 @@
{
  # Join the personal ZeroTier network at boot.
  services.zerotierone.enable = true;
  services.zerotierone.joinNetworks = [ "8056c2e21cf9c753" ];
}

325
modules/nixos/big_data.nix Normal file
View file

@ -0,0 +1,325 @@
{ config, lib, pkgs, modulesPath, ... }:
let
  # One-time host preparation, kept for reference: create the HDFS data
  # root and export Kerberos keytabs for the Hadoop service principals
  # (nn/dn/jn/rm/nm/jhs/HTTP) and for Spark.
  # setup_script = ''
  #   sudo mkdir -p /hdfs
  #   sudo chown -R hdfs:hadoop /hdfs
  #   for p in {nn,dn,jn,rm,nm,jhs,HTTP}; do
  #     sudo kadmin.local -q "ank -randkey $p/my.engine";
  #     sudo kadmin.local -q "xst -k /etc/hadoop.keytab $p/my.engine";
  #     sudo kadmin.local -q "ktrem -k /etc/hadoop.keytab $p/my.engine old"
  #   done
  #   sudo chown hdfs:hadoop /etc/hadoop.keytab
  #   sudo kadmin.local -q "ank -randkey spark/my.engine";
  #   sudo kadmin.local -q "xst -k /etc/spark.keytab spark/my.engine";
  #   sudo kadmin.local -q "ktrem -k /etc/spark.keytab spark/my.engine old"
  #   sudo chown spark:spark /etc/spark.keytab
  # '';

  # Keytabs produced by the setup script above; referenced throughout the
  # Hadoop and Spark configuration below.
  hadoop_keytab_path = "/etc/hadoop.keytab";
  spark_keytab_path = "/etc/spark.keytab";

  # Python environment made available to PySpark (driver and executors).
  pysparkPackageSelector = p: with p; [ numpy pyspark ];
  pysparkEnv = pkgs.python3.withPackages pysparkPackageSelector;

  # Reuse the nixpkgs helper that renders the Hadoop configuration
  # directory (core-site.xml, hdfs-site.xml, ...) from services.hadoop.
  hadoopConf = import (modulesPath + "/services/cluster/hadoop/conf.nix") {
    inherit pkgs lib;
    cfg = config.services.hadoop;
  };
  hadoopConfDir = "${hadoopConf}/";

  # Spark build whose bundled Python includes the packages above.
  # NOTE(review): the derivation below and the spark-history unit still
  # reference pkgs.spark rather than this override — confirm intentional.
  spark = pkgs.spark.override {
    extraPythonPackages = pysparkPackageSelector pkgs.python3.pkgs;
  };

  # Spark configuration directory: symlink the stock *.template files and
  # generate spark-env.sh / spark-defaults.conf pointing at the Hadoop
  # configuration and the PySpark environment defined above.
  sparkConfDir = pkgs.stdenv.mkDerivation {
    name = "spark-conf";
    dontUnpack = true;
    installPhase = ''
      # source standard environment
      . $stdenv/setup
      # shorthands
      base_conf=${pkgs.spark}/lib/${pkgs.spark.untarDir}/conf/
      # create output dirs for new derivation
      mkdir -p $out/
      # link unchanged files from the original gnome-session
      for f in $base_conf/*.template ; do
      ln -sf $f $out/
      done
      # change selected files
      cp $out/log4j.properties{.template,}
      cat > $out/spark-env.sh <<- STOP
      export JAVA_HOME="${pkgs.jdk8}"
      export SPARK_HOME="${pkgs.spark}/lib/${pkgs.spark.untarDir}"
      export SPARK_DIST_CLASSPATH=$(${pkgs.hadoop}/bin/hadoop classpath)
      export PYSPARK_PYTHON="${pysparkEnv.outPath}/bin/${pysparkEnv.executable}"
      export PYSPARK_DRIVER_PYTHON="${pysparkEnv.outPath}/bin/${pysparkEnv.executable}"
      export PYTHONPATH="\$PYTHONPATH:$PYTHONPATH"
      export HADOOP_CONF_DIR="${hadoopConfDir}"
      export SPARKR_R_SHELL="${pkgs.R}/bin/R"
      export PATH="\$PATH:${pkgs.R}/bin"
      STOP
      cat > $out/spark-defaults.conf <<- STOP
      spark.eventLog.enabled true
      spark.eventLog.dir hdfs://localhost:/logs/spark
      spark.history.fs.logDirectory hdfs://localhost:/logs/spark
      # spark.yarn.keytab ${spark_keytab_path}
      # spark.yarn.principal spark/my.engine@MY.ENGINE
      spark.history.ui.acls.enable true
      spark.history.kerberos.enabled true
      spark.history.kerberos.keytab ${spark_keytab_path}
      spark.history.kerberos.principal spark/my.engine@MY.ENGINE
      spark.yarn.appMasterEnv.PYSPARK_PYTHON ${pysparkEnv.outPath}/bin/${pysparkEnv.executable}
      spark.yarn.appMasterEnv.PYTHONPATH ${pysparkEnv.outPath}/lib/${pysparkEnv.executable}/site-packages
      spark.executorEnv.PYSPARK_PYTHON ${pysparkEnv.outPath}/bin/${pysparkEnv.executable}
      STOP
    '';
  };
in
{
  # Every Hadoop hostname resolves locally: this is a single-node cluster.
  networking = {
    hosts = { "127.0.0.1" = [ "ds.my.engine" "kdc.my.engine" "my.engine" ]; };
  };
  services = {
    # Spark master + worker on this node, using the generated conf dir.
    spark = {
      package = spark;
      master = {
        enable = true;
        restartIfChanged = true;
      };
      worker = {
        enable = true;
        restartIfChanged = true;
      };
      confDir = sparkConfDir;
    };
    hadoop = {
      coreSite = {
        "fs.defaultFS" = "hdfs://my.engine:8020";
        # HDFS IMPERSONATION
        "hadoop.proxyuser.hdfs.hosts" = "*";
        "hadoop.proxyuser.hdfs.groups" = "*";
        # HIVE IMPERSONATION
        "hadoop.proxyuser.hive.hosts" = "*";
        "hadoop.proxyuser.hive.groups" = "*";
        # ENABLE AUTHENTICATION
        "hadoop.security.authentication" = "kerberos";
        "hadoop.security.authorization" = "true";
        "hadoop.rpc.protection" = "privacy";
        # Map Kerberos service principals onto local unix users:
        # nn/dn/jn -> hdfs, rm/nm -> yarn, jhs -> mapred.
        "hadoop.security.auth_to_local" = ''
          RULE:[2:$1/$2@$0]([ndj]n/.*@MY\.ENGINE)s/.*/hdfs/
          RULE:[2:$1/$2@$0]([rn]m/.*@MY\.ENGINE)s/.*/yarn/
          RULE:[2:$1/$2@$0](jhs/.*@MY\.ENGINE)s/.*/mapred/
          DEFAULT
        '';
      };
      hdfsSite = {
        # DATA
        "dfs.namenode.name.dir" = "/hdfs/dfs/name";
        "dfs.datanode.data.dir" = "/hdfs/dfs/data";
        "dfs.journalnode.edits.dir" = "/hdfs/dfs/edits";
        # HDFS SECURITY
        "dfs.block.access.token.enable" = "true";
        "dfs.cluster.administrators" = "hdfs,HTTP,bertof";
        # NAME NODE SECURITY
        "dfs.namenode.keytab.file" = hadoop_keytab_path;
        "dfs.namenode.kerberos.principal" = "nn/my.engine@MY.ENGINE";
        "dfs.namenode.kerberos.internal.spnego.principal" =
          "HTTP/my.engine@MY.ENGINE";
        # SECONDARY NAME NODE SECURITY
        "dfs.secondary.namenode.keytab.file" = hadoop_keytab_path;
        "dfs.secondary.namenode.kerberos.principal" = "nn/my.engine@MY.ENGINE";
        "dfs.secondary.namenode.kerberos.internal.spnego.principal" =
          "HTTP/my.engine@MY.ENGINE";
        # DATA NODE SECURITY
        "dfs.datanode.keytab.file" = hadoop_keytab_path;
        "dfs.datanode.kerberos.principal" = "dn/my.engine@MY.ENGINE";
        # JOURNAL NODE SECURITY
        "dfs.journalnode.keytab.file" = hadoop_keytab_path;
        "dfs.journalnode.kerberos.principal" = "jn/my.engine@MY.ENGINE";
        # WEBHDFS SECURITY
        "dfs.webhdfs.enabled" = "true";
        # WEB AUTHENTICATION CONFIG
        "dfs.web.authentication.kerberos.principal" =
          "HTTP/my.engine@MY.ENGINE";
        "dfs.web.authentication.kerberos.keytab" = hadoop_keytab_path;
        # NOTE(review): secure datanode ports disabled and plain HTTP —
        # presumably acceptable only on this single trusted host.
        "ignore.secure.ports.for.testing" = "true";
        "dfs.http.policy" = "HTTP_ONLY";
        "dfs.data.transfer.protection" = "privacy";
        # ## MULTIHOMED
        # "dfs.namenode.rpc-bind-host" = "0.0.0.0";
        # "dfs.namenode.servicerpc-bind-host" = "0.0.0.0";
        # "dfs.namenode.http-bind-host" = "0.0.0.0";
        # "dfs.namenode.https-bind-host" = "0.0.0.0";
        # "dfs.client.use.datanode.hostname" = "true"; # force connection by hostname
        # "dfs.datanode.use.datanode.hostname" = "true"; # force connection by hostname
      };
      yarnSite = {
        "yarn.nodemanager.admin-env" = "PATH=$PATH";
        "yarn.nodemanager.aux-services" = "mapreduce_shuffle";
        "yarn.nodemanager.aux-services.mapreduce_shuffle.class" =
          "org.apache.hadoop.mapred.ShuffleHandler";
        "yarn.nodemanager.bind-host" = "0.0.0.0";
        "yarn.nodemanager.container-executor.class" =
          "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor";
        "yarn.nodemanager.env-whitelist" =
          "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,LANG,TZ";
        "yarn.nodemanager.linux-container-executor.group" = "hadoop";
        # Setuid wrapper installed by the NixOS yarn module.
        "yarn.nodemanager.linux-container-executor.path" =
          "/run/wrappers/yarn-nodemanager/bin/container-executor";
        "yarn.nodemanager.log-dirs" = "/var/log/hadoop/yarn/nodemanager";
        "yarn.resourcemanager.bind-host" = "0.0.0.0";
        "yarn.resourcemanager.scheduler.class" =
          "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
        "yarn.resourcemanager.keytab" = hadoop_keytab_path;
        "yarn.resourcemanager.principal" = "rm/my.engine@MY.ENGINE";
        "yarn.nodemanager.keytab" = hadoop_keytab_path;
        "yarn.nodemanager.principal" = "nm/my.engine@MY.ENGINE";
        # "yarn.nodemanager.container-executor.class" = "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor";
        "yarn.scheduler.capacity.root.queues" = "default";
        "yarn.scheduler.capacity.root.default.capacity" = 100;
        # "yarn.scheduler.capacity.root.default.state" = "RUNNING";
        "yarn.scheduler.capacity.root.acl_submit_applications" =
          "hadoop,yarn,mapred,hdfs";
      };
httpfsSite = {
"kerberos.realm" = "MY.ENGINE";
"httpfs.authentication.type" = "kerberos";
"httpfs.authentication.kerberos.principal " =
"HTTP/my.engine@MY.ENGINE";
"httpfs.authentication.kerberos.keytab" = hadoop_keytab_path;
"httpfs.hadoop.kerberos.principal " = "HTTP/my.engine@MY.ENGINE";
"httpfs.hadoop.kerberos.keytab" = hadoop_keytab_path;
};
      extraConfDirs = [ ];
      # HDFS daemons. Single-node setup: the namenode formats itself on
      # first start and ZKFC (HA failover) stays disabled.
      hdfs = {
        namenode = {
          enable = true;
          formatOnInit = true;
          restartIfChanged = true;
        };
        datanode = {
          enable = true;
          restartIfChanged = true;
        };
        journalnode = {
          enable = true;
          restartIfChanged = true;
        };
        zkfc = {
          enable = false;
          restartIfChanged = true;
        }; # ZOOKEEPER DISABLED, not using High Availability setup
        httpfs = {
          enable = true;
          restartIfChanged = true;
        };
      };
      # YARN daemons (resource manager + node manager, no cgroups).
      yarn = {
        resourcemanager = {
          enable = true;
          restartIfChanged = true;
        };
        nodemanager = {
          enable = true;
          restartIfChanged = true;
          useCGroups = false;
        };
      };
    };
    # Local KDC for the MY.ENGINE realm; admin and host-service
    # principals get full access.
    kerberos_server = {
      enable = true;
      realms."MY.ENGINE".acl = [
        {
          principal = "*/admin";
          access = "all";
        }
        {
          principal = "*/my.engine";
          access = "all";
        }
      ];
    };
  };
  # Kerberos client configuration pointing at the local KDC.
  krb5 = {
    enable = true;
    realms = {
      "MY.ENGINE" = {
        admin_server = "kdc.my.engine";
        kdc = "kdc.my.engine";
        # default_domain = "my.engine";
        # kpasswd_server = "odin";
      };
    };
    domain_realm = {
      # ".my.engine" = "MY.ENGINE";
      "my.engine" = "MY.ENGINE";
    };
    libdefaults = {
      default_realm = "MY.ENGINE";
      dns_lookup_realm = true;
      dns_lookup_kdc = true;
      ticket_lifetime = "24h";
      renew_lifetime = "7d";
      forwardable = true;
    };
    extraConfig = ''
      [logging]
      default = FILE:/var/log/krb5libs.log
      kdc = FILE:/var/log/krb5kdc.log
      admin_server = FILE:/var/log/kadmind.log
    '';
  };
  # Let the interactive user read HDFS-group-owned files and submit jobs.
  users.users.bertof.extraGroups = [ "hadoop" ];
  # The Spark history server is not provided as a NixOS service, so run
  # the stock sbin scripts as a forking systemd unit.
  # NOTE(review): uses pkgs.spark rather than the overridden `spark`
  # defined in the let-block — confirm that is intentional.
  systemd.services.spark-history = {
    path = builtins.attrValues { inherit (pkgs) procps openssh nettools; };
    description = "spark history service.";
    after = [ "network.target" ];
    wantedBy = [ "multi-user.target" ];
    restartIfChanged = true;
    environment = {
      SPARK_CONF_DIR = sparkConfDir;
      SPARK_LOG_DIR = "/var/log/spark";
    };
    serviceConfig = {
      Type = "forking";
      User = "spark";
      Group = "spark";
      WorkingDirectory = "${pkgs.spark}/lib/${pkgs.spark.untarDir}";
      ExecStart =
        "${pkgs.spark}/lib/${pkgs.spark.untarDir}/sbin/start-history-server.sh";
      ExecStop =
        "${pkgs.spark}/lib/${pkgs.spark.untarDir}/sbin/stop-history-server.sh";
      TimeoutSec = 300;
      StartLimitBurst = 10;
      Restart = "always";
    };
  };
}

19
modules/nixos/defcon.nix Normal file
View file

@ -0,0 +1,19 @@
{
  # Static name resolution for the mHACKeroni CTF infrastructure:
  # the VPN endpoint plus the per-service hosts reachable through it.
  networking.hosts =
    let
      # All services on 10.100.0.50 live under cb.cloud.mhackeroni.it.
      cb = name: "${name}.cb.cloud.mhackeroni.it";
    in
    {
      "54.176.11.243" = [ "vpn.mhackeroni.it" ];
      "10.100.0.50" =
        map cb [ "master" "bartender" "grafana" "menu" "maitre" "accountant" ];
      "10.100.0.150" =
        [ "flowgui.cloud.mhackeroni.it" "smb.cloud.mhackeroni.it" ];
      "10.100.0.200" = [ "tunniceddu.cloud.mhackeroni.it" ];
      "10.100.0.250" = [ "rev.cloud.mhackeroni.it" ];
      "10.100.0.66" = [ "attackerbackup.cloud.mhackeroni.it" ];
      "192.168.128.1" = [ "smb.hotel.mhackeroni.it" "rev.hotel.mhackeroni.it" ];
    };
}

View file

@ -0,0 +1 @@
{
  # DigitalOcean droplet monitoring agent.
  services.do-agent.enable = true;
}

23
modules/nixos/dnsmasq.nix Normal file
View file

@ -0,0 +1,23 @@
let
  # notracking host blocklist (dnsmasq format), pinned by content hash so
  # the build stays reproducible even though the URL is mutable.
  blocklist = builtins.fetchurl {
    url =
      "https://github.com/notracking/hosts-blocklists/raw/master/dnsmasq/dnsmasq.blacklist.txt";
    sha256 = "sha256:16xcx2z8ziv2fbqhr4ajayxblcs4i1ckrwnf50iina9asgia18za";
  };
in
{
  # Expose the resolver on the network (DNS over TCP and UDP).
  networking.firewall = {
    allowedTCPPorts = [ 53 ];
    allowedUDPPorts = [ 53 ];
  };
  services.dnsmasq = {
    enable = true;
    # Upstream resolvers: Cloudflare and Google.
    servers = [ "1.1.1.1" "8.8.8.8" "8.8.4.4" ];
    extraConfig = ''
      cache-size=10000
      log-queries
      local-ttl=300
      conf-file=${blocklist}
    '';
  };
}

44
modules/nixos/garage.nix Normal file
View file

@ -0,0 +1,44 @@
{ pkgs, config, lib, ... }: {
  # Dedicated system user/group for the Garage object store.
  users.groups.garage = { };
  users.users.garage = { isSystemUser = true; group = "garage"; };
  # Inter-node RPC secret, decrypted by agenix for the service user.
  age.secrets.garage_rpc_secret = { file = ../../secrets/garage_rpc_secret.age; owner = "garage"; };
  # Expose the RPC port only on the ZeroTier interface.
  networking.firewall.interfaces."ztmjfdwjkp".allowedTCPPorts = [
    3901
  ];
  # Not correctly passing mount bindings
  systemd.services.garage.serviceConfig = {
    ProtectHome = lib.mkForce false;
    DynamicUser = false;
  };
  services.garage = {
    # NOTE(review): taken from an unstable package set overlay rather
    # than the pinned nixpkgs — confirm this is still required.
    package = pkgs.unstable_pkgs.garage;
    enable = true;
    settings = {
      # NOTE(review): garage documents replication_mode as a string
      # ("1"/"2"/"3"); verify the integer form serializes as intended.
      replication_mode = 1;
      rpc_secret_file = config.age.secrets.garage_rpc_secret.path;
      rpc_bind_addr = "[::]:3901";
      bootstrap_peers = [ ];
      # S3-compatible API endpoint.
      s3_api = {
        api_bind_addr = "[::]:3900";
        s3_region = "garage";
        root_domain = ".s3.bertof.net";
      };
      # Static website endpoint.
      s3_web = {
        bind_addr = "[::]:3902";
        root_domain = ".web.bertof.net";
      };
      # Admin API; tokens are kept commented out of the store.
      admin = {
        api_bind_addr = "0.0.0.0:3903";
        # metrics_token = "72ad105afc44f30c189b2505f5583d3ea9be26a3e0a4730d48381b1ae4b70074";
        # admin_token = "05bf164fe1ce3ecc1dff8fb1e5b237331d24b109792be714738fa92b2d14213d";
        # trace_sink = "http://localhost:4317";
      };
    };
  };
}

View file

@ -0,0 +1,18 @@
{
  # Key-only SSH access for a headless host; root logins are limited to
  # the personal keys listed below.
  services.openssh = {
    enable = true;
    openFirewall = true;
    settings = {
      # PermitRootLogin = "prohibit-password";
      PasswordAuthentication = false;
    };
  };
  users.users.root.openssh.authorizedKeys.keys = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAhxOjo9Ac9hVd3eOR56F6sClUMUh1m7VpcmzA18dslj bertof@odin"
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO7mcf8fbMo1eXqSJeVFWaweB+JOU+67dFuf8laZKZZG bertof@thor"
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKp1Rfb2acLM/5TDUahu+AdV/HVw+hoOTdQIeQIjV5p8"
  ];
  # Release this host was first installed with; must never be bumped.
  system.stateVersion = "22.11";
}

13
modules/nixos/k3s.nix Normal file
View file

@ -0,0 +1,13 @@
{
  # Lightweight single-node Kubernetes (k3s).
  services.k3s.enable = true;
  networking.firewall = {
    allowedTCPPorts = [
      6443 # Kubernetes API
    ];
    # Wide TCP range opened for cluster workloads
    # (specific purpose not documented here).
    allowedTCPPortRanges = [{ from = 9000; to = 15000; }];
  };
}

View file

@ -0,0 +1,7 @@
{
  # KDE Connect device pairing / file transfer.
  # NOTE(review): the explicit 1714-1764 firewall ranges are commented
  # out, presumably because the module opens them itself — confirm.
  # networking.firewall = {
  #   allowedTCPPortRanges = [{ from = 1714; to = 1764; }];
  #   allowedUDPPortRanges = [{ from = 1714; to = 1764; }];
  # };
  programs.kdeconnect.enable = true;
}

12
modules/nixos/mind.nix Normal file
View file

@ -0,0 +1,12 @@
{
  # Local PostgreSQL with an "mfh" database accessible to bertof.
  services.postgresql = {
    enable = true;
    ensureDatabases = [ "mfh" ];
    ensureUsers = [{
      name = "bertof";
      # NOTE(review): ensurePermissions was removed from NixOS in later
      # releases (23.11+); confirm the target release before upgrading.
      ensurePermissions = { "DATABASE \"mfh\"" = "ALL PRIVILEGES"; };
    }];
  };
  # Kafka broker with default settings.
  services.apache-kafka = { enable = true; };
}

View file

@ -0,0 +1,39 @@
{ pkgs, config, ... }: {
  # Agenix-managed secrets, readable only by the nextcloud user.
  age.secrets = {
    nextcloud_admin_secret = {
      file = ../../secrets/nextcloud_admin_secret.age;
      owner = "nextcloud";
    };
    nextcloud_bucket_secret = {
      file = ../../secrets/nextcloud_bucket_secret.age;
      owner = "nextcloud";
    };
  };
  # services.nginx.virtualHosts.${config.services.nextcloud.hostName} = {
  #   enableACME = true;
  #   forceSSL = true;
  # };
  services.nextcloud = {
    enable = true;
    package = pkgs.nextcloud27;
    hostName = "my-nextcloud.bertof.net";
    maxUploadSize = "24G";
    caching.apcu = true;
    config = {
      trustedProxies = [
        "172.23.4.159"
        "fd80:56c2:e21c:f9c7:5399:93be:21a9:9fa0"
        "fe80::3079:d8ff:feb5:7d62"
      ];
      extraTrustedDomains =
        [ config.services.nextcloud.hostName "freya.local" ];
      adminpassFile = config.age.secrets.nextcloud_admin_secret.path;
      overwriteProtocol = "https";
      # Object storage backed by the local Garage instance (port 3900).
      objectstore.s3 = {
        enable = true;
        bucket = "nextcloud-storage";
        autocreate = false;
        key = "GK622e38479552cbbbba48fd04";
        secretFile = config.age.secrets.nextcloud_bucket_secret.path;
        hostname = "localhost";
        port = 3900;
        useSsl = false;
        region = "garage";
        usePathStyle = true;
      };
    };
  };
  networking.firewall.allowedTCPPorts = [ 80 ];
}

View file

@ -0,0 +1,43 @@
{ config, lib, ... }:
let
  # SSH keys authorized for the given local user, or [] when that user is
  # not defined on this host.
  # FIX: the existence guard previously hard-coded "bertof" instead of
  # using the `user` argument, so for any other user the function could
  # dereference a missing users.users entry (or wrongly return []).
  user_keys = user:
    lib.optionals (builtins.hasAttr user config.users.users)
      config.users.users.${user}.openssh.authorizedKeys.keys;
in
{
  # nix.buildMachines
  # Serve this machine's store over ssh-ng to the keys above, and run a
  # nix-serve binary cache.
  nix.distributedBuilds = true;
  nix.sshServe = {
    enable = true;
    keys = user_keys "bertof";
    write = true;
    protocol = "ssh-ng";
  };
  services.nix-serve = {
    enable = true;
    openFirewall = true;
    secretKeyFile = "/etc/nix/serve";
  };
  nix.settings = {
    trusted-users = [ "root" "nix-ssh" "@wheel" ];
    # Signing keys of the peer machines whose stores may be substituted.
    trusted-public-keys = [
      "odin:ukZZy//P0nBAcy4ycX8eYCByRJFOfJRlfW4sYjP/rGE="
      "loki:jVAH1bQugXdQ1w29lvVknyPqWwmAn7WhjKf7z4t+q7E="
    ];
    substituters = [
      # "https://192.168.0.10"
      # "https://192.168.0.100"
      # "ssh-ng://loki.local"
      # "ssh-ng://odin.local"
      # "ssh-ng://192.168.0.10"
      # "ssh-ng://192.168.0.100"
    ];
    # trusted-substituters = [
    # ];
  };
}

View file

@ -0,0 +1,30 @@
{ pkgs, ... }:
let
  # XP-Pen driver bumped past the nixpkgs version; the web.archive.org
  # URL is a fallback in case the vendor download disappears.
  xpPenDriver = pkgs.xp-pen-deco-01-v2-driver.overrideAttrs (_old: rec {
    version = "3.2.3.230215-1";
    src = pkgs.fetchzip {
      name = "xp-pen-deco-01-v2-driver-${version}.tar.gz";
      sha256 = "sha256-CV4ZaGCFFcfy2J0O8leYgcyzFVwJQFQJsShOv9B7jfI=";
      urls = [
        "https://download01.xp-pen.com/file/2023/03/XPPen-pentablet-${version}.x86_64.tar.gz"
        "https://web.archive.org/web/20230424112207/https://download01.xp-pen.com/file/2023/03/XPPen-pentablet-${version}.x86_64.tar.gz"
      ];
    };
  });
in
{
  # Let the driver create uinput devices and access the tablet USB id.
  services.udev.extraRules = ''
    KERNEL=="uinput",MODE:="0666",OPTIONS+="static_node=uinput"
    SUBSYSTEMS=="usb",ATTRS{idVendor}=="28bd",MODE:="0666"
  '';
  # XP-Pen tablet driver
  environment.systemPackages = [ xpPenDriver ];
  # hardware.opentabletdriver = {
  #   enable = true;
  #   daemon.enable = true;
  # };
}

View file

@ -0,0 +1,81 @@
{
  # Realtime-audio tuning. Only the PAM limits are active; the kernel,
  # scheduler and plugin-path tweaks below are kept for reference.
  boot = {
    # kernelModules = [ "snd-seq" "snd-rawmidi" ];
    # kernel.sysctl = { "vm.swappiness" = 10; "fs.inotify.max_user_watches" = 524288; };
    # kernelParams = [ "threadirq" ];
    # kernelPatches = lib.singleton {
    #   name = "pro_audio";
    #   patch = null;
    #   extraConfig = ''
    #     PREEMPT_RT y
    #     PREEMPT y
    #     IOSCHED_DEADLINE y
    #     DEFAULT_DEADLINE y
    #     DEFAULT_IOSCHED "deadline"
    #     HPET_TIMER y
    #     CPU_FREQ n
    #     TREE_RCU_TRACE n
    #   '';
    # };
    # postBootCommands = ''
    #   echo 2048 > /sys/class/rtc/rtc0/max_user_freq
    #   echo 2048 > /proc/sys/dev/hpet/max-user-freq
    #   # setpci -v -d *:* latency_timer=b0
    #   # setpci -v -s $00:1b.0 latency_timer=ff
    # '';
    # The SOUND_CARD_PCI_ID can be obtained like so:
    # $ lspci ¦ grep -i audio
  };
  # powerManagement.cpuFreqGovernor = "performance";
  # fileSystems."/" = { options = "noatime errors=remount-ro"; };
  # Resource limits for the audio group: unlimited locked memory,
  # near-maximum realtime priority, and generous open-file limits.
  security.pam.loginLimits =
    let
      audioLimit = item: type: value: {
        domain = "@audio";
        inherit item type value;
      };
    in
    [
      (audioLimit "memlock" "-" "unlimited")
      (audioLimit "rtprio" "-" "99")
      (audioLimit "nofile" "soft" "99999")
      (audioLimit "nofile" "hard" "524288")
    ];
  # services = {
  #   udev = {
  #     packages = [ pkgs.ffado ]; # If you have a FireWire audio interface
  #     extraRules = ''
  #       KERNEL=="rtc0", GROUP="audio"
  #       KERNEL=="hpet", GROUP="audio"
  #     '';
  #   };
  #   cron.enable = false;
  # };
  # environment.shellInit = ''
  #   export VST_PATH=/nix/var/nix/profiles/default/lib/vst:/var/run/current-system/sw/lib/vst:~/.vst
  #   export LXVST_PATH=/nix/var/nix/profiles/default/lib/lxvst:/var/run/current-system/sw/lib/lxvst:~/.lxvst
  #   export LADSPA_PATH=/nix/var/nix/profiles/default/lib/ladspa:/var/run/current-system/sw/lib/ladspa:~/.ladspa
  #   export LV2_PATH=/nix/var/nix/profiles/default/lib/lv2:/var/run/current-system/sw/lib/lv2:~/.lv2
  #   export DSSI_PATH=/nix/var/nix/profiles/default/lib/dssi:/var/run/current-system/sw/lib/dssi:~/.dssi
  # '';
}

View file

@ -0,0 +1,6 @@
{
  # Automatic nix store garbage collection, keeping the last 7 days.
  nix.gc.automatic = true;
  nix.gc.options = "--delete-older-than 7d";
}

View file

@ -0,0 +1,9 @@
{
  # Unattended upgrades straight from the dotfiles flake on GitLab.
  system.autoUpgrade = {
    enable = true;
    flake = "gitlab:bertof/nix-dotfiles";
    flags = [ "--refresh" ];
    # dates = "daily"; # default 04:04
    # Spread start times so hosts don't all hit the caches at once.
    randomizedDelaySec = "45min";
  };
}

View file

@ -0,0 +1,15 @@
{ lib, config, ... }:
let
  # Devices backing every btrfs mount declared by this configuration.
  scrubTargets = lib.unique (lib.attrsets.mapAttrsToList (_name: fs: fs.device)
    (lib.filterAttrs (_name: fs: fs.fsType == "btrfs") config.fileSystems));
in
{
  # Scrub periodically, but only when at least one btrfs device exists.
  services.btrfs.autoScrub = {
    enable = scrubTargets != [ ];
    fileSystems = scrubTargets;
  };
}

View file

@ -0,0 +1,10 @@
{ lib, ... }:
let
  # Directory containing this default.nix; scanned for sibling modules.
  src = ./.;
  # Entry name -> entry type for everything in this directory.
  files = builtins.readDir src;
  # Keep regular *.nix files, excluding this default.nix itself.
  # FIX: the previous filter used `lib.hasSuffix "default.nix" name`, which
  # would also (incorrectly) skip any file merely ending in "default.nix",
  # e.g. "foo-default.nix".
  nixFiles = builtins.attrNames (lib.attrsets.filterAttrs
    (name: type:
      type != "directory"
      && lib.hasSuffix ".nix" name
      && name != "default.nix")
    files);
  # Paths of the sibling modules to import.
  imports = builtins.map (path: src + ("/" + path)) nixFiles;
in
{
  inherit imports;
}

View file

@ -0,0 +1,3 @@
{
  # Periodic TRIM of mounted filesystems via the fstrim timer.
  services.fstrim = { enable = true; };
}

View file

@ -0,0 +1,3 @@
{
  # Firmware update daemon (fwupdmgr).
  services.fwupd = { enable = true; };
}

9
modules/nixos/sesar.nix Normal file
View file

@ -0,0 +1,9 @@
{
  # Static hosts for the SESAR lab: all Hadoop nodes resolve to the same
  # gateway address, plus the vCenter appliance.
  networking.hosts = {
    "172.20.28.210" = [ "datanode1" "datanode2" "datanode3" "namenode" ];
    # "172.20.28.210" = [ "*.engine.sesar.int" ];
    "159.149.147.137" = [ "vcenter.sesar.int" ];
  };
}

View file

@ -0,0 +1,6 @@
{
  # Firewall holes for Steam Remote Play / local discovery.
  networking.firewall.allowedTCPPorts = [ 27036 27037 ];
  networking.firewall.allowedUDPPorts = [ 27031 27036 ];
}

View file

@ -0,0 +1,8 @@
{
  # Tailscale mesh VPN; the cert UID may fetch TLS certs via the daemon.
  services.tailscale.enable = true;
  services.tailscale.permitCertUid = "filippoberto95@gmail.com";
  # Relax reverse-path filtering so tailscale traffic is not dropped.
  networking.firewall.checkReversePath = "loose";
}

View file

@ -0,0 +1,13 @@
let
  # Public keys of the personal machines allowed to log in as bertof.
  personalKeys = [
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAhxOjo9Ac9hVd3eOR56F6sClUMUh1m7VpcmzA18dslj bertof@odin"
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO7mcf8fbMo1eXqSJeVFWaweB+JOU+67dFuf8laZKZZG bertof@thor"
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKbG791lSOl8Rqoy+KkdKiOJnOMRg02+HZ/VrlrWMYAX bertof@baldur"
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFviqAN0S+wZ5BQRpWpmsrkduPox3L4C7iLlCOQk7+pE bertof@loki"
    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKobKuuJCoQ7gj/NeE57wfSg/Qs4X3osw9xXook3PMAP bertof@extra"
  ];
in
{
  # Primary interactive user, with virtualization / network management
  # group membership.
  users.users.bertof = {
    isNormalUser = true;
    extraGroups = [ "libvirtd" "kvm" "network" "networkmanager" "wheel" "tss" ];
    openssh.authorizedKeys.keys = personalKeys;
  };
}