Nix fmt rfc style (more or less)

parent e7496c447a
commit 515f098644

146 changed files with 2607 additions and 906 deletions
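The hunks below are formatter output rather than hand edits. As a minimal sketch of how a tree gets reformatted wholesale like this, assuming a flake-based repo (an assumption, not something this commit shows), `nix fmt` runs whatever package the flake exposes as its `formatter` output:

# Hypothetical flake.nix wiring: `nix fmt` looks up the `formatter` output
# for the current system and runs it over the tree; nixfmt-rfc-style is the
# nixpkgs formatter implementing the RFC 166 style this commit approximates.
{
  inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";

  outputs = { self, nixpkgs }: {
    formatter.x86_64-linux = nixpkgs.legacyPackages.x86_64-linux.nixfmt-rfc-style;
  };
}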
@@ -1,4 +1,9 @@
-{ config, lib, pkgs, modulesPath, ... }:
+{ config
+, lib
+, pkgs
+, modulesPath
+, ...
+}:
 let
   # setup_script = ''
   # sudo mkdir -p /hdfs
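For comparison: strict RFC 166 output (current nixfmt) puts trailing commas in the function header rather than the leading-comma style added above, which is likely the "more or less" in the commit title. A sketch of the same header as nixfmt-rfc-style would lay it out:

# Sketch only: the header above in strict RFC 166 style.
{
  config,
  lib,
  pkgs,
  modulesPath,
  ...
}: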
@@ -18,16 +23,18 @@ let
   # '';
   hadoop_keytab_path = "/etc/hadoop.keytab";
   spark_keytab_path = "/etc/spark.keytab";
-  pysparkPackageSelector = p: with p; [ numpy pyspark ];
+  pysparkPackageSelector =
+    p: with p; [
+      numpy
+      pyspark
+    ];
   pysparkEnv = pkgs.python3.withPackages pysparkPackageSelector;
   hadoopConf = import (modulesPath + "/services/cluster/hadoop/conf.nix") {
     inherit pkgs lib;
     cfg = config.services.hadoop;
   };
   hadoopConfDir = "${hadoopConf}/";
-  spark = pkgs.spark.override {
-    extraPythonPackages = pysparkPackageSelector pkgs.python3.pkgs;
-  };
+  spark = pkgs.spark.override { extraPythonPackages = pysparkPackageSelector pkgs.python3.pkgs; };
   sparkConfDir = pkgs.stdenv.mkDerivation {
     name = "spark-conf";
     dontUnpack = true;
@@ -81,7 +88,13 @@ in
 {

   networking = {
-    hosts = { "127.0.0.1" = [ "ds.my.engine" "kdc.my.engine" "my.engine" ]; };
+    hosts = {
+      "127.0.0.1" = [
+        "ds.my.engine"
+        "kdc.my.engine"
+        "my.engine"
+      ];
+    };

   };

@@ -136,14 +149,12 @@ in
       # NAME NODE SECURITY
       "dfs.namenode.keytab.file" = hadoop_keytab_path;
       "dfs.namenode.kerberos.principal" = "nn/my.engine@MY.ENGINE";
-      "dfs.namenode.kerberos.internal.spnego.principal" =
-        "HTTP/my.engine@MY.ENGINE";
+      "dfs.namenode.kerberos.internal.spnego.principal" = "HTTP/my.engine@MY.ENGINE";

       # SECONDARY NAME NODE SECURITY
       "dfs.secondary.namenode.keytab.file" = hadoop_keytab_path;
       "dfs.secondary.namenode.kerberos.principal" = "nn/my.engine@MY.ENGINE";
-      "dfs.secondary.namenode.kerberos.internal.spnego.principal" =
-        "HTTP/my.engine@MY.ENGINE";
+      "dfs.secondary.namenode.kerberos.internal.spnego.principal" = "HTTP/my.engine@MY.ENGINE";

       # DATA NODE SECURITY
       "dfs.datanode.keytab.file" = hadoop_keytab_path;
@@ -157,8 +168,7 @@ in
       "dfs.webhdfs.enabled" = "true";

       # WEB AUTHENTICATION CONFIG
-      "dfs.web.authentication.kerberos.principal" =
-        "HTTP/my.engine@MY.ENGINE";
+      "dfs.web.authentication.kerberos.principal" = "HTTP/my.engine@MY.ENGINE";
       "dfs.web.authentication.kerberos.keytab" = hadoop_keytab_path;
       "ignore.secure.ports.for.testing" = "true";
       "dfs.http.policy" = "HTTP_ONLY";
@@ -175,20 +185,15 @@ in
     yarnSite = {
       "yarn.nodemanager.admin-env" = "PATH=$PATH";
       "yarn.nodemanager.aux-services" = "mapreduce_shuffle";
-      "yarn.nodemanager.aux-services.mapreduce_shuffle.class" =
-        "org.apache.hadoop.mapred.ShuffleHandler";
+      "yarn.nodemanager.aux-services.mapreduce_shuffle.class" = "org.apache.hadoop.mapred.ShuffleHandler";
       "yarn.nodemanager.bind-host" = "0.0.0.0";
-      "yarn.nodemanager.container-executor.class" =
-        "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor";
-      "yarn.nodemanager.env-whitelist" =
-        "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,LANG,TZ";
+      "yarn.nodemanager.container-executor.class" = "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor";
+      "yarn.nodemanager.env-whitelist" = "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,LANG,TZ";
       "yarn.nodemanager.linux-container-executor.group" = "hadoop";
-      "yarn.nodemanager.linux-container-executor.path" =
-        "/run/wrappers/yarn-nodemanager/bin/container-executor";
+      "yarn.nodemanager.linux-container-executor.path" = "/run/wrappers/yarn-nodemanager/bin/container-executor";
       "yarn.nodemanager.log-dirs" = "/var/log/hadoop/yarn/nodemanager";
       "yarn.resourcemanager.bind-host" = "0.0.0.0";
-      "yarn.resourcemanager.scheduler.class" =
-        "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";
+      "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler";

       "yarn.resourcemanager.keytab" = hadoop_keytab_path;
       "yarn.resourcemanager.principal" = "rm/my.engine@MY.ENGINE";
@@ -200,14 +205,12 @@ in
       "yarn.scheduler.capacity.root.queues" = "default";
       "yarn.scheduler.capacity.root.default.capacity" = 100;
       # "yarn.scheduler.capacity.root.default.state" = "RUNNING";
-      "yarn.scheduler.capacity.root.acl_submit_applications" =
-        "hadoop,yarn,mapred,hdfs";
+      "yarn.scheduler.capacity.root.acl_submit_applications" = "hadoop,yarn,mapred,hdfs";
     };
     httpfsSite = {
       "kerberos.realm" = "MY.ENGINE";
       "httpfs.authentication.type" = "kerberos";
-      "httpfs.authentication.kerberos.principal " =
-        "HTTP/my.engine@MY.ENGINE";
+      "httpfs.authentication.kerberos.principal " = "HTTP/my.engine@MY.ENGINE";
       "httpfs.authentication.kerberos.keytab" = hadoop_keytab_path;
       "httpfs.hadoop.kerberos.principal " = "HTTP/my.engine@MY.ENGINE";
       "httpfs.hadoop.kerberos.keytab" = hadoop_keytab_path;
@@ -312,10 +315,8 @@ in
       User = "spark";
       Group = "spark";
       WorkingDirectory = "${pkgs.spark}/lib/${pkgs.spark.untarDir}";
-      ExecStart =
-        "${pkgs.spark}/lib/${pkgs.spark.untarDir}/sbin/start-history-server.sh";
-      ExecStop =
-        "${pkgs.spark}/lib/${pkgs.spark.untarDir}/sbin/stop-history-server.sh";
+      ExecStart = "${pkgs.spark}/lib/${pkgs.spark.untarDir}/sbin/start-history-server.sh";
+      ExecStop = "${pkgs.spark}/lib/${pkgs.spark.untarDir}/sbin/stop-history-server.sh";
       TimeoutSec = 300;
       StartLimitBurst = 10;
       Restart = "always";