Diffstat (limited to 'modules')
83 files changed, 7644 insertions, 0 deletions
diff --git a/modules/apps/cli.nix b/modules/apps/cli.nix new file mode 100644 index 0000000..00365d8 --- /dev/null +++ b/modules/apps/cli.nix @@ -0,0 +1,140 @@ +{ config, pkgs, lib, ...}: + +let + + inherit (builtins) + toString ; + inherit (lib) + concatMapStrings filterAttrs mapAttrsToList mkOption + types unique ; + inherit (types) + attrsOf path str submodule ; + + explicit = filterAttrs (n: v: n != "_module" && v != null); + apps = explicit config.nixsap.apps.cli; + + exec = name: { user, command, ... }: + let + uid = toString config.users.users.${user}.uid; + gid = uid; + src = pkgs.writeText "${name}.c" '' + #include <unistd.h> + #include <grp.h> + #include <pwd.h> + #include <stdio.h> + #include <stdlib.h> + #include <sys/types.h> + + int main (int __attribute__((unused)) argc, char *argv[]) + { + int rc; + + if (getuid() != ${uid}) { + if (geteuid() != 0) { + fprintf(stderr, "Forbidden.\n"); + return EXIT_FAILURE; + } + + rc = initgroups("${user}", ${gid}); + if (0 != rc) { + perror("initgroups()"); + return EXIT_FAILURE; + } + + rc = setgid(${gid}); + if (0 != rc) { + perror("setgid()"); + return EXIT_FAILURE; + } + + rc = setuid(${uid}); + if (0 != rc) { + perror("setuid()"); + return EXIT_FAILURE; + } + + if ((getuid() != ${uid}) || (geteuid() != ${uid})) { + fprintf(stderr, "Something went wrong.\n"); + return EXIT_FAILURE; + } + + struct passwd * pw = getpwuid(${uid}); + if (NULL == pw) { + perror("getpwuid()"); + return EXIT_FAILURE; + } + + if (NULL != pw->pw_dir) { + rc = chdir(pw->pw_dir); + if (0 != rc) { + rc = chdir("/"); + } + } else { + rc = chdir("/"); + } + if (0 != rc) { + perror("chdir()"); + return EXIT_FAILURE; + } + } + + argv[0] = "${command}"; + execv(argv[0], argv); + + perror("execv()"); + return EXIT_FAILURE; + } + ''; + in pkgs.runCommand name {} "gcc -Wall -Wextra -Werror -std=gnu99 -O2 ${src} -o $out"; + + cliapp = submodule({name, ...}: + { + options = { + user = mkOption { + description = '' + User (and group) to run as. Only users in this group can execute + this application. + ''; + type = str; + default = name; + }; + command = mkOption { + description = "Path to executable"; + type = path; + }; + }; + }); + +in { + options.nixsap = { + apps.cli = mkOption { + description = '' + Command line applications that should run as other users and likely + have special privileges, e. g. to access secret keys. This is + implemented with setuid-wrappers. Each wrapper is launched as root, + immediately switches to specified user, then executes something + useful. This is like sudo, but access is controlled via wrapper's + group: only users in wrapper's group can execute the wrapper. + + Starting as set-uid-non-root is not sufficient, because we might + need supplementary groups, like "keys". + ''; + type = attrsOf cliapp; + default = {}; + }; + }; + + config = { + nixsap.system.users.daemons = unique (mapAttrsToList (_: a: a.user) apps); + security.setuidOwners = mapAttrsToList (n: a: + { program = n; + owner = "root"; + group = a.user; + setuid = true; + setgid = false; + permissions = "u+rx,g+x,o="; + source = exec n a; + }) apps; + }; +} + diff --git a/modules/apps/default.nix b/modules/apps/default.nix new file mode 100644 index 0000000..240d970 --- /dev/null +++ b/modules/apps/default.nix @@ -0,0 +1,11 @@ +{lib, ... }: + +let + all = lib.filterAttrs + ( n: _: n != "default.nix" && ! lib.hasPrefix "." n ) + (builtins.readDir ./.); + +in { + imports = map (p: ./. 
+ "/${p}") ( builtins.attrNames all ); +} + diff --git a/modules/apps/filebackup.nix b/modules/apps/filebackup.nix new file mode 100644 index 0000000..4aee0a9 --- /dev/null +++ b/modules/apps/filebackup.nix @@ -0,0 +1,282 @@ +{ config, pkgs, lib, ... }: +let + + inherit (builtins) + isBool isList isString toString ; + inherit (lib) + concatMapStringsSep concatStringsSep filter filterAttrs + flatten hasPrefix mapAttrsToList mkIf + mkOption optionalString removeSuffix ; + inherit (lib.types) + attrsOf bool either enum int listOf nullOr path str submodule ; + + cfg = config.nixsap.apps.filebackup; + privateDir = "/run/filebackup"; + + s3cmd = "${pkgs.s3cmd}/bin/s3cmd ${optionalString (cfg.s3cfg != null) "-c '${cfg.s3cfg}'"}"; + + gpgPubKeys = flatten [ cfg.encrypt ]; + gpg = "${pkgs.gpg}/bin/gpg2"; + pubring = pkgs.runCommand "pubring.gpg" {} '' + ${gpg} --homedir . --import ${toString gpgPubKeys} + cp pubring.gpg $out + ''; + + default = d: t: mkOption { type = t; default = d; }; + optional = type: mkOption { type = nullOr type; default = null; }; + sub = options: submodule { inherit options; } ; + mandatory = type: mkOption { inherit type; }; + concatMapAttrsSep = s: f: attrs: concatStringsSep s (mapAttrsToList f attrs); + + command = sub + { + absolute-names = optional bool; + exclude = optional (either str (listOf str)); + exclude-from = optional path; + exclude-vcs = optional bool; + exclude-vcs-ignores = optional bool; + group = optional str; + ignore-case = optional bool; + mode = optional str; + owner = optional str; + path = mandatory (either path (listOf path)); + }; + + job = name: o: + let + args = filterAttrs (k: v: + v != null && k != "_module" + && ( k != "path" ) + ) o; + + mkArg = k: v: + if isBool v then (optionalString v "--${k}") + else if isList v then concatMapStringsSep " " (i: "--${k}='${i}'") v + else if isString v then "--${k}='${v}'" + else "--${k}=${toString v}" ; + + tar = pkgs.writeBashScript "tar-${name}" '' + exec ${pkgs.gnutar}/bin/tar -c -f- \ + ${concatMapAttrsSep " " mkArg args} \ + "$@" + ''; + + in pkgs.writeBashScript "tar-${name}-job" '' + set -euo pipefail + cd "${cfg.tarballDir}/$DATE" + host=$(${pkgs.nettools}/bin/hostname -f) + + tarball="${name}@$host,$DATE.tar.xz" + ${ + if (gpgPubKeys != []) then + ''aim="$tarball.gpg"'' + else + ''aim="$tarball"'' + } + + if ! [ -r "$aim" ]; then + ${tar} ${concatMapStringsSep " " (p: "'${p}'") (flatten [o.path])} \ + | ${pkgs.pxz}/bin/pxz -2 -T2 > "$tarball.tmp" + mv "$tarball".tmp "$tarball" + + ${optionalString (gpgPubKeys != []) '' + recipient=( $(${gpg} --homedir '${privateDir}/gnupg' -k --with-colons --fast-list-mode | \ + ${pkgs.gawk}/bin/awk -F: '/^pub/{print $5}') ) + r=( "''${recipient[@]/#/-r}" ) + ${gpg} --homedir '${privateDir}/gnupg' --batch --no-tty --yes \ + "''${r[@]}" --trust-model always \ + --compress-algo none \ + -v -e "$tarball" + rm -f "$tarball" + ''} + else + echo "$aim exists. Not creating." >&2 + fi + ${optionalString (cfg.s3uri != null) '' + remote="${removeSuffix "/" cfg.s3uri}/$DATE/$aim" + if ! ${s3cmd} ls "$remote" | ${pkgs.gnugrep}/bin/grep -qF "/$aim"; then + ${s3cmd} put "$aim" "$remote" + else + echo "$remote exists. Not uploading." 
>&2 + fi + ''} + ''; + + preStart = '' + mkdir --mode=0750 -p '${cfg.tarballDir}' + chown -R ${cfg.user}:${cfg.user} '${cfg.tarballDir}' + chmod -R u=rwX,g=rX,o= ${cfg.tarballDir} + + rm -rf '${privateDir}' + mkdir --mode=0700 -p '${privateDir}' + chown ${cfg.user}:${cfg.user} '${privateDir}' + ''; + + main = pkgs.writeBashScriptBin "filebackup" '' + set -euo pipefail + umask 0027 + DATE=$(date --iso-8601) + HOME='${privateDir}' + PARALLEL_SHELL=${pkgs.bash}/bin/bash + export DATE + export HOME + export PARALLEL_SHELL + + clean() { + ${pkgs.findutils}/bin/find '${cfg.tarballDir}' \ + -name '*.tmp' -exec rm -rf {} + || true + } + + listSets() { + ${pkgs.findutils}/bin/find '${cfg.tarballDir}' \ + -maxdepth 1 -mindepth 1 -type d -name '????-??-??' \ + | sort -V + } + + enoughStorage() { + local n + local used + local total + local avg + local p + n=$(listSets | wc -l) + used=$(du -x -s --block-size=1M '${cfg.tarballDir}' | cut -f1) + total=$(df --output=size --block-size=1M '${cfg.tarballDir}' | tail -n 1) + if [ "$n" -eq 0 ]; then + echo "no sets" >&2 + return 0 + fi + + avg=$(( used / n )) + p=$(( 100 * avg * (n + 1) / total )) + printf "estimated storage: %d of %d MiB (%d%%, max ${toString cfg.storage}%%)\n" \ + "$((used + avg))" "$total" "$p" >&2 + if [ "$p" -le ${toString cfg.storage} ]; then + return 0 + else + return 1 + fi + } + + clean + + listSets | head -n -${toString (cfg.slots - 1)} \ + | ${pkgs.findutils}/bin/xargs --no-run-if-empty rm -rfv \ + || true + + while ! enoughStorage; do + listSets | head -n 1 \ + | ${pkgs.findutils}/bin/xargs --no-run-if-empty rm -rfv \ + || true + done + + mkdir -p "${cfg.tarballDir}/$DATE" + + ${optionalString (gpgPubKeys != []) '' + # shellcheck disable=SC2174 + mkdir --mode=0700 -p '${privateDir}/gnupg' + ln -sf ${pubring} '${privateDir}/gnupg/pubring.gpg' + ''} + + failed=0 + log="${cfg.tarballDir}/$DATE/joblog.txt" + + # shellcheck disable=SC2016 + ${pkgs.parallel}/bin/parallel \ + --halt-on-error 0 \ + --joblog "$log" \ + --jobs 50% \ + --line-buffer \ + --no-notice \ + --no-run-if-empty \ + --retries 2 \ + --rpl '{nixbase} s:^/nix/store/[^-]+-tar-(.+)-job$:$1:' \ + --tagstr '* {nixbase}:' \ + --timeout ${toString (6 * 60 * 60)} ::: \ + ${concatMapAttrsSep " " job cfg.files} \ + || failed=$? + + cat "$log" + clean + + du -sh "${cfg.tarballDir}/$DATE" || true + exit "$failed" + ''; + + keys = filter (f: f != null && hasPrefix "/run/keys/" f) ( [cfg.s3cfg] ); + +in { + options.nixsap.apps.filebackup = { + user = mkOption { + description = "User to run as"; + default = "filebackup"; + type = str; + }; + + tarballDir = mkOption { + description = "Directory to save tarballs in"; + default = "/filebackup"; + type = path; + }; + + slots = mkOption { + description = '' + How many backup sets should be kept locally. + However, old sets will be removed anyway if storage + constraints apply. + ''; + default = 60; + type = int; + }; + + storage = mkOption { + description = '' + Percent of storage backups can occupy. 
+ ''; + default = 75; + type = int; + }; + + encrypt = mkOption { + description = "Public GPG key(s) for encrypting the dumps"; + default = [ ]; + type = either path (listOf path); + }; + + s3cfg = mkOption { + description = "s3cmd config file (secret)"; + type = nullOr path; + default = null; + }; + + s3uri = mkOption { + description = "S3 bucket URI with prefix in s3cmd format"; + type = nullOr str; + default = null; + example = "s3://backups/nightly"; + }; + + files = mkOption { + description = "tar commands"; + default = {}; + type = attrsOf command; + }; + }; + + config = mkIf (cfg.files != {}) { + nixsap.system.users.daemons = [ cfg.user ]; + nixsap.deployment.keyrings.${cfg.user} = keys; + systemd.services.filebackup = { + description = "Directory backup with tar"; + after = [ "local-fs.target" "keys.target" ]; + wants = [ "keys.target" ]; + startAt = "02:00"; + inherit preStart; + serviceConfig = { + ExecStart = "${main}/bin/filebackup"; + User = cfg.user; + PermissionsStartOnly = true; + }; + }; + }; +} diff --git a/modules/apps/icinga2.nix b/modules/apps/icinga2.nix new file mode 100644 index 0000000..6df18fc --- /dev/null +++ b/modules/apps/icinga2.nix @@ -0,0 +1,375 @@ +{ config, pkgs, lib, ... }: + +let + inherit (builtins) + attrNames dirOf toString ; + inherit (lib) + concatMapStringsSep mapAttrsToList concatStringsSep filter hasPrefix + isString mkEnableOption mkIf mkOption optionalString types ; + inherit (types) + attrsOf bool either enum int listOf path str ; + + environment = { + SSL_CERT_FILE = "/etc/ssl/certs/ca-bundle.crt"; + }; + + cfg = config.nixsap.apps.icinga2; + rundir = "/run/icinga2"; + pidFile = "${rundir}/icinga2.pid"; + + mutableDir = "mutable.d"; + mutableTmpDir = "mutable.tmp.d"; + mutablePath = "${cfg.stateDir}/etc/icinga2/${mutableDir}"; + mutableTmpPath = "${cfg.stateDir}/etc/icinga2/${mutableTmpDir}"; + mutableRestart = "${mutablePath}/restart"; + + icingaMutableUpdate = + let + job = n: j: pkgs.writeBashScript "icinga-mutable-${n}" '' + set -euo pipefail + f='${mutableTmpPath}/${n}.conf' + ${j} > "$f.tmp" + mv -f "$f.tmp" "$f" + ''; + in pkgs.writeBashScript "icinga-mutable-update" '' + set -euo pipefail + + rm -rf ${mutableTmpPath} + mkdir -p ${mutableTmpPath} + + HOME=${rundir} + PARALLEL_SHELL=${pkgs.bash}/bin/bash + export PARALLEL_SHELL + + # shellcheck disable=SC2016 + ${pkgs.parallel}/bin/parallel \ + --delay 2 \ + --halt-on-error 0 \ + --line-buffer \ + --no-notice \ + --no-run-if-empty \ + --rpl '{name} s:^.*-icinga-mutable-(.+)$:$1:' \ + --timeout 120 \ + --tagstr '* {name}:' \ + ::: \ + ${concatStringsSep " " ( + mapAttrsToList job cfg.mutable.conf + )} \ + || exit 1 # WARNING + + old=$(${pkgs.nix}/bin/nix-hash --type sha1 '${mutablePath}') + new=$(${pkgs.nix}/bin/nix-hash --type sha1 '${mutableTmpPath}') + if [ "$old" != "$new" ]; then + ${pkgs.gnused}/bin/sed 's,${mutablePath},${mutableTmpPath},' \ + ${icingaConf} > \ + ${cfg.stateDir}/etc/icinga2/icinga2.tmp.conf + if ! 
${pkgs.icinga2}/bin/icinga2 daemon -C -x critical -c ${cfg.stateDir}/etc/icinga2/icinga2.tmp.conf; then + exit 2 # CRITICAL + fi + rm -f ${cfg.stateDir}/etc/icinga2/icinga2.tmp.conf + rm -rf ${mutablePath}.bak + mv -f ${mutablePath} ${mutablePath}.bak + mv -f ${mutableTmpPath} ${mutablePath} + rm -rf ${mutablePath}.bak + if [ -f ${pidFile} ]; then + pid=$(cat ${pidFile}) + if ${pkgs.coreutils}/bin/kill -0 "$pid"; then + touch ${mutableRestart} + ${pkgs.coreutils}/bin/kill -HUP "$pid" + echo "Restart: $old -> $new" + fi + fi + else + echo "No changes: $old" + fi + ''; + + icingaMutableCheckCommand = pkgs.writeText "icinga-${cfg.mutable.checkCommand}.conf" '' + object CheckCommand "${cfg.mutable.checkCommand}" { + import "plugin-check-command" + command = [ "${icingaMutableUpdate}" ] + } + ''; + + icingaConf = pkgs.writeText "icinga2.conf" + '' + const PluginDir = "${pkgs.monitoringPlugins}/libexec" + const RunAsGroup = "${cfg.user}" + const RunAsUser = "${cfg.user}" + + include <itl> + include <plugins> + + object Endpoint NodeName { + host = NodeName + } + object Zone NodeName { + endpoints = [ NodeName ] + } + + include "${cfg.stateDir}/etc/icinga2/features-enabled/*.conf" + include "${cfg.stateDir}/etc/icinga2/conf.d/*.conf" + include_recursive "${cfg.stateDir}/etc/icinga2/repository.d" + include "${mutablePath}/*.conf" + + ${concatMapStringsSep "\n" (f: + if hasPrefix "/" f + then ''include "${f}"'' + else ''include "${pkgs.writeText "icinga2.inc.conf" f}"'' + ) cfg.configFiles} + ''; + + console = pkgs.writeBashScriptBin "icinga2console" '' + if [ -z "$ICINGA2_API_USERNAME" ] && [ -r ${cfg.stateDir}/etc/icinga2/conf.d/api-users.conf ]; then + pwd=$(${pkgs.gnused}/bin/sed -rn 's,.*password\s*=\s*"(.+)".*,\1,p' ${cfg.stateDir}/etc/icinga2/conf.d/api-users.conf) + export ICINGA2_API_USERNAME=root + export ICINGA2_API_PASSWORD="$pwd" + fi + exec ${pkgs.icinga2}/bin/icinga2 console --connect 'https://localhost/' "$@" + ''; + + configureMySQL = pkgs.writeBashScript "icinga2-mysql" '' + set -euo pipefail + nconn=$(icinga2console --eval 'len(get_objects(IdoMysqlConnection))') + nconn=''${nconn%.*} # float to int + if [ "$nconn" -eq 0 ]; then + exit + fi + for i in $( seq 0 $(( nconn - 1 )) ); do + db=$(icinga2console --eval "get_objects(IdoMysqlConnection)[$i].database") + host=$(icinga2console --eval "get_objects(IdoMysqlConnection)[$i].host") + port=$(icinga2console --eval "get_objects(IdoMysqlConnection)[$i].port") + pwd=$(icinga2console --eval "get_objects(IdoMysqlConnection)[$i].password") + user=$(icinga2console --eval "get_objects(IdoMysqlConnection)[$i].user") + + # XXX Removing quotes: + db=''${db%\"} ; db=''${db#\"} + host=''${host%\"} ; host=''${host#\"} + pwd=''${pwd%\"} ; pwd=''${pwd#\"} + user=''${user%\"} ; user=''${user#\"} + port=''${port%.*} + mysql=(${pkgs.mysql}/bin/mysql --no-defaults "-h$host" "-P$port" "-u$user" "--password=$pwd") + while ! "''${mysql[@]}" -e ';'; do + sleep 20s + done + tt=$("''${mysql[@]}" -N -e "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = '$db';") + # TODO: Migrations: + if [ "$tt" -eq 0 ]; then + "''${mysql[@]}" -v "$db" < ${pkgs.icinga2}/share/icinga2-ido-mysql/schema/mysql.sql + fi + done + ''; + + configureDBs = pkgs.writeBashScriptBin "icinga2db" '' + set -eu + while ! 
icinga2console -e '"connected to icinga"'; do + sleep 30s + done + ${configureMySQL} + ''; + + preStart = '' + umask 0077 + mkdir -p \ + ${cfg.stateDir}/cache/icinga2 \ + ${cfg.stateDir}/lib/icinga2/api/log \ + ${cfg.stateDir}/lib/icinga2/api/repository \ + ${cfg.stateDir}/lib/icinga2/api/zones \ + ${cfg.stateDir}/log/icinga2/compat/archives \ + ${cfg.stateDir}/log/icinga2/crash \ + ${cfg.stateDir}/spool/icinga2/perfdata \ + ${cfg.stateDir}/spool/icinga2/tmp + + ${pkgs.findutils}/bin/find \ + ${cfg.stateDir}/etc/icinga2 \ + -mindepth 1 -maxdepth 1 \ + -not -name ${mutableDir} \ + -not -name pki \ + -not -name repository.d \ + -exec rm -rf '{}' \; || true + + mkdir -p \ + ${cfg.stateDir}/etc/icinga2/conf.d \ + ${mutablePath} \ + ${cfg.stateDir}/etc/icinga2/repository.d \ + ${cfg.stateDir}/etc/icinga2/features-enabled + ln -sf ${pkgs.icinga2}${cfg.stateDir}/etc/icinga2/features-available \ + ${cfg.stateDir}/etc/icinga2/features-available + ln -sf ${pkgs.icinga2}${cfg.stateDir}/etc/icinga2/scripts \ + ${cfg.stateDir}/etc/icinga2/scripts + + # XXX Can't include in the main file due to infinite recursion + ln -sf ${icingaMutableCheckCommand} \ + ${cfg.stateDir}/etc/icinga2/conf.d/${cfg.mutable.checkCommand}.conf + + # XXX: requires root (!?) + ${pkgs.icinga2}/bin/icinga2 api setup + ${pkgs.icinga2}/bin/icinga2 feature enable checker + ${pkgs.icinga2}/bin/icinga2 feature enable command + ${pkgs.icinga2}/bin/icinga2 feature enable livestatus + + ${optionalString cfg.notifications '' + ${pkgs.icinga2}/bin/icinga2 feature enable notification + ''} + + rm -rf ${rundir} + mkdir --mode=0755 -p ${rundir} + mkdir --mode=2710 -p ${dirOf cfg.commandPipe} + mkdir --mode=2710 -p ${dirOf cfg.livestatusSocket} + chown -R ${cfg.user}:${cfg.user} ${rundir} + chown -Rc ${cfg.user}:${cfg.user} ${cfg.stateDir} + chmod -R u=rwX,g=rX,o= ${cfg.stateDir} + chown ${cfg.user}:${cfg.commandGroup} ${dirOf cfg.commandPipe} + chown ${cfg.user}:${cfg.commandGroup} ${dirOf cfg.livestatusSocket} + ''; + + ExecStart = pkgs.writeBashScript "icinga2-start" '' + set -euo pipefail + + umask 0077 + + printf 'const TicketSalt = "%s"\n' "$(${pkgs.pwgen}/bin/pwgen -1 -s 23)" \ + > ${cfg.stateDir}/etc/icinga2/conf.d/ticketsalt.conf + + if [ -e ${mutableRestart} ]; then + rm ${mutableRestart} + else + ${icingaMutableUpdate} || true + if ! ${pkgs.icinga2}/bin/icinga2 daemon -C -x critical -c ${icingaConf}; then + rm -rf ${mutablePath} + mkdir -p ${mutablePath} + fi + fi + + exec ${pkgs.icinga2}/bin/icinga2 daemon -x ${cfg.logLevel} -c ${icingaConf} + ''; + +in { + + options.nixsap = { + apps.icinga2 = { + enable = mkEnableOption "icinga2"; + + logLevel = mkOption { + description = "Icinga2 daemon log level"; + type = enum [ "debug" "notice" "information" "warning" "critical" ]; + default = "information"; + }; + + notifications = mkOption { + description = "Enable notifications"; + type = bool; + default = false; + }; + + configFiles = mkOption { + description = '' + Configuration files or inline text + to be included in the main file''; + type = listOf (either str path); + }; + + mutable.conf = mkOption { + description = '' + A set of executables to write mutable config files. + ''; + type = attrsOf path; + default = {}; + }; + mutable.checkCommand = mkOption { + description = '' + Name of the mutable check command. You may need to alter this + only in an unlikely case of conflict with your custom commands. + Mutable files are updated every time icinga2 restart. 
If you want + better control and observability on this, create a service with + this check command. If exists, this service will make icinga2 + restart when mutable files change (and pass syntax check) via + sending the HUP signal to the main icinga2 process. + ''; + type = str; + default = "mutable-conf-refresh"; + }; + + # these are hard-coded into icinga2 package: + user = mkOption { + type = types.str; + description = "User to run as"; + default = "icinga"; + readOnly = true; + }; + + commandGroup = mkOption { + type = types.str; + description = "Dedicated command group for command pipe and livestatus"; + default = "icingacmd"; + readOnly = true; + }; + + stateDir = mkOption { + type = types.path; + description = "Icinga2 logs, state, config files"; + default = "/icinga2"; + readOnly = true; + }; + + commandPipe = mkOption { + type = types.path; + description = "Icinga2 command pipe"; + default = "${rundir}/cmd/icinga2.cmd"; + readOnly = true; + }; + + livestatusSocket = mkOption { + type = types.path; + description = "Icinga2 Livestatus socket"; + default = "${rundir}/cmd/livestatus"; + readOnly = true; + }; + }; + }; + + config = mkIf cfg.enable { + environment.systemPackages = [ console ]; + nixsap.apps.icinga2.configFiles = [ + "${pkgs.icinga2}/icinga2/etc/icinga2/conf.d/app.conf" + "${pkgs.icinga2}/icinga2/etc/icinga2/conf.d/commands.conf" + "${pkgs.icinga2}/icinga2/etc/icinga2/conf.d/notifications.conf" + "${pkgs.icinga2}/icinga2/etc/icinga2/conf.d/templates.conf" + "${pkgs.icinga2}/icinga2/etc/icinga2/conf.d/timeperiods.conf" + ]; + nixsap.system.users.daemons = [ cfg.user ]; + nixsap.system.groups = [ cfg.commandGroup ]; + nixsap.deployment.keyrings.${cfg.user} = filter (hasPrefix "/run/keys/") cfg.configFiles; + users.users.${cfg.user}.extraGroups = [ "proc" ]; + systemd.services.icinga2 = { + description = "Icinga2 daemon"; + after = [ "local-fs.target" "keys.target" "network.target" ]; + wants = [ "keys.target" ]; + wantedBy = [ "multi-user.target" ]; + inherit environment preStart; + serviceConfig = { + inherit ExecStart; + KillMode = "mixed"; + PermissionsStartOnly = true; + Restart = "always"; + TimeoutSec = 600; + User = cfg.user; + }; + }; + + systemd.services.icinga2db = { + description = "Icinga2 databases configurator"; + after = [ "icinga2.service" ]; + wantedBy = [ "multi-user.target" ]; + path = [ console ]; + inherit environment; + serviceConfig = { + ExecStart = "${configureDBs}/bin/icinga2db"; + User = cfg.user; + RemainAfterExit = true; + Restart = "on-failure"; + }; + }; + }; +} + diff --git a/modules/apps/icingaweb2.nix b/modules/apps/icingaweb2.nix new file mode 100644 index 0000000..ed52f86 --- /dev/null +++ b/modules/apps/icingaweb2.nix @@ -0,0 +1,398 @@ +{ config, pkgs, lib, ... 
}: + +let + + inherit (lib) types + mkIf mkOption mkEnableOption mkDefault hasPrefix + concatMapStringsSep filterAttrs recursiveUpdate mapAttrsToList + concatStringsSep isString filter genAttrs attrNames + optionalString mkOptionType any; + inherit (types) + bool str int lines path either + nullOr attrsOf listOf enum submodule unspecified; + inherit (builtins) toString; + + localIcinga = config.nixsap.apps.icinga2.enable; + + cfg = config.nixsap.apps.icingaweb2; + + attrs = opts: submodule { options = opts; }; + mandatory = t: mkOption { type = t; }; + optional = t: mkOption { type = nullOr t; default = null; }; + default = d: t: mkOption { type = t; default = d; }; + explicit = filterAttrs (n: v: n != "_module" && v != null); + show = v: optionalString (v != null) (toString v); + + permission = + let + allowed = + [ + "config/authentication/groups" + "config/authentication/roles/show" + "config/authentication/users" + "module" + "monitoring/command" + ]; + in mkOptionType { + name = "string starting with one of ${concatMapStringsSep ", " (s: ''"${s}"'') allowed}"; + check = x: isString x && any (p: hasPrefix p x) allowed; + }; + + role = attrs { + users = default [] (listOf str); + groups = default [] (listOf str); + permissions = mandatory (listOf permission); + objects = mandatory str; + }; + + database = attrs { + db = mandatory str; + host = mandatory str; + passfile = optional path; + port = optional int; + type = mandatory (enum [ "mysql" ]); + user = mandatory str; + }; + + configIni = pkgs.writeText "config.ini" '' + [global] + show_stacktraces = "${if cfg.stacktrace then "1" else "0"}" + config_backend = "db" + config_resource = "icingaweb2db" + + [logging] + level = "${cfg.logLevel}" + ${if cfg.log == "syslog" then '' + log = "syslog" + application = "icingaweb2" + '' else '' + log = "file" + file = "${cfg.log}" + '' + } + ''; + + # XXX Livestatus is not supported by IcingaWeb2 (2.1.0) + # https://dev.icinga.org/issues/8254 + # "We'll postpone this issue because Icinga 2.4 will introduce + # an API for querying monitoring data. 
Maybe we drop support + # for Livestatus completely" + modules.monitoring.backendsIni = pkgs.writeText "backends.ini" '' + [icinga2] + type = "ido" + resource = "icinga2db" + ''; + + modules.monitoring.configIni = pkgs.writeText "config.ini" '' + [security] + protected_customvars = "${concatStringsSep "," cfg.protectedCustomVars}" + ''; + + modules.monitoring.commandtransportsIni = pkgs.writeText "commandtransports.ini" '' + ${optionalString localIcinga '' + [local] + transport = "local" + path = "${config.nixsap.apps.icinga2.commandPipe}" + '' + } + ''; + + groupsIni = pkgs.writeText "groups.ini" ( + optionalString (cfg.authentication == "database") '' + [database] + backend = "db" + resource = "icingaweb2db" + '' + ); + + authenticationIni = pkgs.writeText "authentication.ini" ( + if cfg.authentication == "sproxy" then '' + [sproxy] + backend = "sproxy" + '' else '' + [database] + backend = "db" + resource = "icingaweb2db" + '' + ); + + rolesIni = pkgs.writeText "roles.ini" '' + [root] + users = "root" + permissions = "config/authentication/roles/show, config/authentication/users/*, config/authentication/groups/*, module/*, monitoring/command/*" + + ${ + concatStringsSep "\n\n" ( + mapAttrsToList (n: s: '' + [${n}] + users = "${concatStringsSep ", " s.users}" + groups = "${concatStringsSep ", " s.groups}" + permissions = "${concatStringsSep ", " s.permissions}" + ${optionalString (s.objects != null) '' + monitoring/filter/objects = "${s.objects}" + ''} + '') (explicit cfg.roles) + ) + } + ''; + + mkResource = name: opts: + let + mkDB = '' + cat <<'__EOF__' + + [${name}] + type = "db" + db = "${opts.type}" + dbname = "${opts.db}" + host = "${opts.host}" + port = "${show opts.port}" + username = "${opts.user}" + __EOF__ + ${optionalString (opts.passfile != null) '' + pwd=$(cat '${opts.passfile}') + printf 'password="%s"\n' "$pwd" + ''} + ''; + in if opts.type == "mysql" then mkDB + else ""; + + genResourcesIni = pkgs.writeBashScript "resources" (concatStringsSep "\n" ( + mapAttrsToList mkResource (explicit cfg.resources) + )); + + defaultPool = { + listen.owner = config.nixsap.apps.nginx.user; + pm.max_children = 10; + pm.max_requests = 1000; + pm.max_spare_servers = 5; + pm.min_spare_servers = 3; + pm.strategy = "dynamic"; + }; + + configureFiles = '' + set -euo pipefail + umask 0277 + mkdir -p '${cfg.configDir}' + ${pkgs.findutils}/bin/find \ + ${cfg.configDir} \ + -mindepth 1 -maxdepth 1 \ + -not -name dashboards \ + -not -name preferences \ + -exec rm -rf '{}' \; || true + + mkdir -p '${cfg.configDir}/dashboards' + mkdir -p '${cfg.configDir}/preferences' + mkdir -p '${cfg.configDir}/enabledModules' + mkdir -p '${cfg.configDir}/modules/monitoring' + + ln -sf '${pkgs.icingaweb2}/modules/monitoring' '${cfg.configDir}/enabledModules/monitoring' + ln -sf '${pkgs.icingaweb2}/modules/translation' '${cfg.configDir}/enabledModules/translation' + ${genResourcesIni} > '${cfg.configDir}/resources.ini' + ln -sf '${authenticationIni}' '${cfg.configDir}/authentication.ini' + ln -sf '${configIni}' '${cfg.configDir}/config.ini' + ln -sf '${groupsIni}' '${cfg.configDir}/groups.ini' + ln -sf '${rolesIni}' '${cfg.configDir}/roles.ini' + + ln -sf '${modules.monitoring.backendsIni}' \ + '${cfg.configDir}/modules/monitoring/backends.ini' + + ln -sf '${modules.monitoring.configIni}' \ + '${cfg.configDir}/modules/monitoring/config.ini' + + ln -sf '${modules.monitoring.commandtransportsIni}' \ + '${cfg.configDir}/modules/monitoring/commandtransports.ini' + + chmod u=rX,g=,o= '${cfg.configDir}' + chmod -R 
u=rwX,g=,o= '${cfg.configDir}/dashboards' + chmod -R u=rwX,g=,o= '${cfg.configDir}/preferences' + chown -R icingaweb2:icingaweb2 '${cfg.configDir}' + ''; + + configureDB = with cfg.resources.icingaweb2db; + let + mkMyCnf = pkgs.writeBashScript "my.cnf.sh" '' + cat <<'__EOF__' + [client] + host = ${host} + ${optionalString (port != null) "port = ${toString port}"} + user = ${user} + __EOF__ + ${optionalString (passfile != null) '' + pwd=$(cat '${passfile}') + printf 'password = %s\n' "$pwd" + ''} + ''; + in pkgs.writeBashScript "configureDB" '' + set -euo pipefail + cnf=$(mktemp) + trap 'rm -f "$cnf"' EXIT + chmod 0600 "$cnf" + ${mkMyCnf} > "$cnf" + #shellcheck disable=SC2016 + while ! mysql --defaults-file="$cnf" -e 'CREATE DATABASE IF NOT EXISTS `${db}`'; do + sleep 5s + done + tt=$(mysql --defaults-file="$cnf" -N -e 'SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = "${db}"') + if [ "$tt" -eq 0 ]; then + mysql --defaults-file="$cnf" -v '${db}' < '${pkgs.icingaweb2}/etc/schema/mysql.schema.sql' + ${optionalString (cfg.initialRootPasswordHash != "") '' + #shellcheck disable=SC2016 + mysql --defaults-file="$cnf" -e \ + 'INSERT INTO icingaweb_user (name, active, password_hash) VALUES ("root", 1, "${cfg.initialRootPasswordHash}")' '${db}' + '' + } + fi + ''; + + keys = filter (p: p != null && hasPrefix "/run/keys/" p) + [ cfg.resources.icingaweb2db.passfile + cfg.resources.icinga2db.passfile ]; + +in { + + options.nixsap.apps.icingaweb2 = { + enable = mkEnableOption "Icinga Web 2"; + user = mkOption { + description = '' + The user the PHP-FPM pool runs as. And the owner of files. + ''; + default = "icingaweb2"; + type = str; + }; + nginxServer = mkOption { + type = lines; + default = ""; + example = '' + listen 8080; + server_name icinga.example.net; + ''; + }; + configDir = mkOption { + description = "Where to put config files. This directory will be created if does not exist."; + type = path; + default = "/icingaweb2"; + }; + fpmPool = mkOption { + description = "Options for the PHP FPM pool"; + type = attrsOf unspecified; + default = {}; + }; + + resources = mkOption { + description = "Composes resources.ini"; + type = attrs { + icingaweb2db = mkOption { + description = "Database for Icinga Web 2 settings"; + type = database; + }; + icinga2db = mkOption { + description = "Icinga2 database (read-only)"; + type = database; + }; + }; + }; + + authentication = mkOption { + description = '' + Authentication backend: either IcingaWeb2 database or Sproxy. + ''; + type = enum [ "sproxy" "database" ]; + default = "database"; + }; + + protectedCustomVars = mkOption { + description = '' + Icinga2 custom variables to be masked in WebUI. + This can used for example to hide passwords. Wildcard are allowed. + ''; + type = listOf str; + default = [ "*pass*" "*pw*" "community" "http*auth_pair" ]; + }; + + roles = mkOption { + description = "Composes roles.ini"; + type = attrsOf role; + default = {}; + example = { + devops = { + groups = [ "devops" ]; + permissions = [ "module/*" "monitoring/command/*" ]; + objects = "*"; + }; + all = { + groups = [ "all" ]; + permissions = [ "module/*" ]; + objects = "hostgroup_name=Shops"; + }; + }; + }; + + initialRootPasswordHash = mkOption { + description = '' + Initial root password for icingaweb2db. + Use <literal>openssl passwd -1 mysecret</literal> + to generate this hash. It is used only when database + does not exist. So you may choose not to keep/commit + this hash at all. You better change the root password + after the first login. 
+ ''; + type = str; + default = ""; + }; + + stacktrace = mkOption { + description = "whether to show PHP stacktraces"; + type = bool; + default = false; + }; + log = mkOption { + type = either path (enum [ "syslog" ]); + default = "syslog"; + }; + logLevel = mkOption { + type = enum [ "INFO" "WARNING" "ERROR" "CRITICAL" "DEBUG" ]; + default = "WARNING"; + }; + }; + + config = mkIf cfg.enable { + nixsap.deployment.keyrings.root = keys; + users.users.icingaweb2.extraGroups = mkIf localIcinga [ config.nixsap.apps.icinga2.commandGroup ]; + nixsap.apps.php-fpm.icingaweb2.pool = + recursiveUpdate defaultPool (cfg.fpmPool // { user = cfg.user ;}); + + nixsap.apps.nginx.http.servers.icingaweb2 = '' + ${cfg.nginxServer} + + root ${pkgs.icingaweb2}/public; + index index.php; + try_files $1 $uri $uri/ /index.php$is_args$args; + + location ~ ^/index\.php(.*)$ { + fastcgi_pass unix:${config.nixsap.apps.php-fpm.icingaweb2.pool.listen.socket}; + fastcgi_index index.php; + include ${pkgs.nginx}/conf/fastcgi_params; + fastcgi_param SCRIPT_FILENAME ${pkgs.icingaweb2}/public/index.php; + fastcgi_param ICINGAWEB_CONFIGDIR ${cfg.configDir}; + fastcgi_param REMOTE_USER $remote_user; + } + ''; + + systemd.services.icingaweb2cfg = { + description = "configure Icinga Web 2"; + after = [ "network.target" "local-fs.target" "keys.target" ]; + wants = [ "keys.target" ]; + wantedBy = [ "multi-user.target" ]; + path = with pkgs; [ mysql ]; + preStart = configureFiles; + serviceConfig = { + ExecStart = configureDB; + PermissionsStartOnly = true; + RemainAfterExit = true; + User = "icingaweb2"; + }; + }; + }; +} + diff --git a/modules/apps/juandelacosa.nix b/modules/apps/juandelacosa.nix new file mode 100644 index 0000000..8df6af0 --- /dev/null +++ b/modules/apps/juandelacosa.nix @@ -0,0 +1,68 @@ +{ config, pkgs, lib, ... }: + +let + inherit (builtins) filter toString; + inherit (lib) types mkOption mkEnableOption mkIf hasPrefix + concatStrings optionalString; + inherit (types) str path int nullOr; + + cfg = config.nixsap.apps.juandelacosa; + + ExecStart = concatStrings [ + "${pkgs.juandelacosa}/bin/juandelacosa" + (optionalString (cfg.myFile != null) " -f '${cfg.myFile}'") + (optionalString (cfg.myGroup != null) " -g ${cfg.myGroup}") + (if (cfg.port != null) + then " -p ${toString cfg.port}" + else " -s '${cfg.socket}'") + ]; + + keys = filter (f: f != null && hasPrefix "/run/keys/" f) [ cfg.myFile ]; + +in { + options.nixsap.apps.juandelacosa = { + enable = mkEnableOption "Juan de la Cosa"; + user = mkOption { + description = "User to run as"; + default = "juandelacosa"; + type = str; + }; + port = mkOption { + description = "TCP port to listen on"; + default = null; + type = nullOr int; + }; + socket = mkOption { + description = "UNIX socket to listen on. 
Ignored when TCP port is set"; + default = "/tmp/juandelacosa.sock"; + type = path; + }; + myFile = mkOption { + description = "MySQL client configuration file"; + default = null; + type = nullOr path; + }; + myGroup = mkOption { + description = "Options group in the MySQL client configuration file"; + default = null; + type = nullOr str; + }; + }; + + config = mkIf cfg.enable { + nixsap.system.users.daemons = [ cfg.user ]; + nixsap.deployment.keyrings.${cfg.user} = keys; + systemd.services.juandelacosa = { + description = "captain of the MariaDB"; + wantedBy = [ "multi-user.target" ]; + wants = [ "keys.target" ]; + after = [ "keys.target" "network.target" "local-fs.target" ]; + serviceConfig = { + inherit ExecStart; + User = cfg.user; + Restart = "on-failure"; + }; + }; + }; +} + diff --git a/modules/apps/mariadb/default.nix b/modules/apps/mariadb/default.nix new file mode 100644 index 0000000..cdf5d92 --- /dev/null +++ b/modules/apps/mariadb/default.nix @@ -0,0 +1,442 @@ +{ config, pkgs, lib, ... }: +let + inherit (builtins) + attrNames filter isBool isInt isList isPath isString length replaceStrings + toString ; + + inherit (lib) + mkOption mkEnableOption mkIf types toUpper unique + optionalString hasPrefix concatStringsSep splitString flatten + concatMapStrings concatMapStringsSep concatStrings mapAttrsToList filterAttrs; + + inherit (types) + attrsOf either int lines listOf package str submodule ; + + cfg = config.nixsap.apps.mariadb; + + getDirs = l: map dirOf (filter (p: p != null && hasPrefix "/" p) l); + mydirs = [ cfg.mysqld.datadir ] ++ getDirs [ cfg.mysqld.log_bin cfg.mysqld.relay_log ]; + explicit = filterAttrs (n: v: n != "_module" && v != null); + hasMasters = (explicit cfg.replicate) != {}; + concatNonEmpty = sep: list: concatStringsSep sep (filter (s: s != "") list); + + # XXX /run/mysqld/mysqld.sock is the default socket + rundir = "/run/mysqld"; + initFile = pkgs.writeText "init" '' + CREATE USER IF NOT EXISTS '${cfg.user}'@'localhost' IDENTIFIED VIA unix_socket; + GRANT ALL ON *.* TO '${cfg.user}'@'localhost' WITH GRANT OPTION; + ''; + + mkIgnoreTablesList = quotes: { databases, ignore-tables, ... }: + let + q = optionalString quotes "`"; + hasDot = t: 2 == length (splitString "." t); + all-tbl = filter (t: ! 
hasDot t) ignore-tables; + db-tbl = (filter hasDot ignore-tables) ++ + flatten (map (t: map (d: "${q}${d}${q}.${q}${t}${q}") databases) all-tbl); + in unique db-tbl; + + mkEntry = name: value: + let + showList = l: concatMapStringsSep "," (toString) (unique l); + optimizer_switch = a: + showList (mapAttrsToList (n: v: + "${n}=${if v then "on" else "off"}" + ) (explicit a)); + in if hasPrefix "skip" name then (optionalString value name) + else if name == "optimizer_switch" then "${name} = ${optimizer_switch value}" + else if isBool value then "${name} = ${if value then "ON" else "OFF"}" + else if isInt value then "${name} = ${toString value}" + else if isList value then "${name} = ${showList value}" + else if isString value then "${name} = ${value}" + else abort "Unrecognized option ${name}"; + + show = n: v: + if isBool v then (if v then "1" else "0") + else if isInt v then toString v + else if isString v then "'${v}'" + else if isPath v then "'${v}'" + else abort "Unrecognized option ${n}"; + + mkReplOpt = ch: args@{databases, ignore-databases, ...}: + let wild_do_table = concatMapStringsSep "\n" (d: + "${ch}.replicate_wild_do_table = ${d}.%" + ) databases; + ignore_table = concatMapStringsSep "\n" (t: + "${ch}.replicate_ignore_table = ${t}" + ) (mkIgnoreTablesList false args); + ignore_db = concatMapStringsSep "\n" (d: + "${ch}.replicate_ignore_db = ${d}" + ) ignore-databases; + in '' + ${ignore_db} + ${ignore_table} + ${wild_do_table} + ''; + + mkDynamicReplOpt = ch: args@{databases, ignore-databases, ...}: + '' + SET default_master_connection = "${ch}"; + SET GLOBAL replicate_ignore_db = "${concatStringsSep "," ignore-databases}"; + SET GLOBAL replicate_wild_do_table = "${concatMapStringsSep "," (d: "${d}.%") databases}"; + SET GLOBAL replicate_ignore_table = "${concatMapStringsSep "," (t: "${t}") (mkIgnoreTablesList false args)}"; + ''; + + replCnf = pkgs.writeText "mysqld-repl.cnf" '' + [mysqld] + ${concatNonEmpty "\n" (mapAttrsToList mkReplOpt (explicit cfg.replicate))} + ''; + + mysqldCnf = + if hasMasters && (cfg.mysqld.server_id == null || cfg.mysqld.server_id < 1) + then throw "Misconfigured slave: server_id was not set to a positive integer" + else pkgs.writeText "mysqld.cnf" '' + [mysqld] + basedir = ${cfg.package} + init_file = ${initFile} + pid_file = ${rundir}/mysqld.pid + plugin_load = unix_socket=auth_socket.so + plugin_load_add = server_audit=server_audit.so + ${concatNonEmpty "\n" (mapAttrsToList mkEntry (explicit cfg.mysqld))} + ${optionalString hasMasters "!include ${replCnf}"} + ''; + + await = pkgs.writeBashScript "await" '' + count=0 + while ! mysql -e ';' 2>/dev/null; do + if ! 
(( count % 60 )); then + mysql -e ';' + fi + sleep 5s + (( ++count )) + done + ''; + + conf = pkgs.writeBashScriptBin "mariadb-conf" + '' + set -euo pipefail + trap "" SIGHUP + ${await} + ${optionalString (cfg.configure' != "") '' + tmp=$(mktemp) + trap 'rm -f "$tmp"' EXIT + mysql -N mysql < ${pkgs.writeText "mariadb-make-conf2.sql" cfg.configure'} > "$tmp" + mysql -v mysql < "$tmp" + ''} + mysql -v mysql < ${pkgs.writeText "mariadb-conf.sql" cfg.configure} + ''; + + maintenance = pkgs.writeBashScriptBin "mariadb-maint" '' + set -euo pipefail + trap "" SIGHUP + ${await} + ${optionalString hasMasters "mysql -e 'STOP ALL SLAVES SQL_THREAD'"} + mysql_upgrade --user=${cfg.user} + mysql_tzinfo_to_sql "$TZDIR" | mysql mysql + mysql mysql < ${./procedures.sql} + cat <<'__SQL__' | mysql + DROP DATABASE IF EXISTS test; + DELETE FROM mysql.db WHERE Db='test' OR Db='test%'; + DELETE FROM mysql.user WHERE User='${cfg.user}' AND Host NOT IN ('localhost'); + DELETE FROM mysql.user WHERE User=${"''"}; + DELETE FROM mysql.user WHERE User='root'; + DELETE FROM mysql.proxies_priv WHERE User='root'; + FLUSH PRIVILEGES; + ${concatMapStrings (db: '' + CREATE DATABASE IF NOT EXISTS `${db}`; + '') cfg.databases} + __SQL__ + ${optionalString hasMasters "mysql -e 'START ALL SLAVES'"} + ''; + + changeMaster = + let + do = ch: opts: + let + masterOptions = filterAttrs (n: _: n != "password-file") (explicit opts.master); + masterOptionName = n: ''MASTER_${toUpper (replaceStrings ["-"] ["_"] n)}''; + changeMaster = "CHANGE MASTER '${ch}' TO " + (concatStringsSep ", " (mapAttrsToList (n: v: + "${masterOptionName n}=${show n v}") masterOptions)) + ";"; + in pkgs.writeBashScript "change-master-${ch}" '' + cat <<'__SQL__' + ${changeMaster} + ${mkDynamicReplOpt ch opts} + __SQL__ + ${optionalString (opts.master.password-file != null) '' + pwd=$(cat '${opts.master.password-file}') + echo "CHANGE MASTER '${ch}' TO MASTER_PASSWORD='$pwd';"''} + ''; + + in pkgs.writeBashScript "changeMaster" ( + concatStringsSep "\n" (mapAttrsToList (ch: opts: '' + [ "$1" = ${ch} ] && exec ${do ch opts} + '') (explicit cfg.replicate)) + ); + + importDump = + let + do = ch: opts: + let + cnf = "${rundir}/master-${ch}.cnf"; + mysqldumpOptions = filterAttrs (n: _: n != "password-file" && n != "path") + (explicit opts.mysqldump); + binary = if opts.mysqldump.path != null then opts.mysqldump.path else "mysqldump"; + mysqldump = concatStringsSep " " ( + [ binary "--defaults-file=${cnf}" "--skip-comments" "--force" ] + ++ mapAttrsToList (n: v: "--${n}=${show n v}") mysqldumpOptions); + databases = concatStringsSep " " ([ "--databases" ] ++ opts.databases); + ignore-tables = concatMapStringsSep " " (t: "--ignore-table=${t}") (mkIgnoreTablesList false opts); + in pkgs.writeBashScript "import-${ch}" '' + set -euo pipefail + touch '${cnf}' + trap "rm -f '${cnf}'" EXIT + trap "exit 255" TERM INT + chmod 0600 '${cnf}' + ${optionalString (opts.mysqldump.password-file != null) '' + printf '[client]\npassword=' > '${cnf}' + cat '${opts.mysqldump.password-file}' >> '${cnf}' + ''} + echo 'SET default_master_connection="${ch}";' + ${optionalString (!cfg.mysqld.log_slave_updates) "echo 'SET sql_log_bin=0;'"} + ${mysqldump} --master-data=0 --no-data ${databases} + ${mysqldump} --master-data=1 ${ignore-tables} ${databases} + ''; + in pkgs.writeBashScript "importDump" ( + concatStringsSep "\n" (mapAttrsToList (ch: opts: '' + [ "$1" = ${ch} ] && exec ${do ch opts} + '') (explicit cfg.replicate)) + ); + + watchdog = pkgs.writeBashScript "slave-watchdog" + (import 
./slave-watchdog.nix {inherit importDump changeMaster;}); + + slaves = + let + channels = attrNames (explicit cfg.replicate); + truncate = ch: concatMapStringsSep "\n" + (t: "TRUNCATE TABLE ${t};") (mkIgnoreTablesList true cfg.replicate.${ch}); + truncateIgnored = pkgs.writeText "truncate.sql" + (concatMapStringsSep "\n" truncate channels); + old = "${rundir}/channels"; + new = pkgs.writeText "channels.new" (concatMapStringsSep "\n" + (ch: "${ch}:${cfg.replicate.${ch}.master.host}") channels); + in pkgs.writeBashScriptBin "mariadb-slaves" '' + set -euo pipefail + rm -f ${rundir}/*.lock + ${await} + touch ${old} + chmod 0600 ${old} + trap 'rm -f ${old}' EXIT + mysql -e 'SHOW ALL SLAVES STATUS\G' \ + | awk '/Connection_name:/ {printf $2 ":"}; /Master_Host:/ {print $2}' \ + | sort > ${old} + obsolete=$(comm -23 ${old} ${new} | cut -d: -f1) + for ch in $obsolete; do + echo "Deleting obsolete slave $ch" + mysql -e "CALL mysql.resetSlave('$ch')" + done + ${optionalString hasMasters '' + mysql -f < ${truncateIgnored} || echo '(errors ignored)' >&2 + export PARALLEL_SHELL=${pkgs.bash}/bin/bash + export HOME='${rundir}' + { + while true; do + printf "${concatStringsSep "\\n" channels}\n" + sleep 10m + done + } | parallel \ + --halt-on-error 0 \ + --jobs '${toString cfg.slaveWatchdogs}' \ + --line-buffer \ + --no-notice \ + --tagstr '* {}:' \ + 'flock -E 0 -n ${rundir}/master-{}.lock ${watchdog} {}' + '' + } + ''; + + all-keys = unique (filter (f: f != null && hasPrefix "/run/keys/" f ) (flatten ( + mapAttrsToList (ch: {master, mysqldump, ...}: + [ master.password-file + master.ssl-key + mysqldump.password-file + mysqldump.ssl-key + ]) (explicit cfg.replicate) + ) ++ [ + cfg.mysqld.ssl_key + ])); + +in { + + imports = [ ./roles.nix ]; + + options.nixsap = { + apps.mariadb = { + enable = mkEnableOption "MySQL"; + + user = mkOption { + description = "User to run as"; + default = "mariadb"; + type = str; + }; + + package = mkOption { + description = "MariaDB Package (10.1.x)"; + type = package; + default = pkgs.mariadb; + }; + + replicate = mkOption { + type = attrsOf (submodule (import ./replicate.nix)); + default = {}; + description = "Replication channels"; + }; + + slaveWatchdogs = mkOption { + type = either str int; + default = "80%"; + description = '' + Number of parallel slave monitoring and recovery processes. + In the format of GNU Parallel, e. g. "100%", -1. +3, 7, etc. + ''; + }; + + mysqld = mkOption { + type = submodule (import ./mysqld.nix); + default = {}; + description = "mysqld options"; + }; + + databases = mkOption { + description = "Databases to create if not exist"; + type = listOf str; + default = []; + }; + + configure = mkOption { + type = lines; + default = ""; + description = '' + Any SQL statements to execute, typically GRANT / REVOKE etc. + This is executed in contect of the `mysql` database. + ''; + example = '' + CREATE USER IF NOT EXISTS 'icinga'@'%' IDENTIFIED BY PASSWORD '*AC8C3BDA823EECFF90A8381D554232C7620345B3'; + GRANT USAGE ON *.* TO 'icinga'@'%' REQUIRE SSL; + REVOKE ALL, GRANT OPTION FROM 'icinga'@'%'; + GRANT PROCESS, REPLICATION CLIENT, SHOW DATABASES ON *.* TO 'icinga'@'%'; + GRANT SELECT ON mysql.* TO 'icinga'@'%'; + ''; + }; + + configure' = mkOption { + type = lines; + default = ""; + internal = true; + description = '' + SQL statements that generate other SQL statements to be executed. + Those generated statements will be executed before `configure`. 
+ ''; + example = '' + SELECT CONCAT('GRANT SELECT ON `', table_schema, '`.`', table_name, '` TO \'_oms_package_vn\';') + FROM information_schema.tables WHERE + table_schema LIKE '%oms_live_vn' AND + table_name LIKE 'oms_package%'; + ''; + }; + }; + }; + + config = mkIf cfg.enable { + environment.systemPackages = [ cfg.package ]; + nixsap.system.users.daemons = [ cfg.user ]; + nixsap.deployment.keyrings.${cfg.user} = all-keys; + + nixsap.apps.mariadb.configure = concatMapStringsSep "\n" + (n: '' + CREATE USER IF NOT EXISTS '${n}'@'localhost' IDENTIFIED VIA unix_socket; + REVOKE ALL, GRANT OPTION FROM '${n}'@'localhost'; + GRANT SELECT, EXECUTE ON mysql.* TO '${n}'@'localhost'; + GRANT PROCESS, REPLICATION CLIENT, SHOW DATABASES, SHOW VIEW ON *.* TO '${n}'@'localhost'; + '') config.nixsap.system.users.sysops; + + systemd.services.mariadb-slaves = { + description = "MariaDB slaves watchdog"; + requires = [ "mariadb.service" ]; + after = [ "mariadb.service" "mariadb-maintenance.service" ]; + wantedBy = [ "multi-user.target" ]; + path = with pkgs; [ gnused gawk cfg.package utillinux parallel ]; + serviceConfig = { + ExecStart = "${slaves}/bin/mariadb-slaves"; + User = cfg.user; + } // (if hasMasters + then { + Restart = "always"; + } + else { + Type = "oneshot"; + }); + }; + + systemd.services.mariadb-maintenance = { + description = "MariaDB maintenance"; + after = [ "mariadb.service" ]; + wantedBy = [ "multi-user.target" ]; + path = [ cfg.package ]; + serviceConfig = { + ExecStart = "${maintenance}/bin/mariadb-maint"; + User = cfg.user; + Type = "oneshot"; + RemainAfterExit = true; + }; + }; + + systemd.services.mariadb-conf = { + description = "MariaDB configuration"; + after = [ "mariadb.service" "mariadb-maintenance.service" ]; + wantedBy = [ "multi-user.target" ]; + path = [ cfg.package ]; + serviceConfig = { + ExecStart = "${conf}/bin/mariadb-conf"; + User = cfg.user; + Type = "oneshot"; + RemainAfterExit = true; + }; + }; + + systemd.services.mariadb = { + description = "MariaDB server"; + wantedBy = [ "multi-user.target" ]; + wants = [ "keys.target" ]; + after = [ "keys.target" "network.target" "local-fs.target" ]; + path = [ pkgs.inetutils ]; + environment = { + UMASK = "0640"; + UMASK_DIR = " 0750"; + }; + preStart = '' + mkdir -p '${rundir}' + chmod 0700 '${rundir}' + mkdir -p ${concatMapStringsSep " " (d: "'${d}'") mydirs} + if [ ! -f '${cfg.mysqld.datadir}/mysql/user.MYI' ]; then + rm -rf '${cfg.mysqld.datadir}/mysql' + ${cfg.package}/bin/mysql_install_db --defaults-file=${mysqldCnf} + fi + chown -Rc '${cfg.user}':$(id -g -n '${cfg.user}') '${rundir}' ${concatMapStringsSep " " (d: "'${d}'") mydirs} + chmod -Rc u=rwX,g=rX,o= ${concatMapStringsSep " " (d: "'${d}'") mydirs} + chmod 0755 '${rundir}' + ''; + + serviceConfig = { + ExecStart = "${cfg.package}/bin/mysqld --defaults-file=${mysqldCnf}"; + PermissionsStartOnly = true; + User = cfg.user; + Restart = "always"; + TimeoutSec = 0; # XXX it can take hours to shutdown, and much more to start if you kill shutdown :-D + LimitNOFILE = "infinity"; + LimitMEMLOCK = "infinity"; + OOMScoreAdjust = -1000; + }; + }; + }; +} diff --git a/modules/apps/mariadb/mysqld.nix b/modules/apps/mariadb/mysqld.nix new file mode 100644 index 0000000..d66d96d --- /dev/null +++ b/modules/apps/mariadb/mysqld.nix @@ -0,0 +1,285 @@ +{ lib, ... 
}: +with lib; +with lib.types; + +let + engines = [ + "Archive" + "Aria" + "Blackhole" + "CSV" + "Example" + "InnoDB" + "Memory" + "MyISAM" + ]; + + syslog-facilities = [ + "LOG_USER" + "LOG_MAIL" + "LOG_DAEMON" + "LOG_AUTH" + "LOG_SYSLOG" + "LOG_LPR" + "LOG_NEWS" + "LOG_UUCP" + "LOG_CRON" + "LOG_AUTHPRIV" + "LOG_FTP" + "LOG_LOCAL0" + "LOG_LOCAL1" + "LOG_LOCAL2" + "LOG_LOCAL3" + "LOG_LOCAL4" + "LOG_LOCAL5" + "LOG_LOCAL6" + "LOG_LOCAL7" + ]; + + syslog-priorities = [ + "LOG_EMERG" + "LOG_ALERT" + "LOG_CRIT" + "LOG_ERR" + "LOG_WARNING" + "LOG_NOTICE" + "LOG_INFO" + "LOG_DEBUG" + ]; + + sql-modes = [ + "ALLOW_INVALID_DATES" + "ANSI" + "ANSI_QUOTES" + "DB2" + "ERROR_FOR_DIVISION_BY_ZERO" + "HIGH_NOT_PRECEDENCE" + "IGNORE_BAD_TABLE_OPTIONS" + "IGNORE_SPACE" + "MAXDB" + "MSSQL" + "MYSQL323" + "MYSQL40" + "NO_AUTO_CREATE_USER" + "NO_AUTO_VALUE_ON_ZERO" + "NO_BACKSLASH_ESCAPES" + "NO_DIR_IN_CREATE" + "NO_ENGINE_SUBSTITUTION" + "NO_FIELD_OPTIONS" + "NO_KEY_OPTIONS" + "NO_TABLE_OPTIONS" + "NO_UNSIGNED_SUBTRACTION" + "NO_ZERO_DATE" + "NO_ZERO_IN_DATE" + "ONLY_FULL_GROUP_BY" + "ORACLE" + "PAD_CHAR_TO_FULL_LENGTH" + "PIPES_AS_CONCAT" + "POSTGRESQL" + "REAL_AS_FLOAT" + "STRICT_ALL_TABLES" + "STRICT_TRANS_TABLES" + "TRADITIONAL" + ]; + + flush-methods = [ + "ALL_O_DIRECT" + "O_DIRECT" + "O_DSYNC" + "fdatasync" + ]; + + default = v: type: mkOption { type = type; default = v; }; + mandatory = type: mkOption { inherit type; }; + optional = type: mkOption { type = nullOr type; default = null; }; + set = opts: mkOption { type = nullOr (submodule opts); default = null; }; + + oneOrMore = l: let en = enum' l; in either en (uniq (listOf en)); + + # XXX https://github.com/NixOS/nixpkgs/issues/9826 + enum' = values: + let show = v: let t = builtins.typeOf v; + in if t == "string" then ''"${v}"'' + else if t == "int" then builtins.toString v + else ''<${t}>''; + in mkOptionType { + name = "one of ${concatStringsSep ", " (map show values)}"; + check = flip elem values; + merge = mergeOneOption; + }; + + isFloat = x: builtins.match "^[0-9]+(\\.[0-9]+)?$" (builtins.toString x) != null; + + float = mkOptionType { + name = "positive float"; + check = isFloat; + }; + + # https://mariadb.com/kb/en/mariadb/optimizer-switch/ + optimizer = { + options = { + derived_merge = optional bool; + derived_with_keys = optional bool; + exists_to_in = optional bool; + extended_keys = optional bool; + firstmatch = optional bool; + in_to_exists = optional bool; + index_merge = optional bool; + index_merge_intersection = optional bool; + index_merge_sort_intersection = optional bool; + index_merge_sort_union = optional bool; + index_merge_union = optional bool; + join_cache_bka = optional bool; + join_cache_hashed = optional bool; + join_incremental = optional bool; + loosescan = optional bool; + materialization = optional bool; + mrr = optional bool; + mrr_cost_based = optional bool; + mrr_sort_keys = optional bool; + optimize_join_buffer_size = optional bool; + outer_join_with_cache = optional bool; + partial_match_rowid_merge = optional bool; + partial_match_table_scan = optional bool; + semijoin = optional bool; + semijoin_with_cache = optional bool; + subquery_cache = optional bool; + table_elimination = optional bool; + }; + }; + +in { + options = { + binlog_checksum = optional (enum ["NONE" "CRC32"]); + binlog_commit_wait_count = optional int; + binlog_commit_wait_usec = optional int; + binlog_direct_non_transactional_updates = optional bool; + binlog_format = optional (enum ["ROW" "MIXED" "STATEMENT"]); + 
binlog_optimize_thread_scheduling = optional bool; + binlog_row_image = optional (enum ["FULL" "NOBLOB" "MINIMAL"]); + binlog_stmt_cache_size = optional int; + character_set_server = optional str; + collation_server = optional str; + connect_timeout = optional int; + datadir = mandatory path; + default_storage_engine = optional (enum engines); + default_time_zone = optional str; + encrypt_binlog = optional bool; + event_scheduler = optional (either bool (enum ["DISABLED"])); + expire_logs_days = optional int; + general_log = optional bool; + group_concat_max_len = optional int; + ignore_db_dirs = optional (uniq (listOf str)); + init_connect = optional str; + init_slave = optional str; + innodb_autoinc_lock_mode = optional (enum' [ 0 1 2 ]); + innodb_buffer_pool_dump_at_shutdown = optional bool; + innodb_buffer_pool_instances = optional int; + innodb_buffer_pool_load_at_startup = optional bool; + innodb_buffer_pool_size = optional int; + innodb_doublewrite = optional bool; + innodb_file_format = optional (enum ["antelope" "barracuda"]); + innodb_file_per_table = optional bool; + innodb_flush_log_at_trx_commit = optional (enum' [0 1 2]); + innodb_flush_method = optional (enum flush-methods); + innodb_io_capacity = optional int; + innodb_io_capacity_max = optional int; + innodb_lock_wait_timeout = optional int; + innodb_log_file_size = optional int; + innodb_open_files = optional int; + innodb_read_io_threads = optional int; + innodb_rollback_on_timeout = optional bool; + innodb_thread_concurrency = optional int; + innodb_write_io_threads = optional int; + interactive_timeout = optional int; + join_buffer_size = optional int; + local_infile = optional bool; + log_bin = optional path; + log_bin_index = optional str; + log_output = optional (oneOrMore ["TABLE" "FILE"]); + log_queries_not_using_indexes = optional bool; + log_slave_updates = default false bool; + log_slow_rate_limit = optional int; + log_slow_verbosity = optional (enum' ["query_plan" "innodb" "explain"]); + log_warnings = optional (enum' [ 0 1 2 3 ]); + long_query_time = optional float; + max_allowed_packet = optional int; + max_binlog_cache_size = optional int; + max_binlog_size = optional int; + max_binlog_stmt_cache_size = optional int; + max_connect_errors = optional int; + max_connections = optional int; + max_heap_table_size = optional int; + max_relay_log_size = optional int; + max_user_connections = optional int; + net_read_timeout = optional int; + net_write_timeout = optional int; + optimizer_switch = set optimizer; + port = default 3306 int; + query_alloc_block_size = optional int; + query_cache_limit = optional int; + query_cache_min_res_unit = optional int; + query_cache_size = optional int; + query_cache_strip_comments = optional bool; + query_cache_type = optional (enum' [ 0 1 "DEMAND"]); + query_cache_wlock_invalidate = optional bool; + query_prealloc_size = optional int; + relay_log = optional path; + relay_log_index = optional str; + relay_log_purge = optional bool; + relay_log_recovery = optional bool; + relay_log_space_limit = optional int; + server_audit_events = optional (uniq (listOf (enum ["CONNECT" "QUERY" "TABLE" "QUERY_DDL" "QUERY_DML"]))); + server_audit_excl_users = optional (listOf str); + server_audit_file_path = optional path; + server_audit_file_rotate_size = optional int; + server_audit_file_rotations = optional int; + server_audit_incl_users = optional (listOf str); + server_audit_logging = optional bool; + server_audit_output_type = optional (enum ["SYSLOG" "FILE"]); + 
server_audit_query_log_limit = optional int; + server_audit_syslog_facility = optional (enum syslog-facilities); + server_audit_syslog_ident = optional str; + server_audit_syslog_info = optional str; + server_audit_syslog_priority = optional (enum syslog-priorities); + server_id = optional int; + skip_log_bin = optional bool; + skip_name_resolve = optional bool; + skip_networking = optional bool; + slave_compressed_protocol = optional bool; + slave_ddl_exec_mode = optional (enum ["IDEMPOTENT" "STRICT"]); + slave_domain_parallel_threads = optional int; + slave_exec_mode = optional (enum ["IDEMPOTENT" "STRICT"]); + slave_load_tmpdir = optional path; + slave_max_allowed_packet = optional int; + slave_net_timeout = optional int; + slave_parallel_max_queued = optional int; + slave_parallel_mode = optional (enum ["conservative" "optimisitic" "none" "aggressive" "minimal"]); + slave_parallel_threads = optional int; + slave_skip_errors = optional (uniq (listOf int)); + slave_sql_verify_checksum = optional bool; + slave_transaction_retries = optional int; + slow_query_log = optional bool; + slow_query_log_file = optional path; + sort_buffer_size = optional int; + sql_mode = optional (uniq (listOf (enum sql-modes))); + ssl_ca = optional path; + ssl_capath = optional path; + ssl_cert = optional path; + ssl_cipher = optional str; + ssl_crl = optional path; + ssl_crlpath = optional path; + ssl_key = optional path; + table_definition_cache = optional int; + table_open_cache = optional int; + thread_cache_size = optional int; + tmp_table_size = optional int; + tmpdir = optional path; + wait_timeout = optional int; + }; + config = { + ignore_db_dirs = [ "lost+found" ]; + }; + +} + diff --git a/modules/apps/mariadb/procedures.sql b/modules/apps/mariadb/procedures.sql new file mode 100644 index 0000000..3aabe80 --- /dev/null +++ b/modules/apps/mariadb/procedures.sql @@ -0,0 +1,134 @@ +-- These procedures belong to the mysql DB, e. g. +-- CALL mysql.resetSlave('foo'); +-- Keep it simple: each procedure should be self-contained. 
+ +DELIMITER $$ + +DROP PROCEDURE IF EXISTS stopSlave $$ +CREATE PROCEDURE stopSlave (IN ch VARCHAR(64)) + COMMENT 'Stops slave channel (both I/O and SQL threads)' +BEGIN + -- Ignore ERROR 1617 (HY000): There is no master connection 'foo' + DECLARE EXIT HANDLER FOR 1617 + BEGIN + SELECT 'No such master connection' + AS warning; + END; + + SET default_master_connection = ch; + STOP SLAVE; +END $$ + +DROP PROCEDURE IF EXISTS startSlave $$ +CREATE PROCEDURE startSlave (IN ch VARCHAR(64)) + COMMENT 'Starts slave channel (both I/O and SQL threads)' +BEGIN + DECLARE EXIT HANDLER FOR 1617 + BEGIN + SELECT 'No such master connection' + AS warning; + END; + + SET default_master_connection = ch; + START SLAVE; +END $$ + +DROP PROCEDURE IF EXISTS kickSlave $$ +CREATE PROCEDURE kickSlave (IN ch VARCHAR(64)) + COMMENT 'Skips the next event from the master' +BEGIN + DECLARE EXIT HANDLER FOR 1617 + BEGIN + SELECT 'No such master connection' + AS warning; + END; + + SET default_master_connection = ch; + STOP SLAVE; + SET GLOBAL sql_slave_skip_counter = 1; + START SLAVE; +END $$ + +DROP PROCEDURE IF EXISTS pauseSlave $$ +CREATE PROCEDURE pauseSlave (IN ch VARCHAR(64)) + COMMENT 'Stops SQL thread of the slave channel' +BEGIN + DECLARE EXIT HANDLER FOR 1617 + BEGIN + SELECT 'No such master connection' + AS warning; + END; + + SET default_master_connection = ch; + STOP SLAVE SQL_THREAD; +END $$ + +DROP PROCEDURE IF EXISTS resetSlave $$ +CREATE PROCEDURE resetSlave (IN ch VARCHAR(64)) + COMMENT 'Stops and deletes slave channel' +BEGIN + DECLARE EXIT HANDLER FOR 1617 + BEGIN + SELECT 'No such master connection' + AS warning; + END; + + SET default_master_connection = ch; + STOP SLAVE; + RESET SLAVE ALL; +END $$ + +DROP PROCEDURE IF EXISTS stopAllSlaves $$ +CREATE PROCEDURE stopAllSlaves () + COMMENT 'Stops all slaves' +BEGIN + STOP ALL SLAVES; +END $$ + +DROP PROCEDURE IF EXISTS pauseAllSlaves $$ +CREATE PROCEDURE pauseAllSlaves () + COMMENT 'Stops SQL thread of all slaves' +BEGIN + STOP ALL SLAVES SQL_THREAD; +END $$ + +DROP PROCEDURE IF EXISTS startAllSlaves $$ +CREATE PROCEDURE startAllSlaves () + COMMENT 'Starts all slaves' +BEGIN + START ALL SLAVES; +END $$ + +DROP PROCEDURE IF EXISTS enableGeneralLog $$ +CREATE PROCEDURE enableGeneralLog () +BEGIN + SET GLOBAL general_log = ON; +END $$ + +DROP PROCEDURE IF EXISTS disableGeneralLog $$ +CREATE PROCEDURE disableGeneralLog () +BEGIN + SET GLOBAL general_log = OFF; +END $$ + +DROP PROCEDURE IF EXISTS truncateGeneralLog $$ +CREATE PROCEDURE truncateGeneralLog () +BEGIN + TRUNCATE mysql.general_log; +END $$ + +DROP PROCEDURE IF EXISTS truncateSlowLog $$ +CREATE PROCEDURE truncateSlowLog () +BEGIN + TRUNCATE mysql.slow_log; +END $$ + +DROP PROCEDURE IF EXISTS showEvents $$ +CREATE PROCEDURE showEvents () + COMMENT 'Shows all events for the mysql schema' +BEGIN + SHOW EVENTS IN mysql; +END $$ + +DELIMITER ; + diff --git a/modules/apps/mariadb/replicate.nix b/modules/apps/mariadb/replicate.nix new file mode 100644 index 0000000..9f51dbf --- /dev/null +++ b/modules/apps/mariadb/replicate.nix @@ -0,0 +1,87 @@ +{ config, lib, ... 
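+# A sketch of one replication channel expressed with the options declared
+# below (the host, user and key path are hypothetical):
+#   {
+#     databases = [ "oms_live_sg" "bob_live_sg" ];
+#     ignore-tables = [ "schema_updates" ];
+#     master = { host = "db1.example.net"; user = "repl"; password-file = "/run/keys/repl"; };
+#   }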
}: +with lib; +with lib.types; +let + mandatory = type: mkOption { inherit type; }; + optional = type: mkOption { type = nullOr type; default = null; }; + + common = foldl (a: b: a//b) {} [ + { host = mandatory str; } + { password-file = optional path; } + { port = optional int; } + { ssl = optional bool; } + { ssl-ca = optional path; } + { ssl-cert = optional path; } + { ssl-key = optional path; } + { ssl-verify-server-cert = optional bool; } + { user = mandatory str; } + ]; + + master.options = foldl (a: b: a//b) {} [ + { connect-retry = optional int; } + { heartbeat-period = optional int; } + common + ]; + + mysqldump.options = foldl (a: b: a//b) {} [ + { compress = optional bool; } + { lock-tables = optional bool; } + { path = optional path; } + { single-transaction = optional bool; } + common + ]; + +in { + options = { + databases = mkOption { + type = listOf str; + description = '' + List of databases to dump and replicate. This will be written as + `foo.replicate_wild_do_table = db.%`. + ''; + example = [ "oms_live_sg" "bob_live_sg" ]; + }; + + ignore-tables = mkOption { + type = listOf str; + description = '' + List of tables to ignore. This will be written as + `foo.replicate_ignore_table = db.table`. If database prefix is + omitted, expressions for all databases will be generated. + ''; + example = [ "schema_updates" "bob_live_sg.locks" ]; + default = []; + }; + + ignore-databases = mkOption { + type = listOf str; + description = '' + List of databases to ignore. You do not need this in most cases. + See http://dev.mysql.com/doc/refman/en/replication-rules.html. + This will be written as `foo.replicate_ignore_db = mysql`. This is + useful when you want procedures in other databases, like `mysql`, + not to be replicated. + ''; + default = [ "mysql" "test" "tmp" ]; + }; + + master = mkOption { type = submodule (master); }; + mysqldump = mkOption { type = submodule (mysqldump); }; + }; + + config = { + mysqldump = { + compress = mkDefault true; + host = mkDefault config.master.host; + password-file = mkDefault config.master.password-file; + port = mkDefault config.master.port; + single-transaction = mkDefault true; + ssl = mkDefault config.master.ssl; + ssl-ca = mkDefault config.master.ssl-ca; + ssl-cert = mkDefault config.master.ssl-cert; + ssl-key = mkDefault config.master.ssl-key; + user = mkDefault config.master.user; + }; + }; +} + diff --git a/modules/apps/mariadb/roles.nix b/modules/apps/mariadb/roles.nix new file mode 100644 index 0000000..2971242 --- /dev/null +++ b/modules/apps/mariadb/roles.nix @@ -0,0 +1,250 @@ +{ config, lib, pkgs, ... }: +let + inherit (builtins) + elemAt filter isAttrs isList length trace ; + inherit (lib) + attrNames concatMapStrings concatMapStringsSep concatStrings + concatStringsSep filterAttrs flatten mapAttrsToList mkIf mkOption + optionalString replaceStrings splitString types ; + inherit (types) + attrsOf either listOf str submodule ; + + explicit = filterAttrs (n: v: n != "_module" && v != null); + + inherit (config.nixsap.apps.mariadb) roles; + basicRoles = filterAttrs (_: v: isAttrs v) roles; + topRoles = filterAttrs (_: v: isList v) roles; + allRoles = attrNames roles; + sqlList = concatMapStringsSep ", " (i: "'${i}'"); + + concatMapAttrs = f: attrs: concatStrings (mapAttrsToList f attrs); + + schemaName = object: elemAt (splitString "." object) 0; + isSchema = object: + let p = splitString "." 
object; + n = length p; + in (n == 1) + || (n == 2 && (elemAt p 1) == "%") + || ((elemAt p 1) == "%" && (elemAt p 2) == "%"); + + tableName = object: elemAt (splitString "." object) 1; + isTable = object: + let p = splitString "." object; + n = length p; + in (n == 2 && (elemAt p 1) != "%") + || (n > 2 && (elemAt p 2) == "%"); + + columnName = object: elemAt (splitString "." object) 2; + isColumn = object: + let p = splitString "." object; + n = length p; + in (n > 2 && (elemAt p 2) != "%"); + + grant = role: privileges: + { + schemas = concatMapAttrs (priv: objects: + concatMapStrings (o: + let + db = schemaName o; + p = "${replaceStrings [" "] ["_"] priv}_priv"; + in '' + SELECT 'GRANT ${priv} ON `${db}`.* TO \'${role}\';' + FROM information_schema.schemata -- Not really used, but for syntax and locks + WHERE NOT EXISTS ( + SELECT 1 FROM db + WHERE db.host = ${"''"} -- role, not user + AND db.user = '${role}' + AND '${db}' LIKE db.db + AND db.${p} = 'Y' + ) LIMIT 1; + '') (filter isSchema (flatten [objects])) + ) (explicit privileges); + + tables = concatMapAttrs (priv: objects: + concatMapStrings (o: '' + SELECT CONCAT('GRANT ${priv} ON `', t.table_schema, '`.`', t.table_name, '` TO \'${role}\';') + FROM information_schema.tables t + WHERE t.table_schema LIKE '${schemaName o}' + AND t.table_name LIKE '${tableName o}' + AND NOT EXISTS ( + SELECT 1 FROM mysql.tables_priv + WHERE tables_priv.host = ${"''"} -- role, not user + AND tables_priv.user = '${role}' + AND tables_priv.db = t.table_schema + AND tables_priv.table_name = t.table_name + AND FIND_IN_SET('${priv}', tables_priv.table_priv) > 0 + ); + '') (filter isTable (flatten [objects])) + ) (explicit privileges); + + columns = concatMapAttrs (priv: objects: + let colObjs = filter isColumn (flatten [objects]); + in optionalString ([] != colObjs) ('' + SELECT CONCAT ('GRANT ${priv}(', + GROUP_CONCAT(DISTINCT c.column_name SEPARATOR ','), + ') ON `', c.table_schema, '`.`', c.table_name, '` TO \'${role}\';') + FROM information_schema.columns c WHERE ( + '' + concatMapStringsSep " OR " (o: + '' + ( c.table_schema LIKE '${schemaName o}' AND + c.table_name LIKE '${tableName o}' AND + c.column_name LIKE '${columnName o}') + '') colObjs + + + '' + ) AND NOT EXISTS ( + SELECT 1 FROM columns_priv + WHERE columns_priv.host = ${"''"} -- role, not user + AND columns_priv.user = '${role}' + AND columns_priv.db = c.table_schema + AND columns_priv.table_name = c.table_name + AND columns_priv.column_name = c.column_name + AND FIND_IN_SET('${priv}', columns_priv.column_priv) > 0 + ) GROUP BY CONCAT(c.table_schema, c.table_name); + '') + ) (explicit privileges); + }; + + refreshRolesSQL = + let + sql = concatMapAttrs (role: privileges: '' + ${(grant role privileges).schemas} + ${(grant role privileges).tables} + ${(grant role privileges).columns} + '') basicRoles; + in pkgs.writeText "refresh-roles.sql" sql; + + refreshRoles = pkgs.writeBashScriptBin "refreshRoles" '' + set -euo pipefail + + doze() { + difference=$(($(date -d "08:00" +%s) - $(date +%s))) + if [ $difference -lt 0 ]; then + sleep $((86400 + difference)) + else + sleep $difference + fi + } + + while true; do + while ! 
mysql -e ';'; do + sleep 5s + done + tmp=$(mktemp) + trap 'rm -f "$tmp"' EXIT + mysql -N mysql < ${refreshRolesSQL} >> "$tmp" + mysql -v mysql < "$tmp" + doze + done + ''; + + addRoles = '' + ${concatMapStrings (r: "CREATE ROLE IF NOT EXISTS '${r}';\n") (attrNames roles)} + + ${concatStrings + (mapAttrsToList (role: subroles: '' + ${concatMapStringsSep "\n" (r: "GRANT '${r}' TO '${role}';") subroles} + '') topRoles) + } + ''; + + revokeRoles = '' + ${concatMapAttrs (role: subroles: '' + SELECT CONCAT('REVOKE \''', role, '\' FROM \''', user, '\';') FROM roles_mapping + WHERE user = '${role}' + AND role NOT IN (${sqlList subroles}); + '') topRoles + } + + SELECT CONCAT('DROP ROLE \''', user, '\';') FROM user WHERE is_role='Y' + ${optionalString (allRoles != []) "AND user NOT IN (${sqlList allRoles})"} + ; + ''; + + roleType = + let + objects = mkOption { + type = either str (listOf str); + default = []; + example = [ + "%bleep.%.created\_at" + "%bob\_live\_sg.brand\_type" + "%bob\_live\_sg.catalog%" + "%bob\_live\_sg.supplier.status" + "bar.%" + "beep" + "foo.%.%" + ]; + }; + basicRole = submodule { + options.nixsap = { + "ALL" = objects; + "ALTER" = objects; + "CREATE" = objects; + "DELETE" = objects; + "DROP" = objects; + "INDEX" = objects; + "INSERT" = objects; + "SELECT" = objects; + "SHOW VIEW" = objects; + "UPDATE" = objects; + }; + }; + topRole = listOf str; + in either basicRole topRole; + +in { + options.nixsap.apps.mariadb = { + roles = mkOption { + type = attrsOf roleType; + default = {}; + description = '' + Defines MariaDB roles. A role can be a "basic" one or a "top" + one. The basic roles are granted of regular privileges like SELECT + or UPDATE, while the top roles are granted of other roles. For basic + roles MySQL wildcards ("%" and "_") can be used to specify objects + to be granted on, including databases, tables and columns names. A + script runs periodically to find all matching objects and grants on + them. Objects are denoted as "schema[.table[.column]]". + ''; + example = { + top_role = [ "basic_role" ]; + basic_role = { + SELECT = [ + "%bob\_live\_sg.brand\_type" + "%bob\_live\_sg.catalog%" + "%bob\_live\_sg.supplier.created\_at" + "%bob\_live\_sg.supplier.id\_supplier" + "%bob\_live\_sg.supplier.name%" + "%bob\_live\_sg.supplier.status" + "%bob\_live\_sg.supplier.type" + "%bob\_live\_sg.supplier.updated\_at" + ]; + }; + monitoring = { + SELECT = [ + "%.%.created_at" + ]; + }; + }; + }; + }; + + config = { + nixsap.apps.mariadb.configure = optionalString (roles != {}) addRoles; + nixsap.apps.mariadb.configure' = revokeRoles; + + systemd.services.mariadb-roles = mkIf (basicRoles != {}) { + description = "refresh MariaDB basic roles"; + after = [ "mariadb.service" "mariadb-maintenance.service" ]; + wantedBy = [ "multi-user.target" ]; + path = [ pkgs.mariadb ]; + serviceConfig = { + ExecStart = "${refreshRoles}/bin/refreshRoles"; + User = config.nixsap.apps.mariadb.user; + Restart = "always"; + }; + }; + }; +} + diff --git a/modules/apps/mariadb/slave-watchdog.nix b/modules/apps/mariadb/slave-watchdog.nix new file mode 100644 index 0000000..8d1147e --- /dev/null +++ b/modules/apps/mariadb/slave-watchdog.nix @@ -0,0 +1,103 @@ +{ changeMaster, importDump }: '' +set -euo pipefail + +ch="$1" +status=$(mktemp) +trap 'rm -f "$status"' EXIT + +slave_status () { + if ! 
mysql -e ';'; then + echo unknown; return + fi + + if mysql -e "SHOW SLAVE '$1' STATUS\\G" | sed 's,^ *,,' > "$status"; then + if grep -oE '\bMaster_Server_Id:\s*[1-9][0-9]*' "$status" >&2; then + io_errno=$(awk '/Last_IO_Errno:/ {print $2}' "$status") + sql_errno=$(awk '/Last_SQL_Errno:/ {print $2}' "$status") + case "$io_errno:$sql_errno" in + 0:0) + echo ok + return + ;; + 0:*) + awk '/Last_SQL_Error:/ {print $0}' "$status" >&2 + echo "sql_error:$sql_errno" + return + ;; + *:*) + awk '/Last_IO_Error:/ {print $0}' "$status" >&2 + echo "io_error:$io_errno" + return + ;; + esac + fi + fi + echo none +} + +sql_errors=0 +none_count=0 +while true; do + st=$(slave_status "$ch") + + case "$st" in + ok|unknown) + echo "status: $st" >&2 + exit + ;; + none) + # XXX existing slave might not be initialized yet after mariadb restarts + (( ++none_count )) + echo "status: $st (count: $none_count)" >&2 + if [ "$none_count" -lt 10 ]; then + sleep 1m + continue + fi + mysql -v -N -e "CALL mysql.resetSlave('$ch')" >&2 + ${changeMaster} "$ch" | mysql + if ${importDump} "$ch" | mysql; then + mysql -v -N -e "CALL mysql.startSlave('$ch')" >&2 + exit + else + echo 'Import failed. Starting over' >&2 + mysql -v -N -e "CALL mysql.resetSlave('$ch')" >&2 + exit 1 + fi + ;; + io_error:*) + echo "status: $st" >&2 + mysql -v -N -e "CALL mysql.stopSlave('$ch')" >&2 + ${changeMaster} "$ch" | mysql + mysql -v -N -e "CALL mysql.startSlave('$ch')" >&2 + exit 1 + ;; + sql_error:1205) # Lock wait timeout exceeded + echo "status: $st" >&2 + mysql -v -N -e "CALL mysql.startSlave('$ch')" >&2 + exit 1 + ;; + sql_error:*) + (( ++sql_errors )) + echo "status: $st (count: $sql_errors)" >&2 + if [ "$sql_errors" -le 1 ]; then + mysql -v -N -e "CALL mysql.pauseSlave('$ch')" >&2 + sleep 1s + mysql -v -N -e "CALL mysql.startSlave('$ch')" >&2 + elif [ "$sql_errors" -le 2 ]; then + mysql -v -N -e "CALL mysql.stopSlave('$ch')" >&2 + # this *unlikely* *may* change replication option (ignore tables, etc.) + ${changeMaster} "$ch" | mysql + mysql -v -N -e "CALL mysql.startSlave('$ch')" >&2 + else + echo '!!! Resetting slave !!!' >&2 + mysql -v -N -e "CALL mysql.resetSlave('$ch')" >&2 + exit 1 + fi + sleep 2m + ;; + *) echo "BUG: $st" >&2; exit 255;; + esac + sleep 1s +done +'' + diff --git a/modules/apps/mediawiki/default.nix b/modules/apps/mediawiki/default.nix new file mode 100644 index 0000000..584d86a --- /dev/null +++ b/modules/apps/mediawiki/default.nix @@ -0,0 +1,323 @@ +{ config, pkgs, lib, ... 
}: + +let + + inherit (lib) + concatMapStrings concatMapStringsSep concatStringsSep + filterAttrs genAttrs hasPrefix mapAttrs mapAttrsToList mkDefault + mkEnableOption mkIf mkOption optionalAttrs optionalString + recursiveUpdate types; + inherit (types) + attrsOf bool either enum int lines listOf nullOr path str + submodule unspecified; + inherit (builtins) + attrNames elem filter isAttrs isBool isList isString toString; + + cfg = config.nixsap.apps.mediawiki; + user = config.nixsap.apps.mediawiki.user; + + defaultPool = { + listen.owner = config.nixsap.apps.nginx.user; + pm.max_children = 10; + pm.max_requests = 1000; + pm.max_spare_servers = 5; + pm.min_spare_servers = 3; + pm.strategy = "dynamic"; + env.MEDIAWIKI_LOCAL_SETTINGS = "${localSettings}"; + php_value = optionalAttrs (cfg.maxUploadSize != null) { + post_max_size = 2 * cfg.maxUploadSize; + upload_max_filesize = cfg.maxUploadSize; + }; + }; + + explicit = filterAttrs (n: v: n != "_module" && v != null); + concatMapAttrsSep = s: f: attrs: concatStringsSep s (mapAttrsToList f attrs); + enabledExtentions = attrNames (filterAttrs (_: enabled: enabled) (explicit cfg.extensions)); + + keys = filter (hasPrefix "/run/keys/") (mapAttrsToList (_: o: o.password-file) cfg.users); + + settings = + let + show = s: n: v: + if isBool v then (if v then "TRUE" else "FALSE") + else if isString v then "'${v}'" + else if isList v then "array(${concatMapStringsSep "," (i: "\n${s}'${toString i}'") v})" + else if isAttrs v then "array(${concatMapAttrsSep "," (p: q: "\n${s}'${p}' => ${show "${s} " p q}") (explicit v)})" + else toString v; + in pkgs.writePHPFile "LocalSettings.inc.php" '' + <?php + ${concatMapAttrsSep "\n" + (n: v: if isAttrs v + # XXX This will preserve or replace defaults, + # but would give odd result if any element were a list: + then "\$${n} = array_replace_recursive (\$${n}, ${show " " n v});" + else "\$${n} = ${show " " n v};") + (explicit cfg.localSettings)} + ?> + ''; + + localSettings = pkgs.writePHPFile "LocalSettings.php" '' + <?php + ${concatMapStringsSep "\n " (e: + "require_once ('${pkgs.mediawikiExtensions.${e}}/${e}.php');" + ) enabledExtentions + } + + ${optionalString (elem "GraphViz" enabledExtentions) + "$wgGraphVizSettings->execPath = '${pkgs.graphviz}/bin/';" + } + + ${optionalString (elem "MathJax" enabledExtentions) '' + # MathJax 0.7: + MathJax_Parser::$MathJaxJS = '${pkgs.mathJax}/MathJax.js?config=TeX-AMS-MML_HTMLorMML-full'; + ''} + + $wgDiff = '${pkgs.diffutils}/bin/diff'; + $wgDiff3 = '${pkgs.diffutils}/bin/diff3'; + $wgImageMagickConvertCommand = '${pkgs.imagemagick}/bin/convert'; + ${optionalString (cfg.logo != null) '' + $wgLogo = '${cfg.logo}'; + ''} + ${optionalString (cfg.maxUploadSize != null) + "$wgMaxUploadSize = ${toString cfg.maxUploadSize};" + } + + require_once ('${settings}'); + + $wgDirectoryMode = 0750; + ?> + ''; + + mediawiki-db = + let + psql = pkgs.writeBashScript "mw-psql" '' + set -euo pipefail + exec ${pkgs.postgresql}/bin/psql -t -w \ + -v ON_ERROR_STOP=1 \ + ${optionalString (cfg.localSettings.wgDBserver != "") + "-h '${cfg.localSettings.wgDBserver}'"} \ + -p ${toString cfg.localSettings.wgDBport} \ + -U ${toString cfg.localSettings.wgDBuser} \ + -d '${cfg.localSettings.wgDBname}' \ + "$@" + ''; + mysql = pkgs.writeBashScript "mw-mysql" '' + set -euo pipefail + exec ${pkgs.mysql}/bin/mysql -N \ + ${optionalString (cfg.localSettings.wgDBserver != "") + "-h '${cfg.localSettings.wgDBserver}'"} \ + -u ${toString cfg.localSettings.wgDBuser} \ + -D '${cfg.localSettings.wgDBname}' \ + "$@" 
+ ''; + in pkgs.writeBashScriptBin "mediawiki-db" '' + set -euo pipefail + ${if cfg.localSettings.wgDBtype == "postgres" then '' + while ! ${psql} -c ';'; do + sleep 5s + done + exist=$(${psql} -c "SELECT COUNT(1) FROM pg_class WHERE relname = 'mwuser';") + if [ "''${exist//[[:space:]]/}" -eq 0 ]; then + { + # XXX this script has BEGIN, but no COMMIT: + cat ${pkgs.mediawiki}/maintenance/postgres/tables.sql + echo 'COMMIT;' + } | ${psql} + fi + '' else '' + while ! ${mysql} -e ';'; do + sleep 5s + done + exist=$(${mysql} -e "SELECT COUNT(1) FROM information_schema.tables + WHERE table_schema='${cfg.localSettings.wgDBname}' + AND table_name='mwuser'") + if [ "''${exist//[[:space:]]/}" -eq 0 ]; then + { + cat ${pkgs.mediawiki}/maintenance/tables.sql + } | ${mysql} + fi + ''} + + export MEDIAWIKI_LOCAL_SETTINGS='${localSettings}' + ${pkgs.php}/bin/php ${pkgs.mediawiki}/maintenance/update.php + ${concatMapAttrsSep "" (n: o: '' + pw=$(cat '${o.password-file}') + if [ -z "$pw" ]; then + echo 'WARNING: Using random password, because ${o.password-file} is empty or cannot be read' >&2 + pw=$(${pkgs.pwgen}/bin/pwgen -1 13) + fi + ${pkgs.php}/bin/php ${pkgs.mediawiki}/maintenance/createAndPromote.php \ + --force --${o.role} '${n}' "$pw" + '') cfg.users} + ''; + + mediawiki-upload = pkgs.writeBashScriptBin "mediawiki-upload" '' + set -euo pipefail + mkdir -v -p '${cfg.localSettings.wgUploadDirectory}' + + ${optionalString (elem "GraphViz" enabledExtentions) + # XXX: GraphViz::getUploadSubdir: mkdir(/mediawiki/graphviz/images/, 16872) failed + # GraphViz fails to create the directory until you create the first graph. + "mkdir -v -p '${cfg.localSettings.wgUploadDirectory}/graphviz'" + } + + chmod -Rc u=rwX,g=rX,o= '${cfg.localSettings.wgUploadDirectory}' + chown -Rc '${user}:${user}' '${cfg.localSettings.wgUploadDirectory}' + ''; + + nginx = '' + ${cfg.nginxServer} + + ${optionalString (cfg.maxUploadSize != null) + "client_max_body_size ${toString cfg.maxUploadSize};" + } + + root ${pkgs.mediawiki}; + index index.php; + + ${optionalString (cfg.logo != null) '' + location = ${cfg.logo} { + alias ${cfg.logo}; + } + ''} + + ${optionalString + (cfg.localSettings.wgEnableUploads + && hasPrefix "/" cfg.localSettings.wgUploadPath) '' + location ${cfg.localSettings.wgUploadPath} { + alias ${cfg.localSettings.wgUploadDirectory}; + } + ''} + + ${concatMapStrings (e: '' + location /extensions/${e} { + alias ${pkgs.mediawikiExtensions.${e}}; + } + '') enabledExtentions + } + + ${optionalString (elem "MathJax" enabledExtentions) '' + location ${pkgs.mathJax} { + alias ${pkgs.mathJax}; + } + ''} + + location / { + try_files $uri $uri/ @rewrite; + } + + location @rewrite { + rewrite ^/(.*)$ /index.php?title=$1&$args; + } + + location ^~ /maintenance/ { + return 403; + } + + location ~ \.php$ { + fastcgi_pass unix:${config.nixsap.apps.php-fpm.mediawiki.pool.listen.socket}; + include ${pkgs.nginx}/conf/fastcgi_params; + fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; + } + ''; + +in { + + options.nixsap.apps.mediawiki = { + enable = mkEnableOption "Mediawiki"; + user = mkOption { + description = '' + The user the PHP-FPM pool runs as. And the owner of uploaded files. 
+ ''; + default = "mediawiki"; + type = str; + }; + nginxServer = mkOption { + type = lines; + default = ""; + example = '' + listen 8080; + server_name wiki.example.net; + ''; + }; + fpmPool = mkOption { + description = "Options for the PHP FPM pool"; + type = attrsOf unspecified; + default = {}; + }; + logo = mkOption { + description = "The site logo (the image displayed in the upper-left corner of the page)"; + type = nullOr path; + default = null; + }; + maxUploadSize = mkOption { + description = '' + Maximum allowed size for uploaded files (bytes). + This affects Mediawiki itself, Nginx and PHP. + ''; + type = nullOr int; + default = null; + }; + localSettings = mkOption { + description = "Variables in LocalSettings.php"; + type = submodule (import ./localSettings.nix (explicit cfg.extensions)); + default = {}; + }; + extensions = mkOption { + description = "Mediawiki extensions"; + default = {}; + type = submodule + { options = mapAttrs + (e: _: + mkOption { + description = "Enable the ${e} extension"; + type = bool; + default = false; + }) pkgs.mediawikiExtensions; + }; + }; + users = mkOption { + description = "Mediawiki users (only bots or sysops)"; + default = {}; + type = attrsOf (submodule { options = { + role = mkOption { type = enum [ "bot" "sysop" ]; }; + password-file = mkOption { type = path; }; + }; }); + }; + }; + + config = mkIf cfg.enable { + nixsap.apps.php-fpm.mediawiki.pool = + recursiveUpdate defaultPool (cfg.fpmPool // { user = cfg.user ;}); + nixsap.deployment.keyrings.${user} = keys; + users.users.${config.nixsap.apps.nginx.user}.extraGroups = + mkIf cfg.localSettings.wgEnableUploads [ user ]; + + nixsap.apps.nginx.http.servers.mediawiki = nginx; + + systemd.services.mediawiki-db = { + description = "configure Mediawiki database"; + after = [ "network.target" "local-fs.target" "keys.target" ]; + wants = [ "keys.target" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + RemainAfterExit = true; + Type = "oneshot"; + User = config.nixsap.apps.php-fpm.mediawiki.pool.user; + ExecStart = "${mediawiki-db}/bin/mediawiki-db"; + }; + }; + + systemd.services.mediawiki-upload = mkIf cfg.localSettings.wgEnableUploads { + description = "configure Mediawiki uploads"; + after = [ "local-fs.target" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + RemainAfterExit = true; + Type = "oneshot"; + ExecStart = "${mediawiki-upload}/bin/mediawiki-upload"; + }; + }; + }; +} + diff --git a/modules/apps/mediawiki/localSettings.nix b/modules/apps/mediawiki/localSettings.nix new file mode 100644 index 0000000..cbacd07 --- /dev/null +++ b/modules/apps/mediawiki/localSettings.nix @@ -0,0 +1,158 @@ +extensions: + +{ lib, ... 
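+# For instance, a hypothetical hardening tweak expressed with the
+# wgGroupPermissions option declared below:
+#   localSettings.wgGroupPermissions."*".edit = false;
+#   localSettings.wgGroupPermissions."*".createaccount = false;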
}: +let + + inherit (builtins) elem; + + inherit (lib) + concatStringsSep flip genAttrs mergeOneOption mkDefault mkOption + mkOptionType optionalAttrs optionals types ; + inherit (types) + attrsOf bool either enum int listOf nullOr path str submodule ; + + just = t: mkOption { type = t; }; # mergeable defaults + default = d: t: mkOption { type = t; default = d; }; # overridable defaults + optional = t: mkOption { type = nullOr t; default = null; }; + set = options: mkOption { type = submodule { inherit options; }; default = {}; }; + + # XXX https://github.com/NixOS/nixpkgs/issues/9826 + enum' = values: + let show = v: let t = builtins.typeOf v; + in if t == "string" then ''"${v}"'' + else if t == "int" then builtins.toString v + else ''<${t}>''; + in mkOptionType { + name = "one of ${concatStringsSep ", " (map show values)}"; + check = flip elem values; + merge = mergeOneOption; + }; + + rights = [ + "apihighlimits" "applychangetags" "autoconfirmed" "autopatrol" + "bigdelete" "block" "blockemail" "bot" "browsearchive" + "changetags" "createaccount" "createpage" "createtalk" + "delete" "deletedhistory" "deletedtext" "deletelogentry" + "deleterevision" "edit" "editinterface" "editmyoptions" + "editmyprivateinfo" "editmyusercss" "editmyuserjs" + "editmywatchlist" "editprotected" "editsemiprotected" + "editusercss" "editusercssjs" "edituserjs" "hideuser" "import" + "importupload" "ipblock-exempt" "managechangetags" "markbotedits" + "mergehistory" "minoredit" "move" "move-categorypages" + "move-rootuserpages" "move-subpages" "movefile" "nominornewtalk" + "noratelimit" "override-export-depth" "pagelang" "passwordreset" + "patrol" "patrolmarks" "protect" "proxyunbannable" "purge" + "read" "reupload" "reupload-own" "reupload-shared" "rollback" + "sendemail" "siteadmin" "suppressionlog" "suppressredirect" + "suppressrevision" "unblockself" "undelete" "unwatchedpages" + "upload" "upload_by_url" "userrights" "userrights-interwiki" + "viewmyprivateinfo" "viewmywatchlist" "writeapi" + ] + ++ optionals extensions.UserPageEditProtection [ "editalluserpages" ] + ; + + wgGroupPermissions = set ( genAttrs [ + "*" "user" "autoconfirmed" "bot" "sysop" "bureaucrat" + ] (_: + set ( genAttrs rights (_: optional bool) ) + ) + ); + + + wgDefaultUserOptions = set ( + { + diffonly = optional bool; + disablemail = optional bool; + enotifminoredits = optional bool; + enotifrevealaddr = optional bool; + enotifusertalkpages = optional bool; + enotifwatchlistpages = optional bool; + fancysig = optional bool; + gender = optional (enum [ "female" "male" "unknown" ]); + hideminor = optional bool; + justify = optional bool; + minordefault = optional bool; + nickname = optional str; + previewontop = optional bool; + quickbar = optional (enum' [ 0 1 2 3 4 5 ]); + realname = optional str; + rememberpassword = optional bool; + underline = optional (enum' [0 1 2]); + math = optional (enum' [0 1]); + usenewrc = optional bool; + imagesize = optional int; + skin = optional str; + } // optionalAttrs extensions.WikiEditor + { + usebetatoolbar = optional bool; + usebetatoolbar-cgd = optional bool; + usenavigabletoc = optional bool; + wikieditor-preview = optional bool; + wikieditor-publish = optional bool; + } + ); + +in { + options = { + inherit wgDefaultUserOptions; + inherit wgGroupPermissions; + wgAllowCopyUploads = optional bool; + wgArticlePath = optional path; + wgCheckFileExtensions = optional bool; + wgCopyUploadsDomains = default [] (listOf str); + wgCopyUploadsFromSpecialUpload = optional bool; + wgDBcompress = optional 
bool; + wgDBerrorLog = optional path; + wgDBname = default "mediawiki" str; + wgDBport = default "3456" int; + wgDBserver = default "" str; + wgDBssl = optional bool; + wgDBtype = default "postgres" (enum ["mysql" "postgres"]); + wgDBuser = default "mediawiki" str; + wgDebugLogFile = optional path; + wgEnableUploads = default false bool; + wgFileBlacklist = just (listOf str); + wgFileExtensions = just (listOf str); + wgLanguageCode = optional str; + wgMaxShellMemory = optional int; + wgMaxShellTime = optional int; + wgMimeTypeBlacklist = just (listOf str); + wgScriptPath = optional str; + wgServer = optional str; + wgShowDBErrorBacktrace = optional bool; + wgShowExceptionDetails = optional bool; + wgSitename = default "Wiki" str; + wgStrictFileExtensions = optional bool; + wgStyleDirectory = optional path; + wgStylePath = optional path; + wgUploadDirectory = default "/mediawiki" path; + wgUploadPath = default "/_files" str; + wgUrlProtocols = just (listOf str); + wgUsePrivateIPs = optional bool; + } // optionalAttrs (extensions.UserPageEditProtection) + { + wgOnlyUserEditUserPage = optional bool; + }; + + config = { + wgUrlProtocols = [ + "//" "bitcoin:" "ftp://" "ftps://" "geo:" "git://" "gopher://" + "http://" "https://" "irc://" "ircs://" "magnet:" "mailto:" + "mms://" "news:" "nntp://" "redis://" "sftp://" "sip:" + "sips:" "sms:" "ssh://" "svn://" "tel:" "telnet://" "urn:" + "worldwind://" "xmpp:" ]; + wgFileExtensions = [ "gif" "jpeg" "jpg" "png" ]; + wgFileBlacklist = [ + "bat" "cgi" "cmd" "com" "cpl" "dll" "exe" "htm" "html" "jhtml" + "js" "jsb" "mht" "mhtml" "msi" "php" "php3" "php4" "php5" + "phps" "phtml" "pif" "pl" "py" "scr" "shtml" "vbs" "vxd" + "xht" "xhtml" ]; + wgMimeTypeBlacklist = [ + "application/x-msdownload" "application/x-msmetafile" + "application/x-php" "application/x-shellscript" "text/html" + "text/javascript" "text/scriptlet" "text/x-bash" "text/x-csh" + "text/x-javascript" "text/x-perl" "text/x-php" "text/x-python" + "text/x-sh" ]; + }; +} + diff --git a/modules/apps/mysqlbackup.nix b/modules/apps/mysqlbackup.nix new file mode 100644 index 0000000..509e010 --- /dev/null +++ b/modules/apps/mysqlbackup.nix @@ -0,0 +1,428 @@ +{ config, pkgs, lib, ... }: +let + inherit (lib) mkOption mkIf mkDefault mapAttrsToList flatten hasPrefix filter + concatMapStringsSep concatStringsSep optionalString filterAttrs + splitString removeSuffix; + inherit (lib.types) bool str int path either enum nullOr listOf attrsOf submodule; + inherit (builtins) isString isBool isInt isList isPath toString length; + + cfg = config.nixsap.apps.mysqlbackup; + privateDir = "/run/mysqlbackup"; + + mysql = "${pkgs.mysql}/bin/mysql"; + mysqldump = "${pkgs.mysql}/bin/mysqldump"; + s3cmd = "${pkgs.s3cmd}/bin/s3cmd ${optionalString (cfg.s3cfg != null) "-c '${cfg.s3cfg}'"}"; + + gpgPubKeys = flatten [ cfg.encrypt ]; + gpg = "${pkgs.gpg}/bin/gpg2"; + pubring = pkgs.runCommand "pubring.gpg" {} '' + ${gpg} --homedir . 
--import ${toString gpgPubKeys} + cp pubring.gpg $out + ''; + + default = d: t: mkOption { type = t; default = d; }; + explicit = filterAttrs (n: v: n != "_module" && v != null); + mandatory = type: mkOption { inherit type; }; + optional = type: mkOption { type = nullOr type; default = null; }; + sub = options: submodule { inherit options; } ; + + connection = mkOption { + description = "Connection options used by mysqlbackup"; + type = sub { + compress = default true bool; + host = mandatory str; + max-allowed-packet = optional int; + password-file = optional path; + port = optional int; + socket = optional path; + ssl = optional bool; + ssl-ca = optional path; + ssl-cert = optional path; + ssl-key = optional path; + ssl-verify-server-cert = optional bool; + user = optional str; + }; + }; + + databases = mkOption { + description = "What to dump and what to ignore"; + default = {}; + type = sub { + like = mkOption { + description = '' + Databases to dump. MySQL wildcards (_ and %) are supported. + Logical OR is applied to all entries. + ''; + type = either str (listOf str); + default = "%"; + example = [ "%\\_live\\_%" ]; + }; + not-like = mkOption { + description = '' + Databases to skip. MySQL wildcards (_ and %) are supported. + You don't need to specify `performance_schema` or `information_schema` + here, they are always ignored. Logical AND is applied to all entries. + ''; + type = either str (listOf str); + default = []; + example = [ "tmp\\_%" "snap\\_%" ]; + }; + empty-tables-like = mkOption { + description = '' + Tables to ignore. MySQL wildcards (_ and %) are supported. + Note that the schemas of these tables will be dumped anyway. + Each table template can be prefixed with a database template. + In that case it will be applied to matching databases only, + instead of all databases''; + type = either str (listOf str); + default = []; + example = [ "bob%.alice\\_message" ]; + }; + skip-tables-like = mkOption { + description = '' + Tables to ignore. MySQL wildcards (_ and %) are supported. + Each table template can be prefixed with a database template. + In that case it will be applied to matching databases only, + instead of all databases''; + type = either str (listOf str); + default = []; + example = [ "tmp%" "%\\_backup" ]; + }; + }; + }; + + server = submodule ({ name, ... 
}: + { + options = { inherit connection databases; }; + config.connection.host = mkDefault name; + } + ); + + connectionKeys = flatten (mapAttrsToList (_: s: with s.connection; [ password-file ssl-key ]) cfg.servers); + keys = filter (f: f != null && hasPrefix "/run/keys/" f) ( connectionKeys ++ [cfg.s3cfg] ); + + showDatabases = name: server: pkgs.writeText "show-databases-${name}.sql" '' + SHOW DATABASES WHERE `Database` NOT IN ('information_schema', 'performance_schema', 'tmp', 'innodb') + AND (${concatMapStringsSep " OR " (e: "`Database` LIKE '${e}'") (flatten [server.databases.like])}) + ${concatMapStringsSep " " (e: "AND `Database` NOT LIKE '${e}'") (flatten [server.databases.not-like])} + ; + ''; + + defaultsFile = name: server: + let + inc = optionalString (server.connection.password-file != null) + "!include ${privateDir}/cnf/${name}"; + show = n: v: + if isBool v then (if v then "1" else "0") + else if isInt v then toString v + else if isString v then "${v}" + else if isPath v then "'${v}'" + else abort "Unrecognized option ${n}"; + in pkgs.writeText "my-${name}.cnf" + ( concatStringsSep "\n" ( + [ "[client]" ] + ++ mapAttrsToList (k: v: "${k} = ${show k v}") + (filterAttrs (k: _: k != "password-file") (explicit server.connection)) + ++ [ "${inc}\n" ] + ) + ); + + listTables = name: server: tables: + let + anyDb = s: if 1 == length (splitString "." s) + then "%.${s}" else s; + query = optionalString (0 < length tables) '' + set -euo pipefail + db="$1" + cat <<SQL | ${mysql} --defaults-file=${defaultsFile name server} -N + SELECT CONCAT(table_schema, '.', table_name) AS tables + FROM information_schema.tables HAVING tables LIKE '$db.%' + AND ( ${concatMapStringsSep " OR " (e: "tables LIKE '${e}'") + (map anyDb tables)} ); + SQL + ''; + in pkgs.writeBashScript "list-tables-${name}" query; + + job = name: server: pkgs.writeBashScript "job-${name}" '' + set -euo pipefail + db=$(basename "$0") + cd "${cfg.dumpDir}/$DATE" + + dump="$db@${name},$DATE.mysql.xz" + ${if (gpgPubKeys != []) then '' + aim="$dump.gpg" + '' else '' + aim="$dump" + ''} + + if ! [ -r "$aim" ]; then + { + empty=() + + empty+=( $(${listTables name server server.databases.empty-tables-like} "$db") ) + if [ ''${#empty[@]} -gt 0 ]; then + tables=( "''${empty[@]/#*./}" ) + ${mysqldump} --defaults-file=${defaultsFile name server} \ + --skip-comments --force --single-transaction \ + --no-data "$db" "''${tables[@]}" + fi + + empty+=( $(${listTables name server server.databases.skip-tables-like} "$db") ) + + if [ ''${#empty[@]} -gt 0 ]; then + ignoretables+=( "''${empty[@]/#/--ignore-table=}" ) + fi + + ${mysqldump} --defaults-file=${defaultsFile name server} \ + --skip-comments --force --single-transaction \ + "''${ignoretables[@]:+''${ignoretables[@]}}" \ + "$db" + } | ${pkgs.pxz}/bin/pxz -2 -T2 > "$dump".tmp + ${pkgs.xz}/bin/xz -t -v "$dump".tmp + mv "$dump".tmp "$dump" + + ${optionalString (gpgPubKeys != []) '' + recipient=( $(${gpg} --homedir '${privateDir}/gnupg' -k --with-colons --fast-list-mode | \ + ${pkgs.gawk}/bin/awk -F: '/^pub/{print $5}') ) + r=( "''${recipient[@]/#/-r}" ) + ${gpg} --homedir '${privateDir}/gnupg' --batch --no-tty --yes \ + "''${r[@]}" --trust-model always \ + --compress-algo none \ + -v -e "$dump" + rm -f "$dump" + ''} + else + echo "$aim exists. Not dumping." >&2 + fi + ${optionalString (cfg.s3uri != null) '' + remote="${removeSuffix "/" cfg.s3uri}/$DATE/$aim" + if ! 
${s3cmd} ls "$remote" | ${pkgs.gnugrep}/bin/grep -qF "/$aim"; then + ${s3cmd} put "$aim" "$remote" + else + echo "$remote exists. Not uploading." >&2 + fi + ''} + ''; + + mkJobs = name: server: pkgs.writeBashScript "mkjobs-${name}" '' + set -euo pipefail + mkdir -p '${privateDir}/jobs/${name}' + for db in $(${mysql} --defaults-file=${defaultsFile name server} -N < ${showDatabases name server} | shuf) + do + ln -svf ${job name server} "${privateDir}/jobs/${name}/$db" + done + ''; + + preStart = '' + mkdir --mode=0750 -p '${cfg.dumpDir}' + chown -R ${cfg.user}:${cfg.user} '${cfg.dumpDir}' + chmod -R u=rwX,g=rX,o= ${cfg.dumpDir} + + rm -rf '${privateDir}' + mkdir --mode=0700 -p '${privateDir}' + chown ${cfg.user}:${cfg.user} '${privateDir}' + ''; + + main = pkgs.writeBashScriptBin "mysqlbackup" '' + set -euo pipefail + umask 0027 + DATE=$(date --iso-8601) + HOME='${privateDir}' + PARALLEL_SHELL=${pkgs.bash}/bin/bash + export DATE + export HOME + export PARALLEL_SHELL + + clean() { + ${pkgs.findutils}/bin/find '${cfg.dumpDir}' -type f -name '*.tmp' -delete || true + } + + listSets() { + ${pkgs.findutils}/bin/find '${cfg.dumpDir}' \ + -maxdepth 1 -mindepth 1 -type d -name '????-??-??' \ + | sort -V + } + + enoughStorage() { + local n + local used + local total + local avg + local p + n=$(listSets | wc -l) + used=$(du -x -s --block-size=1M '${cfg.dumpDir}' | cut -f1) + total=$(df --output=size --block-size=1M '${cfg.dumpDir}' | tail -n 1) + if [ "$n" -eq 0 ]; then + echo "no sets" >&2 + return 0 + fi + + avg=$(( used / n )) + p=$(( 100 * avg * (n + 1) / total )) + printf "estimated storage: %d of %d MiB (%d%%, max ${toString cfg.storage}%%)\n" \ + "$((used + avg))" "$total" "$p" >&2 + if [ "$p" -le ${toString cfg.storage} ]; then + return 0 + else + return 1 + fi + } + + clean + + listSets | head -n -${toString (cfg.slots - 1)} \ + | ${pkgs.findutils}/bin/xargs --no-run-if-empty rm -rfv \ + || true + + while ! enoughStorage; do + listSets | head -n 1 \ + | ${pkgs.findutils}/bin/xargs --no-run-if-empty rm -rfv \ + || true + done + + mkdir -p "${cfg.dumpDir}/$DATE" + mkdir -p '${privateDir}/cnf' + mkdir -p '${privateDir}/jobs' + + ${optionalString (gpgPubKeys != []) '' + # shellcheck disable=SC2174 + mkdir --mode=0700 -p '${privateDir}/gnupg' + ln -sf ${pubring} '${privateDir}/gnupg/pubring.gpg' + ''} + + ${concatStringsSep "\n" ( + mapAttrsToList (n: s: '' + printf '[client]\npassword=' > '${privateDir}/cnf/${n}' + cat '${s.connection.password-file}' >> '${privateDir}/cnf/${n}' + '') (filterAttrs (_: s: s.connection.password-file != null) cfg.servers) + )} + + { + cat <<'LIST' + ${concatStringsSep "\n" (mapAttrsToList (mkJobs) cfg.servers)} + LIST + } | ${pkgs.parallel}/bin/parallel \ + --halt-on-error 0 \ + --jobs 100% \ + --line-buffer \ + --no-notice \ + --no-run-if-empty \ + --retries 2 \ + --shuf \ + --tagstr '* {}:' \ + --timeout ${toString (10 * 60)} \ + || true + + failed=0 + log="${cfg.dumpDir}/$DATE/joblog.txt" + + { + cd '${privateDir}/jobs' && find -type l -printf '%P\n'; + } | ${pkgs.parallel}/bin/parallel \ + --halt-on-error 0 \ + --joblog "$log" \ + --jobs '${toString cfg.jobs}' \ + --line-buffer \ + --no-notice \ + --no-run-if-empty \ + --retries 2 \ + --tagstr '* {}:' \ + --timeout ${toString (6 * 60 * 60)} \ + '${privateDir}/jobs/{}' || failed=$? 
+ + cat "$log" + clean + + du -sh "${cfg.dumpDir}/$DATE" || true + exit "$failed" + ''; + +in { + options.nixsap.apps.mysqlbackup = { + user = mkOption { + description = "User to run as"; + default = "mysqlbackup"; + type = str; + }; + + startAt = mkOption { + description = "Time to start (systemd format)"; + default = "02:00"; + type = str; + }; + + dumpDir = mkOption { + description = "Directory to save dumps in"; + default = "/mysqlbackup"; + type = path; + }; + + slots = mkOption { + description = '' + How many backup sets should be kept locally. + However, old sets will be removed anyway if storage + constraints apply. + ''; + default = 60; + type = int; + }; + + storage = mkOption { + description = '' + Percent of storage backups can occupy. + ''; + default = 75; + type = int; + }; + + encrypt = mkOption { + description = "Public GPG key(s) for encrypting the dumps"; + default = [ ]; + type = either path (listOf path); + }; + + servers = mkOption { + default = {}; + type = attrsOf server; + }; + + jobs = mkOption { + description = '' + Number of jobs (mysqldump) to run in parallel. + In the format of GNU Parallel, e. g. "100%", -1. +3, 7, etc. + ''; + default = "50%"; + type = either int str; + }; + + s3cfg = mkOption { + description = "s3cmd config file (secret)"; + type = nullOr path; + default = null; + }; + + s3uri = mkOption { + description = "S3 bucket URI with prefix in s3cmd format"; + type = nullOr str; + default = null; + example = "s3://backups/nightly"; + }; + }; + + config = mkIf (cfg.servers != {}) { + nixsap.system.users.daemons = [ cfg.user ]; + nixsap.deployment.keyrings.${cfg.user} = keys; + systemd.services.mysqlbackup = { + description = "MySQL backup"; + after = [ "local-fs.target" "keys.target" "network.target" ]; + wants = [ "keys.target" ]; + startAt = cfg.startAt; + inherit preStart; + serviceConfig = { + ExecStart = "${main}/bin/mysqlbackup"; + User = cfg.user; + PermissionsStartOnly = true; + }; + }; + }; +} diff --git a/modules/apps/mywatch.nix b/modules/apps/mywatch.nix new file mode 100644 index 0000000..732033c --- /dev/null +++ b/modules/apps/mywatch.nix @@ -0,0 +1,61 @@ +{ config, pkgs, lib, ... }: + +let + inherit (builtins) filter toString; + inherit (lib) types mkOption mkEnableOption mkIf hasPrefix + concatStrings optionalString; + inherit (types) str path int nullOr; + + cfg = config.nixsap.apps.mywatch; + + ExecStart = concatStrings [ + "${pkgs.mywatch}/bin/mywatch" + (if (cfg.port != null) + then " -p ${toString cfg.port}" + else " -s '${cfg.socket}'") + " '${cfg.myFile}'" + ]; + + keys = filter (f: f != null && hasPrefix "/run/keys/" f) [ cfg.myFile ]; + +in { + options.nixsap.apps.mywatch = { + enable = mkEnableOption "MyWatch"; + user = mkOption { + description = "User to run as"; + default = "mywatch"; + type = str; + }; + port = mkOption { + description = "TCP port to listen on (insecure)"; + default = null; + type = nullOr int; + }; + socket = mkOption { + description = "UNIX socket to listen on. 
Ignored when TCP port is set"; + default = "/tmp/mywatch.sock"; + type = path; + }; + myFile = mkOption { + description = "MySQL client configuration file"; + type = path; + }; + }; + + config = mkIf cfg.enable { + nixsap.system.users.daemons = [ cfg.user ]; + nixsap.deployment.keyrings.${cfg.user} = keys; + systemd.services.mywatch = { + description = "watch queries on multiple MySQL servers"; + wantedBy = [ "multi-user.target" ]; + wants = [ "keys.target" ]; + after = [ "keys.target" "network.target" ]; + serviceConfig = { + inherit ExecStart; + User = cfg.user; + Restart = "on-failure"; + }; + }; + }; +} + diff --git a/modules/apps/nginx.nix b/modules/apps/nginx.nix new file mode 100644 index 0000000..3765d67 --- /dev/null +++ b/modules/apps/nginx.nix @@ -0,0 +1,146 @@ +{ config, pkgs, lib, ... }: + +let + + inherit (builtins) + filter isBool toString ; + + inherit (lib) + concatMapStrings concatStringsSep filterAttrs mapAttrsToList mkEnableOption + mkIf mkOption ; + + inherit (lib.types) + attrsOf bool either enum int lines nullOr path str submodule ; + + + cfg = config.nixsap.apps.nginx; + explicit = filterAttrs (n: v: n != "_module" && v != null); + + attrs = opts: submodule { options = opts; }; + default = d: t: mkOption { type = t; default = d; }; + optional = t: mkOption { type = nullOr t; default = null; }; + + show = v: if isBool v then (if v then "on" else "off") else toString v; + + format = indent: set: + let mkEntry = k: v: "${indent}${k} ${show v};"; + in concatStringsSep "\n" (mapAttrsToList mkEntry (explicit set)); + + mkServer = name: text: pkgs.writeText "nginx-${name}.conf" '' + # ${name}: + server { + ${text} + } + ''; + + # Hardcode defaults that could be overriden in server context. + # Add options for http-only directives. + nginx-conf = pkgs.writeText "nginx.conf" '' + daemon off; + user ${cfg.user} ${cfg.user}; + pid ${cfg.runDir}/nginx.pid; + + ${format "" cfg.main} + + events { + ${format " " cfg.events} + } + + http { + include ${pkgs.nginx}/conf/mime.types; + default_type application/octet-stream; + + access_log off; + error_log stderr; + + gzip on; + keepalive_timeout 65; + sendfile on; + ssl_prefer_server_ciphers on; + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + tcp_nodelay on; + tcp_nopush on; + types_hash_max_size 2048; + + # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/ + fastcgi_param HTTP_PROXY ""; + proxy_set_header Proxy ""; + + ${concatMapStrings (s: "include ${s}\n;") (mapAttrsToList mkServer cfg.http.servers)} + } + ''; + + exec = "${pkgs.nginx}/bin/nginx -c ${nginx-conf} -p ${cfg.stateDir}"; + +in { + + options.nixsap.apps.nginx = { + user = mkOption { + description = "User to run as"; + type = str; + default = "nginx"; + }; + stateDir = mkOption { + description = "Directory holding all state for nginx to run"; + type = path; + default = "/nginx"; + }; + runDir = mkOption { + description = '' + Directory for sockets and PID-file. + UNIX-sockets created by nginx are world-writable. + So if you want some privacy, put sockets in this directory. + It is owned by nginx user and group, and has mode 0640. 
+ ''; + type = path; + readOnly = true; + default = "/run/nginx"; + }; + + main = default {} (attrs { + pcre_jit = optional bool; + timer_resolution = optional int; + worker_cpu_affinity = optional str; + worker_priority = optional int; + worker_processes = default "auto" (either int (enum ["auto"])); + worker_rlimit_core = optional int; + worker_rlimit_nofile = optional int; + }); + + events = default {} (attrs { + accept_mutex = optional bool; + accept_mutex_delay = optional int; + multi_accept = optional bool; + worker_aio_requests = optional int; + worker_connections = optional int; + }); + + http = default {} (attrs { + servers = default {} (attrsOf lines); + }); + }; + + config = mkIf ({} != explicit cfg.http.servers) { + nixsap.system.users.daemons = [ cfg.user ]; + systemd.services.nginx = { + description = "web/proxy server"; + wants = [ "keys.target" ]; + after = [ "keys.target" "local-fs.target" "network.target" ]; + wantedBy = [ "multi-user.target" ]; + preStart = '' + rm -rf '${cfg.runDir}' + mkdir -p '${cfg.stateDir}/logs' '${cfg.runDir}' + chown -Rc '${cfg.user}:${cfg.user}' '${cfg.stateDir}' '${cfg.runDir}' + chmod -Rc u=rwX,g=rX,o= '${cfg.stateDir}' '${cfg.runDir}' + ''; + serviceConfig = { + ExecStart = exec; + ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID"; + RestartSec = "10s"; + StartLimitInterval = "1min"; + Restart = "always"; + }; + }; + }; +} + diff --git a/modules/apps/pgbackup.nix b/modules/apps/pgbackup.nix new file mode 100644 index 0000000..3428843 --- /dev/null +++ b/modules/apps/pgbackup.nix @@ -0,0 +1,337 @@ +{ config, pkgs, lib, ... }: +let + + inherit (builtins) + elem isBool isList isString toString ; + inherit (lib) + concatMapStringsSep concatStringsSep filter filterAttrs + findFirst flatten hasPrefix mapAttrsToList mkIf + mkOption optionalString removeSuffix ; + inherit (lib.types) + bool either enum int listOf nullOr path str submodule ; + + cfg = config.nixsap.apps.pgbackup; + privateDir = "/run/pgbackup"; + + s3cmd = "${pkgs.s3cmd}/bin/s3cmd ${optionalString (cfg.s3cfg != null) "-c '${cfg.s3cfg}'"}"; + + gpgPubKeys = flatten [ cfg.encrypt ]; + gpg = "${pkgs.gpg}/bin/gpg2"; + pubring = pkgs.runCommand "pubring.gpg" {} '' + ${gpg} --homedir . 
--import ${toString gpgPubKeys} + cp pubring.gpg $out + ''; + + default = d: t: mkOption { type = t; default = d; }; + optional = type: mkOption { type = nullOr type; default = null; }; + sub = options: submodule { inherit options; } ; + concatMapAttrsSep = s: f: attrs: concatStringsSep s (mapAttrsToList f attrs); + + command = sub + { + blobs = optional bool; + clean = optional bool; + compress = default 9 int; + create = optional bool; + data-only = optional bool; + dbname = optional str; + exclude-schema = optional (either str (listOf str)); + exclude-table = optional (either str (listOf str)); + exclude-table-data = optional (either str (listOf str)); + format = default "plain" (enum ["plain" "custom" "directory" "tar"]); + host = optional str; + if-exists = optional bool; + inserts = optional bool; + jobs = default 2 int; + oids = optional bool; + port = optional int; + quote-all-identifiers = optional bool; + role = optional str; + schema = optional (either str (listOf str)); + schema-only = optional bool; + serializable-deferrable = optional bool; + table = optional (either str (listOf str)); + username = optional str; + }; + + job = o: + let + dbname = findFirst (n: n != null) cfg.user [ o.dbname o.username ]; + name = "pg_dump" + + optionalString (o.host != null && o.host != "localhost") "-${o.host}" + + optionalString (o.port != null) "-${toString o.port}" + + "-${dbname}" + + "-${o.format}"; + + args = filterAttrs (n: v: + v != null && n != "_module" + && (n == "host" -> v != "localhost") + && (n == "jobs" -> o.format == "directory") + # XXX will use pigz for others: + && (n == "compress" -> elem o.format ["directory" "custom"]) + ) o; + + mkArg = k: v: + if isBool v then (optionalString v "--${k}") + else if isList v then concatMapStringsSep " " (i: "--${k}='${i}'") v + else if isString v then "--${k}='${v}'" + else "--${k}=${toString v}" ; + + # XXX: Use the latest pg_dump: + pg_dump = pkgs.writeBashScript name '' + ${optionalString (cfg.pgpass != null) "export PGPASSFILE='${cfg.pgpass}'"} + exec ${pkgs.postgresql95}/bin/pg_dump \ + ${concatMapAttrsSep " " mkArg args} \ + "$@" + ''; + + compExt = optionalString (o.compress > 0) ".gz"; + compPipe = optionalString (o.compress > 0) + "| ${pkgs.pigz}/bin/pigz -${toString o.compress} -p${toString o.jobs}"; + suff = if o.format == "directory" then "dir.tar" + else if o.format == "tar" then "tar${compExt}" + else if o.format == "custom" then "pgdump" + else "pgsql${compExt}" ; + + in pkgs.writeBashScript "${name}-job" '' + set -euo pipefail + cd "${cfg.dumpDir}/$DATE" + ${ + if o.host != null && o.host != "localhost" then + "host='${o.host}'" + else + "host=$(${pkgs.nettools}/bin/hostname -f)" + } + + dump="${dbname}@''${host}${optionalString (o.port != null) ":${toString o.port}"},$DATE.${suff}" + ${ + if (gpgPubKeys != []) then + ''aim="$dump.gpg"'' + else + ''aim="$dump"'' + } + + if ! 
[ -r "$aim" ]; then + ${ + if o.format == "directory" then '' + rm -rf "$dump.tmp" + ${pg_dump} -f "$dump.tmp" + ${pkgs.gnutar}/bin/tar \ + --owner=0 --group=0 --mode u=rwX,g=rX,o= \ + --remove-files --transform 's,\.dir\.tar\.tmp,,' -c "$dump.tmp" -f "$dump" + rm -rf "$dump.tmp" + '' else if o.format == "custom" then '' + ${pg_dump} -f "$dump.tmp" + mv "$dump".tmp "$dump" + '' else '' + ${pg_dump} ${compPipe} > "$dump.tmp" + mv "$dump".tmp "$dump" + '' + } + + ${optionalString (gpgPubKeys != []) '' + recipient=( $(${gpg} --homedir '${privateDir}/gnupg' -k --with-colons --fast-list-mode | \ + ${pkgs.gawk}/bin/awk -F: '/^pub/{print $5}') ) + r=( "''${recipient[@]/#/-r}" ) + ${gpg} --homedir '${privateDir}/gnupg' --batch --no-tty --yes \ + "''${r[@]}" --trust-model always \ + -v -e "$dump" + rm -f "$dump" + ''} + else + echo "$aim exists. Not dumping." >&2 + fi + ${optionalString (cfg.s3uri != null) '' + remote="${removeSuffix "/" cfg.s3uri}/$DATE/$aim" + if ! ${s3cmd} ls "$remote" | ${pkgs.gnugrep}/bin/grep -qF "/$aim"; then + ${s3cmd} put "$aim" "$remote" + else + echo "$remote exists. Not uploading." >&2 + fi + ''} + ''; + + preStart = '' + mkdir --mode=0750 -p '${cfg.dumpDir}' + chown -R ${cfg.user}:${cfg.user} '${cfg.dumpDir}' + chmod -R u=rwX,g=rX,o= ${cfg.dumpDir} + + rm -rf '${privateDir}' + mkdir --mode=0700 -p '${privateDir}' + chown ${cfg.user}:${cfg.user} '${privateDir}' + ''; + + main = pkgs.writeBashScriptBin "pgbackup" '' + set -euo pipefail + umask 0027 + DATE=$(date --iso-8601) + HOME='${privateDir}' + PARALLEL_SHELL=${pkgs.bash}/bin/bash + export DATE + export HOME + export PARALLEL_SHELL + + clean() { + ${pkgs.findutils}/bin/find '${cfg.dumpDir}' \ + -name '*.tmp' -exec rm -rf {} + || true + } + + listSets() { + ${pkgs.findutils}/bin/find '${cfg.dumpDir}' \ + -maxdepth 1 -mindepth 1 -type d -name '????-??-??' \ + | sort -V + } + + enoughStorage() { + local n + local used + local total + local avg + local p + n=$(listSets | wc -l) + used=$(du -x -s --block-size=1M '${cfg.dumpDir}' | cut -f1) + total=$(df --output=size --block-size=1M '${cfg.dumpDir}' | tail -n 1) + if [ "$n" -eq 0 ]; then + echo "no sets" >&2 + return 0 + fi + + avg=$(( used / n )) + p=$(( 100 * avg * (n + 1) / total )) + printf "estimated storage: %d of %d MiB (%d%%, max ${toString cfg.storage}%%)\n" \ + "$((used + avg))" "$total" "$p" >&2 + if [ "$p" -le ${toString cfg.storage} ]; then + return 0 + else + return 1 + fi + } + + clean + + listSets | head -n -${toString (cfg.slots - 1)} \ + | ${pkgs.findutils}/bin/xargs --no-run-if-empty rm -rfv \ + || true + + while ! enoughStorage; do + listSets | head -n 1 \ + | ${pkgs.findutils}/bin/xargs --no-run-if-empty rm -rfv \ + || true + done + + mkdir -p "${cfg.dumpDir}/$DATE" + + ${optionalString (gpgPubKeys != []) '' + # shellcheck disable=SC2174 + mkdir --mode=0700 -p '${privateDir}/gnupg' + ln -sf ${pubring} '${privateDir}/gnupg/pubring.gpg' + ''} + + failed=0 + log="${cfg.dumpDir}/$DATE/joblog.txt" + + # shellcheck disable=SC2016 + ${pkgs.parallel}/bin/parallel \ + --halt-on-error 0 \ + --joblog "$log" \ + --jobs 50% \ + --line-buffer \ + --no-notice \ + --no-run-if-empty \ + --retries 2 \ + --rpl '{nixbase} s:^/nix/store/[^-]+-pg_dump-(.+)-job$:$1:' \ + --tagstr '* {nixbase}:' \ + --timeout ${toString (6 * 60 * 60)} ::: \ + ${concatMapStringsSep " " job cfg.pg_dump} \ + || failed=$? 
+ + cat "$log" + clean + + du -sh "${cfg.dumpDir}/$DATE" || true + exit "$failed" + ''; + + keys = filter (f: f != null && hasPrefix "/run/keys/" f) ( [cfg.pgpass cfg.s3cfg] ); + +in { + options.nixsap.apps.pgbackup = { + user = mkOption { + description = "User to run as"; + default = "pgbackup"; + type = str; + }; + + dumpDir = mkOption { + description = "Directory to save dumps in"; + default = "/pgbackup"; + type = path; + }; + + slots = mkOption { + description = '' + How many backup sets should be kept locally. + However, old sets will be removed anyway if storage + constraints apply. + ''; + default = 60; + type = int; + }; + + storage = mkOption { + description = '' + Percent of storage backups can occupy. + ''; + default = 75; + type = int; + }; + + encrypt = mkOption { + description = "Public GPG key(s) for encrypting the dumps"; + default = [ ]; + type = either path (listOf path); + }; + + s3cfg = mkOption { + description = "s3cmd config file (secret)"; + type = nullOr path; + default = null; + }; + + s3uri = mkOption { + description = "S3 bucket URI with prefix in s3cmd format"; + type = nullOr str; + default = null; + example = "s3://backups/nightly"; + }; + + pg_dump = mkOption { + description = "pg_dump commands"; + default = []; + type = listOf command; + }; + + pgpass = mkOption { + description = "The Password File (secret)"; + type = nullOr path; + default = null; + }; + }; + + config = mkIf (cfg.pg_dump != []) { + nixsap.system.users.daemons = [ cfg.user ]; + nixsap.deployment.keyrings.${cfg.user} = keys; + systemd.services.pgbackup = { + description = "PostgreSQL backup"; + after = [ "local-fs.target" "keys.target" "network.target" ]; + wants = [ "keys.target" ]; + startAt = "02:00"; + inherit preStart; + serviceConfig = { + ExecStart = "${main}/bin/pgbackup"; + User = cfg.user; + PermissionsStartOnly = true; + }; + }; + }; +} diff --git a/modules/apps/php-fpm.nix b/modules/apps/php-fpm.nix new file mode 100644 index 0000000..e69be73 --- /dev/null +++ b/modules/apps/php-fpm.nix @@ -0,0 +1,139 @@ +{ config, pkgs, lib, ... 
}: + +let + + inherit (builtins) + filter isAttrs isBool toString ; + inherit (lib) + concatStringsSep filterAttrs foldl hasPrefix + mapAttrsToList mkIf mkOption types ; + inherit (types) + attrsOf bool either enum int nullOr package path str + submodule ; + + explicit = filterAttrs (n: v: n != "_module" && v != null); + concatNonEmpty = sep: list: concatStringsSep sep (filter (s: s != "") list); + + attrs = opts: submodule { options = opts; }; + default = d: t: mkOption { type = t; default = d; }; + mandatory = t: mkOption { type = t; }; + optional = t: mkOption { type = nullOr t; default = null; }; + + instances = explicit (config.nixsap.apps.php-fpm); + + users = mapAttrsToList (_: v: v.pool.user) instances; + + mkService = name: cfg: + let + show = v: if isBool v then (if v then "yes" else "no") else toString v; + + mkGroup = group: opts: main: + let f = k: v: if k == main + then "${group} = ${show v}" + else "${group}.${k} = ${show v}"; + in concatNonEmpty "\n" (mapAttrsToList f (explicit opts)); + + mkEnv = t: k: v: "${t}[${k}] = ${show v}"; + + mkPool = k: v: + if k == "listen" then mkGroup k v "socket" + else if k == "env" || hasPrefix "php_" k then concatNonEmpty "\n" (mapAttrsToList (mkEnv k) v) + else if k == "pm" then mkGroup k v "strategy" + else if isAttrs v then mkGroup k v "" + else "${k} = ${show v}"; + + mkGlobal = k: v: + if k == "php-ini" || k == "pool" || k == "package" then "" + else if isAttrs v then mkGroup k v "" + else "${k} = ${show v}"; + + conf = pkgs.writeText "php-fpm-${name}.conf" '' + [global] + daemonize = no + ${concatNonEmpty "\n" (mapAttrsToList mkGlobal (explicit cfg))} + + [pool] + ${concatNonEmpty "\n" (mapAttrsToList mkPool (explicit cfg.pool))} + ''; + exec = "${cfg.package}/bin/php-fpm --fpm-config ${conf} " + + ( if cfg.php-ini != null + then "--php-ini ${cfg.php-ini}" + else "--no-php-ini" ); + in { + "php-fpm-${name}" = { + description = "PHP FastCGI Process Manager (${name})"; + after = [ "local-fs.target" ]; + wantedBy = [ "multi-user.target" ]; + serviceConfig = { + ExecStart = exec; + Restart = "always"; + }; + }; + }; + +in { + + options.nixsap.apps.php-fpm = default {} + (attrsOf (submodule( { config, name, ... 
}: { + options = { + package = default pkgs.php package; + emergency_restart_interval = optional int; + emergency_restart_threshold = optional int; + error_log = default "/var/log/php-fpm-${name}.log" path; + log_level = optional (enum ["alert" "error" "warning" "notice" "debug"]); + php-ini = optional path; + process_control_timeout = optional int; + rlimit_core = optional int; + rlimit_files = optional int; + + process = optional (attrs { + max = optional int; + priority = optional int; + }); + + pool = default {} (submodule({ + options = { + catch_workers_output = optional bool; + chdir = optional path; + clear_env = optional bool; + env = default {} (attrsOf str); + php_admin_flag = default {} (attrsOf bool); + php_admin_value = default {} (attrsOf (either str int)); + php_flag = default {} (attrsOf bool); + php_value = default {} (attrsOf (either str int)); + request_terminate_timeout = optional int; + rlimit_core = optional int; + rlimit_files = optional int; + user = default "php-fpm-${name}" str; + listen = default {} (attrs { + acl_groups = optional str; + backlog = optional int; + group = optional str; + mode = optional str; + owner = default config.pool.user str; + socket = default "/run/php-fpm-${name}.sock" path; + }); + pm = mandatory (attrs { + max_children = mandatory int; + max_requests = optional int; + max_spare_servers = optional int; + min_spare_servers = optional int; + start_servers = optional int; + status_path = optional path; + strategy = mandatory (enum ["static" "ondemand" "dynamic"]); + }); + ping = optional (attrs { + path = optional path; + response = optional str; + }); + }; + })); + }; + }))); + + config = mkIf ({} != instances) { + nixsap.system.users.daemons = users; + systemd.services = foldl (a: b: a//b) {} (mapAttrsToList mkService instances); + }; +} + diff --git a/modules/apps/postgresql/default.nix b/modules/apps/postgresql/default.nix new file mode 100644 index 0000000..847fc75 --- /dev/null +++ b/modules/apps/postgresql/default.nix @@ -0,0 +1,203 @@ +{ config, pkgs, lib, ... 
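To illustrate how these options map onto a generated pool file (the instance name `myapp` and all values are made up), the mkPool/mkGroup helpers above render attribute sets as `group = value` and `group.key = value` lines, roughly like this:

  nixsap.apps.php-fpm.myapp = {
    pool = {
      user = "myapp";
      listen.socket = "/run/php-fpm-myapp.sock";  # rendered as: listen = /run/php-fpm-myapp.sock
      listen.mode   = "0660";                     # rendered as: listen.mode = 0660
      pm = {
        strategy          = "dynamic";            # rendered as: pm = dynamic
        max_children      = 10;                   # rendered as: pm.max_children = 10
        start_servers     = 2;
        min_spare_servers = 1;
        max_spare_servers = 3;
      };
      php_admin_value.memory_limit = "256M";      # rendered as: php_admin_value[memory_limit] = 256M
    };
  };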
}: +let + + inherit (builtins) + match toString ; + + inherit (lib) + concatMapStrings concatStringsSep filter filterAttrs foldl hasPrefix + isBool isInt isList isString length mapAttrs' mapAttrsToList mkDefault + mkIf mkOption nameValuePair types ; + + inherit (types) + attrsOf lines listOf nullOr package path str submodule ; + + concatNonEmpty = sep: list: concatStringsSep sep (filter (s: s != "") list); + explicit = filterAttrs (n: v: n != "_module" && v != null); + + instances = explicit config.nixsap.apps.postgresql; + users = mapAttrsToList (_: v: v.user) instances; + + isFloat = x: match "^[0-9]+(\\.[0-9]+)?$" (toString x) != null; + isKey = s: s != null && hasPrefix "/run/keys/" s; + + keyrings = mapAttrs' (_: i: nameValuePair "${i.user}" [ i.server.ssl_key_file ] + ) (filterAttrs (_: i: isKey i.server.ssl_key_file) instances); + + mkService = name: opts: + let + inherit (opts) user initdb; + inherit (opts.server) data_directory port hba_file ident_file; + ident_file_path = pkgs.writeText "${name}-ident_file" '' + postgres ${user} postgres + ${ident_file} + ''; + hba_file_path = pkgs.writeText "${name}-hba_file" '' + local all postgres peer map=postgres + ${hba_file} + ''; + show = n: v: if isBool v then (if v then "yes" else "no") + else if n == "ident_file" then "'${ident_file_path}'" + else if n == "hba_file" then "'${hba_file_path}'" + else if isFloat v then toString v + else if isString v then "'${v}'" + else if isList v then "'${concatStringsSep "," v}'" + else toString v; + conf = pkgs.writeText "pgsql-${name}.conf" ( + concatStringsSep "\n" (mapAttrsToList (n: v: "${n} = ${show n v}") (explicit opts.server)) + ); + + preStart = '' + mkdir -v -p '${data_directory}' + chown -R '${user}:${user}' '${data_directory}' + chmod -R u=rwX,g=,o= '${data_directory}' + ''; + + main = pkgs.writeBashScriptBin "pgsql-${name}" '' + set -euo pipefail + if [ ! -f '${data_directory}/PG_VERSION' ]; then + ${initdb} '${data_directory}' + rm -f '${data_directory}/'*hba.conf + rm -f '${data_directory}/'*ident.conf + rm -f '${data_directory}/postgresql.conf' + fi + exec '${opts.package}/bin/postgres' -c 'config_file=${conf}' + ''; + + psql = "${opts.package}/bin/psql -v ON_ERROR_STOP=1 -p${toString port} -U postgres"; + + configure = + let + create = pkgs.writeText "pgsql-${name}-create.sql" '' + ${concatMapStrings (r: '' + SELECT create_role_if_not_exists('${r}'); + '') opts.roles} + ${concatMapStrings (d: '' + SELECT create_db_if_not_exists('${d}'); + '') opts.databases} + ''; + in pkgs.writeBashScriptBin "pgsql-${name}-conf" '' + set -euo pipefail + while ! 
${psql} -c ';'; do + sleep 5s + done + ${psql} -f ${./functions.pgsql} + ${psql} -f ${create} + ${psql} -f ${pkgs.writeText "pgsql-${name}.sql" opts.configure} + ''; + + needConf = (opts.configure != "") || (opts.roles != []) || (opts.databases != []); + + in { + "pgsql-${name}" = { + wantedBy = [ "multi-user.target" ]; + wants = [ "keys.target" ]; + after = [ "keys.target" "network.target" "local-fs.target" ]; + inherit preStart; + serviceConfig = { + ExecStart = "${main}/bin/pgsql-${name}"; + KillMode = "mixed"; + KillSignal = "SIGINT"; + PermissionsStartOnly = true; + TimeoutSec = 0; + User = user; + }; + }; + "pgsql-${name}-conf" = mkIf needConf { + wantedBy = [ "multi-user.target" ]; + after = [ "pgsql-${name}.service" ]; + requires = [ "pgsql-${name}.service" ]; + serviceConfig = { + ExecStart = "${configure}/bin/pgsql-${name}-conf"; + RemainAfterExit = true; + Type = "oneshot"; + User = user; + }; + }; + }; + + instance = submodule ( { config, name, ... }: { + options = { + user = mkOption { + description = "User to run as. Default is the instance name"; + type = str; + default = "pgsql-${name}"; + }; + roles = mkOption { + description = '' + List of roles to be created. These roles will be created if they do + not exist. That's it. You will have to ALTER these roles and GRANT + privileges using the `configure` option. Note that if you remove + roles from this list, they will NOT be deleted from the database. + You do not need this if this instance is a replica. + ''; + type = listOf str; + default = []; + }; + databases = mkOption { + description = '' + List of databases to be created. These databases will be created + if they do not exist. You do not need this if this instance is a replica. + ''; + type = listOf str; + default = []; + }; + configure = mkOption { + description = '' + SQL statements to be executed. This should be idempotent. + May include creation of roles and databases, granting privileges. + Usage of PL/pgSQL is highly encouraged. + You do not need this if this instance is a replica. + ''; + type = lines; + default = ""; + example = '' + SELECT create_role_if_not_exists('sproxy'); + ALTER ROLE sproxy RESET ALL; + ALTER ROLE sproxy LOGIN; + SELECT create_db_if_not_exists('sproxy'); + ALTER DATABASE sproxy OWNER TO sproxy; + ''; + }; + package = mkOption { + description = "PostgreSQL package"; + type = package; + default = pkgs.postgresql; + }; + server = mkOption { + description = "PostgreSQL server configuration"; + type = submodule (import ./server.nix); + }; + initdb = mkOption { + description = '' + Specifies the command to initialize the data directory. + This command will be executed after the data directory is created. + The path to the data directory will be appended to this command. + ''; + default = "${config.package}/bin/initdb -U postgres"; + example = "\${pkgs.postgresql94}/bin/pg_basebackup ...
-R -D"; + type = path; + }; + }; + config = { + server = { + data_directory = mkDefault "/postgresql/${name}"; + syslog_ident = mkDefault "pgsql-${name}"; + }; + }; + }); + +in { + options.nixsap.apps.postgresql = mkOption { + description = "Instances of PostgreSQL."; + type = attrsOf instance; + default = {}; + }; + + config = { + nixsap.deployment.keyrings = keyrings; + environment.systemPackages = [ pkgs.postgresql ]; + systemd.services = foldl (a: b: a//b) {} (mapAttrsToList mkService instances); + nixsap.system.users.daemons = users; + }; +} diff --git a/modules/apps/postgresql/functions.pgsql b/modules/apps/postgresql/functions.pgsql new file mode 100644 index 0000000..085cc5d --- /dev/null +++ b/modules/apps/postgresql/functions.pgsql @@ -0,0 +1,25 @@ +CREATE EXTENSION IF NOT EXISTS dblink; + +DROP FUNCTION IF EXISTS create_role_if_not_exists(TEXT); +CREATE FUNCTION create_role_if_not_exists(IN name TEXT) +RETURNS VOID AS $$ +BEGIN +IF NOT EXISTS (SELECT 1 FROM pg_catalog.pg_roles WHERE rolname = name) THEN + EXECUTE format('CREATE ROLE %I', name); +END IF; +END; +$$ LANGUAGE PLPGSQL; + +DROP FUNCTION IF EXISTS create_db_if_not_exists(TEXT); +CREATE FUNCTION create_db_if_not_exists(IN dbname TEXT) +RETURNS VOID AS $$ +DECLARE port INT; +DECLARE junk TEXT; +BEGIN +IF NOT EXISTS (SELECT 1 FROM pg_catalog.pg_database WHERE datname = dbname) THEN + SELECT setting FROM pg_settings WHERE name = 'port' INTO port; + SELECT dblink_exec('user=postgres dbname=postgres port=' || port, 'CREATE DATABASE ' || quote_ident(dbname)) INTO junk; +END IF; +END; +$$ LANGUAGE PLPGSQL; + diff --git a/modules/apps/postgresql/server.nix b/modules/apps/postgresql/server.nix new file mode 100644 index 0000000..864af5c --- /dev/null +++ b/modules/apps/postgresql/server.nix @@ -0,0 +1,218 @@ +{ lib, ... 
}: +let + + inherit (lib) mkOption mkOptionType mkIf types isInt isString + all length splitString stringToCharacters filter; + inherit (types) either enum attrsOf nullOr listOf str path lines int bool; + inherit (builtins) toString match; + + default = d: t: mkOption { type = t; default = d; }; + mandatory = t: mkOption { type = t; }; + optional = t: mkOption { type = nullOr t; default = null; }; + + isFloat = x: match "^[0-9]+(\\.[0-9]+)?$" (toString x) != null; + + float = mkOptionType { + name = "positive float"; + check = isFloat; + }; + +in { + options = { + DateStyle = optional str; + IntervalStyle = optional (enum [ "sql_standard" "postgres_verbose" "iso_8601" ]); + TimeZone = optional str; + application_name = optional str; + archive_command = optional path; + archive_mode = optional bool; + archive_timeout = optional int; + array_nulls = optional bool; + authentication_timeout = optional int; + autovacuum = optional bool; + autovacuum_analyze_scale_factor = optional float; + autovacuum_analyze_threshold = optional int; + autovacuum_freeze_max_age = optional int; + autovacuum_max_workers = optional int; + autovacuum_multixact_freeze_max_age = optional int; + autovacuum_naptime = optional int; + autovacuum_vacuum_cost_delay = optional int; + autovacuum_vacuum_cost_limit = optional int; + autovacuum_vacuum_scale_factor = optional float; + autovacuum_vacuum_threshold = optional int; + autovacuum_work_mem = optional int; + backslash_quote = optional (enum [ "on" "off" "safe_encoding" ]); + bgwriter_delay = optional int; + bgwriter_lru_maxpages = optional int; + bgwriter_lru_multiplier = optional int; + bytea_output = optional (enum [ "hex" "escape" ]); + check_function_bodies = optional bool; + checkpoint_completion_target = optional float; + checkpoint_segments = optional int; + checkpoint_timeout = optional int; + checkpoint_warning = optional int; + client_encoding = optional str; + client_min_messages = optional (enum [ "DEBUG5" "DEBUG4" "DEBUG3" "DEBUG2" "DEBUG1" "LOG" "NOTICE" "WARNING" "ERROR" "FATAL" "PANIC" ]); + commit_delay = optional int; + commit_siblings = optional int; + constraint_exclusion = optional (enum [ "on" "partition" "off" ]); + cpu_index_tuple_cost = optional float; + cpu_operator_cost = optional float; + cpu_tuple_cost = optional float; + cursor_tuple_fraction = optional float; + data_directory = mandatory path; + deadlock_timeout = optional int; + debug_pretty_print = optional bool; + debug_print_parse = optional bool; + debug_print_plan = optional bool; + debug_print_rewritten = optional bool; + default_statistics_target = optional int; + default_tablespace = optional str; + default_text_search_config = optional str; + default_transaction_deferrable = optional bool; + default_transaction_isolation = optional (enum [ "read uncommitted" "read committed" "repeatable read" "serializable" ]); + default_transaction_read_only = optional bool; + default_with_oids = optional bool; + dynamic_shared_memory_type = optional (enum [ "posix" "sysv" "mmap" "none" ]); + effective_cache_size = optional int; + effective_io_concurrency = optional int; + enable_bitmapscan = optional bool; + enable_hashagg = optional bool; + enable_hashjoin = optional bool; + enable_indexonlyscan = optional bool; + enable_indexscan = optional bool; + enable_material = optional bool; + enable_mergejoin = optional bool; + enable_nestloop = optional bool; + enable_seqscan = optional bool; + enable_sort = optional bool; + enable_tidscan = optional bool; + escape_string_warning = optional bool; + 
exit_on_error = optional bool; + extra_float_digits = optional int; + from_collapse_limit = optional int; + fsync = optional bool; + full_page_writes = optional bool; + geqo = optional bool; + geqo_effort = optional int; + geqo_generations = optional int; + geqo_pool_size = optional int; + geqo_seed = optional float; + geqo_selection_bias = optional float; + geqo_threshold = optional int; + hba_file = default "" lines; + hot_standby = optional bool; + hot_standby_feedback = optional bool; + huge_pages = optional (enum [ "on" "off" "try" ]); + ident_file = default "" lines; + join_collapse_limit = optional int; + lc_messages = optional str; + lc_monetary = optional str; + lc_numeric = optional str; + lc_time = optional str; + listen_addresses = optional (either (listOf str) str); + lo_compat_privileges = optional bool; + lock_timeout = optional int; + log_autovacuum_min_duration = optional int; + log_checkpoints = optional bool; + log_connections = optional bool; + log_destination = optional (enum [ "stderr" "csvlog" "syslog" ]); + log_directory = optional path; + log_disconnections = optional bool; + log_duration = optional bool; + log_error_verbosity = optional (enum [ "TERSE" "DEFAULT" "VERBOSE" ]); + log_executor_stats = optional bool; + log_filename = optional str; + log_hostname = optional bool; + log_line_prefix = optional str; + log_lock_waits = optional bool; + log_min_duration_statement = optional int; + log_min_error_statement = optional (enum [ "DEBUG5" "DEBUG4" "DEBUG3" "DEBUG2" "DEBUG1" "LOG" "NOTICE" "WARNING" "ERROR" "FATAL" "PANIC" ]); + log_min_messages = optional (enum [ "DEBUG5" "DEBUG4" "DEBUG3" "DEBUG2" "DEBUG1" "LOG" "NOTICE" "WARNING" "ERROR" "FATAL" "PANIC" ]); + log_parser_stats = optional bool; + log_planner_stats = optional bool; + log_rotation_age = optional int; + log_rotation_size = optional int; + log_statement = optional (enum [ "none" "ddl" "mod" "all" ]); + log_statement_stats = optional bool; + log_temp_files = optional int; + log_timezone = optional str; + log_truncate_on_rotation = optional bool; + logging_collector = optional bool; + maintenance_work_mem = optional int; + max_connections = optional int; + max_files_per_process = optional int; + max_locks_per_transaction = optional int; + max_pred_locks_per_transaction = optional int; + max_prepared_transactions = optional int; + max_replication_slots = optional int; + max_stack_depth = optional int; + max_standby_archive_delay = optional int; + max_standby_streaming_delay = optional int; + max_wal_senders = optional int; + max_worker_processes = optional int; + password_encryption = optional bool; + port = default 5432 int; + quote_all_identifiers = optional bool; + random_page_cost = optional float; + restart_after_crash = optional bool; + search_path = optional (either (listOf str) str); + seq_page_cost = optional float; + session_replication_role = optional (enum [ "origin" "replica" "local" ]); + shared_buffers = optional int; + sql_inheritance = optional bool; + ssl = optional bool; + ssl_ca_file = optional path; + ssl_cert_file = optional path; + ssl_ciphers = optional str; + ssl_crl_file = optional path; + ssl_ecdh_curve = optional str; + ssl_key_file = optional path; + ssl_prefer_server_ciphers = optional bool; + ssl_renegotiation_limit = optional int; + standard_conforming_strings = optional bool; + statement_timeout = optional int; + stats_temp_directory = optional path; + superuser_reserved_connections = optional int; + synchronize_seqscans = optional bool; + synchronous_commit = optional 
(enum [ "on" "remote_write" "local" "off" ]); + synchronous_standby_names = optional (either (listOf str) str); + syslog_ident = optional str; + tcp_keepalives_count = optional int; + tcp_keepalives_idle = optional int; + tcp_keepalives_interval = optional int; + temp_buffers = optional int; + temp_file_limit = optional int; + temp_tablespaces = optional str; + timezone_abbreviations = optional str; + track_activities = optional bool; + track_activity_query_size = optional int; + track_counts = optional bool; + track_functions = optional (enum [ "none" "pl" "all" ]); + track_io_timing = optional bool; + transform_null_equals = optional bool; + update_process_title = optional bool; + vacuum_cost_delay = optional int; + vacuum_cost_limit = optional int; + vacuum_cost_page_dirty = optional int; + vacuum_cost_page_hit = optional int; + vacuum_cost_page_miss = optional int; + vacuum_defer_cleanup_age = optional int; + vacuum_freeze_min_age = optional int; + vacuum_freeze_table_age = optional int; + vacuum_multixact_freeze_min_age = optional int; + vacuum_multixact_freeze_table_age = optional int; + wal_buffers = optional int; + wal_keep_segments = optional int; + wal_level = optional (enum [ "minimal" "archive" "hot_standby" "logical" ]); + wal_log_hints = optional bool; + wal_receiver_status_interval = optional int; + wal_receiver_timeout = optional int; + wal_sender_timeout = optional int; + wal_sync_method = optional (enum [ "open_datasync" "fdatasync" "fsync" "fsync_writethrough" "open_sync" ]); + wal_writer_delay = optional int; + work_mem = optional int; + xmlbinary = optional (enum [ "base64" "hex" ]); + xmloption = optional (enum [ "DOCUMENT" "CONTENT" ]); + }; +} + diff --git a/modules/apps/sproxy-web.nix b/modules/apps/sproxy-web.nix new file mode 100644 index 0000000..351e82d --- /dev/null +++ b/modules/apps/sproxy-web.nix @@ -0,0 +1,71 @@ +{ config, pkgs, lib, ... }: + +let + + inherit (builtins) toString ; + inherit (lib) + concatStrings filter hasPrefix mkEnableOption mkIf mkOption + optionalString types ; + inherit (types) + int nullOr path str ; + + cfg = config.nixsap.apps.sproxy-web; + + ExecStart = concatStrings [ + "${pkgs.sproxy-web}/bin/sproxy-web" + (optionalString (cfg.connectionString != null) " -c '${cfg.connectionString}'") + (if (cfg.port != null) + then " -p ${toString cfg.port}" + else " -s '${cfg.socket}'") + ]; + + keys = filter (f: f != null && hasPrefix "/run/keys/" f) [ cfg.pgPassFile ]; + +in { + options.nixsap.apps.sproxy-web = { + enable = mkEnableOption "Sproxy Web"; + user = mkOption { + description = "User to run as"; + default = "sproxy-web"; + type = str; + }; + connectionString = mkOption { + description = "PostgreSQL connection string"; + type = str; + example = "user=sproxy-web dbname=sproxy port=6001"; + }; + pgPassFile = mkOption { + description = "postgreSQL password file (secret)"; + default = null; + type = nullOr path; + }; + socket = mkOption { + description = "UNIX socket to listen on. 
Ignored when TCP port is set"; + default = "/tmp/sproxy-web.sock"; + type = path; + }; + port = mkOption { + description = "TCP port to listen on (insecure)"; + type = nullOr int; + default = null; + }; + }; + + config = mkIf cfg.enable { + nixsap.system.users.daemons = [ cfg.user ]; + nixsap.deployment.keyrings.${cfg.user} = keys; + systemd.services.sproxy-web = { + description = "Web interface to Sproxy database"; + wantedBy = [ "multi-user.target" ]; + wants = [ "keys.target" ]; + after = [ "keys.target" "network.target" "local-fs.target" ]; + serviceConfig = { + inherit ExecStart; + Restart = "on-failure"; + User = cfg.user; + }; + environment.PGPASSFILE = cfg.pgPassFile; + }; + }; +} + diff --git a/modules/apps/sproxy.nix b/modules/apps/sproxy.nix new file mode 100644 index 0000000..2c50554 --- /dev/null +++ b/modules/apps/sproxy.nix @@ -0,0 +1,144 @@ +{ config, pkgs, lib, ... }: + +let + + inherit (builtins) toString; + inherit (lib) + filter filterAttrs hasPrefix mapAttrsToList + mkEnableOption concatStrings mkIf mkOption types ; + inherit (types) + enum int nullOr attrsOf path str submodule ; + + explicit = filterAttrs (n: v: n != "_module" && v != null); + + cfg = config.nixsap.apps.sproxy; + + oauth2Options = concatStrings (mapAttrsToList (n: c: + if n == "google" then '' + client_id : ${c.client_id} + client_secret : ${c.client_secret_file} + '' else '' + ${n}_client_id : ${c.client_id} + ${n}_client_secret : ${c.client_secret_file} + '' + ) (explicit cfg.oauth2)); + + configFile = pkgs.writeText "sproxy.conf" '' + ${oauth2Options} + user : ${cfg.user} + cookie_domain : ${cfg.cookieDomain} + cookie_name : ${cfg.cookieName} + database : "${cfg.database}" + listen : 443 + log_level : ${cfg.logLevel} + log_target : stderr + ssl_certs : ${cfg.sslCert} + ssl_key : ${cfg.sslKey} + session_shelf_life : ${toString cfg.sessionShelfLife} + ${if cfg.backendSocket != null then '' + backend_socket : ${cfg.backendSocket} + '' else '' + backend_address : ${cfg.backendAddress} + backend_port : ${toString cfg.backendPort} + ''} + ''; + + keys = filter (hasPrefix "/run/keys/") + ( [ cfg.sslKey ] + ++ mapAttrsToList (_: c: c.client_secret_file) (explicit cfg.oauth2) + ); + + oauth2 = mkOption { + type = attrsOf (submodule { + options = { + client_id = mkOption { + type = str; + description = "OAuth2 client id"; + }; + client_secret_file = mkOption { + type = path; + description = "File with OAuth2 client secret"; + }; + }; + }); + example = { + google.client_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.apps.googleusercontent.com"; + google.client_secret_file = "/run/keys/google_oauth2_secret"; + }; + }; + +in { + options.nixsap.apps.sproxy = { + enable = mkEnableOption "SProxy"; + inherit oauth2; + user = mkOption { + description = "User to run as"; + default = "sproxy"; + type = str; + }; + cookieDomain = mkOption { + description = "Cookie domain"; + type = str; + example = "example.com"; + }; + cookieName = mkOption { + description = "Cookie name"; + type = str; + example = "sproxy"; + }; + logLevel = mkOption { + description = "Log level"; + default = "info"; + type = enum [ "info" "warn" "debug" ]; + }; + sslCert = mkOption { + description = "SSL certificate (in PEM format)"; + type = path; + }; + sslKey = mkOption { + description = "SSL key (in PEM format) - secret"; + type = path; + }; + backendAddress = mkOption { + description = "Backend TCP address"; + type = str; + default = "127.0.0.1"; + }; + backendPort = mkOption { + description = "Backend TCP port"; + type = int; + example = 8080; + }; + 
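A minimal usage sketch of the sproxy-web module defined above (the key name is a placeholder; the secret is deployed through nixsap.deployment.keyrings as wired up in the config section):

  nixsap.apps.sproxy-web = {
    enable = true;
    connectionString = "user=sproxy-web dbname=sproxy port=6001";
    pgPassFile = "/run/keys/pgpass-sproxy-web";  # placeholder key name
    # listens on /tmp/sproxy-web.sock by default; set `port` instead for (insecure) TCP
  };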
backendSocket = mkOption { + description = "Backend UNIX socket. If set, other backend options are ignored"; + type = nullOr path; + default = null; + }; + database = mkOption { + description = "PostgreSQL connection string"; + type = str; + example = "user=sproxy dbname=sproxy port=6001"; + }; + sessionShelfLife = mkOption { + description = "Session shelf life in seconds"; + type = int; + default = 3600 * 24 * 14; # two weeks + }; + }; + + config = mkIf cfg.enable { + nixsap.system.users.daemons = [ cfg.user ]; + nixsap.deployment.keyrings.${cfg.user} = keys; + systemd.services.sproxy = { + description = "Sproxy secure proxy"; + wantedBy = [ "multi-user.target" ]; + wants = [ "keys.target" ]; + after = [ "keys.target" "network.target" "local-fs.target" ]; + serviceConfig = { + ExecStart = "${pkgs.sproxy}/bin/sproxy --config=${configFile}"; + Restart = "on-failure"; + }; + }; + }; +} + diff --git a/modules/apps/strongswan/default.nix b/modules/apps/strongswan/default.nix new file mode 100644 index 0000000..d9a5034 --- /dev/null +++ b/modules/apps/strongswan/default.nix @@ -0,0 +1,101 @@ +{ config, pkgs, lib, ... }: + +let + + inherit (lib) mkIf mkOption types filterAttrs hasPrefix + mapAttrsToList concatStringsSep concatMapStringsSep; + inherit (types) listOf submodule path attrsOf; + inherit (builtins) filter toString toFile isList isBool; + + cfg = config.nixsap.apps.strongswan; + explicit = filterAttrs (n: v: n != "_module" && v != null); + + ipsecSecrets = toFile "ipsec.secrets" '' + ${concatMapStringsSep "\n" (f: "include ${f}") cfg.secrets} + ''; + + ipsecConf = + let + show = k: v: + if k == "charondebug" then concatStringsSep "," + (mapAttrsToList (t: l: "${t} ${toString l}") (explicit v)) + else if isList v then concatStringsSep "," v + else if isBool v then (if v then "yes" else "no") + else toString v; + makeSections = type: sections: concatStringsSep "\n\n" ( + mapAttrsToList (sec: attrs: + "${type} ${sec}\n" + + (concatStringsSep "\n" ( + mapAttrsToList (k: v: " ${k}=${show k v}") (explicit attrs) + )) + ) (explicit sections) + ); + setupSec = makeSections "config" { inherit (cfg) setup; }; + caSec = makeSections "ca" cfg.ca; + connSec = makeSections "conn" cfg.conn; + in toFile "ipsec.conf" '' + ${setupSec} + ${caSec} + ${connSec} + ''; + + strongswanConf = toFile "strongswan.conf" '' + charon { plugins { stroke { secrets_file = ${ipsecSecrets} } } } + starter { config_file = ${ipsecConf } } + ''; + +in { + options.nixsap.apps.strongswan = { + secrets = mkOption { + description = '' + A list of paths to IPSec secret files. These files will be included into + the main ipsec.secrets file by the "include" directive + ''; + type = listOf path; + default = []; + }; + setup = mkOption { + description = '' + A set of options for the ‘config setup’ section of the + ipsec.conf file. 
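A usage sketch of the sproxy module defined above, with placeholder domain, paths and OAuth2 client id (secret files live under /run/keys and are picked up by the keyrings config):

  nixsap.apps.sproxy = {
    enable = true;
    cookieDomain = "example.com";
    cookieName   = "sproxy";
    sslCert = ./certs/example.com.crt;                 # placeholder path
    sslKey  = "/run/keys/sproxy.key";                  # secret
    database = "user=sproxy dbname=sproxy port=6001";
    backendPort = 8080;
    oauth2.google = {
      client_id = "xxxxxxxx.apps.googleusercontent.com";
      client_secret_file = "/run/keys/google_oauth2_secret";
    };
  };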
Defines general configuration parameters + ''; + type = submodule (import ./options/setup.nix); + default = {}; + }; + ca = mkOption { + description = '' + A set of CAs (certification authorities) and their options for + the ‘ca xxx’ sections of the ipsec.conf file + ''; + type = attrsOf (submodule (import ./options/ca.nix)); + default = {}; + }; + conn = mkOption { + description = '' + A set of connections and their options for the ‘conn xxx’ + sections of the ipsec.conf file + ''; + type = attrsOf (submodule (import ./options/conn.nix)); + default = {}; + }; + }; + + config = mkIf ({} != explicit cfg.conn) { + nixsap.deployment.keyrings.root = filter (hasPrefix "/run/keys/") cfg.secrets; + environment.systemPackages = [ pkgs.strongswan ]; + systemd.services.strongswan = { + description = "strongSwan IPSec Service"; + wantedBy = [ "multi-user.target" ]; + path = with pkgs; [ config.system.sbin.modprobe iproute iptables utillinux ]; + wants = [ "keys.target" ]; + after = [ "network.target" "keys.target" "local-fs.target" ]; + environment = { + STRONGSWAN_CONF = strongswanConf; + }; + serviceConfig = { + ExecStart = "${pkgs.strongswan}/sbin/ipsec start --nofork"; + Restart = "always"; + }; + }; + }; +} diff --git a/modules/apps/strongswan/options/ca.nix b/modules/apps/strongswan/options/ca.nix new file mode 100644 index 0000000..e52b088 --- /dev/null +++ b/modules/apps/strongswan/options/ca.nix @@ -0,0 +1,20 @@ +{ config, lib, ... }: + +let + + inherit (lib) foldl; + inherit (lib.types) str path enum; + inherit (import ./lib.nix lib) optional; + +in { + options = foldl (a: b: a//b) {} [ + { also = optional str; } + { auto = optional (enum [ "add" "ignore" ]); } + { cacert = optional path; } + { certuribase = optional str; } + { crluri = optional str; } + { crluri2 = optional str; } + { ocspuri = optional str; } + { ocspuri2 = optional str; } + ]; +} diff --git a/modules/apps/strongswan/options/conn.nix b/modules/apps/strongswan/options/conn.nix new file mode 100644 index 0000000..ac1d88c --- /dev/null +++ b/modules/apps/strongswan/options/conn.nix @@ -0,0 +1,88 @@ +{ config, lib, ... 
}: + +let + + inherit (lib) foldl attrNames head; + inherit (lib.types) int str path either listOf enum; + inherit (import ./lib.nix lib) boolean boolOr default optional; + + leftright = map + (a: let n = head (attrNames a); + in { + "left${n}" = a."${n}"; + "right${n}" = a."${n}"; + }) + [ + { allowany = optional boolean; } + { auth = optional str; } + { auth2 = optional str; } + { ca = optional str; } + { ca2 = optional str; } + { cert = optional path; } + { cert2 = optional path; } + { dns = optional (either str (listOf str)); } + { firewall = optional boolean; } + { groups = optional (either str (listOf str)); } + { hostaccess = optional boolean; } + { id = optional str; } + { id2 = optional str; } + { policy = optional (either str (listOf str)); } + { sendcert = optional (boolOr [ "never" "always" "ifasked" ]); } + { sigkey = optional (either str path); } + { sourceip = optional str; } + { subnet = optional (either str (listOf str)); } + { updown = optional path; } + ]; + + conn = leftright ++ [ + { aaa_identity = optional str; } + { aggressive = optional boolean; } + { ah = optional (either str (listOf str)); } + { also = optional str; } + { authby = optional (enum [ "pubkey" "rsasig" "ecdsasig" "psk" "secret" "xauthrsasig" "xauthpsk" "never" ]); } + { auto = optional (enum [ "ignore" "add" "route" "start" ]); } + { closeaction = optional (enum [ "none" "clear" "hold" "restart" ]); } + { compress = optional boolean; } + { dpdaction = optional (enum [ "none" "clear" "hold" "restart" ]); } + { dpddelay = optional int; } + { dpdtimeout = optional int; } + { eap_identity = optional str; } + { esp = optional (either str (listOf str)); } + { forceencaps = optional boolean; } + { fragmentation = optional (boolOr [ "force" ]); } + { ike = optional (either str (listOf str)); } + { ikedscp = optional str; } + { ikelifetime = optional int; } + { inactivity = optional int; } + { installpolicy = optional boolean; } + { keyexchange = optional (enum [ "ikev1" "ikev2" ]); } + { keyingtries = optional (either int (enum [ "%forever" ])); } + { left = optional str; } + { lifebytes = optional int; } + { lifepackets = optional int; } + { lifetime = optional int; } + { marginbytes = optional int; } + { marginpackets = optional int; } + { mark = optional str; } + { mark_in = optional str; } + { mark_out = optional str; } + { me_peerid = optional str; } + { mediated_by = optional str; } + { mediation = optional boolean; } + { mobike = optional boolean; } + { modeconfig = optional (enum [ "push" "pull" ]); } + { reauth = optional boolean; } + { rekey = optional boolean; } + { rekeyfuzz = optional int; } + { replay_window = optional int; } + { reqid = optional int; } + { right = optional str; } + { tfc = optional (either int (enum [ "%mtu" ])); } + { type = optional (enum [ "tunnel" "transport" "transport_proxy" "passthrough" "drop" ]); } + { xauth = optional (enum [ "client" "server" ]); } + { xauth_identity = optional str; } + ]; + +in { + options = foldl (a: b: a//b) {} conn; +} diff --git a/modules/apps/strongswan/options/lib.nix b/modules/apps/strongswan/options/lib.nix new file mode 100644 index 0000000..5b0808f --- /dev/null +++ b/modules/apps/strongswan/options/lib.nix @@ -0,0 +1,26 @@ +lib: + +let + inherit (lib) mkOption mkOptionType mergeOneOption elem flip concatStringsSep; + inherit (lib.types) nullOr submodule bool either; + +in rec { + default = v: type: mkOption { type = type; default = v; }; + optional = type: mkOption { type = nullOr type; default = null; }; + set = opts: mkOption { type = 
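A usage sketch for the strongswan module above with a single made-up connection; names, addresses and key paths are placeholders, and the connection attributes are those from options/conn.nix:

  nixsap.apps.strongswan = {
    secrets = [ "/run/keys/ipsec.secrets.office" ];  # placeholder key name
    conn.office = {
      keyexchange = "ikev2";
      auto        = "start";
      left        = "%defaultroute";
      leftcert    = ./certs/gateway.pem;             # placeholder path
      right       = "vpn.example.com";
      rightsubnet = "10.0.0.0/16";
    };
  };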
nullOr (submodule { options = opts; }); default = null; }; + + # XXX https://github.com/NixOS/nixpkgs/issues/9826 + enum' = values: + let show = v: let t = builtins.typeOf v; + in if t == "string" then ''"${v}"'' + else if t == "int" then builtins.toString v + else ''<${t}>''; + in mkOptionType { + name = "one of ${concatStringsSep ", " (map show values)}"; + check = flip elem values; + merge = mergeOneOption; + }; + + boolean = either bool (enum' [ "yes" "no" ]); + boolOr = l: either bool (enum' ([ "yes" "no" ] ++ l)); +} diff --git a/modules/apps/strongswan/options/setup.nix b/modules/apps/strongswan/options/setup.nix new file mode 100644 index 0000000..d60a2af --- /dev/null +++ b/modules/apps/strongswan/options/setup.nix @@ -0,0 +1,24 @@ +{ config, lib, ... }: + +let + + inherit (lib) foldl genAttrs; + inherit (import ./lib.nix lib) boolean boolOr default optional set enum'; + + charondebug = genAttrs [ + "asn" "cfg" "chd" "dmn" + "enc" "esp" "ike" "imc" + "imv" "job" "knl" "lib" + "mgr" "net" "pts" "tls" + "tnc" + ] (_: optional (enum' [ (-1) 0 1 2 3 4 ])); + +in { + options = foldl (a: b: a//b) {} [ + { cachecrls = optional boolean; } + { charondebug = set charondebug; } + { charonstart = optional boolean; } + { strictcrlpolicy = optional (boolOr [ "ifuri" ]); } + { uniqueids = optional (boolOr [ "never" "replace" "keep" ]); } + ]; +} diff --git a/modules/default.nix b/modules/default.nix new file mode 100644 index 0000000..240d970 --- /dev/null +++ b/modules/default.nix @@ -0,0 +1,11 @@ +{lib, ... }: + +let + all = lib.filterAttrs + ( n: _: n != "default.nix" && ! lib.hasPrefix "." n ) + (builtins.readDir ./.); + +in { + imports = map (p: ./. + "/${p}") ( builtins.attrNames all ); +} + diff --git a/modules/deployment/default.nix b/modules/deployment/default.nix new file mode 100644 index 0000000..240d970 --- /dev/null +++ b/modules/deployment/default.nix @@ -0,0 +1,11 @@ +{lib, ... }: + +let + all = lib.filterAttrs + ( n: _: n != "default.nix" && ! lib.hasPrefix "." n ) + (builtins.readDir ./.); + +in { + imports = map (p: ./. + "/${p}") ( builtins.attrNames all ); +} + diff --git a/modules/deployment/keyrings.nix b/modules/deployment/keyrings.nix new file mode 100644 index 0000000..6230107 --- /dev/null +++ b/modules/deployment/keyrings.nix @@ -0,0 +1,64 @@ +{ config, lib, ... }: + +let + + inherit (builtins) + attrNames baseNameOf head match pathExists readFile toString ; + inherit (lib) + foldl genAttrs mapAttrsToList mkOption optionalAttrs types ; + inherit (types) + attrsOf listOf nullOr path ; + + allusers = config.users.users; + cfg = config.nixsap.deployment; + + # XXX If the file is encrypted: + # error: the contents of the file ‘...’ cannot be represented as a Nix string + read = key: + let + m = match "^([^(]*)\\[.+\\]$" key; + s = if m != null then head m else key; + in if cfg.secrets != null + then readFile (cfg.secrets + "/${s}") + else ""; + +in { + options.nixsap.deployment = { + secrets = mkOption { + description = '' + Directory with the secrets. If not specified, + each key will be an empty file. + ''; + type = nullOr path; + default = null; + example = "<secrets>"; + }; + keyrings = mkOption { + type = attrsOf (listOf path); + description = '' + Binds keys to a user. It's possible to share the same key between + multiple users, of course by different names: "/run/keys/foo" and + "/run/keys/foo[bar]" will use the same secret file "foo". 
+ ''; + default = {}; + example = { mysqlbackup = [ "/run/keys/s3cmd.cfg" ]; + pgbackup = [ "/run/keys/s3cmd.cfg[pgbackup]" ]; + }; + }; + }; + + config = { + users.users = genAttrs (attrNames cfg.keyrings) ( + name: optionalAttrs (name != "root") { extraGroups = [ "keys" ]; } + ); + + deployment.keys = foldl (a: b: a//b) {} ( + mapAttrsToList (name: keys: + genAttrs (map baseNameOf keys) + (key: { text = read key; + user = toString allusers.${name}.uid; + }) + ) cfg.keyrings + ); + }; +} diff --git a/modules/pkgs/check_aws_ec2_elb/check_aws_ec2_elb b/modules/pkgs/check_aws_ec2_elb/check_aws_ec2_elb new file mode 100755 index 0000000..7b53cc9 --- /dev/null +++ b/modules/pkgs/check_aws_ec2_elb/check_aws_ec2_elb @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +set -euo pipefail + +outOfServicePercentWarn=20 +outOfServicePercentCrit=33 +endpoint='' + +while [ $# -gt 0 ]; do + case "$1" in + -f) export BOTO_CONFIG="$2"; shift 2;; + -h) endpoint="$2"; shift 2;; + -w) outOfServicePercentWarn="$2"; shift 2;; + -c) outOfServicePercentCrit="$2"; shift 2;; + *) echo "$0: unsupported argument: $1" >&2; exit 1;; + esac +done + +cmd=( aws elb describe-instance-health ) + +c=0 +while [[ "$endpoint" != *.*.elb.amazonaws.com* ]]; do + endpoint=$(dig "$endpoint" CNAME +short) + (( ++c )) + if (( c > 10 )); then + echo "failed to resolve '$1'" >&2 + exit 255 + fi +done + +cmd+=( --region $(echo "$endpoint" | cut -d. -f2) ) +elbName=$(echo "$endpoint" | cut -d. -f1 | sed -r 's/^(internal-)?(.*)-[0-9]+$/\2/') +cmd+=( --load-balancer-name "$elbName" ) + +json=$("${cmd[@]}") + +totalCount=$(echo "$json" | jq -c '.InstanceStates | length') +outOfServiceInfo=$(echo "$json" | jq -c '.InstanceStates | map(select(.State == "OutOfService") | .InstanceId)') +outOfServiceCount=$(echo "$outOfServiceInfo" | jq -r 'length') + +outOfServiceCountWarn=${outOfServiceCountWarn:-$(( totalCount * outOfServicePercentWarn / 100 ))} +outOfServiceCountCrit=${outOfServiceCountCrit:-$(( totalCount * outOfServicePercentCrit / 100 ))} + +stat="total=$totalCount out_of_service=$outOfServiceCount;$outOfServiceCountWarn;$outOfServiceCountCrit" +outOfServiceInstances=$(echo "$outOfServiceInfo" | jq -r 'join(", ")') + +if [ "$outOfServiceCount" -eq 0 ]; then + echo "OK: $elbName - $totalCount instances|$stat" + exit 0 +elif [ "$outOfServiceCount" -ge "$outOfServiceCountCrit" ]; then + echo "CRITICAL: $elbName - $outOfServiceCount/$totalCount out of service: $outOfServiceInstances|$stat" + exit 2 +elif [ "$outOfServiceCount" -ge "$outOfServiceCountWarn" ]; then + echo "WARNING: $elbName - $outOfServiceCount/$totalCount out of service: $outOfServiceInstances|$stat" + exit 1 +else + echo "OK: $elbName - $outOfServiceCount/$totalCount out of service: $outOfServiceInstances|$stat" + exit 0 +fi + diff --git a/modules/pkgs/check_aws_ec2_elb/check_aws_ec2_elb.conf b/modules/pkgs/check_aws_ec2_elb/check_aws_ec2_elb.conf new file mode 100644 index 0000000..9718e3c --- /dev/null +++ b/modules/pkgs/check_aws_ec2_elb/check_aws_ec2_elb.conf @@ -0,0 +1,14 @@ +object CheckCommand "aws-ec2-elb" { + import "plugin-check-command" + + command = [ "check_aws_ec2_elb" ] + + arguments = { + "-h" = "$aws_ec2_elb_address$" + "-f" = "$aws_ec2_elb_boto_config$" + "-w" = "$aws_ec2_elb_warn$" + "-c" = "$aws_ec2_elb_crit$" + } + vars.aws_ec2_elb_address = "$address$" +} + diff --git a/modules/pkgs/check_aws_ec2_elb/default.nix b/modules/pkgs/check_aws_ec2_elb/default.nix new file mode 100644 index 0000000..5162c9d --- /dev/null +++ b/modules/pkgs/check_aws_ec2_elb/default.nix @@ 
-0,0 +1,22 @@ +{ stdenv, pkgs, makeWrapper }: + +stdenv.mkDerivation { + name = "check_aws_ec2_elb"; + outputs = [ "out" "conf" ]; + unpackPhase = ":"; + nativeBuildInputs = [ makeWrapper ]; + installPhase = '' + mkdir -p $out/bin + + cp ${./check_aws_ec2_elb} $out/bin/check_aws_ec2_elb + cp ${./check_aws_ec2_elb.conf} $conf + + chmod +x "$out/bin/"* + + substituteInPlace "$conf" \ + --replace check_aws_ec2_elb "$out/bin/check_aws_ec2_elb" + + wrapProgram "$out/bin/check_aws_ec2_elb" \ + --prefix PATH : "${pkgs.awscli}/bin:${pkgs.gnused}/bin:${pkgs.jq}/bin:${pkgs.bind}/bin" + ''; +} diff --git a/modules/pkgs/check_aws_rds/check_aws_rds b/modules/pkgs/check_aws_rds/check_aws_rds new file mode 100644 index 0000000..d4fd965 --- /dev/null +++ b/modules/pkgs/check_aws_rds/check_aws_rds @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +set -euo pipefail + +cmd=( pmp-check-aws-rds ) + +getId () { + local endpoint="$1" + local c=0 + while [[ "$endpoint" != *.*.*.rds.amazonaws.com* ]]; do + endpoint=$(dig "$endpoint" CNAME +short) + (( ++c )) + if (( c > 10 )); then + echo "failed to resolve '$1'" >&2 + exit 255 + fi + done + + cmd+=( -r $(echo "$endpoint" | cut -d. -f3) ) + cmd+=( -i $(echo "$endpoint" | cut -d. -f1) ) +} + +while [ $# -gt 0 ]; do + case "$1" in + -m|-w|-c) cmd+=( $1 $2 ); shift 2;; + -f) export BOTO_CONFIG="$2"; shift 2;; + -h) getId "$2"; shift 2;; + *) echo "$0: unsupported argument: $1" >&2; exit 1;; + esac +done +exec "${cmd[@]}" + diff --git a/modules/pkgs/check_aws_rds/check_aws_rds.conf b/modules/pkgs/check_aws_rds/check_aws_rds.conf new file mode 100644 index 0000000..2f6a84e --- /dev/null +++ b/modules/pkgs/check_aws_rds/check_aws_rds.conf @@ -0,0 +1,41 @@ +template CheckCommand "aws-rds-common" { + import "plugin-check-command" + + command = [ "check_aws_rds" ] + + arguments = { + "-h" = "$aws_rds_address$" + "-f" = "$aws_rds_boto_config$" + "-m" = "$aws_rds_metric$" + "-w" = "$aws_rds_warning$" + "-c" = "$aws_rds_critical$" + } + vars.aws_rds_address = "$address$" +} + +object CheckCommand "aws-rds-status" { + import "aws-rds-common" + vars.aws_rds_metric = "status" +} + +object CheckCommand "aws-rds-load" { + import "aws-rds-common" + vars.aws_rds_metric = "load" + vars.aws_rds_critical = "99,97,95" + vars.aws_rds_warning = "95,93,90" +} + +object CheckCommand "aws-rds-memory" { + import "aws-rds-common" + vars.aws_rds_metric = "memory" + vars.aws_rds_critical = "5" + vars.aws_rds_warning = "9" +} + +object CheckCommand "aws-rds-storage" { + import "aws-rds-common" + vars.aws_rds_metric = "storage" + vars.aws_rds_critical = "5" + vars.aws_rds_warning = "15" +} + diff --git a/modules/pkgs/check_aws_rds/default.nix b/modules/pkgs/check_aws_rds/default.nix new file mode 100644 index 0000000..5e91b33 --- /dev/null +++ b/modules/pkgs/check_aws_rds/default.nix @@ -0,0 +1,46 @@ +{ stdenv, pkgs, fetchurl, python27Packages }: +let + + rev = "556191f6d775f0505fb142c02f13a60ba7829ed9"; + + pmp-check-aws-rds = stdenv.mkDerivation rec { + name = "pmp-check-aws-rds"; + src = fetchurl { + url = "https://raw.githubusercontent.com/percona/percona-monitoring-plugins/${rev}/nagios/bin/pmp-check-aws-rds.py"; + sha256 = "0ghq6nl2529llxz1icf5hyg75k2hjzdkzfwgrs0d69r3f62w4q5y"; + }; + + buildInputs = with python27Packages; [ python wrapPython ]; + pythonPath = with python27Packages; [ boto ]; + phases = [ "installPhase" "fixupPhase" ]; + + installPhase = '' + mkdir -p $out/bin + cp $src $out/bin/${name} + chmod +x $out/bin/${name} + wrapPythonPrograms + ''; + + }; + +in stdenv.mkDerivation { + name = 
"check_aws_rds"; + outputs = [ "out" "conf" ]; + unpackPhase = ":"; + installPhase = '' + mkdir -p $out/bin + + cp ${./check_aws_rds} $out/bin/check_aws_rds + cp ${./check_aws_rds.conf} $conf + + substituteInPlace "$out/bin/"* \ + --replace pmp-check-aws-rds '${pmp-check-aws-rds}/bin/pmp-check-aws-rds' \ + --replace dig '${pkgs.bind}/bin/dig' + + substituteInPlace "$conf" \ + --replace check_aws_rds "$out/bin/check_aws_rds" + + chmod +x "$out/bin/"* + + ''; +} diff --git a/modules/pkgs/check_mdstat/check_mdstat b/modules/pkgs/check_mdstat/check_mdstat new file mode 100755 index 0000000..32fc168 --- /dev/null +++ b/modules/pkgs/check_mdstat/check_mdstat @@ -0,0 +1,50 @@ +#!/usr/bin/env bash + +stat=/proc/mdstat + +if [ ! -e "$stat" ]; then + echo "WARNING: $stat does not exist" + exit 1 +fi + +if [ ! -r "$stat" ]; then + echo "WARNING: cannot read $stat" + exit 1 +fi + +count=$(grep ^md -c "$stat") + +if [ "$count" -eq 0 ]; then + echo 'WARNING: no arrays found.' + exit 1 +elif [ "$count" -eq 1 ]; then + out="Linux Software RAID: $count array" +else + out="Linux Software RAID: $count arrays" +fi + +degrated=$(grep -c '\[.*_.*\]' "$stat") +recovering=$(awk '/recovery/ {print $4}' "$stat") +resyncing=$(awk '/resync/ {print $4}' "$stat") + +if [ -n "$recovering" ]; then + out="$out, recovering: $recovering" +elif [ -n "$resyncing" ]; then + out="$out, resyncing: $resyncing" +elif [ "$degrated" -gt 0 ]; then + out="$out, degrated: $degrated" +fi + +if [ "$degrated" -gt 0 ]; then + echo "CRITICAL: $out." + exit 2 +fi + +if [ -n "$recovering$resyncing" ]; then + echo "WARNING: $out." + exit 1 +fi + +echo "OK: $out." +exit 0 + diff --git a/modules/pkgs/check_mdstat/default.nix b/modules/pkgs/check_mdstat/default.nix new file mode 100644 index 0000000..5e645fd --- /dev/null +++ b/modules/pkgs/check_mdstat/default.nix @@ -0,0 +1,26 @@ +{ stdenv, gawk, gnugrep }: + +stdenv.mkDerivation { + name = "check_mdstat"; + src = ./check_mdstat; + outputs = [ "out" "conf" ]; + unpackPhase = ":"; + installPhase = '' + mkdir -p $out/bin + + cp "$src" $out/bin/check_mdstat + + substituteInPlace "$out/bin/"* \ + --replace awk '${gawk}/bin/awk' \ + --replace grep '${gnugrep}/bin/grep' + + chmod +x "$out/bin/"* + + cat <<CONF > $conf + object CheckCommand "mdstat" { + import "plugin-check-command" + command = [ "$out/bin/check_mdstat" ] + } + CONF + ''; +} diff --git a/modules/pkgs/check_solr/cabal2nix.nix b/modules/pkgs/check_solr/cabal2nix.nix new file mode 100644 index 0000000..ee3c6b5 --- /dev/null +++ b/modules/pkgs/check_solr/cabal2nix.nix @@ -0,0 +1,23 @@ +{ mkDerivation, aeson, base, base64-bytestring, bytestring, docopt +, fetchgit, HTTP, http-conduit, nagios-check, raw-strings-qq +, regex-tdfa, scientific, stdenv, text, unordered-containers +}: +mkDerivation { + pname = "check-solr"; + version = "0.1.0"; + src = fetchgit { + url = "https://github.com/ip1981/check-solr.git"; + sha256 = "839199942e5cf110428dd589f1d9610ac504d7199b2b7053d5ee136206890309"; + rev = "869c945fb56f0ff187125ee352a6876002eba596"; + }; + isLibrary = true; + isExecutable = true; + libraryHaskellDepends = [ + aeson base base64-bytestring bytestring docopt HTTP http-conduit + nagios-check raw-strings-qq regex-tdfa scientific text + unordered-containers + ]; + executableHaskellDepends = [ base docopt raw-strings-qq ]; + description = "Icinga / Nagios plugin for Solr"; + license = stdenv.lib.licenses.mit; +} diff --git a/modules/pkgs/check_solr/default.nix b/modules/pkgs/check_solr/default.nix new file mode 100644 index 0000000..27aecce 
--- /dev/null +++ b/modules/pkgs/check_solr/default.nix @@ -0,0 +1,13 @@ +{ stdenv, haskellPackages }: +let + + haskellPackage = haskellPackages.callPackage ./cabal2nix.nix {}; + +in stdenv.mkDerivation { + name = "check-solr-${haskellPackage.version}"; + phases = [ "installPhase" ]; + installPhase = '' + mkdir -p $out/bin + cp -a ${haskellPackage}/bin/* $out/bin/ + ''; +} diff --git a/modules/pkgs/check_systemd/check_systemd b/modules/pkgs/check_systemd/check_systemd new file mode 100755 index 0000000..e668331 --- /dev/null +++ b/modules/pkgs/check_systemd/check_systemd @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -euo pipefail + +readarray -t failed < <( systemctl show '*.service' \ + --state=failed --property=Names \ + | sed -r -n 's,Names=(.+)\.service,\1,p' \ + | sort + ) + +if [ ${#failed[@]} -ne 0 ]; then + printf -v list ', %s' "${failed[@]}" + printf 'WARNING: %s failed\n' "${list:2}" + exit 1 +else + echo "OK: no failed services" + exit 0 +fi + + diff --git a/modules/pkgs/check_systemd/default.nix b/modules/pkgs/check_systemd/default.nix new file mode 100644 index 0000000..da92a64 --- /dev/null +++ b/modules/pkgs/check_systemd/default.nix @@ -0,0 +1,25 @@ +{ stdenv, gnused }: + +stdenv.mkDerivation { + name = "check_systemd"; + src = ./check_systemd; + outputs = [ "out" "conf" ]; + unpackPhase = ":"; + installPhase = '' + mkdir -p $out/bin + + cp "$src" $out/bin/check_systemd + + substituteInPlace "$out/bin/"* \ + --replace sed '${gnused}/bin/sed' + + chmod +x "$out/bin/"* + + cat <<CONF > $conf + object CheckCommand "systemd" { + import "plugin-check-command" + command = [ "$out/bin/check_systemd" ] + } + CONF + ''; +} diff --git a/modules/pkgs/default.nix b/modules/pkgs/default.nix new file mode 100644 index 0000000..d66f7ba --- /dev/null +++ b/modules/pkgs/default.nix @@ -0,0 +1,18 @@ +{ lib, ... }: + +let + all = lib.attrNames ( + lib.filterAttrs + ( n: _: n != "default.nix" && ! lib.hasPrefix "." n ) + (builtins.readDir ./.) + ); + + localPackages = super: lib.listToAttrs (map (f: + { name = lib.removeSuffix ".nix" f; + value = super.callPackage (./. 
+ "/${f}") {}; } + ) all); + +in { + nixpkgs.config.packageOverrides = localPackages; +} + diff --git a/modules/pkgs/gpg.nix b/modules/pkgs/gpg.nix new file mode 100644 index 0000000..a0acd20 --- /dev/null +++ b/modules/pkgs/gpg.nix @@ -0,0 +1,4 @@ +{ gnupg }: +gnupg.override { + x11Support = false; +} diff --git a/modules/pkgs/icinga2/check_mysql_slave.patch b/modules/pkgs/icinga2/check_mysql_slave.patch new file mode 100644 index 0000000..0658a8a --- /dev/null +++ b/modules/pkgs/icinga2/check_mysql_slave.patch @@ -0,0 +1,41 @@ +Index: icinga2-2.4.1/itl/command-plugins.conf +=================================================================== +--- icinga2-2.4.1.orig/itl/command-plugins.conf ++++ icinga2-2.4.1/itl/command-plugins.conf +@@ -1775,6 +1775,36 @@ object CheckCommand "mysql" { + vars.mysql_hostname = "$check_address$" + } + ++object CheckCommand "mysql_slave" { ++ import "plugin-check-command" ++ import "ipv4-or-ipv6" ++ ++ command = [ PluginDir + "/check_mysql_slave" ] ++ ++ arguments = { ++ "-H" = "$mysql_hostname$" ++ "-P" = "$mysql_port$" ++ "-s" = "$mysql_socket$" ++ "-f" = "$mysql_file$" ++ "-g" = "$mysql_group$" ++ "-u" = "$mysql_username$" ++ "-p" = "$mysql_password$" ++ "-N" = "$mysql_connection_name$" ++ "-w" = "$mysql_warning$" ++ "-c" = "$mysql_critical$" ++ "-l" = { ++ set_if = "$mysql_ssl$" ++ } ++ "-C" = "$mysql_cacert$" ++ "-a" = "$mysql_cert$" ++ "-k" = "$mysql_key$" ++ "-D" = "$mysql_cadir$" ++ "-L" = "$mysql_ciphers$" ++ } ++ ++ vars.mysql_hostname = "$check_address$" ++} ++ + object CheckCommand "negate" { + import "plugin-check-command" + diff --git a/modules/pkgs/icinga2/default.nix b/modules/pkgs/icinga2/default.nix new file mode 100644 index 0000000..5429a51 --- /dev/null +++ b/modules/pkgs/icinga2/default.nix @@ -0,0 +1,45 @@ +{ stdenv, fetchurl +, bison, boost, cmake, flex +, libedit, mysql, openssl, yajl +}: + +stdenv.mkDerivation rec { + version = "2.4.10"; + name = "icinga2-${version}"; + + src = fetchurl { + url = "https://github.com/Icinga/icinga2/archive/v${version}.tar.gz"; + sha256 = "0pj2y24kgf17106903lnz9gmp5hb3irhafq8sp22qf1wa0q395n2"; + }; + + buildInputs = [ bison boost cmake flex libedit openssl yajl ]; + + patches = [ + ./check_mysql_slave.patch + ]; + + cmakeFlags = [ + "-DCMAKE_INSTALL_LOCALSTATEDIR=/icinga2" + "-DCMAKE_INSTALL_SYSCONFDIR=/icinga2/etc" # this will need runtime support + "-DICINGA2_COMMAND_GROUP=icingacmd" + "-DICINGA2_GROUP=icinga" + "-DICINGA2_RUNDIR=/run" + "-DICINGA2_USER=icinga" + "-DICINGA2_WITH_PGSQL=OFF" + "-DMYSQL_INCLUDE_DIR=${mysql.lib}/include/mysql" + "-DMYSQL_LIB_DIR=${mysql.lib}/lib" + ]; + + # XXX Without DESTDIR it tries to write to /icinga2 and /run: + installPhase = '' + rm -rf tmp + mkdir -p tmp + make install DESTDIR=$(pwd)/tmp + mv tmp/$out $out + mv tmp/icinga2 $out/icinga2 + rm -rf $out/run + for s in $out/icinga2/etc/icinga2/scripts/* ; do + substituteInPlace $s --replace /usr/bin/printf printf + done + ''; +} diff --git a/modules/pkgs/icingaweb2/default.nix b/modules/pkgs/icingaweb2/default.nix new file mode 100644 index 0000000..263ae47 --- /dev/null +++ b/modules/pkgs/icingaweb2/default.nix @@ -0,0 +1,33 @@ +{ stdenv, fetchurl +, php +}: + +stdenv.mkDerivation rec { + version = "2.3.4"; + name = "icingaweb2-${version}"; + + src = fetchurl { + url = "https://github.com/Icinga/icingaweb2/archive/v${version}.tar.gz"; + sha256 = "0kmxvwbr7g6daj2mqabzvmw3910igd85wrzwilkz83fizgmrszh5"; + }; + + buildInputs = [ php ]; + + patches = [ ./sproxy.patch ]; + + buildPhase = "true"; + + installPhase = '' + 
mkdir -p $out + cp -a * $out + rm -rf $out/.puppet + rm -rf $out/Vagrantfile + rm -rf $out/icingaweb2.spec + rm -rf $out/modules/doc + rm -rf $out/modules/iframe + rm -rf $out/modules/setup + rm -rf $out/modules/test + rm -rf $out/packages + rm -rf $out/test + ''; +} diff --git a/modules/pkgs/icingaweb2/sproxy.patch b/modules/pkgs/icingaweb2/sproxy.patch new file mode 100644 index 0000000..d1b074d --- /dev/null +++ b/modules/pkgs/icingaweb2/sproxy.patch @@ -0,0 +1,78 @@ +commit 04eb7cffa84387070f48f5649a1d5a5a7843fc9c +Author: Igor Pashev <pashev.igor@gmail.com> +Date: Fri Jan 1 11:05:48 2016 +0300 + + Added Sproxy backend + + See https://github.com/zalora/sproxy + +diff --git a/library/Icinga/Authentication/User/SproxyBackend.php b/library/Icinga/Authentication/User/SproxyBackend.php +new file mode 100644 +index 0000000..4b15b0e +--- /dev/null ++++ b/library/Icinga/Authentication/User/SproxyBackend.php +@@ -0,0 +1,40 @@ ++<?php ++/* 2016 Zalora South East Asia Pte. Ltd | GPLv2+ */ ++ ++namespace Icinga\Authentication\User; ++ ++use Icinga\Data\ConfigObject; ++use Icinga\User; ++ ++/** ++ * Login with Sproxy authentication mechanism: ++ * https://github.com/zalora/sproxy ++ */ ++class SproxyBackend extends ExternalBackend ++{ ++ /** ++ * {@inheritdoc} ++ */ ++ public function authenticate(User $user, $password = null) ++ { ++ if (! empty($_SERVER['HTTP_FROM'])) { ++ $email = $_SERVER['HTTP_FROM']; ++ $user->setUsername($email); ++ $user->setEmail($email); ++ $user->setExternalUserInformation($email, 'HTTP_FROM'); ++ ++ if (! empty($_SERVER['HTTP_X_GIVEN_NAME'])) { ++ $user->setFirstname($_SERVER['HTTP_X_GIVEN_NAME']); ++ } ++ if (! empty($_SERVER['HTTP_X_GROUPS'])) { ++ $user->setGroups(explode(',', $_SERVER['HTTP_X_GROUPS'])); ++ } ++ if (! empty($_SERVER['HTTP_X_FAMILY_NAME'])) { ++ $user->setLastname($_SERVER['HTTP_X_FAMILY_NAME']); ++ } ++ ++ return true; ++ } ++ return false; ++ } ++} +diff --git a/library/Icinga/Authentication/User/UserBackend.php b/library/Icinga/Authentication/User/UserBackend.php +index 3b8e210..d264365 100644 +--- a/library/Icinga/Authentication/User/UserBackend.php ++++ b/library/Icinga/Authentication/User/UserBackend.php +@@ -22,6 +22,7 @@ class UserBackend implements ConfigAwareFactory + * @var array + */ + protected static $defaultBackends = array( ++ 'sproxy', + 'external', + 'db', + 'ldap', +@@ -176,6 +177,11 @@ class UserBackend implements ConfigAwareFactory + $backend->setName($name); + return $backend; + } ++ if ($backendType === 'sproxy') { ++ $backend = new SproxyBackend($backendConfig); ++ $backend->setName($name); ++ return $backend; ++ } + if (in_array($backendType, static::$defaultBackends)) { + // The default backend check is the first one because of performance reasons: + // Do not attempt to load a custom user backend unless it's actually required diff --git a/modules/pkgs/juandelacosa/cabal2nix.nix b/modules/pkgs/juandelacosa/cabal2nix.nix new file mode 100644 index 0000000..ddb6af7 --- /dev/null +++ b/modules/pkgs/juandelacosa/cabal2nix.nix @@ -0,0 +1,25 @@ +{ mkDerivation, base, base64-bytestring, bytestring +, data-default-class, docopt, entropy, fast-logger, fetchgit +, http-types, interpolatedstring-perl6, mtl, mysql, mysql-simple +, network, resource-pool, scotty, stdenv, text, unix, wai +, wai-extra, wai-middleware-static, warp +}: +mkDerivation { + pname = "juandelacosa"; + version = "0.1.1"; + src = fetchgit { + url = "https://github.com/zalora/juandelacosa.git"; + sha256 = 
"c260feae989f518484881e7dc7ebcd51d5b25fcda92412445942a5e34c1f9459"; + rev = "0940da0cdfb1201768d35c58433891feacbaedd5"; + }; + isLibrary = false; + isExecutable = true; + executableHaskellDepends = [ + base base64-bytestring bytestring data-default-class docopt entropy + fast-logger http-types interpolatedstring-perl6 mtl mysql + mysql-simple network resource-pool scotty text unix wai wai-extra + wai-middleware-static warp + ]; + description = "Manage users in MariaDB >= 10.1.1"; + license = stdenv.lib.licenses.mit; +} diff --git a/modules/pkgs/juandelacosa/default.nix b/modules/pkgs/juandelacosa/default.nix new file mode 100644 index 0000000..b37e598 --- /dev/null +++ b/modules/pkgs/juandelacosa/default.nix @@ -0,0 +1,3 @@ +{ haskellPackages }: +haskellPackages.callPackage ./cabal2nix.nix {} + diff --git a/modules/pkgs/mariadb/MDEV-10463.patch b/modules/pkgs/mariadb/MDEV-10463.patch new file mode 100644 index 0000000..c094257 --- /dev/null +++ b/modules/pkgs/mariadb/MDEV-10463.patch @@ -0,0 +1,36 @@ +diff --git a/sql/sql_show.cc b/sql/sql_show.cc +index ae38745..73edb18 100644 +--- a/sql/sql_show.cc ++++ b/sql/sql_show.cc +@@ -4850,6 +4850,7 @@ int fill_schema_schemata(THD *thd, TABLE_LIST *tables, COND *cond) + TABLE *table= tables->table; + #ifndef NO_EMBEDDED_ACCESS_CHECKS + Security_context *sctx= thd->security_ctx; ++ ulong db_access= sctx->db_access; + #endif + DBUG_ENTER("fill_schema_shemata"); + +@@ -4891,9 +4892,20 @@ int fill_schema_schemata(THD *thd, TABLE_LIST *tables, COND *cond) + continue; + } + #ifndef NO_EMBEDDED_ACCESS_CHECKS +- if (sctx->master_access & (DB_ACLS | SHOW_DB_ACL) || +- acl_get(sctx->host, sctx->ip, sctx->priv_user, db_name->str, 0) || +- !check_grant_db(thd, db_name->str)) ++ if (test_all_bits(sctx->master_access, DB_ACLS)) ++ db_access= DB_ACLS; ++ else ++ { ++ db_access= acl_get(sctx->host, sctx->ip, sctx->priv_user, db_name->str, FALSE); ++ if (sctx->priv_role[0]) ++ { ++ /* include a possible currently set role for access */ ++ db_access|= acl_get("", "", sctx->priv_role, db_name->str, FALSE); ++ } ++ } ++ if ((sctx->master_access & SHOW_DB_ACL) || ++ (db_access & DB_ACLS) || ++ !check_grant_db(thd, db_name->str)) + #endif + { + load_db_opt_by_name(thd, db_name->str, &create); diff --git a/modules/pkgs/mariadb/default.nix b/modules/pkgs/mariadb/default.nix new file mode 100644 index 0000000..e26646d --- /dev/null +++ b/modules/pkgs/mariadb/default.nix @@ -0,0 +1,139 @@ +{ stdenv, fetchurl, cmake, ncurses, zlib, xz, lzo, lz4, bzip2, snappy +, openssl, pcre, boost, judy, bison, libxml2 +, libaio, libevent, groff, jemalloc, cracklib, systemd, numactl, perl +}: + +with stdenv.lib; +stdenv.mkDerivation rec { + name = "mariadb-${version}"; + version = "10.1.17"; + + src = fetchurl { + url = "https://downloads.mariadb.org/interstitial/mariadb-${version}/source/mariadb-${version}.tar.gz"; + sha256 = "1ddalhxxcn95qp5b50z213niylcd0s6bqphid0c7c624wg2mm92c"; + }; + + buildInputs = [ + cmake ncurses openssl zlib xz lzo lz4 bzip2 snappy + pcre libxml2 boost judy bison libevent cracklib + ] ++ stdenv.lib.optionals stdenv.isLinux [ jemalloc libaio systemd numactl ]; + + patches = [ + ./MDEV-10463.patch + ]; + + cmakeFlags = [ + "-DBUILD_CONFIG=mysql_release" + "-DDEFAULT_CHARSET=utf8" + "-DDEFAULT_COLLATION=utf8_general_ci" + "-DENABLED_LOCAL_INFILE=ON" + "-DMYSQL_UNIX_ADDR=/run/mysqld/mysqld.sock" + "-DMYSQL_DATADIR=/var/lib/mysql" + "-DINSTALL_SYSCONFDIR=etc/mysql" + "-DINSTALL_INFODIR=share/mysql/docs" + "-DINSTALL_MANDIR=share/man" + 
"-DINSTALL_PLUGINDIR=lib/mysql/plugin" + "-DINSTALL_SCRIPTDIR=bin" + "-DINSTALL_INCLUDEDIR=include/mysql" + "-DINSTALL_DOCREADMEDIR=share/mysql" + "-DINSTALL_SUPPORTFILESDIR=share/mysql" + "-DINSTALL_MYSQLSHAREDIR=share/mysql" + "-DINSTALL_DOCDIR=share/mysql/docs" + "-DINSTALL_SHAREDIR=share/mysql" + "-DWITH_READLINE=ON" + "-DWITH_ZLIB=system" + "-DWITH_SSL=system" + "-DWITH_PCRE=system" + "-DWITH_EMBEDDED_SERVER=yes" + "-DWITH_EXTRA_CHARSETS=complex" + "-DWITH_EMBEDDED_SERVER=ON" + "-DWITH_ARCHIVE_STORAGE_ENGINE=1" + "-DWITH_BLACKHOLE_STORAGE_ENGINE=1" + "-DWITH_INNOBASE_STORAGE_ENGINE=1" + "-DWITH_PARTITION_STORAGE_ENGINE=1" + "-DWITHOUT_EXAMPLE_STORAGE_ENGINE=1" + "-DWITHOUT_FEDERATED_STORAGE_ENGINE=1" + "-DSECURITY_HARDENED=ON" + "-DWITH_WSREP=ON" + ] ++ stdenv.lib.optionals stdenv.isDarwin [ + "-DWITHOUT_OQGRAPH_STORAGE_ENGINE=1" + "-DWITHOUT_TOKUDB=1" + "-DCURSES_LIBRARY=${ncurses}/lib/libncurses.dylib" + ]; + + # fails to find lex_token.h sometimes + enableParallelBuilding = true; + + outputs = [ "out" "lib" ]; + + prePatch = '' + substituteInPlace cmake/libutils.cmake \ + --replace /usr/bin/libtool libtool + sed -i "s,SET(DEFAULT_MYSQL_HOME.*$,SET(DEFAULT_MYSQL_HOME /not/a/real/dir),g" CMakeLists.txt + sed -i "s,SET(PLUGINDIR.*$,SET(PLUGINDIR $lib/lib/mysql/plugin),g" CMakeLists.txt + sed -i 's,SET(SHAREDIR .*$,SET(SHAREDIR share/mysql),g' CMakeLists.txt + + sed -i "s,SET(pkgincludedir.*$,SET(pkgincludedir $lib/include),g" scripts/CMakeLists.txt + sed -i "s,SET(pkglibdir.*$,SET(pkglibdir $lib/lib),g" scripts/CMakeLists.txt + sed -i "s,SET(pkgplugindir.*$,SET(pkgplugindir $lib/lib/mysql/plugin),g" scripts/CMakeLists.txt + + sed -i "s,set(libdir.*$,SET(libdir $lib/lib),g" storage/mroonga/vendor/groonga/CMakeLists.txt + sed -i "s,set(includedir.*$,SET(includedir $lib/include),g" storage/mroonga/vendor/groonga/CMakeLists.txt + sed -i "/\"\$[{]CMAKE_INSTALL_PREFIX}\/\$[{]GRN_RELATIVE_PLUGINS_DIR}\"/d" storage/mroonga/vendor/groonga/CMakeLists.txt + sed -i "s,set(GRN_PLUGINS_DIR.*$,SET(GRN_PLUGINS_DIR $lib/\$\{GRN_RELATIVE_PLUGINS_DIR}),g" storage/mroonga/vendor/groonga/CMakeLists.txt + sed -i 's,[^"]*/var/log,/var/log,g' storage/mroonga/vendor/groonga/CMakeLists.txt + ''; + + postInstall = '' + substituteInPlace $out/bin/mysql_install_db \ + --replace basedir=\"\" basedir=\"$out\" + + # Remove superfluous files + rm -r $out/mysql-test $out/sql-bench $out/data # Don't need testing data + rm $out/share/man/man1/mysql-test-run.pl.1 + rm $out/bin/rcmysql # Not needed with nixos units + rm $out/bin/mysqlbug # Encodes a path to gcc and not really useful + find $out/bin -name \*test\* -exec rm {} \; + + # Separate libs and includes into their own derivation + mkdir -p $lib + mv $out/lib $lib + mv $out/include $lib + + '' + + stdenv.lib.optionalString stdenv.isDarwin '' + # Fix library rpaths + # TODO: put this in the stdenv to prepare for wide usage of multi-output derivations + for file in $(grep -rl $out/lib $lib); do + install_name_tool -delete_rpath $out/lib -add_rpath $lib $file + done + + '' + '' + # Fix the mysql_config + sed -i $out/bin/mysql_config \ + -e 's,-lz,-L${zlib}/lib -lz,g' \ + -e 's,-lssl,-L${openssl}/lib -lssl,g' + + # Add mysql_config to libs since configure scripts use it + mkdir -p $lib/bin + cp $out/bin/mysql_config $lib/bin + sed -i "/\(execdir\|bindir\)/ s,'[^\"']*',$lib/bin,g" $lib/bin/mysql_config + + # Make sure to propagate lib for compatability + mkdir -p $out/nix-support + echo "$lib" > $out/nix-support/propagated-native-build-inputs + + # Don't install static 
libraries. + rm $lib/lib/libmysqlclient.a $lib/lib/libmysqld.a + ''; + + passthru.mysqlVersion = "5.6"; + + meta = with stdenv.lib; { + description = "An enhanced, drop-in replacement for MySQL"; + homepage = https://mariadb.org/; + license = stdenv.lib.licenses.gpl2; + maintainers = with stdenv.lib.maintainers; [ thoughtpolice wkennington ]; + platforms = stdenv.lib.platforms.all; + }; +} diff --git a/modules/pkgs/mathJax.nix b/modules/pkgs/mathJax.nix new file mode 100644 index 0000000..04b596f --- /dev/null +++ b/modules/pkgs/mathJax.nix @@ -0,0 +1,18 @@ +{ stdenv, fetchurl }: + +stdenv.mkDerivation rec { + version = "2.6.1"; + name = "mathjax-${version}"; + + src = fetchurl { + url = "https://github.com/mathjax/MathJax/archive/${version}.tar.gz"; + sha256 = "1f7v48s7km9fi9i0bignn8f91z3bk04n4jx407l3xsd4hxfr8in7"; + }; + + installPhase = '' + mkdir -p $out + cp -a * $out/ + rm -rf $out/unpacked + rm -rf "$out/"*.json + ''; +} diff --git a/modules/pkgs/mediawiki/T122487.patch b/modules/pkgs/mediawiki/T122487.patch new file mode 100644 index 0000000..7b868a8 --- /dev/null +++ b/modules/pkgs/mediawiki/T122487.patch @@ -0,0 +1,16 @@ +Description: fix warning on upload page +Bug: https://phabricator.wikimedia.org/T122487 +Index: mediawiki-1.23.13/includes/User.php +=================================================================== +--- mediawiki-1.23.13.orig/includes/User.php ++++ mediawiki-1.23.13/includes/User.php +@@ -3806,6 +3806,9 @@ class User { + * @return boolean: Whether the token matches + */ + public function matchEditToken( $val, $salt = '', $request = null ) { ++ if ($val === null) { ++ return false; ++ } + $sessionToken = $this->getEditToken( $salt, $request ); + $equals = hash_equals( $sessionToken, $val ); + if ( !$equals ) { diff --git a/modules/pkgs/mediawiki/default.nix b/modules/pkgs/mediawiki/default.nix new file mode 100644 index 0000000..ef606f0 --- /dev/null +++ b/modules/pkgs/mediawiki/default.nix @@ -0,0 +1,59 @@ +{ lib, pkgs }: + +let + inherit (builtins) elemAt; + inherit (lib) splitString concatMapStrings; + + bundled = [ + "Cite" "ConfirmEdit" "Gadgets" "ImageMap" "InputBox" "Interwiki" + "LocalisationUpdate" "Nuke" "ParserFunctions" "PdfHandler" "Poem" + "Renameuser" "SpamBlacklist" "SyntaxHighlight_GeSHi" "TitleBlacklist" + "WikiEditor" + ]; + +in pkgs.stdenv.mkDerivation rec { + version = "1.23.13"; + name = "mediawiki-${version}"; + + src = let + v = splitString "." version; + minor = "${elemAt v 0}.${elemAt v 1}"; + in pkgs.fetchurl { + url = "https://releases.wikimedia.org/mediawiki/${minor}/${name}.tar.gz"; + sha256 = "168wpf53n4ksj2g5q5r0hxapx6238dvsfng5ff9ixk6axsn0j5d0"; + }; + + patches = [ + ./T122487.patch + ./file-backend-default-mode.patch + ]; + + outputs = [ "out" ] ++ bundled; + + installPhase = '' + cp -a . 
$out + + rm -rf $out/tests + rm -rf $out/mw-config + rm -rf $out/maintenance/dev + rm -rf $out/maintenance/hiphop + + sed -i \ + -e 's|/bin/bash|${pkgs.bash}/bin/bash|g' \ + -e 's|/usr/bin/timeout|${pkgs.coreutils}/bin/timeout|g' \ + $out/includes/limit.sh \ + $out/includes/GlobalFunctions.php + + cat <<'EOF' > $out/LocalSettings.php + <?php + if (isset($_ENV['MEDIAWIKI_LOCAL_SETTINGS'])) { + require_once ($_ENV['MEDIAWIKI_LOCAL_SETTINGS']); + }; + ?> + EOF + + ${concatMapStrings (e: '' + mv $out/extensions/${e} ''${${e}} + '') bundled} + ''; +} diff --git a/modules/pkgs/mediawiki/file-backend-default-mode.patch b/modules/pkgs/mediawiki/file-backend-default-mode.patch new file mode 100644 index 0000000..2bd303c --- /dev/null +++ b/modules/pkgs/mediawiki/file-backend-default-mode.patch @@ -0,0 +1,52 @@ +Index: mediawiki-1.23.13/includes/DefaultSettings.php +=================================================================== +--- mediawiki-1.23.13.orig/includes/DefaultSettings.php ++++ mediawiki-1.23.13/includes/DefaultSettings.php +@@ -429,7 +429,7 @@ $wgImgAuthUrlPathMap = array(); + * leave the paths in unchanged, or 'simple' to replace paths with + * placeholders. Default for LocalRepo is 'simple'. + * - fileMode This allows wikis to set the file mode when uploading/moving files. Default +- * is 0644. ++ * is 0640. + * - directory The local filesystem directory where public files are stored. Not used for + * some remote repos. + * - thumbDir The base thumbnail directory. Defaults to "<directory>/thumb". +Index: mediawiki-1.23.13/includes/filerepo/FSRepo.php +=================================================================== +--- mediawiki-1.23.13.orig/includes/filerepo/FSRepo.php ++++ mediawiki-1.23.13/includes/filerepo/FSRepo.php +@@ -50,7 +50,7 @@ class FSRepo extends FileRepo { + : "{$directory}/transcoded"; + $fileMode = isset( $info['fileMode'] ) + ? $info['fileMode'] +- : 0644; ++ : 0640; + + $repoName = $info['name']; + // Get the FS backend configuration +Index: mediawiki-1.23.13/includes/filebackend/FSFileBackend.php +=================================================================== +--- mediawiki-1.23.13.orig/includes/filebackend/FSFileBackend.php ++++ mediawiki-1.23.13/includes/filebackend/FSFileBackend.php +@@ -82,7 +82,7 @@ class FSFileBackend extends FileBackendS + } + } + +- $this->fileMode = isset( $config['fileMode'] ) ? $config['fileMode'] : 0644; ++ $this->fileMode = isset( $config['fileMode'] ) ? $config['fileMode'] : 0640; + if ( isset( $config['fileOwner'] ) && function_exists( 'posix_getuid' ) ) { + $this->fileOwner = $config['fileOwner']; + $info = posix_getpwuid( posix_getuid() ); +Index: mediawiki-1.23.13/includes/filebackend/FileBackendGroup.php +=================================================================== +--- mediawiki-1.23.13.orig/includes/filebackend/FileBackendGroup.php ++++ mediawiki-1.23.13/includes/filebackend/FileBackendGroup.php +@@ -88,7 +88,7 @@ class FileBackendGroup { + : "{$directory}/transcoded"; + $fileMode = isset( $info['fileMode'] ) + ? 
$info['fileMode'] +- : 0644; ++ : 0640; + // Get the FS backend configuration + $autoBackends[] = array( + 'name' => $backendName, diff --git a/modules/pkgs/mediawikiExtensions/Sproxy/Sproxy.php b/modules/pkgs/mediawikiExtensions/Sproxy/Sproxy.php new file mode 100644 index 0000000..697c596 --- /dev/null +++ b/modules/pkgs/mediawikiExtensions/Sproxy/Sproxy.php @@ -0,0 +1,218 @@ +<?php + +// This program is free software: you can redistribute it and/or modify it +// under the terms of the GNU General Public License as published by the Free +// Software Foundation, either version 2 of the License, or (at your option) +// any later version. +// +// This program is distributed in the hope that it will be useful, but WITHOUT +// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +// more details. +// +// You should have received a copy of the GNU General Public License along with +// this program. If not, see <http://www.gnu.org/licenses/>. +// +// Copyright 2006 Otheus Shelling +// Copyright 2007 Rusty Burchfield +// Copyright 2009 James Kinsman +// Copyright 2010 Daniel Thomas +// Copyright 2010 Ian Ward Comfort +// Copyright 2013-2016 Zalora South East Asia Pte Ltd +// +// In 2009, the copyright holders determined that the original publishing of this code +// under GPLv3 was legally and logistically in error, and re-licensed it under GPLv2. +// +// See http://www.mediawiki.org/wiki/Extension:AutomaticREMOTE_USER +// +// Adapted by Rusty to be compatible with version 1.9 of MediaWiki +// Optional settings from Emmanuel Dreyfus +// Adapted by VibroAxe (James Kinsman) to be compatible with version 1.16 of MediaWiki +// Adapted by VibroAxe (James Kinsman) to allow domain substitution for Integrated Windows Authentication +// Adapted by drt24 (Daniel Thomas) to add the optional $wgAuthRemoteuserMailDomain and remove hardcoding +// of permissions for anonymous users. +// Adapted by Ian Ward Comfort to detect mismatches between the session user and REMOTE_USER +// Adapted to sproxy by Chris Forno +// Extension credits that show up on Special:Version + +$wgExtensionCredits['other'][] = array( + 'name' => 'Sproxy', + 'version' => '0.2.0', + 'author' => array( + 'Otheus Shelling', + 'Rusty Burchfield', + 'James Kinsman', + 'Daniel Thomas', + 'Ian Ward Comfort', + 'Chris Forno' + ) , + 'url' => '', + 'description' => 'Automatically authenticates users using sproxy HTTP headers.', +); + +// We must allow zero length passwords. This extension does not work in MW 1.16 without this. +$wgMinimalPasswordLength = 0; + +function sproxy_hook() +{ + global $wgUser, $wgRequest, $wgAuth; + + // For a few special pages, don't do anything. + $skipPages = array( + Title::makeName(NS_SPECIAL, 'UserLogin') , + Title::makeName(NS_SPECIAL, 'UserLogout') , + ); + + if (in_array($wgRequest->getVal('title') , $skipPages)) { + return; + } + + // Don't do anything if there's already a valid session. + $user = User::newFromSession(); + if (!$user->isAnon()) { + return; + } + + // If the login form returns NEED_TOKEN try once more with the right token + $trycount = 0; + $token = ''; + $errormessage = ''; + do { + $tryagain = false; + // Submit a fake login form to authenticate the user. + $params = new FauxRequest(array( + 'wpName' => sproxy_username() , + 'wpPassword' => '', + 'wpDomain' => '', + 'wpLoginToken' => $token, + 'wpRemember' => '', + )); + // Authenticate user data will automatically create new users. 
+ $loginForm = new LoginForm($params); + $result = $loginForm->authenticateUserData(); + switch ($result) { + case LoginForm::SUCCESS: + $wgUser->setOption('rememberpassword', 1); + $wgUser->setCookies(); + break; + + case LoginForm::NEED_TOKEN: + $token = $loginForm->getLoginToken(); + $tryagain = ($trycount == 0); + break; + + default: + error_log("Unexpected sproxy authentication failure (code: $result)"); + break; + } + $trycount++; + } + while ($tryagain); +} + +$wgExtensionFunctions[] = 'sproxy_hook'; +function sproxy_email() +{ + return $_SERVER['HTTP_FROM']; +} + +function sproxy_username() +{ + // We can't rely on X-Given-Name/X-Family name because they can be + // set by the user. I've personally seen someone set their name to + // "ZALORA". + // + // Instead, we'll try to extract the real name from the first part + // of the email address. + list($username, $_) = explode('@', sproxy_email()); + // So we have something like firstname.lastname or firstname.l or + // firstname. + return $username; +} + +function sproxy_real_name() +{ + return $_SERVER['HTTP_X_GIVEN_NAME'] . ' ' . $_SERVER['HTTP_X_FAMILY_NAME']; +} + +class AuthSproxy extends AuthPlugin +{ + public function userExists($username) + { + // This does not mean does the user already exist in the Mediawiki database. + return true; + } + + public function authenticate($username, $password) + { + // All users are already authenticated. + return true; + } + + public function autoCreate() + { + // Automatically create Mediawiki users for sproxy users. + return true; + } + + function allowPasswordChange() + { + // This doesn't make any sense so don't allow it. + return false; + } + + public function strict() + { + // Don't check passwords against the Mediawiki database; + return true; + } + + public function initUser(&$user, $autocreate = false) + { + $user->setEmail(sproxy_email()); + $user->mEmailAuthenticated = wfTimestampNow(); + $user->setToken(); + $user->setRealName(sproxy_real_name()); + + // turn on e-mail notifications + if (isset($wgAuthRemoteuserNotify) && $wgAuthRemoteuserNotify) { + $user->setOption('enotifwatchlistpages', 1); + $user->setOption('enotifusertalkpages', 1); + $user->setOption('enotifminoredits', 1); + $user->setOption('enotifrevealaddr', 1); + } + $user->saveSettings(); + } +} + +$wgAuth = new AuthSproxy(); + +// Don't let anonymous people do things... 
+$wgGroupPermissions['*']['createaccount'] = false; +$wgGroupPermissions['*']['read'] = false; +$wgGroupPermissions['*']['edit'] = false; + +// see http://www.mediawiki.org/wiki/Manual:Hooks/SpecialPage_initList +// and http://www.mediawiki.org/w/Manual:Special_pages +// and http://lists.wikimedia.org/pipermail/mediawiki-l/2009-June/031231.html +// disable login and logout functions for all users +function LessSpecialPages(&$list) +{ + unset($list['ChangeEmail']); + unset($list['Userlogin']); + unset($list['Userlogout']); + return true; +} +$wgHooks['SpecialPage_initList'][] = 'LessSpecialPages'; + +// http://www.mediawiki.org/wiki/Extension:Windows_NTLM_LDAP_Auto_Auth +// remove login and logout buttons for all users +function StripLogin(&$personal_urls, &$wgTitle) +{ + unset($personal_urls["login"]); + unset($personal_urls["logout"]); + unset($personal_urls['anonlogin']); + return true; +} +$wgHooks['PersonalUrls'][] = 'StripLogin'; + diff --git a/modules/pkgs/mediawikiExtensions/default.nix b/modules/pkgs/mediawikiExtensions/default.nix new file mode 100644 index 0000000..f2ae6f1 --- /dev/null +++ b/modules/pkgs/mediawikiExtensions/default.nix @@ -0,0 +1,52 @@ +{ lib, fetchgit, mediawiki }: + +let + inherit (lib) filter genAttrs; + + bundled = filter (n: n != "out") mediawiki.outputs; + +in genAttrs bundled (e: mediawiki.${e}) // +{ + + EmbedVideo= fetchgit { + url = https://github.com/HydraWiki/mediawiki-embedvideo.git; + rev = "1c1904bfc040bc948726719cbef41708c62546b3"; + sha256 = "07sxpaks1hik710izilpslnqlcjz2nphqkx9b9qh6qv9xb0a9n6v"; + }; + + GraphViz = fetchgit { + url = https://gerrit.wikimedia.org/r/p/mediawiki/extensions/GraphViz.git; + rev = "c968ec19090ab6febcd12ccd5816c5875fddc9df"; + sha256 = "a0f9b7a67c1b166bba7ce3100b9b2666938af50666a526b1e9e4a83359e4a10d"; + }; + +/* TODO Use with Mediawiki 1.26+ + MathJax = fetchgit { + url = https://github.com/hbshim/mediawiki-mathjax.git; + rev = "56061635eaeffbd13d50d243077e44fcbf3f5da1"; + sha256 = "1xx9cpcl5c8n1jn3qckcva5dnl8z7i1bd2ff4ycpd2cdp930gsy6"; + }; +*/ + + MathJax = fetchgit { + url = https://github.com/zalora/Mediawiki-MathJax.git; + rev = "880adf7f9da55dbe257043fe431f825211ee96e1"; + sha256 = "17s3pbxj6jhywsbdss1hqmss8slb89jkwirlsbd0h16m130q72n8"; + }; + + MsUpload = fetchgit { + url = https://phabricator.wikimedia.org/diffusion/EMSU/extension-msupload.git; + rev = "d2983b9cd44203173b39e64bf25cdcd73612fcc0"; + sha256 = "18n4iyvp85ipgggjgwrk6pn75gciwrkjb7mr1zvqsh9kv3rpd5n9"; + }; + + Sproxy = ./Sproxy; # TODO: review, update & publish + + UserPageEditProtection = fetchgit { + url = https://gerrit.wikimedia.org/r/p/mediawiki/extensions/UserPageEditProtection.git; + rev = "13ff835e8278654ab8cfae03c8b8196bdfe6e410"; + sha256 = "0hjsgq8hhqw6wxqfc14jq1wb09q8zf9xv7jz0hkhl5ma6338j7q9"; + }; + +} + diff --git a/modules/pkgs/monitoringPlugins/default.nix b/modules/pkgs/monitoringPlugins/default.nix new file mode 100644 index 0000000..d0f554e --- /dev/null +++ b/modules/pkgs/monitoringPlugins/default.nix @@ -0,0 +1,37 @@ +{ stdenv, fetchurl +, autoreconfHook +, procps, perl +, fping, openssh, bind +, mariadb +, openssl +}: + +stdenv.mkDerivation rec { + version = "2.1.2"; + name = "monitoring-plugins-${version}"; + src = fetchurl { + url = "https://github.com/monitoring-plugins/monitoring-plugins/archive/v${version}.tar.gz"; + sha256 = "0mgs59326yzvx92pdqmn671d40czixd7k60dvsbz89ah2r96vps7"; + }; + + buildInputs = [ + autoreconfHook + procps perl + fping openssh bind + mariadb.lib + openssl + ]; + + patches = [ + 
./mysql_check_slave.patch + ]; + + configurePhase = '' + ./configure \ + --prefix=$out \ + --disable-nls \ + --with-ping-command="/var/setuid-wrappers/ping -n -U -w %d -c %d %s" \ + --with-ping6-command="/var/setuid-wrappers/ping6 -n -U -w %d -c %d %s" \ + --with-trusted-path=/var/setuid-wrappers:/run/current-system/sw/bin:/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin + ''; +} diff --git a/modules/pkgs/monitoringPlugins/mysql_check_slave.patch b/modules/pkgs/monitoringPlugins/mysql_check_slave.patch new file mode 100644 index 0000000..09098f6 --- /dev/null +++ b/modules/pkgs/monitoringPlugins/mysql_check_slave.patch @@ -0,0 +1,591 @@ +commit f1623c0309b35b35a3ff8cab447e9ab06b3497f8 +Author: Igor Pashev <pashev.igor@gmail.com> +Date: Sun Jan 3 10:59:51 2016 +0300 + + Added check_mysql_slave + + Supports MariaDB multi-source replication + +diff --git a/configure.ac b/configure.ac +index 0a554af..f1a7aac 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -357,7 +357,7 @@ if test $with_mysql = "no" ; then + AC_MSG_WARN([Skipping mysql plugin]) + AC_MSG_WARN([install mysql client libs to compile this plugin (see REQUIREMENTS).]) + else +- EXTRAS="$EXTRAS check_mysql\$(EXEEXT) check_mysql_query\$(EXEEXT)" ++ EXTRAS="$EXTRAS check_mysql\$(EXEEXT) check_mysql_query\$(EXEEXT) check_mysql_slave\$(EXEEXT)" + MYSQLINCLUDE="$np_mysql_include" + MYSQLLIBS="$np_mysql_libs" + MYSQLCFLAGS="$np_mysql_cflags" +diff --git a/plugins/Makefile.am b/plugins/Makefile.am +index 0ddf9bd..fdcb154 100644 +--- a/plugins/Makefile.am ++++ b/plugins/Makefile.am +@@ -38,7 +38,7 @@ check_tcp_programs = check_ftp check_imap check_nntp check_pop \ + EXTRA_PROGRAMS = check_mysql check_radius check_pgsql check_snmp check_hpjd \ + check_swap check_fping check_ldap check_game check_dig \ + check_nagios check_by_ssh check_dns check_nt check_ide_smart \ +- check_procs check_mysql_query check_apt check_dbi ++ check_procs check_mysql_query check_apt check_dbi check_mysql_slave + + EXTRA_DIST = t tests + +@@ -85,6 +85,9 @@ check_mrtgtraf_LDADD = $(BASEOBJS) + check_mysql_CFLAGS = $(AM_CFLAGS) $(MYSQLCFLAGS) + check_mysql_CPPFLAGS = $(AM_CPPFLAGS) $(MYSQLINCLUDE) + check_mysql_LDADD = $(NETLIBS) $(MYSQLLIBS) ++check_mysql_slave_CFLAGS = $(AM_CFLAGS) $(MYSQLCFLAGS) ++check_mysql_slave_CPPFLAGS = $(AM_CPPFLAGS) $(MYSQLINCLUDE) ++check_mysql_slave_LDADD = $(NETLIBS) $(MYSQLLIBS) + check_mysql_query_CFLAGS = $(AM_CFLAGS) $(MYSQLCFLAGS) + check_mysql_query_CPPFLAGS = $(AM_CPPFLAGS) $(MYSQLINCLUDE) + check_mysql_query_LDADD = $(NETLIBS) $(MYSQLLIBS) +diff --git a/plugins/check_mysql_slave.c b/plugins/check_mysql_slave.c +new file mode 100644 +index 0000000..bc402e0 +--- /dev/null ++++ b/plugins/check_mysql_slave.c +@@ -0,0 +1,541 @@ ++/***************************************************************************** ++* ++* Monitoring check_mysql plugin ++* ++* License: GPL ++* Copyright (c) 1999 Didi Rieder (adrieder@sbox.tu-graz.ac.at) ++* Copyright (c) 2000 Karl DeBisschop (kdebisschop@users.sourceforge.net) ++* Copyright (c) 1999-2011 Monitoring Plugins Development Team ++* Copyright (c) 2016 Zalora South East Asia Pte. Ltd ++* ++* Description: ++* ++* This file contains the check_mysql_slave plugin ++* ++* This program tests MySQL/MariaDB slaves ++* ++* ++* This program is free software: you can redistribute it and/or modify ++* it under the terms of the GNU General Public License as published by ++* the Free Software Foundation, either version 3 of the License, or ++* (at your option) any later version. 
++* ++* This program is distributed in the hope that it will be useful, ++* but WITHOUT ANY WARRANTY; without even the implied warranty of ++* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++* GNU General Public License for more details. ++* ++* You should have received a copy of the GNU General Public License ++* along with this program. If not, see <http://www.gnu.org/licenses/>. ++* ++* ++*****************************************************************************/ ++ ++const char *progname = "check_mysql_slave"; ++const char *copyright = "1999-2016"; ++const char *email = "devel@monitoring-plugins.org"; ++ ++#define SLAVERESULTSIZE 256 ++ ++#include "common.h" ++#include "utils.h" ++#include "utils_base.h" ++#include "netutils.h" ++ ++#include <mysql.h> ++#include <mysqld_error.h> ++#include <errmsg.h> ++ ++char *db_user = NULL; ++char *db_host = NULL; ++char *db_socket = NULL; ++char *db_pass = NULL; ++char *ca_cert = NULL; ++char *ca_dir = NULL; ++char *cert = NULL; ++char *key = NULL; ++char *ciphers = NULL; ++bool ssl = false; ++char *opt_file = NULL; ++char *opt_group = NULL; ++unsigned int db_port = MYSQL_PORT; ++int warn_sec = 0, crit_sec = 0; ++char *connection_name = NULL; ++char *query; ++ ++static double warning_time = 0; ++static double critical_time = 0; ++ ++thresholds *my_threshold = NULL; ++ ++int process_arguments (int, char **); ++int validate_arguments (void); ++void print_help (void); ++void print_usage (void); ++ ++int ++main (int argc, char **argv) ++{ ++ ++ MYSQL mysql; ++ MYSQL_RES *res; ++ MYSQL_ROW row; ++ char *perf; ++ perf = strdup (""); ++ ++ char *error = NULL; ++ char slaveresult[SLAVERESULTSIZE]; ++ ++ setlocale (LC_ALL, ""); ++ bindtextdomain (PACKAGE, LOCALEDIR); ++ textdomain (PACKAGE); ++ ++ /* Parse extra opts if any */ ++ argv = np_extra_opts (&argc, argv, progname); ++ ++ if (process_arguments (argc, argv) == ERROR) ++ usage4 (_("Could not parse arguments")); ++ ++ /* initialize mysql */ ++ mysql_init (&mysql); ++ ++ if (opt_file != NULL) ++ mysql_options (&mysql, MYSQL_READ_DEFAULT_FILE, opt_file); ++ ++ if (opt_group != NULL) ++ mysql_options (&mysql, MYSQL_READ_DEFAULT_GROUP, opt_group); ++ else ++ mysql_options (&mysql, MYSQL_READ_DEFAULT_GROUP, "client"); ++ ++ if (ssl) ++ mysql_ssl_set (&mysql, key, cert, ca_cert, ca_dir, ciphers); ++ ++ if (!mysql_real_connect ++ (&mysql, db_host, db_user, db_pass, "", db_port, db_socket, 0)) ++ { ++ if (mysql_errno (&mysql) == CR_UNKNOWN_HOST) ++ die (STATE_WARNING, "%s\n", mysql_error (&mysql)); ++ else if (mysql_errno (&mysql) == CR_VERSION_ERROR) ++ die (STATE_WARNING, "%s\n", mysql_error (&mysql)); ++ else if (mysql_errno (&mysql) == CR_OUT_OF_MEMORY) ++ die (STATE_WARNING, "%s\n", mysql_error (&mysql)); ++ else if (mysql_errno (&mysql) == CR_IPSOCK_ERROR) ++ die (STATE_WARNING, "%s\n", mysql_error (&mysql)); ++ else if (mysql_errno (&mysql) == CR_SOCKET_CREATE_ERROR) ++ die (STATE_WARNING, "%s\n", mysql_error (&mysql)); ++ else ++ die (STATE_CRITICAL, "%s\n", mysql_error (&mysql)); ++ } ++ ++ if (connection_name != NULL && strcmp (connection_name, "") != 0) ++ { ++ xasprintf (&query, "show slave '%s' status", connection_name); ++ } ++ else ++ { ++ xasprintf (&query, "show slave status"); ++ } ++ ++ if (mysql_query (&mysql, query) != 0) ++ { ++ error = strdup (mysql_error (&mysql)); ++ mysql_close (&mysql); ++ die (STATE_CRITICAL, _("slave query error: %s\n"), error); ++ } ++ ++ if ((res = mysql_store_result (&mysql)) == NULL) ++ { ++ error = strdup (mysql_error (&mysql)); ++ 
mysql_close (&mysql); ++ die (STATE_CRITICAL, _("slave store_result error: %s\n"), error); ++ } ++ ++ /* Check there is some data */ ++ if (mysql_num_rows (res) == 0) ++ { ++ mysql_close (&mysql); ++ die (STATE_WARNING, "%s\n", _("No slaves defined")); ++ } ++ ++ /* fetch the first row */ ++ if ((row = mysql_fetch_row (res)) == NULL) ++ { ++ error = strdup (mysql_error (&mysql)); ++ mysql_free_result (res); ++ mysql_close (&mysql); ++ die (STATE_CRITICAL, _("slave fetch row error: %s\n"), error); ++ } ++ ++ const char *last_io_error = NULL; ++ const char *last_sql_error = NULL; ++ const char *seconds_behind_master = NULL; ++ const char *slave_io = NULL; ++ const char *slave_sql = NULL; ++ MYSQL_FIELD *fields; ++ ++ int i, num_fields; ++ num_fields = mysql_num_fields (res); ++ fields = mysql_fetch_fields (res); ++ for (i = 0; i < num_fields; i++) ++ { ++ if (strcmp (fields[i].name, "Last_IO_Error") == 0 && row[i] ++ && row[i][0]) ++ { ++ last_io_error = row[i]; ++ continue; ++ } ++ if (strcmp (fields[i].name, "Last_SQL_Error") == 0 && row[i] ++ && row[i][0]) ++ { ++ last_sql_error = row[i]; ++ continue; ++ } ++ if (strcmp (fields[i].name, "Slave_IO_Running") == 0) ++ { ++ slave_io = row[i]; ++ continue; ++ } ++ if (strcmp (fields[i].name, "Slave_SQL_Running") == 0) ++ { ++ slave_sql = row[i]; ++ continue; ++ } ++ if (strcmp (fields[i].name, "Seconds_Behind_Master") == 0) ++ { ++ seconds_behind_master = row[i]; ++ continue; ++ } ++ } ++ ++ /* Check if slave status is available */ ++ if ((slave_io == NULL) || (slave_sql == NULL)) ++ { ++ mysql_free_result (res); ++ mysql_close (&mysql); ++ die (STATE_CRITICAL, "Slave status unavailable\n"); ++ } ++ ++ const char *last_error; ++ if (last_sql_error) ++ last_error = last_sql_error; ++ else if (last_io_error) ++ last_error = last_io_error; ++ else ++ last_error = NULL; ++ ++ if ((seconds_behind_master == NULL) ++ || (strcmp (seconds_behind_master, "NULL") == 0)) ++ seconds_behind_master = "N/A"; ++ ++ /* Save slave status in slaveresult */ ++ snprintf (slaveresult, SLAVERESULTSIZE, ++ "Slave IO: %s, Slave SQL: %s, %s: %s", ++ slave_io, slave_sql, ++ (last_error ? "Last Error" : "Seconds Behind Master"), ++ (last_error ? 
last_error : seconds_behind_master)); ++ ++ if (strcmp (slave_io, "Yes") != 0 || strcmp (slave_sql, "Yes") != 0) ++ { ++ mysql_free_result (res); ++ mysql_close (&mysql); ++ if (last_io_error || last_sql_error) ++ { ++ die (STATE_CRITICAL, "%s\n", slaveresult); ++ } ++ else ++ { ++ die (STATE_WARNING, "%s\n", slaveresult); ++ }; ++ } ++ ++ /* Check Seconds Behind against threshold */ ++ if (strcmp (seconds_behind_master, "N/A") != 0) ++ { ++ double value = atof (seconds_behind_master); ++ int status; ++ ++ status = get_status (value, my_threshold); ++ ++ xasprintf (&perf, "%s %s", perf, ++ fperfdata ("lag", value, "s", TRUE, ++ (double) warning_time, TRUE, ++ (double) critical_time, FALSE, 0, FALSE, 0)); ++ ++ if (status == STATE_WARNING) ++ { ++ printf ("LAG %s: %s|%s\n", _("WARNING"), slaveresult, perf); ++ exit (STATE_WARNING); ++ } ++ else if (status == STATE_CRITICAL) ++ { ++ printf ("LAG %s: %s|%s\n", _("CRITICAL"), slaveresult, perf); ++ exit (STATE_CRITICAL); ++ } ++ } ++ ++ mysql_free_result (res); ++ mysql_close (&mysql); ++ ++ printf ("%s|%s\n", slaveresult, perf); ++ return STATE_OK; ++} ++ ++ ++int ++process_arguments (int argc, char **argv) ++{ ++ int c; ++ char *warning = NULL; ++ char *critical = NULL; ++ ++ int option = 0; ++ static struct option longopts[] = { ++ {"ca-cert", optional_argument, 0, 'C'}, ++ {"ca-dir", required_argument, 0, 'D'}, ++ {"cert", required_argument, 0, 'a'}, ++ {"ciphers", required_argument, 0, 'L'}, ++ {"connection-name", required_argument, 0, 'N'}, ++ {"critical", required_argument, 0, 'c'}, ++ {"file", required_argument, 0, 'f'}, ++ {"group", required_argument, 0, 'g'}, ++ {"help", no_argument, 0, 'h'}, ++ {"hostname", required_argument, 0, 'H'}, ++ {"key", required_argument, 0, 'k'}, ++ {"password", required_argument, 0, 'p'}, ++ {"port", required_argument, 0, 'P'}, ++ {"socket", required_argument, 0, 's'}, ++ {"ssl", no_argument, 0, 'l'}, ++ {"username", required_argument, 0, 'u'}, ++ {"version", no_argument, 0, 'V'}, ++ {"warning", required_argument, 0, 'w'}, ++ {0, 0, 0, 0} ++ }; ++ ++ if (argc < 1) ++ return ERROR; ++ ++ while (1) ++ { ++ c = ++ getopt_long (argc, argv, "hlVnSP:p:u:H:s:c:w:a:k:C:D:L:f:g:N:", ++ longopts, &option); ++ ++ if (c == -1 || c == EOF) ++ break; ++ ++ switch (c) ++ { ++ case 'H': /* hostname */ ++ if (is_host (optarg)) ++ { ++ db_host = optarg; ++ } ++ else ++ { ++ usage2 (_("Invalid hostname/address"), optarg); ++ } ++ break; ++ case 's': /* socket */ ++ db_socket = optarg; ++ break; ++ case 'N': ++ connection_name = optarg; ++ break; ++ case 'l': ++ ssl = true; ++ break; ++ case 'C': ++ ca_cert = optarg; ++ break; ++ case 'a': ++ cert = optarg; ++ break; ++ case 'k': ++ key = optarg; ++ break; ++ case 'D': ++ ca_dir = optarg; ++ break; ++ case 'L': ++ ciphers = optarg; ++ break; ++ case 'u': /* username */ ++ db_user = optarg; ++ break; ++ case 'p': /* authentication information: password */ ++ db_pass = strdup (optarg); ++ ++ /* Delete the password from process list */ ++ while (*optarg != '\0') ++ { ++ *optarg = 'X'; ++ optarg++; ++ } ++ break; ++ case 'f': /* client options file */ ++ opt_file = optarg; ++ break; ++ case 'g': /* client options group */ ++ opt_group = optarg; ++ break; ++ case 'P': /* critical time threshold */ ++ db_port = atoi (optarg); ++ break; ++ case 'w': ++ warning = optarg; ++ warning_time = strtod (warning, NULL); ++ break; ++ case 'c': ++ critical = optarg; ++ critical_time = strtod (critical, NULL); ++ break; ++ case 'V': /* version */ ++ print_revision (progname, NP_VERSION); ++ 
exit (STATE_OK); ++ case 'h': /* help */ ++ print_help (); ++ exit (STATE_OK); ++ case '?': /* help */ ++ usage5 (); ++ } ++ } ++ ++ c = optind; ++ ++ set_thresholds (&my_threshold, warning, critical); ++ ++ while (argc > c) ++ { ++ ++ if (db_host == NULL) ++ if (is_host (argv[c])) ++ { ++ db_host = argv[c++]; ++ } ++ else ++ { ++ usage2 (_("Invalid hostname/address"), argv[c]); ++ } ++ else if (db_user == NULL) ++ db_user = argv[c++]; ++ else if (db_pass == NULL) ++ db_pass = argv[c++]; ++ else if (is_intnonneg (argv[c])) ++ db_port = atoi (argv[c++]); ++ else ++ break; ++ } ++ ++ return validate_arguments (); ++} ++ ++ ++int ++validate_arguments (void) ++{ ++ if (db_user == NULL) ++ db_user = strdup (""); ++ ++ if (db_host == NULL) ++ db_host = strdup (""); ++ ++ return OK; ++} ++ ++ ++void ++print_help (void) ++{ ++ char *myport; ++ xasprintf (&myport, "%d", MYSQL_PORT); ++ ++ print_revision (progname, NP_VERSION); ++ ++ printf (_(COPYRIGHT), copyright, email); ++ ++ printf ("%s\n", _("This program tests MySQL/MariaDB slaves")); ++ ++ printf ("\n\n"); ++ ++ print_usage (); ++ ++ printf (UT_HELP_VRSN); ++ printf (UT_EXTRA_OPTS); ++ ++ printf (UT_HOST_PORT, 'P', myport); ++ ++ printf (" %s\n", "-s, --socket=STRING"); ++ printf (" %s\n", ++ _("Use the specified socket (has no effect if -H is used)")); ++ ++ printf (" %s\n", "-f, --file=STRING"); ++ printf (" %s\n", _("Read from the specified client options file")); ++ printf (" %s\n", "-g, --group=STRING"); ++ printf (" %s\n", _("Use a client options group")); ++ printf (" %s\n", "-u, --username=STRING"); ++ printf (" %s\n", _("Connect using the indicated username")); ++ printf (" %s\n", "-p, --password=STRING"); ++ printf (" %s\n", ++ _("Use the indicated password to authenticate the connection")); ++ printf (" ==> %s <==\n", ++ _("IMPORTANT: THIS FORM OF AUTHENTICATION IS NOT SECURE!!!")); ++ printf (" %s\n", ++ _ ++ ("Your clear-text password could be visible as a process table entry")); ++ printf (" %s\n", "-N, --connection-name"); ++ printf (" %s\n", _("Connection name if using multi-source replication")); ++ ++ printf (" %s\n", "-w, --warning"); ++ printf (" %s\n", ++ _ ++ ("Exit with WARNING status if slave server is more than INTEGER seconds")); ++ printf (" %s\n", _("behind master")); ++ printf (" %s\n", "-c, --critical"); ++ printf (" %s\n", ++ _ ++ ("Exit with CRITICAL status if slave server is more then INTEGER seconds")); ++ printf (" %s\n", _("behind master")); ++ printf (" %s\n", "-l, --ssl"); ++ printf (" %s\n", _("Use ssl encryptation")); ++ printf (" %s\n", "-C, --ca-cert=STRING"); ++ printf (" %s\n", _("Path to CA signing the cert")); ++ printf (" %s\n", "-a, --cert=STRING"); ++ printf (" %s\n", _("Path to SSL certificate")); ++ printf (" %s\n", "-k, --key=STRING"); ++ printf (" %s\n", _("Path to private SSL key")); ++ printf (" %s\n", "-D, --ca-dir=STRING"); ++ printf (" %s\n", _("Path to CA directory")); ++ printf (" %s\n", "-L, --ciphers=STRING"); ++ printf (" %s\n", _("List of valid SSL ciphers")); ++ ++ ++ printf ("\n"); ++ printf (" %s\n", ++ _ ++ ("There are no required arguments. By default, the local database is checked")); ++ printf (" %s\n", ++ _ ++ ("using the default unix socket. 
You can force TCP on localhost by using an")); ++ printf (" %s\n", ++ _("IP address or FQDN ('localhost' will use the socket as well).")); ++ ++ printf ("\n"); ++ printf ("%s\n", _("Notes:")); ++ printf (" %s\n", ++ _ ++ ("You must specify -p with an empty string to force an empty password,")); ++ printf (" %s\n", _("overriding any my.cnf settings.")); ++ ++ printf (UT_SUPPORT); ++} ++ ++ ++void ++print_usage (void) ++{ ++ printf ("%s\n", _("Usage:")); ++ printf (" %s [-H host] [-P port] [-s socket]\n", progname); ++ printf (" [-u user] [-p password] [-S] [-l] [-a cert] [-k key]\n"); ++ printf ++ (" [-C ca-cert] [-D ca-dir] [-L ciphers] [-f optfile] [-g group]\n"); ++} diff --git a/modules/pkgs/mydumper.nix b/modules/pkgs/mydumper.nix new file mode 100644 index 0000000..463cb22 --- /dev/null +++ b/modules/pkgs/mydumper.nix @@ -0,0 +1,17 @@ +{ stdenv, fetchbzr +, cmake, glib, mysql, openssl +, pcre, pkgconfig, zlib +}: + +stdenv.mkDerivation rec { + version = "0.9.2"; + name = "mydumper-${version}"; + + src = fetchbzr { + url = "lp:mydumper"; + rev = 188; + sha256 = "0kbhgbh6mqkxwbs5yd20s1k3h3f3jqp2i041dhmlrnzl6irgqbg5"; + }; + + buildInputs = [ cmake glib mysql.lib openssl pcre pkgconfig zlib ]; +} diff --git a/modules/pkgs/mywatch/cabal2nix.nix b/modules/pkgs/mywatch/cabal2nix.nix new file mode 100644 index 0000000..3dfbbcc --- /dev/null +++ b/modules/pkgs/mywatch/cabal2nix.nix @@ -0,0 +1,25 @@ +{ mkDerivation, aeson, base, bytestring, ConfigFile +, data-default-class, docopt, fast-logger, fetchgit, http-types +, interpolatedstring-perl6, MissingH, mtl, mysql, mysql-simple +, network, resource-pool, scotty, stdenv, text, unix +, unordered-containers, wai, wai-extra, wai-middleware-static, warp +}: +mkDerivation { + pname = "mywatch"; + version = "0.2.0"; + src = fetchgit { + url = "https://github.com/zalora/mywatch.git"; + sha256 = "f1ae1b776cdbc11da24819381d5d1fe057be3c5ef69314024c9e0fc043085cd2"; + rev = "afd12c0190f64527a320a99cc6df97f6cfca57d7"; + }; + isLibrary = false; + isExecutable = true; + executableHaskellDepends = [ + aeson base bytestring ConfigFile data-default-class docopt + fast-logger http-types interpolatedstring-perl6 MissingH mtl mysql + mysql-simple network resource-pool scotty text unix + unordered-containers wai wai-extra wai-middleware-static warp + ]; + description = "Web application to view and kill MySQL queries"; + license = stdenv.lib.licenses.mit; +} diff --git a/modules/pkgs/mywatch/default.nix b/modules/pkgs/mywatch/default.nix new file mode 100644 index 0000000..4afc645 --- /dev/null +++ b/modules/pkgs/mywatch/default.nix @@ -0,0 +1,4 @@ +{ stdenv, haskellPackages }: + +haskellPackages.callPackage ./cabal2nix.nix {} + diff --git a/modules/pkgs/nagios-plugins-rabbitmq/default.nix b/modules/pkgs/nagios-plugins-rabbitmq/default.nix new file mode 100644 index 0000000..6ecb2e4 --- /dev/null +++ b/modules/pkgs/nagios-plugins-rabbitmq/default.nix @@ -0,0 +1,36 @@ +{ fetchurl +, makeWrapper +, perl +, perlPackages +, stdenv +}: + +stdenv.mkDerivation rec { + version = "2.0.3"; + name = "nagios-plugins-rabbitmq-${version}"; + + src = fetchurl { + url = "https://github.com/nagios-plugins-rabbitmq/nagios-plugins-rabbitmq/archive/${version}.tar.gz"; + sha256 = "1fw40hzvb8sk5ss0hvrgv338lr019d2q9cc9ayy4hvk1c5bh3ljb"; + }; + + buildInputs = [ + makeWrapper + perl + perlPackages.JSON + perlPackages.LWPUserAgent + perlPackages.ModuleBuild + perlPackages.MonitoringPlugin + perlPackages.URI + ]; + + buildPhase = "perl Build.PL --prefix=$out; ./Build build"; + installPhase = 
'' + ./Build install + + for n in "$out/bin/"*; do + wrapProgram "$n" --prefix PERL5LIB : "$PERL5LIB" + done + ''; +} + diff --git a/modules/pkgs/probes.nix b/modules/pkgs/probes.nix new file mode 100644 index 0000000..650c996 --- /dev/null +++ b/modules/pkgs/probes.nix @@ -0,0 +1,28 @@ +{ stdenv, pkgs, lib }: + +let + plugins = [ + "check_disk" + "check_file_age" + "check_http" + "check_load" + "check_log" + "check_mysql" + "check_mysql_query" + "check_procs" + "check_swap" + "check_users" + ]; + +in stdenv.mkDerivation { + name = "local-monitoring-plugins"; + phases = [ "installPhase" ]; + installPhase = '' + mkdir -p $out/bin + ${lib.concatMapStringsSep "\n" (p: '' + cp -a ${pkgs.monitoringPlugins}/libexec/${p} $out/bin/${p} + '') plugins} + cp -a '${pkgs.check_mdstat}/bin/'* $out/bin/ + cp -a '${pkgs.check_systemd}/bin/'* $out/bin/ + ''; +} diff --git a/modules/pkgs/rdsdump/default.nix b/modules/pkgs/rdsdump/default.nix new file mode 100644 index 0000000..0efe033 --- /dev/null +++ b/modules/pkgs/rdsdump/default.nix @@ -0,0 +1,14 @@ +{ stdenv, bash, ... }: + +stdenv.mkDerivation { + name = "rdsdump"; + buildInputs = [ bash ]; + phases = [ "installPhase" ]; + installPhase = '' + mkdir -p $out/bin + cp -a ${./rdsdump.bash} $out/bin/rdsdump + chmod +x $out/bin/rdsdump + patchShebangs $out/bin/rdsdump + ''; +} + diff --git a/modules/pkgs/rdsdump/rdsdump.bash b/modules/pkgs/rdsdump/rdsdump.bash new file mode 100644 index 0000000..6b8cbf1 --- /dev/null +++ b/modules/pkgs/rdsdump/rdsdump.bash @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +set -euo pipefail + +mysql_args= +mysqldump_args= +master_data=0 +while [ $# -gt 0 ]; do + case $1 in + --host=*|--password=*|--user=*|\ + --defaults-file=*|--defaults-extra-file=*|\ + --ssl=*|--ssl-ca=*|--ssl-key=*|--ssl-cert=*|\ + -h?*|-u?*|-p?*) + mysql_args="$mysql_args $1" + mysqldump_args="$mysqldump_args $1" + shift 1;; + --host|--user|\ + --defaults-file|--defaults-extra-file|\ + --ssl-ca|--ssl-key|--ssl-cert|\ + -h|-u) + mysql_args="$mysql_args $1 $2" + mysqldump_args="$mysqldump_args $1 $2" + shift 2;; + --master-data=*) + master_data=$(echo "$1" | cut -d= -f2) + shift;; + --master-data) + master_data=$2 + shift 2;; + *) + mysqldump_args="$mysqldump_args $1" + shift;; + esac +done + +replica () { + mysql $mysql_args "$@" +} + +start_replication () { + replica -N -e "CALL mysql.rds_start_replication;" >&2 +} + +stop_replication () { + replica -N -e "CALL mysql.rds_stop_replication;" >&2 +} + +trap 'start_replication' EXIT +stop_replication + +if [ "$master_data" -gt 0 ]; then +if [ "$master_data" -eq 2 ]; then + printf '-- ' +fi +replica -e 'SHOW SLAVE STATUS\G' | awk -f <(cat - <<- 'AWK' + /\<Exec_Master_Log_Pos\>/ { log_pos = $2 }; + /\<Relay_Master_Log_File\>/ { log_file = $2 }; + END { + printf "CHANGE MASTER TO MASTER_LOG_FILE='%s', MASTER_LOG_POS=%d;\n", log_file, log_pos + } +AWK +) +fi + +mysqldump $mysqldump_args & +sleep 30 + +start_replication +trap - EXIT + +wait diff --git a/modules/pkgs/sproxy-web/cabal2nix.nix b/modules/pkgs/sproxy-web/cabal2nix.nix new file mode 100644 index 0000000..0f475f2 --- /dev/null +++ b/modules/pkgs/sproxy-web/cabal2nix.nix @@ -0,0 +1,26 @@ +{ mkDerivation, aeson, base, blaze-html, blaze-markup, bytestring +, data-default-class, directory, docopt, fast-logger, fetchgit +, filepath, http-types, interpolatedstring-perl6, mtl, network +, postgresql-simple, resource-pool, scotty, stdenv, text, unix, wai +, wai-extra, wai-middleware-static, warp +}: +mkDerivation { + pname = "sproxy-web"; + version = "0.4.1"; + src = 
fetchgit { + url = "https://github.com/zalora/sproxy-web.git"; + sha256 = "529597548a3fbd0999acdbd3a382ed4f152043db01f275b08d4c4883b8fe5f5f"; + rev = "5d7ee61deb55359ae8ce6013dd7fe81bcdc0f9a9"; + }; + isLibrary = false; + isExecutable = true; + executableHaskellDepends = [ + aeson base blaze-html blaze-markup bytestring data-default-class + directory docopt fast-logger filepath http-types + interpolatedstring-perl6 mtl network postgresql-simple + resource-pool scotty text unix wai wai-extra wai-middleware-static + warp + ]; + description = "Web interface to sproxy database"; + license = stdenv.lib.licenses.mit; +} diff --git a/modules/pkgs/sproxy-web/default.nix b/modules/pkgs/sproxy-web/default.nix new file mode 100644 index 0000000..4afc645 --- /dev/null +++ b/modules/pkgs/sproxy-web/default.nix @@ -0,0 +1,4 @@ +{ stdenv, haskellPackages }: + +haskellPackages.callPackage ./cabal2nix.nix {} + diff --git a/modules/pkgs/sproxy/cabal2nix.nix b/modules/pkgs/sproxy/cabal2nix.nix new file mode 100644 index 0000000..1a7b3f6 --- /dev/null +++ b/modules/pkgs/sproxy/cabal2nix.nix @@ -0,0 +1,25 @@ +{ mkDerivation, aeson, attoparsec, base, base64-bytestring +, bytestring, containers, data-default, docopt, entropy, fetchgit +, http-conduit, http-kit, http-types, interpolatedstring-perl6 +, network, postgresql-simple, resource-pool, SHA, split, stdenv +, text, time, tls, unix, utf8-string, x509, yaml +}: +mkDerivation { + pname = "sproxy"; + version = "0.9.8"; + src = fetchgit { + url = "https://github.com/zalora/sproxy.git"; + sha256 = "40d86e00cfbdc96033ca53e773a7467cd3e2206856d27e4a24076d9449c46ca7"; + rev = "507a0984d4ce01ef0d83e7cda37cba5c80a33b75"; + }; + isLibrary = false; + isExecutable = true; + executableHaskellDepends = [ + aeson attoparsec base base64-bytestring bytestring containers + data-default docopt entropy http-conduit http-kit http-types + interpolatedstring-perl6 network postgresql-simple resource-pool + SHA split text time tls unix utf8-string x509 yaml + ]; + description = "HTTP proxy for authenticating users via OAuth2"; + license = stdenv.lib.licenses.mit; +} diff --git a/modules/pkgs/sproxy/default.nix b/modules/pkgs/sproxy/default.nix new file mode 100644 index 0000000..4afc645 --- /dev/null +++ b/modules/pkgs/sproxy/default.nix @@ -0,0 +1,4 @@ +{ stdenv, haskellPackages }: + +haskellPackages.callPackage ./cabal2nix.nix {} + diff --git a/modules/pkgs/writeBashScript.nix b/modules/pkgs/writeBashScript.nix new file mode 100644 index 0000000..15e81c5 --- /dev/null +++ b/modules/pkgs/writeBashScript.nix @@ -0,0 +1,13 @@ +{ bash, writeScript, haskellPackages, runCommand }: + +name: text: +let + f = writeScript name '' + #!${bash}/bin/bash + ${text} + ''; +in +runCommand name { } '' + ${haskellPackages.ShellCheck}/bin/shellcheck ${f} + cp -a ${f} $out +'' diff --git a/modules/pkgs/writeBashScriptBin.nix b/modules/pkgs/writeBashScriptBin.nix new file mode 100644 index 0000000..33b9bf6 --- /dev/null +++ b/modules/pkgs/writeBashScriptBin.nix @@ -0,0 +1,7 @@ +{ writeBashScript, runCommand }: + +name: text: +runCommand name { } '' + mkdir -p $out/bin + cp -a ${writeBashScript name text} $out/bin/${name} +'' diff --git a/modules/pkgs/writePHPFile.nix b/modules/pkgs/writePHPFile.nix new file mode 100644 index 0000000..e031efe --- /dev/null +++ b/modules/pkgs/writePHPFile.nix @@ -0,0 +1,10 @@ +{ php, writeText, runCommand }: + +name: text: +let + f = writeText name text; +in +runCommand name { } '' + ${php}/bin/php -l '${f}' + cp -a '${f}' $out +'' diff --git a/modules/pkgs/writeXML.nix 
b/modules/pkgs/writeXML.nix new file mode 100644 index 0000000..1cfc075 --- /dev/null +++ b/modules/pkgs/writeXML.nix @@ -0,0 +1,11 @@ +{ writeText, runCommand, libxml2 }: + +name: text: + let + f = writeText "${name}.raw" text; + in + runCommand name { } '' + ${libxml2}/bin/xmllint \ + --format --noblanks --nocdata ${f} \ + > $out + '' diff --git a/modules/system/default.nix b/modules/system/default.nix new file mode 100644 index 0000000..240d970 --- /dev/null +++ b/modules/system/default.nix @@ -0,0 +1,11 @@ +{lib, ... }: + +let + all = lib.filterAttrs + ( n: _: n != "default.nix" && ! lib.hasPrefix "." n ) + (builtins.readDir ./.); + +in { + imports = map (p: ./. + "/${p}") ( builtins.attrNames all ); +} + diff --git a/modules/system/firewall.nix b/modules/system/firewall.nix new file mode 100644 index 0000000..289f635 --- /dev/null +++ b/modules/system/firewall.nix @@ -0,0 +1,52 @@ +{ config, lib, ... }: + +let + inherit (builtins) length toString replaceStrings; + inherit (lib) flatten concatMapStringsSep optionalString splitString mkOption; + inherit (lib.types) listOf int either submodule enum str; + + inherit (config.nixsap.system.firewall) whitelist; + + iptablesAllow = { dport, protocol, source, comment, ... }: + let + ports = concatMapStringsSep "," toString (flatten [dport]); + iptables = if 1 < length (splitString ":" source) + then "ip6tables" else "iptables"; + in "${iptables} -w -A nixos-fw -m multiport " + + "-p ${protocol} --dport ${ports} -s ${source} -j nixos-fw-accept" + + optionalString (comment != "") + " -m comment --comment '${replaceStrings ["'"] ["'\\''"] comment} '"; + +in { + options.nixsap.system.firewall.whitelist = mkOption { + description = "Inbound connection rules (whitelist)"; + default = []; + type = listOf (submodule { + options = { + dport = mkOption { + description = "Destination port or list of ports"; + type = either int (listOf int); + }; + source = mkOption { + description = "Source specification: a network IP address (with optional /mask)"; + type = str; + }; + protocol = mkOption { + description = "The network protocol"; + type = enum [ "tcp" "udp" ]; + default = "tcp"; + }; + comment = mkOption { + description = "Free-form comment"; + type = str; + default = ""; + }; + }; + }); + }; + + config = { + networking.firewall.extraCommands = + concatMapStringsSep "\n" iptablesAllow whitelist; + }; +} diff --git a/modules/system/raid0.nix b/modules/system/raid0.nix new file mode 100644 index 0000000..d260e29 --- /dev/null +++ b/modules/system/raid0.nix @@ -0,0 +1,134 @@ +{ config, pkgs, lib, ... 
}: + +with lib; +with lib.types; +with builtins; + +let + groups = filterAttrs (n: _: n != "_module") config.nixsap.system.lvm.raid0; + + createLV = vg: lv: s: opts: + let + new = toString s; + stripes = toString opts.stripes; + sizeSpec = if opts.units == "%" + then "--extents ${new}%VG" + else "--size ${new}${opts.units}"; + scale = { + "%" = "* 100 / $(vgs --unit b --noheadings --nosuffix --options vg_size ${vg})"; + "M" = "/ ${toString (1000 * 1000)}"; + "m" = "/ ${toString (1024 * 1024)}"; + "G" = "/ ${toString (1000 * 1000 * 1000)}"; + "g" = "/ ${toString (1024 * 1024 * 1024)}"; + "T" = "/ ${toString (1000 * 1000 * 1000 * 1000)}"; + "t" = "/ ${toString (1024 * 1024 * 1024 * 1024)}"; + }; + in pkgs.writeBashScript "raid0-create-${vg}-${lv}" '' + set -eu + device=/dev/${vg}/${lv} + + lv_size=$(lvs --unit b --noheadings --nosuffix --options lv_size "$device" || echo 0) + old=$(( lv_size ${scale."${opts.units}"} )) + + if (( ${new} == old )) ; then + exit 0 + elif (( old == 0 )); then + lvcreate ${vg} --name ${lv} ${sizeSpec} --stripes ${stripes} + elif (( ${new} < old )) ; then + echo "Cannot shrink volume $device from $old ${opts.units} to ${new} ${opts.units}" >&2 + exit 1 + else + lvextend "$device" ${sizeSpec} + resize2fs "$device" + fi + ''; + + createVG = vg: pv: pkgs.writeBashScript "raid0-create-vg-${vg}" '' + set -eu + for pv in ${toString pv}; do + type=$(blkid -p -s TYPE -o value "$pv" || true) + if [ "$type" != LVM2_member ]; then + pvcreate "$pv" + if ! vgs ${vg}; then + vgcreate ${vg} "$pv" + else + vgextend ${vg} "$pv" + fi + fi + done + ''; + + mkRaidService = vg: opts: + let + ExecStart = pkgs.writeBashScript "raid0-${vg}" '' + set -eu + ${createVG vg opts.physical} + ${concatStringsSep "\n" ( + mapAttrsToList (v: s: + "${createLV vg (baseNameOf v) s opts}") + opts.fileSystems + )} + vgchange -ay ${vg} + udevadm trigger --action=add + ''; + + in nameValuePair "raid0-${vg}" rec { + wantedBy = map (v: "dev-${vg}-${baseNameOf v}.device") (attrNames opts.fileSystems); + requires = map (pv: replaceStrings ["/"] ["-"] (removePrefix "/" pv) + ".device") opts.physical; + after = requires; + before = wantedBy; + unitConfig.DefaultDependencies = false; + path = with pkgs; [ utillinux lvm2 e2fsprogs ]; + serviceConfig = { + inherit ExecStart; + RemainAfterExit = true; + Type = "oneshot"; + }; + }; + +in { + options.nixsap.system = { + lvm.raid0 = mkOption { + description = "Set of LVM2 volume groups"; + default = {}; + type = attrsOf (submodule { + options = { + stripes = mkOption { + description = "Number of stripes"; + type = int; + example = 2; + }; + physical = mkOption { + description = "List of physical devices (must be even for stripes)"; + example = [ "/dev/sdb" "/dev/sdc" ]; + type = listOf path; + }; + fileSystems = mkOption { + description = "Filesystems and their sizes"; + type = attrsOf int; + example = { "/mariadb/db" = 100; }; + }; + units = mkOption { + description = "Units of size"; + type = enum [ "%" "m" "g" "t" "M" "G" "T"]; + }; + }; + }); + }; + }; + + config = { + systemd.services = mapAttrs' mkRaidService groups; + + fileSystems = foldl (a: b: a//b) {} ( + mapAttrsToList (vg: opts: genAttrs (attrNames opts.fileSystems) + (fs: { + fsType = "ext4"; + autoFormat = true; + device = "/dev/${vg}/${baseNameOf fs}"; + }) + ) groups + ); + }; +} + diff --git a/modules/system/sysops.nix b/modules/system/sysops.nix new file mode 100644 index 0000000..ccf6d0b --- /dev/null +++ b/modules/system/sysops.nix @@ -0,0 +1,35 @@ +{ config, lib, ...}: +let + + inherit (lib) 
concatMapStringsSep concatStringsSep mkOption types; + inherit (types) str listOf; + + bindir = "/run/current-system/sw/bin"; + + commands = concatStringsSep ", " ( + [ + "${bindir}/du *" + "${bindir}/iftop" + "${bindir}/iotop" + "${bindir}/ip6tables -L*" + "${bindir}/ipsec *" + "${bindir}/iptables -L*" + "${bindir}/journalctl *" + "${bindir}/lsof *" + "${bindir}/mtr *" + "${bindir}/nix-collect-garbage *" + "${bindir}/nmap *" + "${bindir}/tcpdump *" + "${bindir}/traceroute *" + ] ++ map (c: "${bindir}/systemctl ${c} *") + [ "kill" "reload" "restart" "start" "status" "stop" ] + ); + +in { + + config = { + security.sudo.extraConfig = '' + %wheel ALL=(ALL) NOPASSWD: ${commands} + ''; + }; +} diff --git a/modules/system/users.nix b/modules/system/users.nix new file mode 100644 index 0000000..022a7e7 --- /dev/null +++ b/modules/system/users.nix @@ -0,0 +1,83 @@ +{ config, pkgs, lib, ... }: + +let + + inherit (builtins) + genList hashString mul substring ; + + inherit (lib) + foldl genAttrs imap mkOption stringToCharacters toLower + types unique ; + + inherit (types) + listOf str ; + + uid = name: + let + dec = { + "0" = 0; "1" = 1; "2" = 2; "3" = 3; + "4" = 4; "5" = 5; "6" = 6; "7" = 7; + "8" = 8; "9" = 9; "a" = 10; "b" = 11; + "c" = 12; "d" = 13; "e" = 14; "f" = 15; + }; + base = 1000000000; # 2^32 > base + 16^7 + hex = toLower (substring 0 7 (hashString "sha1" name)); + pow = b: n: foldl mul 1 (genList (_: b) n); + digits = imap (i: d: {m = pow 16 (i - 1); d = d;}) (stringToCharacters hex); + f = a: {m, d}: a + m * dec.${d}; + + in foldl f base digits; + + daemons = config.nixsap.system.users.daemons; + normal = config.nixsap.system.users.normal; + groups = config.nixsap.system.groups; + + mkGroup = name: { gid = uid name; }; + mkDaemonUser = name: + { + isNormalUser = false; + uid = uid name; + group = name; + }; + + mkNormalUser = name: + { + isNormalUser = true; + uid = uid name; + }; + +in { + options.nixsap.system = { + users.daemons = mkOption { + type = listOf str; + description = "List of system users with automatic UID and group"; + default = []; + }; + users.normal = mkOption { + type = listOf str; + description = "List of regular users with automatic UID"; + default = []; + }; + users.sysops = mkOption { + description = '' + List of local users with special roles in applications or system-wide. + The users in this list are not create automatically. + ''; + type = listOf str; + default = []; + }; + groups = mkOption { + type = listOf str; + description = "List of groups with automatic GID"; + default = []; + }; + }; + + # XXX: Modules for automatic unicity of user names: + imports = [ + { users.groups = genAttrs (unique (daemons ++ groups)) mkGroup; } + { users.users = genAttrs daemons mkDaemonUser; } + { users.users = genAttrs normal mkNormalUser; } + ]; +} + diff --git a/modules/system/worldWritableDirs.nix b/modules/system/worldWritableDirs.nix new file mode 100644 index 0000000..9899696 --- /dev/null +++ b/modules/system/worldWritableDirs.nix @@ -0,0 +1,25 @@ +{ config, pkgs, lib, ... 
}: +let + dirs = config.nixsap.system.worldWritableDirs; + +in { + options.nixsap.system.worldWritableDirs = lib.mkOption { + type = lib.types.listOf lib.types.path; + description = "These dirs will be chmod'ed 1777"; + default = [ "/tmp" "/var/tmp" ]; + }; + + config = lib.mkIf (dirs != []) { + systemd.services.chmod1777 = { + description = "Make some dirs world-writable"; + unitConfig.RequiresMountsFor = dirs; + before = [ "local-fs.target" ]; + wantedBy = [ "local-fs.target" ]; + serviceConfig = { + ExecStart = "${pkgs.coreutils}/bin/chmod -c 1777 ${lib.concatStringsSep " " dirs}"; + Type = "oneshot"; + RemainAfterExit = true; + }; + }; + }; +}
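
The commit above only declares modules and options; no example of their use appears in the diff. For orientation, here is a minimal, hypothetical host configuration exercising a few of the options introduced in this commit. The option names are taken from the modules above (nixsap.apps.cli, nixsap.system.firewall.whitelist, nixsap.system.lvm.raid0, nixsap.system.users); the concrete values — ports, devices, user and group names — are illustrative only and not part of the commit.

{ ... }:
{
  # Setuid wrapper from modules/apps/cli.nix: only members of the "mysql"
  # group may run it; it switches to the "mysql" user and execs the command.
  # (Both the wrapper name and the target path are hypothetical.)
  nixsap.apps.cli.mysql-admin = {
    user = "mysql";
    command = "/run/current-system/sw/bin/mysql";
  };

  # Inbound firewall whitelist from modules/system/firewall.nix.
  nixsap.system.firewall.whitelist = [
    { dport = [ 80 443 ]; source = "10.0.0.0/8"; comment = "internal HTTP(S)"; }
  ];

  # Striped LVM volume group from modules/system/raid0.nix
  # (mirrors the examples given in the option declarations).
  nixsap.system.lvm.raid0.db = {
    stripes = 2;
    physical = [ "/dev/sdb" "/dev/sdc" ];
    units = "g";
    fileSystems = { "/mariadb/db" = 100; };
  };

  # Users and groups with deterministic IDs from modules/system/users.nix.
  nixsap.system.users.normal = [ "alice" ];
  nixsap.system.groups = [ "keys" ];
}

Note that users.nix derives UIDs and GIDs from a SHA-1 hash of the name (values start at 1000000000), so user and group declarations can be merged from independent modules without hand-assigning numeric IDs, and cli.nix automatically appends each wrapper's user to nixsap.system.users.daemons, so the "mysql" account in this sketch would be created for free.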