Format the entire project.
parent 1dc50ae17d
commit 6f9db5e3a4
115 changed files with 3451 additions and 2901 deletions
@@ -1,14 +1,19 @@
{ options, config, lib, fn, pkgs, ... }:
{
options,
config,
lib,
fn,
pkgs,
...
}:
with builtins;
with lib;
let
with lib; let
cfg = config.machine;
in mkIf (elem "acme" cfg.services) {
security.acme = {
# see https://letsencrypt.org/repository/
acceptTerms = true;
defaults.email = "${(elemAt cfg.mailAccounts 0).name}@${cfg.domain}";
};
}
in
mkIf (elem "acme" cfg.services) {
security.acme = {
# see https://letsencrypt.org/repository/
acceptTerms = true;
defaults.email = "${(elemAt cfg.mailAccounts 0).name}@${cfg.domain}";
};
}
@@ -1,28 +1,36 @@
# This Configuration is meant for local DNS setups only!
{ options, config, lib, fn, pkgs, ... }:
{
options,
config,
lib,
fn,
pkgs,
...
}:
with builtins;
with lib;
let
with lib; let
cfg = config.machine;
in mkIf (elem "bind" cfg.services) {
services.bind = {
enable = true;
listenOn = [ "127.0.0.1" ];
forwarders = [
# Cloudflare CDN
"1.1.1.1" "1.0.0.1"
#CCC DNS
"204.152.184.76" "159.203.38.175" "207.148.83.241"
];
# TODO: add DNSSEC
extraOptions = ''
dnssec-validation auto;
in
mkIf (elem "bind" cfg.services) {
services.bind = {
enable = true;
listenOn = ["127.0.0.1"];
forwarders = [
# Cloudflare CDN
"1.1.1.1"
"1.0.0.1"
#CCC DNS
"204.152.184.76"
"159.203.38.175"
"207.148.83.241"
];
# TODO: add DNSSEC
extraOptions = ''
dnssec-validation auto;
recursion yes;
allow-recursion { 127.0.0.1; };
version none;
'';
};
}
recursion yes;
allow-recursion { 127.0.0.1; };
version none;
'';
};
}
@@ -1,14 +1,15 @@
{ config, lib, ... }:
{
config,
lib,
...
}:
with lib;
mkIf (elem "containers" config.machine.services) {
containers.CDServer = {
privateNetwork = true;
hostAddress = "192.168.100.10";
localAddress = "192.168.100.11";
config =
{
mkIf (elem "containers" config.machine.services) {
containers.CDServer = {
privateNetwork = true;
hostAddress = "192.168.100.10";
localAddress = "192.168.100.11";
config = {
imports = [
../machines/CDServer/options.nix
./default.nix
@@ -16,8 +17,8 @@ mkIf (elem "containers" config.machine.services) {
../pkgs/nixpkgs.nix
../pkgs/pkgsets.nix
];
services.nixosManual.showManual = false;
services.ntp.enable = false;
services.nixosManual.showManual = false;
services.ntp.enable = false;
};
};
}
};
}
@@ -1,11 +1,14 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
mkIf (elem "cups" config.machine.services) {
services.printing = {
enable = true;
startWhenNeeded = true;
drivers = with pkgs; [ gutenprint hplip splix samsung-unified-linux-driver ];
};
}
mkIf (elem "cups" config.machine.services) {
services.printing = {
enable = true;
startWhenNeeded = true;
drivers = with pkgs; [gutenprint hplip splix samsung-unified-linux-driver];
};
}
@@ -1,28 +1,36 @@
{ config, lib, fn, pkgs, ... }:
with lib;
let
{
config,
lib,
fn,
pkgs,
...
}:
with lib; let
cfg = config.machine;
desktopFiles = fn.lst { p = (toString ./desktop); b = true; };
in {
imports = desktopFiles;
} // mkIf (elem "desktop" cfg.services) {
services.gvfs.enable = true;
services.xserver = {
layout = "de";
libinput = {
enable = true;
touchpad = {
tapping = true;
disableWhileTyping = false;
naturalScrolling = false;
horizontalScrolling = true;
desktopFiles = fn.lst {
p = toString ./desktop;
b = true;
};
in
{
imports = desktopFiles;
}
// mkIf (elem "desktop" cfg.services) {
services.gvfs.enable = true;
services.xserver = {
layout = "de";
libinput = {
enable = true;
touchpad = {
tapping = true;
disableWhileTyping = false;
naturalScrolling = false;
horizontalScrolling = true;
};
};
};
};
services.udev.extraRules = ''
ACTION=="add", SUBSYSTEM=="backlight", RUN+="${pkgs.coreutils}/bin/chgrp video /sys/class/backlight/%k/brightness"
ACTION=="add", SUBSYSTEM=="backlight", RUN+="${pkgs.coreutils}/bin/chmod 664 /sys/class/backlight/%k/brightness"
'';
}
services.udev.extraRules = ''
ACTION=="add", SUBSYSTEM=="backlight", RUN+="${pkgs.coreutils}/bin/chgrp video /sys/class/backlight/%k/brightness"
ACTION=="add", SUBSYSTEM=="backlight", RUN+="${pkgs.coreutils}/bin/chmod 664 /sys/class/backlight/%k/brightness"
'';
}
@@ -1,21 +1,24 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
mkIf (elem "desktop::i3" config.machine.services) {
services.xserver = {
enable = true;
windowManager.i3 = {
mkIf (elem "desktop::i3" config.machine.services) {
services.xserver = {
enable = true;
configFile = (import ../../config/etc/i3/config.nix { inherit pkgs; });
extraPackages = with pkgs; [
dmenu
file
i3lock
i3status
xdg-user-dirs
];
windowManager.i3 = {
enable = true;
configFile = import ../../config/etc/i3/config.nix {inherit pkgs;};
extraPackages = with pkgs; [
dmenu
file
i3lock
i3status
xdg-user-dirs
];
};
};
};
machine.pkgsets.python3.pkgs = with pkgs.python310Packages; [ py3status pytz tzlocal ];
}
machine.pkgsets.python3.pkgs = with pkgs.python310Packages; [py3status pytz tzlocal];
}
@@ -1,33 +1,39 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
mkIf (elem "desktop::sway" config.machine.services) {
programs.sway = {
enable = true;
extraPackages = with pkgs; [
xwayland
file i3status dmenu
qt5.qtwayland
grim slurp
swaylock
swayidle
light
mako
wl-clipboard
wf-recorder
];
extraSessionCommands = ''
export GDK_BACKEND=wayland
export CLUTTER_BACKEND=wayland
export SDL_VIDEODRIVER=wayland
# needs qt5.qtwayland in systemPackages
export QT_QPA_PLATFORM=wayland-egl
export QT_WAYLAND_FORCE_DPI=physical
export QT_WAYLAND_DISABLE_WINDOWDECORATION="1"
# Fix for some Java AWT applications (e.g. Android Studio),
# use this if they aren't displayed properly:
export _JAVA_AWT_WM_NONREPARENTING=1
'';
};
}
mkIf (elem "desktop::sway" config.machine.services) {
programs.sway = {
enable = true;
extraPackages = with pkgs; [
xwayland
file
i3status
dmenu
qt5.qtwayland
grim
slurp
swaylock
swayidle
light
mako
wl-clipboard
wf-recorder
];
extraSessionCommands = ''
export GDK_BACKEND=wayland
export CLUTTER_BACKEND=wayland
export SDL_VIDEODRIVER=wayland
# needs qt5.qtwayland in systemPackages
export QT_QPA_PLATFORM=wayland-egl
export QT_WAYLAND_FORCE_DPI=physical
export QT_WAYLAND_DISABLE_WINDOWDECORATION="1"
# Fix for some Java AWT applications (e.g. Android Studio),
# use this if they aren't displayed properly:
export _JAVA_AWT_WM_NONREPARENTING=1
'';
};
}
@@ -1,13 +1,16 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
# Note: add privileged users to docker group for access
with lib;
mkIf ((elem "docker" config.machine.services) && !(elem "podman" config.machine.services)) {
virtualisation.docker= {
enable = true;
# Disable live restore as it tends to delay/block system shutdown
liveRestore = false;
};
environment.systemPackages = with pkgs; [ docker-compose docker-machine cntr ];
}
mkIf ((elem "docker" config.machine.services) && !(elem "podman" config.machine.services)) {
virtualisation.docker = {
enable = true;
# Disable live restore as it tends to delay/block system shutdown
liveRestore = false;
};
environment.systemPackages = with pkgs; [docker-compose docker-machine cntr];
}
@@ -1,102 +1,104 @@
{ config, lib, ... }:
with lib;
let
{
config,
lib,
...
}:
with lib; let
cfg = config.machine;
active = name: (elem name cfg.services);
in mkIf (elem "fail2ban" cfg.services) {
services.fail2ban = {
enable = true;
jails = {
DEFAULT = ''
bantime = 3600
blocktype = DROP
logpath = /var/log/auth.log
'';
in
mkIf (elem "fail2ban" cfg.services) {
services.fail2ban = {
enable = true;
jails = {
DEFAULT = ''
bantime = 3600
blocktype = DROP
logpath = /var/log/auth.log
'';
ssh = ''
enabled = ${boolToString (active "openssh")}
filter = sshd
maxretry = 4
action = iptables[name=SSH, port=ssh, protocol=tcp]
'';
sshd-ddos = ''
enabled = ${boolToString (active "openssh")}
filter = sshd-ddos
maxretry = 4
action = iptables[name=ssh, port=ssh, protocol=tcp]
'';
ssh = ''
enabled = ${boolToString (active "openssh")}
filter = sshd
maxretry = 4
action = iptables[name=SSH, port=ssh, protocol=tcp]
'';
sshd-ddos = ''
enabled = ${boolToString (active "openssh")}
filter = sshd-ddos
maxretry = 4
action = iptables[name=ssh, port=ssh, protocol=tcp]
'';
postfix = ''
enabled = ${boolToString (active "mailserver")}
filter = postfix
maxretry = 3
action = iptables[name=postfix, port=smtp, protocol=tcp]
'';
postfix-sasl = ''
enabled = ${boolToString (active "mailserver")}
filter = postfix-sasl
port = postfix,imap3,imaps,pop3,pop3s
maxretry = 3
action = iptables[name=postfix, port=smtp, protocol=tcp]
'';
postfix-ddos = ''
enabled = ${boolToString (active "mailserver")}
filter = postfix-ddos
maxretry = 3
action = iptables[name=postfix, port=submission, protocol=tcp]
bantime = 7200
'';
postfix = ''
enabled = ${boolToString (active "mailserver")}
filter = postfix
maxretry = 3
action = iptables[name=postfix, port=smtp, protocol=tcp]
'';
postfix-sasl = ''
enabled = ${boolToString (active "mailserver")}
filter = postfix-sasl
port = postfix,imap3,imaps,pop3,pop3s
maxretry = 3
action = iptables[name=postfix, port=smtp, protocol=tcp]
'';
postfix-ddos = ''
enabled = ${boolToString (active "mailserver")}
filter = postfix-ddos
maxretry = 3
action = iptables[name=postfix, port=submission, protocol=tcp]
bantime = 7200
'';
nginx-req-limit = ''
enabled = ${boolToString (active "nginx")}
filter = nginx-req-limit
maxretry = 10
action = iptables-multiport[name=ReqLimit, port="http,https", protocol=tcp]
findtime = 600
bantime = 7200
nginx-req-limit = ''
enabled = ${boolToString (active "nginx")}
filter = nginx-req-limit
maxretry = 10
action = iptables-multiport[name=ReqLimit, port="http,https", protocol=tcp]
findtime = 600
bantime = 7200
'';
};
};
environment.etc."fail2ban/filter.d/sshd-ddos.conf" = {
enable = active "openssh";
text = ''
[Definition]
failregex = sshd(?:\[\d+\])?: Did not receive identification string from <HOST>$
ignoreregex =
'';
};
};
environment.etc."fail2ban/filter.d/sshd-ddos.conf" = {
enable = (active "openssh");
text = ''
[Definition]
failregex = sshd(?:\[\d+\])?: Did not receive identification string from <HOST>$
ignoreregex =
'';
};
environment.etc."fail2ban/filter.d/postfix-sasl.conf" = {
enable = active "mailserver";
text = ''
# Fail2Ban filter for postfix authentication failures
[INCLUDES]
before = common.conf
[Definition]
daemon = postfix/smtpd
failregex = ^%(__prefix_line)swarning: [-._\w]+\[<HOST>\]: SASL (?:LOGIN|PLAIN|(?:CRAM|DIGEST)-MD5) authentication failed(: [ A-Za-z0-9+/]*={0,2})?\s*$
'';
};
environment.etc."fail2ban/filter.d/postfix-sasl.conf" = {
enable = (active "mailserver");
text = ''
# Fail2Ban filter for postfix authentication failures
[INCLUDES]
before = common.conf
[Definition]
daemon = postfix/smtpd
failregex = ^%(__prefix_line)swarning: [-._\w]+\[<HOST>\]: SASL (?:LOGIN|PLAIN|(?:CRAM|DIGEST)-MD5) authentication failed(: [ A-Za-z0-9+/]*={0,2})?\s*$
'';
};
environment.etc."fail2ban/filter.d/postfix-ddos.conf" = {
enable = active "mailserver";
text = ''
[Definition]
failregex = lost connection after EHLO from \S+\[<HOST>\]
'';
};
environment.etc."fail2ban/filter.d/postfix-ddos.conf" = {
enable = (active "mailserver");
text = ''
[Definition]
failregex = lost connection after EHLO from \S+\[<HOST>\]
'';
};
environment.etc."fail2ban/filter.d/nginx-req-limit.conf" = {
enable = active "nginx";
text = ''
[Definition]
failregex = limiting requests, excess:.* by zone.*client: <HOST>
'';
};
environment.etc."fail2ban/filter.d/nginx-req-limit.conf" = {
enable = (active "nginx");
text = ''
[Definition]
failregex = limiting requests, excess:.* by zone.*client: <HOST>
'';
};
# Limit stack size to reduce memory usage
systemd.services.fail2ban.serviceConfig.LimitSTACK = 256 * 1024;
}
# Limit stack size to reduce memory usage
systemd.services.fail2ban.serviceConfig.LimitSTACK = 256 * 1024;
}
@@ -1,18 +1,24 @@
{ config, lib, ... }:
{
config,
lib,
...
}:
with lib;
mkIf (elem "fprintd" config.machine.services) {
security.pam.services = let
unlock = [
"sudo"
"i3lock"
"login"
"lightdm"
];
in
listToAttrs (forEach unlock (n: {
name = n;
value = {fprintAuth = true;};
}));
mkIf (elem "fprintd" config.machine.services) {
security.pam.services = let
unlock = [
"sudo"
"i3lock"
"login"
"lightdm"
];
in listToAttrs (forEach unlock (n: {name = n; value = { fprintAuth = true; }; }));
services.fprintd = {
enable = true;
};
}
services.fprintd = {
enable = true;
};
}
@@ -1,54 +1,56 @@
{ config, lib, ... }:
{
config,
lib,
...
}:
with lib;
mkIf (elem "gitea" config.machine.services) {
services = {
gitea = let
cfg = config.machine;
domain = (findFirst (s: s.service == "gitea") cfg cfg.vHosts).domain;
in {
enable = true;
user = "git";
database = {
type = "mysql";
mkIf (elem "gitea" config.machine.services) {
services = {
gitea = let
cfg = config.machine;
domain = (findFirst (s: s.service == "gitea") cfg cfg.vHosts).domain;
in {
enable = true;
user = "git";
name = "gitea";
passwordFile = config.sops.secrets."services/gitea/dbPass".path;
};
settings = {
repository = {
DISABLE_HTTP_GIT = false;
USE_COMPAT_SSH_URI = true;
database = {
type = "mysql";
user = "git";
name = "gitea";
passwordFile = config.sops.secrets."services/gitea/dbPass".path;
};
settings = {
repository = {
DISABLE_HTTP_GIT = false;
USE_COMPAT_SSH_URI = true;
};
security = {
INSTALL_LOCK = true;
COOKIE_USERNAME = "gitea_username";
COOKIE_REMEMBER_NAME = "gitea_userauth";
};
security = {
INSTALL_LOCK = true;
COOKIE_USERNAME = "gitea_username";
COOKIE_REMEMBER_NAME = "gitea_userauth";
};
server = {
DOMAIN = domain;
ROOT_URL = "https://${domain}/";
};
server = {
DOMAIN = domain;
ROOT_URL = "https://${domain}/";
};
service = {
DISABLE_REGISTRATION = (lib.mkForce true);
};
service = {
DISABLE_REGISTRATION = lib.mkForce true;
};
session = {
cookieSecure = true;
session = {
cookieSecure = true;
};
};
};
};
};
sops.secrets."services/gitea/dbPass" = {};
users.users.git = {
description = "Gitea Service";
isNormalUser = true;
home = config.services.gitea.stateDir;
createHome = true;
useDefaultShell = true;
};
}
sops.secrets."services/gitea/dbPass" = {};
users.users.git = {
description = "Gitea Service";
isNormalUser = true;
home = config.services.gitea.stateDir;
createHome = true;
useDefaultShell = true;
};
}
@@ -1,71 +1,71 @@
{ config, lib, ... }:
{
config,
lib,
...
}:
# hydra user needs to be manually crated
# sudo -u hydra -s
# hydra-create-user $USERNAME --password $PASSWORD --role admin
# https://qfpl.io/posts/nix/starting-simple-hydra/
# also for reference a well written hydra config:
# https://github.com/NixOS/nixos-org-configurations/blob/master/delft/hydra.nix
with lib;
let
with lib; let
cacheDir = "/var/cache/hydra";
in mkIf (elem "hydra" config.machine.services) {
# also take a look at ../conf/nix.nix
nix.buildMachines = [
{
hostName = "localhost";
system = "x86_64-linux";
supportedFeatures = ["kvm" "nixos-test" "big-parallel" "benchmark"];
maxJobs = 8;
}
];
in
mkIf (elem "hydra" config.machine.services) {
# also take a look at ../conf/nix.nix
nix.buildMachines = [
{
hostName = "localhost";
system = "x86_64-linux";
supportedFeatures = ["kvm" "nixos-test" "big-parallel" "benchmark"];
maxJobs = 8;
}
];
services = let
cfg = config.machine;
domain = (findFirst (s: s.service == "hydra") cfg cfg.vHosts).domain;
in {
hydra = {
enable = true;
hydraURL = domain; # externally visible URL
listenHost = "localhost";
port = 3001;
minimumDiskFree = 15;
minimumDiskFreeEvaluator = 15;
notificationSender = "hydra@mail.${cfg.domain}"; # e-mail of hydra service
useSubstitutes = true;
debugServer = false;
# Hints from hydra-queue-runner:
# binary_cache_dir is deprecated and ignored. use store_uri=file:// instead
# hydra.conf: binary_cache_secret_key_file is deprecated and ignored. use store_uri=...?secret-key= instead
extraConfig = ''
max_output_size = 4294967296
store_uri = file://${cacheDir}?secret-key=${config.sops.secrets."services.hydra.secretKey".path}&write-nar-listing=1&ls-compression=br&log-compression=br
# add ?local-nar-cache= to set nar cache location
server_store_uri = https://cache.${cfg.domain}
binary_cache_public_uri https://cache.${cfg.domain}
upload_logs_to_binary_cache = true
'';
};
services = let
cfg = config.machine;
domain = (findFirst (s: s.service == "hydra") cfg cfg.vHosts).domain;
in {
hydra = {
enable = true;
hydraURL = domain; # externally visible URL
listenHost = "localhost";
port = 3001;
minimumDiskFree = 15;
minimumDiskFreeEvaluator = 15;
notificationSender = "hydra@mail.${cfg.domain}"; # e-mail of hydra service
useSubstitutes = true;
debugServer = false;
# Hints from hydra-queue-runner:
# binary_cache_dir is deprecated and ignored. use store_uri=file:// instead
# hydra.conf: binary_cache_secret_key_file is deprecated and ignored. use store_uri=...?secret-key= instead
extraConfig = ''
max_output_size = 4294967296
store_uri = file://${cacheDir}?secret-key=${config.sops.secrets."services.hydra.secretKey".path}&write-nar-listing=1&ls-compression=br&log-compression=br
# add ?local-nar-cache= to set nar cache location
server_store_uri = https://cache.${cfg.domain}
binary_cache_public_uri https://cache.${cfg.domain}
upload_logs_to_binary_cache = true
'';
};
nix-serve = {
enable = true;
bindAddress = "0.0.0.0";
port = 5000;
secretKeyFile = config.sops.secrets."services.hydra.secretKey".path;
extraParams = ''
# Dont know how to change the store root yet...
# --user hydra-queue-runner
# --group hydra
'';
nix-serve = {
enable = true;
bindAddress = "0.0.0.0";
port = 5000;
secretKeyFile = config.sops.secrets."services.hydra.secretKey".path;
extraParams = ''
# Dont know how to change the store root yet...
# --user hydra-queue-runner
# --group hydra
'';
};
};
};
systemd.services.nix-serve.serviceConfig.User = mkForce "hydra";
systemd.services.nix-serve.environment.NIX_STORE_DIR = cacheDir;
sops.secrets."services/hydra/secretKey" = {
owner = "hydra";
group = "hydra";
};
}
systemd.services.nix-serve.serviceConfig.User = mkForce "hydra";
systemd.services.nix-serve.environment.NIX_STORE_DIR = cacheDir;
sops.secrets."services/hydra/secretKey" = {
owner = "hydra";
group = "hydra";
};
}
@@ -1,56 +1,63 @@
{ config, lib, fn, mailserver, ... }:
with lib;
{
imports = [
mailserver.nixosModules.mailserver
];
} // mkIf (elem "mailserver" config.machine.services) {
mailserver = let
cfg = config.machine;
domain = cfg.domain;
fdomain = (findFirst (s: s.service == "mail") cfg cfg.vHosts).domain;
mkFqdnAlias = name: [ "${name}@${domain}" "${name}@${fdomain}" ];
mkExDomAlias = name: (map (exDom: "${name}@${exDom}") cfg.extraDomains);
mkUser = user: rec {
name = "${user.name}@${domain}";
value = {
hashedPasswordFile = config.sops.secrets."users/${user.name}/mail".path;
aliases = [ "${user.name}@${fdomain}" ]
++ (flatten (map mkFqdnAlias user.aliases))
++ (flatten (map mkExDomAlias ([ user.name ] ++ user.aliases)));
config,
lib,
fn,
mailserver,
...
}:
with lib;
{
imports = [
mailserver.nixosModules.mailserver
];
}
// mkIf (elem "mailserver" config.machine.services) {
mailserver = let
cfg = config.machine;
domain = cfg.domain;
fdomain = (findFirst (s: s.service == "mail") cfg cfg.vHosts).domain;
mkFqdnAlias = name: ["${name}@${domain}" "${name}@${fdomain}"];
mkExDomAlias = name: (map (exDom: "${name}@${exDom}") cfg.extraDomains);
mkUser = user: rec {
name = "${user.name}@${domain}";
value = {
hashedPasswordFile = config.sops.secrets."users/${user.name}/mail".path;
aliases =
["${user.name}@${fdomain}"]
++ (flatten (map mkFqdnAlias user.aliases))
++ (flatten (map mkExDomAlias ([user.name] ++ user.aliases)));
};
};
in rec {
enable = true;
fqdn = fdomain;
domains = [fdomain domain] ++ cfg.extraDomains;
loginAccounts = listToAttrs (map mkUser cfg.mailAccounts);
# Use Let's Encrypt certificates. Note that this needs to set up a stripped
# down nginx and opens port 80.
certificateScheme = "manual";
certificateFile = "/var/lib/acme/" + fdomain + "/fullchain.pem";
keyFile = "/var/lib/acme/" + fdomain + "/key.pem";
#dhParamBitLength = 4096; # this doesn't exist???
# Enable IMAP and POP3
enableImap = true;
enablePop3 = false;
enableImapSsl = true;
enablePop3Ssl = false;
# Enable the ManageSieve protocol
enableManageSieve = true;
# whether to scan inbound emails for viruses (note that this requires at least
# 1 Gb RAM for the server. Without virus scanning 256 MB RAM should be plenty)
virusScanning = false;
};
in rec {
enable = true;
fqdn = fdomain;
domains = ([ fdomain domain ] ++ cfg.extraDomains);
loginAccounts = listToAttrs (map mkUser cfg.mailAccounts);
# Use Let's Encrypt certificates. Note that this needs to set up a stripped
# down nginx and opens port 80.
certificateScheme = "manual";
certificateFile = "/var/lib/acme/" + fdomain + "/fullchain.pem";
keyFile = "/var/lib/acme/" + fdomain + "/key.pem";
#dhParamBitLength = 4096; # this doesn't exist???
# Enable IMAP and POP3
enableImap = true;
enablePop3 = false;
enableImapSsl = true;
enablePop3Ssl = false;
# Enable the ManageSieve protocol
enableManageSieve = true;
# whether to scan inbound emails for viruses (note that this requires at least
# 1 Gb RAM for the server. Without virus scanning 256 MB RAM should be plenty)
virusScanning = false;
};
sops.secrets = (fn.sopsHelper
(user: "users/${user.name}/mail")
config.machine.mailAccounts
{});
}
sops.secrets =
fn.sopsHelper
(user: "users/${user.name}/mail")
config.machine.mailAccounts
{};
}
@@ -1,10 +1,13 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
mkIf (elem "mariaDB" config.machine.services) {
services.mysql = rec {
enable = true;
package = pkgs.mariadb;
};
}
mkIf (elem "mariaDB" config.machine.services) {
services.mysql = rec {
enable = true;
package = pkgs.mariadb;
};
}
@@ -1,7 +1,9 @@
{ config, lib, ... }:
{
config,
lib,
...
}:
with lib;
mkIf (elem "mullvad" config.machine.services) {
services.mullvad-vpn.enable = true;
}
mkIf (elem "mullvad" config.machine.services) {
services.mullvad-vpn.enable = true;
}
@@ -1,57 +1,65 @@
{ config, lib, pkgs, fn, ... }:
{
config,
lib,
pkgs,
fn,
...
}:
with lib;
mkIf (elem "nextcloud" config.machine.services) {
services = let
cfg = config.machine;
domain = (findFirst (s: s.service == "nextcloud") cfg cfg.vHosts).domain;
in {
nextcloud = {
enable = true;
enableBrokenCiphersForSSE = false;
home = "/var/lib/nextcloud";
hostName = domain;
https = true;
maxUploadSize = "1024M";
package = pkgs.nextcloud27;
config = {
adminuser = mkDefault (elemAt cfg.administrators 0).name;
adminpassFile = config.sops.secrets."services/nextcloud/adminPass".path;
dbtype = "mysql";
dbhost = "localhost";
dbport = "3306";
dbuser = "nextcloud";
dbpassFile = config.sops.secrets."services/nextcloud/dbPass".path;
dbname = "nextcloud";
dbtableprefix = "oc_";
extraTrustedDomains = cfg.extraDomains;
mkIf (elem "nextcloud" config.machine.services) {
services = let
cfg = config.machine;
domain = (findFirst (s: s.service == "nextcloud") cfg cfg.vHosts).domain;
in {
nextcloud = {
enable = true;
enableBrokenCiphersForSSE = false;
home = "/var/lib/nextcloud";
hostName = domain;
https = true;
maxUploadSize = "1024M";
package = pkgs.nextcloud27;
config = {
adminuser = mkDefault (elemAt cfg.administrators 0).name;
adminpassFile = config.sops.secrets."services/nextcloud/adminPass".path;
dbtype = "mysql";
dbhost = "localhost";
dbport = "3306";
dbuser = "nextcloud";
dbpassFile = config.sops.secrets."services/nextcloud/dbPass".path;
dbname = "nextcloud";
dbtableprefix = "oc_";
extraTrustedDomains = cfg.extraDomains;
};
caching = {
apcu = true;
memcached = true;
redis = false;
};
};
caching = {
apcu = true;
memcached = true;
redis = false;
# Turn Server used for nextcloud-talk
# This stuff is still untested.
coturn = mkIf (elem "nextcloud-talk" config.machine.services) {
# TLS is not needed as WebRTC is already encrypted.
enable = true;
realm = domain;
listening-port = 3478;
use-auth-secret = true;
extraConfig = ''
fingerprint
total-quota=100
bps-capacity=0
stale-nonce
no-multicast-peers
'';
};
};
# Turn Server used for nextcloud-talk
# This stuff is still untested.
coturn = mkIf (elem "nextcloud-talk" config.machine.services) {
# TLS is not needed as WebRTC is already encrypted.
enable = true;
realm = domain;
listening-port = 3478;
use-auth-secret = true;
extraConfig = ''
fingerprint
total-quota=100
bps-capacity=0
stale-nonce
no-multicast-peers
'';
};
};
sops.secrets = (fn.sopsHelper
(name: "services/nextcloud/${name}")
[ "adminPass" "dbPass" ]
{ owner = "nextcloud"; group = "nextcloud"; });
}
sops.secrets =
fn.sopsHelper
(name: "services/nextcloud/${name}")
["adminPass" "dbPass"]
{
owner = "nextcloud";
group = "nextcloud";
};
}
@@ -6,48 +6,54 @@
# - Nextcloud #
# - Mail ssl root #
##############################################################################################
{ options, config, lib, pkgs, ... }:
{
options,
config,
lib,
pkgs,
...
}:
with lib;
with builtins;
mkIf (elem "nginx" config.machine.services) {
services.nginx = let
vHostConfigs = listToAttrs (map
(name: {
name = replaceStrings [".nix"] [""] name;
value = import (./. + (toPath "/nginx_vHosts/${name}")) {inherit options config lib pkgs;};
})
(attrNames (readDir ./nginx_vHosts)));
mkIf (elem "nginx" config.machine.services) {
services.nginx = let
vHostConfigs = listToAttrs (map
(name: {
name = (replaceStrings [ ".nix" ] [ "" ] name);
value = (import (./. + (toPath "/nginx_vHosts/${name}")) { inherit options config lib pkgs; });})
(attrNames (readDir ./nginx_vHosts)));
mkVHost = vHost: {
name = vHost.domain;
value =
{
enableACME = true;
forceSSL = true;
acmeRoot = "/var/lib/acme/acme-challenge";
}
// vHostConfigs."${vHost.service}";
};
mkVHost = vHost: {
name = vHost.domain;
value = {
enableACME = true;
forceSSL = true;
acmeRoot = "/var/lib/acme/acme-challenge";
} // vHostConfigs."${vHost.service}"; };
vHosts = listToAttrs (map mkVHost config.machine.vHosts);
in {
enable = true;
recommendedGzipSettings = true;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
sslCiphers = "EECDH+aRSA+AESGCM:EDH+aRSA:EECDH+aRSA:+AES256:+AES128:+SHA1:!CAMELLIA:!SEED:!3DES:!DES:!RC4:!eNULL";
sslProtocols = "TLSv1.3 TLSv1.2";
commonHttpConfig = ''
map $scheme $hsts_header {
https "max-age=31536000; includeSubdomains; preload";
}
add_header Strict-Transport-Security $hsts_header;
add_header 'Referrer-Policy' 'origin-when-cross-origin';
# add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
'';
virtualHosts = vHosts;
};
}
vHosts = listToAttrs (map mkVHost config.machine.vHosts);
in {
enable = true;
recommendedGzipSettings = true;
recommendedOptimisation = true;
recommendedProxySettings = true;
recommendedTlsSettings = true;
sslCiphers = "EECDH+aRSA+AESGCM:EDH+aRSA:EECDH+aRSA:+AES256:+AES128:+SHA1:!CAMELLIA:!SEED:!3DES:!DES:!RC4:!eNULL";
sslProtocols = "TLSv1.3 TLSv1.2";
commonHttpConfig = ''
map $scheme $hsts_header {
https "max-age=31536000; includeSubdomains; preload";
}
add_header Strict-Transport-Security $hsts_header;
add_header 'Referrer-Policy' 'origin-when-cross-origin';
# add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
'';
virtualHosts = vHosts;
};
}
@@ -1,17 +1,23 @@
{ config, lib, ... }:
with lib;
{
vHost = if config.services.nix-serve.enable then {
extraConfig = ''
location / {
proxy_pass http://${config.services.nix-serve.bindAddress}:${toString config.services.nix-serve.port};
proxy_set_header Host $host;
proxy_set_header REMOTE_ADDR $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
config,
lib,
...
}:
with lib;
{
vHost =
if config.services.nix-serve.enable
then {
extraConfig = ''
location / {
proxy_pass http://${config.services.nix-serve.bindAddress}:${toString config.services.nix-serve.port};
proxy_set_header Host $host;
proxy_set_header REMOTE_ADDR $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
}
'';
}
'';
} else {};
}.vHost
else {};
}
.vHost
@@ -1,27 +1,33 @@
{ config, lib, ... }:
with lib;
{
vHost = if config.services.gitea.enable then {
root = "${config.services.gitea.stateDir}/public";
extraConfig = ''
location / {
try_files maintain.html $uri $uri/index.html @node;
}
config,
lib,
...
}:
with lib;
{
vHost =
if config.services.gitea.enable
then {
root = "${config.services.gitea.stateDir}/public";
extraConfig = ''
location / {
try_files maintain.html $uri $uri/index.html @node;
}
location @node {
client_max_body_size 0;
proxy_pass http://${config.services.gitea.settings.server.HTTP_ADDR}:${toString config.services.gitea.settings.server.HTTP_PORT};
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Ssl on;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_max_temp_file_size 0;
proxy_redirect off;
proxy_read_timeout 120;
location @node {
client_max_body_size 0;
proxy_pass http://${config.services.gitea.settings.server.HTTP_ADDR}:${toString config.services.gitea.settings.server.HTTP_PORT};
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Ssl on;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_max_temp_file_size 0;
proxy_redirect off;
proxy_read_timeout 120;
}
'';
}
'';
} else {};
}.vHost
else {};
}
.vHost
@@ -1,17 +1,23 @@
{ config, lib, ... }:
with lib;
{
vHost = if config.services.hydra.enable then {
extraConfig = ''
location / {
proxy_pass http://${config.services.hydra.listenHost}:${toString config.services.hydra.port};
proxy_set_header Host $host;
proxy_set_header REMOTE_ADDR $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
config,
lib,
...
}:
with lib;
{
vHost =
if config.services.hydra.enable
then {
extraConfig = ''
location / {
proxy_pass http://${config.services.hydra.listenHost}:${toString config.services.hydra.port};
proxy_set_header Host $host;
proxy_set_header REMOTE_ADDR $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto https;
}
'';
}
'';
} else {};
}.vHost
else {};
}
.vHost
@@ -1,11 +1,17 @@
{ config, lib, ... }:
with lib;
{
vHost = if config.mailserver.enable then {
serverName = config.mailserver.fqdn;
enableACME = true;
forceSSL = true;
} else {};
}.vHost
config,
lib,
...
}:
with lib;
{
vHost =
if config.mailserver.enable
then {
serverName = config.mailserver.fqdn;
enableACME = true;
forceSSL = true;
}
else {};
}
.vHost
@@ -1,10 +1,18 @@
{ options, config, lib, pkgs, ... }:
{
options,
config,
lib,
pkgs,
...
}:
with lib;
if (config.services.nextcloud.enable == true) then {
vHost = {
enableACME = config.services.nextcloud.https;
forceSSL = config.services.nextcloud.https;
};
}.vHost else {}
if (config.services.nextcloud.enable == true)
then
{
vHost = {
enableACME = config.services.nextcloud.https;
forceSSL = config.services.nextcloud.https;
};
}
.vHost
else {}
@@ -1,9 +1,12 @@
{ config, lib, ... }:
with lib;
{
vHost = {
root = "/var/www";
};
}.vHost
config,
lib,
...
}:
with lib;
{
vHost = {
root = "/var/www";
};
}
.vHost
@@ -1,35 +1,43 @@
{ config, lib, fn, ... }:
{
config,
lib,
fn,
...
}:
# For reference:
# https://infosec.mozilla.org/guidelines/openssh.html
# https://stribika.github.io/2015/01/04/secure-secure-shell.html
with lib;
mkIf (elem "openssh" config.machine.services) {
services.openssh = {
enable = true;
settings.KexAlgorithms = [ "curve25519-sha256@libssh.org" ];
sftpFlags = [ "-f AUTHPRIV" "-l INFO" ];
startWhenNeeded = false;
settings = {
KbdInteractiveAuthentication = false;
PasswordAuthentication = false;
PermitRootLogin = "no";
};
extraConfig = let users = concatMapStrings (user: "${user.name} ") config.machine.administrators
+ (optionalString config.services.gitea.enable (config.services.gitea.user + " "));
in ''
mkIf (elem "openssh" config.machine.services) {
services.openssh = {
enable = true;
settings.KexAlgorithms = ["curve25519-sha256@libssh.org"];
sftpFlags = ["-f AUTHPRIV" "-l INFO"];
startWhenNeeded = false;
settings = {
KbdInteractiveAuthentication = false;
PasswordAuthentication = false;
PermitRootLogin = "no";
};
extraConfig = let
users =
concatMapStrings (user: "${user.name} ") config.machine.administrators
+ (optionalString config.services.gitea.enable (config.services.gitea.user + " "));
in ''
UsePAM no
AllowUsers ${users}
LogLevel VERBOSE
'';
};
# Add public keys to /etc/ssh/authorized_keys.d
# This replaces users.users.*.openssh.authorizedKeys.*
sops.secrets = (fn.sopsHelper
(user: "users/${user.name}/publicKey")
config.machine.administrators
(user: { path = "/etc/ssh/authorized_keys.d/${user.name}"; mode = "444"; })
);
}
'';
};
# Add public keys to /etc/ssh/authorized_keys.d
# This replaces users.users.*.openssh.authorizedKeys.*
sops.secrets = (
fn.sopsHelper
(user: "users/${user.name}/publicKey")
config.machine.administrators
(user: {
path = "/etc/ssh/authorized_keys.d/${user.name}";
mode = "444";
})
);
}
@@ -1,18 +1,22 @@
{ config, lib, pkgs, ... }:
with lib;
let
withDocker = (elem "docker" config.machine.services);
in mkIf (elem "podman" config.machine.services) {
virtualisation.podman = {
enable = true;
dockerSocket.enable = withDocker;
dockerCompat = withDocker;
defaultNetwork = {
settings.dns_enabled = true;
{
config,
lib,
pkgs,
...
}:
with lib; let
withDocker = elem "docker" config.machine.services;
in
mkIf (elem "podman" config.machine.services) {
virtualisation.podman = {
enable = true;
dockerSocket.enable = withDocker;
dockerCompat = withDocker;
defaultNetwork = {
settings.dns_enabled = true;
};
};
};
environment.systemPackages = (with pkgs; [ podman-compose cntr ]) ++
(optional withDocker pkgs.docker-compose);
}
environment.systemPackages =
(with pkgs; [podman-compose cntr])
++ (optional withDocker pkgs.docker-compose);
}
@@ -1,7 +1,10 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
with lib;
mkIf (elem "udev" config.machine.services) {
hardware.steam-hardware.enable = true;
}
mkIf (elem "udev" config.machine.services) {
hardware.steam-hardware.enable = true;
}