Initial Setup for Sock host; Docker environment, secrets, and backups
- Introduces new Nix modules for comprehensive backup configuration using Borg
- Implements an orchestrated backup chain with logging, status tracking, and notifications
- Establishes Docker container service definitions and network configuration for Komodo services
- Updates host and service secrets, including firewall and Docker credentials
This commit is contained in:
parent
56c1e1153b
commit
5879c0bed8
7 changed files with 654 additions and 0 deletions
193
hosts/nixos/sock/config/backups/borg.nix
Normal file
193
hosts/nixos/sock/config/backups/borg.nix
Normal file
|
@ -0,0 +1,193 @@
|
|||
{
  config,
  lib,
  pkgs,
  ...
}:

# Borg backups for the sock host.
#
# Provides mkBorgBackupService, a generator producing a systemd oneshot
# service (and optionally a timer) that creates a borg archive, prunes old
# archives, and can send an Apprise notification with run statistics.
let
  # Common repositories
  ochreStorageRepo = "/pool/Backups/OchreStorage";

  # Shared environment setup prepended to every backup script.
  borgCommonSettings = ''
    # Don't use cache to avoid issues with concurrent backups
    export BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes
    export BORG_NON_INTERACTIVE=yes
    # BUGFIX: propagate failures through `borg ... | tee` pipelines so the $?
    # captured after them reflects borg's exit code, not tee's.
    set -o pipefail
  '';

  # Common packages needed for backups
  commonBorgPath = with pkgs; [
    borgbackup
    coreutils
    apprise
    gnugrep
    hostname
    util-linux
    gawk
  ];

  # Repository initialization.
  # NOTE(review): a plain directory test assumes any pre-existing directory is
  # already a valid borg repo — confirm, or use `borg info` as the probe.
  initRepo = repo: ''
    if [ ! -d "${repo}" ]; then
      mkdir -p "${repo}"
      ${pkgs.borgbackup}/bin/borg init --encryption=none "${repo}"
    fi
  '';

  # Notification system; `|| true` keeps a notification failure from failing
  # the backup unit itself.
  apprise-url = config.secretsSpec.users.admin.smtp.notifyUrl;
  sendNotification = title: message: ''
    ${pkgs.apprise}/bin/apprise -t "${title}" -b "${message}" "${apprise-url}" || true
  '';

  # Statistics generation: summarize the run log plus repository/disk info
  # into <logFile>.stats and expose the text to the script as $STATS.
  extractBorgStats = logFile: repoPath: ''
    {
      echo -e "\n==== BACKUP SUMMARY ====\n"
      grep -A10 "Archive name:" ${logFile} || echo "No archive stats found"
      echo -e "\n=== Compression ===\n"
      grep "Compressed size:" ${logFile} || echo "No compression stats found"
      echo -e "\n=== Duration ===\n"
      grep "Duration:" ${logFile} || echo "No duration stats found"
      grep "Throughput:" ${logFile} || echo "No throughput stats found"
      echo -e "\n=== Repository ===\n"
      ${pkgs.borgbackup}/bin/borg info ${repoPath} --last 1 2>/dev/null || echo "Could not get repository info"
      echo -e "\n=== Storage Space ===\n"
      df -h ${repoPath} | grep -v "Filesystem" || echo "Could not get storage info"
    } > ${logFile}.stats
    STATS=$(cat ${logFile}.stats || echo "No stats available")
  '';

  # Unified backup service generator with optional features.
  #  - schedule: systemd OnCalendar string; null means no timer is created.
  #  - enableNotifications: send an Apprise message on completion/failure.
  #  - verbose: trace the script and mirror borg output to the journal.
  mkBorgBackupService =
    {
      name,
      title,
      repo,
      sourcePath,
      keepDaily,
      keepWeekly,
      keepMonthly,
      schedule ? null,
      enableNotifications ? true,
      verbose ? false,
    }:
    let
      # Only create a timer when a schedule is given.
      maybeCreateTimer = lib.optionalAttrs (schedule != null) {
        timers."backup-${name}" = {
          description = "Timer for ${title} Backup";
          wantedBy = [ "timers.target" ];
          timerConfig = {
            OnCalendar = schedule;
            Persistent = true;
            RandomizedDelaySec = "5min";
          };
        };
      };

      logPrefix = if verbose then "set -x;" else "";

      # BUGFIX: the original `>> $LOG_FILE 2>&1 | tee /dev/fd/3` redirected all
      # output into the log *before* the pipe, so tee received nothing and the
      # subsequent `$?` was tee's status rather than borg's. In verbose mode we
      # tee to the log (stdout already reaches the journal) and rely on the
      # pipefail set above for correct exit codes.
      logSink = if verbose then ''2>&1 | tee -a "$LOG_FILE"'' else ''>> "$LOG_FILE" 2>&1'';
    in
    {
      services."backup-${name}" = {
        description = "Backup ${title} with Borg";
        inherit (commonServiceConfig) path serviceConfig;

        script = ''
          ${borgCommonSettings}
          ${logPrefix} # Add verbose logging if enabled

          LOG_FILE="/tmp/borg-${name}-backup-$(date +%Y%m%d-%H%M%S).log"
          ${initRepo repo}

          echo "Starting ${title} backup at $(date)" > "$LOG_FILE"
          ARCHIVE_NAME="${name}-$(date +%Y-%m-%d_%H%M%S)"
          START_TIME=$(date +%s)

          ${pkgs.borgbackup}/bin/borg create \
            --stats \
            --compression zstd,15 \
            --exclude '*.tmp' \
            --exclude '*/tmp/*' \
            ${repo}::$ARCHIVE_NAME \
            ${sourcePath} ${logSink}

          BACKUP_STATUS=$?
          END_TIME=$(date +%s)
          DURATION=$((END_TIME - START_TIME))
          echo "Total time: $DURATION seconds ($(date -d@$DURATION -u +%H:%M:%S))" >> "$LOG_FILE"

          ${extractBorgStats "$LOG_FILE" "${repo}"}

          echo -e "\nPruning old backups..." >> "$LOG_FILE"
          ${pkgs.borgbackup}/bin/borg prune \
            --keep-daily ${toString keepDaily} \
            --keep-weekly ${toString keepWeekly} \
            --keep-monthly ${toString keepMonthly} \
            ${repo} ${logSink}

          PRUNE_STATUS=$?

          echo -e "\nRemaining archives after pruning:" >> "$LOG_FILE"
          ${pkgs.borgbackup}/bin/borg list ${repo} >> "$LOG_FILE" 2>&1 || true

          ${
            if enableNotifications then
              ''
                if [ $BACKUP_STATUS -eq 0 ] && [ $PRUNE_STATUS -eq 0 ]; then
                  ${sendNotification "✅ ${title} Backup Complete" "${title} backup completed successfully on $(hostname) at $(date)\nDuration: $(date -d@$DURATION -u +%H:%M:%S)\n\n$STATS"}
                else
                  ${sendNotification "❌ ${title} Backup Failed" "${title} backup failed on $(hostname) at $(date)\n\nBackup Status: $BACKUP_STATUS\nPrune Status: $PRUNE_STATUS\n\nPartial Stats:\n$STATS\n\nSee $LOG_FILE for details"}
                fi
              ''
            else
              "echo 'Notifications disabled' >> $LOG_FILE"
          }

          rm -f "$LOG_FILE.stats"
          exit $BACKUP_STATUS
        '';
      };
    }
    // maybeCreateTimer;

  # Common service configuration: run at idle priority so backups never starve
  # interactive workloads.
  commonServiceConfig = {
    path = commonBorgPath;
    serviceConfig = {
      Type = "oneshot";
      IOSchedulingClass = "idle";
      CPUSchedulingPolicy = "idle";
      Nice = 19;
    };
  };
in
{
  environment.systemPackages = with pkgs; [
    borgbackup
  ];

  systemd = lib.mkMerge [
    (mkBorgBackupService {
      name = "ochre-storage";
      title = "Ochre Storage";
      repo = ochreStorageRepo;
      sourcePath = "/OchreStorage";
      # Retention: keep the last 7 daily archives, then AT LEAST ONE per week
      # for the last 4 weeks, then AT LEAST ONE per month for the last 3 months.
      keepDaily = 7;
      keepWeekly = 4;
      keepMonthly = 3;
      # No schedule = no timer created
      # schedule = "*-*-* 03:00:00";
      enableNotifications = false;
      verbose = true;
    })
  ];
}
|
184
hosts/nixos/sock/config/backups/default.nix
Normal file
184
hosts/nixos/sock/config/backups/default.nix
Normal file
|
@ -0,0 +1,184 @@
|
|||
{
  config,
  lib,
  pkgs,
  ...
}:

# Orchestrated backup chain: starts each backup unit in sequence, folds their
# individual logs into one chain log, and sends a single summary notification.
let
  # Shared configuration
  logDir = "/var/log/backups";
  backupServices = [
    {
      name = "ochre_storage";
      title = "Ochre Storage";
      service = "backup-ochre-storage.service";
      logPattern = "borg-ochre-storage-backup-*.log";
    }
  ];

  # Helper functions
  users = config.secretsSpec.users;
  # Send a markdown notification, optionally attaching a log file.
  # Best effort: `|| true` keeps apprise failures from failing the unit.
  notify =
    title: message: logFile:
    let
      attachArg = if logFile == "" then "" else "--attach \"file://${logFile}\"";
      appriseUrl = lib.custom.mkAppriseUrl users.admin.smtp "relay@ryot.foo";
    in
    ''
      ${pkgs.apprise}/bin/apprise -vv -i "markdown" -t "${title}" \
        -b "${message}" \
        ${attachArg} \
        "${appriseUrl}" || true
    '';

  # Print the path of the newest file in `path` matching `pattern`.
  findLatestLog = pattern: path: ''
    find "${path}" -name "${pattern}" -type f -printf "%T@ %p\\n" 2>/dev/null \
      | sort -nr | head -1 | cut -d' ' -f2
  '';

  # Generate safe shell variable name (replace hyphens with underscores)
  safeName = name: lib.replaceStrings [ "-" ] [ "_" ] name;

  # Shell variable holding a service's recorded exit status.
  statusVarName = name: "STATUS_${safeName name}";

  # Common script utilities. No `set -e` on purpose: individual failures are
  # recorded per service and reported at the end.
  scriptPrelude = ''
    set -uo pipefail
    LOG_FILE="${logDir}/backup-chain-$(date +%Y%m%d-%H%M%S).log"
    mkdir -p "${logDir}"
    exec > >(tee -a "$LOG_FILE") 2>&1

    log() {
      echo "[$(date "+%Y-%m-%d %H:%M:%S")] $1"
    }

    # Initialize all status variables (assume failure until proven otherwise)
    ${lib.concatMapStringsSep "\n" (s: "${statusVarName s.name}=1") backupServices}
  '';

  # Service runner template
  runService =
    {
      name,
      title,
      service,
      logPattern,
      logPath ? "/tmp",
    }:
    ''
      log "Starting ${title} maintenance..."
      # BUGFIX: `systemctl start ... || true` forced $? to 0, so failures were
      # never recorded. There is no `set -e` here, so the plain invocation is
      # safe and $? reflects the unit's result.
      systemctl start ${service}
      ${statusVarName name}=$?
      log "${title} completed with status ''$${statusVarName name}"

      SERVICE_LOG=$(${findLatestLog logPattern logPath})
      if [ -n "$SERVICE_LOG" ]; then
        log "Appending ${title} log: $SERVICE_LOG"
        echo -e "\n\n===== ${title} LOG ($(basename "$SERVICE_LOG")) =====\n" >> "$LOG_FILE"
        cat "$SERVICE_LOG" >> "$LOG_FILE"

        # Add SnapRAID-specific summary
        if [ "${name}" = "snapraid" ]; then
          echo -e "\n=== SnapRAID Summary ===" >> "$LOG_FILE"
          grep -E '(Scrub|Sync|Diff|smart)' "$SERVICE_LOG" | tail -n 10 >> "$LOG_FILE"
        fi
      fi
    '';

  # Build the service execution script
  serviceExecution = lib.concatMapStrings runService backupServices;

  # Generate markdown status summary lines (expanded by the shell heredoc).
  statusSummaryLines = lib.concatMapStringsSep "\n" (
    s:
    let
      varName = statusVarName s.name;
    in
    "- **${s.title}:** \$([ \$${varName} -eq 0 ] && echo '✅ Success' || echo '❌ Failed') (Exit: \$${varName})"
  ) backupServices;

  # Notification logic with cleaner formatting
  notificationLogic =
    let
      statusVars = map (s: statusVarName s.name) backupServices;
    in
    ''
      # Calculate overall status
      OVERALL_STATUS=0
      ${lib.concatMapStringsSep "\n" (var: "if [ \$${var} -ne 0 ]; then OVERALL_STATUS=1; fi") statusVars}

      TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
      HOSTNAME=$(hostname)

      # BUGFIX: the heredoc previously ended with `EOF)` on one line, which is
      # not a valid delimiter line; terminator and closing paren are now split.
      SUMMARY=$(cat << EOF
      # Backup Chain Complete

      **Host:** $HOSTNAME
      **Timestamp:** $TIMESTAMP
      **Overall Status:** $([ $OVERALL_STATUS -eq 0 ] && echo '✅ Success' || echo '⚠️ Failure')

      ## Service Status:
      ${statusSummaryLines}

      **Log Path:** $LOG_FILE
      EOF
      )

      if [ $OVERALL_STATUS -eq 0 ]; then
        ${notify "✅ Backup Success" "$SUMMARY" "$LOG_FILE"}
      else
        ${notify "⚠️ Backup Issues" "$SUMMARY" "$LOG_FILE"}
      fi

      exit $OVERALL_STATUS
    '';
in
{
  imports = lib.custom.scanPaths ./.;

  systemd.services.backup-chain = {
    description = "Orchestrated Backup Chain";
    path = with pkgs; [
      apprise
      coreutils
      findutils
      gawk
      gnugrep
      hostname
      systemd
      util-linux
    ];

    # Idle priority: never compete with interactive workloads.
    serviceConfig = {
      Type = "oneshot";
      Nice = 19;
      IOSchedulingClass = "idle";
      CPUSchedulingPolicy = "idle";
    };

    script = ''
      ${scriptPrelude}
      log "Initializing backup chain on $(hostname)"

      ${serviceExecution}

      log "Finalizing backup chain"
      ${notificationLogic}
    '';
  };

  systemd.timers.backup-chain = {
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnCalendar = "*-*-* 03:00:00";
      Persistent = true;
      RandomizedDelaySec = "5min";
    };
  };

  environment.systemPackages = [ pkgs.apprise ];
  systemd.tmpfiles.rules = [ "d ${logDir} 0755 root root -" ];
}
|
4
hosts/nixos/sock/config/default.nix
Normal file
4
hosts/nixos/sock/config/default.nix
Normal file
|
@ -0,0 +1,4 @@
|
|||
# Aggregator: auto-import every sibling module in this directory.
{ lib, ... }:
{
  imports = lib.custom.scanPaths ./.;
}
|
183
hosts/nixos/sock/config/komodo/default.nix
Normal file
183
hosts/nixos/sock/config/komodo/default.nix
Normal file
|
@ -0,0 +1,183 @@
|
|||
# Auto-generated using compose2nix v0.3.1 (hand-tidied: shared restart policy
# and unit names factored into let bindings; evaluated config is unchanged).
{
  config,
  lib,
  pkgs,
  ...
}:
let
  # Only available in the Sock LXC
  DockerStorage = "/OchreStorage/komodo";
  env = config.secretsSpec.docker.komodo-sock;

  # Every Komodo unit hangs off the compose2nix root target and needs the
  # shared docker network up first.
  rootTarget = "docker-compose-komodo-root.target";
  networkService = "docker-network-komodo_default.service";

  # Restart policy shared by all three container units; mkOverride 90 leaves
  # room for downstream overrides.
  restartPolicy = {
    Restart = lib.mkOverride 90 "always";
    RestartMaxDelaySec = lib.mkOverride 90 "1m";
    RestartSec = lib.mkOverride 90 "100ms";
    RestartSteps = lib.mkOverride 90 9;
  };
in
{
  # Containers
  virtualisation.oci-containers.containers."komodo-core" = {
    image = "ghcr.io/moghtech/komodo-core:latest";
    environment = env;
    volumes = [ "${DockerStorage}/cache:/repo-cache:rw" ];
    ports = [ "9120:9120/tcp" ];
    labels = {
      "komodo.skip" = "";
    };
    dependsOn = [ "komodo-mongo" ];
    log-driver = "local";
    extraOptions = [
      "--network-alias=core"
      "--network=komodo_default"
      "--pull=always"
    ];
  };

  systemd.services."docker-komodo-core" = {
    serviceConfig = restartPolicy;
    after = [ networkService ];
    requires = [ networkService ];
    partOf = [ rootTarget ];
    wantedBy = [ rootTarget ];
  };

  virtualisation.oci-containers.containers."komodo-mongo" = {
    image = "mongo";
    environment = env;
    volumes = [
      "${DockerStorage}/mongo/config:/data/configdb:rw"
      "${DockerStorage}/mongo/data:/data/db:rw"
    ];
    # Quiet logs; cap the WiredTiger cache at 0.25 GB.
    cmd = [
      "--quiet"
      "--wiredTigerCacheSizeGB"
      "0.25"
    ];
    labels = {
      "komodo.skip" = "";
    };
    log-driver = "local";
    extraOptions = [
      "--network-alias=mongo"
      "--network=komodo_default"
    ];
  };

  systemd.services."docker-komodo-mongo" = {
    serviceConfig = restartPolicy;
    after = [ networkService ];
    requires = [ networkService ];
    partOf = [ rootTarget ];
    wantedBy = [ rootTarget ];
  };

  virtualisation.oci-containers.containers."komodo-periphery" = {
    image = "ghcr.io/moghtech/komodo-periphery:latest";
    environment = env;
    volumes = [
      "/proc:/proc:rw"
      "/var/run/docker.sock:/var/run/docker.sock:rw"
      "${DockerStorage}/repos:/etc/komodo/repos:rw"
      "${DockerStorage}/ssl:/etc/komodo/ssl:rw"
      "${DockerStorage}/stacks:${DockerStorage}/stacks:rw"
    ];
    ports = [ "8120:8120/tcp" ];
    labels = {
      "komodo.skip" = "";
    };
    log-driver = "local";
    extraOptions = [
      "--network-alias=periphery"
      "--network=komodo_default"
      "--pull=always"
    ];
  };

  systemd.services."docker-komodo-periphery" = {
    serviceConfig = restartPolicy;
    after = [ networkService ];
    requires = [ networkService ];
    partOf = [ rootTarget ];
    wantedBy = [ rootTarget ];
  };

  # Networks
  systemd.services."docker-network-komodo_default" = {
    path = [ pkgs.docker ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      ExecStop = "docker network rm -f komodo_default";
    };
    script = ''
      docker network inspect komodo_default || docker network create komodo_default
    '';
    partOf = [ rootTarget ];
    wantedBy = [ rootTarget ];
  };

  # Root service
  # When started, this will automatically create all resources and start
  # the containers. When stopped, this will teardown all resources.
  systemd.targets."docker-compose-komodo-root" = {
    unitConfig = {
      Description = "Root target generated by compose2nix.";
    };
    wantedBy = [ "multi-user.target" ];
  };
}
|
69
hosts/nixos/sock/default.nix
Normal file
69
hosts/nixos/sock/default.nix
Normal file
|
@ -0,0 +1,69 @@
|
|||
###############################################################
#
# Sock - LXC Container
# NixOS container, Intel N150 (4 Cores), 8GB/2GB RAM/SWAP
#
# Docker environment, managed with Komodo
#
###############################################################

{
  lib,
  config,
  pkgs,
  ...
}:
let
  username = "toph";
  user = config.secretsSpec.users.${username};
  firewall = config.secretsSpec.firewall.sock;
in
{
  imports = lib.flatten [
    ## Sock Only ##
    ./config

    ## Hardware ##
    ./hardware.nix

    (map lib.custom.relativeToRoot [
      ## Required Configs ##
      "hosts/global/core"

      ## Optional Configs ##
      "hosts/global/common/acme"
      "hosts/global/common/docker.nix"
    ])
  ];

  ## Host Specifications ##
  hostSpec = {
    hostName = "sock";
    inherit username;
    inherit (user) hashedPassword email handle;
    userFullName = user.fullName;
    isServer = true;
    isMinimal = true;
  };

  networking = {
    enableIPv6 = false;
    # Port lists come from the host's secret firewall spec.
    firewall = {
      inherit (firewall) allowedTCPPorts allowedTCPPortRanges allowedUDPPorts;
    };
  };

  ## System-wide packages ##
  programs.nix-ld.enable = true;
  environment.systemPackages = with pkgs; [
    lazydocker
    compose2nix
  ];

  # https://wiki.nixos.org/wiki/FAQ/When_do_I_update_stateVersion
  system.stateVersion = "25.05";
}
|
21
hosts/nixos/sock/hardware.nix
Normal file
21
hosts/nixos/sock/hardware.nix
Normal file
|
@ -0,0 +1,21 @@
|
|||
{
  lib,
  config,
  ...
}:
let
  # Owner of the local Docker storage mount point.
  inherit (config.hostSpec) username;
in
{
  imports = map lib.custom.relativeToRoot [
    "hosts/global/common/system/lxc.nix"
    "hosts/global/common/system/pool.nix"
  ];

  # Ochre has no access to PVE DockerStorage, so sock will have its own storage.
  # NOTE(review): these are *user* tmpfiles (systemd.user.tmpfiles.rules), which
  # are applied in user sessions — confirm they can actually create
  # /OchreStorage at the filesystem root, or whether the system-level
  # systemd.tmpfiles.rules was intended.
  systemd.user.tmpfiles.rules = [
    "d /OchreStorage 2775 ${username} ryot -"
  ];
}
|
BIN
secrets.nix
BIN
secrets.nix
Binary file not shown.
Loading…
Add table
Reference in a new issue