Compare commits

...

7 commits

SHA1 Message Date
955b61c5a9 Adds snapraid input & database ports
- Adds configuration for snapraid-aio from upstream repository
- Updates allowed ports with entries for MySQL and PostgreSQL
2025-04-29 16:08:15 -04:00
5fb3d590c6 Refactors Komodo host configs
- Moves Caddy configuration from optional to dedicated host config file
- Relocates container configs for authentik and Komodo under new host config path
- Updates default host config to import the new structure
- Bumps system stateVersion to 25.05
2025-04-29 15:53:21 -04:00
2942d4bf9a ACME DNS propagation fix 2025-04-29 15:51:35 -04:00
ce6c7db198 Refactor SSH known_hosts handling to use a _source, allowing local changes when needed 2025-04-29 11:16:25 -04:00
0416d8c3c1 MergerFS/NFS git permissions fix and ACL disable 2025-04-29 11:15:50 -04:00
72ce184bd4 Refactor and reorganize cloud and proxy configs
- Introduces new modules for cloud, backup, and NFS services
- Removes deprecated Caddy and cloudflared configs
- Migrates /pool from SSHFS to NFS
- Migrates filerun and SnapRAID configurations to the cloud host only, for better modularity
2025-04-29 11:14:59 -04:00
f63f4f737c Replace yay pkg with yay.nix flake 2025-04-28 18:20:56 -04:00
34 changed files with 728 additions and 588 deletions

.gitignore
View file

@@ -1,5 +1,4 @@
nixos/modules/nextcloud/nextcloud-admin-pass
.BAK/
.chat
.logs
*.bak

View file

@@ -63,11 +63,22 @@
# inputs.nixpkgs.follows = "nixpkgs";
# };
snapraid-aio = {
# url = "git+https://git.ryot.foo/toph/snapraid-aio.nix.git";
url = "git+https://git.ryot.foo/toph/snapraid-aio.nix.git";
inputs.nixpkgs.follows = "nixpkgs";
};
vscode-server = {
url = "github:nix-community/nixos-vscode-server";
inputs.nixpkgs.follows = "nixpkgs-unstable";
};
yay = {
url = "git+https://git.ryot.foo/toph/yay.nix.git";
inputs.nixpkgs.follows = "nixpkgs";
};
zen-browser = {
url = "github:youwen5/zen-browser-flake";
inputs.nixpkgs.follows = "nixpkgs-unstable";

View file

@@ -0,0 +1,10 @@
{
pkgs,
...
}:
{
imports = [
## Required Configs ##
../common/core # required
];
}

View file

@@ -43,7 +43,13 @@ in
'';
};
".ssh/known_hosts".text = lib.concatStringsSep "\n" secretsSpec.ssh.knownHosts;
".ssh/known_hosts_source" = {
source = pkgs.writeText "known-hosts" (lib.concatStringsSep "\n" secretsSpec.ssh.knownHosts);
onChange = ''
cp $HOME/.ssh/known_hosts_source $HOME/.ssh/known_hosts
chmod 644 $HOME/.ssh/known_hosts
'';
};
}
# Dynamically add all SSH private keys using the existing store paths
# Ensures the keys have correct permissions and are not symlinks

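With known_hosts now a regular file instead of a store symlink, one-off local additions work again and are simply rewritten on the next activation. A minimal check (the hostname below is hypothetical):

  test -L ~/.ssh/known_hosts || echo "regular file, locally editable"
  # a local addition persists until the next home-manager activation
  ssh-keyscan -t ed25519 git.example.org >> ~/.ssh/known_hosts
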
View file

@@ -1,24 +1,16 @@
{
pkgs,
config,
...
}:
{
imports = [
## Required Configs ##
../common/core # required
## Host-specific Optional Configs ##
];
# Useful for this host
home.file = {
Pool.source = config.lib.file.mkOutOfStoreSymlink "/pool";
DockerStorage.source = config.lib.file.mkOutOfStoreSymlink "/mnt/DockerStorage";
};
## Packages with no needed configs ##
# home.packages = builtins.attrValues {
# inherit (pkgs)
# ;
# };
}

View file

@@ -7,6 +7,9 @@
pkgs,
...
}:
let
yay = inputs.yay.packages.${pkgs.system}.default;
in
{
imports = lib.flatten [
inputs.home-manager.nixosModules.home-manager
@@ -29,7 +32,7 @@
ranger
sshfs
wget
yay # my yay @ pkgs teehee
yay # my yay teehee
];
# Enable CUPS to print documents.

View file

@@ -4,22 +4,27 @@
...
}:
let
cloudflare = pkgs.writeTextFile {
name = "cloudflare.ini";
text = ''
CF_DNS_API_TOKEN=${config.secretsSpec.api.cloudflare}
'';
};
# Create a VERY simple environment file with absolutely minimal formatting
cloudflareEnvFile = pkgs.writeText "cloudflare.env" ''
CLOUDFLARE_DNS_API_TOKEN=${config.secretsSpec.api.cloudflare}
'';
in
{
environment.systemPackages = [ pkgs.lego ];
# letsencrypt
security.acme = {
acceptTerms = true;
defaults = {
email = "chris@toph.cc";
dnsProvider = "cloudflare";
environmentFile = cloudflare;
dnsProvider = "cloudflare"; # Use Cloudflare's DNS
environmentFile = cloudflareEnvFile;
enableDebugLogs = true;
extraLegoFlags = [
"--dns.resolvers=1.1.1.1:53,8.8.8.8:53"
"--dns.propagation-wait=60s" # Wait for 60 seconds for DNS propagation
"--dns-timeout=60"
"--http-timeout=60"
];
};
certs = {
"goldenlemon.cc" = {

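A quick way to sanity-check the propagation fix is to query the challenge record against the same resolvers lego is pinned to; a hedged sketch (domain taken from the cert block above, and the TXT record only exists while a DNS-01 challenge is in flight):

  dig +short TXT _acme-challenge.goldenlemon.cc @1.1.1.1
  dig +short TXT _acme-challenge.goldenlemon.cc @8.8.8.8
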
View file

@@ -1,16 +0,0 @@
{
services.caddy.virtualHosts = {
"drive.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy http://localhost:8181 {
header_up Host {host}
# header_up X-Forwarded-For {remote}
# header_up X-Forwarded-Proto {scheme}
# header_up X-Forwarded-Protocol {scheme}
# header_up X-Forwarded-Port {server_port}
}
'';
};
};
}

View file

@@ -1,10 +0,0 @@
{ config, ... }:
{
imports = [
"./${config.hostSpec.hostName}.nix"
];
services.caddy = {
enable = true;
};
}

View file

@@ -1,100 +0,0 @@
{
services.caddy.virtualHosts = {
# "ryot.foo" = {
# useACMEHost = "ryot.foo";
# extraConfig = ''
# reverse_proxy 104.40.3.44:80
# '';
# };
"auth.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:9000 {
header_up Host {host}
header_up X-Forwarded-For {remote}
header_up X-Forwarded-Proto {scheme}
header_up X-Forwarded-Protocol {scheme}
header_up X-Forwarded-Port {server_port}
}
'';
};
"frp.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:4041
'';
};
"grafana.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:3001
'';
};
"git.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:3003
'';
};
"influx.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:8086
'';
};
"home.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:7475
'';
};
"komodo.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:9120
'';
};
"mail.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:9002
'';
};
"map.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:25566
'';
};
"outline.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:3480
'';
};
"plane.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:3000
'';
};
"upsnap.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:8090
'';
};
};
}

View file

@@ -1,10 +0,0 @@
{
services.caddy.virtualHosts = {
"cloudflared.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:14333
'';
};
};
}

View file

@@ -2,12 +2,8 @@
let
username = config.hostSpec.username;
homeDir = config.hostSpec.home;
pve-key = config.secretsSpec.ssh.privateKeys.pve;
in
{
# For less permission issues with SSHFS
programs.fuse.userAllowOther = true;
# Create the directories if they do not exist
systemd.tmpfiles.rules = [
"d /pool 2775 ${username} ryot -"
@@ -17,14 +13,16 @@ in
# File system configuration
fileSystems = {
"/pool" = {
device = "${username}@cloud:/pool";
fsType = "sshfs";
device = "cloud:/";
fsType = "nfs";
options = [
"defaults"
"reconnect"
"_netdev"
"allow_other"
"identityfile=${pve-key}"
"defaults"
"nfsvers=4.2"
"noacl"
"noatime"
"nofail"
"sec=sys"
];
};
@@ -37,4 +35,16 @@ in
];
};
};
# Ensure NFS client support is complete
boot.supportedFilesystems = [ "nfs" ];
# services.rpcbind.enable = true;
# Optional: Configure ID mapping if needed
services.nfs.idmapd.settings = {
General = {
Domain = "local"; # Must match on server and client
Verbosity = 0;
};
};
}

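For testing before a rebuild, the fileSystems entry above corresponds roughly to this manual mount (a sketch, assuming the host `cloud` resolves and the export is live):

  sudo mkdir -p /pool
  sudo mount -t nfs -o nfsvers=4.2,noacl,noatime,sec=sys cloud:/ /pool
  findmnt /pool   # confirm the resolved mount options
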
View file

@@ -0,0 +1,140 @@
{
config,
lib,
pkgs,
...
}:
let
# Borg backup destinations
dockerStorageRepo = "/pool/Backups/DockerStorage";
forgejoRepo = "/pool/Backups/forgejo";
# Common borg backup settings
borgCommonSettings = ''
# Don't use cache to avoid issues with concurrent backups
export BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes
# Set this for non-interactive use
export BORG_NON_INTERACTIVE=yes
'';
# Initialize a repo if it doesn't exist
initRepo = repo: ''
if [ ! -d "${repo}" ]; then
mkdir -p "${repo}"
${pkgs.borgbackup}/bin/borg init --encryption=none "${repo}"
fi
'';
in
{
# Make sure borg is installed
environment.systemPackages = [ pkgs.borgbackup ];
# Docker Storage Backup Service
systemd.services.backup-docker-storage = {
description = "Backup Docker storage directory with Borg";
path = with pkgs; [
borgbackup
coreutils
];
script = ''
${borgCommonSettings}
# Initialize repository if needed
${initRepo dockerStorageRepo}
# Create backup
${pkgs.borgbackup}/bin/borg create \
--stats \
--compression zstd,15 \
--exclude '*.tmp' \
--exclude '*/tmp/*' \
${dockerStorageRepo}::docker-{now:%Y-%m-%d_%H%M%S} \
/mnt/drive1/DockerStorage
# Prune old backups
${pkgs.borgbackup}/bin/borg prune \
--keep-daily 7 \
--keep-weekly 4 \
--keep-monthly 3 \
${dockerStorageRepo}
'';
serviceConfig = {
Type = "oneshot";
IOSchedulingClass = "idle";
CPUSchedulingPolicy = "idle";
Nice = 19;
};
};
# Docker Storage Backup Timer (Weekly on Monday at 4am)
systemd.timers.backup-docker-storage = {
description = "Timer for Docker Storage Backup";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "Mon *-*-* 04:00:00";
Persistent = true; # Run backup if system was off during scheduled time
RandomizedDelaySec = "5min"; # Add randomized delay
};
};
# Forgejo Backup Service
systemd.services.backup-forgejo = {
description = "Backup Forgejo directory with Borg";
path = with pkgs; [
borgbackup
coreutils
];
script = ''
${borgCommonSettings}
# Initialize repository if needed
${initRepo forgejoRepo}
# Create backup
${pkgs.borgbackup}/bin/borg create \
--stats \
--compression zstd,15 \
--exclude '*.tmp' \
--exclude '*/tmp/*' \
${forgejoRepo}::forgejo-{now:%Y-%m-%d_%H%M%S} \
/pool/forgejo
# Prune old backups
${pkgs.borgbackup}/bin/borg prune \
--keep-daily 14 \
--keep-weekly 4 \
--keep-monthly 3 \
${forgejoRepo}
'';
serviceConfig = {
Type = "oneshot";
IOSchedulingClass = "idle";
CPUSchedulingPolicy = "idle";
Nice = 19;
};
};
# Forgejo Backup Timer (Every 2 days at 4am)
systemd.timers.backup-forgejo = {
description = "Timer for Forgejo Backup";
wantedBy = [ "timers.target" ];
timerConfig = {
OnCalendar = "*-*-1/2 04:00:00"; # Every 2 days at 4am
Persistent = true;
RandomizedDelaySec = "5min";
};
};
}

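Both backup units are plain oneshot services behind timers, so they can be exercised and inspected by hand; a sketch using the repository path from the module above:

  sudo systemctl start backup-docker-storage.service
  # the repo is unencrypted, so confirm access explicitly when poking at it outside the unit
  sudo BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes borg list /pool/Backups/DockerStorage
  sudo BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes borg prune --dry-run --list \
    --keep-daily 7 --keep-weekly 4 --keep-monthly 3 /pool/Backups/DockerStorage
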
View file

@@ -0,0 +1,19 @@
{
services.caddy = {
enable = true;
virtualHosts = {
"drive.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy http://localhost:8282 {
header_up Host {host}
# header_up X-Forwarded-For {remote}
# header_up X-Forwarded-Proto {scheme}
# header_up X-Forwarded-Protocol {scheme}
# header_up X-Forwarded-Port {server_port}
}
'';
};
};
};
}

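The proxied port changes from 8181 to 8282 here, matching the published port of the filerun-web container in the compose2nix module below. A hedged smoke test from the host:

  curl -sI http://localhost:8282 | head -n 1    # container answering locally
  curl -sI https://drive.ryot.foo | head -n 1   # same app through Caddy and the ACME cert
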
View file

@@ -0,0 +1,4 @@
{ lib, ... }:
{
imports = lib.custom.scanPaths ./.;
}

View file

@@ -0,0 +1,38 @@
name: filerun
services:
  db:
    image: mariadb:10.11
    user: 1001:1004
    environment:
      MYSQL_ROOT_PASSWORD: ${DB_ROOT_PASS}
      MYSQL_USER: ${DB_USER}
      MYSQL_PASSWORD: ${DB_PASS}
      MYSQL_DATABASE: ${DB_NAME}
    restart: unless-stopped
    volumes:
      - /pool/filerun/db:/var/lib/mysql
  web:
    image: filerun/filerun:8.1
    user: root
    environment:
      FR_DB_HOST: db
      FR_DB_PORT: ${DB_PORT}
      FR_DB_NAME: ${DB_NAME}
      FR_DB_USER: ${DB_USER}
      FR_DB_PASS: ${DB_PASS}
      APACHE_RUN_USER:
      APACHE_RUN_USER_ID: 1001
      APACHE_RUN_GROUP:
      APACHE_RUN_GROUP_ID: 1004
    depends_on:
      - db
    links:
      - db:db
    ports:
      - "8181:80"
    restart: unless-stopped
    volumes:
      - /pool/filerun/html:/var/www/html
      - /pool/filerun/user-files:/user-files
      - /pool/:/pool

View file

@@ -0,0 +1,119 @@
# Auto-generated using compose2nix v0.3.1.
{
config,
lib,
pkgs,
...
}:
let
env = config.secretsSpec.docker.filerun;
in
{
# Runtime
virtualisation.docker = {
enable = true;
autoPrune.enable = true;
};
virtualisation.oci-containers.backend = "docker";
# Containers
virtualisation.oci-containers.containers."filerun-db" = {
image = "mariadb:10.11";
environment = env;
volumes = [
"/pool/filerun/db:/var/lib/mysql:rw"
];
user = "1001:1004";
log-driver = "journald";
extraOptions = [
"--network-alias=db"
"--network=filerun_default"
];
};
systemd.services."docker-filerun-db" = {
serviceConfig = {
Restart = lib.mkOverride 90 "always";
RestartMaxDelaySec = lib.mkOverride 90 "1m";
RestartSec = lib.mkOverride 90 "100ms";
RestartSteps = lib.mkOverride 90 9;
};
after = [
"docker-network-filerun_default.service"
];
requires = [
"docker-network-filerun_default.service"
];
partOf = [
"docker-compose-filerun-root.target"
];
wantedBy = [
"docker-compose-filerun-root.target"
];
};
virtualisation.oci-containers.containers."filerun-web" = {
image = "filerun/filerun:8.1";
environment = env;
volumes = [
"/pool/:/pool:rw"
"/pool/filerun/html:/var/www/html:rw"
"/pool/filerun/user-files:/user-files:rw"
];
ports = [
"8282:80/tcp"
];
dependsOn = [
"filerun-db"
];
user = "root";
log-driver = "journald";
extraOptions = [
"--network-alias=web"
"--network=filerun_default"
];
};
systemd.services."docker-filerun-web" = {
serviceConfig = {
Restart = lib.mkOverride 90 "always";
RestartMaxDelaySec = lib.mkOverride 90 "1m";
RestartSec = lib.mkOverride 90 "100ms";
RestartSteps = lib.mkOverride 90 9;
};
after = [
"docker-network-filerun_default.service"
];
requires = [
"docker-network-filerun_default.service"
];
partOf = [
"docker-compose-filerun-root.target"
];
wantedBy = [
"docker-compose-filerun-root.target"
];
};
# Networks
systemd.services."docker-network-filerun_default" = {
path = [ pkgs.docker ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
ExecStop = "docker network rm -f filerun_default";
};
script = ''
docker network inspect filerun_default || docker network create filerun_default
'';
partOf = [ "docker-compose-filerun-root.target" ];
wantedBy = [ "docker-compose-filerun-root.target" ];
};
# Root service
# When started, this will automatically create all resources and start
# the containers. When stopped, this will teardown all resources.
systemd.targets."docker-compose-filerun-root" = {
unitConfig = {
Description = "Root target generated by compose2nix.";
};
wantedBy = [ "multi-user.target" ];
};
}

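compose2nix renders each compose service as an oci-containers systemd unit, so ordinary systemd and docker tooling applies; for example:

  systemctl status docker-filerun-web.service
  docker logs filerun-web --tail 50
  docker network inspect filerun_default   # created by the oneshot network service above
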
View file

@@ -0,0 +1,23 @@
{ config, lib, ... }:
{
# Install and configure NFS server
services.nfs.server = {
enable = true;
exports = ''
# Pool export - seen as root '/' by the client
/pool *(rw,insecure,no_subtree_check,no_root_squash,fsid=0,anonuid=1000,anongid=1004)
'';
extraNfsdConfig = "vers=4,4.1,4.2";
};
# Ensure NFS client support is complete
# services.rpcbind.enable = true;
services.nfs.idmapd.settings = {
General = {
Domain = "local";
Verbosity = 0;
};
};
}

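The export table can be verified on the server without involving a client; exportfs reads the generated /etc/exports:

  sudo exportfs -v         # should show /pool with rw, no_root_squash, fsid=0
  showmount -e localhost   # RPC view of the same table; only useful if rpcbind/mountd are running
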
View file

@@ -0,0 +1,122 @@
{
pkgs,
inputs,
config,
...
}:
let
apprise-url = config.secretsSpec.api.apprise-url;
snapraid-aio = inputs.snapraid-aio.nixosModules.default;
snapraid-aio-config = pkgs.writeTextFile {
name = "snapraid-aio.config";
text = ''
CONFIG_VERSION="3.4"
CHECK_UPDATES=1
# Notification settings
APPRISE=0
APPRISE_URL=""
APPRISE_ATTACH=1
APPRISE_BIN="${pkgs.apprise}/bin/apprise"
APPRISE_EMAIL=1
APPRISE_EMAIL_URL="${apprise-url}"
TELEGRAM=0
DISCORD=0
# Thresholds for sync operations
DEL_THRESHOLD=500
UP_THRESHOLD=500
IGNORE_PATTERN=""
ADD_DEL_THRESHOLD=0
SYNC_WARN_THRESHOLD=0
# Scrub settings
SCRUB_PERCENT=5
SCRUB_AGE=10
SCRUB_NEW=1
SCRUB_DELAYED_RUN=0
# Performance and behavior settings
PREHASH=1
FORCE_ZERO=0
SPINDOWN=0
VERBOSITY=1
RETENTION_DAYS=30
# Logging settings
SNAPRAID_LOG_DIR="/var/log/snapraid"
SMART_LOG=1
SMART_LOG_NOTIFY=0
SNAP_STATUS=1
SNAP_STATUS_NOTIFY=1
# Critical paths
SNAPRAID_CONF="/etc/snapraid.conf"
SNAPRAID_BIN="${pkgs.snapraid}/bin/snapraid"
# Email settings (optional - uncomment and configure if needed)
# EMAIL_ADDRESS="your-email@example.com"
# FROM_EMAIL_ADDRESS="snapraid@your-server.com"
# Advanced settings - typically no need to modify
CHK_FAIL=0
DO_SYNC=1
EMAIL_SUBJECT_PREFIX="(SnapRAID on $(hostname))"
SERVICES_STOPPED=0
SYNC_WARN_FILE="/var/lib/snapraid-aio/snapRAID.warnCount"
SCRUB_COUNT_FILE="/var/lib/snapraid-aio/snapRAID.scrubCount"
TMP_OUTPUT="/var/lib/snapraid-aio/snapRAID.out"
SNAPRAID_LOG="/var/log/snapraid/snapraid.log"
'';
};
snapraid-conf = pkgs.writeTextFile {
name = "snapraid.conf";
text = ''
## /etc/snapraid.conf ##
# Defines the file to use as parity storage
parity /mnt/parity/snapraid.parity
# Defines the files to use as content list
content /var/snapraid.content
content /mnt/drive1/snapraid.content
content /mnt/drive2/snapraid.content
content /mnt/drive3/snapraid.content
content /mnt/parity/snapraid.content
# Defines the data disks to use
data d1 /mnt/drive1/
data d2 /mnt/drive2/
data d3 /mnt/drive3/
# Defines files and directories to exclude
exclude *.unrecoverable
exclude /tmp/
exclude /lost+found/
'';
};
in
{
imports = [
inputs.snapraid-aio.nixosModules.default
];
# Make sure the SnapRAID config exists
environment.etc."snapraid.conf".source = snapraid-conf;
# Create required directories
systemd.tmpfiles.rules = [
"d /var/lib/snapraid-aio 0755 root root -"
"d /var/log/snapraid 0755 root root -"
];
# Set up snapraid-aio service
services.snapraid-aio = {
enable = true;
configFile = snapraid-aio-config;
schedule = "*-*-* 03:00:00"; # Run daily at 3am
};
}

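snapraid-aio drives the array on the 03:00 schedule, but the same snapraid binary can be run directly against the generated /etc/snapraid.conf; a sketch:

  sudo snapraid status      # array health and unsynced-change summary
  sudo snapraid sync        # the core operation the AIO script wraps
  sudo snapraid scrub -p 5  # scrub 5% of the array, mirroring SCRUB_PERCENT
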
View file

@@ -1,12 +1,11 @@
###############################################################
#
# Prozy - LXC Container
# NixOS container, Ryzen 5 5600G (3 Cores), 2GB/2GB RAM/SWAP
# Cloud - LXC Container
NixOS container, Ryzen 5 5600G (4 Cores), 4GB/4GB RAM/SWAP
#
###############################################################
{
inputs,
lib,
config,
pkgs,
@@ -19,6 +18,10 @@ let
in
{
imports = lib.flatten [
## Cloud Only ##
./config
## Hardware ##
./hardware.nix
@@ -28,12 +31,10 @@ in
## Optional Configs ##
"hosts/common/optional/acme"
"hosts/common/optional/caddy"
"hosts/common/optional/docker.nix"
"hosts/common/optional/containers/cloudflared.nix"
## Cloud Specific ##
"hosts/users/${username}" # # Not the best solution but I always have one user so ¯\_(ツ)_/¯
## Host user ##
"hosts/users/${username}" # Not the best solution but I always have one user so ¯\_(ツ)_/¯
])
];
@@ -50,21 +51,21 @@ in
networking = {
enableIPv6 = false;
firewall.allowedTCPPorts = firewall.allowedTCPPorts;
firewall = {
allowedTCPPorts = firewall.allowedTCPPorts;
allowedUDPPorts = firewall.allowedUDPPorts;
};
};
## System-wide packages ##
programs.nix-ld.enable = true;
environment.systemPackages = with pkgs; [
apprise
lazydocker
mergerfs
snapraid
];
environment.etc = {
"cloudflared/.keep" = {
text = "This directory is used to store cloudflared configuration files.";
};
};
# https://wiki.nixos.org/wiki/FAQ/When_do_I_update_stateVersion
system.stateVersion = "24.11";
system.stateVersion = "25.05";
}

View file

@@ -1,12 +1,48 @@
{
lib,
config,
...
}:
let
username = config.hostSpec.username;
homeDir = config.hostSpec.home;
in
{
imports = lib.flatten [
(map lib.custom.relativeToRoot [
"hosts/common/optional/system/lxc.nix"
"hosts/common/optional/system/pool.nix"
])
];
# Less permission issues with pool
programs.fuse.userAllowOther = true;
# File system configuration
# INFO: Cloud is the pool provider
fileSystems = {
"/pool" = {
fsType = "fuse.mergerfs";
device = "/mnt/data*";
options = [
"cache.files=auto-full"
"defaults"
"allow_other"
"minfreespace=50G"
"fsname=mergerfs"
"category.create=mfs"
"nfsopenhack=all"
"nonempty"
"uid=1000"
"gid=1004" # Ryot group
];
};
"${homeDir}/git" = {
fsType = "none";
device = "/pool/git";
options = [
"bind"
"nofail"
];
};
};
}

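Once the pool is up, the union and the bind mount can be checked like any other filesystems:

  df -h /pool               # capacity is the sum of the /mnt/data* branches
  findmnt -t fuse.mergerfs  # the mergerfs mount and its resolved options
  findmnt ~/git             # bind mount of /pool/git into the home directory
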
View file

@@ -1,6 +1,10 @@
# Auto-generated using compose2nix v0.3.1.
{ pkgs, lib, ... }:
{
pkgs,
lib,
config,
...
}:
let
# Only available in the Komodo LXC
DockerStorage = "/mnt/DockerStorage/komodo/stacks/authentik";

View file

@@ -0,0 +1,103 @@
{
services.caddy = {
enable = true;
virtualHosts = {
# "ryot.foo" = {
# useACMEHost = "ryot.foo";
# extraConfig = ''
# reverse_proxy 104.40.3.44:80
# '';
# };
"auth.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:9000 {
header_up Host {host}
header_up X-Forwarded-For {remote}
header_up X-Forwarded-Proto {scheme}
header_up X-Forwarded-Protocol {scheme}
header_up X-Forwarded-Port {server_port}
}
'';
};
"frp.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:4041
'';
};
"grafana.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:3001
'';
};
"git.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:3003
'';
};
"influx.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:8086
'';
};
"home.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:7475
'';
};
"komodo.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:9120
'';
};
"mail.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:9002
'';
};
"map.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:25566
'';
};
"outline.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:3480
'';
};
"plane.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:3000
'';
};
"upsnap.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:8090
'';
};
};
};
}

View file

@@ -0,0 +1,4 @@
{ lib, ... }:
{
imports = lib.custom.scanPaths ./.;
}

View file

@@ -1,8 +1,8 @@
# Auto-generated using compose2nix v0.3.1.
{
pkgs,
config,
lib,
admin,
pkgs,
...
}:
let

View file

@@ -6,7 +6,6 @@
###############################################################
{
inputs,
lib,
config,
pkgs,
@@ -19,6 +18,9 @@ let
in
{
imports = lib.flatten [
## Komodo Only ##
./config
## Hardware ##
./hardware.nix
@@ -28,12 +30,9 @@ in
## Optional Configs ##
"hosts/common/optional/acme"
"hosts/common/optional/caddy"
"hosts/common/optional/docker.nix"
"hosts/common/containers/authentik"
"hosts/common/containers/komodo"
## Komodo Specific ##
## Host User ##
"hosts/users/${username}" # # Not the best solution but I always have one user so ¯\_(ツ)_/¯
])
];
@@ -66,5 +65,5 @@ in
];
# https://wiki.nixos.org/wiki/FAQ/When_do_I_update_stateVersion
system.stateVersion = "24.11";
system.stateVersion = "25.05";
}

View file

@@ -0,0 +1,13 @@
{
services.caddy = {
enable = true;
virtualHosts = {
"cloudflared.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:14333
'';
};
};
};
}

View file

@@ -0,0 +1,4 @@
{ lib, ... }:
{
imports = lib.custom.scanPaths ./.;
}

View file

@@ -1,6 +1,6 @@
###############################################################
#
# Prozy - LXC Container
# Proxy - LXC Container
# NixOS container, Ryzen 5 5600G (3 Cores), 2GB/2GB RAM/SWAP
#
###############################################################
@@ -19,6 +19,9 @@ let
in
{
imports = lib.flatten [
## Proxy Only ##
./config
## Hardware ##
./hardware.nix
@@ -28,11 +31,9 @@ in
## Optional Configs ##
"hosts/common/optional/acme"
"hosts/common/optional/caddy"
"hosts/common/optional/docker.nix"
"hosts/common/optional/containers/cloudflared.nix"
## Proxy Specific ##
## Proxy User ##
"hosts/users/${username}" # # Not the best solution but I always have one user so ¯\_(ツ)_/¯
])
];

View file

@@ -1,390 +0,0 @@
# I just missed writing yay in terminal
{ pkgs, lib, ... }:
let
mainScript = ''
#!${lib.getExe pkgs.fish}
# Helper functions for colored output
function __yay_red
printf "\033[31m[!] %s\033[0m\n" $argv[1]
end
function __yay_green
printf "\033[32m[+] %s\033[0m\n" $argv[1]
end
function __yay_yellow
printf "\033[33m[*] %s\033[0m\n" $argv[1]
end
function __yay_blue
printf "\033[34m[i] %s\033[0m\n" $argv[1]
end
# Function to determine the flake path
function __yay_get_flake_path
set -l path_arg $argv[1]
set -l flake_path ""
# Priority: 1. Path arg, 2. FLAKE env var, 3. Current directory
if test -n "$path_arg"
# redirect diagnostics to stderr so only the path comes out on stdout
__yay_yellow "Using flake path from argument: $path_arg" >&2
set flake_path $path_arg
else if set -q FLAKE
__yay_yellow "Using flake path from FLAKE env var: $FLAKE" >&2
set flake_path $FLAKE
else
set flake_path (pwd)
__yay_yellow "Using current directory as flake path: $flake_path" >&2
end
# Verify the flake path has a flake.nix
if not test -f "$flake_path/flake.nix"
__yay_red "No flake.nix found in $flake_path" >&2
return 1
end
# emit only the path on stdout
echo $flake_path
end
# Function to clean home manager backups
function __yay_clean_hm_backups
__yay_yellow "««« CLEARING HOME-MANAGER BACKUPS »»»"
set total_files (find ~/.config -type f -name "*.homeManagerBackupFileExtension" | wc -l)
if test $total_files -eq 0
__yay_green "No home manager backup files found"
return 0
end
set counter 0
find ~/.config -type f -name "*.homeManagerBackupFileExtension" | while read -l file
set counter (math $counter + 1)
echo -n (printf "\rDeleting file %d of %d" $counter $total_files)
rm $file
end
echo # new line after progress
__yay_green "Removed $total_files home manager backup files"
end
# Command: rebuild
function __yay_rebuild
set -l options h/help 'p/path=' 'H/host=' t/trace
argparse $options -- $argv
or return 1
if set -ql _flag_help
echo "Usage: yay rebuild [OPTIONS]"
echo "Options:"
echo " -p, --path PATH Path to the Nix configuration (overrides FLAKE env var)"
echo " -H, --host HOST Hostname to build for (default: current hostname)"
echo " -t, --trace Enable trace output"
echo " -h, --help Show this help message"
return 0
end
# Get the flake path
set -l flake_path (__yay_get_flake_path $_flag_path)
if test $status -ne 0
return 1
end
# Determine hostname
set -l host
if set -ql _flag_host
set host $_flag_host
else
set host (hostname)
end
# Clean home manager backups first
__yay_clean_hm_backups
# Run the rebuild
__yay_green "««« REBUILDING SYSTEM »»»"
__yay_green "Building configuration for host: $host"
__yay_green "Using flake at: $flake_path"
# Set the repo path for nh
set -x REPO_PATH $flake_path
# Change to the flake directory
set -l original_dir (pwd)
cd $flake_path
# Execute nh os switch
if set -ql _flag_trace
nh os switch . -- --impure --show-trace
else
nh os switch . -- --impure
end
set -l result $status
# Return to original directory
cd $original_dir
if test $result -eq 0
__yay_green "System rebuild completed successfully!"
else
__yay_red "System rebuild failed with exit code $result"
end
return $result
end
# Command: update
function __yay_update
set -l options h/help 'p/path='
argparse $options -- $argv
or return 1
if set -ql _flag_help
echo "Usage: yay update [OPTIONS]"
echo "Options:"
echo " -p, --path PATH Path to the Nix configuration (overrides FLAKE env var)"
echo " -h, --help Show this help message"
return 0
end
# Get the flake path
set -l flake_path (__yay_get_flake_path $_flag_path)
if test $status -ne 0
return 1
end
__yay_green "««« UPDATING FLAKE INPUTS »»»"
__yay_green "Using flake at: $flake_path"
# Change to the flake directory
set -l original_dir (pwd)
cd $flake_path
# Update the flake inputs
nix flake update
set -l result $status
# Return to original directory
cd $original_dir
if test $result -eq 0
__yay_green "Flake inputs updated successfully!"
else
__yay_red "Failed to update flake inputs with exit code $result"
end
return $result
end
# Command: garbage
function __yay_garbage
set -l options h/help
argparse $options -- $argv
or return 1
if set -ql _flag_help
echo "Usage: yay garbage"
echo "Clears all possible garbage from the Nix store"
echo "Options:"
echo " -h, --help Show this help message"
return 0
end
# ask for sudo once up-front
__yay_yellow "Requesting sudo credentials"
sudo -v
__yay_green "««« CLEANING NIX GARBAGE »»»"
__yay_yellow "Running: sudo nh clean all"
sudo nh clean all
__yay_yellow "Running: nh clean all"
nh clean all
__yay_yellow "Running: sudo nix-collect-garbage --delete-old"
sudo nix-collect-garbage --delete-old
__yay_yellow "Running: nix-collect-garbage --delete-old"
nix-collect-garbage --delete-old
__yay_yellow "Running: sudo nix-store --gc"
sudo nix-store --gc
__yay_yellow "Running: nix-store --gc"
nix-store --gc
__yay_green "Garbage collection completed successfully!"
return 0
end
# Command: try
function __yay_try
set -l options h/help
argparse $options -- $argv
or return 1
if set -ql _flag_help || test (count $argv) -eq 0
echo "Usage: yay try PACKAGE [PACKAGE...]"
echo "Creates a shell with the specified package(s)"
echo "Options:"
echo " -h, --help Show this help message"
return 0
end
__yay_green "««« CREATING NIX SHELL »»»"
__yay_yellow "Loading packages: $argv"
# Run nix-shell with the provided packages and launch fish as the interactive shell
nix-shell -p $argv --command fish
return $status
end
# Show help
function __yay_help
echo "Usage: yay COMMAND [OPTIONS]"
echo ""
echo "A wrapper around Nix commands"
echo ""
echo "Commands:"
echo " rebuild Rebuild the NixOS configuration"
echo " update Update flake inputs"
echo " garbage Clean up the Nix store"
echo " try Create a shell with the specified package(s)"
echo " help Show this help message"
echo ""
echo "Run 'yay COMMAND --help' for command-specific help"
end
# Main script entry point
if test (count $argv) -eq 0
__yay_help
exit 1
end
set -l cmd $argv[1]
set -l cmd_args $argv[2..-1]
switch $cmd
case rebuild
__yay_rebuild $cmd_args
case update
__yay_update $cmd_args
case garbage
__yay_garbage $cmd_args
case try
__yay_try $cmd_args
case -h --help help
__yay_help
case '*'
__yay_red "Unknown command: $cmd"
__yay_help
exit 1
end
'';
completionsScript = ''
# Complete the main command
complete -c yay -f
# Complete the top-level subcommands
complete -c yay -n "__fish_use_subcommand" -a rebuild -d "Rebuild the NixOS configuration"
complete -c yay -n "__fish_use_subcommand" -a update -d "Update flake inputs"
complete -c yay -n "__fish_use_subcommand" -a garbage -d "Clean up the Nix store"
complete -c yay -n "__fish_use_subcommand" -a try -d "Create a shell with the specified package(s)"
complete -c yay -n "__fish_use_subcommand" -a help -d "Show help message"
# Options for 'rebuild'
complete -c yay -n "__fish_seen_subcommand_from rebuild" -s p -l path -r -d "Path to the Nix configuration"
complete -c yay -n "__fish_seen_subcommand_from rebuild" -s H -l host -r -d "Hostname to build for"
complete -c yay -n "__fish_seen_subcommand_from rebuild" -s t -l trace -d "Enable trace output"
complete -c yay -n "__fish_seen_subcommand_from rebuild" -s h -l help -d "Show help message"
# Options for 'update'
complete -c yay -n "__fish_seen_subcommand_from update" -s p -l path -r -d "Path to the Nix configuration"
complete -c yay -n "__fish_seen_subcommand_from update" -s h -l help -d "Show help message"
# Options for 'garbage'
complete -c yay -n "__fish_seen_subcommand_from garbage" -s h -l help -d "Show help message"
# Options for 'try'
complete -c yay -n "__fish_seen_subcommand_from try" -s h -l help -d "Show help message"
# Package suggestions for 'try' (using nix-env's available packages)
function __yay_list_packages
# Use persistent cache file in /tmp (lasts until reboot)
set -l cache_file "/tmp/yay_packages_cache"
# Load from cache if it exists
if test -f "$cache_file"
cat "$cache_file"
return 0
end
# Otherwise, fetch packages and store in cache
echo -n "Loading packages..." >&2
# Run nix-env but redirect warnings to /dev/null
set -l packages (nix-env -qa --json 2>/dev/null | jq -r 'keys[]' 2>/dev/null)
# Process packages to remove namespace prefix (like "nixos.", "nixpkgs.", etc.)
set -l cleaned_packages
for pkg in $packages
set -l cleaned_pkg (string replace -r '^[^.]+\.' ''\'''\' $pkg)
set -a cleaned_packages $cleaned_pkg
end
# Save to cache file for future shell sessions
printf "%s\n" $cleaned_packages > "$cache_file"
echo " done!" >&2
# Output the packages
printf "%s\n" $cleaned_packages
end
complete -c yay -n "__fish_seen_subcommand_from try; and not __fish_is_switch" -a "(__yay_list_packages)" -d "Nix package"
'';
# Create the main script
scriptFile = pkgs.writeTextFile {
name = "yay";
text = mainScript;
executable = true;
destination = "/bin/yay";
};
# Create the completions file
completionsFile = pkgs.writeTextFile {
name = "yay-completions";
text = completionsScript;
destination = "/share/fish/vendor_completions.d/yay.fish";
};
in
pkgs.symlinkJoin {
name = "yay";
paths = [
scriptFile
completionsFile
];
buildInputs = [ pkgs.makeWrapper ];
# Make sure nh is always available in PATH
postBuild = ''
wrapProgram $out/bin/yay \
--prefix PATH : ${
lib.makeBinPath [
pkgs.nh
pkgs.jq
]
}
'';
meta = with lib; {
description = "A convenient wrapper around Nix commands with fish completions";
license = licenses.mit;
platforms = platforms.unix;
maintainers = [ "Tophc7" ];
};
}

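This in-repo package is removed in favor of the yay.nix flake input added to flake.nix above; usage stays the same, per the script's own help text (the config path below is hypothetical):

  yay rebuild -H cloud        # nh os switch for the 'cloud' host
  yay update -p ~/nix-config  # nix flake update at the given path
  yay garbage                 # layered store cleanup via nh and nix-collect-garbage
  yay try ripgrep jq          # ephemeral nix-shell with fish
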
Binary file not shown.