Compare commits

..

20 commits

Author SHA1 Message Date
02de4d9cb3 Removed AGS, its got its own repo asta.nix 2025-05-10 00:17:37 -04:00
a5b0152a2f Add ags-watch script and update devShell build inputs 2025-05-09 23:50:07 -04:00
f55a2a0fe3 Fix source path in ags.lib.bundle configuration 2025-05-09 23:00:09 -04:00
ff97470010 Reconfigured AGS; types, tsc version, and flake are fixed up and working 2025-05-09 22:57:35 -04:00
52aa56f377 fixed port for filerun 2025-05-08 20:24:30 -04:00
5cfde2d467 Refactors user declaration for /hosts and fixes related configs 2025-05-08 19:19:06 -04:00
704a630a33 Refactors host config file structure
• Migrates configuration files from 'common' to 'global'
• Updates import paths across modules
• Removes outdated macOS configuration
• Adds user configuration files WIP
2025-05-08 17:06:03 -04:00
ae77147d86 Migrates common modules to global
- Updates import paths in multiple configurations
2025-05-08 16:49:55 -04:00
ccd08cf21d Add hyprland keybing for better-control powermenu 2025-05-08 16:45:39 -04:00
6de78e75e6 Enhances backup orchestration and notifications
• Move lazydocker pkg to docker.nix
• Extends backup service generator with optional scheduling, verbose logging, and new mkAppriseUrl
• Refactors backup configurations and renames files for clarity
• Introduces backup chain orchestration for smoother maintenance
• Updates Apprise URL generation and removes deprecated secret spec functions
2025-05-04 17:17:33 -04:00
63fbfe8426 Update backup schedules and exclude paths in SnapRAID 2025-05-03 12:38:42 -04:00
1c1d73fbab Refactors backup engine & SMTP config
• Introduces a unified backup service generator with notification and stats extraction
• Consolidates Borg backup logic, replacing duplicate service definitions
• Updates SMTP configuration and Apprise URL generation in secret specifications
• Refines file exclusion lists for snapraid
2025-04-30 15:05:01 -04:00
981634c923 Refactor pool.nix to create a symlink for the git directory in the home folder and clean up tmpfiles rules 2025-04-30 01:18:46 -04:00
955b61c5a9 Adds snapraid input & database ports
- Adds configuration for snapraid-aio from upstream repository
- Updates allowed ports with entries for MySQL and PostgreSQL
2025-04-29 16:08:15 -04:00
5fb3d590c6 Refactors Komodo host configs
• Moves Caddy configuration from optional to dedicated host config file
• Relocates container configs for authentik and Komodo under new host config path
• Updates default host config to import the new structure
• Bumps system stateVersion to 25.05
2025-04-29 15:53:21 -04:00
2942d4bf9a ACME dns propagation fix 2025-04-29 15:51:35 -04:00
ce6c7db198 Refactor SSH known_hosts handling to use a _source, allowing local changes when needed 2025-04-29 11:16:25 -04:00
0416d8c3c1 MergerFS NFS git permissions fix, and acl disable 2025-04-29 11:15:50 -04:00
72ce184bd4 Refactor and reorganize cloud and proxy configs
- Introduces new modules for cloud, backup, and NFS services
- Removes deprecated Caddy and cloudflared configs
- Migrate /pool from SSHFS to NFS
- Migrate filerun and SnapRAID configurations to cloud only for better modularity
2025-04-29 11:14:59 -04:00
f63f4f737c Replace yay pkg with yay.nix flake 2025-04-28 18:20:56 -04:00
103 changed files with 1448 additions and 1374 deletions

3
.gitignore vendored
View file

@ -1,5 +1,4 @@
nixos/modules/nextcloud/nextcloud-admin-pass
.BAK/ .BAK/
.chat .chat
.logs
*.bak *.bak

View file

@ -1 +0,0 @@
use flake

2
ags/.gitignore vendored
View file

@ -1,2 +0,0 @@
node_modules/
@girs/

View file

@ -1,12 +0,0 @@
{
"typescript.tsdk": "node_modules/typescript/lib",
"explorer.fileNesting.patterns": {
"flake.nix": "*.nix, flake.lock, .envrc",
"package.json": " pnpm-lock.yaml, tsconfig.json, .gitignore"
},
"files.exclude": {
".direnv": true,
"node_modules": true,
"@girs": true
}
}

91
ags/flake.lock generated
View file

@ -1,91 +0,0 @@
{
"nodes": {
"ags": {
"inputs": {
"astal": "astal",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1738087375,
"narHash": "sha256-GLyNtU9A2VN22jNRHZ2OXuFfTJLh8uEVVt+ftsKUX0c=",
"owner": "aylur",
"repo": "ags",
"rev": "a6a7a0adb17740f4c34a59902701870d46fbb6a4",
"type": "github"
},
"original": {
"owner": "aylur",
"repo": "ags",
"type": "github"
}
},
"astal": {
"inputs": {
"nixpkgs": [
"ags",
"nixpkgs"
]
},
"locked": {
"lastModified": 1737670815,
"narHash": "sha256-ZCxxshGN7XooabArcoGkYSNx5yVunqjKJi2aTv6cznI=",
"owner": "aylur",
"repo": "astal",
"rev": "127e9cdcbf173846a3c40ddc0abfbb038df48042",
"type": "github"
},
"original": {
"owner": "aylur",
"repo": "astal",
"type": "github"
}
},
"astal_2": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1742571008,
"narHash": "sha256-5WgfJAeBpxiKbTR/gJvxrGYfqQRge5aUDcGKmU1YZ1Q=",
"owner": "aylur",
"repo": "astal",
"rev": "dc0e5d37abe9424c53dcbd2506a4886ffee6296e",
"type": "github"
},
"original": {
"owner": "aylur",
"repo": "astal",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1742422364,
"narHash": "sha256-mNqIplmEohk5jRkqYqG19GA8MbQ/D4gQSK0Mu4LvfRQ=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "a84ebe20c6bc2ecbcfb000a50776219f48d134cc",
"type": "github"
},
"original": {
"owner": "nixos",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"ags": "ags",
"astal": "astal_2",
"nixpkgs": "nixpkgs"
}
}
},
"root": "root",
"version": 7
}

View file

@ -1,132 +0,0 @@
{
inputs = {
nixpkgs.url = "github:nixos/nixpkgs?ref=nixos-unstable";
ags = {
url = "github:aylur/ags";
inputs.nixpkgs.follows = "nixpkgs";
};
astal = {
url = "github:aylur/astal";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs =
{ self, nixpkgs, ... }@inputs:
let
system = "x86_64-linux";
pkgs = nixpkgs.legacyPackages.${system};
nativeBuildInputs = with pkgs; [
blueprint-compiler
dart-sass
esbuild
fzf
gobject-introspection
libgtop
meson
ninja
pkg-config
wrapGAppsHook4
gtk4
gjs
];
agsPkgs = with inputs.ags.packages.${pkgs.system}; [
apps
astal4
bluetooth
greet
hyprland
io
mpris
network
notifd
tray
wireplumber
];
in
{
packages.${system} = {
default = inputs.ags.lib.bundle {
inherit pkgs;
src = ./.;
name = "yash"; # name of executable
entry = "app.ts";
gtk4 = true;
# additional libraries and executables to add to gjs' runtime
extraPackages = nativeBuildInputs ++ agsPkgs;
};
# TO GEN TYPES, ags types is FUCKED
# npx -y @ts-for-gir/cli generate --ignoreVersionConflicts --outdir ./@girs -g /nix/store/gq0k2imad3ijd0ih87aiinj617wyh34f-gir-dirs/share/gir-1.0
# subtree script to manage git subtree pushes and pulls. Personal use feel free to remove
subtree = pkgs.writeScriptBin "subtree" ''
#!/usr/bin/env fish
if test (count $argv) -ne 1
echo "Usage: $argv0 push|pull"
exit 1
end
set action $argv[1]
set subtree_path "ags"
set remote "yash-origin"
set branch "main"
cd (git rev-parse --show-toplevel)
switch $action
case push
set changes (git status --porcelain "$subtree_path")
if test -n "$changes"
set_color yellow; echo " Cannot push. There are uncommitted changes in $subtree_path."
exit 1
end
git subtree push --prefix="$subtree_path" $remote $branch
case pull
git subtree pull --prefix="$subtree_path" $remote $branch
case commit
set changes (git status --porcelain "$subtree_path")
if test -z "$changes"
echo "No changes to commit in $subtree_path."
exit 0
end
echo " Enter commit message:"
read commit_message
if test -z "$commit_message"
echo "Commit message cannot be empty."
exit 1
end
git add "$subtree_path"
git commit -m "$commit_message"
case '*'
echo "Unknown argument. Use push or pull."
exit 1
end
'';
};
devShells.${system} = {
default = pkgs.mkShell {
nativeBuildInputs = [
(inputs.ags.packages.${system}.agsFull.override {
extraPackages = nativeBuildInputs;
})
];
packages = [
pkgs.gjs
pkgs.pnpm
pkgs.nodejs
self.packages.${system}.subtree
inputs.ags.packages.${system}.agsFull
inputs.astal.packages.${system}.default
] ++ nativeBuildInputs;
};
};
};
}

View file

@ -1,9 +0,0 @@
{
"name": "astal-shell",
"dependencies": {
"astal": "/nix/store/88kb2cyzj1hwad8s43wzdkafyp3hsh31-astal-gjs/share/astal/gjs"
},
"devDependencies": {
"typescript": "5.7.3"
}
}

24
ags/pnpm-lock.yaml generated
View file

@ -1,24 +0,0 @@
lockfileVersion: '9.0'
settings:
autoInstallPeers: true
excludeLinksFromLockfile: false
importers:
.:
devDependencies:
typescript:
specifier: 5.7.3
version: 5.7.3
packages:
typescript@5.7.3:
resolution: {integrity: sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==}
engines: {node: '>=14.17'}
hasBin: true
snapshots:
typescript@5.7.3: {}

View file

@ -1,10 +0,0 @@
import { App } from "astal/gtk4";
import style from "./style.scss";
import Bar from "./app/windows/Bar";
App.start({
css: style,
main() {
App.get_monitors().map(Bar);
},
});

View file

@ -1,16 +0,0 @@
import { App, Astal, Gtk, Gdk } from "astal/gtk4";
import { GLib, Variable } from "astal";
function Time({ format = "%H:%M - %A %e." }) {
const time = Variable<string>("").poll(1000, () => GLib.DateTime.new_now_local().format(format)!);
return <label cssName="Time" onDestroy={() => time.drop()} label={time()} />;
}
export default function BarCenter() {
return (
<box>
<Time />
</box>
);
}

View file

@ -1,46 +0,0 @@
import { App, Astal, Gtk, Gdk } from "astal/gtk4";
import { bind, GLib, Variable } from "astal";
import Hyprland from "gi://AstalHyprland";
const time = Variable("").poll(1000, "date");
function Launcher() {
return (
<button cssName="barauncher">
<image iconName={GLib.get_os_info("LOGO") || "missing-symbolic"} />
</button>
);
}
function Workspaces() {
const hypr = Hyprland.get_default();
return (
<box cssName="Workspaces">
{bind(hypr, "workspaces").as((wss) =>
wss
.filter((ws) => !(ws.id >= -99 && ws.id <= -2)) // filter out special workspaces
.sort((a, b) => a.id - b.id)
.map((ws) => (
<button
cssName={bind(hypr, "focusedWorkspace")
.as((fw) => (ws === fw ? "focused" : ""))
.get()}
onClicked={() => ws.focus()}
>
{ws.id}
</button>
))
)}
</box>
);
}
export default function BarLeft() {
return (
<box>
<Launcher />
<Workspaces />
</box>
);
}

View file

@ -1,8 +0,0 @@
import { App, Astal, Gtk, Gdk } from "astal/gtk4";
import { Variable } from "astal";
const time = Variable("").poll(1000, "date");
export default function BarRight() {
return <></>;
}

View file

@ -1,3 +0,0 @@
export { default as BarLeft } from "./BarLeft";
export { default as BarCenter } from "./BarCenter";
export { default as BarRight } from "./BarRight";

View file

@ -1,36 +0,0 @@
import { App, Astal, Gtk, Gdk } from "astal/gtk4";
import { Variable } from "astal";
import { BarLeft, BarCenter, BarRight } from "./components";
const time = Variable("").poll(1000, "date");
export default function Bar(gdkmonitor: Gdk.Monitor) {
const { TOP, LEFT, RIGHT } = Astal.WindowAnchor;
return (
<window cssName="Bar" visible gdkmonitor={gdkmonitor} exclusivity={Astal.Exclusivity.EXCLUSIVE} anchor={TOP | LEFT | RIGHT} application={App}>
<centerbox cssName="Bar-centerbox">
<BarLeft />
<BarCenter />
<BarRight />
</centerbox>
</window>
);
// return (
// <window visible cssClasses={["Bar"]} gdkmonitor={gdkmonitor} exclusivity={Astal.Exclusivity.EXCLUSIVE} anchor={TOP | LEFT | RIGHT} application={App}>
// <centerbox cssName="centerbox">
// <button onClicked="echo hello" hexpand halign={Gtk.Align.CENTER}>
// Welcome to AGS!
// </button>
// <box />
// <menubutton hexpand halign={Gtk.Align.CENTER}>
// <label label={time()} />
// <popover>
// <Gtk.Calendar />
// </popover>
// </menubutton>
// </centerbox>
// </window>
// );
}

21
ags/src/env.d.ts vendored
View file

@ -1,21 +0,0 @@
declare const SRC: string
declare module "inline:*" {
const content: string
export default content
}
declare module "*.scss" {
const content: string
export default content
}
declare module "*.blp" {
const content: string
export default content
}
declare module "*.css" {
const content: string
export default content
}

View file

@ -1,20 +0,0 @@
// https://gitlab.gnome.org/GNOME/gtk/-/blob/gtk-3-24/gtk/theme/Adwaita/_colors-public.scss
$fg-color: #{"@theme_fg_color"};
$bg-color: #{"@theme_bg_color"};
window.Bar {
background: transparent;
color: $fg-color;
font-weight: bold;
>centerbox {
background: $bg-color;
border-radius: 10px;
margin: 8px;
}
button {
border-radius: 8px;
margin: 2px;
}
}

View file

@ -1,14 +0,0 @@
{
"$schema": "https://json.schemastore.org/tsconfig",
"compilerOptions": {
"experimentalDecorators": true,
// "checkJs": true,
// "allowJs": true,
"jsx": "react-jsx",
"jsxImportSource": "astal/gtk4",
"module": "ES2022",
"moduleResolution": "Bundler",
"strict": true,
"target": "ES2022"
}
}

152
flake.lock generated
View file

@ -75,11 +75,11 @@
] ]
}, },
"locked": { "locked": {
"lastModified": 1744989088, "lastModified": 1746393010,
"narHash": "sha256-j3HCGYdauq74hZzlgsDpQKx8rKLRX2oNhrsS8Jr8ZE8=", "narHash": "sha256-hKKgN1aR+KFybG+w9FqZpKntd5dC3VULHA1nYMonxiI=",
"owner": "rishabh5321", "owner": "rishabh5321",
"repo": "better-control-flake", "repo": "better-control-flake",
"rev": "0c4480620461c1b5d8bcf1112b915be5a26f8308", "rev": "40d2f963e8e7373839fd044142915a9871b8de61",
"type": "github" "type": "github"
}, },
"original": { "original": {
@ -106,7 +106,7 @@
"crane": { "crane": {
"inputs": { "inputs": {
"flake-compat": "flake-compat_2", "flake-compat": "flake-compat_2",
"flake-utils": "flake-utils_5", "flake-utils": "flake-utils_6",
"nixpkgs": [ "nixpkgs": [
"watershot", "watershot",
"std", "std",
@ -308,6 +308,24 @@
} }
}, },
"flake-utils_2": { "flake-utils_2": {
"inputs": {
"systems": "systems_4"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_3": {
"inputs": { "inputs": {
"systems": [ "systems": [
"stylix", "stylix",
@ -328,9 +346,9 @@
"type": "github" "type": "github"
} }
}, },
"flake-utils_3": { "flake-utils_4": {
"inputs": { "inputs": {
"systems": "systems_5" "systems": "systems_6"
}, },
"locked": { "locked": {
"lastModified": 1681202837, "lastModified": 1681202837,
@ -346,7 +364,7 @@
"type": "github" "type": "github"
} }
}, },
"flake-utils_4": { "flake-utils_5": {
"locked": { "locked": {
"lastModified": 1659877975, "lastModified": 1659877975,
"narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=", "narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
@ -361,7 +379,7 @@
"type": "github" "type": "github"
} }
}, },
"flake-utils_5": { "flake-utils_6": {
"locked": { "locked": {
"lastModified": 1667395993, "lastModified": 1667395993,
"narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=", "narHash": "sha256-nuEHfE/LcWyuSWnS8t12N1wc105Qtau+/OdUAjtQ0rA=",
@ -376,6 +394,24 @@
"type": "github" "type": "github"
} }
}, },
"flake-utils_7": {
"inputs": {
"systems": "systems_7"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"fromYaml": { "fromYaml": {
"flake": false, "flake": false,
"locked": { "locked": {
@ -940,9 +976,11 @@
"nixpkgs-unstable": "nixpkgs-unstable", "nixpkgs-unstable": "nixpkgs-unstable",
"nixvirt": "nixvirt", "nixvirt": "nixvirt",
"rose-pine-hyprcursor": "rose-pine-hyprcursor", "rose-pine-hyprcursor": "rose-pine-hyprcursor",
"snapraid-aio": "snapraid-aio",
"stylix": "stylix", "stylix": "stylix",
"vscode-server": "vscode-server", "vscode-server": "vscode-server",
"watershot": "watershot", "watershot": "watershot",
"yay": "yay",
"zen-browser": "zen-browser" "zen-browser": "zen-browser"
} }
}, },
@ -1014,6 +1052,45 @@
"type": "github" "type": "github"
} }
}, },
"snapraid-aio": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": [
"nixpkgs"
],
"snapraid-aio-src": "snapraid-aio-src"
},
"locked": {
"lastModified": 1746380924,
"narHash": "sha256-A/TBPzpKMKQF3n4GVw7AD5nUio+bzPrngvWVLGoOPKU=",
"ref": "refs/heads/main",
"rev": "c13880e2626a88d0ba9b69a75443654a720ae9f8",
"revCount": 2,
"type": "git",
"url": "https://git.ryot.foo/toph/snapraid-aio.nix.git"
},
"original": {
"type": "git",
"url": "https://git.ryot.foo/toph/snapraid-aio.nix.git"
}
},
"snapraid-aio-src": {
"flake": false,
"locked": {
"lastModified": 1744884143,
"narHash": "sha256-GNXn/V4HoFnQtyq7l+V+aXHArObr3zQd4vCgPEqPeRk=",
"owner": "auanasgheps",
"repo": "snapraid-aio-script",
"rev": "a46c7362af385eac945e86a2a0f6097dbe7ca3fb",
"type": "github"
},
"original": {
"owner": "auanasgheps",
"repo": "snapraid-aio-script",
"rev": "a46c7362af385eac945e86a2a0f6097dbe7ca3fb",
"type": "github"
}
},
"std": { "std": {
"inputs": { "inputs": {
"arion": [ "arion": [
@ -1024,7 +1101,7 @@
"blank": "blank", "blank": "blank",
"devshell": "devshell", "devshell": "devshell",
"dmerge": "dmerge", "dmerge": "dmerge",
"flake-utils": "flake-utils_4", "flake-utils": "flake-utils_5",
"incl": "incl", "incl": "incl",
"makes": [ "makes": [
"watershot", "watershot",
@ -1069,13 +1146,13 @@
"base16-vim": "base16-vim", "base16-vim": "base16-vim",
"firefox-gnome-theme": "firefox-gnome-theme", "firefox-gnome-theme": "firefox-gnome-theme",
"flake-compat": "flake-compat", "flake-compat": "flake-compat",
"flake-utils": "flake-utils_2", "flake-utils": "flake-utils_3",
"git-hooks": "git-hooks", "git-hooks": "git-hooks",
"gnome-shell": "gnome-shell", "gnome-shell": "gnome-shell",
"home-manager": "home-manager_2", "home-manager": "home-manager_2",
"nixpkgs": "nixpkgs_3", "nixpkgs": "nixpkgs_3",
"nur": "nur", "nur": "nur",
"systems": "systems_4", "systems": "systems_5",
"tinted-foot": "tinted-foot", "tinted-foot": "tinted-foot",
"tinted-kitty": "tinted-kitty", "tinted-kitty": "tinted-kitty",
"tinted-schemes": "tinted-schemes", "tinted-schemes": "tinted-schemes",
@ -1171,6 +1248,36 @@
"type": "github" "type": "github"
} }
}, },
"systems_6": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_7": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"tinted-foot": { "tinted-foot": {
"flake": false, "flake": false,
"locked": { "locked": {
@ -1295,7 +1402,7 @@
}, },
"vscode-server": { "vscode-server": {
"inputs": { "inputs": {
"flake-utils": "flake-utils_3", "flake-utils": "flake-utils_4",
"nixpkgs": [ "nixpkgs": [
"nixpkgs-unstable" "nixpkgs-unstable"
] ]
@ -1358,6 +1465,27 @@
"type": "github" "type": "github"
} }
}, },
"yay": {
"inputs": {
"flake-utils": "flake-utils_7",
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1745989032,
"narHash": "sha256-qKy5YVu8vhA60VxWpLiLV9QpN8LofL9qFCEAACrCxBw=",
"ref": "refs/heads/main",
"rev": "92d557d0d0393713cb57a970e880efafe6cc2b41",
"revCount": 9,
"type": "git",
"url": "https://git.ryot.foo/toph/yay.nix.git"
},
"original": {
"type": "git",
"url": "https://git.ryot.foo/toph/yay.nix.git"
}
},
"zen-browser": { "zen-browser": {
"inputs": { "inputs": {
"nixpkgs": [ "nixpkgs": [

View file

@ -63,11 +63,22 @@
# inputs.nixpkgs.follows = "nixpkgs"; # inputs.nixpkgs.follows = "nixpkgs";
# }; # };
snapraid-aio = {
# url = "git+https://git.ryot.foo/toph/snapraid-aio.nix.git";
url = "git+https://git.ryot.foo/toph/snapraid-aio.nix.git";
inputs.nixpkgs.follows = "nixpkgs";
};
vscode-server = { vscode-server = {
url = "github:nix-community/nixos-vscode-server"; url = "github:nix-community/nixos-vscode-server";
inputs.nixpkgs.follows = "nixpkgs-unstable"; inputs.nixpkgs.follows = "nixpkgs-unstable";
}; };
yay = {
url = "git+https://git.ryot.foo/toph/yay.nix.git";
inputs.nixpkgs.follows = "nixpkgs";
};
zen-browser = { zen-browser = {
url = "github:youwen5/zen-browser-flake"; url = "github:youwen5/zen-browser-flake";
inputs.nixpkgs.follows = "nixpkgs-unstable"; inputs.nixpkgs.follows = "nixpkgs-unstable";
@ -123,7 +134,7 @@
} }
# Import secrets # Import secrets
./modules/common/secret-spec.nix ./modules/global/secret-spec.nix
./secrets.nix ./secrets.nix
# Host-specific configuration # Host-specific configuration

View file

@ -0,0 +1,10 @@
{
pkgs,
...
}:
{
imports = [
## Required Configs ##
../common/core # required
];
}

View file

@ -8,14 +8,14 @@
... ...
}: }:
let let
username = config.hostSpec.username; username = hostSpec.username;
homeDir = config.hostSpec.home; homeDir = hostSpec.home;
shell = config.hostSpec.shell; shell = hostSpec.shell;
in in
{ {
imports = lib.flatten [ imports = lib.flatten [
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
"modules/common" "modules/global"
"modules/home" "modules/home"
]) ])
./asdf.nix ./asdf.nix
@ -31,8 +31,6 @@ in
./zoxide.nix ./zoxide.nix
]; ];
inherit hostSpec;
services.ssh-agent.enable = true; services.ssh-agent.enable = true;
home = { home = {

View file

@ -2,13 +2,14 @@
lib, lib,
pkgs, pkgs,
config, config,
hostSpec,
... ...
}: }:
{ {
#TODO: Scripts might need a rework #TODO: Scripts might need a rework
programs.fastfetch = programs.fastfetch =
let let
hostname = config.hostSpec.hostName; hostname = hostSpec.hostName;
logoFile = ./. + "/host/${hostname}.txt"; logoFile = ./. + "/host/${hostname}.txt";
weather = import ./scripts/weather.nix { inherit pkgs; }; weather = import ./scripts/weather.nix { inherit pkgs; };
title = import ./scripts/title.nix { inherit pkgs; }; title = import ./scripts/title.nix { inherit pkgs; };

View file

@ -4,12 +4,12 @@
lib, lib,
config, config,
inputs, inputs,
hostSpec,
... ...
}: }:
let let
# handle = config.hostSpec.handle; fullName = hostSpec.userFullName;
fullName = config.hostSpec.userFullName; email = hostSpec.email;
email = config.hostSpec.email;
in in
{ {
programs.git = { programs.git = {
@ -48,7 +48,7 @@ in
]; ];
}; };
url = lib.optionalAttrs (!config.hostSpec.isMinimal) { url = lib.optionalAttrs (!hostSpec.isMinimal) {
# Only force ssh if it's not minimal # Only force ssh if it's not minimal
"ssh://git@github.com" = { "ssh://git@github.com" = {
pushInsteadOf = "https://github.com"; pushInsteadOf = "https://github.com";

View file

@ -43,7 +43,13 @@ in
''; '';
}; };
".ssh/known_hosts".text = lib.concatStringsSep "\n" secretsSpec.ssh.knownHosts; ".ssh/known_hosts_source" = {
source = pkgs.writeText "known-hosts" (lib.concatStringsSep "\n" secretsSpec.ssh.knownHosts);
onChange = ''
cp $HOME/.ssh/known_hosts_source $HOME/.ssh/known_hosts
chmod 644 $HOME/.ssh/known_hosts
'';
};
} }
# Dynamically add all SSH private keys using the existing store paths # Dynamically add all SSH private keys using the existing store paths
# Ensures the keys have correct permissions and are not symlinks # Ensures the keys have correct permissions and are not symlinks

View file

@ -3,6 +3,7 @@
pkgs, pkgs,
config, config,
lib, lib,
hostSpec,
... ...
}: }:
@ -14,7 +15,7 @@ let
# inherit pkgs; # inherit pkgs;
# }; # };
homeDir = config.hostSpec.home; homeDir = hostSpec.home;
borg-wrapper = pkgs.writeScript "borg-wrapper" '' borg-wrapper = pkgs.writeScript "borg-wrapper" ''
#!${lib.getExe pkgs.fish} #!${lib.getExe pkgs.fish}

View file

@ -1,5 +1,6 @@
{ {
config, config,
inputs,
lib, lib,
pkgs, pkgs,
... ...
@ -17,6 +18,8 @@ let
#playerctl = lib.getExe pkgs.playerctl; # installed via /home/common/optional/desktops/playerctl.nix #playerctl = lib.getExe pkgs.playerctl; # installed via /home/common/optional/desktops/playerctl.nix
#swaylock = "lib.getExe pkgs.swaylock; #swaylock = "lib.getExe pkgs.swaylock;
betterControl = inputs.better-control.packages.${pkgs.system}.better-control;
defaultApp = defaultApp =
type: "${pkgs.gtk3}/bin/gtk-launch $(${pkgs.xdg-utils}/bin/xdg-mime query default ${type})"; type: "${pkgs.gtk3}/bin/gtk-launch $(${pkgs.xdg-utils}/bin/xdg-mime query default ${type})";
exec = script: "${pkgs.fish}/bin/fish ${script}"; exec = script: "${pkgs.fish}/bin/fish ${script}";
@ -61,6 +64,7 @@ let
# "SUPER, P, exec, ${launcher} --app color" # Color Picker # "SUPER, P, exec, ${launcher} --app color" # Color Picker
# "SUPER, V, exec, ${launcher} --app clip" # Clipboard # "SUPER, V, exec, ${launcher} --app clip" # Clipboard
# "SUPER, X, exec, ${launcher} --app power" # Power Menu # "SUPER, X, exec, ${launcher} --app power" # Power Menu
"SUPER, X, exec, ${lib.getExe betterControl} -pm" # Power Menu
## System ## ## System ##
"SUPER, L, exec, hyprlock" "SUPER, L, exec, hyprlock"

View file

@ -1,24 +1,16 @@
{ {
pkgs, pkgs,
config,
... ...
}: }:
{ {
imports = [ imports = [
## Required Configs ## ## Required Configs ##
../common/core # required ../common/core # required
## Host-specific Optional Configs ##
]; ];
# Useful for this host
home.file = { home.file = {
Pool.source = config.lib.file.mkOutOfStoreSymlink "/pool"; Pool.source = config.lib.file.mkOutOfStoreSymlink "/pool";
DockerStorage.source = config.lib.file.mkOutOfStoreSymlink "/mnt/DockerStorage"; DockerStorage.source = config.lib.file.mkOutOfStoreSymlink "/mnt/DockerStorage";
}; };
## Packages with no needed configs ##
# home.packages = builtins.attrValues {
# inherit (pkgs)
# ;
# };
} }

View file

@ -72,8 +72,4 @@
scale = 1.20; scale = 1.20;
} }
]; ];
# home.file = {
# "run-mac.sh".source = config.lib.file.mkOutOfStoreSymlink "${pkgs.macos-ventura-image.runScript}";
# };
} }

View file

@ -1,46 +0,0 @@
{
pkgs,
config,
...
}:
let
cloudflare = pkgs.writeTextFile {
name = "cloudflare.ini";
text = ''
CF_DNS_API_TOKEN=${config.secretsSpec.api.cloudflare}
'';
};
in
{
# letsencrypt
security.acme = {
acceptTerms = true;
defaults = {
email = "chris@toph.cc";
dnsProvider = "cloudflare";
environmentFile = cloudflare;
};
certs = {
"goldenlemon.cc" = {
extraDomainNames = [ "*.goldenlemon.cc" ];
};
# "kwahson.com" = {
# extraDomainNames = [ "*.kwahson.com" ];
# };
# "kwahson.xyz" = {
# extraDomainNames = [ "*.kwahson.xyz" ];
# };
# "toph.cc" = {
# extraDomainNames = [ "*.toph.cc" ];
# };
"ryot.foo" = {
extraDomainNames = [ "*.ryot.foo" ];
};
};
};
}

View file

@ -1,16 +0,0 @@
{
services.caddy.virtualHosts = {
"drive.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy http://localhost:8181 {
header_up Host {host}
# header_up X-Forwarded-For {remote}
# header_up X-Forwarded-Proto {scheme}
# header_up X-Forwarded-Protocol {scheme}
# header_up X-Forwarded-Port {server_port}
}
'';
};
};
}

View file

@ -1,10 +0,0 @@
{ config, ... }:
{
imports = [
"./${config.hostSpec.hostName}.nix"
];
services.caddy = {
enable = true;
};
}

View file

@ -1,100 +0,0 @@
{
services.caddy.virtualHosts = {
# "ryot.foo" = {
# useACMEHost = "ryot.foo";
# extraConfig = ''
# reverse_proxy 104.40.3.44:80
# '';
# };
"auth.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:9000 {
header_up Host {host}
header_up X-Forwarded-For {remote}
header_up X-Forwarded-Proto {scheme}
header_up X-Forwarded-Protocol {scheme}
header_up X-Forwarded-Port {server_port}
}
'';
};
"frp.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:4041
'';
};
"grafana.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:3001
'';
};
"git.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:3003
'';
};
"influx.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:8086
'';
};
"home.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:7475
'';
};
"komodo.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:9120
'';
};
"mail.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:9002
'';
};
"map.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:25566
'';
};
"outline.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:3480
'';
};
"plane.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:3000
'';
};
"upsnap.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:8090
'';
};
};
}

View file

@ -1,10 +0,0 @@
{
services.caddy.virtualHosts = {
"cloudflared.ryot.foo" = {
useACMEHost = "ryot.foo";
extraConfig = ''
reverse_proxy localhost:14333
'';
};
};
}

View file

@ -1,40 +0,0 @@
{ config, ... }:
let
username = config.hostSpec.username;
homeDir = config.hostSpec.home;
pve-key = config.secretsSpec.ssh.privateKeys.pve;
in
{
# For less permission issues with SSHFS
programs.fuse.userAllowOther = true;
# Create the directories if they do not exist
systemd.tmpfiles.rules = [
"d /pool 2775 ${username} ryot -"
"d ${homeDir}/git 2775 ${username} ryot -"
];
# File system configuration
fileSystems = {
"/pool" = {
device = "${username}@cloud:/pool";
fsType = "sshfs";
options = [
"defaults"
"reconnect"
"_netdev"
"allow_other"
"identityfile=${pve-key}"
];
};
"${homeDir}/git" = {
fsType = "none";
device = "/pool/git";
options = [
"bind"
"nofail"
];
};
};
}

View file

@ -1,22 +0,0 @@
{
inputs,
pkgs,
...
}:
{
imports = [
inputs.nixtheplanet.nixosModules.macos-ventura
];
services.macos-ventura = {
enable = true;
package = pkgs.makeDarwinImage { diskSizeBytes = 80000000000; };
openFirewall = true;
vncListenAddr = "0.0.0.0";
autoStart = false;
extraQemuFlags = [
"-spice"
"port=5930,addr=127.0.0.1,disable-ticketing"
];
};
}

View file

@ -0,0 +1,51 @@
{
pkgs,
config,
...
}:
let
# Create a VERY simple environment file with absolutely minimal formatting
cloudflareEnvFile = pkgs.writeText "cloudflare.env" ''
CLOUDFLARE_DNS_API_TOKEN=${config.secretsSpec.api.cloudflare}
'';
in
{
environment.systemPackages = [ pkgs.lego ];
security.acme = {
acceptTerms = true;
defaults = {
email = "chris@toph.cc";
dnsProvider = "cloudflare"; # Use Cloudflare's DNS
environmentFile = cloudflareEnvFile;
enableDebugLogs = true;
extraLegoFlags = [
"--dns.resolvers=1.1.1.1:53,8.8.8.8:53"
"--dns.propagation-wait=60s" # Wait for 60 seconds for DNS propagation
"--dns-timeout=60"
"--http-timeout=60"
];
};
certs = {
"goldenlemon.cc" = {
extraDomainNames = [ "*.goldenlemon.cc" ];
};
# "kwahson.com" = {
# extraDomainNames = [ "*.kwahson.com" ];
# };
# "kwahson.xyz" = {
# extraDomainNames = [ "*.kwahson.xyz" ];
# };
# "toph.cc" = {
# extraDomainNames = [ "*.toph.cc" ];
# };
"ryot.foo" = {
extraDomainNames = [ "*.ryot.foo" ];
};
};
};
}

View file

@ -7,4 +7,8 @@
}; };
oci-containers.backend = "docker"; oci-containers.backend = "docker";
}; };
environment.systemPackages = with pkgs; [
lazydocker
];
} }

View file

@ -0,0 +1,64 @@
{ config, ... }:
let
username = config.hostSpec.username;
homeDir = config.hostSpec.home;
in
{
# Create the directories if they do not exist
systemd = {
tmpfiles.rules = [
"d /pool 2775 ${username} ryot -"
];
services.createGitSymlink = {
description = "Create symlink from home directory to pool/git";
after = [
"network.target"
"pool.mount"
];
requires = [ "pool.mount" ];
wantedBy = [ "multi-user.target" ];
script = ''
umount /pool/git
mkdir -p /pool/git
chown ${username}:ryot /pool/git
chmod 2775 /pool/git
ln -sf /pool/git ${homeDir}/git
chown -h ${username}:ryot ${homeDir}/git
'';
serviceConfig = {
Type = "oneshot";
RemainAfterExit = true;
};
};
};
# File system configuration
fileSystems = {
"/pool" = {
device = "cloud:/";
fsType = "nfs";
options = [
"_netdev"
"defaults"
"nfsvers=4.2"
"noacl"
"noatime"
"nofail"
"sec=sys"
];
};
};
# Ensure NFS client support is complete
boot.supportedFilesystems = [ "nfs" ];
# services.rpcbind.enable = true;
# Optional: Configure ID mapping if needed
services.nfs.idmapd.settings = {
General = {
Domain = "local"; # Must match on server and client
Verbosity = 0;
};
};
}

View file

@ -7,14 +7,16 @@
pkgs, pkgs,
... ...
}: }:
let
yay = inputs.yay.packages.${pkgs.system}.default;
in
{ {
imports = lib.flatten [ imports = lib.flatten [
inputs.home-manager.nixosModules.home-manager inputs.home-manager.nixosModules.home-manager
(lib.custom.scanPaths ./.) (lib.custom.scanPaths ./.)
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
"modules/common" "modules/global"
"hosts/users"
]) ])
]; ];
@ -29,7 +31,7 @@
ranger ranger
sshfs sshfs
wget wget
yay # my yay @ pkgs teehee yay # my yay teehee
]; ];
# Enable CUPS to print documents. # Enable CUPS to print documents.
@ -70,17 +72,6 @@
hardware.enableAllFirmware = true; hardware.enableAllFirmware = true;
security.sudo = { security.sudo = {
extraRules = [
{
users = [ config.hostSpec.username ];
commands = [
{
command = "ALL";
options = [ "NOPASSWD" ];
}
];
}
];
extraConfig = '' extraConfig = ''
Defaults lecture = never # rollback results in sudo lectures after each reboot, it's somewhat useless anyway Defaults lecture = never # rollback results in sudo lectures after each reboot, it's somewhat useless anyway
Defaults pwfeedback # password input feedback - makes typed password visible as asterisks Defaults pwfeedback # password input feedback - makes typed password visible as asterisks

104
hosts/global/core/user.nix Normal file
View file

@ -0,0 +1,104 @@
# User config applicable only to nixos
{
  inputs,
  config,
  lib,
  pkgs,
  ...
}:
let
  hostSpec = config.hostSpec;
  username = hostSpec.username;
  # Get user-specific secrets if they exist
  user = config.secretsSpec.users.${username} or { };
  # Keep only groups that are actually declared on this host, so the same
  # user config works on hosts without e.g. docker or libvirtd.
  ifTheyExist = groups: builtins.filter (group: builtins.hasAttr group config.users.groups) groups;
  isMinimal = hostSpec.isMinimal or false;
  # Authorized keys shared by the admin user and root.
  # FIX: `builtins.attrValues config.secretsSpec.ssh.publicKeys or [ ]`
  # parses as `attrValues (…publicKeys or [ ])` — the `or` default binds to
  # the attribute selection — so a missing `publicKeys` made attrValues
  # receive a list and throw. The default must be an empty *set*.
  authorizedKeys = builtins.attrValues (config.secretsSpec.ssh.publicKeys or { });
in
{
  users.groups = {
    # Shared "ryot" group (gid 1004) used across hosts for /pool access.
    ryot = lib.mkIf (!isMinimal) {
      gid = 1004;
      members = [ username ];
    };
  };
  # Fully declarative users; passwords come from the secrets module.
  users.mutableUsers = false;
  users.users.${username} = {
    home = hostSpec.home;
    isNormalUser = true;
    createHome = true;
    description = "Admin";
    homeMode = "750";
    # Prefer a per-user secret password; fall back to the host-wide one.
    hashedPassword = user.hashedPassword or hostSpec.hashedPassword;
    uid = 1000;
    group = if !isMinimal then "ryot" else "users";
    shell = hostSpec.shell or pkgs.fish;
    extraGroups = lib.flatten [
      "wheel"
      (ifTheyExist [
        "adbusers"
        "audio"
        "docker"
        "gamemode"
        "git"
        "libvirtd"
        "networkmanager"
        "video"
      ])
    ];
    openssh.authorizedKeys.keys = authorizedKeys;
  };
  # Special sudo config for user
  security.sudo.extraRules = [
    {
      users = [ username ];
      commands = [
        {
          command = "ALL";
          options = [ "NOPASSWD" ];
        }
      ];
    }
  ];
  # No matter what environment we are in we want these tools for root, and the user(s)
  programs.git.enable = true;
  # root's ssh key are mainly used for remote deployment, borg, and some other specific ops
  users.users.root = {
    shell = pkgs.bash;
    hashedPassword = lib.mkForce hostSpec.hashedPassword;
    openssh.authorizedKeys.keys = authorizedKeys;
  };
}
// lib.optionalAttrs (inputs ? "home-manager") {
  # Setup root home?
  home-manager.users.root = lib.optionalAttrs (!isMinimal) {
    home.stateVersion = "24.05"; # Avoid error
  };
  # Set up home-manager for the configured user
  home-manager = {
    extraSpecialArgs = {
      inherit pkgs inputs;
      inherit (config) secretsSpec hostSpec;
    };
    users.${username} = lib.optionalAttrs (!isMinimal) {
      imports = [
        (
          { config, ... }:
          import (lib.custom.relativeToRoot "home/${username}/${hostSpec.hostName}") {
            inherit
              config
              hostSpec
              inputs
              lib
              pkgs
              ;
          }
        )
      ];
    };
  };
}

View file

@ -0,0 +1,207 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  # Borg repository locations on the pool.
  dockerStorageRepo = "/pool/Backups/DockerStorage";
  forgejoRepo = "/pool/Backups/forgejo";
  # Shared environment setup prepended to every backup script. Repos are
  # created with --encryption=none, hence the first flag.
  borgCommonSettings = ''
    # Don't use cache to avoid issues with concurrent backups
    export BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK=yes
    export BORG_NON_INTERACTIVE=yes
  '';
  # Common packages needed for backups (placed on the unit's PATH).
  commonBorgPath = with pkgs; [
    borgbackup
    coreutils
    apprise
    gnugrep
    hostname
    util-linux
    gawk
  ];
  # Repository initialization: create an unencrypted repo on first run.
  initRepo = repo: ''
    if [ ! -d "${repo}" ]; then
      mkdir -p "${repo}"
      ${pkgs.borgbackup}/bin/borg init --encryption=none "${repo}"
    fi
  '';
  # Notification system: Apprise URL comes from the secrets module; the
  # trailing `|| true` keeps a failed notification from failing the backup.
  apprise-url = config.secretsSpec.users.admin.smtp.notifyUrl;
  sendNotification = title: message: ''
    ${pkgs.apprise}/bin/apprise -t "${title}" -b "${message}" "${apprise-url}" || true
  '';
  # Statistics generation: scrape the borg log into <logFile>.stats and load
  # the whole thing into $STATS for use in notification bodies.
  extractBorgStats = logFile: repoPath: ''
    {
      echo -e "\n==== BACKUP SUMMARY ====\n"
      grep -A10 "Archive name:" ${logFile} || echo "No archive stats found"
      echo -e "\n=== Compression ===\n"
      grep "Compressed size:" ${logFile} || echo "No compression stats found"
      echo -e "\n=== Duration ===\n"
      grep "Duration:" ${logFile} || echo "No duration stats found"
      grep "Throughput:" ${logFile} || echo "No throughput stats found"
      echo -e "\n=== Repository ===\n"
      ${pkgs.borgbackup}/bin/borg info ${repoPath} --last 1 2>/dev/null || echo "Could not get repository info"
      echo -e "\n=== Storage Space ===\n"
      df -h ${repoPath} | grep -v "Filesystem" || echo "Could not get storage info"
    } > ${logFile}.stats
    STATS=$(cat ${logFile}.stats || echo "No stats available")
  '';
  # Unified backup service generator with optional features.
  #
  # Returns a `systemd.services."backup-<name>"` definition (plus a matching
  # timer when `schedule` is non-null) that runs `borg create` over
  # `sourcePath` into `repo`, prunes by the keep* retention knobs, and
  # optionally sends an Apprise notification with extracted stats.
  #
  # NOTE(review): with verbose = true the create/prune commands end in
  # `>> $LOG_FILE 2>&1 | tee /dev/fd/3`; the following `$?` then captures
  # tee's exit status (always 0), so BACKUP_STATUS/PRUNE_STATUS can never
  # report failure in verbose mode — and since stdout is redirected before
  # the pipe, tee receives nothing anyway. Both call sites below use
  # verbose = true. Worth fixing (e.g. PIPESTATUS) in a follow-up.
  mkBorgBackupService =
    {
      name, # slug used in unit and archive names
      title, # human-readable name for logs/notifications
      repo, # borg repository path
      sourcePath, # directory tree to back up
      keepDaily,
      keepWeekly,
      keepMonthly,
      schedule ? null, # OnCalendar spec; null = no timer created
      enableNotifications ? true,
      verbose ? false, # set -x + mirrored output (see NOTE above)
    }:
    let
      # Timer definition, merged into the result only when scheduled.
      maybeCreateTimer = lib.optionalAttrs (schedule != null) {
        timers."backup-${name}" = {
          description = "Timer for ${title} Backup";
          wantedBy = [ "timers.target" ];
          timerConfig = {
            OnCalendar = schedule;
            Persistent = true;
            RandomizedDelaySec = "5min";
          };
        };
      };
      logPrefix = if verbose then "set -x;" else "";
    in
    {
      services."backup-${name}" = {
        description = "Backup ${title} with Borg";
        inherit (commonServiceConfig) path serviceConfig;
        script = ''
          ${borgCommonSettings}
          ${logPrefix} # Add verbose logging if enabled
          LOG_FILE="/tmp/borg-${name}-backup-$(date +%Y%m%d-%H%M%S).log"
          ${initRepo repo}
          echo "Starting ${title} backup at $(date)" > $LOG_FILE
          ARCHIVE_NAME="${name}-$(date +%Y-%m-%d_%H%M%S)"
          START_TIME=$(date +%s)
          # Add verbose output redirection if enabled
          ${if verbose then "exec 3>&1 4>&2" else ""}
          ${pkgs.borgbackup}/bin/borg create \
            --stats \
            --compression zstd,15 \
            --exclude '*.tmp' \
            --exclude '*/tmp/*' \
            ${repo}::$ARCHIVE_NAME \
            ${sourcePath} >> $LOG_FILE 2>&1 ${if verbose then "| tee /dev/fd/3" else ""}
          BACKUP_STATUS=$?
          END_TIME=$(date +%s)
          DURATION=$((END_TIME - START_TIME))
          echo "Total time: $DURATION seconds ($(date -d@$DURATION -u +%H:%M:%S))" >> $LOG_FILE
          ${extractBorgStats "$LOG_FILE" "${repo}"}
          echo -e "\nPruning old backups..." >> $LOG_FILE
          ${pkgs.borgbackup}/bin/borg prune \
            --keep-daily ${toString keepDaily} \
            --keep-weekly ${toString keepWeekly} \
            --keep-monthly ${toString keepMonthly} \
            ${repo} >> $LOG_FILE 2>&1 ${if verbose then "| tee /dev/fd/3" else ""}
          PRUNE_STATUS=$?
          echo -e "\nRemaining archives after pruning:" >> $LOG_FILE
          ${pkgs.borgbackup}/bin/borg list ${repo} >> $LOG_FILE 2>&1 || true
          ${
            if enableNotifications then
              ''
                if [ $BACKUP_STATUS -eq 0 ] && [ $PRUNE_STATUS -eq 0 ]; then
                  ${sendNotification " ${title} Backup Complete" "${title} backup completed successfully on $(hostname) at $(date)\nDuration: $(date -d@$DURATION -u +%H:%M:%S)\n\n$STATS"}
                else
                  ${sendNotification " ${title} Backup Failed" "${title} backup failed on $(hostname) at $(date)\n\nBackup Status: $BACKUP_STATUS\nPrune Status: $PRUNE_STATUS\n\nPartial Stats:\n$STATS\n\nSee $LOG_FILE for details"}
                fi
              ''
            else
              "echo 'Notifications disabled' >> $LOG_FILE"
          }
          rm -f $LOG_FILE.stats
          exit $BACKUP_STATUS
        '';
      };
    }
    // maybeCreateTimer;
  # Common service configuration: oneshot at the lowest CPU/IO priority so
  # backups do not compete with interactive workloads.
  commonServiceConfig = {
    path = commonBorgPath;
    serviceConfig = {
      Type = "oneshot";
      IOSchedulingClass = "idle";
      CPUSchedulingPolicy = "idle";
      Nice = 19;
    };
  };
in
{
  environment.systemPackages = with pkgs; [
    borgbackup
  ];
  systemd = lib.mkMerge [
    (mkBorgBackupService {
      name = "docker-storage";
      title = "Docker Storage";
      repo = dockerStorageRepo;
      sourcePath = "/mnt/drive1/DockerStorage";
      # Retention: keep the last 7 daily archives, plus at least one archive
      # for each of the last 4 weeks and each of the last 3 months.
      keepDaily = 7;
      keepWeekly = 4;
      keepMonthly = 3;
      # No schedule = no timer created; runs are driven by the backup-chain
      # orchestrator instead.
      # schedule = "*-*-* 03:00:00";
      enableNotifications = false;
      verbose = true;
    })
    (mkBorgBackupService {
      name = "forgejo";
      title = "Forgejo";
      repo = forgejoRepo;
      sourcePath = "/pool/forgejo";
      keepDaily = 7;
      keepWeekly = 4;
      keepMonthly = 3;
      # schedule = "*-*-* 03:00:00";
      enableNotifications = false;
      verbose = true;
    })
  ];
}

View file

@ -0,0 +1,197 @@
{
  config,
  lib,
  pkgs,
  ...
}:
let
  # Shared configuration
  logDir = "/var/log/backups";
  # Every backup/maintenance unit the chain drives, in run order.
  # `logPattern`/`logPath` describe where each unit leaves its own log so it
  # can be appended to the chain log (and to the notification attachment).
  backupServices = [
    {
      name = "forgejo";
      title = "Forgejo";
      service = "backup-forgejo.service";
      logPattern = "borg-forgejo-backup-*.log";
    }
    {
      name = "docker_storage";
      title = "Docker Storage";
      service = "backup-docker-storage.service";
      logPattern = "borg-docker-storage-backup-*.log";
    }
    {
      name = "snapraid";
      title = "SnapRAID";
      service = "snapraid-aio.service";
      logPattern = "SnapRAID-*.out";
      logPath = "/var/log/snapraid";
    }
  ];
  # Helper functions
  users = config.secretsSpec.users;
  # Send an Apprise notification; never fail the chain on notify errors.
  notify =
    title: message: logFile:
    let
      attachArg = if logFile == "" then "" else "--attach \"file://${logFile}\"";
      appriseUrl = lib.custom.mkAppriseUrl users.admin.smtp "relay@ryot.foo";
    in
    ''
      ${pkgs.apprise}/bin/apprise -vv -i "markdown" -t "${title}" \
        -b "${message}" \
        ${attachArg} \
        "${appriseUrl}" || true
    '';
  # Newest file matching `pattern` under `path` (empty output if none).
  findLatestLog = pattern: path: ''
    find "${path}" -name "${pattern}" -type f -printf "%T@ %p\\n" 2>/dev/null \
      | sort -nr | head -1 | cut -d' ' -f2
  '';
  # Generate safe variable name (replace hyphens with underscores)
  safeName = name: lib.replaceStrings [ "-" ] [ "_" ] name;
  # Generate status variable references
  statusVarName = name: "STATUS_${safeName name}";
  # Common script utilities. NOTE: NixOS already runs unit scripts with
  # `set -e`; this adds -u and pipefail on top.
  scriptPrelude = ''
    set -uo pipefail
    LOG_FILE="${logDir}/backup-chain-$(date +%Y%m%d-%H%M%S).log"
    mkdir -p "${logDir}"
    exec > >(tee -a "$LOG_FILE") 2>&1
    log() {
      echo "[$(date "+%Y-%m-%d %H:%M:%S")] $1"
    }
    # Initialize all status variables
    ${lib.concatMapStringsSep "\n" (s: "${statusVarName s.name}=1") backupServices}
  '';
  # Service runner template: start one unit, record its exit status, and
  # append its own log file to the chain log.
  runService =
    {
      name,
      title,
      service,
      logPattern,
      logPath ? "/tmp",
    }:
    ''
      log "Starting ${title} maintenance..."
      # Capture the unit's exit status without tripping `set -e`.
      # FIX: the previous `systemctl start ... || true` followed by
      # `VAR=$?` always recorded 0 (the status of `true`), so failures
      # were never detected and every run reported success.
      if systemctl start ${service}; then
        ${statusVarName name}=0
      else
        ${statusVarName name}=$?
      fi
      log "${title} completed with status $${statusVarName name}"
      SERVICE_LOG=$(${findLatestLog logPattern logPath})
      if [ -n "$SERVICE_LOG" ]; then
        log "Appending ${title} log: $SERVICE_LOG"
        echo -e "\n\n===== ${title} LOG ($(basename "$SERVICE_LOG")) =====\n" >> "$LOG_FILE"
        cat "$SERVICE_LOG" >> "$LOG_FILE"
        # Add SnapRAID-specific summary
        if [ "${name}" = "snapraid" ]; then
          echo -e "\n=== SnapRAID Summary ===" >> "$LOG_FILE"
          # `|| true`: no matching lines must not abort the chain under
          # set -e / pipefail.
          grep -E '(Scrub|Sync|Diff|smart)' "$SERVICE_LOG" | tail -n 10 >> "$LOG_FILE" || true
        fi
      fi
    '';
  # Build the service execution script
  serviceExecution = lib.concatMapStrings runService backupServices;
  # Generate status summary lines (markdown, expanded by the shell at run
  # time — hence the escaped \$ so Nix leaves the references alone).
  statusSummaryLines = lib.concatMapStringsSep "\n" (
    s:
    let
      varName = statusVarName s.name;
    in
    "- **${s.title}:** \$([ \$${varName} -eq 0 ] && echo ' Success' || echo ' Failed') (Exit: \$${varName})"
  ) backupServices;
  # Notification logic: fold all per-service statuses into OVERALL_STATUS,
  # build a markdown summary, and send it with the chain log attached.
  notificationLogic =
    let
      statusVars = map (s: statusVarName s.name) backupServices;
    in
    ''
      # Calculate overall status
      OVERALL_STATUS=0
      ${lib.concatMapStringsSep "\n" (var: "if [ \$${var} -ne 0 ]; then OVERALL_STATUS=1; fi") statusVars}
      TIMESTAMP=$(date "+%Y-%m-%d %H:%M:%S")
      HOSTNAME=$(hostname)
      SUMMARY=$(cat << EOF
      # Backup Chain Complete
      **Host:** $HOSTNAME
      **Timestamp:** $TIMESTAMP
      **Overall Status:** $([ $OVERALL_STATUS -eq 0 ] && echo ' Success' || echo ' Failure')
      ## Service Status:
      ${statusSummaryLines}
      **Log Path:** $LOG_FILE
      EOF)
      if [ $OVERALL_STATUS -eq 0 ]; then
        ${notify " Backup Success" "$SUMMARY" "$LOG_FILE"}
      else
        ${notify " Backup Issues" "$SUMMARY" "$LOG_FILE"}
      fi
      exit $OVERALL_STATUS
    '';
in
{
  imports = lib.custom.scanPaths ./.;
  # Orchestrator: runs each backup unit in order, aggregates their logs and
  # exit statuses, and sends one combined notification.
  systemd.services.backup-chain = {
    description = "Orchestrated Backup Chain";
    path = with pkgs; [
      apprise
      coreutils
      findutils
      gawk
      gnugrep
      hostname
      systemd
      util-linux
    ];
    serviceConfig = {
      Type = "oneshot";
      # Lowest priority so the chain never competes with real workloads.
      Nice = 19;
      IOSchedulingClass = "idle";
      CPUSchedulingPolicy = "idle";
    };
    script = ''
      ${scriptPrelude}
      log "Initializing backup chain on $(hostname)"
      ${serviceExecution}
      log "Finalizing backup chain"
      ${notificationLogic}
    '';
  };
  # Nightly at 03:00 (plus up to 5 min jitter); Persistent catches up after
  # downtime.
  systemd.timers.backup-chain = {
    wantedBy = [ "timers.target" ];
    timerConfig = {
      OnCalendar = "*-*-* 03:00:00";
      Persistent = true;
      RandomizedDelaySec = "5min";
    };
  };
  environment.systemPackages = [ pkgs.apprise ];
  systemd.tmpfiles.rules = [ "d ${logDir} 0755 root root -" ];
}

View file

@ -0,0 +1,135 @@
{
  pkgs,
  inputs,
  lib,
  config,
  ...
}:
let
  users = config.secretsSpec.users;
  # SMTP-based Apprise URL used for snapraid-aio email notifications.
  apprise-url = lib.custom.mkAppriseUrl users.admin.smtp "relay@ryot.foo";
  # snapraid-aio runtime configuration (consumed by the snapraid-aio module).
  snapraid-aio-config = pkgs.writeTextFile {
    name = "snapraid-aio.config";
    text = ''
      CONFIG_VERSION="3.4"
      CHECK_UPDATES=1
      # Notification settings
      APPRISE=0
      APPRISE_URL=""
      APPRISE_ATTACH=1
      APPRISE_BIN="${pkgs.apprise}/bin/apprise"
      APPRISE_EMAIL=0
      APPRISE_EMAIL_URL="${apprise-url}"
      TELEGRAM=0
      DISCORD=0
      # Thresholds for sync operations
      DEL_THRESHOLD=500
      UP_THRESHOLD=500
      IGNORE_PATTERN=""
      ADD_DEL_THRESHOLD=0
      SYNC_WARN_THRESHOLD=0
      # Scrub settings
      SCRUB_PERCENT=5
      SCRUB_AGE=10
      SCRUB_NEW=1
      SCRUB_DELAYED_RUN=0
      # Performance and behavior settings
      PREHASH=1
      FORCE_ZERO=0
      SPINDOWN=0
      VERBOSITY=1
      RETENTION_DAYS=30
      # Logging settings
      SNAPRAID_LOG_DIR="/var/log/snapraid"
      SMART_LOG=1
      SMART_LOG_NOTIFY=0
      SNAP_STATUS=1
      SNAP_STATUS_NOTIFY=1
      # Critical paths
      SNAPRAID_CONF="/etc/snapraid.conf"
      SNAPRAID_BIN="${pkgs.snapraid}/bin/snapraid"
      # Email settings (optional - uncomment and configure if needed)
      # EMAIL_ADDRESS="your-email@example.com"
      # FROM_EMAIL_ADDRESS="snapraid@your-server.com"
      # Advanced settings - typically no need to modify
      CHK_FAIL=0
      DO_SYNC=1
      EMAIL_SUBJECT_PREFIX="(SnapRAID on $(hostname))"
      SERVICES_STOPPED=0
      SYNC_WARN_FILE="/var/lib/snapraid-aio/snapRAID.warnCount"
      SCRUB_COUNT_FILE="/var/lib/snapraid-aio/snapRAID.scrubCount"
      TMP_OUTPUT="/var/lib/snapraid-aio/snapRAID.out"
      SNAPRAID_LOG="/var/log/snapraid/snapraid.log"
    '';
  };
  # SnapRAID array layout: three data drives, one parity drive, with a
  # content file on every disk plus one on the root filesystem.
  snapraid-conf = pkgs.writeTextFile {
    name = "snapraid.conf";
    text = ''
      ## /etc/snapraid.conf ##
      # Defines the file to use as parity storage
      parity /mnt/parity/snapraid.parity
      # Defines the files to use as content list
      content /var/snapraid.content
      content /mnt/drive1/snapraid.content
      content /mnt/drive2/snapraid.content
      content /mnt/drive3/snapraid.content
      content /mnt/parity/snapraid.content
      # Defines the data disks to use
      data d1 /mnt/drive1/
      data d2 /mnt/drive2/
      data d3 /mnt/drive3/
      # Defines files and directories to exclude
      exclude *.unrecoverable
      exclude /tmp/
      exclude /lost+found/
      exclude /var/tmp/
      exclude /var/cache/
      exclude /var/log/
      exclude .trash/
      exclude .Trash-1000/
      exclude .Trash/
      # These dirs change data all the time
      # so I back them up in borg repos that are not excluded
      exclude /DockerStorage/
      exclude /data/forgejo/
    '';
  };
in
{
  imports = [
    inputs.snapraid-aio.nixosModules.default
  ];
  # Create required directories
  systemd.tmpfiles.rules = [
    "d /var/lib/snapraid-aio 0755 root root -"
    "d /var/log/snapraid 0755 root root -"
  ];
  environment.systemPackages = [ pkgs.snapraid ];
  environment.etc."snapraid.conf".source = snapraid-conf;
  # Set up snapraid-aio service; no schedule here — runs are driven by the
  # backup-chain orchestrator.
  services.snapraid-aio = {
    enable = true;
    configFile = snapraid-aio-config;
    # schedule = "*-*-* 04:00:00"; # Run daily at 4am
  };
}

View file

@ -0,0 +1,19 @@
{
  # Caddy vhost for FileRun: drive.ryot.foo -> local container port 8181,
  # TLS terminated with the wildcard ryot.foo certificate.
  services.caddy.enable = true;
  services.caddy.virtualHosts."drive.ryot.foo" = {
    useACMEHost = "ryot.foo";
    extraConfig = ''
      reverse_proxy http://localhost:8181 {
        header_up Host {host}
        # header_up X-Forwarded-For {remote}
        # header_up X-Forwarded-Proto {scheme}
        # header_up X-Forwarded-Protocol {scheme}
        # header_up X-Forwarded-Port {server_port}
      }
    '';
  };
}

View file

@ -0,0 +1,4 @@
{ lib, ... }:
{
  # Import every sibling module in this directory via the project helper
  # lib.custom.scanPaths (presumably returns the list of .nix files here —
  # TODO confirm against the lib/ implementation).
  imports = lib.custom.scanPaths ./.;
}

View file

@ -0,0 +1,38 @@
name: filerun
services:
  db:
    image: mariadb:10.11
    # Run as uid 1001 / gid 1004 so DB files under /pool keep the shared
    # ownership used elsewhere on this host.
    user: 1001:1004
    environment:
      # FIX: was `${DB_ROOT_PASS` — the missing closing brace broke
      # variable interpolation for the root password.
      MYSQL_ROOT_PASSWORD: ${DB_ROOT_PASS}
      MYSQL_USER: ${DB_USER}
      MYSQL_PASSWORD: ${DB_PASS}
      MYSQL_DATABASE: ${DB_NAME}
    restart: unless-stopped
    volumes:
      - /pool/filerun/db:/var/lib/mysql
  web:
    image: filerun/filerun:8.1
    user: root
    environment:
      FR_DB_HOST: db
      FR_DB_PORT: ${DB_PORT}
      FR_DB_NAME: ${DB_NAME}
      FR_DB_USER: ${DB_USER}
      FR_DB_PASS: ${DB_PASS}
      # Only the numeric IDs are set; the empty *_USER/_GROUP values
      # presumably fall back to image defaults — TODO confirm.
      APACHE_RUN_USER:
      APACHE_RUN_USER_ID: 1001
      APACHE_RUN_GROUP:
      APACHE_RUN_GROUP_ID: 1004
    depends_on:
      - db
    links:
      - db:db
    ports:
      - "8181:80"
    restart: unless-stopped
    volumes:
      - /pool/filerun/html:/var/www/html
      - /pool/filerun/user-files:/user-files
      - /pool/:/pool

View file

@ -0,0 +1,119 @@
# Auto-generated using compose2nix v0.3.1.
{
  config,
  lib,
  pkgs,
  ...
}:
let
  # Container environment (DB name/credentials etc.) from the secrets
  # module; the same attrset is injected into both containers below.
  env = config.secretsSpec.docker.filerun;
in
{
  # Runtime
  virtualisation.docker = {
    enable = true;
    autoPrune.enable = true;
  };
  virtualisation.oci-containers.backend = "docker";
  # Containers
  # MariaDB backing store for FileRun.
  virtualisation.oci-containers.containers."filerun-db" = {
    image = "mariadb:10.11";
    environment = env;
    volumes = [
      "/pool/filerun/db:/var/lib/mysql:rw"
    ];
    # uid 1001 / gid 1004 — presumably matches the shared pool ownership
    # used elsewhere in this repo; TODO confirm.
    user = "1001:1004";
    log-driver = "journald";
    extraOptions = [
      "--network-alias=db"
      "--network=filerun_default"
    ];
  };
  systemd.services."docker-filerun-db" = {
    # Restart policy generated by compose2nix; mkOverride 90 keeps these
    # overridable by ordinary (priority 100) definitions elsewhere.
    serviceConfig = {
      Restart = lib.mkOverride 90 "always";
      RestartMaxDelaySec = lib.mkOverride 90 "1m";
      RestartSec = lib.mkOverride 90 "100ms";
      RestartSteps = lib.mkOverride 90 9;
    };
    after = [
      "docker-network-filerun_default.service"
    ];
    requires = [
      "docker-network-filerun_default.service"
    ];
    partOf = [
      "docker-compose-filerun-root.target"
    ];
    wantedBy = [
      "docker-compose-filerun-root.target"
    ];
  };
  # FileRun web app, published on host port 8181 (fronted by Caddy).
  # NOTE(review): runs as root and bind-mounts all of /pool read-write.
  virtualisation.oci-containers.containers."filerun-web" = {
    image = "filerun/filerun:8.1";
    environment = env;
    volumes = [
      "/pool/:/pool:rw"
      "/pool/filerun/html:/var/www/html:rw"
      "/pool/filerun/user-files:/user-files:rw"
    ];
    ports = [
      "8181:80/tcp"
    ];
    dependsOn = [
      "filerun-db"
    ];
    user = "root";
    log-driver = "journald";
    extraOptions = [
      "--network-alias=web"
      "--network=filerun_default"
    ];
  };
  systemd.services."docker-filerun-web" = {
    serviceConfig = {
      Restart = lib.mkOverride 90 "always";
      RestartMaxDelaySec = lib.mkOverride 90 "1m";
      RestartSec = lib.mkOverride 90 "100ms";
      RestartSteps = lib.mkOverride 90 9;
    };
    after = [
      "docker-network-filerun_default.service"
    ];
    requires = [
      "docker-network-filerun_default.service"
    ];
    partOf = [
      "docker-compose-filerun-root.target"
    ];
    wantedBy = [
      "docker-compose-filerun-root.target"
    ];
  };
  # Networks
  # Idempotently create the shared bridge network; removed again on stop.
  systemd.services."docker-network-filerun_default" = {
    path = [ pkgs.docker ];
    serviceConfig = {
      Type = "oneshot";
      RemainAfterExit = true;
      ExecStop = "docker network rm -f filerun_default";
    };
    script = ''
      docker network inspect filerun_default || docker network create filerun_default
    '';
    partOf = [ "docker-compose-filerun-root.target" ];
    wantedBy = [ "docker-compose-filerun-root.target" ];
  };
  # Root service
  # When started, this will automatically create all resources and start
  # the containers. When stopped, this will teardown all resources.
  systemd.targets."docker-compose-filerun-root" = {
    unitConfig = {
      Description = "Root target generated by compose2nix.";
    };
    wantedBy = [ "multi-user.target" ];
  };
}

View file

@ -0,0 +1,23 @@
{ config, lib, ... }:
{
  # Install and configure NFS server
  # Exports the pool to any client (*); fsid=0 makes /pool the NFSv4
  # pseudo-root, so clients mount it simply as "<host>:/".
  # NOTE(review): `no_root_squash` on a world-scoped export grants remote
  # root full root access to the pool — confirm this is intended for this
  # network; `anonuid`/`anongid` only affect squashed requests, so they are
  # presumably inert here without `all_squash`.
  services.nfs.server = {
    enable = true;
    exports = ''
      # Pool export - seen as root '/' by the client
      /pool *(rw,insecure,no_subtree_check,no_root_squash,fsid=0,anonuid=1000,anongid=1004)
    '';
    # Advertise NFSv4 (and minor versions 4.1/4.2) only.
    extraNfsdConfig = "vers=4,4.1,4.2";
  };
  # Ensure NFS client support is complete
  # services.rpcbind.enable = true;
  # NFSv4 ID mapping: the domain must match the clients' idmapd config.
  services.nfs.idmapd.settings = {
    General = {
      Domain = "local";
      Verbosity = 0;
    };
  };
}

View file

@ -1,12 +1,11 @@
############################################################### ###############################################################
# #
# Prozy - LXC Container # Cloud - LXC Container
# NixOS container, Ryzen 5 5600G (3 Cores), 2GB/2GB RAM/SWAP # NixOS container, Ryzen 5 5600G (4th Cores), 4GB/4GB RAM/SWAP
# #
############################################################### ###############################################################
{ {
inputs,
lib, lib,
config, config,
pkgs, pkgs,
@ -19,21 +18,20 @@ let
in in
{ {
imports = lib.flatten [ imports = lib.flatten [
## Cloud Only ##
./config
## Hardware ## ## Hardware ##
./hardware.nix ./hardware.nix
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
## Required Configs ## ## Required Configs ##
"hosts/common/core" "hosts/global/core"
## Optional Configs ## ## Optional Configs ##
"hosts/common/optional/acme" "hosts/global/common/acme"
"hosts/common/optional/caddy" "hosts/global/common/docker.nix"
"hosts/common/optional/docker.nix"
"hosts/common/optional/containers/cloudflared.nix"
## Cloud Specific ##
"hosts/users/${username}" # # Not the best solution but I always have one user so ¯\_(ツ)_/¯
]) ])
]; ];
@ -50,21 +48,18 @@ in
networking = { networking = {
enableIPv6 = false; enableIPv6 = false;
firewall.allowedTCPPorts = firewall.allowedTCPPorts; firewall = {
allowedTCPPorts = firewall.allowedTCPPorts;
allowedUDPPorts = firewall.allowedUDPPorts;
};
}; };
## System-wide packages ## ## System-wide packages ##
programs.nix-ld.enable = true; programs.nix-ld.enable = true;
environment.systemPackages = with pkgs; [ environment.systemPackages = with pkgs; [
lazydocker mergerfs
]; ];
environment.etc = {
"cloudflared/.keep" = {
text = "This directory is used to store cloudflared configuration files.";
};
};
# https://wiki.nixos.org/wiki/FAQ/When_do_I_update_stateVersion # https://wiki.nixos.org/wiki/FAQ/When_do_I_update_stateVersion
system.stateVersion = "24.11"; system.stateVersion = "25.05";
} }

View file

@ -1,12 +1,48 @@
{ {
lib, lib,
config,
... ...
}: }:
let
username = config.hostSpec.username;
homeDir = config.hostSpec.home;
in
{ {
imports = lib.flatten [ imports = lib.flatten [
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
"hosts/common/optional/system/lxc.nix" "hosts/global/common/system/lxc.nix"
"hosts/common/optional/system/pool.nix"
]) ])
]; ];
# Less permission issues with pool
programs.fuse.userAllowOther = true;
# File system co
# INFO: Cloud is the pool provider
fileSystems = {
"/pool" = {
fsType = "fuse.mergerfs";
device = "/mnt/data*";
options = [
"cache.files=auto-full"
"defaults"
"allow_other"
"minfreespace=50G"
"fsname=mergerfs"
"category.create=mfs"
"nfsopenhack=all"
"nonempty"
"uid=1000"
"gid=1004" # Ryot group
];
};
"${homeDir}/git" = {
fsType = "none";
device = "/pool/git";
options = [
"bind"
"nofail"
];
};
};
} }

View file

@ -1,6 +1,10 @@
# Auto-generated using compose2nix v0.3.1. # Auto-generated using compose2nix v0.3.1.
{ pkgs, lib, ... }: {
pkgs,
lib,
config,
...
}:
let let
# Only available in the Komodo LXC # Only available in the Komodo LXC
DockerStorage = "/mnt/DockerStorage/komodo/stacks/authentik"; DockerStorage = "/mnt/DockerStorage/komodo/stacks/authentik";

View file

@ -0,0 +1,103 @@
{
  # Caddy vhosts for the services on this host. Every site terminates TLS
  # with the wildcard ryot.foo certificate and proxies to a local port.
  services.caddy = {
    enable = true;
    virtualHosts =
      let
        # Plain pass-through vhost to a local port.
        proxyTo = port: {
          useACMEHost = "ryot.foo";
          extraConfig = ''
            reverse_proxy localhost:${toString port}
          '';
        };
      in
      {
        # "ryot.foo" = {
        #   useACMEHost = "ryot.foo";
        #   extraConfig = ''
        #     reverse_proxy 104.40.3.44:80
        #   '';
        # };
        # Authentik wants the full set of forwarded headers.
        "auth.ryot.foo" = {
          useACMEHost = "ryot.foo";
          extraConfig = ''
            reverse_proxy localhost:9000 {
              header_up Host {host}
              header_up X-Forwarded-For {remote}
              header_up X-Forwarded-Proto {scheme}
              header_up X-Forwarded-Protocol {scheme}
              header_up X-Forwarded-Port {server_port}
            }
          '';
        };
        "frp.ryot.foo" = proxyTo 4041;
        "grafana.ryot.foo" = proxyTo 3001;
        "git.ryot.foo" = proxyTo 3003;
        "influx.ryot.foo" = proxyTo 8086;
        "home.ryot.foo" = proxyTo 7475;
        "komodo.ryot.foo" = proxyTo 9120;
        "mail.ryot.foo" = proxyTo 9002;
        "map.ryot.foo" = proxyTo 25566;
        "outline.ryot.foo" = proxyTo 3480;
        "plane.ryot.foo" = proxyTo 3000;
        "upsnap.ryot.foo" = proxyTo 8090;
      };
  };
}

View file

@ -0,0 +1,4 @@
{ lib, ... }:
{
imports = lib.custom.scanPaths ./.;
}

View file

@ -1,8 +1,8 @@
# Auto-generated using compose2nix v0.3.1. # Auto-generated using compose2nix v0.3.1.
{ {
pkgs, config,
lib, lib,
admin, pkgs,
... ...
}: }:
let let

View file

@ -6,7 +6,6 @@
############################################################### ###############################################################
{ {
inputs,
lib, lib,
config, config,
pkgs, pkgs,
@ -19,22 +18,19 @@ let
in in
{ {
imports = lib.flatten [ imports = lib.flatten [
## Komodo Only ##
./config
## Hardware ## ## Hardware ##
./hardware.nix ./hardware.nix
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
## Required Configs ## ## Required Configs ##
"hosts/common/core" "hosts/global/core"
## Optional Configs ## ## Optional Configs ##
"hosts/common/optional/acme" "hosts/global/common/acme"
"hosts/common/optional/caddy" "hosts/global/common/docker.nix"
"hosts/common/optional/docker.nix"
"hosts/common/containers/authentik"
"hosts/common/containers/komodo"
## Komodo Specific ##
"hosts/users/${username}" # # Not the best solution but I always have one user so ¯\_(ツ)_/¯
]) ])
]; ];
@ -66,5 +62,5 @@ in
]; ];
# https://wiki.nixos.org/wiki/FAQ/When_do_I_update_stateVersion # https://wiki.nixos.org/wiki/FAQ/When_do_I_update_stateVersion
system.stateVersion = "24.11"; system.stateVersion = "25.05";
} }

View file

@ -5,8 +5,8 @@
{ {
imports = lib.flatten [ imports = lib.flatten [
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
"hosts/common/optional/system/lxc.nix" "hosts/global/common/system/lxc.nix"
"hosts/common/optional/system/pool.nix" "hosts/global/common/system/pool.nix"
]) ])
]; ];
} }

View file

@ -25,10 +25,7 @@ in
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
## Required Configs ## ## Required Configs ##
"hosts/common/core" "hosts/global/core"
## Proxy Specific ##
"hosts/users/${username}" # # Not the best solution but I always have one user so ¯\_(ツ)_/¯
]) ])
]; ];

View file

@ -5,8 +5,8 @@
{ {
imports = lib.flatten [ imports = lib.flatten [
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
"hosts/common/optional/system/lxc.nix" "hosts/global/common/system/lxc.nix"
"hosts/common/optional/system/pool.nix" "hosts/global/common/system/pool.nix"
]) ])
]; ];
} }

View file

@ -25,12 +25,9 @@ in
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
## Required Configs ## ## Required Configs ##
"hosts/common/core" "hosts/global/core"
## Optional Configs ## ## Optional Configs ##
## Nix Specific ##
"hosts/users/${username}" # # Not the best solution but I always have one user so ¯\_(ツ)_/¯
]) ])
]; ];

View file

@ -5,8 +5,8 @@
{ {
imports = lib.flatten [ imports = lib.flatten [
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
"hosts/common/optional/system/lxc.nix" "hosts/global/common/system/lxc.nix"
"hosts/common/optional/system/pool.nix" "hosts/global/common/system/pool.nix"
]) ])
]; ];
} }

View file

@ -0,0 +1,13 @@
{
  # Caddy vhost exposing the local cloudflared web UI at
  # cloudflared.ryot.foo, TLS via the wildcard ryot.foo certificate.
  services.caddy.enable = true;
  services.caddy.virtualHosts."cloudflared.ryot.foo" = {
    useACMEHost = "ryot.foo";
    extraConfig = ''
      reverse_proxy localhost:14333
    '';
  };
}

View file

@ -0,0 +1,4 @@
{ lib, ... }:
{
imports = lib.custom.scanPaths ./.;
}

View file

@ -1,6 +1,6 @@
############################################################### ###############################################################
# #
# Prozy - LXC Container # Proxy - LXC Container
# NixOS container, Ryzen 5 5600G (3 Cores), 2GB/2GB RAM/SWAP # NixOS container, Ryzen 5 5600G (3 Cores), 2GB/2GB RAM/SWAP
# #
############################################################### ###############################################################
@ -19,21 +19,19 @@ let
in in
{ {
imports = lib.flatten [ imports = lib.flatten [
## Proxy Only ##
./config
## Hardware ## ## Hardware ##
./hardware.nix ./hardware.nix
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
## Required Configs ## ## Required Configs ##
"hosts/common/core" "hosts/global/core"
## Optional Configs ## ## Optional Configs ##
"hosts/common/optional/acme" "hosts/global/common/acme"
"hosts/common/optional/caddy" "hosts/global/common/docker.nix"
"hosts/common/optional/docker.nix"
"hosts/common/optional/containers/cloudflared.nix"
## Proxy Specific ##
"hosts/users/${username}" # # Not the best solution but I always have one user so ¯\_(ツ)_/¯
]) ])
]; ];

View file

@ -5,8 +5,8 @@
{ {
imports = lib.flatten [ imports = lib.flatten [
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
"hosts/common/optional/system/lxc.nix" "hosts/global/common/system/lxc.nix"
"hosts/common/optional/system/pool.nix" "hosts/global/common/system/pool.nix"
]) ])
]; ];
} }

View file

@ -0,0 +1,4 @@
{ lib, ... }:
{
imports = lib.custom.scanPaths ./.;
}

View file

@ -18,6 +18,8 @@ let
in in
{ {
imports = lib.flatten [ imports = lib.flatten [
## Rune Only ##
# ./config
## Hardware ## ## Hardware ##
./hardware.nix ./hardware.nix
@ -27,26 +29,20 @@ in
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
## Required Configs ## ## Required Configs ##
"hosts/common/core" "hosts/global/core"
## Optional Configs ## ## Optional Configs ##
"hosts/common/optional/audio.nix" # pipewire and cli controls "hosts/global/common/audio.nix" # pipewire and cli controls
"hosts/common/optional/adb.nix" # android tools "hosts/global/common/adb.nix" # android tools
"hosts/common/optional/bluetooth.nix" "hosts/global/common/bluetooth.nix"
"hosts/common/optional/ddcutil.nix" # ddcutil for monitor controls "hosts/global/common/ddcutil.nix" # ddcutil for monitor controls
"hosts/common/optional/gaming.nix" # steam, gamescope, gamemode, and related hardware "hosts/global/common/gaming.nix" # steam, gamescope, gamemode, and related hardware
# "hosts/common/optional/gnome.nix" # desktop # "hosts/global/common/gnome.nix" # desktop
"hosts/common/optional/hyprland" # desktop "hosts/global/common/hyprland" # desktop
"hosts/common/optional/libvirt.nix" # vm tools "hosts/global/common/libvirt.nix" # vm tools
"hosts/common/optional/nvtop.nix" # GPU monitor (not available in home-manager) "hosts/global/common/nvtop.nix" # GPU monitor (not available in home-manager)
"hosts/common/optional/plymouth.nix" # fancy boot screen "hosts/global/common/plymouth.nix" # fancy boot screen
"hosts/common/optional/vial.nix" # KB setup "hosts/global/common/vial.nix" # KB setup
# "hosts/common/optional/ventura.nix" # macos vm
## Misc Inputs ##
## Rune Specific ##
"hosts/users/${username}" # # Not the best solution but I always have one user so ¯\_(ツ)_/¯
]) ])
]; ];

View file

@ -14,7 +14,7 @@
imports = lib.flatten [ imports = lib.flatten [
(modulesPath + "/installer/scan/not-detected.nix") (modulesPath + "/installer/scan/not-detected.nix")
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
"hosts/common/optional/system/pool.nix" "hosts/global/common/system/pool.nix"
]) ])
]; ];

View file

@ -24,20 +24,17 @@ in
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
## Required Configs ## ## Required Configs ##
"hosts/common/core" "hosts/global/core"
## Optional Configs ## ## Optional Configs ##
"hosts/common/optional/audio.nix" # pipewire and cli controls "hosts/global/common/audio.nix" # pipewire and cli controls
# "hosts/common/optional/gaming.nix" # steam, gamescope, gamemode, and related hardware # "hosts/global/common/gaming.nix" # steam, gamescope, gamemode, and related hardware
# "hosts/common/optional/gnome.nix" # desktop # "hosts/global/common/gnome.nix" # desktop
"hosts/common/optional/hyprland" # desktop "hosts/global/common/hyprland" # desktop
# "hosts/common/optional/nvtop.nix" # GPU monitor (not available in home-manager) # "hosts/global/common/nvtop.nix" # GPU monitor (not available in home-manager)
# "hosts/common/optional/plymouth.nix" # fancy boot screen # "hosts/global/common/plymouth.nix" # fancy boot screen
## Misc Inputs ## ## Misc Inputs ##
## VM Specific ##
"hosts/users/${username}" # Not the best solution but I always have just one user so ¯\_(ツ)_/¯
]) ])
]; ];

View file

@ -12,7 +12,7 @@ in
imports = lib.flatten [ imports = lib.flatten [
(modulesPath + "/profiles/qemu-guest.nix") (modulesPath + "/profiles/qemu-guest.nix")
(map lib.custom.relativeToRoot [ (map lib.custom.relativeToRoot [
"hosts/common/optional/system/pool.nix" "hosts/global/common/system/pool.nix"
]) ])
]; ];

7
hosts/readme.md Normal file
View file

@ -0,0 +1,7 @@
# TODO:
- Fix up how DEs are configured; it's not modular at all right now. I need to be able to select the DE from hostSpec, and it should be able to change config per user
- decouple /pool from places its not needed, or should be optional
	- some users should not have access to the pool, or simply can't access it because they are not local
- SSH keys are not set up per user
	- should probably fix that

View file

@ -1,64 +0,0 @@
# User config applicable only to nixos.
#
# Declares the primary (admin) user from `config.hostSpec`, a shared
# fixed-gid `ryot` group, and root's password/SSH keys. When home-manager
# is among the flake inputs, it also pins a stateVersion for root's HM
# config to silence the missing-stateVersion error.
{
  inputs,
  config,
  lib,
  pkgs,
  ...
}:
let
  hostSpec = config.hostSpec;
  # Filter a list of group names down to the groups actually declared on
  # this system, so the user definition does not fail on hosts where an
  # optional module (docker, libvirt, ...) is disabled.
  ifTheyExist = groups: builtins.filter (group: builtins.hasAttr group config.users.groups) groups;
in
{
  users.groups = {
    # Primary group for the admin user; fixed gid keeps file ownership
    # stable across hosts.
    ryot = {
      gid = 1004;
      members = [ "${hostSpec.username}" ];
    };
  };
  users.mutableUsers = false; # Only allow declarative credentials; Required for password to be set via sops during system activation!
  users.users.${hostSpec.username} = {
    home = "${hostSpec.home}";
    isNormalUser = true;
    createHome = true;
    description = "Admin";
    homeMode = "750"; # group may read/traverse the home dir, others get nothing
    hashedPassword = hostSpec.hashedPassword;
    uid = 1000; # fixed uid so ownership stays consistent across reinstalls
    group = "ryot";
    extraGroups = lib.flatten [
      "wheel"
      # Some of these groups are defined elsewhere in the system
      # But honestly not sure what runs first so just add them here i guess
      (ifTheyExist [
        "adbusers"
        "audio"
        "docker"
        "gamemode"
        "git"
        "libvirtd"
        "networkmanager"
        "video"
      ])
    ];
  };
  # No matter what environment we are in we want these tools for root, and the user(s)
  programs.git.enable = true;
  # root's ssh key are mainly used for remote deployment, borg, and some other specific ops
  users.users.root = {
    shell = pkgs.bash;
    # Root shares the admin user's password and authorized keys.
    hashedPassword = lib.mkForce hostSpec.hashedPassword;
    openssh.authorizedKeys.keys = config.users.users.${hostSpec.username}.openssh.authorizedKeys.keys; # root's ssh keys are mainly used for remote deployment.
  };
}
# Merged only when home-manager is a flake input.
// lib.optionalAttrs (inputs ? "home-manager") {
  # Setup root home?
  home-manager.users.root = lib.optionalAttrs (!hostSpec.isMinimal) {
    home.stateVersion = "24.05"; # Avoid error
  };
}

View file

@ -1,23 +0,0 @@
# Minimal user setup for stripped-down builds (installer images, ISOs):
# declares the primary group and a wheel user with a temporary password.
{ config, ... }:
let
  spec = config.hostSpec;
  primaryGroup = "ryot";
in
{
  # Fixed-gid primary group shared by the main user.
  users.groups.${primaryGroup} = {
    gid = 1004;
    members = [ spec.username ];
  };
  # Set a temp password for use by minimal builds like installer and iso
  users.users.${spec.username} = {
    isNormalUser = true;
    group = primaryGroup;
    extraGroups = [ "wheel" ];
    hashedPassword = spec.hashedPassword;
  };
}

View file

@ -1,61 +0,0 @@
# Cross-cutting user configuration driven by `config.hostSpec`:
# sets the user's shell and authorized SSH keys, pre-creates ~/.ssh for
# minimal systems, and — when home-manager is among the flake inputs —
# imports the user's per-host home configuration.
{
  inputs,
  pkgs,
  config,
  lib,
  ...
}:
let
  hostSpec = config.hostSpec;
  username = hostSpec.username;
  homeDir = hostSpec.home;
  _shell = hostSpec.shell;
  # Every public key declared in secretsSpec is authorized for this user.
  pubKeys = builtins.attrValues config.secretsSpec.ssh.publicKeys;
in
{
  users.users.${username} = {
    name = hostSpec.username;
    shell = _shell;
    # These get placed into /etc/ssh/authorized_keys.d/<name> on nixos
    openssh.authorizedKeys.keys = pubKeys;
  };
  # Create ssh sockets directory for controlpaths when homemanager not loaded (i.e. isMinimal)
  systemd.tmpfiles.rules =
    let
      user = config.users.users.${username}.name;
      group = config.users.users.${username}.group;
    in
    [
      # 0750: owner full, group traverse/read, others nothing.
      "d ${homeDir}/.ssh 0750 ${user} ${group} -"
    ];
  # No matter what environment we are in we want these tools
  programs.fish.enable = true;
}
# Import the user's personal/home configurations, unless the environment is minimal
// lib.optionalAttrs (inputs ? "home-manager") {
  home-manager = {
    extraSpecialArgs = {
      inherit pkgs inputs;
      inherit (config) secretsSpec hostSpec;
    };
    # home/<username>/<hostName> is imported as a plain function (not a
    # module path), so the required arguments are passed explicitly here.
    users.${username}.imports = lib.flatten (
      lib.optional (!hostSpec.isMinimal) [
        (
          { config, ... }:
          import (lib.custom.relativeToRoot "home/${username}/${hostSpec.hostName}") {
            inherit
              pkgs
              inputs
              config
              lib
              hostSpec
              ;
          }
        )
      ]
    );
  };
}

View file

@ -18,4 +18,20 @@
) (builtins.readDir path) ) (builtins.readDir path)
) )
); );
# Generate an Apprise URL for sending notifications.
# Can be called with an smtp config attrset and a recipient:
#   mkAppriseUrl { user = "user"; password = "pass"; host = "smtp.example.com"; from = "sender@example.com"; } "recipient@example.com"
# Or with individual string parameters (legacy form: smtp = user,
# recipient = password; the mail is addressed to the smtp user itself):
#   mkAppriseUrl "user" "pass"
mkAppriseUrl =
  smtp: recipient:
  let
    # Evaluate the attrset check once instead of once per field.
    isCfg = builtins.isAttrs smtp;
    smtpUser = if isCfg then smtp.user else smtp;
    smtpPass = if isCfg then smtp.password else recipient;
    smtpHost = if isCfg then smtp.host else "";
    smtpFrom = if isCfg then smtp.from else "";
    # BUGFIX: the legacy branch previously read `smtp.user`, which is an
    # attribute selection on a *string* and made every legacy call fail at
    # evaluation time. The intended value is the user string itself.
    to = if isCfg then recipient else smtpUser;
  in
  "mailtos://_?user=${smtpUser}&pass=${smtpPass}&smtp=${smtpHost}&from=${smtpFrom}&to=${to}";
} }

Some files were not shown because too many files have changed in this diff Show more