nix fmt
commit 9ea80639a3 (parent 3da42ead11)

data.nix (216)
@@ -65,32 +65,45 @@ rec {
jakstIP = "100.89.176.4";
|
||||
};
|
||||
"vno3-rp3b.servers.jakst" = rec {
|
||||
extraHostNames = [jakstIP];
|
||||
extraHostNames = [ jakstIP ];
|
||||
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBudUFFEBpUVdr26vLJup8Hk6wj1iDbOPPQnJbv6GUGC";
|
||||
jakstIP = "100.89.176.2";
|
||||
};
|
||||
"fra1-a.servers.jakst" = rec {
|
||||
extraHostNames = ["fra1-a.jakstys.lt" publicIP jakstIP];
|
||||
extraHostNames = [
|
||||
"fra1-a.jakstys.lt"
|
||||
publicIP
|
||||
jakstIP
|
||||
];
|
||||
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFj9Ktw9SZQlHe/Pl5MI7PRUcCyTgZgZ0SsvWUmO0wBM";
|
||||
initrdPubKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGtYwVhfmdHRK8YcaRQ3JGSIOK55lEMNSPh33Z0iI+pO";
|
||||
publicIP = "168.119.184.134";
|
||||
jakstIP = "100.89.176.5";
|
||||
system = "aarch64-linux";
|
||||
supportedFeatures = ["nixos-test" "benchmark" "big-parallel" "kvm" "gccarch-armv8-a"];
|
||||
supportedFeatures = [
|
||||
"nixos-test"
|
||||
"benchmark"
|
||||
"big-parallel"
|
||||
"kvm"
|
||||
"gccarch-armv8-a"
|
||||
];
|
||||
};
|
||||
"fwminex.motiejus.jakst" = rec {
|
||||
extraHostNames = [jakstIP vno1IP];
|
||||
extraHostNames = [
|
||||
jakstIP
|
||||
vno1IP
|
||||
];
|
||||
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHlWSZ/H6DR5i5aCrlrEQLVF9MXNvls/pjlLPLaav3f+";
|
||||
jakstIP = "100.89.176.6";
|
||||
vno1IP = "192.168.189.10";
|
||||
};
|
||||
"mtworx.motiejus.jakst" = rec {
|
||||
extraHostNames = [jakstIP];
|
||||
extraHostNames = [ jakstIP ];
|
||||
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDRrsOkKkpJ9ZJYhEdxjwrmdVYoPcGDGtcGfBkkpVF6l";
|
||||
jakstIP = "100.89.176.20";
|
||||
};
|
||||
"vno1-vinc.vincentas.jakst" = rec {
|
||||
extraHostNames = [jakstIP];
|
||||
extraHostNames = [ jakstIP ];
|
||||
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJIwK7et5NBM+vaffiwpKLSAJwKfwMhCZwl1JyXo79uL";
|
||||
jakstIP = "100.89.176.7";
|
||||
};
|
||||
@@ -118,15 +131,14 @@ rec {
range = "100.89.176.0-100.89.191.255";
|
||||
sshPattern = "100.89.176.*"; # until we have more hosts
|
||||
};
|
||||
motiejus.cidrs = let
|
||||
mHosts =
|
||||
attrVals [
|
||||
motiejus.cidrs =
|
||||
let
|
||||
mHosts = attrVals [
|
||||
"mxp10.motiejus.jakst"
|
||||
"mtworx.motiejus.jakst"
|
||||
"fwminex.motiejus.jakst"
|
||||
]
|
||||
hosts;
|
||||
in
|
||||
] hosts;
|
||||
in
|
||||
builtins.catAttrs "jakstIP" mHosts;
|
||||
|
||||
vno1 = {
|
||||
@@ -136,99 +148,103 @@ rec {
vno3.cidr = "192.168.100.0/24";
|
||||
};
|
||||
|
||||
e11syncZone = let
|
||||
vno1 = hosts."vno1-oh2.servers.jakst".publicIP;
|
||||
fra1a = hosts."fra1-a.servers.jakst".publicIP;
|
||||
in ''
|
||||
$ORIGIN 11sync.net.
|
||||
$TTL 3600
|
||||
@ SOA ns1.11sync.net. motiejus.11sync.net. (2024011500 86400 86400 86400 86400)
|
||||
@ NS ns1.11sync.net.
|
||||
@ NS ns2.11sync.net.
|
||||
@ A ${vno1}
|
||||
@ TXT google-site-verification=nvUYd7_ShhPKvTn_Xbw-vPFONOhPeaYQsGp34DbV-80
|
||||
@ TXT "hosted-email-verify=qeuysotu"
|
||||
@ MX 10 aspmx1.migadu.com.
|
||||
@ MX 20 aspmx2.migadu.com.
|
||||
@ TXT "v=spf1 include:spf.migadu.com -all"
|
||||
ns1 A ${vno1}
|
||||
ns2 A ${fra1a}
|
||||
www A ${vno1}
|
||||
admin A ${hosts."fra1-a.servers.jakst".jakstIP}
|
||||
key1._domainkey CNAME key1.11sync.net._domainkey.migadu.com.
|
||||
key2._domainkey CNAME key2.11sync.net._domainkey.migadu.com.
|
||||
key3._domainkey CNAME key3.11sync.net._domainkey.migadu.com.
|
||||
_dmarc TXT "v=DMARC1; p=quarantine;"
|
||||
autoconfig CNAME autoconfig.migadu.com.
|
||||
_autodiscover._tcp SRV 0 1 443 autodiscover.migadu.com.
|
||||
_submissions._tcp SRV 0 1 465 smtp.migadu.com.
|
||||
_imaps._tcp SRV 0 1 993 imap.migadu.com.
|
||||
_pop3s._tcp SRV 0 1 995 pop.migadu.com.
|
||||
_github-challenge-11sync-org TXT "ff5e813c58"
|
||||
'';
|
||||
e11syncZone =
|
||||
let
|
||||
vno1 = hosts."vno1-oh2.servers.jakst".publicIP;
|
||||
fra1a = hosts."fra1-a.servers.jakst".publicIP;
|
||||
in
|
||||
''
|
||||
$ORIGIN 11sync.net.
|
||||
$TTL 3600
|
||||
@ SOA ns1.11sync.net. motiejus.11sync.net. (2024011500 86400 86400 86400 86400)
|
||||
@ NS ns1.11sync.net.
|
||||
@ NS ns2.11sync.net.
|
||||
@ A ${vno1}
|
||||
@ TXT google-site-verification=nvUYd7_ShhPKvTn_Xbw-vPFONOhPeaYQsGp34DbV-80
|
||||
@ TXT "hosted-email-verify=qeuysotu"
|
||||
@ MX 10 aspmx1.migadu.com.
|
||||
@ MX 20 aspmx2.migadu.com.
|
||||
@ TXT "v=spf1 include:spf.migadu.com -all"
|
||||
ns1 A ${vno1}
|
||||
ns2 A ${fra1a}
|
||||
www A ${vno1}
|
||||
admin A ${hosts."fra1-a.servers.jakst".jakstIP}
|
||||
key1._domainkey CNAME key1.11sync.net._domainkey.migadu.com.
|
||||
key2._domainkey CNAME key2.11sync.net._domainkey.migadu.com.
|
||||
key3._domainkey CNAME key3.11sync.net._domainkey.migadu.com.
|
||||
_dmarc TXT "v=DMARC1; p=quarantine;"
|
||||
autoconfig CNAME autoconfig.migadu.com.
|
||||
_autodiscover._tcp SRV 0 1 443 autodiscover.migadu.com.
|
||||
_submissions._tcp SRV 0 1 465 smtp.migadu.com.
|
||||
_imaps._tcp SRV 0 1 993 imap.migadu.com.
|
||||
_pop3s._tcp SRV 0 1 995 pop.migadu.com.
|
||||
_github-challenge-11sync-org TXT "ff5e813c58"
|
||||
'';
|
||||
|
||||
jakstysLTZone = let
|
||||
fra1a = hosts."fra1-a.servers.jakst".publicIP;
|
||||
vno1 = hosts."vno1-oh2.servers.jakst".publicIP;
|
||||
in ''
|
||||
$ORIGIN jakstys.lt.
|
||||
$TTL 86400
|
||||
@ SOA ns1.jakstys.lt. motiejus.jakstys.lt. (2023100800 86400 86400 86400 86400)
|
||||
@ NS ns1.jakstys.lt.
|
||||
@ NS ns2.jakstys.lt.
|
||||
@ A ${vno1}
|
||||
www A ${vno1}
|
||||
ns1 A ${vno1}
|
||||
ns2 A ${fra1a}
|
||||
vpn A ${vno1}
|
||||
git A ${vno1}
|
||||
auth A ${vno1}
|
||||
dl A ${vno1}
|
||||
fra1-a A ${fra1a}
|
||||
vno1 A ${vno1}
|
||||
jakstysLTZone =
|
||||
let
|
||||
fra1a = hosts."fra1-a.servers.jakst".publicIP;
|
||||
vno1 = hosts."vno1-oh2.servers.jakst".publicIP;
|
||||
in
|
||||
''
|
||||
$ORIGIN jakstys.lt.
|
||||
$TTL 86400
|
||||
@ SOA ns1.jakstys.lt. motiejus.jakstys.lt. (2023100800 86400 86400 86400 86400)
|
||||
@ NS ns1.jakstys.lt.
|
||||
@ NS ns2.jakstys.lt.
|
||||
@ A ${vno1}
|
||||
www A ${vno1}
|
||||
ns1 A ${vno1}
|
||||
ns2 A ${fra1a}
|
||||
vpn A ${vno1}
|
||||
git A ${vno1}
|
||||
auth A ${vno1}
|
||||
dl A ${vno1}
|
||||
fra1-a A ${fra1a}
|
||||
vno1 A ${vno1}
|
||||
|
||||
@ TXT google-site-verification=sU99fmO8gEJF-0lbOY-IzkovC6MXsP3Gozqrs8BR5OM
|
||||
@ TXT hosted-email-verify=rvyd6h64
|
||||
@ MX 10 aspmx1.migadu.com.
|
||||
@ MX 20 aspmx2.migadu.com.
|
||||
* MX 10 aspmx1.migadu.com.
|
||||
* MX 20 aspmx2.migadu.com.
|
||||
key1._domainkey CNAME key1.jakstys.lt._domainkey.migadu.com.
|
||||
key2._domainkey CNAME key2.jakstys.lt._domainkey.migadu.com.
|
||||
key3._domainkey CNAME key3.jakstys.lt._domainkey.migadu.com.
|
||||
@ TXT "v=spf1 include:spf.migadu.com -all"
|
||||
_dmarc TXT "v=DMARC1; p=quarantine;"
|
||||
* MX 10 aspmx1.migadu.com.
|
||||
* MX 20 aspmx2.migadu.com.
|
||||
autoconfig CNAME autoconfig.migadu.com.
|
||||
_autodiscover._tcp SRV 0 1 443 autodiscover.migadu.com.
|
||||
_submissions._tcp SRV 0 1 465 smtp.migadu.com.
|
||||
_imaps._tcp SRV 0 1 993 imap.migadu.com.
|
||||
_pop3s._tcp SRV 0 1 995 imap.migadu.com.
|
||||
@ TXT google-site-verification=sU99fmO8gEJF-0lbOY-IzkovC6MXsP3Gozqrs8BR5OM
|
||||
@ TXT hosted-email-verify=rvyd6h64
|
||||
@ MX 10 aspmx1.migadu.com.
|
||||
@ MX 20 aspmx2.migadu.com.
|
||||
* MX 10 aspmx1.migadu.com.
|
||||
* MX 20 aspmx2.migadu.com.
|
||||
key1._domainkey CNAME key1.jakstys.lt._domainkey.migadu.com.
|
||||
key2._domainkey CNAME key2.jakstys.lt._domainkey.migadu.com.
|
||||
key3._domainkey CNAME key3.jakstys.lt._domainkey.migadu.com.
|
||||
@ TXT "v=spf1 include:spf.migadu.com -all"
|
||||
_dmarc TXT "v=DMARC1; p=quarantine;"
|
||||
* MX 10 aspmx1.migadu.com.
|
||||
* MX 20 aspmx2.migadu.com.
|
||||
autoconfig CNAME autoconfig.migadu.com.
|
||||
_autodiscover._tcp SRV 0 1 443 autodiscover.migadu.com.
|
||||
_submissions._tcp SRV 0 1 465 smtp.migadu.com.
|
||||
_imaps._tcp SRV 0 1 993 imap.migadu.com.
|
||||
_pop3s._tcp SRV 0 1 995 imap.migadu.com.
|
||||
|
||||
grafana A ${hosts."vno1-oh2.servers.jakst".jakstIP}
|
||||
_acme-challenge.grafana CNAME _acme-endpoint.grafana
|
||||
_acme-endpoint.grafana NS ns._acme-endpoint.grafana
|
||||
ns._acme-endpoint.grafana A ${vno1}
|
||||
grafana A ${hosts."vno1-oh2.servers.jakst".jakstIP}
|
||||
_acme-challenge.grafana CNAME _acme-endpoint.grafana
|
||||
_acme-endpoint.grafana NS ns._acme-endpoint.grafana
|
||||
ns._acme-endpoint.grafana A ${vno1}
|
||||
|
||||
irc A ${hosts."vno1-oh2.servers.jakst".jakstIP}
|
||||
_acme-challenge.irc CNAME _acme-endpoint.irc
|
||||
_acme-endpoint.irc NS ns._acme-endpoint.irc
|
||||
ns._acme-endpoint.irc A ${vno1}
|
||||
irc A ${hosts."vno1-oh2.servers.jakst".jakstIP}
|
||||
_acme-challenge.irc CNAME _acme-endpoint.irc
|
||||
_acme-endpoint.irc NS ns._acme-endpoint.irc
|
||||
ns._acme-endpoint.irc A ${vno1}
|
||||
|
||||
hass A ${hosts."vno1-oh2.servers.jakst".jakstIP}
|
||||
_acme-challenge.hass CNAME _acme-endpoint.hass
|
||||
_acme-endpoint.hass NS ns._acme-endpoint.hass
|
||||
ns._acme-endpoint.hass A ${vno1}
|
||||
hass A ${hosts."vno1-oh2.servers.jakst".jakstIP}
|
||||
_acme-challenge.hass CNAME _acme-endpoint.hass
|
||||
_acme-endpoint.hass NS ns._acme-endpoint.hass
|
||||
ns._acme-endpoint.hass A ${vno1}
|
||||
|
||||
bitwarden A ${hosts."vno1-oh2.servers.jakst".jakstIP}
|
||||
_acme-challenge.bitwarden CNAME _acme-endpoint.bitwarden
|
||||
_acme-endpoint.bitwarden NS ns._acme-endpoint.bitwarden
|
||||
ns._acme-endpoint.bitwarden A ${vno1}
|
||||
bitwarden A ${hosts."vno1-oh2.servers.jakst".jakstIP}
|
||||
_acme-challenge.bitwarden CNAME _acme-endpoint.bitwarden
|
||||
_acme-endpoint.bitwarden NS ns._acme-endpoint.bitwarden
|
||||
ns._acme-endpoint.bitwarden A ${vno1}
|
||||
|
||||
hdd A ${hosts."vno3-rp3b.servers.jakst".jakstIP}
|
||||
_acme-challenge.hdd CNAME _acme-endpoint.hdd
|
||||
_acme-endpoint.hdd NS ns._acme-endpoint.hdd
|
||||
ns._acme-endpoint.hdd A ${vno1}
|
||||
'';
|
||||
hdd A ${hosts."vno3-rp3b.servers.jakst".jakstIP}
|
||||
_acme-challenge.hdd CNAME _acme-endpoint.hdd
|
||||
_acme-endpoint.hdd NS ns._acme-endpoint.hdd
|
||||
ns._acme-endpoint.hdd A ${vno1}
|
||||
'';
|
||||
}

flake.nix (235)
@@ -61,65 +61,68 @@
extra-experimental-features = "nix-command flakes";
|
||||
};
|
||||
|
||||
outputs = {
|
||||
self,
|
||||
nixpkgs,
|
||||
nixpkgs-unstable,
|
||||
agenix,
|
||||
deploy-rs,
|
||||
flake-utils,
|
||||
home-manager,
|
||||
nixos-hardware,
|
||||
nix-index-database,
|
||||
pre-commit-hooks,
|
||||
nur,
|
||||
nixgl,
|
||||
...
|
||||
} @ inputs: let
|
||||
myData = import ./data.nix;
|
||||
outputs =
|
||||
{
|
||||
self,
|
||||
nixpkgs,
|
||||
nixpkgs-unstable,
|
||||
agenix,
|
||||
deploy-rs,
|
||||
flake-utils,
|
||||
home-manager,
|
||||
nixos-hardware,
|
||||
nix-index-database,
|
||||
pre-commit-hooks,
|
||||
nur,
|
||||
nixgl,
|
||||
...
|
||||
}@inputs:
|
||||
let
|
||||
myData = import ./data.nix;
|
||||
|
||||
overlays = [
|
||||
nur.overlay
|
||||
nixgl.overlay
|
||||
overlays = [
|
||||
nur.overlay
|
||||
nixgl.overlay
|
||||
|
||||
(_self: super: {deploy-rs-pkg = super.deploy-rs;})
|
||||
deploy-rs.overlay
|
||||
(_self: super: {
|
||||
deploy-rs = {
|
||||
deploy-rs = super.deploy-rs-pkg;
|
||||
inherit (super.deploy-rs) lib;
|
||||
(_self: super: { deploy-rs-pkg = super.deploy-rs; })
|
||||
deploy-rs.overlay
|
||||
(_self: super: {
|
||||
deploy-rs = {
|
||||
deploy-rs = super.deploy-rs-pkg;
|
||||
inherit (super.deploy-rs) lib;
|
||||
};
|
||||
deploy-rs-pkg = null;
|
||||
})
|
||||
(_: super: {
|
||||
compressDrv = super.callPackage ./pkgs/compress-drv { };
|
||||
compressDrvWeb = super.callPackage ./pkgs/compress-drv/web.nix { };
|
||||
|
||||
tmuxbash = super.callPackage ./pkgs/tmuxbash.nix { };
|
||||
btrfs-auto-snapshot = super.callPackage ./pkgs/btrfs-auto-snapshot.nix { };
|
||||
nicer = super.callPackage ./pkgs/nicer.nix { };
|
||||
|
||||
pkgs-unstable = import nixpkgs-unstable { inherit (super) system; };
|
||||
})
|
||||
];
|
||||
|
||||
mkVM =
|
||||
system:
|
||||
nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
modules = [
|
||||
{ nixpkgs.overlays = overlays; }
|
||||
./hosts/vm/configuration.nix
|
||||
|
||||
./modules
|
||||
./modules/profiles/desktop
|
||||
|
||||
home-manager.nixosModules.home-manager
|
||||
];
|
||||
specialArgs = {
|
||||
inherit myData;
|
||||
} // inputs;
|
||||
};
|
||||
deploy-rs-pkg = null;
|
||||
})
|
||||
(_: super: {
|
||||
compressDrv = super.callPackage ./pkgs/compress-drv {};
|
||||
compressDrvWeb = super.callPackage ./pkgs/compress-drv/web.nix {};
|
||||
|
||||
tmuxbash = super.callPackage ./pkgs/tmuxbash.nix {};
|
||||
btrfs-auto-snapshot = super.callPackage ./pkgs/btrfs-auto-snapshot.nix {};
|
||||
nicer = super.callPackage ./pkgs/nicer.nix {};
|
||||
|
||||
pkgs-unstable = import nixpkgs-unstable {
|
||||
inherit (super) system;
|
||||
};
|
||||
})
|
||||
];
|
||||
|
||||
mkVM = system:
|
||||
nixpkgs.lib.nixosSystem {
|
||||
inherit system;
|
||||
modules = [
|
||||
{nixpkgs.overlays = overlays;}
|
||||
./hosts/vm/configuration.nix
|
||||
|
||||
./modules
|
||||
./modules/profiles/desktop
|
||||
|
||||
home-manager.nixosModules.home-manager
|
||||
];
|
||||
specialArgs = {inherit myData;} // inputs;
|
||||
};
|
||||
in
|
||||
in
|
||||
{
|
||||
nixosConfigurations = {
|
||||
vm-x86_64 = mkVM "x86_64-linux";
|
||||
@@ -128,7 +131,7 @@
mtworx = nixpkgs.lib.nixosSystem {
|
||||
system = "x86_64-linux";
|
||||
modules = [
|
||||
{nixpkgs.overlays = overlays;}
|
||||
{ nixpkgs.overlays = overlays; }
|
||||
./hosts/mtworx/configuration.nix
|
||||
home-manager.nixosModules.home-manager
|
||||
nixos-hardware.nixosModules.lenovo-thinkpad-x1-11th-gen
|
||||
@@ -147,13 +150,15 @@
}
|
||||
];
|
||||
|
||||
specialArgs = {inherit myData;} // inputs;
|
||||
specialArgs = {
|
||||
inherit myData;
|
||||
} // inputs;
|
||||
};
|
||||
|
||||
vno1-oh2 = nixpkgs.lib.nixosSystem {
|
||||
system = "x86_64-linux";
|
||||
modules = [
|
||||
{nixpkgs.overlays = overlays;}
|
||||
{ nixpkgs.overlays = overlays; }
|
||||
./hosts/vno1-oh2/configuration.nix
|
||||
./modules
|
||||
|
||||
@@ -184,13 +189,15 @@
}
|
||||
];
|
||||
|
||||
specialArgs = {inherit myData;} // inputs;
|
||||
specialArgs = {
|
||||
inherit myData;
|
||||
} // inputs;
|
||||
};
|
||||
|
||||
fwminex = nixpkgs.lib.nixosSystem {
|
||||
system = "x86_64-linux";
|
||||
modules = [
|
||||
{nixpkgs.overlays = overlays;}
|
||||
{ nixpkgs.overlays = overlays; }
|
||||
./hosts/fwminex/configuration.nix
|
||||
home-manager.nixosModules.home-manager
|
||||
nixos-hardware.nixosModules.framework-12th-gen-intel
|
||||
@@ -207,13 +214,15 @@
}
|
||||
];
|
||||
|
||||
specialArgs = {inherit myData;} // inputs;
|
||||
specialArgs = {
|
||||
inherit myData;
|
||||
} // inputs;
|
||||
};
|
||||
|
||||
vno3-rp3b = nixpkgs.lib.nixosSystem {
|
||||
system = "aarch64-linux";
|
||||
modules = [
|
||||
{nixpkgs.overlays = overlays;}
|
||||
{ nixpkgs.overlays = overlays; }
|
||||
./hosts/vno3-rp3b/configuration.nix
|
||||
|
||||
./modules
|
||||
@@ -232,13 +241,15 @@
}
|
||||
];
|
||||
|
||||
specialArgs = {inherit myData;} // inputs;
|
||||
specialArgs = {
|
||||
inherit myData;
|
||||
} // inputs;
|
||||
};
|
||||
|
||||
fra1-a = nixpkgs.lib.nixosSystem {
|
||||
system = "aarch64-linux";
|
||||
modules = [
|
||||
{nixpkgs.overlays = overlays;}
|
||||
{ nixpkgs.overlays = overlays; }
|
||||
agenix.nixosModules.default
|
||||
home-manager.nixosModules.home-manager
|
||||
|
||||
@@ -256,7 +267,9 @@
}
|
||||
];
|
||||
|
||||
specialArgs = {inherit myData;} // inputs;
|
||||
specialArgs = {
|
||||
inherit myData;
|
||||
} // inputs;
|
||||
};
|
||||
};
|
||||
|
||||
@@ -266,8 +279,7 @@
profiles = {
|
||||
system = {
|
||||
sshUser = "motiejus";
|
||||
path =
|
||||
self.nixosConfigurations.vno1-oh2.pkgs.deploy-rs.lib.activate.nixos self.nixosConfigurations.vno1-oh2;
|
||||
path = self.nixosConfigurations.vno1-oh2.pkgs.deploy-rs.lib.activate.nixos self.nixosConfigurations.vno1-oh2;
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
@@ -278,8 +290,7 @@
profiles = {
|
||||
system = {
|
||||
sshUser = "motiejus";
|
||||
path =
|
||||
self.nixosConfigurations.fwminex.pkgs.deploy-rs.lib.activate.nixos self.nixosConfigurations.fwminex;
|
||||
path = self.nixosConfigurations.fwminex.pkgs.deploy-rs.lib.activate.nixos self.nixosConfigurations.fwminex;
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
@@ -290,8 +301,7 @@
profiles = {
|
||||
system = {
|
||||
sshUser = "motiejus";
|
||||
path =
|
||||
self.nixosConfigurations.mtworx.pkgs.deploy-rs.lib.activate.nixos self.nixosConfigurations.mtworx;
|
||||
path = self.nixosConfigurations.mtworx.pkgs.deploy-rs.lib.activate.nixos self.nixosConfigurations.mtworx;
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
@@ -302,8 +312,7 @@
profiles = {
|
||||
system = {
|
||||
sshUser = "motiejus";
|
||||
path =
|
||||
self.nixosConfigurations.vno3-rp3b.pkgs.deploy-rs.lib.activate.nixos self.nixosConfigurations.vno3-rp3b;
|
||||
path = self.nixosConfigurations.vno3-rp3b.pkgs.deploy-rs.lib.activate.nixos self.nixosConfigurations.vno3-rp3b;
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
@@ -314,51 +323,55 @@
profiles = {
|
||||
system = {
|
||||
sshUser = "motiejus";
|
||||
path =
|
||||
self.nixosConfigurations.fra1-a.pkgs.deploy-rs.lib.activate.nixos self.nixosConfigurations.fra1-a;
|
||||
path = self.nixosConfigurations.fra1-a.pkgs.deploy-rs.lib.activate.nixos self.nixosConfigurations.fra1-a;
|
||||
user = "root";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
checks =
|
||||
builtins.mapAttrs (
|
||||
system: deployLib:
|
||||
deployLib.deployChecks self.deploy
|
||||
// {
|
||||
pre-commit-check = inputs.pre-commit-hooks.lib.${system}.run {
|
||||
src = ./.;
|
||||
hooks = {
|
||||
alejandra.enable = true;
|
||||
deadnix.enable = true;
|
||||
statix.enable = true;
|
||||
};
|
||||
checks = builtins.mapAttrs (
|
||||
system: deployLib:
|
||||
let
|
||||
pkgs = import nixpkgs { inherit system overlays; };
|
||||
in
|
||||
deployLib.deployChecks self.deploy
|
||||
// {
|
||||
pre-commit-check = inputs.pre-commit-hooks.lib.${system}.run {
|
||||
src = ./.;
|
||||
hooks = {
|
||||
nixfmt = {
|
||||
enable = true;
|
||||
package = pkgs.nixfmt-rfc-style;
|
||||
};
|
||||
deadnix.enable = true;
|
||||
statix.enable = true;
|
||||
};
|
||||
};
|
||||
|
||||
compress-drv-test = let
|
||||
pkgs = import nixpkgs {inherit system overlays;};
|
||||
in
|
||||
pkgs.callPackage ./pkgs/compress-drv/test.nix {};
|
||||
}
|
||||
)
|
||||
deploy-rs.lib;
|
||||
compress-drv-test = pkgs.callPackage ./pkgs/compress-drv/test.nix { };
|
||||
}
|
||||
) deploy-rs.lib;
|
||||
}
|
||||
// flake-utils.lib.eachDefaultSystem (system: let
|
||||
pkgs = import nixpkgs {inherit system overlays;};
|
||||
in {
|
||||
devShells.default = pkgs.mkShellNoCC {
|
||||
GIT_AUTHOR_EMAIL = "motiejus@jakstys.lt";
|
||||
packages = [
|
||||
pkgs.nix-output-monitor
|
||||
pkgs.rage
|
||||
pkgs.age-plugin-yubikey
|
||||
pkgs.deploy-rs.deploy-rs
|
||||
agenix.packages.${system}.agenix
|
||||
];
|
||||
inherit (self.checks.${system}.pre-commit-check) shellHook;
|
||||
};
|
||||
// flake-utils.lib.eachDefaultSystem (
|
||||
system:
|
||||
let
|
||||
pkgs = import nixpkgs { inherit system overlays; };
|
||||
in
|
||||
{
|
||||
devShells.default = pkgs.mkShellNoCC {
|
||||
GIT_AUTHOR_EMAIL = "motiejus@jakstys.lt";
|
||||
packages = [
|
||||
pkgs.nix-output-monitor
|
||||
pkgs.rage
|
||||
pkgs.age-plugin-yubikey
|
||||
pkgs.deploy-rs.deploy-rs
|
||||
agenix.packages.${system}.agenix
|
||||
];
|
||||
inherit (self.checks.${system}.pre-commit-check) shellHook;
|
||||
};
|
||||
|
||||
formatter = pkgs.alejandra;
|
||||
});
|
||||
formatter = pkgs.nixfmt-rfc-style;
|
||||
}
|
||||
);
|
||||
}
@@ -4,25 +4,31 @@
myData,
|
||||
modulesPath,
|
||||
...
|
||||
}: {
|
||||
imports = [(modulesPath + "/profiles/qemu-guest.nix")];
|
||||
}:
|
||||
{
|
||||
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
|
||||
|
||||
zfs-root = {
|
||||
boot = {
|
||||
enable = true;
|
||||
devNodes = "/dev/disk/by-id/";
|
||||
bootDevices = ["scsi-0QEMU_QEMU_HARDDISK_36151096"];
|
||||
bootDevices = [ "scsi-0QEMU_QEMU_HARDDISK_36151096" ];
|
||||
immutable = false;
|
||||
availableKernelModules = ["xhci_pci" "virtio_pci" "virtio_scsi" "usbhid" "sr_mod" "virtio_gpu"];
|
||||
availableKernelModules = [
|
||||
"xhci_pci"
|
||||
"virtio_pci"
|
||||
"virtio_scsi"
|
||||
"usbhid"
|
||||
"sr_mod"
|
||||
"virtio_gpu"
|
||||
];
|
||||
removableEfi = true;
|
||||
kernelParams = ["console=tty"];
|
||||
kernelParams = [ "console=tty" ];
|
||||
sshUnlock = {
|
||||
enable = true;
|
||||
authorizedKeys =
|
||||
(builtins.attrValues myData.people_pubkeys)
|
||||
++ [
|
||||
myData.hosts."vno1-oh2.servers.jakst".publicKey
|
||||
];
|
||||
authorizedKeys = (builtins.attrValues myData.people_pubkeys) ++ [
|
||||
myData.hosts."vno1-oh2.servers.jakst".publicKey
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
@@ -46,7 +52,7 @@
|
||||
snapshot = {
|
||||
enable = true;
|
||||
mountpoints = ["/var/lib"];
|
||||
mountpoints = [ "/var/lib" ];
|
||||
};
|
||||
};
|
||||
|
||||
@@ -79,22 +85,24 @@
];
|
||||
|
||||
enable = true;
|
||||
sshAllowSubnets = [myData.subnets.tailscale.sshPattern];
|
||||
sshAllowSubnets = [ myData.subnets.tailscale.sshPattern ];
|
||||
uidgid = myData.uidgid.updaterbot-deployee;
|
||||
};
|
||||
};
|
||||
|
||||
zfsunlock = {
|
||||
enable = false;
|
||||
targets."vno1-oh2.servers.jakst" = let
|
||||
host = myData.hosts."vno1-oh2.servers.jakst";
|
||||
in {
|
||||
sshEndpoint = host.publicIP;
|
||||
pingEndpoint = host.jakstIP;
|
||||
remotePubkey = host.initrdPubKey;
|
||||
pwFile = config.age.secrets.zfs-passphrase-vno1-oh2.path;
|
||||
startAt = "*-*-* *:00/5:00";
|
||||
};
|
||||
targets."vno1-oh2.servers.jakst" =
|
||||
let
|
||||
host = myData.hosts."vno1-oh2.servers.jakst";
|
||||
in
|
||||
{
|
||||
sshEndpoint = host.publicIP;
|
||||
pingEndpoint = host.jakstIP;
|
||||
remotePubkey = host.initrdPubKey;
|
||||
pwFile = config.age.secrets.zfs-passphrase-vno1-oh2.path;
|
||||
startAt = "*-*-* *:00/5:00";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
@@ -120,7 +128,10 @@
|
||||
nsd = {
|
||||
enable = true;
|
||||
interfaces = ["0.0.0.0" "::"];
|
||||
interfaces = [
|
||||
"0.0.0.0"
|
||||
"::"
|
||||
];
|
||||
zones = {
|
||||
"jakstys.lt.".data = myData.jakstysLTZone;
|
||||
"11sync.net.".data = myData.e11syncZone;
|
||||
@@ -134,8 +145,16 @@
domain = "servers.jakst";
|
||||
useDHCP = true;
|
||||
firewall = {
|
||||
allowedUDPPorts = [53 443];
|
||||
allowedTCPPorts = [22 53 80 443];
|
||||
allowedUDPPorts = [
|
||||
53
|
||||
443
|
||||
];
|
||||
allowedTCPPorts = [
|
||||
22
|
||||
53
|
||||
80
|
||||
443
|
||||
];
|
||||
};
|
||||
};
|
||||
|
||||
|
@@ -3,20 +3,28 @@
pkgs,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
nvme = "/dev/disk/by-id/nvme-Samsung_SSD_970_EVO_Plus_2TB_S6P1NS0TA01331A_1";
|
||||
in {
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
../../modules
|
||||
../../modules/profiles/btrfs
|
||||
];
|
||||
|
||||
boot = {
|
||||
kernelModules = ["kvm-intel"];
|
||||
kernelModules = [ "kvm-intel" ];
|
||||
loader.systemd-boot.enable = true;
|
||||
initrd = {
|
||||
kernelModules = ["usb_storage"];
|
||||
availableKernelModules = ["xhci_pci" "thunderbolt" "nvme" "usbhid" "tpm_tis"];
|
||||
kernelModules = [ "usb_storage" ];
|
||||
availableKernelModules = [
|
||||
"xhci_pci"
|
||||
"thunderbolt"
|
||||
"nvme"
|
||||
"usbhid"
|
||||
"tpm_tis"
|
||||
];
|
||||
systemd.enableTpm2 = true;
|
||||
luks.devices = {
|
||||
luksroot = {
|
||||
@@ -44,7 +52,7 @@ in {
"/" = {
|
||||
device = "/dev/mapper/luksroot";
|
||||
fsType = "btrfs";
|
||||
options = ["compress=zstd"];
|
||||
options = [ "compress=zstd" ];
|
||||
};
|
||||
"/boot" = {
|
||||
device = "${nvme}-part1";
|
||||
@@ -86,18 +94,20 @@ in {
verboseLogs = false;
|
||||
};
|
||||
|
||||
remote-builder.client = let
|
||||
host = myData.hosts."fra1-a.servers.jakst";
|
||||
in {
|
||||
enable = true;
|
||||
inherit (host) system supportedFeatures;
|
||||
hostName = host.jakstIP;
|
||||
sshKey = "/etc/ssh/ssh_host_ed25519_key";
|
||||
};
|
||||
remote-builder.client =
|
||||
let
|
||||
host = myData.hosts."fra1-a.servers.jakst";
|
||||
in
|
||||
{
|
||||
enable = true;
|
||||
inherit (host) system supportedFeatures;
|
||||
hostName = host.jakstIP;
|
||||
sshKey = "/etc/ssh/ssh_host_ed25519_key";
|
||||
};
|
||||
|
||||
node_exporter = {
|
||||
enable = true;
|
||||
extraSubnets = [myData.subnets.vno1.cidr];
|
||||
extraSubnets = [ myData.subnets.vno1.cidr ];
|
||||
};
|
||||
|
||||
deployerbot = {
|
||||
@@ -127,7 +137,7 @@ in {
|
||||
enable = true;
|
||||
uidgid = myData.uidgid.updaterbot-deployee;
|
||||
sshAllowSubnets = with myData.subnets; [tailscale.sshPattern];
|
||||
sshAllowSubnets = with myData.subnets; [ tailscale.sshPattern ];
|
||||
};
|
||||
};
|
||||
|
||||
|
@@ -1,10 +1,8 @@
{
|
||||
config,
|
||||
myData,
|
||||
...
|
||||
}: let
|
||||
{ config, myData, ... }:
|
||||
let
|
||||
nvme = "/dev/disk/by-id/nvme-WD_PC_SN810_SDCQNRY-1T00-1201_23234W800017";
|
||||
in {
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
../../shared/work
|
||||
../../modules
|
||||
@@ -14,10 +12,16 @@ in {
];
|
||||
|
||||
boot = {
|
||||
kernelModules = ["kvm-intel"];
|
||||
kernelModules = [ "kvm-intel" ];
|
||||
loader.systemd-boot.enable = true;
|
||||
initrd = {
|
||||
availableKernelModules = ["xhci_pci" "thunderbolt" "nvme" "usbhid" "tpm_tis"];
|
||||
availableKernelModules = [
|
||||
"xhci_pci"
|
||||
"thunderbolt"
|
||||
"nvme"
|
||||
"usbhid"
|
||||
"tpm_tis"
|
||||
];
|
||||
systemd = {
|
||||
enableTpm2 = true;
|
||||
emergencyAccess = true;
|
||||
@@ -26,7 +30,7 @@ in {
luksroot = {
|
||||
device = "${nvme}-part3";
|
||||
allowDiscards = true;
|
||||
crypttabExtraOpts = ["tpm2-device=auto"];
|
||||
crypttabExtraOpts = [ "tpm2-device=auto" ];
|
||||
};
|
||||
};
|
||||
};
|
||||
@@ -43,7 +47,7 @@ in {
"/" = {
|
||||
device = "/dev/mapper/luksroot";
|
||||
fsType = "btrfs";
|
||||
options = ["compress=zstd"];
|
||||
options = [ "compress=zstd" ];
|
||||
};
|
||||
"/boot" = {
|
||||
device = "${nvme}-part1";
|
||||
@@ -81,18 +85,20 @@ in {
toUser = config.mj.username;
|
||||
};
|
||||
|
||||
remote-builder.client = let
|
||||
host = myData.hosts."fra1-a.servers.jakst";
|
||||
in {
|
||||
enable = true;
|
||||
inherit (host) system supportedFeatures;
|
||||
hostName = host.jakstIP;
|
||||
sshKey = "/etc/ssh/ssh_host_ed25519_key";
|
||||
};
|
||||
remote-builder.client =
|
||||
let
|
||||
host = myData.hosts."fra1-a.servers.jakst";
|
||||
in
|
||||
{
|
||||
enable = true;
|
||||
inherit (host) system supportedFeatures;
|
||||
hostName = host.jakstIP;
|
||||
sshKey = "/etc/ssh/ssh_host_ed25519_key";
|
||||
};
|
||||
|
||||
node_exporter = {
|
||||
enable = true;
|
||||
extraSubnets = [myData.subnets.vno1.cidr];
|
||||
extraSubnets = [ myData.subnets.vno1.cidr ];
|
||||
};
|
||||
|
||||
deployerbot = {
|
||||
@@ -104,7 +110,7 @@ in {
|
||||
enable = true;
|
||||
uidgid = myData.uidgid.updaterbot-deployee;
|
||||
sshAllowSubnets = with myData.subnets; [tailscale.sshPattern];
|
||||
sshAllowSubnets = with myData.subnets; [ tailscale.sshPattern ];
|
||||
};
|
||||
};
|
||||
|
||||
@@ -131,7 +137,7 @@ in {
};
|
||||
};
|
||||
|
||||
users.extraGroups.vboxusers.members = ["motiejus"];
|
||||
users.extraGroups.vboxusers.members = [ "motiejus" ];
|
||||
|
||||
security.tpm2.enable = true;
|
||||
|
||||
|
@@ -1,8 +1,5 @@
{ self, modulesPath, ... }:
|
||||
{
|
||||
self,
|
||||
modulesPath,
|
||||
...
|
||||
}: {
|
||||
imports = [
|
||||
"${modulesPath}/profiles/all-hardware.nix"
|
||||
"${modulesPath}/installer/cd-dvd/iso-image.nix"
|
||||
@@ -21,7 +18,10 @@
};
|
||||
|
||||
boot.loader.systemd-boot.enable = true;
|
||||
boot.supportedFilesystems = ["zfs" "btrfs"];
|
||||
boot.supportedFilesystems = [
|
||||
"zfs"
|
||||
"btrfs"
|
||||
];
|
||||
|
||||
isoImage = {
|
||||
isoName = "toolshed-${self.lastModifiedDate}.iso";
|
||||
@@ -31,7 +31,7 @@
makeUsbBootable = true; # USB booting
|
||||
};
|
||||
|
||||
swapDevices = [];
|
||||
swapDevices = [ ];
|
||||
|
||||
services = {
|
||||
getty.autologinUser = "nixos";
|
||||
@@ -45,7 +45,7 @@
networking = {
|
||||
hostName = "vm";
|
||||
domain = "jakstys.lt";
|
||||
firewall.allowedTCPPorts = [22];
|
||||
firewall.allowedTCPPorts = [ 22 ];
|
||||
hostId = "abefef01";
|
||||
};
|
||||
}
|
||||
|
@@ -4,12 +4,13 @@
pkgs,
|
||||
myData,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
zfs-root = {
|
||||
boot = {
|
||||
enable = true;
|
||||
devNodes = "/dev/disk/by-id/";
|
||||
bootDevices = ["nvme-Samsung_SSD_970_EVO_Plus_2TB_S6P1NX0TA00913P"];
|
||||
bootDevices = [ "nvme-Samsung_SSD_970_EVO_Plus_2TB_S6P1NX0TA00913P" ];
|
||||
immutable = false;
|
||||
availableKernelModules = [
|
||||
"ahci"
|
||||
@@ -26,14 +27,14 @@
];
|
||||
sshUnlock = {
|
||||
enable = true;
|
||||
authorizedKeys =
|
||||
(builtins.attrValues myData.people_pubkeys)
|
||||
++ [myData.hosts."fra1-a.servers.jakst".publicKey];
|
||||
authorizedKeys = (builtins.attrValues myData.people_pubkeys) ++ [
|
||||
myData.hosts."fra1-a.servers.jakst".publicKey
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
boot.binfmt.emulatedSystems = ["aarch64-linux"];
|
||||
boot.binfmt.emulatedSystems = [ "aarch64-linux" ];
|
||||
|
||||
mj = {
|
||||
stateVersion = "23.05";
|
||||
@@ -50,7 +51,11 @@
|
||||
snapshot = {
|
||||
enable = true;
|
||||
mountpoints = ["/home" "/var/lib" "/var/log"];
|
||||
mountpoints = [
|
||||
"/home"
|
||||
"/var/lib"
|
||||
"/var/log"
|
||||
];
|
||||
};
|
||||
|
||||
zfsborg = {
|
||||
@@ -73,9 +78,7 @@
"tailscale"
|
||||
"private/soju"
|
||||
];
|
||||
patterns = [
|
||||
"- gitea/data/repo-archive/"
|
||||
];
|
||||
patterns = [ "- gitea/data/repo-archive/" ];
|
||||
backup_at = "*-*-* 01:00:00 UTC";
|
||||
prune.keep = {
|
||||
within = "1d";
|
||||
@@ -86,7 +89,9 @@
}
|
||||
{
|
||||
mountpoint = "/var/lib";
|
||||
repo = "borgstor@${myData.hosts."vno3-rp3b.servers.jakst".jakstIP}:${config.networking.hostName}.${config.networking.domain}-var_lib";
|
||||
repo = "borgstor@${
|
||||
myData.hosts."vno3-rp3b.servers.jakst".jakstIP
|
||||
}:${config.networking.hostName}.${config.networking.domain}-var_lib";
|
||||
paths = [
|
||||
"bitwarden_rs"
|
||||
"caddy"
|
||||
@@ -98,9 +103,7 @@
"tailscale"
|
||||
"private/soju"
|
||||
];
|
||||
patterns = [
|
||||
"- gitea/data/repo-archive/"
|
||||
];
|
||||
patterns = [ "- gitea/data/repo-archive/" ];
|
||||
backup_at = "*-*-* 01:00:00 UTC";
|
||||
}
|
||||
|
||||
@@ -108,7 +111,7 @@
{
|
||||
mountpoint = "/var/log";
|
||||
repo = "zh2769@zh2769.rsync.net:${config.networking.hostName}.${config.networking.domain}-var_log";
|
||||
paths = ["caddy"];
|
||||
paths = [ "caddy" ];
|
||||
patterns = [
|
||||
"+ caddy/access-jakstys.lt.log-*.zst"
|
||||
"- *"
|
||||
@@ -117,8 +120,10 @@
}
|
||||
{
|
||||
mountpoint = "/var/log";
|
||||
repo = "borgstor@${myData.hosts."vno3-rp3b.servers.jakst".jakstIP}:${config.networking.hostName}.${config.networking.domain}-var_log";
|
||||
paths = ["caddy"];
|
||||
repo = "borgstor@${
|
||||
myData.hosts."vno3-rp3b.servers.jakst".jakstIP
|
||||
}:${config.networking.hostName}.${config.networking.domain}-var_log";
|
||||
paths = [ "caddy" ];
|
||||
patterns = [
|
||||
"+ caddy/access-jakstys.lt.log-*.zst"
|
||||
"- *"
|
||||
@@ -138,7 +143,9 @@
}
|
||||
{
|
||||
mountpoint = "/home";
|
||||
repo = "borgstor@${myData.hosts."vno3-rp3b.servers.jakst".jakstIP}:${config.networking.hostName}.${config.networking.domain}-home-motiejus-annex2";
|
||||
repo = "borgstor@${
|
||||
myData.hosts."vno3-rp3b.servers.jakst".jakstIP
|
||||
}:${config.networking.hostName}.${config.networking.domain}-home-motiejus-annex2";
|
||||
paths = [
|
||||
"motiejus/annex2"
|
||||
"motiejus/.config/syncthing"
|
||||
@@ -157,7 +164,7 @@
services = {
|
||||
friendlyport.ports = [
|
||||
{
|
||||
subnets = [myData.subnets.tailscale.cidr];
|
||||
subnets = [ myData.subnets.tailscale.cidr ];
|
||||
tcp = with myData.ports; [
|
||||
80
|
||||
443
|
||||
@@ -181,18 +188,20 @@
subnetCIDR = myData.subnets.tailscale.cidr;
|
||||
};
|
||||
|
||||
nsd-acme = let
|
||||
accountKey = config.age.secrets.letsencrypt-account-key.path;
|
||||
in {
|
||||
enable = true;
|
||||
zones = {
|
||||
"irc.jakstys.lt".accountKey = accountKey;
|
||||
"hdd.jakstys.lt".accountKey = accountKey;
|
||||
"hass.jakstys.lt".accountKey = accountKey;
|
||||
"grafana.jakstys.lt".accountKey = accountKey;
|
||||
"bitwarden.jakstys.lt".accountKey = accountKey;
|
||||
nsd-acme =
|
||||
let
|
||||
accountKey = config.age.secrets.letsencrypt-account-key.path;
|
||||
in
|
||||
{
|
||||
enable = true;
|
||||
zones = {
|
||||
"irc.jakstys.lt".accountKey = accountKey;
|
||||
"hdd.jakstys.lt".accountKey = accountKey;
|
||||
"hass.jakstys.lt".accountKey = accountKey;
|
||||
"grafana.jakstys.lt".accountKey = accountKey;
|
||||
"bitwarden.jakstys.lt".accountKey = accountKey;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
deployerbot = {
|
||||
follower = {
|
||||
@@ -202,7 +211,7 @@
];
|
||||
|
||||
enable = true;
|
||||
sshAllowSubnets = [myData.subnets.tailscale.sshPattern];
|
||||
sshAllowSubnets = [ myData.subnets.tailscale.sshPattern ];
|
||||
uidgid = myData.uidgid.updaterbot-deployee;
|
||||
};
|
||||
};
|
||||
@@ -228,25 +237,29 @@
|
||||
zfsunlock = {
|
||||
enable = true;
|
||||
targets."fra1-a.servers.jakst" = let
|
||||
host = myData.hosts."fra1-a.servers.jakst";
|
||||
in {
|
||||
sshEndpoint = host.publicIP;
|
||||
pingEndpoint = host.jakstIP;
|
||||
remotePubkey = host.initrdPubKey;
|
||||
pwFile = config.age.secrets.zfs-passphrase-fra1-a.path;
|
||||
startAt = "*-*-* *:00/5:00";
|
||||
};
|
||||
targets."fra1-a.servers.jakst" =
|
||||
let
|
||||
host = myData.hosts."fra1-a.servers.jakst";
|
||||
in
|
||||
{
|
||||
sshEndpoint = host.publicIP;
|
||||
pingEndpoint = host.jakstIP;
|
||||
remotePubkey = host.initrdPubKey;
|
||||
pwFile = config.age.secrets.zfs-passphrase-fra1-a.path;
|
||||
startAt = "*-*-* *:00/5:00";
|
||||
};
|
||||
};
|
||||
|
||||
remote-builder.client = let
|
||||
host = myData.hosts."fra1-a.servers.jakst";
|
||||
in {
|
||||
enable = true;
|
||||
inherit (host) system supportedFeatures;
|
||||
hostName = host.jakstIP;
|
||||
sshKey = "/etc/ssh/ssh_host_ed25519_key";
|
||||
};
|
||||
remote-builder.client =
|
||||
let
|
||||
host = myData.hosts."fra1-a.servers.jakst";
|
||||
in
|
||||
{
|
||||
enable = true;
|
||||
inherit (host) system supportedFeatures;
|
||||
hostName = host.jakstIP;
|
||||
sshKey = "/etc/ssh/ssh_host_ed25519_key";
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
@@ -301,25 +314,27 @@
"www.jakstys.lt".extraConfig = ''
|
||||
redir https://jakstys.lt
|
||||
'';
|
||||
"irc.jakstys.lt".extraConfig = let
|
||||
gamja = pkgs.compressDrvWeb (pkgs.gamja.override {
|
||||
gamjaConfig = {
|
||||
server = {
|
||||
url = "irc.jakstys.lt:6698";
|
||||
nick = "motiejus";
|
||||
"irc.jakstys.lt".extraConfig =
|
||||
let
|
||||
gamja = pkgs.compressDrvWeb (pkgs.gamja.override {
|
||||
gamjaConfig = {
|
||||
server = {
|
||||
url = "irc.jakstys.lt:6698";
|
||||
nick = "motiejus";
|
||||
};
|
||||
};
|
||||
};
|
||||
}) {};
|
||||
in ''
|
||||
@denied not remote_ip ${myData.subnets.tailscale.cidr}
|
||||
abort @denied
|
||||
tls {$CREDENTIALS_DIRECTORY}/irc.jakstys.lt-cert.pem {$CREDENTIALS_DIRECTORY}/irc.jakstys.lt-key.pem
|
||||
}) { };
|
||||
in
|
||||
''
|
||||
@denied not remote_ip ${myData.subnets.tailscale.cidr}
|
||||
abort @denied
|
||||
tls {$CREDENTIALS_DIRECTORY}/irc.jakstys.lt-cert.pem {$CREDENTIALS_DIRECTORY}/irc.jakstys.lt-key.pem
|
||||
|
||||
root * ${gamja}
|
||||
file_server browse {
|
||||
precompressed br gzip
|
||||
}
|
||||
'';
|
||||
root * ${gamja}
|
||||
file_server browse {
|
||||
precompressed br gzip
|
||||
}
|
||||
'';
|
||||
"dl.jakstys.lt".extraConfig = ''
|
||||
root * /var/www/dl
|
||||
file_server browse {
|
||||
@@ -452,47 +467,52 @@
evaluation_interval = "1m";
|
||||
};
|
||||
|
||||
scrapeConfigs = let
|
||||
port = builtins.toString myData.ports.exporters.node;
|
||||
in [
|
||||
{
|
||||
job_name = "prometheus";
|
||||
static_configs = [{targets = ["127.0.0.1:${toString myData.ports.prometheus}"];}];
|
||||
}
|
||||
{
|
||||
job_name = "caddy";
|
||||
static_configs = [{targets = ["127.0.0.1:${toString myData.ports.exporters.caddy}"];}];
|
||||
}
|
||||
{
|
||||
job_name = "${config.networking.hostName}.${config.networking.domain}";
|
||||
static_configs = [{targets = ["127.0.0.1:${port}"];}];
|
||||
}
|
||||
{
|
||||
job_name = "fra1-a.servers.jakst";
|
||||
static_configs = [{targets = ["${myData.hosts."fra1-a.servers.jakst".jakstIP}:${port}"];}];
|
||||
}
|
||||
{
|
||||
job_name = "vno3-rp3b.servers.jakst";
|
||||
static_configs = [{targets = ["${myData.hosts."vno3-rp3b.servers.jakst".jakstIP}:${port}"];}];
|
||||
}
|
||||
{
|
||||
job_name = "fwminex.motiejus.jakst";
|
||||
static_configs = [{targets = ["${myData.hosts."fwminex.motiejus.jakst".jakstIP}:${port}"];}];
|
||||
}
|
||||
{
|
||||
job_name = "mtworx.motiejus.jakst";
|
||||
static_configs = [{targets = ["${myData.hosts."mtworx.motiejus.jakst".jakstIP}:${port}"];}];
|
||||
}
|
||||
{
|
||||
job_name = "vno1-vinc.vincentas.jakst";
|
||||
static_configs = [{targets = ["${myData.hosts."vno1-vinc.vincentas.jakst".jakstIP}:9100"];}];
|
||||
}
|
||||
];
|
||||
scrapeConfigs =
|
||||
let
|
||||
port = builtins.toString myData.ports.exporters.node;
|
||||
in
|
||||
[
|
||||
{
|
||||
job_name = "prometheus";
|
||||
static_configs = [ { targets = [ "127.0.0.1:${toString myData.ports.prometheus}" ]; } ];
|
||||
}
|
||||
{
|
||||
job_name = "caddy";
|
||||
static_configs = [ { targets = [ "127.0.0.1:${toString myData.ports.exporters.caddy}" ]; } ];
|
||||
}
|
||||
{
|
||||
job_name = "${config.networking.hostName}.${config.networking.domain}";
|
||||
static_configs = [ { targets = [ "127.0.0.1:${port}" ]; } ];
|
||||
}
|
||||
{
|
||||
job_name = "fra1-a.servers.jakst";
|
||||
static_configs = [ { targets = [ "${myData.hosts."fra1-a.servers.jakst".jakstIP}:${port}" ]; } ];
|
||||
}
|
||||
{
|
||||
job_name = "vno3-rp3b.servers.jakst";
|
||||
static_configs = [ { targets = [ "${myData.hosts."vno3-rp3b.servers.jakst".jakstIP}:${port}" ]; } ];
|
||||
}
|
||||
{
|
||||
job_name = "fwminex.motiejus.jakst";
|
||||
static_configs = [ { targets = [ "${myData.hosts."fwminex.motiejus.jakst".jakstIP}:${port}" ]; } ];
|
||||
}
|
||||
{
|
||||
job_name = "mtworx.motiejus.jakst";
|
||||
static_configs = [ { targets = [ "${myData.hosts."mtworx.motiejus.jakst".jakstIP}:${port}" ]; } ];
|
||||
}
|
||||
{
|
||||
job_name = "vno1-vinc.vincentas.jakst";
|
||||
static_configs = [ { targets = [ "${myData.hosts."vno1-vinc.vincentas.jakst".jakstIP}:9100" ]; } ];
|
||||
}
|
||||
];
|
||||
};
|
||||
|
||||
nsd = {
|
||||
enable = true;
|
||||
interfaces = ["0.0.0.0" "::"];
|
||||
interfaces = [
|
||||
"0.0.0.0"
|
||||
"::"
|
||||
];
|
||||
zones = {
|
||||
"jakstys.lt.".data = myData.jakstysLTZone;
|
||||
"11sync.net.".data = myData.e11syncZone;
|
||||
@@ -509,7 +529,7 @@
tlsCertificate = "/run/soju/cert.pem";
|
||||
tlsCertificateKey = "/run/soju/key.pem";
|
||||
hostName = "irc.jakstys.lt";
|
||||
httpOrigins = ["*"];
|
||||
httpOrigins = [ "*" ];
|
||||
extraConfig = ''
|
||||
message-store fs /var/lib/soju
|
||||
'';
|
||||
@@ -544,7 +564,7 @@
enable = true;
|
||||
openFirewall = true;
|
||||
settings = {
|
||||
media_dir = ["/home/motiejus/video"];
|
||||
media_dir = [ "/home/motiejus/video" ];
|
||||
friendly_name = "vno1-oh2";
|
||||
inotify = "yes";
|
||||
};
|
||||
@@ -557,62 +577,64 @@
};
|
||||
|
||||
systemd.services = {
|
||||
caddy = let
|
||||
irc = config.mj.services.nsd-acme.zones."irc.jakstys.lt";
|
||||
hass = config.mj.services.nsd-acme.zones."hass.jakstys.lt";
|
||||
grafana = config.mj.services.nsd-acme.zones."grafana.jakstys.lt";
|
||||
bitwarden = config.mj.services.nsd-acme.zones."bitwarden.jakstys.lt";
|
||||
in {
|
||||
serviceConfig.LoadCredential = [
|
||||
"irc.jakstys.lt-cert.pem:${irc.certFile}"
|
||||
"irc.jakstys.lt-key.pem:${irc.keyFile}"
|
||||
"hass.jakstys.lt-cert.pem:${hass.certFile}"
|
||||
"hass.jakstys.lt-key.pem:${hass.keyFile}"
|
||||
"grafana.jakstys.lt-cert.pem:${grafana.certFile}"
|
||||
"grafana.jakstys.lt-key.pem:${grafana.keyFile}"
|
||||
"bitwarden.jakstys.lt-cert.pem:${bitwarden.certFile}"
|
||||
"bitwarden.jakstys.lt-key.pem:${bitwarden.keyFile}"
|
||||
];
|
||||
after = [
|
||||
"nsd-acme-irc.jakstys.lt.service"
|
||||
"nsd-acme-hass.jakstys.lt.service"
|
||||
"nsd-acme-grafana.jakstys.lt.service"
|
||||
"nsd-acme-bitwarden.jakstys.lt.service"
|
||||
];
|
||||
requires = [
|
||||
"nsd-acme-irc.jakstys.lt.service"
|
||||
"nsd-acme-hass.jakstys.lt.service"
|
||||
"nsd-acme-grafana.jakstys.lt.service"
|
||||
"nsd-acme-bitwarden.jakstys.lt.service"
|
||||
];
|
||||
};
|
||||
|
||||
soju = let
|
||||
acme = config.mj.services.nsd-acme.zones."irc.jakstys.lt";
|
||||
in {
|
||||
serviceConfig = {
|
||||
RuntimeDirectory = "soju";
|
||||
LoadCredential = [
|
||||
"irc.jakstys.lt-cert.pem:${acme.certFile}"
|
||||
"irc.jakstys.lt-key.pem:${acme.keyFile}"
|
||||
caddy =
|
||||
let
|
||||
irc = config.mj.services.nsd-acme.zones."irc.jakstys.lt";
|
||||
hass = config.mj.services.nsd-acme.zones."hass.jakstys.lt";
|
||||
grafana = config.mj.services.nsd-acme.zones."grafana.jakstys.lt";
|
||||
bitwarden = config.mj.services.nsd-acme.zones."bitwarden.jakstys.lt";
|
||||
in
|
||||
{
|
||||
serviceConfig.LoadCredential = [
|
||||
"irc.jakstys.lt-cert.pem:${irc.certFile}"
|
||||
"irc.jakstys.lt-key.pem:${irc.keyFile}"
|
||||
"hass.jakstys.lt-cert.pem:${hass.certFile}"
|
||||
"hass.jakstys.lt-key.pem:${hass.keyFile}"
|
||||
"grafana.jakstys.lt-cert.pem:${grafana.certFile}"
|
||||
"grafana.jakstys.lt-key.pem:${grafana.keyFile}"
|
||||
"bitwarden.jakstys.lt-cert.pem:${bitwarden.certFile}"
|
||||
"bitwarden.jakstys.lt-key.pem:${bitwarden.keyFile}"
|
||||
];
|
||||
after = [
|
||||
"nsd-acme-irc.jakstys.lt.service"
|
||||
"nsd-acme-hass.jakstys.lt.service"
|
||||
"nsd-acme-grafana.jakstys.lt.service"
|
||||
"nsd-acme-bitwarden.jakstys.lt.service"
|
||||
];
|
||||
requires = [
|
||||
"nsd-acme-irc.jakstys.lt.service"
|
||||
"nsd-acme-hass.jakstys.lt.service"
|
||||
"nsd-acme-grafana.jakstys.lt.service"
|
||||
"nsd-acme-bitwarden.jakstys.lt.service"
|
||||
];
|
||||
};
|
||||
preStart = ''
|
||||
ln -sf $CREDENTIALS_DIRECTORY/irc.jakstys.lt-cert.pem /run/soju/cert.pem
|
||||
ln -sf $CREDENTIALS_DIRECTORY/irc.jakstys.lt-key.pem /run/soju/key.pem
|
||||
'';
|
||||
after = ["nsd-acme-irc.jakstys.lt.service"];
|
||||
requires = ["nsd-acme-irc.jakstys.lt.service"];
|
||||
};
|
||||
|
||||
soju =
|
||||
let
|
||||
acme = config.mj.services.nsd-acme.zones."irc.jakstys.lt";
|
||||
in
|
||||
{
|
||||
serviceConfig = {
|
||||
RuntimeDirectory = "soju";
|
||||
LoadCredential = [
|
||||
"irc.jakstys.lt-cert.pem:${acme.certFile}"
|
||||
"irc.jakstys.lt-key.pem:${acme.keyFile}"
|
||||
];
|
||||
};
|
||||
preStart = ''
|
||||
ln -sf $CREDENTIALS_DIRECTORY/irc.jakstys.lt-cert.pem /run/soju/cert.pem
|
||||
ln -sf $CREDENTIALS_DIRECTORY/irc.jakstys.lt-key.pem /run/soju/key.pem
|
||||
'';
|
||||
after = [ "nsd-acme-irc.jakstys.lt.service" ];
|
||||
requires = [ "nsd-acme-irc.jakstys.lt.service" ];
|
||||
};
|
||||
|
||||
vaultwarden = {
|
||||
preStart = "ln -sf $CREDENTIALS_DIRECTORY/secrets.env /run/vaultwarden/secrets.env";
|
||||
serviceConfig = {
|
||||
EnvironmentFile = ["-/run/vaultwarden/secrets.env"];
|
||||
EnvironmentFile = [ "-/run/vaultwarden/secrets.env" ];
|
||||
RuntimeDirectory = "vaultwarden";
|
||||
LoadCredential = [
|
||||
"secrets.env:${config.age.secrets.vaultwarden-secrets-env.path}"
|
||||
];
|
||||
LoadCredential = [ "secrets.env:${config.age.secrets.vaultwarden-secrets-env.path}" ];
|
||||
};
|
||||
};
|
||||
|
||||
@@ -621,13 +643,13 @@
serviceConfig = {
|
||||
LogsDirectory = "grafana";
|
||||
RuntimeDirectory = "grafana";
|
||||
LoadCredential = ["oidc:${config.age.secrets.grafana-oidc.path}"];
|
||||
LoadCredential = [ "oidc:${config.age.secrets.grafana-oidc.path}" ];
|
||||
};
|
||||
};
|
||||
|
||||
cert-watcher = {
|
||||
description = "Restart caddy when tls keys/certs change";
|
||||
wantedBy = ["multi-user.target"];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
unitConfig = {
|
||||
StartLimitIntervalSec = 10;
|
||||
StartLimitBurst = 5;
|
||||
@@ -642,7 +664,7 @@
serviceConfig = {
|
||||
ProtectSystem = "strict";
|
||||
ProtectHome = "tmpfs";
|
||||
BindReadOnlyPaths = ["/home/motiejus/video"];
|
||||
BindReadOnlyPaths = [ "/home/motiejus/video" ];
|
||||
};
|
||||
};
|
||||
|
||||
@@ -659,7 +681,7 @@
|
||||
systemd.paths = {
|
||||
cert-watcher = {
|
||||
wantedBy = ["multi-user.target"];
|
||||
wantedBy = [ "multi-user.target" ];
|
||||
pathConfig = {
|
||||
PathChanged = [
|
||||
config.mj.services.nsd-acme.zones."irc.jakstys.lt".certFile
|
||||
@@ -672,15 +694,17 @@
};
|
||||
};
|
||||
|
||||
users = let
|
||||
uidgid = myData.uidgid.photoprism;
|
||||
in {
|
||||
groups.photoprism.gid = uidgid;
|
||||
users.photoprism = {
|
||||
group = "photoprism";
|
||||
uid = uidgid;
|
||||
users =
|
||||
let
|
||||
uidgid = myData.uidgid.photoprism;
|
||||
in
|
||||
{
|
||||
groups.photoprism.gid = uidgid;
|
||||
users.photoprism = {
|
||||
group = "photoprism";
|
||||
uid = uidgid;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
environment.systemPackages = with pkgs; [
|
||||
yt-dlp
|
||||
@@ -694,7 +718,7 @@
hostName = "vno1-oh2";
|
||||
domain = "servers.jakst";
|
||||
defaultGateway = "192.168.189.4";
|
||||
nameservers = ["192.168.189.4"];
|
||||
nameservers = [ "192.168.189.4" ];
|
||||
interfaces.enp0s21f0u2.ipv4.addresses = [
|
||||
{
|
||||
address = "192.168.189.1";
|
||||
@@ -702,7 +726,11 @@
}
|
||||
];
|
||||
firewall = {
|
||||
allowedUDPPorts = [53 80 443];
|
||||
allowedUDPPorts = [
|
||||
53
|
||||
80
|
||||
443
|
||||
];
|
||||
allowedTCPPorts = [
|
||||
53
|
||||
80
|
||||
|
@@ -6,24 +6,26 @@
pkgs,
|
||||
myData,
|
||||
...
|
||||
}: {
|
||||
imports = [
|
||||
../../modules/profiles/sdcard
|
||||
];
|
||||
}:
|
||||
{
|
||||
imports = [ ../../modules/profiles/sdcard ];
|
||||
|
||||
boot = {
|
||||
initrd = {
|
||||
availableKernelModules = ["usbhid"];
|
||||
kernelModules = ["vc4" "bcm2835_dma"];
|
||||
availableKernelModules = [ "usbhid" ];
|
||||
kernelModules = [
|
||||
"vc4"
|
||||
"bcm2835_dma"
|
||||
];
|
||||
};
|
||||
loader = {
|
||||
grub.enable = false;
|
||||
generic-extlinux-compatible.enable = true;
|
||||
};
|
||||
|
||||
kernelModules = [];
|
||||
extraModulePackages = [];
|
||||
supportedFilesystems = ["zfs"];
|
||||
kernelModules = [ ];
|
||||
extraModulePackages = [ ];
|
||||
supportedFilesystems = [ "zfs" ];
|
||||
zfs.forceImportRoot = false;
|
||||
};
|
||||
|
||||
@@ -48,7 +50,7 @@
};
|
||||
};
|
||||
|
||||
swapDevices = [];
|
||||
swapDevices = [ ];
|
||||
|
||||
mj = {
|
||||
stateVersion = "23.05";
|
||||
@@ -69,7 +71,7 @@
|
||||
snapshot = {
|
||||
enable = true;
|
||||
mountpoints = ["/data/shared"];
|
||||
mountpoints = [ "/data/shared" ];
|
||||
};
|
||||
};
|
||||
|
||||
@@ -100,7 +102,7 @@
];
|
||||
|
||||
enable = true;
|
||||
sshAllowSubnets = [myData.subnets.tailscale.sshPattern];
|
||||
sshAllowSubnets = [ myData.subnets.tailscale.sshPattern ];
|
||||
uidgid = myData.uidgid.updaterbot-deployee;
|
||||
};
|
||||
};
|
||||
@@ -108,7 +110,7 @@
jakstpub = {
|
||||
enable = true;
|
||||
dataDir = "/data/shared";
|
||||
requires = ["data-shared.mount"];
|
||||
requires = [ "data-shared.mount" ];
|
||||
uidgid = myData.uidgid.jakstpub;
|
||||
hostname = "hdd.jakstys.lt";
|
||||
};
|
||||
|
@@ -3,11 +3,25 @@
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.zfs-root.boot;
|
||||
inherit (lib) mkIf types mkDefault mkOption mkMerge strings;
|
||||
inherit (builtins) head toString map tail;
|
||||
in {
|
||||
inherit (lib)
|
||||
mkIf
|
||||
types
|
||||
mkDefault
|
||||
mkOption
|
||||
mkMerge
|
||||
strings
|
||||
;
|
||||
inherit (builtins)
|
||||
head
|
||||
toString
|
||||
map
|
||||
tail
|
||||
;
|
||||
in
|
||||
{
|
||||
options.zfs-root.boot = {
|
||||
enable = mkOption {
|
||||
description = "Enable root on ZFS support";
|
||||
@@ -17,9 +31,10 @@ in {
devNodes = mkOption {
|
||||
description = "Specify where to discover ZFS pools";
|
||||
type = types.str;
|
||||
apply = x:
|
||||
assert (strings.hasSuffix "/" x
|
||||
|| abort "devNodes '${x}' must have trailing slash!"); x;
|
||||
apply =
|
||||
x:
|
||||
assert (strings.hasSuffix "/" x || abort "devNodes '${x}' must have trailing slash!");
|
||||
x;
|
||||
default = "/dev/disk/by-id/";
|
||||
};
|
||||
bootDevices = mkOption {
|
||||
@@ -28,11 +43,15 @@ in {
};
|
||||
availableKernelModules = mkOption {
|
||||
type = types.nonEmptyListOf types.str;
|
||||
default = ["uas" "nvme" "ahci"];
|
||||
default = [
|
||||
"uas"
|
||||
"nvme"
|
||||
"ahci"
|
||||
];
|
||||
};
|
||||
kernelParams = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
default = [ ];
|
||||
};
|
||||
immutable = mkOption {
|
||||
description = "Enable root on ZFS immutable root support";
|
||||
@@ -62,7 +81,7 @@ in {
};
|
||||
authorizedKeys = mkOption {
|
||||
type = types.listOf types.str;
|
||||
default = [];
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
};
|
||||
@@ -77,7 +96,9 @@ in {
};
|
||||
}
|
||||
(mkIf (!cfg.immutable) {
|
||||
zfs-root.fileSystems.datasets = {"rpool/nixos/root" = "/";};
|
||||
zfs-root.fileSystems.datasets = {
|
||||
"rpool/nixos/root" = "/";
|
||||
};
|
||||
})
|
||||
(mkIf cfg.immutable {
|
||||
zfs-root.fileSystems = {
|
||||
@@ -100,32 +121,25 @@ in {
})
|
||||
{
|
||||
zfs-root.fileSystems = {
|
||||
efiSystemPartitions =
|
||||
map (diskName: diskName + cfg.partitionScheme.efiBoot)
|
||||
cfg.bootDevices;
|
||||
efiSystemPartitions = map (diskName: diskName + cfg.partitionScheme.efiBoot) cfg.bootDevices;
|
||||
swapPartitions =
|
||||
if cfg.partitionScheme ? swap
|
||||
then map (diskName: diskName + cfg.partitionScheme.swap) cfg.bootDevices
|
||||
else [];
|
||||
if cfg.partitionScheme ? swap then
|
||||
map (diskName: diskName + cfg.partitionScheme.swap) cfg.bootDevices
|
||||
else
|
||||
[ ];
|
||||
};
|
||||
boot = {
|
||||
initrd.availableKernelModules = cfg.availableKernelModules;
|
||||
kernelParams = cfg.kernelParams;
|
||||
supportedFilesystems = ["zfs"];
|
||||
supportedFilesystems = [ "zfs" ];
|
||||
zfs = {
|
||||
devNodes = cfg.devNodes;
|
||||
forceImportRoot = mkDefault false;
|
||||
};
|
||||
loader = {
|
||||
efi = {
|
||||
canTouchEfiVariables =
|
||||
if cfg.removableEfi
|
||||
then false
|
||||
else true;
|
||||
efiSysMountPoint =
|
||||
"/boot/efis/"
|
||||
+ (head cfg.bootDevices)
|
||||
+ cfg.partitionScheme.efiBoot;
|
||||
canTouchEfiVariables = if cfg.removableEfi then false else true;
|
||||
efiSysMountPoint = "/boot/efis/" + (head cfg.bootDevices) + cfg.partitionScheme.efiBoot;
|
||||
};
|
||||
generationsDir.copyKernels = true;
|
||||
grub = {
|
||||
@@ -135,11 +149,13 @@ in {
copyKernels = true;
|
||||
efiSupport = true;
|
||||
zfsSupport = true;
|
||||
extraInstallCommands = toString (map (diskName: ''
|
||||
set -x
|
||||
${pkgs.coreutils-full}/bin/cp -r ${config.boot.loader.efi.efiSysMountPoint}/EFI /boot/efis/${diskName}${cfg.partitionScheme.efiBoot}
|
||||
set +x
|
||||
'') (tail cfg.bootDevices));
|
||||
extraInstallCommands = toString (
|
||||
map (diskName: ''
|
||||
set -x
|
||||
${pkgs.coreutils-full}/bin/cp -r ${config.boot.loader.efi.efiSysMountPoint}/EFI /boot/efis/${diskName}${cfg.partitionScheme.efiBoot}
|
||||
set +x
|
||||
'') (tail cfg.bootDevices)
|
||||
);
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@@ -3,20 +3,23 @@
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.mj.base.btrfssnapshot;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.mj.base.btrfssnapshot = {
|
||||
enable = lib.mkEnableOption "Enable btrfs snapshots";
|
||||
|
||||
subvolumes = lib.mkOption {
|
||||
default = {};
|
||||
type = with lib.types;
|
||||
default = { };
|
||||
type =
|
||||
with lib.types;
|
||||
attrsOf (submodule {
|
||||
options = {
|
||||
label = lib.mkOption {type = str;};
|
||||
keep = lib.mkOption {type = int;};
|
||||
refreshInterval = lib.mkOption {type = str;};
|
||||
label = lib.mkOption { type = str; };
|
||||
keep = lib.mkOption { type = int; };
|
||||
refreshInterval = lib.mkOption { type = str; };
|
||||
};
|
||||
});
|
||||
};
|
||||
@@ -24,30 +27,22 @@ in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
systemd = {
|
||||
services =
|
||||
lib.mapAttrs'
|
||||
(
|
||||
subvolume: params:
|
||||
lib.nameValuePair
|
||||
"btrfs-snapshot-${lib.strings.sanitizeDerivationName subvolume}"
|
||||
{
|
||||
description = "${params.label} btrfs snapshot for ${subvolume} (keep ${params.keep})";
|
||||
serviceConfig.ExecStart = "${pkgs.btrfs-auto-snapshot} --verbose --label=${params.label} --keep=${params.keep} ${subvolume}";
|
||||
}
|
||||
);
|
||||
services = lib.mapAttrs' (
|
||||
subvolume: params:
|
||||
lib.nameValuePair "btrfs-snapshot-${lib.strings.sanitizeDerivationName subvolume}" {
|
||||
description = "${params.label} btrfs snapshot for ${subvolume} (keep ${params.keep})";
|
||||
serviceConfig.ExecStart = "${pkgs.btrfs-auto-snapshot} --verbose --label=${params.label} --keep=${params.keep} ${subvolume}";
|
||||
}
|
||||
);
|
||||
|
||||
timers =
|
||||
lib.mapAttrs'
|
||||
(
|
||||
subvolume: params:
|
||||
lib.nameValuePair
|
||||
"btrfs-snapshot-${lib.strings.sanitizeDerivationName subvolume}"
|
||||
{
|
||||
description = "${params.label} btrfs snapshot for ${subvolume}";
|
||||
wantedBy = ["timers.target"];
|
||||
timerConfig.OnCalendar = params.refreshInterval;
|
||||
}
|
||||
);
|
||||
timers = lib.mapAttrs' (
|
||||
subvolume: params:
|
||||
lib.nameValuePair "btrfs-snapshot-${lib.strings.sanitizeDerivationName subvolume}" {
|
||||
description = "${params.label} btrfs snapshot for ${subvolume}";
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig.OnCalendar = params.refreshInterval;
|
||||
}
|
||||
);
|
||||
};
|
||||
};
|
||||
}
|
||||
|
@@ -4,9 +4,11 @@
pkgs,
|
||||
myData,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.mj;
|
||||
in {
|
||||
in
|
||||
{
|
||||
imports = [
|
||||
./boot
|
||||
./btrfssnapshot
|
||||
@@ -32,7 +34,7 @@ in {
description = "Time zone for this system";
|
||||
};
|
||||
|
||||
username = lib.mkOption {type = str;};
|
||||
username = lib.mkOption { type = str; };
|
||||
|
||||
skipPerf = lib.mkOption {
|
||||
type = bool;
|
||||
@@ -51,7 +53,7 @@ in {
|
||||
kernelPackages = lib.mkDefault pkgs.linuxPackages;
|
||||
|
||||
supportedFilesystems = ["btrfs"];
|
||||
supportedFilesystems = [ "btrfs" ];
|
||||
};
|
||||
|
||||
nixpkgs.config.allowUnfree = true;
|
||||
@@ -62,15 +64,15 @@ in {
|
||||
mj.services.friendlyport.ports = [
|
||||
{
|
||||
subnets = [myData.subnets.tailscale.cidr];
|
||||
tcp = [config.services.iperf3.port];
|
||||
udp = [config.services.iperf3.port];
|
||||
subnets = [ myData.subnets.tailscale.cidr ];
|
||||
tcp = [ config.services.iperf3.port ];
|
||||
udp = [ config.services.iperf3.port ];
|
||||
}
|
||||
];
|
||||
|
||||
i18n = {
|
||||
defaultLocale = "en_US.UTF-8";
|
||||
supportedLocales = ["all"];
|
||||
supportedLocales = [ "all" ];
|
||||
};
|
||||
|
||||
nix = {
|
||||
@@ -80,8 +82,11 @@ in {
options = "--delete-older-than 14d";
|
||||
};
|
||||
settings = {
|
||||
experimental-features = ["nix-command" "flakes"];
|
||||
trusted-users = [cfg.username];
|
||||
experimental-features = [
|
||||
"nix-command"
|
||||
"flakes"
|
||||
];
|
||||
trusted-users = [ cfg.username ];
|
||||
};
|
||||
};
|
||||
|
||||
@@ -95,7 +100,8 @@ in {
};
|
||||
|
||||
environment = {
|
||||
systemPackages = with pkgs;
|
||||
systemPackages =
|
||||
with pkgs;
|
||||
lib.mkMerge [
|
||||
[
|
||||
bc
|
||||
@@ -165,7 +171,6 @@ in {
smartmontools
|
||||
unixtools.xxd
|
||||
bcachefs-tools
|
||||
nixfmt-rfc-style
|
||||
sqlite-interactive
|
||||
|
||||
# networking
|
||||
@@ -204,7 +209,7 @@ in {
config.boot.kernelPackages.cpupower
|
||||
config.boot.kernelPackages.vm-tools
|
||||
]
|
||||
(lib.mkIf (!cfg.skipPerf) [config.boot.kernelPackages.perf])
|
||||
(lib.mkIf (!cfg.skipPerf) [ config.boot.kernelPackages.perf ])
|
||||
];
|
||||
};
|
||||
|
||||
@ -233,7 +238,7 @@ in {
|
||||
|
||||
chrony = {
|
||||
enable = true;
|
||||
servers = ["time.cloudflare.com"];
|
||||
servers = [ "time.cloudflare.com" ];
|
||||
};
|
||||
|
||||
locate = {
|
||||
|
@ -1,50 +1,60 @@
{
config,
lib,
...
}: let
{ config, lib, ... }:
let
cfg = config.zfs-root.fileSystems;
inherit (lib) types mkDefault mkOption mkMerge mapAttrsToList;
in {
inherit (lib)
types
mkDefault
mkOption
mkMerge
mapAttrsToList
;
in
{
options.zfs-root.fileSystems = {
datasets = mkOption {
description = "Set mountpoint for datasets";
type = types.attrsOf types.str;
default = {};
default = { };
};
bindmounts = mkOption {
description = "Set mountpoint for bindmounts";
type = types.attrsOf types.str;
default = {};
default = { };
};
efiSystemPartitions = mkOption {
description = "Set mountpoint for efi system partitions";
type = types.listOf types.str;
default = [];
default = [ ];
};
swapPartitions = mkOption {
description = "Set swap partitions";
type = types.listOf types.str;
default = [];
default = [ ];
};
};
config.fileSystems = mkMerge (mapAttrsToList (dataset: mountpoint: {
config.fileSystems = mkMerge (
mapAttrsToList (dataset: mountpoint: {
"${mountpoint}" = {
device = "${dataset}";
fsType = "zfs";
options = ["X-mount.mkdir" "noatime"];
options = [
"X-mount.mkdir"
"noatime"
];
neededForBoot = true;
};
})
cfg.datasets
}) cfg.datasets
++ mapAttrsToList (bindsrc: mountpoint: {
"${mountpoint}" = {
device = "${bindsrc}";
fsType = "none";
options = ["bind" "X-mount.mkdir" "noatime"];
options = [
"bind"
"X-mount.mkdir"
"noatime"
];
};
})
cfg.bindmounts
}) cfg.bindmounts
++ map (esp: {
"/boot/efis/${esp}" = {
device = "${config.zfs-root.boot.devNodes}${esp}";
@ -58,15 +68,16 @@ in {
"X-mount.mkdir"
];
};
})
cfg.efiSystemPartitions);
config.swapDevices = mkDefault (map (swap: {
}) cfg.efiSystemPartitions
);
config.swapDevices = mkDefault (
map (swap: {
device = "${config.zfs-root.boot.devNodes}${swap}";
discardPolicy = mkDefault "both";
randomEncryption = {
enable = true;
allowDiscards = mkDefault true;
};
})
cfg.swapPartitions);
}) cfg.swapPartitions
);
}

@ -1,13 +1,10 @@
{ config, lib, ... }:
{
config,
lib,
...
}: {
options.mj.base.snapshot = {
enable = lib.mkEnableOption "Enable zfs snapshots";

mountpoints = lib.mkOption {
default = {};
default = { };
type = with lib.types; listOf str;
};
};
@ -21,23 +18,23 @@
autosnap = true;
autoprune = true;
};
extraArgs = ["--verbose"];
datasets = let
fs_zfs = lib.filterAttrs (_: v: v.fsType == "zfs") config.fileSystems;
mountpoint2fs =
builtins.listToAttrs
(map (mountpoint: {
extraArgs = [ "--verbose" ];
datasets =
let
fs_zfs = lib.filterAttrs (_: v: v.fsType == "zfs") config.fileSystems;
mountpoint2fs = builtins.listToAttrs (
map (mountpoint: {
name = mountpoint;
value = builtins.getAttr mountpoint fs_zfs;
})
config.mj.base.snapshot.mountpoints);
s_datasets =
lib.mapAttrs' (_mountpoint: fs: {
}) config.mj.base.snapshot.mountpoints
);
s_datasets = lib.mapAttrs' (_mountpoint: fs: {
name = fs.device;
value = {use_template = ["prod"];};
})
mountpoint2fs;
in
value = {
use_template = [ "prod" ];
};
}) mountpoint2fs;
in
s_datasets;
};
};

@ -3,7 +3,8 @@
lib,
myData,
...
}: {
}:
{
config = {
services.openssh = {
enable = true;
@ -13,9 +14,13 @@
};
};
programs.mosh.enable = true;
programs.ssh.knownHosts = let
sshAttrs = lib.genAttrs ["extraHostNames" "publicKey"] (_: null);
in
programs.ssh.knownHosts =
let
sshAttrs = lib.genAttrs [
"extraHostNames"
"publicKey"
] (_: null);
in
lib.mapAttrs (_name: builtins.intersectAttrs sshAttrs) myData.hosts;
};
}

@ -3,55 +3,58 @@
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
# TODO:
|
||||
# - assert postfix is configured
|
||||
options.mj.base.unitstatus = with lib.types; {
|
||||
enable = lib.mkEnableOption "alert by email on unit failure";
|
||||
email = lib.mkOption {type = str;};
|
||||
email = lib.mkOption { type = str; };
|
||||
units = lib.mkOption {
|
||||
type = listOf str;
|
||||
default = [];
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.mj.base.unitstatus.enable {
|
||||
systemd.services =
|
||||
{
|
||||
"unit-status-mail@" = let
|
||||
# https://northernlightlabs.se/2014-07-05/systemd-status-mail-on-unit-failure.html
|
||||
script = pkgs.writeShellScript "unit-status-mail" ''
|
||||
set -e
|
||||
MAILTO="${config.mj.base.unitstatus.email}"
|
||||
UNIT=$1
|
||||
EXTRA=""
|
||||
for e in "''${@:2}"; do
|
||||
EXTRA+="$e"$'\n'
|
||||
done
|
||||
UNITSTATUS=$(${pkgs.systemd}/bin/systemctl status "$UNIT" || :)
|
||||
${pkgs.postfix}/bin/sendmail $MAILTO <<EOF
|
||||
Subject: Status mail for unit: $UNIT
|
||||
"unit-status-mail@" =
|
||||
let
|
||||
# https://northernlightlabs.se/2014-07-05/systemd-status-mail-on-unit-failure.html
|
||||
script = pkgs.writeShellScript "unit-status-mail" ''
|
||||
set -e
|
||||
MAILTO="${config.mj.base.unitstatus.email}"
|
||||
UNIT=$1
|
||||
EXTRA=""
|
||||
for e in "''${@:2}"; do
|
||||
EXTRA+="$e"$'\n'
|
||||
done
|
||||
UNITSTATUS=$(${pkgs.systemd}/bin/systemctl status "$UNIT" || :)
|
||||
${pkgs.postfix}/bin/sendmail $MAILTO <<EOF
|
||||
Subject: Status mail for unit: $UNIT
|
||||
|
||||
Status report for unit: $UNIT
|
||||
$EXTRA
|
||||
Status report for unit: $UNIT
|
||||
$EXTRA
|
||||
|
||||
$UNITSTATUS
|
||||
EOF
|
||||
$UNITSTATUS
|
||||
EOF
|
||||
|
||||
echo -e "Status mail sent to: $MAILTO for unit: $UNIT"
|
||||
'';
|
||||
in {
|
||||
description = "Send an email on unit failure";
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecStart = ''${script} "%i" "Hostname: %H" "Machine ID: %m" "Boot ID: %b" '';
|
||||
echo -e "Status mail sent to: $MAILTO for unit: $UNIT"
|
||||
'';
|
||||
in
|
||||
{
|
||||
description = "Send an email on unit failure";
|
||||
serviceConfig = {
|
||||
Type = "simple";
|
||||
ExecStart = ''${script} "%i" "Hostname: %H" "Machine ID: %m" "Boot ID: %b" '';
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
// lib.genAttrs config.mj.base.unitstatus.units (
|
||||
unit: {
|
||||
unitConfig = {OnFailure = "unit-status-mail@${unit}.service";};
|
||||
}
|
||||
);
|
||||
// lib.genAttrs config.mj.base.unitstatus.units (unit: {
|
||||
unitConfig = {
|
||||
OnFailure = "unit-status-mail@${unit}.service";
|
||||
};
|
||||
});
|
||||
};
|
||||
}
|
||||
|
@ -3,7 +3,8 @@
|
||||
lib,
|
||||
myData,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.mj.base.users;
|
||||
props = with lib.types; {
|
||||
hashedPasswordFile = lib.mkOption {
|
||||
@ -21,10 +22,11 @@
|
||||
|
||||
extraGroups = lib.mkOption {
|
||||
type = listOf str;
|
||||
default = [];
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.mj.base.users = with lib.types; {
|
||||
enable = lib.mkEnableOption "enable motiejus and root";
|
||||
devTools = lib.mkOption {
|
||||
@ -44,33 +46,37 @@ in {
|
||||
mutableUsers = false;
|
||||
|
||||
users = {
|
||||
${config.mj.username} =
|
||||
{
|
||||
isNormalUser = true;
|
||||
extraGroups = ["wheel" "dialout" "video"] ++ cfg.user.extraGroups;
|
||||
uid = myData.uidgid.motiejus;
|
||||
openssh.authorizedKeys.keys = let
|
||||
${config.mj.username} = {
|
||||
isNormalUser = true;
|
||||
extraGroups = [
|
||||
"wheel"
|
||||
"dialout"
|
||||
"video"
|
||||
] ++ cfg.user.extraGroups;
|
||||
uid = myData.uidgid.motiejus;
|
||||
openssh.authorizedKeys.keys =
|
||||
let
|
||||
fqdn = "${config.networking.hostName}.${config.networking.domain}";
|
||||
in
|
||||
lib.mkMerge [
|
||||
[
|
||||
myData.people_pubkeys.motiejus
|
||||
myData.people_pubkeys.motiejus_work
|
||||
]
|
||||
lib.mkMerge [
|
||||
[
|
||||
myData.people_pubkeys.motiejus
|
||||
myData.people_pubkeys.motiejus_work
|
||||
]
|
||||
|
||||
(lib.mkIf (builtins.hasAttr fqdn myData.hosts) [
|
||||
("from=\"127.0.0.1,::1\" " + myData.hosts.${fqdn}.publicKey)
|
||||
])
|
||||
];
|
||||
}
|
||||
// lib.filterAttrs (n: v: n != "extraGroups" && v != null) cfg.user or {};
|
||||
(lib.mkIf (builtins.hasAttr fqdn myData.hosts) [
|
||||
(''from="127.0.0.1,::1" '' + myData.hosts.${fqdn}.publicKey)
|
||||
])
|
||||
];
|
||||
} // lib.filterAttrs (n: v: n != "extraGroups" && v != null) cfg.user or { };
|
||||
|
||||
root = lib.filterAttrs (_: v: v != null) cfg.root;
|
||||
};
|
||||
};
|
||||
|
||||
home-manager.useGlobalPkgs = true;
|
||||
home-manager.users.${config.mj.username} = {pkgs, ...}:
|
||||
home-manager.users.${config.mj.username} =
|
||||
{ pkgs, ... }:
|
||||
import ../../../shared/home {
|
||||
inherit lib;
|
||||
inherit pkgs;
|
||||
|
@ -1,8 +1,5 @@
|
||||
{ config, lib, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
options.mj.base.zfs = with lib.types; {
|
||||
enable = lib.mkEnableOption "Enable common zfs options";
|
||||
};
|
||||
@ -16,6 +13,6 @@
|
||||
expandOnBoot = "all";
|
||||
};
|
||||
|
||||
mj.base.unitstatus.units = ["zfs-scrub"];
|
||||
mj.base.unitstatus.units = [ "zfs-scrub" ];
|
||||
};
|
||||
}
|
||||
|
@ -3,7 +3,8 @@
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
mkPreHook = zfs_name: i: ''
|
||||
set -x
|
||||
sleep ${toString i}
|
||||
@ -15,99 +16,103 @@
|
||||
"$RUNTIME_DIRECTORY/snapshot"
|
||||
cd "$RUNTIME_DIRECTORY/snapshot"
|
||||
'';
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.mj.base.zfsborg = with lib.types; {
|
||||
enable = lib.mkEnableOption "backup zfs snapshots with borg";
|
||||
|
||||
passwordPath = lib.mkOption {type = str;};
|
||||
passwordPath = lib.mkOption { type = str; };
|
||||
sshKeyPath = lib.mkOption {
|
||||
type = nullOr path;
|
||||
default = null;
|
||||
};
|
||||
|
||||
dirs = lib.mkOption {
|
||||
default = {};
|
||||
default = { };
|
||||
type = listOf (submodule {
|
||||
options = {
|
||||
mountpoint = lib.mkOption {type = path;};
|
||||
repo = lib.mkOption {type = str;};
|
||||
paths = lib.mkOption {type = listOf str;};
|
||||
mountpoint = lib.mkOption { type = path; };
|
||||
repo = lib.mkOption { type = str; };
|
||||
paths = lib.mkOption { type = listOf str; };
|
||||
patterns = lib.mkOption {
|
||||
type = listOf str;
|
||||
default = [];
|
||||
default = [ ];
|
||||
};
|
||||
prune = lib.mkOption {
|
||||
type = anything;
|
||||
default = {};
|
||||
default = { };
|
||||
};
|
||||
backup_at = lib.mkOption {type = str;};
|
||||
backup_at = lib.mkOption { type = str; };
|
||||
};
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
config = with config.mj.base.zfsborg;
|
||||
config =
|
||||
with config.mj.base.zfsborg;
|
||||
lib.mkIf enable {
|
||||
systemd.services = lib.listToAttrs (lib.imap0 (
|
||||
i: attr: let
|
||||
systemd.services = lib.listToAttrs (
|
||||
lib.imap0 (
|
||||
i: attr:
|
||||
let
|
||||
svcName = "borgbackup-job-${lib.strings.sanitizeDerivationName attr.mountpoint}-${toString i}";
|
||||
in
|
||||
lib.nameValuePair svcName {
|
||||
serviceConfig.RuntimeDirectory = svcName;
|
||||
}
|
||||
)
|
||||
dirs);
|
||||
lib.nameValuePair svcName { serviceConfig.RuntimeDirectory = svcName; }
|
||||
) dirs
|
||||
);
|
||||
|
||||
services.borgbackup.jobs = builtins.listToAttrs (
|
||||
lib.imap0 (
|
||||
i: attrs: let
|
||||
i: attrs:
|
||||
let
|
||||
mountpoint = builtins.getAttr "mountpoint" attrs;
|
||||
fs = builtins.getAttr mountpoint config.fileSystems;
|
||||
in
|
||||
assert fs.fsType == "zfs";
|
||||
assert lib.assertMsg
|
||||
config.mj.base.unitstatus.enable
|
||||
assert fs.fsType == "zfs";
|
||||
assert lib.assertMsg config.mj.base.unitstatus.enable
|
||||
"config.mj.base.unitstatus.enable must be true";
|
||||
lib.nameValuePair
|
||||
"${lib.strings.sanitizeDerivationName mountpoint}-${toString i}"
|
||||
({
|
||||
inherit (attrs) repo paths;
|
||||
lib.nameValuePair "${lib.strings.sanitizeDerivationName mountpoint}-${toString i}" (
|
||||
{
|
||||
inherit (attrs) repo paths;
|
||||
|
||||
doInit = true;
|
||||
encryption = {
|
||||
mode = "repokey-blake2";
|
||||
passCommand = "cat ${config.mj.base.zfsborg.passwordPath}";
|
||||
};
|
||||
extraArgs = "--remote-path=borg1";
|
||||
compression = "auto,zstd,10";
|
||||
extraCreateArgs = "--chunker-params buzhash,10,23,16,4095";
|
||||
startAt = attrs.backup_at;
|
||||
preHook = mkPreHook fs.device i;
|
||||
prune.keep = {
|
||||
within = "1d";
|
||||
daily = 7;
|
||||
weekly = 4;
|
||||
monthly = 3;
|
||||
};
|
||||
environment =
|
||||
{
|
||||
BORG_HOST_ID = let
|
||||
h = config.networking;
|
||||
in "${h.hostName}.${h.domain}@${h.hostId}";
|
||||
}
|
||||
// lib.optionalAttrs (sshKeyPath != null) {
|
||||
BORG_RSH = ''ssh -i "${config.mj.base.zfsborg.sshKeyPath}"'';
|
||||
};
|
||||
doInit = true;
|
||||
encryption = {
|
||||
mode = "repokey-blake2";
|
||||
passCommand = "cat ${config.mj.base.zfsborg.passwordPath}";
|
||||
};
|
||||
extraArgs = "--remote-path=borg1";
|
||||
compression = "auto,zstd,10";
|
||||
extraCreateArgs = "--chunker-params buzhash,10,23,16,4095";
|
||||
startAt = attrs.backup_at;
|
||||
preHook = mkPreHook fs.device i;
|
||||
prune.keep = {
|
||||
within = "1d";
|
||||
daily = 7;
|
||||
weekly = 4;
|
||||
monthly = 3;
|
||||
};
|
||||
environment =
|
||||
{
|
||||
BORG_HOST_ID =
|
||||
let
|
||||
h = config.networking;
|
||||
in
|
||||
"${h.hostName}.${h.domain}@${h.hostId}";
|
||||
}
|
||||
// lib.optionalAttrs (attrs ? patterns) {inherit (attrs) patterns;}
|
||||
// lib.optionalAttrs (attrs ? prune) {inherit (attrs) prune;})
|
||||
)
|
||||
dirs
|
||||
// lib.optionalAttrs (sshKeyPath != null) {
|
||||
BORG_RSH = ''ssh -i "${config.mj.base.zfsborg.sshKeyPath}"'';
|
||||
};
|
||||
}
|
||||
// lib.optionalAttrs (attrs ? patterns) { inherit (attrs) patterns; }
|
||||
// lib.optionalAttrs (attrs ? prune) { inherit (attrs) prune; }
|
||||
)
|
||||
) dirs
|
||||
);
|
||||
|
||||
mj.base.unitstatus.units = let
|
||||
sanitized = map lib.strings.sanitizeDerivationName (lib.catAttrs "mountpoint" dirs);
|
||||
in
|
||||
mj.base.unitstatus.units =
|
||||
let
|
||||
sanitized = map lib.strings.sanitizeDerivationName (lib.catAttrs "mountpoint" dirs);
|
||||
in
|
||||
lib.imap0 (i: name: "borgbackup-job-${name}-${toString i}") sanitized;
|
||||
};
|
||||
}
|
||||
|
@ -1,4 +1,5 @@
|
||||
{...}: {
|
||||
{ ... }:
|
||||
{
|
||||
imports = [
|
||||
./base
|
||||
./services
|
||||
|
@ -1,4 +1,5 @@
|
||||
{config, ...}: let
|
||||
{ config, ... }:
|
||||
let
|
||||
eDP-1 = {
|
||||
mtworx = {
|
||||
fingerprint = "00ffffffffffff000e6f041400000000001e0104a51e1378033784a5544d9a240e515500000001010101010101010101010101010101353c80a070b02340302036002ebd10000018000000fd00303c4a4a0f010a202020202020000000fe0043534f542054330a2020202020000000fe004d4e453030374a41312d310a2000b5";
|
||||
@ -24,7 +25,8 @@
|
||||
fingerprint = "00ffffffffffff0010ac5d424c32313804200104b54028783a94f5af4f47a4240e5054a54b00d100d1c0b300a94081808100714f0101e26800a0a0402e603020360081912100001a000000ff003934585a3548330a2020202020000000fc0044454c4c205533303233450a20000000fd00384c1e711c010a20202020202001ee020319f14c90040302011112131f20212223097f0783010000023a801871382d40582c450081912100001e7e3900a080381f4030203a0081912100001a011d007251d01e206e28550081912100001ebf1600a08038134030203a0081912100001a00000000000000000000000000000000000000000000000000000000000052";
|
||||
mode = "2560x1600";
|
||||
};
|
||||
in {
|
||||
in
|
||||
{
|
||||
services.autorandr = {
|
||||
enable = true;
|
||||
matchEdid = true;
|
||||
|
@ -1,5 +1,6 @@
|
||||
{pkgs, ...}: {
|
||||
boot.supportedFilesystems = ["btrfs"];
|
||||
{ pkgs, ... }:
|
||||
{
|
||||
boot.supportedFilesystems = [ "btrfs" ];
|
||||
|
||||
environment.systemPackages = [pkgs.btrfs-auto-snapshot];
|
||||
environment.systemPackages = [ pkgs.btrfs-auto-snapshot ];
|
||||
}
|
||||
|
@ -3,13 +3,13 @@
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
username = config.mj.username;
|
||||
firefox =
|
||||
if (pkgs.stdenv.hostPlatform.system == "x86_64-linux")
|
||||
then pkgs.firefox-bin
|
||||
else pkgs.firefox;
|
||||
in {
|
||||
if (pkgs.stdenv.hostPlatform.system == "x86_64-linux") then pkgs.firefox-bin else pkgs.firefox;
|
||||
in
|
||||
{
|
||||
config = {
|
||||
hardware.bluetooth = {
|
||||
enable = true;
|
||||
@ -27,12 +27,18 @@ in {
|
||||
};
|
||||
};
|
||||
|
||||
mj.base.users.user.extraGroups = ["adbusers" "networkmanager" "wireshark" "podman" "docker"];
|
||||
mj.base.users.user.extraGroups = [
|
||||
"adbusers"
|
||||
"networkmanager"
|
||||
"wireshark"
|
||||
"podman"
|
||||
"docker"
|
||||
];
|
||||
|
||||
services = {
|
||||
fwupd.enable = true;
|
||||
blueman.enable = true;
|
||||
udev.packages = [pkgs.yubikey-personalization];
|
||||
udev.packages = [ pkgs.yubikey-personalization ];
|
||||
acpid.enable = true;
|
||||
gnome.gnome-keyring.enable = true;
|
||||
openssh.settings.X11Forwarding = true;
|
||||
@ -46,8 +52,9 @@ in {
|
||||
enable = true;
|
||||
drivers = [
|
||||
pkgs.samsung-unified-linux-driver_4_01_17
|
||||
(pkgs.writeTextDir "share/cups/model/HP_Color_Laser_15x_Series.ppd"
|
||||
(builtins.readFile ../../../shared/HP_Color_Laser_15x_Series.ppd))
|
||||
(pkgs.writeTextDir "share/cups/model/HP_Color_Laser_15x_Series.ppd" (
|
||||
builtins.readFile ../../../shared/HP_Color_Laser_15x_Series.ppd
|
||||
))
|
||||
];
|
||||
};
|
||||
|
||||
@ -102,7 +109,7 @@ in {
|
||||
|
||||
virtualisation.podman = {
|
||||
enable = lib.mkDefault true;
|
||||
extraPackages = [pkgs.zfs];
|
||||
extraPackages = [ pkgs.zfs ];
|
||||
};
|
||||
|
||||
security.rtkit.enable = true;
|
||||
@ -127,7 +134,8 @@ in {
|
||||
};
|
||||
};
|
||||
|
||||
environment.systemPackages = with pkgs;
|
||||
environment.systemPackages =
|
||||
with pkgs;
|
||||
lib.mkMerge [
|
||||
[
|
||||
# packages defined here
|
||||
@ -225,8 +233,7 @@ in {
|
||||
xorg.xinit
|
||||
|
||||
(texlive.combine {
|
||||
inherit
|
||||
(texlive)
|
||||
inherit (texlive)
|
||||
scheme-medium
|
||||
dvisvgm
|
||||
dvipng
|
||||
@ -250,199 +257,200 @@ in {
|
||||
winetricks
|
||||
wineWowPackages.full
|
||||
])
|
||||
[pkgs.undocker]
|
||||
[ pkgs.undocker ]
|
||||
];
|
||||
|
||||
# https://discourse.nixos.org/t/nixos-rebuild-switch-upgrade-networkmanager-wait-online-service-failure/30746
|
||||
systemd.services.NetworkManager-wait-online.enable = false;
|
||||
|
||||
home-manager.users.${username} = {
|
||||
pkgs,
|
||||
config,
|
||||
...
|
||||
}: {
|
||||
imports = [./plasma.nix];
|
||||
xdg.configFile."awesome/rc.lua".source = ./rc.lua;
|
||||
home-manager.users.${username} =
|
||||
{ pkgs, config, ... }:
|
||||
{
|
||||
imports = [ ./plasma.nix ];
|
||||
xdg.configFile."awesome/rc.lua".source = ./rc.lua;
|
||||
|
||||
programs = {
|
||||
mbsync.enable = true;
|
||||
neomutt.enable = true;
|
||||
notmuch.enable = true;
|
||||
programs = {
|
||||
mbsync.enable = true;
|
||||
neomutt.enable = true;
|
||||
notmuch.enable = true;
|
||||
|
||||
tmux.extraConfig = let
|
||||
cmd = "${pkgs.extract_url}/bin/extract_url";
|
||||
cfg = pkgs.writeText "urlviewrc" "COMMAND systemd-run --user --collect xdg-open %s";
|
||||
in ''
|
||||
bind-key u capture-pane -J \; \
|
||||
save-buffer /tmp/tmux-buffer \; \
|
||||
delete-buffer \; \
|
||||
split-window -l 10 "${cmd} -c ${cfg} /tmp/tmux-buffer"
|
||||
'';
|
||||
};
|
||||
tmux.extraConfig =
|
||||
let
|
||||
cmd = "${pkgs.extract_url}/bin/extract_url";
|
||||
cfg = pkgs.writeText "urlviewrc" "COMMAND systemd-run --user --collect xdg-open %s";
|
||||
in
|
||||
''
|
||||
bind-key u capture-pane -J \; \
|
||||
save-buffer /tmp/tmux-buffer \; \
|
||||
delete-buffer \; \
|
||||
split-window -l 10 "${cmd} -c ${cfg} /tmp/tmux-buffer"
|
||||
'';
|
||||
};
|
||||
|
||||
home.file.".cache/evolution/.stignore".text = "*.db";
|
||||
home.file.".cache/evolution/.stignore".text = "*.db";
|
||||
|
||||
accounts.email = {
|
||||
maildirBasePath = "Maildir";
|
||||
accounts.email = {
|
||||
maildirBasePath = "Maildir";
|
||||
|
||||
accounts.mj = {
|
||||
primary = true;
|
||||
userName = "motiejus@jakstys.lt";
|
||||
address = "motiejus@jakstys.lt";
|
||||
realName = "Motiejus Jakštys";
|
||||
passwordCommand = "cat /home/${username}/.email-creds";
|
||||
imap.host = "imap.migadu.com";
|
||||
smtp.host = "smtp.migadu.com";
|
||||
accounts.mj = {
|
||||
primary = true;
|
||||
userName = "motiejus@jakstys.lt";
|
||||
address = "motiejus@jakstys.lt";
|
||||
realName = "Motiejus Jakštys";
|
||||
passwordCommand = "cat /home/${username}/.email-creds";
|
||||
imap.host = "imap.migadu.com";
|
||||
smtp.host = "smtp.migadu.com";
|
||||
|
||||
mbsync = {
|
||||
enable = true;
|
||||
create = "maildir";
|
||||
};
|
||||
mbsync = {
|
||||
enable = true;
|
||||
create = "maildir";
|
||||
};
|
||||
|
||||
msmtp.enable = true;
|
||||
msmtp.enable = true;
|
||||
|
||||
notmuch = {
|
||||
enable = true;
|
||||
neomutt.enable = true;
|
||||
};
|
||||
notmuch = {
|
||||
enable = true;
|
||||
neomutt.enable = true;
|
||||
};
|
||||
|
||||
neomutt = {
|
||||
enable = true;
|
||||
extraConfig = ''
|
||||
set index_format="%4C %Z %{%F %H:%M} %-15.15L (%?l?%4l&%4c?) %s"
|
||||
neomutt = {
|
||||
enable = true;
|
||||
extraConfig = ''
|
||||
set index_format="%4C %Z %{%F %H:%M} %-15.15L (%?l?%4l&%4c?) %s"
|
||||
|
||||
set mailcap_path = ${
|
||||
pkgs.writeText "mailcaprc" ''
|
||||
set mailcap_path = ${pkgs.writeText "mailcaprc" ''
|
||||
text/html; ${pkgs.elinks}/bin/elinks -dump ; copiousoutput;
|
||||
application/*; ${pkgs.xdg-utils}/bin/xdg-open %s &> /dev/null &;
|
||||
image/*; ${pkgs.xdg-utils}/bin/xdg-open %s &> /dev/null &;
|
||||
''
|
||||
}
|
||||
auto_view text/html
|
||||
unset record
|
||||
set send_charset="utf-8"
|
||||
''}
|
||||
auto_view text/html
|
||||
unset record
|
||||
set send_charset="utf-8"
|
||||
|
||||
macro attach 'V' "<pipe-entry>iconv -c --to-code=UTF8 > ~/.cache/mutt/mail.html<enter><shell-escape>firefox ~/.cache/mutt/mail.html<enter>"
|
||||
macro index,pager \cb "<pipe-message> env BROWSER=firefox urlscan<Enter>" "call urlscan to extract URLs out of a message"
|
||||
macro attach,compose \cb "<pipe-entry> env BROWSER=firefox urlscan<Enter>" "call urlscan to extract URLs out of a message"
|
||||
macro attach 'V' "<pipe-entry>iconv -c --to-code=UTF8 > ~/.cache/mutt/mail.html<enter><shell-escape>firefox ~/.cache/mutt/mail.html<enter>"
|
||||
macro index,pager \cb "<pipe-message> env BROWSER=firefox urlscan<Enter>" "call urlscan to extract URLs out of a message"
|
||||
macro attach,compose \cb "<pipe-entry> env BROWSER=firefox urlscan<Enter>" "call urlscan to extract URLs out of a message"
|
||||
|
||||
set sort_browser=date
|
||||
set sort=reverse-threads
|
||||
set sort_aux=last-date-received
|
||||
set sort_browser=date
|
||||
set sort=reverse-threads
|
||||
set sort_aux=last-date-received
|
||||
|
||||
bind pager g top
|
||||
bind pager G bottom
|
||||
bind attach,index g first-entry
|
||||
bind attach,index G last-entry
|
||||
bind attach,index,pager \CD half-down
|
||||
bind attach,index,pager \CU half-up
|
||||
bind attach,index,pager \Ce next-line
|
||||
bind attach,index,pager \Cy previous-line
|
||||
bind index,pager B sidebar-toggle-visible
|
||||
bind index,pager R group-reply
|
||||
bind pager g top
|
||||
bind pager G bottom
|
||||
bind attach,index g first-entry
|
||||
bind attach,index G last-entry
|
||||
bind attach,index,pager \CD half-down
|
||||
bind attach,index,pager \CU half-up
|
||||
bind attach,index,pager \Ce next-line
|
||||
bind attach,index,pager \Cy previous-line
|
||||
bind index,pager B sidebar-toggle-visible
|
||||
bind index,pager R group-reply
|
||||
|
||||
set sidebar_visible = yes
|
||||
set sidebar_width = 15
|
||||
bind index,pager \Cp sidebar-prev
|
||||
bind index,pager \Cn sidebar-next
|
||||
bind index,pager \Co sidebar-open
|
||||
bind index,pager B sidebar-toggle-visible
|
||||
set sidebar_short_path = yes
|
||||
set sidebar_delim_chars = '/'
|
||||
set sidebar_format = '%B%* %?N?%N?'
|
||||
'';
|
||||
set sidebar_visible = yes
|
||||
set sidebar_width = 15
|
||||
bind index,pager \Cp sidebar-prev
|
||||
bind index,pager \Cn sidebar-next
|
||||
bind index,pager \Co sidebar-open
|
||||
bind index,pager B sidebar-toggle-visible
|
||||
set sidebar_short_path = yes
|
||||
set sidebar_delim_chars = '/'
|
||||
set sidebar_format = '%B%* %?N?%N?'
|
||||
'';
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
services = {
|
||||
cbatticon.enable = true;
|
||||
blueman-applet.enable = true;
|
||||
services = {
|
||||
cbatticon.enable = true;
|
||||
blueman-applet.enable = true;
|
||||
|
||||
syncthing.tray = {
|
||||
syncthing.tray = {
|
||||
enable = true;
|
||||
#extraOptions = ["--wait"];
|
||||
};
|
||||
|
||||
pasystray = {
|
||||
enable = true;
|
||||
extraOptions = [
|
||||
"--key-grabbing"
|
||||
"--notify=all"
|
||||
];
|
||||
};
|
||||
|
||||
gpg-agent = {
|
||||
enable = true;
|
||||
enableSshSupport = true;
|
||||
pinentryPackage = pkgs.pinentry-gtk2;
|
||||
};
|
||||
|
||||
screen-locker = {
|
||||
enable = true;
|
||||
xautolock.enable = false;
|
||||
lockCmd = ''${pkgs.bash}/bin/bash -c "${pkgs.coreutils}/bin/sleep 0.2; ${pkgs.xorg.xset}/bin/xset dpms force off; /run/wrappers/bin/slock"'';
|
||||
};
|
||||
};
|
||||
|
||||
# https://github.com/nix-community/home-manager/issues/2064
|
||||
systemd.user.targets.tray = {
|
||||
Unit = {
|
||||
Description = "Home Manager System Tray";
|
||||
Requires = [ "graphical-session-pre.target" ];
|
||||
};
|
||||
};
|
||||
|
||||
# thanks K900
|
||||
gtk = {
|
||||
enable = true;
|
||||
#extraOptions = ["--wait"];
|
||||
theme = {
|
||||
package = pkgs.plasma5Packages.breeze-gtk;
|
||||
name = "Breeze";
|
||||
};
|
||||
cursorTheme = {
|
||||
package = pkgs.plasma5Packages.breeze-icons;
|
||||
name = "Breeze_Snow";
|
||||
};
|
||||
iconTheme = {
|
||||
package = pkgs.papirus-icon-theme;
|
||||
name = "Papirus-Dark";
|
||||
};
|
||||
gtk2 = {
|
||||
configLocation = "${config.xdg.configHome}/gtk-2.0/gtkrc";
|
||||
extraConfig = ''
|
||||
gtk-alternative-button-order = 1
|
||||
'';
|
||||
};
|
||||
gtk3.extraConfig = {
|
||||
gtk-application-prefer-dark-theme = true;
|
||||
gtk-decoration-layout = "icon:minimize,maximize,close";
|
||||
};
|
||||
gtk4.extraConfig = {
|
||||
gtk-application-prefer-dark-theme = true;
|
||||
gtk-decoration-layout = "icon:minimize,maximize,close";
|
||||
};
|
||||
};
|
||||
|
||||
pasystray = {
|
||||
enable = true;
|
||||
extraOptions = ["--key-grabbing" "--notify=all"];
|
||||
};
|
||||
|
||||
gpg-agent = {
|
||||
enable = true;
|
||||
enableSshSupport = true;
|
||||
pinentryPackage = pkgs.pinentry-gtk2;
|
||||
};
|
||||
|
||||
screen-locker = {
|
||||
enable = true;
|
||||
xautolock.enable = false;
|
||||
lockCmd = ''${pkgs.bash}/bin/bash -c "${pkgs.coreutils}/bin/sleep 0.2; ${pkgs.xorg.xset}/bin/xset dpms force off; /run/wrappers/bin/slock"'';
|
||||
mj.plasma.kconfig = {
|
||||
kdeglobals = {
|
||||
General.ColorScheme = "ArcDark";
|
||||
Icons.Theme = "Papirus-Dark";
|
||||
KDE.widgetStyle = "Breeze";
|
||||
};
|
||||
plasmarc.Theme.name = "Arc-Dark";
|
||||
kscreenlockerrc.Greeter = {
|
||||
Theme = "com.github.varlesh.arc-dark";
|
||||
};
|
||||
ksplashrc.KSplash = {
|
||||
Engine = "KSplashQML";
|
||||
Theme = "com.github.varlesh.arc-dark";
|
||||
};
|
||||
kwinrc."org.kde.kdecoration2" = {
|
||||
library = "org.kde.kwin.aurorae";
|
||||
theme = "__aurorae__svg__Arc-Dark";
|
||||
};
|
||||
kcminputrc.Mouse.cursorTheme = "Breeze_Snow";
|
||||
# don't mess with GTK settings
|
||||
kded5rc."Module-gtkconfig".autoload = false;
|
||||
};
|
||||
};
|
||||
|
||||
# https://github.com/nix-community/home-manager/issues/2064
|
||||
systemd.user.targets.tray = {
|
||||
Unit = {
|
||||
Description = "Home Manager System Tray";
|
||||
Requires = ["graphical-session-pre.target"];
|
||||
};
|
||||
};
|
||||
|
||||
# thanks K900
|
||||
gtk = {
|
||||
enable = true;
|
||||
theme = {
|
||||
package = pkgs.plasma5Packages.breeze-gtk;
|
||||
name = "Breeze";
|
||||
};
|
||||
cursorTheme = {
|
||||
package = pkgs.plasma5Packages.breeze-icons;
|
||||
name = "Breeze_Snow";
|
||||
};
|
||||
iconTheme = {
|
||||
package = pkgs.papirus-icon-theme;
|
||||
name = "Papirus-Dark";
|
||||
};
|
||||
gtk2 = {
|
||||
configLocation = "${config.xdg.configHome}/gtk-2.0/gtkrc";
|
||||
extraConfig = ''
|
||||
gtk-alternative-button-order = 1
|
||||
'';
|
||||
};
|
||||
gtk3.extraConfig = {
|
||||
gtk-application-prefer-dark-theme = true;
|
||||
gtk-decoration-layout = "icon:minimize,maximize,close";
|
||||
};
|
||||
gtk4.extraConfig = {
|
||||
gtk-application-prefer-dark-theme = true;
|
||||
gtk-decoration-layout = "icon:minimize,maximize,close";
|
||||
};
|
||||
};
|
||||
|
||||
mj.plasma.kconfig = {
|
||||
kdeglobals = {
|
||||
General.ColorScheme = "ArcDark";
|
||||
Icons.Theme = "Papirus-Dark";
|
||||
KDE.widgetStyle = "Breeze";
|
||||
};
|
||||
plasmarc.Theme.name = "Arc-Dark";
|
||||
kscreenlockerrc.Greeter = {
|
||||
Theme = "com.github.varlesh.arc-dark";
|
||||
};
|
||||
ksplashrc.KSplash = {
|
||||
Engine = "KSplashQML";
|
||||
Theme = "com.github.varlesh.arc-dark";
|
||||
};
|
||||
kwinrc."org.kde.kdecoration2" = {
|
||||
library = "org.kde.kwin.aurorae";
|
||||
theme = "__aurorae__svg__Arc-Dark";
|
||||
};
|
||||
kcminputrc.Mouse.cursorTheme = "Breeze_Snow";
|
||||
# don't mess with GTK settings
|
||||
kded5rc."Module-gtkconfig".autoload = false;
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
|
@ -4,75 +4,85 @@
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.mj.plasma;
|
||||
|
||||
setValue = v: let
|
||||
setValueArgs = ty: vs: "--type ${ty} ${lib.escapeShellArg vs}";
|
||||
in
|
||||
if builtins.isBool v
|
||||
then
|
||||
setValueArgs "bool" (
|
||||
if v
|
||||
then "true"
|
||||
else "false"
|
||||
)
|
||||
else setValueArgs "str" (builtins.toString v);
|
||||
setValue =
|
||||
v:
|
||||
let
|
||||
setValueArgs = ty: vs: "--type ${ty} ${lib.escapeShellArg vs}";
|
||||
in
|
||||
if builtins.isBool v then
|
||||
setValueArgs "bool" (if v then "true" else "false")
|
||||
else
|
||||
setValueArgs "str" (builtins.toString v);
|
||||
|
||||
pathToArgs = path: let
|
||||
groupArg = item: "--group ${lib.escapeShellArg item}";
|
||||
groupArgs = builtins.map groupArg path;
|
||||
in
|
||||
pathToArgs =
|
||||
path:
|
||||
let
|
||||
groupArg = item: "--group ${lib.escapeShellArg item}";
|
||||
groupArgs = builtins.map groupArg path;
|
||||
in
|
||||
groupArgs;
|
||||
|
||||
entryToArgs = {
|
||||
path,
|
||||
value,
|
||||
}: let
|
||||
file = builtins.head path;
|
||||
subpath = builtins.tail path;
|
||||
groups = lib.lists.init subpath;
|
||||
name = lib.lists.last subpath;
|
||||
entryToArgs =
|
||||
{ path, value }:
|
||||
let
|
||||
file = builtins.head path;
|
||||
subpath = builtins.tail path;
|
||||
groups = lib.lists.init subpath;
|
||||
name = lib.lists.last subpath;
|
||||
|
||||
fileArg = "--file ${lib.escapeShellArg file}";
|
||||
pathArgs = pathToArgs groups;
|
||||
keyArg = "--key ${lib.escapeShellArg name}";
|
||||
valueArg = setValue value;
|
||||
allArgs = pathArgs ++ [fileArg keyArg valueArg];
|
||||
in
|
||||
fileArg = "--file ${lib.escapeShellArg file}";
|
||||
pathArgs = pathToArgs groups;
|
||||
keyArg = "--key ${lib.escapeShellArg name}";
|
||||
valueArg = setValue value;
|
||||
allArgs = pathArgs ++ [
|
||||
fileArg
|
||||
keyArg
|
||||
valueArg
|
||||
];
|
||||
in
|
||||
lib.strings.concatStringsSep " " allArgs;
|
||||
|
||||
flattenAttrs = attrs: pathSoFar:
|
||||
lib.lists.flatten (lib.attrsets.mapAttrsToList (
|
||||
flattenAttrs =
|
||||
attrs: pathSoFar:
|
||||
lib.lists.flatten (
|
||||
lib.attrsets.mapAttrsToList (
|
||||
name: value:
|
||||
if builtins.isAttrs value
|
||||
then flattenAttrs value (pathSoFar ++ [name])
|
||||
else {
|
||||
path = pathSoFar ++ [name];
|
||||
if builtins.isAttrs value then
|
||||
flattenAttrs value (pathSoFar ++ [ name ])
|
||||
else
|
||||
{
|
||||
path = pathSoFar ++ [ name ];
|
||||
inherit value;
|
||||
}
|
||||
)
|
||||
attrs);
|
||||
) attrs
|
||||
);
|
||||
|
||||
configToArgs = attrs: builtins.map entryToArgs (flattenAttrs attrs []);
|
||||
configToArgs = attrs: builtins.map entryToArgs (flattenAttrs attrs [ ]);
|
||||
|
||||
configToScript = attrs: let
|
||||
args = configToArgs attrs;
|
||||
argToCommand = arg: "${pkgs.plasma5Packages.kconfig}/bin/kwriteconfig5 ${arg}";
|
||||
commands = builtins.map argToCommand args;
|
||||
in
|
||||
configToScript =
|
||||
attrs:
|
||||
let
|
||||
args = configToArgs attrs;
|
||||
argToCommand = arg: "${pkgs.plasma5Packages.kconfig}/bin/kwriteconfig5 ${arg}";
|
||||
commands = builtins.map argToCommand args;
|
||||
in
|
||||
lib.strings.concatStringsSep "\n" commands;
|
||||
|
||||
writeConfig = attrs: pkgs.writeScript "kconfig-setup" (configToScript attrs);
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.mj.plasma = {
|
||||
kconfig = lib.mkOption {
|
||||
type = lib.types.attrs;
|
||||
default = {};
|
||||
default = { };
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf (cfg.kconfig != {}) {
|
||||
config = lib.mkIf (cfg.kconfig != { }) {
|
||||
home.activation.kconfig-setup = "$DRY_RUN_CMD ${writeConfig cfg.kconfig}";
|
||||
};
|
||||
}
|
||||
|
@ -1,3 +1 @@
|
||||
_: {
|
||||
services.journald.extraConfig = "Storage=volatile";
|
||||
}
|
||||
_: { services.journald.extraConfig = "Storage=volatile"; }
|
||||
|
@ -4,14 +4,16 @@
|
||||
myData,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
options.mj.services.borgstor = with lib.types; {
|
||||
enable = lib.mkEnableOption "Enable borg storage user";
|
||||
dataDir = lib.mkOption {type = path;};
|
||||
sshKeys = lib.mkOption {type = listOf str;};
|
||||
dataDir = lib.mkOption { type = path; };
|
||||
sshKeys = lib.mkOption { type = listOf str; };
|
||||
};
|
||||
|
||||
config = with config.mj.services.borgstor;
|
||||
config =
|
||||
with config.mj.services.borgstor;
|
||||
lib.mkIf enable {
|
||||
users.users.borgstor = {
|
||||
description = "Borg Storage";
|
||||
@ -21,11 +23,9 @@
|
||||
isSystemUser = true;
|
||||
createHome = false;
|
||||
uid = myData.uidgid.borgstor;
|
||||
openssh.authorizedKeys.keys =
|
||||
map (
|
||||
k: "command=\"${pkgs.borgbackup}/bin/borg serve --restrict-to-path ${dataDir}\",restrict ${k}"
|
||||
)
|
||||
sshKeys;
|
||||
openssh.authorizedKeys.keys = map (
|
||||
k: ''command="${pkgs.borgbackup}/bin/borg serve --restrict-to-path ${dataDir}",restrict ${k}''
|
||||
) sshKeys;
|
||||
};
|
||||
|
||||
users.groups.borgstor.gid = myData.uidgid.borgstor;
|
||||
|
@ -1,4 +1,5 @@
|
||||
{...}: {
|
||||
{ ... }:
|
||||
{
|
||||
imports = [
|
||||
./borgstor
|
||||
./deployerbot
|
||||
|
@ -3,34 +3,36 @@
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
options.mj.services.deployerbot.main = with lib.types; {
|
||||
enable = lib.mkEnableOption "Enable system updater orchestrator";
|
||||
deployDerivations = lib.mkOption {type = listOf str;};
|
||||
deployDerivations = lib.mkOption { type = listOf str; };
|
||||
deployIfPresent = lib.mkOption {
|
||||
type = listOf (submodule {
|
||||
options = {
|
||||
derivationTarget = lib.mkOption {type = str;};
|
||||
pingTarget = lib.mkOption {type = str;};
|
||||
derivationTarget = lib.mkOption { type = str; };
|
||||
pingTarget = lib.mkOption { type = str; };
|
||||
};
|
||||
});
|
||||
default = [];
|
||||
default = [ ];
|
||||
};
|
||||
uidgid = lib.mkOption {type = int;};
|
||||
repo = lib.mkOption {type = str;};
|
||||
uidgid = lib.mkOption { type = int; };
|
||||
repo = lib.mkOption { type = str; };
|
||||
};
|
||||
|
||||
options.mj.services.deployerbot.follower = with lib.types; {
|
||||
enable = lib.mkEnableOption "Allow system to be deployed with deployerbot";
|
||||
sshAllowSubnets = lib.mkOption {type = listOf str;};
|
||||
publicKeys = lib.mkOption {type = listOf str;};
|
||||
uidgid = lib.mkOption {type = int;};
|
||||
sshAllowSubnets = lib.mkOption { type = listOf str; };
|
||||
publicKeys = lib.mkOption { type = listOf str; };
|
||||
uidgid = lib.mkOption { type = int; };
|
||||
};
|
||||
|
||||
config = lib.mkMerge [
|
||||
(let
|
||||
cfg = config.mj.services.deployerbot.main;
|
||||
in
|
||||
(
|
||||
let
|
||||
cfg = config.mj.services.deployerbot.main;
|
||||
in
|
||||
lib.mkIf cfg.enable {
|
||||
# TODO: git config --global user.email bot@jakstys.lt
|
||||
users.users.deployerbot-main = {
|
||||
@ -46,49 +48,57 @@
|
||||
|
||||
systemd.services.deployerbot = {
|
||||
description = "Update all known systems";
|
||||
environment = {TZ = "UTC";};
|
||||
path = [pkgs.git pkgs.openssh pkgs.nix];
|
||||
environment = {
|
||||
TZ = "UTC";
|
||||
};
|
||||
path = [
|
||||
pkgs.git
|
||||
pkgs.openssh
|
||||
pkgs.nix
|
||||
];
|
||||
restartIfChanged = false;
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = "deployerbot-main";
|
||||
WorkingDirectory = config.users.users.deployerbot-main.home;
|
||||
LoadCredential = ["ssh-key:/etc/ssh/ssh_host_ed25519_key"];
|
||||
LoadCredential = [ "ssh-key:/etc/ssh/ssh_host_ed25519_key" ];
|
||||
};
|
||||
script = let
|
||||
deployDerivationsStr = builtins.concatStringsSep " " cfg.deployDerivations;
|
||||
in ''
|
||||
set -xeuo pipefail
|
||||
script =
|
||||
let
|
||||
deployDerivationsStr = builtins.concatStringsSep " " cfg.deployDerivations;
|
||||
in
|
||||
''
|
||||
set -xeuo pipefail
|
||||
|
||||
export GIT_SSH_COMMAND="ssh -i ''${CREDENTIALS_DIRECTORY}/ssh-key"
|
||||
if [[ ! -d config ]]; then
|
||||
git clone ${cfg.repo} config
|
||||
cd config
|
||||
else
|
||||
cd config
|
||||
git fetch origin
|
||||
git reset --hard origin/main
|
||||
fi
|
||||
export GIT_SSH_COMMAND="ssh -i ''${CREDENTIALS_DIRECTORY}/ssh-key"
|
||||
if [[ ! -d config ]]; then
|
||||
git clone ${cfg.repo} config
|
||||
cd config
|
||||
else
|
||||
cd config
|
||||
git fetch origin
|
||||
git reset --hard origin/main
|
||||
fi
|
||||
|
||||
nix flake update --accept-flake-config --commit-lock-file
|
||||
# TODO --all-systems
|
||||
nix flake check --all-systems --accept-flake-config
|
||||
nix flake update --accept-flake-config --commit-lock-file
|
||||
# TODO --all-systems
|
||||
nix flake check --all-systems --accept-flake-config
|
||||
|
||||
EXITCODE=0
|
||||
${pkgs.deploy-rs.deploy-rs}/bin/deploy \
|
||||
--ssh-opts="-i ''${CREDENTIALS_DIRECTORY}/ssh-key" \
|
||||
--ssh-user=deployerbot-follower \
|
||||
--confirm-timeout 60 \
|
||||
--skip-checks \
|
||||
--targets ${deployDerivationsStr} -- \
|
||||
--accept-flake-config || EXITCODE=1
|
||||
EXITCODE=0
|
||||
${pkgs.deploy-rs.deploy-rs}/bin/deploy \
|
||||
--ssh-opts="-i ''${CREDENTIALS_DIRECTORY}/ssh-key" \
|
||||
--ssh-user=deployerbot-follower \
|
||||
--confirm-timeout 60 \
|
||||
--skip-checks \
|
||||
--targets ${deployDerivationsStr} -- \
|
||||
--accept-flake-config || EXITCODE=1
|
||||
|
||||
if [[ $EXITCODE == 0 ]]; then
|
||||
git push origin main
|
||||
fi
|
||||
if [[ $EXITCODE == 0 ]]; then
|
||||
git push origin main
|
||||
fi
|
||||
|
||||
# Optional deployments
|
||||
${lib.concatMapStringsSep "\n" (t: ''
|
||||
# Optional deployments
|
||||
${lib.concatMapStringsSep "\n" (t: ''
|
||||
if ${pkgs.inetutils}/bin/ping -c 1 ${t.pingTarget}; then
|
||||
${pkgs.deploy-rs.deploy-rs}/bin/deploy \
|
||||
--ssh-opts="-i ''${CREDENTIALS_DIRECTORY}/ssh-key" \
|
||||
@ -98,43 +108,45 @@
|
||||
--targets ${t.derivationTarget} -- \
|
||||
--accept-flake-config || EXITCODE=1
|
||||
fi
|
||||
'')
|
||||
cfg.deployIfPresent}
|
||||
'') cfg.deployIfPresent}
|
||||
|
||||
exit $EXITCODE
|
||||
'';
|
||||
exit $EXITCODE
|
||||
'';
|
||||
};
|
||||
|
||||
systemd.timers.deployerbot = {
|
||||
description = "deployerbot-main timer";
|
||||
wantedBy = ["timers.target"];
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig.OnCalendar = "*-*-* 23:30:00 UTC";
|
||||
};
|
||||
|
||||
mj.base.unitstatus.units = ["deployerbot"];
|
||||
mj.base.unitstatus.units = [ "deployerbot" ];
|
||||
|
||||
nix.settings.trusted-users = ["deployerbot-main"];
|
||||
})
|
||||
nix.settings.trusted-users = [ "deployerbot-main" ];
|
||||
}
|
||||
)
|
||||
|
||||
(let
|
||||
cfg = config.mj.services.deployerbot.follower;
|
||||
in
|
||||
(
|
||||
let
|
||||
cfg = config.mj.services.deployerbot.follower;
|
||||
in
|
||||
lib.mkIf cfg.enable {
|
||||
users.users.deployerbot-follower = {
|
||||
description = "Deployerbot Follower";
|
||||
home = "/var/lib/deployerbot-follower";
|
||||
shell = "/bin/sh";
|
||||
group = "deployerbot-follower";
|
||||
extraGroups = ["wheel"];
|
||||
extraGroups = [ "wheel" ];
|
||||
isSystemUser = true;
|
||||
createHome = true;
|
||||
uid = cfg.uidgid;
|
||||
openssh.authorizedKeys.keys =
|
||||
map (k: "from=\"${builtins.concatStringsSep "," cfg.sshAllowSubnets}\" " + k)
|
||||
cfg.publicKeys;
|
||||
openssh.authorizedKeys.keys = map (
|
||||
k: ''from="${builtins.concatStringsSep "," cfg.sshAllowSubnets}" '' + k
|
||||
) cfg.publicKeys;
|
||||
};
|
||||
users.groups.deployerbot-follower.gid = cfg.uidgid;
|
||||
nix.settings.trusted-users = ["deployerbot-follower"];
|
||||
})
|
||||
nix.settings.trusted-users = [ "deployerbot-follower" ];
|
||||
}
|
||||
)
|
||||
];
|
||||
}
|
||||
|
@ -1,54 +1,59 @@
|
||||
{ config, lib, ... }:
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
...
|
||||
}: {
|
||||
options.mj.services.friendlyport = with lib.types; {
|
||||
ports = lib.mkOption {
|
||||
type = listOf (submodule {
|
||||
options = {
|
||||
subnets = lib.mkOption {type = listOf str;};
|
||||
subnets = lib.mkOption { type = listOf str; };
|
||||
tcp = lib.mkOption {
|
||||
type = listOf int;
|
||||
default = [];
|
||||
default = [ ];
|
||||
};
|
||||
udp = lib.mkOption {
|
||||
type = listOf int;
|
||||
default = [];
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
config = let
|
||||
inherit (config.mj.services.friendlyport) ports;
|
||||
config =
|
||||
let
|
||||
inherit (config.mj.services.friendlyport) ports;
|
||||
|
||||
mkAdd = proto: subnets: ints: let
|
||||
subnetsS = builtins.concatStringsSep "," subnets;
|
||||
intsS = builtins.concatStringsSep "," (map builtins.toString ints);
|
||||
mkAdd =
|
||||
proto: subnets: ints:
|
||||
let
|
||||
subnetsS = builtins.concatStringsSep "," subnets;
|
||||
intsS = builtins.concatStringsSep "," (map builtins.toString ints);
|
||||
in
|
||||
if builtins.length ints == 0 then
|
||||
""
|
||||
else
|
||||
"iptables -A INPUT -p ${proto} --match multiport --dports ${intsS} --source ${subnetsS} -j ACCEPT";
|
||||
|
||||
startTCP = map (attr: mkAdd "tcp" attr.subnets attr.tcp) ports;
|
||||
startUDP = map (attr: mkAdd "udp" attr.subnets attr.udp) ports;
|
||||
|
||||
# TODO: when stopping the firewall, systemd uses the old ports. So this is a two-phase process.
|
||||
# How to stop the old one and start the new one?
|
||||
mkDel =
|
||||
proto: subnets: ints:
|
||||
let
|
||||
subnetsS = builtins.concatStringsSep "," subnets;
|
||||
intsS = builtins.concatStringsSep "," (map builtins.toString ints);
|
||||
in
|
||||
if builtins.length ints == 0 then
|
||||
""
|
||||
else
|
||||
"iptables -D INPUT -p ${proto} --match multiport --dports ${intsS} --source ${subnetsS} -j ACCEPT || :";
|
||||
|
||||
stopTCP = map (attr: mkDel "tcp" attr.subnets attr.tcp) ports;
|
||||
stopUDP = map (attr: mkDel "udp" attr.subnets attr.udp) ports;
|
||||
in
|
||||
if builtins.length ints == 0
|
||||
then ""
|
||||
else "iptables -A INPUT -p ${proto} --match multiport --dports ${intsS} --source ${subnetsS} -j ACCEPT";
|
||||
|
||||
startTCP = map (attr: mkAdd "tcp" attr.subnets attr.tcp) ports;
|
||||
startUDP = map (attr: mkAdd "udp" attr.subnets attr.udp) ports;
|
||||
|
||||
# TODO: when stopping the firewall, systemd uses the old ports. So this is a two-phase process.
|
||||
# How to stop the old one and start the new one?
|
||||
mkDel = proto: subnets: ints: let
|
||||
subnetsS = builtins.concatStringsSep "," subnets;
|
||||
intsS = builtins.concatStringsSep "," (map builtins.toString ints);
|
||||
in
|
||||
if builtins.length ints == 0
|
||||
then ""
|
||||
else "iptables -D INPUT -p ${proto} --match multiport --dports ${intsS} --source ${subnetsS} -j ACCEPT || :";
|
||||
|
||||
stopTCP = map (attr: mkDel "tcp" attr.subnets attr.tcp) ports;
|
||||
stopUDP = map (attr: mkDel "udp" attr.subnets attr.udp) ports;
|
||||
in {
|
||||
networking.firewall.extraCommands = lib.concatLines (startTCP ++ startUDP);
|
||||
networking.firewall.extraStopCommands = lib.concatLines (stopTCP ++ stopUDP);
|
||||
};
|
||||
{
|
||||
networking.firewall.extraCommands = lib.concatLines (startTCP ++ startUDP);
|
||||
networking.firewall.extraStopCommands = lib.concatLines (stopTCP ++ stopUDP);
|
||||
};
|
||||
}
|
||||
|
@ -4,7 +4,8 @@
|
||||
pkgs,
|
||||
myData,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
options.mj.services.gitea = with lib.types; {
|
||||
enable = lib.mkEnableOption "Enable gitea";
|
||||
};
|
||||
@ -79,7 +80,7 @@
|
||||
route /static/assets/* {
|
||||
uri strip_prefix /static
|
||||
file_server * {
|
||||
root ${pkgs.compressDrvWeb pkgs.gitea.data {}}/public
|
||||
root ${pkgs.compressDrvWeb pkgs.gitea.data { }}/public
|
||||
precompressed br gzip
|
||||
}
|
||||
}
|
||||
|
@ -3,9 +3,11 @@
|
||||
lib,
|
||||
myData,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.mj.services.hass;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.mj.services.hass = with lib.types; {
|
||||
enable = lib.mkEnableOption "Enable home-assistant";
|
||||
};
|
||||
@ -13,12 +15,12 @@ in {
|
||||
config = lib.mkIf cfg.enable {
|
||||
mj.services.friendlyport.ports = [
|
||||
{
|
||||
subnets = [myData.subnets.tailscale.cidr];
|
||||
tcp = [myData.ports.hass];
|
||||
subnets = [ myData.subnets.tailscale.cidr ];
|
||||
tcp = [ myData.ports.hass ];
|
||||
}
|
||||
];
|
||||
|
||||
environment.systemPackages = [];
|
||||
environment.systemPackages = [ ];
|
||||
|
||||
services = {
|
||||
home-assistant = {
|
||||
@ -36,11 +38,11 @@ in {
|
||||
"ipp"
|
||||
];
|
||||
config = {
|
||||
default_config = {};
|
||||
default_config = { };
|
||||
|
||||
http = {
|
||||
use_x_forwarded_for = true;
|
||||
trusted_proxies = ["127.0.0.1"];
|
||||
trusted_proxies = [ "127.0.0.1" ];
|
||||
};
|
||||
#homeassistant = {
|
||||
# auth_providers = [
|
||||
@ -51,7 +53,7 @@ in {
|
||||
# ];
|
||||
#};
|
||||
|
||||
wake_on_lan = {};
|
||||
wake_on_lan = { };
|
||||
|
||||
# requires a restore from backup
|
||||
"automation ui" = "!include automations.yaml";
|
||||
@ -67,7 +69,9 @@ in {
|
||||
action = [
|
||||
{
|
||||
service = "wake_on_lan.send_magic_packet";
|
||||
data = {mac = "74:e6:b8:4c:fb:b7";};
|
||||
data = {
|
||||
mac = "74:e6:b8:4c:fb:b7";
|
||||
};
|
||||
}
|
||||
];
|
||||
}
|
||||
|
@ -3,28 +3,32 @@
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
options.mj.services.headscale = with lib.types; {
|
||||
enable = lib.mkEnableOption "Enable headscale";
|
||||
clientOidcPath = lib.mkOption {type = str;};
|
||||
subnetCIDR = lib.mkOption {type = str;};
|
||||
clientOidcPath = lib.mkOption { type = str; };
|
||||
subnetCIDR = lib.mkOption { type = str; };
|
||||
};
|
||||
|
||||
config = lib.mkIf config.mj.services.headscale.enable {
|
||||
environment.systemPackages = [pkgs.headscale];
|
||||
environment.systemPackages = [ pkgs.headscale ];
|
||||
|
||||
networking.firewall.allowedTCPPorts = [3478];
|
||||
networking.firewall.allowedUDPPorts = [3478];
|
||||
networking.firewall.allowedTCPPorts = [ 3478 ];
|
||||
networking.firewall.allowedUDPPorts = [ 3478 ];
|
||||
|
||||
services = {
|
||||
headscale = {
|
||||
enable = true;
|
||||
settings = {
|
||||
server_url = "https://vpn.jakstys.lt";
|
||||
ip_prefixes = [config.mj.services.headscale.subnetCIDR];
|
||||
ip_prefixes = [ config.mj.services.headscale.subnetCIDR ];
|
||||
log.level = "warn";
|
||||
dns_config = {
|
||||
nameservers = ["1.1.1.1" "8.8.4.4"];
|
||||
nameservers = [
|
||||
"1.1.1.1"
|
||||
"8.8.4.4"
|
||||
];
|
||||
magic_dns = false;
|
||||
base_domain = "jakst";
|
||||
};
|
||||
|
@ -3,16 +3,18 @@
|
||||
lib,
|
||||
myData,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.mj.services.jakstpub;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.mj.services.jakstpub = with lib.types; {
|
||||
enable = lib.mkEnableOption "Enable jakstpub";
|
||||
dataDir = lib.mkOption {type = path;};
|
||||
dataDir = lib.mkOption { type = path; };
|
||||
# RequiresMountsFor is used by upstream, hacking with the unit
|
||||
requires = lib.mkOption {type = listOf str;};
|
||||
uidgid = lib.mkOption {type = int;};
|
||||
hostname = lib.mkOption {type = str;};
|
||||
requires = lib.mkOption { type = listOf str; };
|
||||
uidgid = lib.mkOption { type = int; };
|
||||
hostname = lib.mkOption { type = str; };
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
@ -40,32 +42,30 @@ in {
|
||||
guest account = jakstpub
|
||||
server role = standalone server
|
||||
'';
|
||||
shares = let
|
||||
defaults = {
|
||||
"public" = "yes";
|
||||
"mangled names" = "no";
|
||||
"guest ok" = "yes";
|
||||
"force user" = "jakstpub";
|
||||
"force group" = "jakstpub";
|
||||
};
|
||||
in {
|
||||
public =
|
||||
defaults
|
||||
// {
|
||||
shares =
|
||||
let
|
||||
defaults = {
|
||||
"public" = "yes";
|
||||
"mangled names" = "no";
|
||||
"guest ok" = "yes";
|
||||
"force user" = "jakstpub";
|
||||
"force group" = "jakstpub";
|
||||
};
|
||||
in
|
||||
{
|
||||
public = defaults // {
|
||||
"path" = cfg.dataDir;
|
||||
"writeable" = "yes";
|
||||
"read only" = "no";
|
||||
"create mask" = "0664";
|
||||
"directory mask" = "0775";
|
||||
};
|
||||
snapshots =
|
||||
defaults
|
||||
// {
|
||||
snapshots = defaults // {
|
||||
"path" = cfg.dataDir + "/.zfs/snapshot";
|
||||
"writeable" = "no";
|
||||
"read only" = "yes";
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
samba-wsdd = {
|
||||
@ -92,14 +92,18 @@ in {
|
||||
|
||||
mj.services.friendlyport.ports = [
|
||||
{
|
||||
subnets = with myData.subnets; [tailscale.cidr vno1.cidr vno3.cidr];
|
||||
subnets = with myData.subnets; [
|
||||
tailscale.cidr
|
||||
vno1.cidr
|
||||
vno3.cidr
|
||||
];
|
||||
tcp = [
|
||||
80 # caddy above
|
||||
139 # smbd
|
||||
445 # smbd
|
||||
5357 # wsdd
|
||||
];
|
||||
udp = [3702]; # wsdd
|
||||
udp = [ 3702 ]; # wsdd
|
||||
}
|
||||
];
|
||||
};
|
||||
|
@ -3,18 +3,19 @@
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
options.mj.services.matrix-synapse = with lib.types; {
|
||||
enable = lib.mkEnableOption "Enable matrix-synapse";
|
||||
signingKeyPath = lib.mkOption {type = path;};
|
||||
registrationSharedSecretPath = lib.mkOption {type = path;};
|
||||
macaroonSecretKeyPath = lib.mkOption {type = path;};
|
||||
signingKeyPath = lib.mkOption { type = path; };
|
||||
registrationSharedSecretPath = lib.mkOption { type = path; };
|
||||
macaroonSecretKeyPath = lib.mkOption { type = path; };
|
||||
};
|
||||
|
||||
config = lib.mkIf config.mj.services.matrix-synapse.enable {
|
||||
services.matrix-synapse = {
|
||||
enable = true;
|
||||
extraConfigFiles = ["/run/matrix-synapse/secrets.yaml"];
|
||||
extraConfigFiles = [ "/run/matrix-synapse/secrets.yaml" ];
|
||||
settings = {
|
||||
server_name = "jakstys.lt";
|
||||
admin_contact = "motiejus@jakstys.lt";
|
||||
@ -95,31 +96,34 @@
|
||||
};
|
||||
};
|
||||
|
||||
systemd.tmpfiles.rules = [
|
||||
"d /run/matrix-synapse 0700 matrix-synapse matrix-synapse -"
|
||||
];
|
||||
systemd.tmpfiles.rules = [ "d /run/matrix-synapse 0700 matrix-synapse matrix-synapse -" ];
|
||||
|
||||
systemd.services = {
|
||||
matrix-synapse = let
|
||||
# I tried to move this to preStart, but it complains:
|
||||
# Config is missing macaroon_secret_key
|
||||
secretsScript = pkgs.writeShellScript "write-secrets" ''
|
||||
set -xeuo pipefail
|
||||
umask 077
|
||||
ln -sf ''${CREDENTIALS_DIRECTORY}/jakstys_lt_signing_key /run/matrix-synapse/jakstys_lt_signing_key
|
||||
cat > /run/matrix-synapse/secrets.yaml <<EOF
|
||||
registration_shared_secret: "$(cat ''${CREDENTIALS_DIRECTORY}/registration_shared_secret)"
|
||||
macaroon_secret_key: "$(cat ''${CREDENTIALS_DIRECTORY}/macaroon_secret_key)"
|
||||
EOF
|
||||
'';
|
||||
in {
|
||||
serviceConfig.ExecStartPre = ["" secretsScript];
|
||||
serviceConfig.LoadCredential = with config.mj.services.matrix-synapse; [
|
||||
"jakstys_lt_signing_key:${signingKeyPath}"
|
||||
"registration_shared_secret:${registrationSharedSecretPath}"
|
||||
"macaroon_secret_key:${macaroonSecretKeyPath}"
|
||||
];
|
||||
};
|
||||
matrix-synapse =
|
||||
let
|
||||
# I tried to move this to preStart, but it complains:
|
||||
# Config is missing macaroon_secret_key
|
||||
secretsScript = pkgs.writeShellScript "write-secrets" ''
|
||||
set -xeuo pipefail
|
||||
umask 077
|
||||
ln -sf ''${CREDENTIALS_DIRECTORY}/jakstys_lt_signing_key /run/matrix-synapse/jakstys_lt_signing_key
|
||||
cat > /run/matrix-synapse/secrets.yaml <<EOF
|
||||
registration_shared_secret: "$(cat ''${CREDENTIALS_DIRECTORY}/registration_shared_secret)"
|
||||
macaroon_secret_key: "$(cat ''${CREDENTIALS_DIRECTORY}/macaroon_secret_key)"
|
||||
EOF
|
||||
'';
|
||||
in
|
||||
{
|
||||
serviceConfig.ExecStartPre = [
|
||||
""
|
||||
secretsScript
|
||||
];
|
||||
serviceConfig.LoadCredential = with config.mj.services.matrix-synapse; [
|
||||
"jakstys_lt_signing_key:${signingKeyPath}"
|
||||
"registration_shared_secret:${registrationSharedSecretPath}"
|
||||
"macaroon_secret_key:${macaroonSecretKeyPath}"
|
||||
];
|
||||
};
|
||||
};
|
||||
};
|
||||
}
|
||||
|
@ -3,21 +3,26 @@
|
||||
lib,
|
||||
myData,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.mj.services.node_exporter;
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.mj.services.node_exporter = with lib.types; {
|
||||
enable = lib.mkEnableOption "Enable node_exporter";
|
||||
extraSubnets = lib.mkOption {
|
||||
type = listOf str;
|
||||
default = [];
|
||||
default = [ ];
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
services.prometheus.exporters.node = {
|
||||
enable = true;
|
||||
enabledCollectors = ["systemd" "processes"];
|
||||
enabledCollectors = [
|
||||
"systemd"
|
||||
"processes"
|
||||
];
|
||||
port = myData.ports.exporters.node;
|
||||
user = "node_exporter";
|
||||
group = "node_exporter";
|
||||
@ -35,8 +40,8 @@ in {
|
||||
|
||||
mj.services.friendlyport.ports = [
|
||||
{
|
||||
subnets = [myData.subnets.tailscale.cidr] ++ cfg.extraSubnets;
|
||||
tcp = [myData.ports.exporters.node];
|
||||
subnets = [ myData.subnets.tailscale.cidr ] ++ cfg.extraSubnets;
|
||||
tcp = [ myData.ports.exporters.node ];
|
||||
}
|
||||
];
|
||||
};
|
||||
|
@ -3,11 +3,14 @@
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.mj.services.nsd-acme;
|
||||
mkHook = zone: let
|
||||
fullZone = "_acme-endpoint.${zone}";
|
||||
in
|
||||
mkHook =
|
||||
zone:
|
||||
let
|
||||
fullZone = "_acme-endpoint.${zone}";
|
||||
in
|
||||
pkgs.writeShellScript "nsd-acme-hook" ''
|
||||
set -euo pipefail
|
||||
METHOD=$1
|
||||
@ -48,38 +51,42 @@
|
||||
;;
|
||||
esac
|
||||
'';
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.mj.services.nsd-acme = with lib.types; {
|
||||
enable = lib.mkEnableOption "enable acme certs via nsd";
|
||||
|
||||
zones = lib.mkOption {
|
||||
default = {};
|
||||
type = attrsOf (submodule (
|
||||
{name, ...}: {
|
||||
options = {
|
||||
accountKey = lib.mkOption {type = path;};
|
||||
days = lib.mkOption {
|
||||
type = int;
|
||||
default = 30;
|
||||
};
|
||||
staging = lib.mkOption {
|
||||
type = bool;
|
||||
default = false;
|
||||
};
|
||||
default = { };
|
||||
type = attrsOf (
|
||||
submodule (
|
||||
{ name, ... }:
|
||||
{
|
||||
options = {
|
||||
accountKey = lib.mkOption { type = path; };
|
||||
days = lib.mkOption {
|
||||
type = int;
|
||||
default = 30;
|
||||
};
|
||||
staging = lib.mkOption {
|
||||
type = bool;
|
||||
default = false;
|
||||
};
|
||||
|
||||
# Warning: paths here are here to be read from. Changing them will
|
||||
# not place the files somewhere else.
|
||||
certFile = lib.mkOption {
|
||||
type = str;
|
||||
default = "/var/lib/nsd-acme/${name}/${name}/cert.pem";
|
||||
# Warning: paths here are here to be read from. Changing them will
|
||||
# not place the files somewhere else.
|
||||
certFile = lib.mkOption {
|
||||
type = str;
|
||||
default = "/var/lib/nsd-acme/${name}/${name}/cert.pem";
|
||||
};
|
||||
keyFile = lib.mkOption {
|
||||
type = str;
|
||||
default = "/var/lib/nsd-acme/${name}/private/${name}/key.pem";
|
||||
};
|
||||
};
|
||||
keyFile = lib.mkOption {
|
||||
type = str;
|
||||
default = "/var/lib/nsd-acme/${name}/private/${name}/key.pem";
|
||||
};
|
||||
};
|
||||
}
|
||||
));
|
||||
}
|
||||
)
|
||||
);
|
||||
};
|
||||
};
|
||||
|
||||
@ -93,121 +100,122 @@ in {
|
||||
'';
|
||||
|
||||
systemd = {
|
||||
tmpfiles.rules = ["d /var/lib/nsd/acmezones 0755 nsd nsd -"];
|
||||
tmpfiles.rules = [ "d /var/lib/nsd/acmezones 0755 nsd nsd -" ];
|
||||
|
||||
services =
|
||||
{
|
||||
nsd-control-setup = {
|
||||
requiredBy = ["nsd.service"];
|
||||
before = ["nsd.service"];
|
||||
unitConfig.ConditionPathExists = let
|
||||
rc = config.services.nsd.remoteControl;
|
||||
in [
|
||||
"|!${rc.controlKeyFile}"
|
||||
"|!${rc.controlCertFile}"
|
||||
"|!${rc.serverKeyFile}"
|
||||
"|!${rc.serverCertFile}"
|
||||
];
|
||||
requiredBy = [ "nsd.service" ];
|
||||
before = [ "nsd.service" ];
|
||||
unitConfig.ConditionPathExists =
|
||||
let
|
||||
rc = config.services.nsd.remoteControl;
|
||||
in
|
||||
[
|
||||
"|!${rc.controlKeyFile}"
|
||||
"|!${rc.controlCertFile}"
|
||||
"|!${rc.serverKeyFile}"
|
||||
"|!${rc.serverCertFile}"
|
||||
];
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
UMask = 0077;
|
||||
UMask = 77;
|
||||
};
|
||||
script = ''
|
||||
${pkgs.nsd}/bin/nsd-control-setup
|
||||
chown nsd:nsd /etc/nsd/nsd_{control,server}.{key,pem}
|
||||
'';
|
||||
path = [pkgs.openssl];
|
||||
path = [ pkgs.openssl ];
|
||||
};
|
||||
}
|
||||
// lib.mapAttrs'
|
||||
(
|
||||
// lib.mapAttrs' (
|
||||
zone: cfg:
|
||||
lib.nameValuePair "nsd-acme-${zone}" {
|
||||
description = "dns-01 acme update for ${zone}";
|
||||
path = [pkgs.openssh pkgs.nsd];
|
||||
preStart = ''
|
||||
mkdir -p "$STATE_DIRECTORY/private"
|
||||
ln -sf "$CREDENTIALS_DIRECTORY/letsencrypt-account-key" \
|
||||
"$STATE_DIRECTORY/private/key.pem"
|
||||
'';
|
||||
serviceConfig = {
|
||||
ExecStart = let
|
||||
lib.nameValuePair "nsd-acme-${zone}" {
|
||||
description = "dns-01 acme update for ${zone}";
|
||||
path = [
|
||||
pkgs.openssh
|
||||
pkgs.nsd
|
||||
];
|
||||
preStart = ''
|
||||
mkdir -p "$STATE_DIRECTORY/private"
|
||||
ln -sf "$CREDENTIALS_DIRECTORY/letsencrypt-account-key" \
|
||||
"$STATE_DIRECTORY/private/key.pem"
|
||||
'';
|
||||
serviceConfig = {
|
||||
ExecStart =
|
||||
let
|
||||
hook = mkHook zone;
|
||||
days = builtins.toString cfg.days;
|
||||
in "${pkgs.uacme}/bin/uacme -c \${STATE_DIRECTORY} --verbose --days ${days} --hook ${hook} ${lib.optionalString cfg.staging "--staging"} issue ${zone}";
|
||||
in
|
||||
"${pkgs.uacme}/bin/uacme -c \${STATE_DIRECTORY} --verbose --days ${days} --hook ${hook} ${lib.optionalString cfg.staging "--staging"} issue ${zone}";
|
||||
|
||||
UMask = "0022";
|
||||
User = "nsd";
|
||||
Group = "nsd";
|
||||
StateDirectory = "nsd-acme/${zone}";
|
||||
LoadCredential = ["letsencrypt-account-key:${cfg.accountKey}"];
|
||||
ReadWritePaths = ["/var/lib/nsd/acmezones"];
|
||||
SuccessExitStatus = [0 1];
|
||||
UMask = "0022";
|
||||
User = "nsd";
|
||||
Group = "nsd";
|
||||
StateDirectory = "nsd-acme/${zone}";
|
||||
LoadCredential = [ "letsencrypt-account-key:${cfg.accountKey}" ];
|
||||
ReadWritePaths = [ "/var/lib/nsd/acmezones" ];
|
||||
SuccessExitStatus = [
|
||||
0
|
||||
1
|
||||
];
|
||||
|
||||
# from nixos/modules/security/acme/default.nix
|
||||
ProtectSystem = "strict";
|
||||
PrivateTmp = true;
|
||||
CapabilityBoundingSet = [""];
|
||||
DevicePolicy = "closed";
|
||||
LockPersonality = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
NoNewPrivileges = true;
|
||||
PrivateDevices = true;
|
||||
ProtectClock = true;
|
||||
ProtectHome = true;
|
||||
ProtectHostname = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectProc = "invisible";
|
||||
ProcSubset = "pid";
|
||||
RemoveIPC = true;
|
||||
# "cannot get devices"
|
||||
#RestrictAddressFamilies = [
|
||||
# "AF_INET"
|
||||
# "AF_INET6"
|
||||
#];
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
SystemCallArchitectures = "native";
|
||||
SystemCallFilter = [
|
||||
# 1. allow a reasonable set of syscalls
|
||||
"@system-service @resources"
|
||||
# 2. and deny unreasonable ones
|
||||
"~@privileged"
|
||||
# 3. then allow the required subset within denied groups
|
||||
"@chown"
|
||||
];
|
||||
};
|
||||
}
|
||||
)
|
||||
cfg.zones;
|
||||
# from nixos/modules/security/acme/default.nix
|
||||
ProtectSystem = "strict";
|
||||
PrivateTmp = true;
|
||||
CapabilityBoundingSet = [ "" ];
|
||||
DevicePolicy = "closed";
|
||||
LockPersonality = true;
|
||||
MemoryDenyWriteExecute = true;
|
||||
NoNewPrivileges = true;
|
||||
PrivateDevices = true;
|
||||
ProtectClock = true;
|
||||
ProtectHome = true;
|
||||
ProtectHostname = true;
|
||||
ProtectControlGroups = true;
|
||||
ProtectKernelLogs = true;
|
||||
ProtectKernelModules = true;
|
||||
ProtectKernelTunables = true;
|
||||
ProtectProc = "invisible";
|
||||
ProcSubset = "pid";
|
||||
RemoveIPC = true;
|
||||
# "cannot get devices"
|
||||
#RestrictAddressFamilies = [
|
||||
# "AF_INET"
|
||||
# "AF_INET6"
|
||||
#];
|
||||
RestrictNamespaces = true;
|
||||
RestrictRealtime = true;
|
||||
RestrictSUIDSGID = true;
|
||||
SystemCallArchitectures = "native";
|
||||
SystemCallFilter = [
|
||||
# 1. allow a reasonable set of syscalls
|
||||
"@system-service @resources"
|
||||
# 2. and deny unreasonable ones
|
||||
"~@privileged"
|
||||
# 3. then allow the required subset within denied groups
|
||||
"@chown"
|
||||
];
|
||||
};
|
||||
}
|
||||
) cfg.zones;
|
||||
|
||||
timers =
|
||||
lib.mapAttrs'
|
||||
(
|
||||
zone: _:
|
||||
lib.nameValuePair "nsd-acme-${zone}" {
|
||||
description = "nsd-acme for zone ${zone}";
|
||||
wantedBy = ["timers.target"];
|
||||
timerConfig = {
|
||||
OnCalendar = "*-*-* 01:30";
|
||||
};
|
||||
after = ["network-online.target"];
|
||||
wants = ["network-online.target"];
|
||||
}
|
||||
)
|
||||
cfg.zones;
|
||||
timers = lib.mapAttrs' (
|
||||
zone: _:
|
||||
lib.nameValuePair "nsd-acme-${zone}" {
|
||||
description = "nsd-acme for zone ${zone}";
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnCalendar = "*-*-* 01:30";
|
||||
};
|
||||
after = [ "network-online.target" ];
|
||||
wants = [ "network-online.target" ];
|
||||
}
|
||||
) cfg.zones;
|
||||
};
|
||||
|
||||
mj.base.unitstatus.units =
|
||||
lib.mkIf config.mj.base.unitstatus.enable
|
||||
(
|
||||
["nsd-control-setup"]
|
||||
++ map (z: "nsd-acme-${z}")
|
||||
(lib.attrNames cfg.zones)
|
||||
);
|
||||
mj.base.unitstatus.units = lib.mkIf config.mj.base.unitstatus.enable (
|
||||
[ "nsd-control-setup" ] ++ map (z: "nsd-acme-${z}") (lib.attrNames cfg.zones)
|
||||
);
|
||||
};
|
||||
}
|
||||
|
@ -4,14 +4,15 @@
myData,
pkgs,
...
}: {
}:
{
options.mj.services.postfix = with lib.types; {
enable = lib.mkEnableOption "Enable postfix";
saslPasswdPath = lib.mkOption {type = path;};
saslPasswdPath = lib.mkOption { type = path; };
};

config = lib.mkIf config.mj.services.postfix.enable {
environment.systemPackages = [pkgs.mailutils];
environment.systemPackages = [ pkgs.mailutils ];

services.postfix = {
enable = true;
@ -1,21 +1,23 @@
{ config, lib, ... }:
{
config,
lib,
...
}: {
options.mj.services.remote-builder = with lib.types; {
server = {
enable = lib.mkEnableOption "Enable remote builder server";
uidgid = lib.mkOption {type = int;};
sshAllowSubnet = lib.mkOption {type = str;};
publicKeys = lib.mkOption {type = listOf str;};
uidgid = lib.mkOption { type = int; };
sshAllowSubnet = lib.mkOption { type = str; };
publicKeys = lib.mkOption { type = listOf str; };
};
client = {
enable = lib.mkEnableOption "Enable remote builder client";
system = lib.mkOption {type = enum ["aarch64-linux" "x86_64-linux"];};
hostName = lib.mkOption {type = str;};
sshKey = lib.mkOption {type = path;};
supportedFeatures = lib.mkOption {type = listOf str;};
system = lib.mkOption {
type = enum [
"aarch64-linux"
"x86_64-linux"
];
};
hostName = lib.mkOption { type = str; };
sshKey = lib.mkOption { type = path; };
supportedFeatures = lib.mkOption { type = listOf str; };
};
};

@ -24,42 +26,43 @@
|
||||
let
|
||||
cfg = config.mj.services.remote-builder.server;
|
||||
in
|
||||
lib.mkIf cfg.enable {
|
||||
users.users.remote-builder = {
|
||||
description = "Remote Builder";
|
||||
home = "/var/lib/remote-builder";
|
||||
shell = "/bin/sh";
|
||||
group = "remote-builder";
|
||||
isSystemUser = true;
|
||||
createHome = true;
|
||||
uid = cfg.uidgid;
|
||||
openssh.authorizedKeys.keys =
|
||||
map (
|
||||
k: "from=\"${cfg.sshAllowSubnet}\" ${k}"
|
||||
)
|
||||
cfg.publicKeys;
|
||||
};
|
||||
users.groups.remote-builder.gid = cfg.uidgid;
|
||||
nix.settings.trusted-users = ["remote-builder"];
|
||||
}
|
||||
lib.mkIf cfg.enable {
|
||||
users.users.remote-builder = {
|
||||
description = "Remote Builder";
|
||||
home = "/var/lib/remote-builder";
|
||||
shell = "/bin/sh";
|
||||
group = "remote-builder";
|
||||
isSystemUser = true;
|
||||
createHome = true;
|
||||
uid = cfg.uidgid;
|
||||
openssh.authorizedKeys.keys = map (k: ''from="${cfg.sshAllowSubnet}" ${k}'') cfg.publicKeys;
|
||||
};
|
||||
users.groups.remote-builder.gid = cfg.uidgid;
|
||||
nix.settings.trusted-users = [ "remote-builder" ];
|
||||
}
|
||||
)
|
||||
(
|
||||
let
|
||||
cfg = config.mj.services.remote-builder.client;
|
||||
in
|
||||
lib.mkIf cfg.enable {
|
||||
nix = {
|
||||
buildMachines = [
|
||||
{
|
||||
inherit (cfg) hostName system sshKey supportedFeatures;
|
||||
protocol = "ssh-ng";
|
||||
sshUser = "remote-builder";
|
||||
}
|
||||
];
|
||||
distributedBuilds = true;
|
||||
extraOptions = ''builders-use-substitutes = true'';
|
||||
};
|
||||
}
|
||||
lib.mkIf cfg.enable {
|
||||
nix = {
|
||||
buildMachines = [
|
||||
{
|
||||
inherit (cfg)
|
||||
hostName
|
||||
system
|
||||
sshKey
|
||||
supportedFeatures
|
||||
;
|
||||
protocol = "ssh-ng";
|
||||
sshUser = "remote-builder";
|
||||
}
|
||||
];
|
||||
distributedBuilds = true;
|
||||
extraOptions = "builders-use-substitutes = true";
|
||||
};
|
||||
}
|
||||
)
|
||||
];
|
||||
}
|
||||
|
@ -3,7 +3,8 @@
lib,
myData,
...
}: {
}:
{
options.mj.services.sshguard = with lib.types; {
enable = lib.mkOption {
type = bool;
@ -15,9 +16,10 @@
services.sshguard = {
enable = true;
blocktime = 900;
whitelist =
["192.168.0.0/16" myData.subnets.tailscale.cidr]
++ (lib.catAttrs "publicIP" (lib.attrValues myData.hosts));
whitelist = [
"192.168.0.0/16"
myData.subnets.tailscale.cidr
] ++ (lib.catAttrs "publicIP" (lib.attrValues myData.hosts));
};
};
}
@ -3,7 +3,8 @@
|
||||
lib,
|
||||
myData,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
cfg = config.mj.services.syncthing;
|
||||
|
||||
devices = {
|
||||
@ -20,17 +21,29 @@
|
||||
};
|
||||
folders = {
|
||||
Books = {
|
||||
devices = ["fwminex" "vno1-oh2" "mxp10"];
|
||||
devices = [
|
||||
"fwminex"
|
||||
"vno1-oh2"
|
||||
"mxp10"
|
||||
];
|
||||
id = "8lk0n-mm63y";
|
||||
label = "Books";
|
||||
};
|
||||
Mail = {
|
||||
devices = ["fwminex" "vno1-oh2"];
|
||||
devices = [
|
||||
"fwminex"
|
||||
"vno1-oh2"
|
||||
];
|
||||
id = "66fmz-x6f1a";
|
||||
label = "Mail";
|
||||
};
|
||||
M-Active = {
|
||||
devices = ["mxp10" "fwminex" "mtworx" "vno1-oh2"];
|
||||
devices = [
|
||||
"mxp10"
|
||||
"fwminex"
|
||||
"mtworx"
|
||||
"vno1-oh2"
|
||||
];
|
||||
id = "f6fma-unkxq";
|
||||
label = "M-Active";
|
||||
versioning = {
|
||||
@ -42,99 +55,157 @@
|
||||
};
|
||||
};
|
||||
M-Documents = {
|
||||
devices = ["fwminex" "vno1-oh2"];
|
||||
devices = [
|
||||
"fwminex"
|
||||
"vno1-oh2"
|
||||
];
|
||||
id = "4fu7z-z6es2";
|
||||
label = "M-Documents";
|
||||
};
|
||||
Vaikai = {
|
||||
devices = ["vno1-vinc" "sqq1-desk" "fwminex" "mtworx" "vno1-oh2" "v-kfire" "rzj-744P2PE" "mxp10" "a-kfire"];
|
||||
devices = [
|
||||
"vno1-vinc"
|
||||
"sqq1-desk"
|
||||
"fwminex"
|
||||
"mtworx"
|
||||
"vno1-oh2"
|
||||
"v-kfire"
|
||||
"rzj-744P2PE"
|
||||
"mxp10"
|
||||
"a-kfire"
|
||||
];
|
||||
id = "xbrfr-mhszm";
|
||||
label = "Vaikai";
|
||||
};
|
||||
M-Camera = {
|
||||
devices = ["mxp10" "fwminex" "mtworx" "vno1-oh2"];
|
||||
devices = [
|
||||
"mxp10"
|
||||
"fwminex"
|
||||
"mtworx"
|
||||
"vno1-oh2"
|
||||
];
|
||||
id = "pixel_xl_dtm3-photos";
|
||||
label = "M-Camera";
|
||||
};
|
||||
R-Documents = {
|
||||
devices = ["rzj-744P2PE" "vno1-oh2"];
|
||||
devices = [
|
||||
"rzj-744P2PE"
|
||||
"vno1-oh2"
|
||||
];
|
||||
id = "nm23h-aog6k";
|
||||
label = "R-Documents";
|
||||
};
|
||||
Pictures = {
|
||||
devices = ["fwminex" "vno1-oh2"];
|
||||
devices = [
|
||||
"fwminex"
|
||||
"vno1-oh2"
|
||||
];
|
||||
id = "d3hur-cbzyw";
|
||||
label = "Pictures";
|
||||
};
|
||||
Music = {
|
||||
devices = ["fwminex" "mtworx" "mxp10" "vno1-oh2"];
|
||||
devices = [
|
||||
"fwminex"
|
||||
"mtworx"
|
||||
"mxp10"
|
||||
"vno1-oh2"
|
||||
];
|
||||
id = "tg94v-cqcwr";
|
||||
label = "music";
|
||||
};
|
||||
video-shared = {
|
||||
devices = ["mxp10" "mtworx" "fwminex" "vno1-oh2"];
|
||||
devices = [
|
||||
"mxp10"
|
||||
"mtworx"
|
||||
"fwminex"
|
||||
"vno1-oh2"
|
||||
];
|
||||
id = "byzmw-f6zhg";
|
||||
label = "video-shared";
|
||||
};
|
||||
stud-cache = {
|
||||
devices = ["fwminex" "vno1-oh2"];
|
||||
devices = [
|
||||
"fwminex"
|
||||
"vno1-oh2"
|
||||
];
|
||||
id = "2kq7n-jqzxj";
|
||||
label = "stud-cache";
|
||||
};
|
||||
M-R = {
|
||||
devices = ["fwminex" "rzj-744P2PE" "mxp10" "vno1-oh2" "mtworx"];
|
||||
devices = [
|
||||
"fwminex"
|
||||
"rzj-744P2PE"
|
||||
"mxp10"
|
||||
"vno1-oh2"
|
||||
"mtworx"
|
||||
];
|
||||
id = "evgn9-ahngz";
|
||||
label = "M-R";
|
||||
};
|
||||
Irenos = {
|
||||
devices = ["sqq1-desk" "vno1-oh2" "vno2-irena"];
|
||||
devices = [
|
||||
"sqq1-desk"
|
||||
"vno1-oh2"
|
||||
"vno2-irena"
|
||||
];
|
||||
id = "wuwai-qkcqj";
|
||||
label = "Irenos";
|
||||
};
|
||||
www-fwminex = {
|
||||
devices = ["fwminex" "vno1-oh2"];
|
||||
devices = [
|
||||
"fwminex"
|
||||
"vno1-oh2"
|
||||
];
|
||||
id = "7z9sw-2nubh";
|
||||
label = "www-fwminex";
|
||||
};
|
||||
www-mxp10 = {
|
||||
devices = ["mxp10" "vno1-oh2"];
|
||||
devices = [
|
||||
"mxp10"
|
||||
"vno1-oh2"
|
||||
];
|
||||
id = "gqrtz-prx9h";
|
||||
label = "www-mxp10";
|
||||
};
|
||||
mykolo = {
|
||||
devices = ["mxp10"];
|
||||
devices = [ "mxp10" ];
|
||||
id = "wslmq-fyw4w";
|
||||
label = "mykolo";
|
||||
};
|
||||
};
|
||||
in {
|
||||
in
|
||||
{
|
||||
options.mj.services.syncthing = with lib.types; {
|
||||
enable = lib.mkEnableOption "Enable services syncthing settings";
|
||||
user = lib.mkOption {type = str;};
|
||||
group = lib.mkOption {type = str;};
|
||||
dataDir = lib.mkOption {type = path;};
|
||||
user = lib.mkOption { type = str; };
|
||||
group = lib.mkOption { type = str; };
|
||||
dataDir = lib.mkOption { type = path; };
|
||||
};
|
||||
|
||||
config = lib.mkIf cfg.enable {
|
||||
mj.services.friendlyport.ports = [
|
||||
{
|
||||
subnets = myData.subnets.motiejus.cidrs;
|
||||
tcp = [8384];
|
||||
tcp = [ 8384 ];
|
||||
}
|
||||
];
|
||||
|
||||
services.syncthing = {
|
||||
inherit (cfg) enable user group dataDir;
|
||||
inherit (cfg)
|
||||
enable
|
||||
user
|
||||
group
|
||||
dataDir
|
||||
;
|
||||
openDefaultPorts = true;
|
||||
key = config.age.secrets.syncthing-key.path;
|
||||
cert = config.age.secrets.syncthing-cert.path;
|
||||
|
||||
settings = {
|
||||
devices =
|
||||
{}
|
||||
{ }
|
||||
// (lib.optionalAttrs (config.networking.hostName == "vno1-oh2") {
|
||||
inherit
|
||||
(devices)
|
||||
inherit (devices)
|
||||
fwminex
|
||||
mtworx
|
||||
vno1-oh2
|
||||
@ -148,8 +219,7 @@ in {
|
||||
;
|
||||
})
|
||||
// (lib.optionalAttrs (config.networking.hostName == "fwminex") {
|
||||
inherit
|
||||
(devices)
|
||||
inherit (devices)
|
||||
fwminex
|
||||
mtworx
|
||||
vno1-oh2
|
||||
@ -162,8 +232,7 @@ in {
|
||||
;
|
||||
})
|
||||
// (lib.optionalAttrs (config.networking.hostName == "mtworx") {
|
||||
inherit
|
||||
(devices)
|
||||
inherit (devices)
|
||||
mtworx
|
||||
fwminex
|
||||
vno1-oh2
|
||||
@ -175,55 +244,50 @@ in {
|
||||
v-kfire
|
||||
;
|
||||
})
|
||||
// {};
|
||||
folders = with folders;
|
||||
{}
|
||||
// (
|
||||
lib.optionalAttrs (config.networking.hostName == "vno1-oh2") {
|
||||
"/var/www/dl/tel" = www-mxp10;
|
||||
"/var/www/dl/fwminex" = www-fwminex;
|
||||
"/var/www/dl/mykolo" = mykolo;
|
||||
"${cfg.dataDir}/annex2/Books" = Books;
|
||||
"${cfg.dataDir}/annex2/Mail" = Mail;
|
||||
"${cfg.dataDir}/annex2/M-Active" = M-Active;
|
||||
"${cfg.dataDir}/annex2/M-Camera" = M-Camera;
|
||||
"${cfg.dataDir}/annex2/M-Documents" = M-Documents;
|
||||
"${cfg.dataDir}/annex2/R-Documents" = R-Documents;
|
||||
"${cfg.dataDir}/annex2/Pictures" = Pictures;
|
||||
"${cfg.dataDir}/annex2/M-R" = M-R;
|
||||
"${cfg.dataDir}/stud-cache" = stud-cache;
|
||||
"${cfg.dataDir}/video/shared" = video-shared;
|
||||
"${cfg.dataDir}/video/Vaikai" = Vaikai;
|
||||
"${cfg.dataDir}/music" = Music;
|
||||
"${cfg.dataDir}/irenos" = Irenos;
|
||||
}
|
||||
)
|
||||
// (
|
||||
lib.optionalAttrs (config.networking.hostName == "mtworx") {
|
||||
"${cfg.dataDir}/M-Active" = M-Active;
|
||||
"${cfg.dataDir}/M-Camera" = M-Camera;
|
||||
"${cfg.dataDir}/M-R" = M-R;
|
||||
"${cfg.dataDir}/Vaikai" = Vaikai;
|
||||
"${cfg.dataDir}/Video" = video-shared;
|
||||
"${cfg.dataDir}/music" = Music;
|
||||
}
|
||||
)
|
||||
// (
|
||||
lib.optionalAttrs (config.networking.hostName == "fwminex") {
|
||||
"${cfg.dataDir}/.cache/evolution" = Mail;
|
||||
"${cfg.dataDir}/Books" = Books;
|
||||
"${cfg.dataDir}/M-Active" = M-Active;
|
||||
"${cfg.dataDir}/M-Documents" = M-Documents;
|
||||
"${cfg.dataDir}/M-Camera" = M-Camera;
|
||||
"${cfg.dataDir}/Pictures" = Pictures;
|
||||
"${cfg.dataDir}/Music" = Music;
|
||||
"${cfg.dataDir}/M-R" = M-R;
|
||||
"${cfg.dataDir}/Vaikai" = Vaikai;
|
||||
"${cfg.dataDir}/Video" = video-shared;
|
||||
"${cfg.dataDir}/stud-cache" = stud-cache;
|
||||
"${cfg.dataDir}/www" = www-fwminex;
|
||||
}
|
||||
);
|
||||
// { };
|
||||
folders =
|
||||
with folders;
|
||||
{ }
|
||||
// (lib.optionalAttrs (config.networking.hostName == "vno1-oh2") {
|
||||
"/var/www/dl/tel" = www-mxp10;
|
||||
"/var/www/dl/fwminex" = www-fwminex;
|
||||
"/var/www/dl/mykolo" = mykolo;
|
||||
"${cfg.dataDir}/annex2/Books" = Books;
|
||||
"${cfg.dataDir}/annex2/Mail" = Mail;
|
||||
"${cfg.dataDir}/annex2/M-Active" = M-Active;
|
||||
"${cfg.dataDir}/annex2/M-Camera" = M-Camera;
|
||||
"${cfg.dataDir}/annex2/M-Documents" = M-Documents;
|
||||
"${cfg.dataDir}/annex2/R-Documents" = R-Documents;
|
||||
"${cfg.dataDir}/annex2/Pictures" = Pictures;
|
||||
"${cfg.dataDir}/annex2/M-R" = M-R;
|
||||
"${cfg.dataDir}/stud-cache" = stud-cache;
|
||||
"${cfg.dataDir}/video/shared" = video-shared;
|
||||
"${cfg.dataDir}/video/Vaikai" = Vaikai;
|
||||
"${cfg.dataDir}/music" = Music;
|
||||
"${cfg.dataDir}/irenos" = Irenos;
|
||||
})
|
||||
// (lib.optionalAttrs (config.networking.hostName == "mtworx") {
|
||||
"${cfg.dataDir}/M-Active" = M-Active;
|
||||
"${cfg.dataDir}/M-Camera" = M-Camera;
|
||||
"${cfg.dataDir}/M-R" = M-R;
|
||||
"${cfg.dataDir}/Vaikai" = Vaikai;
|
||||
"${cfg.dataDir}/Video" = video-shared;
|
||||
"${cfg.dataDir}/music" = Music;
|
||||
})
|
||||
// (lib.optionalAttrs (config.networking.hostName == "fwminex") {
|
||||
"${cfg.dataDir}/.cache/evolution" = Mail;
|
||||
"${cfg.dataDir}/Books" = Books;
|
||||
"${cfg.dataDir}/M-Active" = M-Active;
|
||||
"${cfg.dataDir}/M-Documents" = M-Documents;
|
||||
"${cfg.dataDir}/M-Camera" = M-Camera;
|
||||
"${cfg.dataDir}/Pictures" = Pictures;
|
||||
"${cfg.dataDir}/Music" = Music;
|
||||
"${cfg.dataDir}/M-R" = M-R;
|
||||
"${cfg.dataDir}/Vaikai" = Vaikai;
|
||||
"${cfg.dataDir}/Video" = video-shared;
|
||||
"${cfg.dataDir}/stud-cache" = stud-cache;
|
||||
"${cfg.dataDir}/www" = www-fwminex;
|
||||
});
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@ -3,10 +3,18 @@
lib,
myData,
...
}: let
}:
let
cfg = config.mj.services.tailscale;
inherit (lib) mkMerge types mkEnableOption mkOption mkIf;
in {
inherit (lib)
mkMerge
types
mkEnableOption
mkOption
mkIf
;
in
{
options.mj.services.tailscale = with types; {
enable = mkEnableOption "Enable tailscale";
# https://github.com/tailscale/tailscale/issues/1548
@ -20,13 +28,11 @@ in {
{
services.tailscale = {
enable = true;
extraUpFlags = ["--operator=${config.mj.username}"];
extraUpFlags = [ "--operator=${config.mj.username}" ];
};
networking.firewall.checkReversePath = "loose";
networking.firewall.allowedUDPPorts = [myData.ports.tailscale];
networking.firewall.allowedUDPPorts = [ myData.ports.tailscale ];
}
(mkIf (!cfg.verboseLogs) {
systemd.services.tailscaled.serviceConfig.StandardOutput = "null";
})
(mkIf (!cfg.verboseLogs) { systemd.services.tailscaled.serviceConfig.StandardOutput = "null"; })
]);
}
@ -3,7 +3,8 @@
|
||||
config,
|
||||
pkgs,
|
||||
...
|
||||
}: {
|
||||
}:
|
||||
{
|
||||
options.mj.services.wifibackup = with lib.types; {
|
||||
enable = lib.mkEnableOption "enable wifi code backups to M-Active";
|
||||
fromPath = lib.mkOption {
|
||||
@ -20,11 +21,12 @@
|
||||
};
|
||||
};
|
||||
|
||||
config = with config.mj.services.wifibackup;
|
||||
config =
|
||||
with config.mj.services.wifibackup;
|
||||
lib.mkIf enable {
|
||||
systemd.timers.wifibackup = {
|
||||
description = "wifibackup to M-Active";
|
||||
wantedBy = ["timers.target"];
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig.OnCalendar = "*-*-* 22:00:00 UTC";
|
||||
};
|
||||
systemd.services.wifibackup = {
|
||||
@ -32,25 +34,30 @@
|
||||
serviceConfig = {
|
||||
Type = "oneshot";
|
||||
User = "root";
|
||||
SuccessExitStatus = [0 1];
|
||||
SuccessExitStatus = [
|
||||
0
|
||||
1
|
||||
];
|
||||
};
|
||||
script = let
|
||||
knownHostsCmd = pkgs.writeShellScript "known-hosts-localhost" ''
|
||||
echo -n "localhost "
|
||||
exec ${pkgs.coreutils}/bin/cat /etc/ssh/ssh_host_ed25519_key.pub
|
||||
'';
|
||||
in ''
|
||||
sed -i -E '/^(uuid|interface-name)=/d' ${fromPath}/*.nmconnection
|
||||
script =
|
||||
let
|
||||
knownHostsCmd = pkgs.writeShellScript "known-hosts-localhost" ''
|
||||
echo -n "localhost "
|
||||
exec ${pkgs.coreutils}/bin/cat /etc/ssh/ssh_host_ed25519_key.pub
|
||||
'';
|
||||
in
|
||||
''
|
||||
sed -i -E '/^(uuid|interface-name)=/d' ${fromPath}/*.nmconnection
|
||||
|
||||
exec ${pkgs.unison}/bin/unison \
|
||||
-sshcmd ${pkgs.openssh}/bin/ssh \
|
||||
-sshargs "-i /etc/ssh/ssh_host_ed25519_key -o KnownHostsCommand=${knownHostsCmd} -o UserKnownHostsFile=none -o GlobalKnownHostsFile=/dev/null" \
|
||||
-batch \
|
||||
-backuploc local \
|
||||
-backup "Name *" \
|
||||
${fromPath} \
|
||||
ssh://${toUser}@localhost/${toPath}/
|
||||
'';
|
||||
exec ${pkgs.unison}/bin/unison \
|
||||
-sshcmd ${pkgs.openssh}/bin/ssh \
|
||||
-sshargs "-i /etc/ssh/ssh_host_ed25519_key -o KnownHostsCommand=${knownHostsCmd} -o UserKnownHostsFile=none -o GlobalKnownHostsFile=/dev/null" \
|
||||
-batch \
|
||||
-backuploc local \
|
||||
-backup "Name *" \
|
||||
${fromPath} \
|
||||
ssh://${toUser}@localhost/${toPath}/
|
||||
'';
|
||||
};
|
||||
};
|
||||
}
|
||||
|
@ -3,85 +3,84 @@
|
||||
lib,
|
||||
pkgs,
|
||||
...
|
||||
}: let
|
||||
mkUnlock = {
|
||||
sshEndpoint,
|
||||
pingEndpoint,
|
||||
remotePubkey,
|
||||
pwFile,
|
||||
pingTimeoutSec,
|
||||
}: let
|
||||
timeoutStr = builtins.toString pingTimeoutSec;
|
||||
in ''
|
||||
set -x
|
||||
# if host is reachable via "pingEndpoint", which, we presume is
|
||||
# VPN (which implies the rootfs has been unlocked for VPN to work),
|
||||
# exit successfully.
|
||||
${pkgs.iputils}/bin/ping -q -W ${timeoutStr} -c 1 ${pingEndpoint} && exit 0
|
||||
}:
|
||||
let
|
||||
mkUnlock =
|
||||
{
|
||||
sshEndpoint,
|
||||
pingEndpoint,
|
||||
remotePubkey,
|
||||
pwFile,
|
||||
pingTimeoutSec,
|
||||
}:
|
||||
let
|
||||
timeoutStr = builtins.toString pingTimeoutSec;
|
||||
in
|
||||
''
|
||||
set -x
|
||||
# if host is reachable via "pingEndpoint", which, we presume is
|
||||
# VPN (which implies the rootfs has been unlocked for VPN to work),
|
||||
# exit successfully.
|
||||
${pkgs.iputils}/bin/ping -q -W ${timeoutStr} -c 1 ${pingEndpoint} && exit 0
|
||||
|
||||
exec ${pkgs.openssh}/bin/ssh \
|
||||
-i /etc/ssh/ssh_host_ed25519_key \
|
||||
-o UserKnownHostsFile=none \
|
||||
-o GlobalKnownHostsFile=/dev/null \
|
||||
-o KnownHostsCommand="${pkgs.coreutils}/bin/echo ${sshEndpoint} ${remotePubkey}" \
|
||||
root@${sshEndpoint} < "${pwFile}"
|
||||
'';
|
||||
in {
|
||||
exec ${pkgs.openssh}/bin/ssh \
|
||||
-i /etc/ssh/ssh_host_ed25519_key \
|
||||
-o UserKnownHostsFile=none \
|
||||
-o GlobalKnownHostsFile=/dev/null \
|
||||
-o KnownHostsCommand="${pkgs.coreutils}/bin/echo ${sshEndpoint} ${remotePubkey}" \
|
||||
root@${sshEndpoint} < "${pwFile}"
|
||||
'';
|
||||
in
|
||||
{
|
||||
options.mj.services.zfsunlock = with lib.types; {
|
||||
enable = lib.mkEnableOption "remotely unlock zfs-encrypted root volumes";
|
||||
|
||||
targets = lib.mkOption {
|
||||
default = {};
|
||||
default = { };
|
||||
type = attrsOf (submodule {
|
||||
options = {
|
||||
sshEndpoint = lib.mkOption {type = str;};
|
||||
pingEndpoint = lib.mkOption {type = str;};
|
||||
sshEndpoint = lib.mkOption { type = str; };
|
||||
pingEndpoint = lib.mkOption { type = str; };
|
||||
pingTimeoutSec = lib.mkOption {
|
||||
type = int;
|
||||
default = 20;
|
||||
};
|
||||
remotePubkey = lib.mkOption {type = str;};
|
||||
pwFile = lib.mkOption {type = path;};
|
||||
startAt = lib.mkOption {type = either str (listOf str);};
|
||||
remotePubkey = lib.mkOption { type = str; };
|
||||
pwFile = lib.mkOption { type = path; };
|
||||
startAt = lib.mkOption { type = either str (listOf str); };
|
||||
};
|
||||
});
|
||||
};
|
||||
};
|
||||
|
||||
config = lib.mkIf config.mj.services.zfsunlock.enable {
|
||||
systemd.services =
|
||||
lib.mapAttrs'
|
||||
(
|
||||
name: cfg:
|
||||
lib.nameValuePair "zfsunlock-${name}" {
|
||||
description = "zfsunlock service for ${name}";
|
||||
script = mkUnlock (builtins.removeAttrs cfg ["startAt"]);
|
||||
serviceConfig = {
|
||||
User = "root";
|
||||
ProtectSystem = "strict";
|
||||
};
|
||||
}
|
||||
)
|
||||
config.mj.services.zfsunlock.targets;
|
||||
systemd.services = lib.mapAttrs' (
|
||||
name: cfg:
|
||||
lib.nameValuePair "zfsunlock-${name}" {
|
||||
description = "zfsunlock service for ${name}";
|
||||
script = mkUnlock (builtins.removeAttrs cfg [ "startAt" ]);
|
||||
serviceConfig = {
|
||||
User = "root";
|
||||
ProtectSystem = "strict";
|
||||
};
|
||||
}
|
||||
) config.mj.services.zfsunlock.targets;
|
||||
|
||||
systemd.timers =
|
||||
lib.mapAttrs'
|
||||
(
|
||||
name: cfg:
|
||||
lib.nameValuePair "zfsunlock-${name}" {
|
||||
description = "zfsunlock timer for ${name}";
|
||||
wantedBy = ["timers.target"];
|
||||
timerConfig = {
|
||||
OnCalendar = cfg.startAt;
|
||||
};
|
||||
after = ["network-online.target"];
|
||||
wants = ["network-online.target"];
|
||||
}
|
||||
)
|
||||
config.mj.services.zfsunlock.targets;
|
||||
systemd.timers = lib.mapAttrs' (
|
||||
name: cfg:
|
||||
lib.nameValuePair "zfsunlock-${name}" {
|
||||
description = "zfsunlock timer for ${name}";
|
||||
wantedBy = [ "timers.target" ];
|
||||
timerConfig = {
|
||||
OnCalendar = cfg.startAt;
|
||||
};
|
||||
after = [ "network-online.target" ];
|
||||
wants = [ "network-online.target" ];
|
||||
}
|
||||
) config.mj.services.zfsunlock.targets;
|
||||
|
||||
mj.base.unitstatus.units =
|
||||
map (name: "zfsunlock-${name}")
|
||||
(builtins.attrNames config.mj.services.zfsunlock.targets);
|
||||
mj.base.unitstatus.units = map (name: "zfsunlock-${name}") (
|
||||
builtins.attrNames config.mj.services.zfsunlock.targets
|
||||
);
|
||||
};
|
||||
}
|
||||
|
@ -13,62 +13,63 @@
|
||||
util-linux ? null,
|
||||
}:
|
||||
assert syslogSupport -> util-linux != null;
|
||||
stdenv.mkDerivation rec {
|
||||
version = "2.0.4";
|
||||
pname = "btrfs-auto-snapshot";
|
||||
stdenv.mkDerivation rec {
|
||||
version = "2.0.4";
|
||||
pname = "btrfs-auto-snapshot";
|
||||
|
||||
src = fetchFromGitHub {
|
||||
owner = "hunleyd";
|
||||
repo = pname;
|
||||
rev = "v${version}";
|
||||
hash = "sha256-QpuwkGaYAkpu5hYyb360Mr5tHsZc2LzMlKtpS8CyyhI=";
|
||||
};
|
||||
src = fetchFromGitHub {
|
||||
owner = "hunleyd";
|
||||
repo = pname;
|
||||
rev = "v${version}";
|
||||
hash = "sha256-QpuwkGaYAkpu5hYyb360Mr5tHsZc2LzMlKtpS8CyyhI=";
|
||||
};
|
||||
|
||||
dontBuild = true;
|
||||
dontBuild = true;
|
||||
|
||||
nativeBuildInputs = [makeWrapper];
|
||||
nativeBuildInputs = [ makeWrapper ];
|
||||
|
||||
installPhase = ''
|
||||
install -Dm755 btrfs-auto-snapshot $out/bin/btrfs-auto-snapshot
|
||||
installPhase = ''
|
||||
install -Dm755 btrfs-auto-snapshot $out/bin/btrfs-auto-snapshot
|
||||
'';
|
||||
|
||||
wrapperPath =
|
||||
with lib;
|
||||
makeBinPath (
|
||||
[
|
||||
coreutils
|
||||
getopt
|
||||
gnugrep
|
||||
gnused
|
||||
gawk
|
||||
btrfs-progs
|
||||
]
|
||||
++ optional syslogSupport util-linux
|
||||
);
|
||||
|
||||
postFixup = ''
|
||||
wrapProgram $out/bin/btrfs-auto-snapshot \
|
||||
--prefix PATH : "${wrapperPath}"
|
||||
'';
|
||||
|
||||
meta = with lib; {
|
||||
description = "BTRFS Automatic Snapshot Service for Linux";
|
||||
homepage = "https://github.com/hunleyd/btrfs-auto-snapshot";
|
||||
license = licenses.gpl2;
|
||||
mainProgram = "btrfs-auto-snapshot";
|
||||
maintainers = with maintainers; [ motiejus ];
|
||||
platforms = platforms.linux;
|
||||
|
||||
longDescription = ''
|
||||
btrfs-auto-snapshot is a Bash script designed to bring as much of the
|
||||
functionality of the wonderful ZFS snapshot tool zfs-auto-snapshot to
|
||||
BTRFS as possible. Designed to run from cron (using
|
||||
/etc/cron.{daily,hourly,weekly}) it automatically creates a snapshot of
|
||||
the specified BTRFS filesystem (or, optionally, all of them) and then
|
||||
automatically purges the oldest snapshots of that type (hourly, daily, et
|
||||
al) based on a user-defined retention policy.
|
||||
|
||||
Snapshots are stored in a '.btrfs' directory at the root of the BTRFS
|
||||
filesystem being snapped and are read-only by default.
|
||||
'';
|
||||
|
||||
wrapperPath = with lib;
|
||||
makeBinPath (
|
||||
[
|
||||
coreutils
|
||||
getopt
|
||||
gnugrep
|
||||
gnused
|
||||
gawk
|
||||
btrfs-progs
|
||||
]
|
||||
++ optional syslogSupport util-linux
|
||||
);
|
||||
|
||||
postFixup = ''
|
||||
wrapProgram $out/bin/btrfs-auto-snapshot \
|
||||
--prefix PATH : "${wrapperPath}"
|
||||
'';
|
||||
|
||||
meta = with lib; {
|
||||
description = "BTRFS Automatic Snapshot Service for Linux";
|
||||
homepage = "https://github.com/hunleyd/btrfs-auto-snapshot";
|
||||
license = licenses.gpl2;
|
||||
mainProgram = "btrfs-auto-snapshot";
|
||||
maintainers = with maintainers; [motiejus];
|
||||
platforms = platforms.linux;
|
||||
|
||||
longDescription = ''
|
||||
btrfs-auto-snapshot is a Bash script designed to bring as much of the
|
||||
functionality of the wonderful ZFS snapshot tool zfs-auto-snapshot to
|
||||
BTRFS as possible. Designed to run from cron (using
|
||||
/etc/cron.{daily,hourly,weekly}) it automatically creates a snapshot of
|
||||
the specified BTRFS filesystem (or, optionally, all of them) and then
|
||||
automatically purges the oldest snapshots of that type (hourly, daily, et
|
||||
al) based on a user-defined retention policy.
|
||||
|
||||
Snapshots are stored in a '.btrfs' directory at the root of the BTRFS
|
||||
filesystem being snapped and are read-only by default.
|
||||
'';
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
@ -1,64 +1,63 @@
|
||||
/*
|
||||
compressDrv compresses files in a given derivation.
|
||||
compressDrv compresses files in a given derivation.
|
||||
|
||||
Inputs:
|
||||
Inputs:
|
||||
|
||||
- formats :: [String]
|
||||
- formats :: [String]
|
||||
|
||||
List of file extensions to compress.
|
||||
List of file extensions to compress.
|
||||
|
||||
Example: ["txt" "svg" "xml"]
|
||||
Example: ["txt" "svg" "xml"]
|
||||
|
||||
- compressors :: {String -> String}
|
||||
- compressors :: {String -> String}
|
||||
|
||||
Map a desired extension (e.g. `gz`) to a compress program.
|
||||
Map a desired extension (e.g. `gz`) to a compress program.
|
||||
|
||||
The compressor program that will be executed to get the `COMPRESSOR`
|
||||
extension. The program should have a single " {}", which will be the
|
||||
replaced with the target filename.
|
||||
The compressor program that will be executed to get the `COMPRESSOR`
|
||||
extension. The program should have a single " {}", which will be the
|
||||
replaced with the target filename.
|
||||
|
||||
Compressor must:
|
||||
- read symlinks (thus --force is needed to gzip, zstd, xz).
|
||||
- keep the original file in place (--keep).
|
||||
Compressor must:
|
||||
- read symlinks (thus --force is needed to gzip, zstd, xz).
|
||||
- keep the original file in place (--keep).
|
||||
|
||||
Example:
|
||||
Example:
|
||||
|
||||
{
|
||||
xz = "${xz}/bin/xz --force --keep {}";
|
||||
}
|
||||
{
|
||||
xz = "${xz}/bin/xz --force --keep {}";
|
||||
}
|
||||
|
||||
See compressDrvWeb, which is a wrapper on top of compressDrv, for broader
|
||||
use examples.
|
||||
See compressDrvWeb, which is a wrapper on top of compressDrv, for broader
|
||||
use examples.
|
||||
*/
|
||||
{
|
||||
lib,
|
||||
xorg,
|
||||
runCommand,
|
||||
}: drv: {
|
||||
formats,
|
||||
compressors,
|
||||
...
|
||||
}: let
|
||||
validProg = ext: prog: let
|
||||
matches = (builtins.length (builtins.split "\\{}" prog) - 1) / 2;
|
||||
in
|
||||
lib.assertMsg
|
||||
(matches == 1)
|
||||
"compressor ${ext} needs to have exactly one '{}', found ${builtins.toString matches}";
|
||||
mkCmd = ext: prog:
|
||||
assert validProg ext prog; ''
|
||||
}:
|
||||
drv:
|
||||
{ formats, compressors, ... }:
|
||||
let
|
||||
validProg =
|
||||
ext: prog:
|
||||
let
|
||||
matches = (builtins.length (builtins.split "\\{}" prog) - 1) / 2;
|
||||
in
|
||||
lib.assertMsg (
|
||||
matches == 1
|
||||
) "compressor ${ext} needs to have exactly one '{}', found ${builtins.toString matches}";
|
||||
mkCmd =
|
||||
ext: prog:
|
||||
assert validProg ext prog;
|
||||
''
|
||||
find -L $out -type f -regextype posix-extended -iregex '.*\.(${formatsPipe})' -print0 \
|
||||
| xargs -0 -P$NIX_BUILD_CORES -I{} ${prog}
|
||||
'';
|
||||
formatsPipe = builtins.concatStringsSep "|" formats;
|
||||
in
|
||||
runCommand "${drv.name}-compressed" {} ''
|
||||
mkdir $out
|
||||
(cd $out; ${xorg.lndir}/bin/lndir ${drv})
|
||||
runCommand "${drv.name}-compressed" { } ''
|
||||
mkdir $out
|
||||
(cd $out; ${xorg.lndir}/bin/lndir ${drv})
|
||||
|
||||
${
|
||||
lib.concatStringsSep
|
||||
"\n\n"
|
||||
(lib.mapAttrsToList mkCmd compressors)
|
||||
}
|
||||
''
|
||||
${lib.concatStringsSep "\n\n" (lib.mapAttrsToList mkCmd compressors)}
|
||||
''
|
||||
|
@ -2,40 +2,41 @@
|
||||
gzip,
|
||||
runCommand,
|
||||
compressDrv,
|
||||
}: let
|
||||
example = runCommand "sample-drv" {} ''
|
||||
}:
|
||||
let
|
||||
example = runCommand "sample-drv" { } ''
|
||||
mkdir $out
|
||||
echo 42 > $out/1.txt
|
||||
echo 43 > $out/1.md
|
||||
touch $out/2.png
|
||||
'';
|
||||
drv = compressDrv example {
|
||||
formats = ["txt"];
|
||||
formats = [ "txt" ];
|
||||
compressors.gz = "${gzip}/bin/gzip --force --keep --fast {}";
|
||||
};
|
||||
wrapped = compressDrv drv {
|
||||
formats = ["md"];
|
||||
formats = [ "md" ];
|
||||
compressors.gz = "${gzip}/bin/gzip --force --keep --fast {}";
|
||||
};
|
||||
in
|
||||
runCommand "test-compressDrv" {} ''
|
||||
set -ex
|
||||
runCommand "test-compressDrv" { } ''
|
||||
set -ex
|
||||
|
||||
ls -l ${drv}
|
||||
test -h ${drv}/1.txt
|
||||
test -f ${drv}/1.txt.gz
|
||||
cmp ${drv}/1.txt <(${gzip}/bin/zcat ${drv}/1.txt.gz)
|
||||
ls -l ${drv}
|
||||
test -h ${drv}/1.txt
|
||||
test -f ${drv}/1.txt.gz
|
||||
cmp ${drv}/1.txt <(${gzip}/bin/zcat ${drv}/1.txt.gz)
|
||||
|
||||
test -h ${drv}/2.png
|
||||
test ! -a ${drv}/2.png.gz
|
||||
test -h ${drv}/2.png
|
||||
test ! -a ${drv}/2.png.gz
|
||||
|
||||
# compressDrv always points to the final file, no matter how many times
|
||||
# it's been wrapped
|
||||
cmp <(readlink -e ${drv}/1.txt) <(readlink -e ${wrapped}/1.txt)
|
||||
# compressDrv always points to the final file, no matter how many times
|
||||
# it's been wrapped
|
||||
cmp <(readlink -e ${drv}/1.txt) <(readlink -e ${wrapped}/1.txt)
|
||||
|
||||
test -f ${wrapped}/1.txt.gz
|
||||
test -f ${wrapped}/1.md.gz
|
||||
test ! -f ${drv}/1.md.gz
|
||||
test -f ${wrapped}/1.txt.gz
|
||||
test -f ${wrapped}/1.md.gz
|
||||
test ! -f ${drv}/1.md.gz
|
||||
|
||||
mkdir $out
|
||||
''
|
||||
mkdir $out
|
||||
''
|
||||
|
@ -1,83 +1,96 @@
|
||||
/*
|
||||
compressDrvWeb compresses a derivation for common web server use.
|
||||
compressDrvWeb compresses a derivation for common web server use.
|
||||
|
||||
Useful when one wants to pre-compress certain static assets and pass them to
|
||||
the web server. For example, `pkgs.gamja` creates this derivation:
|
||||
Useful when one wants to pre-compress certain static assets and pass them to
|
||||
the web server. For example, `pkgs.gamja` creates this derivation:
|
||||
|
||||
/nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/
|
||||
├── index.2fd01148.js
|
||||
├── index.2fd01148.js.map
|
||||
├── index.37aa9a8a.css
|
||||
├── index.37aa9a8a.css.map
|
||||
├── index.html
|
||||
└── manifest.webmanifest
|
||||
/nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/
|
||||
├── index.2fd01148.js
|
||||
├── index.2fd01148.js.map
|
||||
├── index.37aa9a8a.css
|
||||
├── index.37aa9a8a.css.map
|
||||
├── index.html
|
||||
└── manifest.webmanifest
|
||||
|
||||
`pkgs.compressDrvWeb pkgs.gamja`:
|
||||
`pkgs.compressDrvWeb pkgs.gamja`:
|
||||
|
||||
/nix/store/f5ryid7zrw2hid7h9kil5g5j29q5r2f7-gamja-1.0.0-beta.9-compressed
|
||||
├── index.2fd01148.js -> /nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/index.2fd01148.js
|
||||
├── index.2fd01148.js.br
|
||||
├── index.2fd01148.js.gz
|
||||
├── index.2fd01148.js.map -> /nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/index.2fd01148.js.map
|
||||
├── index.2fd01148.js.map.br
|
||||
├── index.2fd01148.js.map.gz
|
||||
├── index.37aa9a8a.css -> /nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/index.37aa9a8a.css
|
||||
├── index.37aa9a8a.css.br
|
||||
├── index.37aa9a8a.css.gz
|
||||
├── index.37aa9a8a.css.map -> /nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/index.37aa9a8a.css.map
|
||||
├── index.37aa9a8a.css.map.br
|
||||
├── index.37aa9a8a.css.map.gz
|
||||
├── index.html -> /nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/index.html
|
||||
├── index.html.br
|
||||
├── index.html.gz
|
||||
├── manifest.webmanifest -> /nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/manifest.webmanifest
|
||||
├── manifest.webmanifest.br
|
||||
└── manifest.webmanifest.gz
|
||||
/nix/store/f5ryid7zrw2hid7h9kil5g5j29q5r2f7-gamja-1.0.0-beta.9-compressed
|
||||
├── index.2fd01148.js -> /nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/index.2fd01148.js
|
||||
├── index.2fd01148.js.br
|
||||
├── index.2fd01148.js.gz
|
||||
├── index.2fd01148.js.map -> /nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/index.2fd01148.js.map
|
||||
├── index.2fd01148.js.map.br
|
||||
├── index.2fd01148.js.map.gz
|
||||
├── index.37aa9a8a.css -> /nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/index.37aa9a8a.css
|
||||
├── index.37aa9a8a.css.br
|
||||
├── index.37aa9a8a.css.gz
|
||||
├── index.37aa9a8a.css.map -> /nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/index.37aa9a8a.css.map
|
||||
├── index.37aa9a8a.css.map.br
|
||||
├── index.37aa9a8a.css.map.gz
|
||||
├── index.html -> /nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/index.html
|
||||
├── index.html.br
|
||||
├── index.html.gz
|
||||
├── manifest.webmanifest -> /nix/store/2wn1qbk8gp4y2m8xvafxv1b2dcdqj8fz-gamja-1.0.0-beta.9/manifest.webmanifest
|
||||
├── manifest.webmanifest.br
|
||||
└── manifest.webmanifest.gz
|
||||
|
||||
When this `-compressed` directory is passed to a properly configured web
|
||||
server, it will serve those pre-compressed files:
|
||||
|
||||
When this `-compressed` directory is passed to a properly configured web
|
||||
server, it will serve those pre-compressed files:
|
||||
$ curl -I -H 'Accept-Encoding: br' https://irc.example.org/
|
||||
<...>
|
||||
content-encoding: br
|
||||
<...>
|
||||
|
||||
$ curl -I -H 'Accept-Encoding: br' https://irc.example.org/
|
||||
<...>
|
||||
content-encoding: br
|
||||
<...>
|
||||
For example, a caddy configuration snippet for gamja to serve
|
||||
the static assets (JS, CSS files) pre-compressed:
|
||||
|
||||
For example, a caddy configuration snippet for gamja to serve
|
||||
the static assets (JS, CSS files) pre-compressed:
|
||||
virtualHosts."irc.example.org".extraConfig = ''
|
||||
root * ${pkgs.compressDrvWeb pkgs.gamja {}}
|
||||
file_server browse {
|
||||
precompressed br gzip
|
||||
}
|
||||
'';
|
||||
|
||||
virtualHosts."irc.example.org".extraConfig = ''
|
||||
root * ${pkgs.compressDrvWeb pkgs.gamja {}}
|
||||
file_server browse {
|
||||
precompressed br gzip
|
||||
}
|
||||
'';
|
||||
This feature is also available in nginx via `ngx_brotli` and
|
||||
`ngx_http_gzip_static_module`.
|
||||
|
||||
This feature is also available in nginx via `ngx_brotli` and
|
||||
`ngx_http_gzip_static_module`.
|
||||
Inputs:
|
||||
- formats :: [String]
|
||||
|
||||
Inputs:
|
||||
- formats :: [String]
|
||||
List of file extensions to compress.
|
||||
|
||||
List of file extensions to compress.
|
||||
Default: common formats that compress well. The list may be expanded.
|
||||
|
||||
Default: common formats that compress well. The list may be expanded.
|
||||
- extraFormats :: [String]
|
||||
|
||||
- extraFormats :: [String]
|
||||
Extra extensions to compress in addition to `formats`.
|
||||
|
||||
Extra extensions to compress in addition to `formats`.
|
||||
- compressors :: {String -> String}
|
||||
|
||||
- compressors :: {String -> String}
|
||||
|
||||
See parameter `compressors` of compressDrv.
|
||||
See parameter `compressors` of compressDrv.
|
||||
*/
|
||||
{
|
||||
zopfli,
|
||||
brotli,
|
||||
compressDrv,
|
||||
}: drv: {
|
||||
formats ? ["css" "js" "svg" "ttf" "eot" "txt" "xml" "map" "html" "json" "webmanifest"],
|
||||
extraFormats ? [],
|
||||
}:
|
||||
drv:
|
||||
{
|
||||
formats ? [
|
||||
"css"
|
||||
"js"
|
||||
"svg"
|
||||
"ttf"
|
||||
"eot"
|
||||
"txt"
|
||||
"xml"
|
||||
"map"
|
||||
"html"
|
||||
"json"
|
||||
"webmanifest"
|
||||
],
|
||||
extraFormats ? [ ],
|
||||
compressors ? {
|
||||
"gz" = "${zopfli}/bin/zopfli --keep {}";
|
||||
"br" = "${brotli}/bin/brotli --keep --no-copy-stat {}";
|
||||
|
@ -1,7 +1,4 @@
{
coreutils,
writeShellApplication,
}:
{ coreutils, writeShellApplication }:
writeShellApplication {
name = "nicer";
text = ''
@ -1,8 +1,4 @@
{
tmux,
writeShellApplication,
...
}:
{ tmux, writeShellApplication, ... }:
writeShellApplication {
name = "tmuxbash";
text = ''
93
secrets.nix
@ -11,57 +11,62 @@ let
|
||||
fra1-a = (import ./data.nix).hosts."fra1-a.servers.jakst".publicKey;
|
||||
vno1-oh2 = (import ./data.nix).hosts."vno1-oh2.servers.jakst".publicKey;
|
||||
vno3-rp3b = (import ./data.nix).hosts."vno3-rp3b.servers.jakst".publicKey;
|
||||
systems = [fra1-a vno1-oh2 vno3-rp3b fwminex];
|
||||
systems = [
|
||||
fra1-a
|
||||
vno1-oh2
|
||||
vno3-rp3b
|
||||
fwminex
|
||||
];
|
||||
|
||||
mk = auth: keyNames:
|
||||
mk =
|
||||
auth: keyNames:
|
||||
builtins.listToAttrs (
|
||||
map (keyName: {
|
||||
name = keyName;
|
||||
value = {publicKeys = auth;};
|
||||
})
|
||||
keyNames
|
||||
value = {
|
||||
publicKeys = auth;
|
||||
};
|
||||
}) keyNames
|
||||
);
|
||||
in
|
||||
{}
|
||||
// mk ([vno1-oh2] ++ motiejus) [
|
||||
"secrets/fra1-a/zfs-passphrase.age"
|
||||
"secrets/vno1-oh2/borgbackup/password.age"
|
||||
"secrets/grafana.jakstys.lt/oidc.age"
|
||||
"secrets/letsencrypt/account.key.age"
|
||||
"secrets/headscale/oidc_client_secret2.age"
|
||||
"secrets/vaultwarden/secrets.env.age"
|
||||
"secrets/photoprism/admin_password.age"
|
||||
{ }
|
||||
// mk ([ vno1-oh2 ] ++ motiejus) [
|
||||
"secrets/fra1-a/zfs-passphrase.age"
|
||||
"secrets/vno1-oh2/borgbackup/password.age"
|
||||
"secrets/grafana.jakstys.lt/oidc.age"
|
||||
"secrets/letsencrypt/account.key.age"
|
||||
"secrets/headscale/oidc_client_secret2.age"
|
||||
"secrets/vaultwarden/secrets.env.age"
|
||||
"secrets/photoprism/admin_password.age"
|
||||
|
||||
"secrets/synapse/jakstys_lt_signing_key.age"
|
||||
"secrets/synapse/registration_shared_secret.age"
|
||||
"secrets/synapse/macaroon_secret_key.age"
|
||||
"secrets/synapse/jakstys_lt_signing_key.age"
|
||||
"secrets/synapse/registration_shared_secret.age"
|
||||
"secrets/synapse/macaroon_secret_key.age"
|
||||
|
||||
"secrets/vno1-oh2/syncthing/key.pem.age"
|
||||
"secrets/vno1-oh2/syncthing/cert.pem.age"
|
||||
]
|
||||
// mk ([fra1-a] ++ motiejus) [
|
||||
"secrets/vno1-oh2/zfs-passphrase.age"
|
||||
"secrets/fra1-a/borgbackup-password.age"
|
||||
]
|
||||
// mk ([vno3-rp3b] ++ motiejus) [
|
||||
"secrets/vno3-rp3b/datapool-passphrase.age"
|
||||
]
|
||||
// mk ([mtworx] ++ motiejus) [
|
||||
"secrets/motiejus_work_passwd_hash.age"
|
||||
"secrets/root_work_passwd_hash.age"
|
||||
"secrets/vno1-oh2/syncthing/key.pem.age"
|
||||
"secrets/vno1-oh2/syncthing/cert.pem.age"
|
||||
]
|
||||
// mk ([ fra1-a ] ++ motiejus) [
|
||||
"secrets/vno1-oh2/zfs-passphrase.age"
|
||||
"secrets/fra1-a/borgbackup-password.age"
|
||||
]
|
||||
// mk ([ vno3-rp3b ] ++ motiejus) [ "secrets/vno3-rp3b/datapool-passphrase.age" ]
|
||||
// mk ([ mtworx ] ++ motiejus) [
|
||||
"secrets/motiejus_work_passwd_hash.age"
|
||||
"secrets/root_work_passwd_hash.age"
|
||||
|
||||
"secrets/mtworx/syncthing/key.pem.age"
|
||||
"secrets/mtworx/syncthing/cert.pem.age"
|
||||
]
|
||||
// mk ([fwminex] ++ motiejus) [
|
||||
"secrets/motiejus_server_passwd_hash.age"
|
||||
"secrets/root_server_passwd_hash.age"
|
||||
"secrets/mtworx/syncthing/key.pem.age"
|
||||
"secrets/mtworx/syncthing/cert.pem.age"
|
||||
]
|
||||
// mk ([ fwminex ] ++ motiejus) [
|
||||
"secrets/motiejus_server_passwd_hash.age"
|
||||
"secrets/root_server_passwd_hash.age"
|
||||
|
||||
"secrets/fwminex/syncthing/key.pem.age"
|
||||
"secrets/fwminex/syncthing/cert.pem.age"
|
||||
]
|
||||
// mk (systems ++ motiejus) [
|
||||
"secrets/motiejus_passwd_hash.age"
|
||||
"secrets/root_passwd_hash.age"
|
||||
"secrets/postfix_sasl_passwd.age"
|
||||
]
|
||||
"secrets/fwminex/syncthing/key.pem.age"
|
||||
"secrets/fwminex/syncthing/cert.pem.age"
|
||||
]
|
||||
// mk (systems ++ motiejus) [
|
||||
"secrets/motiejus_passwd_hash.age"
|
||||
"secrets/root_passwd_hash.age"
|
||||
"secrets/postfix_sasl_passwd.age"
|
||||
]
|
||||
|
@ -7,51 +7,48 @@
|
||||
hmOnly,
|
||||
username,
|
||||
...
|
||||
}: let
|
||||
}:
|
||||
let
|
||||
# from https://github.com/Gerg-L/demoninajar/blob/39964f198dbfa34c21f81c35370fab312b476051/homes/veritas_manjaro/nixGL.nix#L42
|
||||
mkWrapped = wrap: orig-pkg: execName:
|
||||
pkgs.makeOverridable
|
||||
(
|
||||
attrs: let
|
||||
mkWrapped =
|
||||
wrap: orig-pkg: execName:
|
||||
pkgs.makeOverridable (
|
||||
attrs:
|
||||
let
|
||||
pkg = orig-pkg.override attrs;
|
||||
outs = pkg.meta.outputsToInstall;
|
||||
paths = pkgs.lib.attrsets.attrVals outs pkg;
|
||||
nonTrivialOuts = pkgs.lib.lists.remove "out" outs;
|
||||
metaAttributes =
|
||||
pkgs.lib.attrsets.getAttrs
|
||||
(
|
||||
[
|
||||
"name"
|
||||
"pname"
|
||||
"version"
|
||||
"meta"
|
||||
]
|
||||
++ nonTrivialOuts
|
||||
)
|
||||
pkg;
|
||||
metaAttributes = pkgs.lib.attrsets.getAttrs (
|
||||
[
|
||||
"name"
|
||||
"pname"
|
||||
"version"
|
||||
"meta"
|
||||
]
|
||||
++ nonTrivialOuts
|
||||
) pkg;
|
||||
in
|
||||
pkgs.symlinkJoin (
|
||||
{
|
||||
inherit paths;
|
||||
nativeBuildInputs = [pkgs.makeWrapper];
|
||||
postBuild = ''
|
||||
mv $out/bin/${execName} $out/bin/.${execName}-mkWrapped-original
|
||||
makeWrapper \
|
||||
${wrap}/bin/${wrap.name} $out/bin/${execName} \
|
||||
--add-flags $out/bin/.${execName}-mkWrapped-original
|
||||
'';
|
||||
}
|
||||
// metaAttributes
|
||||
)
|
||||
)
|
||||
{};
|
||||
pkgs.symlinkJoin (
|
||||
{
|
||||
inherit paths;
|
||||
nativeBuildInputs = [ pkgs.makeWrapper ];
|
||||
postBuild = ''
|
||||
mv $out/bin/${execName} $out/bin/.${execName}-mkWrapped-original
|
||||
makeWrapper \
|
||||
${wrap}/bin/${wrap.name} $out/bin/${execName} \
|
||||
--add-flags $out/bin/.${execName}-mkWrapped-original
|
||||
'';
|
||||
}
|
||||
// metaAttributes
|
||||
)
|
||||
) { };
|
||||
glintel = mkWrapped pkgs.nixgl.nixGLIntel;
|
||||
firefox =
|
||||
if (pkgs.stdenv.hostPlatform.system == "x86_64-linux")
|
||||
then pkgs.firefox-bin
|
||||
else pkgs.firefox;
|
||||
if (pkgs.stdenv.hostPlatform.system == "x86_64-linux") then pkgs.firefox-bin else pkgs.firefox;
|
||||
homeDirectory = "/home/${username}";
|
||||
in {
|
||||
in
|
||||
{
|
||||
home = {
|
||||
inherit stateVersion username homeDirectory;
|
||||
};
|
||||
@ -61,13 +58,12 @@ in {
|
||||
".parallel/will-cite".text = "";
|
||||
};
|
||||
|
||||
home.sessionVariables = lib.mkIf devTools {
|
||||
GOPATH = "${homeDirectory}/.go";
|
||||
};
|
||||
home.sessionVariables = lib.mkIf devTools { GOPATH = "${homeDirectory}/.go"; };
|
||||
|
||||
home.packages = with pkgs;
|
||||
home.packages =
|
||||
with pkgs;
|
||||
lib.mkMerge [
|
||||
[extract_url]
|
||||
[ extract_url ]
|
||||
|
||||
(lib.mkIf devTools [
|
||||
go_1_22
|
||||
@ -106,17 +102,14 @@ in {
|
||||
chromium = {
|
||||
enable = true;
|
||||
extensions = [
|
||||
{id = "cjpalhdlnbpafiamejdnhcphjbkeiagm";} # ublock origin
|
||||
{id = "mdjildafknihdffpkfmmpnpoiajfjnjd";} # consent-o-matic
|
||||
{ id = "cjpalhdlnbpafiamejdnhcphjbkeiagm"; } # ublock origin
|
||||
{ id = "mdjildafknihdffpkfmmpnpoiajfjnjd"; } # consent-o-matic
|
||||
];
|
||||
};
|
||||
firefox = lib.mkIf devTools {
|
||||
enable = true;
|
||||
# firefox doesn't need the wrapper on the personal laptop
|
||||
package =
|
||||
if hmOnly
|
||||
then (glintel firefox "firefox")
|
||||
else firefox;
|
||||
package = if hmOnly then (glintel firefox "firefox") else firefox;
|
||||
policies.DisableAppUpdate = true;
|
||||
profiles = {
|
||||
xdefault = {
|
||||
@ -148,7 +141,7 @@ in {
|
||||
vimdiffAlias = true;
|
||||
defaultEditor = true;
|
||||
plugins = lib.mkMerge [
|
||||
[pkgs.vimPlugins.fugitive]
|
||||
[ pkgs.vimPlugins.fugitive ]
|
||||
(lib.mkIf devTools [
|
||||
pkgs.vimPlugins.vim-go
|
||||
pkgs.vimPlugins.zig-vim
|
||||
@ -162,11 +155,10 @@ in {
|
||||
(lib.mkIf devTools {
|
||||
extraLuaConfig =
|
||||
builtins.readFile
|
||||
(pkgs.substituteAll {
|
||||
src = ./dev.lua;
|
||||
inherit (pkgs) ripgrep;
|
||||
})
|
||||
.outPath;
|
||||
(pkgs.substituteAll {
|
||||
src = ./dev.lua;
|
||||
inherit (pkgs) ripgrep;
|
||||
}).outPath;
|
||||
})
|
||||
];
|
||||
|
||||
@ -228,22 +220,19 @@ in {
|
||||
'';
|
||||
};
|
||||
}
|
||||
(
|
||||
lib.mkIf (!hmOnly)
|
||||
{
|
||||
bash = {
|
||||
enable = true;
|
||||
shellAliases = {
|
||||
"l" = "echo -n ł | ${pkgs.xclip}/bin/xclip -selection clipboard";
|
||||
"gp" = "${pkgs.git}/bin/git remote | ${pkgs.parallel}/bin/parallel --verbose git push";
|
||||
};
|
||||
initExtra = ''
|
||||
t() { git rev-parse --show-toplevel; }
|
||||
d() { date --utc --date=@$(echo "$1" | sed -E 's/^[^1-9]*([0-9]{10}).*/\1/') +"%F %T"; }
|
||||
source ${./gg.sh}
|
||||
'';
|
||||
(lib.mkIf (!hmOnly) {
|
||||
bash = {
|
||||
enable = true;
|
||||
shellAliases = {
|
||||
"l" = "echo -n ł | ${pkgs.xclip}/bin/xclip -selection clipboard";
|
||||
"gp" = "${pkgs.git}/bin/git remote | ${pkgs.parallel}/bin/parallel --verbose git push";
|
||||
};
|
||||
}
|
||||
)
|
||||
initExtra = ''
|
||||
t() { git rev-parse --show-toplevel; }
|
||||
d() { date --utc --date=@$(echo "$1" | sed -E 's/^[^1-9]*([0-9]{10}).*/\1/') +"%F %T"; }
|
||||
source ${./gg.sh}
|
||||
'';
|
||||
};
|
||||
})
|
||||
];
|
||||
}
|
||||
|
@ -1,10 +1,7 @@
{ config, pkgs, ... }:
{
config,
pkgs,
...
}: {
mj.base.users.email = null;
mj.base.users.user.extraGroups = ["docker"];
mj.base.users.user.extraGroups = [ "docker" ];

environment.systemPackages = with pkgs; [
#swc
@ -71,9 +68,9 @@
user.useConfigOnly = true;
};
chromium.extensions = [
{id = "aeblfdkhhhdcdjpifhhbdiojplfjncoa";} # 1password
{id = "mdkgfdijbhbcbajcdlebbodoppgnmhab";} # GoLinks
{id = "kgjfgplpablkjnlkjmjdecgdpfankdle";} # Zoom
{ id = "aeblfdkhhhdcdjpifhhbdiojplfjncoa"; } # 1password
{ id = "mdkgfdijbhbcbajcdlebbodoppgnmhab"; } # GoLinks
{ id = "kgjfgplpablkjnlkjmjdecgdpfankdle"; } # Zoom
];
bash.initExtra = ''
mj_ps1_extra() {