config/modules/nixos/hardware/gpu-passthru/default.nix

{
inputs,
lib,
config,
pkgs,
...
}:
with lib;
with lib.custom; let
cfg = config.hardware.gpu-passthru;
startScript = ''
#!/run/current-system/sw/bin/bash
# Debugging
exec 19>/home/zoey/Desktop/startlogfile
BASH_XTRACEFD=19
set -x
# Load variables we defined
source "/etc/libvirt/hooks/kvm.conf"
# Change to performance governor
echo performance | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
# Restrict host processes to CPUs 0-15 (the rest are left free for the guest)
systemctl set-property --runtime -- user.slice AllowedCPUs=0-15
systemctl set-property --runtime -- system.slice AllowedCPUs=0-15
systemctl set-property --runtime -- init.scope AllowedCPUs=0-15
# Disconnect the VPN and wait for it to finish
mullvad disconnect -w
# Logout
# source "/home/owner/Desktop/Sync/Files/Tools/logout.sh"
# Stop display manager
systemctl stop display-manager.service
killall gdm-wayland-session
killall niri
killall niri-session
# Unbind VTconsoles
echo 0 > /sys/class/vtconsole/vtcon0/bind
echo 0 > /sys/class/vtconsole/vtcon1/bind
# Unbind EFI Framebuffer
echo efi-framebuffer.0 > /sys/bus/platform/drivers/efi-framebuffer/unbind
# Avoid race condition
sleep 5
# Unload NVIDIA kernel modules
modprobe -r nvidia_drm nvidia_modeset nvidia_uvm nvidia
# Detach GPU devices from host
virsh nodedev-detach $VIRSH_GPU_VIDEO
virsh nodedev-detach $VIRSH_GPU_AUDIO
# Load vfio module
modprobe vfio-pci
'';
stopScript = ''
#!/run/current-system/sw/bin/bash
# Debugging
exec 19>/home/zoey/Desktop/stoplogfile
BASH_XTRACEFD=19
set -x
# Load variables we defined
source "/etc/libvirt/hooks/kvm.conf"
# Unload vfio module
modprobe -r vfio-pci
# Re-attach GPU devices to the host
virsh nodedev-reattach $VIRSH_GPU_VIDEO
virsh nodedev-reattach $VIRSH_GPU_AUDIO
# Read nvidia x config
nvidia-xconfig --query-gpu-info > /dev/null 2>&1
# Load NVIDIA kernel modules
modprobe nvidia_drm nvidia_modeset nvidia_uvm nvidia
# Avoid race condition
sleep 5
# Bind EFI Framebuffer
echo efi-framebuffer.0 > /sys/bus/platform/drivers/efi-framebuffer/bind
# Bind VTconsoles
echo 1 > /sys/class/vtconsole/vtcon0/bind
echo 1 > /sys/class/vtconsole/vtcon1/bind
# Start display manager
systemctl start display-manager.service
# Return host to all cores
systemctl set-property --runtime -- user.slice AllowedCPUs=0-31
systemctl set-property --runtime -- system.slice AllowedCPUs=0-31
systemctl set-property --runtime -- init.scope AllowedCPUs=0-31
# Change to powersave governor
echo powersave | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
'';
libvirtd-cfg = config.virtualisation.libvirtd;
qemuConfigFile = pkgs.writeText "qemu.conf" ''
${optionalString libvirtd-cfg.qemu.ovmf.enable ''
nvram = [
"/run/libvirt/nix-ovmf/AAVMF_CODE.fd:/run/libvirt/nix-ovmf/AAVMF_VARS.fd",
"/run/libvirt/nix-ovmf/AAVMF_CODE.ms.fd:/run/libvirt/nix-ovmf/AAVMF_VARS.ms.fd",
"/run/libvirt/nix-ovmf/OVMF_CODE.fd:/run/libvirt/nix-ovmf/OVMF_VARS.fd",
"/run/libvirt/nix-ovmf/OVMF_CODE.ms.fd:/run/libvirt/nix-ovmf/OVMF_VARS.ms.fd"
]
''}
${optionalString (!libvirtd-cfg.qemu.runAsRoot) ''
user = "qemu-libvirtd"
group = "qemu-libvirtd"
''}
${libvirtd-cfg.qemu.verbatimConfig}
'';
dirName = "libvirt";
in {
options.hardware.gpu-passthru = with types; {
enable = mkBoolOpt false "Enable support for single-GPU passthrough";
};
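# Example usage from a host configuration that imports this module
# (a minimal sketch; nothing else is required to turn the feature on):
#   hardware.gpu-passthru.enable = true;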
config = mkIf cfg.enable {
boot.kernelParams = ["intel_iommu=on" "iommu=pt" "transparent_hugepage=always"];
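# Quick sanity check after a rebuild (a hedged sketch; group layout is host-specific):
#   dmesg | grep -e DMAR -e IOMMU     # confirm the IOMMU initialised
#   ls /sys/kernel/iommu_groups/      # one directory per IOMMU group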
boot.kernelModules = ["vfio-pci"];
# CachyOS-inspired system performance tweaks
boot.kernel.sysctl = {
# Virtual memory tweaks
"vm.swappiness" = 10;
"vm.dirty_background_ratio" = 5;
"vm.dirty_ratio" = 10;
"vm.vfs_cache_pressure" = 50;
"vm.max_map_count" = 16777216;
# Network optimizations
"net.core.netdev_max_backlog" = 16384;
"net.ipv4.tcp_fastopen" = 3;
"net.ipv4.tcp_max_syn_backlog" = 8192;
"net.core.somaxconn" = 8192;
# CPU scheduler tweaks
"kernel.sched_autogroup_enabled" = 0;
};
virtualisation.libvirtd = {
enable = true;
onBoot = "ignore";
onShutdown = "shutdown";
qemu = {
package = pkgs.qemu_kvm;
runAsRoot = true;
swtpm.enable = true;
ovmf = {
enable = true;
packages = [
(pkgs.OVMF.override {
secureBoot = true;
tpmSupport = true;
})
.fd
];
};
};
};
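# Put the commands the hook scripts call (bash, virsh from libvirt, modprobe from
# kmod, systemctl, mullvad, killall, ripgrep, sd) on libvirtd's PATH so the qemu
# hook and the start/stop scripts above can run them.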
systemd.services.libvirtd = {
path = let
env = pkgs.buildEnv {
name = "qemu-hook-env";
paths = with pkgs; [
bash
libvirt
kmod
systemd
ripgrep
nixos-stable.mullvad
killall
sd
];
};
in [env];
};
systemd.services.libvirtd-config.script = lib.mkForce ''
# Copy default libvirt network config .xml files to /var/lib
# Files modified by the user will not be overwritten
for i in $(cd ${libvirtd-cfg.package}/var/lib && echo \
libvirt/qemu/networks/*.xml \
libvirt/nwfilter/*.xml );
do
# Intended behavior
# shellcheck disable=SC2174
mkdir -p "/var/lib/$(dirname "$i")" -m 755
if [ ! -e "/var/lib/$i" ]; then
cp -pd "${libvirtd-cfg.package}/var/lib/$i" "/var/lib/$i"
fi
done
# Copy generated qemu config to libvirt directory
cp -f ${qemuConfigFile} /var/lib/${dirName}/qemu.conf
# Stable paths (not garbage-collected like /nix/store paths) for use in the <emulator> section of XML configs
for emulator in ${libvirtd-cfg.package}/libexec/libvirt_lxc ${libvirtd-cfg.qemu.package}/bin/qemu-kvm ${libvirtd-cfg.qemu.package}/bin/qemu-system-*; do
ln -s --force "$emulator" /run/${dirName}/nix-emulators/
done
ln -s --force ${libvirtd-cfg.qemu.package}/bin/qemu-pr-helper /run/${dirName}/nix-helpers/
${optionalString libvirtd-cfg.qemu.ovmf.enable (
let
ovmfpackage = pkgs.buildEnv {
name = "qemu-ovmf";
paths = libvirtd-cfg.qemu.ovmf.packages;
};
in ''
ln -s --force ${ovmfpackage}/FV/AAVMF_CODE{,.ms}.fd /run/${dirName}/nix-ovmf/
ln -s --force ${ovmfpackage}/FV/OVMF_CODE{,.ms}.fd /run/${dirName}/nix-ovmf/
ln -s --force ${ovmfpackage}/FV/AAVMF_VARS{,.ms}.fd /run/${dirName}/nix-ovmf/
ln -s --force ${ovmfpackage}/FV/OVMF_VARS{,.ms}.fd /run/${dirName}/nix-ovmf/
''
)}
'';
system.activationScripts.libvirt-hooks.text = ''
ln -Tfs /etc/libvirt/hooks /var/lib/libvirt/hooks
'';
# environment.systemPackages = with pkgs; [
# libguestfs
# ];
networking.firewall.trustedInterfaces = ["virbr0"];
environment.etc = {
"/libvirt/hooks/qemu" = {
text = ''
#!/run/current-system/sw/bin/bash
#
# Author: Sebastiaan Meijer (sebastiaan@passthroughpo.st)
#
# Copy this file to /etc/libvirt/hooks, make sure it's called "qemu".
# After this file is installed, restart libvirt.
# From now on, you can easily add per-guest qemu hooks.
# Add your hooks in /etc/libvirt/hooks/qemu.d/vm_name/hook_name/state_name.
# For a list of available hooks, please refer to https://www.libvirt.org/hooks.html
#
GUEST_NAME="$1"
HOOK_NAME="$2"
STATE_NAME="$3"
MISC="''${@:4}"
BASEDIR="$(dirname $0)"
HOOKPATH="$BASEDIR/qemu.d/$GUEST_NAME/$HOOK_NAME/$STATE_NAME"
set -e # If a script exits with an error, we should as well.
# check if it's a non-empty executable file
if [ -f "$HOOKPATH" ] && [ -s "$HOOKPATH"] && [ -x "$HOOKPATH" ]; then
eval \"$HOOKPATH\" "$@"
elif [ -d "$HOOKPATH" ]; then
while read file; do
# check for null string
if [ ! -z "$file" ]; then
eval \"$file\" "$@"
fi
done <<< "$(find -L "$HOOKPATH" -maxdepth 1 -type f -executable -print;)"
fi
'';
mode = "0755";
};
"libvirt/hooks/kvm.conf" = {
text = ''
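# PCI addresses of the passed-through GPU's video and audio functions,
# consumed by the start/stop hooks above. These are host-specific; one way
# to look them up (commands assumed to be installed on the host):
#   lspci -nn | grep -iE 'vga|audio'   # e.g. 0b:00.0 and 0b:00.1
#   virsh nodedev-list --cap pci       # the matching pci_0000_*_* names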
VIRSH_GPU_VIDEO=pci_0000_0b_00_0
VIRSH_GPU_AUDIO=pci_0000_0b_00_1
'';
mode = "0755";
};
"libvirt/hooks/qemu.d/win10/prepare/begin/start.sh" = {
text = startScript;
mode = "0755";
};
"libvirt/hooks/qemu.d/win10/release/end/stop.sh" = {
text = stopScript;
mode = "0755";
};
};
};
}