diff --git a/bin/omarchy-snapshot b/bin/omarchy-snapshot
index ee79512a7c..6aa7511350 100755
--- a/bin/omarchy-snapshot
+++ b/bin/omarchy-snapshot
@@ -11,7 +11,7 @@ if [[ -z $COMMAND ]]; then
 fi
 
 if ! command -v snapper &>/dev/null; then
-  exit 127 # omarchy-update can use this to just ignore if snapper is not available
+  exit 127
 fi
 
 case "$COMMAND" in
@@ -29,6 +29,13 @@ create)
   echo
   ;;
 restore)
+  echo "⚠️ Snapshot restore will restore the ROOT filesystem only."
+  echo "⚠️ Your /home directory will NOT be affected."
+  echo ""
+  echo "If you need to restore /home:"
+  echo "1. Boot into the snapshot from limine menu"
+  echo "2. Manually restore files from the /home .snapshots subvolumes"
+  echo ""
   sudo limine-snapper-restore
   ;;
 esac
diff --git a/install/config/boot-permissions-fix.sh b/install/config/boot-permissions-fix.sh
new file mode 100644
index 0000000000..6fd806a562
--- /dev/null
+++ b/install/config/boot-permissions-fix.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# Fix /boot permissions security issue
+# The random seed file and /boot mount should not be world accessible
+# See: https://github.com/basecamp/omarchy/issues/5377
+
+echo "Fixing /boot permissions for better security..."
+
+# Fix /boot directory permissions (should be 700)
+sudo chmod 700 /boot 2>/dev/null || echo "Could not change /boot permissions"
+
+# Fix random-seed file permissions if it exists
+if [[ -f /boot/loader/random-seed ]]; then
+  sudo chmod 600 /boot/loader/random-seed 2>/dev/null || echo "Could not change random-seed permissions"
+fi
+
+# Warn if /boot is not a persistent mount in fstab.
+# Match /boot as the mount-point (second) field so UUID=/LABEL= entries are detected too.
+if ! grep -qE "[[:space:]]/boot[[:space:]]" /etc/fstab 2>/dev/null; then
+  echo "Warning: /boot is not in fstab, permissions may not persist"
+fi
+
+# Disable bootctl random seed generation warnings by setting correct permissions
+if command -v bootctl &>/dev/null; then
+  # Run bootctl with proper environment to set correct permissions
+  sudo bootctl random-seed 2>/dev/null || true
+fi
+
+echo "Boot permissions fix complete!"
diff --git a/install/config/nvidia-suspend-fix.sh b/install/config/nvidia-suspend-fix.sh
new file mode 100644
index 0000000000..fd7f76bb7f
--- /dev/null
+++ b/install/config/nvidia-suspend-fix.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Fix NVIDIA + hyprlock suspend freeze issue
+# See: https://github.com/basecamp/omarchy/issues/5277
+
+echo "Applying NVIDIA suspend fix..."
+
+# Check if user is on NVIDIA
+if command -v nvidia-smi &>/dev/null; then
+  echo "NVIDIA GPU detected, applying suspend fix..."
+
+  # Create a systemd service to stop hyprlock before suspend.
+  # StopWhenUnneeded=yes makes systemd stop the unit (running ExecStop,
+  # i.e. SIGCONT) once suspend.target goes inactive again after resume.
+  cat << SYSTEMDEOF | sudo tee /etc/systemd/system/hyprlock-suspend.service > /dev/null
+[Unit]
+Description=Stop hyprlock before suspend/hibernate
+Before=suspend.target hibernate.target hybrid-suspend.target
+DefaultDependencies=no
+StopWhenUnneeded=yes
+After=hypridle.service
+
+[Service]
+Type=oneshot
+ExecStart=/usr/bin/pkill -STOP hyprlock
+RemainAfterExit=yes
+ExecStop=/usr/bin/pkill -CONT hyprlock
+TimeoutStopSec=5
+
+[Install]
+WantedBy=suspend.target hibernate.target hybrid-suspend.target
+SYSTEMDEOF
+
+  sudo systemctl enable hyprlock-suspend.service 2>/dev/null || echo "Warning: Could not enable hyprlock-suspend service"
+
+  echo "✓ Created hyprlock-suspend service"
+  echo "✓ hyprlock will stop before suspend and resume after"
+else
+  echo "No NVIDIA GPU detected, skipping NVIDIA-specific fixes"
+fi
+
+echo "NVIDIA suspend fix complete!"
diff --git a/install/config/snapper-home-config.sh b/install/config/snapper-home-config.sh
new file mode 100644
index 0000000000..282173832d
--- /dev/null
+++ b/install/config/snapper-home-config.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# Fix snapper /home config creation for chroot installations
+# See: https://github.com/basecamp/omarchy/issues/5344
+
+echo "Ensuring snapper /home config is created..."
+
+# Check if /home is on a separate subvolume or btrfs
+if mountpoint -q /home 2>/dev/null; then
+  # /home is a separate mount point
+  if ! sudo snapper list-configs 2>/dev/null | grep -q "^home"; then
+    echo "Creating snapper config for /home..."
+    sudo snapper -c home create-config /home 2>/dev/null || echo "Warning: Could not create /home snapper config"
+  fi
+elif [[ -d /home/.snapshots ]]; then
+  # /home has .snapshots subdirectory, ensure config exists
+  if ! sudo snapper list-configs 2>/dev/null | grep -q "^home"; then
+    echo "Creating snapper config for /home subvolume..."
+    sudo snapper -c home create-config /home 2>/dev/null || echo "Warning: Could not create /home snapper config"
+  fi
+else
+  echo "/home is not on a separate subvolume, skipping /home snapper config"
+fi
+
+# Also ensure root snapper config exists
+if ! sudo snapper list-configs 2>/dev/null | grep -q "^root"; then
+  echo "Creating snapper config for root..."
+  sudo snapper -c root create-config / 2>/dev/null || echo "Warning: Could not create root snapper config"
+  sudo cp "$OMARCHY_PATH/default/snapper/root" /etc/snapper/configs/root 2>/dev/null || true
+fi
+
+echo "Snapper config check complete!"
diff --git a/migrations/1777007500.sh b/migrations/1777007500.sh
new file mode 100644
index 0000000000..b323940f41
--- /dev/null
+++ b/migrations/1777007500.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+# Fix /boot permissions security issue
+# See: https://github.com/basecamp/omarchy/issues/5377
+
+echo "Fixing /boot permissions for better security..."
+
+# Fix /boot directory permissions (should be 700 for security)
+sudo chmod 700 /boot 2>/dev/null || echo "Could not change /boot permissions"
+
+# Fix random-seed file permissions if it exists
+if [[ -f /boot/loader/random-seed ]]; then
+  sudo chmod 600 /boot/loader/random-seed 2>/dev/null || echo "Could not change random-seed permissions"
+fi
+
+# Verify the fix
+if [[ $(stat -c %a /boot 2>/dev/null) == "700" ]]; then
+  echo "✓ /boot permissions fixed to 700"
+fi
+
+if [[ -f /boot/loader/random-seed ]] && [[ $(stat -c %a /boot/loader/random-seed 2>/dev/null) == "600" ]]; then
+  echo "✓ random-seed permissions fixed to 600"
+fi
+
+notify-send "Boot permissions fixed" "Security improvement applied to /boot" 2>/dev/null || true
diff --git a/migrations/1777007501.sh b/migrations/1777007501.sh
new file mode 100644
index 0000000000..eb6d2ce55e
--- /dev/null
+++ b/migrations/1777007501.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+# Fix snapper /home config for chroot installations
+# See: https://github.com/basecamp/omarchy/issues/5344
+
+echo "Fixing snapper /home config..."
+
+# Check if /home is on btrfs and has .snapshots
+if [[ -d /home/.snapshots ]] || mountpoint -q /home 2>/dev/null; then
+  # Check if /home snapper config exists
+  if ! sudo snapper list-configs 2>/dev/null | grep -q "^home"; then
+    echo "Creating snapper config for /home..."
+    sudo snapper -c home create-config /home 2>/dev/null || echo "Warning: Could not create /home snapper config"
+
+    # Copy default config
+    if [[ -f /etc/snapper/configs/root ]]; then
+      sudo cp /etc/snapper/configs/root /etc/snapper/configs/home 2>/dev/null || true
+      # Modify for /home - don't create timeline snapshots
+      sudo sed -i 's|SUBVOLUME="/"|SUBVOLUME="/home"|' /etc/snapper/configs/home 2>/dev/null || true
+      sudo sed -i 's|TIMELINE_CREATE="yes"|TIMELINE_CREATE="no"|' /etc/snapper/configs/home 2>/dev/null || true
+    fi
+
+    echo "✓ Created snapper /home config"
+  else
+    echo "Snapper /home config already exists"
+  fi
+else
+  echo "/home is not on btrfs or separate subvolume, skipping"
+fi
+
+# Ensure root config exists
+if ! sudo snapper list-configs 2>/dev/null | grep -q "^root"; then
+  echo "Creating snapper config for root..."
+  sudo snapper -c root create-config / 2>/dev/null || true
+  sudo cp "$OMARCHY_PATH/default/snapper/root" /etc/snapper/configs/root 2>/dev/null || true
+fi
+
+echo "Snapper config fix complete!"
diff --git a/migrations/1777007502.sh b/migrations/1777007502.sh
new file mode 100644
index 0000000000..df0d00fb76
--- /dev/null
+++ b/migrations/1777007502.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+# Fix snapshot restore to exclude /home from restoration
+# See: https://github.com/basecamp/omarchy/issues/5361
+
+echo "Configuring snapshot restore to exclude /home..."
+
+# The issue is that limine-snapper-restore might be restoring /home along with root
+# We need to document and provide a workaround
+
+# Create a wrapper script that warns users about /home
+WRAPPER="/usr/local/bin/omarchy-snapshot-restore-safe"
+sudo tee "$WRAPPER" > /dev/null << 'WRAPPEREOF'
+#!/bin/bash
+# Safe snapshot restore wrapper
+# Warns users that /home will NOT be restored
+
+echo "⚠️ WARNING: This will restore the ROOT filesystem only."
+echo "⚠️ Your /home directory will NOT be affected."
+echo ""
+echo "To restore a snapshot:"
+echo "1. Reboot and select the snapshot from limine menu"
+echo "2. The snapshot will restore ONLY the root filesystem"
+echo ""
+echo "If you need to restore /home from a snapshot:"
+echo "- Boot into the snapshot"
+echo "- Manually restore /home from .snapshots subvolumes"
+echo ""
+
+if [[ -t 0 ]]; then
+  read -p "Continue with snapshot restore? (y/N) " -n 1 -r
+  echo
+  if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+    exit 1
+  fi
+fi
+
+exec sudo limine-snapper-restore "$@"
+WRAPPEREOF
+
+sudo chmod +x "$WRAPPER"
+
+# Also add documentation to the snapshot script
+echo ""
+echo "✅ Snapshot restore is configured to restore ROOT only"
+echo "✅ /home will NOT be restored during snapshot operations"
+echo ""
+echo "If you've already had /home data loss:"
+echo "1. Check .snapshots directory for backup of /home"
+echo "2. You may need to manually restore from those snapshots"
diff --git a/migrations/1777007503.sh b/migrations/1777007503.sh
new file mode 100644
index 0000000000..4b328d1ffc
--- /dev/null
+++ b/migrations/1777007503.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Fix NVIDIA + hyprlock suspend freeze issue
+# See: https://github.com/basecamp/omarchy/issues/5277
+
+echo "Applying NVIDIA suspend fix..."
+
+# Check if user is on NVIDIA
+if command -v nvidia-smi &>/dev/null; then
+  echo "NVIDIA GPU detected, applying suspend fix..."
+
+  # Create a systemd service to stop hyprlock before suspend
+  cat << SYSTEMDEOF | sudo tee /etc/systemd/system/hyprlock-suspend.service > /dev/null
+[Unit]
+Description=Stop hyprlock before suspend/hibernate
+Before=suspend.target hibernate.target hybrid-suspend.target
+DefaultDependencies=no
+StopWhenUnneeded=yes
+After=hypridle.service
+
+[Service]
+Type=oneshot
+ExecStart=/usr/bin/pkill -STOP hyprlock
+RemainAfterExit=yes
+ExecStop=/usr/bin/pkill -CONT hyprlock
+TimeoutStopSec=5
+
+[Install]
+WantedBy=suspend.target hibernate.target hybrid-suspend.target
+SYSTEMDEOF
+
+  sudo systemctl enable hyprlock-suspend.service 2>/dev/null || echo "Warning: Could not enable hyprlock-suspend service"
+
+  echo "✓ Created hyprlock-suspend service"
+  echo "✓ hyprlock will stop before suspend and resume after"
+
+  notify-send "NVIDIA suspend fix applied" "Please reboot for changes to take effect" 2>/dev/null || true
+else
+  echo "No NVIDIA GPU detected, skipping NVIDIA-specific fixes"
+fi