Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions bin/omarchy-launch-walker.bak
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
#!/bin/bash

# Launch the Walker application launcher, making sure its data provider
# (called elephant) and the walker background service are running first.

# Start a command detached from this script's session via uwsm-app.
start_detached() {
  setsid uwsm-app -- "$@" &
}

# Ensure elephant is running before launching walker
pgrep -x elephant > /dev/null || start_detached elephant

# Ensure walker service is running
pgrep -f "walker --gapplication-service" > /dev/null || start_detached walker --gapplication-service

exec walker --width 644 --maxheight 300 --minheight 300 "$@"
9 changes: 8 additions & 1 deletion bin/omarchy-snapshot
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ if [[ -z $COMMAND ]]; then
fi

if ! command -v snapper &>/dev/null; then
exit 127 # omarchy-update can use this to just ignore if snapper is not available
exit 127
fi

case "$COMMAND" in
Expand All @@ -29,6 +29,13 @@ create)
echo
;;
restore)
echo "⚠️ Snapshot restore will restore the ROOT filesystem only."
echo "⚠️ Your /home directory will NOT be affected."
echo ""
echo "If you need to restore /home:"
echo "1. Boot into the snapshot from limine menu"
echo "2. /home is NOT included in the snapshot restore"
echo ""
sudo limine-snapper-restore
;;
esac
2 changes: 1 addition & 1 deletion default/hypr/apps.conf
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,4 @@ source = ~/.local/share/omarchy/default/hypr/apps/telegram.conf
source = ~/.local/share/omarchy/default/hypr/apps/typora.conf
source = ~/.local/share/omarchy/default/hypr/apps/terminals.conf
source = ~/.local/share/omarchy/default/hypr/apps/walker.conf
source = ~/.local/share/omarchy/default/hypr/apps/webcam-overlay.conf
source = ~/.local/share/omarchy/default/hypr/apps/webcam-overlay.conf
4 changes: 4 additions & 0 deletions install/config/all.sh
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,10 @@ run_logged $OMARCHY_INSTALL/config/hardware/usb-autosuspend.sh
run_logged $OMARCHY_INSTALL/config/hardware/ignore-power-button.sh
run_logged $OMARCHY_INSTALL/config/hardware/nvidia.sh
run_logged $OMARCHY_INSTALL/config/hardware/vulkan.sh
run_logged $OMARCHY_INSTALL/config/supergfxd-nvidia-fix.sh
run_logged $OMARCHY_INSTALL/config/nvidia-suspend-fix.sh
run_logged $OMARCHY_INSTALL/config/boot-permissions-fix.sh
Comment on lines +40 to +42
Copy link

Copilot AI Apr 26, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The PR description says only two files are changed for the supergfxd NVIDIA fix, but this commit also adds/executes other fixes (NVIDIA suspend, /boot permissions, snapper messaging, Hyprland config changes, etc.). Please update the PR description/scope so reviewers understand the full impact and can validate each behavior change.

Copilot uses AI. Check for mistakes.
run_logged $OMARCHY_INSTALL/config/snapper-home-config.sh

run_logged $OMARCHY_INSTALL/config/hardware/intel/video-acceleration.sh
run_logged $OMARCHY_INSTALL/config/hardware/intel/lpmd.sh
Expand Down
56 changes: 56 additions & 0 deletions install/config/boot-permissions-fix.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
#!/bin/bash

# Fix /boot permissions security issue
# The random seed file and /boot mount should not be world accessible
# See: https://github.com/basecamp/omarchy/issues/5377

echo "Fixing /boot permissions for better security..."
Comment on lines +1 to +7
Copy link

Copilot AI Apr 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This script is placed under install/config, but it isn’t referenced from the installer entrypoints (no calls found from install/config/all.sh or other install scripts). If it’s intended to run during install, it needs to be added to the install flow; otherwise it won’t ever be applied automatically.

Copilot uses AI. Check for mistakes.

# Determine which filesystem /boot lives on and its current mount options.
# Both stay empty when findmnt is unavailable or /boot is not a mount target.
boot_fs_type=
boot_mount_options=

if command -v findmnt >/dev/null 2>&1 && findmnt -n --target /boot >/dev/null 2>&1; then
  boot_fs_type=$(findmnt -n -o FSTYPE --target /boot 2>/dev/null)
  boot_mount_options=$(findmnt -n -o OPTIONS --target /boot 2>/dev/null)
fi

if [[ "$boot_fs_type" =~ ^(vfat|fat|msdos)$ ]]; then
echo "/boot is on $boot_fs_type; applying mount masks because chmod doesn't change effective permissions on FAT"

# Check if restrictive mount options already exist
if [[ "$boot_mount_options" == *"umask=0077"* ]] || [[ "$boot_mount_options" == *"dmask=0077"* && "$boot_mount_options" == *"fmask=0177"* ]]; then
echo "/boot already has restrictive mount options"
else
sudo mount -o remount,dmask=0077,fmask=0177 /boot 2>/dev/null || echo "Warning: Could not remount /boot with restrictive permissions"
echo "Note: Add dmask=0077,fmask=0177 to /etc/fstab for persistence across reboots"
fi
Comment on lines +18 to +27
Copy link

Copilot AI Apr 26, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In the FAT (/boot on vfat/fat/msdos) branch, the script only remounts with dmask/fmask. That doesn’t persist across reboot; to fully address the bootctl warning, the restrictive masks should be written to the /etc/fstab entry (or a systemd mount unit) for /boot rather than relying on a one-time remount.

Copilot uses AI. Check for mistakes.
else
# /boot is on a normal filesystem (ext4/btrfs etc)

# Check if /boot is a separate mount point
if findmnt -n --target /boot >/dev/null 2>&1; then
# Fix /boot directory permissions (should be 700)
sudo chmod 700 /boot 2>/dev/null || echo "Warning: Could not change /boot permissions"

# Fix random-seed file permissions if it exists
if [[ -f /boot/loader/random-seed ]]; then
sudo chmod 600 /boot/loader/random-seed 2>/dev/null || echo "Warning: Could not change random-seed permissions"
fi

# Verify the fix
boot_perms=$(stat -c %a /boot 2>/dev/null)
if [[ "$boot_perms" == "700" ]]; then
echo "✓ /boot permissions fixed to 700"
fi
else
echo "/boot is not a separate mount (permissions handled by root filesystem)"
fi
fi

# Regenerate the boot-loader random seed so it is written with correct
# permissions; best-effort, skipped entirely when bootctl is absent.
command -v bootctl >/dev/null 2>&1 && { sudo bootctl random-seed 2>/dev/null || true; }

echo "Boot permissions fix complete!"
49 changes: 49 additions & 0 deletions install/config/nvidia-suspend-fix.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
#!/bin/bash

# Fix NVIDIA + hyprlock suspend freeze issue
# See: https://github.com/basecamp/omarchy/issues/5277

echo "Applying NVIDIA suspend fix..."
Comment on lines +1 to +6
Copy link

Copilot AI Apr 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This script is placed under install/config, but it isn’t referenced from the installer entrypoints (no calls found from install/config/all.sh or other install scripts). If it’s meant to be applied during install, it should be wired into the install flow via run_logged (otherwise it’s currently dead code).

Copilot uses AI. Check for mistakes.

# The issue is that hyprlock holds DRM/GBM resources during suspend,
# preventing NVIDIA from entering proper suspend state

# Check if user is on NVIDIA
if command -v nvidia-smi &>/dev/null; then
echo "NVIDIA GPU detected, applying suspend fix..."

# Create a systemd service to stop hyprlock before suspend
# The - prefix makes pkill non-fatal when hyprlock isn't running
cat << 'SYSTEMD' | sudo tee /etc/systemd/system/hyprlock-suspend.service > /dev/null
[Unit]
Description=Stop hyprlock before suspend/hibernate
Before=suspend.target hibernate.target hybrid-suspend.target
DefaultDependencies=no

[Service]
Type=oneshot
ExecStart=-/usr/bin/pkill -STOP hyprlock
RemainAfterExit=yes
ExecStop=-/usr/bin/pkill -CONT hyprlock
TimeoutStopSec=5
Comment on lines +25 to +28
Copy link

Copilot AI Apr 26, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The unit uses pkill ... hyprlock without -x (exact match) or user scoping, which can signal unintended processes and affect all users’ sessions. Consider exact matching and (if possible) scoping to the active graphical user/session to avoid collateral SIGSTOP/SIGCONT.

Copilot uses AI. Check for mistakes.

[Install]
WantedBy=suspend.target hibernate.target hybrid-suspend.target
SYSTEMD

# Reload systemd daemon to recognize the new unit
sudo systemctl daemon-reload 2>/dev/null || echo "Warning: Could not reload systemd daemon"

# Enable the service using chrootable helper if available
if command -v chrootable_systemctl_enable >/dev/null 2>&1; then
chrootable_systemctl_enable hyprlock-suspend.service 2>/dev/null || echo "Warning: Could not enable hyprlock-suspend service"
else
sudo systemctl enable hyprlock-suspend.service 2>/dev/null || echo "Warning: Could not enable hyprlock-suspend service"
fi

echo "✓ Created hyprlock-suspend service"
else
echo "No NVIDIA GPU detected, skipping NVIDIA-specific fixes"
fi

echo "NVIDIA suspend fix complete!"
32 changes: 32 additions & 0 deletions install/config/snapper-home-config.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
#!/bin/bash

# Fix snapper /home config creation for chroot installations
# See: https://github.com/basecamp/omarchy/issues/5344

echo "Ensuring snapper /home config is created..."
Comment on lines +1 to +6
Copy link

Copilot AI Apr 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This script is placed under install/config, but it isn’t referenced from the installer entrypoints (no calls found from install/config/all.sh or other install scripts). If it’s meant to ensure snapper configs during install, it should be wired into the install flow; otherwise it won’t run.

Copilot uses AI. Check for mistakes.

# Check if /home is on a separate subvolume or btrfs
if mountpoint -q /home 2>/dev/null; then
# /home is a separate mount point
if ! sudo snapper list-configs 2>/dev/null | grep -qE '^home[[:space:]]'; then
echo "Creating snapper config for /home..."
sudo snapper -c home create-config /home 2>/dev/null || echo "Warning: Could not create /home snapper config"
fi
elif [[ -d /home/.snapshots ]]; then
# /home has .snapshots subdirectory, ensure config exists
if ! sudo snapper list-configs 2>/dev/null | grep -qE '^home[[:space:]]'; then
echo "Creating snapper config for /home subvolume..."
sudo snapper -c home create-config /home 2>/dev/null || echo "Warning: Could not create /home snapper config"
fi
Comment on lines +16 to +20
Copy link

Copilot AI Apr 26, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same issue as above: grep -q "home" can match the subvolume column rather than the config name, so the check can incorrectly think the home config exists. Match the config name column explicitly (e.g., ^home[[:space:]]).

Copilot uses AI. Check for mistakes.
else
echo "/home is not on a separate subvolume, skipping /home snapper config"
fi

Comment on lines +3 to +24
Copy link

Copilot AI Apr 26, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This script creates a Snapper "home" config, which conflicts with the repo’s established behavior of snapshotting only root (see install/login/limine-snapper.sh:53-57 and migrations/1776927490.sh which deletes the home config). Creating /home snapshots again can reintroduce the accidental user-data rollback the project is trying to avoid; consider removing the /home config creation or gating it behind an explicit opt-in setting.

Suggested change
# Fix snapper /home config creation for chroot installations
# See: https://github.com/basecamp/omarchy/issues/5344
echo "Ensuring snapper /home config is created..."
# Check if /home is on a separate subvolume or btrfs
if mountpoint -q /home 2>/dev/null; then
# /home is a separate mount point
if ! sudo snapper list-configs 2>/dev/null | grep -qE '^home[[:space:]]'; then
echo "Creating snapper config for /home..."
sudo snapper -c home create-config /home 2>/dev/null || echo "Warning: Could not create /home snapper config"
fi
elif [[ -d /home/.snapshots ]]; then
# /home has .snapshots subdirectory, ensure config exists
if ! sudo snapper list-configs 2>/dev/null | grep -qE '^home[[:space:]]'; then
echo "Creating snapper config for /home subvolume..."
sudo snapper -c home create-config /home 2>/dev/null || echo "Warning: Could not create /home snapper config"
fi
else
echo "/home is not on a separate subvolume, skipping /home snapper config"
fi
# Ensure snapper configuration matches the project's root-only snapshot policy
# /home snapshotting is intentionally not auto-created to avoid user-data rollback
echo "Ensuring snapper root config is created..."
echo "Skipping snapper /home config creation; this system snapshots only root."

Copilot uses AI. Check for mistakes.
# Also ensure root snapper config exists
if ! sudo snapper list-configs 2>/dev/null | grep -qE '^root[[:space:]]'; then
  echo "Creating snapper config for root..."
  sudo snapper -c root create-config / 2>/dev/null || echo "Warning: Could not create root snapper config"

  # Install omarchy's default root config when it exists. Quote the path and
  # check for the file first (matches migrations/1777007501.sh behavior and
  # avoids word-splitting if $OMARCHY_PATH contains spaces or is unset).
  if [[ -f "$OMARCHY_PATH/default/snapper/root" ]]; then
    sudo cp "$OMARCHY_PATH/default/snapper/root" /etc/snapper/configs/root 2>/dev/null || true
  fi
fi

echo "Snapper config check complete!"
37 changes: 37 additions & 0 deletions install/config/supergfxd-nvidia-fix.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#!/bin/bash

# Fix NVIDIA GPU detection when supergfxd is blacklisting modules
# See: https://github.com/basecamp/omarchy/issues/5408

echo "Fixing NVIDIA GPU detection..."
Comment on lines +1 to +6
Copy link

Copilot AI Apr 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This script appears to be intended as part of the installer (“New installer script”), but it isn’t referenced anywhere in the install flow (no calls found from install/config/all.sh or other install scripts). If it should run automatically during install, it needs to be wired into the installer (e.g., added to install/config/all.sh via run_logged).

Copilot uses AI. Check for mistakes.

SUPERGFXD_CONF="/etc/modprobe.d/supergfxd.conf"

# Check for persisted NVIDIA blacklists from supergfxd regardless of service state
if grep -Eq '^[[:space:]]*blacklist[[:space:]]+nvidia([_-][[:alnum:]_]+)?([[:space:]]|$)' "$SUPERGFXD_CONF" 2>/dev/null; then
  echo "Found nvidia blacklist from supergfxd!"
  echo "Disabling supergfxd to enable NVIDIA..."

  # Disable supergfxd if active or enabled
  if systemctl is-active --quiet supergfxd 2>/dev/null || systemctl is-enabled --quiet supergfxd 2>/dev/null; then
    sudo systemctl disable --now supergfxd 2>/dev/null || true
  fi

  sudo rm -f "$SUPERGFXD_CONF" 2>/dev/null || true

  # Regenerate initramfs so the un-blacklisted modules are picked up at boot
  sudo mkinitcpio -P 2>/dev/null || true

  echo "✓ Removed supergfxd NVIDIA blacklist"
  echo "⚠️ Please reboot for changes to take effect"
else
  echo "No supergfxd NVIDIA blacklist found, no action needed"
fi

# Also ensure NVIDIA modules are not blocked elsewhere.
# Glob directly instead of parsing `ls` output, which breaks on unusual
# filenames and relies on the failing literal glob when nothing matches.
other_nvidia_confs=()
for conf in /etc/modprobe.d/*nvidia*.conf; do
  [[ -e "$conf" ]] || continue          # unmatched glob stays literal; skip it
  [[ "$conf" == *supergfxd* ]] && continue
  other_nvidia_confs+=("$conf")
done

if ((${#other_nvidia_confs[@]} > 0)); then
  echo "Warning: Other nvidia blacklist files found:"
  printf '%s\n' "${other_nvidia_confs[@]}"
fi

echo "NVIDIA GPU detection fix complete!"
70 changes: 70 additions & 0 deletions migrations/1777007500.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
#!/bin/bash

# Fix /boot permissions security issue
# See: https://github.com/basecamp/omarchy/issues/5377

echo "Fixing /boot permissions for better security..."
Comment on lines +1 to +6
Copy link

Copilot AI Apr 24, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The PR description/title focus on the supergfxd/NVIDIA detection fix, but this PR also adds migrations for /boot permissions, snapper, snapshot restore messaging, and hyprlock suspend behavior. Please update the PR description to cover these additional changes or split them into separate PRs so the scope matches the stated intent.

Copilot uses AI. Check for mistakes.

# Detect the filesystem backing /boot. Left empty when findmnt is missing
# or /boot is not a separate mount target.
boot_fs_type=
boot_mount_options=

if command -v findmnt >/dev/null 2>&1 && findmnt -n --target /boot >/dev/null 2>&1; then
  boot_fs_type=$(findmnt -n -o FSTYPE --target /boot 2>/dev/null)
fi

if [[ "$boot_fs_type" =~ ^(vfat|fat|msdos)$ ]]; then
echo "/boot is on $boot_fs_type; applying mount masks because chmod doesn't change effective permissions on FAT"

# Get current mount options before remount
boot_mount_options="$(findmnt -n -o OPTIONS --target /boot 2>/dev/null)"

# Check if restrictive mount options already exist
if [[ "$boot_mount_options" == *"umask=0077"* ]] || [[ "$boot_mount_options" == *"dmask=0077"* && "$boot_mount_options" == *"fmask=0177"* ]]; then
echo "/boot already has restrictive mount options"
else
sudo mount -o remount,dmask=0077,fmask=0177 /boot 2>/dev/null || echo "Warning: Could not remount /boot with restrictive permissions"

# Re-read mount options after remount to verify
boot_mount_options="$(findmnt -n -o OPTIONS --target /boot 2>/dev/null)"

if [[ "$boot_mount_options" == *"umask=0077"* ]] || [[ "$boot_mount_options" == *"dmask=0077"* && "$boot_mount_options" == *"fmask=0177"* ]]; then
echo "✓ /boot mount options now include restrictive umask"
else
echo "Warning: /boot remounted but restrictive options not detected. Check /etc/fstab for persistence."
fi
fi
Comment on lines +16 to +36
Copy link

Copilot AI Apr 26, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

In the FAT (/boot on vfat/fat/msdos) branch, the script only remounts with dmask/fmask. That change is not persistent across reboots, so the bootctl warning will likely return after reboot unless the restrictive masks are added to the /etc/fstab (or the systemd mount unit) options for /boot.

Copilot uses AI. Check for mistakes.

echo "Note: Add dmask=0077,fmask=0177 to /etc/fstab for persistence across reboots"
else
# Check if /boot is actually a separate mount
if findmnt -n --target /boot >/dev/null 2>&1; then
# Fix /boot directory permissions (should be 700 for security)
sudo chmod 700 /boot 2>/dev/null || echo "Warning: Could not change /boot permissions"

# Fix random-seed file permissions if it exists
if [[ -f /boot/loader/random-seed ]]; then
sudo chmod 600 /boot/loader/random-seed 2>/dev/null || echo "Warning: Could not change random-seed permissions"
fi

# Verify the fix
boot_perms=$(stat -c %a /boot 2>/dev/null)
if [[ "$boot_perms" == "700" ]]; then
echo "✓ /boot permissions fixed to 700"
fi
else
echo "/boot is not a separate mount (permissions handled by root filesystem)"
fi
fi

# Regenerate the boot-loader random seed with correct permissions
# (no-op when bootctl is not installed; failures are non-fatal).
command -v bootctl >/dev/null 2>&1 && { sudo bootctl random-seed 2>/dev/null || true; }

# Only attempt a desktop notification when a session bus looks reachable,
# so headless/chroot runs don't error out.
if command -v notify-send >/dev/null 2>&1 && [[ -n "${DBUS_SESSION_BUS_ADDRESS:-}" ]]; then
  notify-send "Boot permissions fixed" "Security improvement applied to /boot" || true
fi

exit 0
33 changes: 33 additions & 0 deletions migrations/1777007501.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
#!/bin/bash

# Fix snapper root config for chroot installations
# See: https://github.com/basecamp/omarchy/issues/5344

echo "Fixing snapper root config..."

# Nothing to do on systems without snapper
if ! command -v snapper >/dev/null 2>&1; then
  echo "snapper not installed, skipping"
  exit 0
fi

# Create the root config only when it doesn't already exist
if sudo snapper list-configs 2>/dev/null | grep -qE '^root[[:space:]]'; then
  echo "Snapper root config already exists"
else
  echo "Creating snapper config for root..."
  sudo snapper -c root create-config / 2>/dev/null || true

  # Install omarchy's default root config when it ships one
  if [[ -f "$OMARCHY_PATH/default/snapper/root" ]]; then
    sudo cp "$OMARCHY_PATH/default/snapper/root" /etc/snapper/configs/root 2>/dev/null || true
  fi

  echo "✓ Created snapper root config"
fi

# Note: /home snapper config creation removed as it conflicts with
# migration 1776927490 which intentionally disables /home snapshots
# to prevent accidental user data rollback

echo "Snapper config fix complete!"
32 changes: 32 additions & 0 deletions migrations/1777007502.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
#!/bin/bash

# Update omarchy-snapshot to include /home exclusion warning
# See: https://github.com/basecamp/omarchy/issues/5361

echo "Updating omarchy-snapshot with /home exclusion warning..."

SOURCE_SNAPSHOT="$OMARCHY_PATH/bin/omarchy-snapshot"
TARGET_SNAPSHOT="/usr/local/bin/omarchy-snapshot"

# Print the message (to stdout, like the rest of this script) and abort.
fail() {
  echo "$1"
  exit 1
}

[[ -f "$SOURCE_SNAPSHOT" ]] ||
  fail "Error: updated snapshot script not found at $SOURCE_SNAPSHOT"

[[ -d "$(dirname "$TARGET_SNAPSHOT")" ]] ||
  fail "Error: target directory $(dirname "$TARGET_SNAPSHOT") does not exist"

sudo install -m 0755 "$SOURCE_SNAPSHOT" "$TARGET_SNAPSHOT" 2>/dev/null ||
  fail "Error: failed to update $TARGET_SNAPSHOT"

# Sanity-check that the installed copy actually carries the new warning text
grep -q "will NOT be affected" "$TARGET_SNAPSHOT" 2>/dev/null ||
  fail "Error: $TARGET_SNAPSHOT was updated, but the /home exclusion warning is still missing"

echo ""
echo "✓ Updated omarchy-snapshot with /home warning"
Loading