New architecture
This commit is contained in:
parent e0ae3d219d
commit d9fbe53af8
104 changed files with 597 additions and 1561 deletions
home
.chezmoiscripts
qubes
universal
.chezmoiscripts_disabled/disabled
.chezmoitemplates/secrets
dot_config
shell
timeshift
vpn
dot_local
bin
executable_backup-apps.tmpl
executable_backup-dconf.tmpl
executable_get-secret
executable_installx
executable_update-system
post-installx
executable_post-atuin.sh
executable_post-blocky.sh
executable_post-clamav.sh
executable_post-cloudflared.sh
executable_post-docker-desktop.sh
executable_post-easyengine.sh
executable_post-endlessh.sh
executable_post-fail2ban.sh
executable_post-fig.sh
executable_post-firefox.sh
executable_post-github-runner.sh
executable_post-keybase.sh
executable_post-mise.sh
executable_post-netdata.sh
executable_post-ntfy.sh
executable_post-plymouth.sh
executable_post-postfix.sh
executable_post-rkhunter.sh
executable_post-samba.sh
executable_post-sftpgo.sh
executable_post-tabby.sh
executable_post-tailscale.sh
executable_post-timeshift.sh
executable_post-tor.sh
executable_post-vim.sh
executable_post-virtualbox.sh
executable_post-vmware.sh
executable_post-vscode.sh
executable_post-warp.sh
executable_post-wireguard-tools.sh
etc
local
scripts
software.yml
system
Library/LaunchDaemons
etc
fail2ban
keybase
private_wireguard
encrypted_private_readonly_Mullvad WG Belgium (UDP 4888).conf.tmpl
encrypted_private_readonly_Mullvad WG Japan (UDP 4888).conf.tmpl
encrypted_private_readonly_Mullvad WG Moldova (UDP 53).conf.tmpl
encrypted_private_readonly_Mullvad WG Singapore (UDP 4888).conf.tmpl
encrypted_private_readonly_Proton WG Cambodia (UDP 51820).conf.tmpl
encrypted_private_readonly_Proton WG Colombia (UDP 51820).conf.tmpl
encrypted_private_readonly_Proton WG Cyprus (UDP 51820).conf.tmpl
encrypted_private_readonly_Proton WG Serbia (UDP 51820).conf.tmpl
encrypted_private_readonly_Proton WG Slovakia (UDP 51820).conf.tmpl
sftpgo
symlink_clamav
symlink_cloudflared
timeshift
opt
homebrew/etc
quarantine
usr
lib/systemd/system
local
@@ -93,7 +93,7 @@ else
fi

wait
-logg success 'Finished installing TemplateVMs'
+gum log -sl info 'Finished installing TemplateVMs'

updateTemplates

@@ -13,13 +13,13 @@ debianPasswordlessRoot() {
gum log -sl info "Installing qubes-core-agent-passwordless-root on $1"
qvm-run -u root "$1" apt-get update
qvm-run -u root "$1" apt-get install -y qubes-core-agent-passwordless-root
-logg success "Successfully installed qubes-core-agent-passwordless-root on $1"
+gum log -sl info "Successfully installed qubes-core-agent-passwordless-root on $1"
}

fedoraPasswordlessRoot() {
gum log -sl info "Installing qubes-core-agent-passwordless-root on $1"
qvm-run -u root "$1" dnf install -y qubes-core-agent-passwordless-root
-logg success "Successfully installed qubes-core-agent-passwordless-root on $1"
+gum log -sl info "Successfully installed qubes-core-agent-passwordless-root on $1"
}

### Ensure Qubes minimal templates have passwordless sudo
@@ -42,5 +42,5 @@ for TEMPLATE of {{ .qubes.templates | toString | replace "[" "" | replace "]" ""
done

wait
-logg success 'Finished installing qubes-core-agent-passwordless-root on minimal templates'
+gum log -sl info 'Finished installing qubes-core-agent-passwordless-root on minimal templates'
{{ end -}}
@@ -67,14 +67,14 @@ applyLinuxThemeFiles() {
for ITEM_TO_BE_REMOVED in "/usr/share/backgrounds/images" "/usr/share/backgrounds/f32" "/usr/share/backgrounds/qubes" "/usr/share/wallpapers"; do
if [ -d "$ITEM_TO_BE_REMOVED" ] || [ -f "$ITEM_TO_BE_REMOVED" ]; then
sudo rm -rf "$ITEM_TO_BE_REMOVED"
-logg success "Removed $ITEM_TO_BE_REMOVED"
+gum log -sl info "Removed $ITEM_TO_BE_REMOVED"
fi
done

### Ensure /usr/local/share exists
if [ ! -d /usr/local/share ]; then
sudo mkdir -p /usr/local/share
-logg success 'Created /usr/local/share'
+gum log -sl info 'Created /usr/local/share'
fi

### Copy theme files over to /usr/local/share
@@ -122,7 +122,7 @@ applyLinuxThemeFiles() {
### Set appropriate platform-specific icon in plymouth theme
if [ -f '/usr/local/share/plymouth/themes/{{ .theme }}/icons/{{ .host.distro.id }}.png' ]; then
sudo cp -f '/usr/local/share/plymouth/themes/{{ .theme }}/icons/{{ .host.distro.id }}.png' '/usr/local/share/plymouth/themes/{{ .theme }}/icon.png'
-logg success 'Added platform-specific icon to {{ .theme }} Plymouth theme'
+gum log -sl info 'Added platform-specific icon to {{ .theme }} Plymouth theme'
else
gum log -sl warn 'The {{ .host.distro.id }}.png icon is not available in the icons folder insider the {{ .theme }} Plymouth theme'
fi
@@ -396,7 +396,7 @@ dconfSettings() {
fi
gum log -sl info 'Loading versioned dconf settings for '"$DCONF_SETTINGS_ID"''
dconf load "$DCONF_SETTINGS_ID" < "$DCONF_CONFIG_FILE"
-logg success 'Finished applying dconf settings for '"$DCONF_SETTINGS_ID"''
+gum log -sl info 'Finished applying dconf settings for '"$DCONF_SETTINGS_ID"''
done
else
gum log -sl warn '~/.config/dconf/settings does not exist!'
@@ -570,7 +570,7 @@ gnomeExtensionSettings() {
cd /tmp
install-gnome-extensions --enable --overwrite --file /tmp/install-gnome-extensions.txt
rm -f /tmp/install-gnome-extensions.txt
-logg success 'Finished installing the GNOME extensions'
+gum log -sl info 'Finished installing the GNOME extensions'
else
gum log -sl info 'No new GNOME extensions to install'
fi
@@ -608,7 +608,7 @@ gnomeExtensionSettings() {
echo "$EXT_SETTINGS"
eval "$EXT_SETTINGS"
fi
-logg success 'Applied gsettings configuration for the '"$EXT_ID"' GNOME extension'
+gum log -sl info 'Applied gsettings configuration for the '"$EXT_ID"' GNOME extension'
fi
done
fi
@@ -622,19 +622,19 @@ grubSettings() {
### Fix Qubes issue
if command -v qubesctl > /dev/null && [ -f /boot/grub2/grubenv ] && [ -d /boot/efi/EFI/qubes ]; then
sudo cp -f /boot/grub2/grubenv /boot/efi/EFI/qubes/grubenv
-logg success 'Copied /boot/grub2/grubenv to /boot/efi/EFI/qubes/grubenv'
+gum log -sl info 'Copied /boot/grub2/grubenv to /boot/efi/EFI/qubes/grubenv'
fi

### Ensure /boot/grub2/themes is directory
if [ ! -d /boot/grub2/themes ]; then
sudo mkdir -p /boot/grub2/themes
-logg success 'Created /boot/grub2/themes'
+gum log -sl info 'Created /boot/grub2/themes'
fi

### Copy GRUB theme to /boot/grub2/themes
if [ -d /usr/local/share/grub/themes ]; then
sudo cp -rf /usr/local/share/grub/themes /boot/grub2/
-logg success 'Copied GRUB themes in /usr/local/share/grub/themes to /boot/grub2/themes'
+gum log -sl info 'Copied GRUB themes in /usr/local/share/grub/themes to /boot/grub2/themes'
else
gum log -sl warn '/usr/local/share/grub/themes is missing'
fi
@@ -649,7 +649,7 @@ grubSettings() {
SCREEN_HEIGHT="$(xrandr --current | grep '*' | uniq | awk '{print $1}' | cut -d 'x' -f2)"
SCREEN_RATIO="$(awk -v height="$SCREEN_HEIGHT" -v width="$SCREEN_WIDTH" 'BEGIN { print ((height / width) * 1000) }')"
SCREEN_RATIO="${SCREEN_RATIO%.*}"
-logg success "Screen detected as $SCREEN_WIDTH x $SCREEN_HEIGHT (ratio of $SCREEN_RATIO)"
+gum log -sl info "Screen detected as $SCREEN_WIDTH x $SCREEN_HEIGHT (ratio of $SCREEN_RATIO)"
if (( $(echo "$SCREEN_RATIO $SCREEN_RATIO_ULTRAWIDE" | awk '{print ($1 > $2)}') )); then
GRUB_RESOLUTION_TYPE="ultrawide"
gum log -sl info 'GRUB resolution registered as ultrawide'
@@ -729,7 +729,7 @@ grubSettings() {
# Check looks in /usr/local/share/grub because on some systems the /boot folder is behind permissions for non-root users
if [ -f "/usr/local/share/grub/themes/{{ .theme }}-$GRUB_RESOLUTION_TYPE/icons/$GRUB_ICON.png" ]; then
sudo cp -f /boot/grub2/themes/{{ .theme }}-$GRUB_RESOLUTION_TYPE/icons/$GRUB_ICON.png /boot/grub2/themes/{{ .theme }}-$GRUB_RESOLUTION_TYPE/icon.png
-logg success 'Copied platform-specific icon to GRUB2 theme folder'
+gum log -sl info 'Copied platform-specific icon to GRUB2 theme folder'
else
gum log -sl warn "/boot/grub2/themes/{{ .theme }}-$GRUB_RESOLUTION_TYPE/icons/$GRUB_ICON.png is missing"
fi
@@ -771,11 +771,11 @@ grubSettings() {
if [ -f /boot/efi/EFI/qubes/grub.cfg ]; then
gum log -sl info 'Running sudo grub2-mkconfig -o /boot/efi/EFI/qubes/grub.cfg'
sudo grub2-mkconfig -o /boot/efi/EFI/qubes/grub.cfg
-logg success 'Applied GRUB2 theme'
+gum log -sl info 'Applied GRUB2 theme'
elif [ -f /boot/efi/EFI/grub.cfg ]; then
gum log -sl info 'Running sudo grub2-mkconfig -o /boot/efi/EFI/grub.cfg'
sudo grub2-mkconfig -o /boot/efi/EFI/grub.cfg
-logg success 'Applied GRUB2 theme'
+gum log -sl info 'Applied GRUB2 theme'
else
gum log -sl warn 'Unknown GRUB2 configuration - not applying GRUB2 theme'
fi
@@ -783,7 +783,7 @@ grubSettings() {
gum log -sl info 'Assuming system is non-UEFI since /sys/firmware/efi is not present'
gum log -sl info 'Running sudo grub2-mkconfig -o /boot/grub2/grub.cfg'
sudo grub2-mkconfig -o /boot/grub2/grub.cfg
-logg success 'Applied GRUB2 theme'
+gum log -sl info 'Applied GRUB2 theme'
fi
elif [ -f /usr/sbin/update-grub ]; then
gum log -sl info 'Running sudo update-grub'
@@ -423,4 +423,4 @@ else
wait
fi

-logg success 'Finished running the post-install tasks'
+gum log -sl info 'Finished running the post-install tasks'
@@ -61,4 +61,4 @@ cleanAptGet &
cleanupBrew &
wait

-logg success 'Finished cleanup process'
+gum log -sl info 'Finished cleanup process'
@@ -67,7 +67,7 @@ ensureFullDiskAccess() {
fi
exit 0
else
-logg success 'Current terminal has full disk access'
+gum log -sl info 'Current terminal has full disk access'
if [ -f "$HOME/.zshrc" ]; then
if command -v gsed > /dev/null; then
sudo gsed -i '/# TEMPORARY FOR INSTALL DOCTOR MACOS/d' "$HOME/.zshrc" || gum log -sl warn "Failed to remove kickstart script from .zshrc"
@@ -98,7 +98,7 @@ importCloudFlareCert() {
security verify-cert -c "$CRT_TMP" > /dev/null 2>&1
if [ $? != 0 ]; then
gum log -sl info '**macOS Manual Security Permission** Requesting security authorization for Cloudflare trusted certificate'
-sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain "$CRT_TMP" && logg success 'Successfully imported Cloudflare_CA.crt into System.keychain'
+sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain "$CRT_TMP" && gum log -sl info 'Successfully imported Cloudflare_CA.crt into System.keychain'
fi

### Remove temporary file, if necessary
@@ -71,7 +71,7 @@ decryptKey() {
if [ -n "$EXIT_CODE" ]; then
decryptionFailure
else
-logg success 'The encryption key was successfully decrypted'
+gum log -sl info 'The encryption key was successfully decrypted'
fi
else
installExpect
@@ -91,7 +91,7 @@ configureGPG() {
if ! gpg --list-secret-keys --keyid-format=long | grep "$KEYID_TRIMMED" > /dev/null; then
if [ -f "${XDG_DATA_HOME:-$HOME/.local/share}/chezmoi/home/private_dot_gnupg/private_public/private_${KEYID}.asc" ]; then
gum log -sl info "Importing GPG key stored in ${XDG_DATA_HOME:-$HOME/.local/share}/chezmoi/home/private_dot_gnupg/private_public/private_${KEYID}.asc since its name matches the GPG key ID in .chezmoi.yaml.tmpl"
-gpg --import "${XDG_DATA_HOME:-$HOME/.local/share}/chezmoi/home/private_dot_gnupg/private_public/private_${KEYID}.asc" && logg success 'Successfully imported master GPG key'
+gpg --import "${XDG_DATA_HOME:-$HOME/.local/share}/chezmoi/home/private_dot_gnupg/private_public/private_${KEYID}.asc" && gum log -sl info 'Successfully imported master GPG key'
else
gum log -sl info 'Attempting to download the specified public GPG key ({{ .user.gpg.id }}) from public keyservers'
gpg --keyserver https://pgp.mit.edu --recv "$KEYID" || EXIT_CODE=$?
@@ -101,7 +101,7 @@ configureGPG() {
if [ -n "$EXIT_CODE" ]; then
gum log -sl info 'Non-zero exit code received when trying to retrieve public user GPG key on hkps://pgp.mit.edu'
else
-logg success 'Successfully imported configured public user GPG key'
+gum log -sl info 'Successfully imported configured public user GPG key'
fi
fi
fi
@@ -424,13 +424,13 @@ installDocker() {
if [ -n "$SOURCE_EXIT_CODE" ]; then
gum log -sl error 'All gVisor installation methods failed' && exit 1
else
-logg success 'gVisor installed via source'
+gum log -sl info 'gVisor installed via source'
fi
else
-logg success 'gVisor installed via Go fallback method'
+gum log -sl info 'gVisor installed via Go fallback method'
fi
else
-logg success 'gVisor installed from pre-built Google-provided binaries'
+gum log -sl info 'gVisor installed from pre-built Google-provided binaries'
fi
else
gum log -sl info 'runsc is installed'
@@ -531,27 +531,27 @@ removeLinuxBloatware() {
elif command -v apt-get > /dev/null; then
if dpkg -l "$PKG" | grep -E '^ii' > /dev/null; then
sudo apt-get remove -y "$PKG"
-logg success 'Removed '"$PKG"' via apt-get'
+gum log -sl info 'Removed '"$PKG"' via apt-get'
fi
elif command -v dnf > /dev/null; then
if rpm -qa | grep "$PKG" > /dev/null; then
sudo dnf remove -y "$PKG"
-logg success 'Removed '"$PKG"' via dnf'
+gum log -sl info 'Removed '"$PKG"' via dnf'
fi
elif command -v yum > /dev/null; then
if rpm -qa | grep "$PKG" > /dev/null; then
sudo yum remove -y "$PKG"
-logg success 'Removed '"$PKG"' via yum'
+gum log -sl info 'Removed '"$PKG"' via yum'
fi
elif command -v pacman > /dev/null; then
if pacman -Qs "$PKG" > /dev/null; then
sudo pacman -R "$PKG"
-logg success 'Removed '"$PKG"' via pacman'
+gum log -sl info 'Removed '"$PKG"' via pacman'
fi
elif command -v zypper > /dev/null; then
if rpm -qa | grep "$PKG" > /dev/null; then
sudo zypper remove -y "$PKG"
-logg success 'Removed '"$PKG"' via zypper'
+gum log -sl info 'Removed '"$PKG"' via zypper'
fi
fi
done
@@ -579,10 +579,10 @@ setHostname() {
sudo dscacheutil -flushcache
elif [ -f /etc/passwd ]; then
gum log -sl info 'Setting Linux hostname'
-sudo hostname "$HOSTNAME" && logg success "Changed hostname to $HOSTNAME"
+sudo hostname "$HOSTNAME" && gum log -sl info "Changed hostname to $HOSTNAME"
if command -v hostnamectl > /dev/null; then
gum log -sl info 'Ensuring hostname persists after reboot'
-sudo hostnamectl set-hostname "$HOSTNAME" && logg success "Permanently changed hostname to $HOSTNAME"
+sudo hostnamectl set-hostname "$HOSTNAME" && gum log -sl info "Permanently changed hostname to $HOSTNAME"
else
gum log -sl warn 'hostnamectl was not available in the PATH - this operating system type might be unsupported'
fi
@@ -677,7 +677,7 @@ installBrewPackages() {
ensureBrewPackageInstalled "zx"
ensureBrewPackageInstalled "whalebrew"
wait
-logg success 'Finished installing auxilary Homebrew packages'
+gum log -sl info 'Finished installing auxilary Homebrew packages'
gum log -sl info 'Ensuring Ansible is installed (with plugins)' && installAnsible
}

@@ -779,7 +779,7 @@ installXcode() {
gum log -sl error 'Failed to install Xcode'
fi
else
-logg success 'Xcode is already installed'
+gum log -sl info 'Xcode is already installed'
fi
fi
}
@@ -857,4 +857,4 @@ else
wait
fi

-logg success 'Successfully applied preliminary system tweaks'
+gum log -sl info 'Successfully applied preliminary system tweaks'
@@ -1,19 +0,0 @@
#!/usr/bin/env bash
# @file VSCodium Extension Pre-Installation
# @brief This script pre-installs the extensions contained in ~/.config/Code/User/extensions.json

export NODE_OPTIONS=--throw-deprecation

# @description Check for the presence of the `codium` command in the `PATH` and install extensions for VSCodium if it is present
if command -v codium > /dev/null; then
EXTENSIONS="$(codium --list-extensions)"
jq -r '.recommendations[]' "${XDG_CONFIG_HOME:-$HOME/.config}/Code/User/extensions.json" | while read EXTENSION; do
if ! echo "$EXTENSIONS" | grep -iF "$EXTENSION" > /dev/null; then
gum log -sl info 'Installing VSCodium extension '"$EXTENSION"'' && codium --install-extension "$EXTENSION" && logg success 'Installed '"$EXTENSION"''
else
gum log -sl info ''"$EXTENSION"' already installed'
fi
done
else
gum log -sl info 'codium executable not available - skipping plugin install process for it'
fi
@@ -1,19 +0,0 @@
#!/usr/bin/env bash
# @file Firewall Configuration
# @brief Configures the firewall (built-in for macOS and firewall-cmd / ufw for Linux)
# @description
# This script configures the firewall for macOS / Linux.

{{- includeTemplate "universal/profile" }}
{{- includeTemplate "universal/logg" }}

if [ -d /Applications ] && [ -d /System ]; then
# System is macOS
gum log -sl info 'Enabling macOS firewall'
elif command -v firewall-cmd > /dev/null; then
# System is Linux and has firewall-cmd present in PATH
gum log -sl info 'Setting up firewall using firewall-cmd'
elif command -v ufw > /dev/null; then
# System is Linux and has ufw present in PATH
gum log -sl info 'Setting up firewall using ufw'
fi
@@ -1,7 +0,0 @@
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBiWUl2YTRURTd4MHQ0ZWdi
eEtVT1liTm5ZbExtM0VFUVk1cGdkUDNQUlVJClFjOEs4OURvS0IxRzdpZ0R5VUVh
Z3FZN0l4QlFWQnBMQjhRaDFDNEhUVkkKLS0tIG9KNkxiejNma3JRWS9hMEhNcDRB
YXRwZUlyWWVFVy9qRm5tUXJ0YUFmZTgKNT2XOgMxVxf/B+ofbjjB1ua8siR7k80R
A8xdSfKb0G8rIqfbLTMXcVH1OPmzMUyvw/wSsK+Cr9CELTAkx/aJakh2H3p/7o4=
-----END AGE ENCRYPTED FILE-----
@@ -1,7 +0,0 @@
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBDb2ROdUlLOXFQbXZRYzZO
dWFTcG5PT2VxUXJQcGlnb2tDRkJNaHRtSFh3CmNmeWFBSVdOU0RWRW5MbFBRZm5y
OHdhUzM5bFRaWkNTbi9lQnpTUFNRZ28KLS0tIHFwaXFXbmlkZjZMbnN0YW9oK2FE
aE9ySjRhUk05WFdVOFlic2NHUDkzL28K5akbLaJAm/eGNgO1DAgqqXxyg7+JxXN7
RUq8jR40j0fJheNk9KZOTEls1Zp998FBZB2fn/l0ugmO
-----END AGE ENCRYPTED FILE-----
@@ -1,7 +0,0 @@
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBxSnBLOVAwSUR2R29KdTU1
NTNKK3EydkNZWXgzSDU2czBKWUhQYkpJS1E4CmNpT1JsT0Y5dW82UFpnYlBDeDJD
Umo5NWdwQlZwdjhkMmduMzBJbmhEQ0EKLS0tIHlCTEhzeUFxbm5WcWFmZzk2OGN3
MXBhbjZsWE1Hd0hGMjhZWGNSNm1XeUkKosMtXcchDa6R6iWTw1a0HDTKBPf0V32X
tfMES6PplpobOE5nue9iilDL5KBIc/VimC/Vom0o192t
-----END AGE ENCRYPTED FILE-----
@@ -1,7 +0,0 @@
-----BEGIN AGE ENCRYPTED FILE-----
YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBCMHVqVW4rZlJXZm1ZWkc4
VzF0cE9FMjZ4UEwwcXFJenB2ajlUdml0QjI0CjlUREthY3F3U01RQmMvMzBKdkta
NGxsbXU2TTFCN3NpOExRQ0N1Z2tSTTAKLS0tIG1KSjJKN2Z2KzBuQlFUZkJJMWEr
VUtpOWpISmg0UlVKM0ptckVhNjdaZFUKme/YLpo52H8/FtOIKytsqrcQ6f8MsJzV
T01srwUC8pB4TXh6a/TUp1ECJzPmoxPIEFzbUdN+JE2I
-----END AGE ENCRYPTED FILE-----
@@ -925,6 +925,6 @@ defaults write com.tapbots.TweetbotMac OpenURLsDirectly -bool true
# Set Drift as default screen saver
defaults -currentHost write com.apple.screensaver moduleDict -dict moduleName Brooklyn path "/System/Library/Screen Savers/Drift.saver"

-logg success 'Done applying macOS settings'
+gum log -sl info 'Done applying macOS settings'
gum log -sl info 'Some of these changes may require a logout/restart to take effect'
{{ end -}}
@@ -1,22 +0,0 @@
{
"backup_device_uuid": "",
"btrfs_mode": "true",
"count_boot": "0",
"count_daily": "4",
"count_hourly": "8",
"count_monthly": "0",
"count_weekly": "2",
"do_first_run": "true",
"exclude": [],
"exclude-apps": [],
"include_btrfs_home": "false",
"parent_device_uuid": "",
"schedule_boot": "false",
"schedule_daily": "false",
"schedule_hourly": "false",
"schedule_monthly": "false",
"schedule_weekly": "false",
"snapshot_count": "0",
"snapshot_size": "0",
"stop_cron_emails": "true"
}
@@ -32,7 +32,7 @@ if command -v mackup > /dev/null; then
rm -f ~/.mackup.cfg

### Print success message
-logg success 'Successfully ran backup-apps'
+gum log -sl info 'Successfully ran backup-apps'
else
gum log -sl error 'mackup is not installed' && exit 1
fi
@@ -19,7 +19,7 @@ find "${XDG_CONFIG_HOME:-$HOME/.config}/dconf/settings" -mindepth 1 -maxdepth 1
DCONF_SETTINGS_ID="/$(basename "$DCONF_CONFIG_FILE" | sed 's/\./\//g')/"
gum log -sl info 'Dumping '"$DCONF_SETTINGS_ID"' to '"$DCONF_CONFIG_FILE"
dconf dump "$DCONF_SETTINGS_ID" > "$DCONF_CONFIG_FILE"
-logg success 'Saved new configuration to '"$DCONF_CONFIG_FILE"
+gum log -sl info 'Saved new configuration to '"$DCONF_CONFIG_FILE"
gum log -sl info 'Printing diff for '"$DCONF_CONFIG_FILE"
chezmoi diff "$DCONF_CONFIG_FILE" || true
done
@@ -35,7 +35,7 @@ if (customArgv.exists && !customArgv._.length) {
if (fileExists) {
getChezmoiSecret(secretPath)
} else {
-console.error(`The file ${os.homedir()}/.local/share/chezmoi/home/.chezmoitemplates/secrets/${secretName} does not exist!`)
+console.error(`The file ${secretPath} does not exist!`)
process.exit(1)
}
}
@@ -2,6 +2,8 @@
import osInfo from 'linux-os-info'

$.verbose = false
// Preserves color from subshells
process.env.CLICOLOR_FORCE = 3

let installOrder, osArch, osId, osType, pkgs, sysType
const cacheDir = os.homedir() + '/.cache/installx'
@@ -16,7 +18,7 @@ const customArgv = minimist(process.argv.slice(3), {
})

function log(message) {
-console.log(`${chalk.cyanBright('instx->')} ${message}`)
+console.log(`${chalk.greenBright.bold('installx ❯')} ${message}`)
}

async function getOsInfo() {
@@ -100,4 +100,4 @@ else
wait
fi

-logg success 'Finished running update-system'
+gum log -sl info 'Finished running update-system'
@@ -8,7 +8,7 @@ trap "gum log -sl error 'Script encountered an error!'" ERR
if command -v atuin > /dev/null; then
if get-secret --exists ATUIN_USERNAME ATUIN_EMAIL ATUIN_PASSWORD ATUIN_KEY; then
gum log -sl info 'Registering Atuin account'
-atuin register -u "$(get-secret ATUIN_USERNAME)" -e "$(get-secret ATUIN_EMAIL)" -p "$(get-secret ATUIN_PASSWORD)"
+atuin register -u "$(get-secret ATUIN_USERNAME)" -e "$(get-secret ATUIN_EMAIL)" -p "$(get-secret ATUIN_PASSWORD)" || true
gum log -sl info 'Logging into Atuin account'
atuin login -u "$(get-secret ATUIN_USERNAME)" -p "$(get-secret ATUIN_PASSWORD)" -k "$(get-secret ATUIN_KEY)"
gum log -sl info 'Running atuin import auto'
@@ -1,35 +0,0 @@
#!/usr/bin/env bash
# @file Blocky Configuration
# @brief Copies over configuration (and service file, in the case of Linux) to the appropriate system location

set -Eeuo pipefail
trap "gum log -sl error 'Script encountered an error!'" ERR

if command -v blocky > /dev/null; then
if [ -d /Applications ] && [ -d /System ]; then
### macOS
if [ -f "$HOME/.local/etc/blocky/config.yaml" ]; then
gum log -sl info 'Ensuring /usr/local/etc/blocky directory is present'
sudo mkdir -p /usr/local/etc/blocky
gum log -sl info "Copying $HOME/.local/etc/blocky/config.yaml to /usr/local/etc/blocky/config.yaml"
sudo cp -f "$HOME/.local/etc/blocky/config.yaml" /usr/local/etc/blocky/config.yaml
if [ -d "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/blocky" ] && [ ! -f "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/blocky/config.yaml" ]; then
gum log -sl info "Symlinking $HOME/.local/etc/blocky/config.yaml to ${HOMEBREW_PREFIX:-/opt/homebrew}/etc/blocky/config.yaml"
ln -s /usr/local/etc/blocky/config.yaml "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/blocky/config.yaml"
fi
fi
else
### Linux
gum log -sl info 'Ensuring /usr/local/etc/blocky is created'
sudo mkdir -p /usr/local/etc/blocky
sudo cp -f "$HOME/.local/etc/blocky/config.yaml" /usr/local/etc/blocky/config.yaml
if [ -d /usr/lib/systemd/system ]; then
gum log -sl info 'Copying blocky service file to system locations'
sudo cp -f "$HOME/.local/etc/blocky/blocky.service" /usr/lib/systemd/system/blocky.service
else
logg "/usr/lib/systemd/system is missing from the file system"
fi
fi
else
gum log -sl info 'Blocky is not available in the PATH'
fi
@@ -1,43 +0,0 @@
#!/usr/bin/env bash
# @file ClamAV Configuration
# @brief Applies ClamAV configuration, updates its database, and configures background services

set -Eeuo pipefail
trap "gum log -sl error 'Script encountered an error!'" ERR

if command -v freshclam > /dev/null; then
### Add freshclam.conf
if [ -f "$HOME/.local/etc/clamav/freshclam.conf" ]; then
sudo mkdir -p /usr/local/etc/clamav
sudo cp -f "$HOME/.local/etc/clamav/freshclam.conf" /usr/local/etc/clamav/freshclam.conf
if [ -d "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav" ] && [ ! -L "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav/freshclam.conf" ]; then
sudo rm -f "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav/freshclam.conf"
ln -s /usr/local/etc/clamav/freshclam.conf "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav/freshclam.conf"
fi
fi

### Add clamd.conf
if [ -f "$HOME/.local/etc/clamav/clamd.conf" ]; then
sudo mkdir -p /usr/local/etc/clamav
sudo cp -f "$HOME/.local/etc/clamav/clamd.conf" /usr/local/etc/clamav/clamd.conf
if [ -d "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav" ] && [ ! -L "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav/clamd.conf" ]; then
sudo rm -f "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav/clamd.conf"
ln -s /usr/local/etc/clamav/clamd.conf "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/clamav/clamd.conf"
fi
fi

### Setting up launchd services on macOS
if [ -d /Applications ] && [ -d /System ]; then
### clamav.clamdscan
load-service clamav.clamdscan

### clamav.freshclam
load-service clamav.freshclam
fi

### Update database
gum log -sl info 'Running freshclam to update database'
freshclam
else
gum log -sl info 'freshclam is not available in the PATH'
fi
@ -8,17 +8,12 @@ set -Eeuo pipefail
|
|||
trap "gum log -sl error 'Script encountered an error!'" ERR
|
||||
|
||||
if command -v cloudflared > /dev/null; then
|
||||
# Show warning message about ~/.cloudflared already existing
|
||||
### Show error message about ~/.cloudflared already existing
|
||||
if [ -d "$HOME/.cloudflared" ]; then
|
||||
gum log -sl warn '~/.cloudflared is already in the home directory - to ensure proper deployment, remove previous tunnel configuration folders'
|
||||
gum log -sl error '~/.cloudflared is already in the home directory - to ensure proper deployment, remove the ~/.cloudflared configuration folder' && exit 1
|
||||
fi
|
||||
|
||||
# Copy over configuration files
|
||||
gum log -sl info 'Ensuring /usr/local/etc/cloudflared exists' && sudo mkdir -p /usr/local/etc/cloudflared
|
||||
gum log -sl info 'Copying over configuration files from ~/.local/etc/cloudflared to /usr/local/etc/cloudflared'
|
||||
sudo cp -f "$HOME/.local/etc/cloudflared/cert.pem" /usr/local/etc/cloudflared/cert.pem
|
||||
sudo cp -f "$HOME/.local/etc/cloudflared/config.yml" /usr/local/etc/cloudflared/config.yml
|
||||
|
||||
### Use lowercased hostname / tunnel ID
|
||||
HOSTNAME_LOWER="host-$(hostname -s | tr '[:upper:]' '[:lower:]')"
|
||||
|
||||
### Remove previous tunnels connected to host
|
||||
|
@ -30,16 +25,13 @@ if command -v cloudflared > /dev/null; then
|
|||
gum log -sl info "Removing credentials for $TUNNEL_ID which is not in use"
|
||||
sudo rm -f "/usr/local/etc/cloudflared/${TUNNEL_ID}.json"
|
||||
else
|
||||
logg success "Skipping deletion of $TUNNEL_ID credentials since it is in use"
|
||||
gum log -sl info "Skipping deletion of $TUNNEL_ID credentials since it is in use"
|
||||
fi
|
||||
done< <(sudo cloudflared tunnel list | grep "$HOSTNAME_LOWER" | sed 's/ .*//')
|
||||
|
||||
### Register tunnel (if not already registered)
|
||||
gum log -sl info "Creating CloudFlared tunnel named "$HOSTNAME_LOWER""
|
||||
sudo cloudflared tunnel create "$HOSTNAME_LOWER" || EXIT_CODE=$?
|
||||
if [ -n "${EXIT_CODE:-}" ]; then
|
||||
gum log -sl info 'Failed to create tunnel - it probably already exists'
|
||||
fi
|
||||
sudo cloudflared tunnel create "$HOSTNAME_LOWER" || true
|
||||
|
||||
### Acquire TUNNEL_ID and symlink credentials.json
|
||||
TUNNEL_ID="$(sudo cloudflared tunnel list | grep "$HOSTNAME_LOWER" | sed 's/ .*//')"
|
||||
|
@ -48,24 +40,15 @@ if command -v cloudflared > /dev/null; then
|
|||
sudo rm -f /usr/local/etc/cloudflared/credentials.json
|
||||
sudo ln -s /usr/local/etc/cloudflared/$TUNNEL_ID.json /usr/local/etc/cloudflared/credentials.json
|
||||
|
||||
### Symlink /usr/local/etc/cloudflared to /etc/cloudflared
|
||||
if [ ! -d /etc/cloudflared ]; then
|
||||
gum log -sl info 'Symlinking /usr/local/etc/cloudflared to /etc/cloudflared'
|
||||
sudo ln -s /usr/local/etc/cloudflared /etc/cloudflared
|
||||
else
|
||||
if [ ! -L /etc/cloudflared ]; then
|
||||
gum log -sl warn '/etc/cloudflared is present as a regular directory (not symlinked) but files are being modified in /usr/local/etc/cloudflared'
|
||||
fi
|
||||
fi
|
||||
|
||||
### Configure DNS
|
||||
# Must be deleted manually if no longer used
|
||||
### Note: The DNS records that are added via cloudflared must be deleted manually if no longer used
|
||||
gum log -sl info 'Setting up DNS records for CloudFlare Argo tunnels'
|
||||
while read DOMAIN; do
|
||||
if [ "$DOMAIN" != 'null' ]; then
|
||||
gum log -sl info "Setting up $DOMAIN for access through cloudflared (Tunnel ID: $TUNNEL_ID)"
|
||||
gum log -sl info "Running sudo cloudflared tunnel route dns -f "$TUNNEL_ID" "$DOMAIN""
|
||||
sudo cloudflared tunnel route dns -f "$TUNNEL_ID" "$DOMAIN" && logg success "Successfully routed $DOMAIN to this machine's cloudflared Argo tunnel"
|
||||
sudo cloudflared tunnel route dns -f "$TUNNEL_ID" "$DOMAIN"
|
||||
gum log -sl info "Successfully routed $DOMAIN to this machine's cloudflared Argo tunnel"
|
||||
fi
|
||||
done< <(yq '.ingress[].hostname' /usr/local/etc/cloudflared/config.yml)
|
||||
|
||||
|
@ -82,14 +65,9 @@ if command -v cloudflared > /dev/null; then
|
|||
gum log -sl info 'Running sudo cloudflared service install'
|
||||
sudo cloudflared service install
|
||||
fi
|
||||
sudo cp -f "$HOME/Library/LaunchDaemons/com.cloudflare.cloudflared.plist" /Library/LaunchDaemons/com.cloudflare.cloudflared.plist
|
||||
gum log -sl info 'Ensuring cloudflared service is started'
|
||||
if sudo launchctl list | grep 'com.cloudflare.cloudflared' > /dev/null; then
|
||||
gum log -sl info 'Unloading previous com.cloudflare.cloudflared configuration'
|
||||
sudo launchctl unload /Library/LaunchDaemons/com.cloudflare.cloudflared.plist
|
||||
fi
|
||||
gum log -sl info 'Starting up com.cloudflare.cloudflared configuration'
|
||||
sudo launchctl load -w /Library/LaunchDaemons/com.cloudflare.cloudflared.plist
|
||||
|
||||
### Apply patched version of the LaunchDaemon
|
||||
load-service com.cloudflare.cloudflared
|
||||
elif [ -f /etc/os-release ]; then
|
||||
### Linux
|
||||
if systemctl --all --type service | grep -q "cloudflared" > /dev/null; then
|
||||
|
@ -98,6 +76,8 @@ if command -v cloudflared > /dev/null; then
|
|||
gum log -sl info 'Running sudo cloudflared service install'
|
||||
sudo cloudflared service install
|
||||
fi
|
||||
|
||||
### Start / enabled the systemd service
|
||||
gum log -sl info 'Ensuring cloudflared service is started'
|
||||
sudo systemctl start cloudflared
|
||||
gum log -sl info 'Enabling cloudflared as a boot systemctl service'
|
||||
|
|
|
@ -22,18 +22,15 @@ if command -v docker > /dev/null; then
|
|||
|
||||
### Launch Docker.app
|
||||
if [ -d "/Applications/Docker.app" ] || [ -d "$HOME/Applications/Docker.app" ]; then
|
||||
gum log -sl info 'Ensuring Docker.app is open' && open --background -a Docker --args --accept-license --unattended
|
||||
gum log -sl info 'Ensuring Docker.app is running' && open --background -a Docker --args --accept-license --unattended
|
||||
fi
|
||||
|
||||
### Ensure DOCKERHUB_TOKEN is available
|
||||
get-secret --exists DOCKERHUB_TOKEN
|
||||
|
||||
### Pre-authenticate with DockerHub
|
||||
if get-secret --exists DOCKERHUB_TOKEN; then
|
||||
if [ "$DOCKERHUB_USER" != 'null' ]; then
|
||||
gum log -sl info 'Headlessly authenticating with DockerHub registry'
|
||||
echo "$(get-secret DOCKERHUB_TOKEN)" | docker login -u "$DOCKERHUB_USER" --password-stdin > /dev/null
|
||||
logg success 'Successfully authenticated with DockerHub registry'
|
||||
gum log -sl info 'Successfully authenticated with DockerHub registry'
|
||||
else
|
||||
gum log -sl info 'Skipping logging into DockerHub because DOCKERHUB_USER is undefined'
|
||||
fi
|
||||
|
|
|
@ -1,18 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
# @file EasyEngine
|
||||
# @brief Configures EasyEngine to use the CloudFlare API for configuring Let's Encrypt
|
||||
|
||||
set -Eeuo pipefail
|
||||
trap "gum log -sl error 'Script encountered an error!'" ERR
|
||||
|
||||
if command -v ee > /dev/null; then
|
||||
### Ensure secrets
|
||||
if get-secret --exists CLOUDFLARE_EMAIL CLOUDFLARE_API_KEY; then
|
||||
### Configure EasyEngine
|
||||
gum log -sl info 'Configuring EasyEngine with CloudFlare automatic SSL insuance'
|
||||
ee config set le-mail "$(get-secret CLOUDFLARE_EMAIL)"
|
||||
ee config set cloudflare-api-key "$(get-secret CLOUDFLARE_API_KEY)"
|
||||
else
|
||||
gum log -sl info 'Skipping automated setup of LetsEncrypt with EasyEngine because either CLOUDFLARE_EMAIL or CLOUDFLARE_API_KEY are not defined'
|
||||
fi
|
||||
fi
|
|
@ -29,7 +29,7 @@ if [[ ! "$(test -d proc && grep Microsoft /proc/version > /dev/null)" ]]; then
|
|||
if [ -n "${CONFIGURE_EXIT_CODE:-}" ]; then
|
||||
gum log -sl error 'Configuring endlessh service failed' && exit 1
|
||||
else
|
||||
logg success 'Successfully configured endlessh service'
|
||||
gum log -sl info 'Successfully configured endlessh service'
|
||||
fi
|
||||
elif [ -f /etc/endlessh.conf ]; then
|
||||
gum log -sl info 'Copying ~/.ssh/endlessh/config to /etc/endlessh.conf' && sudo cp -f "$HOME/.ssh/endlessh/config" /etc/endlessh.conf
|
||||
|
@ -37,7 +37,7 @@ if [[ ! "$(test -d proc && grep Microsoft /proc/version > /dev/null)" ]]; then
|
|||
if [ -n "${CONFIGURE_EXIT_CODE:-}" ]; then
|
||||
gum log -sl error 'Configuring endlessh service failed' && exit 1
|
||||
else
|
||||
logg success 'Successfully configured endlessh service'
|
||||
gum log -sl info 'Successfully configured endlessh service'
|
||||
fi
|
||||
else
|
||||
gum log -sl warn 'Neither the /etc/endlessh folder nor the /etc/endlessh.conf file exist'
|
||||
|
|
|
@ -1,48 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
# @file Fail2ban Configuration
|
||||
# @brief Applies the system `fail2ban` jail configuration and then restarts the service
|
||||
# @description
|
||||
# Fail2ban is an SSH security program that temporarily bans IP addresses that could possibly be
|
||||
# attempting to gain unauthorized system access. This script applies the "jail" configuration
|
||||
# located at `home/private_dot_ssh/fail2ban/` to the system location. It then enables and restarts
|
||||
# the `fail2ban` configuration.
|
||||
#
|
||||
# ## Links
|
||||
#
|
||||
# * [`fail2ban` configuration folder](https://github.com/megabyte-labs/install.doctor/tree/master/home/private_dot_ssh/fail2ban)
|
||||
|
||||
set -Eeuo pipefail
|
||||
trap "gum log -sl error 'Script encountered an error!'" ERR
|
||||
|
||||
if command -v fail2ban-client > /dev/null; then
|
||||
if [[ ! "$(test -d /proc && grep Microsoft /proc/version > /dev/null)" ]]; then
|
||||
if [ -f "$HOME/.ssh/fail2ban/jail.local" ]; then
|
||||
### Linux
|
||||
FAIL2BAN_CONFIG=/etc/fail2ban
|
||||
if [ -d /Applications ] && [ -d /System ]; then
|
||||
### macOS
|
||||
FAIL2BAN_CONFIG=/usr/local/etc/fail2ban
|
||||
fi
|
||||
sudo mkdir -p "$FAIL2BAN_CONFIG"
|
||||
sudo cp -f "$HOME/.ssh/fail2ban/jail.local" "$FAIL2BAN_CONFIG/jail.local"
|
||||
if [ -d "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/fail2ban" ] && [ ! -f "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/fail2ban/jail.local" ]; then
|
||||
gum log -sl info "Symlinking $FAIL2BAN_CONFIG/jail.local to ${HOMEBREW_PREFIX:-/opt/homebrew}/etc/fail2ban/jail.local"
|
||||
ln -s "$FAIL2BAN_CONFIG/jail.local" "${HOMEBREW_PREFIX:-/opt/homebrew}/etc/fail2ban/jail.local"
|
||||
fi
|
||||
if [ -d /Applications ] && [ -d /System ]; then
|
||||
### macOS
|
||||
gum log -sl info 'Enabling the fail2ban Homebrew service' && sudo brew services restart fail2ban
|
||||
else
|
||||
### Linux
|
||||
gum log -sl info 'Enabling the fail2ban service' && sudo systemctl enable fail2ban
|
||||
gum log -sl info 'Restarting the fail2ban service' && sudo systemctl restart fail2ban
|
||||
fi
|
||||
else
|
||||
gum log -sl info "The $HOME/.ssh/fail2ban/jail.local configuration is missing so fail2ban will not be set up"
|
||||
fi
|
||||
else
|
||||
gum log -sl info 'The environment is a WSL environment so the fail2ban sshd_config will be skipped'
|
||||
fi
|
||||
else
|
||||
gum log -sl info 'The fail2ban-client executable is not available on the system so fail2ban configuration will be skipped'
|
||||
fi
|
|
@ -1,17 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
# @file Fig Login
|
||||
# @brief Logs into Fig using the FIG_TOKEN
|
||||
|
||||
set -Eeuo pipefail
|
||||
trap "gum log -sl error 'Script encountered an error!'" ERR
|
||||
|
||||
if command -v fig > /dev/null; then
|
||||
### Ensure FIG_TOKEN
|
||||
if get-secret --exists FIG_TOKEN; then
|
||||
### Login to Fig
|
||||
gum log -sl info "Logging into Fig with FIG_TOKEN"
|
||||
fig login --token "$(get-secret FIG_TOKEN)" || gum log -sl info 'Fig login failed - User might already be logged in'
|
||||
fi
|
||||
else
|
||||
gum log -sl warn 'fig is not available in the PATH'
|
||||
fi
|
|
@ -283,7 +283,7 @@ function firefoxSetup() {
|
|||
# rm -f profile.private.tar.gz.age
|
||||
# gum log -sl info 'Decompressing the Firefox private profile'
|
||||
# tar -xzf profile.private.tar.gz
|
||||
# logg success 'The Firefox private profile was successfully installed'
|
||||
# gum log -sl info 'The Firefox private profile was successfully installed'
|
||||
# cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/firefox/user.js" "$SETTINGS_DIR/profile.private"
|
||||
# gum log -sl info 'Copied ~/.config/firefox/user.js to profile.private profile'
|
||||
# else
|
||||
|
@ -324,7 +324,7 @@ function firefoxSetup() {
|
|||
# to the user profile.
|
||||
# gum log -sl info 'Unzipping '"$PLUGIN_FILENAME"' ('"$FIREFOX_PLUGIN"')'
|
||||
# unzip "$SETTINGS_DIR/$SETTINGS_PROFILE/extensions/$PLUGIN_FILENAME" -d "$SETTINGS_DIR/$SETTINGS_PROFILE/extensions/$PLUGIN_FOLDER"
|
||||
logg success 'Installed '"$FIREFOX_PLUGIN"''
|
||||
gum log -sl info 'Installed '"$FIREFOX_PLUGIN"''
|
||||
fi
|
||||
else
|
||||
gum log -sl warn 'A null Firefox add-on filename was detected for '"$FIREFOX_PLUGIN"''
|
||||
|
|
|
@ -61,10 +61,10 @@ if [ -f "${XDG_DATA_HOME:-$HOME/.local/share}/github-runner/config.sh" ]; then
|
|||
### Install / start the service
|
||||
gum log -sl info 'Configuring runner service'
|
||||
"${XDG_DATA_HOME:-$HOME/.local/share}/github-runner/svc.sh" install
|
||||
logg success 'Successfully installed the GitHub Actions runner service'
|
||||
gum log -sl info 'Successfully installed the GitHub Actions runner service'
|
||||
gum log -sl info 'Starting runner service'
|
||||
"${XDG_DATA_HOME:-$HOME/.local/share}/github-runner/svc.sh" start
|
||||
logg success 'Started the GitHub Actions runner service'
|
||||
gum log -sl info 'Started the GitHub Actions runner service'
|
||||
else
|
||||
gum log -sl warn 'jq is required by the GitHub runner configuration script'
|
||||
fi
|
||||
|
|
|
@ -1,20 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
# @file Keybase Configuration
|
||||
# @brief Updates Keybase's system configuration with the Keybase configuration stored in the `home/dot_config/keybase/config.json` location.
|
||||
# @description
|
||||
# This script ensures Keybase utilizes a configuration that, by default, adds a security fix.
|
||||
|
||||
set -Eeuo pipefail
|
||||
trap "gum log -sl error 'Script encountered an error!'" ERR
|
||||
|
||||
if command -v keybase > /dev/null; then
|
||||
KEYBASE_CONFIG="${XDG_CONFIG_HOME:-$HOME/.config}/keybase/config.json"
|
||||
if [ -f "$KEYBASE_CONFIG" ]; then
|
||||
gum log -sl info 'Ensuring /etc/keybase is a directory' && sudo mkdir -p /etc/keybase
|
||||
gum log -sl info "Copying $KEYBASE_CONFIG to /etc/keybase/config.json" && sudo cp -f "$KEYBASE_CONFIG" /etc/keybase/config.json
|
||||
else
|
||||
gum log -sl warn "No Keybase config located at $KEYBASE_CONFIG"
|
||||
fi
|
||||
else
|
||||
gum log -sl info 'The keybase executable is not available'
|
||||
fi
|
|
@ -11,7 +11,7 @@ if command -v mise > /dev/null; then
|
|||
### Symlink Java on macOS
|
||||
if [ -d /Applications ] && [ -d /System ]; then
|
||||
if [ -d "${XDG_DATA_HOME:-$HOME/.local/share}/mise/installs/java/openjdk-20/Contents" ] && [ ! -d "/Library/Java/JavaVirtualMachines/openjdk-20.jdk/Contents" ]; then
|
||||
gum log -sl info "Symlinking ${XDG_DATA_HOME:-$HOME/.local/share}/mise/installs/java/openjdk-20/Contents to /Library/Java/JavaVirtualMachines/openjdk-20.jdk/Contents"
|
||||
gum log -sl info "Symlinking system Java to mise-installed Java" target "${XDG_DATA_HOME:-$HOME/.local/share}/mise/installs/java/openjdk-20/Contents" symlink "/Library/Java/JavaVirtualMachines/openjdk-20.jdk/Contents"
|
||||
sudo mkdir -p /Library/Java/JavaVirtualMachines/openjdk-20.jdk
|
||||
sudo ln -s "${XDG_DATA_HOME:-$HOME/.local/share}/mise/installs/java/openjdk-20/Contents" /Library/Java/JavaVirtualMachines/openjdk-20.jdk/Contents
|
||||
fi
|
||||
|
|
|
@ -44,7 +44,7 @@ if command -v netdata-claim.sh > /dev/null; then
|
|||
|
||||
### netdata-claim.sh must be run as netdata user
|
||||
if sudo -H -u netdata bash -c "yes | netdata-claim.sh -token="$(get-secret NETDATA_TOKEN)" -rooms="$(get-secret NETDATA_ROOM)" -url="https://app.netdata.cloud""; then
|
||||
logg success 'Successfully added device to Netdata Cloud account'
|
||||
gum log -sl info 'Successfully added device to Netdata Cloud account'
|
||||
fi
|
||||
|
||||
### Kernel optimizations
|
||||
|
|
|
@ -6,14 +6,11 @@ set -Eeuo pipefail
|
|||
trap "gum log -sl error 'Script encountered an error!'" ERR
|
||||
|
||||
if command -v ntfy > /dev/null; then
|
||||
### Branding assets
|
||||
gum log -sl info 'Ensuring branding assets are in expected place for ntfy'
|
||||
sudo mkdir -p /usr/local/etc/branding
|
||||
sudo cp -f "$HOME/.local/etc/branding/logo-color-256x256.png" /usr/local/etc/branding/logo-color-256x256.png
|
||||
|
||||
### Sound files
|
||||
gum log -sl info 'Ensuring shared sound files are synced to system location'
|
||||
sudo mkdir -p /usr/local/share/sounds
|
||||
### /usr/share/sounds is ideal spot
|
||||
sudo rsync -rtvp "${XDG_DATA_HOME:-$HOME/.local/share}/sounds/" /usr/local/share/sounds
|
||||
|
||||
### Debian dependency
|
||||
|
|
|
@ -25,10 +25,10 @@ fi
|
|||
if command -v update-alternatives > /dev/null; then
|
||||
if [ -f "/usr/local/share/plymouth/themes/Betelgeuse/Betelgeuse.plymouth" ]; then
|
||||
sudo update-alternatives --install /usr/share/plymouth/themes/default.plymouth default.plymouth "/usr/local/share/plymouth/themes/Betelgeuse/Betelgeuse.plymouth" 100
|
||||
logg success 'Installed default.plymouth'
|
||||
gum log -sl info 'Installed default.plymouth'
|
||||
# Required sometimes
|
||||
sudo update-alternatives --set default.plymouth "/usr/local/share/plymouth/themes/Betelgeuse/Betelgeuse.plymouth"
|
||||
logg success 'Set default.plymouth'
|
||||
gum log -sl info 'Set default.plymouth'
|
||||
else
|
||||
gum log -sl warn "/usr/local/share/plymouth/themes/Betelgeuse/Betelgeuse.plymouth does not exist!"
|
||||
fi
|
||||
|
@ -61,7 +61,7 @@ if command -v plymouth-set-default-theme > /dev/null; then
|
|||
if [ -n "${EXIT_CODE:-}" ]; then
|
||||
gum log -sl warn 'There may have been an issue while setting the Plymouth default theme with plymouth-set-default-theme'
|
||||
else
|
||||
logg success 'Set Plymouth default theme with plymouth-set-default-theme'
|
||||
gum log -sl info 'Set Plymouth default theme with plymouth-set-default-theme'
|
||||
fi
|
||||
else
|
||||
gum log -sl warn 'Could not apply default Plymouth theme because plymouth-set-default-theme is missing'
|
||||
|
@ -72,7 +72,7 @@ if command -v update-alternatives > /dev/null; then
|
|||
if [ -f "/usr/local/share/plymouth/themes/Betelgeuse/Betelgeuse.plymouth" ]; then
|
||||
# Required sometimes
|
||||
sudo update-alternatives --set default.plymouth "/usr/local/share/plymouth/themes/Betelgeuse/Betelgeuse.plymouth"
|
||||
logg success 'Set default.plymouth (second time is required sometimes)'
|
||||
gum log -sl info 'Set default.plymouth (second time is required sometimes)'
|
||||
else
|
||||
gum log -sl warn "/usr/local/share/plymouth/themes/Betelgeuse/Betelgeuse.plymouth does not exist!"
|
||||
fi
|
||||
|
@ -86,11 +86,11 @@ if [ "$DEBUG_MODE" != 'true' ]; then
|
|||
if command -v update-initramfs > /dev/null; then
|
||||
gum log -sl info 'Running sudo update-initramfs -u'
|
||||
sudo update-initramfs -u
|
||||
logg success 'Updated kernel / initrd images for Plymouth'
|
||||
gum log -sl info 'Updated kernel / initrd images for Plymouth'
|
||||
elif command -v dracut > /dev/null; then
|
||||
gum log -sl info 'Running sudo dracut --regenerate-all -f'
|
||||
sudo dracut --regenerate-all -f
|
||||
logg success 'Updated kernel / initrd images for Plymouth'
|
||||
gum log -sl info 'Updated kernel / initrd images for Plymouth'
|
||||
else
|
||||
gum log -sl warn 'Unable to update kernel / initrd images because neither update-initramfs or dracut are available'
|
||||
fi
|
||||
|
|
|
@ -154,7 +154,7 @@ if get-secret --exists SENDGRID_API_KEY; then
|
|||
gum log -sl info 'Unloading previous Postfix launch configuration'
|
||||
sudo launchctl unload /System/Library/LaunchDaemons/com.apple.postfix.master.plist
|
||||
fi
|
||||
sudo launchctl load -w /System/Library/LaunchDaemons/com.apple.postfix.master.plist && logg success 'launchctl load of com.apple.postfix.master successful'
|
||||
sudo launchctl load -w /System/Library/LaunchDaemons/com.apple.postfix.master.plist && gum log -sl info 'launchctl load of com.apple.postfix.master successful'
|
||||
fi
|
||||
if ! sudo postfix status > /dev/null; then
|
||||
gum log -sl info 'Starting postfix'
|
||||
|
|
|
@ -15,14 +15,8 @@ if command -v rkhunter > /dev/null; then
|
|||
### Linux
|
||||
gum log -sl info 'Updating file /etc/rkhunter.conf' && sed -i "s/^#WEB_CMD.*$/WEB_CMD=curl\ -L/" /etc/rkhunter.conf
|
||||
fi
|
||||
sudo rkhunter --propupd || RK_PROPUPD_EXIT_CODE=$?
|
||||
if [ -n "${RK_PROPUPD_EXIT_CODE:-}" ]; then
|
||||
gum log -sl error "sudo rkhunter --propupd returned non-zero exit code"
|
||||
fi
|
||||
sudo rkhunter --update || RK_UPDATE_EXIT_CODE=$?
|
||||
if [ -n "${RK_UPDATE_EXIT_CODE:-}" ]; then
|
||||
gum log -sl error "sudo rkhunter --update returned non-zero exit code"
|
||||
fi
|
||||
sudo rkhunter --propupd || gum log -sl warn "sudo rkhunter --propupd returned non-zero exit code"
|
||||
sudo rkhunter --update || gum log -sl warn "sudo rkhunter --update returned non-zero exit code"
|
||||
else
|
||||
gum log -sl info 'rkhunter is not installed'
|
||||
fi
|
||||
|
|
|
@ -90,7 +90,7 @@ if command -v smbd > /dev/null; then
|
|||
if [ -d /Applications ] && [ -d /System ]; then
|
||||
### System Private Samba Share
|
||||
if SMB_OUTPUT=$(sudo sharing -a "$PRIVATE_SHARE" -S "Private (System)" -n "Private (System)" -g 000 -s 001 -E 1 -R 1 2>&1); then
|
||||
logg success "Configured $PRIVATE_SHARE as a private Samba share"
|
||||
gum log -sl info "Configured $PRIVATE_SHARE as a private Samba share"
|
||||
else
|
||||
if echo $SMB_OUTPUT | grep 'smb name already exists' > /dev/null; then
|
||||
gum log -sl info "$PRIVATE_SHARE Samba share already exists"
|
||||
|
@ -102,7 +102,7 @@ if command -v smbd > /dev/null; then
|
|||
|
||||
### System Public Samba Share
|
||||
if SMB_OUTPUT=$(sudo sharing -a "$PUBLIC_SHARE" -S "Public (System)" -n "Public (System)" -g 001 -s 001 -E 1 -R 0 2>&1); then
|
||||
logg success "Configured $PUBLIC_SHARE as a system public Samba share"
|
||||
gum log -sl info "Configured $PUBLIC_SHARE as a system public Samba share"
|
||||
else
|
||||
if echo $SMB_OUTPUT | grep 'smb name already exists' > /dev/null; then
|
||||
gum log -sl info "$PUBLIC_SHARE Samba share already exists"
|
||||
|
@ -114,7 +114,7 @@ if command -v smbd > /dev/null; then
|
|||
|
||||
### User Shared Samba Share
|
||||
if SMB_OUTPUT=$(sudo sharing -a "$HOME/Shared" -S "Shared (User)" -n "Shared (User)" -g 001 -s 001 -E 1 -R 0 2>&1); then
|
||||
logg success "Configured $HOME/Shared as a user-scoped Samba share"
|
||||
gum log -sl info "Configured $HOME/Shared as a user-scoped Samba share"
|
||||
else
|
||||
if echo $SMB_OUTPUT | grep 'smb name already exists' > /dev/null; then
|
||||
gum log -sl info "$HOME/Shared Samba share already exists"
|
||||
|
|
|
@ -1,27 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
# @file sftpgo configuration
|
||||
# @brief This script copies over the required configuration files for sftpgo and then initializes sftpgo
|
||||
|
||||
set -Eeuo pipefail
|
||||
trap "gum log -sl error 'Script encountered an error!'" ERR
|
||||
|
||||
if command -v sftpgo > /dev/null; then
|
||||
### Copy configuration file
|
||||
sudo mkdir -p /usr/local/etc/sftpgo
|
||||
gum log -sl info 'Copying over sftpgo configuration to /usr/local/etc/sftpgo/sftpgo.json'
|
||||
sudo cp -f "$HOME/.local/etc/sftpgo/sftpgo.json" /usr/local/etc/sftpgo/sftpgo.json
|
||||
|
||||
### Copy branding assets / banner
|
||||
gum log -sl info 'Copying over sftpgo branding assets'
|
||||
sudo cp -f "$HOME/.local/etc/sftpgo/banner" /usr/local/etc/sftpgo/banner
|
||||
sudo mkdir -p /usr/local/etc/branding
|
||||
sudo cp -f "$HOME/.local/etc/branding/favicon.ico" /usr/local/etc/branding/favicon.ico
|
||||
sudo cp -f "$HOME/.local/etc/branding/logo-color-256x256.png" /usr/local/etc/branding/logo-color-256x256.png
|
||||
sudo cp -f "$HOME/.local/etc/branding/logo-color-900x900.png" /usr/local/etc/branding/logo-color-900x900.png
|
||||
|
||||
### Initialize
|
||||
gum log -sl info 'Running sudo sftpgo initprovider'
|
||||
sudo sftpgo initprovider
|
||||
else
|
||||
gum log -sl info 'sftpgo is not installed'
|
||||
fi
|
|
@ -51,7 +51,7 @@ if [ -f "${XDG_CONFIG_HOME:-$HOME/.config}/tabby/plugins/package.json" ]; then
|
|||
gum log -sl info 'Installing Tabby plugins defined in '"${XDG_CONFIG_HOME:-$HOME/.config}/tabby/plugins/package.json"''
|
||||
cd "${XDG_CONFIG_HOME:-$HOME/.config}/tabby/plugins"
|
||||
npm install --quiet --no-progress
|
||||
logg success 'Finished installing Tabby plugins'
|
||||
gum log -sl info 'Finished installing Tabby plugins'
|
||||
fi
|
||||
else
|
||||
gum log -sl info 'Skipping Tabby plugin installation because is not present'
|
||||
|
|
|
@ -53,7 +53,7 @@ if get-secret --exists TAILSCALE_AUTH_KEY; then
|
|||
if [ -n "${EXIT_CODE:-}" ]; then
|
||||
gum log -sl warn 'tailscale up timed out'
|
||||
else
|
||||
logg success 'Connected to Tailscale network'
|
||||
gum log -sl info 'Connected to Tailscale network'
|
||||
fi
|
||||
gum log -sl info 'Disabling notifications about updates'
|
||||
tailscale set --update-check=false
|
||||
|
@ -79,7 +79,7 @@ if command -v warp-cli > /dev/null; then
|
|||
### Connect CloudFlare WARP
|
||||
if warp-cli --accept-tos status | grep 'Disconnected' > /dev/null; then
|
||||
gum log -sl info 'Connecting to CloudFlare WARP'
|
||||
warp-cli --accept-tos connect > /dev/null && logg success 'Connected to CloudFlare WARP'
|
||||
warp-cli --accept-tos connect > /dev/null && gum log -sl info 'Connected to CloudFlare WARP'
|
||||
else
|
||||
gum log -sl info 'Either there is a misconfiguration or the device is already connected with CloudFlare WARP'
|
||||
fi
|
||||
|
|
|
@ -1,18 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
# @file Timeshift Configuration
|
||||
# @brief Updates the Timeshift system configuration with the Timeshift configuration stored in the `home/dot_config/timeshift/timeshift.json` location.
|
||||
# @description
|
||||
# This script applies a Timeshift configuration that defines how Timeshift should maintain system backups.
|
||||
|
||||
set -Eeuo pipefail
|
||||
trap "gum log -sl error 'Script encountered an error!'" ERR
|
||||
|
||||
if command -v timeshift > /dev/null; then
|
||||
gum log -sl info 'Ensuring /etc/timeshift is a directory'
|
||||
sudo mkdir -p /etc/timeshift
|
||||
TIMESHIFT_CONFIG="${XDG_CONFIG_HOME:-$HOME/.config}/timeshift/timeshift.json"
|
||||
gum log -sl info "Copying $TIMESHIFT_CONFIG to /etc/timeshift/timeshift.json"
|
||||
sudo cp -f "$TIMESHIFT_CONFIG" /etc/timeshift/timeshift.json
|
||||
else
|
||||
gum log -sl info 'The timeshift executable is not available'
|
||||
fi
|
|
@ -49,14 +49,14 @@ if command -v torify > /dev/null; then
|
|||
fi
|
||||
fi
|
||||
gum log -sl info 'Running brew services restart tor'
|
||||
brew services restart tor && logg success 'Tor successfully restarted'
|
||||
brew services restart tor && gum log -sl info 'Tor successfully restarted'
|
||||
else
|
||||
if [[ ! "$(test -d /proc && grep Microsoft /proc/version > /dev/null)" ]]; then
|
||||
### Linux
|
||||
gum log -sl info 'Running sudo systemctl enable / restart tor'
|
||||
sudo systemctl enable tor
|
||||
sudo systemctl restart tor
|
||||
logg success 'Tor service enabled and restarted'
|
||||
gum log -sl info 'Tor service enabled and restarted'
|
||||
else
|
||||
gum log -sl info 'Environment is WSL so the Tor systemd service will not be enabled / restarted'
|
||||
fi
|
||||
|
|
|
@ -5,18 +5,22 @@
set -Eeuo pipefail
trap "gum log -sl error 'Script encountered an error!'" ERR

gum log -sl info "Installing VIM plugins" && vim +'PlugInstall --sync' +qall

# @description This script installs the extensions defined in `${XDG_CONFIG_HOME:-$HOME/.config}/coc/extensions/package.json`
# which should correlate to the Coc extensions defined in `${XDG_CONFIG_HOME:-$HOME/.config}/vim/vimrc`.
installCocExtensions() {
if [ -f "${XDG_CONFIG_HOME:-$HOME/.config}/coc/extensions/package.json" ]; then
gum log -sl info "Running npm i --no-progress --no-package-lock in ${XDG_CONFIG_HOME:-$HOME/.config}/coc/extensions"
cd "${XDG_CONFIG_HOME:-$HOME/.config}/coc/extensions" && npm i --no-progress --no-package-lock
gum log -sl info "Running vim +CocUpdateSync +qall" && vim +CocUpdateSync +qall
cd "${XDG_CONFIG_HOME:-$HOME/.config}/coc/extensions"
npm i --no-progress --no-package-lock
gum log -sl info "Running vim +CocUpdateSync +qall"
vim +CocUpdateSync +qall
else
gum log -sl info "Skipping Coc extension installation because ${XDG_CONFIG_HOME:-$HOME/.config}/coc/extensions/package.json is missing"
fi
}

### Install VIM plugins
gum log -sl info "Installing VIM plugins" && vim +'PlugInstall --sync' +qall

### Install VIM coc plugins
gum log -sl info "Updating VIM coc extensions" && installCocExtensions

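The installCocExtensions helper above expects a plain npm manifest in the coc extensions directory; an illustrative example of that package.json (the extension names and version ranges are placeholders, not the repo's actual list):

{
  "dependencies": {
    "coc-json": ">=1.9.0",
    "coc-yaml": ">=1.9.0"
  }
}
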
@ -28,7 +28,7 @@ if command -v VirtualBox > /dev/null; then
if [ -f /tmp/vbox/Oracle_VM_VirtualBox_Extension_Pack-$VBOX_VERSION.vbox-extpack ]; then
gum log -sl info 'Installing VirtualBox extension pack'
echo 'y' | sudo VBoxManage extpack install --replace /tmp/vbox/Oracle_VM_VirtualBox_Extension_Pack-$VBOX_VERSION.vbox-extpack
logg success 'Successfully installed VirtualBox extension pack'
gum log -sl info 'Successfully installed VirtualBox extension pack'
fi
else
gum log -sl info 'VirtualBox Extension pack is already installed'

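To confirm the extension pack install in this hunk took effect, listing installed packs is enough — a minimal sketch, assuming VBoxManage is on the PATH:

VBoxManage list extpacks   # should report "Oracle VM VirtualBox Extension Pack" at the expected version
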
@ -52,7 +52,7 @@ if command -v vmware > /dev/null; then
### Build VMWare host modules
gum log -sl info 'Building VMware host modules'
if sudo vmware-modconfig --console --install-all; then
logg success 'Built VMWare host modules successfully with sudo vmware-modconfig --console --install-all'
gum log -sl info 'Built VMWare host modules successfully with sudo vmware-modconfig --console --install-all'
else
gum log -sl info 'Acquiring VMware version from CLI'
VMW_VERSION="$(vmware --version | cut -f 3 -d' ')"

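After vmware-modconfig finishes in this hunk, a quick check that the kernel actually loaded the VMware host modules — a minimal sketch for Linux hosts:

lsmod | grep -E '^(vmmon|vmnet)'   # both modules should be listed once the build succeeds
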
@ -64,7 +64,7 @@ if command -v vmware > /dev/null; then
gum log -sl info 'Running sudo make and sudo make install'
sudo make
sudo make install
logg success 'Successfully configured VMware host module patches'
gum log -sl info 'Successfully configured VMware host module patches'
fi

### Sign VMware host modules if Secure Boot is enabled

@ -76,7 +76,7 @@ if command -v vmware > /dev/null; then
"/usr/src/linux-headers-$(uname -r)/scripts/sign-file" sha256 ./MOK.priv ./MOK.der "$(modinfo -n vmmon)"
"/usr/src/linux-headers-$(uname -r)/scripts/sign-file" sha256 ./MOK.priv ./MOK.der "$(modinfo -n vmnet)"
echo '' | mokutil --import MOK.der
logg success 'Successfully signed VMware host modules. Reboot the host before powering on VMs'
gum log -sl info 'Successfully signed VMware host modules. Reboot the host before powering on VMs'
fi

### Patch VMware with Unlocker

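The sign-file calls in this hunk assume a Machine Owner Key pair (MOK.priv / MOK.der) already exists; a minimal sketch of generating one — the subject string and validity period are arbitrary illustrative choices, not values from the repo:

openssl req -new -x509 -newkey rsa:2048 -nodes -days 36500 \
  -subj "/CN=VMware module signing/" \
  -keyout MOK.priv -outform DER -out MOK.der
# mokutil --import MOK.der then prompts for a one-time password and enrolls the key on the next boot
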
@ -91,7 +91,7 @@ if command -v vmware > /dev/null; then
cd linux
gum log -sl info 'Running the unlocker'
echo "y" | sudo ./unlock
logg success 'Successfully unlocked VMware for macOS compatibility'
gum log -sl info 'Successfully unlocked VMware for macOS compatibility'
else
gum log -sl info '/usr/lib/vmware/isoimages/darwin.iso is already present on the system so VMware macOS unlocking will not be performed'
fi

@ -137,7 +137,7 @@ if command -v vagrant > /dev/null && command -v vmware-id > /dev/null; then
else
gum log -sl info 'Generating Vagrant VMWare Utility certificates'
sudo vagrant-vmware-utility certificate generate
logg success 'Generated Vagrant VMWare Utility certificates via vagrant-vmware-utility certificate generate'
gum log -sl info 'Generated Vagrant VMWare Utility certificates via vagrant-vmware-utility certificate generate'
fi
gum log -sl info 'Ensuring the Vagrant VMWare Utility service is enabled'
if VVU_OUTPUT=$(sudo vagrant-vmware-utility service install 2>&1); then

@ -87,7 +87,7 @@ if command -v code > /dev/null; then
jq -r '.recommendations[]' "${XDG_CONFIG_HOME:-$HOME/.config}/Code/User/extensions.json" | while read EXTENSION; do
if ! echo "$EXTENSIONS" | grep -iF "$EXTENSION" > /dev/null; then
gum log -sl info 'Installing Visual Studio Code extension '"$EXTENSION"'' && code --install-extension "$EXTENSION"
logg success 'Installed '"$EXTENSION"''
gum log -sl info 'Installed '"$EXTENSION"''
else
gum log -sl info ''"$EXTENSION"' already installed'
fi

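The jq pipeline above reads the standard VS Code recommendations format; an illustrative extensions.json (the extension IDs are examples, not the repo's actual list):

{
  "recommendations": [
    "esbenp.prettier-vscode",
    "redhat.vscode-yaml"
  ]
}
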
@ -244,7 +244,7 @@ if command -v warp-cli > /dev/null; then
### Connect CloudFlare WARP
if warp-cli --accept-tos status | grep 'Disconnected' > /dev/null; then
gum log -sl info 'Connecting to CloudFlare WARP'
warp-cli --accept-tos connect > /dev/null && logg success 'Connected to CloudFlare WARP'
warp-cli --accept-tos connect > /dev/null && gum log -sl info 'Connected to CloudFlare WARP'
else
gum log -sl info 'Either there is a misconfiguration or the device is already connected with CloudFlare WARP'
fi

@ -1,43 +0,0 @@
#!/usr/bin/env bash
# @file macOS WireGuard Profiles
# @brief Installs WireGuard VPN profiles on macOS devices
# @description
# This script installs WireGuard VPN profiles on macOS. It scans `${XDG_CONFIG_HOME:-$HOME/.config}/vpn` for all the `*.conf` files
# and then copies those profiles to `/etc/wireguard`. It also performs a couple preparation tasks like ensuring the target
# WireGuard system configuration file directory exists and is assigned the proper permissions.
#
# ## Creating VPN Profiles
#
# More details on embedding your VPN profiles into your Install Doctor fork can be found by reading the [Secrets documentation](https://install.doctor/docs/customization/secrets#vpn-profiles).
#
# ## TODO
#
# * Populate Tunnelblick on macOS using the VPN profiles located in `${XDG_CONFIG_HOME:-$HOME/.config}/vpn`
# * For the Tunnelblick integration, ensure the username / password is populated from the `OVPN_USERNAME` and `OVPN_PASSWORD` variables
#
# ## Links
#
# * [VPN profile folder](https://github.com/megabyte-labs/install.doctor/blob/master/home/dot_config/vpn)
# * [VPN profile documentation](https://install.doctor/docs/customization/secrets#vpn-profiles)

set -Eeuo pipefail
trap "gum log -sl error 'Script encountered an error!'" ERR

### Backs up previous network settings to `/Library/Preferences/com.apple.networkextension.plist.old` before applying new VPN profiles
gum log -sl info 'Backing up /Library/Preferences/com.apple.networkextension.plist to /Library/Preferences/com.apple.networkextension.plist.old'
sudo cp -f /Library/Preferences/com.apple.networkextension.plist /Library/Preferences/com.apple.networkextension.plist.old

### Ensures the `/etc/wireguard` directory exists and has the lowest possible permission-level
if [ ! -d /etc/wireguard ]; then
gum log -sl info 'Creating /etc/wireguard since it does not exist yet'
sudo mkdir -p /etc/wireguard
sudo chmod 600 /etc/wireguard
fi

### TODO - Should adding the .conf files to /etc/wireguard only be done on macOS or is this useful on Linux as well?
### Cycles through the `*.conf` files in `${XDG_CONFIG_HOME:-$HOME/.config}/vpn` and adds them to the `/etc/wireguard` folder
find "${XDG_CONFIG_HOME:-$HOME/.config}/vpn" -mindepth 1 -maxdepth 1 -type f -name "*.conf" | while read WG_CONF; do
WG_FILE="$(basename "$WG_CONF")"
gum log -sl info 'Adding '"$WG_FILE"' to /etc/wireguard'
sudo cp -f "$WG_CONF" "/etc/wireguard/$WG_FILE"
done

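For reference, the *.conf profiles this (now removed) script copied into /etc/wireguard use the standard WireGuard INI format; an illustrative skeleton with placeholder keys, addresses, and endpoint (no real credentials):

[Interface]
PrivateKey = <client-private-key>
Address = 10.64.0.2/32
DNS = 10.64.0.1

[Peer]
PublicKey = <server-public-key>
AllowedIPs = 0.0.0.0/0
Endpoint = vpn.example.com:51820
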
@ -1,800 +0,0 @@
|
|||
##
|
||||
## Example config file for the Clam AV daemon
|
||||
## Please read the clamd.conf(5) manual before editing this file.
|
||||
##
|
||||
|
||||
|
||||
# Comment or remove the line below.
|
||||
|
||||
# Uncomment this option to enable logging.
|
||||
# LogFile must be writable for the user running daemon.
|
||||
# A full path is required.
|
||||
# Default: disabled
|
||||
#LogFile /tmp/clamd.log
|
||||
|
||||
# By default the log file is locked for writing - the lock protects against
|
||||
# running clamd multiple times (if you want to run another clamd, please
|
||||
# copy the configuration file, change the LogFile variable, and run
|
||||
# the daemon with --config-file option).
|
||||
# This option disables log file locking.
|
||||
# Default: no
|
||||
#LogFileUnlock yes
|
||||
|
||||
# Maximum size of the log file.
|
||||
# Value of 0 disables the limit.
|
||||
# You may use 'M' or 'm' for megabytes (1M = 1m = 1048576 bytes)
|
||||
# and 'K' or 'k' for kilobytes (1K = 1k = 1024 bytes). To specify the size
|
||||
# in bytes just don't use modifiers. If LogFileMaxSize is enabled, log
|
||||
# rotation (the LogRotate option) will always be enabled.
|
||||
# Default: 1M
|
||||
#LogFileMaxSize 2M
|
||||
|
||||
# Log time with each message.
|
||||
# Default: no
|
||||
#LogTime yes
|
||||
|
||||
# Also log clean files. Useful in debugging but drastically increases the
|
||||
# log size.
|
||||
# Default: no
|
||||
#LogClean yes
|
||||
|
||||
# Use system logger (can work together with LogFile).
|
||||
# Default: no
|
||||
#LogSyslog yes
|
||||
|
||||
# Specify the type of syslog messages - please refer to 'man syslog'
|
||||
# for facility names.
|
||||
# Default: LOG_LOCAL6
|
||||
#LogFacility LOG_MAIL
|
||||
|
||||
# Enable verbose logging.
|
||||
# Default: no
|
||||
#LogVerbose yes
|
||||
|
||||
# Enable log rotation. Always enabled when LogFileMaxSize is enabled.
|
||||
# Default: no
|
||||
#LogRotate yes
|
||||
|
||||
# Enable Prelude output.
|
||||
# Default: no
|
||||
#PreludeEnable yes
|
||||
#
|
||||
# Set the name of the analyzer used by prelude-admin.
|
||||
# Default: ClamAV
|
||||
#PreludeAnalyzerName ClamAV
|
||||
|
||||
# Log additional information about the infected file, such as its
|
||||
# size and hash, together with the virus name.
|
||||
#ExtendedDetectionInfo yes
|
||||
|
||||
# This option allows you to save a process identifier of the listening
|
||||
# daemon (main thread).
|
||||
# This file will be owned by root, as long as clamd was started by root.
|
||||
# It is recommended that the directory where this file is stored is
|
||||
# also owned by root to keep other users from tampering with it.
|
||||
# Default: disabled
|
||||
#PidFile /var/run/clamd.pid
|
||||
|
||||
# Optional path to the global temporary directory.
|
||||
# Default: system specific (usually /tmp or /var/tmp).
|
||||
#TemporaryDirectory /var/tmp
|
||||
|
||||
# Path to the database directory.
|
||||
# Default: hardcoded (depends on installation options)
|
||||
#DatabaseDirectory /var/lib/clamav
|
||||
|
||||
# Only load the official signatures published by the ClamAV project.
|
||||
# Default: no
|
||||
#OfficialDatabaseOnly no
|
||||
|
||||
# The daemon can work in local mode, network mode or both.
|
||||
# Due to security reasons we recommend the local mode.
|
||||
|
||||
# Path to a local socket file the daemon will listen on.
|
||||
# Default: disabled (must be specified by a user)
|
||||
LocalSocket /var/run/clamd.socket
|
||||
|
||||
# Sets the group ownership on the unix socket.
|
||||
# Default: disabled (the primary group of the user running clamd)
|
||||
#LocalSocketGroup virusgroup
|
||||
|
||||
# Sets the permissions on the unix socket to the specified mode.
|
||||
# Default: disabled (socket is world accessible)
|
||||
#LocalSocketMode 660
|
||||
|
||||
# Remove stale socket after unclean shutdown.
|
||||
# Default: yes
|
||||
#FixStaleSocket yes
|
||||
|
||||
# TCP port address.
|
||||
# Default: no
|
||||
#TCPSocket 3310
|
||||
|
||||
# TCP address.
|
||||
# By default we bind to INADDR_ANY, probably not wise.
|
||||
# Enable the following to provide some degree of protection
|
||||
# from the outside world. This option can be specified multiple
|
||||
# times if you want to listen on multiple IPs. IPv6 is now supported.
|
||||
# Default: no
|
||||
#TCPAddr localhost
|
||||
|
||||
# Maximum length the queue of pending connections may grow to.
|
||||
# Default: 200
|
||||
#MaxConnectionQueueLength 30
|
||||
|
||||
# Clamd uses FTP-like protocol to receive data from remote clients.
|
||||
# If you are using clamav-milter to balance load between remote clamd daemons
|
||||
# on firewall servers you may need to tune the options below.
|
||||
|
||||
# Close the connection when the data size limit is exceeded.
|
||||
# The value should match your MTA's limit for a maximum attachment size.
|
||||
# Default: 100M
|
||||
#StreamMaxLength 25M
|
||||
|
||||
# Limit port range.
|
||||
# Default: 1024
|
||||
#StreamMinPort 30000
|
||||
# Default: 2048
|
||||
#StreamMaxPort 32000
|
||||
|
||||
# Maximum number of threads running at the same time.
|
||||
# Default: 10
|
||||
#MaxThreads 20
|
||||
|
||||
# Waiting for data from a client socket will timeout after this time (seconds).
|
||||
# Default: 120
|
||||
#ReadTimeout 300
|
||||
|
||||
# This option specifies the time (in seconds) after which clamd should
|
||||
# timeout if a client doesn't provide any initial command after connecting.
|
||||
# Default: 30
|
||||
#CommandReadTimeout 30
|
||||
|
||||
# This option specifies how long to wait (in milliseconds) if the send buffer
|
||||
# is full.
|
||||
# Keep this value low to prevent clamd hanging.
|
||||
#
|
||||
# Default: 500
|
||||
#SendBufTimeout 200
|
||||
|
||||
# Maximum number of queued items (including those being processed by
|
||||
# MaxThreads threads).
|
||||
# It is recommended to have this value at least twice MaxThreads if possible.
|
||||
# WARNING: you shouldn't increase this too much to avoid running out of file
|
||||
# descriptors, the following condition should hold:
|
||||
# MaxThreads*MaxRecursion + (MaxQueue - MaxThreads) + 6< RLIMIT_NOFILE (usual
|
||||
# max is 1024).
|
||||
#
|
||||
# Default: 100
|
||||
#MaxQueue 200
|
||||
|
||||
# Waiting for a new job will timeout after this time (seconds).
|
||||
# Default: 30
|
||||
#IdleTimeout 60
|
||||
|
||||
# Don't scan files and directories matching regex
|
||||
# This directive can be used multiple times
|
||||
# Default: scan all
|
||||
#ExcludePath ^/proc/
|
||||
#ExcludePath ^/sys/
|
||||
|
||||
# Maximum depth directories are scanned at.
|
||||
# Default: 15
|
||||
#MaxDirectoryRecursion 20
|
||||
|
||||
# Follow directory symlinks.
|
||||
# Default: no
|
||||
#FollowDirectorySymlinks yes
|
||||
|
||||
# Follow regular file symlinks.
|
||||
# Default: no
|
||||
#FollowFileSymlinks yes
|
||||
|
||||
# Scan files and directories on other filesystems.
|
||||
# Default: yes
|
||||
#CrossFilesystems yes
|
||||
|
||||
# Perform a database check.
|
||||
# Default: 600 (10 min)
|
||||
#SelfCheck 600
|
||||
|
||||
# Enable non-blocking (multi-threaded/concurrent) database reloads.
|
||||
# This feature will temporarily load a second scanning engine while scanning
|
||||
# continues using the first engine. Once loaded, the new engine takes over.
|
||||
# The old engine is removed as soon as all scans using the old engine have
|
||||
# completed.
|
||||
# This feature requires more RAM, so this option is provided in case users are
|
||||
# willing to block scans during reload in exchange for lower RAM requirements.
|
||||
# Default: yes
|
||||
#ConcurrentDatabaseReload no
|
||||
|
||||
# Execute a command when virus is found. In the command string %v will
|
||||
# be replaced with the virus name and %f will be replaced with the file name.
|
||||
# Additionally, two environment variables will be defined: $CLAM_VIRUSEVENT_FILENAME
|
||||
# and $CLAM_VIRUSEVENT_VIRUSNAME.
|
||||
# Default: no
|
||||
#VirusEvent /usr/local/bin/send_sms 123456789 "VIRUS ALERT: %v in %f"
|
||||
|
||||
# Run as another user (clamd must be started by root for this option to work)
|
||||
# Default: don't drop privileges
|
||||
#User clamav
|
||||
|
||||
# Stop daemon when libclamav reports out of memory condition.
|
||||
#ExitOnOOM yes
|
||||
|
||||
# Don't fork into background.
|
||||
# Default: no
|
||||
#Foreground yes
|
||||
|
||||
# Enable debug messages in libclamav.
|
||||
# Default: no
|
||||
#Debug yes
|
||||
|
||||
# Do not remove temporary files (for debug purposes).
|
||||
# Default: no
|
||||
#LeaveTemporaryFiles yes
|
||||
|
||||
# Record metadata about the file being scanned.
|
||||
# Scan metadata is useful for file analysis purposes and for debugging scan behavior.
|
||||
# The JSON metadata will be printed after the scan is complete if Debug is enabled.
|
||||
# A metadata.json file will be written to the scan temp directory if LeaveTemporaryFiles is enabled.
|
||||
# Default: no
|
||||
#GenerateMetadataJson yes
|
||||
|
||||
# Permit use of the ALLMATCHSCAN command. If set to no, clamd will reject
|
||||
# any ALLMATCHSCAN command as invalid.
|
||||
# Default: yes
|
||||
#AllowAllMatchScan no
|
||||
|
||||
# Detect Possibly Unwanted Applications.
|
||||
# Default: no
|
||||
#DetectPUA yes
|
||||
|
||||
# Exclude a specific PUA category. This directive can be used multiple times.
|
||||
# See https://github.com/vrtadmin/clamav-faq/blob/master/faq/faq-pua.md for
|
||||
# the complete list of PUA categories.
|
||||
# Default: Load all categories (if DetectPUA is activated)
|
||||
#ExcludePUA NetTool
|
||||
#ExcludePUA PWTool
|
||||
|
||||
# Only include a specific PUA category. This directive can be used multiple
|
||||
# times.
|
||||
# Default: Load all categories (if DetectPUA is activated)
|
||||
#IncludePUA Spy
|
||||
#IncludePUA Scanner
|
||||
#IncludePUA RAT
|
||||
|
||||
# This option causes memory or nested map scans to dump the content to disk.
|
||||
# If you turn on this option, more data is written to disk and is available
|
||||
# when the LeaveTemporaryFiles option is enabled.
|
||||
#ForceToDisk yes
|
||||
|
||||
# This option allows you to disable the caching feature of the engine. By
|
||||
# default, the engine will store an MD5 in a cache of any files that are
|
||||
# not flagged as virus or that hit limits checks. Disabling the cache will
|
||||
# have a negative performance impact on large scans.
|
||||
# Default: no
|
||||
#DisableCache yes
|
||||
|
||||
# In some cases (eg. complex malware, exploits in graphic files, and others),
|
||||
# ClamAV uses special algorithms to detect abnormal patterns and behaviors that
|
||||
# may be malicious. This option enables alerting on such heuristically
|
||||
# detected potential threats.
|
||||
# Default: yes
|
||||
#HeuristicAlerts yes
|
||||
|
||||
# Allow heuristic alerts to take precedence.
|
||||
# When enabled, if a heuristic scan (such as phishingScan) detects
|
||||
# a possible virus/phish it will stop the scan immediately. Recommended, saves CPU
|
||||
# scan-time.
|
||||
# When disabled, virus/phish detected by heuristic scans will be reported only
|
||||
# at the end of a scan. If an archive contains both a heuristically detected
|
||||
# virus/phish, and a real malware, the real malware will be reported
|
||||
#
|
||||
# Keep this disabled if you intend to handle "Heuristics.*" viruses
|
||||
# differently from "real" malware.
|
||||
# If a non-heuristically-detected virus (signature-based) is found first,
|
||||
# the scan is interrupted immediately, regardless of this config option.
|
||||
#
|
||||
# Default: no
|
||||
#HeuristicScanPrecedence yes
|
||||
|
||||
|
||||
##
|
||||
## Heuristic Alerts
|
||||
##
|
||||
|
||||
# With this option clamav will try to detect broken executables (both PE and
|
||||
# ELF) and alert on them with the Broken.Executable heuristic signature.
|
||||
# Default: no
|
||||
#AlertBrokenExecutables yes
|
||||
|
||||
# With this option clamav will try to detect broken media file (JPEG,
|
||||
# TIFF, PNG, GIF) and alert on them with a Broken.Media heuristic signature.
|
||||
# Default: no
|
||||
#AlertBrokenMedia yes
|
||||
|
||||
# Alert on encrypted archives _and_ documents with heuristic signature
|
||||
# (encrypted .zip, .7zip, .rar, .pdf).
|
||||
# Default: no
|
||||
#AlertEncrypted yes
|
||||
|
||||
# Alert on encrypted archives with heuristic signature (encrypted .zip, .7zip,
|
||||
# .rar).
|
||||
# Default: no
|
||||
#AlertEncryptedArchive yes
|
||||
|
||||
# Alert on encrypted archives with heuristic signature (encrypted .pdf).
|
||||
# Default: no
|
||||
#AlertEncryptedDoc yes
|
||||
|
||||
# With this option enabled OLE2 files containing VBA macros, which were not
|
||||
# detected by signatures will be marked as "Heuristics.OLE2.ContainsMacros".
|
||||
# Default: no
|
||||
#AlertOLE2Macros yes
|
||||
|
||||
# Alert on SSL mismatches in URLs, even if the URL isn't in the database.
|
||||
# This can lead to false positives.
|
||||
# Default: no
|
||||
#AlertPhishingSSLMismatch yes
|
||||
|
||||
# Alert on cloaked URLs, even if URL isn't in database.
|
||||
# This can lead to false positives.
|
||||
# Default: no
|
||||
#AlertPhishingCloak yes
|
||||
|
||||
# Alert on raw DMG image files containing partition intersections
|
||||
# Default: no
|
||||
#AlertPartitionIntersection yes
|
||||
|
||||
|
||||
##
|
||||
## Executable files
|
||||
##
|
||||
|
||||
# PE stands for Portable Executable - it's an executable file format used
|
||||
# in all 32 and 64-bit versions of Windows operating systems. This option
|
||||
# allows ClamAV to perform a deeper analysis of executable files and it's also
|
||||
# required for decompression of popular executable packers such as UPX, FSG,
|
||||
# and Petite. If you turn off this option, the original files will still be
|
||||
# scanned, but without additional processing.
|
||||
# Default: yes
|
||||
#ScanPE yes
|
||||
|
||||
# Certain PE files contain an authenticode signature. By default, we check
|
||||
# the signature chain in the PE file against a database of trusted and
|
||||
# revoked certificates if the file being scanned is marked as a virus.
|
||||
# If any certificate in the chain validates against any trusted root, but
|
||||
# does not match any revoked certificate, the file is marked as trusted.
|
||||
# If the file does match a revoked certificate, the file is marked as virus.
|
||||
# The following setting completely turns off authenticode verification.
|
||||
# Default: no
|
||||
#DisableCertCheck yes
|
||||
|
||||
# Executable and Linking Format is a standard format for UN*X executables.
|
||||
# This option allows you to control the scanning of ELF files.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without additional processing.
|
||||
# Default: yes
|
||||
#ScanELF yes
|
||||
|
||||
|
||||
##
|
||||
## Documents
|
||||
##
|
||||
|
||||
# This option enables scanning of OLE2 files, such as Microsoft Office
|
||||
# documents and .msi files.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without additional processing.
|
||||
# Default: yes
|
||||
#ScanOLE2 yes
|
||||
|
||||
# This option enables scanning within PDF files.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without decoding and additional processing.
|
||||
# Default: yes
|
||||
#ScanPDF yes
|
||||
|
||||
# This option enables scanning within SWF files.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without decoding and additional processing.
|
||||
# Default: yes
|
||||
#ScanSWF yes
|
||||
|
||||
# This option enables scanning xml-based document files supported by libclamav.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without additional processing.
|
||||
# Default: yes
|
||||
#ScanXMLDOCS yes
|
||||
|
||||
# This option enables scanning of HWP3 files.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without additional processing.
|
||||
# Default: yes
|
||||
#ScanHWP3 yes
|
||||
|
||||
|
||||
##
|
||||
## Mail files
|
||||
##
|
||||
|
||||
# Enable internal e-mail scanner.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without parsing individual messages/attachments.
|
||||
# Default: yes
|
||||
#ScanMail yes
|
||||
|
||||
# Scan RFC1341 messages split over many emails.
|
||||
# You will need to periodically clean up $TemporaryDirectory/clamav-partial
|
||||
# directory.
|
||||
# WARNING: This option may open your system to a DoS attack.
|
||||
# Never use it on loaded servers.
|
||||
# Default: no
|
||||
#ScanPartialMessages yes
|
||||
|
||||
# With this option enabled ClamAV will try to detect phishing attempts by using
|
||||
# HTML.Phishing and Email.Phishing NDB signatures.
|
||||
# Default: yes
|
||||
#PhishingSignatures no
|
||||
|
||||
# With this option enabled ClamAV will try to detect phishing attempts by
|
||||
# analyzing URLs found in emails using WDB and PDB signature databases.
|
||||
# Default: yes
|
||||
#PhishingScanURLs no
|
||||
|
||||
|
||||
##
|
||||
## Data Loss Prevention (DLP)
|
||||
##
|
||||
|
||||
# Enable the DLP module
|
||||
# Default: No
|
||||
#StructuredDataDetection yes
|
||||
|
||||
# This option sets the lowest number of Credit Card numbers found in a file
|
||||
# to generate a detect.
|
||||
# Default: 3
|
||||
#StructuredMinCreditCardCount 5
|
||||
|
||||
# With this option enabled the DLP module will search for valid Credit Card
|
||||
# numbers only. Debit and Private Label cards will not be searched.
|
||||
# Default: no
|
||||
#StructuredCCOnly yes
|
||||
|
||||
# This option sets the lowest number of Social Security Numbers found
|
||||
# in a file to generate a detect.
|
||||
# Default: 3
|
||||
#StructuredMinSSNCount 5
|
||||
|
||||
# With this option enabled the DLP module will search for valid
|
||||
# SSNs formatted as xxx-yy-zzzz
|
||||
# Default: yes
|
||||
#StructuredSSNFormatNormal yes
|
||||
|
||||
# With this option enabled the DLP module will search for valid
|
||||
# SSNs formatted as xxxyyzzzz
|
||||
# Default: no
|
||||
#StructuredSSNFormatStripped yes
|
||||
|
||||
|
||||
##
|
||||
## HTML
|
||||
##
|
||||
|
||||
# Perform HTML normalisation and decryption of MS Script Encoder code.
|
||||
# Default: yes
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without additional processing.
|
||||
#ScanHTML yes
|
||||
|
||||
|
||||
##
|
||||
## Archives
|
||||
##
|
||||
|
||||
# ClamAV can scan within archives and compressed files.
|
||||
# If you turn off this option, the original files will still be scanned, but
|
||||
# without unpacking and additional processing.
|
||||
# Default: yes
|
||||
#ScanArchive yes
|
||||
|
||||
|
||||
##
|
||||
## Limits
|
||||
##
|
||||
|
||||
# The options below protect your system against Denial of Service attacks
|
||||
# using archive bombs.
|
||||
|
||||
# This option sets the maximum amount of time to a scan may take.
|
||||
# In this version, this field only affects the scan time of ZIP archives.
|
||||
# Value of 0 disables the limit.
|
||||
# Note: disabling this limit or setting it too high may allow scanning
|
||||
# of certain files to lock up the scanning process/threads resulting in a
|
||||
# Denial of Service.
|
||||
# Time is in milliseconds.
|
||||
# Default: 120000
|
||||
#MaxScanTime 300000
|
||||
|
||||
# This option sets the maximum amount of data to be scanned for each input
|
||||
# file. Archives and other containers are recursively extracted and scanned
|
||||
# up to this value.
|
||||
# Value of 0 disables the limit
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 400M
|
||||
#MaxScanSize 1000M
|
||||
|
||||
# Files larger than this limit won't be scanned. Affects the input file itself
|
||||
# as well as files contained inside it (when the input file is an archive, a
|
||||
# document or some other kind of container).
|
||||
# Value of 0 disables the limit.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Technical design limitations prevent ClamAV from scanning files greater than
|
||||
# 2 GB at this time.
|
||||
# Default: 100M
|
||||
#MaxFileSize 400M
|
||||
|
||||
# Nested archives are scanned recursively, e.g. if a Zip archive contains a RAR
|
||||
# file, all files within it will also be scanned. This option specifies how
|
||||
# deeply the process should be continued.
|
||||
# Note: setting this limit too high may result in severe damage to the system.
|
||||
# Default: 17
|
||||
#MaxRecursion 10
|
||||
|
||||
# Number of files to be scanned within an archive, a document, or any other
|
||||
# container file.
|
||||
# Value of 0 disables the limit.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 10000
|
||||
#MaxFiles 15000
|
||||
|
||||
# Maximum size of a file to check for embedded PE. Files larger than this value
|
||||
# will skip the additional analysis step.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 40M
|
||||
#MaxEmbeddedPE 100M
|
||||
|
||||
# Maximum size of a HTML file to normalize. HTML files larger than this value
|
||||
# will not be normalized or scanned.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 40M
|
||||
#MaxHTMLNormalize 100M
|
||||
|
||||
# Maximum size of a normalized HTML file to scan. HTML files larger than this
|
||||
# value after normalization will not be scanned.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 8M
|
||||
#MaxHTMLNoTags 16M
|
||||
|
||||
# Maximum size of a script file to normalize. Script content larger than this
|
||||
# value will not be normalized or scanned.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 20M
|
||||
#MaxScriptNormalize 50M
|
||||
|
||||
# Maximum size of a ZIP file to reanalyze type recognition. ZIP files larger
|
||||
# than this value will skip the step to potentially reanalyze as PE.
|
||||
# Note: disabling this limit or setting it too high may result in severe damage
|
||||
# to the system.
|
||||
# Default: 1M
|
||||
#MaxZipTypeRcg 1M
|
||||
|
||||
# This option sets the maximum number of partitions of a raw disk image to be
|
||||
# scanned.
|
||||
# Raw disk images with more partitions than this value will have up to
|
||||
# the value number partitions scanned. Negative values are not allowed.
|
||||
# Note: setting this limit too high may result in severe damage or impact
|
||||
# performance.
|
||||
# Default: 50
|
||||
#MaxPartitions 128
|
||||
|
||||
# This option sets the maximum number of icons within a PE to be scanned.
|
||||
# PE files with more icons than this value will have up to the value number
|
||||
# icons scanned.
|
||||
# Negative values are not allowed.
|
||||
# WARNING: setting this limit too high may result in severe damage or impact
|
||||
# performance.
|
||||
# Default: 100
|
||||
#MaxIconsPE 200
|
||||
|
||||
# This option sets the maximum recursive calls for HWP3 parsing during
|
||||
# scanning. HWP3 files using more than this limit will be terminated and
|
||||
# alert the user.
|
||||
# Scans will be unable to scan any HWP3 attachments if the recursive limit
|
||||
# is reached.
|
||||
# Negative values are not allowed.
|
||||
# WARNING: setting this limit too high may result in severe damage or impact
|
||||
# performance.
|
||||
# Default: 16
|
||||
#MaxRecHWP3 16
|
||||
|
||||
# This option sets the maximum calls to the PCRE match function during
|
||||
# an instance of regex matching.
|
||||
# Instances using more than this limit will be terminated and alert the user
|
||||
# but the scan will continue.
|
||||
# For more information on match_limit, see the PCRE documentation.
|
||||
# Negative values are not allowed.
|
||||
# WARNING: setting this limit too high may severely impact performance.
|
||||
# Default: 100000
|
||||
#PCREMatchLimit 20000
|
||||
|
||||
# This option sets the maximum recursive calls to the PCRE match function
|
||||
# during an instance of regex matching.
|
||||
# Instances using more than this limit will be terminated and alert the user
|
||||
# but the scan will continue.
|
||||
# For more information on match_limit_recursion, see the PCRE documentation.
|
||||
# Negative values are not allowed and values > PCREMatchLimit are superfluous.
|
||||
# WARNING: setting this limit too high may severely impact performance.
|
||||
# Default: 2000
|
||||
#PCRERecMatchLimit 10000
|
||||
|
||||
# This option sets the maximum filesize for which PCRE subsigs will be
|
||||
# executed. Files exceeding this limit will not have PCRE subsigs executed
|
||||
# unless a subsig is encompassed to a smaller buffer.
|
||||
# Negative values are not allowed.
|
||||
# Setting this value to zero disables the limit.
|
||||
# WARNING: setting this limit too high or disabling it may severely impact
|
||||
# performance.
|
||||
# Default: 100M
|
||||
#PCREMaxFileSize 400M
|
||||
|
||||
# When AlertExceedsMax is set, files exceeding the MaxFileSize, MaxScanSize, or
|
||||
# MaxRecursion limit will be flagged with the virus name starting with
|
||||
# "Heuristics.Limits.Exceeded".
|
||||
# Default: no
|
||||
#AlertExceedsMax yes
|
||||
|
||||
##
|
||||
## On-access Scan Settings
|
||||
##
|
||||
|
||||
# Don't scan files larger than OnAccessMaxFileSize
|
||||
# Value of 0 disables the limit.
|
||||
# Default: 5M
|
||||
#OnAccessMaxFileSize 10M
|
||||
|
||||
# Max number of scanning threads to allocate to the OnAccess thread pool at
|
||||
# startup. These threads are the ones responsible for creating a connection
|
||||
# with the daemon and kicking off scanning after an event has been processed.
|
||||
# To prevent clamonacc from consuming all clamd's resources keep this lower
|
||||
# than clamd's max threads.
|
||||
# Default: 5
|
||||
#OnAccessMaxThreads 10
|
||||
|
||||
# Max amount of time (in milliseconds) that the OnAccess client should spend
|
||||
# for every connect, send, and receive attempt when communicating with clamd
|
||||
# via curl.
|
||||
# Default: 5000 (5 seconds)
|
||||
# OnAccessCurlTimeout 10000
|
||||
|
||||
# Toggles dynamic directory determination. Allows for recursively watching
|
||||
# include paths.
|
||||
# Default: no
|
||||
#OnAccessDisableDDD yes
|
||||
|
||||
# Set the include paths (all files inside them will be scanned). You can have
|
||||
# multiple OnAccessIncludePath directives but each directory must be added
|
||||
# in a separate line.
|
||||
# Default: disabled
|
||||
#OnAccessIncludePath /home
|
||||
#OnAccessIncludePath /students
|
||||
|
||||
# Set the exclude paths. All subdirectories are also excluded.
|
||||
# Default: disabled
|
||||
#OnAccessExcludePath /home/user
|
||||
|
||||
# Modifies fanotify blocking behaviour when handling permission events.
|
||||
# If off, fanotify will only notify if the file scanned is a virus,
|
||||
# and not perform any blocking.
|
||||
# Default: no
|
||||
#OnAccessPrevention yes
|
||||
|
||||
# When using prevention, if this option is turned on, any errors that occur
|
||||
# during scanning will result in the event attempt being denied. This could
|
||||
# potentially lead to unwanted system behaviour with certain configurations,
|
||||
# so the client defaults this to off and prefers allowing access events in
|
||||
# case of scan or connection error.
|
||||
# Default: no
|
||||
#OnAccessDenyOnError yes
|
||||
|
||||
# Toggles extra scanning and notifications when a file or directory is
|
||||
# created or moved.
|
||||
# Requires the DDD system to kick-off extra scans.
|
||||
# Default: no
|
||||
#OnAccessExtraScanning yes
|
||||
|
||||
# Set the mount point to be scanned. The mount point specified, or the mount
|
||||
# point containing the specified directory will be watched. If any directories
|
||||
# are specified, this option will preempt (disable and ignore all options
|
||||
# related to) the DDD system. This option will result in verdicts only.
|
||||
# Note that prevention is explicitly disallowed to prevent common, fatal
|
||||
# misconfigurations. (e.g. watching "/" with prevention on and no exclusions
|
||||
# made on vital system directories)
|
||||
# It can be used multiple times.
|
||||
# Default: disabled
|
||||
#OnAccessMountPath /
|
||||
#OnAccessMountPath /home/user
|
||||
|
||||
# With this option you can exclude the root UID (0). Processes run under
|
||||
# root will be able to access all files without triggering scans or
|
||||
# permission denied events.
|
||||
# Note that if clamd cannot check the uid of the process that generated an
|
||||
# on-access scan event (e.g., because OnAccessPrevention was not enabled, and
|
||||
# the process already exited), clamd will perform a scan. Thus, setting
|
||||
# OnAccessExcludeRootUID is not *guaranteed* to prevent every access by the
|
||||
# root user from triggering a scan (unless OnAccessPrevention is enabled).
|
||||
# Default: no
|
||||
#OnAccessExcludeRootUID no
|
||||
|
||||
# With this option you can exclude specific UIDs. Processes with these UIDs
|
||||
# will be able to access all files without triggering scans or permission
|
||||
# denied events.
|
||||
# This option can be used multiple times (one per line).
|
||||
# Using a value of 0 on any line will disable this option entirely.
|
||||
# To exclude the root UID (0) please enable the OnAccessExcludeRootUID
|
||||
# option.
|
||||
# Also note that if clamd cannot check the uid of the process that generated an
|
||||
# on-access scan event (e.g., because OnAccessPrevention was not enabled, and
|
||||
# the process already exited), clamd will perform a scan. Thus, setting
|
||||
# OnAccessExcludeUID is not *guaranteed* to prevent every access by the
|
||||
# specified uid from triggering a scan (unless OnAccessPrevention is enabled).
|
||||
# Default: disabled
|
||||
#OnAccessExcludeUID -1
|
||||
|
||||
# This option allows exclusions via user names when using the on-access
|
||||
# scanning client. It can be used multiple times.
|
||||
# It has the same potential race condition limitations of the
|
||||
# OnAccessExcludeUID option.
|
||||
# Default: disabled
|
||||
#OnAccessExcludeUname clamav
|
||||
|
||||
# Number of times the OnAccess client will retry a failed scan due to
|
||||
# connection problems (or other issues).
|
||||
# Default: 0
|
||||
#OnAccessRetryAttempts 3
|
||||
|
||||
##
|
||||
## Bytecode
|
||||
##
|
||||
|
||||
# With this option enabled ClamAV will load bytecode from the database.
|
||||
# It is highly recommended you keep this option on, otherwise you'll miss
|
||||
# detections for many new viruses.
|
||||
# Default: yes
|
||||
#Bytecode yes
|
||||
|
||||
# Set bytecode security level.
|
||||
# Possible values:
|
||||
# None - No security at all, meant for debugging.
|
||||
# DO NOT USE THIS ON PRODUCTION SYSTEMS.
|
||||
# This value is only available if clamav was built
|
||||
# with --enable-debug!
|
||||
# TrustSigned - Trust bytecode loaded from signed .c[lv]d files, insert
|
||||
# runtime safety checks for bytecode loaded from other sources.
|
||||
# Paranoid - Don't trust any bytecode, insert runtime checks for all.
|
||||
# Recommended: TrustSigned, because bytecode in .cvd files already has these
|
||||
# checks.
|
||||
# Note that by default only signed bytecode is loaded, currently you can only
|
||||
# load unsigned bytecode in --enable-debug mode.
|
||||
#
|
||||
# Default: TrustSigned
|
||||
#BytecodeSecurity TrustSigned
|
||||
|
||||
# Allow loading bytecode from outside digitally signed .c[lv]d files.
|
||||
# **Caution**: You should NEVER run bytecode signatures from untrusted sources.
|
||||
# Doing so may result in arbitrary code execution.
|
||||
# Default: no
|
||||
#BytecodeUnsigned yes
|
||||
|
||||
# Set bytecode timeout in milliseconds.
|
||||
#
|
||||
# Default: 5000
|
||||
# BytecodeTimeout 1000
|
|
@ -1,204 +0,0 @@
|
|||
##
|
||||
## Example config file for freshclam
|
||||
## Please read the freshclam.conf(5) manual before editing this file.
|
||||
##
|
||||
|
||||
|
||||
# Comment or remove the line below.
|
||||
# Example
|
||||
|
||||
# Path to the database directory.
|
||||
# WARNING: It must match clamd.conf's directive!
|
||||
# Default: hardcoded (depends on installation options)
|
||||
#DatabaseDirectory /var/lib/clamav
|
||||
|
||||
# Path to the log file (make sure it has proper permissions)
|
||||
# Default: disabled
|
||||
#UpdateLogFile /var/log/freshclam.log
|
||||
|
||||
# Maximum size of the log file.
|
||||
# Value of 0 disables the limit.
|
||||
# You may use 'M' or 'm' for megabytes (1M = 1m = 1048576 bytes)
|
||||
# and 'K' or 'k' for kilobytes (1K = 1k = 1024 bytes). To specify the size
|
||||
# in bytes just don't use modifiers. If LogFileMaxSize is enabled,
|
||||
# log rotation (the LogRotate option) will always be enabled.
|
||||
# Default: 1M
|
||||
#LogFileMaxSize 2M
|
||||
|
||||
# Log time with each message.
|
||||
# Default: no
|
||||
#LogTime yes
|
||||
|
||||
# Enable verbose logging.
|
||||
# Default: no
|
||||
#LogVerbose yes
|
||||
|
||||
# Use system logger (can work together with UpdateLogFile).
|
||||
# Default: no
|
||||
#LogSyslog yes
|
||||
|
||||
# Specify the type of syslog messages - please refer to 'man syslog'
|
||||
# for facility names.
|
||||
# Default: LOG_LOCAL6
|
||||
#LogFacility LOG_MAIL
|
||||
|
||||
# Enable log rotation. Always enabled when LogFileMaxSize is enabled.
|
||||
# Default: no
|
||||
#LogRotate yes
|
||||
|
||||
# This option allows you to save the process identifier of the daemon
|
||||
# This file will be owned by root, as long as freshclam was started by root.
|
||||
# It is recommended that the directory where this file is stored is
|
||||
# also owned by root to keep other users from tampering with it.
|
||||
# Default: disabled
|
||||
#PidFile /var/run/freshclam.pid
|
||||
|
||||
# By default when started freshclam drops privileges and switches to the
|
||||
# "clamav" user. This directive allows you to change the database owner.
|
||||
# Default: clamav (may depend on installation options)
|
||||
#DatabaseOwner clamav
|
||||
|
||||
# Use DNS to verify virus database version. FreshClam uses DNS TXT records
|
||||
# to verify database and software versions. With this directive you can change
|
||||
# the database verification domain.
|
||||
# WARNING: Do not touch it unless you're configuring freshclam to use your
|
||||
# own database verification domain.
|
||||
# Default: current.cvd.clamav.net
|
||||
#DNSDatabaseInfo current.cvd.clamav.net
|
||||
|
||||
# database.clamav.net is now the primary domain name to be used world-wide.
|
||||
# Now that CloudFlare is being used as our Content Delivery Network (CDN),
|
||||
# this one domain name works world-wide to direct freshclam to the closest
|
||||
# geographic endpoint.
|
||||
# If the old db.XY.clamav.net domains are set, freshclam will automatically
|
||||
# use database.clamav.net instead.
|
||||
DatabaseMirror database.clamav.net
|
||||
|
||||
# How many attempts to make before giving up.
|
||||
# Default: 3 (per mirror)
|
||||
#MaxAttempts 5
|
||||
|
||||
# With this option you can control scripted updates. It's highly recommended
|
||||
# to keep it enabled.
|
||||
# Default: yes
|
||||
#ScriptedUpdates yes
|
||||
|
||||
# By default freshclam will keep the local databases (.cld) uncompressed to
|
||||
# make their handling faster. With this option you can enable the compression;
|
||||
# the change will take effect with the next database update.
|
||||
# Default: no
|
||||
#CompressLocalDatabase no
|
||||
|
||||
# With this option you can provide custom sources for database files.
|
||||
# This option can be used multiple times. Support for:
|
||||
# http(s)://, ftp(s)://, or file://
|
||||
# Default: no custom URLs
|
||||
#DatabaseCustomURL http://myserver.example.com/mysigs.ndb
|
||||
#DatabaseCustomURL https://myserver.example.com/mysigs.ndb
|
||||
#DatabaseCustomURL https://myserver.example.com:4567/allow_list.wdb
|
||||
#DatabaseCustomURL ftp://myserver.example.com/example.ldb
|
||||
#DatabaseCustomURL ftps://myserver.example.com:4567/example.ndb
|
||||
#DatabaseCustomURL file:///mnt/nfs/local.hdb
|
||||
|
||||
# This option allows you to easily point freshclam to private mirrors.
|
||||
# If PrivateMirror is set, freshclam does not attempt to use DNS
|
||||
# to determine whether its databases are out-of-date, instead it will
|
||||
# use the If-Modified-Since request or directly check the headers of the
|
||||
# remote database files. For each database, freshclam first attempts
|
||||
# to download the CLD file. If that fails, it tries to download the
|
||||
# CVD file. This option overrides DatabaseMirror, DNSDatabaseInfo
|
||||
# and ScriptedUpdates. It can be used multiple times to provide
|
||||
# fall-back mirrors.
|
||||
# Default: disabled
|
||||
#PrivateMirror mirror1.example.com
|
||||
#PrivateMirror mirror2.example.com
|
||||
|
||||
# Number of database checks per day.
|
||||
# Default: 12 (every two hours)
|
||||
Checks 1
|
||||
|
||||
# Proxy settings
|
||||
# The HTTPProxyServer may be prefixed with [scheme]:// to specify which kind
|
||||
# of proxy is used.
|
||||
# http:// HTTP Proxy. Default when no scheme or proxy type is specified.
|
||||
# https:// HTTPS Proxy. (Added in 7.52.0 for OpenSSL, GnuTLS and NSS)
|
||||
# socks4:// SOCKS4 Proxy.
|
||||
# socks4a:// SOCKS4a Proxy. Proxy resolves URL hostname.
|
||||
# socks5:// SOCKS5 Proxy.
|
||||
# socks5h:// SOCKS5 Proxy. Proxy resolves URL hostname.
|
||||
# Default: disabled
|
||||
#HTTPProxyServer https://proxy.example.com
|
||||
#HTTPProxyPort 1234
|
||||
#HTTPProxyUsername myusername
|
||||
#HTTPProxyPassword mypass
|
||||
|
||||
# If your servers are behind a firewall/proxy which applies User-Agent
|
||||
# filtering you can use this option to force the use of a different
|
||||
# User-Agent header.
|
||||
# As of ClamAV 0.103.3, this setting may not be used when updating from the
|
||||
# clamav.net CDN and can only be used when updating from a private mirror.
|
||||
# Default: clamav/version_number (OS: ..., ARCH: ..., CPU: ..., UUID: ...)
|
||||
#HTTPUserAgent SomeUserAgentIdString
|
||||
|
||||
# Use aaa.bbb.ccc.ddd as client address for downloading databases. Useful for
|
||||
# multi-homed systems.
|
||||
# Default: Use the OS's default outgoing IP address.
|
||||
#LocalIPAddress aaa.bbb.ccc.ddd
|
||||
|
||||
# Send the RELOAD command to clamd.
|
||||
# Default: no
|
||||
NotifyClamd /usr/local/etc/clamav/clamd.conf
|
||||
|
||||
# Run command after successful database update.
|
||||
# Use EXIT_1 to return 1 after successful database update.
|
||||
# Default: disabled
|
||||
#OnUpdateExecute command
|
||||
|
||||
# Run command when database update process fails.
|
||||
# Default: disabled
|
||||
#OnErrorExecute command
|
||||
|
||||
# Run command when freshclam reports outdated version.
|
||||
# In the command string %v will be replaced by the new version number.
|
||||
# Default: disabled
|
||||
#OnOutdatedExecute command
|
||||
|
||||
# Don't fork into background.
|
||||
# Default: no
|
||||
#Foreground yes
|
||||
|
||||
# Enable debug messages in libclamav.
|
||||
# Default: no
|
||||
#Debug yes
|
||||
|
||||
# Timeout in seconds when connecting to database server.
|
||||
# Default: 30
|
||||
#ConnectTimeout 60
|
||||
|
||||
# Timeout in seconds when reading from database server. 0 means no timeout.
|
||||
# Default: 60
|
||||
#ReceiveTimeout 300
|
||||
|
||||
# With this option enabled, freshclam will attempt to load new databases into
|
||||
# memory to make sure they are properly handled by libclamav before replacing
|
||||
# the old ones.
|
||||
# Tip: This feature uses a lot of RAM. If your system has limited RAM and you
|
||||
# are actively running ClamD or ClamScan during the update, then you may need
|
||||
# to set `TestDatabases no`.
|
||||
# Default: yes
|
||||
#TestDatabases no
|
||||
|
||||
# This option enables downloading of bytecode.cvd, which includes additional
|
||||
# detection mechanisms and improvements to the ClamAV engine.
|
||||
# Default: yes
|
||||
#Bytecode no
|
||||
|
||||
# Include an optional signature database (opt-in).
|
||||
# This option can be used multiple times.
|
||||
#ExtraDatabase dbname1
|
||||
#ExtraDatabase dbname2
|
||||
|
||||
# Exclude a standard signature database (opt-out).
|
||||
# This option can be used multiple times.
|
||||
#ExcludeDatabase dbname1
|
||||
#ExcludeDatabase dbname2
|
|
@ -1,3 +0,0 @@
# TODO

Configure this sftpgo.json file and copy over to `/usr/local/etc/sftpgo/sftpgo.json` (macOS)

@ -1,5 +0,0 @@
WARNING! Authorized use only. Your IP address has been logged.

If you choose to ignore this warning and discover a vulnerability
that you can explain how to remediate, then please contact brian@megabyte.space
for a bounty.

@ -186,7 +186,7 @@ ensureBasicDeps() {
### This temporary file prompts the 'softwareupdate' utility to list the Command Line Tools
touch /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress;
XCODE_PKG="$(softwareupdate -l | grep "\*.*Command Line" | tail -n 1 | sed 's/^[^C]* //')"
logg info "Installing from softwareupdate" && softwareupdate -i "$XCODE_PKG" && logg success "Successfully installed $XCODE_PKG"
logg info "Installing from softwareupdate" && softwareupdate -i "$XCODE_PKG" && gum log -sl info "Successfully installed $XCODE_PKG"
fi
if /usr/bin/pgrep -q oahd; then
logg info 'Rosetta 2 is already installed'

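The temporary in-progress file in this hunk is the trick that makes softwareupdate offer the Command Line Tools at all; a minimal standalone sketch of the same sequence, with a cleanup step the hunk itself does not show (an assumption about what the surrounding script does elsewhere):

touch /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress
softwareupdate -l   # the Command Line Tools package should now appear in the listing
rm -f /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress   # cleanup; assumed, not part of the hunk above
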
@ -365,7 +365,7 @@ ensureFullDiskAccess() {
fi
exit 0
else
logg success 'Current terminal has full disk access'
gum log -sl info 'Current terminal has full disk access'
if [ -f "$HOME/.zshrc" ]; then
if command -v gsed > /dev/null; then
sudo gsed -i '/# TEMPORARY FOR INSTALL DOCTOR MACOS/d' "$HOME/.zshrc" || logg warn "Failed to remove kickstart script from .zshrc"

@ -396,7 +396,7 @@ importCloudFlareCert() {
security verify-cert -c "$CRT_TMP" > /dev/null 2>&1
if [ $? != 0 ]; then
logg info '**macOS Manual Security Permission** Requesting security authorization for Cloudflare trusted certificate'
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain "$CRT_TMP" && logg success 'Successfully imported Cloudflare_CA.crt into System.keychain'
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain "$CRT_TMP" && gum log -sl info 'Successfully imported Cloudflare_CA.crt into System.keychain'
fi

### Remove temporary file, if necessary

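To double-check the import performed in this hunk, the certificate can be looked up in the System keychain afterwards — a minimal sketch (matching on the certificate's common name, which is assumed to contain "Cloudflare"):

security find-certificate -c "Cloudflare" /Library/Keychains/System.keychain   # prints the certificate entry once the import succeeded
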
@ -427,7 +427,7 @@ setCIEnvironmentVariables() {
ensureWarpDisconnected() {
if command -v warp-cli > /dev/null; then
if warp-cli status | grep 'Connected' > /dev/null; then
logg info "Disconnecting from WARP" && warp-cli disconnect && logg success "Disconnected WARP to prevent conflicts"
logg info "Disconnecting from WARP" && warp-cli disconnect && gum log -sl info "Disconnected WARP to prevent conflicts"
fi
fi
}

@ -711,7 +711,7 @@ runChezmoi() {
if [ -n "$CHEZMOI_EXIT_CODE" ]; then
logg error "Chezmoi encountered an error and exited with an exit code of $CHEZMOI_EXIT_CODE"
else
logg success 'Finished provisioning the system'
gum log -sl info 'Finished provisioning the system'
fi
}

@ -726,7 +726,7 @@ removePasswordlessSudo() {

# @description Render the `docs/terminal/post-install.md` file to the terminal at the end of the provisioning process
postProvision() {
logg success 'Provisioning complete!'
gum log -sl info 'Provisioning complete!'
if command -v glow > /dev/null && [ -f "${XDG_DATA_HOME:-$HOME/.local/share}/chezmoi/docs/terminal/post-install.md" ]; then
glow "${XDG_DATA_HOME:-$HOME/.local/share}/chezmoi/docs/terminal/post-install.md"
fi

@ -153,7 +153,7 @@ ensureBasicDeps() {
### This temporary file prompts the 'softwareupdate' utility to list the Command Line Tools
touch /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress;
XCODE_PKG="$(softwareupdate -l | grep "\*.*Command Line" | tail -n 1 | sed 's/^[^C]* //')"
logg info "Installing from softwareupdate" && softwareupdate -i "$XCODE_PKG" && logg success "Successfully installed $XCODE_PKG"
logg info "Installing from softwareupdate" && softwareupdate -i "$XCODE_PKG" && gum log -sl info "Successfully installed $XCODE_PKG"
fi
if /usr/bin/pgrep -q oahd; then
logg info 'Rosetta 2 is already installed'

@ -122,7 +122,7 @@ ensureBasicDeps() {
### This temporary file prompts the 'softwareupdate' utility to list the Command Line Tools
touch /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress;
XCODE_PKG="$(softwareupdate -l | grep "\*.*Command Line" | tail -n 1 | sed 's/^[^C]* //')"
logg info "Installing from softwareupdate" && softwareupdate -i "$XCODE_PKG" && logg success "Successfully installed $XCODE_PKG"
logg info "Installing from softwareupdate" && softwareupdate -i "$XCODE_PKG" && gum log -sl info "Successfully installed $XCODE_PKG"
fi
if /usr/bin/pgrep -q oahd; then
logg info 'Rosetta 2 is already installed'

@ -36,7 +36,7 @@ ensureBasicDeps() {
### This temporary file prompts the 'softwareupdate' utility to list the Command Line Tools
touch /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress;
XCODE_PKG="$(softwareupdate -l | grep "\*.*Command Line" | tail -n 1 | sed 's/^[^C]* //')"
logg info "Installing from softwareupdate" && softwareupdate -i "$XCODE_PKG" && logg success "Successfully installed $XCODE_PKG"
logg info "Installing from softwareupdate" && softwareupdate -i "$XCODE_PKG" && gum log -sl info "Successfully installed $XCODE_PKG"
fi
if /usr/bin/pgrep -q oahd; then
logg info 'Rosetta 2 is already installed'

@ -29,7 +29,7 @@ ensureFullDiskAccess() {
fi
exit 0
else
logg success 'Current terminal has full disk access'
gum log -sl info 'Current terminal has full disk access'
if [ -f "$HOME/.zshrc" ]; then
if command -v gsed > /dev/null; then
sudo gsed -i '/# TEMPORARY FOR INSTALL DOCTOR MACOS/d' "$HOME/.zshrc" || logg warn "Failed to remove kickstart script from .zshrc"

@ -17,7 +17,7 @@ importCloudFlareCert() {
security verify-cert -c "$CRT_TMP" > /dev/null 2>&1
if [ $? != 0 ]; then
logg info '**macOS Manual Security Permission** Requesting security authorization for Cloudflare trusted certificate'
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain "$CRT_TMP" && logg success 'Successfully imported Cloudflare_CA.crt into System.keychain'
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain "$CRT_TMP" && gum log -sl info 'Successfully imported Cloudflare_CA.crt into System.keychain'
fi

### Remove temporary file, if necessary

@ -186,7 +186,7 @@ ensureBasicDeps() {
### This temporary file prompts the 'softwareupdate' utility to list the Command Line Tools
touch /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress;
XCODE_PKG="$(softwareupdate -l | grep "\*.*Command Line" | tail -n 1 | sed 's/^[^C]* //')"
logg info "Installing from softwareupdate" && softwareupdate -i "$XCODE_PKG" && logg success "Successfully installed $XCODE_PKG"
logg info "Installing from softwareupdate" && softwareupdate -i "$XCODE_PKG" && gum log -sl info "Successfully installed $XCODE_PKG"
fi
if /usr/bin/pgrep -q oahd; then
logg info 'Rosetta 2 is already installed'

@ -365,7 +365,7 @@ ensureFullDiskAccess() {
fi
exit 0
else
logg success 'Current terminal has full disk access'
gum log -sl info 'Current terminal has full disk access'
if [ -f "$HOME/.zshrc" ]; then
if command -v gsed > /dev/null; then
sudo gsed -i '/# TEMPORARY FOR INSTALL DOCTOR MACOS/d' "$HOME/.zshrc" || logg warn "Failed to remove kickstart script from .zshrc"

@ -396,7 +396,7 @@ importCloudFlareCert() {
|
|||
security verify-cert -c "$CRT_TMP" > /dev/null 2>&1
|
||||
if [ $? != 0 ]; then
|
||||
logg info '**macOS Manual Security Permission** Requesting security authorization for Cloudflare trusted certificate'
|
||||
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain "$CRT_TMP" && logg success 'Successfully imported Cloudflare_CA.crt into System.keychain'
|
||||
sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain "$CRT_TMP" && gum log -sl info 'Successfully imported Cloudflare_CA.crt into System.keychain'
|
||||
fi
|
||||
|
||||
### Remove temporary file, if necessary
|
||||
|
@ -428,7 +428,7 @@ ensureWarpDisconnected() {
|
|||
if [ -z "$DEBUG" ]; then
|
||||
if command -v warp-cli > /dev/null; then
|
||||
if warp-cli status | grep 'Connected' > /dev/null; then
|
||||
logg info "Disconnecting from WARP" && warp-cli disconnect && logg success "Disconnected WARP to prevent conflicts"
|
||||
logg info "Disconnecting from WARP" && warp-cli disconnect && gum log -sl info "Disconnected WARP to prevent conflicts"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
@ -713,7 +713,7 @@ runChezmoi() {
|
|||
if [ -n "$CHEZMOI_EXIT_CODE" ]; then
|
||||
logg error "Chezmoi encountered an error and exitted with an exit code of $CHEZMOI_EXIT_CODE"
|
||||
else
|
||||
logg success 'Finished provisioning the system'
|
||||
gum log -sl info 'Finished provisioning the system'
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -728,7 +728,7 @@ removePasswordlessSudo() {
|
|||
|
||||
# @description Render the `docs/terminal/post-install.md` file to the terminal at the end of the provisioning process
|
||||
postProvision() {
|
||||
logg success 'Provisioning complete!'
|
||||
gum log -sl info 'Provisioning complete!'
|
||||
if command -v glow > /dev/null && [ -f "${XDG_DATA_HOME:-$HOME/.local/share}/chezmoi/docs/terminal/post-install.md" ]; then
|
||||
glow "${XDG_DATA_HOME:-$HOME/.local/share}/chezmoi/docs/terminal/post-install.md"
|
||||
fi
|
||||
|
|
|
@ -2436,7 +2436,7 @@ softwarePackages:
|
|||
_github: https://github.com/docker/compose
|
||||
_home: https://www.docker.com/
|
||||
_name: Docker Desktop
|
||||
_post: "#!/usr/bin/env bash\n# @file DockerHub Login\n# @brief Logs into DockerHub for Docker Desktop\n# @description\n# This script logs into DockerHub so that Docker Desktop is pre-authenticated. This\n# functionality requires that the `DOCKERHUB_USER` be passed in as an environment variable (or \n# directly editted in the `~/.config/chezmoi/chezmoi.yaml` file) and that the `DOCKERHUB_TOKEN`\n# be passed in as a secret (either via the encrypted secret method or passed in as an environment\n# variable).\n\n### Login to DockerHub\nif command -v docker > /dev/null; then\n DOCKERHUB_TOKEN=\"{{ if (stat (joinPath .chezmoi.sourceDir \".chezmoitemplates\" \"secrets\" \"DOCKERHUB_TOKEN\")) }}{{- includeTemplate \"secrets/DOCKERHUB_TOKEN\" | decrypt | trim -}}{{ else }}{{- env \"DOCKERHUB_TOKEN\" -}}{{ end }}\"\n DOCKERHUB_USER=\"{{ .user.docker.username }}\"\n if [ -d \"/Applications/Docker.app\" ] || [ -d \"$HOME/Applications/Docker.app\" ]; then\n gum log -sl info 'Ensuring Docker.app is open' && open --background -a Docker --args --accept-license --unattended\n fi\n gum log -sl info 'Headlessly authenticating with DockerHub registry' && echo \"$DOCKERHUB_TOKEN\" | docker login -u \"$DOCKERHUB_USER\" --password-stdin > /dev/null && logg success 'Successfully authenticated with DockerHub registry'\nfi\n\n### Symlink on macOS\nif [ -f \"$HOME/Library/Containers/com.docker.docker/Data/docker.raw.sock\" ]; then\n gum log -sl info 'Symlinking /var/run/docker.sock to macOS Library location' && sudo ln -s \"$HOME/Library/Containers/com.docker.docker/Data/docker.raw.sock\" /var/run/docker.sock\nfi\n"
|
||||
_post: "#!/usr/bin/env bash\n# @file DockerHub Login\n# @brief Logs into DockerHub for Docker Desktop\n# @description\n# This script logs into DockerHub so that Docker Desktop is pre-authenticated. This\n# functionality requires that the `DOCKERHUB_USER` be passed in as an environment variable (or \n# directly editted in the `~/.config/chezmoi/chezmoi.yaml` file) and that the `DOCKERHUB_TOKEN`\n# be passed in as a secret (either via the encrypted secret method or passed in as an environment\n# variable).\n\n### Login to DockerHub\nif command -v docker > /dev/null; then\n DOCKERHUB_TOKEN=\"{{ if (stat (joinPath .chezmoi.sourceDir \".chezmoitemplates\" \"secrets\" \"DOCKERHUB_TOKEN\")) }}{{- includeTemplate \"secrets/DOCKERHUB_TOKEN\" | decrypt | trim -}}{{ else }}{{- env \"DOCKERHUB_TOKEN\" -}}{{ end }}\"\n DOCKERHUB_USER=\"{{ .user.docker.username }}\"\n if [ -d \"/Applications/Docker.app\" ] || [ -d \"$HOME/Applications/Docker.app\" ]; then\n gum log -sl info 'Ensuring Docker.app is open' && open --background -a Docker --args --accept-license --unattended\n fi\n gum log -sl info 'Headlessly authenticating with DockerHub registry' && echo \"$DOCKERHUB_TOKEN\" | docker login -u \"$DOCKERHUB_USER\" --password-stdin > /dev/null && gum log -sl info 'Successfully authenticated with DockerHub registry'\nfi\n\n### Symlink on macOS\nif [ -f \"$HOME/Library/Containers/com.docker.docker/Data/docker.raw.sock\" ]; then\n gum log -sl info 'Symlinking /var/run/docker.sock to macOS Library location' && sudo ln -s \"$HOME/Library/Containers/com.docker.docker/Data/docker.raw.sock\" /var/run/docker.sock\nfi\n"
|
||||
apt: https://desktop.docker.com/linux/main/amd64/docker-desktop-4.16.2-amd64.deb
|
||||
cask: docker
|
||||
choco: docker-desktop
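The _post blob above only performs the DockerHub login when DOCKERHUB_USER and DOCKERHUB_TOKEN are available to chezmoi. A minimal, hypothetical sketch of supplying them before provisioning (values are placeholders, not part of this commit):
export DOCKERHUB_USER='example-user'       # assumed placeholder DockerHub username
export DOCKERHUB_TOKEN='dckr_pat_example'  # assumed placeholder access token; the encrypted-secret path is preferable
chezmoi apply                              # the Docker Desktop _post hook then authenticates non-interactively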
|
||||
|
@ -2468,7 +2468,7 @@ softwarePackages:
|
|||
mkdir -p "${XDG_CONFIG_HOME:-$HOME/.config}/docker/cli-plugins"
|
||||
gum log -sl info 'Downloading Docker push-rm' && curl https://github.com/christian-korneck/docker-pushrm/releases/download/$RELEASE_TAG/docker-pushrm_darwin_amd64 -o "${XDG_CONFIG_HOME:-$HOME/.config}/docker/cli-plugins/docker-pushrm"
|
||||
chmod +x "${XDG_CONFIG_HOME:-$HOME/.config}/docker/cli-plugins/docker-pushrm"
|
||||
logg success 'Added Docker push-rm'
|
||||
gum log -sl info 'Added Docker push-rm'
|
||||
else
|
||||
gum log -sl info 'Docker push-rm already added'
|
||||
fi
|
||||
|
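The hunk above drops docker-pushrm into the Docker CLI plugin directory so it becomes available as a docker subcommand. A hypothetical use afterwards (the repository name is a placeholder, not from this commit):
docker pushrm example-user/example-image   # pushes the README.md in the current directory to the repo's Docker Hub description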
@ -2803,7 +2803,7 @@ softwarePackages:
|
|||
if [ -n "$CONFIGURE_EXIT_CODE" ]; then
|
||||
gum log -sl error 'Configuring endlessh service failed' && exit 1
|
||||
else
|
||||
logg success 'Successfully configured endlessh service'
|
||||
gum log -sl info 'Successfully configured endlessh service'
|
||||
fi
|
||||
elif [ -f /etc/endlessh.conf ]; then
|
||||
gum log -sl info 'Copying ~/.ssh/endlessh/config to /etc/endlessh.conf' && sudo cp -f "$HOME/.ssh/endlessh/config" /etc/endlessh.conf
|
||||
|
@ -2812,7 +2812,7 @@ softwarePackages:
|
|||
if [ -n "$CONFIGURE_EXIT_CODE" ]; then
|
||||
gum log -sl error 'Configuring endlessh service failed' && exit 1
|
||||
else
|
||||
logg success 'Successfully configured endlessh service'
|
||||
gum log -sl info 'Successfully configured endlessh service'
|
||||
fi
|
||||
else
|
||||
gum log -sl warn 'Neither the /etc/endlessh folder nor the /etc/endlessh.conf file exist'
|
||||
|
@ -3575,7 +3575,7 @@ softwarePackages:
|
|||
# rm -f profile.private.tar.gz.age
|
||||
# gum log -sl info 'Decompressing the Firefox private profile'
|
||||
# tar -xzf profile.private.tar.gz
|
||||
# logg success 'The Firefox private profile was successfully installed'
|
||||
# gum log -sl info 'The Firefox private profile was successfully installed'
|
||||
# cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/firefox/user.js" "$SETTINGS_DIR/profile.private"
|
||||
# gum log -sl info 'Copied ~/.config/firefox/user.js to profile.private profile'
|
||||
# else
|
||||
|
@ -3616,7 +3616,7 @@ softwarePackages:
|
|||
# to the user profile.
|
||||
# gum log -sl info 'Unzipping '"$PLUGIN_FILENAME"' ('"$FIREFOX_PLUGIN"')'
|
||||
# unzip "$SETTINGS_DIR/$SETTINGS_PROFILE/extensions/$PLUGIN_FILENAME" -d "$SETTINGS_DIR/$SETTINGS_PROFILE/extensions/$PLUGIN_FOLDER"
|
||||
logg success 'Installed '"$FIREFOX_PLUGIN"''
|
||||
gum log -sl info 'Installed '"$FIREFOX_PLUGIN"''
|
||||
fi
|
||||
else
|
||||
gum log -sl warn 'A null Firefox add-on filename was detected for '"$FIREFOX_PLUGIN"''
|
||||
|
@ -4277,9 +4277,9 @@ softwarePackages:
|
|||
fi
|
||||
### Install / start the service
|
||||
gum log -sl info 'Configuring runner service'
|
||||
"${XDG_DATA_HOME:-$HOME/.local/share}/github-runner/svc.sh" install && logg success 'Successfully installed the GitHub Actions runner service'
|
||||
"${XDG_DATA_HOME:-$HOME/.local/share}/github-runner/svc.sh" install && gum log -sl info 'Successfully installed the GitHub Actions runner service'
|
||||
gum log -sl info 'Starting runner service'
|
||||
"${XDG_DATA_HOME:-$HOME/.local/share}/github-runner/svc.sh" start && logg success 'Started the GitHub Actions runner service'
|
||||
"${XDG_DATA_HOME:-$HOME/.local/share}/github-runner/svc.sh" start && gum log -sl info 'Started the GitHub Actions runner service'
|
||||
else
|
||||
gum log -sl warn 'jq is required by the GitHub runner configuration script'
|
||||
fi
|
||||
|
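svc.sh is the service helper shipped with the GitHub Actions runner: install registers it as a system service and start launches it. A hypothetical follow-up check using the same path as above, not part of this commit:
"${XDG_DATA_HOME:-$HOME/.local/share}/github-runner/svc.sh" status   # confirm the runner service is registered and running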
@ -8554,10 +8554,10 @@ softwarePackages:
|
|||
if command -v update-alternatives > /dev/null; then
|
||||
if [ -f "/usr/local/share/plymouth/themes/{{ .theme }}/{{ .theme }}.plymouth" ]; then
|
||||
sudo update-alternatives --install /usr/share/plymouth/themes/default.plymouth default.plymouth "/usr/local/share/plymouth/themes/{{ .theme }}/{{ .theme }}.plymouth" 100
|
||||
logg success 'Installed default.plymouth'
|
||||
gum log -sl info 'Installed default.plymouth'
|
||||
# Required sometimes
|
||||
sudo update-alternatives --set default.plymouth "/usr/local/share/plymouth/themes/{{ .theme }}/{{ .theme }}.plymouth"
|
||||
logg success 'Set default.plymouth'
|
||||
gum log -sl info 'Set default.plymouth'
|
||||
else
|
||||
gum log -sl warn "/usr/local/share/plymouth/themes/{{ .theme }}/{{ .theme }}.plymouth does not exist!"
|
||||
fi
|
||||
|
@ -8590,7 +8590,7 @@ softwarePackages:
|
|||
if [ -n "$EXIT_CODE" ]; then
|
||||
gum log -sl warn 'There may have been an issue while setting the Plymouth default theme with plymouth-set-default-theme'
|
||||
else
|
||||
logg success 'Set Plymouth default theme with plymouth-set-default-theme'
|
||||
gum log -sl info 'Set Plymouth default theme with plymouth-set-default-theme'
|
||||
fi
|
||||
else
|
||||
gum log -sl warn 'Could not apply default Plymouth theme because plymouth-set-default-theme is missing'
|
||||
|
@ -8601,7 +8601,7 @@ softwarePackages:
|
|||
if [ -f "/usr/local/share/plymouth/themes/{{ .theme }}/{{ .theme }}.plymouth" ]; then
|
||||
# Required sometimes
|
||||
sudo update-alternatives --set default.plymouth "/usr/local/share/plymouth/themes/{{ .theme }}/{{ .theme }}.plymouth"
|
||||
logg success 'Set default.plymouth (second time is required sometimes)'
|
||||
gum log -sl info 'Set default.plymouth (second time is required sometimes)'
|
||||
else
|
||||
gum log -sl warn "/usr/local/share/plymouth/themes/{{ .theme }}/{{ .theme }}.plymouth does not exist!"
|
||||
fi
|
||||
|
@ -8615,11 +8615,11 @@ softwarePackages:
|
|||
if command -v update-initramfs > /dev/null; then
|
||||
gum log -sl info 'Running sudo update-initramfs -u'
|
||||
sudo update-initramfs -u
|
||||
logg success 'Updated kernel / initrd images for Plymouth'
|
||||
gum log -sl info 'Updated kernel / initrd images for Plymouth'
|
||||
elif command -v dracut > /dev/null; then
|
||||
gum log -sl info 'Running sudo dracut --regenerate-all -f'
|
||||
sudo dracut --regenerate-all -f
|
||||
logg success 'Updated kernel / initrd images for Plymouth'
|
||||
gum log -sl info 'Updated kernel / initrd images for Plymouth'
|
||||
else
|
||||
gum log -sl warn 'Unable to update kernel / initrd images because neither update-initramfs nor dracut is available'
|
||||
fi
|
||||
|
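Once a theme is set and the initrd regenerated with update-initramfs or dracut, the result can be confirmed. A hypothetical check, not part of this commit:
plymouth-set-default-theme          # with no arguments, prints the theme currently set as default
plymouth-set-default-theme --list   # lists the themes Plymouth can find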
@ -8883,7 +8883,7 @@ softwarePackages:
|
|||
if [ -f "${XDG_CONFIG_HOME:-$HOME/.config}/postfix/com.apple.postfix.master.plist" ] && ! sudo launchctl list | grep 'postfix.master' > /dev/null; then
|
||||
gum log -sl info 'Copying com.apple.postfix.master.plist'
|
||||
sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/postfix/com.apple.postfix.master.plist" /System/Library/LaunchDaemons/com.apple.postfix.master.plist
|
||||
sudo launchctl load /System/Library/LaunchDaemons/com.apple.postfix.master.plist && logg success 'launchctl load of com.apple.postfix.master successful'
|
||||
sudo launchctl load /System/Library/LaunchDaemons/com.apple.postfix.master.plist && gum log -sl info 'launchctl load of com.apple.postfix.master successful'
|
||||
fi
|
||||
if ! sudo postfix status > /dev/null; then
|
||||
gum log -sl info 'Starting postfix'
|
||||
|
@ -9520,17 +9520,17 @@ softwarePackages:
|
|||
gum log -sl info 'Adding /Volumes/Private as S3 bucket mount, enabled at boot'
|
||||
sudo mkdir -p /Library/LaunchDaemons
|
||||
sudo cp -f "$HOME/Library/LaunchDaemons/rclone.private.plist" '/Library/LaunchDaemons/rclone.private.plist'
|
||||
sudo launchctl load '/Library/LaunchDaemons/rclone.private.plist' && logg success 'launchctl load successful'
|
||||
sudo launchctl load '/Library/LaunchDaemons/rclone.private.plist' && gum log -sl info 'launchctl load successful'
|
||||
fi
|
||||
if [ -f "$HOME/Library/LaunchDaemons/rclone.public.plist" ] && [ ! -f "/Library/LaunchDaemons/rclone.public.plist" ]; then
|
||||
gum log -sl info 'Adding /Volumes/Public as S3 bucket mount, enabled at boot'
|
||||
sudo mkdir -p /Library/LaunchDaemons
|
||||
sudo cp -f "$HOME/Library/LaunchDaemons/rclone.public.plist" '/Library/LaunchDaemons/rclone.public.plist'
|
||||
sudo launchctl load '/Library/LaunchDaemons/rclone.public.plist' && logg success 'launchctl load successful'
|
||||
sudo launchctl load '/Library/LaunchDaemons/rclone.public.plist' && gum log -sl info 'launchctl load successful'
|
||||
fi
|
||||
if [ -f "$HOME/Library/LaunchDaemons/rclone.user.plist" ] && ! launchctl list | grep 'rclone.user' > /dev/null; then
|
||||
gum log -sl info 'Adding ~/Public as S3 bucket mount, enabled at boot'
|
||||
launchctl load "$HOME/Library/LaunchDaemons/rclone.user.plist" && logg success 'user launchctl load successful'
|
||||
launchctl load "$HOME/Library/LaunchDaemons/rclone.user.plist" && gum log -sl info 'user launchctl load successful'
|
||||
fi
|
||||
elif [ -d /etc/systemd/system ]; then
|
||||
find "${XDG_CONFIG_HOME:-$HOME/.config}/rclone/system" -mindepth 1 -maxdepth 1 -type f | while read RCLONE_SERVICE; do
|
||||
|
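Each rclone plist above registers an S3 bucket mount with launchd so it is re-established at boot. A hypothetical verification after provisioning, not part of this commit:
sudo launchctl list | grep rclone   # system daemons backing /Volumes/Private and /Volumes/Public
launchctl list | grep rclone        # user agent backing ~/Public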
@ -10050,9 +10050,9 @@ softwarePackages:
|
|||
|
||||
### Copy the Samba server configuration file
|
||||
if [ -d /Applications ] && [ -d /System ]; then
|
||||
sudo sharing -a "$PRIVATE_SHARE" -S "Private (System)" -n "Private (System)" -g 000 -s 001 -E 1 -R 1 && logg success "Configured $PRIVATE_SHARE as a private Samba share" || gum log -sl info 'sharing command failed - it is likely that the share was already set up'
|
||||
sudo sharing -a "$PUBLIC_SHARE" -S "Public (System)" -n "Public (System)" -g 001 -s 001 -E 1 -R 0 && logg success "Configured $PUBLIC_SHARE as a public Samba share" || gum log -sl info 'sharing command failed - it is likely that the share was already set up'
|
||||
sudo sharing -a "$HOME/Public" -S "Public (User)" -n "Public (User)" -g 001 -s 001 -E 1 -R 0 && logg success "Configured $HOME/Public as a public Samba share" || gum log -sl info 'sharing command failed - it is likely that the share was already set up'
|
||||
sudo sharing -a "$PRIVATE_SHARE" -S "Private (System)" -n "Private (System)" -g 000 -s 001 -E 1 -R 1 && gum log -sl info "Configured $PRIVATE_SHARE as a private Samba share" || gum log -sl info 'sharing command failed - it is likely that the share was already set up'
|
||||
sudo sharing -a "$PUBLIC_SHARE" -S "Public (System)" -n "Public (System)" -g 001 -s 001 -E 1 -R 0 && gum log -sl info "Configured $PUBLIC_SHARE as a public Samba share" || gum log -sl info 'sharing command failed - it is likely that the share was already set up'
|
||||
sudo sharing -a "$HOME/Public" -S "Public (User)" -n "Public (User)" -g 001 -s 001 -E 1 -R 0 && gum log -sl info "Configured $HOME/Public as a public Samba share" || gum log -sl info 'sharing command failed - it is likely that the share was already set up'
|
||||
else
|
||||
gum log -sl info "Copying Samba server configuration to /etc/samba/smb.conf"
|
||||
sudo cp -f "${XDG_CONFIG_HOME:-$HOME/.config}/samba/config" "/etc/samba/smb.conf"
|
||||
|
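On macOS the shares are registered through the built-in sharing utility instead of an smb.conf. Hypothetical checks of either path, not part of this commit:
sudo sharing -l                   # macOS: list the shares created above
testparm -s /etc/samba/smb.conf   # Linux: validate the copied Samba configuration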
@ -11106,7 +11106,7 @@ softwarePackages:
|
|||
gum log -sl info 'Installing Tabby plugins defined in '"${XDG_CONFIG_HOME:-$HOME/.config}/tabby/plugins/package.json"''
|
||||
cd "${XDG_CONFIG_HOME:-$HOME/.config}/tabby/plugins"
|
||||
npm install --quiet
|
||||
logg success 'Finished installing Tabby plugins'
|
||||
gum log -sl info 'Finished installing Tabby plugins'
|
||||
fi
|
||||
else
|
||||
gum log -sl info 'Skipping Tabby plugin installation because npm is not present'
|
||||
|
@ -11153,7 +11153,7 @@ softwarePackages:
|
|||
if [ -n "$EXIT_CODE" ]; then
|
||||
gum log -sl warn 'tailscale up timed out'
|
||||
else
|
||||
logg success 'Connected to Tailscale network'
|
||||
gum log -sl info 'Connected to Tailscale network'
|
||||
fi
|
||||
fi
|
||||
_service:brew: tailscale
|
||||
|
@ -12008,7 +12008,7 @@ softwarePackages:
|
|||
if [ -f /tmp/vbox/Oracle_VM_VirtualBox_Extension_Pack-$VBOX_VERSION.vbox-extpack ]; then
|
||||
gum log -sl info 'Installing VirtualBox extension pack'
|
||||
echo 'y' | sudo VBoxManage extpack install --replace /tmp/vbox/Oracle_VM_VirtualBox_Extension_Pack-$VBOX_VERSION.vbox-extpack
|
||||
logg success 'Successfully installed VirtualBox extension pack'
|
||||
gum log -sl info 'Successfully installed VirtualBox extension pack'
|
||||
fi
|
||||
else
|
||||
gum log -sl info 'VirtualBox Extension pack is already installed'
|
||||
|
@ -12142,7 +12142,7 @@ softwarePackages:
|
|||
### Build VMWare host modules
|
||||
gum log -sl info 'Building VMware host modules'
|
||||
if sudo vmware-modconfig --console --install-all; then
|
||||
logg success 'Built VMWare host modules successfully with sudo vmware-modconfig --console --install-all'
|
||||
gum log -sl info 'Built VMWare host modules successfully with sudo vmware-modconfig --console --install-all'
|
||||
else
|
||||
gum log -sl info 'Acquiring VMware version from CLI'
|
||||
VMW_VERSION="$(vmware --version | cut -f 3 -d' ')"
|
||||
|
@ -12154,7 +12154,7 @@ softwarePackages:
|
|||
gum log -sl info 'Running sudo make and sudo make install'
|
||||
sudo make
|
||||
sudo make install
|
||||
logg success 'Successfully configured VMware host module patches'
|
||||
gum log -sl info 'Successfully configured VMware host module patches'
|
||||
fi
|
||||
|
||||
### Sign VMware host modules if Secure Boot is enabled
|
||||
|
@ -12166,7 +12166,7 @@ softwarePackages:
|
|||
"/usr/src/linux-headers-$(uname -r)/scripts/sign-file" sha256 ./MOK.priv ./MOK.der "$(modinfo -n vmmon)"
|
||||
"/usr/src/linux-headers-$(uname -r)/scripts/sign-file" sha256 ./MOK.priv ./MOK.der "$(modinfo -n vmnet)"
|
||||
echo '' | mokutil --import MOK.der
|
||||
logg success 'Successfully signed VMware host modules. Reboot the host before powering on VMs'
|
||||
gum log -sl info 'Successfully signed VMware host modules. Reboot the host before powering on VMs'
|
||||
fi
|
||||
|
||||
### Patch VMware with Unlocker
|
||||
|
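After the vmmon and vmnet modules are signed and the MOK enrollment is confirmed on the next boot, the signatures can be inspected. A hypothetical check, not part of this commit:
mokutil --list-enrolled | grep -i subject   # confirm the newly enrolled machine owner key
modinfo vmmon | grep -i sig                 # signed modules expose signer and signature fields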
@ -12181,7 +12181,7 @@ softwarePackages:
|
|||
cd linux
|
||||
gum log -sl info 'Running the unlocker'
|
||||
echo "y" | sudo ./unlock
|
||||
logg success 'Successfully unlocked VMware for macOS compatibility'
|
||||
gum log -sl info 'Successfully unlocked VMware for macOS compatibility'
|
||||
else
|
||||
gum log -sl info '/usr/lib/vmware/isoimages/darwin.iso is already present on the system so VMware macOS unlocking will not be performed'
|
||||
fi
|
||||
|
@ -12222,7 +12222,7 @@ softwarePackages:
|
|||
else
|
||||
gum log -sl info 'Generating Vagrant VMWare Utility certificates'
|
||||
sudo vagrant-vmware-utility certificate generate
|
||||
logg success 'Generated Vagrant VMWare Utility certificates via vagrant-vmware-utility certificate generate'
|
||||
gum log -sl info 'Generated Vagrant VMWare Utility certificates via vagrant-vmware-utility certificate generate'
|
||||
fi
|
||||
gum log -sl info 'Ensuring the Vagrant VMWare Utility service is enabled'
|
||||
sudo vagrant-vmware-utility service install || EXIT_CODE=$?
|
||||
|
@ -12249,7 +12249,7 @@ softwarePackages:
|
|||
else
|
||||
gum log -sl info 'Agreeing to VMWare Workstation Pro license (without serial number)' && sudo "$VMWARE_WORKSTATION_DIR/tryworkstation-linux-64.sh" --eulas-agreed --console --required
|
||||
fi
|
||||
logg success 'VMware Workstation installed successfully'
|
||||
gum log -sl info 'VMware Workstation installed successfully'
|
||||
else
|
||||
gum log -sl info 'VMware Workstation is already installed'
|
||||
fi
|
||||
|
@ -12371,7 +12371,7 @@ softwarePackages:
|
|||
jq -r '.recommendations[]' "${XDG_CONFIG_HOME:-$HOME/.config}/Code/User/extensions.json" | while read EXTENSION; do
|
||||
if ! echo "$EXTENSIONS" | grep -iF "$EXTENSION" > /dev/null; then
|
||||
gum log -sl info 'Installing Visual Studio Code extension '"$EXTENSION"'' && code --install-extension "$EXTENSION"
|
||||
logg success 'Installed '"$EXTENSION"''
|
||||
gum log -sl info 'Installed '"$EXTENSION"''
|
||||
else
|
||||
gum log -sl info ''"$EXTENSION"' already installed'
|
||||
fi
|
||||
|
@ -12422,7 +12422,7 @@ softwarePackages:
|
|||
EXTENSIONS="$(codium --list-extensions)"
|
||||
jq -r '.recommendations[]' "${XDG_CONFIG_HOME:-$HOME/.config}/Code/User/extensions.json" | while read EXTENSION; do
|
||||
if ! echo "$EXTENSIONS" | grep -iF "$EXTENSION" > /dev/null; then
|
||||
gum log -sl info 'Installing VSCodium extension '"$EXTENSION"'' && codium --install-extension "$EXTENSION" && logg success 'Installed '"$EXTENSION"''
|
||||
gum log -sl info 'Installing VSCodium extension '"$EXTENSION"'' && codium --install-extension "$EXTENSION" && gum log -sl info 'Installed '"$EXTENSION"''
|
||||
else
|
||||
gum log -sl info ''"$EXTENSION"' already installed'
|
||||
fi
|
||||
|
@ -12720,7 +12720,7 @@ softwarePackages:
|
|||
### Connect CloudFlare WARP
|
||||
if warp-cli --accept-tos status | grep 'Disconnected' > /dev/null; then
|
||||
gum log -sl info 'Connecting to CloudFlare WARP'
|
||||
warp-cli --accept-tos connect > /dev/null && logg success 'Connected to CloudFlare WARP'
|
||||
warp-cli --accept-tos connect > /dev/null && gum log -sl info 'Connected to CloudFlare WARP'
|
||||
else
|
||||
gum log -sl info 'Either there is a misconfiguration or the device is already connected with CloudFlare WARP'
|
||||
fi
|
||||
|
|
|
@ -103,7 +103,7 @@ ensureWarpDisconnected() {
|
|||
if [ -z "$DEBUG" ]; then
|
||||
if command -v warp-cli > /dev/null; then
|
||||
if warp-cli status | grep 'Connected' > /dev/null; then
|
||||
logg info "Disconnecting from WARP" && warp-cli disconnect && logg success "Disconnected WARP to prevent conflicts"
|
||||
logg info "Disconnecting from WARP" && warp-cli disconnect && gum log -sl info "Disconnected WARP to prevent conflicts"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
@ -388,7 +388,7 @@ runChezmoi() {
|
|||
if [ -n "$CHEZMOI_EXIT_CODE" ]; then
|
||||
logg error "Chezmoi encountered an error and exitted with an exit code of $CHEZMOI_EXIT_CODE"
|
||||
else
|
||||
logg success 'Finished provisioning the system'
|
||||
gum log -sl info 'Finished provisioning the system'
|
||||
fi
|
||||
}
|
||||
|
||||
|
@ -403,7 +403,7 @@ removePasswordlessSudo() {
|
|||
|
||||
# @description Render the `docs/terminal/post-install.md` file to the terminal at the end of the provisioning process
|
||||
postProvision() {
|
||||
logg success 'Provisioning complete!'
|
||||
gum log -sl info 'Provisioning complete!'
|
||||
if command -v glow > /dev/null && [ -f "${XDG_DATA_HOME:-$HOME/.local/share}/chezmoi/docs/terminal/post-install.md" ]; then
|
||||
glow "${XDG_DATA_HOME:-$HOME/.local/share}/chezmoi/docs/terminal/post-install.md"
|
||||
fi
|
||||
|
|
16 software.yml
|
@ -1496,6 +1496,13 @@ softwarePackages:
|
|||
- clamav-daemon
|
||||
- clamav-freshclam
|
||||
_serviceEnabled: true
|
||||
_post: |
|
||||
if [ -d /Applications ] && [ -d /System ]; then
|
||||
load-service clamav.clamdscan
|
||||
load-service clamav.freshclam
|
||||
load-service clamav.notification
|
||||
fi
|
||||
freshclam
|
||||
_short: "ClamAV is an open-source antivirus engine for detecting trojans, viruses, malware, and other malicious threats on Unix-like systems. "
|
||||
apt:
|
||||
- clamav
|
||||
|
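The new _post block above uses the repo's load-service helper to register the clamav.clamdscan, clamav.freshclam, and clamav.notification LaunchDaemons on macOS and then refreshes the signature database with freshclam. A hypothetical sanity check afterwards, not part of this commit:
sudo launchctl list | grep -i clamav   # confirm the three clamav.* jobs were loaded
freshclam --version                    # confirm the signature updater is on the PATH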
@ -2329,7 +2336,7 @@ softwarePackages:
|
|||
mkdir -p "${XDG_CONFIG_HOME:-$HOME/.config}/docker/cli-plugins"
|
||||
gum log -sl info 'Downloading Docker push-rm' && curl https://github.com/christian-korneck/docker-pushrm/releases/download/$RELEASE_TAG/docker-pushrm_darwin_amd64 -o "${XDG_CONFIG_HOME:-$HOME/.config}/docker/cli-plugins/docker-pushrm"
|
||||
chmod +x "${XDG_CONFIG_HOME:-$HOME/.config}/docker/cli-plugins/docker-pushrm"
|
||||
logg success 'Added Docker push-rm'
|
||||
gum log -sl info 'Added Docker push-rm'
|
||||
else
|
||||
gum log -sl info 'Docker push-rm already added'
|
||||
fi
|
||||
|
@ -2554,6 +2561,9 @@ softwarePackages:
|
|||
_home: https://easyengine.io/
|
||||
_name: EasyEngine
|
||||
_short: "EasyEngine is a command-line tool for setting up and managing WordPress sites on Linux servers efficiently. "
|
||||
_post: |
|
||||
ee config set le-mail "$(get-secret CLOUDFLARE_EMAIL)"
|
||||
ee config set cloudflare-api-key "$(get-secret CLOUDFLARE_API_KEY)"
|
||||
brew: easyengine
|
||||
script:linux: wget -qO ee rt.cx/ee4 && sudo bash ee
|
||||
edex-ui:
|
||||
|
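With le-mail and cloudflare-api-key set by the _post block above, EasyEngine can request Let's Encrypt certificates via Cloudflare DNS. A hypothetical follow-up, not part of this commit (the domain is a placeholder):
sudo ee site create example.com --type=wp --ssl=le   # create a WordPress site with a Let's Encrypt certificate
sudo ee site list                                    # confirm the site was provisioned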
@ -9617,6 +9627,8 @@ softwarePackages:
|
|||
_desc: Fully featured and highly configurable SFTP server with optional HTTP/S, FTP/S and WebDAV support - S3, Google Cloud Storage, Azure Blob
|
||||
_github: https://github.com/drakkan/sftpgo
|
||||
_name: sftpgo
|
||||
_post: |
|
||||
sudo sftpgo initprovider
|
||||
_service: sftpgo
|
||||
_service:brew:
|
||||
- name: sftpgo
|
||||
|
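The sftpgo entry above seeds the data provider with sudo sftpgo initprovider before the service definitions take over. A hypothetical sequence that points it at the config file this commit symlinks into place (commands are illustrative, not from the commit):
sudo sftpgo initprovider --config-file /usr/local/etc/sftpgo/sftpgo.json   # initialize the data provider
sudo systemctl enable --now sftpgo   # Linux: start via systemd
brew services start sftpgo           # macOS: start via Homebrew services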
@ -11347,7 +11359,7 @@ softwarePackages:
|
|||
else
|
||||
gum log -sl info 'Agreeing to VMWare Workstation Pro license (without serial number)' && sudo "$VMWARE_WORKSTATION_DIR/tryworkstation-linux-64.sh" --eulas-agreed --console --required
|
||||
fi
|
||||
logg success 'VMware Workstation installed successfully'
|
||||
gum log -sl info 'VMware Workstation installed successfully'
|
||||
else
|
||||
gum log -sl info 'VMware Workstation is already installed'
|
||||
fi
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
<string>{{ lookPath "clamdscan" }}</string>
|
||||
<string>-i</string>
|
||||
<string>-r</string>
|
||||
<string>--move=/quarantine</string>
|
||||
<string>--move=/opt/quarantine</string>
|
||||
<string>/</string>
|
||||
</array>
|
||||
<key>StartCalendarInterval</key>
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
</array>
|
||||
<key>WatchPaths</key>
|
||||
<array>
|
||||
<string>/quarantine</string>
|
||||
<string>/opt/quarantine</string>
|
||||
</array>
|
||||
<key>AbandonProcessGroup</key>
|
||||
<true/>
|
||||
|
|
3 system/etc/fail2ban/symlink_jail.local (Normal file)
|
@ -0,0 +1,3 @@
|
|||
{{- if lookPath "fail2ban-client" -}}
|
||||
/usr/local/etc/fail2ban/jail.local
|
||||
{{- end -}}
|
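These symlink_ sources are chezmoi templates: when the guarded command is found with lookPath, the rendered content becomes the symlink target, and an empty render means no link is created. A rough manual equivalent of the jail.local template above, for illustration only:
command -v fail2ban-client > /dev/null && sudo ln -sf /usr/local/etc/fail2ban/jail.local /etc/fail2ban/jail.local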
3 system/etc/sftpgo/symlink_env.d.tmpl (Normal file)
|
@ -0,0 +1,3 @@
|
|||
{{- if lookPath "sftpgo" -}}
|
||||
/usr/local/etc/sftpgo/env.d/
|
||||
{{- end -}}
|
3 system/etc/sftpgo/symlink_sftpgo.json.tmpl (Normal file)
|
@ -0,0 +1,3 @@
|
|||
{{- if lookPath "sftpgo" -}}
|
||||
/usr/local/etc/sftpgo/sftpgo.json
|
||||
{{- end -}}
|
|
@ -1 +1,3 @@
|
|||
{{- if lookPath "clamd" -}}
|
||||
/usr/local/etc/clamav/
|
||||
{{- end -}}
|
3 system/etc/symlink_cloudflared (Normal file)
|
@ -0,0 +1,3 @@
|
|||
{{- if lookPath "cloudflared" -}}
|
||||
/usr/local/etc/cloudflared/
|
||||
{{- end -}}
|
23 system/etc/timeshift/timeshift.json (Normal file)
|
@ -0,0 +1,23 @@
|
|||
{
|
||||
"backup_device_uuid": "",
|
||||
"btrfs_mode": "true",
|
||||
"count_boot": "0",
|
||||
"count_daily": "4",
|
||||
"count_hourly": "8",
|
||||
"count_monthly": "0",
|
||||
"count_weekly": "2",
|
||||
"do_first_run": "true",
|
||||
"exclude": [],
|
||||
"exclude-apps": [],
|
||||
"include_btrfs_home": "false",
|
||||
"parent_device_uuid": "",
|
||||
"schedule_boot": "false",
|
||||
"schedule_daily": "false",
|
||||
"schedule_hourly": "false",
|
||||
"schedule_monthly": "false",
|
||||
"schedule_weekly": "false",
|
||||
"snapshot_count": "0",
|
||||
"snapshot_size": "0",
|
||||
"stop_cron_emails": "true"
|
||||
}
|
||||
|
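The timeshift.json above enables BTRFS mode and sets retention counts but leaves every schedule off, so snapshots are taken on demand. A hypothetical manual run against this configuration, not part of the commit:
sudo timeshift --create --comments 'pre-update snapshot' --tags D   # take an on-demand snapshot tagged as daily
sudo timeshift --list                                               # review existing snapshots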
3 system/opt/homebrew/etc/blocky/symlink_config.yml (Normal file)
|
@ -0,0 +1,3 @@
|
|||
{{- if lookPath "blocky" -}}
|
||||
/usr/local/etc/blocky/config.yml
|
||||
{{- end -}}
|
3 system/opt/homebrew/etc/fail2ban/symlink_jail.local (Normal file)
|
@ -0,0 +1,3 @@
|
|||
{{- if lookPath "fail2ban-client" -}}
|
||||
/usr/local/etc/fail2ban/jail.local
|
||||
{{- end -}}
|
3 system/opt/homebrew/etc/sftpgo/symlink_env.d.tmpl (Normal file)
|
@ -0,0 +1,3 @@
|
|||
{{- if lookPath "sftpgo" -}}
|
||||
/usr/local/etc/sftpgo/env.d/
|
||||
{{- end -}}
|
3 system/opt/homebrew/etc/sftpgo/symlink_sftpgo.json.tmpl (Normal file)
|
@ -0,0 +1,3 @@
|
|||
{{- if lookPath "sftpgo" -}}
|
||||
/usr/local/etc/sftpgo/sftpgo.json
|
||||
{{- end -}}
|
|
@ -4,7 +4,7 @@ After=network-online.target
|
|||
|
||||
[Service]
|
||||
User=blocky
|
||||
ExecStart=/usr/local/bin/blocky --config /usr/local/etc/blocky/config.yaml
|
||||
ExecStart={{ lookPath "blocky" }} --config /usr/local/etc/blocky/config.yml
|
||||
Restart=on-failure
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
|
|
@ -1,3 +1,3 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
echo "There are new items for review in /quarantine" | mail -s "ClamAV Quarantined Possible Malware" root
|
||||
echo "There are new items for review in the system /opt/quarantine folder" | mail -s "ClamAV Quarantined Possible Malware" root
|
||||
|
|
346 system/usr/local/etc/blocky/config.yml.sample (Normal file)
|
@ -0,0 +1,346 @@
|
|||
upstreams:
|
||||
init:
|
||||
# Configure startup behavior.
|
||||
# accepted: blocking, failOnError, fast
|
||||
# default: blocking
|
||||
strategy: fast
|
||||
groups:
|
||||
# these external DNS resolvers will be used. Blocky picks 2 random resolvers from the list for each query
|
||||
# format for resolver: [net:]host:[port][/path]. net could be empty (default, shortcut for tcp+udp), tcp+udp, tcp, udp, tcp-tls or https (DoH). If port is empty, default port will be used (53 for udp and tcp, 853 for tcp-tls, 443 for https (Doh))
|
||||
# this configuration is mandatory, please define at least one external DNS resolver
|
||||
default:
|
||||
# example for tcp+udp IPv4 server (https://digitalcourage.de/)
|
||||
- 5.9.164.112
|
||||
# Cloudflare
|
||||
- 1.1.1.1
|
||||
# example for DNS-over-TLS server (DoT)
|
||||
- tcp-tls:fdns1.dismail.de:853
|
||||
# example for DNS-over-HTTPS (DoH)
|
||||
- https://dns.digitale-gesellschaft.ch/dns-query
|
||||
# optional: use client name (with wildcard support: * - sequence of any characters, [0-9] - range)
|
||||
# or single ip address / client subnet as CIDR notation
|
||||
laptop*:
|
||||
- 123.123.123.123
|
||||
# optional: Determines what strategy blocky uses to choose the upstream servers.
|
||||
# accepted: parallel_best, strict, random
|
||||
# default: parallel_best
|
||||
strategy: parallel_best
|
||||
# optional: timeout to query the upstream resolver. Default: 2s
|
||||
timeout: 2s
|
||||
# optional: HTTP User Agent when connecting to upstreams. Default: none
|
||||
userAgent: "custom UA"
|
||||
|
||||
# optional: Determines how blocky will create outgoing connections. This impacts both upstreams, and lists.
|
||||
# accepted: dual, v4, v6
|
||||
# default: dual
|
||||
connectIPVersion: dual
|
||||
|
||||
# optional: custom IP address(es) for domain name (with all sub-domains). Multiple addresses must be separated by a comma
|
||||
# example: query "printer.lan" or "my.printer.lan" will return 192.168.178.3
|
||||
customDNS:
|
||||
customTTL: 1h
|
||||
# optional: if true (default), return empty result for unmapped query types (for example TXT, MX or AAAA if only IPv4 address is defined).
|
||||
# if false, queries with unmapped types will be forwarded to the upstream resolver
|
||||
filterUnmappedTypes: true
|
||||
# optional: replace domain in the query with other domain before resolver lookup in the mapping
|
||||
rewrite:
|
||||
example.com: printer.lan
|
||||
mapping:
|
||||
printer.lan: 192.168.178.3,2001:0db8:85a3:08d3:1319:8a2e:0370:7344
|
||||
|
||||
# optional: definition, which DNS resolver(s) should be used for queries to the domain (with all sub-domains). Multiple resolvers must be separated by a comma
|
||||
# Example: Query client.fritz.box will ask DNS server 192.168.178.1. This is necessary for local network, to resolve clients by host name
|
||||
conditional:
|
||||
# optional: if false (default), return empty result if after rewrite, the mapped resolver returned an empty answer. If true, the original query will be sent to the upstream resolver
|
||||
# Example: The query "blog.example.com" will be rewritten to "blog.fritz.box" and also redirected to the resolver at 192.168.178.1. If not found and if `fallbackUpstream` was set to `true`, the original query "blog.example.com" will be sent upstream.
|
||||
# Usage: One use case is split DNS for internal and external (internet-facing) users, where not all subdomains are listed in the internal domain.
|
||||
fallbackUpstream: false
|
||||
# optional: replace domain in the query with other domain before resolver lookup in the mapping
|
||||
rewrite:
|
||||
example.com: fritz.box
|
||||
mapping:
|
||||
fritz.box: 192.168.178.1
|
||||
lan.net: 192.168.178.1,192.168.178.2
|
||||
|
||||
# optional: use allow/denylists to block queries (for example ads, trackers, adult pages etc.)
|
||||
blocking:
|
||||
# definition of denylist groups. Can be external link (http/https) or local file
|
||||
denylists:
|
||||
ads:
|
||||
- https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt
|
||||
- https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
|
||||
- http://sysctl.org/cameleon/hosts
|
||||
- https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt
|
||||
- |
|
||||
# inline definition with YAML literal block scalar style
|
||||
someadsdomain.com
|
||||
*.example.com
|
||||
special:
|
||||
- https://raw.githubusercontent.com/StevenBlack/hosts/master/alternates/fakenews/hosts
|
||||
# definition of allowlist groups.
|
||||
# Note: if the same group has both allow/denylists, allowlists take precedence. Meaning if a domain is both blocked and allowed, it will be allowed.
|
||||
# If a group has only allowlist entries, only domains from this list are allowed, and all others will be blocked.
|
||||
allowlists:
|
||||
ads:
|
||||
- allowlist.txt
|
||||
- |
|
||||
# inline definition with YAML literal block scalar style
|
||||
# hosts format
|
||||
allowlistdomain.com
|
||||
# this is a regex
|
||||
/^banners?[_.-]/
|
||||
# definition: which groups should be applied for which client
|
||||
clientGroupsBlock:
|
||||
# default will be used, if no special definition for a client name exists
|
||||
default:
|
||||
- ads
|
||||
- special
|
||||
# use client name (with wildcard support: * - sequence of any characters, [0-9] - range)
|
||||
# or single ip address / client subnet as CIDR notation
|
||||
laptop*:
|
||||
- ads
|
||||
192.168.178.1/24:
|
||||
- special
|
||||
# which response will be sent, if query is blocked:
|
||||
# zeroIp: 0.0.0.0 will be returned (default)
|
||||
# nxDomain: return NXDOMAIN as return code
|
||||
# comma separated list of destination IP addresses (for example: 192.100.100.15, 2001:0db8:85a3:08d3:1319:8a2e:0370:7344). Should contain ipv4 and ipv6 to cover all query types. Useful with running web server on this address to display the "blocked" page.
|
||||
blockType: zeroIp
|
||||
# optional: TTL for answers to blocked domains
|
||||
# default: 6h
|
||||
blockTTL: 1m
|
||||
# optional: Configure how lists, AKA sources, are loaded
|
||||
loading:
|
||||
# optional: list refresh period in duration format.
|
||||
# Set to a value <= 0 to disable.
|
||||
# default: 4h
|
||||
refreshPeriod: 24h
|
||||
# optional: Applies only to lists that are downloaded (HTTP URLs).
|
||||
downloads:
|
||||
# optional: timeout for list download (each url). Use large values for big lists or slow internet connections
|
||||
# default: 5s
|
||||
timeout: 60s
|
||||
# optional: Maximum download attempts
|
||||
# default: 3
|
||||
attempts: 5
|
||||
# optional: Time between the download attempts
|
||||
# default: 500ms
|
||||
cooldown: 10s
|
||||
# optional: Maximum number of lists to process in parallel.
|
||||
# default: 4
|
||||
concurrency: 16
|
||||
# Configure startup behavior.
|
||||
# accepted: blocking, failOnError, fast
|
||||
# default: blocking
|
||||
strategy: failOnError
|
||||
# Number of errors allowed in a list before it is considered invalid.
|
||||
# A value of -1 disables the limit.
|
||||
# default: 5
|
||||
maxErrorsPerSource: 5
|
||||
|
||||
# optional: configuration for caching of DNS responses
|
||||
caching:
|
||||
# duration how long a response must be cached (min value).
|
||||
# If <=0, use response's TTL, if >0 use this value, if TTL is smaller
|
||||
# Default: 0
|
||||
minTime: 5m
|
||||
# duration how long a response must be cached (max value).
|
||||
# If <0, do not cache responses
|
||||
# If 0, use TTL
|
||||
# If > 0, use this value, if TTL is greater
|
||||
# Default: 0
|
||||
maxTime: 30m
|
||||
# Max number of cache entries (responses) to be kept in cache (soft limit). Useful on systems with limited amount of RAM.
|
||||
# Default (0): unlimited
|
||||
maxItemsCount: 0
|
||||
# if true, will preload DNS results for often used queries (default: names queried more than 5 times in a 2-hour time window)
|
||||
# this improves the response time for often used queries, but significantly increases external traffic
|
||||
# default: false
|
||||
prefetching: true
|
||||
# prefetch track time window (in duration format)
|
||||
# default: 120
|
||||
prefetchExpires: 2h
|
||||
# name queries threshold for prefetch
|
||||
# default: 5
|
||||
prefetchThreshold: 5
|
||||
# Max number of domains to be kept in cache for prefetching (soft limit). Useful on systems with limited amount of RAM.
|
||||
# Default (0): unlimited
|
||||
prefetchMaxItemsCount: 0
|
||||
# Time how long negative results (NXDOMAIN response or empty result) are cached. A value of -1 will disable caching for negative results.
|
||||
# Default: 30m
|
||||
cacheTimeNegative: 30m
|
||||
|
||||
# optional: configuration of client name resolution
|
||||
clientLookup:
|
||||
# optional: this DNS resolver will be used to perform reverse DNS lookup (typically local router)
|
||||
upstream: 192.168.178.1
|
||||
# optional: some routers return multiple names for client (host name and user defined name). Define which single name should be used.
|
||||
# Example: take second name if present, if not take first name
|
||||
singleNameOrder:
|
||||
- 2
|
||||
- 1
|
||||
# optional: custom mapping of client name to IP addresses. Useful if reverse DNS does not work properly or just to have custom client names.
|
||||
clients:
|
||||
laptop:
|
||||
- 192.168.178.29
|
||||
|
||||
# optional: configuration for prometheus metrics endpoint
|
||||
prometheus:
|
||||
# enabled if true
|
||||
enable: true
|
||||
# url path, optional (default '/metrics')
|
||||
path: /metrics
|
||||
|
||||
# optional: write query information (question, answer, client, duration etc.) to daily csv file
|
||||
queryLog:
|
||||
# optional one of: mysql, postgresql, csv, csv-client. If empty, log to console
|
||||
type: mysql
|
||||
# directory (should be mounted as volume in docker) for csv, db connection string for mysql/postgresql
|
||||
target: db_user:db_password@tcp(db_host_or_ip:3306)/db_name?charset=utf8mb4&parseTime=True&loc=Local
|
||||
#postgresql target: postgres://user:password@db_host_or_ip:5432/db_name
|
||||
# if > 0, deletes log files which are older than ... days
|
||||
logRetentionDays: 7
|
||||
# optional: Max attempts to create specific query log writer, default: 3
|
||||
creationAttempts: 1
|
||||
# optional: Time between the creation attempts, default: 2s
|
||||
creationCooldown: 2s
|
||||
# optional: Which fields should be logged. You can choose one or more from: clientIP, clientName, responseReason, responseAnswer, question, duration. If not defined, it logs all fields
|
||||
fields:
|
||||
- clientIP
|
||||
- duration
|
||||
# optional: Interval to write data in bulk to the external database, default: 30s
|
||||
flushInterval: 30s
|
||||
|
||||
# optional: Blocky can synchronize its cache and blocking state between multiple instances through redis.
|
||||
redis:
|
||||
# Server address and port or master name if sentinel is used
|
||||
address: redismaster
|
||||
# Username if necessary
|
||||
username: usrname
|
||||
# Password if necessary
|
||||
password: passwd
|
||||
# Database, default: 0
|
||||
database: 2
|
||||
# Connection is required for blocky to start. Default: false
|
||||
required: true
|
||||
# Max connection attempts, default: 3
|
||||
connectionAttempts: 10
|
||||
# Time between the connection attempts, default: 1s
|
||||
connectionCooldown: 3s
|
||||
# Sentinel username if necessary
|
||||
sentinelUsername: usrname
|
||||
# Sentinel password if necessary
|
||||
sentinelPassword: passwd
|
||||
# List with address and port of sentinel hosts (sentinel is activated if at least one sentinel address is configured)
|
||||
sentinelAddresses:
|
||||
- redis-sentinel1:26379
|
||||
- redis-sentinel2:26379
|
||||
- redis-sentinel3:26379
|
||||
|
||||
# optional: Minimal TLS version that the DoH and DoT server will use
|
||||
minTlsServeVersion: 1.3
|
||||
|
||||
# if https port > 0: path to cert and key file for SSL encryption. if not set, self-signed certificate will be generated
|
||||
#certFile: server.crt
|
||||
#keyFile: server.key
|
||||
|
||||
# optional: use these DNS servers to resolve denylist urls and upstream DNS servers. It is useful if no system DNS resolver is configured, and/or to encrypt the bootstrap queries.
|
||||
bootstrapDns:
|
||||
- tcp+udp:1.1.1.1
|
||||
- https://1.1.1.1/dns-query
|
||||
- upstream: https://dns.digitale-gesellschaft.ch/dns-query
|
||||
ips:
|
||||
- 185.95.218.42
|
||||
|
||||
# optional: drop all queries with following query types. Default: empty
|
||||
filtering:
|
||||
queryTypes:
|
||||
- AAAA
|
||||
|
||||
# optional: return NXDOMAIN for queries that are not FQDNs.
|
||||
fqdnOnly:
|
||||
# default: false
|
||||
enable: true
|
||||
|
||||
# optional: if path defined, use this file for query resolution (A, AAAA and rDNS). Default: empty
|
||||
hostsFile:
|
||||
# optional: Hosts files to parse
|
||||
sources:
|
||||
- /etc/hosts
|
||||
- https://example.com/hosts
|
||||
- |
|
||||
# inline hosts
|
||||
127.0.0.1 example.com
|
||||
# optional: TTL, default: 1h
|
||||
hostsTTL: 30m
|
||||
# optional: Whether loopback hosts addresses (127.0.0.0/8 and ::1) should be filtered or not
|
||||
# default: false
|
||||
filterLoopback: true
|
||||
# optional: Configure how sources are loaded
|
||||
loading:
|
||||
# optional: file refresh period in duration format.
|
||||
# Set to a value <= 0 to disable.
|
||||
# default: 4h
|
||||
refreshPeriod: 24h
|
||||
# optional: Applies only to files that are downloaded (HTTP URLs).
|
||||
downloads:
|
||||
# optional: timeout for file download (each url). Use large values for big files or slow internet connections
|
||||
# default: 5s
|
||||
timeout: 60s
|
||||
# optional: Maximum download attempts
|
||||
# default: 3
|
||||
attempts: 5
|
||||
# optional: Time between the download attempts
|
||||
# default: 500ms
|
||||
cooldown: 10s
|
||||
# optional: Maximum number of files to process in parallel.
|
||||
# default: 4
|
||||
concurrency: 16
|
||||
# Configure startup behavior.
|
||||
# accepted: blocking, failOnError, fast
|
||||
# default: blocking
|
||||
strategy: failOnError
|
||||
# Number of errors allowed in a file before it is considered invalid.
|
||||
# A value of -1 disables the limit.
|
||||
# default: 5
|
||||
maxErrorsPerSource: 5
|
||||
|
||||
# optional: ports configuration
|
||||
ports:
|
||||
# optional: DNS listener port(s) and bind ip address(es), default 53 (UDP and TCP). Example: 53, :53, "127.0.0.1:5353,[::1]:5353"
|
||||
dns: 53
|
||||
# optional: Port(s) and bind ip address(es) for DoT (DNS-over-TLS) listener. Example: 853, 127.0.0.1:853
|
||||
tls: 853
|
||||
# optional: Port(s) and optional bind ip address(es) to serve HTTPS used for prometheus metrics, pprof, REST API, DoH... If you wish to specify a specific IP, you can do so such as 192.168.0.1:443. Example: 443, :443, 127.0.0.1:443,[::1]:443
|
||||
https: 443
|
||||
# optional: Port(s) and optional bind ip address(es) to serve HTTP used for prometheus metrics, pprof, REST API, DoH... If you wish to specify a specific IP, you can do so such as 192.168.0.1:4000. Example: 4000, :4000, 127.0.0.1:4000,[::1]:4000
|
||||
http: 4000
|
||||
|
||||
# optional: logging configuration
|
||||
log:
|
||||
# optional: Log level (one from trace, debug, info, warn, error). Default: info
|
||||
level: info
|
||||
# optional: Log format (text or json). Default: text
|
||||
format: text
|
||||
# optional: log timestamps. Default: true
|
||||
timestamp: true
|
||||
# optional: obfuscate log output (replace all alphanumeric characters with *) for user sensitive data like request domains or responses to increase privacy. Default: false
|
||||
privacy: false
|
||||
|
||||
# optional: add EDE error codes to dns response
|
||||
ede:
|
||||
# enabled if true, Default: false
|
||||
enable: true
|
||||
|
||||
# optional: configure optional Special Use Domain Names (SUDN)
|
||||
specialUseDomains:
|
||||
# optional: block recommended private TLDs
|
||||
# default: true
|
||||
rfc6762-appendixG: true
|
||||
|
||||
# optional: configure extended client subnet (ECS) support
|
||||
ecs:
|
||||
# optional: if the request contains an ECS option with a maximum-size mask, the address will be used as the client IP
|
||||
useAsClient: true
|
||||
# optional: if the request contains an ECS option, it will be forwarded to the upstream resolver
|
||||
forward: true
|
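The sample above is blocky's full reference configuration; per its own comments only the upstream resolver groups are mandatory, everything else is optional. A hypothetical smoke test once a trimmed copy is saved as /usr/local/etc/blocky/config.yml, the path used by the systemd unit and Homebrew symlink in this commit (the validate subcommand is assumed to be available in current blocky releases):
blocky validate --config /usr/local/etc/blocky/config.yml   # check that the file parses
dig @127.0.0.1 example.com                                  # once the service is running, confirm it answers on port 53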
(Binary image diffs not rendered: four images changed, with sizes unchanged at 2.7 KiB, 15 KiB (48px × 48px), 12 KiB, and 46 KiB.)
|
@ -20,12 +20,13 @@ PidFile /var/run/clamav/clamd.pid
|
|||
User clamav
|
||||
VirusEvent /usr/local/etc/clamav/clamav-email
|
||||
|
||||
ExcludePath ^/quarantine/
|
||||
ExcludePath ^/opt/quarantine/
|
||||
|
||||
{{- if eq .host.distro.id "darwin" -}}
|
||||
### macOS
|
||||
ExcludePath ^/Volumes/([^M]|M([^a]|a([^c]|c([^i]|i([^n]|n([^t]|t([^o]|o([^s]|s([^h]|h([^ ]|[ ]([^H]|H([^D]|D([^/])|$)|$)|$)|$)|$)|$)|$)|$)|$)|$)|$))[^/]{0,240}/
|
||||
ExcludePath ^/Network/
|
||||
ExcludePath ^/Quarantine/
|
||||
ExcludePath ^/opt/Quarantine/
|
||||
ExcludePath ^/private/var/db/
|
||||
ExcludePath ^/private/var/folders/
|
||||
ExcludePath ^/dev/
|
||||
|
|
Some files were not shown because too many files have changed in this diff.