commit f6d197b89e (parent e7c202fafd)

@@ -1,86 +0,0 @@
#! /bin/bash

echo ""
echo "Mounting /proc/self/mounts..."
ln -s /proc/self/mounts /etc/mtab
echo "Done."

echo ""
echo "Updating apt repositories inside chroot..."
errors=$(apt update 2>&1 1>/dev/null)
if [ $? -ne 0 ]; then
    echo "Failed to update apt repositories inside chroot - $errors"
    exit 1
fi
echo "Done."

echo ""
echo "Setting locale..."
echo -e 'LANG="en_US.UTF-8"\nLANGUAGE="en_US:en"\n' > /etc/default/locale
echo "Done."


#dpkg-reconfigure tzdata
#apt install -y --no-install-recommends linux-image-generic
#apt install -y vim bash screen tmux zfs-initramfs dosfstools openssh-server
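
# NOTE: $disks is not set in this script; it is assumed to be exported by the
# calling environment as a space-separated list of /dev/disk/by-id names.
# The EFI setup commands in the loop below are only echoed, not executed.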
i="0"
for disk in $disks; do
    echo "mkdosfs -F 32 -s 1 -n EFI /dev/disk/by-id/${disk}-part2"
    echo "mkdir /boot/efi${i}"
    echo "echo PARTUUID=$(blkid -s PARTUUID -o value /dev/disk/by-id/${disk}-part2) /boot/efi${i} vfat nofail,x-systemd.device-timeout=0 0 1 >> /etc/fstab"
    echo "mount /boot/efi${i}"
    i=$((i + 1))
done

#apt install -y grub-efi-amd64-signed shim-signed
#passwd

#echo "[Unit]\n DefaultDependencies=no\n Before=zfs-import-scan.service\n Before=zfs-import-cache.service\n \n [Service]\n Type=oneshot\n RemainAfterExit=yes\n ExecStart=/sbin/zpool import -N -o cachefile=none bpool\n [Install]\n WantedBy=zfs-import.target " > /etc/systemd/system/zfs-import-bpool.Service"
|
|
||||||
#systemctl enable zfs-import-bpool.Service
|
|
||||||
|
|
||||||
#cp /usr/share/systemd/tmp.mount /etc/systemd/system/
#systemctl enable tmp.mount
#addgroup --system lpadmin
#addgroup --system sambashare

#zfscheck=`grub-probe /boot`
#update-initramfs -u -k all

# Use sed to do this properly: replace the GRUB_CMDLINE_LINUX line in
# /etc/default/grub with:
# GRUB_CMDLINE_LINUX="root=ZFS=rpool/ROOT/ubuntu"
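#
# A minimal sketch of that sed replacement (the rpool/ROOT/ubuntu dataset name
# comes from the line above; adjust it to match your own pool layout):
#sed -i 's|^GRUB_CMDLINE_LINUX=.*|GRUB_CMDLINE_LINUX="root=ZFS=rpool/ROOT/ubuntu"|' /etc/default/grub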

#update-grub
#i="0"
#for f in $disks; do
#echo "grub-install --target=x86_64-efi --efi-directory=/boot/efi${i} --bootloader-id=ubuntu --recheck --no-floppy"
#echo "umount /boot/efi${i}"
#i=$((i + 1))
#done

#zfs set mountpoint=legacy bpool/BOOT/ubuntu
#echo bpool/BOOT/ubuntu /boot zfs nodev,relatime,x-systemd.requires=zfs-import-bpool.service 0 0 >> /etc/fstab
#zfs set mountpoint=legacy rpool/var/log
#echo rpool/var/log /var/log zfs nodev,relatime 0 0 >> /etc/fstab
#zfs set mountpoint=legacy rpool/var/spool
#echo rpool/var/spool /var/spool zfs nodev,relatime 0 0 >> /etc/fstab
#zfs set mountpoint=legacy rpool/var/tmp
#echo rpool/var/tmp /var/tmp zfs nodev,relatime 0 0 >> /etc/fstab
#zfs set mountpoint=legacy rpool/tmp
#echo rpool/tmp /tmp zfs nodev,relatime 0 0 >> /etc/fstab
#systemctl enable ssh

# TODO: create the parker user in an automated way
#zfs create rpool/home/YOURUSERNAME
# adduser YOURUSERNAME
# cp -a /etc/skel/.[!.]* /home/YOURUSERNAME
# chown -R YOURUSERNAME:YOURUSERNAME /home/YOURUSERNAME
# usermod -a -G adm,cdrom,dip,lpadmin,plugdev,sambashare,sudo YOURUSERNAME

#create swap space, too if necessary
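#
# A possible sketch for swap on a zvol (the 4G size and the property values
# here are assumptions, not taken from this repo; adjust to taste):
#zfs create -V 4G -b $(getconf PAGESIZE) -o compression=zle -o logbias=throughput -o sync=always -o primarycache=metadata -o secondarycache=none -o com.sun:auto-snapshot=false rpool/swap
#mkswap -f /dev/zvol/rpool/swap
#echo /dev/zvol/rpool/swap none swap discard 0 0 >> /etc/fstab
#swapon -av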
exit 0

@@ -1,139 +0,0 @@
#! /bin/bash
#
# Calomel.org
# https://calomel.org/zfs_health_check_script.html
# FreeBSD 9.1 ZFS Health Check script
# zfs_health.sh @ Version 0.15

# Check health of ZFS volumes and drives. On any faults send email. In FreeBSD
# 10 there is supposed to be a ZFSd daemon to monitor the health of the ZFS
# pools. For now, in FreeBSD 9, we will make our own checks and run this script
# through cron a few times a day.
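#
# For example, a crontab entry along these lines would run it twice a day
# (the install path and schedule here are placeholders, not part of the
# original script):
#   0 6,18 * * * /usr/local/bin/zfs_health.sh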

# Changelog
# Peter van der Does - Always send an email, even if there is no problem.
# I prefer to know a script has run even when there is no problem.
# June 24, 2015
# Peter van der Does - When a scrub is needed the email subject line only has to inform us once.

# 99 problems but ZFS ain't one
problems=0
emailSubject="`hostname` - ZFS pool - HEALTH check"
emailMessage=""

# Health - Check if all zfs volumes are in good condition. We are looking for
# any keyword signifying a degraded or broken array.

condition=$(/sbin/zpool status | egrep -i '(DEGRADED|FAULTED|OFFLINE|UNAVAIL|REMOVED|FAIL|DESTROYED|corrupt|cannot|unrecover)')
if [ "${condition}" ]; then
    emailSubject="$emailSubject - fault"
    problems=1
fi

# Capacity - Make sure pool capacities are below 80% for best performance. The
# percentage really depends on how large your volume is. If you have a 128GB
# SSD then 80% is reasonable. If you have a 60TB raid-z2 array then you can
# probably set the warning closer to 95%.
#
# ZFS uses a copy-on-write scheme. The file system writes new data to
# sequential free blocks first, and the new inode pointers become valid once
# the uberblock has been updated. This only works while the pool has enough
# free sequential blocks. If the pool is at capacity and space limited, ZFS
# has to write blocks randomly. This means ZFS cannot create an optimal set
# of sequential writes, and write performance is severely impacted.

maxCapacity=80

if [ ${problems} -eq 0 ]; then
    capacity=$(/sbin/zpool list -H -o capacity)
    for line in ${capacity//%/}
    do
        if [ $line -ge $maxCapacity ]; then
            emailSubject="$emailSubject - Capacity Exceeded"
            problems=1
        fi
    done
fi

# Errors - Check the columns for READ, WRITE and CKSUM (checksum) drive errors
# on all volumes and all drives using "zpool status". If any non-zero errors
# are reported an email will be sent out. You should then look to replace the
# faulty drive and run "zpool scrub" on the affected volume after resilvering.

if [ ${problems} -eq 0 ]; then
    errors=$(/sbin/zpool status | grep ONLINE | grep -v state | awk '{print $3 $4 $5}' | grep -v 000)
    if [ "${errors}" ]; then
        emailSubject="$emailSubject - Drive Errors"
        problems=1
    fi
fi

# Scrub Expired - Check if all volumes have been scrubbed in at least the last
# 8 days. The general guide is to scrub volumes on desktop quality drives once
# a week and volumes on enterprise class drives once a month. You can always
# use cron to schedule "zpool scrub" in off hours. We scrub our volumes every
# Sunday morning for example.
#
# Scrubbing traverses all the data in the pool once and verifies all blocks can
# be read. Scrubbing proceeds as fast as the devices allow, though the
# priority of any I/O remains below that of normal calls. This operation might
# negatively impact performance, but the file system will remain usable and
# responsive while scrubbing occurs. To initiate an explicit scrub, use the
# "zpool scrub" command.
#
# The scrubExpire variable is in seconds. So for 8 days we calculate 8 days
# times 24 hours times 3600 seconds to equal 691200 seconds.

scrubExpire=691200

if [ ${problems} -eq 0 ]; then
    currentDate=$(date +%s)
    zfsVolumes=$(/sbin/zpool list -H -o name)

    for volume in ${zfsVolumes}
    do
        if [ $(/sbin/zpool status $volume | egrep -c "none requested") -ge 1 ]; then
            echo "ERROR: You need to run \"zpool scrub $volume\" before this script can monitor the scrub expiration time."
            break
        fi
        if [ $(/sbin/zpool status $volume | egrep -c "scrub in progress|resilver") -ge 1 ]; then
            break
        fi

        ### FreeBSD with *nix supported date format
        scrubRawDate=$(/sbin/zpool status $volume | grep scrub | awk '{print $15 $12 $13}')
        scrubDate=$(date -j -f '%Y%b%e-%H%M%S' $scrubRawDate'-000000' +%s)

        ### Ubuntu with GNU supported date format
        #scrubRawDate=$(/sbin/zpool status $volume | grep scrub | awk '{print $11" "$12" " $13" " $14" "$15}')
        #scrubDate=$(date -d "$scrubRawDate" +%s)

        if [ $(($currentDate - $scrubDate)) -ge $scrubExpire ]; then
            if [ ${problems} -eq 0 ]; then
                emailSubject="$emailSubject - Scrub Time Expired. Scrub Needed on Volume(s)"
            fi
            problems=1
            emailMessage="${emailMessage}Pool: $volume needs scrub \n"
        fi
    done
fi

# Notifications - On any problems send email with drive status information and
# capacities including a helpful subject line to root. Also use logger to write
# the email subject to the local logs. This is the place you may want to put
# any other notifications like:
#
# + Update an anonymous twitter account with your ZFS status (https://twitter.com/zfsmonitor)
# + Playing a sound file or beep the internal speaker
# + Update Nagios, Cacti, Zabbix, Munin or even BigBrother
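#
# For instance, a generic HTTP push to a monitoring endpoint could be dropped
# in here (the URL is a placeholder, not a real service):
#curl -fsS -m 10 "https://monitoring.example.com/zfs?host=`hostname`&problems=${problems}" > /dev/null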

echo -e "$emailMessage \n\n\n `/sbin/zpool list` \n\n\n `/sbin/zpool status`" | mail -s "$emailSubject" root
if [ "$problems" -ne 0 ]; then
    logger "$emailSubject"
fi

### EOF ###