diff --git a/ct/esphome_container.sh b/ct/esphome_container.sh
new file mode 100644
index 00000000..3979e7c2
--- /dev/null
+++ b/ct/esphome_container.sh
@@ -0,0 +1,168 @@
+#!/usr/bin/env bash
+
+while true; do
+  read -r -p "This will create a New ESPHome LXC Container. Proceed(y/n)?" yn
+  case $yn in
+    [Yy]*) break ;;
+    [Nn]*) exit ;;
+    *) echo "Please answer yes or no." ;;
+  esac
+done
+
+# Setup script environment
+set -o errexit  #Exit immediately if a pipeline returns a non-zero status
+set -o errtrace #Trap ERR from shell functions, command substitutions, and commands run in subshells
+set -o nounset  #Treat unset variables as an error
+set -o pipefail #Pipe will exit with last non-zero status if applicable
+shopt -s expand_aliases
+alias die='EXIT=$? LINE=$LINENO error_exit'
+trap die ERR
+trap cleanup EXIT
+
+function error_exit() {
+  trap - ERR
+  local DEFAULT='Unknown failure occurred.'
+  local REASON="\e[97m${1:-$DEFAULT}\e[39m"
+  local FLAG="\e[91m[ERROR] \e[93m$EXIT@$LINE"
+  msg "$FLAG $REASON"
+  [ ! -z "${CTID-}" ] && cleanup_ctid
+  exit $EXIT
+}
+function warn() {
+  local REASON="\e[97m$1\e[39m"
+  local FLAG="\e[93m[WARNING]\e[39m"
+  msg "$FLAG $REASON"
+}
+function info() {
+  local REASON="$1"
+  local FLAG="\e[36m[INFO]\e[39m"
+  msg "$FLAG $REASON"
+}
+function msg() {
+  local TEXT="$1"
+  echo -e "$TEXT"
+}
+function cleanup_ctid() {
+  if [ ! -z "${MOUNT+x}" ]; then
+    pct unmount $CTID
+  fi
+  if pct status $CTID &>/dev/null; then
+    if [ "$(pct status $CTID | awk '{print $2}')" == "running" ]; then
+      pct stop $CTID
+    fi
+    pct destroy $CTID
+  elif [ "$(pvesm list $STORAGE --vmid $CTID)" != "" ]; then
+    pvesm free $ROOTFS
+  fi
+}
+function cleanup() {
+  popd >/dev/null
+  rm -rf $TEMP_DIR
+}
+function load_module() {
+  if ! lsmod | grep -Fq "$1"; then
+    modprobe "$1" &>/dev/null || \
+      die "Failed to load '$1' module."
+  fi
+  MODULES_PATH=/etc/modules
+  if ! grep -Fxq "$1" $MODULES_PATH; then
+    echo "$1" >> $MODULES_PATH || \
+      die "Failed to add '$1' module to load at boot."
+  fi
+}
+
+TEMP_DIR=$(mktemp -d)
+pushd $TEMP_DIR >/dev/null
+
+# Download setup script
+wget -q https://raw.githubusercontent.com/tteck/Proxmox/main/setup/esphome_setup.sh
+
+# Detect modules and automatically load at boot
+load_module overlay
+
+# Select storage location
+while read -r line; do
+  TAG=$(echo $line | awk '{print $1}')
+  TYPE=$(echo $line | awk '{printf "%-10s", $2}')
+  FREE=$(echo $line | numfmt --field 4-6 --from-unit=K --to=iec --format %.2f | awk '{printf( "%9sB", $6)}')
+  ITEM="  Type: $TYPE Free: $FREE "
+  OFFSET=2
+  if [[ $((${#ITEM} + $OFFSET)) -gt ${MSG_MAX_LENGTH:-} ]]; then
+    MSG_MAX_LENGTH=$((${#ITEM} + $OFFSET))
+  fi
+  STORAGE_MENU+=( "$TAG" "$ITEM" "OFF" )
+done < <(pvesm status -content rootdir | awk 'NR>1')
+if [ $((${#STORAGE_MENU[@]}/3)) -eq 0 ]; then
+  warn "'Container' needs to be selected for at least one storage location."
+  die "Unable to detect valid storage location."
+elif [ $((${#STORAGE_MENU[@]}/3)) -eq 1 ]; then
+  STORAGE=${STORAGE_MENU[0]}
+else
+  while [ -z "${STORAGE:+x}" ]; do
+    STORAGE=$(whiptail --title "Storage Pools" --radiolist \
+      "Which storage pool would you like to use for the container?\n\n" \
+      16 $(($MSG_MAX_LENGTH + 23)) 6 \
+      "${STORAGE_MENU[@]}" 3>&1 1>&2 2>&3) || exit
+  done
+fi
+info "Using '$STORAGE' for storage location."
+
+# Get the next guest VM/LXC ID
+CTID=$(pvesh get /cluster/nextid)
+info "Container ID is $CTID."
+
+# Download latest Debian 11 LXC template
+msg "Updating LXC template list..."
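+# 'pveam update' refreshes the local index of downloadable appliance
+# templates; the sed/sort pipeline below then version-sorts the available
+# Debian 11 system templates, so ${TEMPLATES[-1]} is the newest release.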
+pveam update >/dev/null
+msg "Downloading LXC template..."
+OSTYPE=debian
+OSVERSION=${OSTYPE}-11
+mapfile -t TEMPLATES < <(pveam available -section system | sed -n "s/.*\($OSVERSION.*\)/\1/p" | sort -t - -k 2 -V)
+TEMPLATE="${TEMPLATES[-1]}"
+pveam download local $TEMPLATE >/dev/null ||
+  die "A problem occurred while downloading the LXC template."
+
+# Create variables for container disk
+STORAGE_TYPE=$(pvesm status -storage $STORAGE | awk 'NR>1 {print $2}')
+case $STORAGE_TYPE in
+  dir|nfs)
+    DISK_EXT=".raw"
+    DISK_REF="$CTID/"
+    ;;
+  zfspool)
+    DISK_PREFIX="subvol"
+    DISK_FORMAT="subvol"
+    ;;
+esac
+DISK=${DISK_PREFIX:-vm}-${CTID}-disk-0${DISK_EXT-}
+ROOTFS=${STORAGE}:${DISK_REF-}${DISK}
+
+# Create LXC
+msg "Creating LXC container..."
+DISK_SIZE=4G
+pvesm alloc $STORAGE $CTID $DISK $DISK_SIZE --format ${DISK_FORMAT:-raw} >/dev/null
+if [ "$STORAGE_TYPE" == "zfspool" ]; then
+  warn "Some containers may not work properly due to ZFS not supporting 'fallocate'."
+else
+  mkfs.ext4 $(pvesm path $ROOTFS) &>/dev/null
+fi
+ARCH=$(dpkg --print-architecture)
+HOSTNAME=esphome
+TEMPLATE_STRING="local:vztmpl/${TEMPLATE}"
+pct create $CTID $TEMPLATE_STRING -arch $ARCH -features nesting=1 \
+  -hostname $HOSTNAME -net0 name=eth0,bridge=vmbr0,ip=dhcp -onboot 1 -cores 2 -memory 1024 \
+  -ostype $OSTYPE -rootfs $ROOTFS,size=$DISK_SIZE -storage $STORAGE >/dev/null
+
+# Set container timezone to match host
+MOUNT=$(pct mount $CTID | cut -d"'" -f 2)
+ln -fs $(readlink /etc/localtime) ${MOUNT}/etc/localtime
+pct unmount $CTID && unset MOUNT
+
+# Setup container
+msg "Starting LXC container..."
+pct start $CTID
+pct push $CTID esphome_setup.sh /esphome_setup.sh -perms 755
+pct exec $CTID /esphome_setup.sh
+
+# Get network details and show completion message
+IP=$(pct exec $CTID ip a s dev eth0 | sed -n '/inet / s/\// /p' | awk '{print $2}')
+info "Successfully created ESPHome LXC Container $CTID at IP Address ${IP}:6052"
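+
+# Note: this script is meant to be run as root on a Proxmox VE host, since
+# pvesh, pvesm, and pct are host-side management tools. Once esphome_setup.sh
+# finishes inside the container, the ESPHome dashboard should be reachable
+# at the address printed above (port 6052).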