Compare commits

..

1 Commit

Author SHA1 Message Date
github-actions[bot]
db7b442cb9 Archive old changelog entries 2026-05-03 00:15:28 +00:00
39 changed files with 252 additions and 1995 deletions

View File

@@ -458,100 +458,6 @@ Exercise vigilance regarding copycat or coat-tailing sites that seek to exploit
</details>
## 2026-05-07
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- vm: update disk image URL for Ubuntu 25.04 [@MickLesk](https://github.com/MickLesk) ([#14290](https://github.com/community-scripts/ProxmoxVE/pull/14290))
- #### ✨ New Features
- pangolin: bump version to 1.18.3 [@MickLesk](https://github.com/MickLesk) ([#14297](https://github.com/community-scripts/ProxmoxVE/pull/14297))
### 🗑️ Deleted Scripts
- Remove: LiteLLM [@michelroegl-brunner](https://github.com/michelroegl-brunner) ([#14294](https://github.com/community-scripts/ProxmoxVE/pull/14294))
### 💾 Core
- #### ✨ New Features
- update-apps: some improvements [@MickLesk](https://github.com/MickLesk) ([#14275](https://github.com/community-scripts/ProxmoxVE/pull/14275))
## 2026-05-06
### 🆕 New Scripts
- Hoodik ([#14279](https://github.com/community-scripts/ProxmoxVE/pull/14279))
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Pelican-Panel: create backup subdirectory before copying storage [@MickLesk](https://github.com/MickLesk) ([#14274](https://github.com/community-scripts/ProxmoxVE/pull/14274))
- Rustdeskserver: remove redundant else with undefined RELEASE var [@MickLesk](https://github.com/MickLesk) ([#14272](https://github.com/community-scripts/ProxmoxVE/pull/14272))
### 🧰 Tools
- #### 🔧 Refactor
- AdguardHome-Sync replace ifconfig with hostname -I for IP detection [@MickLesk](https://github.com/MickLesk) ([#14273](https://github.com/community-scripts/ProxmoxVE/pull/14273))
## 2026-05-05
### 🆕 New Scripts
- LibreChat ([#14247](https://github.com/community-scripts/ProxmoxVE/pull/14247))
- Matomo ([#14248](https://github.com/community-scripts/ProxmoxVE/pull/14248))
- Storyteller ([#14122](https://github.com/community-scripts/ProxmoxVE/pull/14122))
### 🧰 Tools
- Fix container count message in update-apps.sh [@Quotacious](https://github.com/Quotacious) ([#14265](https://github.com/community-scripts/ProxmoxVE/pull/14265))
## 2026-05-04
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Databasus: move .env to filesystem root so service starts correctly [@Copilot](https://github.com/Copilot) ([#14252](https://github.com/community-scripts/ProxmoxVE/pull/14252))
- Databasus: update mongo-tools fallback to 100.16.1 and use now pnpm instead of npm ci [@MickLesk](https://github.com/MickLesk) ([#14240](https://github.com/community-scripts/ProxmoxVE/pull/14240))
### 💾 Core
- #### ✨ New Features
- tools.func get_latest_gh_tag - add pagination to find prefixed tags beyond first 50 [@MickLesk](https://github.com/MickLesk) ([#14241](https://github.com/community-scripts/ProxmoxVE/pull/14241))
- tools.func: add GitLab release check/fetch/deploy helpers [@MickLesk](https://github.com/MickLesk) ([#14242](https://github.com/community-scripts/ProxmoxVE/pull/14242))
## 2026-05-03
### 🚀 Updated Scripts
- #### 🐞 Bug Fixes
- Hortusfox: fix update issues [@tomfrenzel](https://github.com/tomfrenzel) ([#14214](https://github.com/community-scripts/ProxmoxVE/pull/14214))
- #### ✨ New Features
- Refactor: PeaNUT for v6 [@MickLesk](https://github.com/MickLesk) ([#14224](https://github.com/community-scripts/ProxmoxVE/pull/14224))
- pangolin: pin version, drop manual SQL, use upstream migrator [@MickLesk](https://github.com/MickLesk) ([#14223](https://github.com/community-scripts/ProxmoxVE/pull/14223))
### 💾 Core
- #### 🐞 Bug Fixes
- core: fix validate_bridge function [@MichaelOultram](https://github.com/MichaelOultram) ([#14206](https://github.com/community-scripts/ProxmoxVE/pull/14206))
### 🧰 Tools
- #### 🐞 Bug Fixes
- pve/pbs scripts: guard sed against missing /etc/apt/sources.list [@MickLesk](https://github.com/MickLesk) ([#14222](https://github.com/community-scripts/ProxmoxVE/pull/14222))
## 2026-05-02
### 🆕 New Scripts

View File

@@ -35,8 +35,7 @@ function update_script() {
msg_ok "Stopped Databasus"
msg_info "Backing up Configuration"
cp /.env /opt/databasus.env.bak
chmod 600 /opt/databasus.env.bak
cp /opt/databasus/.env /opt/databasus.env.bak
msg_ok "Backed up Configuration"
msg_info "Ensuring Database Clients"
@@ -47,7 +46,7 @@ function update_script() {
# Install MongoDB Database Tools via direct .deb (no APT repo for Debian 13)
if ! command -v mongodump &>/dev/null; then
[[ "$(get_os_info id)" == "ubuntu" ]] && MONGO_DIST="ubuntu2204" || MONGO_DIST="debian12"
fetch_and_deploy_from_url "https://fastdl.mongodb.org/tools/db/mongodb-database-tools-${MONGO_DIST}-x86_64-100.16.1.deb"
fetch_and_deploy_from_url "https://fastdl.mongodb.org/tools/db/mongodb-database-tools-${MONGO_DIST}-x86_64-100.14.1.deb"
fi
[[ -f /usr/bin/mongodump ]] && ln -sf /usr/bin/mongodump /usr/local/mongodb-database-tools/bin/mongodump
[[ -f /usr/bin/mongorestore ]] && ln -sf /usr/bin/mongorestore /usr/local/mongodb-database-tools/bin/mongorestore
@@ -67,12 +66,9 @@ function update_script() {
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "databasus" "databasus/databasus" "tarball" "latest" "/opt/databasus"
msg_info "Updating Databasus"
export COREPACK_ENABLE_DOWNLOAD_PROMPT=0
cd /opt/databasus/frontend
$STD corepack enable
$STD corepack prepare pnpm@latest --activate
$STD pnpm install --frozen-lockfile
$STD pnpm run build
$STD npm ci
$STD npm run build
cd /opt/databasus/backend
$STD go mod download
$STD /root/go/bin/swag init -g cmd/main.go -o swagger
@@ -85,18 +81,11 @@ function update_script() {
msg_ok "Updated Databasus"
msg_info "Restoring Configuration"
cp /opt/databasus.env.bak /.env
cp /opt/databasus.env.bak /opt/databasus/.env
rm -f /opt/databasus.env.bak
chmod 600 /.env
chown postgres:postgres /opt/databasus/.env
msg_ok "Restored Configuration"
if ! grep -q "EnvironmentFile=/.env" /etc/systemd/system/databasus.service; then
msg_info "Updating Service"
sed -i 's|EnvironmentFile=.*|EnvironmentFile=/.env|' /etc/systemd/system/databasus.service
$STD systemctl daemon-reload
msg_ok "Updated Service"
fi
msg_info "Starting Databasus"
$STD systemctl start databasus
msg_ok "Started Databasus"

View File

@@ -1,6 +0,0 @@
__ __ ___ __
/ / / /___ ____ ____/ (_) /__
/ /_/ / __ \/ __ \/ __ / / //_/
/ __ / /_/ / /_/ / /_/ / / ,<
/_/ /_/\____/\____/\__,_/_/_/|_|

View File

@@ -1,6 +0,0 @@
__ _ __ ________ __
/ / (_) /_ ________ / ____/ /_ ____ _/ /_
/ / / / __ \/ ___/ _ \/ / / __ \/ __ `/ __/
/ /___/ / /_/ / / / __/ /___/ / / / /_/ / /_
/_____/_/_.___/_/ \___/\____/_/ /_/\__,_/\__/

6
ct/headers/litellm Normal file
View File

@@ -0,0 +1,6 @@
__ _ __ __ __ __ ___
/ / (_) /____ / / / / / |/ /
/ / / / __/ _ \/ / / / / /|_/ /
/ /___/ / /_/ __/ /___/ /___/ / / /
/_____/_/\__/\___/_____/_____/_/ /_/

View File

@@ -1,6 +0,0 @@
__ ___ __
/ |/ /___ _/ /_____ ____ ___ ____
/ /|_/ / __ `/ __/ __ \/ __ `__ \/ __ \
/ / / / /_/ / /_/ /_/ / / / / / / /_/ /
/_/ /_/\__,_/\__/\____/_/ /_/ /_/\____/

View File

@@ -1,6 +0,0 @@
_____ __ __ ____
/ ___// /_____ _______ __/ /____ / / /__ _____
\__ \/ __/ __ \/ ___/ / / / __/ _ \/ / / _ \/ ___/
___/ / /_/ /_/ / / / /_/ / /_/ __/ / / __/ /
/____/\__/\____/_/ \__, /\__/\___/_/_/\___/_/
/____/

View File

@@ -1,64 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/hudikhq/hoodik
# Creates (or, from inside an existing container, updates) a Hoodik LXC.
# Container resource defaults; each var_* may be pre-set via environment.
APP="Hoodik"
var_tags="${var_tags:-cloud;storage}"
var_cpu="${var_cpu:-1}"
var_ram="${var_ram:-1024}"
var_disk="${var_disk:-5}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
# In-place update path: redeploys the prebuilt x86_64 release while
# preserving /opt/hoodik/.env across the CLEAN_INSTALL wipe.
function update_script() {
header_info
check_container_storage
check_container_resources
# The binary's presence is the marker for an existing installation.
if [[ ! -f /opt/hoodik/hoodik ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
# check_for_gh_release returns success only when a newer tag exists.
if check_for_gh_release "hoodik" "hudikhq/hoodik"; then
msg_info "Stopping Service"
systemctl stop hoodik
msg_ok "Stopped Service"
msg_info "Backing up Configuration"
# .env must survive CLEAN_INSTALL=1, which replaces /opt/hoodik entirely.
cp /opt/hoodik/.env /opt/hoodik.env.bak
msg_ok "Backed up Configuration"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "hoodik" "hudikhq/hoodik" "prebuild" "latest" "/opt/hoodik" "*x86_64.tar.gz"
msg_info "Restoring Configuration"
cp /opt/hoodik.env.bak /opt/hoodik/.env
rm -f /opt/hoodik.env.bak
msg_ok "Restored Configuration"
msg_info "Starting Service"
systemctl start hoodik
msg_ok "Started Service"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:5443/auth/register${CL}"

View File

@@ -38,15 +38,13 @@ function update_script() {
mv /opt/hortusfox/ /opt/hortusfox-backup
msg_ok "Backed up current HortusFox installation"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "hortusfox" "danielbrendel/hortusfox-web" "tarball"
fetch_and_deploy_gh_release "hortusfox" "danielbrendel/hortusfox-web" "tarball"
msg_info "Updating HortusFox"
cd /opt/hortusfox
cp /opt/hortusfox-backup/.env /opt/hortusfox/.env
cp -a /opt/hortusfox-backup/public/img/. /opt/hortusfox/public/img/
export COMPOSER_ALLOW_SUPERUSER=1
mv /opt/hortusfox-backup/.env /opt/hortusfox/.env
$STD composer install --no-dev --optimize-autoloader
$STD php asatru migrate:upgrade
$STD php asatru migrate --no-interaction
$STD php asatru plants:attributes
$STD php asatru calendar:classes
chown -R www-data:www-data /opt/hortusfox

View File

@@ -1,101 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/danny-avila/LibreChat
# Creates (or, from inside an existing container, updates) a LibreChat LXC.
# Container resource defaults; each var_* may be pre-set via environment.
APP="LibreChat"
var_tags="${var_tags:-ai;chat}"
var_cpu="${var_cpu:-4}"
var_ram="${var_ram:-6144}"
var_disk="${var_disk:-20}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
# In-place update path: handles two independently versioned components —
# LibreChat itself (GitHub tags, "v" prefix) and the RAG API (GitHub releases).
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/librechat ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
# --- LibreChat update (only runs when a newer "v"-prefixed tag exists) ---
if check_for_gh_tag "librechat" "danny-avila/LibreChat" "v"; then
msg_info "Stopping Services"
systemctl stop librechat rag-api
msg_ok "Stopped Services"
msg_info "Backing up Configuration"
# .env must survive CLEAN_INSTALL=1, which replaces /opt/librechat entirely.
cp /opt/librechat/.env /opt/librechat.env.bak
msg_ok "Backed up Configuration"
CLEAN_INSTALL=1 fetch_and_deploy_gh_tag "librechat" "danny-avila/LibreChat"
msg_info "Installing Dependencies"
cd /opt/librechat
$STD npm ci
msg_ok "Installed Dependencies"
msg_info "Building Frontend"
$STD npm run frontend
# Trim dev dependencies and cache after the build to save disk space.
$STD npm prune --production
$STD npm cache clean --force
msg_ok "Built Frontend"
msg_info "Restoring Configuration"
cp /opt/librechat.env.bak /opt/librechat/.env
rm -f /opt/librechat.env.bak
msg_ok "Restored Configuration"
msg_info "Starting Services"
systemctl start rag-api librechat
msg_ok "Started Services"
msg_ok "Updated LibreChat Successfully!"
fi
# --- RAG API update (independent of the LibreChat version check above) ---
if check_for_gh_release "rag-api" "danny-avila/rag_api"; then
msg_info "Stopping RAG API"
systemctl stop rag-api
msg_ok "Stopped RAG API"
msg_info "Backing up RAG API Configuration"
cp /opt/rag-api/.env /opt/rag-api.env.bak
msg_ok "Backed up RAG API Configuration"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "rag-api" "danny-avila/rag_api" "tarball"
msg_info "Updating RAG API Dependencies"
cd /opt/rag-api
# NOTE(review): assumes the release tarball preserves .venv (or that the
# venv lives outside the wiped tree) — confirm against fetch_and_deploy.
$STD .venv/bin/pip install -r requirements.lite.txt
msg_ok "Updated RAG API Dependencies"
msg_info "Restoring RAG API Configuration"
cp /opt/rag-api.env.bak /opt/rag-api/.env
rm -f /opt/rag-api.env.bak
msg_ok "Restored RAG API Configuration"
msg_info "Starting RAG API"
systemctl start rag-api
msg_ok "Started RAG API"
msg_ok "Updated RAG API Successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:3080${CL}"

67
ct/litellm.sh Normal file
View File

@@ -0,0 +1,67 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: stout01
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/BerriAI/litellm
# Creates (or, from inside an existing container, updates) a LiteLLM proxy LXC.
# Container resource defaults; each var_* may be pre-set via environment.
APP="LiteLLM"
var_tags="${var_tags:-ai;interface}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-4}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
# In-place update path: upgrades the litellm pip package, regenerates the
# prisma client, pushes the DB schema, and migrates older systemd units
# from "uv run" to the venv binary.
function update_script() {
header_info
check_container_storage
check_container_resources
# The systemd unit is the marker for an existing installation.
if [[ ! -f /etc/systemd/system/litellm.service ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
msg_info "Stopping Service"
systemctl stop litellm
msg_ok "Stopped Service"
VENV_PATH="/opt/litellm/.venv"
PYTHON_VERSION="3.13" USE_UVX="YES" setup_uv
msg_info "Updating LiteLLM"
# Quote the requirement: "litellm[proxy]" contains the glob characters
# [ and ], which the shell could expand against files in the current
# directory instead of passing the literal extras specifier to pip.
$STD "$VENV_PATH/bin/python" -m pip install --upgrade "litellm[proxy]" prisma
$STD "$VENV_PATH/bin/prisma" generate
msg_ok "LiteLLM updated"
msg_info "Updating DB Schema"
# --skip_server_startup applies the prisma schema push without launching
# the proxy itself.
$STD /opt/litellm/.venv/bin/litellm --config /opt/litellm/litellm.yaml --use_prisma_db_push --skip_server_startup
msg_ok "DB Schema Updated"
msg_info "Updating Service"
# Older installs launched via "uv run"; point ExecStart at the venv binary.
sed -i 's|ExecStart=uv --directory=/opt/litellm run litellm|ExecStart=/opt/litellm/.venv/bin/litellm|' /etc/systemd/system/litellm.service
systemctl daemon-reload
msg_ok "Updated Service"
msg_info "Starting Service"
systemctl start litellm
msg_ok "Started Service"
msg_ok "Updated successfully!"
exit
}
start
build_container
description
msg_ok "Completed successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:4000${CL}"

View File

@@ -1,75 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://matomo.org/
# Creates (or, from inside an existing container, updates) a Matomo LXC.
# Container resource defaults; each var_* may be pre-set via environment.
APP="Matomo"
var_tags="${var_tags:-analytics;tracking;privacy}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-2048}"
var_disk="${var_disk:-16}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
# In-place update path: redeploys the prebuilt release zip while preserving
# config.ini.php, user-uploaded assets (misc/user), and the DB credentials
# file across the CLEAN_INSTALL wipe. Each backup/restore step is guarded,
# so a fresh-but-unconfigured install also updates cleanly.
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/matomo ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gh_release "matomo" "matomo-org/matomo"; then
msg_info "Stopping Services"
# Matomo is served by Caddy; stopping it is enough to quiesce the app.
systemctl stop caddy
msg_ok "Stopped Services"
msg_info "Backing up Data"
[[ -f /opt/matomo/config/config.ini.php ]] && cp /opt/matomo/config/config.ini.php /opt/matomo_config.bak
[[ -d /opt/matomo/misc/user ]] && cp -r /opt/matomo/misc/user /opt/matomo_user_backup
[[ -f /root/matomo.creds ]] && cp /root/matomo.creds /opt/matomo_db_creds.bak
msg_ok "Backed up Data"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "matomo" "matomo-org/matomo" "prebuild" "latest" "/opt/matomo" "matomo-*.zip"
msg_info "Restoring Data"
if [[ -f /opt/matomo_config.bak ]]; then
mkdir -p /opt/matomo/config
cp /opt/matomo_config.bak /opt/matomo/config/config.ini.php
fi
if [[ -d /opt/matomo_user_backup ]]; then
mkdir -p /opt/matomo/misc/user
cp -r /opt/matomo_user_backup/. /opt/matomo/misc/user
fi
[[ -f /opt/matomo_db_creds.bak ]] && cp /opt/matomo_db_creds.bak /root/matomo.creds
rm -f /opt/matomo_config.bak /opt/matomo_db_creds.bak
rm -rf /opt/matomo_user_backup
# The web server (php-fpm via Caddy) runs as www-data; restore ownership
# after the redeploy replaced the tree.
chown -R www-data:www-data /opt/matomo
msg_ok "Restored Data"
msg_info "Starting Services"
systemctl start caddy
msg_ok "Started Services"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}${CL}"

View File

@@ -81,7 +81,11 @@ STARTEOF
cp -r /opt/mealie/frontend/dist/* /opt/mealie/mealie/frontend/
msg_ok "Copied Frontend"
setup_nltk "averaged_perceptron_tagger_eng" "/nltk_data"
msg_info "Updating NLTK Data"
mkdir -p /nltk_data/
cd /opt/mealie
$STD uv run python -m nltk.downloader -d /nltk_data averaged_perceptron_tagger_eng
msg_ok "Updated NLTK Data"
msg_info "Starting Service"
systemctl start mealie

View File

@@ -6,7 +6,6 @@ source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxV
# Source: https://pangolin.net/ | Github: https://github.com/fosrl/pangolin
APP="Pangolin"
PANGOLIN_VERSION="${PANGOLIN_VERSION:-1.18.3}"
var_tags="${var_tags:-proxy}"
var_cpu="${var_cpu:-2}"
var_ram="${var_ram:-4096}"
@@ -34,7 +33,7 @@ function update_script() {
NODE_VERSION="24" setup_nodejs
if check_for_gh_release "pangolin" "fosrl/pangolin" "$PANGOLIN_VERSION" "Pinned to a tested release because Pangolin's schema changes have repeatedly broken unattended updates. To try a newer version at your own risk, run: 'export PANGOLIN_VERSION=<tag>' and re-run update. If it breaks, please open an issue at https://github.com/community-scripts/ProxmoxVE/issues with the error log."; then
if check_for_gh_release "pangolin" "fosrl/pangolin"; then
msg_info "Stopping Service"
systemctl stop pangolin
systemctl stop gerbil
@@ -42,13 +41,9 @@ function update_script() {
msg_info "Creating backup"
tar -czf /opt/pangolin_config_backup.tar.gz -C /opt/pangolin config
if [[ -f /opt/pangolin/config/db/db.sqlite ]]; then
cp -a /opt/pangolin/config/db/db.sqlite \
"/opt/pangolin/config/db/db.sqlite.pre-${PANGOLIN_VERSION}-$(date +%Y%m%d-%H%M%S).bak"
fi
msg_ok "Created backup"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball" "$PANGOLIN_VERSION"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball"
CLEAN_INSTALL=1 fetch_and_deploy_gh_release "gerbil" "fosrl/gerbil" "singlefile" "latest" "/usr/bin" "gerbil_linux_amd64"
msg_info "Updating Pangolin"
@@ -72,16 +67,36 @@ function update_script() {
rm -f /opt/pangolin_config_backup.tar.gz
msg_ok "Restored config"
if ! grep -q '^ExecStartPre=/usr/bin/node dist/migrations.mjs' /etc/systemd/system/pangolin.service 2>/dev/null; then
msg_info "Adding migration step to pangolin.service"
sed -i '/^ExecStart=\/usr\/bin\/node --enable-source-maps dist\/server.mjs/i ExecStartPre=/usr/bin/node dist/migrations.mjs' /etc/systemd/system/pangolin.service
systemctl daemon-reload
msg_ok "Updated pangolin.service"
fi
msg_info "Running database migrations"
cd /opt/pangolin
ENVIRONMENT=prod $STD node dist/migrations.mjs
# Pre-apply potentially destructive schema changes safely so drizzle-kit
# does not recreate tables (which would delete all rows).
local DB="/opt/pangolin/config/db/db.sqlite"
if [[ -f "$DB" ]]; then
sqlite3 "$DB" "ALTER TABLE 'orgs' ADD COLUMN 'settingsLogRetentionDaysConnection' integer DEFAULT 0 NOT NULL;" 2>/dev/null || true
sqlite3 "$DB" "ALTER TABLE 'clientSitesAssociationsCache' ADD COLUMN 'isJitMode' integer DEFAULT 0 NOT NULL;" 2>/dev/null || true
sqlite3 "$DB" "ALTER TABLE 'userOrgs' ADD COLUMN 'pamUsername' text;" 2>/dev/null || true
# Create new role-mapping tables and migrate data before drizzle-kit
# drops the roleId columns from userOrgs and userInvites.
sqlite3 "$DB" "CREATE TABLE IF NOT EXISTS 'userOrgRoles' (
'userId' text NOT NULL REFERENCES 'user'('id') ON DELETE CASCADE,
'orgId' text NOT NULL REFERENCES 'orgs'('orgId') ON DELETE CASCADE,
'roleId' integer NOT NULL REFERENCES 'roles'('roleId') ON DELETE CASCADE,
UNIQUE('userId', 'orgId', 'roleId')
);" 2>/dev/null || true
sqlite3 "$DB" "INSERT OR IGNORE INTO 'userOrgRoles' (userId, orgId, roleId) SELECT userId, orgId, roleId FROM 'userOrgs' WHERE roleId IS NOT NULL;" 2>/dev/null || true
sqlite3 "$DB" "CREATE TABLE IF NOT EXISTS 'userInviteRoles' (
'inviteId' text NOT NULL REFERENCES 'userInvites'('inviteId') ON DELETE CASCADE,
'roleId' integer NOT NULL REFERENCES 'roles'('roleId') ON DELETE CASCADE,
PRIMARY KEY('inviteId', 'roleId')
);" 2>/dev/null || true
sqlite3 "$DB" "INSERT OR IGNORE INTO 'userInviteRoles' (inviteId, roleId) SELECT inviteId, roleId FROM 'userInvites' WHERE roleId IS NOT NULL;" 2>/dev/null || true
fi
ENVIRONMENT=prod $STD npx drizzle-kit push --force --config drizzle.sqlite.config.ts
msg_ok "Ran database migrations"
msg_info "Updating Badger plugin version"

View File

@@ -164,7 +164,13 @@ function update_script() {
fi
fi
setup_nltk "snowball_data stopwords punkt_tab" "/usr/share/nltk_data"
msg_info "Updating NLTK Data"
cd /opt/paperless
$STD uv run python -m nltk.downloader -d /usr/share/nltk_data snowball_data
$STD uv run python -m nltk.downloader -d /usr/share/nltk_data stopwords
$STD uv run python -m nltk.downloader -d /usr/share/nltk_data punkt_tab ||
$STD uv run python -m nltk.downloader -d /usr/share/nltk_data punkt
msg_ok "Updated NLTK Data"
msg_info "Starting all Paperless-ngx Services"
systemctl start paperless-consumer paperless-webserver paperless-scheduler paperless-task-queue

View File

@@ -45,33 +45,6 @@ function update_script() {
msg_ok "Fixed entrypoint"
fi
if [[ ! -f /etc/peanut/peanut.env ]]; then
msg_info "Migrating service to EnvironmentFile"
mkdir -p /etc/peanut
cat <<EOF >/etc/peanut/peanut.env
NODE_ENV=production
#WEB_HOST=0.0.0.0
#WEB_PORT=8080
#NUT_HOST=localhost
#NUT_PORT=3493
# Disable auth entirely:
#AUTH_DISABLED=true
# Bootstrap initial account on first start (ignored afterwards):
#WEB_USERNAME=admin
#WEB_PASSWORD=changeme
EOF
chmod 600 /etc/peanut/peanut.env
sed -i '/^Environment=/d' /etc/systemd/system/peanut.service
if ! grep -q '^EnvironmentFile=/etc/peanut/peanut.env' /etc/systemd/system/peanut.service; then
sed -i '/^Type=simple/a EnvironmentFile=/etc/peanut/peanut.env' /etc/systemd/system/peanut.service
fi
systemctl daemon-reload
msg_ok "Migrated to /etc/peanut/peanut.env"
fi
msg_info "Updating PeaNUT"
cd /opt/peanut
$STD pnpm i

View File

@@ -46,7 +46,6 @@ function update_script() {
msg_ok "Stopped Service"
cp -a /opt/pelican-panel/.env /opt/backup
mkdir -p /opt/backup/storage/app/
cp -a /opt/pelican-panel/storage/app/public /opt/backup/storage/app/
SQLITE_INSTALL=$(ls /opt/pelican-panel/database/*.sqlite 1>/dev/null 2>&1 && echo "true" || echo "false")

View File

@@ -48,6 +48,8 @@ function update_script() {
msg_ok "Services started"
msg_ok "Updated successfully!"
else
msg_ok "No update required. ${APP} is already at v${RELEASE}"
fi
exit
}

View File

@@ -1,85 +0,0 @@
#!/usr/bin/env bash
source <(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/misc/build.func)
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://gitlab.com/storyteller-platform/storyteller
# Creates (or, from inside an existing container, updates) a Storyteller LXC.
# Container resource defaults; each var_* may be pre-set via environment.
APP="Storyteller"
var_tags="${var_tags:-media;ebook;audiobook}"
var_cpu="${var_cpu:-4}"
var_ram="${var_ram:-10240}"
var_disk="${var_disk:-20}"
var_os="${var_os:-debian}"
var_version="${var_version:-13}"
var_unprivileged="${var_unprivileged:-1}"
header_info "$APP"
variables
color
catch_errors
# In-place update path: Storyteller is hosted on GitLab, so the gl_* helper
# variants are used. The app is rebuilt from source (yarn workspaces +
# Next.js standalone output), then static/public/migration/sqlite assets
# are copied into the standalone tree that the service actually runs.
function update_script() {
header_info
check_container_storage
check_container_resources
if [[ ! -d /opt/storyteller ]]; then
msg_error "No ${APP} Installation Found!"
exit
fi
if check_for_gl_release "storyteller" "storyteller-platform/storyteller"; then
msg_info "Stopping Service"
systemctl stop storyteller
msg_ok "Stopped Service"
msg_info "Backing up Data"
# .env must survive CLEAN_INSTALL=1, which replaces /opt/storyteller.
cp /opt/storyteller/.env /opt/storyteller_env.bak
msg_ok "Backed up Data"
CLEAN_INSTALL=1 fetch_and_deploy_gl_release "storyteller" "storyteller-platform/storyteller" "tarball" "latest" "/opt/storyteller"
msg_info "Restoring Configuration"
mv /opt/storyteller_env.bak /opt/storyteller/.env
msg_ok "Restored Configuration"
msg_info "Rebuilding Storyteller"
cd /opt/storyteller
# The Next.js build is memory-hungry; raise the V8 heap limit.
export NODE_OPTIONS="--max-old-space-size=4096"
$STD yarn install --network-timeout 600000
# Build the custom SQLite UUID extension loaded by the app.
# NOTE(review): output name "uuid.c.so" mirrors the upstream convention —
# confirm the app loads the extension under exactly this filename.
$STD gcc -g -fPIC -rdynamic -shared web/sqlite/uuid.c -o web/sqlite/uuid.c.so
export CI=1
export NODE_ENV=production
export NEXT_TELEMETRY_DISABLED=1
export SQLITE_NATIVE_BINDING=/opt/storyteller/node_modules/better-sqlite3/build/Release/better_sqlite3.node
$STD yarn workspaces foreach -Rpt --from @storyteller-platform/web --exclude @storyteller-platform/eslint run build
# Next standalone output does not include static assets, public files,
# migrations, or the sqlite extension — copy them in alongside it.
mkdir -p /opt/storyteller/web/.next/standalone/web/.next/static
cp -rT /opt/storyteller/web/.next/static /opt/storyteller/web/.next/standalone/web/.next/static
if [[ -d /opt/storyteller/web/public ]]; then
mkdir -p /opt/storyteller/web/.next/standalone/web/public
cp -rT /opt/storyteller/web/public /opt/storyteller/web/.next/standalone/web/public
fi
mkdir -p /opt/storyteller/web/.next/standalone/web/migrations
cp -rT /opt/storyteller/web/migrations /opt/storyteller/web/.next/standalone/web/migrations
mkdir -p /opt/storyteller/web/.next/standalone/web/sqlite
cp -rT /opt/storyteller/web/sqlite /opt/storyteller/web/.next/standalone/web/sqlite
# Symlink (not copy) the .env so future edits apply without a rebuild.
ln -sf /opt/storyteller/.env /opt/storyteller/web/.next/standalone/web/.env
msg_ok "Rebuilt Storyteller"
msg_info "Starting Service"
systemctl start storyteller
msg_ok "Started Service"
msg_ok "Updated successfully!"
fi
exit
}
start
build_container
description
msg_ok "Completed Successfully!\n"
echo -e "${CREATING}${GN}${APP} setup has been successfully initialized!${CL}"
echo -e "${INFO}${YW} Access it using the following URL:${CL}"
echo -e "${TAB}${GATEWAY}${BGN}http://${IP}:8001${CL}"

View File

@@ -32,7 +32,7 @@ for v in 12 13 14 15 16 18; do
done
# Install MongoDB Database Tools via direct .deb (no APT repo for Debian 13)
[[ "$(get_os_info id)" == "ubuntu" ]] && MONGO_DIST="ubuntu2204" || MONGO_DIST="debian12"
MONGO_VERSION=$(get_latest_gh_tag "mongodb/mongo-tools" "100." || echo "100.16.1")
MONGO_VERSION=$(get_latest_gh_tag "mongodb/mongo-tools" "100." || echo "100.14.1")
fetch_and_deploy_from_url "https://fastdl.mongodb.org/tools/db/mongodb-database-tools-${MONGO_DIST}-x86_64-${MONGO_VERSION}.deb" ""
mkdir -p /usr/local/mongodb-database-tools/bin
[[ -f /usr/bin/mongodump ]] && ln -sf /usr/bin/mongodump /usr/local/mongodb-database-tools/bin/mongodump
@@ -52,12 +52,9 @@ msg_ok "Installed Database Clients"
fetch_and_deploy_gh_release "databasus" "databasus/databasus" "tarball" "latest" "/opt/databasus"
msg_info "Building Databasus (Patience)"
export COREPACK_ENABLE_DOWNLOAD_PROMPT=0
cd /opt/databasus/frontend
$STD corepack enable
$STD corepack prepare pnpm@latest --activate
$STD pnpm install --frozen-lockfile
$STD pnpm run build
$STD npm ci
$STD npm run build
cd /opt/databasus/backend
$STD go mod tidy
$STD go mod download
@@ -79,7 +76,7 @@ ENCRYPTION_KEY=$(openssl rand -hex 32)
# Install goose for migrations
$STD go install github.com/pressly/goose/v3/cmd/goose@latest
ln -sf /root/go/bin/goose /usr/local/bin/goose
cat <<EOF >/.env
cat <<EOF >/opt/databasus/.env
# Environment
ENV_MODE=production
@@ -109,7 +106,8 @@ DATA_DIR=/databasus-data/data
BACKUP_DIR=/databasus-data/backups
LOG_DIR=/databasus-data/logs
EOF
chmod 600 /.env
chown postgres:postgres /opt/databasus/.env
chmod 600 /opt/databasus/.env
msg_ok "Configured Databasus"
msg_info "Configuring Valkey"
@@ -147,7 +145,7 @@ Requires=postgresql.service valkey.service
[Service]
Type=simple
WorkingDirectory=/opt/databasus
EnvironmentFile=/.env
EnvironmentFile=/opt/databasus/.env
ExecStart=/opt/databasus/databasus
Restart=always
RestartSec=5

View File

@@ -1,58 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/hudikhq/hoodik
# First-time install of Hoodik inside the container: deploys the prebuilt
# x86_64 release, writes its .env, and registers a systemd service.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
fetch_and_deploy_gh_release "hoodik" "hudikhq/hoodik" "prebuild" "latest" "/opt/hoodik" "*x86_64.tar.gz"
msg_info "Configuring Hoodik"
mkdir -p /opt/hoodik_data
# 32-char alphanumeric secret; tr strips base64 padding/symbols.
JWT_SECRET=$(openssl rand -base64 32 | tr -dc 'a-zA-Z0-9' | cut -c1-32)
# Unquoted EOF: ${JWT_SECRET} and ${LOCAL_IP} expand at write time.
cat <<EOF >/opt/hoodik/.env
DATA_DIR=/opt/hoodik_data
HTTP_PORT=5443
HTTP_ADDRESS=0.0.0.0
JWT_SECRET=${JWT_SECRET}
APP_URL=http://${LOCAL_IP}:5443
SSL_DISABLED=true
COOKIE_SECURE=false
COOKIE_HTTP_ONLY=false
MAILER_TYPE=none
RUST_LOG=hoodik=info,error=info
EOF
msg_ok "Configured Hoodik"
msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/hoodik.service
[Unit]
Description=Hoodik - Encrypted File Storage
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/opt/hoodik_data
EnvironmentFile=/opt/hoodik/.env
ExecStart=/opt/hoodik/hoodik
Restart=always
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now hoodik
msg_ok "Created Service"
motd_ssh
customize
cleanup_lxc

View File

@@ -47,7 +47,8 @@ msg_info "Setting up KitchenOwl"
cd /opt/kitchenowl/backend
$STD uv sync --no-dev
sed -i 's/default=True/default=False/' /opt/kitchenowl/backend/wsgi.py
setup_nltk "averaged_perceptron_tagger_eng" "/nltk_data"
mkdir -p /nltk_data
$STD uv run python -m nltk.downloader -d /nltk_data averaged_perceptron_tagger_eng
JWT_SECRET=$(openssl rand -hex 32)
mkdir -p /opt/kitchenowl/data
cat <<EOF >/opt/kitchenowl/kitchenowl.env

View File

@@ -1,139 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/danny-avila/LibreChat
# First-time install of LibreChat plus its RAG API companion:
# MongoDB (app data), Meilisearch (search), PostgreSQL+pgvector (RAG
# embeddings), Node 24 (app), and a Python 3.12 venv (RAG API).
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os
MONGO_VERSION="8.0" setup_mongodb
setup_meilisearch
PG_VERSION="17" PG_MODULES="pgvector" setup_postgresql
PG_DB_NAME="ragapi" PG_DB_USER="ragapi" PG_DB_EXTENSIONS="vector" setup_postgresql_db
NODE_VERSION="24" setup_nodejs
UV_PYTHON="3.12" setup_uv
fetch_and_deploy_gh_tag "librechat" "danny-avila/LibreChat"
fetch_and_deploy_gh_release "rag-api" "danny-avila/rag_api" "tarball"
msg_info "Installing LibreChat Dependencies"
cd /opt/librechat
$STD npm ci
msg_ok "Installed LibreChat Dependencies"
msg_info "Building Frontend"
$STD npm run frontend
# Trim dev dependencies and cache after the build to save disk space.
$STD npm prune --production
$STD npm cache clean --force
msg_ok "Built Frontend"
msg_info "Installing RAG API Dependencies"
cd /opt/rag-api
$STD uv venv --python 3.12 --seed .venv
$STD .venv/bin/pip install -r requirements.lite.txt
mkdir -p /opt/rag-api/uploads
msg_ok "Installed RAG API Dependencies"
msg_info "Configuring LibreChat"
JWT_SECRET=$(openssl rand -hex 32)
JWT_REFRESH_SECRET=$(openssl rand -hex 32)
CREDS_KEY=$(openssl rand -hex 32)
CREDS_IV=$(openssl rand -hex 16)
# Unquoted EOF: the ${...} secrets and ${MEILISEARCH_MASTER_KEY} (set by
# setup_meilisearch — TODO confirm) expand at write time. The arithmetic-
# looking SESSION_EXPIRY values are LibreChat's own expected syntax.
cat <<EOF >/opt/librechat/.env
HOST=0.0.0.0
PORT=3080
MONGO_URI=mongodb://127.0.0.1:27017/LibreChat
DOMAIN_CLIENT=http://${LOCAL_IP}:3080
DOMAIN_SERVER=http://${LOCAL_IP}:3080
NO_INDEX=true
TRUST_PROXY=1
JWT_SECRET=${JWT_SECRET}
JWT_REFRESH_SECRET=${JWT_REFRESH_SECRET}
SESSION_EXPIRY=1000 * 60 * 15
REFRESH_TOKEN_EXPIRY=(1000 * 60 * 60 * 24) * 7
CREDS_KEY=${CREDS_KEY}
CREDS_IV=${CREDS_IV}
ALLOW_EMAIL_LOGIN=true
ALLOW_REGISTRATION=true
ALLOW_SOCIAL_LOGIN=false
ALLOW_SOCIAL_REGISTRATION=false
ALLOW_PASSWORD_RESET=false
ALLOW_UNVERIFIED_EMAIL_LOGIN=true
SEARCH=true
MEILI_NO_ANALYTICS=true
MEILI_HOST=http://127.0.0.1:7700
MEILI_MASTER_KEY=${MEILISEARCH_MASTER_KEY}
RAG_PORT=8000
RAG_API_URL=http://127.0.0.1:8000
APP_TITLE=LibreChat
ENDPOINTS=openAI,agents,assistants,anthropic,google
# OPENAI_API_KEY=your-key-here
# OPENAI_MODELS=
# ANTHROPIC_API_KEY=your-key-here
# GOOGLE_KEY=your-key-here
EOF
msg_ok "Configured LibreChat"
msg_info "Configuring RAG API"
# PG_DB_* are exported by setup_postgresql_db above; JWT_SECRET is shared
# with LibreChat so the app can authenticate to the RAG API.
cat <<EOF >/opt/rag-api/.env
VECTOR_DB_TYPE=pgvector
DB_HOST=127.0.0.1
DB_PORT=5432
POSTGRES_DB=${PG_DB_NAME}
POSTGRES_USER=${PG_DB_USER}
POSTGRES_PASSWORD=${PG_DB_PASS}
RAG_HOST=0.0.0.0
RAG_PORT=8000
JWT_SECRET=${JWT_SECRET}
RAG_UPLOAD_DIR=/opt/rag-api/uploads/
EOF
msg_ok "Configured RAG API"
msg_info "Creating Services"
cat <<EOF >/etc/systemd/system/librechat.service
[Unit]
Description=LibreChat
After=network.target mongod.service meilisearch.service rag-api.service
[Service]
Type=simple
User=root
WorkingDirectory=/opt/librechat
EnvironmentFile=/opt/librechat/.env
ExecStart=/usr/bin/npm run backend
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
cat <<EOF >/etc/systemd/system/rag-api.service
[Unit]
Description=LibreChat RAG API
After=network.target postgresql.service
[Service]
Type=simple
User=root
WorkingDirectory=/opt/rag-api
EnvironmentFile=/opt/rag-api/.env
ExecStart=/opt/rag-api/.venv/bin/uvicorn main:app --host 0.0.0.0 --port 8000
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now rag-api librechat
msg_ok "Created Services"
motd_ssh
customize
cleanup_lxc

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env bash

# Copyright (c) 2021-2026 community-scripts ORG
# Author: stout01
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://github.com/BerriAI/litellm

# Installs the LiteLLM proxy in an LXC: Postgres backend, uv-managed venv,
# YAML config, Prisma schema push, and a systemd unit.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os

msg_info "Installing Dependencies"
$STD apt install -y \
  build-essential \
  python3-dev
msg_ok "Installed Dependencies"

# Helpers export PG_DB_NAME / PG_DB_USER / PG_DB_PASS for the config below.
PG_VERSION="17" setup_postgresql
PG_DB_NAME="litellm_db" PG_DB_USER="litellm" setup_postgresql_db
PYTHON_VERSION="3.13" USE_UVX="YES" setup_uv

msg_info "Setting up Virtual Environment"
mkdir -p /opt/litellm
cd /opt/litellm
$STD uv venv --clear /opt/litellm/.venv
$STD /opt/litellm/.venv/bin/python -m ensurepip --upgrade
$STD /opt/litellm/.venv/bin/python -m pip install --upgrade pip
# Quote the extras spec: an unquoted litellm[proxy] is a glob pattern and can
# be mangled by matching filenames in the working directory.
$STD /opt/litellm/.venv/bin/python -m pip install "litellm[proxy]" prisma
$STD /opt/litellm/.venv/bin/prisma generate
msg_ok "Installed LiteLLM"

msg_info "Configuring LiteLLM"
# Generate a random proxy master key instead of shipping the upstream example
# value "sk-1234", which is publicly known and grants full admin access.
MASTER_KEY="sk-$(openssl rand -hex 16)"
cat <<EOF >/opt/litellm/litellm.yaml
general_settings:
  master_key: $MASTER_KEY
  database_url: postgresql://$PG_DB_USER:$PG_DB_PASS@127.0.0.1:5432/$PG_DB_NAME
  store_model_in_db: true
EOF
{
  echo "LiteLLM Credentials"
  echo "==================="
  echo "Master Key: $MASTER_KEY"
} >~/litellm.creds
# Push the Prisma schema to the database once, without starting the server.
$STD /opt/litellm/.venv/bin/litellm --config /opt/litellm/litellm.yaml --use_prisma_db_push --skip_server_startup
msg_ok "Configured LiteLLM"

msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/litellm.service
[Unit]
Description=LiteLLM
After=network.target postgresql.service

[Service]
Type=simple
WorkingDirectory=/opt/litellm
ExecStart=/opt/litellm/.venv/bin/litellm --config /opt/litellm/litellm.yaml
Restart=always
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now litellm
msg_ok "Created Service"

motd_ssh
customize
cleanup_lxc

View File

@@ -1,66 +0,0 @@
#!/usr/bin/env bash

# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://matomo.org/

# Installs Matomo web analytics in an LXC: PHP-FPM + MariaDB + Caddy.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os

msg_info "Installing Dependencies"
$STD apt install -y caddy
msg_ok "Installed Dependencies"

mkdir -p /opt/matomo
PHP_VERSION="8.3" PHP_FPM="YES" PHP_MODULES="pdo_mysql,gd,mbstring,xml,curl,intl,zip,ldap" setup_php
setup_mariadb
# Helper exports MARIADB_DB_NAME / MARIADB_DB_USER / MARIADB_DB_PASS used below.
MARIADB_DB_NAME="matomo" MARIADB_DB_USER="matomo" setup_mariadb_db

msg_info "Allowing Local TCP Database Access"
# MariaDB treats 'localhost' (socket) and '127.0.0.1' (TCP) as distinct hosts;
# grant the TCP identity explicitly so Matomo can connect via 127.0.0.1.
$STD mariadb -u root -e "CREATE USER IF NOT EXISTS '$MARIADB_DB_USER'@'127.0.0.1' IDENTIFIED BY '$MARIADB_DB_PASS';"
$STD mariadb -u root -e "ALTER USER '$MARIADB_DB_USER'@'127.0.0.1' IDENTIFIED BY '$MARIADB_DB_PASS';"
$STD mariadb -u root -e "GRANT ALL ON \`$MARIADB_DB_NAME\`.* TO '$MARIADB_DB_USER'@'127.0.0.1';"
$STD mariadb -u root -e "FLUSH PRIVILEGES;"
msg_ok "Allowed Local TCP Database Access"

fetch_and_deploy_gh_release "matomo" "matomo-org/matomo" "prebuild" "latest" "/opt/matomo" "matomo-*.zip"

msg_info "Setting up Matomo"
# The release zip unpacks into /opt/matomo/matomo/; flatten it into /opt/matomo.
if [[ -d /opt/matomo/matomo ]]; then
  rm -rf /opt/matomo/tmp "/opt/matomo/How to install Matomo.html"
  find /opt/matomo/matomo -mindepth 1 -maxdepth 1 -exec mv -t /opt/matomo {} +
  rm -rf /opt/matomo/matomo
fi
mkdir -p /opt/matomo/tmp
chown -R www-data:www-data /opt/matomo
chmod -R 755 /opt/matomo/tmp
msg_ok "Set up Matomo"

msg_info "Configuring Caddy"
# Pin the PHP-FPM socket path to the installed PHP minor version.
PHP_VER=$(php -r 'echo PHP_MAJOR_VERSION . "." . PHP_MINOR_VERSION;')
# @blocked denies direct web access to Matomo's config, tmp and dotfiles.
cat <<EOF >/etc/caddy/Caddyfile
:80 {
root * /opt/matomo
@blocked path /config /config/* /tmp /tmp/* /.* /.*/*
respond @blocked 403
php_fastcgi unix//run/php/php${PHP_VER}-fpm.sock
file_server
encode gzip
}
EOF
# Caddy must read files owned by www-data (chown above).
usermod -aG www-data caddy
msg_ok "Configured Caddy"

systemctl enable -q --now php${PHP_VER}-fpm
# Restart (not enable --now): the caddy package already installed its unit.
systemctl restart caddy

motd_ssh
customize
cleanup_lxc

View File

@@ -55,7 +55,11 @@ mkdir -p /opt/mealie/mealie/frontend
cp -r /opt/mealie/frontend/dist/* /opt/mealie/mealie/frontend/
msg_ok "Copied Frontend"
setup_nltk "averaged_perceptron_tagger_eng" "/nltk_data"
msg_info "Downloading NLTK Data"
mkdir -p /nltk_data/
cd /opt/mealie
$STD uv run python -m nltk.downloader -d /nltk_data averaged_perceptron_tagger_eng
msg_ok "Downloaded NLTK Data"
msg_info "Writing Environment File"
SECRET=$(openssl rand -hex 32)

View File

@@ -22,8 +22,7 @@ $STD apt install -y \
msg_ok "Installed Dependencies"
NODE_VERSION="24" setup_nodejs
PANGOLIN_VERSION="${PANGOLIN_VERSION:-1.18.3}"
fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball" "$PANGOLIN_VERSION"
fetch_and_deploy_gh_release "pangolin" "fosrl/pangolin" "tarball"
fetch_and_deploy_gh_release "gerbil" "fosrl/gerbil" "singlefile" "latest" "/usr/bin" "gerbil_linux_amd64"
fetch_and_deploy_gh_release "traefik" "traefik/traefik" "prebuild" "latest" "/usr/bin" "traefik_v*_linux_amd64.tar.gz"
@@ -205,7 +204,6 @@ User=root
Environment=NODE_ENV=production
Environment=ENVIRONMENT=prod
WorkingDirectory=/opt/pangolin
ExecStartPre=/usr/bin/node dist/migrations.mjs
ExecStart=/usr/bin/node --enable-source-maps dist/server.mjs
Restart=always
RestartSec=10

View File

@@ -94,12 +94,18 @@ user.save()
EOF
msg_ok "Set up admin Paperless-ngx User & Password"
setup_nltk "snowball_data stopwords punkt_tab" "/usr/share/nltk_data"
msg_info "Installing Natural Language Toolkit (Patience)"
cd /opt/paperless
$STD uv run python -m nltk.downloader -d /usr/share/nltk_data snowball_data
$STD uv run python -m nltk.downloader -d /usr/share/nltk_data stopwords
$STD uv run python -m nltk.downloader -d /usr/share/nltk_data punkt_tab ||
$STD uv run python -m nltk.downloader -d /usr/share/nltk_data punkt
for policy_file in /etc/ImageMagick-6/policy.xml /etc/ImageMagick-7/policy.xml; do
if [[ -f "$policy_file" ]]; then
sed -i -e 's/rights="none" pattern="PDF"/rights="read|write" pattern="PDF"/' "$policy_file"
fi
done
msg_ok "Installed Natural Language Toolkit"
msg_info "Creating Services"
cat <<EOF >/etc/systemd/system/paperless-scheduler.service

View File

@@ -29,28 +29,13 @@ cp -r .next/static .next/standalone/.next/
mkdir -p /opt/peanut/.next/standalone/config
mkdir -p /etc/peanut/
ln -sf .next/standalone/server.js server.js
if [[ ! -f /etc/peanut/settings.yml ]]; then
cat <<EOF >/etc/peanut/settings.yml
NUT_SERVERS: []
cat <<EOF >/etc/peanut/settings.yml
WEB_HOST: 0.0.0.0
WEB_PORT: 8080
NUT_HOST: 0.0.0.0
NUT_PORT: 3493
EOF
fi
ln -sf /etc/peanut/settings.yml /opt/peanut/.next/standalone/config/settings.yml
cat <<EOF >/etc/peanut/peanut.env
NODE_ENV=production
#WEB_HOST=0.0.0.0
#WEB_PORT=8080
#NUT_HOST=localhost
#NUT_PORT=3493
# Disable auth entirely:
#AUTH_DISABLED=true
# Bootstrap initial account on first start (ignored afterwards):
#WEB_USERNAME=admin
#WEB_PASSWORD=changeme
EOF
chmod 600 /etc/peanut/peanut.env
msg_ok "Setup Peanut"
msg_info "Creating Service"
@@ -63,7 +48,11 @@ SyslogIdentifier=peanut
Restart=always
RestartSec=5
Type=simple
EnvironmentFile=/etc/peanut/peanut.env
Environment="NODE_ENV=production"
#Environment="NUT_HOST=localhost"
#Environment="NUT_PORT=3493"
#Environment="WEB_HOST=0.0.0.0"
#Environment="WEB_PORT=8080"
WorkingDirectory=/opt/peanut
ExecStart=node /opt/peanut/entrypoint.mjs
TimeoutStopSec=30

View File

@@ -1,98 +0,0 @@
#!/usr/bin/env bash

# Copyright (c) 2021-2026 community-scripts ORG
# Author: MickLesk (CanbiZ)
# License: MIT | https://github.com/community-scripts/ProxmoxVE/raw/main/LICENSE
# Source: https://gitlab.com/storyteller-platform/storyteller

# Installs Storyteller (audiobook/ebook sync platform): Node 22 + yarn build
# of the Next.js monorepo, Readium CLI, and a systemd unit for the web app.
source /dev/stdin <<<"$FUNCTIONS_FILE_PATH"
color
verb_ip6
catch_errors
setting_up_container
network_check
update_os

msg_info "Installing Dependencies"
$STD apt install -y \
  build-essential \
  git \
  pkg-config \
  libsqlite3-dev \
  sqlite3 \
  python3-setuptools \
  ffmpeg
msg_ok "Installed Dependencies"

NODE_VERSION="22" NODE_MODULE="yarn" setup_nodejs
# Readium CLI binary, symlinked onto PATH for the app to invoke.
fetch_and_deploy_gh_release "readium" "readium/cli" "prebuild" "latest" "/opt/readium" "readium_linux_x86_64.tar.gz"
ln -sf /opt/readium/readium /usr/local/bin/readium
# Source tarball comes from GitLab, not GitHub.
fetch_and_deploy_gl_release "storyteller" "storyteller-platform/storyteller" "tarball" "latest" "/opt/storyteller"

msg_info "Setting up Storyteller"
cd /opt/storyteller
$STD yarn install --network-timeout 600000
# Compile the bundled SQLite extension from source — presumably loaded by the
# web app at runtime via better-sqlite3; confirm against upstream docs.
$STD gcc -g -fPIC -rdynamic -shared web/sqlite/uuid.c -o web/sqlite/uuid.c.so
STORYTELLER_SECRET_KEY=$(openssl rand -base64 32)
cat <<EOF >/opt/storyteller/.env
STORYTELLER_SECRET_KEY=${STORYTELLER_SECRET_KEY}
STORYTELLER_DATA_DIR=/opt/storyteller/data
PORT=8001
HOSTNAME=0.0.0.0
READIUM_PORT=9000
NODE_ENV=production
NEXT_TELEMETRY_DISABLED=1
EOF
mkdir -p /opt/storyteller/data
{
  echo "Storyteller Credentials"
  echo "======================="
  echo "Secret Key: ${STORYTELLER_SECRET_KEY}"
} >~/storyteller.creds
msg_ok "Set up Storyteller"

msg_info "Building Storyteller"
cd /opt/storyteller
export CI=1
export NODE_ENV=production
export NEXT_TELEMETRY_DISABLED=1
export SQLITE_NATIVE_BINDING=/opt/storyteller/node_modules/better-sqlite3/build/Release/better_sqlite3.node
# Build the web workspace and its dependencies (-R), in parallel (-p),
# topological order (-t), excluding the eslint config package.
$STD yarn workspaces foreach -Rpt --from @storyteller-platform/web --exclude @storyteller-platform/eslint run build
# Next.js "standalone" output omits static assets, public/, migrations and the
# sqlite extension — copy each into the standalone tree the service runs from.
mkdir -p /opt/storyteller/web/.next/standalone/web/.next/static
cp -rT /opt/storyteller/web/.next/static /opt/storyteller/web/.next/standalone/web/.next/static
if [[ -d /opt/storyteller/web/public ]]; then
  mkdir -p /opt/storyteller/web/.next/standalone/web/public
  cp -rT /opt/storyteller/web/public /opt/storyteller/web/.next/standalone/web/public
fi
mkdir -p /opt/storyteller/web/.next/standalone/web/migrations
cp -rT /opt/storyteller/web/migrations /opt/storyteller/web/.next/standalone/web/migrations
mkdir -p /opt/storyteller/web/.next/standalone/web/sqlite
cp -rT /opt/storyteller/web/sqlite /opt/storyteller/web/.next/standalone/web/sqlite
# Symlink (not copy) the .env so later edits take effect without a rebuild.
ln -sf /opt/storyteller/.env /opt/storyteller/web/.next/standalone/web/.env
msg_ok "Built Storyteller"

msg_info "Creating Service"
cat <<EOF >/etc/systemd/system/storyteller.service
[Unit]
Description=Storyteller
After=network.target
[Service]
Type=simple
User=root
WorkingDirectory=/opt/storyteller/web/.next/standalone/web
EnvironmentFile=/opt/storyteller/.env
ExecStart=/usr/bin/node --enable-source-maps server.js
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl enable -q --now storyteller
msg_ok "Created Service"

motd_ssh
customize
cleanup_lxc

View File

@@ -513,7 +513,7 @@ validate_bridge() {
[[ -z "$bridge" ]] && return 1
# Check if bridge interface exists
if ! ip link show dev "$bridge" &>/dev/null; then
if ! ip link show "$bridge" &>/dev/null; then
return 1
fi
@@ -3230,10 +3230,6 @@ check_container_resources() {
if [[ "$current_ram" -lt "$var_ram" ]] || [[ "$current_cpu" -lt "$var_cpu" ]]; then
msg_warn "Under-provisioned: Required ${var_cpu} CPU/${var_ram}MB RAM, Current ${current_cpu} CPU/${current_ram}MB RAM"
echo -e "${YWB}Please ensure that the ${APP} LXC is configured with at least ${var_cpu} vCPU and ${var_ram} MB RAM for the build process.${CL}\n"
if is_unattended; then
msg_error "Aborted: under-provisioned LXC in unattended mode (${current_cpu} CPU/${current_ram}MB RAM < ${var_cpu} CPU/${var_ram}MB RAM)"
exit 113
fi
echo -ne "${INFO}${HOLD} May cause data loss! ${INFO} Continue update with under-provisioned LXC? <yes/No> "
read -r prompt </dev/tty
if [[ ! ${prompt,,} =~ ^(yes)$ ]]; then
@@ -3257,10 +3253,6 @@ check_container_storage() {
usage=$((100 * used_size / total_size))
if ((usage > 80)); then
msg_warn "Storage is dangerously low (${usage}% used on /boot)"
if is_unattended; then
msg_error "Aborted: storage too low in unattended mode (${usage}% used on /boot)"
exit 114
fi
echo -ne "Continue anyway? <y/N> "
read -r prompt </dev/tty
if [[ ! ${prompt,,} =~ ^(y|yes)$ ]]; then

View File

@@ -868,12 +868,6 @@ get_header() {
# - Returns silently if header not available
# ------------------------------------------------------------------------------
header_info() {
# Guard against printing the header twice in the same session (e.g. when
# the ct script calls header_info at global scope AND again inside
# update_script()).
[[ "${_HEADER_SHOWN:-0}" == "1" ]] && return 0
_HEADER_SHOWN=1
local app_name=$(echo "${APP,,}" | tr -d ' ')
local header_content

View File

@@ -2079,33 +2079,15 @@ get_latest_gh_tag() {
local temp_file
temp_file=$(mktemp)
if ! github_api_call "https://api.github.com/repos/${repo}/tags?per_page=50" "$temp_file"; then
rm -f "$temp_file"
return 22
fi
local tag=""
if [[ -n "$prefix" ]]; then
# Use git/matching-refs API for server-side prefix filtering. This avoids
# paging through unrelated tags (e.g. mongodb/mongo-tools where 100.x tags
# only appear after page 4 of /tags). Returns ALL tags matching the prefix
# in a single call, sorted lexicographically ascending; we pick the
# highest version using `sort -V`.
if ! github_api_call "https://api.github.com/repos/${repo}/git/matching-refs/tags/${prefix}" "$temp_file"; then
rm -f "$temp_file"
return 22
fi
local count
count=$(jq 'length' "$temp_file" 2>/dev/null || echo 0)
if [[ "$count" -gt 0 ]]; then
tag=$(jq -r '.[].ref' "$temp_file" |
sed 's|^refs/tags/||' |
sort -V |
tail -n1)
fi
tag=$(jq -r --arg p "$prefix" '[.[] | select(.name | startswith($p))][0].name // empty' "$temp_file")
else
# No prefix: just take the first (newest) tag from /tags
if ! github_api_call "https://api.github.com/repos/${repo}/tags?per_page=1" "$temp_file"; then
rm -f "$temp_file"
return 22
fi
tag=$(jq -r '.[0].name // empty' "$temp_file")
fi
@@ -8683,829 +8665,3 @@ EOF
$STD apt update
return 0
}
# ------------------------------------------------------------------------------
# Get latest GitLab release version.
# Usage: get_latest_gitlab_release "owner/repo" [strip_v]
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Resolve the most recent GitLab release tag for a project.
#
# Arguments:
#   $1 - project path "owner/repo"
#   $2 - strip leading "v" from the tag ("true" by default)
# Outputs:  the version string on stdout
# Returns:  0 on success, 22 on API failure, 250 when no tag could be parsed
# Honors GITLAB_TOKEN for private or rate-limited projects.
# ------------------------------------------------------------------------------
get_latest_gitlab_release() {
  local project="$1"
  local drop_v_prefix="${2:-true}"

  # GitLab's project API expects the URL-encoded "owner%2Frepo" form.
  local project_id
  project_id=$(printf '%s' "$project" | sed 's|/|%2F|g')

  local auth=()
  if [[ -n "${GITLAB_TOKEN:-}" ]]; then
    auth=(-H "PRIVATE-TOKEN: $GITLAB_TOKEN")
  fi

  local body_file status
  body_file=$(mktemp)
  status=$(curl --connect-timeout 10 --max-time 30 -sSL \
    -w "%{http_code}" -o "$body_file" \
    "${auth[@]}" \
    "https://gitlab.com/api/v4/projects/$project_id/releases?per_page=1&order_by=released_at&sort=desc" 2>/dev/null) || true

  if [[ "$status" != "200" ]]; then
    rm -f "$body_file"
    msg_warn "GitLab API call failed for ${project} (HTTP ${status})"
    return 22
  fi

  local tag
  tag=$(jq -r '.[0].tag_name // empty' "$body_file")
  rm -f "$body_file"

  if [[ -z "$tag" ]]; then
    msg_error "Could not determine latest version for ${project}"
    return 250
  fi

  # Strip a leading "v" only when it prefixes a digit (v1.2.3, not "version-x").
  if [[ "$drop_v_prefix" == "true" && "$tag" =~ ^v[0-9] ]]; then
    tag="${tag:1}"
  fi

  echo "$tag"
}
# ------------------------------------------------------------------------------
# Checks for new GitLab release (latest tag).
#
# Description:
# - Queries the GitLab API for the latest release tag
# - Compares it to a local cached version (~/.<app>)
# - If newer, sets global CHECK_UPDATE_RELEASE and returns 0
#
# Usage:
# if check_for_gl_release "myapp" "owner/repo" [optional] "v1.2.3"; then
# # trigger update...
# fi
# exit 0
# } (end of update_script not from the function)
#
# Notes:
# - Requires `jq` (auto-installed if missing)
# - Supports GITLAB_TOKEN env var for private/rate-limited repos
# - Does not modify anything, only checks version state
# ------------------------------------------------------------------------------
check_for_gl_release() {
  # Check whether a newer GitLab release exists for $app.
  # $1 app name, $2 "owner/repo", $3 optional pinned version, $4 pin reason.
  # Sets CHECK_UPDATE_RELEASE and returns 0 when an update should run;
  # returns 1 when up-to-date; 6/22/250 on network/API/parse errors.
  local app="$1"
  local source="$2"
  local pinned_version_in="${3:-}" # optional
  local pin_reason="${4:-}"        # optional reason shown to user
  local app_lc="${app,,}"
  local current_file="$HOME/.${app_lc}"

  msg_info "Checking for update: ${app}"

  # DNS check
  if ! getent hosts gitlab.com >/dev/null 2>&1; then
    msg_error "Network error: cannot resolve gitlab.com"
    return 6
  fi

  ensure_dependencies jq

  local repo_encoded
  repo_encoded=$(printf '%s' "$source" | sed 's|/|%2F|g')
  local header=()
  [[ -n "${GITLAB_TOKEN:-}" ]] && header=(-H "PRIVATE-TOKEN: $GITLAB_TOKEN")

  # Fix: use a private mktemp file instead of the fixed /tmp/gl_check.json.
  # A predictable path races when two scripts run concurrently and is open to
  # symlink pre-creation by other local users.
  local tmp_json
  tmp_json=$(mktemp)

  local releases_json="" http_code=""

  # For pinned versions, try to fetch the specific release tag first
  if [[ -n "$pinned_version_in" ]]; then
    local pinned_encoded="${pinned_version_in//\//%2F}"
    http_code=$(curl -sSL --max-time 20 -w "%{http_code}" -o "$tmp_json" \
      "${header[@]}" \
      "https://gitlab.com/api/v4/projects/$repo_encoded/releases/$pinned_encoded" 2>/dev/null) || true
    if [[ "$http_code" == "200" ]] && [[ -s "$tmp_json" ]]; then
      # Wrap the single release object in an array so both code paths below
      # can treat releases_json uniformly.
      releases_json="[$(<"$tmp_json")]"
    fi
  fi

  # Fetch full releases list if needed
  if [[ -z "$releases_json" ]]; then
    http_code=$(curl -sSL --max-time 20 -w "%{http_code}" -o "$tmp_json" \
      "${header[@]}" \
      "https://gitlab.com/api/v4/projects/$repo_encoded/releases?per_page=100&order_by=released_at&sort=desc" 2>/dev/null) || true
    if [[ "$http_code" == "200" ]] && [[ -s "$tmp_json" ]]; then
      releases_json=$(<"$tmp_json")
    elif [[ "$http_code" == "401" ]]; then
      msg_error "GitLab API authentication failed (HTTP 401)."
      if [[ -n "${GITLAB_TOKEN:-}" ]]; then
        msg_error "Your GITLAB_TOKEN appears to be invalid or expired."
      else
        msg_error "The repository may require authentication. Try: export GITLAB_TOKEN=\"glpat-your_token\""
      fi
      rm -f "$tmp_json"
      return 22
    elif [[ "$http_code" == "404" ]]; then
      msg_error "GitLab project not found (HTTP 404). Ensure '${source}' is correct and publicly accessible."
      rm -f "$tmp_json"
      return 22
    elif [[ "$http_code" == "429" ]]; then
      msg_error "GitLab API rate limit exceeded (HTTP 429)."
      msg_error "To increase the limit, export a GitLab token: export GITLAB_TOKEN=\"glpat-your_token_here\""
      rm -f "$tmp_json"
      return 22
    elif [[ "$http_code" == "000" || -z "$http_code" ]]; then
      msg_error "GitLab API connection failed (no response)."
      msg_error "Check your network/DNS: curl -sSL https://gitlab.com/api/v4/version"
      rm -f "$tmp_json"
      return 7
    else
      msg_error "Unable to fetch releases for ${app} (HTTP ${http_code})"
      rm -f "$tmp_json"
      return 22
    fi
  fi
  rm -f "$tmp_json"

  mapfile -t raw_tags < <(jq -r '.[] | .tag_name' <<<"$releases_json")
  if ((${#raw_tags[@]} == 0)); then
    msg_error "No releases found for ${app} on GitLab"
    return 250
  fi

  local clean_tags=()
  for t in "${raw_tags[@]}"; do
    # Only strip leading 'v' when followed by a digit (e.g. v1.2.3)
    if [[ "$t" =~ ^v[0-9] ]]; then
      clean_tags+=("${t:1}")
    else
      clean_tags+=("$t")
    fi
  done

  local latest_raw="${raw_tags[0]}"
  local latest_clean="${clean_tags[0]}"

  # current installed (stored without v)
  local current=""
  if [[ -f "$current_file" ]]; then
    current="$(<"$current_file")"
  else
    # Migration: search for any /opt/*_version.txt
    local legacy_files
    mapfile -t legacy_files < <(find /opt -maxdepth 1 -type f -name "*_version.txt" 2>/dev/null)
    if ((${#legacy_files[@]} == 1)); then
      current="$(<"${legacy_files[0]}")"
      echo "${current#v}" >"$current_file"
      rm -f "${legacy_files[0]}"
    fi
  fi
  if [[ "$current" =~ ^v[0-9] ]]; then
    current="${current:1}"
  fi

  # Pinned version handling
  if [[ -n "$pinned_version_in" ]]; then
    local pin_clean
    if [[ "$pinned_version_in" =~ ^v[0-9] ]]; then
      pin_clean="${pinned_version_in:1}"
    else
      pin_clean="$pinned_version_in"
    fi
    local match_raw=""
    for i in "${!clean_tags[@]}"; do
      if [[ "${clean_tags[$i]}" == "$pin_clean" ]]; then
        match_raw="${raw_tags[$i]}"
        break
      fi
    done
    if [[ -z "$match_raw" ]]; then
      msg_error "Pinned version ${pinned_version_in} not found upstream"
      return 250
    fi
    if [[ "$current" != "$pin_clean" ]]; then
      CHECK_UPDATE_RELEASE="$match_raw"
      msg_ok "Update available: ${app} ${current:-not installed} → ${pin_clean}"
      return 0
    fi
    if [[ -n "$pin_reason" ]]; then
      msg_ok "No update available: ${app} (${current}) - update held back: ${pin_reason}"
    else
      msg_ok "No update available: ${app} (${current}) - update temporarily held back due to issues with newer releases"
    fi
    return 1
  fi

  # No pinning → use latest
  if [[ -z "$current" || "$current" != "$latest_clean" ]]; then
    CHECK_UPDATE_RELEASE="$latest_raw"
    msg_ok "Update available: ${app} ${current:-not installed} → ${latest_clean}"
    return 0
  fi

  msg_ok "No update available: ${app} (${latest_clean})"
  return 1
}
# ------------------------------------------------------------------------------
# Scan older GitLab releases for a matching asset (fallback helper).
#
# Description:
# When the latest release does not contain the expected asset
# (e.g. .deb for the current arch, or a custom pattern), walks back
# through up to 15 recent releases and returns the first release JSON
# that has a matching asset. Used internally by fetch_and_deploy_gl_release.
#
# Usage (internal):
# _gl_scan_older_releases "owner/repo" "owner%2Frepo" "https://gitlab.com" \
# "binary|prebuild|singlefile" "$asset_pattern" "$skip_tag"
#
# Returns:
# - stdout: JSON of the matching release (single object) on success
# - 0 on success, 22 on API error, 250 if no match found
# ------------------------------------------------------------------------------
_gl_scan_older_releases() {
  # Walk back through up to 15 recent GitLab releases and print (on stdout)
  # the JSON of the first one that carries an asset matching the mode/pattern.
  # Callers capture stdout — everything else must stay off stdout.
  local repo="$1"
  local repo_encoded="$2"
  local base_url="${3:-https://gitlab.com}"
  local mode="$4"          # "binary" | "prebuild" | "singlefile"
  local asset_pattern="$5" # shell glob matched against asset basenames
  local skip_tag="$6"      # tag already checked by the caller

  local header=()
  [[ -n "${GITLAB_TOKEN:-}" ]] && header=(-H "PRIVATE-TOKEN: $GITLAB_TOKEN")

  local releases_list
  releases_list=$(curl --connect-timeout 10 --max-time 30 -fsSL \
    "${header[@]}" \
    "${base_url}/api/v4/projects/${repo_encoded}/releases?per_page=15&order_by=released_at&sort=desc" 2>/dev/null) || {
    # NOTE(review): msg_warn runs while stdout is being captured by the
    # caller — assumes msg_warn writes to stderr; verify in build.func.
    msg_warn "Failed to fetch older releases for ${repo}"
    return 22
  }

  local count
  count=$(echo "$releases_list" | jq 'length' 2>/dev/null || echo 0)
  [[ "$count" -eq 0 ]] && return 250

  for ((i = 0; i < count; i++)); do
    local rel_tag
    rel_tag=$(echo "$releases_list" | jq -r ".[$i].tag_name")
    # Skip the tag we already checked
    [[ "$rel_tag" == "$skip_tag" ]] && continue
    # Asset URLs for this release (direct_asset_url preferred, fallback to url)
    local asset_urls
    asset_urls=$(echo "$releases_list" | jq -r ".[$i].assets.links // [] | .[] | .direct_asset_url // .url")
    [[ -z "$asset_urls" ]] && continue

    local has_match=false
    if [[ "$mode" == "binary" ]]; then
      local arch
      arch=$(dpkg --print-architecture 2>/dev/null || uname -m)
      [[ "$arch" == "x86_64" ]] && arch="amd64"
      [[ "$arch" == "aarch64" ]] && arch="arm64"
      # Check with explicit pattern first, then arch heuristic, then any .deb
      if [[ -n "$asset_pattern" ]]; then
        while read -r u; do
          # case pattern is deliberately unquoted: $asset_pattern is a glob.
          case "${u##*/}" in
            $asset_pattern)
              has_match=true
              break
              ;;
          esac
        done <<<"$asset_urls"
      fi
      if [[ "$has_match" != "true" ]]; then
        echo "$asset_urls" | grep -qE "($arch|amd64|x86_64|aarch64|arm64).*\.deb$" && has_match=true
      fi
      if [[ "$has_match" != "true" ]]; then
        echo "$asset_urls" | grep -qE '\.deb$' && has_match=true
      fi
    elif [[ "$mode" == "prebuild" || "$mode" == "singlefile" ]]; then
      while read -r u; do
        case "${u##*/}" in
          $asset_pattern)
            has_match=true
            break
            ;;
        esac
      done <<<"$asset_urls"
    fi

    if [[ "$has_match" == "true" ]]; then
      local use_fallback="y"
      # Only prompt on an interactive stdin; unattended runs auto-accept.
      if [[ -t 0 ]]; then
        msg_warn "Release ${skip_tag} has no matching asset. Previous release ${rel_tag} has a compatible asset."
        read -rp "Use version ${rel_tag} instead? [Y/n] (auto-yes in 60s): " -t 60 use_fallback || use_fallback="y"
        use_fallback="${use_fallback:-y}"
      fi
      if [[ "${use_fallback,,}" == "y" || "${use_fallback,,}" == "yes" ]]; then
        # Success: emit the chosen release object for the caller to consume.
        echo "$releases_list" | jq ".[$i]"
        return 0
      else
        return 250
      fi
    fi
  done
  return 250
}
# ------------------------------------------------------------------------------
# Fetch a GitLab release and deploy it.
#
# Arguments:
#   $1 - app name (defaults to repo basename)
#   $2 - "owner/repo"
#   $3 - mode: tarball|source | binary (.deb) | prebuild (archive) | singlefile
#   $4 - version tag or "latest" (overridable via $var_appversion)
#   $5 - target dir (default /opt/<app>)
#   $6 - asset filename glob (required for prebuild/singlefile)
# Writes the deployed version to ~/.<app>; honors GITLAB_TOKEN, CLEAN_INSTALL,
# USE_ORIGINAL_FILENAME, DPKG_FORCE_CONFOLD/NEW.
# ------------------------------------------------------------------------------
function fetch_and_deploy_gl_release() {
  local app="$1"
  local repo="$2"
  local mode="${3:-tarball}"
  local version="${var_appversion:-${4:-latest}}"
  local target="${5:-/opt/$app}"
  local asset_pattern="${6:-}"

  if [[ -z "$app" ]]; then
    app="${repo##*/}"
    if [[ -z "$app" ]]; then
      msg_error "fetch_and_deploy_gl_release requires app name or valid repo"
      return 1
    fi
  fi

  # Version cache lives at ~/.<app-lowercase-no-spaces>
  local app_lc=$(echo "${app,,}" | tr -d ' ')
  local version_file="$HOME/.${app_lc}"
  local api_timeout="--connect-timeout 10 --max-time 60"
  local download_timeout="--connect-timeout 15 --max-time 900"
  local current_version=""
  [[ -f "$version_file" ]] && current_version=$(<"$version_file")

  ensure_dependencies jq

  local repo_encoded
  repo_encoded=$(printf '%s' "$repo" | sed 's|/|%2F|g')
  local api_base="https://gitlab.com/api/v4/projects/$repo_encoded/releases"
  local api_url
  if [[ "$version" != "latest" ]]; then
    api_url="$api_base/$version"
  else
    api_url="$api_base?per_page=1&order_by=released_at&sort=desc"
  fi

  local header=()
  [[ -n "${GITLAB_TOKEN:-}" ]] && header=(-H "PRIVATE-TOKEN: $GITLAB_TOKEN")

  # Metadata fetch with backoff on 429.
  # NOTE(review): /tmp/gl_rel.json is a fixed, predictable temp path — racy
  # with concurrent runs; consider mktemp like the download dir below.
  local max_retries=3 retry_delay=2 attempt=1 success=false http_code
  while ((attempt <= max_retries)); do
    http_code=$(curl $api_timeout -sSL -w "%{http_code}" -o /tmp/gl_rel.json "${header[@]}" "$api_url" 2>/dev/null) || true
    if [[ "$http_code" == "200" ]]; then
      success=true
      break
    elif [[ "$http_code" == "429" ]]; then
      if ((attempt < max_retries)); then
        msg_warn "GitLab API rate limit hit, retrying in ${retry_delay}s... (attempt $attempt/$max_retries)"
        sleep "$retry_delay"
        retry_delay=$((retry_delay * 2))
      fi
    else
      sleep "$retry_delay"
    fi
    ((attempt++))
  done

  if ! $success; then
    if [[ "$http_code" == "401" ]]; then
      msg_error "GitLab API authentication failed (HTTP 401)."
      if [[ -n "${GITLAB_TOKEN:-}" ]]; then
        msg_error "Your GITLAB_TOKEN appears to be invalid or expired."
      else
        msg_error "The repository may require authentication. Try: export GITLAB_TOKEN=\"glpat-your_token\""
      fi
    elif [[ "$http_code" == "404" ]]; then
      msg_error "GitLab project or release not found (HTTP 404)."
      msg_error "Ensure '$repo' is correct and the project is accessible."
    elif [[ "$http_code" == "429" ]]; then
      msg_error "GitLab API rate limit exceeded (HTTP 429)."
      msg_error "To increase the limit, export a GitLab token before running the script:"
      msg_error " export GITLAB_TOKEN=\"glpat-your_token_here\""
    elif [[ "$http_code" == "000" || -z "$http_code" ]]; then
      msg_error "GitLab API connection failed (no response)."
      msg_error "Check your network/DNS: curl -sSL https://gitlab.com/api/v4/version"
    else
      msg_error "Failed to fetch release metadata (HTTP $http_code)"
    fi
    return 1
  fi

  local json tag_name
  json=$(</tmp/gl_rel.json)
  if [[ "$version" == "latest" ]]; then
    # "latest" returns an array of one; unwrap to a single release object.
    json=$(echo "$json" | jq '.[0] // empty')
    if [[ -z "$json" || "$json" == "null" ]]; then
      msg_error "No releases found for $repo on GitLab"
      return 1
    fi
  fi

  tag_name=$(echo "$json" | jq -r '.tag_name // empty')
  if [[ -z "$tag_name" ]]; then
    msg_error "Could not determine tag name from release metadata"
    return 1
  fi

  # Normalize: v1.2.3 -> 1.2.3; slashes in tags become dashes for filenames.
  [[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
  local version_safe="${version//\//-}"

  if [[ "$current_version" == "$version" ]]; then
    $STD msg_ok "$app is already up-to-date (v$version)"
    return 0
  fi

  local tmpdir
  tmpdir=$(mktemp -d) || return 1
  local filename=""

  msg_info "Fetching GitLab release: $app ($version)"

  # Helper: print all downloadable asset URLs of a release, one per line.
  _gl_asset_urls() {
    local release_json="$1"
    echo "$release_json" | jq -r '
      (.assets.links // [])[] | .direct_asset_url // .url
    '
  }

  ### Tarball Mode ###
  if [[ "$mode" == "tarball" || "$mode" == "source" ]]; then
    # Source archives are not release assets; use the /-/archive/ endpoint.
    local direct_tarball_url="https://gitlab.com/$repo/-/archive/$tag_name/${app_lc}-${version_safe}.tar.gz"
    filename="${app_lc}-${version_safe}.tar.gz"

    curl $download_timeout -fsSL "${header[@]}" -o "$tmpdir/$filename" "$direct_tarball_url" || {
      msg_error "Download failed: $direct_tarball_url"
      rm -rf "$tmpdir"
      return 1
    }

    mkdir -p "$target"
    if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then
      rm -rf "${target:?}/"*
    fi

    tar --no-same-owner -xzf "$tmpdir/$filename" -C "$tmpdir" || {
      msg_error "Failed to extract tarball"
      rm -rf "$tmpdir"
      return 1
    }

    # Flatten the single top-level directory GitLab archives always contain.
    local unpack_dir
    unpack_dir=$(find "$tmpdir" -mindepth 1 -maxdepth 1 -type d | head -n1)

    shopt -s dotglob nullglob
    cp -r "$unpack_dir"/* "$target/"
    shopt -u dotglob nullglob

  ### Binary Mode ###
  elif [[ "$mode" == "binary" ]]; then
    local arch
    arch=$(dpkg --print-architecture 2>/dev/null || uname -m)
    [[ "$arch" == "x86_64" ]] && arch="amd64"
    [[ "$arch" == "aarch64" ]] && arch="arm64"

    # Selection order: explicit pattern, then arch-matching .deb, then any .deb.
    local assets url_match=""
    assets=$(_gl_asset_urls "$json")

    if [[ -n "$asset_pattern" ]]; then
      for u in $assets; do
        case "${u##*/}" in
          $asset_pattern)
            url_match="$u"
            break
            ;;
        esac
      done
    fi

    if [[ -z "$url_match" ]]; then
      for u in $assets; do
        if [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]]; then
          url_match="$u"
          break
        fi
      done
    fi

    if [[ -z "$url_match" ]]; then
      for u in $assets; do
        [[ "$u" =~ \.deb$ ]] && url_match="$u" && break
      done
    fi

    # Nothing in this release — offer an older release with a usable asset.
    if [[ -z "$url_match" ]]; then
      local fallback_json
      if fallback_json=$(_gl_scan_older_releases "$repo" "$repo_encoded" "https://gitlab.com" "binary" "$asset_pattern" "$tag_name"); then
        json="$fallback_json"
        tag_name=$(echo "$json" | jq -r '.tag_name // empty')
        [[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
        msg_info "Fetching GitLab release: $app ($version)"
        assets=$(_gl_asset_urls "$json")
        if [[ -n "$asset_pattern" ]]; then
          for u in $assets; do
            case "${u##*/}" in
              $asset_pattern)
                url_match="$u"
                break
                ;;
            esac
          done
        fi
        if [[ -z "$url_match" ]]; then
          for u in $assets; do
            [[ "$u" =~ ($arch|amd64|x86_64|aarch64|arm64).*\.deb$ ]] && url_match="$u" && break
          done
        fi
        if [[ -z "$url_match" ]]; then
          for u in $assets; do
            [[ "$u" =~ \.deb$ ]] && url_match="$u" && break
          done
        fi
      fi
    fi

    if [[ -z "$url_match" ]]; then
      msg_error "No suitable .deb asset found for $app"
      rm -rf "$tmpdir"
      return 1
    fi

    filename="${url_match##*/}"
    curl $download_timeout -fsSL "${header[@]}" -o "$tmpdir/$filename" "$url_match" || {
      msg_error "Download failed: $url_match"
      rm -rf "$tmpdir"
      return 1
    }

    chmod 644 "$tmpdir/$filename"
    local dpkg_opts=""
    [[ "${DPKG_FORCE_CONFOLD:-}" == "1" ]] && dpkg_opts="-o Dpkg::Options::=--force-confold"
    [[ "${DPKG_FORCE_CONFNEW:-}" == "1" ]] && dpkg_opts="-o Dpkg::Options::=--force-confnew"
    # apt resolves dependencies; raw dpkg -i is the fallback.
    DEBIAN_FRONTEND=noninteractive SYSTEMD_OFFLINE=1 $STD apt install -y $dpkg_opts "$tmpdir/$filename" || {
      SYSTEMD_OFFLINE=1 $STD dpkg -i "$tmpdir/$filename" || {
        msg_error "Both apt and dpkg installation failed"
        rm -rf "$tmpdir"
        return 1
      }
    }

  ### Prebuild Mode ###
  elif [[ "$mode" == "prebuild" ]]; then
    # Strip accidental surrounding double quotes from the pattern argument.
    local pattern="${6%\"}"
    pattern="${pattern#\"}"
    [[ -z "$pattern" ]] && {
      msg_error "Mode 'prebuild' requires 6th parameter (asset filename pattern)"
      rm -rf "$tmpdir"
      return 1
    }

    local asset_url=""
    for u in $(_gl_asset_urls "$json"); do
      filename_candidate="${u##*/}"
      case "$filename_candidate" in
        $pattern)
          asset_url="$u"
          break
          ;;
      esac
    done

    if [[ -z "$asset_url" ]]; then
      local fallback_json
      if fallback_json=$(_gl_scan_older_releases "$repo" "$repo_encoded" "https://gitlab.com" "prebuild" "$pattern" "$tag_name"); then
        json="$fallback_json"
        tag_name=$(echo "$json" | jq -r '.tag_name // empty')
        [[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
        msg_info "Fetching GitLab release: $app ($version)"
        for u in $(_gl_asset_urls "$json"); do
          filename_candidate="${u##*/}"
          case "$filename_candidate" in
            $pattern)
              asset_url="$u"
              break
              ;;
          esac
        done
      fi
    fi

    [[ -z "$asset_url" ]] && {
      msg_error "No asset matching '$pattern' found"
      rm -rf "$tmpdir"
      return 1
    }

    filename="${asset_url##*/}"
    curl $download_timeout -fsSL "${header[@]}" -o "$tmpdir/$filename" "$asset_url" || {
      msg_error "Download failed: $asset_url"
      rm -rf "$tmpdir"
      return 1
    }

    local unpack_tmp
    unpack_tmp=$(mktemp -d)
    mkdir -p "$target"
    if [[ "${CLEAN_INSTALL:-0}" == "1" ]]; then
      rm -rf "${target:?}/"*
    fi

    if [[ "$filename" == *.zip ]]; then
      ensure_dependencies unzip
      unzip -q "$tmpdir/$filename" -d "$unpack_tmp" || {
        msg_error "Failed to extract ZIP archive"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      }
    elif [[ "$filename" == *.tar.* || "$filename" == *.tgz || "$filename" == *.txz ]]; then
      tar --no-same-owner -xf "$tmpdir/$filename" -C "$unpack_tmp" || {
        msg_error "Failed to extract TAR archive"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      }
    else
      msg_error "Unsupported archive format: $filename"
      rm -rf "$tmpdir" "$unpack_tmp"
      return 1
    fi

    # Flatten a wrapper directory if the archive has exactly one top entry.
    local top_entries inner_dir
    top_entries=$(find "$unpack_tmp" -mindepth 1 -maxdepth 1)
    if [[ "$(echo "$top_entries" | wc -l)" -eq 1 && -d "$top_entries" ]]; then
      inner_dir="$top_entries"
      shopt -s dotglob nullglob
      if compgen -G "$inner_dir/*" >/dev/null; then
        cp -r "$inner_dir"/* "$target/" || {
          msg_error "Failed to copy contents from $inner_dir to $target"
          rm -rf "$tmpdir" "$unpack_tmp"
          return 1
        }
      else
        msg_error "Inner directory is empty: $inner_dir"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      fi
      shopt -u dotglob nullglob
    else
      shopt -s dotglob nullglob
      if compgen -G "$unpack_tmp/*" >/dev/null; then
        cp -r "$unpack_tmp"/* "$target/" || {
          msg_error "Failed to copy contents to $target"
          rm -rf "$tmpdir" "$unpack_tmp"
          return 1
        }
      else
        msg_error "Unpacked archive is empty"
        rm -rf "$tmpdir" "$unpack_tmp"
        return 1
      fi
      shopt -u dotglob nullglob
    fi

  ### Singlefile Mode ###
  elif [[ "$mode" == "singlefile" ]]; then
    # Strip accidental surrounding double quotes from the pattern argument.
    local pattern="${6%\"}"
    pattern="${pattern#\"}"
    [[ -z "$pattern" ]] && {
      msg_error "Mode 'singlefile' requires 6th parameter (asset filename pattern)"
      rm -rf "$tmpdir"
      return 1
    }

    local asset_url=""
    for u in $(_gl_asset_urls "$json"); do
      filename_candidate="${u##*/}"
      case "$filename_candidate" in
        $pattern)
          asset_url="$u"
          break
          ;;
      esac
    done

    if [[ -z "$asset_url" ]]; then
      local fallback_json
      if fallback_json=$(_gl_scan_older_releases "$repo" "$repo_encoded" "https://gitlab.com" "singlefile" "$pattern" "$tag_name"); then
        json="$fallback_json"
        tag_name=$(echo "$json" | jq -r '.tag_name // empty')
        [[ "$tag_name" =~ ^v[0-9] ]] && version="${tag_name:1}" || version="$tag_name"
        msg_info "Fetching GitLab release: $app ($version)"
        for u in $(_gl_asset_urls "$json"); do
          filename_candidate="${u##*/}"
          case "$filename_candidate" in
            $pattern)
              asset_url="$u"
              break
              ;;
          esac
        done
      fi
    fi

    [[ -z "$asset_url" ]] && {
      msg_error "No asset matching '$pattern' found"
      rm -rf "$tmpdir"
      return 1
    }

    filename="${asset_url##*/}"
    mkdir -p "$target"

    # Download as <app> unless USE_ORIGINAL_FILENAME=true keeps the asset name.
    local use_filename="${USE_ORIGINAL_FILENAME:-false}"
    local target_file="$app"
    [[ "$use_filename" == "true" ]] && target_file="$filename"

    curl $download_timeout -fsSL "${header[@]}" -o "$target/$target_file" "$asset_url" || {
      msg_error "Download failed: $asset_url"
      rm -rf "$tmpdir"
      return 1
    }

    # Jars are not executables; everything else gets the +x bit.
    if [[ "$target_file" != *.jar && -f "$target/$target_file" ]]; then
      chmod +x "$target/$target_file"
    fi

  else
    msg_error "Unknown mode: $mode"
    rm -rf "$tmpdir"
    return 1
  fi

  echo "$version" >"$version_file"
  msg_ok "Deployed: $app ($version)"
  rm -rf "$tmpdir"
}
# ------------------------------------------------------------------------------
# Download NLTK data packages directly from GitHub, bypassing Python.
# Avoids CPU-instruction failures (SIGILL) on older hardware lacking AVX.
#
# Usage:
# setup_nltk "averaged_perceptron_tagger_eng" "/nltk_data"
# setup_nltk "snowball_data stopwords punkt_tab" "/usr/share/nltk_data"
#
# Parameters:
# $1 - Space-separated list of NLTK package IDs
# $2 - Target directory (default: /usr/share/nltk_data)
#
# Returns: 0 on success, non-zero if any package failed
# ------------------------------------------------------------------------------
function setup_nltk() {
  # Download NLTK data packages straight from the nltk_data GitHub index,
  # bypassing Python (avoids SIGILL crashes on CPUs lacking AVX).
  #
  # Arguments:
  #   $1 - space-separated list of NLTK package IDs (required)
  #   $2 - target directory (default: /usr/share/nltk_data)
  # Returns: 0 on success, 1 if any package failed to resolve/download/extract
  local packages="${1:?setup_nltk requires at least one package name}"
  local target_dir="${2:-/usr/share/nltk_data}"
  local NLTK_INDEX_URL="https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/index.xml"
  local index_xml rc=0

  ensure_dependencies unzip

  index_xml=$(curl_with_retry "$NLTK_INDEX_URL" "-") || {
    msg_error "Failed to fetch NLTK package index"
    return 1
  }

  local pkg
  for pkg in $packages; do
    msg_info "Downloading NLTK: $pkg"

    local pkg_line subdir pkg_url do_unzip tmp_zip
    # The index is XML, but every <package .../> entry sits on a single line,
    # so a line-oriented grep/PCRE parse is sufficient here.
    pkg_line=$(echo "$index_xml" | grep "id=\"${pkg}\"" | head -1)
    if [[ -z "$pkg_line" ]]; then
      msg_error "NLTK package not found in index: $pkg"
      rc=1
      continue
    fi

    subdir=$(echo "$pkg_line" | grep -oP 'subdir="\K[^"]+')
    pkg_url=$(echo "$pkg_line" | grep -oP 'url="\K[^"]+')
    do_unzip=$(echo "$pkg_line" | grep -oP 'unzip="\K[^"]+')

    if [[ -z "$subdir" || -z "$pkg_url" ]]; then
      msg_error "Could not parse NLTK index entry for: $pkg"
      rc=1
      continue
    fi

    mkdir -p "${target_dir}/${subdir}"
    tmp_zip=$(mktemp --suffix=.zip)

    if CURL_TIMEOUT=120 curl_with_retry "$pkg_url" "$tmp_zip"; then
      if [[ "$do_unzip" == "1" ]]; then
        # Check the unzip status: a truncated or corrupt archive must not be
        # reported as success (the original silently ignored extraction errors).
        if $STD unzip -q -o "$tmp_zip" -d "${target_dir}/${subdir}/"; then
          msg_ok "Downloaded NLTK: $pkg"
        else
          msg_error "Failed to extract NLTK package: $pkg"
          rc=1
        fi
        rm -f "$tmp_zip"
      else
        # Index says keep the archive as-is (unzip="0" or attribute absent).
        mv "$tmp_zip" "${target_dir}/${subdir}/${pkg}.zip"
        msg_ok "Downloaded NLTK: $pkg"
      fi
    else
      msg_error "Failed to download NLTK package: $pkg"
      rm -f "$tmp_zip"
      rc=1
    fi
  done

  return $rc
}

View File

@@ -55,7 +55,7 @@ EOF
# HELPER FUNCTIONS
# ==============================================================================
get_ip() {
  # Print the host's primary IPv4 address, falling back to `ip addr` and
  # finally to 127.0.0.1 so the function always emits exactly one value.
  #
  # NOTE: a pipeline's exit status is that of its LAST command (awk), so
  # `hostname -I | awk ... || fallback` never triggers the fallback even when
  # hostname fails. Test the captured output for emptiness instead.
  # (The legacy duplicate ifconfig line has been removed — it caused a second
  # address to be printed after the hostname -I result.)
  local ip
  ip=$(hostname -I 2>/dev/null | awk '{print $1}')
  if [ -z "$ip" ]; then
    ip=$(ip -4 addr show scope global 2>/dev/null | awk '/inet / {print $2}' | cut -d/ -f1 | head -n1)
  fi
  echo "${ip:-127.0.0.1}"
}
# ==============================================================================

View File

@@ -57,9 +57,7 @@ start_routines() {
yes)
msg_info "Switching to Debian 13 (Trixie) Sources"
rm -f /etc/apt/sources.list.d/*.list
if [ -f /etc/apt/sources.list ]; then
sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list
fi
sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list || true
cat >/etc/apt/sources.list.d/debian.sources <<EOF
Types: deb
URIs: http://deb.debian.org/debian

View File

@@ -188,9 +188,7 @@ start_routines_4() {
yes)
msg_info "Correcting Debian Sources (deb822)"
rm -f /etc/apt/sources.list.d/*.list
if [ -f /etc/apt/sources.list ]; then
sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list
fi
sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list || true
cat >/etc/apt/sources.list.d/debian.sources <<EOF
Types: deb
URIs: http://deb.debian.org/debian/

View File

@@ -251,10 +251,8 @@ start_routines_9() {
msg_info "Correcting Proxmox VE Sources (deb822)"
# remove all existing .list files
rm -f /etc/apt/sources.list.d/*.list
# remove bookworm and proxmox entries from sources.list (if it exists)
if [ -f /etc/apt/sources.list ]; then
sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list
fi
# remove bookworm and proxmox entries from sources.list
sed -i '/proxmox/d;/bookworm/d' /etc/apt/sources.list || true
# Create new deb822 sources
cat >/etc/apt/sources.list.d/debian.sources <<EOF
Types: deb

View File

@@ -42,17 +42,6 @@ var_skip_confirm="${var_skip_confirm:-no}"
# Options: "yes" | "no" | "" (empty = interactive prompt)
var_auto_reboot="${var_auto_reboot:-}"
# var_continue_on_error: Continue updating remaining containers if one update fails
# Options: "yes" | "no" (default: no = stop on first error)
# Note: containers with backups always attempt restore on failure regardless of this setting
var_continue_on_error="${var_continue_on_error:-no}"
# var_dry_run: Check for available updates without applying them
# Options: "yes" | "no" (default: no)
# Output: lists each container with current vs. latest version
# Note: requires the container to be running; does not modify any container
var_dry_run="${var_dry_run:-no}"
# var_tags: Optionally override the tags used for auto-detection
# Options: "community-script|proxmox-helper-scripts" (default)
var_tags="${var_tags:-community-script|proxmox-helper-scripts}"
@@ -70,8 +59,6 @@ function export_config_json() {
"var_unattended": "${var_unattended}",
"var_skip_confirm": "${var_skip_confirm}",
"var_auto_reboot": "${var_auto_reboot}",
"var_continue_on_error": "${var_continue_on_error}",
"var_dry_run": "${var_dry_run}",
"var_tags": "${var_tags}"
}
EOF
@@ -91,12 +78,10 @@ Environment Variables:
var_backup Enable backup before update (yes/no)
var_backup_storage Storage location for backups
var_container Container selection (all/all_running/all_stopped/101,102,...)
var_unattended Run updates unattended (yes/no)
var_skip_confirm Skip initial confirmation (yes/no)
var_auto_reboot Auto-reboot containers if required (yes/no)
var_continue_on_error Continue to next container on update failure (yes/no)
var_dry_run Check for updates without applying them (yes/no)
var_tags Optionally override auto-detection tags ("prod|smb|community-script")
var_unattended Run updates unattended (yes/no)
var_skip_confirm Skip initial confirmation (yes/no)
var_auto_reboot Auto-reboot containers if required (yes/no)
var_tags Optionally override auto-detection tags ("prod|smb|community-script")
Examples:
# Run interactively
@@ -108,12 +93,6 @@ Examples:
# Update specific containers without backup
var_backup=no var_container=101,102,105 var_unattended=yes var_skip_confirm=yes $(basename "$0")
# Unattended cron-style: skip confirm, continue on error, no backup
var_backup=no var_container=all_running var_unattended=yes var_skip_confirm=yes var_continue_on_error=yes $(basename "$0")
# Dry-run: show available updates for all running containers without applying
var_container=all_running var_skip_confirm=yes var_dry_run=yes $(basename "$0")
# Export current configuration
$(basename "$0") --export-config
EOF
@@ -152,62 +131,6 @@ function detect_service() {
popd >/dev/null
}
# dry_run_container <ctid> <service>
# Report whether an update is available for a container WITHOUT applying it.
# Reads the global $script (the fetched ct update script text) and writes the
# global DRY_RUN_RESULT for the summary report; never modifies the container.
function dry_run_container() {
  local container="$1"
  local service="$2"
  # Extract app name and source repo directly from check_for_gh_release call in the ct script
  # Pattern: check_for_gh_release "appname" "owner/repo"
  local check_line app_name app_lc source_repo
  check_line=$(echo "$script" | grep -m1 'check_for_gh_release')
  if [[ -z "$check_line" ]]; then
    echo -e "${YW}[DRY-RUN]${CL} Container $container ($service): no check_for_gh_release found — skipping"
    DRY_RUN_RESULT="no check_for_gh_release found — skipping"
    return
  fi
  # Quoted fields 2 and 4 of the call are "appname" and "owner/repo".
  app_name=$(echo "$check_line" | cut -d'"' -f2)
  source_repo=$(echo "$check_line" | cut -d'"' -f4)
  # Lowercased, space-free app name matches the version file ~/.<appname>.
  app_lc=$(echo "${app_name,,}" | tr -d ' ')
  if [[ -z "$source_repo" || "$source_repo" != *"/"* ]]; then
    echo -e "${YW}[DRY-RUN]${CL} Container $container ($service): cannot parse source repo — skipping"
    DRY_RUN_RESULT="cannot parse source repo — skipping"
    return
  fi
  # Read installed version from container (stored by check_for_gh_release as ~/.<appname>)
  local current_version
  current_version=$(pct exec "$container" -- bash -c "cat \$HOME/.${app_lc} 2>/dev/null" 2>/dev/null || true)
  current_version="${current_version#v}"
  # Query latest release from GitHub API (unauthenticated; 10 s budget).
  local latest_version
  latest_version=$(curl -sSL --max-time 10 \
    -H 'Accept: application/vnd.github+json' \
    -H 'X-GitHub-Api-Version: 2022-11-28' \
    "https://api.github.com/repos/${source_repo}/releases/latest" 2>/dev/null |
    grep '"tag_name"' | head -1 | cut -d'"' -f4 | sed 's/^v//')
  if [[ -z "$latest_version" ]]; then
    echo -e "${YW}[DRY-RUN]${CL} Container $container ($service): cannot fetch latest version from $source_repo"
    DRY_RUN_RESULT="cannot fetch latest version from $source_repo"
    return
  fi
  if [[ -z "$current_version" ]]; then
    echo -e "${BL}[DRY-RUN]${CL} Container $container ($service): installed version unknown, latest: ${latest_version} (${source_repo})"
    DRY_RUN_RESULT="version unknown — latest: ${latest_version}"
  elif [[ "$current_version" == "$latest_version" ]]; then
    echo -e "${GN}[DRY-RUN]${CL} Container $container ($service): up to date (${current_version})"
    DRY_RUN_RESULT="up to date (${current_version})"
  else
    # Fix: separate the two versions (they were concatenated with no delimiter).
    echo -e "${YW}[DRY-RUN]${CL} Container $container ($service): update available ${current_version} → ${latest_version}"
    DRY_RUN_RESULT="update available ${current_version} → ${latest_version}"
  fi
}
function backup_container() {
msg_info "Creating backup for container $1"
vzdump $1 --compress zstd --storage $STORAGE_CHOICE -notes-template "{{guestname}} - community-scripts backup updater" >/dev/null 2>&1
@@ -246,32 +169,8 @@ END {
' /etc/pve/storage.cfg)
}
# Structured result tracking for the final summary report.
# Each entry: "CTID|service|STATUS|details"
declare -a UPDATE_RESULTS=()

# log_result <ctid> <service> <STATUS> <details>
# Append one pipe-delimited record to UPDATE_RESULTS.
function log_result() {
  local record
  printf -v record '%s|%s|%s|%s' "$1" "$2" "$3" "$4"
  UPDATE_RESULTS+=("$record")
}
header_info
# =============================================================================
# LOGGING SETUP
# Key events are written directly to a timestamped log file under
# /usr/local/community-scripts/update_apps/ — this avoids any stdout
# redirection that would break interactive spinners or whiptail dialogs.
# The full summary table is appended at the end of the run.
# =============================================================================
LOG_DIR="/usr/local/community-scripts/update_apps"
mkdir -p "$LOG_DIR"
LOG_FILE="${LOG_DIR}/$(date '+%Y%m%d_%H%M%S').log"
echo "Update started: $(date '+%Y-%m-%d %H:%M:%S')" >"$LOG_FILE"
# log_write <message...>
# Append a "[HH:MM:SS] message" line to the run's log file ($LOG_FILE).
function log_write() {
  printf '[%s] %s\n' "$(date '+%H:%M:%S')" "$*" >>"$LOG_FILE"
}
# Skip confirmation if var_skip_confirm is set to yes
if [[ "$var_skip_confirm" != "yes" ]]; then
whiptail --backtitle "Proxmox VE Helper Scripts" --title "LXC App Update" --yesno "This will update apps in LXCs installed by Helper-Scripts. Proceed?" 10 58 || exit
@@ -300,7 +199,7 @@ while read -r container; do
menu_items+=("$container_id" "$formatted_line" "OFF")
fi
done <<<"$containers"
msg_ok "Loaded $((${#menu_items[@]} / 3)) containers"
msg_ok "Loaded ${#menu_items[@]} containers"
# Determine container selection based on var_container
if [[ -n "$var_container" ]]; then
@@ -361,10 +260,7 @@ fi
header_info
# Determine backup choice based on var_backup
# Dry-run never needs a backup — skip the prompt entirely
if [[ "$var_dry_run" == "yes" ]]; then
BACKUP_CHOICE="no"
elif [[ -n "$var_backup" ]]; then
if [[ -n "$var_backup" ]]; then
BACKUP_CHOICE="$var_backup"
else
BACKUP_CHOICE="no"
@@ -374,10 +270,7 @@ else
fi
# Determine unattended update based on var_unattended
# Dry-run never executes updates — skip the prompt entirely
if [[ "$var_dry_run" == "yes" ]]; then
UNATTENDED_UPDATE="no"
elif [[ -n "$var_unattended" ]]; then
if [[ -n "$var_unattended" ]]; then
UNATTENDED_UPDATE="$var_unattended"
else
UNATTENDED_UPDATE="no"
@@ -428,7 +321,6 @@ fi
containers_needing_reboot=()
for container in $CHOICE; do
echo -e "${BL}[INFO]${CL} Updating container $container"
log_write "Container $container: starting"
if [ "$BACKUP_CHOICE" == "yes" ]; then
backup_container $container
@@ -450,12 +342,9 @@ for container in $CHOICE; do
#1.1) If update script not detected, return
if [ -z "${service}" ]; then
echo -e "${YW}[WARN]${CL} Update script not found. Skipping to next container"
log_result "$container" "(unknown)" "SKIPPED" "No update script found in container"
log_write "Container $container: SKIPPED — no update script found"
continue
else
echo -e "${BL}[INFO]${CL} Detected service: ${GN}${service}${CL}"
log_write "Container $container: detected service '$service'"
fi
#2) Extract service build/update resource requirements from config/installation file
@@ -502,29 +391,17 @@ for container in $CHOICE; do
fi
#3) if build resources are different than run resources, then:
if [ "$UPDATE_BUILD_RESOURCES" -eq "1" ] && [[ "$var_dry_run" != "yes" ]]; then
if [ "$UPDATE_BUILD_RESOURCES" -eq "1" ]; then
pct set "$container" --cores "$build_cpu" --memory "$build_ram"
fi
#3.5) Dry-run: report update availability without applying
if [[ "$var_dry_run" == "yes" ]]; then
DRY_RUN_RESULT=""
dry_run_container "$container" "$service"
log_result "$container" "$service" "DRY-RUN" "${DRY_RUN_RESULT:-version check only}"
log_write "Container $container ($service): DRY-RUN — ${DRY_RUN_RESULT:-version check only}"
continue
fi
#4) Update service, using the update command
# Prepend a no-op 'clear' wrapper to PATH so update scripts calling clear
# don't fail without a TTY — works for all shells incl. ash (no export -f)
SETUP_CMD="mkdir -p /tmp/.nc; printf '#!/bin/sh\n:\n' > /tmp/.nc/clear; chmod +x /tmp/.nc/clear; export PATH=/tmp/.nc:\$PATH; export TERM=dumb; "
case "$os" in
alpine) pct exec "$container" -- ash -c "${SETUP_CMD}${UPDATE_CMD}" ;;
archlinux) pct exec "$container" -- bash -c "${SETUP_CMD}${UPDATE_CMD}" ;;
fedora | rocky | centos | alma) pct exec "$container" -- bash -c "${SETUP_CMD}${UPDATE_CMD}" ;;
ubuntu | debian | devuan) pct exec "$container" -- bash -c "${SETUP_CMD}${UPDATE_CMD}" ;;
opensuse) pct exec "$container" -- bash -c "${SETUP_CMD}${UPDATE_CMD}" ;;
alpine) pct exec "$container" -- ash -c "$UPDATE_CMD" ;;
archlinux) pct exec "$container" -- bash -c "$UPDATE_CMD" ;;
fedora | rocky | centos | alma) pct exec "$container" -- bash -c "$UPDATE_CMD" ;;
ubuntu | debian | devuan) pct exec "$container" -- bash -c "$UPDATE_CMD" ;;
opensuse) pct exec "$container" -- bash -c "$UPDATE_CMD" ;;
esac
exit_code=$?
@@ -546,31 +423,16 @@ for container in $CHOICE; do
if [ $exit_code -eq 0 ]; then
msg_ok "Updated container $container"
log_result "$container" "$service" "OK" "Updated successfully"
log_write "Container $container ($service): OK"
elif [ $exit_code -eq 75 ]; then
echo -e "${YW}[WARN]${CL} Container $container skipped (requires interactive mode)"
log_result "$container" "$service" "SKIPPED" "Requires interactive mode (exit 75)"
log_write "Container $container ($service): SKIPPED — requires interactive mode"
elif [ $exit_code -eq 113 ]; then
echo -e "${YW}[WARN]${CL} Container $container skipped (under-provisioned: increase CPU/RAM to match template)"
log_result "$container" "$service" "SKIPPED" "Under-provisioned — increase CPU/RAM to match template"
log_write "Container $container ($service): SKIPPED — under-provisioned"
elif [ $exit_code -eq 114 ]; then
echo -e "${YW}[WARN]${CL} Container $container skipped (storage critically low on /boot)"
log_result "$container" "$service" "SKIPPED" "Storage critically low on /boot (>80%)"
log_write "Container $container ($service): SKIPPED — storage critically low on /boot"
elif [ "$BACKUP_CHOICE" == "yes" ]; then
msg_error "Update failed for container $container (exit code: $exit_code) — attempting restore"
log_write "Container $container ($service): FAILED (exit $exit_code) — attempting restore"
msg_info "Restoring LXC $container from backup ($STORAGE_CHOICE)"
pct stop $container
LXC_STORAGE=$(pct config $container | awk -F '[:,]' '/rootfs/ {print $2}')
BACKUP_ENTRY=$(pvesm list "$STORAGE_CHOICE" 2>/dev/null | awk -v ctid="$container" '$1 ~ "vzdump-lxc-"ctid"-" || $1 ~ "/ct/"ctid"/" {print $1}' | sort -r | head -n1)
if [ -z "$BACKUP_ENTRY" ]; then
msg_error "No backup found in storage $STORAGE_CHOICE for container $container"
log_result "$container" "$service" "FAILED" "Update failed (exit $exit_code) — no backup found for restore"
log_write "Container $container ($service): FAILED — no backup found for restore"
exit 235
fi
msg_info "Restoring from: $BACKUP_ENTRY"
@@ -579,76 +441,19 @@ for container in $CHOICE; do
if [ $restorestatus -eq 0 ]; then
pct start $container
msg_ok "Container $container successfully restored from backup"
log_result "$container" "$service" "RESTORED" "Update failed (exit $exit_code) — restored from backup"
log_write "Container $container ($service): RESTORED from $BACKUP_ENTRY"
else
msg_error "Restore failed for container $container"
log_result "$container" "$service" "FAILED" "Update failed (exit $exit_code) — restore also failed"
log_write "Container $container ($service): FAILED — restore also failed"
exit 235
fi
else
msg_error "Update failed for container $container (exit code: $exit_code)"
log_result "$container" "$service" "FAILED" "Exit code $exit_code"
log_write "Container $container ($service): FAILED (exit $exit_code)"
if [[ "$var_continue_on_error" == "yes" ]]; then
echo -e "${YW}[WARN]${CL} Continuing to next container (var_continue_on_error=yes)"
continue
else
exit "$exit_code"
fi
msg_error "Update failed for container $container. Exiting"
exit "$exit_code"
fi
done
wait
header_info
if [[ "$var_dry_run" == "yes" ]]; then
echo -e "${GN}Dry-run complete. No containers were modified.${CL}\n"
else
echo -e "${GN}The process is complete, and the containers have been successfully updated.${CL}\n"
fi
# =============================================================================
# SUMMARY REPORT
# =============================================================================
if [ "${#UPDATE_RESULTS[@]}" -gt 0 ]; then
SEPARATOR="━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
HEADER=$(printf " %-8s %-22s %-10s %s" "CTID" "Service" "Status" "Details")
# terminal output (with colours)
echo ""
echo "$SEPARATOR"
echo "$HEADER"
echo "$SEPARATOR"
for entry in "${UPDATE_RESULTS[@]}"; do
IFS='|' read -r _ctid _svc _status _details <<<"$entry"
case "$_status" in
OK) _color="${GN}" ;;
FAILED) _color="${RD}" ;;
RESTORED) _color="${YW}" ;;
*) _color="${YW}" ;;
esac
printf " %-8s %-22s ${_color}%-10s${CL} %s\n" "$_ctid" "$_svc" "$_status" "$_details"
done
echo "$SEPARATOR"
echo ""
echo "Full log: $LOG_FILE"
echo ""
# append plain-text summary to log file
{
echo ""
echo "Update finished: $(date '+%Y-%m-%d %H:%M:%S')"
echo "$SEPARATOR"
echo "$HEADER"
echo "$SEPARATOR"
for entry in "${UPDATE_RESULTS[@]}"; do
IFS='|' read -r _ctid _svc _status _details <<<"$entry"
printf " %-8s %-22s %-10s %s\n" "$_ctid" "$_svc" "$_status" "$_details"
done
echo "$SEPARATOR"
} >>"$LOG_FILE"
fi
echo -e "${GN}The process is complete, and the containers have been successfully updated.${CL}\n"
if [ "${#containers_needing_reboot[@]}" -gt 0 ]; then
echo -e "${RD}The following containers require a reboot:${CL}"
for container_name in "${containers_needing_reboot[@]}"; do

View File

@@ -494,7 +494,7 @@ fi
msg_ok "Using ${CL}${BL}$STORAGE${CL} ${GN}for Storage Location."
msg_ok "Virtual Machine ID is ${CL}${BL}$VMID${CL}."
msg_info "Retrieving the URL for the Ubuntu 25.04 Disk Image"
URL=https://cloud-images.ubuntu.com/releases/server/plucky/release/ubuntu-25.04-server-cloudimg-amd64.img
URL=https://cloud-images.ubuntu.com/plucky/current/plucky-server-cloudimg-amd64.img
sleep 2
msg_ok "${CL}${BL}${URL}${CL}"
curl -f#SL -o "$(basename "$URL")" "$URL"