feat(deploy): implement blue-green deployment strategy

This commit replaces the previous deployment mechanism with a blue-green strategy to lay the groundwork for zero-downtime deployments.
Key changes:
Introduces a deploy-blue-green.sh script to manage "blue" and "green" container sets, creating versioned releases.
Updates the Anubis gatekeeper template to dynamically route traffic based on the active deployment color, allowing for seamless traffic switching.
Modifies Docker Compose files to include color-specific labels and environment variables.
Adapts the GitHub Actions workflow to execute the new blue-green deployment process.
Removes the old, now-obsolete deployment and health check scripts.
Note: Automated rollback on health-check failure is not yet implemented. A manual rollback can be performed by switching the active color back to the previous deployment.
This commit is contained in:
badblocks 2025-06-12 16:56:36 -07:00
parent a58a0e642a
commit 30ce126a07
No known key found for this signature in database
19 changed files with 1166 additions and 591 deletions

View file

@ -103,6 +103,7 @@ jobs:
- name: Extract version for Docker build - name: Extract version for Docker build
id: extract_version id: extract_version
run: | run: |
pip uninstall setuptools
pip install setuptools-scm pip install setuptools-scm
VERSION=$(python -c "from setuptools_scm import get_version; print(get_version())") VERSION=$(python -c "from setuptools_scm import get_version; print(get_version())")
echo "VERSION=${VERSION}" >> $GITHUB_ENV echo "VERSION=${VERSION}" >> $GITHUB_ENV
@ -133,9 +134,9 @@ jobs:
# Job 2: Deploy (only runs on main branch or tags) # Job 2: Deploy (only runs on main branch or tags)
deploy: deploy:
needs: build #needs: build
runs-on: ubuntu-latest runs-on: ubuntu-latest
if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) #if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/'))
# Determine environment based on ref # Determine environment based on ref
environment: ${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-prerelease')) && 'production' || 'staging' }} environment: ${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-prerelease')) && 'production' || 'staging' }}
steps: steps:
@ -173,8 +174,13 @@ jobs:
echo "📝 Setting deployment environment variables" echo "📝 Setting deployment environment variables"
echo "REPO_PROJECT_PATH=${REPO_PROJECT_PATH}" >> $GITHUB_ENV echo "REPO_PROJECT_PATH=${REPO_PROJECT_PATH}" >> $GITHUB_ENV
echo "REPO_NAME_ONLY=${REPO_NAME_ONLY}" >> $GITHUB_ENV echo "REPO_NAME_ONLY=${REPO_NAME_ONLY}" >> $GITHUB_ENV
echo "REPO=${REPO}" >> $GITHUB_ENV
echo "IMAGE_TAR_NAME=${REPO_NAME_ONLY}-${{ github.ref_name }}_${{ github.sha }}.tar" >> $GITHUB_ENV echo "IMAGE_TAR_NAME=${REPO_NAME_ONLY}-${{ github.ref_name }}_${{ github.sha }}.tar" >> $GITHUB_ENV
echo "PROD=${prod_value}" >> $GITHUB_ENV echo "PROD=${prod_value}" >> $GITHUB_ENV
echo "GIT_SHA=${{ github.sha }}" >> $GITHUB_ENV
echo "REPLICA_COUNT=${{ vars.REPLICA_COUNT }}" >> $GITHUB_ENV
echo "PRODUCTION_DOMAIN=${{ vars.PRODUCTION_DOMAIN }}" >> $GITHUB_ENV
echo "STAGING_DOMAIN=${{ vars.STAGING_DOMAIN }}" >> $GITHUB_ENV
- name: Download container artifact - name: Download container artifact
uses: actions/download-artifact@v4 uses: actions/download-artifact@v4
@ -217,35 +223,40 @@ jobs:
env: env:
DOCKER_HOST: ssh://deploy DOCKER_HOST: ssh://deploy
REPO_PROJECT_PATH: ${{ env.REPO_PROJECT_PATH }} REPO_PROJECT_PATH: ${{ env.REPO_PROJECT_PATH }}
REPO: ${{ env.REPO }}
REPO_NAME_ONLY: ${{ env.REPO_NAME_ONLY }} REPO_NAME_ONLY: ${{ env.REPO_NAME_ONLY }}
IMAGE_TAR: ${{ runner.temp }}/${{ env.IMAGE_TAR_NAME }} IMAGE_TAR: ${{ runner.temp }}/${{ env.IMAGE_TAR_NAME }}
PROD: ${{ env.PROD }} PRODrequire_var: ${{ env.PROD }}
GIT_SHA: ${{ github.sha }}
REPLICA_COUNT: ${{ env.REPLICA_COUNT }}
PRODUCTION_DOMAIN: ${{ vars.PRODUCTION_DOMAIN }}
STAGING_DOMAIN: ${{ vars.STAGING_DOMAIN }}
run: | run: |
echo "✅ Exit script on any error" echo "✅ Exit script on any error"
set -eu -o pipefail set -eu -o pipefail
./scripts/deploy-to-server.sh ./scripts/deploy-blue-green.sh
- name: Health Check and Rollback # - name: Health Check and Rollback
run: | # run: |
# Determine the correct URL based on environment # # Determine the correct URL based on environment
if [ "${{ env.PROD }}" = "true" ]; then # if [ "${{ env.PROD }}" = "true" ]; then
# Ensure PRODUCTION_DOMAIN is set # # Ensure PRODUCTION_DOMAIN is set
if [ -z "${{ vars.PRODUCTION_DOMAIN }}" ]; then # if [ -z "${{ vars.PRODUCTION_DOMAIN }}" ]; then
echo "Error: PRODUCTION_DOMAIN is not set" # echo "Error: PRODUCTION_DOMAIN is not set"
exit 1 # exit 1
fi # fi
HEALTH_CHECK_URL="https://${{ vars.PRODUCTION_DOMAIN }}/health/" # HEALTH_CHECK_URL="https://${{ vars.PRODUCTION_DOMAIN }}/health/"
else # else
# Ensure STAGING_DOMAIN is set # # Ensure STAGING_DOMAIN is set
if [ -z "${{ vars.STAGING_DOMAIN }}" ]; then # if [ -z "${{ vars.STAGING_DOMAIN }}" ]; then
echo "Error: STAGING_DOMAIN is not set" # echo "Error: STAGING_DOMAIN is not set"
exit 1 # exit 1
fi # fi
HEALTH_CHECK_URL="https://${{ vars.STAGING_DOMAIN }}/health/" # HEALTH_CHECK_URL="https://${{ vars.STAGING_DOMAIN }}/health/"
fi # fi
# Copy script to remote and execute # # Copy script to remote and execute
scp scripts/health-check-and-rollback.sh deploy:/tmp/ # scp scripts/health-check-and-rollback.sh deploy:/tmp/
ssh deploy "chmod +x /tmp/health-check-and-rollback.sh" # ssh deploy "chmod +x /tmp/health-check-and-rollback.sh"
ssh deploy "/tmp/health-check-and-rollback.sh '${{ env.REPO_PROJECT_PATH }}' '${{ env.PROD }}' '$HEALTH_CHECK_URL' 30" # ssh deploy "/tmp/health-check-and-rollback.sh '${{ env.REPO_PROJECT_PATH }}' '$HEALTH_CHECK_URL' 30"
ssh deploy "rm -f /tmp/health-check-and-rollback.sh" # ssh deploy "rm -f /tmp/health-check-and-rollback.sh"

1
scripts/common-lib.sh Symbolic link
View file

@ -0,0 +1 @@
../server/scripts/common-lib.sh

207
scripts/deploy-blue-green.sh Executable file
View file

@ -0,0 +1,207 @@
#!/bin/bash
set -euo pipefail
# Blue-Green deployment script with versioned releases.
#
# Flow: deploy the new image as the inactive color ("blue" or "green"),
# wait for its containers to report healthy, switch the gatekeepers over,
# then tear down the previously active color. Each deployment gets a
# timestamped release directory; the `current` symlink points at the
# active release.
#
# Usage: ./deploy-blue-green.sh
#
# Helper functions (require_var, run_on_target, retry, get_current_color,
# switch_color, is_deployment_in_progress, refresh_gatekeepers, ...) and
# the RELEASES_PATH / CURRENT_LINK_PATH / CORE_PROJECT_NAME variables are
# expected to come from common-lib.sh — confirm against that file.

# Source common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/common-lib.sh"

# Validate required environment variables — fail fast before touching the server.
require_var "DOCKER_HOST"
require_var "REPO_PROJECT_PATH"
require_var "REPO_NAME_ONLY"
require_var "REPO"
require_var "IMAGE_TAR"
require_var "ENV_FILE_BASE64"
require_var "CF_PEM_CERT"
require_var "CF_PEM_CA"
require_var "PROD"
require_var "PRODUCTION_DOMAIN"
require_var "STAGING_DOMAIN"
require_var "REPLICA_COUNT"
validate_deployment_env

echo "⚙️ Docker host: $DOCKER_HOST"

# Generate deployment timestamp — doubles as the release directory name.
DEPLOYMENT_TIMESTAMP=$(date +%Y%m%d_%H%M%S)
NEW_RELEASE_PATH="${RELEASES_PATH}/${DEPLOYMENT_TIMESTAMP}"

# Use Git SHA for image tag (if available, otherwise use timestamp).
if [ -n "${GIT_SHA:-}" ]; then
    IMAGE_TAG="sha-${GIT_SHA:0:7}"
else
    # Fallback for local testing without GIT_SHA
    IMAGE_TAG="local-${DEPLOYMENT_TIMESTAMP}"
fi

# Refuse to run while a previous deployment is mid-flight (both colors up).
if is_deployment_in_progress; then
    echo "⚠️ ERROR: Deployment appears to be in progress (both colors are running)"
    echo " This might indicate a previous deployment didn't complete properly."
    echo " Please check the deployment status and clean up any old containers."
    echo " If you are sure that the deployment is complete, you can run the following command to clean up the old containers:"
    # NOTE(review): project names are hardcoded here but derived from
    # REPO_NAME_ONLY elsewhere — consider using get_project_name output.
    echo " ssh deploy 'docker compose -p pkmntrade-club-blue down && docker compose -p pkmntrade-club-green down'"
    exit 1
fi

# Get current and new colors; CURRENT_COLOR is "none" on first deployment.
CURRENT_COLOR=$(get_current_color)
NEW_COLOR=$(switch_color "$CURRENT_COLOR")

echo "📅 Deployment version: ${DEPLOYMENT_TIMESTAMP}"
echo "🏷️ Image tag: ${IMAGE_TAG}"
echo "🎨 Current: $CURRENT_COLOR → New: $NEW_COLOR"

echo "🚀 Enable and start docker service"
retry run_on_target "sudo systemctl enable --now docker.service"

echo "💾 Load the new docker image ($IMAGE_TAR)"
if [ ! -f "$IMAGE_TAR" ]; then
    echo "Error: Docker image tar file not found: $IMAGE_TAR"
    exit 1
fi

# Load the image — Docker handles the transfer to the remote host via
# DOCKER_HOST. FIX(review): this line was commented out, which left the echo
# a no-op and made the verification below fail unless the image was already
# present; the pre-blue-green script ran this load unconditionally.
echo "📦 Loading Docker image..."
retry docker load -i "$IMAGE_TAR"

# Verify the expected image exists before we start anything with it.
echo "🔍 Verifying image ${REPO}:${IMAGE_TAG} exists..."
if ! docker images -q "${REPO}:${IMAGE_TAG}" | grep -q .; then
    echo "❌ Expected image tag ${IMAGE_TAG} not found!"
    echo "Available tags:"
    docker images "${REPO}" --format "{{.Tag}}"
    exit 1
fi

echo "📁 Create versioned release directory"
run_on_target "mkdir -p '${NEW_RELEASE_PATH}'"

echo "💾 Copy new files to server"
if [ -d "./server" ]; then
    retry scp -pr ./server/* "deploy:${NEW_RELEASE_PATH}/"
else
    echo "⚠️ No server directory found, error"
    exit 1
fi

echo "📝 Create new .env file with deployment configuration"
# Decoded .env is written remotely with owner-only permissions (secrets).
printf "%s" "${ENV_FILE_BASE64}" | base64 -d | run_on_target "cat > '${NEW_RELEASE_PATH}/.env' && chmod 600 '${NEW_RELEASE_PATH}/.env'"

# Add deployment color and image tag to .env so compose picks them up.
run_on_target "echo 'DEPLOYMENT_COLOR=${NEW_COLOR}' >> '${NEW_RELEASE_PATH}/.env'"
run_on_target "echo 'IMAGE_TAG=${IMAGE_TAG}' >> '${NEW_RELEASE_PATH}/.env'"

# Pick the public domain based on environment.
if [ "${PROD}" = "true" ]; then
    DOMAIN_NAME="${PRODUCTION_DOMAIN:-pkmntrade.club}"
else
    DOMAIN_NAME="${STAGING_DOMAIN:-staging.pkmntrade.club}"
fi
# NOTE(review): the original comment claimed the third domain label is
# stripped here, but no stripping happens — this just reads $BASE_DOMAIN
# (which is neither required nor validated above) with a hardcoded
# fallback. Confirm where BASE_DOMAIN is meant to be set.
BASE_DOMAIN_NAME="${BASE_DOMAIN:-pkmntrade.club}"
run_on_target "echo 'DOMAIN_NAME=${DOMAIN_NAME}' >> '${NEW_RELEASE_PATH}/.env'"
run_on_target "echo 'BASE_DOMAIN_NAME=${BASE_DOMAIN_NAME}' >> '${NEW_RELEASE_PATH}/.env'"
run_on_target "echo 'REPLICA_COUNT=${REPLICA_COUNT}' >> '${NEW_RELEASE_PATH}/.env'"

echo "🔑 Set up certs"
# Certs are readable by uid 99 (the proxy container user) only.
run_on_target "mkdir -p '${NEW_RELEASE_PATH}/certs' && chmod 550 '${NEW_RELEASE_PATH}/certs' && chown 99:root '${NEW_RELEASE_PATH}/certs'"
printf "%s" "$CF_PEM_CERT" | run_on_target "cat > '${NEW_RELEASE_PATH}/certs/crt.pem' && chmod 440 '${NEW_RELEASE_PATH}/certs/crt.pem' && chown 99:root '${NEW_RELEASE_PATH}/certs/crt.pem'"
printf "%s" "$CF_PEM_CA" | run_on_target "cat > '${NEW_RELEASE_PATH}/certs/ca.pem' && chmod 440 '${NEW_RELEASE_PATH}/certs/ca.pem' && chown 99:root '${NEW_RELEASE_PATH}/certs/ca.pem'"

echo "📝 Save deployment metadata"
run_on_target "echo '${DEPLOYMENT_TIMESTAMP}' > '${NEW_RELEASE_PATH}/.deployment_version'"
run_on_target "echo '${PROD}' > '${NEW_RELEASE_PATH}/.deployment_is_prod'"
run_on_target "echo '${NEW_COLOR}' > '${NEW_RELEASE_PATH}/.deployment_color'"
run_on_target "echo '${IMAGE_TAG}' > '${NEW_RELEASE_PATH}/.image_tag'"
run_on_target "echo '${GIT_SHA:-unknown}' > '${NEW_RELEASE_PATH}/.git_sha'"

# Save previous version info for potential rollback (only exists if the
# `current` symlink was already in place).
run_on_target "if [ -L '${CURRENT_LINK_PATH}' ]; then readlink -f '${CURRENT_LINK_PATH}' > '${NEW_RELEASE_PATH}/.previous_version'; fi"

# Resolve the previous release path locally for the core-services diff below.
# FIX(review): removed `run_on_target "export PREVIOUS_RELEASE_PATH=..."` —
# an export inside a one-off SSH command dies with that shell and had no
# effect; the variable is only used locally in this script.
if [ "$CURRENT_COLOR" != "none" ]; then
    PREVIOUS_RELEASE_PATH=$(run_on_target "cat ${NEW_RELEASE_PATH}/.previous_version")
else
    PREVIOUS_RELEASE_PATH=""
fi

echo "🔗 Update current symlink to new release"
run_on_target "ln -sfn '${NEW_RELEASE_PATH}' '${CURRENT_LINK_PATH}'"

# Get deployment configuration
PROJECT_NAME=$(get_project_name "$NEW_COLOR")
COMPOSE_FILES=$(get_compose_files)
WEB_SERVICE=$(get_web_service_name)

# Create the shared network if it doesn't exist (idempotent).
echo "🔗 Creating network ${PROJECT_NAME}_network"
run_on_target "docker network create ${REPO_NAME_ONLY}_network >/dev/null 2>&1 || true"

# Core services (db, proxy, ...) live outside the blue/green cycle: started
# once on first deployment, restarted afterwards only as needed.
if [ "$CURRENT_COLOR" = "none" ]; then
    echo "🚀 Starting core services (first deployment)"
    retry run_on_target "cd '${CURRENT_LINK_PATH}' && docker compose -f docker-compose_core.yml -p ${CORE_PROJECT_NAME} up -d"
    sleep 10 # Give core services time to start
else
    echo " Core services already running, checking for changes..."
    # Compare core compose files between releases to report whether a real
    # change happened (both branches currently restart either way).
    PREVIOUS_SHA1=$(run_on_target "sha1sum '${PREVIOUS_RELEASE_PATH}/docker-compose_core.yml' | awk '{print \$1}'")
    NEW_SHA1=$(run_on_target "sha1sum '${NEW_RELEASE_PATH}/docker-compose_core.yml' | awk '{print \$1}'")
    echo "PREV_SHA1: ${PREVIOUS_SHA1}"
    echo " NEW_SHA1: ${NEW_SHA1}"
    if [ -n "$PREVIOUS_SHA1" ] && [ -n "$NEW_SHA1" ]; then
        if [ "$PREVIOUS_SHA1" != "$NEW_SHA1" ]; then
            echo "🚀 Core services have changed, restarting..."
            retry run_on_target "cd '${CURRENT_LINK_PATH}' && docker compose -f docker-compose_core.yml -p ${CORE_PROJECT_NAME} down"
            retry run_on_target "cd '${CURRENT_LINK_PATH}' && docker compose -f docker-compose_core.yml -p ${CORE_PROJECT_NAME} up -d"
        else
            # Restart even when unchanged: compose mounts resolve through the
            # `current` symlink, which now points at the new release dir.
            echo " Core services have not changed, still restarting due to current folder change..."
            retry run_on_target "cd '${CURRENT_LINK_PATH}' && docker compose -f docker-compose_core.yml -p ${CORE_PROJECT_NAME} down"
            retry run_on_target "cd '${CURRENT_LINK_PATH}' && docker compose -f docker-compose_core.yml -p ${CORE_PROJECT_NAME} up -d"
        fi
    else
        echo "❌ Current or previous core services not found, exiting..."
        exit 1
    fi
fi

echo "🚀 Start new ${NEW_COLOR} containers with image ${IMAGE_TAG}"
retry run_on_target "cd '${CURRENT_LINK_PATH}' && DEPLOYMENT_COLOR=${NEW_COLOR} IMAGE_TAG=${IMAGE_TAG} docker compose $COMPOSE_FILES -p ${PROJECT_NAME} up -d"

# Wait for new containers to be healthy; on failure tear the new color down
# and abort, leaving the old color serving traffic.
if ! wait_for_healthy_containers "$PROJECT_NAME" "$WEB_SERVICE" "$REPLICA_COUNT"; then
    echo "❌ New containers failed health checks. Cancelling deployment..."
    run_on_target "cd '${CURRENT_LINK_PATH}' && docker compose $COMPOSE_FILES -p ${PROJECT_NAME} down"
    #TODO: implement rollback of the `current` symlink and core services
    exit 1
fi

echo "✅ New ${NEW_COLOR} deployment is healthy"

# Point the gatekeepers at the new color.
refresh_gatekeepers

# Wait for traffic to stabilize before dropping the old color.
wait_with_countdown 20 "⏳ Waiting for traffic to stabilize..."

# Clean up old containers if this isn't the first deployment.
if [ "$CURRENT_COLOR" != "none" ]; then
    # Get the old image tag before cleanup (for the log only).
    OLD_IMAGE_TAG=$(get_deployment_image_tag "$CURRENT_COLOR")
    echo "📷 Old deployment was using image: ${OLD_IMAGE_TAG}"
    cleanup_color_containers "$CURRENT_COLOR"
    echo "✅ Old containers removed"
fi

echo "🗑️ Clean up old releases (keep last 5)"
run_on_target "cd '${RELEASES_PATH}' && ls -dt */ 2>/dev/null | tail -n +6 | xargs -r rm -rf || true"

echo "✅ Blue-Green deployment completed"
echo " Active color: ${NEW_COLOR}"
echo " Image tag: ${IMAGE_TAG}"

View file

@ -1,124 +0,0 @@
#!/bin/bash
set -euo pipefail
# Main deployment script with versioned releases.
# NOTE(review): legacy pre-blue-green script — removed in this commit in
# favor of deploy-blue-green.sh. It stops the running stack before starting
# the new one, so every deploy incurs downtime.
# Usage: ./deploy-to-server.sh

# Source retry function
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/retry.sh"

# Required environment variables (should be set by GitHub Actions);
# `:?` aborts with the given message if a variable is unset or empty.
: "${DOCKER_HOST:?Error: DOCKER_HOST not set}"
: "${REPO_PROJECT_PATH:?Error: REPO_PROJECT_PATH not set}"
: "${REPO_NAME_ONLY:?Error: REPO_NAME_ONLY not set}"
: "${IMAGE_TAR:?Error: IMAGE_TAR not set}"
: "${ENV_FILE_BASE64:?Error: ENV_FILE_BASE64 not set}"
: "${CF_PEM_CERT:?Error: CF_PEM_CERT not set}"
: "${CF_PEM_CA:?Error: CF_PEM_CA not set}"
: "${PROD:?Error: PROD not set}"

echo "⚙️ Docker host: $DOCKER_HOST"

# Generate deployment timestamp — also names the release directory.
DEPLOYMENT_TIMESTAMP=$(date +%Y%m%d_%H%M%S)
RELEASES_PATH="${REPO_PROJECT_PATH}/releases"
NEW_RELEASE_PATH="${RELEASES_PATH}/${DEPLOYMENT_TIMESTAMP}"
CURRENT_LINK_PATH="${REPO_PROJECT_PATH}/current"

echo "📅 Deployment version: ${DEPLOYMENT_TIMESTAMP}"
echo "🚀 Enable and start docker service"
retry ssh deploy "sudo systemctl enable --now docker.service"

echo "💾 Load the new docker image ($IMAGE_TAR)"
if [ ! -f "$IMAGE_TAR" ]; then
    echo "Error: Docker image tar file not found: $IMAGE_TAR"
    exit 1
fi
# DOCKER_HOST points at the server, so the load transfers the image remotely.
retry docker load -i "$IMAGE_TAR"

echo "📁 Create versioned release directory"
ssh deploy "mkdir -p '${NEW_RELEASE_PATH}'"

echo "💾 Copy new files to server"
# Check if server directory exists before copying
if [ -d "./server" ]; then
    retry scp -pr ./server/* "deploy:${NEW_RELEASE_PATH}/"
else
    echo "⚠️ No server directory found, error"
    exit 1
fi

echo "📝 Create new .env file"
# Decoded secrets file is written remotely with owner-only permissions.
printf "%s" "${ENV_FILE_BASE64}" | base64 -d | ssh deploy "cat > '${NEW_RELEASE_PATH}/.env' && chmod 600 '${NEW_RELEASE_PATH}/.env'"

echo "🔑 Set up certs"
# Certs readable only by uid 99 (the proxy container user).
ssh deploy "mkdir -p '${NEW_RELEASE_PATH}/certs' && chmod 550 '${NEW_RELEASE_PATH}/certs' && chown 99:root '${NEW_RELEASE_PATH}/certs'"
printf "%s" "$CF_PEM_CERT" | ssh deploy "cat > '${NEW_RELEASE_PATH}/certs/crt.pem' && chmod 440 '${NEW_RELEASE_PATH}/certs/crt.pem' && chown 99:root '${NEW_RELEASE_PATH}/certs/crt.pem'"
printf "%s" "$CF_PEM_CA" | ssh deploy "cat > '${NEW_RELEASE_PATH}/certs/ca.pem' && chmod 440 '${NEW_RELEASE_PATH}/certs/ca.pem' && chown 99:root '${NEW_RELEASE_PATH}/certs/ca.pem'"

echo "🔄 Prepare deployment (stop current containers)"
# Copy script to remote and execute with parameters (downtime starts here).
scp "${SCRIPT_DIR}/prepare-deployment.sh" deploy:/tmp/
ssh deploy "chmod +x /tmp/prepare-deployment.sh && /tmp/prepare-deployment.sh '${REPO_PROJECT_PATH}' '${PROD}' '${CURRENT_LINK_PATH}'"
ssh deploy "rm -f /tmp/prepare-deployment.sh"

echo "📝 Save deployment metadata"
ssh deploy "echo '${DEPLOYMENT_TIMESTAMP}' > '${NEW_RELEASE_PATH}/.deployment_version'"
ssh deploy "echo '${PROD}' > '${NEW_RELEASE_PATH}/.deployment_env'"
# Save previous version info for potential rollback
ssh deploy "if [ -L '${CURRENT_LINK_PATH}' ]; then readlink -f '${CURRENT_LINK_PATH}' > '${NEW_RELEASE_PATH}/.previous_version'; fi"

echo "🔗 Update current symlink to new release"
ssh deploy "ln -sfn '${NEW_RELEASE_PATH}' '${CURRENT_LINK_PATH}'"

# TODO: implement zero-downtime deployment (superseded by deploy-blue-green.sh)

echo "🚀 Start the new containers"
if [ "$PROD" = "true" ]; then
    retry ssh deploy "cd '${CURRENT_LINK_PATH}' && docker compose -f docker-compose_core.yml -f docker-compose_web.yml -p pkmntrade-club up -d --no-build"
else
    # Staging layers an extra override compose file on top of web.
    retry ssh deploy "cd '${CURRENT_LINK_PATH}' && docker compose -f docker-compose_core.yml -f docker-compose_web.yml -f docker-compose_staging.yml -p pkmntrade-club up -d --no-build"
fi

echo "🧹 Prune unused Docker resources"
ssh deploy "docker system prune -f"

echo "🗑️ Clean up old releases (keep last 5)"
ssh deploy "cd '${RELEASES_PATH}' && ls -dt */ 2>/dev/null | tail -n +6 | xargs -r rm -rf || true"

echo "✅ Deployment completed. Version: ${DEPLOYMENT_TIMESTAMP}"

0
scripts/generate-docker-tags.sh Normal file → Executable file
View file

View file

@ -1,102 +0,0 @@
#!/bin/bash
set -euo pipefail
# Perform health check and rollback if necessary.
# Polls HEALTH_CHECK_URL until it returns HTTP 200 or MAX_ATTEMPTS is
# exhausted (10s between attempts); on exhaustion, switches the `current`
# symlink back to the recorded previous release and restarts it.
# NOTE(review): removed in this commit; the new workflow step (commented
# out) passes only 3 args, dropping PROD — signatures had diverged.
# Usage: ./health-check-and-rollback.sh REPO_PROJECT_PATH PROD HEALTH_CHECK_URL [MAX_ATTEMPTS]
if [ $# -lt 3 ]; then
    echo "Error: Invalid number of arguments"
    echo "Usage: $0 REPO_PROJECT_PATH PROD HEALTH_CHECK_URL [MAX_ATTEMPTS]"
    exit 1
fi

REPO_PROJECT_PATH="$1"
PROD="$2"
HEALTH_CHECK_URL="$3"
MAX_ATTEMPTS="${4:-30}"
CURRENT_LINK_PATH="${REPO_PROJECT_PATH}/current"
RELEASES_PATH="${REPO_PROJECT_PATH}/releases"

echo "🏥 Performing health check..."
echo "Health check URL: $HEALTH_CHECK_URL"

# Resolve the release directory name the `current` symlink points at,
# or "unknown" if the symlink is missing.
get_current_version() {
    if [ -L "$CURRENT_LINK_PATH" ]; then
        basename "$(readlink -f "$CURRENT_LINK_PATH")"
    else
        echo "unknown"
    fi
}

ATTEMPT=0
while [ "$ATTEMPT" -lt "$MAX_ATTEMPTS" ]; do
    # Check if the service is responding with 200 OK; '000' on curl failure.
    HTTP_CODE=$(curl -s -o /dev/null -w '%{http_code}' -m 10 "$HEALTH_CHECK_URL" || echo '000')
    if [ "$HTTP_CODE" = "200" ]; then
        echo "✅ Health check passed! (HTTP $HTTP_CODE)"
        CURRENT_VERSION=$(get_current_version)
        echo "📌 Current version: ${CURRENT_VERSION}"
        exit 0
    fi
    ATTEMPT=$((ATTEMPT + 1))
    if [ "$ATTEMPT" -eq "$MAX_ATTEMPTS" ]; then
        echo "❌ Health check failed after $MAX_ATTEMPTS attempts (Last HTTP code: $HTTP_CODE)"
        echo "🔄 Rolling back deployment..."
        FAILED_VERSION=$(get_current_version)
        echo "❌ Failed version: ${FAILED_VERSION}"
        # Check if we have a previous version to roll back to
        if [ -f "${CURRENT_LINK_PATH}/.previous_version" ]; then
            PREVIOUS_VERSION_PATH=$(cat "${CURRENT_LINK_PATH}/.previous_version")
            PREVIOUS_VERSION=$(basename "$PREVIOUS_VERSION_PATH")
            if [ -d "$PREVIOUS_VERSION_PATH" ]; then
                echo "🔄 Rolling back to version: ${PREVIOUS_VERSION}"
                # Stop failed deployment containers (order: web → staging → core;
                # `|| true` keeps going even if a stack is already down)
                cd "$CURRENT_LINK_PATH"
                echo "Stopping failed deployment containers..."
                docker compose -f docker-compose_web.yml -p pkmntrade-club down || true
                if [ "$PROD" = "false" ]; then
                    docker compose -f docker-compose_staging.yml -p pkmntrade-club down || true
                fi
                docker compose -f docker-compose_core.yml -p pkmntrade-club down || true
                # Switch symlink back to previous version
                ln -sfn "$PREVIOUS_VERSION_PATH" "$CURRENT_LINK_PATH"
                # Start previous version containers (core first, then web)
                cd "$CURRENT_LINK_PATH"
                docker compose -f docker-compose_core.yml -p pkmntrade-club up -d --no-build
                if [ "$PROD" = "true" ]; then
                    docker compose -f docker-compose_web.yml -p pkmntrade-club up -d --no-build
                else
                    docker compose -f docker-compose_web.yml -f docker-compose_staging.yml -p pkmntrade-club up -d --no-build
                fi
                echo "✅ Rollback completed to version: ${PREVIOUS_VERSION}"
                # Mark failed version so manage-releases.sh can report it
                if [ -d "${RELEASES_PATH}/${FAILED_VERSION}" ]; then
                    touch "${RELEASES_PATH}/${FAILED_VERSION}/.failed"
                    echo "$(date): Health check failed, rolled back to ${PREVIOUS_VERSION}" > "${RELEASES_PATH}/${FAILED_VERSION}/.failure_reason"
                fi
            else
                echo "❌ Previous version directory not found: $PREVIOUS_VERSION_PATH"
                exit 1
            fi
        else
            echo "❌ No previous version information found. Cannot rollback!"
            echo "💡 This might be the first deployment or the previous version info is missing."
            exit 1
        fi
        # Rollback succeeded, but the deployment itself failed — exit non-zero.
        exit 1
    fi
    echo "⏳ Waiting for service to be healthy... (attempt $ATTEMPT/$MAX_ATTEMPTS, HTTP code: $HTTP_CODE)"
    sleep 10
done

View file

@ -1,120 +0,0 @@
#!/bin/bash
set -euo pipefail
# Manage deployment releases: list, show current, rollback, or clean up the
# timestamped release directories under REPO_PROJECT_PATH/releases.
# NOTE(review): removed in this commit with the move to blue-green deploys.
# Usage: ./manage-releases.sh REPO_PROJECT_PATH COMMAND [ARGS]
if [ $# -lt 2 ]; then
    echo "Error: Invalid number of arguments"
    echo "Usage: $0 REPO_PROJECT_PATH COMMAND [ARGS]"
    echo "Commands:"
    echo " list - List all releases"
    echo " current - Show current release"
    echo " rollback VERSION - Rollback to specific version"
    echo " cleanup [KEEP] - Clean up old releases (default: keep 5)"
    exit 1
fi

REPO_PROJECT_PATH="$1"
COMMAND="$2"
CURRENT_LINK_PATH="${REPO_PROJECT_PATH}/current"
RELEASES_PATH="${REPO_PROJECT_PATH}/releases"

case "$COMMAND" in
    list)
        # Print every release (newest first), flagging the active one and
        # any marked failed by health-check-and-rollback.sh.
        echo "📋 Available releases:"
        if [ -d "$RELEASES_PATH" ]; then
            for release in $(ls -dt "${RELEASES_PATH}"/*/); do
                version=$(basename "$release")
                status=""
                # Check if it's current
                if [ -L "$CURRENT_LINK_PATH" ] && [ "$(readlink -f "$CURRENT_LINK_PATH")" = "$(realpath "$release")" ]; then
                    status=" [CURRENT]"
                fi
                # Check if it failed
                if [ -f "${release}/.failed" ]; then
                    status="${status} [FAILED]"
                fi
                echo " - ${version}${status}"
            done
        else
            echo "No releases found"
        fi
        ;;
    current)
        # Show which release the `current` symlink resolves to.
        if [ -L "$CURRENT_LINK_PATH" ]; then
            current_version=$(basename "$(readlink -f "$CURRENT_LINK_PATH")")
            echo "📌 Current version: ${current_version}"
        else
            echo "❌ No current deployment found"
        fi
        ;;
    rollback)
        # Stop the current stack, repoint `current` at VERSION, restart.
        if [ $# -lt 3 ]; then
            echo "Error: VERSION required for rollback"
            exit 1
        fi
        TARGET_VERSION="$3"
        TARGET_PATH="${RELEASES_PATH}/${TARGET_VERSION}"
        if [ ! -d "$TARGET_PATH" ]; then
            echo "Error: Version ${TARGET_VERSION} not found"
            exit 1
        fi
        echo "🔄 Rolling back to version: ${TARGET_VERSION}"
        # Read environment from target version (.deployment_env holds the
        # PROD flag recorded at deploy time).
        if [ -f "${TARGET_PATH}/.deployment_env" ]; then
            PROD=$(cat "${TARGET_PATH}/.deployment_env")
        else
            echo "Warning: Could not determine environment, assuming staging"
            PROD="false"
        fi
        # Stop current containers (best-effort; `|| true` tolerates stacks
        # that are already down)
        if [ -L "$CURRENT_LINK_PATH" ] && [ -d "$CURRENT_LINK_PATH" ]; then
            cd "$CURRENT_LINK_PATH"
            docker compose -f docker-compose_web.yml down || true
            [ "$PROD" = "false" ] && docker compose -f docker-compose_staging.yml down || true
            docker compose -f docker-compose_core.yml down || true
        fi
        # Update symlink
        ln -sfn "$TARGET_PATH" "$CURRENT_LINK_PATH"
        # Start containers (core first, then web / web+staging)
        cd "$CURRENT_LINK_PATH"
        docker compose -f docker-compose_core.yml up -d --no-build
        if [ "$PROD" = "true" ]; then
            docker compose -f docker-compose_web.yml up -d --no-build
        else
            docker compose -f docker-compose_web.yml -f docker-compose_staging.yml up -d --no-build
        fi
        echo "✅ Rollback completed"
        ;;
    cleanup)
        # Delete all but the newest KEEP_COUNT release directories.
        KEEP_COUNT="${3:-5}"
        echo "🗑️ Cleaning up old releases (keeping last ${KEEP_COUNT})"
        if [ -d "$RELEASES_PATH" ]; then
            cd "$RELEASES_PATH"
            ls -dt */ 2>/dev/null | tail -n +$((KEEP_COUNT + 1)) | xargs -r rm -rf || true
            echo "✅ Cleanup completed"
        else
            echo "No releases directory found"
        fi
        ;;
    *)
        echo "Error: Unknown command: $COMMAND"
        exit 1
        ;;
esac

9
scripts/parse-repository-name.sh Normal file → Executable file
View file

@ -16,14 +16,17 @@ echo "GITHUB_REPOSITORY: $GITHUB_REPOSITORY" > /dev/stderr
if [[ "$GITHUB_REPOSITORY" == *".git" ]]; then if [[ "$GITHUB_REPOSITORY" == *".git" ]]; then
if [[ "$GITHUB_REPOSITORY" == "https://"* ]]; then if [[ "$GITHUB_REPOSITORY" == "https://"* ]]; then
echo "GITHUB_REPOSITORY ends in .git and is a URL" > /dev/stderr echo "GITHUB_REPOSITORY ends in .git and is an HTTPS URI" > /dev/stderr
REPO=$(echo "$GITHUB_REPOSITORY" | sed 's/\.git$//' | cut -d'/' -f4-5 | sed 's/[^a-zA-Z0-9\/-]/-/g') REPO=$(echo "$GITHUB_REPOSITORY" | sed 's/\.git$//' | cut -d'/' -f4-5 | sed 's/[^a-zA-Z0-9\/-]/-/g')
elif [[ "$GITHUB_REPOSITORY" == "git@"* ]]; then
echo "GITHUB_REPOSITORY ends in .git and is an SSH URI" > /dev/stderr
REPO=$(echo "$GITHUB_REPOSITORY" | sed 's/\.git$//' | cut -d':' -f2 | sed 's/[^a-zA-Z0-9\/-]/-/g')
else else
echo "GITHUB_REPOSITORY ends in .git and is not a URL" > /dev/stderr echo "GITHUB_REPOSITORY ends in .git and is not a URI" > /dev/stderr
REPO=$(echo "$GITHUB_REPOSITORY" | sed 's/\.git$//' | sed 's/[^a-zA-Z0-9\/-]/-/g') REPO=$(echo "$GITHUB_REPOSITORY" | sed 's/\.git$//' | sed 's/[^a-zA-Z0-9\/-]/-/g')
fi fi
else else
echo "GITHUB_REPOSITORY is not a URL" > /dev/stderr echo "GITHUB_REPOSITORY is not a URI" > /dev/stderr
REPO=$(echo "$GITHUB_REPOSITORY" | sed 's/[^a-zA-Z0-9\/-]/-/g') REPO=$(echo "$GITHUB_REPOSITORY" | sed 's/[^a-zA-Z0-9\/-]/-/g')
fi fi

View file

@ -1,44 +0,0 @@
#!/bin/bash
set -euo pipefail
# Prepare a deployment by stopping the containers of the release that the
# `current` symlink points at. Runs on the deployment target host.
# Usage: ./prepare-deployment.sh REPO_PROJECT_PATH PROD CURRENT_LINK_PATH

# Exactly three positional arguments are required.
if [ $# -ne 3 ]; then
    echo "Error: Invalid number of arguments"
    echo "Usage: $0 REPO_PROJECT_PATH PROD CURRENT_LINK_PATH"
    exit 1
fi

REPO_PROJECT_PATH="$1"
PROD="$2"
CURRENT_LINK_PATH="$3"

# Make sure the base project directory exists before anything else runs.
[ -d "$REPO_PROJECT_PATH" ] || {
    echo "⚠️ Directory $REPO_PROJECT_PATH does not exist, creating it..."
    mkdir -p "$REPO_PROJECT_PATH"
}

# Only act when the symlink exists AND resolves to a real directory;
# otherwise there is nothing to stop.
if [ -L "$CURRENT_LINK_PATH" ] && [ -d "$CURRENT_LINK_PATH" ]; then
    echo "🛑 Stopping containers in current deployment..."
    cd "$CURRENT_LINK_PATH"

    # stop_stack FILE — best-effort `docker compose down` for one compose file.
    stop_stack() {
        docker compose -f "$1" -p pkmntrade-club down || true
    }

    [ -f "docker-compose_web.yml" ] && stop_stack "docker-compose_web.yml"
    # Staging override only exists (and only matters) on non-prod targets.
    if [ "$PROD" = "false" ] && [ -f "docker-compose_staging.yml" ]; then
        stop_stack "docker-compose_staging.yml"
    fi
    [ -f "docker-compose_core.yml" ] && stop_stack "docker-compose_core.yml"

    echo "✅ Containers stopped"
else
    echo " No current deployment found (symlink doesn't exist or point to valid directory)"
fi

View file

@ -1,23 +0,0 @@
#!/bin/bash
# Retry function with exponential backoff.
# Usage: source retry.sh && retry <command>

# retry CMD [ARGS...] — run CMD, retrying on failure up to 3 times with the
# delay doubling after each failed attempt (5s, 10s). Returns 0 on the first
# success, 1 after the final attempt fails.
retry() {
    local max_attempts=3
    local delay=5
    local attempt=1
    while ! "$@"; do
        if [ "$attempt" -ge "$max_attempts" ]; then
            echo "Command failed after $max_attempts attempts: $*"
            return 1
        fi
        echo "Command failed (attempt $attempt/$max_attempts): $*"
        echo "Retrying in $delay seconds..."
        sleep "$delay"
        attempt=$((attempt + 1))
        # Exponential backoff
        delay=$((delay * 2))
    done
}

View file

@ -53,6 +53,8 @@ services:
done done
env_file: env_file:
- .env - .env
labels:
- "deployment.core=true"
loba: loba:
image: haproxy:3.1 image: haproxy:3.1
stop_signal: SIGTERM stop_signal: SIGTERM
@ -64,11 +66,14 @@ services:
volumes: volumes:
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
- ./certs:/certs - ./certs:/certs
labels:
- "deployment.core=true"
feedback: feedback:
restart: always restart: always
image: getfider/fider:stable image: getfider/fider:stable
labels: labels:
- "enable_gatekeeper=true" - "enable_gatekeeper=true"
- "deployment.core=true"
env_file: env_file:
- .env - .env
# cadvisor: # cadvisor:
@ -91,6 +96,8 @@ services:
timeout: 5s timeout: 5s
retries: 5 retries: 5
start_period: 10s start_period: 10s
labels:
- "deployment.core=true"
dockergen-health: dockergen-health:
image: nginxproxy/docker-gen:latest image: nginxproxy/docker-gen:latest
command: -wait 15s -watch /gatus/config.template.yaml /gatus/config.yaml command: -wait 15s -watch /gatus/config.template.yaml /gatus/config.yaml
@ -98,6 +105,8 @@ services:
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ./gatus:/gatus - ./gatus:/gatus
labels:
- "deployment.core=true"
dockergen-gatekeeper: dockergen-gatekeeper:
image: nginxproxy/docker-gen:latest image: nginxproxy/docker-gen:latest
command: -wait 15s -watch /gatekeeper/gatekeepers.template.yml /gatekeeper/gatekeepers.yml -notify-sighup pkmntrade-club-gatekeeper-manager-1 command: -wait 15s -watch /gatekeeper/gatekeepers.template.yml /gatekeeper/gatekeepers.yml -notify-sighup pkmntrade-club-gatekeeper-manager-1
@ -105,6 +114,8 @@ services:
volumes: volumes:
- /var/run/docker.sock:/tmp/docker.sock:ro - /var/run/docker.sock:/tmp/docker.sock:ro
- ./:/gatekeeper - ./:/gatekeeper
labels:
- "deployment.core=true"
gatekeeper-manager: gatekeeper-manager:
image: docker:latest image: docker:latest
restart: always restart: always
@ -115,6 +126,8 @@ services:
environment: environment:
- REFRESH_INTERVAL=60 - REFRESH_INTERVAL=60
entrypoint: ["/bin/sh", "-c"] entrypoint: ["/bin/sh", "-c"]
labels:
- "deployment.core=true"
command: command:
- | - |
set -eu -o pipefail set -eu -o pipefail
@ -239,7 +252,7 @@ services:
echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO]: Periodic healthcheck and refresh triggered." echo "$(date +'%Y-%m-%d %H:%M:%S') [INFO]: Periodic healthcheck and refresh triggered."
if [ ! -f "$$COMPOSE_FILE_PATH" ]; then if [ ! -f "$$COMPOSE_FILE_PATH" ]; then
echo "$(date +'%Y-%m-%d %H:%M:%S') [ERROR]: Gatekeepers.yml has not been generated after $$REFRESH_INTERVAL seconds. Please check dockergen-gatekeeper is running correctly. Exiting." echo "$(date +'%Y-%m-%d %H:%M:%S') [ERROR]: gatekeepers.yml has not been generated after $$REFRESH_INTERVAL seconds. Please check dockergen-gatekeeper is running correctly. Exiting."
exit 1 exit 1
fi fi
@ -254,9 +267,21 @@ services:
restart: always restart: always
labels: labels:
- "enable_gatekeeper=true" - "enable_gatekeeper=true"
- "deployment.core=true"
# healthcheck:
# test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
# interval: 10s
# timeout: 5s
# retries: 5
# start_period: 10s
env_file: env_file:
- .env - .env
environment: environment:
- GATUS_DELAY_START_SECONDS=30 - GATUS_DELAY_START_SECONDS=30
volumes: volumes:
- ./gatus:/config - ./gatus:/config
networks:
default:
name: pkmntrade-club_network
external: true

View file

@ -1,32 +0,0 @@
x-common: &common
image: badbl0cks/pkmntrade-club:staging
restart: always
env_file:
- .env
services:
web-staging:
<<: *common
environment:
- DEBUG=False
- DISABLE_SIGNUPS=True
- PUBLIC_HOST=staging.pkmntrade.club
- ALLOWED_HOSTS=staging.pkmntrade.club,127.0.0.1
labels:
- "enable_gatekeeper=true"
deploy:
mode: replicated
replicas: 2
# healthcheck:
# test: ["CMD", "curl", "-f", "http://127.0.0.1:8000/health"]
# interval: 30s
# timeout: 10s
# retries: 3
# start_period: 30s
celery-staging:
<<: *common
environment:
- DEBUG=False
- DISABLE_SIGNUPS=True
- PUBLIC_HOST=staging.pkmntrade.club
- ALLOWED_HOSTS=staging.pkmntrade.club,127.0.0.1
command: ["celery", "-A", "pkmntrade_club.django_project", "worker", "-l", "INFO", "-B", "-E"]

View file

@ -1,4 +1,7 @@
x-common: &common x-common: &common
image: badbl0cks/pkmntrade-club:${IMAGE_TAG:-stable}
#image: ghcr.io/xe/x/httpdebug
#entrypoint: ["/ko-app/httpdebug", "--bind", ":8000"]
restart: always restart: always
env_file: env_file:
- .env - .env
@ -6,31 +9,42 @@ x-common: &common
services: services:
web: web:
<<: *common <<: *common
image: ghcr.io/xe/x/httpdebug environment:
entrypoint: ["/ko-app/httpdebug", "--bind", ":8000"] - DEBUG=False
#image: badbl0cks/pkmntrade-club:stable - DISABLE_SIGNUPS=True
- PUBLIC_HOST=${DOMAIN_NAME}
- ALLOWED_HOSTS=${DOMAIN_NAME},127.0.0.1
- DEPLOYMENT_COLOR=${DEPLOYMENT_COLOR:-blue}
labels:
- "enable_gatekeeper=true"
- "deployment.color=${DEPLOYMENT_COLOR:-blue}"
- "deployment.image_tag=${IMAGE_TAG:-stable}"
deploy:
mode: replicated
replicas: ${REPLICA_COUNT}
healthcheck:
test: ["CMD", "curl", "-f", "http://127.0.0.1:8000/health/"]
interval: 5s
timeout: 3s
retries: 2
start_period: 60s
stop_grace_period: 200s # 20s buffer + 180s workers-kill-timeout
celery:
<<: *common
environment: environment:
- DEBUG=False - DEBUG=False
- DISABLE_SIGNUPS=True - DISABLE_SIGNUPS=True
- PUBLIC_HOST=pkmntrade.club - PUBLIC_HOST=pkmntrade.club
- ALLOWED_HOSTS=pkmntrade.club,127.0.0.1 - ALLOWED_HOSTS=pkmntrade.club,127.0.0.1
- DEPLOYMENT_COLOR=${DEPLOYMENT_COLOR:-blue}
labels: labels:
- "enable_gatekeeper=true" - "deployment.color=${DEPLOYMENT_COLOR:-blue}"
deploy: - "deployment.image_tag=${IMAGE_TAG:-stable}"
mode: replicated command: ["celery", "-A", "pkmntrade_club.django_project", "worker", "-l", "INFO", "-B", "-E"]
replicas: 4 stop_grace_period: 200s # match our longest stop_grace_period (currently web service is 200s)
# healthcheck:
# test: ["CMD", "curl", "-f", "http://127.0.0.1:8000/health"] networks:
# interval: 30s default:
# timeout: 10s name: pkmntrade-club_network
# retries: 3 external: true
# start_period: 30s
# celery:
# <<: *common
# image: badbl0cks/pkmntrade-club:stable
# environment:
# - DEBUG=False
# - DISABLE_SIGNUPS=True
# - PUBLIC_HOST=pkmntrade.club
# - ALLOWED_HOSTS=pkmntrade.club,127.0.0.1
# command: ["celery", "-A", "pkmntrade_club.django_project", "worker", "-l", "INFO", "-B", "-E"]

View file

@ -1,23 +1,52 @@
services: services:
{{ $all_containers := whereLabelValueMatches . "enable_gatekeeper" "true" }} {{ $all_containers := whereLabelValueMatches . "enable_gatekeeper" "true" }}
{{ $all_containers = sortObjectsByKeysAsc $all_containers "Name" }}
{{ range $container := $all_containers }} # During deployment, both blue and green containers might exist
# So we generate gatekeepers for ALL containers with deployment.color label
{{ $color_containers := whereLabelExists $all_containers "deployment.color" }}
{{ $color_containers = sortObjectsByKeysAsc $color_containers "Name" }}
{{ range $container := $color_containers }}
{{ $serviceLabel := index $container.Labels "com.docker.compose.service" }} {{ $serviceLabel := index $container.Labels "com.docker.compose.service" }}
{{ $containerNumber := index $container.Labels "com.docker.compose.container-number" }} {{ $containerNumber := index $container.Labels "com.docker.compose.container-number" }}
{{ $deploymentColor := index $container.Labels "deployment.color" }}
{{ $port := "" }} {{ $port := "" }}
{{ if eq $serviceLabel "web" }} {{ if eq $serviceLabel "web" }}
{{ $port = ":8000" }} {{ $port = ":8000" }}
{{ end }} {{ end }}
{{ if eq $serviceLabel "web-staging" }} gatekeeper-{{ $serviceLabel }}-{{ $deploymentColor }}-{{ $containerNumber }}:
{{ $port = ":8000" }} image: ghcr.io/techarohq/anubis:latest
container_name: pkmntrade-club-gatekeeper-{{ $serviceLabel }}-{{ $deploymentColor }}-{{ $containerNumber }}
env_file:
- .env
environment:
- TARGET=http://{{ $container.Name }}{{ $port }}
- DEPLOYMENT_COLOR={{ $deploymentColor }}
- TARGET_HOST=${DOMAIN_NAME}
labels:
- gatekeeper=true
- deployment.color={{ $deploymentColor }}
networks:
default:
aliases:
- pkmntrade-club-gatekeeper-{{ $serviceLabel }}
- gatekeeper-{{ $serviceLabel }}
{{ end }} {{ end }}
# Always include non-color-specific services
{{ $static_containers := whereLabelValueMatches . "enable_gatekeeper" "true" }}
{{ $static_containers = whereLabelDoesNotExist $static_containers "deployment.color" }}
{{ range $container := $static_containers }}
{{ $serviceLabel := index $container.Labels "com.docker.compose.service" }}
{{ $containerNumber := index $container.Labels "com.docker.compose.container-number" }}
{{ $port := "" }}
{{ if eq $serviceLabel "feedback" }} {{ if eq $serviceLabel "feedback" }}
{{ $port = ":3000" }} {{ $port = ":3000" }}
{{ end }} {{ end }}
{{ if eq $serviceLabel "health" }} {{ if eq $serviceLabel "health" }}
{{ $port = ":8080" }} {{ $port = ":8080" }}
{{ end }} {{ end }}
{{ if or (eq $serviceLabel "feedback") (eq $serviceLabel "health") }}
gatekeeper-{{ $serviceLabel }}-{{ $containerNumber }}: gatekeeper-{{ $serviceLabel }}-{{ $containerNumber }}:
image: ghcr.io/techarohq/anubis:latest image: ghcr.io/techarohq/anubis:latest
container_name: pkmntrade-club-gatekeeper-{{ $serviceLabel }}-{{ $containerNumber }} container_name: pkmntrade-club-gatekeeper-{{ $serviceLabel }}-{{ $containerNumber }}
@ -25,12 +54,6 @@ services:
- .env - .env
environment: environment:
- TARGET=http://{{ $container.Name }}{{ $port }} - TARGET=http://{{ $container.Name }}{{ $port }}
{{ if eq $serviceLabel "web" }}
- TARGET_HOST=pkmntrade.club # pass this host to django, which checks it with ALLOWED_HOSTS
{{ end }}
{{ if eq $serviceLabel "web-staging" }}
- TARGET_HOST=staging.pkmntrade.club # pass this host to django, which checks it with ALLOWED_HOSTS
{{ end }}
labels: labels:
- gatekeeper=true - gatekeeper=true
networks: networks:
@ -39,7 +62,9 @@ services:
- pkmntrade-club-gatekeeper-{{ $serviceLabel }} - pkmntrade-club-gatekeeper-{{ $serviceLabel }}
- gatekeeper-{{ $serviceLabel }} - gatekeeper-{{ $serviceLabel }}
{{ end }} {{ end }}
{{ end }}
networks: networks:
default: default:
name: pkmntrade-club_default name: pkmntrade-club_network
external: true external: true

View file

@ -92,20 +92,15 @@ endpoints:
- type: email - type: email
{{ $all_containers := . }} {{ $all_containers := . }}
{{ $web_containers := list }} {{ $web_containers := list }}
{{ $web_staging_containers := list }}
{{ range $container := $all_containers }} {{ range $container := $all_containers }}
{{ $serviceLabel := index $container.Labels "com.docker.compose.service" }} {{ $serviceLabel := index $container.Labels "com.docker.compose.service" }}
{{ if eq $serviceLabel "web" }} {{ if eq $serviceLabel "web" }}
{{ $web_containers = append $web_containers $container }} {{ $web_containers = append $web_containers $container }}
{{ end }} {{ end }}
{{ if eq $serviceLabel "web-staging" }}
{{ $web_staging_containers = append $web_staging_containers $container }}
{{ end }}
{{ end }} {{ end }}
{{ $web_containers = sortObjectsByKeysAsc $web_containers "Name" }} {{ $web_containers = sortObjectsByKeysAsc $web_containers "Name" }}
{{ $web_staging_containers = sortObjectsByKeysAsc $web_staging_containers "Name" }}
{{ range $container := $web_containers }} {{ range $container := $web_containers }}
{{ $containerNumber := index $container.Labels "com.docker.compose.container-number" }} {{ $containerNumber := index $container.Labels "com.docker.compose.container-number" }}
@ -113,7 +108,7 @@ endpoints:
group: Main group: Main
url: "http://{{ $container.Name }}:8000/health/" url: "http://{{ $container.Name }}:8000/health/"
headers: headers:
Host: "pkmntrade.club" Host: "${DOMAIN_NAME}"
interval: 60s interval: 60s
conditions: conditions:
- "[STATUS] == 200" - "[STATUS] == 200"
@ -122,21 +117,6 @@ endpoints:
- type: email - type: email
{{ end }} {{ end }}
{{ range $container := $web_staging_containers }}
{{ $containerNumber := index $container.Labels "com.docker.compose.container-number" }}
- name: "Web Worker {{ $containerNumber }}"
group: Staging
url: "http://{{ $container.Name }}:8000/health/"
headers:
Host: "staging.pkmntrade.club"
interval: 60s
conditions:
- "[STATUS] == 200"
# - "[BODY] == OK/HEALTHY"
alerts:
- type: email
{{ end }}
alerting: alerting:
email: email:
from: "${GATUS_SMTP_FROM}" from: "${GATUS_SMTP_FROM}"

View file

@ -25,32 +25,27 @@ frontend haproxy_entrypoint
bind :443 ssl crt /certs/crt.pem verify required ca-file /certs/ca.pem bind :443 ssl crt /certs/crt.pem verify required ca-file /certs/ca.pem
use_backend %[req.hdr(host),lower,word(1,:)] # strip out port from host use_backend %[req.hdr(host),lower,word(1,:)] # strip out port from host
frontend checks frontend healthchecks
bind :80 bind :80
default_backend basic_check default_backend basic_loba_check
backend basic_check backend basic_loba_check
http-request return status 200 content-type "text/plain" lf-string "OK/HEALTHY" http-request return status 200 content-type "text/plain" lf-string "OK/HEALTHY"
backend pkmntrade.club backend "${DOMAIN_NAME}"
balance leastconn balance leastconn
http-request set-header Host pkmntrade.club http-request set-header Host "${DOMAIN_NAME}"
server-template gatekeeper-web- 4 gatekeeper-web:8000 check resolvers docker_resolver init-addr libc,none server-template gatekeeper-web- "${REPLICA_COUNT}" gatekeeper-web:8000 check resolvers docker_resolver init-addr none
backend staging.pkmntrade.club backend "feedback.${BASE_DOMAIN_NAME}"
balance leastconn balance leastconn
http-request set-header Host staging.pkmntrade.club http-request set-header Host feedback."${BASE_DOMAIN_NAME}"
server-template gatekeeper-web-staging- 4 gatekeeper-web-staging:8000 check resolvers docker_resolver init-addr libc,none server-template gatekeeper-feedback- 1 gatekeeper-feedback:8000 check resolvers docker_resolver init-addr none
backend feedback.pkmntrade.club backend "health.${BASE_DOMAIN_NAME}"
balance leastconn balance leastconn
http-request set-header Host feedback.pkmntrade.club http-request set-header Host health."${BASE_DOMAIN_NAME}"
server-template gatekeeper-feedback- 4 gatekeeper-feedback:8000 check resolvers docker_resolver init-addr libc,none server-template gatekeeper-health- 1 gatekeeper-health:8000 check resolvers docker_resolver init-addr none
backend health.pkmntrade.club
balance leastconn
http-request set-header Host health.pkmntrade.club
server-template gatekeeper-health- 4 gatekeeper-health:8000 check resolvers docker_resolver init-addr libc,none
#EOF - trailing newline required #EOF - trailing newline required

382
server/scripts/common-lib.sh Executable file
View file

@ -0,0 +1,382 @@
#!/bin/bash
# Common library for deployment scripts
# Source this file in other scripts: source "${SCRIPT_DIR}/common-lib.sh"
# Common constants shared by all deployment scripts.
# Blue/green are the two container "colors" used for zero-downtime cutover.
readonly BLUE_COLOR="blue"
readonly GREEN_COLOR="green"
# Compose project name for the core (color-independent) services.
readonly CORE_PROJECT_NAME="pkmntrade-club"
# Container label that records which color a container belongs to.
readonly DEPLOYMENT_LABEL="deployment.color"
# Retry tuning; overridable via the environment before sourcing this file.
readonly RETRY_MAX_ATTEMPTS="${RETRY_MAX_ATTEMPTS:-5}"
readonly RETRY_DELAY="${RETRY_DELAY:-5}"
# Run a command, or merely describe it when DRY_RUN=true.
# Usage: execute_if_not_dry "description" command [args...]
execute_if_not_dry() {
  local what="$1"
  shift
  if [ "$DRY_RUN" != true ]; then
    "$@"
  else
    indent_output echo "[DRY RUN] Would execute: $what"
    indent_output echo " Command: $*"
  fi
}
# Run a command; on failure print an error and abort the whole script.
# Honors DRY_RUN=true by only describing the command.
# Usage: execute_or_fail "description" command [args...]
execute_or_fail() {
  local description="$1"
  shift
  if [ "$DRY_RUN" = true ]; then
    indent_output echo "[DRY RUN] Would execute: $description"
    indent_output echo " Command: $*"
    return 0
  fi
  "$@" && return 0
  echo "❌ Error: Failed to $description"
  exit 1
}
# Run a command; on failure emit a warning but keep going (non-critical ops).
# Honors DRY_RUN=true by only describing the command.
# Usage: execute_or_warn "description" command [args...]
execute_or_warn() {
  local description="$1"
  shift
  if [ "$DRY_RUN" = true ]; then
    indent_output echo "[DRY RUN] Would execute: $description"
    indent_output echo " Command: $*"
  elif ! "$@"; then
    echo "⚠️ Warning: Failed to $description (continuing anyway)"
  fi
}
# Retry a command with exponential backoff.
# Makes up to RETRY_MAX_ATTEMPTS tries, doubling RETRY_DELAY between them.
# Returns 0 on success, or the command's last exit code after exhaustion.
retry() {
  local limit=$RETRY_MAX_ATTEMPTS
  local wait_s=$RETRY_DELAY
  local tries=1
  local last_status=0
  until "$@"; do
    last_status=$?
    if [ "$tries" -ge "$limit" ]; then
      echo "❌ Command failed after $limit attempts: $*" >&2
      return $last_status
    fi
    echo "⚠️ Attempt $tries failed, retrying in ${wait_s}s..." >&2
    sleep "$wait_s"
    # Exponential backoff
    wait_s=$((wait_s * 2))
    tries=$((tries + 1))
  done
  # Announce recovery only when at least one retry was actually needed.
  [ $tries -gt 1 ] && echo "✅ Command succeeded after $tries attempts"
  return 0
}
# Run a shell command either on the remote deploy target (via the "deploy"
# SSH alias) or locally, depending on whether DEPLOY_HOST is set.
# Usage: run_on_target "command string"
run_on_target() {
  # DEPLOY_HOST set => this script is the controller, so execute remotely.
  # ${DEPLOY_HOST:-} avoids an "unbound variable" abort under `set -u`
  # (manage.sh runs with `set -euo pipefail` and sources this library).
  if [[ -n "${DEPLOY_HOST:-}" ]]; then
    ssh deploy "$*"
  else
    bash -c -- "$*"
  fi
}
# Abort with a clear message unless the named variable is set and non-empty.
# Usage: require_var VAR_NAME
require_var() {
  local var_name=$1
  # ${!var_name:-} tolerates an unset variable under `set -u`, so callers
  # get our clean error message instead of bash's "unbound variable" abort.
  local var_value=${!var_name:-}
  if [ -z "$var_value" ]; then
    echo "Error: ${var_name} not set" >&2
    exit 1
  fi
}
# Determine the active deployment color from running containers.
# Prints "blue", "green", or "none" (no colored containers at all).
# When both colors are up (mid-deployment) the color whose sampled
# container was created most recently wins.
get_current_color() {
  local blue_count=$(docker ps --filter "label=${DEPLOYMENT_LABEL}=${BLUE_COLOR}" -q 2>/dev/null | wc -l)
  local green_count=$(docker ps --filter "label=${DEPLOYMENT_LABEL}=${GREEN_COLOR}" -q 2>/dev/null | wc -l)
  if [ "$blue_count" -gt 0 ] && [ "$green_count" -eq 0 ]; then
    echo "$BLUE_COLOR"
  elif [ "$green_count" -gt 0 ] && [ "$blue_count" -eq 0 ]; then
    echo "$GREEN_COLOR"
  elif [ "$blue_count" -gt 0 ] && [ "$green_count" -gt 0 ]; then
    # Both colors running - return the newer one
    # NOTE(review): assumes `docker ps -q | head -1` yields a representative
    # container and that .Created timestamps compare correctly as strings
    # (true for ISO-8601). The '1970-01-01' fallback makes an uninspectable
    # color lose the comparison — confirm this is the intended tie-break.
    local blue_newest=$(docker inspect --format='{{.Created}}' "$(docker ps -q --filter "label=${DEPLOYMENT_LABEL}=${BLUE_COLOR}" | head -1)" 2>/dev/null || echo '1970-01-01')
    local green_newest=$(docker inspect --format='{{.Created}}' "$(docker ps -q --filter "label=${DEPLOYMENT_LABEL}=${GREEN_COLOR}" | head -1)" 2>/dev/null || echo '1970-01-01')
    if [[ "$blue_newest" > "$green_newest" ]]; then
      echo "$BLUE_COLOR"
    else
      echo "$GREEN_COLOR"
    fi
  else
    echo "none"
  fi
}
# Print the overall deployment state — "both", "blue", "green", or "none" —
# based solely on which colored containers are currently running.
get_deployment_state() {
  local blue_count=$(docker ps --filter "label=${DEPLOYMENT_LABEL}=${BLUE_COLOR}" -q 2>/dev/null | wc -l)
  local green_count=$(docker ps --filter "label=${DEPLOYMENT_LABEL}=${GREEN_COLOR}" -q 2>/dev/null | wc -l)
  if [ "$blue_count" -gt 0 ] && [ "$green_count" -gt 0 ]; then
    echo "both"
  elif [ "$blue_count" -gt 0 ]; then
    echo "$BLUE_COLOR"
  elif [ "$green_count" -gt 0 ]; then
    echo "$GREEN_COLOR"
  else
    echo "none"
  fi
}
# Succeed (exit 0) when containers of BOTH colors are running at once,
# i.e. a blue-green cutover is currently underway.
is_deployment_in_progress() {
  local blue_count=$(docker ps --filter "label=${DEPLOYMENT_LABEL}=${BLUE_COLOR}" -q 2>/dev/null | wc -l)
  local green_count=$(docker ps --filter "label=${DEPLOYMENT_LABEL}=${GREEN_COLOR}" -q 2>/dev/null | wc -l)
  if [ "$blue_count" -gt 0 ] && [ "$green_count" -gt 0 ]; then
    return 0 # true - deployment in progress
  else
    return 1 # false - no deployment in progress
  fi
}
# Return the opposite deployment color (blue <-> green).
# Anything that is not blue — including "none" — maps to blue.
switch_color() {
  case "$1" in
    "$BLUE_COLOR") echo "$GREEN_COLOR" ;;
    *)             echo "$BLUE_COLOR"  ;;
  esac
}
# Compose project name for a given deployment color,
# e.g. "pkmntrade-club-blue".
get_project_name() {
  printf '%s-%s\n' "$CORE_PROJECT_NAME" "$1"
}
# Compose-file argument list for web deployments.
# Staging and production intentionally share a single compose file.
get_compose_files() {
  printf '%s\n' "-f docker-compose_web.yml"
}
# Ask the gatekeeper-manager container to reload its generated config by
# sending SIGHUP; errors are swallowed (the manager may not be running yet).
refresh_gatekeepers() {
  echo "🔄 Refreshing gatekeepers..."
  docker kill -s SIGHUP ${CORE_PROJECT_NAME}-gatekeeper-manager-1 2>/dev/null || true
}
# Count running containers matching the given `docker ps` filter arguments.
# Usage: count_containers "--filter label=... [--filter ...]"
# NOTE(review): ${filters} is intentionally unquoted so multiple --filter
# arguments word-split into separate CLI args.
count_containers() {
  local filters=$1
  docker ps ${filters} -q 2>/dev/null | wc -l | tr -d '\n' || echo 0
}
# Print the release path recorded in <current-link>/.previous_version,
# reading the file on the deploy target via run_on_target.
# Usage: get_previous_release_path CURRENT_LINK_PATH
get_previous_release_path() {
  local current_link_path=$1
  local previous_release_path=$(run_on_target "cat '${current_link_path}/.previous_version'")
  echo "${previous_release_path}"
}
# Stop and remove all containers for one color, running docker compose from
# the release directory that originally started them.
# Usage: cleanup_color_containers COLOR
# Reads CLEANUP_RELEASE_PATH (optional override) and CURRENT_LINK_PATH.
cleanup_color_containers() {
  local color=$1
  local project_name=$(get_project_name "$color")
  # Use CLEANUP_RELEASE_PATH if set, otherwise default to the previous release.
  # This is crucial for rollbacks to use the correct compose file for cleanup.
  local release_path=${CLEANUP_RELEASE_PATH:-$(get_previous_release_path "${CURRENT_LINK_PATH}")}
  echo "🛑 Stopping $color containers from release: ${release_path}"
  run_on_target "cd '${release_path}' && docker compose -p '${project_name}' stop --timeout 30 2>/dev/null || true"
  echo "🗑️ Removing $color containers from release: ${release_path}"
  run_on_target "cd '${release_path}' && docker compose -p '${project_name}' down --remove-orphans 2>/dev/null || true"
}
# Sleep for N seconds, printing MESSAGE followed by a countdown and "done!".
# Usage: wait_with_countdown SECONDS MESSAGE
wait_with_countdown() {
  local remaining=$1
  local banner=$2
  printf '%s' "$banner"
  while [ "$remaining" -gt 0 ]; do
    printf ' %d' "$remaining"
    sleep 1
    remaining=$((remaining - 1))
  done
  echo " done!"
}
# Name of the web service in the compose file (fixed for now).
get_web_service_name() {
  printf 'web\n' # hardcoded for now
}
# Standard environment validation for deployment scripts.
# Requires REPO_PROJECT_PATH, PROD and REPLICA_COUNT, plus PRODUCTION_DOMAIN
# (PROD=true) or STAGING_DOMAIN (otherwise). Exits via require_var on
# missing values; on success exports the derived release paths.
validate_deployment_env() {
  require_var "REPO_PROJECT_PATH"
  require_var "PROD"
  require_var "REPLICA_COUNT"
  if [ "$PROD" = "true" ]; then
    require_var "PRODUCTION_DOMAIN"
  else
    require_var "STAGING_DOMAIN"
  fi
  # Set derived variables
  export CURRENT_LINK_PATH="${REPO_PROJECT_PATH}/current"
  export RELEASES_PATH="${REPO_PROJECT_PATH}/releases"
  # Re-export so child processes (docker compose) can see REPLICA_COUNT.
  export REPLICA_COUNT="${REPLICA_COUNT}"
}
# Print a per-container health summary parsed from `docker ps` status text,
# and return the number of unhealthy containers as the exit status.
# NOTE(review): bash return codes truncate to 0-255, and the inner
# `local status` shadows the loop variable of the same name — it works,
# but is fragile; confirm before relying on the exact count.
get_health_check_status() {
  # TODO: instead get the health check status from gatus container's api
  local statuses=$(docker ps --format '{{.Names}} {{.Status}}')
  local unhealthy_count=0
  # Split docker output on newlines only, one container per iteration.
  local IFS=$'\n'
  for status in $statuses; do
    local name=$(echo $status | cut -d' ' -f1)
    local status=$(echo $status | cut -d' ' -f2-)
    if [[ "$status" == *"unhealthy"* ]]; then
      unhealthy_count=$((unhealthy_count + 1))
      echo "❌ Unhealthy: $name [$status]"
    else
      echo "✅ Healthy: $name [$status]"
    fi
  done
  return $unhealthy_count
}
# Poll until the expected number of containers for a compose project/service
# report health=healthy, or time out after ~5 minutes.
# Usage: wait_for_healthy_containers PROJECT SERVICE EXPECTED_COUNT
# Returns 0 when all are healthy, 1 on timeout.
wait_for_healthy_containers() {
  local project_name=$1
  local service_name=$2
  local expected_count=$3
  local max_attempts=60 # 5 minutes with 5-second intervals
  local attempt=0
  echo "⏳ Waiting for $service_name containers to be healthy..."
  while [ $attempt -lt $max_attempts ]; do
    # NOTE(review): healthy_count is not declared `local`, so it leaks into
    # the caller's scope — presumably an oversight; confirm before changing.
    healthy_count=$(count_containers "--filter label=com.docker.compose.project=${project_name} --filter label=com.docker.compose.service=${service_name} --filter health=healthy")
    if [[ "$healthy_count" -eq "$expected_count" ]]; then
      echo "✅ All $service_name containers are healthy ($healthy_count/$expected_count)"
      return 0
    fi
    echo "⏳ Healthy containers: $healthy_count/$expected_count (attempt $((attempt+1))/$max_attempts)"
    sleep 5
    attempt=$((attempt + 1))
  done
  echo "❌ Timeout waiting for $service_name containers to be healthy"
  return 1
}
# List release directories under <repo>/releases, newest first, marking the
# current release ([CURRENT]) and any with a .failed marker ([FAILED]).
# Usage: list_releases REPO_PROJECT_PATH
list_releases() {
  local REPO_PROJECT_PATH=$1
  local RELEASES_PATH="${REPO_PROJECT_PATH}/releases"
  local CURRENT_LINK_PATH="${REPO_PROJECT_PATH}/current"
  echo "📋 Available releases:"
  if [ -d "$RELEASES_PATH" ]; then
    # NOTE(review): word-splitting `ls` output breaks on paths containing
    # spaces; tolerated here because release names are version strings.
    for release in $(ls -dt ${RELEASES_PATH}/*); do
      version=$(basename "$release")
      status=""
      # Check if it's current
      if [ -L "$CURRENT_LINK_PATH" ] && [ "$(readlink -f "$CURRENT_LINK_PATH")" = "$(realpath "$release")" ]; then
        status=" [CURRENT]"
      fi
      # Check if it failed
      if [ -f "${release}/.failed" ]; then
        status="${status} [FAILED]"
      fi
      indent_output echo "- ${version}${status}"
    done
  else
    indent_output echo "No releases found"
  fi
}
# Print the deployment.image_tag label of the first running container in the
# given color's compose project, or "unknown" when none are running.
# Usage: get_deployment_image_tag COLOR
get_deployment_image_tag() {
  local color=$1
  local container=$(docker ps --filter "label=com.docker.compose.project=${CORE_PROJECT_NAME}-${color}" --format '{{.Names}}'| head -1)
  if [ -n "$container" ]; then
    docker inspect "${container}" --format '{{index .Config.Labels "deployment.image_tag"}}'
  else
    echo "unknown"
  fi
}
# Run a command and prepend PREFIX to every line it prints (stdout+stderr).
# Usage: prefix_output "PREFIX" command [args...]
# Example: prefix_output " | " docker ps
# Example: prefix_output " => " docker compose ps
# Returns the command's exit status, not sed's.
prefix_output() {
  if [ $# -lt 2 ]; then
    echo "Error: prefix_output requires at least 2 arguments" >&2
    return 1
  fi
  local prefix="$1"
  shift
  "$@" 2>&1 | sed "s/^/${prefix}/"
  # PIPESTATUS[0] is the wrapped command's exit code from the pipeline above.
  return ${PIPESTATUS[0]}
}
# Function to run a command and indent its output
# Usage: indent_output [INDENT_STRING] command [args...]
# Example: indent_output docker ps # Uses default 2 spaces
# Example: indent_output " " docker ps # Uses 4 spaces
# NOTE(review): a whitespace-only FIRST argument is always consumed as the
# indent string, so a command whose name is pure whitespace cannot be run.
indent_output() {
  local indent=" " # Default to 2 spaces
  # Check if first argument looks like an indent string (starts with spaces or tabs)
  if [[ "$1" =~ ^[[:space:]]+$ ]]; then
    indent="$1"
    shift
  fi
  # Use prefix_output with the indent string
  prefix_output "$indent" "$@"
}
# Function to run command with header and indented output
# Usage: run_with_header "HEADER" command [args...]
# Example: run_with_header "Docker Containers:" docker ps
run_with_header() {
  local header="$1"
  shift
  echo "$header"
  # Indent the command's output beneath the header line.
  indent_output " " "$@"
}

377
server/scripts/manage.sh Executable file
View file

@ -0,0 +1,377 @@
#!/bin/bash
set -euo pipefail
# Manage deployment releases
# Usage: ./manage.sh [--dry-run] COMMAND [ARGS]
# Commands: status | list | version | switch VERSION | cleanup [KEEP]
# Source common functions
SCRIPT_DIR="$(cd "$(dirname "$(realpath "${BASH_SOURCE[0]}")")" && pwd)"
source "${SCRIPT_DIR}/common-lib.sh"
# Global variables
DRY_RUN=false
COMMAND=""
ARGS=()
# Parse global options
while [[ $# -gt 0 ]]; do
  case "$1" in
    --dry-run)
      DRY_RUN=true
      shift
      ;;
    -*)
      echo "Error: Unknown option: $1"
      echo "Usage: $0 [--dry-run] COMMAND [ARGS]"
      exit 1
      ;;
    *)
      # First non-option argument is the command
      if [ -z "$COMMAND" ]; then
        COMMAND="$1"
      else
        # Rest are command arguments
        ARGS+=("$1")
      fi
      shift
      ;;
  esac
done
if [ -z "$COMMAND" ]; then
  echo "Error: No command specified"
  echo "Usage: $0 [--dry-run] COMMAND [ARGS]"
  echo "Commands:"
  indent_output echo "status - Show deployment status"
  indent_output echo "list - List all releases"
  indent_output echo "version - Show current release"
  indent_output echo "switch VERSION - Switch to a specific release version"
  indent_output echo "cleanup [KEEP] - Clean up old releases (default: keep 5)"
  echo ""
  echo "Global options:"
  indent_output echo "--dry-run - Show what would happen without making changes"
  exit 1
fi
# Repo layout: <repo>/current is a symlink into <repo>/releases/<version>.
REPO_PROJECT_PATH="$(realpath "${SCRIPT_DIR}/../../../")"
CURRENT_LINK_PATH="${REPO_PROJECT_PATH}/current"
RELEASES_PATH="${REPO_PROJECT_PATH}/releases"
# Announce dry-run mode if active
if [ "$DRY_RUN" = true ]; then
  echo "🔍 DRY RUN MODE - No changes will be made"
  echo ""
fi
case "$COMMAND" in
  status)
    echo "🔍 Deployment Status"
    echo "===================="
    # Check if deployment is downgraded
    if [ -d "$RELEASES_PATH" ] && [ -L "$CURRENT_LINK_PATH" ]; then
      CURRENT_RELEASE_DIR_NAME=$(basename "$(readlink -f "$CURRENT_LINK_PATH")")
      # Find the latest release by modification time.
      LATEST_RELEASE_DIR_NAME=$(find "$RELEASES_PATH" -maxdepth 1 -mindepth 1 -type d -printf '%T@ %f\n' | sort -nr | head -n 1 | cut -d' ' -f2-)
      if [ -n "$LATEST_RELEASE_DIR_NAME" ]; then
        if [ "$CURRENT_RELEASE_DIR_NAME" == "$LATEST_RELEASE_DIR_NAME" ]; then
          echo "✅ Deployment is on the latest release (${LATEST_RELEASE_DIR_NAME})."
        else
          echo "⚠️ Deployment is downgraded."
          indent_output echo "Current: ${CURRENT_RELEASE_DIR_NAME}"
          indent_output echo "Latest: ${LATEST_RELEASE_DIR_NAME}"
        fi
      else
        # This case happens if RELEASES_PATH is empty
        echo " No releases found in ${RELEASES_PATH}."
      fi
    elif [ ! -L "$CURRENT_LINK_PATH" ]; then
      echo " No current deployment symlink found."
    else # RELEASES_PATH does not exist
      echo " Releases directory not found at ${RELEASES_PATH}."
    fi
    echo "" # Add a newline for spacing
    # Get current state
    CURRENT_STATE=$(get_deployment_state)
    if [ "$CURRENT_STATE" = "both" ]; then
      echo "🟡 Deployment State: both"
    elif [ "$CURRENT_STATE" = "blue" ]; then
      echo "🔵 Deployment State: blue"
    elif [ "$CURRENT_STATE" = "green" ]; then
      echo "🟢 Deployment State: green"
    else
      indent_output echo "Deployment State: none"
    fi
    echo "⚙️ Core Containers:"
    indent_output docker ps --filter 'label=deployment.core=true' --format 'table {{.Names}}\t{{.Status}}\t{{.CreatedAt}}'
    # Show containers by color with image info
    echo "🔵 Blue Containers:"
    BLUE_COUNT=$(count_containers "--filter label=deployment.color=blue")
    # make sure BLUE_COUNT is a number
    BLUE_COUNT=$(echo "$BLUE_COUNT" | tr -d '\n')
    if [ "$BLUE_COUNT" -gt 0 ]; then
      BLUE_IMAGE=$(get_deployment_image_tag "blue")
      indent_output echo "Image: ${BLUE_IMAGE}"
      indent_output docker ps --filter 'label=deployment.color=blue' --format 'table {{.Names}}\t{{.Status}}\t{{.CreatedAt}}'
    else
      indent_output echo "No blue containers running"
    fi
    echo "🟢 Green Containers:"
    # NOTE(review): GREEN_COUNT is not sanitized with tr like BLUE_COUNT is;
    # confirm whether the asymmetry is deliberate.
    GREEN_COUNT=$(count_containers "--filter label=deployment.color=green")
    if [ "$GREEN_COUNT" -gt 0 ]; then
      GREEN_IMAGE=$(get_deployment_image_tag "green")
      indent_output echo "Image: ${GREEN_IMAGE}"
      indent_output docker ps --filter 'label=deployment.color=green' --format 'table {{.Names}}\t{{.Status}}\t{{.CreatedAt}}'
    else
      indent_output echo "No green containers running"
    fi
    list_releases "${REPO_PROJECT_PATH}"
    # Health check summary
    echo "❤️ Health Check Summary:"
    case "$CURRENT_STATE" in
      "both")
        indent_output echo "⚠️ WARNING: Both blue and green containers are running!"
        indent_output echo "This might indicate an incomplete deployment."
        ;;
      "none")
        indent_output echo "⚠️ WARNING: No web containers are running!"
        ;;
      *)
        if [ "$CURRENT_STATE" = "blue" ]; then
          indent_output echo "🔵 System is running on blue deployment"
        else
          indent_output echo "🟢 System is running on green deployment"
        fi
        indent_output echo "❤️ Overall Healthcheck:"
        indent_output " " get_health_check_status
        ;;
    esac
    # Show resource usage
    echo "📊 Resource Usage:"
    indent_output docker stats --no-stream --format 'table {{.Name}}\t{{.CPUPerc}}\t{{.MemPerc}}\t{{.MemUsage}}\t{{.NetIO}}'
    # Show deployment images
    echo "📦 Deployment Images:"
    indent_output docker images 'badbl0cks/pkmntrade-club' --format 'table {{.Tag}}\t{{.ID}}\t{{.CreatedAt}}\t{{.Size}}' | grep -E '^TAG|sha-.{7} ' || indent_output echo "No deployment images found"
    ;;
  list)
    list_releases "${REPO_PROJECT_PATH}"
    ;;
  version)
    if [ -L "$CURRENT_LINK_PATH" ]; then
      current_version=$(basename "$(readlink -f "$CURRENT_LINK_PATH")")
      echo "📌 Current version: ${current_version}"
    else
      echo "❌ No current deployment found"
    fi
    ;;
  switch)
    if [ ${#ARGS[@]} -lt 1 ]; then
      echo "Error: VERSION required for switch"
      echo "Usage: $0 [--dry-run] switch VERSION"
      exit 1
    fi
    TARGET_VERSION="${ARGS[0]}"
    TARGET_PATH="${RELEASES_PATH}/${TARGET_VERSION}"
    # Validate target version exists
    if [ ! -d "$TARGET_PATH" ]; then
      echo "❌ Error: Version ${TARGET_VERSION} not found"
      echo "Available releases:"
      list_releases "${REPO_PROJECT_PATH}"
      exit 1
    fi
    # Get current version if exists
    CURRENT_VERSION="none"
    CURRENT_VERSION_PATH=""
    if [ -L "$CURRENT_LINK_PATH" ]; then
      CURRENT_VERSION_PATH=$(readlink -f "$CURRENT_LINK_PATH")
      CURRENT_VERSION=$(basename "$CURRENT_VERSION_PATH")
    fi
    # Edge case: trying to switch to the same version
    if [ "$CURRENT_VERSION" == "$TARGET_VERSION" ]; then
      echo "✅ Already on version ${TARGET_VERSION}. No action taken."
      exit 0
    fi
    # Switching releases also flips the blue/green color so the new
    # containers come up under a different compose project name.
    CURRENT_COLOR=$(get_current_color)
    NEW_COLOR=$(switch_color "$CURRENT_COLOR")
    echo "🔄 Switch Plan:"
    indent_output echo "Current version: ${CURRENT_VERSION}"
    if [ "$CURRENT_VERSION" != "none" ]; then
      indent_output echo "Current path: ${CURRENT_VERSION_PATH}"
      indent_output echo "Current color: ${CURRENT_COLOR}"
    fi
    indent_output echo "Target version: ${TARGET_VERSION}"
    indent_output echo "Target path: ${TARGET_PATH}"
    indent_output echo "Target color: ${NEW_COLOR}"
    # Verify target release has necessary files
    echo "📋 Checking target release integrity..."
    MISSING_FILES=()
    for file in "docker-compose_web.yml" "docker-compose_core.yml" ".env"; do
      if [ ! -f "${TARGET_PATH}/${file}" ]; then
        MISSING_FILES+=("$file")
      fi
    done
    if [ ${#MISSING_FILES[@]} -gt 0 ]; then
      echo "❌ Error: Target release is missing required files:"
      printf " - %s\n" "${MISSING_FILES[@]}"
      exit 1
    fi
    # Get compose files based on environment
    COMPOSE_FILES=$(get_compose_files)
    echo "🛑 Stopping current containers..."
    if [ -d "$CURRENT_VERSION_PATH" ]; then
      # Subshell so the cd does not affect the rest of the script.
      (
        cd "$CURRENT_VERSION_PATH" || exit 1
        WEB_PROJECT_NAME=$(get_project_name "$CURRENT_COLOR")
        indent_output echo "Stopping web containers for project: ${WEB_PROJECT_NAME}..."
        execute_or_warn "stop web containers" docker compose ${COMPOSE_FILES} -p "${WEB_PROJECT_NAME}" down
        indent_output echo "Stopping core services for project: ${CORE_PROJECT_NAME}..."
        execute_or_warn "stop core services" docker compose -f "docker-compose_core.yml" -p "${CORE_PROJECT_NAME}" down
      )
    else
      indent_output echo "No current deployment to stop"
    fi
    echo "📝 Updating deployment metadata for ${TARGET_VERSION}..."
    execute_or_fail "update .deployment_color to ${NEW_COLOR}" \
      bash -c "echo '${NEW_COLOR}' > '${TARGET_PATH}/.deployment_color'"
    execute_or_fail "update DEPLOYMENT_COLOR in .env" \
      bash -c "sed -i 's/^DEPLOYMENT_COLOR=.*/DEPLOYMENT_COLOR=${NEW_COLOR}/' '${TARGET_PATH}/.env'"
    # Update symlink
    echo "🔗 Updating deployment symlink..."
    execute_or_fail "update symlink from $CURRENT_LINK_PATH to $TARGET_PATH" \
      ln -sfn "$TARGET_PATH" "$CURRENT_LINK_PATH"
    # Start containers
    echo "🚀 Starting containers from ${TARGET_VERSION}..."
    (
      cd "$TARGET_PATH" || exit 1
      TARGET_WEB_PROJECT_NAME=$(get_project_name "$NEW_COLOR")
      indent_output echo "Starting core services for project: ${CORE_PROJECT_NAME}..."
      execute_or_fail "start core services" \
        docker compose -f "docker-compose_core.yml" -p "${CORE_PROJECT_NAME}" up -d
      indent_output echo "Starting web containers for project: ${TARGET_WEB_PROJECT_NAME}..."
      execute_or_fail "start web containers" \
        docker compose ${COMPOSE_FILES} -p "${TARGET_WEB_PROJECT_NAME}" up -d
    )
    if [ "$DRY_RUN" = true ]; then
      echo ""
      echo "✅ Dry run completed - no changes made"
    else
      echo "✅ Switch completed to version: ${TARGET_VERSION}"
      echo "Run '$0 status' to verify deployment health"
    fi
    ;;
  cleanup)
    # Parse cleanup arguments
    KEEP_COUNT=5
    # NOTE(review): with no args, "${ARGS[@]}" on an empty array trips
    # `set -u` on bash < 4.4 — confirm the target bash version.
    for arg in "${ARGS[@]}"; do
      if [[ "$arg" =~ ^[0-9]+$ ]]; then
        KEEP_COUNT="$arg"
      else
        echo "Error: Invalid argument for cleanup: $arg"
        echo "Usage: $0 [--dry-run] cleanup [KEEP_COUNT]"
        exit 1
      fi
    done
    echo "🗑️ Cleaning up old releases (keeping last ${KEEP_COUNT} and current)"
    if [ ! -L "$CURRENT_LINK_PATH" ]; then
      echo "❌ No current deployment symlink found. Aborting cleanup."
      exit 1
    fi
    CURRENT_RELEASE_DIR_NAME=$(basename "$(readlink -f "$CURRENT_LINK_PATH")")
    echo "📌 Current release: ${CURRENT_RELEASE_DIR_NAME}"
    if [ -d "$RELEASES_PATH" ]; then
      cd "$RELEASES_PATH"
      # Get a list of inactive release directories, sorted by modification time (newest first).
      INACTIVE_RELEASES=$(find . -maxdepth 1 -mindepth 1 -type d \
        -not -name "$CURRENT_RELEASE_DIR_NAME" \
        -printf '%T@ %f\n' | sort -nr | cut -d' ' -f2-)
      if [ -z "$INACTIVE_RELEASES" ]; then
        echo "No inactive releases found to clean up."
        exit 0
      fi
      # Count total inactive releases
      TOTAL_INACTIVE=$(echo "$INACTIVE_RELEASES" | wc -l | xargs)
      echo "📊 Found ${TOTAL_INACTIVE} inactive release(s)"
      # Identify releases to delete by skipping the KEEP_COUNT newest ones.
      RELEASES_TO_DELETE=$(echo "$INACTIVE_RELEASES" | tail -n +$((KEEP_COUNT + 1)))
      if [ -n "$RELEASES_TO_DELETE" ]; then
        DELETE_COUNT=$(echo "$RELEASES_TO_DELETE" | wc -l | xargs)
        echo "🗑️ The following ${DELETE_COUNT} old release(s) will be deleted:"
        # Show releases with their sizes
        while IFS= read -r release; do
          if [ -d "$release" ]; then
            SIZE=$(du -sh "$release" 2>/dev/null | cut -f1)
            indent_output echo "- $release (Size: $SIZE)"
          fi
        done <<< "$RELEASES_TO_DELETE"
        # Delete the releases
        echo ""
        while IFS= read -r release; do
          execute_if_not_dry "delete release $release" rm -rf "$release"
        done <<< "$RELEASES_TO_DELETE"
        if [ "$DRY_RUN" = true ]; then
          echo ""
          echo "✅ Dry run completed - no releases were deleted"
        else
          echo "✅ Cleanup completed - deleted ${DELETE_COUNT} release(s)"
        fi
      else
        KEPT_COUNT=$(echo "$INACTIVE_RELEASES" | wc -l | tr -d '\n')
        echo "No old releases to delete. Found ${KEPT_COUNT} inactive release(s), which is within the retention count of ${KEEP_COUNT}."
      fi
    else
      echo "No releases directory found"
    fi
    ;;
  *)
    echo "Error: Unknown command: $COMMAND"
    exit 1
    ;;
esac

86
uv.lock generated
View file

@ -34,7 +34,7 @@ wheels = [
[[package]] [[package]]
name = "celery" name = "celery"
version = "5.5.2" version = "5.5.3"
source = { registry = "https://pypi.org/simple" } source = { registry = "https://pypi.org/simple" }
dependencies = [ dependencies = [
{ name = "billiard" }, { name = "billiard" },
@ -46,9 +46,9 @@ dependencies = [
{ name = "python-dateutil" }, { name = "python-dateutil" },
{ name = "vine" }, { name = "vine" },
] ]
sdist = { url = "https://files.pythonhosted.org/packages/bf/03/5d9c6c449248958f1a5870e633a29d7419ff3724c452a98ffd22688a1a6a/celery-5.5.2.tar.gz", hash = "sha256:4d6930f354f9d29295425d7a37261245c74a32807c45d764bedc286afd0e724e", size = 1666892 } sdist = { url = "https://files.pythonhosted.org/packages/bb/7d/6c289f407d219ba36d8b384b42489ebdd0c84ce9c413875a8aae0c85f35b/celery-5.5.3.tar.gz", hash = "sha256:6c972ae7968c2b5281227f01c3a3f984037d21c5129d07bf3550cc2afc6b10a5", size = 1667144 }
wheels = [ wheels = [
{ url = "https://files.pythonhosted.org/packages/04/94/8e825ac1cf59d45d20c4345d4461e6b5263ae475f708d047c3dad0ac6401/celery-5.5.2-py3-none-any.whl", hash = "sha256:54425a067afdc88b57cd8d94ed4af2ffaf13ab8c7680041ac2c4ac44357bdf4c", size = 438626 }, { url = "https://files.pythonhosted.org/packages/c9/af/0dcccc7fdcdf170f9a1585e5e96b6fb0ba1749ef6be8c89a6202284759bd/celery-5.5.3-py3-none-any.whl", hash = "sha256:0b5761a07057acee94694464ca482416b959568904c9dfa41ce8413a7d65d525", size = 438775 },
] ]
[[package]] [[package]]
@ -315,14 +315,14 @@ wheels = [
[[package]] [[package]]
name = "django-health-check" name = "django-health-check"
version = "3.18.3" version = "3.19.0"
source = { registry = "https://pypi.org/simple" } source = { registry = "https://pypi.org/simple" }
dependencies = [ dependencies = [
{ name = "django" }, { name = "django" },
] ]
sdist = { url = "https://files.pythonhosted.org/packages/66/e9/0699ea3debfda75e5960ff99f56974136380e6f8202d453de7357e1f67fc/django_health_check-3.18.3.tar.gz", hash = "sha256:18b75daca4551c69a43f804f9e41e23f5f5fb9efd06cf6a313b3d5031bb87bd0", size = 20919 } sdist = { url = "https://files.pythonhosted.org/packages/c0/96/60db7257c05418b60ceb9d2c0a568e923394582111e809f1bb3749a7ee60/django_health_check-3.19.0.tar.gz", hash = "sha256:1a995ed4fa08a776beedff65f8f1ec0c22fb6764493f33fb1307fe4c6f23b8c3", size = 20088 }
wheels = [ wheels = [
{ url = "https://files.pythonhosted.org/packages/e2/1e/3b23b580762cca7456427731de9b90718d15eec02ebe096437469d767dfe/django_health_check-3.18.3-py2.py3-none-any.whl", hash = "sha256:f5f58762b80bdf7b12fad724761993d6e83540f97e2c95c42978f187e452fa07", size = 30331 }, { url = "https://files.pythonhosted.org/packages/8f/35/c08be7e0012a7927c5f01185c0df39e0fa249cfc17234cce798c2afaf6bb/django_health_check-3.19.0-py3-none-any.whl", hash = "sha256:30b58d761f40fef47971b8dc145df15bdb71339108034860bbf1d505387aa1ec", size = 31969 },
] ]
[[package]] [[package]]
@ -468,36 +468,35 @@ wheels = [
[[package]] [[package]]
name = "greenlet" name = "greenlet"
version = "3.2.2" version = "3.2.3"
source = { registry = "https://pypi.org/simple" } source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/34/c1/a82edae11d46c0d83481aacaa1e578fea21d94a1ef400afd734d47ad95ad/greenlet-3.2.2.tar.gz", hash = "sha256:ad053d34421a2debba45aa3cc39acf454acbcd025b3fc1a9f8a0dee237abd485", size = 185797 } sdist = { url = "https://files.pythonhosted.org/packages/c9/92/bb85bd6e80148a4d2e0c59f7c0c2891029f8fd510183afc7d8d2feeed9b6/greenlet-3.2.3.tar.gz", hash = "sha256:8b0dd8ae4c0d6f5e54ee55ba935eeb3d735a9b58a8a1e5b5cbab64e01a39f365", size = 185752 }
wheels = [ wheels = [
{ url = "https://files.pythonhosted.org/packages/2c/a1/88fdc6ce0df6ad361a30ed78d24c86ea32acb2b563f33e39e927b1da9ea0/greenlet-3.2.2-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:df4d1509efd4977e6a844ac96d8be0b9e5aa5d5c77aa27ca9f4d3f92d3fcf330", size = 270413 }, { url = "https://files.pythonhosted.org/packages/f3/94/ad0d435f7c48debe960c53b8f60fb41c2026b1d0fa4a99a1cb17c3461e09/greenlet-3.2.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:25ad29caed5783d4bd7a85c9251c651696164622494c00802a139c00d639242d", size = 271992 },
{ url = "https://files.pythonhosted.org/packages/a6/2e/6c1caffd65490c68cd9bcec8cb7feb8ac7b27d38ba1fea121fdc1f2331dc/greenlet-3.2.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da956d534a6d1b9841f95ad0f18ace637668f680b1339ca4dcfb2c1837880a0b", size = 637242 }, { url = "https://files.pythonhosted.org/packages/93/5d/7c27cf4d003d6e77749d299c7c8f5fd50b4f251647b5c2e97e1f20da0ab5/greenlet-3.2.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:88cd97bf37fe24a6710ec6a3a7799f3f81d9cd33317dcf565ff9950c83f55e0b", size = 638820 },
{ url = "https://files.pythonhosted.org/packages/98/28/088af2cedf8823b6b7ab029a5626302af4ca1037cf8b998bed3a8d3cb9e2/greenlet-3.2.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c7b15fb9b88d9ee07e076f5a683027bc3befd5bb5d25954bb633c385d8b737e", size = 651444 }, { url = "https://files.pythonhosted.org/packages/c6/7e/807e1e9be07a125bb4c169144937910bf59b9d2f6d931578e57f0bce0ae2/greenlet-3.2.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:baeedccca94880d2f5666b4fa16fc20ef50ba1ee353ee2d7092b383a243b0b0d", size = 653046 },
{ url = "https://files.pythonhosted.org/packages/4a/9f/0116ab876bb0bc7a81eadc21c3f02cd6100dcd25a1cf2a085a130a63a26a/greenlet-3.2.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:752f0e79785e11180ebd2e726c8a88109ded3e2301d40abced2543aa5d164275", size = 646067 }, { url = "https://files.pythonhosted.org/packages/9d/ab/158c1a4ea1068bdbc78dba5a3de57e4c7aeb4e7fa034320ea94c688bfb61/greenlet-3.2.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:be52af4b6292baecfa0f397f3edb3c6092ce071b499dd6fe292c9ac9f2c8f264", size = 647701 },
{ url = "https://files.pythonhosted.org/packages/35/17/bb8f9c9580e28a94a9575da847c257953d5eb6e39ca888239183320c1c28/greenlet-3.2.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ae572c996ae4b5e122331e12bbb971ea49c08cc7c232d1bd43150800a2d6c65", size = 648153 }, { url = "https://files.pythonhosted.org/packages/cc/0d/93729068259b550d6a0288da4ff72b86ed05626eaf1eb7c0d3466a2571de/greenlet-3.2.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0cc73378150b8b78b0c9fe2ce56e166695e67478550769536a6742dca3651688", size = 649747 },
{ url = "https://files.pythonhosted.org/packages/2c/ee/7f31b6f7021b8df6f7203b53b9cc741b939a2591dcc6d899d8042fcf66f2/greenlet-3.2.2-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02f5972ff02c9cf615357c17ab713737cccfd0eaf69b951084a9fd43f39833d3", size = 603865 }, { url = "https://files.pythonhosted.org/packages/f6/f6/c82ac1851c60851302d8581680573245c8fc300253fc1ff741ae74a6c24d/greenlet-3.2.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:706d016a03e78df129f68c4c9b4c4f963f7d73534e48a24f5f5a7101ed13dbbb", size = 605461 },
{ url = "https://files.pythonhosted.org/packages/b5/2d/759fa59323b521c6f223276a4fc3d3719475dc9ae4c44c2fe7fc750f8de0/greenlet-3.2.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:4fefc7aa68b34b9224490dfda2e70ccf2131368493add64b4ef2d372955c207e", size = 1119575 }, { url = "https://files.pythonhosted.org/packages/98/82/d022cf25ca39cf1200650fc58c52af32c90f80479c25d1cbf57980ec3065/greenlet-3.2.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:419e60f80709510c343c57b4bb5a339d8767bf9aef9b8ce43f4f143240f88b7c", size = 1121190 },
{ url = "https://files.pythonhosted.org/packages/30/05/356813470060bce0e81c3df63ab8cd1967c1ff6f5189760c1a4734d405ba/greenlet-3.2.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a31ead8411a027c2c4759113cf2bd473690517494f3d6e4bf67064589afcd3c5", size = 1147460 }, { url = "https://files.pythonhosted.org/packages/f5/e1/25297f70717abe8104c20ecf7af0a5b82d2f5a980eb1ac79f65654799f9f/greenlet-3.2.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:93d48533fade144203816783373f27a97e4193177ebaaf0fc396db19e5d61163", size = 1149055 },
{ url = "https://files.pythonhosted.org/packages/07/f4/b2a26a309a04fb844c7406a4501331b9400e1dd7dd64d3450472fd47d2e1/greenlet-3.2.2-cp312-cp312-win_amd64.whl", hash = "sha256:b24c7844c0a0afc3ccbeb0b807adeefb7eff2b5599229ecedddcfeb0ef333bec", size = 296239 }, { url = "https://files.pythonhosted.org/packages/1f/8f/8f9e56c5e82eb2c26e8cde787962e66494312dc8cb261c460e1f3a9c88bc/greenlet-3.2.3-cp312-cp312-win_amd64.whl", hash = "sha256:7454d37c740bb27bdeddfc3f358f26956a07d5220818ceb467a483197d84f849", size = 297817 },
{ url = "https://files.pythonhosted.org/packages/89/30/97b49779fff8601af20972a62cc4af0c497c1504dfbb3e93be218e093f21/greenlet-3.2.2-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:3ab7194ee290302ca15449f601036007873028712e92ca15fc76597a0aeb4c59", size = 269150 }, { url = "https://files.pythonhosted.org/packages/b1/cf/f5c0b23309070ae93de75c90d29300751a5aacefc0a3ed1b1d8edb28f08b/greenlet-3.2.3-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:500b8689aa9dd1ab26872a34084503aeddefcb438e2e7317b89b11eaea1901ad", size = 270732 },
{ url = "https://files.pythonhosted.org/packages/21/30/877245def4220f684bc2e01df1c2e782c164e84b32e07373992f14a2d107/greenlet-3.2.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2dc5c43bb65ec3669452af0ab10729e8fdc17f87a1f2ad7ec65d4aaaefabf6bf", size = 637381 }, { url = "https://files.pythonhosted.org/packages/48/ae/91a957ba60482d3fecf9be49bc3948f341d706b52ddb9d83a70d42abd498/greenlet-3.2.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a07d3472c2a93117af3b0136f246b2833fdc0b542d4a9799ae5f41c28323faef", size = 639033 },
{ url = "https://files.pythonhosted.org/packages/8e/16/adf937908e1f913856b5371c1d8bdaef5f58f251d714085abeea73ecc471/greenlet-3.2.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:decb0658ec19e5c1f519faa9a160c0fc85a41a7e6654b3ce1b44b939f8bf1325", size = 651427 }, { url = "https://files.pythonhosted.org/packages/6f/df/20ffa66dd5a7a7beffa6451bdb7400d66251374ab40b99981478c69a67a8/greenlet-3.2.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:8704b3768d2f51150626962f4b9a9e4a17d2e37c8a8d9867bbd9fa4eb938d3b3", size = 652999 },
{ url = "https://files.pythonhosted.org/packages/ad/49/6d79f58fa695b618654adac64e56aff2eeb13344dc28259af8f505662bb1/greenlet-3.2.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6fadd183186db360b61cb34e81117a096bff91c072929cd1b529eb20dd46e6c5", size = 645795 }, { url = "https://files.pythonhosted.org/packages/51/b4/ebb2c8cb41e521f1d72bf0465f2f9a2fd803f674a88db228887e6847077e/greenlet-3.2.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5035d77a27b7c62db6cf41cf786cfe2242644a7a337a0e155c80960598baab95", size = 647368 },
{ url = "https://files.pythonhosted.org/packages/5a/e6/28ed5cb929c6b2f001e96b1d0698c622976cd8f1e41fe7ebc047fa7c6dd4/greenlet-3.2.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1919cbdc1c53ef739c94cf2985056bcc0838c1f217b57647cbf4578576c63825", size = 648398 }, { url = "https://files.pythonhosted.org/packages/8e/6a/1e1b5aa10dced4ae876a322155705257748108b7fd2e4fae3f2a091fe81a/greenlet-3.2.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2d8aa5423cd4a396792f6d4580f88bdc6efcb9205891c9d40d20f6e670992efb", size = 650037 },
{ url = "https://files.pythonhosted.org/packages/9d/70/b200194e25ae86bc57077f695b6cc47ee3118becf54130c5514456cf8dac/greenlet-3.2.2-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3885f85b61798f4192d544aac7b25a04ece5fe2704670b4ab73c2d2c14ab740d", size = 606795 }, { url = "https://files.pythonhosted.org/packages/26/f2/ad51331a157c7015c675702e2d5230c243695c788f8f75feba1af32b3617/greenlet-3.2.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2c724620a101f8170065d7dded3f962a2aea7a7dae133a009cada42847e04a7b", size = 608402 },
{ url = "https://files.pythonhosted.org/packages/f8/c8/ba1def67513a941154ed8f9477ae6e5a03f645be6b507d3930f72ed508d3/greenlet-3.2.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:85f3e248507125bf4af607a26fd6cb8578776197bd4b66e35229cdf5acf1dfbf", size = 1117976 }, { url = "https://files.pythonhosted.org/packages/26/bc/862bd2083e6b3aff23300900a956f4ea9a4059de337f5c8734346b9b34fc/greenlet-3.2.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:873abe55f134c48e1f2a6f53f7d1419192a3d1a4e873bace00499a4e45ea6af0", size = 1119577 },
{ url = "https://files.pythonhosted.org/packages/c3/30/d0e88c1cfcc1b3331d63c2b54a0a3a4a950ef202fb8b92e772ca714a9221/greenlet-3.2.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:1e76106b6fc55fa3d6fe1c527f95ee65e324a13b62e243f77b48317346559708", size = 1145509 }, { url = "https://files.pythonhosted.org/packages/86/94/1fc0cc068cfde885170e01de40a619b00eaa8f2916bf3541744730ffb4c3/greenlet-3.2.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:024571bbce5f2c1cfff08bf3fbaa43bbc7444f580ae13b0099e95d0e6e67ed36", size = 1147121 },
{ url = "https://files.pythonhosted.org/packages/90/2e/59d6491834b6e289051b252cf4776d16da51c7c6ca6a87ff97e3a50aa0cd/greenlet-3.2.2-cp313-cp313-win_amd64.whl", hash = "sha256:fe46d4f8e94e637634d54477b0cfabcf93c53f29eedcbdeecaf2af32029b4421", size = 296023 }, { url = "https://files.pythonhosted.org/packages/27/1a/199f9587e8cb08a0658f9c30f3799244307614148ffe8b1e3aa22f324dea/greenlet-3.2.3-cp313-cp313-win_amd64.whl", hash = "sha256:5195fb1e75e592dd04ce79881c8a22becdfa3e6f500e7feb059b1e6fdd54d3e3", size = 297603 },
{ url = "https://files.pythonhosted.org/packages/65/66/8a73aace5a5335a1cba56d0da71b7bd93e450f17d372c5b7c5fa547557e9/greenlet-3.2.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba30e88607fb6990544d84caf3c706c4b48f629e18853fc6a646f82db9629418", size = 629911 }, { url = "https://files.pythonhosted.org/packages/d8/ca/accd7aa5280eb92b70ed9e8f7fd79dc50a2c21d8c73b9a0856f5b564e222/greenlet-3.2.3-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:3d04332dddb10b4a211b68111dabaee2e1a073663d117dc10247b5b1642bac86", size = 271479 },
{ url = "https://files.pythonhosted.org/packages/48/08/c8b8ebac4e0c95dcc68ec99198842e7db53eda4ab3fb0a4e785690883991/greenlet-3.2.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:055916fafad3e3388d27dd68517478933a97edc2fc54ae79d3bec827de2c64c4", size = 635251 }, { url = "https://files.pythonhosted.org/packages/55/71/01ed9895d9eb49223280ecc98a557585edfa56b3d0e965b9fa9f7f06b6d9/greenlet-3.2.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8186162dffde068a465deab08fc72c767196895c39db26ab1c17c0b77a6d8b97", size = 683952 },
{ url = "https://files.pythonhosted.org/packages/37/26/7db30868f73e86b9125264d2959acabea132b444b88185ba5c462cb8e571/greenlet-3.2.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2593283bf81ca37d27d110956b79e8723f9aa50c4bcdc29d3c0543d4743d2763", size = 632620 }, { url = "https://files.pythonhosted.org/packages/ea/61/638c4bdf460c3c678a0a1ef4c200f347dff80719597e53b5edb2fb27ab54/greenlet-3.2.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f4bfbaa6096b1b7a200024784217defedf46a07c2eee1a498e94a1b5f8ec5728", size = 696917 },
{ url = "https://files.pythonhosted.org/packages/10/ec/718a3bd56249e729016b0b69bee4adea0dfccf6ca43d147ef3b21edbca16/greenlet-3.2.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89c69e9a10670eb7a66b8cef6354c24671ba241f46152dd3eed447f79c29fb5b", size = 628851 }, { url = "https://files.pythonhosted.org/packages/22/cc/0bd1a7eb759d1f3e3cc2d1bc0f0b487ad3cc9f34d74da4b80f226fde4ec3/greenlet-3.2.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:ed6cfa9200484d234d8394c70f5492f144b20d4533f69262d530a1a082f6ee9a", size = 692443 },
{ url = "https://files.pythonhosted.org/packages/9b/9d/d1c79286a76bc62ccdc1387291464af16a4204ea717f24e77b0acd623b99/greenlet-3.2.2-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02a98600899ca1ca5d3a2590974c9e3ec259503b2d6ba6527605fcd74e08e207", size = 593718 }, { url = "https://files.pythonhosted.org/packages/67/10/b2a4b63d3f08362662e89c103f7fe28894a51ae0bc890fabf37d1d780e52/greenlet-3.2.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:02b0df6f63cd15012bed5401b47829cfd2e97052dc89da3cfaf2c779124eb892", size = 692995 },
{ url = "https://files.pythonhosted.org/packages/cd/41/96ba2bf948f67b245784cd294b84e3d17933597dffd3acdb367a210d1949/greenlet-3.2.2-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:b50a8c5c162469c3209e5ec92ee4f95c8231b11db6a04db09bbe338176723bb8", size = 1105752 }, { url = "https://files.pythonhosted.org/packages/5a/c6/ad82f148a4e3ce9564056453a71529732baf5448ad53fc323e37efe34f66/greenlet-3.2.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:86c2d68e87107c1792e2e8d5399acec2487a4e993ab76c792408e59394d52141", size = 655320 },
{ url = "https://files.pythonhosted.org/packages/68/3b/3b97f9d33c1f2eb081759da62bd6162159db260f602f048bc2f36b4c453e/greenlet-3.2.2-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:45f9f4853fb4cc46783085261c9ec4706628f3b57de3e68bae03e8f8b3c0de51", size = 1125170 }, { url = "https://files.pythonhosted.org/packages/5c/4f/aab73ecaa6b3086a4c89863d94cf26fa84cbff63f52ce9bc4342b3087a06/greenlet-3.2.3-cp314-cp314-win_amd64.whl", hash = "sha256:8c47aae8fbbfcf82cc13327ae802ba13c9c36753b67e760023fd116bc124a62a", size = 301236 },
{ url = "https://files.pythonhosted.org/packages/31/df/b7d17d66c8d0f578d2885a3d8f565e9e4725eacc9d3fdc946d0031c055c4/greenlet-3.2.2-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:9ea5231428af34226c05f927e16fc7f6fa5e39e3ad3cd24ffa48ba53a47f4240", size = 269899 },
] ]
[[package]] [[package]]
@ -523,16 +522,17 @@ wheels = [
[[package]] [[package]]
name = "kombu" name = "kombu"
version = "5.5.3" version = "5.5.4"
source = { registry = "https://pypi.org/simple" } source = { registry = "https://pypi.org/simple" }
dependencies = [ dependencies = [
{ name = "amqp" }, { name = "amqp" },
{ name = "packaging" },
{ name = "tzdata" }, { name = "tzdata" },
{ name = "vine" }, { name = "vine" },
] ]
sdist = { url = "https://files.pythonhosted.org/packages/60/0a/128b65651ed8120460fc5af754241ad595eac74993115ec0de4f2d7bc459/kombu-5.5.3.tar.gz", hash = "sha256:021a0e11fcfcd9b0260ef1fb64088c0e92beb976eb59c1dfca7ddd4ad4562ea2", size = 461784 } sdist = { url = "https://files.pythonhosted.org/packages/0f/d3/5ff936d8319ac86b9c409f1501b07c426e6ad41966fedace9ef1b966e23f/kombu-5.5.4.tar.gz", hash = "sha256:886600168275ebeada93b888e831352fe578168342f0d1d5833d88ba0d847363", size = 461992 }
wheels = [ wheels = [
{ url = "https://files.pythonhosted.org/packages/5d/35/1407fb0b2f5b07b50cbaf97fce09ad87d3bfefbf64f7171a8651cd8d2f68/kombu-5.5.3-py3-none-any.whl", hash = "sha256:5b0dbceb4edee50aa464f59469d34b97864be09111338cfb224a10b6a163909b", size = 209921 }, { url = "https://files.pythonhosted.org/packages/ef/70/a07dcf4f62598c8ad579df241af55ced65bed76e42e45d3c368a6d82dbc1/kombu-5.5.4-py3-none-any.whl", hash = "sha256:a12ed0557c238897d8e518f1d1fdf84bd1516c5e305af2dacd85c2015115feb8", size = 210034 },
] ]
[[package]] [[package]]
@ -848,11 +848,11 @@ wheels = [
[[package]] [[package]]
name = "redis" name = "redis"
version = "6.1.0" version = "6.2.0"
source = { registry = "https://pypi.org/simple" } source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/a6/af/e875d57383653e5d9065df8552de1deb7576b4d3cf3af90cde2e79ff7f65/redis-6.1.0.tar.gz", hash = "sha256:c928e267ad69d3069af28a9823a07726edf72c7e37764f43dc0123f37928c075", size = 4629300 } sdist = { url = "https://files.pythonhosted.org/packages/ea/9a/0551e01ba52b944f97480721656578c8a7c46b51b99d66814f85fe3a4f3e/redis-6.2.0.tar.gz", hash = "sha256:e821f129b75dde6cb99dd35e5c76e8c49512a5a0d8dfdc560b2fbd44b85ca977", size = 4639129 }
wheels = [ wheels = [
{ url = "https://files.pythonhosted.org/packages/28/5f/cf36360f80ae233bd1836442f5127818cfcfc7b1846179b60b2e9a4c45c9/redis-6.1.0-py3-none-any.whl", hash = "sha256:3b72622f3d3a89df2a6041e82acd896b0e67d9f54e9bcd906d091d23ba5219f6", size = 273750 }, { url = "https://files.pythonhosted.org/packages/13/67/e60968d3b0e077495a8fee89cf3f2373db98e528288a48f1ee44967f6e8c/redis-6.2.0-py3-none-any.whl", hash = "sha256:c8ddf316ee0aab65f04a11229e94a64b2618451dab7a67cb2f77eb799d872d5e", size = 278659 },
] ]
[[package]] [[package]]
@ -885,11 +885,11 @@ wheels = [
[[package]] [[package]]
name = "setuptools" name = "setuptools"
version = "80.8.0" version = "80.9.0"
source = { registry = "https://pypi.org/simple" } source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/8d/d2/ec1acaaff45caed5c2dedb33b67055ba9d4e96b091094df90762e60135fe/setuptools-80.8.0.tar.gz", hash = "sha256:49f7af965996f26d43c8ae34539c8d99c5042fbff34302ea151eaa9c207cd257", size = 1319720 } sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958 }
wheels = [ wheels = [
{ url = "https://files.pythonhosted.org/packages/58/29/93c53c098d301132196c3238c312825324740851d77a8500a2462c0fd888/setuptools-80.8.0-py3-none-any.whl", hash = "sha256:95a60484590d24103af13b686121328cc2736bee85de8936383111e421b9edc0", size = 1201470 }, { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486 },
] ]
[[package]] [[package]]