feat: add Docker deployment with HAProxy and blue-green strategy
This commit is contained in:
parent
3cfa59d3a5
commit
5be1e5add5
26 changed files with 56198 additions and 582 deletions
25
deploy/docker-compose-haproxy.yml
Normal file
25
deploy/docker-compose-haproxy.yml
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
services:
  haproxy:
    image: haproxy:3.2
    container_name: haproxy
    # SIGTERM triggers HAProxy's graceful shutdown on `docker stop`.
    stop_signal: SIGTERM
    # Point -f at the directory so every mounted config file is loaded.
    command: ["haproxy", "-f", "/usr/local/etc/haproxy"]
    ports:
      - "80:80"
      - "443:443"
      - "8404:8404"
    volumes:
      - ./configs:/usr/local/etc/haproxy/:ro
      - ./certs:/certs:ro
    restart: unless-stopped
    networks:
      - proxynet
    healthcheck:
      # -c validates the configuration; a non-zero exit marks the container unhealthy.
      test: ["CMD", "haproxy", "-c", "-f", "/usr/local/etc/haproxy"]
      interval: 30s
      timeout: 10s
      retries: 3

networks:
  proxynet:
    name: proxynet
    driver: bridge
90
deploy/docker-compose.yml
Normal file
90
deploy/docker-compose.yml
Normal file
|
|
@ -0,0 +1,90 @@
|
|||
services:
  portfolio:
    image: badbl0cks/portfolio:${IMAGE_TAG:-stable}
    # Debugging alternative:
    # image: ghcr.io/xe/x/httpdebug
    # entrypoint: ["/ko-app/httpdebug", "--bind", ":3000"]
    container_name: portfolio-${RELEASE_TYPE}-${DEPLOYMENT_COLOR}
    networks:
      proxynet:
        aliases:
          # Color-independent alias so HAProxy can target the active release.
          - portfolio-${RELEASE_TYPE}
    expose:
      - "3000"
    env_file:
      - .env
    volumes:
      - ./data:/app/data
    restart: unless-stopped
    labels:
      - "deployment.color=${DEPLOYMENT_COLOR}"
      - "deployment.image_tag=${IMAGE_TAG}"
      - "deployment.release_type=${RELEASE_TYPE}"
    user: "1000:1000"
    healthcheck:
      # NOTE(review): requires curl to be present in the image — confirm.
      test: ["CMD", "curl", "-f", "-s", "--max-time", "5", "http://localhost:3000/api/health"]
      interval: 30s
      timeout: 15s
      retries: 3
      start_period: 120s

  wireguard:
    image: qmcgaw/gluetun
    cap_add:
      - NET_ADMIN
    container_name: wireguard-${RELEASE_TYPE}-${DEPLOYMENT_COLOR}
    environment:
      - VPN_SERVICE_PROVIDER=custom
      - VPN_TYPE=wireguard
      - HTTPPROXY=on
    expose:
      - "8888"
    env_file:
      - .env
    devices:
      - /dev/net/tun:/dev/net/tun
    restart: unless-stopped
    networks:
      proxynet:
        aliases:
          - wireguard-${RELEASE_TYPE}
    healthcheck:
      # Fix: the exec ("CMD") form performs no shell expansion, so the original
      # passed the literal string "$NUXT_ANDROID_SMS_GATEWAY_IP" to ping and
      # the check could never succeed. CMD-SHELL runs through a shell, which
      # expands the variable; "$$" escapes Compose interpolation so a literal
      # "$" reaches the container shell.
      test: ["CMD-SHELL", "ping -c 1 -W 3 $$NUXT_ANDROID_SMS_GATEWAY_IP"]
      interval: 30s
      timeout: 15s
      retries: 3
      start_period: 60s

  anubis:
    image: ghcr.io/techarohq/anubis:main
    container_name: "anubis-${RELEASE_TYPE}-${DEPLOYMENT_COLOR}"
    expose:
      - "8080"
      - "9090"
    environment:
      - BIND=:8080
      - TARGET=http://portfolio-${RELEASE_TYPE}-${DEPLOYMENT_COLOR}:3000
      - DIFFICULTY=4
      - COOKIE_DOMAIN=${DOMAIN}
      - COOKIE_PARTITIONED=true
      - METRICS_BIND=:9090
      - SERVE_ROBOTS_TXT=true
      - XFF_STRIP_PRIVATE=false
      - USE_REMOTE_ADDRESS=true
      - OG_PASSTHROUGH=true
      - OG_EXPIRY_TIME=24h
    restart: unless-stopped
    networks:
      proxynet:
        aliases:
          - anubis-${RELEASE_TYPE}
    labels:
      - "deployment.color=${DEPLOYMENT_COLOR}"
      - "deployment.release_type=${RELEASE_TYPE}"
    healthcheck:
      # NOTE(review): timeout (30s) exceeds interval (5s) — confirm intended.
      test: ["CMD", "anubis", "--healthcheck"]
      interval: 5s
      timeout: 30s
      retries: 5
      start_period: 500ms

networks:
  proxynet:
    # Created/owned by the HAProxy stack; this file only attaches to it.
    external: true
    name: proxynet
||||
7
deploy/haproxy.backend.cfg
Normal file
7
deploy/haproxy.backend.cfg
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
# Backend template — the ${...} placeholders are filled in by
# substitute_env_vars before upload; HAProxy itself does not expand them.
backend ${DOMAIN}
    balance leastconn
    option httpchk GET /
    http-check expect status 200

    http-request set-header Host ${DOMAIN}
    # Resolve the anubis service at runtime via the "docker" resolvers section;
    # init-addr none lets HAProxy start even when the backend is not up yet.
    server-template anubis-${RELEASE_TYPE}- 1 anubis-${RELEASE_TYPE}:8080 check resolvers docker resolve-prefer ipv4 init-addr none
||||
31
deploy/haproxy.cfg
Normal file
31
deploy/haproxy.cfg
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
global
    daemon
    log stdout format raw local0 info
    maxconn 2000

defaults
    mode http
    log global
    timeout connect 5s
    timeout client 30s
    timeout server 30s
    timeout check 5s
    retries 3
    option httplog
    option dontlognull
    option redispatch

frontend haproxy_entrypoint
    bind :80
    # NOTE(review): "crt /certs/" loads every file in that directory as a
    # certificate, including the mounted ca.pem — confirm HAProxy tolerates
    # that, or keep the CA outside the crt directory.
    bind :443 ssl crt /certs/ verify required ca-file /certs/ca.pem

    http-request redirect scheme https unless { ssl_fc }
    # Route by Host header; word(1,:) strips any :port suffix. The resulting
    # name must match a "backend <domain>" section generated from the template.
    use_backend %[req.hdr(host),lower,word(1,:)]

resolvers docker
    # Docker's embedded DNS server.
    nameserver dns1 127.0.0.11:53
    resolve_retries 3
    timeout resolve 1s
    timeout retry 1s
    hold valid 10s
    hold obsolete 30s
||||
374
deploy/scripts/common-lib.sh
Executable file
374
deploy/scripts/common-lib.sh
Executable file
|
|
@ -0,0 +1,374 @@
|
|||
#!/bin/bash

# Shared helpers for the blue-green deployment scripts.

readonly BLUE_COLOR="blue"
readonly GREEN_COLOR="green"
readonly CORE_PROJECT_NAME="portfolio"
readonly DEPLOYMENT_LABEL="deployment.color"
# Retry tuning; overridable via the environment before sourcing.
readonly RETRY_MAX_ATTEMPTS="${RETRY_MAX_ATTEMPTS:-5}"
readonly RETRY_DELAY="${RETRY_DELAY:-5}"
|
||||
|
||||
# Shared dry-run banner used by the execute_* wrappers below.
_announce_dry_run() {
  local description="$1"
  shift
  indent_output echo "[DRY RUN] Would execute: $description"
  indent_output echo "  Command: $*"
}

# Run the command unless DRY_RUN=true, in which case only announce it.
execute_if_not_dry() {
  local description="$1"
  shift

  if [ "$DRY_RUN" = true ]; then
    _announce_dry_run "$description" "$@"
  else
    "$@"
  fi
}

# Like execute_if_not_dry, but a failed command aborts the whole script.
execute_or_fail() {
  local description="$1"
  shift

  if [ "$DRY_RUN" = true ]; then
    _announce_dry_run "$description" "$@"
  else
    if ! "$@"; then
      echo "❌ Error: Failed to $description"
      exit 1
    fi
  fi
}

# Like execute_if_not_dry, but a failed command only emits a warning.
execute_or_warn() {
  local description="$1"
  shift

  if [ "$DRY_RUN" = true ]; then
    _announce_dry_run "$description" "$@"
  else
    if ! "$@"; then
      echo "⚠️ Warning: Failed to $description (continuing anyway)"
    fi
  fi
}
|
||||
|
||||
# Run a command, retrying with exponential backoff until it succeeds or
# RETRY_MAX_ATTEMPTS is exhausted. Returns the command's last exit code on
# final failure, 0 on success.
retry() {
  local max_attempts=$RETRY_MAX_ATTEMPTS
  local delay=$RETRY_DELAY
  local attempt=1
  local exit_code=0

  until "$@"; do
    exit_code=$?

    if [ "$attempt" -ge "$max_attempts" ]; then
      echo "❌ Command failed after $max_attempts attempts: $*" >&2
      return $exit_code
    fi

    echo "⚠️ Attempt $attempt failed, retrying in ${delay}s..." >&2
    sleep "$delay"

    # Exponential backoff: double the delay each round.
    delay=$((delay * 2))
    attempt=$((attempt + 1))
  done

  if [ $attempt -gt 1 ]; then
    echo "✅ Command succeeded after $attempt attempts"
  fi

  return 0
}
|
||||
|
||||
# Run a command string on the deploy host (over SSH) when DEPLOY_HOST is set,
# otherwise locally via bash -c.
# NOTE(review): the SSH alias "deploy" is hardcoded; DEPLOY_HOST only selects
# whether SSH is used — confirm the ssh_config alias matches DEPLOY_HOST.
run_on_target() {
  if [[ -n "${DEPLOY_HOST}" ]]; then
    ssh deploy -q -E /dev/null "$*"
  else
    bash -c -- "$*"
  fi
}

# Abort with a detailed message when a required env var is unset or empty.
require_var() {
  local var_name=$1
  local var_value=${!var_name:-}

  if [ -z "$var_value" ]; then
    echo "❌ Deployment Error: Required environment variable '${var_name}' is not set or is empty" >&2
    echo " Please check your GitHub Actions secrets, workflow environment variables," >&2
    echo " or deployment configuration to ensure '${var_name}' is properly defined." >&2
    exit 1
  fi
}

# Helper function for common error handling pattern
error_exit() {
  echo "❌ ERROR: $1" >&2
  exit 1
}

# Helper function for command execution with error handling
run_or_exit() {
  local description="$1"
  shift

  "$@" || error_exit "$description"
}
|
||||
|
||||
# Determine which color is currently live: prints "blue", "green", or "none".
# If both colors are running (mid-deploy), the side whose first-listed
# container has the later Created timestamp wins.
get_current_color() {
  local blue_count green_count
  blue_count=$(count_color_containers "$BLUE_COLOR")
  green_count=$(count_color_containers "$GREEN_COLOR")

  if [ "$blue_count" -gt 0 ] && [ "$green_count" -eq 0 ]; then
    echo "$BLUE_COLOR"
  elif [ "$green_count" -gt 0 ] && [ "$blue_count" -eq 0 ]; then
    echo "$GREEN_COLOR"
  elif [ "$blue_count" -gt 0 ] && [ "$green_count" -gt 0 ]; then
    # Both running: compare container Created timestamps (ISO-8601 strings
    # compare correctly with lexicographic >).
    local project_blue project_green blue_newest green_newest
    project_blue=$(get_project_name "$BLUE_COLOR")
    project_green=$(get_project_name "$GREEN_COLOR")
    blue_newest=$(docker inspect --format='{{.Created}}' "$(docker ps -q --filter "label=com.docker.compose.project=$project_blue" | head -1)" 2>/dev/null || echo '1970-01-01')
    green_newest=$(docker inspect --format='{{.Created}}' "$(docker ps -q --filter "label=com.docker.compose.project=$project_green" | head -1)" 2>/dev/null || echo '1970-01-01')

    if [[ "$blue_newest" > "$green_newest" ]]; then
      echo "$BLUE_COLOR"
    else
      echo "$GREEN_COLOR"
    fi
  else
    echo "none"
  fi
}

# Report the overall state: "blue", "green", "both", or "none".
get_deployment_state() {
  local blue_count green_count
  blue_count=$(count_color_containers "$BLUE_COLOR")
  green_count=$(count_color_containers "$GREEN_COLOR")

  if [ "$blue_count" -gt 0 ] && [ "$green_count" -gt 0 ]; then
    echo "both"
  elif [ "$blue_count" -gt 0 ]; then
    echo "$BLUE_COLOR"
  elif [ "$green_count" -gt 0 ]; then
    echo "$GREEN_COLOR"
  else
    echo "none"
  fi
}

# True (exit 0) when both colors are running at once.
is_deployment_in_progress() {
  [ "$(get_deployment_state)" = "both" ]
}
|
||||
|
||||
# Return the opposite deployment color.
switch_color() {
  if [ "$1" = "$BLUE_COLOR" ]; then
    echo "$GREEN_COLOR"
  else
    echo "$BLUE_COLOR"
  fi
}

# Compose project name for a color: "portfolio-{prod|staging}-{color}".
get_project_name() {
  local color=$1
  local env_suffix
  env_suffix=$([ "${PROD:-}" = "true" ] && echo "prod" || echo "staging")
  echo "${CORE_PROJECT_NAME}-${env_suffix}-${color}"
}

# Compose file arguments for docker compose invocations.
# NOTE(review): references docker-compose_web.yml, but the deploy script
# copies deploy/docker-compose.yml — confirm which filename is current.
get_compose_files() {
  echo "-f docker-compose_web.yml"
}

# Placeholder: currently announcement-only (the actual HAProxy reload is
# handled by configure-haproxy.sh).
refresh_proxy() {
  echo "🔄 Refreshing proxy configuration..."
}
|
||||
|
||||
# Count running containers matching the given `docker ps` filter arguments.
# $filters is deliberately unquoted so multiple --filter flags word-split.
count_containers() {
  local filters=$1
  docker ps ${filters} -q 2>/dev/null | wc -l | tr -d '\n' || echo 0
}

# Count running containers belonging to a color's compose project.
# NOTE(review): runs docker locally, while cleanup_color_containers goes
# through run_on_target — confirm both are meant to hit the same daemon
# (e.g. via DOCKER_HOST).
count_color_containers() {
  local color=$1
  local project_name
  project_name=$(get_project_name "$color")
  docker ps --filter "label=com.docker.compose.project=$project_name" -q 2>/dev/null | wc -l
}
|
||||
|
||||
# Removed get_previous_release_path - no longer needed as we use direct container cleanup
|
||||
|
||||
# Stop and remove every container belonging to the given color's compose
# project on the deploy target. No-op when nothing is running.
cleanup_color_containers() {
  local color=$1
  local project_name
  project_name=$(get_project_name "$color")

  # Same project-label filter used by get_current_color, but on the target.
  local container_ids
  container_ids=$(run_on_target "docker ps --filter 'label=com.docker.compose.project=$project_name' -q 2>/dev/null")

  if [ -n "$container_ids" ]; then
    echo "🛑 Stopping $color containers from project: $project_name"
    # Stop by ID with a 10s grace period; failures are tolerated (|| true).
    run_on_target "echo '$container_ids' | xargs -r docker stop --timeout 10 2>/dev/null || true"

    echo "🗑️ Removing $color containers from project: $project_name"
    run_on_target "echo '$container_ids' | xargs -r docker rm -f 2>/dev/null || true"
  else
    echo "ℹ️ No $color containers found to clean up"
  fi
}
|
||||
|
||||
# Print a message followed by a one-per-second countdown: "msg 3 2 1 done!".
wait_with_countdown() {
  local seconds=$1
  local message=$2
  local i

  echo -n "$message"
  for ((i = seconds; i > 0; i--)); do
    echo -n " $i"
    sleep 1
  done
  echo " done!"
}

# Name of the compose service that serves web traffic.
get_web_service_name() {
  echo "portfolio"
}
|
||||
# Ensure the env vars every deployment script needs are present, then export
# the shared derived paths.
validate_deployment_env() {
  require_var "REPO_PROJECT_PATH"
  require_var "PROD"
  require_var "DOMAIN"

  export CURRENT_LINK_PATH="${REPO_PROJECT_PATH}/current"
  export RELEASES_PATH="${REPO_PROJECT_PATH}/releases"
}
|
||||
|
||||
# Print a health summary line for every running container and return the
# number of unhealthy ones as the exit status (0 == all healthy).
# Fix: the original reused "status" as both the loop variable and a local
# redeclared inside the loop, and expanded it unquoted (word-splitting /
# globbing hazard); use distinct, quoted variables instead.
get_health_check_status() {
  local statuses
  statuses=$(docker ps --format '{{.Names}} {{.Status}}')
  local unhealthy_count=0
  local IFS=$'\n'
  local line name state
  for line in $statuses; do
    name=$(echo "$line" | cut -d' ' -f1)
    state=$(echo "$line" | cut -d' ' -f2-)
    if [[ "$state" == *"unhealthy"* ]]; then
      unhealthy_count=$((unhealthy_count + 1))
      echo "❌ Unhealthy: $name [$state]"
    else
      echo "✅ Healthy: $name [$state]"
    fi
  done
  return $unhealthy_count
}
|
||||
|
||||
# Poll until $3 containers of service $2 in project $1 report health=healthy.
# Polls every 5s for up to 60 attempts (~5 minutes); returns 1 on timeout.
wait_for_healthy_containers() {
  local project_name=$1
  local service_name=$2
  local expected_count=$3
  local max_attempts=60
  local attempt=0
  local healthy_count

  echo "⏳ Waiting for $service_name containers to be healthy..."

  while [ $attempt -lt $max_attempts ]; do
    healthy_count=$(count_containers "--filter label=com.docker.compose.project=${project_name} --filter label=com.docker.compose.service=${service_name} --filter health=healthy")

    if [[ "$healthy_count" -eq "$expected_count" ]]; then
      echo "✅ All $service_name containers are healthy ($healthy_count/$expected_count)"
      return 0
    fi

    echo "⏳ Healthy containers: $healthy_count/$expected_count (attempt $((attempt+1))/$max_attempts)"
    sleep 5
    attempt=$((attempt + 1))
  done

  echo "❌ Timeout waiting for $service_name containers to be healthy"
  return 1
}
|
||||
|
||||
# List releases under $1/releases, newest first, flagging the release the
# "current" symlink points at and any release marked failed.
# NOTE(review): the $(ls -dt ...) loop word-splits on whitespace in paths;
# release directories are timestamps, so this is safe for the current layout.
list_releases() {
  local REPO_PROJECT_PATH=$1
  local RELEASES_PATH="${REPO_PROJECT_PATH}/releases"
  local CURRENT_LINK_PATH="${REPO_PROJECT_PATH}/current"
  local release version status

  echo "📋 Available releases:"
  if [ -d "$RELEASES_PATH" ]; then
    for release in $(ls -dt ${RELEASES_PATH}/*); do
      version=$(basename "$release")
      status=""

      if [ -L "$CURRENT_LINK_PATH" ] && [ "$(readlink -f "$CURRENT_LINK_PATH")" = "$(realpath "$release")" ]; then
        status=" [CURRENT]"
      fi

      # A .failed marker file inside a release flags a failed deployment.
      if [ -f "${release}/.failed" ]; then
        status="${status} [FAILED]"
      fi

      indent_output echo "- ${version}${status}"
    done
  else
    indent_output echo "No releases found"
  fi
}
|
||||
|
||||
# Return the deployment.image_tag label of the first container in the given
# color's compose project, or "unknown" when none is running.
# Fix: the original filtered on "${CORE_PROJECT_NAME}-${color}", but projects
# are actually named "${CORE_PROJECT_NAME}-${env_suffix}-${color}" (see
# get_project_name), so the lookup could never match; reuse get_project_name
# so the filter stays consistent with the rest of the library.
get_deployment_image_tag() {
  local color=$1
  local project_name
  project_name=$(get_project_name "$color")
  local container
  container=$(docker ps --filter "label=com.docker.compose.project=${project_name}" --format '{{.Names}}' | head -1)
  if [ -n "$container" ]; then
    docker inspect "${container}" --format '{{index .Config.Labels "deployment.image_tag"}}'
  else
    echo "unknown"
  fi
}
|
||||
|
||||
# Run a command with every line of its output (stdout and stderr) prefixed
# by $1. Returns the command's own exit status, not sed's.
prefix_output() {
  if [ $# -lt 2 ]; then
    echo "Error: prefix_output requires at least 2 arguments" >&2
    return 1
  fi

  local prefix="$1"
  shift

  "$@" 2>&1 | sed "s/^/${prefix}/"
  # PIPESTATUS[0] is the wrapped command's status (sed's is discarded).
  return ${PIPESTATUS[0]}
}

# prefix_output with a whitespace indent. An alternative indent may be passed
# as the first argument, but only if it is whitespace-only.
indent_output() {
  local indent=" "

  if [[ "$1" =~ ^[[:space:]]+$ ]]; then
    indent="$1"
    shift
  fi

  prefix_output "$indent" "$@"
}

# Print a header line, then run the command with indented output.
run_with_header() {
  local header="$1"
  shift

  echo "$header"
  indent_output " " "$@"
}
|
||||
|
||||
# In-place ${VAR} substitution for a template file using current environment
# values; unset variables expand to the empty string. Writes to a temp file
# and only replaces the original on success.
substitute_env_vars() {
  local file_path="$1"

  if [ ! -f "$file_path" ]; then
    echo "❌ Error: File '$file_path' does not exist" >&2
    return 1
  fi

  local line var_name var_value
  while IFS= read -r line; do
    # Replace all occurrences of each remaining ${NAME} token until the line
    # contains none.
    while [[ "$line" =~ \$\{([A-Za-z_][A-Za-z0-9_]*)\} ]]; do
      var_name="${BASH_REMATCH[1]}"
      var_value="${!var_name:-}"
      line="${line//\$\{${var_name}\}/${var_value}}"
    done
    echo "$line"
  done < "$file_path" > "$file_path.tmp" && mv "$file_path.tmp" "$file_path"
}

# Run substitute_env_vars on the deploy host when DEPLOY_HOST is set,
# shipping the function definition over SSH via `declare -f`.
substitute_env_vars_remote() {
  local file_path="$1"

  if [[ -n "${DEPLOY_HOST}" ]]; then
    run_on_target "$(declare -f substitute_env_vars); substitute_env_vars '$file_path'"
  else
    substitute_env_vars "$file_path"
  fi
}
|
||||
291
deploy/scripts/configure-haproxy.sh
Executable file
291
deploy/scripts/configure-haproxy.sh
Executable file
|
|
@ -0,0 +1,291 @@
|
|||
#!/bin/bash
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Shared helpers (run_on_target, error_exit, run_or_exit, ...).
if [ -f "${SCRIPT_DIR}/common-lib.sh" ]; then
  source "${SCRIPT_DIR}/common-lib.sh"
else
  echo "❌ ERROR: common-lib.sh not found at ${SCRIPT_DIR}/common-lib.sh"
  exit 1
fi

# Remote layout of the HAProxy deployment.
readonly HAPROXY_BASE_DIR="/srv/haproxy"
readonly HAPROXY_CONFIGS_DIR="${HAPROXY_BASE_DIR}/configs"
readonly HAPROXY_COMPOSE_FILE="${HAPROXY_BASE_DIR}/docker-compose.yml"
# "00-" prefix sorts the main config ahead of the per-backend files.
readonly HAPROXY_MAIN_CONFIG="${HAPROXY_CONFIGS_DIR}/00-haproxy.cfg"
readonly BACKEND_TEMPLATE_PATH="${SCRIPT_DIR}/../haproxy.backend.cfg"
readonly HAPROXY_COMPOSE_TEMPLATE="${SCRIPT_DIR}/../docker-compose-haproxy.yml"
||||
|
||||
# Print CLI usage; the heredoc body is user-facing output.
show_usage() {
  cat << EOF
Usage: $0 [add|remove] <backend_name>

Commands:
  add <backend_name>    - Add backend configuration (uses \$DOMAIN and \$RELEASE_TYPE env vars)
  remove <backend_name> - Remove backend configuration

Environment Variables:
  DOMAIN       - Domain name for the backend (required for add command)
  RELEASE_TYPE - Release type (e.g., prod, staging) (required for add command)

Examples:
  DOMAIN=badblocks.dev RELEASE_TYPE=prod $0 add portfolio-prod
  $0 remove portfolio-staging
EOF
}
|
||||
|
||||
# Create the remote HAProxy directories and, when Cloudflare cert material is
# provided via the environment, install the origin certificate and CA there.
setup_infrastructure_dirs_and_certs() {
  local certs_dir="${HAPROXY_BASE_DIR}/certs"
  echo "🏗️ Setting up HAProxy directories..."
  run_or_exit "Failed to setup HAProxy directories" run_on_target "mkdir -p '${HAPROXY_BASE_DIR}' '${HAPROXY_CONFIGS_DIR}' && chmod 755 '${HAPROXY_BASE_DIR}' '${HAPROXY_CONFIGS_DIR}'"

  # Only install certs when both PEM blobs are supplied. The quoted 'EOF'
  # heredoc runs on the target, but $CF_PEM_CERT / $CF_PEM_CA are expanded
  # locally (inside the double-quoted command string) before SSH sends it.
  if [[ -n "${CF_PEM_CERT:-}" ]] && [[ -n "${CF_PEM_CA:-}" ]]; then
    echo "🏗️ Setting up HAProxy certificates..."
    run_or_exit "Failed to setup HAProxy directories" run_on_target "mkdir -p '${certs_dir}' && chmod 755 '${certs_dir}'"

    run_or_exit "Failed to install crt-${DOMAIN}.pem certificate file" run_on_target "cat > '${certs_dir}/crt-${DOMAIN}.pem' << 'EOF'
$CF_PEM_CERT
EOF
chmod 644 '${certs_dir}/crt-${DOMAIN}.pem'"

    run_or_exit "Failed to install Cloudflare CA file" run_on_target "cat > '${certs_dir}/ca.pem' << 'EOF'
$CF_PEM_CA
EOF
chmod 644 '${certs_dir}/ca.pem'"
  fi
}
|
||||
|
||||
# Set up the HAProxy host: directories, certificates, config upload, and
# starting the compose stack (which creates the proxynet network).
init_haproxy_infrastructure() {
  echo "🚀 Initializing HAProxy infrastructure..."

  setup_infrastructure_dirs_and_certs

  # Determine which config files are missing and queue their uploads.
  local main_config_missing=false
  local compose_config_missing=false
  local upload_commands=()

  if ! run_on_target "test -f '${HAPROXY_MAIN_CONFIG}'"; then
    main_config_missing=true
    upload_commands+=("scp '${SCRIPT_DIR}/../haproxy.cfg' deploy:'${HAPROXY_MAIN_CONFIG}'")
    echo "📋 Main HAProxy config needs creation"
  else
    echo "✅ Main config already exists"
  fi

  if ! run_on_target "test -f '${HAPROXY_COMPOSE_FILE}'"; then
    [ ! -f "$HAPROXY_COMPOSE_TEMPLATE" ] && error_exit "HAProxy compose template not found at $HAPROXY_COMPOSE_TEMPLATE"
    compose_config_missing=true
    upload_commands+=("scp '${HAPROXY_COMPOSE_TEMPLATE}' deploy:'${HAPROXY_COMPOSE_FILE}'")
    echo "🐳 Docker-compose config needs creation"
  else
    echo "✅ Docker-compose config already exists"
  fi

  # Fix: the original ran this loop as one background job ("done & wait"),
  # which added no parallelism and — worse — meant error_exit terminated only
  # the background subshell, so failed uploads were silently ignored. Run the
  # loop in the foreground so an upload failure aborts the script.
  if [ ${#upload_commands[@]} -gt 0 ]; then
    echo "📤 Uploading configuration files..."
    local cmd
    for cmd in "${upload_commands[@]}"; do
      eval "$cmd" || error_exit "Failed to upload config file: $cmd"
    done
    echo "✅ Configuration files uploaded"
  fi

  # Start HAProxy service (creates network automatically via compose).
  if ! is_haproxy_running; then
    echo "▶️ Starting HAProxy service..."

    if run_on_target "cd '${HAPROXY_BASE_DIR}' && docker compose up -d"; then
      echo "✅ HAProxy containers started"

      echo "⏳ Waiting for HAProxy to be healthy..."
      if wait_for_haproxy_healthy; then
        echo "✅ HAProxy is healthy and ready"
      else
        echo "❌ ERROR: HAProxy failed to become healthy"
        return 1
      fi
    else
      echo "❌ ERROR: Failed to start HAProxy containers"
      exit 1
    fi
  else
    echo "✅ HAProxy is already running"
  fi

  echo "✅ HAProxy infrastructure ready"
}
|
||||
|
||||
# Poll the haproxy compose service until its status reports healthy or "Up".
# Up to 30 attempts, 2s apart; returns 1 on timeout.
wait_for_haproxy_healthy() {
  local attempts=0
  local max_attempts=30
  local container_info=""

  while [ $attempts -lt $max_attempts ]; do
    attempts=$((attempts + 1))

    container_info=$(run_on_target "cd '${HAPROXY_BASE_DIR}' && docker compose ps haproxy --format '{{.Status}}'")

    # Accept an explicit healthy report or a plain "Up <duration>" status.
    if echo "$container_info" | grep -qE "(health: healthy|Up.*[0-9])"; then
      return 0
    fi

    [ $attempts -lt $max_attempts ] && sleep 2
  done

  echo "❌ ERROR: HAProxy failed to become healthy after $max_attempts attempts"
  echo " Last status: $container_info"
  return 1
}

# True when the haproxy compose service reports an "Up" status.
is_haproxy_running() {
  run_on_target "cd '${HAPROXY_BASE_DIR}' && docker compose ps haproxy --format '{{.Status}}' | grep -q 'Up'"
}
|
||||
|
||||
# True only when the base dir, both config files, and a running HAProxy
# container are all present on the target.
is_haproxy_infrastructure_ready() {
  run_on_target "test -d '${HAPROXY_BASE_DIR}'" || return 1
  run_on_target "test -f '${HAPROXY_MAIN_CONFIG}'" || return 1
  run_on_target "test -f '${HAPROXY_COMPOSE_FILE}'" || return 1
  is_haproxy_running || return 1
  return 0
}
|
||||
|
||||
# Render the backend template for DOMAIN/RELEASE_TYPE, upload it to the
# target's configs directory, and hot-reload HAProxy.
add_backend() {
  local backend_name="$1"

  [[ -z "$backend_name" ]] && {
    show_usage
    error_exit "add command requires backend_name"
  }

  [[ -z "${DOMAIN:-}" ]] && {
    show_usage
    error_exit "DOMAIN environment variable is required for add command"
  }

  [[ -z "${RELEASE_TYPE:-}" ]] && {
    show_usage
    error_exit "RELEASE_TYPE environment variable is required for add command"
  }

  echo "➕ Adding backend: $backend_name"

  # TODO: MAKE THIS IDEMPOTENT AND REMOVE THE is_haproxy_infrastructure_ready CHECK
  if ! is_haproxy_infrastructure_ready; then
    echo "🔧 Initializing HAProxy infrastructure..."
    init_haproxy_infrastructure
  fi

  local backend_config_path="${HAPROXY_CONFIGS_DIR}/${backend_name}.cfg"

  [ ! -f "$BACKEND_TEMPLATE_PATH" ] && error_exit "Backend template not found at $BACKEND_TEMPLATE_PATH"

  # Render the template locally, then ship the result to the target.
  local temp_config="/tmp/${backend_name}.cfg"
  echo "📝 Generating backend config from template..."
  cp "$BACKEND_TEMPLATE_PATH" "$temp_config"

  substitute_env_vars "$temp_config"
  echo "✅ Backend config generated"

  echo "📤 Uploading backend config..."
  scp "$temp_config" deploy:"$backend_config_path" || { rm -f "$temp_config"; error_exit "Failed to upload backend config"; }
  echo "✅ Backend config uploaded"

  rm -f "$temp_config"

  echo "🔄 Reloading HAProxy..."
  reload_haproxy
}
|
||||
|
||||
# Delete a backend's config file from the target (if present) and reload.
# Idempotent: already-absent configs are reported, not treated as errors.
remove_backend() {
  local backend_name="$1"

  [ -z "$backend_name" ] && { show_usage; error_exit "remove command requires backend_name"; }

  echo "➖ Removing backend: $backend_name"

  if ! is_haproxy_infrastructure_ready; then
    echo "🔧 Initializing HAProxy infrastructure..."
    init_haproxy_infrastructure
  fi

  local backend_config_path="${HAPROXY_CONFIGS_DIR}/${backend_name}.cfg"

  if run_on_target "test -f '${backend_config_path}'"; then
    run_on_target "rm -f '${backend_config_path}'"
    echo "✅ Backend configuration removed"
    reload_haproxy
  else
    echo "✅ Backend configuration already absent"
  fi
}
|
||||
|
||||
# Hot-reload HAProxy by sending SIGHUP to the container, then verify health.
reload_haproxy() {
  if ! is_haproxy_infrastructure_ready; then
    echo "🔧 Infrastructure not ready, initializing..."
    init_haproxy_infrastructure
    return 0 # Infrastructure init already handles health check
  fi

  run_or_exit "Failed to send reload signal to HAProxy" run_on_target "cd '${HAPROXY_BASE_DIR}' && docker kill -s HUP haproxy"

  # Brief pause for reload to take effect, then single health check.
  sleep 1
  if wait_for_haproxy_healthy; then
    echo "✅ HAProxy configuration reloaded"
  else
    error_exit "HAProxy became unhealthy after reload"
  fi
}

# Reject names that are empty or contain characters unsafe for filenames and
# HAProxy section names (alphanumeric start; alphanumeric/_/- thereafter).
validate_backend_name() {
  local backend_name="$1"
  [ -z "$backend_name" ] && error_exit "Backend name cannot be empty"
  [[ "$backend_name" =~ ^[a-zA-Z0-9][a-zA-Z0-9_-]*$ ]] 2>/dev/null || error_exit "Invalid backend name '$backend_name'. Can only contain alphanumeric characters, hyphens, and underscores."
}
|
||||
|
||||
# CLI dispatcher: add | remove | help.
main() {
  local command="${1:-}"
  local backend_name

  case "$command" in
    "add")
      backend_name="${2:-}"
      validate_backend_name "$backend_name"
      add_backend "$backend_name"
      ;;
    "remove")
      backend_name="${2:-}"
      validate_backend_name "$backend_name"
      remove_backend "$backend_name"
      ;;
    "help"|"-h"|"--help")
      show_usage
      ;;
    *)
      show_usage
      error_exit "Unknown command '$command'"
      ;;
  esac

  echo "✅ HAProxy configuration completed"
}

# Only run main when executed directly (not when sourced).
[[ "${BASH_SOURCE[0]}" == "${0}" ]] && main "$@"
|
||||
119
deploy/scripts/deploy-blue-green.sh
Executable file
119
deploy/scripts/deploy-blue-green.sh
Executable file
|
|
@ -0,0 +1,119 @@
|
|||
#!/bin/bash
set -euo pipefail

# Blue-Green deployment script for Nuxt app
# Usage: ./deploy-blue-green.sh

# Source common functions
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "${SCRIPT_DIR}/common-lib.sh"

# Validate required environment variables
require_var "DOCKER_HOST"
require_var "REPO_PROJECT_PATH"
require_var "REPO_NAME_ONLY"
require_var "REPO"
require_var "IMAGE_TAR"
require_var "PROD"
require_var "DOMAIN"
require_var "WIREGUARD_ENDPOINT_HOST"

validate_deployment_env

# Suffix the project path per environment so prod and staging never collide.
if [ "$PROD" = "true" ]; then
  REPO_PROJECT_PATH="${REPO_PROJECT_PATH}-prod"
  echo "🏭 Production deployment path: $REPO_PROJECT_PATH"
else
  REPO_PROJECT_PATH="${REPO_PROJECT_PATH}-staging"
  echo "🧪 Staging deployment path: $REPO_PROJECT_PATH"
fi

# Re-derive paths after the suffix change (validate_deployment_env exported
# the unsuffixed versions).
CURRENT_LINK_PATH="${REPO_PROJECT_PATH}/current"
RELEASES_PATH="${REPO_PROJECT_PATH}/releases"

echo "⚙️ Docker host: $DOCKER_HOST"

# Generate deployment timestamp
DEPLOYMENT_TIMESTAMP=$(date +%Y%m%d_%H%M%S)
RELEASE_TYPE=$([ "${PROD}" = "true" ] && echo "prod" || echo "staging")
NEW_RELEASE_PATH="${RELEASES_PATH}/${DEPLOYMENT_TIMESTAMP}"

# Prefer the Git SHA for the image tag; fall back to the timestamp.
if [ -n "${GIT_SHA:-}" ]; then
  IMAGE_TAG="sha-${GIT_SHA:0:7}"
else
  IMAGE_TAG="dev-${DEPLOYMENT_TIMESTAMP}"
fi

# Refuse to start while both colors run: a previous deploy didn't finish.
if is_deployment_in_progress; then
  echo "⚠️ ERROR: Deployment appears to be in progress (both colors are running)"
  echo " This might indicate a previous deployment didn't complete properly."
  echo " Please check the deployment status and clean up any old containers."
  echo " If you are sure that the deployment is complete, you can run the following command to clean up the old containers:"
  echo " ssh deploy 'cd ${REPO_PROJECT_PATH}/current && docker compose -p portfolio-\$ENV-blue down && docker compose -p portfolio-\$ENV-green down' (where \$ENV is prod or staging)"
  exit 1
fi

CURRENT_COLOR=$(get_current_color)
NEW_COLOR=$(switch_color "$CURRENT_COLOR")

echo "🎨 Current deployment color: $CURRENT_COLOR"
echo "🎨 New deployment color: $NEW_COLOR"

echo "🔍 Resolving WIREGUARD_ENDPOINT_HOST IP address..."
WIREGUARD_ENDPOINT_IP=$(run_on_target "dig +short $WIREGUARD_ENDPOINT_HOST | tail -n1")
[ -z "$WIREGUARD_ENDPOINT_IP" ] && error_exit "Failed to resolve IP address for WIREGUARD_ENDPOINT_HOST"

echo "📁 Creating release directory..."
run_on_target "mkdir -p '${NEW_RELEASE_PATH}'"

# NOTE(review): these appends accumulate duplicate keys when the script is
# re-run against the same working copy — confirm CI always starts from a
# fresh checkout, or rewrite the keys instead of appending.
echo "🏷️ Setting environment variables..."
echo "IMAGE_TAG=\"${IMAGE_TAG}\"" >> .env
echo "DEPLOYMENT_COLOR=\"${NEW_COLOR}\"" >> .env
echo "RELEASE_TYPE=\"${RELEASE_TYPE}\"" >> .env
echo "WIREGUARD_ENDPOINT_IP=\"${WIREGUARD_ENDPOINT_IP}\"" >> .env

echo "📋 Copying deployment files..."
scp deploy/docker-compose.yml deploy/haproxy.cfg .env deploy:"${NEW_RELEASE_PATH}/"

echo "🐳 Loading Docker image..."
docker load -i "${IMAGE_TAR}"

PROJECT_NAME=$(get_project_name "$NEW_COLOR")
WEB_SERVICE_NAME=$(get_web_service_name)

echo "🔵🟢 Starting ${NEW_COLOR} deployment..."
run_on_target "cd '${NEW_RELEASE_PATH}' && docker compose -p '${PROJECT_NAME}' up --no-build -d"

echo "⏳ Waiting for new containers to be healthy..."
if ! wait_for_healthy_containers "$PROJECT_NAME" "$WEB_SERVICE_NAME" 1; then
  echo "❌ New deployment failed to become healthy. Rolling back..."
  run_on_target "cd '${NEW_RELEASE_PATH}' && docker compose -p '${PROJECT_NAME}' down"
  run_on_target "rm -rf '${NEW_RELEASE_PATH}'"
  error_exit "New deployment failed to become healthy"
fi

echo "✅ New deployment is healthy!"

echo "🔗 Updating current deployment symlink..."
# Fix: the symlink lives on the deploy target, so its existence must be
# tested there too — the original tested the *local* filesystem with
# [ -L ... ] while reading the link remotely, so .previous_version was never
# recorded when deploying over SSH. run_on_target keeps local runs working.
if run_on_target "test -L '${CURRENT_LINK_PATH}'"; then
  PREVIOUS_RELEASE=$(run_on_target "readlink -f '${CURRENT_LINK_PATH}'")
  run_on_target "echo '${PREVIOUS_RELEASE}' > '${NEW_RELEASE_PATH}/.previous_version'"
fi

run_on_target "ln -sfn '${NEW_RELEASE_PATH}' '${CURRENT_LINK_PATH}'"

if [ "$CURRENT_COLOR" != "none" ]; then
  echo "🧹 Cleaning up previous deployment (${CURRENT_COLOR})..."
  cleanup_color_containers "$CURRENT_COLOR"
fi

echo "🗑️ Cleaning up old releases (keep last 5)..."
run_on_target "cd '${RELEASES_PATH}' && ls -t | tail -n +6 | xargs -r rm -rf"

echo "🎉 Deployment completed successfully!"
echo "📊 Current deployment: ${NEW_COLOR} (${IMAGE_TAG})"
echo "🌍 Domain: ${DOMAIN}"
||||
49
deploy/scripts/generate-docker-tags.sh
Executable file
49
deploy/scripts/generate-docker-tags.sh
Executable file
|
|
@ -0,0 +1,49 @@
|
|||
#!/bin/bash
set -euo pipefail

# Generate Docker tags based on git ref and environment
# Usage: ./generate-docker-tags.sh IMAGE_BASE GIT_SHA GIT_REF PROD

if [ $# -ne 4 ]; then
  # Fix: write diagnostics with ">&2" instead of "> /dev/stderr" — the device
  # node is not guaranteed to exist or be writable in minimal environments,
  # while fd-2 redirection always works.
  echo "Error: Invalid number of arguments" >&2
  echo "Usage: $0 IMAGE_BASE GIT_SHA GIT_REF PROD" >&2
  exit 1
fi

IMAGE_BASE="$1"
GIT_SHA="$2"
GIT_REF="$3"
PROD="$4"

# Validate inputs
if [ -z "$IMAGE_BASE" ] || [ -z "$GIT_SHA" ]; then
  echo "Error: IMAGE_BASE and GIT_SHA cannot be empty" >&2
  exit 1
fi

# Always include SHA tags
echo "${IMAGE_BASE}:sha-${GIT_SHA:0:7}"
echo "${IMAGE_BASE}:sha-${GIT_SHA}"

# Handle version tags
if [[ "$GIT_REF" =~ ^refs/tags/v([0-9]+)\.([0-9]+)\.([0-9]+)(-.*)?$ ]]; then
  MAJOR="${BASH_REMATCH[1]}"
  MINOR="${BASH_REMATCH[2]}"
  PATCH="${BASH_REMATCH[3]}"
  PRERELEASE="${BASH_REMATCH[4]}"

  if [[ -z "$PRERELEASE" ]] && [[ "$PROD" == "true" ]]; then
    echo "${IMAGE_BASE}:latest"
    echo "${IMAGE_BASE}:stable"
    # Explicit `if` instead of `[[ ... ]] && echo`: for v0.x.y tags the
    # failing test leaks a non-zero status out of the compound list, which
    # becomes the script's exit code if this is the last statement executed.
    if [[ "$MAJOR" -gt 0 ]]; then
      echo "${IMAGE_BASE}:v${MAJOR}"
    fi
    echo "${IMAGE_BASE}:v${MAJOR}.${MINOR}"
    echo "${IMAGE_BASE}:v${MAJOR}.${MINOR}.${PATCH}"
  else
    # NOTE(review): the actual prerelease suffix is discarded and a fixed
    # "-prerelease" tag is emitted — confirm that is intentional.
    echo "${IMAGE_BASE}:latest-staging"
    echo "${IMAGE_BASE}:staging"
    echo "${IMAGE_BASE}:v${MAJOR}.${MINOR}.${PATCH}-prerelease"
  fi
elif [[ "$PROD" == "false" ]]; then
  echo "${IMAGE_BASE}:latest-staging"
  echo "${IMAGE_BASE}:staging"
fi
||||
40
deploy/scripts/parse-repository-name.sh
Executable file
40
deploy/scripts/parse-repository-name.sh
Executable file
|
|
@ -0,0 +1,40 @@
|
|||
#!/bin/bash
set -euo pipefail

# Parse repository name and generate project paths
# Usage: ./parse-repository-name.sh GITHUB_REPOSITORY

if [ $# -eq 0 ]; then
  # Fix: use ">&2" rather than "> /dev/stderr" — the device node may be
  # missing or non-writable in minimal containers; fd 2 always exists.
  echo "Error: No repository name provided" >&2
  echo "Usage: $0 GITHUB_REPOSITORY" >&2
  exit 1
fi

GITHUB_REPOSITORY="$1"

echo "GITHUB_REPOSITORY: $GITHUB_REPOSITORY" >&2

# Normalize the three accepted shapes (HTTPS URL, SSH URL, plain owner/name)
# into a sanitized "owner/name" slug; disallowed characters become "-".
if [[ "$GITHUB_REPOSITORY" == *".git" ]]; then
  if [[ "$GITHUB_REPOSITORY" == "https://"* ]]; then
    echo "GITHUB_REPOSITORY ends in .git and is an HTTPS URI" >&2
    REPO=$(echo "$GITHUB_REPOSITORY" | sed 's/\.git$//' | cut -d'/' -f4-5 | sed 's/[^a-zA-Z0-9\/-]/-/g')
  elif [[ "$GITHUB_REPOSITORY" == "git@"* ]]; then
    echo "GITHUB_REPOSITORY ends in .git and is an SSH URI" >&2
    REPO=$(echo "$GITHUB_REPOSITORY" | sed 's/\.git$//' | cut -d':' -f2 | sed 's/[^a-zA-Z0-9\/-]/-/g')
  else
    echo "GITHUB_REPOSITORY ends in .git and is not a URI" >&2
    REPO=$(echo "$GITHUB_REPOSITORY" | sed 's/\.git$//' | sed 's/[^a-zA-Z0-9\/-]/-/g')
  fi
else
  echo "GITHUB_REPOSITORY is not a URI" >&2
  REPO=$(echo "$GITHUB_REPOSITORY" | sed 's/[^a-zA-Z0-9\/-]/-/g')
fi

REPO_NAME_ONLY=$(echo "$REPO" | cut -d'/' -f2)
# Default path - will be modified by deployment script based on environment
REPO_PROJECT_PATH="/srv/${REPO_NAME_ONLY}"

# Output in format that can be sourced - using printf %q for proper escaping
printf "export REPO=%q\n" "$REPO"
printf "export REPO_NAME_ONLY=%q\n" "$REPO_NAME_ONLY"
printf "export REPO_PROJECT_PATH=%q\n" "$REPO_PROJECT_PATH"
||||
Loading…
Add table
Add a link
Reference in a new issue