feat: standalone single-tenant deployment mode
All checks were successful
CI / build (push) Successful in 1m12s
CI / docker (push) Successful in 14s

Single-tenant installations now run the server directly without Logto
or the SaaS management plane. The installer generates a simpler compose
with 5 services: traefik, postgres, clickhouse, cameleer3-server, and
cameleer3-server-ui. Uses local auth (built-in admin), no OIDC.

Multi-tenant (vendor) mode is unchanged — full SaaS stack with Logto.

Changes:
- New DEPLOYMENT_MODE variable (standalone/saas) replaces TENANT_ORG_NAME
- generate_compose_file_standalone() for the 5-service compose
- Standalone traefik-dynamic.yml (no /platform/ redirect)
- Stock postgres:16-alpine (server creates schema via Flyway)
- Standalone health checks (server + UI instead of Logto + SaaS)
- Standalone credentials/docs generation
- Remove Phase 12b from bootstrap (no longer needed)
- Remove setup_single_tenant_record (no longer needed)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
hsiegeln
2026-04-13 20:12:02 +02:00
parent 17d8d98d5f
commit f254f2700f
3 changed files with 495 additions and 159 deletions

View File

@@ -80,7 +80,6 @@ services:
VENDOR_SEED_ENABLED: "${VENDOR_SEED_ENABLED:-false}"
VENDOR_USER: ${VENDOR_USER:-vendor}
VENDOR_PASS: ${VENDOR_PASS:-vendor}
TENANT_ORG_NAME: ${TENANT_ORG_NAME:-}
healthcheck:
test: ["CMD-SHELL", "node -e \"require('http').get('http://localhost:3001/oidc/.well-known/openid-configuration', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))\" && test -f /data/logto-bootstrap.json"]
interval: 10s

View File

@@ -583,11 +583,9 @@ EOF
chmod 644 "$BOOTSTRAP_FILE"
# ============================================================
# Phase 12: Deployment Mode (vendor or single-tenant)
# Phase 12: Vendor Seed (optional)
# ============================================================
TENANT_ORG_NAME="${TENANT_ORG_NAME:-}"
if [ "$VENDOR_SEED_ENABLED" = "true" ]; then
log ""
log "=== Phase 12a: Vendor Seed ==="
@@ -688,53 +686,6 @@ if [ "$VENDOR_SEED_ENABLED" = "true" ]; then
fi
log "Vendor seed complete."
elif [ -n "$TENANT_ORG_NAME" ]; then
log ""
log "=== Phase 12b: Single-Tenant Setup ==="
# Create organization for the tenant
TENANT_SLUG=$(echo "$TENANT_ORG_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9-]/-/g; s/--*/-/g; s/^-//; s/-$//')
log "Creating organization '$TENANT_ORG_NAME' (slug: $TENANT_SLUG)..."
EXISTING_ORG_ID=$(api_get "/api/organizations" | jq -r ".[] | select(.name == \"$TENANT_ORG_NAME\") | .id")
if [ -n "$EXISTING_ORG_ID" ]; then
log "Organization already exists: $EXISTING_ORG_ID"
TENANT_ORG_ID="$EXISTING_ORG_ID"
else
ORG_RESPONSE=$(api_post "/api/organizations" "{\"name\": \"$TENANT_ORG_NAME\"}")
TENANT_ORG_ID=$(echo "$ORG_RESPONSE" | jq -r '.id')
log "Created organization: $TENANT_ORG_ID"
fi
# Add admin user to organization with owner role
if [ -n "$TENANT_ORG_ID" ] && [ "$TENANT_ORG_ID" != "null" ]; then
api_post "/api/organizations/$TENANT_ORG_ID/users" "{\"userIds\": [\"$ADMIN_USER_ID\"]}" >/dev/null 2>&1
ORG_OWNER_ROLE_ID=$(api_get "/api/organization-roles" | jq -r '.[] | select(.name == "owner") | .id')
if [ -n "$ORG_OWNER_ROLE_ID" ] && [ "$ORG_OWNER_ROLE_ID" != "null" ]; then
curl -s -X PUT -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" $HOST_ARGS \
-d "{\"organizationRoleIds\": [\"$ORG_OWNER_ROLE_ID\"]}" \
"${LOGTO_ENDPOINT}/api/organizations/$TENANT_ORG_ID/users/$ADMIN_USER_ID/roles" >/dev/null 2>&1
fi
log "Added admin user to organization with owner role."
# Register OIDC redirect URIs for the tenant
TRAD_APP=$(api_get "/api/applications" | jq -r ".[] | select(.name == \"$TRAD_APP_NAME\") | .id")
if [ -n "$TRAD_APP" ] && [ "$TRAD_APP" != "null" ]; then
EXISTING_URIS=$(api_get "/api/applications/$TRAD_APP" | jq -r '.oidcClientMetadata.redirectUris')
NEW_URI="${PROTO}://${HOST}/t/${TENANT_SLUG}/oidc/callback"
if ! echo "$EXISTING_URIS" | jq -e ".[] | select(. == \"$NEW_URI\")" >/dev/null 2>&1; then
UPDATED_URIS=$(echo "$EXISTING_URIS" | jq ". + [\"$NEW_URI\"]")
api_patch "/api/applications/$TRAD_APP" "{\"oidcClientMetadata\": {\"redirectUris\": $UPDATED_URIS}}" >/dev/null 2>&1
log "Registered OIDC redirect URI for tenant: $NEW_URI"
fi
fi
# NOTE: Tenant DB record is created by the installer after Flyway migrations
# have run (the tenants table doesn't exist yet at bootstrap time).
fi
log "Single-tenant setup complete."
fi
log ""

View File

@@ -25,6 +25,7 @@ DEFAULT_LOGTO_CONSOLE_EXPOSED="true"
DEFAULT_VENDOR_ENABLED="false"
DEFAULT_VENDOR_USER="vendor"
DEFAULT_COMPOSE_PROJECT="cameleer-saas"
DEFAULT_COMPOSE_PROJECT_STANDALONE="cameleer"
DEFAULT_DOCKER_SOCKET="/var/run/docker.sock"
# --- Config values (set by args/env/config/prompts) ---
@@ -48,6 +49,7 @@ _ENV_MONITORING_NETWORK="${MONITORING_NETWORK:-}"
_ENV_COMPOSE_PROJECT="${COMPOSE_PROJECT:-}"
_ENV_DOCKER_SOCKET="${DOCKER_SOCKET:-}"
_ENV_NODE_TLS_REJECT="${NODE_TLS_REJECT:-}"
_ENV_DEPLOYMENT_MODE="${DEPLOYMENT_MODE:-}"
INSTALL_DIR=""
PUBLIC_HOST=""
@@ -72,7 +74,7 @@ VERSION=""
COMPOSE_PROJECT=""
DOCKER_SOCKET=""
NODE_TLS_REJECT=""
TENANT_ORG_NAME=""
DEPLOYMENT_MODE=""
# --- State ---
MODE="" # simple, expert, silent
@@ -175,7 +177,9 @@ parse_args() {
--compose-project) COMPOSE_PROJECT="$2"; shift ;;
--docker-socket) DOCKER_SOCKET="$2"; shift ;;
--node-tls-reject) NODE_TLS_REJECT="$2"; shift ;;
--tenant-org-name) TENANT_ORG_NAME="$2"; shift ;;
--deployment-mode) DEPLOYMENT_MODE="$2"; shift ;;
--server-admin-user) ADMIN_USER="$2"; shift ;;
--server-admin-password) ADMIN_PASS="$2"; shift ;;
--reconfigure) RERUN_ACTION="reconfigure" ;;
--reinstall) RERUN_ACTION="reinstall" ;;
--confirm-destroy) CONFIRM_DESTROY=true ;;
@@ -260,7 +264,7 @@ load_config_file() {
compose_project) [ -z "$COMPOSE_PROJECT" ] && COMPOSE_PROJECT="$value" ;;
docker_socket) [ -z "$DOCKER_SOCKET" ] && DOCKER_SOCKET="$value" ;;
node_tls_reject) [ -z "$NODE_TLS_REJECT" ] && NODE_TLS_REJECT="$value" ;;
tenant_org_name) [ -z "$TENANT_ORG_NAME" ] && TENANT_ORG_NAME="$value" ;;
deployment_mode) [ -z "$DEPLOYMENT_MODE" ] && DEPLOYMENT_MODE="$value" ;;
esac
done < "$file"
}
@@ -289,6 +293,7 @@ load_env_overrides() {
[ -z "$COMPOSE_PROJECT" ] && COMPOSE_PROJECT="$_ENV_COMPOSE_PROJECT"
[ -z "$DOCKER_SOCKET" ] && DOCKER_SOCKET="$_ENV_DOCKER_SOCKET"
[ -z "$NODE_TLS_REJECT" ] && NODE_TLS_REJECT="$_ENV_NODE_TLS_REJECT"
[ -z "$DEPLOYMENT_MODE" ] && DEPLOYMENT_MODE="$_ENV_DEPLOYMENT_MODE"
}
# --- Prerequisites ---
@@ -329,7 +334,9 @@ check_prerequisites() {
check_port_available "${HTTP_PORT:-$DEFAULT_HTTP_PORT}" "HTTP"
check_port_available "${HTTPS_PORT:-$DEFAULT_HTTPS_PORT}" "HTTPS"
if [ "$DEPLOYMENT_MODE" != "standalone" ]; then
check_port_available "${LOGTO_CONSOLE_PORT:-$DEFAULT_LOGTO_CONSOLE_PORT}" "Logto Console"
fi
if [ $errors -gt 0 ]; then
log_error "$errors prerequisite(s) not met. Please install missing dependencies and retry."
@@ -430,19 +437,19 @@ run_simple_prompts() {
echo ""
echo " Deployment mode:"
echo " [1] Multi-tenant vendor — admin manages platform, creates tenants on demand"
echo " [2] Single tenant — set up one tenant for immediate use"
echo " [1] Multi-tenant vendor — manage platform, provision tenants on demand"
echo " [2] Single-tenant — one server instance, local auth, no identity provider"
echo ""
local deploy_choice
read -rp " Select mode [1]: " deploy_choice
case "${deploy_choice:-1}" in
2)
DEPLOYMENT_MODE="standalone"
VENDOR_ENABLED="false"
prompt TENANT_ORG_NAME "Organization / tenant name" ""
;;
*)
DEPLOYMENT_MODE="saas"
VENDOR_ENABLED="true"
TENANT_ORG_NAME=""
;;
esac
}
@@ -463,6 +470,7 @@ run_expert_prompts() {
prompt_password CLICKHOUSE_PASSWORD "ClickHouse password" ""
fi
if [ "$DEPLOYMENT_MODE" = "saas" ]; then
echo ""
if prompt_yesno "Enable vendor account?"; then
VENDOR_ENABLED="true"
@@ -475,12 +483,15 @@ run_expert_prompts() {
else
VENDOR_ENABLED="false"
fi
fi
echo ""
echo -e "${BOLD} Networking:${NC}"
prompt HTTP_PORT "HTTP port" "${HTTP_PORT:-$DEFAULT_HTTP_PORT}"
prompt HTTPS_PORT "HTTPS port" "${HTTPS_PORT:-$DEFAULT_HTTPS_PORT}"
if [ "$DEPLOYMENT_MODE" = "saas" ]; then
prompt LOGTO_CONSOLE_PORT "Logto admin console port" "${LOGTO_CONSOLE_PORT:-$DEFAULT_LOGTO_CONSOLE_PORT}"
fi
echo ""
echo -e "${BOLD} Docker:${NC}"
@@ -488,6 +499,7 @@ run_expert_prompts() {
prompt COMPOSE_PROJECT "Compose project name" "${COMPOSE_PROJECT:-$DEFAULT_COMPOSE_PROJECT}"
prompt DOCKER_SOCKET "Docker socket path" "${DOCKER_SOCKET:-$DEFAULT_DOCKER_SOCKET}"
if [ "$DEPLOYMENT_MODE" = "saas" ]; then
echo ""
echo -e "${BOLD} Logto:${NC}"
if prompt_yesno "Expose Logto admin console externally?" "y"; then
@@ -495,11 +507,13 @@ run_expert_prompts() {
else
LOGTO_CONSOLE_EXPOSED="false"
fi
fi
}
# --- Config merge and validation ---
merge_config() {
: "${DEPLOYMENT_MODE:=saas}"
: "${INSTALL_DIR:=$DEFAULT_INSTALL_DIR}"
: "${PUBLIC_HOST:=localhost}"
: "${PUBLIC_PROTOCOL:=$DEFAULT_PUBLIC_PROTOCOL}"
@@ -512,12 +526,18 @@ merge_config() {
: "${VENDOR_ENABLED:=$DEFAULT_VENDOR_ENABLED}"
: "${VENDOR_USER:=$DEFAULT_VENDOR_USER}"
: "${VERSION:=$CAMELEER_DEFAULT_VERSION}"
: "${COMPOSE_PROJECT:=$DEFAULT_COMPOSE_PROJECT}"
: "${DOCKER_SOCKET:=$DEFAULT_DOCKER_SOCKET}"
if [ "$DEPLOYMENT_MODE" = "standalone" ]; then
: "${COMPOSE_PROJECT:=$DEFAULT_COMPOSE_PROJECT_STANDALONE}"
else
: "${COMPOSE_PROJECT:=$DEFAULT_COMPOSE_PROJECT}"
fi
# Force lowercase hostname — Logto normalizes internally, case mismatch breaks JWT validation
PUBLIC_HOST=$(echo "$PUBLIC_HOST" | tr '[:upper:]' '[:lower:]')
if [ "$DEPLOYMENT_MODE" != "standalone" ]; then
if [ -z "$NODE_TLS_REJECT" ]; then
if [ "$TLS_MODE" = "custom" ]; then
NODE_TLS_REJECT="1"
@@ -525,6 +545,7 @@ merge_config() {
NODE_TLS_REJECT="0"
fi
fi
fi
}
validate_config() {
@@ -545,7 +566,9 @@ validate_config() {
fi
fi
for port_var in HTTP_PORT HTTPS_PORT LOGTO_CONSOLE_PORT; do
local port_vars="HTTP_PORT HTTPS_PORT"
[ "$DEPLOYMENT_MODE" != "standalone" ] && port_vars="HTTP_PORT HTTPS_PORT LOGTO_CONSOLE_PORT"
for port_var in $port_vars; do
local port_val
eval "port_val=\$$port_var"
if ! echo "$port_val" | grep -qE '^[0-9]+$' || [ "$port_val" -lt 1 ] || [ "$port_val" -gt 65535 ]; then
@@ -595,6 +618,44 @@ copy_certs() {
generate_env_file() {
local f="$INSTALL_DIR/.env"
if [ "$DEPLOYMENT_MODE" = "standalone" ]; then
cat > "$f" << EOF
# Cameleer Server Configuration (standalone)
# Generated by installer v${CAMELEER_INSTALLER_VERSION} on $(date -u '+%Y-%m-%d %H:%M:%S UTC')
VERSION=${VERSION}
PUBLIC_HOST=${PUBLIC_HOST}
PUBLIC_PROTOCOL=${PUBLIC_PROTOCOL}
HTTP_PORT=${HTTP_PORT}
HTTPS_PORT=${HTTPS_PORT}
# PostgreSQL
POSTGRES_USER=cameleer
POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
POSTGRES_DB=cameleer3
# ClickHouse
CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD}
# Server admin
SERVER_ADMIN_USER=${ADMIN_USER}
SERVER_ADMIN_PASS=${ADMIN_PASS}
# Docker
DOCKER_SOCKET=${DOCKER_SOCKET}
DOCKER_GID=$(stat -c '%g' "${DOCKER_SOCKET}" 2>/dev/null || echo "0")
EOF
if [ "$TLS_MODE" = "custom" ]; then
echo "CERT_FILE=/user-certs/cert.pem" >> "$f"
echo "KEY_FILE=/user-certs/key.pem" >> "$f"
[ -n "$CA_FILE" ] && echo "CA_FILE=/user-certs/ca.pem" >> "$f"
fi
log_info "Generated .env"
cp "$f" "$INSTALL_DIR/.env.bak"
return
fi
cat > "$f" << EOF
# Cameleer SaaS Configuration
# Generated by installer v${CAMELEER_INSTALLER_VERSION} on $(date -u '+%Y-%m-%d %H:%M:%S UTC')
@@ -644,9 +705,6 @@ VENDOR_SEED_ENABLED=${VENDOR_ENABLED}
VENDOR_USER=${VENDOR_USER}
VENDOR_PASS=${VENDOR_PASS:-}
# Single-tenant org (when vendor is disabled)
TENANT_ORG_NAME=${TENANT_ORG_NAME:-}
# Docker
DOCKER_SOCKET=${DOCKER_SOCKET}
DOCKER_GID=$(stat -c '%g' "${DOCKER_SOCKET}" 2>/dev/null || echo "0")
@@ -661,6 +719,10 @@ EOF
}
generate_compose_file() {
if [ "$DEPLOYMENT_MODE" = "standalone" ]; then
generate_compose_file_standalone
return
fi
local f="$INSTALL_DIR/docker-compose.yml"
: > "$f"
@@ -796,7 +858,6 @@ EOF
VENDOR_SEED_ENABLED: "${VENDOR_SEED_ENABLED:-false}"
VENDOR_USER: ${VENDOR_USER:-vendor}
VENDOR_PASS: ${VENDOR_PASS:-vendor}
TENANT_ORG_NAME: ${TENANT_ORG_NAME:-}
healthcheck:
test: ["CMD-SHELL", "node -e \"require('http').get('http://localhost:3001/oidc/.well-known/openid-configuration', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))\" && test -f /data/logto-bootstrap.json"]
interval: 10s
@@ -920,6 +981,208 @@ EOF
log_info "Generated docker-compose.yml"
}
#######################################
# Generate docker-compose.yml for standalone (single-tenant) mode:
# traefik, postgres, clickhouse, server (Spring Boot backend) and
# server-ui (React frontend). Also writes a standalone
# traefik-dynamic.yml into $INSTALL_DIR.
# Globals:   INSTALL_DIR, TLS_MODE, MONITORING_NETWORK, DOCKER_SOCKET (read)
# Outputs:   $INSTALL_DIR/docker-compose.yml,
#            $INSTALL_DIR/traefik-dynamic.yml
# Returns:   0
#######################################
generate_compose_file_standalone() {
  local f="$INSTALL_DIR/docker-compose.yml"
  : > "$f"
  # Quoted delimiter ('COMPOSEEOF'): ${...} placeholders are written
  # literally and resolved by docker compose from the generated .env.
  cat >> "$f" << 'COMPOSEEOF'
# Cameleer Server (standalone)
# Generated by Cameleer installer — do not edit manually
services:
  traefik:
    image: ${TRAEFIK_IMAGE:-gitea.siegeln.net/cameleer/cameleer-traefik}:${VERSION:-latest}
    restart: unless-stopped
    ports:
      - "${HTTP_PORT:-80}:80"
      - "${HTTPS_PORT:-443}:443"
    environment:
      PUBLIC_HOST: ${PUBLIC_HOST:-localhost}
      CERT_FILE: ${CERT_FILE:-}
      KEY_FILE: ${KEY_FILE:-}
      CA_FILE: ${CA_FILE:-}
    volumes:
      - certs:/certs
      - ${DOCKER_SOCKET:-/var/run/docker.sock}:/var/run/docker.sock:ro
      - ./traefik-dynamic.yml:/etc/traefik/dynamic.yml:ro
COMPOSEEOF
  # Custom TLS: additionally mount the user-supplied certs read-only.
  if [ "$TLS_MODE" = "custom" ]; then
    echo "      - ./certs:/user-certs:ro" >> "$f"
  fi
  cat >> "$f" << 'COMPOSEEOF'
    networks:
      - cameleer
      - cameleer-traefik
COMPOSEEOF
  # Optionally attach traefik to an external monitoring network.
  if [ -n "$MONITORING_NETWORK" ]; then
    echo "      - ${MONITORING_NETWORK}" >> "$f"
  fi
  # Stock postgres image; the server creates its schema via Flyway.
  # $$ in the healthcheck is compose-escaping: the container shell sees ${...}.
  cat >> "$f" << 'COMPOSEEOF'
  postgres:
    image: postgres:16-alpine
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${POSTGRES_DB:-cameleer3}
      POSTGRES_USER: ${POSTGRES_USER:-cameleer}
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
    volumes:
      - pgdata:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER:-cameleer} -d $${POSTGRES_DB:-cameleer3}"]
      interval: 5s
      timeout: 5s
      retries: 5
    networks:
      - cameleer
COMPOSEEOF
  if [ -n "$MONITORING_NETWORK" ]; then
    echo "      - ${MONITORING_NETWORK}" >> "$f"
  fi
  cat >> "$f" << 'COMPOSEEOF'
  clickhouse:
    image: ${CLICKHOUSE_IMAGE:-gitea.siegeln.net/cameleer/cameleer-clickhouse}:${VERSION:-latest}
    restart: unless-stopped
    environment:
      CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD}
    volumes:
      - chdata:/var/lib/clickhouse
    healthcheck:
      test: ["CMD-SHELL", "clickhouse-client --password $${CLICKHOUSE_PASSWORD} --query 'SELECT 1'"]
      interval: 10s
      timeout: 5s
      retries: 3
    networks:
      - cameleer
COMPOSEEOF
  if [ -n "$MONITORING_NETWORK" ]; then
    echo "      - ${MONITORING_NETWORK}" >> "$f"
  fi
  # Detect Docker socket GID
  local docker_gid
  docker_gid=$(stat -c '%g' "${DOCKER_SOCKET:-/var/run/docker.sock}" 2>/dev/null || echo "0")
  # Unquoted heredoc: ${docker_gid} expands NOW at generation time; every
  # ${...} meant for docker compose is backslash-escaped (\${...}), and the
  # backticks in traefik rules are escaped (\`) so they reach the YAML intact.
  cat >> "$f" << COMPOSEEOF
  server:
    image: \${SERVER_IMAGE:-gitea.siegeln.net/cameleer/cameleer3-server}:\${VERSION:-latest}
    container_name: cameleer-server
    restart: unless-stopped
    depends_on:
      postgres:
        condition: service_healthy
    environment:
      CAMELEER_SERVER_TENANT_ID: default
      SPRING_DATASOURCE_URL: jdbc:postgresql://postgres:5432/\${POSTGRES_DB:-cameleer3}?currentSchema=tenant_default
      SPRING_DATASOURCE_USERNAME: \${POSTGRES_USER:-cameleer}
      SPRING_DATASOURCE_PASSWORD: \${POSTGRES_PASSWORD}
      CAMELEER_SERVER_CLICKHOUSE_URL: jdbc:clickhouse://clickhouse:8123/cameleer
      CAMELEER_SERVER_CLICKHOUSE_USERNAME: default
      CAMELEER_SERVER_CLICKHOUSE_PASSWORD: \${CLICKHOUSE_PASSWORD}
      CAMELEER_SERVER_SECURITY_UIUSER: \${SERVER_ADMIN_USER:-admin}
      CAMELEER_SERVER_SECURITY_UIPASSWORD: \${SERVER_ADMIN_PASS:-admin}
      CAMELEER_SERVER_SECURITY_CORSALLOWEDORIGINS: \${PUBLIC_PROTOCOL:-https}://\${PUBLIC_HOST:-localhost}
      CAMELEER_SERVER_RUNTIME_ENABLED: "true"
      CAMELEER_SERVER_RUNTIME_SERVERURL: http://cameleer-server:8081
      CAMELEER_SERVER_RUNTIME_ROUTINGDOMAIN: \${PUBLIC_HOST:-localhost}
      CAMELEER_SERVER_RUNTIME_ROUTINGMODE: path
      CAMELEER_SERVER_RUNTIME_JARSTORAGEPATH: /data/jars
      CAMELEER_SERVER_RUNTIME_DOCKERNETWORK: cameleer-apps
      CAMELEER_SERVER_RUNTIME_JARDOCKERVOLUME: cameleer-jars
      CAMELEER_SERVER_RUNTIME_BASEIMAGE: gitea.siegeln.net/cameleer/cameleer-runtime-base:\${VERSION:-latest}
    labels:
      - traefik.enable=true
      - traefik.http.routers.server-api.rule=PathPrefix(\`/api\`)
      - traefik.http.routers.server-api.entrypoints=websecure
      - traefik.http.routers.server-api.tls=true
      - traefik.http.services.server-api.loadbalancer.server.port=8081
      - traefik.docker.network=cameleer-traefik
    healthcheck:
      test: ["CMD-SHELL", "curl -sf http://localhost:8081/api/v1/health || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 30
      start_period: 30s
    volumes:
      - jars:/data/jars
      - certs:/certs:ro
      - \${DOCKER_SOCKET:-/var/run/docker.sock}:/var/run/docker.sock
    group_add:
      - "${docker_gid}"
    networks:
      - cameleer
      - cameleer-traefik
      - cameleer-apps
  server-ui:
    image: \${SERVER_UI_IMAGE:-gitea.siegeln.net/cameleer/cameleer3-server-ui}:\${VERSION:-latest}
    restart: unless-stopped
    depends_on:
      server:
        condition: service_healthy
    environment:
      CAMELEER_API_URL: http://cameleer-server:8081
      BASE_PATH: ""
    labels:
      - traefik.enable=true
      - traefik.http.routers.ui.rule=PathPrefix(\`/\`)
      - traefik.http.routers.ui.priority=1
      - traefik.http.routers.ui.entrypoints=websecure
      - traefik.http.routers.ui.tls=true
      - traefik.http.services.ui.loadbalancer.server.port=80
      - traefik.docker.network=cameleer-traefik
    networks:
      - cameleer-traefik
COMPOSEEOF
  cat >> "$f" << 'COMPOSEEOF'
volumes:
  pgdata:
  chdata:
  certs:
  jars:
networks:
  cameleer:
    driver: bridge
  cameleer-traefik:
    name: cameleer-traefik
    driver: bridge
  cameleer-apps:
    name: cameleer-apps
    driver: bridge
COMPOSEEOF
  # Declare the monitoring network as external if one was configured.
  if [ -n "$MONITORING_NETWORK" ]; then
    cat >> "$f" << EOF
  ${MONITORING_NETWORK}:
    external: true
EOF
  fi
  # Generate standalone traefik dynamic config (overrides baked-in redirect)
  # NOTE(review): certFile/keyFile always point at /certs/*.pem, but for
  # TLS_MODE=custom the user certs are mounted at /user-certs — confirm the
  # traefik image's entrypoint materializes them into /certs.
  cat > "$INSTALL_DIR/traefik-dynamic.yml" << 'TRAEFIKEOF'
tls:
  stores:
    default:
      defaultCertificate:
        certFile: /certs/cert.pem
        keyFile: /certs/key.pem
TRAEFIKEOF
  log_info "Generated docker-compose.yml (standalone)"
}
# --- Docker operations ---
docker_compose_pull() {
@@ -1001,6 +1264,13 @@ verify_health() {
[ $failed -eq 0 ] && \
wait_for_docker_healthy "ClickHouse" "clickhouse" 120 || failed=1
if [ "$DEPLOYMENT_MODE" = "standalone" ]; then
[ $failed -eq 0 ] && \
wait_for_docker_healthy "Cameleer Server" "server" 300 || failed=1
[ $failed -eq 0 ] && \
check_endpoint "Server UI" "https://localhost:${HTTPS_PORT}/" 60 || failed=1
else
[ $failed -eq 0 ] && \
wait_for_docker_healthy "Logto + Bootstrap" "logto" 300 || failed=1
@@ -1009,6 +1279,7 @@ verify_health() {
[ $failed -eq 0 ] && \
check_endpoint "Traefik routing" "https://localhost:${HTTPS_PORT}/" 30 || failed=1
fi
echo ""
if [ $failed -ne 0 ]; then
@@ -1018,58 +1289,6 @@ verify_health() {
log_success "All services healthy."
}
# --- Single-tenant DB record ---
#######################################
# Create the tenant DB row for a single-tenant install. Must run after
# Flyway migrations (the tenants table does not exist at bootstrap time).
# Best-effort: all failure paths log a warning and return 0.
# Globals:   TENANT_ORG_NAME, INSTALL_DIR, COMPOSE_PROJECT, POSTGRES_USER,
#            GREEN, NC (read)
# Outputs:   status messages via log_info/log_warn/printf
# Returns:   0 always
#######################################
setup_single_tenant_record() {
  # No-op unless a single-tenant organization was configured.
  [ -z "$TENANT_ORG_NAME" ] && return 0
  local slug
  slug=$(echo "$TENANT_ORG_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9-]/-/g; s/--*/-/g; s/^-//; s/-$//')
  # Double any single quotes so the org name is safe inside SQL string
  # literals (the slug is already restricted to [a-z0-9-] by the sed above).
  local sql_name=${TENANT_ORG_NAME//\'/\'\'}
  log_info "Creating single-tenant record for '$TENANT_ORG_NAME' (slug: $slug)..."
  # Check if tenant already exists. The cd runs inside the command
  # substitution's subshell, so it cannot leak into the caller's cwd.
  local existing
  existing=$(cd "$INSTALL_DIR" && docker compose -p "$COMPOSE_PROJECT" exec -T postgres \
    psql -U "${POSTGRES_USER}" -d cameleer_saas -t -A -c \
    "SELECT id FROM tenants WHERE slug = '$slug';" 2>/dev/null) || true
  if [ -n "$existing" ]; then
    printf " ${GREEN}[ok]${NC} Tenant record already exists: %s\n" "$slug"
    return 0
  fi
  # Look up the Logto org ID from the logto database.
  local org_id
  org_id=$(cd "$INSTALL_DIR" && docker compose -p "$COMPOSE_PROJECT" exec -T postgres \
    psql -U "${POSTGRES_USER}" -d logto -t -A -c \
    "SELECT id FROM organizations WHERE name = '$sql_name' AND tenant_id = 'default';" 2>/dev/null) || true
  if [ -z "$org_id" ]; then
    log_warn "Could not find Logto organization for '$TENANT_ORG_NAME' — tenant record not created."
    log_warn "Create the tenant manually via the vendor console."
    return 0
  fi
  # Generate a UUID for the new row: kernel interface first, python3 fallback.
  local uuid
  uuid=$(cat /proc/sys/kernel/random/uuid 2>/dev/null || python3 -c "import uuid; print(uuid.uuid4())" 2>/dev/null || true)
  if [ -z "$uuid" ]; then
    log_warn "Could not generate UUID — tenant record not created."
    return 0
  fi
  # Subshell so the cd cannot change the caller's working directory
  # (the previous version's bare `cd` leaked into the rest of the script).
  if (cd "$INSTALL_DIR" && docker compose -p "$COMPOSE_PROJECT" exec -T postgres \
    psql -U "${POSTGRES_USER}" -d cameleer_saas -c \
    "INSERT INTO tenants (id, name, slug, tier, status, logto_org_id, created_at, updated_at)
     VALUES ('$uuid', '$sql_name', '$slug', 'STANDARD', 'PROVISIONING', '$org_id', NOW(), NOW());" >/dev/null 2>&1); then
    printf " ${GREEN}[ok]${NC} Tenant record created: %s (status: PROVISIONING)\n" "$slug"
    log_info "The SaaS app will provision the tenant's server automatically."
  else
    log_warn "Failed to create tenant record — create it manually via the vendor console."
  fi
}
# --- Output file generation ---
write_config_file() {
@@ -1094,13 +1313,36 @@ version=${VERSION}
compose_project=${COMPOSE_PROJECT}
docker_socket=${DOCKER_SOCKET}
node_tls_reject=${NODE_TLS_REJECT}
tenant_org_name=${TENANT_ORG_NAME}
deployment_mode=${DEPLOYMENT_MODE}
EOF
log_info "Saved installer config to cameleer.conf"
}
generate_credentials_file() {
local f="$INSTALL_DIR/credentials.txt"
if [ "$DEPLOYMENT_MODE" = "standalone" ]; then
cat > "$f" << EOF
===========================================
CAMELEER SERVER CREDENTIALS
Generated: $(date -u '+%Y-%m-%d %H:%M:%S UTC')
SECURE THIS FILE AND DELETE AFTER NOTING
THESE CREDENTIALS CANNOT BE RECOVERED
===========================================
Server Dashboard: ${PUBLIC_PROTOCOL}://${PUBLIC_HOST}/
Admin User: ${ADMIN_USER}
Admin Password: ${ADMIN_PASS}
PostgreSQL: cameleer / ${POSTGRES_PASSWORD}
ClickHouse: default / ${CLICKHOUSE_PASSWORD}
EOF
chmod 600 "$f"
log_info "Saved credentials to credentials.txt"
return
fi
cat > "$f" << EOF
===========================================
CAMELEER PLATFORM CREDENTIALS
@@ -1141,6 +1383,10 @@ EOF
}
generate_install_doc() {
if [ "$DEPLOYMENT_MODE" = "standalone" ]; then
generate_install_doc_standalone
return
fi
local f="$INSTALL_DIR/INSTALL.md"
local tls_desc="Self-signed (auto-generated)"
[ "$TLS_MODE" = "custom" ] && tls_desc="Custom certificate"
@@ -1288,19 +1534,161 @@ EOF
log_info "Generated INSTALL.md"
}
#######################################
# Generate INSTALL.md for a standalone (single-tenant) installation:
# summary table, service URLs, architecture, networking, TLS, backups,
# upgrade, troubleshooting and uninstall sections.
# Globals:   INSTALL_DIR, VERSION, CAMELEER_INSTALLER_VERSION, TLS_MODE,
#            PUBLIC_HOST, PUBLIC_PROTOCOL, HTTP_PORT, HTTPS_PORT,
#            MONITORING_NETWORK, COMPOSE_PROJECT (read)
# Outputs:   $INSTALL_DIR/INSTALL.md
# Returns:   0
#######################################
generate_install_doc_standalone() {
  local f="$INSTALL_DIR/INSTALL.md"
  local tls_desc="Self-signed (auto-generated)"
  [ "$TLS_MODE" = "custom" ] && tls_desc="Custom certificate"
  # Unquoted heredoc: ${...} and $(date ...) expand at generation time;
  # backticks destined for the markdown are backslash-escaped.
  cat > "$f" << EOF
# Cameleer Server — Installation Documentation
## Installation Summary
| | |
|---|---|
| **Version** | ${VERSION} |
| **Date** | $(date -u '+%Y-%m-%d %H:%M:%S UTC') |
| **Installer** | v${CAMELEER_INSTALLER_VERSION} |
| **Mode** | Standalone (single-tenant) |
| **Install Directory** | ${INSTALL_DIR} |
| **Hostname** | ${PUBLIC_HOST} |
| **TLS** | ${tls_desc} |
## Service URLs
- **Server Dashboard:** ${PUBLIC_PROTOCOL}://${PUBLIC_HOST}/
- **API Endpoint:** ${PUBLIC_PROTOCOL}://${PUBLIC_HOST}/api/
## First Steps
1. Open the Server Dashboard in your browser
2. Log in with the admin credentials from \`credentials.txt\`
3. Upload a Camel application JAR to deploy your first route
4. Monitor traces, metrics, and logs in the dashboard
## Architecture
| Container | Purpose |
|---|---|
| \`traefik\` | Reverse proxy, TLS termination, routing |
| \`postgres\` | PostgreSQL database (server data) |
| \`clickhouse\` | Time-series storage (traces, metrics, logs) |
| \`server\` | Cameleer Server (Spring Boot backend) |
| \`server-ui\` | Cameleer Dashboard (React frontend) |
## Networking
| Port | Service |
|---|---|
| ${HTTP_PORT} | HTTP (redirects to HTTPS) |
| ${HTTPS_PORT} | HTTPS (main entry point) |
EOF
  # Optional monitoring section when an external network was configured.
  if [ -n "$MONITORING_NETWORK" ]; then
    cat >> "$f" << EOF
### Monitoring
Services are connected to the \`${MONITORING_NETWORK}\` Docker network for Prometheus auto-discovery.
EOF
  fi
  cat >> "$f" << EOF
## TLS
**Mode:** ${tls_desc}
EOF
  # NOTE(review): assumes the non-custom TLS_MODE value is exactly
  # "self-signed" — confirm against the prompt/merge_config logic.
  if [ "$TLS_MODE" = "self-signed" ]; then
    # Quoted delimiter: content is literal, backticks need no escaping.
    cat >> "$f" << 'EOF'
The platform generated a self-signed certificate on first boot. Replace it by
placing your certificate and key files in the `certs/` directory and restarting.
EOF
  fi
  cat >> "$f" << EOF
## Data & Backups
| Docker Volume | Contains |
|---|---|
| \`pgdata\` | PostgreSQL data (server config, routes, deployments) |
| \`chdata\` | ClickHouse data (traces, metrics, logs) |
| \`certs\` | TLS certificates |
| \`jars\` | Uploaded application JARs |
### Backup Commands
\`\`\`bash
# PostgreSQL
docker compose -p ${COMPOSE_PROJECT} exec postgres pg_dump -U cameleer cameleer3 > backup.sql
# ClickHouse
docker compose -p ${COMPOSE_PROJECT} exec clickhouse clickhouse-client --query "SELECT * FROM cameleer.traces FORMAT Native" > traces.native
\`\`\`
## Upgrading
Re-run the installer with a new version:
\`\`\`bash
curl -sfL https://install.cameleer.io | bash -s -- --install-dir ${INSTALL_DIR} --version NEW_VERSION
\`\`\`
The installer preserves your \`.env\`, credentials, and data volumes. Only the compose file and images are updated.
## Troubleshooting
| Issue | Command |
|---|---|
| Service not starting | \`docker compose -p ${COMPOSE_PROJECT} logs SERVICE_NAME\` |
| Server issues | \`docker compose -p ${COMPOSE_PROJECT} logs server\` |
| Routing issues | \`docker compose -p ${COMPOSE_PROJECT} logs traefik\` |
| Database issues | \`docker compose -p ${COMPOSE_PROJECT} exec postgres psql -U cameleer -d cameleer3\` |
## Uninstalling
\`\`\`bash
# Stop and remove containers
cd ${INSTALL_DIR} && docker compose -p ${COMPOSE_PROJECT} down
# Remove data volumes (DESTRUCTIVE)
cd ${INSTALL_DIR} && docker compose -p ${COMPOSE_PROJECT} down -v
# Remove install directory
rm -rf ${INSTALL_DIR}
\`\`\`
EOF
  log_info "Generated INSTALL.md"
}
print_credentials() {
echo ""
echo -e "${BOLD}==========================================${NC}"
if [ "$DEPLOYMENT_MODE" = "standalone" ]; then
echo -e "${BOLD} CAMELEER SERVER CREDENTIALS${NC}"
else
echo -e "${BOLD} CAMELEER PLATFORM CREDENTIALS${NC}"
fi
echo -e "${BOLD}==========================================${NC}"
echo ""
if [ "$DEPLOYMENT_MODE" = "standalone" ]; then
echo -e " Dashboard: ${BLUE}${PUBLIC_PROTOCOL}://${PUBLIC_HOST}/${NC}"
else
echo -e " Admin Console: ${BLUE}${PUBLIC_PROTOCOL}://${PUBLIC_HOST}/platform/${NC}"
fi
echo -e " Admin User: ${BOLD}${ADMIN_USER}${NC}"
echo -e " Admin Password: ${BOLD}${ADMIN_PASS}${NC}"
echo ""
echo -e " PostgreSQL: cameleer / ${POSTGRES_PASSWORD}"
echo -e " ClickHouse: default / ${CLICKHOUSE_PASSWORD}"
echo ""
if [ "$DEPLOYMENT_MODE" = "saas" ]; then
if [ "$VENDOR_ENABLED" = "true" ]; then
echo -e " Vendor User: ${BOLD}${VENDOR_USER}${NC}"
echo -e " Vendor Password: ${BOLD}${VENDOR_PASS}${NC}"
@@ -1310,6 +1698,8 @@ print_credentials() {
echo -e " Logto Console: ${BLUE}${PUBLIC_PROTOCOL}://${PUBLIC_HOST}:${LOGTO_CONSOLE_PORT}${NC}"
echo ""
fi
fi
echo -e " Credentials saved to: ${INSTALL_DIR}/credentials.txt"
echo -e " ${YELLOW}Secure this file and delete after noting credentials.${NC}"
echo ""
@@ -1382,7 +1772,6 @@ handle_rerun() {
docker_compose_down
docker_compose_up
verify_health
setup_single_tenant_record
generate_install_doc
print_summary
exit 0
@@ -1476,9 +1865,6 @@ main() {
# Verify health
verify_health
# Create single-tenant record (after Flyway migrations have run)
setup_single_tenant_record
# Generate output files
generate_credentials_file
generate_install_doc