feat: clean control plane — remove all example tenant resources

- Removed cameleer3-server and cameleer3-server-ui from docker-compose
  (tenants provision their own server instances via the vendor console)
- Removed viewer/camel user from bootstrap (tenant users created during
  provisioning)
- Removed Phase 7 server OIDC configuration (provisioned servers get
  OIDC config from env vars, sketched below, and claim mappings via
  Logto Custom JWT)
- Removed server-related env vars from bootstrap (SERVER_ENDPOINT, etc.)
- Removed jardata volume from dev overlay
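
For illustration, a minimal sketch of how a provisioned server instance could
receive its OIDC settings purely through env vars. The variable names mirror
the ones removed from docker-compose below; the "acme" tenant, the values, and
the docker run invocation itself are assumptions, not the vendor console's
actual provisioning call:

  # Illustrative only: names taken from the removed compose service, values are placeholders.
  docker run -d \
    --name cameleer3-server-acme \
    --network cameleer-saas_cameleer \
    -e CAMELEER_TENANT_ID=acme \
    -e CAMELEER_OIDC_ISSUER_URI=https://localhost/oidc \
    -e CAMELEER_OIDC_JWK_SET_URI=http://logto:3001/oidc/jwks \
    -e CAMELEER_OIDC_AUDIENCE=https://api.cameleer.local \
    -e CAMELEER_CORS_ALLOWED_ORIGINS=https://localhost \
    gitea.siegeln.net/cameleer/cameleer3-server:latest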

Clean slate: docker compose up gives you Traefik + PostgreSQL +
ClickHouse + Logto + SaaS platform + vendor seed. Everything else
(servers, tenants, users) created through the vendor console.
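
As a quick sanity check (plain docker compose commands; the expected service
list is the one summarized above):

  docker compose up -d
  docker compose ps   # expect only traefik, postgres, clickhouse, logto, the SaaS platform and the one-shot bootstrap/seed container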

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
hsiegeln
2026-04-10 08:24:28 +02:00
parent aa663a9c9e
commit 3efae43879
3 changed files with 3 additions and 187 deletions


@@ -31,31 +31,6 @@ services:
CAMELEER_NETWORK: cameleer-saas_cameleer
CAMELEER_TRAEFIK_NETWORK: cameleer-traefik
cameleer3-server:
ports:
- "8081:8081"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- jardata:/data/jars
group_add:
- "0"
environment:
CAMELEER_RUNTIME_ENABLED: "true"
CAMELEER_JAR_STORAGE_PATH: /data/jars
CAMELEER_RUNTIME_BASE_IMAGE: gitea.siegeln.net/cameleer/cameleer-runtime-base:latest
CAMELEER_DOCKER_NETWORK: cameleer-saas_cameleer
CAMELEER_SERVER_URL: http://cameleer3-server:8081
CAMELEER_ROUTING_DOMAIN: ${PUBLIC_HOST:-localhost}
CAMELEER_ROUTING_MODE: path
CAMELEER_JAR_DOCKER_VOLUME: cameleer-saas_jardata
cameleer3-server-ui:
ports:
- "8082:80"
clickhouse:
ports:
- "8123:8123"
volumes:
jardata:


@@ -103,8 +103,6 @@ services:
depends_on:
logto:
condition: service_healthy
cameleer3-server:
condition: service_healthy
restart: "no"
entrypoint: ["sh", "/scripts/logto-bootstrap.sh"]
environment:
@@ -119,12 +117,6 @@ services:
PG_DB_SAAS: ${POSTGRES_DB:-cameleer_saas}
SAAS_ADMIN_USER: ${SAAS_ADMIN_USER:-admin}
SAAS_ADMIN_PASS: ${SAAS_ADMIN_PASS:-admin}
TENANT_ADMIN_USER: ${TENANT_ADMIN_USER:-camel}
TENANT_ADMIN_PASS: ${TENANT_ADMIN_PASS:-camel}
CAMELEER_AUTH_TOKEN: ${CAMELEER_AUTH_TOKEN:-default-bootstrap-token}
SERVER_ENDPOINT: http://cameleer3-server:8081
SERVER_UI_USER: ${CAMELEER_UI_USER:-admin}
SERVER_UI_PASS: ${CAMELEER_UI_PASSWORD:-admin}
volumes:
- ./docker/logto-bootstrap.sh:/scripts/logto-bootstrap.sh:ro
- bootstrapdata:/data
@@ -151,7 +143,6 @@ services:
LOGTO_JWK_SET_URI: ${LOGTO_ENDPOINT:-http://logto:3001}/oidc/jwks
LOGTO_M2M_CLIENT_ID: ${LOGTO_M2M_CLIENT_ID:-}
LOGTO_M2M_CLIENT_SECRET: ${LOGTO_M2M_CLIENT_SECRET:-}
CAMELEER3_SERVER_ENDPOINT: http://cameleer3-server:8081
labels:
- traefik.enable=true
- traefik.http.routers.saas.rule=PathPrefix(`/platform`)
@@ -161,62 +152,6 @@ services:
networks:
- cameleer
cameleer3-server:
image: ${CAMELEER3_SERVER_IMAGE:-gitea.siegeln.net/cameleer/cameleer3-server}:${VERSION:-latest}
restart: unless-stopped
depends_on:
postgres:
condition: service_healthy
clickhouse:
condition: service_started
environment:
SPRING_DATASOURCE_URL: jdbc:postgresql://postgres:5432/cameleer3
SPRING_DATASOURCE_USERNAME: ${POSTGRES_USER:-cameleer}
SPRING_DATASOURCE_PASSWORD: ${POSTGRES_PASSWORD:-cameleer_dev}
CLICKHOUSE_URL: jdbc:clickhouse://clickhouse:8123/cameleer
CAMELEER_AUTH_TOKEN: ${CAMELEER_AUTH_TOKEN:-default-bootstrap-token}
CAMELEER_JWT_SECRET: ${CAMELEER_JWT_SECRET:-cameleer-dev-jwt-secret-change-in-production}
CAMELEER_TENANT_ID: ${CAMELEER_TENANT_SLUG:-default}
CAMELEER_OIDC_ISSUER_URI: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}/oidc
CAMELEER_OIDC_JWK_SET_URI: ${LOGTO_ENDPOINT:-http://logto:3001}/oidc/jwks
CAMELEER_OIDC_TLS_SKIP_VERIFY: "true" # dev only — disable in production with real certs
CAMELEER_OIDC_AUDIENCE: ${CAMELEER_OIDC_AUDIENCE:-https://api.cameleer.local}
CAMELEER_CORS_ALLOWED_ORIGINS: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}
healthcheck:
test: ["CMD-SHELL", "curl -sf http://localhost:8081/api/v1/health || exit 1"]
interval: 5s
timeout: 5s
retries: 30
start_period: 15s
labels:
- traefik.enable=false
networks:
cameleer:
cameleer-traefik:
aliases:
- cameleer3-server
cameleer3-server-ui:
image: ${CAMELEER3_SERVER_UI_IMAGE:-gitea.siegeln.net/cameleer/cameleer3-server-ui}:${VERSION:-latest}
restart: unless-stopped
depends_on:
cameleer3-server:
condition: service_healthy
environment:
CAMELEER_API_URL: http://cameleer3-server:8081
BASE_PATH: /server
labels:
- traefik.enable=true
- traefik.http.routers.server-ui.rule=PathPrefix(`/server`)
- traefik.http.routers.server-ui.entrypoints=websecure
- traefik.http.routers.server-ui.tls=true
- traefik.http.routers.server-ui.middlewares=server-ui-strip
- traefik.http.middlewares.server-ui-strip.stripprefix.prefixes=/server
- traefik.http.routers.server-ui.service=server-ui
- traefik.http.services.server-ui.loadbalancer.server.port=80
networks:
- cameleer
clickhouse:
image: clickhouse/clickhouse-server:latest
restart: unless-stopped


@@ -504,104 +504,10 @@ fi
fi # end: ADMIN_TOKEN check
fi # end: M_ADMIN_SECRET check
# No viewer user — tenant users are created by the vendor during tenant provisioning.
# No example organization — tenants are created via the vendor console.
# No server OIDC config — each provisioned server gets OIDC from env vars.
# --- Viewer user (for testing read-only OIDC role in server) ---
log "Checking for viewer user '$TENANT_ADMIN_USER'..."
TENANT_USER_ID=$(api_get "/api/users?search=$TENANT_ADMIN_USER" | jq -r ".[] | select(.username == \"$TENANT_ADMIN_USER\") | .id")
if [ -n "$TENANT_USER_ID" ]; then
log "Viewer user exists: $TENANT_USER_ID"
else
log "Creating viewer user '$TENANT_ADMIN_USER'..."
TENANT_RESPONSE=$(api_post "/api/users" "{
\"username\": \"$TENANT_ADMIN_USER\",
\"password\": \"$TENANT_ADMIN_PASS\",
\"name\": \"Viewer\"
}")
TENANT_USER_ID=$(echo "$TENANT_RESPONSE" | jq -r '.id')
log "Created viewer user: $TENANT_USER_ID"
fi
# ============================================================
# PHASE 6: Create organization + add users
# ============================================================
# No example organization created — the vendor creates tenants via the SaaS UI.
# Users (admin, viewer) are created above but not added to any org.
ORG_ID=""
log "Skipping example organization (tenants are created by the vendor)."
# ============================================================
# PHASE 7: Configure cameleer3-server OIDC
# ============================================================
SERVER_HEALTHY="no"
for i in 1 2 3; do
if curl -sf "${SERVER_ENDPOINT}/api/v1/health" >/dev/null 2>&1; then
SERVER_HEALTHY="yes"
break
fi
sleep 2
done
log "Phase 7 check: SERVER_HEALTHY=$SERVER_HEALTHY, TRAD_SECRET length=${#TRAD_SECRET}"
if [ "$SERVER_HEALTHY" = "yes" ] && [ -n "$TRAD_SECRET" ]; then
log "Configuring cameleer3-server OIDC..."
# Login to server as admin
SERVER_TOKEN_RESPONSE=$(curl -s -X POST "${SERVER_ENDPOINT}/api/v1/auth/login" \
-H "Content-Type: application/json" \
-d "{\"username\": \"$SERVER_UI_USER\", \"password\": \"$SERVER_UI_PASS\"}")
SERVER_TOKEN=$(echo "$SERVER_TOKEN_RESPONSE" | jq -r '.accessToken' 2>/dev/null)
if [ -n "$SERVER_TOKEN" ] && [ "$SERVER_TOKEN" != "null" ]; then
# Configure OIDC
OIDC_RESPONSE=$(curl -s -X PUT "${SERVER_ENDPOINT}/api/v1/admin/oidc" \
-H "Authorization: Bearer $SERVER_TOKEN" \
-H "Content-Type: application/json" \
-d "{
\"enabled\": true,
\"issuerUri\": \"$LOGTO_PUBLIC_ENDPOINT/oidc\",
\"clientId\": \"$TRAD_ID\",
\"clientSecret\": \"$TRAD_SECRET\",
\"autoSignup\": true,
\"defaultRoles\": [\"VIEWER\"],
\"displayNameClaim\": \"name\",
\"rolesClaim\": \"roles\",
\"audience\": \"$API_RESOURCE_INDICATOR\",
\"additionalScopes\": []
}")
log "OIDC config response: $(echo "$OIDC_RESPONSE" | head -c 200)"
log "cameleer3-server OIDC configured."
# Seed claim mapping rules (roles → server RBAC)
log "Seeding claim mapping rules..."
EXISTING_MAPPINGS=$(curl -s -H "Authorization: Bearer $SERVER_TOKEN" \
"${SERVER_ENDPOINT}/api/v1/admin/claim-mappings" 2>/dev/null || echo "[]")
seed_claim_mapping() {
local match_value="$1"
local target="$2"
local priority="$3"
local exists=$(echo "$EXISTING_MAPPINGS" | jq -r ".[] | select(.matchValue == \"$match_value\") | .id")
if [ -n "$exists" ]; then
log " Claim mapping '$match_value' → $target exists"
else
local resp=$(curl -s -X POST "${SERVER_ENDPOINT}/api/v1/admin/claim-mappings" \
-H "Authorization: Bearer $SERVER_TOKEN" \
-H "Content-Type: application/json" \
-d "{\"claim\":\"roles\",\"matchType\":\"contains\",\"matchValue\":\"$match_value\",\"action\":\"assignRole\",\"target\":\"$target\",\"priority\":$priority}")
log " Created claim mapping '$match_value' → $target"
fi
}
seed_claim_mapping "server:admin" "ADMIN" 10
seed_claim_mapping "server:operator" "OPERATOR" 20
log "Claim mapping rules seeded."
else
log "WARNING: Could not login to cameleer3-server — skipping OIDC config"
fi
else
log "WARNING: cameleer3-server not available or no Traditional app secret — skipping OIDC config"
fi
# ============================================================
# PHASE 7b: Configure Logto Custom JWT for access tokens