Compare commits
169 Commits
feature/sa
...
24a443ef30
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
24a443ef30 | ||
|
|
d7eb700860 | ||
|
|
c1458e4995 | ||
|
|
b79a7fe405 | ||
|
|
6d6c1f3562 | ||
|
|
0e3f383cf4 | ||
|
|
cd6dd1e5af | ||
|
|
dfa2a6bfa2 | ||
|
|
a7196ff4c1 | ||
|
|
17c6723f7e | ||
|
|
91e93696ed | ||
|
|
57e41e407c | ||
|
|
bc46af5cea | ||
|
|
03fb414981 | ||
|
|
553ecc1490 | ||
|
|
dec1c53d30 | ||
|
|
ace6ad0cf2 | ||
|
|
4a67677158 | ||
|
|
27c3f4d136 | ||
|
|
fe6682e520 | ||
|
|
012c866594 | ||
|
|
4e553a6c42 | ||
|
|
f254f2700f | ||
|
|
17d8d98d5f | ||
|
|
bfb26d9aa5 | ||
|
|
cd4266ffc6 | ||
|
|
74a1e02cb8 | ||
|
|
b3a19098c5 | ||
|
|
6b1dcba876 | ||
|
|
38125f9ecc | ||
|
|
6b95cf78ea | ||
|
|
b70d95cbb9 | ||
|
|
8b9045b0e2 | ||
|
|
4fe642b91d | ||
|
|
7e13b4ee5d | ||
|
|
85eabd86ef | ||
|
|
b44f6338f8 | ||
|
|
4ff04c386e | ||
|
|
b38f02eae3 | ||
|
|
8c504b714d | ||
|
|
83801d2499 | ||
|
|
9042356e81 | ||
|
|
f97e951d87 | ||
|
|
fa6bca0add | ||
|
|
11dd6a354f | ||
|
|
7f15177310 | ||
|
|
b01f6e5109 | ||
|
|
8146f072df | ||
|
|
f13fd3faf0 | ||
|
|
5e5bc97bf5 | ||
|
|
7fc80cad58 | ||
|
|
6eabd0cf2e | ||
|
|
4debee966a | ||
|
|
1e348eb8ca | ||
|
|
f136502a35 | ||
|
|
bf367b1db7 | ||
|
|
f5165add13 | ||
|
|
ec38d0b1c2 | ||
|
|
6cd82de5f9 | ||
|
|
0a0898b2f7 | ||
|
|
6864081550 | ||
|
|
fe5838b40f | ||
|
|
1b57f03973 | ||
|
|
0a06615ae2 | ||
|
|
16a2ff3174 | ||
|
|
c2ccf9d233 | ||
|
|
06c85edd8e | ||
|
|
9514ab69c8 | ||
|
|
d3a9be8f2e | ||
|
|
85e0d6156a | ||
|
|
96aa6579b0 | ||
|
|
da4a263cd7 | ||
|
|
879accfc7f | ||
|
|
35a62463b3 | ||
|
|
92503a1061 | ||
|
|
95a92ae9e5 | ||
|
|
5aa8586940 | ||
|
|
776a01d87b | ||
|
|
0b736a92f9 | ||
|
|
df90814cc3 | ||
|
|
8cf44f6e2c | ||
|
|
5e69628a51 | ||
|
|
9163f919c8 | ||
|
|
3b8b76d53e | ||
|
|
e5523c969e | ||
|
|
e2e5c794a2 | ||
|
|
d5eead888d | ||
|
|
4121bd64b2 | ||
|
|
dd8553a8b4 | ||
|
|
3284304c1f | ||
|
|
6f8b84fb1a | ||
|
|
d2caa737b9 | ||
|
|
875b07fb3a | ||
|
|
4fdf171912 | ||
|
|
2239d3d980 | ||
|
|
8eef7e170b | ||
|
|
d7ce0aaf8c | ||
|
|
a0c12b8ee6 | ||
|
|
a5445e332e | ||
|
|
cab6e409b9 | ||
|
|
0fe084bcb2 | ||
|
|
3ae8fa18cd | ||
|
|
82f62ca0ff | ||
|
|
dd30ee77d4 | ||
|
|
a3a6f99958 | ||
|
|
22752ffcb1 | ||
|
|
a48c4bfd08 | ||
|
|
45bcc954ac | ||
|
|
51a1aef10e | ||
|
|
2607ef5dbe | ||
|
|
0a1e848ef7 | ||
|
|
6dc5e558a3 | ||
|
|
a3a1643b37 | ||
|
|
4447d79c92 | ||
|
|
7e7a07470b | ||
|
|
252c18bcff | ||
|
|
269c679e9c | ||
|
|
e559267f1e | ||
|
|
4341656a5e | ||
|
|
2cda065c06 | ||
|
|
bcad83cc40 | ||
|
|
0d47c2ec7c | ||
|
|
247ec030e5 | ||
|
|
a1acc0bc62 | ||
|
|
8b94937d38 | ||
|
|
1750fe64a2 | ||
|
|
4572a4bb57 | ||
|
|
9824d06824 | ||
|
|
e24c6da025 | ||
|
|
6bdcbf840b | ||
|
|
4699db5465 | ||
|
|
d911fd2201 | ||
|
|
b4f9277220 | ||
|
|
eaf109549d | ||
|
|
3a6b94c1eb | ||
|
|
b727bc771d | ||
|
|
7ee2985626 | ||
|
|
3efae43879 | ||
|
|
aa663a9c9e | ||
|
|
f5ef8e6488 | ||
|
|
0a43a7dcd1 | ||
|
|
3b345881c6 | ||
|
|
2dc75c4361 | ||
|
|
b7a0530466 | ||
|
|
ebdb4f9450 | ||
|
|
5ed33807d8 | ||
|
|
00476c974f | ||
|
|
c674785c82 | ||
|
|
4087ce8f29 | ||
|
|
39c3b39711 | ||
|
|
cdd495d985 | ||
|
|
17fbe73e60 | ||
|
|
faac0048c3 | ||
|
|
e6f2f17fa1 | ||
|
|
28d044efbc | ||
|
|
6a81053d37 | ||
|
|
fd41a056eb | ||
|
|
9ecaf22f09 | ||
|
|
d2f6b02a5f | ||
|
|
bf3aa57274 | ||
|
|
e56e3fca8a | ||
|
|
127834ce4d | ||
|
|
6bdb02ff5a | ||
|
|
96a5b1d9f1 | ||
|
|
771e9d1081 | ||
|
|
ebba021448 | ||
|
|
81d570fd63 | ||
|
|
7b92de4017 | ||
| 0ba896ada4 |
57
.env.example
57
.env.example
@@ -1,32 +1,47 @@
|
||||
# Cameleer SaaS Environment Variables
|
||||
# Copy to .env and fill in values
|
||||
# Cameleer SaaS — Environment Configuration
|
||||
# Copy to .env and fill in values for production
|
||||
|
||||
# Application version
|
||||
# Image version
|
||||
VERSION=latest
|
||||
|
||||
# Public access
|
||||
PUBLIC_HOST=localhost
|
||||
PUBLIC_PROTOCOL=https
|
||||
|
||||
# Ports
|
||||
HTTP_PORT=80
|
||||
HTTPS_PORT=443
|
||||
LOGTO_CONSOLE_PORT=3002
|
||||
|
||||
# PostgreSQL
|
||||
POSTGRES_USER=cameleer
|
||||
POSTGRES_PASSWORD=change_me_in_production
|
||||
POSTGRES_DB=cameleer_saas
|
||||
|
||||
# Logto Identity Provider
|
||||
LOGTO_ENDPOINT=http://logto:3001
|
||||
LOGTO_PUBLIC_ENDPOINT=http://localhost:3001
|
||||
LOGTO_ISSUER_URI=http://localhost:3001/oidc
|
||||
LOGTO_JWK_SET_URI=http://logto:3001/oidc/jwks
|
||||
LOGTO_DB_PASSWORD=change_me_in_production
|
||||
LOGTO_M2M_CLIENT_ID=
|
||||
LOGTO_M2M_CLIENT_SECRET=
|
||||
LOGTO_SPA_CLIENT_ID=
|
||||
# ClickHouse
|
||||
CLICKHOUSE_PASSWORD=change_me_in_production
|
||||
|
||||
# Ed25519 Keys (mount PEM files)
|
||||
CAMELEER_JWT_PRIVATE_KEY_PATH=/etc/cameleer/keys/ed25519.key
|
||||
CAMELEER_JWT_PUBLIC_KEY_PATH=/etc/cameleer/keys/ed25519.pub
|
||||
# Admin user (created by bootstrap)
|
||||
SAAS_ADMIN_USER=admin
|
||||
SAAS_ADMIN_PASS=change_me_in_production
|
||||
|
||||
# Domain (for Traefik TLS)
|
||||
DOMAIN=localhost
|
||||
# TLS (leave empty for self-signed)
|
||||
# NODE_TLS_REJECT=0 # Set to 1 when using real certificates
|
||||
# CERT_FILE=
|
||||
# KEY_FILE=
|
||||
# CA_FILE=
|
||||
|
||||
CAMELEER_AUTH_TOKEN=change_me_bootstrap_token
|
||||
CAMELEER_CONTAINER_MEMORY_LIMIT=512m
|
||||
CAMELEER_CONTAINER_CPU_SHARES=512
|
||||
CAMELEER_TENANT_SLUG=default
|
||||
# Vendor account (optional)
|
||||
VENDOR_SEED_ENABLED=false
|
||||
# VENDOR_USER=vendor
|
||||
# VENDOR_PASS=change_me
|
||||
|
||||
# Docker socket GID (run: stat -c '%g' /var/run/docker.sock)
|
||||
# DOCKER_GID=0
|
||||
|
||||
# Docker images (override for custom registries)
|
||||
# TRAEFIK_IMAGE=gitea.siegeln.net/cameleer/cameleer-traefik
|
||||
# POSTGRES_IMAGE=gitea.siegeln.net/cameleer/cameleer-postgres
|
||||
# CLICKHOUSE_IMAGE=gitea.siegeln.net/cameleer/cameleer-clickhouse
|
||||
# LOGTO_IMAGE=gitea.siegeln.net/cameleer/cameleer-logto
|
||||
# CAMELEER_IMAGE=gitea.siegeln.net/cameleer/cameleer-saas
|
||||
|
||||
@@ -40,7 +40,7 @@ jobs:
|
||||
- name: Build and Test (unit tests only)
|
||||
run: >-
|
||||
mvn clean verify -B
|
||||
-Dsurefire.excludes="**/AuthControllerTest.java,**/TenantControllerTest.java,**/LicenseControllerTest.java,**/AuditRepositoryTest.java,**/CameleerSaasApplicationTest.java,**/EnvironmentControllerTest.java,**/AppControllerTest.java,**/DeploymentControllerTest.java,**/AgentStatusControllerTest.java"
|
||||
-Dsurefire.excludes="**/AuthControllerTest.java,**/TenantControllerTest.java,**/LicenseControllerTest.java,**/AuditRepositoryTest.java,**/CameleerSaasApplicationTest.java,**/EnvironmentControllerTest.java,**/AppControllerTest.java,**/DeploymentControllerTest.java,**/AgentStatusControllerTest.java,**/VendorTenantControllerTest.java,**/TenantPortalControllerTest.java"
|
||||
|
||||
- name: Build sign-in UI
|
||||
run: |
|
||||
@@ -139,6 +139,39 @@ jobs:
|
||||
--cache-from type=registry,ref=gitea.siegeln.net/cameleer/cameleer-logto:buildcache \
|
||||
--cache-to type=registry,ref=gitea.siegeln.net/cameleer/cameleer-logto:buildcache,mode=max \
|
||||
--provenance=false \
|
||||
--push ui/sign-in/
|
||||
--push .
|
||||
env:
|
||||
REGISTRY_TOKEN: ${{ secrets.REGISTRY_TOKEN }}
|
||||
|
||||
- name: Build and push PostgreSQL image
|
||||
run: |
|
||||
TAGS="-t gitea.siegeln.net/cameleer/cameleer-postgres:${{ github.sha }}"
|
||||
for TAG in $IMAGE_TAGS; do
|
||||
TAGS="$TAGS -t gitea.siegeln.net/cameleer/cameleer-postgres:$TAG"
|
||||
done
|
||||
docker buildx build --platform linux/amd64 \
|
||||
$TAGS \
|
||||
--provenance=false \
|
||||
--push docker/cameleer-postgres/
|
||||
|
||||
- name: Build and push ClickHouse image
|
||||
run: |
|
||||
TAGS="-t gitea.siegeln.net/cameleer/cameleer-clickhouse:${{ github.sha }}"
|
||||
for TAG in $IMAGE_TAGS; do
|
||||
TAGS="$TAGS -t gitea.siegeln.net/cameleer/cameleer-clickhouse:$TAG"
|
||||
done
|
||||
docker buildx build --platform linux/amd64 \
|
||||
$TAGS \
|
||||
--provenance=false \
|
||||
--push docker/cameleer-clickhouse/
|
||||
|
||||
- name: Build and push Traefik image
|
||||
run: |
|
||||
TAGS="-t gitea.siegeln.net/cameleer/cameleer-traefik:${{ github.sha }}"
|
||||
for TAG in $IMAGE_TAGS; do
|
||||
TAGS="$TAGS -t gitea.siegeln.net/cameleer/cameleer-traefik:$TAG"
|
||||
done
|
||||
docker buildx build --platform linux/amd64 \
|
||||
$TAGS \
|
||||
--provenance=false \
|
||||
--push docker/cameleer-traefik/
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -25,3 +25,4 @@ Thumbs.db
|
||||
# Generated by postinstall from @cameleer/design-system
|
||||
ui/public/favicon.svg
|
||||
docker/runtime-base/agent.jar
|
||||
.gitnexus
|
||||
|
||||
101
AGENTS.md
Normal file
101
AGENTS.md
Normal file
@@ -0,0 +1,101 @@
|
||||
<!-- gitnexus:start -->
|
||||
# GitNexus — Code Intelligence
|
||||
|
||||
This project is indexed by GitNexus as **cameleer-saas** (2676 symbols, 5768 relationships, 224 execution flows). Use the GitNexus MCP tools to understand code, assess impact, and navigate safely.
|
||||
|
||||
> If any GitNexus tool warns the index is stale, run `npx gitnexus analyze` in terminal first.
|
||||
|
||||
## Always Do
|
||||
|
||||
- **MUST run impact analysis before editing any symbol.** Before modifying a function, class, or method, run `gitnexus_impact({target: "symbolName", direction: "upstream"})` and report the blast radius (direct callers, affected processes, risk level) to the user.
|
||||
- **MUST run `gitnexus_detect_changes()` before committing** to verify your changes only affect expected symbols and execution flows.
|
||||
- **MUST warn the user** if impact analysis returns HIGH or CRITICAL risk before proceeding with edits.
|
||||
- When exploring unfamiliar code, use `gitnexus_query({query: "concept"})` to find execution flows instead of grepping. It returns process-grouped results ranked by relevance.
|
||||
- When you need full context on a specific symbol — callers, callees, which execution flows it participates in — use `gitnexus_context({name: "symbolName"})`.
|
||||
|
||||
## When Debugging
|
||||
|
||||
1. `gitnexus_query({query: "<error or symptom>"})` — find execution flows related to the issue
|
||||
2. `gitnexus_context({name: "<suspect function>"})` — see all callers, callees, and process participation
|
||||
3. `READ gitnexus://repo/cameleer-saas/process/{processName}` — trace the full execution flow step by step
|
||||
4. For regressions: `gitnexus_detect_changes({scope: "compare", base_ref: "main"})` — see what your branch changed
|
||||
|
||||
## When Refactoring
|
||||
|
||||
- **Renaming**: MUST use `gitnexus_rename({symbol_name: "old", new_name: "new", dry_run: true})` first. Review the preview — graph edits are safe, text_search edits need manual review. Then run with `dry_run: false`.
|
||||
- **Extracting/Splitting**: MUST run `gitnexus_context({name: "target"})` to see all incoming/outgoing refs, then `gitnexus_impact({target: "target", direction: "upstream"})` to find all external callers before moving code.
|
||||
- After any refactor: run `gitnexus_detect_changes({scope: "all"})` to verify only expected files changed.
|
||||
|
||||
## Never Do
|
||||
|
||||
- NEVER edit a function, class, or method without first running `gitnexus_impact` on it.
|
||||
- NEVER ignore HIGH or CRITICAL risk warnings from impact analysis.
|
||||
- NEVER rename symbols with find-and-replace — use `gitnexus_rename` which understands the call graph.
|
||||
- NEVER commit changes without running `gitnexus_detect_changes()` to check affected scope.
|
||||
|
||||
## Tools Quick Reference
|
||||
|
||||
| Tool | When to use | Command |
|
||||
|------|-------------|---------|
|
||||
| `query` | Find code by concept | `gitnexus_query({query: "auth validation"})` |
|
||||
| `context` | 360-degree view of one symbol | `gitnexus_context({name: "validateUser"})` |
|
||||
| `impact` | Blast radius before editing | `gitnexus_impact({target: "X", direction: "upstream"})` |
|
||||
| `detect_changes` | Pre-commit scope check | `gitnexus_detect_changes({scope: "staged"})` |
|
||||
| `rename` | Safe multi-file rename | `gitnexus_rename({symbol_name: "old", new_name: "new", dry_run: true})` |
|
||||
| `cypher` | Custom graph queries | `gitnexus_cypher({query: "MATCH ..."})` |
|
||||
|
||||
## Impact Risk Levels
|
||||
|
||||
| Depth | Meaning | Action |
|
||||
|-------|---------|--------|
|
||||
| d=1 | WILL BREAK — direct callers/importers | MUST update these |
|
||||
| d=2 | LIKELY AFFECTED — indirect deps | Should test |
|
||||
| d=3 | MAY NEED TESTING — transitive | Test if critical path |
|
||||
|
||||
## Resources
|
||||
|
||||
| Resource | Use for |
|
||||
|----------|---------|
|
||||
| `gitnexus://repo/cameleer-saas/context` | Codebase overview, check index freshness |
|
||||
| `gitnexus://repo/cameleer-saas/clusters` | All functional areas |
|
||||
| `gitnexus://repo/cameleer-saas/processes` | All execution flows |
|
||||
| `gitnexus://repo/cameleer-saas/process/{name}` | Step-by-step execution trace |
|
||||
|
||||
## Self-Check Before Finishing
|
||||
|
||||
Before completing any code modification task, verify:
|
||||
1. `gitnexus_impact` was run for all modified symbols
|
||||
2. No HIGH/CRITICAL risk warnings were ignored
|
||||
3. `gitnexus_detect_changes()` confirms changes match expected scope
|
||||
4. All d=1 (WILL BREAK) dependents were updated
|
||||
|
||||
## Keeping the Index Fresh
|
||||
|
||||
After committing code changes, the GitNexus index becomes stale. Re-run analyze to update it:
|
||||
|
||||
```bash
|
||||
npx gitnexus analyze
|
||||
```
|
||||
|
||||
If the index previously included embeddings, preserve them by adding `--embeddings`:
|
||||
|
||||
```bash
|
||||
npx gitnexus analyze --embeddings
|
||||
```
|
||||
|
||||
To check whether embeddings exist, inspect `.gitnexus/meta.json` — the `stats.embeddings` field shows the count (0 means no embeddings). **Running analyze without `--embeddings` will delete any previously generated embeddings.**
|
||||
|
||||
> Claude Code users: A PostToolUse hook handles this automatically after `git commit` and `git merge`.
|
||||
|
||||
## CLI
|
||||
|
||||
| Task | Read this skill file |
|
||||
|------|---------------------|
|
||||
| Understand architecture / "How does X work?" | `.claude/skills/gitnexus/gitnexus-exploring/SKILL.md` |
|
||||
| Blast radius / "What breaks if I change X?" | `.claude/skills/gitnexus/gitnexus-impact-analysis/SKILL.md` |
|
||||
| Trace bugs / "Why is X failing?" | `.claude/skills/gitnexus/gitnexus-debugging/SKILL.md` |
|
||||
| Rename / extract / split / refactor | `.claude/skills/gitnexus/gitnexus-refactoring/SKILL.md` |
|
||||
| Tools, resources, schema reference | `.claude/skills/gitnexus/gitnexus-guide/SKILL.md` |
|
||||
| Index, status, clean, wiki CLI commands | `.claude/skills/gitnexus/gitnexus-cli/SKILL.md` |
|
||||
|
||||
<!-- gitnexus:end -->
|
||||
347
CLAUDE.md
347
CLAUDE.md
@@ -4,7 +4,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
|
||||
|
||||
## Project
|
||||
|
||||
Cameleer SaaS — multi-tenant SaaS platform wrapping the Cameleer observability stack (Java agent + server) for Apache Camel applications. Customers get managed observability for their Camel integrations without running infrastructure.
|
||||
Cameleer SaaS — **vendor management plane** for the Cameleer observability stack. Two personas: **vendor** (platform:admin) manages the platform and provisions tenants; **tenant admin** (tenant:manage) manages their observability instance. The vendor creates tenants, which provisions per-tenant cameleer3-server + UI instances via Docker API. No example tenant — clean slate bootstrap, vendor creates everything.
|
||||
|
||||
## Ecosystem
|
||||
|
||||
@@ -29,10 +29,37 @@ Agent-server protocol is defined in `cameleer3/cameleer3-common/PROTOCOL.md`. Th
|
||||
- `PublicConfigController.java` — GET /api/config (Logto endpoint, SPA client ID, scopes)
|
||||
- `MeController.java` — GET /api/me (authenticated user, tenant list)
|
||||
|
||||
**tenant/** — Tenant lifecycle
|
||||
**tenant/** — Tenant data model
|
||||
- `TenantEntity.java` — JPA entity (id, name, slug, tier, status, logto_org_id, stripe IDs, settings JSONB)
|
||||
- `TenantService.java` — create tenant -> Logto org, activate, suspend
|
||||
- `TenantController.java` — POST create, GET list, GET by ID
|
||||
|
||||
**vendor/** — Vendor console (platform:admin only)
|
||||
- `VendorTenantService.java` — orchestrates tenant creation (sync: DB + Logto + license, async: Docker provisioning + config push), suspend/activate, delete, restart server, upgrade server (force-pull + re-provision), license renewal
|
||||
- `VendorTenantController.java` — REST at `/api/vendor/tenants` (platform:admin required). List endpoint returns `VendorTenantSummary` with fleet health data (agentCount, environmentCount, agentLimit) fetched in parallel via `CompletableFuture`.
|
||||
- `InfrastructureService.java` — raw JDBC queries against shared PostgreSQL and ClickHouse for per-tenant infrastructure monitoring (schema sizes, table stats, row counts, disk usage)
|
||||
- `InfrastructureController.java` — REST at `/api/vendor/infrastructure` (platform:admin required). PostgreSQL and ClickHouse overview with per-tenant breakdown.
|
||||
|
||||
**portal/** — Tenant admin portal (org-scoped)
|
||||
- `TenantPortalService.java` — customer-facing: dashboard (health + agent/env counts from server via M2M), license, SSO connectors, team, settings (public endpoint URL), server restart/upgrade, password management (own + team + server admin)
|
||||
- `TenantPortalController.java` — REST at `/api/tenant/*` (org-scoped, includes CA cert management at `/api/tenant/ca`, password endpoints at `/api/tenant/password` and `/api/tenant/server/admin-password`)
|
||||
|
||||
**provisioning/** — Pluggable tenant provisioning
|
||||
- `TenantProvisioner.java` — pluggable interface (like server's RuntimeOrchestrator)
|
||||
- `DockerTenantProvisioner.java` — Docker implementation, creates per-tenant server + UI containers. `upgrade(slug)` force-pulls latest images and removes server+UI containers (preserves app containers, volumes, networks) for re-provisioning. `remove(slug)` does full cleanup: label-based container removal, env networks, tenant network, JAR volume.
|
||||
- `TenantDataCleanupService.java` — GDPR data erasure on tenant delete: drops PostgreSQL `tenant_{slug}` schema, deletes ClickHouse data across all tables with `tenant_id` column
|
||||
- `TenantProvisionerAutoConfig.java` — auto-detects Docker socket
|
||||
- `DockerCertificateManager.java` — file-based cert management with atomic `.wip` swap (Docker volume)
|
||||
- `DisabledCertificateManager.java` — no-op when certs dir unavailable
|
||||
- `CertificateManagerAutoConfig.java` — auto-detects `/certs` directory
|
||||
|
||||
**certificate/** — TLS certificate lifecycle management
|
||||
- `CertificateManager.java` — provider interface (Docker now, K8s later)
|
||||
- `CertificateService.java` — orchestrates stage/activate/restore/discard, DB metadata, tenant CA staleness
|
||||
- `CertificateController.java` — REST at `/api/vendor/certificates` (platform:admin required)
|
||||
- `CertificateEntity.java` — JPA entity (status: ACTIVE/STAGED/ARCHIVED, subject, fingerprint, etc.)
|
||||
- `CertificateStartupListener.java` — seeds DB from filesystem on boot (for bootstrap-generated certs)
|
||||
- `TenantCaCertEntity.java` — JPA entity for per-tenant CA certs (PEM stored in DB, multiple per tenant)
|
||||
- `TenantCaCertRepository.java` — queries by tenant, status, all active across tenants
|
||||
- `TenantCaCertService.java` — stage/activate/delete tenant CAs, rebuilds aggregated `ca.pem` on changes
|
||||
|
||||
**license/** — License management
|
||||
- `LicenseEntity.java` — JPA entity (id, tenant_id, tier, features JSONB, limits JSONB, expires_at)
|
||||
@@ -41,25 +68,26 @@ Agent-server protocol is defined in `cameleer3/cameleer3-common/PROTOCOL.md`. Th
|
||||
|
||||
**identity/** — Logto & server integration
|
||||
- `LogtoConfig.java` — Logto endpoint, M2M credentials (reads from bootstrap file)
|
||||
- `LogtoManagementClient.java` — Logto Management API calls (create org, create user, add to org)
|
||||
- `ServerApiClient.java` — M2M client for cameleer3-server API (Logto M2M token, `X-Cameleer-Protocol-Version: 1` header)
|
||||
- `LogtoManagementClient.java` — Logto Management API calls (create org, create user, add to org, get user, SSO connectors, JIT provisioning, password updates via `PATCH /api/users/{id}/password`)
|
||||
- `ServerApiClient.java` — M2M client for cameleer3-server API (Logto M2M token, `X-Cameleer-Protocol-Version: 1` header). Health checks, license/OIDC push, agent count, environment count, server admin password reset per tenant server.
|
||||
|
||||
**audit/** — Audit logging
|
||||
- `AuditEntity.java` — JPA entity (actor_id, tenant_id, action, resource, status)
|
||||
- `AuditService.java` — log audit events (TENANT_CREATE, TENANT_UPDATE, etc.)
|
||||
- `AuditEntity.java` — JPA entity (actor_id, actor_email, tenant_id, action, resource, status)
|
||||
- `AuditService.java` — log audit events (TENANT_CREATE, TENANT_UPDATE, etc.); auto-resolves actor name from Logto when actorEmail is null (cached in-memory)
|
||||
|
||||
### React Frontend (`ui/src/`)
|
||||
|
||||
- `main.tsx` — React 19 root
|
||||
- `router.tsx` — /login, /callback, / -> OrgResolver -> Layout -> pages
|
||||
- `router.tsx` — `/vendor/*` + `/tenant/*` with `RequireScope` guards and `LandingRedirect` that waits for scopes
|
||||
- `Layout.tsx` — persona-aware sidebar: vendor sees expandable "Vendor" section (Tenants, Audit Log, Certificates, Infrastructure, Identity/Logto), tenant admin sees Dashboard/License/SSO/Team/Audit/Settings
|
||||
- `OrgResolver.tsx` — merges global + org-scoped token scopes (vendor's platform:admin is global)
|
||||
- `config.ts` — fetch Logto config from /platform/api/config
|
||||
- `auth/useAuth.ts` — auth hook (isAuthenticated, logout, signIn)
|
||||
- `auth/useOrganization.ts` — Zustand store for current tenant
|
||||
- `auth/useScopes.ts` — decode JWT scopes, hasScope()
|
||||
- `auth/ProtectedRoute.tsx` — guard (redirects to /login)
|
||||
- `pages/DashboardPage.tsx` — tenant dashboard
|
||||
- `pages/LicensePage.tsx` — license info
|
||||
- `pages/AdminTenantsPage.tsx` — platform admin tenant management
|
||||
- **Vendor pages**: `VendorTenantsPage.tsx`, `CreateTenantPage.tsx`, `TenantDetailPage.tsx`, `VendorAuditPage.tsx`, `CertificatesPage.tsx`
|
||||
- **Tenant pages**: `TenantDashboardPage.tsx` (restart + upgrade server), `TenantLicensePage.tsx`, `SsoPage.tsx`, `TeamPage.tsx` (reset member passwords), `TenantAuditPage.tsx`, `SettingsPage.tsx` (change own password, reset server admin password)
|
||||
|
||||
### Custom Sign-in UI (`ui/sign-in/src/`)
|
||||
|
||||
@@ -68,40 +96,47 @@ Agent-server protocol is defined in `cameleer3/cameleer3-common/PROTOCOL.md`. Th
|
||||
|
||||
## Architecture Context
|
||||
|
||||
The existing cameleer3-server already has single-tenant auth (JWT, RBAC, bootstrap tokens, OIDC). The SaaS layer must:
|
||||
- Add multi-tenancy (tenant isolation of agent data, diagrams, configs)
|
||||
- Provide self-service signup, billing, and team management
|
||||
- Generate per-tenant bootstrap tokens for agent registration
|
||||
- Proxy or federate access to tenant-specific cameleer3-server instances
|
||||
- Enforce usage quotas and metered billing
|
||||
The SaaS platform is a **vendor management plane**. It does not proxy requests to servers — instead it provisions dedicated per-tenant cameleer3-server instances via Docker API. Each tenant gets isolated server + UI containers with their own database schemas, networks, and Traefik routing.
|
||||
|
||||
### Routing (single-domain, path-based via Traefik)
|
||||
|
||||
All services on one hostname. Two env vars control everything: `PUBLIC_HOST` + `PUBLIC_PROTOCOL`.
|
||||
All services on one hostname. Infrastructure containers (Traefik, Logto) use `PUBLIC_HOST` + `PUBLIC_PROTOCOL` env vars directly. The SaaS app reads these via `CAMELEER_SAAS_PROVISIONING_PUBLICHOST` / `CAMELEER_SAAS_PROVISIONING_PUBLICPROTOCOL` (Spring Boot properties `cameleer.saas.provisioning.publichost` / `cameleer.saas.provisioning.publicprotocol`).
|
||||
|
||||
| Path | Target | Notes |
|
||||
|------|--------|-------|
|
||||
| `/platform/*` | cameleer-saas:8080 | SPA + API (`server.servlet.context-path: /platform`) |
|
||||
| `/server/*` | cameleer3-server-ui:80 | Server dashboard (strip-prefix + `BASE_PATH=/server`) |
|
||||
| `/platform/vendor/*` | (SPA routes) | Vendor console (platform:admin) |
|
||||
| `/platform/tenant/*` | (SPA routes) | Tenant admin portal (org-scoped) |
|
||||
| `/t/{slug}/*` | per-tenant server-ui | Provisioned tenant UI containers (Traefik labels) |
|
||||
| `/` | redirect -> `/platform/` | Via `docker/traefik-dynamic.yml` |
|
||||
| `/*` (catch-all) | cameleer-logto:3001 (priority=1) | Custom sign-in UI, OIDC, interaction |
|
||||
|
||||
- SPA assets at `/_app/` (Vite `assetsDir: '_app'`) to avoid conflict with Logto's `/assets/`
|
||||
- Logto `ENDPOINT` = `${PUBLIC_PROTOCOL}://${PUBLIC_HOST}` (same domain, same origin)
|
||||
- TLS: self-signed cert init container (`traefik-certs`) for dev, ACME for production
|
||||
- TLS: `traefik-certs` init container generates self-signed cert (dev) or copies user-supplied cert via `CERT_FILE`/`KEY_FILE`/`CA_FILE` env vars. Default cert configured in `docker/traefik-dynamic.yml` (NOT static `traefik.yml` — Traefik v3 ignores `tls.stores.default` in static config). Runtime cert replacement via vendor UI (stage/activate/restore). ACME for production (future). Server containers import `/certs/ca.pem` into JVM truststore at startup via `docker-entrypoint.sh` for OIDC trust.
|
||||
- Root `/` -> `/platform/` redirect via Traefik file provider (`docker/traefik-dynamic.yml`)
|
||||
- LoginPage auto-redirects to Logto OIDC (no intermediate button)
|
||||
- Per-tenant server containers get Traefik labels for `/t/{slug}/*` routing at provisioning time
|
||||
|
||||
### Docker Networks
|
||||
|
||||
Two networks in docker-compose.yml:
|
||||
Compose-defined networks:
|
||||
|
||||
| Network | Name on Host | Purpose |
|
||||
|---------|-------------|---------|
|
||||
| `cameleer` | `cameleer-saas_cameleer` | Compose default — all services (DB, Logto, SaaS, server) |
|
||||
| `cameleer-traefik` | `cameleer-traefik` (fixed `name:`) | Traefik + server + deployed app containers |
|
||||
| `cameleer` | `cameleer-saas_cameleer` | Compose default — shared services (DB, Logto, SaaS) |
|
||||
| `cameleer-traefik` | `cameleer-traefik` (fixed `name:`) | Traefik + provisioned tenant containers |
|
||||
|
||||
The `cameleer-traefik` network uses `name: cameleer-traefik` (no compose project prefix) so `DockerNetworkManager.ensureNetwork("cameleer-traefik")` in the server finds it. The server joins with DNS alias `cameleer3-server`, matching `CAMELEER_SERVER_URL=http://cameleer3-server:8081`. Per-environment networks (`cameleer-env-{slug}`) are created dynamically by the server's `DockerNetworkManager`.
|
||||
Per-tenant networks (created dynamically by `DockerTenantProvisioner`):
|
||||
|
||||
| Network | Name Pattern | Purpose |
|
||||
|---------|-------------|---------|
|
||||
| Tenant network | `cameleer-tenant-{slug}` | Internal bridge, no internet — isolates tenant server + apps |
|
||||
| Environment network | `cameleer-env-{tenantId}-{envSlug}` | Tenant-scoped (includes tenantId to prevent slug collision across tenants) |
|
||||
|
||||
Server containers join three networks: tenant network (primary), shared services network (`cameleer`), and traefik network. Apps deployed by the server use the tenant network as primary.
|
||||
|
||||
**IMPORTANT:** Dynamically-created containers MUST have `traefik.docker.network=cameleer-traefik` label. Traefik's Docker provider defaults to `network: cameleer` (compose-internal name) for IP resolution, which doesn't match dynamically-created containers connected via Docker API using the host network name (`cameleer-saas_cameleer`). Without this label, Traefik returns 504 Gateway Timeout for `/t/{slug}/api/*` paths.
|
||||
|
||||
### Custom sign-in UI (`ui/sign-in/`)
|
||||
|
||||
@@ -119,32 +154,74 @@ Separate Vite+React SPA replacing Logto's default sign-in page. Visually matches
|
||||
- Tenant isolation enforced by `TenantIsolationInterceptor` (a single `HandlerInterceptor` on `/api/**` that resolves JWT org_id to TenantContext and validates `{tenantId}`, `{environmentId}`, `{appId}` path variables; fail-closed, platform admins bypass)
|
||||
- 13 OAuth2 scopes on the Logto API resource (`https://api.cameleer.local`): 10 platform scopes + 3 server scopes (`server:admin`, `server:operator`, `server:viewer`), served to the frontend from `GET /platform/api/config`
|
||||
- Server scopes map to server RBAC roles via JWT `scope` claim (SaaS platform path) or `roles` claim (server-ui OIDC login path)
|
||||
- 4-role model: `saas-vendor` (global, hosted only), org `owner` -> `server:admin`, org `operator` -> `server:operator`, org `viewer` -> `server:viewer`
|
||||
- `saas-vendor` global role injected via `docker/vendor-seed.sh` (not standard bootstrap) — has `platform:admin` + all tenant scopes
|
||||
- Org roles: `owner` -> `server:admin` + `tenant:manage`, `operator` -> `server:operator`, `viewer` -> `server:viewer`
|
||||
- `saas-vendor` global role created by bootstrap Phase 12 and always assigned to the admin user — has `platform:admin` + all tenant scopes
|
||||
- Custom `JwtDecoder` in `SecurityConfig.java` — ES384 algorithm, `at+jwt` token type, split issuer-uri (string validation) / jwk-set-uri (Docker-internal fetch), audience validation (`https://api.cameleer.local`)
|
||||
- Logto Custom JWT (Phase 7b in bootstrap) injects a `roles` claim into access tokens based on org roles and global roles — this makes role data available to the server without Logto-specific code
|
||||
|
||||
### Server integration (cameleer3-server env vars)
|
||||
### Auth routing by persona
|
||||
|
||||
| Persona | Logto role | Key scope | Landing route |
|
||||
|---------|-----------|-----------|---------------|
|
||||
| SaaS admin | `saas-vendor` (global) | `platform:admin` | `/vendor/tenants` |
|
||||
| Tenant admin | org `owner` | `tenant:manage` | `/tenant` (dashboard) |
|
||||
| Regular user (operator/viewer) | org member | `server:operator` or `server:viewer` | Redirected to server dashboard directly |
|
||||
|
||||
- `LandingRedirect` component waits for scopes to load, then routes to the correct persona landing page
|
||||
- `RequireScope` guard on route groups enforces scope requirements
|
||||
- SSO bridge: Logto session carries over to provisioned server's OIDC flow (Traditional Web App per tenant)
|
||||
|
||||
### Per-tenant server env vars (set by DockerTenantProvisioner)
|
||||
|
||||
These env vars are injected into provisioned per-tenant server containers:
|
||||
|
||||
| Env var | Value | Purpose |
|
||||
|---------|-------|---------|
|
||||
| `CAMELEER_OIDC_ISSUER_URI` | `${PUBLIC_PROTOCOL}://${PUBLIC_HOST}/oidc` | Token issuer claim validation |
|
||||
| `CAMELEER_OIDC_JWK_SET_URI` | `http://logto:3001/oidc/jwks` | Docker-internal JWK fetch |
|
||||
| `CAMELEER_OIDC_TLS_SKIP_VERIFY` | `true` | Skip cert verify for OIDC discovery (dev only — disable in production) |
|
||||
| `CAMELEER_CORS_ALLOWED_ORIGINS` | `${PUBLIC_PROTOCOL}://${PUBLIC_HOST}` | Allow browser requests through Traefik |
|
||||
| `BASE_PATH` (server-ui) | `/server` | React Router basename + `<base>` tag |
|
||||
| `CAMELEER_SERVER_SECURITY_OIDCISSUERURI` | `${PUBLIC_PROTOCOL}://${PUBLIC_HOST}/oidc` | Token issuer claim validation |
|
||||
| `CAMELEER_SERVER_SECURITY_OIDCJWKSETURI` | `http://cameleer-logto:3001/oidc/jwks` | Docker-internal JWK fetch |
|
||||
| `CAMELEER_SERVER_SECURITY_OIDCTLSSKIPVERIFY` | `true` (conditional) | Skip cert verify for OIDC discovery; only set when no `/certs/ca.pem` exists. When ca.pem exists, the server's `docker-entrypoint.sh` imports it into the JVM truststore instead. |
|
||||
| `CAMELEER_SERVER_SECURITY_OIDCAUDIENCE` | `https://api.cameleer.local` | JWT audience validation for OIDC tokens |
|
||||
| `CAMELEER_SERVER_SECURITY_CORSALLOWEDORIGINS` | `${PUBLIC_PROTOCOL}://${PUBLIC_HOST}` | Allow browser requests through Traefik |
|
||||
| `CAMELEER_SERVER_SECURITY_BOOTSTRAPTOKEN` | (generated) | Bootstrap auth token for M2M communication |
|
||||
| `CAMELEER_SERVER_RUNTIME_ENABLED` | `true` | Enable Docker orchestration |
|
||||
| `CAMELEER_SERVER_RUNTIME_SERVERURL` | `http://cameleer3-server-{slug}:8081` | Per-tenant server URL (DNS alias on tenant network) |
|
||||
| `CAMELEER_SERVER_RUNTIME_ROUTINGDOMAIN` | `${PUBLIC_HOST}` | Domain for Traefik routing labels |
|
||||
| `CAMELEER_SERVER_RUNTIME_ROUTINGMODE` | `path` | `path` or `subdomain` routing |
|
||||
| `CAMELEER_SERVER_RUNTIME_JARSTORAGEPATH` | `/data/jars` | Directory for uploaded JARs |
|
||||
| `CAMELEER_SERVER_RUNTIME_DOCKERNETWORK` | `cameleer-tenant-{slug}` | Primary network for deployed app containers |
|
||||
| `CAMELEER_SERVER_RUNTIME_JARDOCKERVOLUME` | `cameleer-jars-{slug}` | Docker volume name for JAR sharing between server and deployed containers |
|
||||
| `CAMELEER_SERVER_TENANT_ID` | (tenant UUID) | Tenant identifier for data isolation |
|
||||
| `CAMELEER_SERVER_SECURITY_INFRASTRUCTUREENDPOINTS` | `false` | Hides Database/ClickHouse admin from tenant admins |
|
||||
| `BASE_PATH` (server-ui) | `/t/{slug}` | React Router basename + `<base>` tag |
|
||||
| `CAMELEER_API_URL` (server-ui) | `http://cameleer-server-{slug}:8081` | Nginx upstream proxy target (NOT `API_URL` — image uses `${CAMELEER_API_URL}`) |
|
||||
|
||||
### Server runtime env vars (docker-compose.dev.yml)
|
||||
### Per-tenant volume mounts (set by DockerTenantProvisioner)
|
||||
|
||||
| Env var | Value | Purpose |
|
||||
|---------|-------|---------|
|
||||
| `CAMELEER_RUNTIME_ENABLED` | `true` | Enable Docker orchestration |
|
||||
| `CAMELEER_JAR_STORAGE_PATH` | `/data/jars` | Where JARs are stored inside server container |
|
||||
| `CAMELEER_RUNTIME_BASE_IMAGE` | `gitea.siegeln.net/cameleer/cameleer-runtime-base:latest` | Base image for deployed apps |
|
||||
| `CAMELEER_SERVER_URL` | `http://cameleer3-server:8081` | Server URL agents connect to |
|
||||
| `CAMELEER_ROUTING_DOMAIN` | `${PUBLIC_HOST}` | Domain for Traefik routing labels |
|
||||
| `CAMELEER_ROUTING_MODE` | `path` | `path` or `subdomain` routing |
|
||||
| `CAMELEER_JAR_DOCKER_VOLUME` | `cameleer-saas_jardata` | Named volume for Docker-in-Docker JAR mounting |
|
||||
| Mount | Container path | Purpose |
|
||||
|-------|---------------|---------|
|
||||
| `/var/run/docker.sock` | `/var/run/docker.sock` | Docker socket for app deployment orchestration |
|
||||
| `cameleer-jars-{slug}` (volume, via `CAMELEER_SERVER_RUNTIME_JARDOCKERVOLUME`) | `/data/jars` | Shared JAR storage — server writes, deployed app containers read |
|
||||
| `cameleer-saas_certs` (volume, ro) | `/certs` | Platform TLS certs + CA bundle for OIDC trust |
|
||||
|
||||
### SaaS app configuration (env vars for cameleer-saas itself)
|
||||
|
||||
SaaS properties use the `cameleer.saas.*` prefix (env vars: `CAMELEER_SAAS_*`). Two groups:
|
||||
|
||||
**Identity** (`cameleer.saas.identity.*` / `CAMELEER_SAAS_IDENTITY_*`):
|
||||
- Logto endpoint, M2M credentials, bootstrap file path — used by `LogtoConfig.java`
|
||||
|
||||
**Provisioning** (`cameleer.saas.provisioning.*` / `CAMELEER_SAAS_PROVISIONING_*`):
|
||||
|
||||
| Env var | Spring property | Purpose |
|
||||
|---------|----------------|---------|
|
||||
| `CAMELEER_SAAS_PROVISIONING_SERVERIMAGE` | `cameleer.saas.provisioning.serverimage` | Docker image for per-tenant server containers |
|
||||
| `CAMELEER_SAAS_PROVISIONING_SERVERUIIMAGE` | `cameleer.saas.provisioning.serveruiimage` | Docker image for per-tenant UI containers |
|
||||
| `CAMELEER_SAAS_PROVISIONING_NETWORKNAME` | `cameleer.saas.provisioning.networkname` | Shared services Docker network (compose default) |
|
||||
| `CAMELEER_SAAS_PROVISIONING_TRAEFIKNETWORK` | `cameleer.saas.provisioning.traefiknetwork` | Traefik Docker network for routing |
|
||||
| `CAMELEER_SAAS_PROVISIONING_PUBLICHOST` | `cameleer.saas.provisioning.publichost` | Public hostname (same value as infrastructure `PUBLIC_HOST`) |
|
||||
| `CAMELEER_SAAS_PROVISIONING_PUBLICPROTOCOL` | `cameleer.saas.provisioning.publicprotocol` | Public protocol (same value as infrastructure `PUBLIC_PROTOCOL`) |
|
||||
|
||||
**Note:** `PUBLIC_HOST` and `PUBLIC_PROTOCOL` remain as infrastructure env vars for Traefik and Logto containers. The SaaS app reads its own copies via the `CAMELEER_SAAS_PROVISIONING_*` prefix. `LOGTO_ENDPOINT` and `LOGTO_DB_PASSWORD` are infrastructure env vars for the Logto service and are unchanged.
|
||||
|
||||
### Server OIDC role extraction (two paths)
|
||||
|
||||
@@ -153,7 +230,9 @@ Separate Vite+React SPA replacing Logto's default sign-in page. Visually matches
|
||||
| SaaS platform -> server API | Logto org-scoped access token | `scope` claim | `JwtAuthenticationFilter.extractRolesFromScopes()` reads `server:admin` from scope |
|
||||
| Server-ui SSO login | Logto JWT access token (via Traditional Web App) | `roles` claim | `OidcTokenExchanger` decodes access_token, reads `roles` injected by Custom JWT |
|
||||
|
||||
The server's OIDC config (`OidcConfig`) includes `audience` (RFC 8707 resource indicator) and `additionalScopes`. The `audience` is sent as `resource` in both the authorization request and token exchange, which makes Logto return a JWT access token instead of opaque. The Custom JWT script maps org roles to `roles: ["server:admin"]`. If OIDC returns no roles and the user already exists, `syncOidcRoles` preserves existing local roles.
|
||||
The server's OIDC config (`OidcConfig`) includes `audience` (RFC 8707 resource indicator) and `additionalScopes`. The `audience` is sent as `resource` in both the authorization request and token exchange, which makes Logto return a JWT access token instead of opaque. The Custom JWT script maps org roles to `roles: ["server:admin"]`.
|
||||
|
||||
**CRITICAL:** `additionalScopes` MUST include `urn:logto:scope:organizations` and `urn:logto:scope:organization_roles` — without these, Logto doesn't populate `context.user.organizationRoles` in the Custom JWT script, so the `roles` claim is empty and all users get `defaultRoles` (VIEWER). The server's `OidcAuthController.applyClaimMappings()` uses OIDC token roles (from Custom JWT) as fallback when no DB claim mapping rules exist: claim mapping rules > OIDC token roles > defaultRoles.
|
||||
|
||||
### Deployment pipeline
|
||||
|
||||
@@ -172,25 +251,75 @@ Key files:
|
||||
- `docker/runtime-base/Dockerfile` — base image with agent JAR, maps env vars to `-D` system properties
|
||||
- `ServerApiClient.java` — M2M token acquisition for SaaS->server API calls (agent status). Uses `X-Cameleer-Protocol-Version: 1` header
|
||||
- Docker socket access: `group_add: ["0"]` in docker-compose.dev.yml (not root group membership in Dockerfile)
|
||||
- Network: deployed containers join `cameleer-traefik` (routing) + `cameleer-env-{slug}` (isolation)
|
||||
- Network: deployed containers join `cameleer-tenant-{slug}` (primary, isolation) + `cameleer-traefik` (routing) + `cameleer-env-{tenantId}-{envSlug}` (environment isolation)
|
||||
|
||||
### Bootstrap (`docker/logto-bootstrap.sh`)
|
||||
|
||||
Idempotent script run via `logto-bootstrap` init container. Phases:
|
||||
1. Wait for Logto + server health
|
||||
Idempotent script run inside the Logto container entrypoint. **Clean slate** — no example tenant, no viewer user, no server configuration. Phases:
|
||||
1. Wait for Logto health (no server to wait for — servers are provisioned per-tenant)
|
||||
2. Get Management API token (reads `m-default` secret from DB)
|
||||
3. Create Logto apps (SPA, Traditional with `skipConsent`, M2M with Management API role + server API role)
|
||||
3. Create Logto apps (SPA, Traditional Web App with `skipConsent`, M2M with Management API role + server API role)
|
||||
3b. Create API resource scopes (10 platform + 3 server scopes)
|
||||
4. Create org roles (owner, operator, viewer with API resource scope assignments) + M2M server role (`cameleer-m2m-server` with `server:admin` scope)
|
||||
5. Create users (platform owner with Logto console access, viewer for testing read-only OIDC)
|
||||
6. Create organization, add users with org roles (owner + viewer)
|
||||
7. Configure cameleer3-server OIDC (`rolesClaim: "roles"`, `audience`, `defaultRoles: ["VIEWER"]`)
|
||||
7b. Configure Logto Custom JWT for access tokens (maps org roles -> `roles` claim: admin->server:admin, member->server:viewer)
|
||||
5. Create admin user (SaaS admin with Logto console access)
|
||||
7b. Configure Logto Custom JWT for access tokens (maps org roles -> `roles` claim: owner->server:admin, operator->server:operator, viewer->server:viewer; saas-vendor global role -> server:admin)
|
||||
8. Configure Logto sign-in branding (Cameleer colors `#C6820E`/`#D4941E`, logo from `/platform/logo.svg`)
|
||||
9. Cleanup seeded Logto apps
|
||||
10. Write bootstrap results to `/data/logto-bootstrap.json`
|
||||
12. Create `saas-vendor` global role with all API scopes and assign to admin user (always runs — admin IS the platform admin).
|
||||
|
||||
Platform owner credentials (`SAAS_ADMIN_USER`/`SAAS_ADMIN_PASS`) work for both the SaaS platform and the Logto console (port 3002). The `saas-vendor` global role (hosted only) is created separately via `docker/vendor-seed.sh`.
|
||||
The multi-tenant compose stack is: Traefik + PostgreSQL + ClickHouse + Logto (with bootstrap entrypoint) + cameleer-saas. No `cameleer3-server` or `cameleer3-server-ui` in compose — those are provisioned per-tenant by `DockerTenantProvisioner`.
|
||||
|
||||
### Deployment Modes (installer)
|
||||
|
||||
The installer (`installer/install.sh`) supports two deployment modes:
|
||||
|
||||
| | Multi-tenant SaaS (`DEPLOYMENT_MODE=saas`) | Standalone (`DEPLOYMENT_MODE=standalone`) |
|
||||
|---|---|---|
|
||||
| **Containers** | traefik, postgres, clickhouse, logto, cameleer-saas | traefik, postgres, clickhouse, server, server-ui |
|
||||
| **Auth** | Logto OIDC (SaaS admin + tenant users) | Local auth (built-in admin, no identity provider) |
|
||||
| **Tenant management** | SaaS admin creates/manages tenants via UI | Single server instance, no fleet management |
|
||||
| **PostgreSQL** | `cameleer-postgres` image (multi-DB init) | Stock `postgres:16-alpine` (server creates schema via Flyway) |
|
||||
| **Use case** | Platform vendor managing multiple customers | Single customer running the product directly |
|
||||
|
||||
Standalone mode generates a simpler compose with the server running directly. No Logto, no SaaS management plane, no bootstrap. The admin logs in with local credentials at `/`.
|
||||
|
||||
### Tenant Provisioning Flow
|
||||
|
||||
When SaaS admin creates a tenant via `VendorTenantService`:
|
||||
|
||||
**Synchronous (in `createAndProvision`):**
|
||||
1. Create `TenantEntity` (status=PROVISIONING) + Logto organization
|
||||
2. Create admin user in Logto with owner org role (if credentials provided)
|
||||
3. Register OIDC redirect URIs for `/t/{slug}/oidc/callback` on Logto Traditional Web App
|
||||
5. Generate license (tier-appropriate, 365 days)
|
||||
6. Return immediately — UI shows provisioning spinner, polls via `refetchInterval`
|
||||
|
||||
**Asynchronous (in `provisionAsync`, `@Async`):**
|
||||
7. Create tenant-isolated Docker network (`cameleer-tenant-{slug}`)
|
||||
8. Create server container with env vars, Traefik labels (`traefik.docker.network`), health check, Docker socket bind, JAR volume, certs volume (ro)
|
||||
9. Create UI container with `CAMELEER_API_URL`, `BASE_PATH`, Traefik strip-prefix labels
|
||||
10. Wait for health check (`/api/v1/health`, not `/actuator/health`, which requires auth)
|
||||
11. Push license token to server via M2M API
|
||||
12. Push OIDC config (Traditional Web App credentials + `additionalScopes: [urn:logto:scope:organizations, urn:logto:scope:organization_roles]`) to server for SSO
|
||||
13. Update tenant status -> ACTIVE (or set `provisionError` on failure)
|
||||
|
||||
**Server restart** (available to SaaS admin + tenant admin):
|
||||
- `POST /api/vendor/tenants/{id}/restart` (SaaS admin) and `POST /api/tenant/server/restart` (tenant)
|
||||
- Calls `TenantProvisioner.stop(slug)` then `start(slug)` — restarts server + UI containers only (same image)
|
||||
|
||||
**Server upgrade** (available to SaaS admin + tenant admin):
|
||||
- `POST /api/vendor/tenants/{id}/upgrade` (SaaS admin) and `POST /api/tenant/server/upgrade` (tenant)
|
||||
- Calls `TenantProvisioner.upgrade(slug)` — removes server + UI containers, force-pulls latest images (preserves app containers, volumes, networks), then `provisionAsync()` re-creates containers with the new image + pushes license + OIDC config
|
||||
|
||||
**Tenant delete** cleanup:
|
||||
- `DockerTenantProvisioner.remove(slug)` — label-based container removal (`cameleer.tenant={slug}`), env network cleanup, tenant network removal, JAR volume removal
|
||||
- `TenantDataCleanupService.cleanup(slug)` — drops PostgreSQL `tenant_{slug}` schema, deletes ClickHouse data (GDPR)
|
||||
|
||||
**Password management** (tenant portal):
|
||||
- `POST /api/tenant/password` — tenant admin changes own Logto password (via `@AuthenticationPrincipal` JWT subject)
|
||||
- `POST /api/tenant/team/{userId}/password` — tenant admin resets a team member's Logto password (validates org membership first)
|
||||
- `POST /api/tenant/server/admin-password` — tenant admin resets the server's built-in local admin password (via M2M API to `POST /api/v1/admin/users/user:admin/password`)
|
||||
|
||||
## Database Migrations
|
||||
|
||||
@@ -204,6 +333,9 @@ PostgreSQL (Flyway): `src/main/resources/db/migration/`
|
||||
- V007 — audit_log
|
||||
- V008 — app resource limits
|
||||
- V010 — cleanup of migrated tables
|
||||
- V011 — add provisioning fields (server_endpoint, provision_error)
|
||||
- V012 — certificates table + tenants.ca_applied_at
|
||||
- V013 — tenant_ca_certs (per-tenant CA certificates with PEM storage)
|
||||
|
||||
## Related Conventions
|
||||
|
||||
@@ -211,13 +343,116 @@ PostgreSQL (Flyway): `src/main/resources/db/migration/`
|
||||
- CI: `.gitea/workflows/` — Gitea Actions
|
||||
- K8s target: k3s cluster at 192.168.50.86
|
||||
- Docker images: CI builds and pushes all images — Dockerfiles use multi-stage builds, no local builds needed
|
||||
- `cameleer-saas` — SaaS app (frontend + JAR baked in)
|
||||
- `cameleer-saas` — SaaS vendor management plane (frontend + JAR baked in)
|
||||
- `cameleer-logto` — custom Logto with sign-in UI baked in
|
||||
- `cameleer-runtime-base` — base image for deployed apps (agent JAR + JRE). CI downloads latest agent SNAPSHOT from Gitea Maven registry. Uses `CAMELEER_SERVER_URL` env var (not CAMELEER_EXPORT_ENDPOINT).
|
||||
- `cameleer3-server` / `cameleer3-server-ui` — provisioned per-tenant (not in compose, created by `DockerTenantProvisioner`)
|
||||
- `cameleer-runtime-base` — base image for deployed apps (agent JAR + JRE). CI downloads latest agent SNAPSHOT from Gitea Maven registry. Uses `CAMELEER_SERVER_RUNTIME_SERVERURL` env var (not CAMELEER_EXPORT_ENDPOINT).
|
||||
- Docker builds: `--no-cache`, `--provenance=false` for Gitea compatibility
|
||||
- `docker-compose.dev.yml` — exposes ports for direct access, sets `SPRING_PROFILES_ACTIVE: dev`. Volume-mounts `./ui/dist` into the container so local UI builds are served without rebuilding the Docker image (`SPRING_WEB_RESOURCES_STATIC_LOCATIONS` overrides classpath). Adds Docker socket mount, jardata volume, and runtime env vars for container orchestration.
|
||||
- `docker-compose.dev.yml` — exposes ports for direct access, sets `SPRING_PROFILES_ACTIVE: dev`. Volume-mounts `./ui/dist` into the container so local UI builds are served without rebuilding the Docker image (`SPRING_WEB_RESOURCES_STATIC_LOCATIONS` overrides classpath). Adds Docker socket mount for tenant provisioning.
|
||||
- Design system: import from `@cameleer/design-system` (Gitea npm registry)
|
||||
|
||||
## Disabled Skills
|
||||
|
||||
- Do NOT use any `gsd:*` skills in this project. This includes all `/gsd:` prefixed commands.
|
||||
|
||||
<!-- gitnexus:start -->
|
||||
# GitNexus — Code Intelligence
|
||||
|
||||
This project is indexed by GitNexus as **cameleer-saas** (2676 symbols, 5768 relationships, 224 execution flows). Use the GitNexus MCP tools to understand code, assess impact, and navigate safely.
|
||||
|
||||
> If any GitNexus tool warns the index is stale, run `npx gitnexus analyze` in the terminal first.
|
||||
|
||||
## Always Do
|
||||
|
||||
- **MUST run impact analysis before editing any symbol.** Before modifying a function, class, or method, run `gitnexus_impact({target: "symbolName", direction: "upstream"})` and report the blast radius (direct callers, affected processes, risk level) to the user.
|
||||
- **MUST run `gitnexus_detect_changes()` before committing** to verify your changes only affect expected symbols and execution flows.
|
||||
- **MUST warn the user** if impact analysis returns HIGH or CRITICAL risk before proceeding with edits.
|
||||
- When exploring unfamiliar code, use `gitnexus_query({query: "concept"})` to find execution flows instead of grepping. It returns process-grouped results ranked by relevance.
|
||||
- When you need full context on a specific symbol — callers, callees, which execution flows it participates in — use `gitnexus_context({name: "symbolName"})`.
|
||||
|
||||
## When Debugging
|
||||
|
||||
1. `gitnexus_query({query: "<error or symptom>"})` — find execution flows related to the issue
|
||||
2. `gitnexus_context({name: "<suspect function>"})` — see all callers, callees, and process participation
|
||||
3. `READ gitnexus://repo/cameleer-saas/process/{processName}` — trace the full execution flow step by step
|
||||
4. For regressions: `gitnexus_detect_changes({scope: "compare", base_ref: "main"})` — see what your branch changed
|
||||
|
||||
## When Refactoring
|
||||
|
||||
- **Renaming**: MUST use `gitnexus_rename({symbol_name: "old", new_name: "new", dry_run: true})` first. Review the preview — graph edits are safe, text_search edits need manual review. Then run with `dry_run: false`.
|
||||
- **Extracting/Splitting**: MUST run `gitnexus_context({name: "target"})` to see all incoming/outgoing refs, then `gitnexus_impact({target: "target", direction: "upstream"})` to find all external callers before moving code.
|
||||
- After any refactor: run `gitnexus_detect_changes({scope: "all"})` to verify only expected files changed.
|
||||
|
||||
## Never Do
|
||||
|
||||
- NEVER edit a function, class, or method without first running `gitnexus_impact` on it.
|
||||
- NEVER ignore HIGH or CRITICAL risk warnings from impact analysis.
|
||||
- NEVER rename symbols with find-and-replace — use `gitnexus_rename` which understands the call graph.
|
||||
- NEVER commit changes without running `gitnexus_detect_changes()` to check affected scope.
|
||||
|
||||
## Tools Quick Reference
|
||||
|
||||
| Tool | When to use | Command |
|
||||
|------|-------------|---------|
|
||||
| `query` | Find code by concept | `gitnexus_query({query: "auth validation"})` |
|
||||
| `context` | 360-degree view of one symbol | `gitnexus_context({name: "validateUser"})` |
|
||||
| `impact` | Blast radius before editing | `gitnexus_impact({target: "X", direction: "upstream"})` |
|
||||
| `detect_changes` | Pre-commit scope check | `gitnexus_detect_changes({scope: "staged"})` |
|
||||
| `rename` | Safe multi-file rename | `gitnexus_rename({symbol_name: "old", new_name: "new", dry_run: true})` |
|
||||
| `cypher` | Custom graph queries | `gitnexus_cypher({query: "MATCH ..."})` |
|
||||
|
||||
## Impact Risk Levels
|
||||
|
||||
| Depth | Meaning | Action |
|
||||
|-------|---------|--------|
|
||||
| d=1 | WILL BREAK — direct callers/importers | MUST update these |
|
||||
| d=2 | LIKELY AFFECTED — indirect deps | Should test |
|
||||
| d=3 | MAY NEED TESTING — transitive | Test if critical path |
|
||||
|
||||
## Resources
|
||||
|
||||
| Resource | Use for |
|
||||
|----------|---------|
|
||||
| `gitnexus://repo/cameleer-saas/context` | Codebase overview, check index freshness |
|
||||
| `gitnexus://repo/cameleer-saas/clusters` | All functional areas |
|
||||
| `gitnexus://repo/cameleer-saas/processes` | All execution flows |
|
||||
| `gitnexus://repo/cameleer-saas/process/{name}` | Step-by-step execution trace |
|
||||
|
||||
## Self-Check Before Finishing
|
||||
|
||||
Before completing any code modification task, verify:
|
||||
1. `gitnexus_impact` was run for all modified symbols
|
||||
2. No HIGH/CRITICAL risk warnings were ignored
|
||||
3. `gitnexus_detect_changes()` confirms changes match expected scope
|
||||
4. All d=1 (WILL BREAK) dependents were updated
|
||||
|
||||
## Keeping the Index Fresh
|
||||
|
||||
After committing code changes, the GitNexus index becomes stale. Re-run analyze to update it:
|
||||
|
||||
```bash
|
||||
npx gitnexus analyze
|
||||
```
|
||||
|
||||
If the index previously included embeddings, preserve them by adding `--embeddings`:
|
||||
|
||||
```bash
|
||||
npx gitnexus analyze --embeddings
|
||||
```
|
||||
|
||||
To check whether embeddings exist, inspect `.gitnexus/meta.json` — the `stats.embeddings` field shows the count (0 means no embeddings). **Running analyze without `--embeddings` will delete any previously generated embeddings.**
|
||||
|
||||
> Claude Code users: A PostToolUse hook handles this automatically after `git commit` and `git merge`.
|
||||
|
||||
## CLI
|
||||
|
||||
| Task | Read this skill file |
|
||||
|------|---------------------|
|
||||
| Understand architecture / "How does X work?" | `.claude/skills/gitnexus/gitnexus-exploring/SKILL.md` |
|
||||
| Blast radius / "What breaks if I change X?" | `.claude/skills/gitnexus/gitnexus-impact-analysis/SKILL.md` |
|
||||
| Trace bugs / "Why is X failing?" | `.claude/skills/gitnexus/gitnexus-debugging/SKILL.md` |
|
||||
| Rename / extract / split / refactor | `.claude/skills/gitnexus/gitnexus-refactoring/SKILL.md` |
|
||||
| Tools, resources, schema reference | `.claude/skills/gitnexus/gitnexus-guide/SKILL.md` |
|
||||
| Index, status, clean, wiki CLI commands | `.claude/skills/gitnexus/gitnexus-cli/SKILL.md` |
|
||||
|
||||
<!-- gitnexus:end -->
|
||||
|
||||
84
HOWTO.md
84
HOWTO.md
@@ -35,19 +35,21 @@ curl http://localhost:8080/actuator/health
|
||||
|
||||
## Architecture
|
||||
|
||||
The platform runs as a Docker Compose stack with 6 services:
|
||||
The platform runs as a Docker Compose stack:
|
||||
|
||||
| Service | Image | Port | Purpose |
|
||||
|---------|-------|------|---------|
|
||||
| **traefik** | traefik:v3 | 80, 443 | Reverse proxy, TLS, routing |
|
||||
| **traefik-certs** | alpine:latest | — | Init container: generates self-signed cert or copies user-supplied cert |
|
||||
| **traefik** | traefik:v3 | 80, 443, 3002 | Reverse proxy, TLS termination, routing |
|
||||
| **postgres** | postgres:16-alpine | 5432* | Platform database + Logto database |
|
||||
| **logto** | ghcr.io/logto-io/logto | 3001*, 3002* | Identity provider (OIDC) |
|
||||
| **cameleer-saas** | cameleer-saas:latest | 8080* | SaaS API server |
|
||||
| **cameleer3-server** | cameleer3-server:latest | 8081 | Observability backend |
|
||||
| **cameleer-saas** | cameleer-saas:latest | 8080* | SaaS API server + vendor UI |
|
||||
| **clickhouse** | clickhouse-server:latest | 8123* | Trace/metrics/log storage |
|
||||
|
||||
*Ports exposed to host only with `docker-compose.dev.yml` overlay.
|
||||
|
||||
Per-tenant `cameleer3-server` and `cameleer3-server-ui` containers are provisioned dynamically by `DockerTenantProvisioner` — they are NOT part of the compose stack.
|
||||
|
||||
## Installation
|
||||
|
||||
### 1. Environment Configuration
|
||||
@@ -61,12 +63,10 @@ Edit `.env` and set at minimum:
|
||||
```bash
|
||||
# Change in production
|
||||
POSTGRES_PASSWORD=<strong-password>
|
||||
CAMELEER_AUTH_TOKEN=<random-string-for-agent-bootstrap>
|
||||
CAMELEER_TENANT_SLUG=<your-tenant-slug> # e.g., "acme" — tags all observability data
|
||||
|
||||
# Logto M2M credentials (get from Logto admin console after first boot)
|
||||
LOGTO_M2M_CLIENT_ID=
|
||||
LOGTO_M2M_CLIENT_SECRET=
|
||||
# Logto M2M credentials (auto-provisioned by bootstrap, or get from Logto admin console)
|
||||
CAMELEER_SAAS_IDENTITY_M2MCLIENTID=
|
||||
CAMELEER_SAAS_IDENTITY_M2MCLIENTSECRET=
|
||||
```
|
||||
|
||||
### 2. Ed25519 Keys
|
||||
@@ -83,7 +83,25 @@ This creates `keys/ed25519.key` (private) and `keys/ed25519.pub` (public). The k
|
||||
|
||||
If no key files are configured, the platform generates ephemeral keys on startup (suitable for development only — keys change on every restart).
|
||||
|
||||
### 3. Start the Stack
|
||||
### 3. TLS Certificate (Optional)
|
||||
|
||||
By default, the `traefik-certs` init container generates a self-signed certificate for `PUBLIC_HOST`. To supply your own certificate at bootstrap time, set these env vars in `.env`:
|
||||
|
||||
```bash
|
||||
CERT_FILE=/path/to/cert.pem # PEM-encoded certificate
|
||||
KEY_FILE=/path/to/key.pem # PEM-encoded private key
|
||||
CA_FILE=/path/to/ca.pem # Optional: CA bundle (for private CA trust)
|
||||
```
|
||||
|
||||
The init container validates that the key matches the certificate before accepting. If validation fails, the container exits with an error.
|
||||
|
||||
**Runtime certificate replacement** is available via the vendor UI at `/vendor/certificates`:
|
||||
- Upload a new cert+key+CA bundle (staged, not yet active)
|
||||
- Validate and activate (atomic swap, Traefik hot-reloads)
|
||||
- Roll back to the previous certificate if needed
|
||||
- Track which tenants need a restart to pick up CA bundle changes
|
||||
|
||||
### 4. Start the Stack
|
||||
|
||||
**Development** (ports exposed for direct access):
|
||||
```bash
|
||||
@@ -95,7 +113,7 @@ docker compose -f docker-compose.yml -f docker-compose.dev.yml up -d
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
### 4. Verify Services
|
||||
### 5. Verify Services
|
||||
|
||||
```bash
|
||||
# Health check
|
||||
@@ -119,8 +137,8 @@ On first boot, Logto seeds its database automatically. Access the admin console
|
||||
- Assign the **Logto Management API** resource with all scopes
|
||||
4. Update `.env`:
|
||||
```
|
||||
LOGTO_M2M_CLIENT_ID=<app-id>
|
||||
LOGTO_M2M_CLIENT_SECRET=<app-secret>
|
||||
CAMELEER_SAAS_IDENTITY_M2MCLIENTID=<app-id>
|
||||
CAMELEER_SAAS_IDENTITY_M2MCLIENTSECRET=<app-secret>
|
||||
```
|
||||
5. Restart cameleer-saas: `docker compose restart cameleer-saas`
|
||||
|
||||
@@ -287,6 +305,46 @@ Query params: `since`, `until` (ISO timestamps), `limit` (default 500), `stream`
|
||||
|------|-------------|
|
||||
| `/dashboard` | cameleer3-server observability dashboard (forward-auth protected) |
|
||||
|
||||
### Vendor: Certificates (platform:admin)
|
||||
| Method | Path | Description |
|
||||
|--------|------|-------------|
|
||||
| GET | `/api/vendor/certificates` | Overview (active, staged, archived, stale count) |
|
||||
| POST | `/api/vendor/certificates/stage` | Upload cert+key+CA (multipart) |
|
||||
| POST | `/api/vendor/certificates/activate` | Promote staged -> active |
|
||||
| POST | `/api/vendor/certificates/restore` | Swap archived <-> active |
|
||||
| DELETE | `/api/vendor/certificates/staged` | Discard staged cert |
|
||||
| GET | `/api/vendor/certificates/stale-tenants` | Count tenants needing CA restart |
|
||||
|
||||
### Vendor: Tenants (platform:admin)
|
||||
| Method | Path | Description |
|
||||
|--------|------|-------------|
|
||||
| GET | `/api/vendor/tenants` | List all tenants (includes fleet health: agentCount, environmentCount, agentLimit) |
|
||||
| POST | `/api/vendor/tenants` | Create tenant (async provisioning) |
|
||||
| GET | `/api/vendor/tenants/{id}` | Tenant detail + server state |
|
||||
| POST | `/api/vendor/tenants/{id}/restart` | Restart server containers |
|
||||
| POST | `/api/vendor/tenants/{id}/suspend` | Suspend tenant |
|
||||
| POST | `/api/vendor/tenants/{id}/activate` | Activate tenant |
|
||||
| DELETE | `/api/vendor/tenants/{id}` | Delete tenant |
|
||||
| POST | `/api/vendor/tenants/{id}/license` | Renew license |
|
||||
|
||||
### Tenant Portal (org-scoped)
|
||||
| Method | Path | Description |
|
||||
|--------|------|-------------|
|
||||
| GET | `/api/tenant/dashboard` | Tenant dashboard data |
|
||||
| GET | `/api/tenant/license` | License details |
|
||||
| POST | `/api/tenant/server/restart` | Restart server |
|
||||
| GET | `/api/tenant/team` | List team members |
|
||||
| POST | `/api/tenant/team/invite` | Invite team member |
|
||||
| DELETE | `/api/tenant/team/{userId}` | Remove team member |
|
||||
| GET | `/api/tenant/settings` | Tenant settings |
|
||||
| GET | `/api/tenant/sso` | List SSO connectors |
|
||||
| POST | `/api/tenant/sso` | Create SSO connector |
|
||||
| GET | `/api/tenant/ca` | List tenant CA certificates |
|
||||
| POST | `/api/tenant/ca` | Upload CA cert (staged) |
|
||||
| POST | `/api/tenant/ca/{id}/activate` | Activate staged CA cert |
|
||||
| DELETE | `/api/tenant/ca/{id}` | Remove CA cert |
|
||||
| GET | `/api/tenant/audit` | Tenant audit log |
|
||||
|
||||
### Health
|
||||
| Method | Path | Description |
|
||||
|--------|------|-------------|
|
||||
|
||||
@@ -1,48 +1,36 @@
|
||||
# Development overrides: exposes ports for direct access
|
||||
# Usage: docker compose -f docker-compose.yml -f docker-compose.dev.yml up
|
||||
services:
|
||||
postgres:
|
||||
cameleer-postgres:
|
||||
ports:
|
||||
- "5432:5432"
|
||||
|
||||
logto:
|
||||
cameleer-logto:
|
||||
ports:
|
||||
- "3001:3001"
|
||||
|
||||
logto-bootstrap:
|
||||
environment:
|
||||
VENDOR_SEED_ENABLED: "true"
|
||||
|
||||
cameleer-saas:
|
||||
ports:
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- ./ui/dist:/app/static
|
||||
environment:
|
||||
SPRING_PROFILES_ACTIVE: dev
|
||||
SPRING_WEB_RESOURCES_STATIC_LOCATIONS: file:/app/static/,classpath:/static/
|
||||
|
||||
cameleer3-server:
|
||||
ports:
|
||||
- "8081:8081"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- jardata:/data/jars
|
||||
group_add:
|
||||
- "0"
|
||||
environment:
|
||||
CAMELEER_RUNTIME_ENABLED: "true"
|
||||
CAMELEER_JAR_STORAGE_PATH: /data/jars
|
||||
CAMELEER_RUNTIME_BASE_IMAGE: gitea.siegeln.net/cameleer/cameleer-runtime-base:latest
|
||||
CAMELEER_DOCKER_NETWORK: cameleer-saas_cameleer
|
||||
CAMELEER_SERVER_URL: http://cameleer3-server:8081
|
||||
CAMELEER_ROUTING_DOMAIN: ${PUBLIC_HOST:-localhost}
|
||||
CAMELEER_ROUTING_MODE: path
|
||||
CAMELEER_JAR_DOCKER_VOLUME: cameleer-saas_jardata
|
||||
SPRING_PROFILES_ACTIVE: dev
|
||||
SPRING_WEB_RESOURCES_STATIC_LOCATIONS: file:/app/static/,classpath:/static/
|
||||
CAMELEER_SAAS_PROVISIONING_PUBLICHOST: ${PUBLIC_HOST:-localhost}
|
||||
CAMELEER_SAAS_PROVISIONING_PUBLICPROTOCOL: ${PUBLIC_PROTOCOL:-https}
|
||||
CAMELEER_SAAS_PROVISIONING_SERVERIMAGE: gitea.siegeln.net/cameleer/cameleer3-server:${VERSION:-latest}
|
||||
CAMELEER_SAAS_PROVISIONING_SERVERUIIMAGE: gitea.siegeln.net/cameleer/cameleer3-server-ui:${VERSION:-latest}
|
||||
CAMELEER_SAAS_PROVISIONING_NETWORKNAME: cameleer-saas_cameleer
|
||||
CAMELEER_SAAS_PROVISIONING_TRAEFIKNETWORK: cameleer-traefik
|
||||
|
||||
cameleer3-server-ui:
|
||||
ports:
|
||||
- "8082:80"
|
||||
|
||||
clickhouse:
|
||||
cameleer-clickhouse:
|
||||
ports:
|
||||
- "8123:8123"
|
||||
|
||||
volumes:
|
||||
jardata:
|
||||
|
||||
@@ -1,55 +1,32 @@
|
||||
services:
|
||||
traefik-certs:
|
||||
image: alpine:latest
|
||||
restart: "no"
|
||||
entrypoint: ["sh", "-c"]
|
||||
command:
|
||||
- |
|
||||
if [ ! -f /certs/cert.pem ]; then
|
||||
apk add --no-cache openssl >/dev/null 2>&1
|
||||
openssl req -x509 -newkey rsa:4096 \
|
||||
-keyout /certs/key.pem -out /certs/cert.pem \
|
||||
-days 365 -nodes \
|
||||
-subj "/CN=$$PUBLIC_HOST" \
|
||||
-addext "subjectAltName=DNS:$$PUBLIC_HOST,DNS:*.$$PUBLIC_HOST"
|
||||
echo "Generated self-signed cert for $$PUBLIC_HOST"
|
||||
else
|
||||
echo "Certs already exist, skipping"
|
||||
fi
|
||||
cameleer-traefik:
|
||||
image: ${TRAEFIK_IMAGE:-gitea.siegeln.net/cameleer/cameleer-traefik}:${VERSION:-latest}
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "${HTTP_PORT:-80}:80"
|
||||
- "${HTTPS_PORT:-443}:443"
|
||||
- "${LOGTO_CONSOLE_PORT:-3002}:3002"
|
||||
environment:
|
||||
PUBLIC_HOST: ${PUBLIC_HOST:-localhost}
|
||||
CERT_FILE: ${CERT_FILE:-}
|
||||
KEY_FILE: ${KEY_FILE:-}
|
||||
CA_FILE: ${CA_FILE:-}
|
||||
volumes:
|
||||
- certs:/certs
|
||||
|
||||
traefik:
|
||||
image: traefik:v3
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
traefik-certs:
|
||||
condition: service_completed_successfully
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
- "3002:3002"
|
||||
volumes:
|
||||
- cameleer-certs:/certs
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
- ./traefik.yml:/etc/traefik/traefik.yml:ro
|
||||
- ./docker/traefik-dynamic.yml:/etc/traefik/dynamic.yml:ro
|
||||
- certs:/etc/traefik/certs:ro
|
||||
networks:
|
||||
- cameleer
|
||||
- cameleer-traefik
|
||||
|
||||
postgres:
|
||||
image: postgres:16-alpine
|
||||
cameleer-postgres:
|
||||
image: ${POSTGRES_IMAGE:-gitea.siegeln.net/cameleer/cameleer-postgres}:${VERSION:-latest}
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_DB: ${POSTGRES_DB:-cameleer_saas}
|
||||
POSTGRES_USER: ${POSTGRES_USER:-cameleer}
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-cameleer_dev}
|
||||
volumes:
|
||||
- pgdata:/var/lib/postgresql/data
|
||||
- ./docker/init-databases.sh:/docker-entrypoint-initdb.d/init-databases.sh:ro
|
||||
- cameleer-pgdata:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-cameleer} -d ${POSTGRES_DB:-cameleer_saas}"]
|
||||
interval: 5s
|
||||
@@ -58,76 +35,74 @@ services:
|
||||
networks:
|
||||
- cameleer
|
||||
|
||||
logto:
|
||||
image: ${LOGTO_IMAGE:-gitea.siegeln.net/cameleer/cameleer-logto}:${VERSION:-latest}
|
||||
cameleer-clickhouse:
|
||||
image: ${CLICKHOUSE_IMAGE:-gitea.siegeln.net/cameleer/cameleer-clickhouse}:${VERSION:-latest}
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
entrypoint: ["sh", "-c", "npm run cli db seed -- --swe && npm start"]
|
||||
environment:
|
||||
DB_URL: postgres://${POSTGRES_USER:-cameleer}:${POSTGRES_PASSWORD:-cameleer_dev}@postgres:5432/logto
|
||||
ENDPOINT: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}
|
||||
ADMIN_ENDPOINT: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}:3002
|
||||
TRUST_PROXY_HEADER: 1
|
||||
NODE_TLS_REJECT_UNAUTHORIZED: "0" # dev only — accept self-signed cert for internal OIDC discovery
|
||||
CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD:-cameleer_ch}
|
||||
volumes:
|
||||
- cameleer-chdata:/var/lib/clickhouse
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "node -e \"require('http').get('http://localhost:3001/oidc/.well-known/openid-configuration', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))\""]
|
||||
interval: 5s
|
||||
test: ["CMD-SHELL", "clickhouse-client --password ${CLICKHOUSE_PASSWORD:-cameleer_ch} --query 'SELECT 1'"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 30
|
||||
start_period: 15s
|
||||
retries: 3
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.http.routers.logto.rule=PathPrefix(`/`)
|
||||
- traefik.http.routers.logto.priority=1
|
||||
- traefik.http.routers.logto.entrypoints=websecure
|
||||
- traefik.http.routers.logto.tls=true
|
||||
- traefik.http.routers.logto.service=logto
|
||||
- traefik.http.routers.logto.middlewares=logto-cors
|
||||
- traefik.http.middlewares.logto-cors.headers.accessControlAllowOriginList=${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}:3002
|
||||
- traefik.http.middlewares.logto-cors.headers.accessControlAllowMethods=GET,POST,PUT,PATCH,DELETE,OPTIONS
|
||||
- traefik.http.middlewares.logto-cors.headers.accessControlAllowHeaders=Authorization,Content-Type
|
||||
- traefik.http.middlewares.logto-cors.headers.accessControlAllowCredentials=true
|
||||
- traefik.http.services.logto.loadbalancer.server.port=3001
|
||||
- traefik.http.routers.logto-console.rule=PathPrefix(`/`)
|
||||
- traefik.http.routers.logto-console.entrypoints=admin-console
|
||||
- traefik.http.routers.logto-console.tls=true
|
||||
- traefik.http.routers.logto-console.service=logto-console
|
||||
- traefik.http.services.logto-console.loadbalancer.server.port=3002
|
||||
- prometheus.scrape=true
|
||||
- prometheus.path=/metrics
|
||||
- prometheus.port=9363
|
||||
networks:
|
||||
- cameleer
|
||||
|
||||
logto-bootstrap:
|
||||
image: postgres:16-alpine
|
||||
cameleer-logto:
|
||||
image: ${LOGTO_IMAGE:-gitea.siegeln.net/cameleer/cameleer-logto}:${VERSION:-latest}
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
logto:
|
||||
cameleer-postgres:
|
||||
condition: service_healthy
|
||||
cameleer3-server:
|
||||
condition: service_healthy
|
||||
restart: "no"
|
||||
entrypoint: ["sh", "/scripts/logto-bootstrap.sh"]
|
||||
environment:
|
||||
LOGTO_ENDPOINT: http://logto:3001
|
||||
LOGTO_ADMIN_ENDPOINT: http://logto:3002
|
||||
DB_URL: postgres://${POSTGRES_USER:-cameleer}:${POSTGRES_PASSWORD:-cameleer_dev}@cameleer-postgres:5432/logto
|
||||
ENDPOINT: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}
|
||||
ADMIN_ENDPOINT: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}:${LOGTO_CONSOLE_PORT:-3002}
|
||||
TRUST_PROXY_HEADER: 1
|
||||
NODE_TLS_REJECT_UNAUTHORIZED: "${NODE_TLS_REJECT:-0}"
|
||||
LOGTO_ENDPOINT: http://cameleer-logto:3001
|
||||
LOGTO_ADMIN_ENDPOINT: http://cameleer-logto:3002
|
||||
LOGTO_PUBLIC_ENDPOINT: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}
|
||||
PUBLIC_HOST: ${PUBLIC_HOST:-localhost}
|
||||
PUBLIC_PROTOCOL: ${PUBLIC_PROTOCOL:-https}
|
||||
PG_HOST: postgres
|
||||
PG_HOST: cameleer-postgres
|
||||
PG_USER: ${POSTGRES_USER:-cameleer}
|
||||
PG_PASSWORD: ${POSTGRES_PASSWORD:-cameleer_dev}
|
||||
PG_DB_SAAS: ${POSTGRES_DB:-cameleer_saas}
|
||||
SAAS_ADMIN_USER: ${SAAS_ADMIN_USER:-admin}
|
||||
SAAS_ADMIN_PASS: ${SAAS_ADMIN_PASS:-admin}
|
||||
TENANT_ADMIN_USER: ${TENANT_ADMIN_USER:-camel}
|
||||
TENANT_ADMIN_PASS: ${TENANT_ADMIN_PASS:-camel}
|
||||
CAMELEER_AUTH_TOKEN: ${CAMELEER_AUTH_TOKEN:-default-bootstrap-token}
|
||||
SERVER_ENDPOINT: http://cameleer3-server:8081
|
||||
SERVER_UI_USER: ${CAMELEER_UI_USER:-admin}
|
||||
SERVER_UI_PASS: ${CAMELEER_UI_PASSWORD:-admin}
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "node -e \"require('http').get('http://localhost:3001/oidc/.well-known/openid-configuration', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))\" && test -f /data/logto-bootstrap.json"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 60
|
||||
start_period: 30s
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.http.routers.cameleer-logto.rule=PathPrefix(`/`)
|
||||
- traefik.http.routers.cameleer-logto.priority=1
|
||||
- traefik.http.routers.cameleer-logto.entrypoints=websecure
|
||||
- traefik.http.routers.cameleer-logto.tls=true
|
||||
- traefik.http.routers.cameleer-logto.service=cameleer-logto
|
||||
- traefik.http.routers.cameleer-logto.middlewares=cameleer-logto-cors
|
||||
- "traefik.http.middlewares.cameleer-logto-cors.headers.accessControlAllowOriginList=${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}:${LOGTO_CONSOLE_PORT:-3002}"
|
||||
- traefik.http.middlewares.cameleer-logto-cors.headers.accessControlAllowMethods=GET,POST,PUT,PATCH,DELETE,OPTIONS
|
||||
- traefik.http.middlewares.cameleer-logto-cors.headers.accessControlAllowHeaders=Authorization,Content-Type
|
||||
- traefik.http.middlewares.cameleer-logto-cors.headers.accessControlAllowCredentials=true
|
||||
- traefik.http.services.cameleer-logto.loadbalancer.server.port=3001
|
||||
- traefik.http.routers.cameleer-logto-console.rule=PathPrefix(`/`)
|
||||
- traefik.http.routers.cameleer-logto-console.entrypoints=admin-console
|
||||
- traefik.http.routers.cameleer-logto-console.tls=true
|
||||
- traefik.http.routers.cameleer-logto-console.service=cameleer-logto-console
|
||||
- traefik.http.services.cameleer-logto-console.loadbalancer.server.port=3002
|
||||
volumes:
|
||||
- ./docker/logto-bootstrap.sh:/scripts/logto-bootstrap.sh:ro
|
||||
- bootstrapdata:/data
|
||||
- cameleer-bootstrapdata:/data
|
||||
networks:
|
||||
- cameleer
|
||||
|
||||
@@ -135,100 +110,36 @@ services:
|
||||
image: ${CAMELEER_IMAGE:-gitea.siegeln.net/cameleer/cameleer-saas}:${VERSION:-latest}
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
postgres:
|
||||
cameleer-logto:
|
||||
condition: service_healthy
|
||||
logto-bootstrap:
|
||||
condition: service_completed_successfully
|
||||
volumes:
|
||||
- bootstrapdata:/data/bootstrap:ro
|
||||
- cameleer-bootstrapdata:/data/bootstrap:ro
|
||||
- cameleer-certs:/certs
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
environment:
|
||||
SPRING_DATASOURCE_URL: jdbc:postgresql://postgres:5432/${POSTGRES_DB:-cameleer_saas}
|
||||
# SaaS database
|
||||
SPRING_DATASOURCE_URL: jdbc:postgresql://cameleer-postgres:5432/${POSTGRES_DB:-cameleer_saas}
|
||||
SPRING_DATASOURCE_USERNAME: ${POSTGRES_USER:-cameleer}
|
||||
SPRING_DATASOURCE_PASSWORD: ${POSTGRES_PASSWORD:-cameleer_dev}
|
||||
LOGTO_ENDPOINT: ${LOGTO_ENDPOINT:-http://logto:3001}
|
||||
LOGTO_PUBLIC_ENDPOINT: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}
|
||||
LOGTO_ISSUER_URI: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}/oidc
|
||||
LOGTO_JWK_SET_URI: ${LOGTO_ENDPOINT:-http://logto:3001}/oidc/jwks
|
||||
LOGTO_M2M_CLIENT_ID: ${LOGTO_M2M_CLIENT_ID:-}
|
||||
LOGTO_M2M_CLIENT_SECRET: ${LOGTO_M2M_CLIENT_SECRET:-}
|
||||
CAMELEER3_SERVER_ENDPOINT: http://cameleer3-server:8081
|
||||
# Identity (Logto)
|
||||
CAMELEER_SAAS_IDENTITY_LOGTOENDPOINT: ${LOGTO_ENDPOINT:-http://cameleer-logto:3001}
|
||||
CAMELEER_SAAS_IDENTITY_LOGTOPUBLICENDPOINT: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}
|
||||
CAMELEER_SAAS_IDENTITY_M2MCLIENTID: ${LOGTO_M2M_CLIENT_ID:-}
|
||||
CAMELEER_SAAS_IDENTITY_M2MCLIENTSECRET: ${LOGTO_M2M_CLIENT_SECRET:-}
|
||||
# Provisioning — passed to per-tenant server containers
|
||||
CAMELEER_SAAS_PROVISIONING_PUBLICHOST: ${PUBLIC_HOST:-localhost}
|
||||
CAMELEER_SAAS_PROVISIONING_PUBLICPROTOCOL: ${PUBLIC_PROTOCOL:-https}
|
||||
CAMELEER_SAAS_PROVISIONING_DATASOURCEUSERNAME: ${POSTGRES_USER:-cameleer}
|
||||
CAMELEER_SAAS_PROVISIONING_DATASOURCEPASSWORD: ${POSTGRES_PASSWORD:-cameleer_dev}
|
||||
CAMELEER_SAAS_PROVISIONING_CLICKHOUSEPASSWORD: ${CLICKHOUSE_PASSWORD:-cameleer_ch}
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.http.routers.saas.rule=PathPrefix(`/platform`)
|
||||
- traefik.http.routers.saas.entrypoints=websecure
|
||||
- traefik.http.routers.saas.tls=true
|
||||
- traefik.http.services.saas.loadbalancer.server.port=8080
|
||||
networks:
|
||||
- cameleer
|
||||
|
||||
cameleer3-server:
|
||||
image: ${CAMELEER3_SERVER_IMAGE:-gitea.siegeln.net/cameleer/cameleer3-server}:${VERSION:-latest}
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
postgres:
|
||||
condition: service_healthy
|
||||
clickhouse:
|
||||
condition: service_started
|
||||
environment:
|
||||
SPRING_DATASOURCE_URL: jdbc:postgresql://postgres:5432/cameleer3
|
||||
SPRING_DATASOURCE_USERNAME: ${POSTGRES_USER:-cameleer}
|
||||
SPRING_DATASOURCE_PASSWORD: ${POSTGRES_PASSWORD:-cameleer_dev}
|
||||
CLICKHOUSE_URL: jdbc:clickhouse://clickhouse:8123/cameleer
|
||||
CAMELEER_AUTH_TOKEN: ${CAMELEER_AUTH_TOKEN:-default-bootstrap-token}
|
||||
CAMELEER_JWT_SECRET: ${CAMELEER_JWT_SECRET:-cameleer-dev-jwt-secret-change-in-production}
|
||||
CAMELEER_TENANT_ID: ${CAMELEER_TENANT_SLUG:-default}
|
||||
CAMELEER_OIDC_ISSUER_URI: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}/oidc
|
||||
CAMELEER_OIDC_JWK_SET_URI: ${LOGTO_ENDPOINT:-http://logto:3001}/oidc/jwks
|
||||
CAMELEER_OIDC_TLS_SKIP_VERIFY: "true" # dev only — disable in production with real certs
|
||||
CAMELEER_OIDC_AUDIENCE: ${CAMELEER_OIDC_AUDIENCE:-https://api.cameleer.local}
|
||||
CAMELEER_CORS_ALLOWED_ORIGINS: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "curl -sf http://localhost:8081/api/v1/health || exit 1"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 30
|
||||
start_period: 15s
|
||||
labels:
|
||||
- traefik.enable=false
|
||||
networks:
|
||||
cameleer:
|
||||
cameleer-traefik:
|
||||
aliases:
|
||||
- cameleer3-server
|
||||
|
||||
cameleer3-server-ui:
|
||||
image: ${CAMELEER3_SERVER_UI_IMAGE:-gitea.siegeln.net/cameleer/cameleer3-server-ui}:${VERSION:-latest}
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
cameleer3-server:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
CAMELEER_API_URL: http://cameleer3-server:8081
|
||||
BASE_PATH: /server
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.http.routers.server-ui.rule=PathPrefix(`/server`)
|
||||
- traefik.http.routers.server-ui.entrypoints=websecure
|
||||
- traefik.http.routers.server-ui.tls=true
|
||||
- traefik.http.routers.server-ui.middlewares=server-ui-strip
|
||||
- traefik.http.middlewares.server-ui-strip.stripprefix.prefixes=/server
|
||||
- traefik.http.routers.server-ui.service=server-ui
|
||||
- traefik.http.services.server-ui.loadbalancer.server.port=80
|
||||
networks:
|
||||
- cameleer
|
||||
|
||||
clickhouse:
|
||||
image: clickhouse/clickhouse-server:latest
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- chdata:/var/lib/clickhouse
|
||||
- ./docker/clickhouse-init.sql:/docker-entrypoint-initdb.d/init.sql:ro
|
||||
- ./docker/clickhouse-users.xml:/etc/clickhouse-server/users.d/default-user.xml:ro
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "clickhouse-client --query 'SELECT 1'"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
group_add:
|
||||
- "${DOCKER_GID:-0}"
|
||||
networks:
|
||||
- cameleer
|
||||
|
||||
@@ -240,7 +151,7 @@ networks:
|
||||
driver: bridge
|
||||
|
||||
volumes:
|
||||
pgdata:
|
||||
chdata:
|
||||
certs:
|
||||
bootstrapdata:
|
||||
cameleer-pgdata:
|
||||
cameleer-chdata:
|
||||
cameleer-certs:
|
||||
cameleer-bootstrapdata:
|
||||
|
||||
4
docker/cameleer-clickhouse/Dockerfile
Normal file
4
docker/cameleer-clickhouse/Dockerfile
Normal file
@@ -0,0 +1,4 @@
|
||||
FROM clickhouse/clickhouse-server:latest
|
||||
COPY init.sql /docker-entrypoint-initdb.d/init.sql
|
||||
COPY users.xml /etc/clickhouse-server/users.d/default-user.xml
|
||||
COPY prometheus.xml /etc/clickhouse-server/config.d/prometheus.xml
|
||||
9
docker/cameleer-clickhouse/prometheus.xml
Normal file
9
docker/cameleer-clickhouse/prometheus.xml
Normal file
@@ -0,0 +1,9 @@
|
||||
<clickhouse>
|
||||
<prometheus>
|
||||
<endpoint>/metrics</endpoint>
|
||||
<port>9363</port>
|
||||
<metrics>true</metrics>
|
||||
<events>true</events>
|
||||
<asynchronous_metrics>true</asynchronous_metrics>
|
||||
</prometheus>
|
||||
</clickhouse>
|
||||
16
docker/cameleer-clickhouse/users.xml
Normal file
16
docker/cameleer-clickhouse/users.xml
Normal file
@@ -0,0 +1,16 @@
|
||||
<clickhouse>
|
||||
<users>
|
||||
<default remove="remove">
|
||||
</default>
|
||||
|
||||
<default>
|
||||
<profile>default</profile>
|
||||
<networks>
|
||||
<ip>::/0</ip>
|
||||
</networks>
|
||||
<password from_env="CLICKHOUSE_PASSWORD" />
|
||||
<quota>default</quota>
|
||||
<access_management>0</access_management>
|
||||
</default>
|
||||
</users>
|
||||
</clickhouse>
|
||||
65
docker/cameleer-logto/logto-entrypoint.sh
Normal file
65
docker/cameleer-logto/logto-entrypoint.sh
Normal file
@@ -0,0 +1,65 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
# Save the real public endpoints for after bootstrap
|
||||
REAL_ENDPOINT="$ENDPOINT"
|
||||
REAL_ADMIN_ENDPOINT="$ADMIN_ENDPOINT"
|
||||
|
||||
echo "[entrypoint] Seeding Logto database..."
|
||||
npm run cli db seed -- --swe 2>/dev/null || true
|
||||
|
||||
echo "[entrypoint] Deploying database alterations..."
|
||||
npm run cli db alteration deploy 2>/dev/null || true
|
||||
|
||||
# Start Logto with localhost endpoints so it can reach itself without Traefik
|
||||
export ENDPOINT="http://localhost:3001"
|
||||
export ADMIN_ENDPOINT="http://localhost:3002"
|
||||
|
||||
echo "[entrypoint] Starting Logto (bootstrap mode)..."
|
||||
npm start &
|
||||
LOGTO_PID=$!
|
||||
|
||||
echo "[entrypoint] Waiting for Logto to be ready..."
|
||||
for i in $(seq 1 120); do
|
||||
if node -e "require('http').get('http://localhost:3001/oidc/.well-known/openid-configuration', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))" 2>/dev/null; then
|
||||
echo "[entrypoint] Logto is ready."
|
||||
break
|
||||
fi
|
||||
if [ "$i" -eq 120 ]; then
|
||||
echo "[entrypoint] ERROR: Logto not ready after 120s"
|
||||
exit 1
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
# Run bootstrap — use localhost endpoints, skip Host headers (BOOTSTRAP_LOCAL flag)
|
||||
# PUBLIC_HOST and PUBLIC_PROTOCOL stay real for redirect URI generation
|
||||
BOOTSTRAP_FILE="/data/logto-bootstrap.json"
|
||||
export LOGTO_ENDPOINT="http://localhost:3001"
|
||||
export LOGTO_ADMIN_ENDPOINT="http://localhost:3002"
|
||||
export BOOTSTRAP_LOCAL="true"
|
||||
|
||||
if [ -f "$BOOTSTRAP_FILE" ]; then
|
||||
CACHED_SECRET=$(jq -r '.m2mClientSecret // empty' "$BOOTSTRAP_FILE" 2>/dev/null)
|
||||
CACHED_SPA=$(jq -r '.spaClientId // empty' "$BOOTSTRAP_FILE" 2>/dev/null)
|
||||
if [ -n "$CACHED_SECRET" ] && [ -n "$CACHED_SPA" ]; then
|
||||
echo "[entrypoint] Bootstrap already complete."
|
||||
else
|
||||
echo "[entrypoint] Incomplete bootstrap found, re-running..."
|
||||
/scripts/logto-bootstrap.sh
|
||||
fi
|
||||
else
|
||||
echo "[entrypoint] Running bootstrap..."
|
||||
/scripts/logto-bootstrap.sh
|
||||
fi
|
||||
|
||||
# Restart Logto with real public endpoints
|
||||
echo "[entrypoint] Bootstrap done. Restarting Logto with public endpoints..."
|
||||
kill $LOGTO_PID 2>/dev/null || true
|
||||
wait $LOGTO_PID 2>/dev/null || true
|
||||
|
||||
export ENDPOINT="$REAL_ENDPOINT"
|
||||
export ADMIN_ENDPOINT="$REAL_ADMIN_ENDPOINT"
|
||||
|
||||
echo "[entrypoint] Starting Logto (production mode)..."
|
||||
exec npm start
|
||||
3
docker/cameleer-postgres/Dockerfile
Normal file
3
docker/cameleer-postgres/Dockerfile
Normal file
@@ -0,0 +1,3 @@
|
||||
FROM postgres:16-alpine
|
||||
COPY init-databases.sh /docker-entrypoint-initdb.d/init-databases.sh
|
||||
RUN chmod +x /docker-entrypoint-initdb.d/init-databases.sh
|
||||
7
docker/cameleer-traefik/Dockerfile
Normal file
7
docker/cameleer-traefik/Dockerfile
Normal file
@@ -0,0 +1,7 @@
|
||||
FROM traefik:v3
|
||||
RUN apk add --no-cache openssl
|
||||
COPY traefik.yml /etc/traefik/traefik.yml
|
||||
COPY traefik-dynamic.yml /etc/traefik/dynamic.yml
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
RUN chmod +x /entrypoint.sh
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
60
docker/cameleer-traefik/entrypoint.sh
Normal file
60
docker/cameleer-traefik/entrypoint.sh
Normal file
@@ -0,0 +1,60 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
CERTS_DIR="/certs"
|
||||
|
||||
# Skip if certs already exist (idempotent)
|
||||
if [ ! -f "$CERTS_DIR/cert.pem" ]; then
|
||||
mkdir -p "$CERTS_DIR"
|
||||
|
||||
if [ -n "$CERT_FILE" ] && [ -n "$KEY_FILE" ]; then
|
||||
# User-supplied certificate
|
||||
echo "[certs] Installing user-supplied certificate..."
|
||||
cp "$CERT_FILE" "$CERTS_DIR/cert.pem"
|
||||
cp "$KEY_FILE" "$CERTS_DIR/key.pem"
|
||||
if [ -n "$CA_FILE" ]; then
|
||||
cp "$CA_FILE" "$CERTS_DIR/ca.pem"
|
||||
fi
|
||||
# Validate key matches cert
|
||||
CERT_MOD=$(openssl x509 -noout -modulus -in "$CERTS_DIR/cert.pem" 2>/dev/null | md5sum)
|
||||
KEY_MOD=$(openssl rsa -noout -modulus -in "$CERTS_DIR/key.pem" 2>/dev/null | md5sum)
|
||||
if [ "$CERT_MOD" != "$KEY_MOD" ]; then
|
||||
echo "[certs] ERROR: Certificate and key do not match!"
|
||||
rm -f "$CERTS_DIR/cert.pem" "$CERTS_DIR/key.pem" "$CERTS_DIR/ca.pem"
|
||||
exit 1
|
||||
fi
|
||||
SELF_SIGNED=false
|
||||
echo "[certs] Installed user-supplied certificate."
|
||||
else
|
||||
# Generate self-signed certificate
|
||||
HOST="${PUBLIC_HOST:-localhost}"
|
||||
echo "[certs] Generating self-signed certificate for $HOST..."
|
||||
openssl req -x509 -newkey rsa:4096 \
|
||||
-keyout "$CERTS_DIR/key.pem" -out "$CERTS_DIR/cert.pem" \
|
||||
-days 365 -nodes \
|
||||
-subj "/CN=$HOST" \
|
||||
-addext "subjectAltName=DNS:$HOST,DNS:*.$HOST"
|
||||
SELF_SIGNED=true
|
||||
echo "[certs] Generated self-signed certificate for $HOST."
|
||||
fi
|
||||
|
||||
# Write metadata for SaaS app to seed DB
|
||||
SUBJECT=$(openssl x509 -noout -subject -in "$CERTS_DIR/cert.pem" 2>/dev/null | sed 's/subject=//')
|
||||
FINGERPRINT=$(openssl x509 -noout -fingerprint -sha256 -in "$CERTS_DIR/cert.pem" 2>/dev/null | sed 's/.*=//')
|
||||
NOT_BEFORE=$(openssl x509 -noout -startdate -in "$CERTS_DIR/cert.pem" 2>/dev/null | sed 's/notBefore=//')
|
||||
NOT_AFTER=$(openssl x509 -noout -enddate -in "$CERTS_DIR/cert.pem" 2>/dev/null | sed 's/notAfter=//')
|
||||
HAS_CA=false
|
||||
[ -f "$CERTS_DIR/ca.pem" ] && HAS_CA=true
|
||||
cat > "$CERTS_DIR/meta.json" <<METAEOF
|
||||
{"subject":"$SUBJECT","fingerprint":"$FINGERPRINT","selfSigned":$SELF_SIGNED,"hasCa":$HAS_CA,"notBefore":"$NOT_BEFORE","notAfter":"$NOT_AFTER"}
|
||||
METAEOF
|
||||
|
||||
mkdir -p "$CERTS_DIR/staged" "$CERTS_DIR/prev"
|
||||
chmod 775 "$CERTS_DIR" "$CERTS_DIR/staged" "$CERTS_DIR/prev"
|
||||
chmod 660 "$CERTS_DIR"/*.pem 2>/dev/null || true
|
||||
else
|
||||
echo "[certs] Certificates already exist, skipping generation."
|
||||
fi
|
||||
|
||||
# Start Traefik
|
||||
exec traefik "$@"
|
||||
@@ -15,3 +15,10 @@ http:
|
||||
regex: "^(https?://[^/]+)/?$"
|
||||
replacement: "${1}/platform/"
|
||||
permanent: false
|
||||
|
||||
tls:
|
||||
stores:
|
||||
default:
|
||||
defaultCertificate:
|
||||
certFile: /certs/cert.pem
|
||||
keyFile: /certs/key.pem
|
||||
@@ -21,10 +21,3 @@ providers:
|
||||
network: cameleer
|
||||
file:
|
||||
filename: /etc/traefik/dynamic.yml
|
||||
|
||||
tls:
|
||||
stores:
|
||||
default:
|
||||
defaultCertificate:
|
||||
certFile: /etc/traefik/certs/cert.pem
|
||||
keyFile: /etc/traefik/certs/key.pem
|
||||
@@ -1,9 +0,0 @@
|
||||
<clickhouse>
|
||||
<users>
|
||||
<default>
|
||||
<networks>
|
||||
<ip>::/0</ip>
|
||||
</networks>
|
||||
</default>
|
||||
</users>
|
||||
</clickhouse>
|
||||
@@ -7,12 +7,12 @@ set -e
|
||||
# Configures cameleer3-server OIDC.
|
||||
# Idempotent: checks existence before creating.
|
||||
|
||||
LOGTO_ENDPOINT="${LOGTO_ENDPOINT:-http://logto:3001}"
|
||||
LOGTO_ADMIN_ENDPOINT="${LOGTO_ADMIN_ENDPOINT:-http://logto:3002}"
|
||||
LOGTO_ENDPOINT="${LOGTO_ENDPOINT:-http://cameleer-logto:3001}"
|
||||
LOGTO_ADMIN_ENDPOINT="${LOGTO_ADMIN_ENDPOINT:-http://cameleer-logto:3002}"
|
||||
LOGTO_PUBLIC_ENDPOINT="${LOGTO_PUBLIC_ENDPOINT:-http://localhost:3001}"
|
||||
MGMT_API_RESOURCE="https://default.logto.app/api"
|
||||
BOOTSTRAP_FILE="/data/logto-bootstrap.json"
|
||||
PG_HOST="${PG_HOST:-postgres}"
|
||||
PG_HOST="${PG_HOST:-cameleer-postgres}"
|
||||
PG_USER="${PG_USER:-cameleer}"
|
||||
PG_DB_LOGTO="logto"
|
||||
PG_DB_SAAS="${PG_DB_SAAS:-cameleer_saas}"
|
||||
@@ -27,32 +27,38 @@ API_RESOURCE_NAME="Cameleer SaaS API"
|
||||
# Users (configurable via env vars)
|
||||
SAAS_ADMIN_USER="${SAAS_ADMIN_USER:-admin}"
|
||||
SAAS_ADMIN_PASS="${SAAS_ADMIN_PASS:-admin}"
|
||||
TENANT_ADMIN_USER="${TENANT_ADMIN_USER:-camel}"
|
||||
TENANT_ADMIN_PASS="${TENANT_ADMIN_PASS:-camel}"
|
||||
|
||||
# Tenant config
|
||||
TENANT_NAME="Example Tenant"
|
||||
TENANT_SLUG="default"
|
||||
BOOTSTRAP_TOKEN="${CAMELEER_AUTH_TOKEN:-default-bootstrap-token}"
|
||||
|
||||
# Server config
|
||||
SERVER_ENDPOINT="${SERVER_ENDPOINT:-http://cameleer3-server:8081}"
|
||||
SERVER_UI_USER="${SERVER_UI_USER:-admin}"
|
||||
SERVER_UI_PASS="${SERVER_UI_PASS:-admin}"
|
||||
# No server config — servers are provisioned dynamically by the admin console
|
||||
|
||||
# Redirect URIs (derived from PUBLIC_HOST and PUBLIC_PROTOCOL)
|
||||
HOST="${PUBLIC_HOST:-localhost}"
|
||||
PROTO="${PUBLIC_PROTOCOL:-https}"
|
||||
SPA_REDIRECT_URIS="[\"${PROTO}://${HOST}/platform/callback\"]"
|
||||
SPA_POST_LOGOUT_URIS="[\"${PROTO}://${HOST}/platform/login\"]"
|
||||
SPA_POST_LOGOUT_URIS="[\"${PROTO}://${HOST}/platform/login\",\"${PROTO}://${HOST}/platform/\"]"
|
||||
TRAD_REDIRECT_URIS="[\"${PROTO}://${HOST}/oidc/callback\",\"${PROTO}://${HOST}/server/oidc/callback\"]"
|
||||
TRAD_POST_LOGOUT_URIS="[\"${PROTO}://${HOST}\",\"${PROTO}://${HOST}/server\",\"${PROTO}://${HOST}/server/login?local\"]"
|
||||
|
||||
log() { echo "[bootstrap] $1"; }
|
||||
pgpass() { PGPASSWORD="${PG_PASSWORD:-cameleer_dev}"; export PGPASSWORD; }
|
||||
|
||||
# Install jq + curl
|
||||
apk add --no-cache jq curl >/dev/null 2>&1
|
||||
# When BOOTSTRAP_LOCAL=true (running inside Logto container with localhost endpoints),
|
||||
# skip Host/X-Forwarded-Proto headers — they cause issuer mismatches with localhost
|
||||
if [ "$BOOTSTRAP_LOCAL" = "true" ]; then
|
||||
HOST_ARGS=""
|
||||
ADMIN_HOST_ARGS=""
|
||||
else
|
||||
HOST_ARGS="-H Host:${HOST}"
|
||||
ADMIN_HOST_ARGS="-H Host:${HOST}:3002 -H X-Forwarded-Proto:https"
|
||||
fi
|
||||
|
||||
# Install jq + curl if not already available (deps are baked into cameleer-logto image)
|
||||
if ! command -v jq >/dev/null 2>&1 || ! command -v curl >/dev/null 2>&1; then
|
||||
if command -v apk >/dev/null 2>&1; then
|
||||
apk add --no-cache jq curl >/dev/null 2>&1
|
||||
elif command -v apt-get >/dev/null 2>&1; then
|
||||
apt-get update -qq && apt-get install -y -qq jq curl >/dev/null 2>&1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Read cached secrets from previous run
|
||||
if [ -f "$BOOTSTRAP_FILE" ]; then
|
||||
@@ -80,15 +86,7 @@ for i in $(seq 1 60); do
|
||||
sleep 1
|
||||
done
|
||||
|
||||
log "Waiting for cameleer3-server..."
|
||||
for i in $(seq 1 60); do
|
||||
if curl -sf "${SERVER_ENDPOINT}/api/v1/health" >/dev/null 2>&1; then
|
||||
log "cameleer3-server is ready."
|
||||
break
|
||||
fi
|
||||
[ "$i" -eq 60 ] && { log "WARNING: cameleer3-server not ready after 60s — skipping OIDC config"; }
|
||||
sleep 1
|
||||
done
|
||||
# No server wait — servers are provisioned dynamically by the admin console
|
||||
|
||||
# ============================================================
|
||||
# PHASE 2: Get Management API token
|
||||
@@ -103,15 +101,14 @@ M_DEFAULT_SECRET=$(psql -h "$PG_HOST" -U "$PG_USER" -d "$PG_DB_LOGTO" -t -A -c \
|
||||
get_admin_token() {
|
||||
curl -s -X POST "${LOGTO_ADMIN_ENDPOINT}/oidc/token" \
|
||||
-H "Content-Type: application/x-www-form-urlencoded" \
|
||||
-H "Host: ${HOST}:3002" \
|
||||
-H "X-Forwarded-Proto: https" \
|
||||
$ADMIN_HOST_ARGS \
|
||||
-d "grant_type=client_credentials&client_id=${1}&client_secret=${2}&resource=${MGMT_API_RESOURCE}&scope=all"
|
||||
}
|
||||
|
||||
get_default_token() {
|
||||
curl -s -X POST "${LOGTO_ENDPOINT}/oidc/token" \
|
||||
-H "Content-Type: application/x-www-form-urlencoded" \
|
||||
-H "Host: ${HOST}" \
|
||||
$HOST_ARGS \
|
||||
-d "grant_type=client_credentials&client_id=${1}&client_secret=${2}&resource=${MGMT_API_RESOURCE}&scope=all"
|
||||
}
|
||||
|
||||
@@ -124,7 +121,7 @@ log "Got Management API token."
|
||||
# Verify Management API is fully ready (Logto may still be initializing internally)
|
||||
log "Verifying Management API is responsive..."
|
||||
for i in $(seq 1 30); do
|
||||
VERIFY_RESPONSE=$(curl -s -H "Authorization: Bearer $TOKEN" -H "Host: ${HOST}" "${LOGTO_ENDPOINT}/api/roles" 2>/dev/null)
|
||||
VERIFY_RESPONSE=$(curl -s -H "Authorization: Bearer $TOKEN" $HOST_ARGS "${LOGTO_ENDPOINT}/api/roles" 2>/dev/null)
|
||||
if echo "$VERIFY_RESPONSE" | jq -e 'type == "array"' >/dev/null 2>&1; then
|
||||
log "Management API is ready."
|
||||
break
|
||||
@@ -135,21 +132,21 @@ done
|
||||
|
||||
# --- Helper: Logto API calls ---
|
||||
api_get() {
|
||||
curl -s -H "Authorization: Bearer $TOKEN" -H "Host: ${HOST}" "${LOGTO_ENDPOINT}${1}" 2>/dev/null || echo "[]"
|
||||
curl -s -H "Authorization: Bearer $TOKEN" $HOST_ARGS "${LOGTO_ENDPOINT}${1}" 2>/dev/null || echo "[]"
|
||||
}
|
||||
api_post() {
|
||||
curl -s -X POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -H "Host: ${HOST}" \
|
||||
curl -s -X POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" $HOST_ARGS \
|
||||
-d "$2" "${LOGTO_ENDPOINT}${1}" 2>/dev/null || true
|
||||
}
|
||||
api_put() {
|
||||
curl -s -X PUT -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -H "Host: ${HOST}" \
|
||||
curl -s -X PUT -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" $HOST_ARGS \
|
||||
-d "$2" "${LOGTO_ENDPOINT}${1}" 2>/dev/null || true
|
||||
}
|
||||
api_delete() {
|
||||
curl -s -X DELETE -H "Authorization: Bearer $TOKEN" -H "Host: ${HOST}" "${LOGTO_ENDPOINT}${1}" 2>/dev/null || true
|
||||
curl -s -X DELETE -H "Authorization: Bearer $TOKEN" $HOST_ARGS "${LOGTO_ENDPOINT}${1}" 2>/dev/null || true
|
||||
}
|
||||
api_patch() {
|
||||
curl -s -X PATCH -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -H "Host: ${HOST}" \
|
||||
curl -s -X PATCH -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" $HOST_ARGS \
|
||||
-d "$2" "${LOGTO_ENDPOINT}${1}" 2>/dev/null || true
|
||||
}
|
||||
|
||||
@@ -342,8 +339,7 @@ fi
|
||||
# ============================================================
|
||||
|
||||
# --- Organization roles: owner, operator, viewer ---
|
||||
# Note: platform-admin / saas-vendor global role is NOT created here.
|
||||
# It is injected via docker/vendor-seed.sh on the hosted SaaS environment only.
|
||||
# Note: saas-vendor global role is created in Phase 12 and assigned to the admin user.
|
||||
log "Creating organization roles..."
|
||||
EXISTING_ORG_ROLES=$(api_get "/api/organization-roles")
|
||||
|
||||
@@ -404,8 +400,6 @@ else
|
||||
}")
|
||||
ADMIN_USER_ID=$(echo "$ADMIN_RESPONSE" | jq -r '.id')
|
||||
log "Created platform owner: $ADMIN_USER_ID"
|
||||
# No global role assigned — owner role is org-scoped.
|
||||
# SaaS vendor role is injected via docker/vendor-seed.sh on hosted environments.
|
||||
fi
|
||||
|
||||
# --- Grant SaaS admin Logto console access (admin tenant, port 3002) ---
|
||||
@@ -422,8 +416,7 @@ if [ -z "$M_ADMIN_SECRET" ]; then
|
||||
else
|
||||
ADMIN_TOKEN_RESPONSE=$(curl -s -X POST "${LOGTO_ADMIN_ENDPOINT}/oidc/token" \
|
||||
-H "Content-Type: application/x-www-form-urlencoded" \
|
||||
-H "Host: ${HOST}:3002" \
|
||||
-H "X-Forwarded-Proto: https" \
|
||||
$ADMIN_HOST_ARGS \
|
||||
-d "grant_type=client_credentials&client_id=m-admin&client_secret=${M_ADMIN_SECRET}&resource=${ADMIN_MGMT_RESOURCE}&scope=all")
|
||||
ADMIN_TOKEN=$(echo "$ADMIN_TOKEN_RESPONSE" | jq -r '.access_token' 2>/dev/null)
|
||||
|
||||
@@ -435,14 +428,14 @@ else
|
||||
|
||||
# Admin-tenant API helpers (port 3002, admin token)
|
||||
admin_api_get() {
|
||||
curl -s -H "Authorization: Bearer $ADMIN_TOKEN" -H "Host: ${HOST}:3002" -H "X-Forwarded-Proto: https" "${LOGTO_ADMIN_ENDPOINT}${1}" 2>/dev/null || echo "[]"
|
||||
curl -s -H "Authorization: Bearer $ADMIN_TOKEN" $ADMIN_HOST_ARGS "${LOGTO_ADMIN_ENDPOINT}${1}" 2>/dev/null || echo "[]"
|
||||
}
|
||||
admin_api_post() {
|
||||
curl -s -X POST -H "Authorization: Bearer $ADMIN_TOKEN" -H "Content-Type: application/json" -H "Host: ${HOST}:3002" -H "X-Forwarded-Proto: https" \
|
||||
curl -s -X POST -H "Authorization: Bearer $ADMIN_TOKEN" -H "Content-Type: application/json" $ADMIN_HOST_ARGS \
|
||||
-d "$2" "${LOGTO_ADMIN_ENDPOINT}${1}" 2>/dev/null || true
|
||||
}
|
||||
admin_api_patch() {
|
||||
curl -s -X PATCH -H "Authorization: Bearer $ADMIN_TOKEN" -H "Content-Type: application/json" -H "Host: ${HOST}:3002" -H "X-Forwarded-Proto: https" \
|
||||
curl -s -X PATCH -H "Authorization: Bearer $ADMIN_TOKEN" -H "Content-Type: application/json" $ADMIN_HOST_ARGS \
|
||||
-d "$2" "${LOGTO_ADMIN_ENDPOINT}${1}" 2>/dev/null || true
|
||||
}
|
||||
|
||||
@@ -479,16 +472,31 @@ if [ -n "$ADMIN_TENANT_USER_ID" ] && [ "$ADMIN_TENANT_USER_ID" != "null" ]; then
|
||||
log "WARNING: admin tenant roles not found"
|
||||
fi
|
||||
|
||||
# Add to t-default organization with admin role
|
||||
admin_api_post "/api/organizations/t-default/users" "{\"userIds\": [\"$ADMIN_TENANT_USER_ID\"]}" >/dev/null 2>&1
|
||||
TENANT_ADMIN_ORG_ROLE_ID=$(admin_api_get "/api/organization-roles" | jq -r '.[] | select(.name == "admin") | .id')
|
||||
if [ -n "$TENANT_ADMIN_ORG_ROLE_ID" ] && [ "$TENANT_ADMIN_ORG_ROLE_ID" != "null" ]; then
|
||||
admin_api_post "/api/organizations/t-default/users/$ADMIN_TENANT_USER_ID/roles" "{\"organizationRoleIds\": [\"$TENANT_ADMIN_ORG_ROLE_ID\"]}" >/dev/null 2>&1
|
||||
log "Added to t-default organization with admin role."
|
||||
fi
|
||||
# Switch admin tenant sign-in mode from Register to SignIn (user already created)
|
||||
# Switch sign-in mode from Register to SignIn (admin user already created)
|
||||
admin_api_patch "/api/sign-in-exp" '{"signInMode": "SignIn"}' >/dev/null 2>&1
|
||||
log "Set admin tenant sign-in mode to SignIn."
|
||||
log "Set sign-in mode to SignIn."
|
||||
|
||||
# Register admin-console redirect URIs (Logto ships with empty URIs)
|
||||
ADMIN_PUBLIC="${ADMIN_ENDPOINT:-${PROTO}://${HOST}:3002}"
|
||||
admin_api_patch "/api/applications/admin-console" "{
|
||||
\"oidcClientMetadata\": {
|
||||
\"redirectUris\": [\"${ADMIN_PUBLIC}/console/callback\"],
|
||||
\"postLogoutRedirectUris\": [\"${ADMIN_PUBLIC}/console\"]
|
||||
}
|
||||
}" >/dev/null 2>&1
|
||||
log "Registered admin-console redirect URIs."
|
||||
|
||||
# Add admin user to Logto's internal organizations (required for console login)
|
||||
for ORG_ID in t-default t-admin; do
|
||||
admin_api_post "/api/organizations/${ORG_ID}/users" "{\"userIds\": [\"$ADMIN_TENANT_USER_ID\"]}" >/dev/null 2>&1
|
||||
done
|
||||
ADMIN_ORG_ROLE_ID=$(admin_api_get "/api/organization-roles" | jq -r '.[] | select(.name == "admin") | .id')
|
||||
if [ -n "$ADMIN_ORG_ROLE_ID" ] && [ "$ADMIN_ORG_ROLE_ID" != "null" ]; then
|
||||
for ORG_ID in t-default t-admin; do
|
||||
admin_api_post "/api/organizations/${ORG_ID}/users/${ADMIN_TENANT_USER_ID}/roles" "{\"organizationRoleIds\": [\"$ADMIN_ORG_ROLE_ID\"]}" >/dev/null 2>&1
|
||||
done
|
||||
fi
|
||||
log "Added admin to Logto console organizations."
|
||||
|
||||
log "SaaS admin granted Logto console access."
|
||||
else
|
||||
@@ -498,130 +506,10 @@ fi
|
||||
fi # end: ADMIN_TOKEN check
|
||||
fi # end: M_ADMIN_SECRET check
|
||||
|
||||
# --- Viewer user (for testing read-only OIDC role in server) ---
|
||||
log "Checking for viewer user '$TENANT_ADMIN_USER'..."
|
||||
TENANT_USER_ID=$(api_get "/api/users?search=$TENANT_ADMIN_USER" | jq -r ".[] | select(.username == \"$TENANT_ADMIN_USER\") | .id")
|
||||
if [ -n "$TENANT_USER_ID" ]; then
|
||||
log "Viewer user exists: $TENANT_USER_ID"
|
||||
else
|
||||
log "Creating viewer user '$TENANT_ADMIN_USER'..."
|
||||
TENANT_RESPONSE=$(api_post "/api/users" "{
|
||||
\"username\": \"$TENANT_ADMIN_USER\",
|
||||
\"password\": \"$TENANT_ADMIN_PASS\",
|
||||
\"name\": \"Viewer\"
|
||||
}")
|
||||
TENANT_USER_ID=$(echo "$TENANT_RESPONSE" | jq -r '.id')
|
||||
log "Created viewer user: $TENANT_USER_ID"
|
||||
fi
|
||||
|
||||
# ============================================================
|
||||
# PHASE 6: Create organization + add users
|
||||
# ============================================================
|
||||
|
||||
log "Checking for organization '$TENANT_NAME'..."
|
||||
EXISTING_ORGS=$(api_get "/api/organizations")
|
||||
ORG_ID=$(echo "$EXISTING_ORGS" | jq -r ".[] | select(.name == \"$TENANT_NAME\") | .id")
|
||||
|
||||
if [ -n "$ORG_ID" ]; then
|
||||
log "Organization exists: $ORG_ID"
|
||||
else
|
||||
log "Creating organization '$TENANT_NAME'..."
|
||||
ORG_RESPONSE=$(api_post "/api/organizations" "{
|
||||
\"name\": \"$TENANT_NAME\",
|
||||
\"description\": \"Bootstrap demo tenant\"
|
||||
}")
|
||||
ORG_ID=$(echo "$ORG_RESPONSE" | jq -r '.id')
|
||||
log "Created organization: $ORG_ID"
|
||||
fi
|
||||
|
||||
# Add users to organization
|
||||
if [ -n "$ADMIN_USER_ID" ] && [ "$ADMIN_USER_ID" != "null" ]; then
|
||||
log "Adding platform owner to organization..."
|
||||
api_post "/api/organizations/$ORG_ID/users" "{\"userIds\": [\"$ADMIN_USER_ID\"]}" >/dev/null 2>&1
|
||||
api_put "/api/organizations/$ORG_ID/users/$ADMIN_USER_ID/roles" "{\"organizationRoleIds\": [\"$ORG_OWNER_ROLE_ID\"]}" >/dev/null 2>&1
|
||||
log "Platform owner added to org with owner role."
|
||||
fi
|
||||
|
||||
if [ -n "$TENANT_USER_ID" ] && [ "$TENANT_USER_ID" != "null" ]; then
|
||||
log "Adding viewer user to organization..."
|
||||
api_post "/api/organizations/$ORG_ID/users" "{\"userIds\": [\"$TENANT_USER_ID\"]}" >/dev/null 2>&1
|
||||
api_put "/api/organizations/$ORG_ID/users/$TENANT_USER_ID/roles" "{\"organizationRoleIds\": [\"$ORG_VIEWER_ROLE_ID\"]}" >/dev/null 2>&1
|
||||
log "Viewer user added to org with viewer role."
|
||||
fi
|
||||
|
||||
# ============================================================
|
||||
# PHASE 7: Configure cameleer3-server OIDC
|
||||
# ============================================================
|
||||
|
||||
SERVER_HEALTHY="no"
|
||||
for i in 1 2 3; do
|
||||
if curl -sf "${SERVER_ENDPOINT}/api/v1/health" >/dev/null 2>&1; then
|
||||
SERVER_HEALTHY="yes"
|
||||
break
|
||||
fi
|
||||
sleep 2
|
||||
done
|
||||
log "Phase 7 check: SERVER_HEALTHY=$SERVER_HEALTHY, TRAD_SECRET length=${#TRAD_SECRET}"
|
||||
|
||||
if [ "$SERVER_HEALTHY" = "yes" ] && [ -n "$TRAD_SECRET" ]; then
|
||||
log "Configuring cameleer3-server OIDC..."
|
||||
|
||||
# Login to server as admin
|
||||
SERVER_TOKEN_RESPONSE=$(curl -s -X POST "${SERVER_ENDPOINT}/api/v1/auth/login" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"username\": \"$SERVER_UI_USER\", \"password\": \"$SERVER_UI_PASS\"}")
|
||||
SERVER_TOKEN=$(echo "$SERVER_TOKEN_RESPONSE" | jq -r '.accessToken' 2>/dev/null)
|
||||
|
||||
if [ -n "$SERVER_TOKEN" ] && [ "$SERVER_TOKEN" != "null" ]; then
|
||||
# Configure OIDC
|
||||
OIDC_RESPONSE=$(curl -s -X PUT "${SERVER_ENDPOINT}/api/v1/admin/oidc" \
|
||||
-H "Authorization: Bearer $SERVER_TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{
|
||||
\"enabled\": true,
|
||||
\"issuerUri\": \"$LOGTO_PUBLIC_ENDPOINT/oidc\",
|
||||
\"clientId\": \"$TRAD_ID\",
|
||||
\"clientSecret\": \"$TRAD_SECRET\",
|
||||
\"autoSignup\": true,
|
||||
\"defaultRoles\": [\"VIEWER\"],
|
||||
\"displayNameClaim\": \"name\",
|
||||
\"rolesClaim\": \"roles\",
|
||||
\"audience\": \"$API_RESOURCE_INDICATOR\",
|
||||
\"additionalScopes\": []
|
||||
}")
|
||||
log "OIDC config response: $(echo "$OIDC_RESPONSE" | head -c 200)"
|
||||
log "cameleer3-server OIDC configured."
|
||||
|
||||
# Seed claim mapping rules (roles → server RBAC)
|
||||
log "Seeding claim mapping rules..."
|
||||
EXISTING_MAPPINGS=$(curl -s -H "Authorization: Bearer $SERVER_TOKEN" \
|
||||
"${SERVER_ENDPOINT}/api/v1/admin/claim-mappings" 2>/dev/null || echo "[]")
|
||||
|
||||
seed_claim_mapping() {
|
||||
local match_value="$1"
|
||||
local target="$2"
|
||||
local priority="$3"
|
||||
local exists=$(echo "$EXISTING_MAPPINGS" | jq -r ".[] | select(.matchValue == \"$match_value\") | .id")
|
||||
if [ -n "$exists" ]; then
|
||||
log " Claim mapping '$match_value' → $target exists"
|
||||
else
|
||||
local resp=$(curl -s -X POST "${SERVER_ENDPOINT}/api/v1/admin/claim-mappings" \
|
||||
-H "Authorization: Bearer $SERVER_TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{\"claim\":\"roles\",\"matchType\":\"contains\",\"matchValue\":\"$match_value\",\"action\":\"assignRole\",\"target\":\"$target\",\"priority\":$priority}")
|
||||
log " Created claim mapping '$match_value' → $target"
|
||||
fi
|
||||
}
|
||||
|
||||
seed_claim_mapping "server:admin" "ADMIN" 10
|
||||
seed_claim_mapping "server:operator" "OPERATOR" 20
|
||||
log "Claim mapping rules seeded."
|
||||
else
|
||||
log "WARNING: Could not login to cameleer3-server — skipping OIDC config"
|
||||
fi
|
||||
else
|
||||
log "WARNING: cameleer3-server not available or no Traditional app secret — skipping OIDC config"
|
||||
fi
|
||||
# No viewer user — tenant users are created by the admin during tenant provisioning.
|
||||
# No example organization — tenants are created via the admin console.
|
||||
# No server OIDC config — each provisioned server gets OIDC from env vars.
|
||||
ORG_ID=""
|
||||
|
||||
# ============================================================
|
||||
# PHASE 7b: Configure Logto Custom JWT for access tokens
|
||||
@@ -702,25 +590,52 @@ cat > "$BOOTSTRAP_FILE" <<EOF
|
||||
"tradAppId": "$TRAD_ID",
|
||||
"tradAppSecret": "$TRAD_SECRET",
|
||||
"apiResourceIndicator": "$API_RESOURCE_INDICATOR",
|
||||
"organizationId": "$ORG_ID",
|
||||
"tenantName": "$TENANT_NAME",
|
||||
"tenantSlug": "$TENANT_SLUG",
|
||||
"bootstrapToken": "$BOOTSTRAP_TOKEN",
|
||||
"platformAdminUser": "$SAAS_ADMIN_USER",
|
||||
"tenantAdminUser": "$TENANT_ADMIN_USER",
|
||||
"oidcIssuerUri": "${LOGTO_ENDPOINT}/oidc",
|
||||
"oidcAudience": "$API_RESOURCE_INDICATOR"
|
||||
}
|
||||
EOF
|
||||
chmod 644 "$BOOTSTRAP_FILE"
|
||||
|
||||
# ============================================================
|
||||
# Phase 12: SaaS Admin Role
|
||||
# ============================================================
|
||||
|
||||
log ""
|
||||
log "=== Phase 12: SaaS Admin Role ==="
|
||||
|
||||
# Create saas-vendor global role with all API scopes
|
||||
log "Checking for saas-vendor role..."
|
||||
EXISTING_ROLES=$(api_get "/api/roles")
|
||||
VENDOR_ROLE_ID=$(echo "$EXISTING_ROLES" | jq -r '.[] | select(.name == "saas-vendor" and .type == "User") | .id')
|
||||
|
||||
if [ -z "$VENDOR_ROLE_ID" ]; then
|
||||
ALL_SCOPE_IDS=$(api_get "/api/resources/$API_RESOURCE_ID/scopes" | jq '[.[].id]')
|
||||
log "Creating saas-vendor role with all scopes..."
|
||||
VENDOR_ROLE_RESPONSE=$(api_post "/api/roles" "{
|
||||
\"name\": \"saas-vendor\",
|
||||
\"description\": \"SaaS vendor — full platform control across all tenants\",
|
||||
\"type\": \"User\",
|
||||
\"scopeIds\": $ALL_SCOPE_IDS
|
||||
}")
|
||||
VENDOR_ROLE_ID=$(echo "$VENDOR_ROLE_RESPONSE" | jq -r '.id')
|
||||
log "Created saas-vendor role: $VENDOR_ROLE_ID"
|
||||
else
|
||||
log "saas-vendor role exists: $VENDOR_ROLE_ID"
|
||||
fi
|
||||
|
||||
# Assign vendor role to admin user
|
||||
if [ -n "$VENDOR_ROLE_ID" ] && [ "$VENDOR_ROLE_ID" != "null" ] && [ -n "$ADMIN_USER_ID" ]; then
|
||||
api_post "/api/users/$ADMIN_USER_ID/roles" "{\"roleIds\": [\"$VENDOR_ROLE_ID\"]}" >/dev/null 2>&1
|
||||
log "Assigned saas-vendor role to admin user."
|
||||
fi
|
||||
|
||||
log "SaaS admin role configured."
|
||||
|
||||
log ""
|
||||
log "=== Bootstrap complete! ==="
|
||||
# dev only — remove credential logging in production
|
||||
log " Platform Owner: $SAAS_ADMIN_USER / $SAAS_ADMIN_PASS (org role: owner)"
|
||||
log " Viewer: $TENANT_ADMIN_USER / $TENANT_ADMIN_PASS (org role: viewer)"
|
||||
log " Tenant: $TENANT_NAME (slug: $TENANT_SLUG)"
|
||||
log " Organization: $ORG_ID"
|
||||
log " SPA Client ID: $SPA_ID"
|
||||
log ""
|
||||
log " To add SaaS Vendor role (hosted only): run docker/vendor-seed.sh"
|
||||
log " No tenants created — use the admin console to create tenants."
|
||||
log ""
|
||||
|
||||
@@ -1,135 +0,0 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
# Cameleer SaaS — Vendor Seed Script
|
||||
# Creates the saas-vendor global role and vendor user.
|
||||
# Run ONCE on the hosted SaaS environment AFTER standard bootstrap.
|
||||
# NOT part of docker-compose.yml — invoked manually or by CI.
|
||||
|
||||
LOGTO_ENDPOINT="${LOGTO_ENDPOINT:-http://logto:3001}"
|
||||
MGMT_API_RESOURCE="https://default.logto.app/api"
|
||||
API_RESOURCE_INDICATOR="https://api.cameleer.local"
|
||||
PG_HOST="${PG_HOST:-postgres}"
|
||||
PG_USER="${PG_USER:-cameleer}"
|
||||
PG_DB_LOGTO="logto"
|
||||
|
||||
# Vendor credentials (override via env vars)
|
||||
VENDOR_USER="${VENDOR_USER:-vendor}"
|
||||
VENDOR_PASS="${VENDOR_PASS:-vendor}"
|
||||
VENDOR_NAME="${VENDOR_NAME:-SaaS Vendor}"
|
||||
|
||||
log() { echo "[vendor-seed] $1"; }
|
||||
pgpass() { PGPASSWORD="${PG_PASSWORD:-cameleer_dev}"; export PGPASSWORD; }
|
||||
|
||||
# Install jq + curl
|
||||
apk add --no-cache jq curl >/dev/null 2>&1
|
||||
|
||||
# ============================================================
|
||||
# Get Management API token
|
||||
# ============================================================
|
||||
|
||||
log "Reading M2M credentials from bootstrap file..."
|
||||
BOOTSTRAP_FILE="/data/logto-bootstrap.json"
|
||||
if [ ! -f "$BOOTSTRAP_FILE" ]; then
|
||||
log "ERROR: Bootstrap file not found at $BOOTSTRAP_FILE — run standard bootstrap first"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
M2M_ID=$(jq -r '.m2mClientId' "$BOOTSTRAP_FILE")
|
||||
M2M_SECRET=$(jq -r '.m2mClientSecret' "$BOOTSTRAP_FILE")
|
||||
|
||||
if [ -z "$M2M_ID" ] || [ "$M2M_ID" = "null" ] || [ -z "$M2M_SECRET" ] || [ "$M2M_SECRET" = "null" ]; then
|
||||
log "ERROR: M2M credentials not found in bootstrap file"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "Getting Management API token..."
|
||||
TOKEN_RESPONSE=$(curl -s -X POST "${LOGTO_ENDPOINT}/oidc/token" \
|
||||
-H "Content-Type: application/x-www-form-urlencoded" \
|
||||
-d "grant_type=client_credentials&client_id=${M2M_ID}&client_secret=${M2M_SECRET}&resource=${MGMT_API_RESOURCE}&scope=all")
|
||||
TOKEN=$(echo "$TOKEN_RESPONSE" | jq -r '.access_token' 2>/dev/null)
|
||||
[ -z "$TOKEN" ] || [ "$TOKEN" = "null" ] && { log "ERROR: Failed to get token"; exit 1; }
|
||||
log "Got Management API token."
|
||||
|
||||
api_get() { curl -s -H "Authorization: Bearer $TOKEN" "${LOGTO_ENDPOINT}${1}" 2>/dev/null || echo "[]"; }
|
||||
api_post() { curl -s -X POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" -d "$2" "${LOGTO_ENDPOINT}${1}" 2>/dev/null || true; }
|
||||
|
||||
# ============================================================
|
||||
# Create saas-vendor global role
|
||||
# ============================================================
|
||||
|
||||
log "Checking for saas-vendor role..."
|
||||
EXISTING_ROLES=$(api_get "/api/roles")
|
||||
VENDOR_ROLE_ID=$(echo "$EXISTING_ROLES" | jq -r '.[] | select(.name == "saas-vendor" and .type == "User") | .id')
|
||||
|
||||
if [ -n "$VENDOR_ROLE_ID" ]; then
|
||||
log "saas-vendor role exists: $VENDOR_ROLE_ID"
|
||||
else
|
||||
# Collect all API resource scope IDs
|
||||
EXISTING_RESOURCES=$(api_get "/api/resources")
|
||||
API_RESOURCE_ID=$(echo "$EXISTING_RESOURCES" | jq -r ".[] | select(.indicator == \"$API_RESOURCE_INDICATOR\") | .id")
|
||||
ALL_SCOPE_IDS=$(api_get "/api/resources/$API_RESOURCE_ID/scopes" | jq '[.[].id]')
|
||||
|
||||
log "Creating saas-vendor role with all scopes..."
|
||||
VENDOR_ROLE_RESPONSE=$(api_post "/api/roles" "{
|
||||
\"name\": \"saas-vendor\",
|
||||
\"description\": \"SaaS vendor — full platform control across all tenants\",
|
||||
\"type\": \"User\",
|
||||
\"scopeIds\": $ALL_SCOPE_IDS
|
||||
}")
|
||||
VENDOR_ROLE_ID=$(echo "$VENDOR_ROLE_RESPONSE" | jq -r '.id')
|
||||
log "Created saas-vendor role: $VENDOR_ROLE_ID"
|
||||
fi
|
||||
|
||||
# ============================================================
|
||||
# Create vendor user
|
||||
# ============================================================
|
||||
|
||||
log "Checking for vendor user '$VENDOR_USER'..."
|
||||
VENDOR_USER_ID=$(api_get "/api/users?search=$VENDOR_USER" | jq -r ".[] | select(.username == \"$VENDOR_USER\") | .id")
|
||||
|
||||
if [ -n "$VENDOR_USER_ID" ]; then
|
||||
log "Vendor user exists: $VENDOR_USER_ID"
|
||||
else
|
||||
log "Creating vendor user '$VENDOR_USER'..."
|
||||
VENDOR_RESPONSE=$(api_post "/api/users" "{
|
||||
\"username\": \"$VENDOR_USER\",
|
||||
\"password\": \"$VENDOR_PASS\",
|
||||
\"name\": \"$VENDOR_NAME\"
|
||||
}")
|
||||
VENDOR_USER_ID=$(echo "$VENDOR_RESPONSE" | jq -r '.id')
|
||||
log "Created vendor user: $VENDOR_USER_ID"
|
||||
fi
|
||||
|
||||
# Assign saas-vendor role
|
||||
if [ -n "$VENDOR_ROLE_ID" ] && [ "$VENDOR_ROLE_ID" != "null" ]; then
|
||||
api_post "/api/users/$VENDOR_USER_ID/roles" "{\"roleIds\": [\"$VENDOR_ROLE_ID\"]}" >/dev/null 2>&1
|
||||
log "Assigned saas-vendor role."
|
||||
fi
|
||||
|
||||
# ============================================================
|
||||
# Add vendor to all existing organizations with owner role
|
||||
# ============================================================
|
||||
|
||||
log "Adding vendor to all organizations..."
|
||||
ORG_OWNER_ROLE_ID=$(api_get "/api/organization-roles" | jq -r '.[] | select(.name == "owner") | .id')
|
||||
ORGS=$(api_get "/api/organizations")
|
||||
ORG_COUNT=$(echo "$ORGS" | jq 'length')
|
||||
|
||||
for i in $(seq 0 $((ORG_COUNT - 1))); do
|
||||
ORG_ID=$(echo "$ORGS" | jq -r ".[$i].id")
|
||||
ORG_NAME=$(echo "$ORGS" | jq -r ".[$i].name")
|
||||
api_post "/api/organizations/$ORG_ID/users" "{\"userIds\": [\"$VENDOR_USER_ID\"]}" >/dev/null 2>&1
|
||||
if [ -n "$ORG_OWNER_ROLE_ID" ] && [ "$ORG_OWNER_ROLE_ID" != "null" ]; then
|
||||
curl -s -X PUT -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
|
||||
-d "{\"organizationRoleIds\": [\"$ORG_OWNER_ROLE_ID\"]}" \
|
||||
"${LOGTO_ENDPOINT}/api/organizations/$ORG_ID/users/$VENDOR_USER_ID/roles" >/dev/null 2>&1
|
||||
fi
|
||||
log " Added to org '$ORG_NAME' ($ORG_ID) with owner role."
|
||||
done
|
||||
|
||||
log ""
|
||||
log "=== Vendor seed complete! ==="
|
||||
log " Vendor user: $VENDOR_USER / $VENDOR_PASS"
|
||||
log " Role: saas-vendor (global) + owner (in all orgs)"
|
||||
log " This user has platform:admin scope and cross-tenant access."
|
||||
@@ -193,7 +193,7 @@ the bootstrap script (`docker/logto-bootstrap.sh`):
|
||||
|
||||
**Agent -> cameleer3-server:**
|
||||
|
||||
1. Agent reads `CAMELEER_AUTH_TOKEN` environment variable (API key).
|
||||
1. Agent reads `CAMELEER_SERVER_SECURITY_BOOTSTRAPTOKEN` environment variable (API key).
|
||||
2. Calls `POST /api/v1/agents/register` with the key as Bearer token.
|
||||
3. Server validates via `BootstrapTokenValidator` (constant-time comparison).
|
||||
4. Server issues internal HMAC JWT (access + refresh) + Ed25519 public key.
|
||||
@@ -493,9 +493,9 @@ The deployment lifecycle is managed by `DeploymentService`:
|
||||
|
||||
| Variable | Value |
|
||||
|-----------------------------|----------------------------------------|
|
||||
| `CAMELEER_AUTH_TOKEN` | API key for agent registration |
|
||||
| `CAMELEER_SERVER_SECURITY_BOOTSTRAPTOKEN` | API key for agent registration |
|
||||
| `CAMELEER_EXPORT_TYPE` | `HTTP` |
|
||||
| `CAMELEER_SERVER_URL` | cameleer3-server internal URL |
|
||||
| `CAMELEER_SERVER_RUNTIME_SERVERURL` | cameleer3-server internal URL |
|
||||
| `CAMELEER_APPLICATION_ID` | App slug |
|
||||
| `CAMELEER_ENVIRONMENT_ID` | Environment slug |
|
||||
| `CAMELEER_DISPLAY_NAME` | `{tenant}-{env}-{app}` |
|
||||
@@ -529,7 +529,7 @@ aspects relevant to the SaaS platform.
|
||||
|
||||
### 6.1 Agent Registration
|
||||
|
||||
1. Agent starts with `CAMELEER_AUTH_TOKEN` environment variable (an API key
|
||||
1. Agent starts with `CAMELEER_SERVER_SECURITY_BOOTSTRAPTOKEN` environment variable (an API key
|
||||
generated by the SaaS platform, prefixed with `cmk_`).
|
||||
2. Agent calls `POST /api/v1/agents/register` on the cameleer3-server with the
|
||||
API key as a Bearer token.
|
||||
@@ -858,51 +858,61 @@ state (`currentTenantId`). Provides `logout` and `signIn` callbacks.
|
||||
|
||||
| Variable | Default | Description |
|
||||
|------------------------------|----------------------------------------------|----------------------------------|
|
||||
| `SPRING_DATASOURCE_URL` | `jdbc:postgresql://postgres:5432/cameleer_saas` | PostgreSQL JDBC URL |
|
||||
| `SPRING_DATASOURCE_URL` | `jdbc:postgresql://cameleer-postgres:5432/cameleer_saas` | PostgreSQL JDBC URL |
|
||||
| `SPRING_DATASOURCE_USERNAME`| `cameleer` | PostgreSQL user |
|
||||
| `SPRING_DATASOURCE_PASSWORD`| `cameleer_dev` | PostgreSQL password |
|
||||
|
||||
**Logto / OIDC:**
|
||||
**Identity / OIDC:**
|
||||
|
||||
| Variable | Default | Description |
|
||||
|---------------------------|------------|--------------------------------------------|
|
||||
| `LOGTO_ENDPOINT` | (empty) | Logto internal URL (Docker-internal) |
|
||||
| `LOGTO_PUBLIC_ENDPOINT` | (empty) | Logto public URL (browser-accessible) |
|
||||
| `LOGTO_ISSUER_URI` | (empty) | OIDC issuer URI for JWT validation |
|
||||
| `LOGTO_JWK_SET_URI` | (empty) | JWKS endpoint for JWT signature validation |
|
||||
| `LOGTO_M2M_CLIENT_ID` | (empty) | M2M app client ID (from bootstrap) |
|
||||
| `LOGTO_M2M_CLIENT_SECRET` | (empty) | M2M app client secret (from bootstrap) |
|
||||
| `LOGTO_SPA_CLIENT_ID` | (empty) | SPA app client ID (fallback; bootstrap preferred) |
|
||||
| `CAMELEER_SAAS_IDENTITY_LOGTOENDPOINT` | (empty) | Logto internal URL (Docker-internal) |
|
||||
| `CAMELEER_SAAS_IDENTITY_LOGTOPUBLICENDPOINT` | (empty) | Logto public URL (browser-accessible) |
|
||||
| `CAMELEER_SAAS_IDENTITY_M2MCLIENTID` | (empty) | M2M app client ID (from bootstrap) |
|
||||
| `CAMELEER_SAAS_IDENTITY_M2MCLIENTSECRET` | (empty) | M2M app client secret (from bootstrap) |
|
||||
| `CAMELEER_SAAS_IDENTITY_SPACLIENTID` | (empty) | SPA app client ID (fallback; bootstrap preferred) |
|
||||
|
||||
**Runtime / Deployment:**
|
||||
**Provisioning** (`cameleer.saas.provisioning.*` / `CAMELEER_SAAS_PROVISIONING_*`):
|
||||
|
||||
| Variable | Default | Description |
|
||||
|-----------------------------------|------------------------------------|----------------------------------|
|
||||
| `CAMELEER3_SERVER_ENDPOINT` | `http://cameleer3-server:8081` | cameleer3-server internal URL |
|
||||
| `CAMELEER_JAR_STORAGE_PATH` | `/data/jars` | JAR upload storage directory |
|
||||
| `CAMELEER_RUNTIME_BASE_IMAGE` | `cameleer-runtime-base:latest` | Base Docker image for app builds |
|
||||
| `CAMELEER_DOCKER_NETWORK` | `cameleer` | Docker network for containers |
|
||||
| `CAMELEER_CONTAINER_MEMORY_LIMIT`| `512m` | Per-container memory limit |
|
||||
| `CAMELEER_CONTAINER_CPU_SHARES` | `512` | Per-container CPU shares |
|
||||
| `CLICKHOUSE_URL` | `jdbc:clickhouse://clickhouse:8123/cameleer` | ClickHouse JDBC URL |
|
||||
| `CLICKHOUSE_ENABLED` | `true` | Enable ClickHouse integration |
|
||||
| `CLICKHOUSE_USERNAME` | `default` | ClickHouse user |
|
||||
| `CLICKHOUSE_PASSWORD` | (empty) | ClickHouse password |
|
||||
| `DOMAIN` | `localhost` | Base domain for Traefik routing |
|
||||
| `CAMELEER_SAAS_PROVISIONING_SERVERIMAGE` | `gitea.siegeln.net/cameleer/cameleer3-server:latest` | Docker image for per-tenant server |
|
||||
| `CAMELEER_SAAS_PROVISIONING_SERVERUIIMAGE` | `gitea.siegeln.net/cameleer/cameleer3-server-ui:latest` | Docker image for per-tenant UI |
|
||||
| `CAMELEER_SAAS_PROVISIONING_NETWORKNAME` | `cameleer-saas_cameleer` | Shared services Docker network |
|
||||
| `CAMELEER_SAAS_PROVISIONING_TRAEFIKNETWORK` | `cameleer-traefik` | Traefik Docker network |
|
||||
| `CAMELEER_SAAS_PROVISIONING_PUBLICHOST` | `localhost` | Public hostname (same as infrastructure `PUBLIC_HOST`) |
|
||||
| `CAMELEER_SAAS_PROVISIONING_PUBLICPROTOCOL` | `https` | Public protocol (same as infrastructure `PUBLIC_PROTOCOL`) |
|
||||
| `CAMELEER_SAAS_PROVISIONING_DATASOURCEURL` | `jdbc:postgresql://cameleer-postgres:5432/cameleer3` | PostgreSQL URL passed to tenant servers |
|
||||
| `CAMELEER_SAAS_PROVISIONING_CLICKHOUSEURL` | `jdbc:clickhouse://cameleer-clickhouse:8123/cameleer` | ClickHouse URL passed to tenant servers |
|
||||
|
||||
### 10.2 cameleer3-server
|
||||
### 10.2 cameleer3-server (per-tenant)
|
||||
|
||||
| Variable | Default | Description |
|
||||
Env vars injected into provisioned per-tenant server containers by `DockerTenantProvisioner`. All server properties use the `cameleer.server.*` prefix (env vars: `CAMELEER_SERVER_*`).
|
||||
|
||||
| Variable | Default / Value | Description |
|
||||
|------------------------------|----------------------------------------------|----------------------------------|
|
||||
| `SPRING_DATASOURCE_URL` | `jdbc:postgresql://postgres:5432/cameleer3` | PostgreSQL JDBC URL |
|
||||
| `SPRING_DATASOURCE_URL` | `jdbc:postgresql://cameleer-postgres:5432/cameleer3` | PostgreSQL JDBC URL |
|
||||
| `SPRING_DATASOURCE_USERNAME`| `cameleer` | PostgreSQL user |
|
||||
| `SPRING_DATASOURCE_PASSWORD`| `cameleer_dev` | PostgreSQL password |
|
||||
| `CLICKHOUSE_URL` | `jdbc:clickhouse://clickhouse:8123/cameleer` | ClickHouse JDBC URL |
|
||||
| `CAMELEER_AUTH_TOKEN` | `default-bootstrap-token` | Agent bootstrap token |
|
||||
| `CAMELEER_SERVER_CLICKHOUSE_URL` | `jdbc:clickhouse://cameleer-clickhouse:8123/cameleer` | ClickHouse JDBC URL |
|
||||
| `CAMELEER_SERVER_TENANT_ID` | *(tenant slug)* | Tenant identifier for data isolation |
|
||||
| `CAMELEER_SERVER_SECURITY_BOOTSTRAPTOKEN` | *(generated)* | Agent bootstrap token |
|
||||
| `CAMELEER_SERVER_SECURITY_JWTSECRET` | *(generated)* | JWT signing secret |
|
||||
| `CAMELEER_SERVER_SECURITY_OIDC_ISSUERURI` | `${PUBLIC_PROTOCOL}://${PUBLIC_HOST}/oidc` | OIDC issuer for M2M tokens |
|
||||
| `CAMELEER_SERVER_SECURITY_OIDC_JWKSETURI` | `http://cameleer-logto:3001/oidc/jwks` | Docker-internal JWK fetch |
|
||||
| `CAMELEER_SERVER_SECURITY_OIDC_AUDIENCE` | `https://api.cameleer.local` | JWT audience validation |
|
||||
| `CAMELEER_SERVER_SECURITY_CORSALLOWEDORIGINS` | `${PUBLIC_PROTOCOL}://${PUBLIC_HOST}` | CORS for browser requests |
|
||||
| `CAMELEER_SERVER_RUNTIME_ENABLED` | `true` | Enable Docker orchestration |
|
||||
| `CAMELEER_SERVER_RUNTIME_SERVERURL` | `http://cameleer-server-{slug}:8081` | Per-tenant server URL |
|
||||
| `CAMELEER_SERVER_RUNTIME_ROUTINGDOMAIN` | `${PUBLIC_HOST}` | Domain for Traefik routing |
|
||||
| `CAMELEER_SERVER_RUNTIME_ROUTINGMODE` | `path` | `path` or `subdomain` routing |
|
||||
| `CAMELEER_SERVER_RUNTIME_JARSTORAGEPATH` | `/data/jars` | JAR file storage directory |
|
||||
| `CAMELEER_SERVER_RUNTIME_DOCKERNETWORK` | `cameleer-tenant-{slug}` | Primary network for app containers |
|
||||
| `CAMELEER_SERVER_RUNTIME_JARDOCKERVOLUME` | `cameleer-jars-{slug}` | Docker volume for JAR sharing |
|
||||
| `CAMELEER_JWT_SECRET` | `cameleer-dev-jwt-secret-...` | HMAC secret for internal JWTs |
|
||||
| `CAMELEER_TENANT_ID` | `default` | Tenant slug for data isolation |
|
||||
| `CAMELEER_OIDC_ISSUER_URI` | (empty) | Logto issuer for M2M token validation |
|
||||
| `CAMELEER_OIDC_AUDIENCE` | (empty) | Expected JWT audience |
|
||||
| `CAMELEER_SERVER_TENANT_ID` | `default` | Tenant slug for data isolation |
|
||||
| `CAMELEER_SERVER_SECURITY_OIDCISSUERURI` | (empty) | Logto issuer for M2M token validation |
|
||||
| `CAMELEER_SERVER_SECURITY_OIDCAUDIENCE` | (empty) | Expected JWT audience |
|
||||
|
||||
### 10.3 logto
|
||||
|
||||
@@ -927,7 +937,7 @@ state (`currentTenantId`). Provides `logout` and `signIn` callbacks.
|
||||
| `SAAS_ADMIN_PASS` | `admin` | Platform admin password |
|
||||
| `TENANT_ADMIN_USER` | `camel` | Default tenant admin username |
|
||||
| `TENANT_ADMIN_PASS` | `camel` | Default tenant admin password |
|
||||
| `CAMELEER_AUTH_TOKEN`| `default-bootstrap-token` | Agent bootstrap token |
|
||||
| `CAMELEER_SERVER_SECURITY_BOOTSTRAPTOKEN`| `default-bootstrap-token` | Agent bootstrap token |
|
||||
|
||||
### 10.6 Bootstrap Output
|
||||
|
||||
@@ -947,7 +957,7 @@ The bootstrap script writes `/data/logto-bootstrap.json` containing:
|
||||
"bootstrapToken": "<from env>",
|
||||
"platformAdminUser": "<from env>",
|
||||
"tenantAdminUser": "<from env>",
|
||||
"oidcIssuerUri": "http://logto:3001/oidc",
|
||||
"oidcIssuerUri": "http://cameleer-logto:3001/oidc",
|
||||
"oidcAudience": "https://api.cameleer.local"
|
||||
}
|
||||
```
|
||||
|
||||
3017
docs/superpowers/plans/2026-04-09-platform-redesign-plan.md
Normal file
3017
docs/superpowers/plans/2026-04-09-platform-redesign-plan.md
Normal file
File diff suppressed because it is too large
Load Diff
2662
docs/superpowers/plans/2026-04-13-install-script-plan.md
Normal file
2662
docs/superpowers/plans/2026-04-13-install-script-plan.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,464 @@
|
||||
# Per-Tenant PostgreSQL Isolation Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Give each tenant its own PostgreSQL user and schema so tenant servers can only access their own data at the database level.
|
||||
|
||||
**Architecture:** During provisioning, create a dedicated PG user (`tenant_<slug>`) with a matching schema. Pass per-tenant credentials and `currentSchema`/`ApplicationName` JDBC parameters to the server container. On delete, drop both schema and user. Existing tenants without `dbPassword` fall back to shared credentials for backwards compatibility.
|
||||
|
||||
**Tech Stack:** Java 21, Spring Boot 3.4, Flyway, PostgreSQL 16, Docker Java API
|
||||
|
||||
**Spec:** `docs/superpowers/specs/2026-04-15-per-tenant-pg-isolation-design.md`
|
||||
|
||||
---
|
||||
|
||||
### Task 1: Flyway Migration — add `db_password` column
|
||||
|
||||
**Files:**
|
||||
- Create: `src/main/resources/db/migration/V015__add_tenant_db_password.sql`
|
||||
|
||||
- [ ] **Step 1: Create migration file**
|
||||
|
||||
```sql
|
||||
-- V015: per-tenant PG isolation — stores the generated PostgreSQL password for
-- the tenant's dedicated DB user. NULL for tenants provisioned before this
-- change; those fall back to the shared datasource credentials.
ALTER TABLE tenants ADD COLUMN db_password VARCHAR(255);
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Verify migration applies**
|
||||
|
||||
Run: `mvn flyway:info -pl .` or start the app and check logs for `V015__add_tenant_db_password` in Flyway output.
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```bash
|
||||
git add src/main/resources/db/migration/V015__add_tenant_db_password.sql
|
||||
git commit -m "feat: add db_password column to tenants table (V015)"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 2: TenantEntity — add `dbPassword` field
|
||||
|
||||
**Files:**
|
||||
- Modify: `src/main/java/net/siegeln/cameleer/saas/tenant/TenantEntity.java`
|
||||
|
||||
- [ ] **Step 1: Add field and accessors**
|
||||
|
||||
After the `provisionError` field (line 59), add:
|
||||
|
||||
```java
|
||||
@Column(name = "db_password")
|
||||
private String dbPassword;
|
||||
```
|
||||
|
||||
After the `setProvisionError` method (line 102), add:
|
||||
|
||||
```java
|
||||
public String getDbPassword() { return dbPassword; }
|
||||
public void setDbPassword(String dbPassword) { this.dbPassword = dbPassword; }
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Commit**
|
||||
|
||||
```bash
|
||||
git add src/main/java/net/siegeln/cameleer/saas/tenant/TenantEntity.java
|
||||
git commit -m "feat: add dbPassword field to TenantEntity"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 3: Create `TenantDatabaseService`
|
||||
|
||||
**Files:**
|
||||
- Create: `src/main/java/net/siegeln/cameleer/saas/provisioning/TenantDatabaseService.java`
|
||||
|
||||
- [ ] **Step 1: Implement the service**
|
||||
|
||||
```java
|
||||
package net.siegeln.cameleer.saas.provisioning;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

/**
 * Creates and drops per-tenant PostgreSQL users and schemas
 * on the shared cameleer3 database for DB-level tenant isolation.
 *
 * <p>DDL statements cannot take bind parameters, so identifiers are built
 * from the tenant slug (validated against {@code ^[a-z0-9-]+$}) and
 * double-quoted; the password literal escapes single quotes by doubling,
 * which is safe under PostgreSQL's default
 * {@code standard_conforming_strings = on}. Catalog existence checks use
 * parameterized queries.
 */
@Service
public class TenantDatabaseService {

    private static final Logger log = LoggerFactory.getLogger(TenantDatabaseService.class);

    private final ProvisioningProperties props;

    public TenantDatabaseService(ProvisioningProperties props) {
        this.props = props;
    }

    /**
     * Create a dedicated PG user and schema for a tenant.
     * Idempotent — re-running updates the user's password and re-asserts
     * schema ownership instead of failing.
     *
     * @param slug     tenant slug; must match {@code ^[a-z0-9-]+$}
     * @param password password for the tenant's PostgreSQL user
     * @throws IllegalArgumentException if the slug is invalid
     * @throws RuntimeException         if any database operation fails
     */
    public void createTenantDatabase(String slug, String password) {
        validateSlug(slug);

        String url = props.datasourceUrl();
        if (url == null || url.isBlank()) {
            log.warn("No datasource URL configured — skipping tenant DB setup");
            return;
        }

        String user = "tenant_" + slug;
        String schema = "tenant_" + slug;

        try (Connection conn = DriverManager.getConnection(url, props.datasourceUsername(), props.datasourcePassword());
             Statement stmt = conn.createStatement()) {

            if (!roleExists(conn, user)) {
                stmt.execute("CREATE USER \"" + user + "\" WITH PASSWORD '" + escapePassword(password) + "'");
                log.info("Created PostgreSQL user: {}", user);
            } else {
                // Re-provision: keep the stored credential and the DB in sync.
                stmt.execute("ALTER USER \"" + user + "\" WITH PASSWORD '" + escapePassword(password) + "'");
                log.info("Updated password for existing PostgreSQL user: {}", user);
            }

            if (!schemaExists(conn, schema)) {
                stmt.execute("CREATE SCHEMA \"" + schema + "\" AUTHORIZATION \"" + user + "\"");
                log.info("Created PostgreSQL schema: {}", schema);
            } else {
                // Schema may predate the user; make sure the tenant user owns it.
                stmt.execute("ALTER SCHEMA \"" + schema + "\" OWNER TO \"" + user + "\"");
                log.info("Schema {} already exists — ensured ownership", schema);
            }

            // Revoke the tenant user's direct grants on the public schema.
            // NOTE(review): this does not remove privileges inherited via PUBLIC;
            // on PostgreSQL < 15, PUBLIC still holds CREATE on "public" — confirm
            // whether a global REVOKE ... FROM PUBLIC is wanted here.
            stmt.execute("REVOKE ALL ON SCHEMA public FROM \"" + user + "\"");

        } catch (Exception e) {
            throw new RuntimeException("Failed to create tenant database for '" + slug + "': " + e.getMessage(), e);
        }
    }

    /**
     * Drop a tenant's schema (CASCADE) and user. Idempotent; failures are
     * logged and swallowed so tenant deletion can proceed (best-effort cleanup).
     *
     * @param slug tenant slug; must match {@code ^[a-z0-9-]+$}
     * @throws IllegalArgumentException if the slug is invalid
     */
    public void dropTenantDatabase(String slug) {
        validateSlug(slug);

        String url = props.datasourceUrl();
        if (url == null || url.isBlank()) {
            log.warn("No datasource URL configured — skipping tenant DB cleanup");
            return;
        }

        String user = "tenant_" + slug;
        String schema = "tenant_" + slug;

        try (Connection conn = DriverManager.getConnection(url, props.datasourceUsername(), props.datasourcePassword());
             Statement stmt = conn.createStatement()) {
            // Drop the schema first: the user cannot be dropped while it still owns objects.
            stmt.execute("DROP SCHEMA IF EXISTS \"" + schema + "\" CASCADE");
            log.info("Dropped PostgreSQL schema: {}", schema);

            stmt.execute("DROP USER IF EXISTS \"" + user + "\"");
            log.info("Dropped PostgreSQL user: {}", user);
        } catch (Exception e) {
            // Best-effort: tenant deletion must not fail on DB cleanup.
            log.warn("Failed to drop tenant database for '{}': {}", slug, e.getMessage());
        }
    }

    /** True if a role with the given name exists (parameterized catalog lookup). */
    private boolean roleExists(Connection conn, String role) throws SQLException {
        try (PreparedStatement ps = conn.prepareStatement("SELECT 1 FROM pg_roles WHERE rolname = ?")) {
            ps.setString(1, role);
            try (ResultSet rs = ps.executeQuery()) {
                return rs.next();
            }
        }
    }

    /** True if a schema with the given name exists (parameterized catalog lookup). */
    private boolean schemaExists(Connection conn, String name) throws SQLException {
        try (PreparedStatement ps = conn.prepareStatement(
                "SELECT 1 FROM information_schema.schemata WHERE schema_name = ?")) {
            ps.setString(1, name);
            try (ResultSet rs = ps.executeQuery()) {
                return rs.next();
            }
        }
    }

    /** Reject any slug that is not lowercase alphanumerics/dashes — the slug is interpolated into DDL. */
    private void validateSlug(String slug) {
        if (slug == null || !slug.matches("^[a-z0-9-]+$")) {
            throw new IllegalArgumentException("Invalid tenant slug: " + slug);
        }
    }

    /** Escape a password for use as a single-quoted SQL literal (doubles single quotes). */
    private String escapePassword(String password) {
        return password.replace("'", "''");
    }
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Commit**
|
||||
|
||||
```bash
|
||||
git add src/main/java/net/siegeln/cameleer/saas/provisioning/TenantDatabaseService.java
|
||||
git commit -m "feat: add TenantDatabaseService for per-tenant PG user+schema"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 4: Add `dbPassword` to `TenantProvisionRequest`
|
||||
|
||||
**Files:**
|
||||
- Modify: `src/main/java/net/siegeln/cameleer/saas/provisioning/TenantProvisionRequest.java`
|
||||
|
||||
- [ ] **Step 1: Add field to record**
|
||||
|
||||
Replace the entire record with:
|
||||
|
||||
```java
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
import java.util.UUID;
|
||||
|
||||
public record TenantProvisionRequest(
|
||||
UUID tenantId,
|
||||
String slug,
|
||||
String tier,
|
||||
String licenseToken,
|
||||
String dbPassword
|
||||
) {}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Commit**
|
||||
|
||||
```bash
|
||||
git add src/main/java/net/siegeln/cameleer/saas/provisioning/TenantProvisionRequest.java
|
||||
git commit -m "feat: add dbPassword to TenantProvisionRequest"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 5: Update `DockerTenantProvisioner` — per-tenant JDBC URL
|
||||
|
||||
**Files:**
|
||||
- Modify: `src/main/java/net/siegeln/cameleer/saas/provisioning/DockerTenantProvisioner.java:197-200`
|
||||
|
||||
- [ ] **Step 1: Replace shared credentials with per-tenant credentials**
|
||||
|
||||
In `createServerContainer()` (line 197-200), replace:
|
||||
|
||||
```java
|
||||
var env = new java.util.ArrayList<>(List.of(
|
||||
"SPRING_DATASOURCE_URL=" + props.datasourceUrl(),
|
||||
"SPRING_DATASOURCE_USERNAME=" + props.datasourceUsername(),
|
||||
"SPRING_DATASOURCE_PASSWORD=" + props.datasourcePassword(),
|
||||
```
|
||||
|
||||
With:
|
||||
|
||||
```java
|
||||
// Per-tenant DB isolation: dedicated user+schema when dbPassword is set,
|
||||
// shared credentials for backwards compatibility with pre-isolation tenants.
|
||||
String dsUrl;
|
||||
String dsUser;
|
||||
String dsPass;
|
||||
if (req.dbPassword() != null) {
|
||||
dsUrl = props.datasourceUrl() + "?currentSchema=tenant_" + slug + "&ApplicationName=tenant_" + slug;
|
||||
dsUser = "tenant_" + slug;
|
||||
dsPass = req.dbPassword();
|
||||
} else {
|
||||
dsUrl = props.datasourceUrl();
|
||||
dsUser = props.datasourceUsername();
|
||||
dsPass = props.datasourcePassword();
|
||||
}
|
||||
var env = new java.util.ArrayList<>(List.of(
|
||||
"SPRING_DATASOURCE_URL=" + dsUrl,
|
||||
"SPRING_DATASOURCE_USERNAME=" + dsUser,
|
||||
"SPRING_DATASOURCE_PASSWORD=" + dsPass,
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Commit**
|
||||
|
||||
```bash
|
||||
git add src/main/java/net/siegeln/cameleer/saas/provisioning/DockerTenantProvisioner.java
|
||||
git commit -m "feat: construct per-tenant JDBC URL with currentSchema and ApplicationName"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 6: Update `VendorTenantService` — provisioning and delete flows
|
||||
|
||||
**Files:**
|
||||
- Modify: `src/main/java/net/siegeln/cameleer/saas/vendor/VendorTenantService.java`
|
||||
|
||||
- [ ] **Step 1: Inject `TenantDatabaseService`**
|
||||
|
||||
Add to the constructor and field declarations:
|
||||
|
||||
```java
|
||||
private final TenantDatabaseService tenantDatabaseService;
|
||||
```
|
||||
|
||||
Add to the constructor parameter list and assignment. (Follow the existing pattern of other injected services.)
|
||||
|
||||
- [ ] **Step 2: Update `provisionAsync()` — create DB before containers**
|
||||
|
||||
In `provisionAsync()` (around line 120), add DB creation before the provision call. Replace:
|
||||
|
||||
```java
|
||||
var provisionRequest = new TenantProvisionRequest(tenantId, slug, tier, licenseToken);
|
||||
ProvisionResult result = tenantProvisioner.provision(provisionRequest);
|
||||
```
|
||||
|
||||
With:
|
||||
|
||||
```java
|
||||
// Create per-tenant PG user + schema
|
||||
String dbPassword = UUID.randomUUID().toString().replace("-", "")
|
||||
+ UUID.randomUUID().toString().replace("-", "").substring(0, 8);
|
||||
try {
|
||||
tenantDatabaseService.createTenantDatabase(slug, dbPassword);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to create tenant database for {}: {}", slug, e.getMessage(), e);
|
||||
tenantRepository.findById(tenantId).ifPresent(t -> {
|
||||
t.setProvisionError("Database setup failed: " + e.getMessage());
|
||||
tenantRepository.save(t);
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Store DB password on entity
|
||||
TenantEntity tenantForDb = tenantRepository.findById(tenantId).orElse(null);
|
||||
if (tenantForDb == null) {
|
||||
log.error("Tenant {} disappeared during provisioning", slug);
|
||||
return;
|
||||
}
|
||||
tenantForDb.setDbPassword(dbPassword);
|
||||
tenantRepository.save(tenantForDb);
|
||||
|
||||
var provisionRequest = new TenantProvisionRequest(tenantId, slug, tier, licenseToken, dbPassword);
|
||||
ProvisionResult result = tenantProvisioner.provision(provisionRequest);
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Update the existing `TenantProvisionRequest` constructor call in upgrade flow**
|
||||
|
||||
Search for any other `new TenantProvisionRequest(...)` calls. The `upgradeServer` method (or re-provision after upgrade) also creates a provision request. Update it to pass `dbPassword` from the entity:
|
||||
|
||||
```java
|
||||
TenantEntity tenant = ...;
|
||||
var provisionRequest = new TenantProvisionRequest(
|
||||
tenant.getId(), tenant.getSlug(), tenant.getTier().name(),
|
||||
licenseToken, tenant.getDbPassword());
|
||||
```
|
||||
|
||||
If the tenant has `dbPassword == null` (a tenant provisioned before this change), that is fine — the null check added in Task 5 falls back to the shared datasource credentials.
|
||||
|
||||
- [ ] **Step 4: Update `delete()` — use TenantDatabaseService**
|
||||
|
||||
In `delete()` (around line 306), replace:
|
||||
|
||||
```java
|
||||
// Erase tenant data from server databases (GDPR)
|
||||
dataCleanupService.cleanup(tenant.getSlug());
|
||||
```
|
||||
|
||||
With:
|
||||
|
||||
```java
|
||||
// Drop per-tenant PG schema + user
|
||||
tenantDatabaseService.dropTenantDatabase(tenant.getSlug());
|
||||
|
||||
// Erase ClickHouse data (GDPR)
|
||||
dataCleanupService.cleanupClickHouse(tenant.getSlug());
|
||||
```
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```bash
|
||||
git add src/main/java/net/siegeln/cameleer/saas/vendor/VendorTenantService.java
|
||||
git commit -m "feat: create per-tenant PG database during provisioning, drop on delete"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 7: Refactor `TenantDataCleanupService` — ClickHouse only
|
||||
|
||||
**Files:**
|
||||
- Modify: `src/main/java/net/siegeln/cameleer/saas/provisioning/TenantDataCleanupService.java`
|
||||
|
||||
- [ ] **Step 1: Remove PG logic, rename public method**
|
||||
|
||||
Remove the `dropPostgresSchema()` method and the `cleanup()` method. Replace with a single public method:
|
||||
|
||||
```java
|
||||
/**
|
||||
* Deletes tenant data from ClickHouse tables (GDPR data erasure).
|
||||
* PostgreSQL cleanup is handled by TenantDatabaseService.
|
||||
*/
|
||||
public void cleanupClickHouse(String slug) {
|
||||
deleteClickHouseData(slug);
|
||||
}
|
||||
```
|
||||
|
||||
Remove the `dropPostgresSchema()` private method entirely. Keep `deleteClickHouseData()` unchanged.
|
||||
|
||||
- [ ] **Step 2: Commit**
|
||||
|
||||
```bash
|
||||
git add src/main/java/net/siegeln/cameleer/saas/provisioning/TenantDataCleanupService.java
|
||||
git commit -m "refactor: move PG cleanup to TenantDatabaseService, keep only ClickHouse"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Task 8: Verify end-to-end
|
||||
|
||||
- [ ] **Step 1: Build**
|
||||
|
||||
```bash
|
||||
mvn compile -pl .
|
||||
```
|
||||
|
||||
Verify no compilation errors.
|
||||
|
||||
- [ ] **Step 2: Deploy and test tenant creation**
|
||||
|
||||
Deploy the updated SaaS image. Create a new tenant via the UI. Verify in PostgreSQL:
|
||||
|
||||
```sql
|
||||
-- Should show the new tenant user
|
||||
SELECT rolname FROM pg_roles WHERE rolname LIKE 'tenant_%';
|
||||
|
||||
-- Should show the new tenant schema
|
||||
SELECT schema_name FROM information_schema.schemata WHERE schema_name LIKE 'tenant_%';
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Verify server container env vars**
|
||||
|
||||
```bash
|
||||
docker inspect cameleer-server-<slug> | grep -E "DATASOURCE|currentSchema|ApplicationName"
|
||||
```
|
||||
|
||||
Expected: URL contains `?currentSchema=tenant_<slug>&ApplicationName=tenant_<slug>`, username is `tenant_<slug>`.
|
||||
|
||||
- [ ] **Step 4: Verify Infrastructure page**
|
||||
|
||||
Navigate to Vendor > Infrastructure. The PostgreSQL card should now show the tenant schema with size/tables/rows.
|
||||
|
||||
- [ ] **Step 5: Test tenant deletion**
|
||||
|
||||
Delete the tenant. Verify:
|
||||
|
||||
```sql
|
||||
-- User should be gone
|
||||
SELECT rolname FROM pg_roles WHERE rolname LIKE 'tenant_%';
|
||||
|
||||
-- Schema should be gone
|
||||
SELECT schema_name FROM information_schema.schemata WHERE schema_name LIKE 'tenant_%';
|
||||
```
|
||||
|
||||
- [ ] **Step 6: Commit all remaining changes**
|
||||
|
||||
```bash
|
||||
git add -A
|
||||
git commit -m "feat: per-tenant PostgreSQL isolation — complete implementation"
|
||||
```
|
||||
499
docs/superpowers/specs/2026-04-09-platform-redesign.md
Normal file
499
docs/superpowers/specs/2026-04-09-platform-redesign.md
Normal file
@@ -0,0 +1,499 @@
|
||||
# Cameleer SaaS Platform Redesign — Design Spec
|
||||
|
||||
**Date:** 2026-04-09
|
||||
**Status:** Approved (brainstorming session)
|
||||
**Scope:** Redesign the SaaS platform from a read-only tenant viewer into a functional vendor management plane with tenant provisioning, license management, and customer self-service.
|
||||
|
||||
## Context
|
||||
|
||||
The SaaS platform currently has 3 pages (Dashboard, License, Admin Tenants) — all read-only. It cannot create tenants, provision servers, manage licenses, or let customers configure their own settings. The backend has foundations (TenantService, LicenseService, LogtoManagementClient, ServerApiClient, audit logging) but none are exposed through management workflows.
|
||||
|
||||
This spec redesigns the platform around two personas — **vendor** (us) and **customer** (tenant admin) — with a clear separation of concerns.
|
||||
|
||||
### Architectural Decisions (from brainstorming)
|
||||
|
||||
| Decision | Choice | Rationale |
|
||||
|----------|--------|-----------|
|
||||
| Server isolation | Shared data stores, isolated server per tenant | Server is already standalone; PostgreSQL/ClickHouse shared with tenant_id partitioning |
|
||||
| Auth model | Hybrid — SaaS uses Logto, server uses customer OIDC | Clean separation: SaaS is vendor plane, server is product plane |
|
||||
| Tenant admin access | Both SaaS + server, with SSO bridge | Admin configures in SaaS, jumps to server for operations |
|
||||
| Server data in SaaS | License compliance + health summary | Quick pulse without duplicating the server dashboard |
|
||||
| Provisioning mechanism | Docker API via docker-java | Already a dependency, same pattern as server's RuntimeOrchestrator |
|
||||
| Docker/K8s support | Pluggable interface, Docker first | Mirror server's RuntimeOrchestrator + auto-detection pattern |
|
||||
|
||||
---
|
||||
|
||||
## 1. Personas & User Stories
|
||||
|
||||
### Vendor (platform:admin scope)
|
||||
|
||||
| ID | Story | Acceptance Criteria |
|
||||
|----|-------|-------------------|
|
||||
| V1 | As a vendor, I want to create a tenant so I can onboard a new customer | Form collects name, slug, tier. Creates DB record + Logto org. Status = PROVISIONING. |
|
||||
| V2 | As a vendor, I want to provision a server for a tenant so they have a running Cameleer instance | After tenant creation, SaaS creates a cameleer3-server container via Docker API with correct env vars, network, and Traefik labels. Health check passes → status = ACTIVE. |
|
||||
| V3 | As a vendor, I want to generate and assign a license to a tenant | License created with tier-appropriate features/limits/expiry. Token pushed to tenant's server via M2M API. |
|
||||
| V4 | As a vendor, I want to suspend a tenant who hasn't paid | Suspend stops the server container and marks tenant SUSPENDED. Reactivation restarts it. |
|
||||
| V5 | As a vendor, I want to view fleet health at a glance | Tenant list shows each tenant's server status (running/stopped/error), agent count vs limit, license expiry. |
|
||||
| V6 | As a vendor, I want to delete/offboard a tenant | Stops and removes server container, revokes license, marks tenant DELETED. |
|
||||
|
||||
### Customer (tenant admin, org-scoped JWT)
|
||||
|
||||
| ID | Story | Acceptance Criteria |
|
||||
|----|-------|-------------------|
|
||||
| C1 | As a tenant admin, I want to see my dashboard with server health and license usage | Dashboard shows: server status (up/down), connected agents vs limit, environments vs limit, feature entitlements. |
|
||||
| C2 | As a tenant admin, I want to configure external OIDC for my team | Form to set issuer URI, client ID, client secret, audience, claim mappings. SaaS pushes config to the tenant's server via M2M API. |
|
||||
| C3 | As a tenant admin, I want to manage team members | View/invite/remove users in Logto org. Assign roles (owner/operator/viewer) that flow through to server access. |
|
||||
| C4 | As a tenant admin, I want to access the server dashboard seamlessly | "Open Server Dashboard" navigates to the tenant's server URL. Initial auth via Logto (same OIDC provider until customer configures their own). |
|
||||
| C5 | As a tenant admin, I want to view my license details | Tier, features, limits, validity, days remaining — enriched with actual usage data from server. |
|
||||
| C6 | As a tenant admin, I want to see my organization settings | Tenant name, slug, tier, created date. Read-only (tier changes go through vendor). |
|
||||
|
||||
---
|
||||
|
||||
## 2. Information Architecture
|
||||
|
||||
### Route Structure
|
||||
|
||||
```
|
||||
/platform/
|
||||
├── /vendor/ (platform:admin only)
|
||||
│ ├── /vendor/tenants Tenant list with fleet health overview
|
||||
│ ├── /vendor/tenants/new Create tenant flow (create → provision → license)
|
||||
│ └── /vendor/tenants/:id Tenant detail — server status, license, actions
|
||||
│
|
||||
├── /tenant/ (org-scoped, any authenticated user)
|
||||
│ ├── /tenant/ Dashboard — server health + license usage
|
||||
│ ├── /tenant/license License details + usage vs limits
|
||||
│ ├── /tenant/oidc External OIDC configuration
|
||||
│ ├── /tenant/team Team members + role management
|
||||
│ └── /tenant/settings Organization settings
|
||||
│
|
||||
├── /login Logto OIDC redirect
|
||||
└── /callback Logto callback handler
|
||||
```
|
||||
|
||||
### Navigation
|
||||
|
||||
**Sidebar adapts to persona:**
|
||||
|
||||
- **Vendor** (`platform:admin`): "Tenants" section at top. If a tenant is selected (e.g., viewing detail), the tenant portal sections appear below for support/debugging.
|
||||
- **Customer** (no `platform:admin`): Dashboard, License, OIDC, Team, Settings.
|
||||
- **Footer**: "Open Server Dashboard" (contextual to current tenant).
|
||||
|
||||
**Landing page:**
|
||||
- `platform:admin` → `/vendor/tenants`
|
||||
- Otherwise → `/tenant/`
|
||||
|
||||
### What Happens to Existing Pages
|
||||
|
||||
| Current | Becomes | Changes |
|
||||
|---------|---------|---------|
|
||||
| `DashboardPage` | `/tenant/` | Add health data from server, license usage indicators |
|
||||
| `LicensePage` | `/tenant/license` | Add usage enrichment (agents used/limit, envs used/limit) |
|
||||
| `AdminTenantsPage` | `/vendor/tenants` | Full CRUD, health indicators, provision/suspend/delete actions |
|
||||
|
||||
---
|
||||
|
||||
## 3. Provisioning Architecture
|
||||
|
||||
### Pluggable Interface
|
||||
|
||||
Following the server's `RuntimeOrchestrator` pattern with auto-detection:
|
||||
|
||||
```java
|
||||
public interface TenantProvisioner {
|
||||
boolean isAvailable();
|
||||
ProvisionResult provision(TenantProvisionRequest request);
|
||||
void start(String tenantId);
|
||||
void stop(String tenantId);
|
||||
void remove(String tenantId);
|
||||
ServerStatus getStatus(String tenantId);
|
||||
String getServerEndpoint(String tenantId);
|
||||
}
|
||||
```
|
||||
|
||||
**Auto-detection** (same pattern as server's `RuntimeOrchestratorAutoConfig`):
|
||||
|
||||
```java
|
||||
@Configuration
|
||||
public class TenantProvisionerAutoConfig {
|
||||
@Bean
|
||||
TenantProvisioner tenantProvisioner() {
|
||||
if (Files.exists(Path.of("/var/run/docker.sock"))) {
|
||||
return new DockerTenantProvisioner(dockerClientConfig());
|
||||
}
|
||||
// Future: K8s detection (service account token)
|
||||
return new DisabledTenantProvisioner();
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Docker Implementation
|
||||
|
||||
`DockerTenantProvisioner` uses docker-java to manage per-tenant server containers:
|
||||
|
||||
**Container specification per tenant:**
|
||||
|
||||
| Config | Value | Source |
|
||||
|--------|-------|--------|
|
||||
| Image | `gitea.siegeln.net/cameleer/cameleer3-server:${VERSION}` | Global config |
|
||||
| Name | `cameleer-server-${tenant.slug}` | Derived from tenant |
|
||||
| Network | `cameleer` + `cameleer-traefik` | Fixed networks from compose |
|
||||
| DNS alias | `cameleer-server-${tenant.slug}` | For SaaS→server M2M calls |
|
||||
| Health check | `wget -q -O- http://localhost:8081/actuator/health` | Server's actuator |
|
||||
| Restart policy | `unless-stopped` | Standard for services |
|
||||
|
||||
**Environment variables injected per tenant:**
|
||||
|
||||
| Env var | Value | Purpose |
|
||||
|---------|-------|---------|
|
||||
| `SPRING_DATASOURCE_URL` | `jdbc:postgresql://postgres:5432/cameleer3` | Shared PostgreSQL |
|
||||
| `CAMELEER_TENANT_ID` | `${tenant.slug}` | Tenant isolation key |
|
||||
| `CAMELEER_OIDC_ISSUER_URI` | `${PUBLIC_PROTOCOL}://${PUBLIC_HOST}/oidc` | Logto as initial OIDC |
|
||||
| `CAMELEER_OIDC_JWK_SET_URI` | `http://logto:3001/oidc/jwks` | Docker-internal JWK |
|
||||
| `CAMELEER_CORS_ALLOWED_ORIGINS` | `${PUBLIC_PROTOCOL}://${PUBLIC_HOST}` | Browser CORS |
|
||||
| `CAMELEER_LICENSE_TOKEN` | `${license.token}` | License for this tenant |
|
||||
| `CAMELEER_RUNTIME_ENABLED` | `true` | Enable Docker orchestration |
|
||||
| `CAMELEER_SERVER_URL` | `http://cameleer-server-${slug}:8081` | Self-reference for agents |
|
||||
| `CAMELEER_ROUTING_DOMAIN` | `${PUBLIC_HOST}` | Traefik routing domain |
|
||||
| `CAMELEER_ROUTING_MODE` | `path` | Path-based routing |
|
||||
|
||||
**Traefik labels for per-tenant routing:**
|
||||
|
||||
```
|
||||
traefik.enable=true
|
||||
traefik.http.routers.server-${slug}.rule=PathPrefix(`/t/${slug}`)
|
||||
traefik.http.routers.server-${slug}.tls=true
|
||||
traefik.http.services.server-${slug}.loadbalancer.server.port=8081
|
||||
```
|
||||
|
||||
**Server UI container per tenant:**
|
||||
|
||||
Each tenant also gets a `cameleer3-server-ui` container:
|
||||
|
||||
| Config | Value |
|
||||
|--------|-------|
|
||||
| Name | `cameleer-server-ui-${tenant.slug}` |
|
||||
| Image | `gitea.siegeln.net/cameleer/cameleer3-server-ui:${VERSION}` |
|
||||
| Env | `BASE_PATH=/t/${slug}` |
|
||||
| Traefik | `PathPrefix(/t/${slug})` with `priority=2` (higher than API) |
|
||||
|
||||
The server UI serves static assets and proxies API calls to the backend. The `BASE_PATH` env var configures React Router's basename and nginx proxy target.
|
||||
|
||||
### Provision Flow
|
||||
|
||||
```
|
||||
Vendor clicks "Create Tenant"
|
||||
→ POST /api/vendor/tenants
|
||||
1. Validate slug uniqueness
|
||||
2. Create TenantEntity (status=PROVISIONING)
|
||||
3. Create Logto organization
|
||||
4. Generate license (tier-appropriate, 365 days)
|
||||
5. Create server container (DockerTenantProvisioner.provision())
|
||||
6. Create server UI container
|
||||
7. Wait for health check (poll /actuator/health, timeout 60s)
|
||||
8. Push license to server via M2M API (ServerApiClient)
|
||||
9. Update status → ACTIVE
|
||||
10. Audit log: TENANT_CREATE + TENANT_PROVISION + LICENSE_GENERATE
|
||||
```
|
||||
|
||||
If provisioning fails at any step, the tenant remains in PROVISIONING status with an error message. The vendor can retry or delete.
|
||||
|
||||
### Suspend / Activate Flow
|
||||
|
||||
```
|
||||
Suspend:
|
||||
1. Stop server + UI containers (DockerTenantProvisioner.stop())
|
||||
2. Set tenant status → SUSPENDED
|
||||
3. Audit log: TENANT_SUSPEND
|
||||
|
||||
Activate:
|
||||
1. Start server + UI containers (DockerTenantProvisioner.start())
|
||||
2. Wait for health check
|
||||
3. Set tenant status → ACTIVE
|
||||
4. Audit log: TENANT_ACTIVATE
|
||||
```
|
||||
|
||||
### Delete Flow
|
||||
|
||||
```
|
||||
Delete:
|
||||
1. Stop and remove server + UI containers (DockerTenantProvisioner.remove())
|
||||
2. Revoke active license
|
||||
3. Delete Logto organization (LogtoManagementClient.deleteOrganization())
|
||||
4. Set tenant status → DELETED (soft delete, keep record for audit)
|
||||
5. Audit log: TENANT_DELETE
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 4. Server Communication
|
||||
|
||||
### SaaS → Server (M2M API)
|
||||
|
||||
The existing `ServerApiClient` pattern (Logto M2M token, `X-Cameleer-Protocol-Version: 1` header) is extended for per-tenant endpoints:
|
||||
|
||||
```java
|
||||
public class ServerApiClient {
|
||||
// Existing: uses configured server-endpoint
|
||||
// New: accepts dynamic endpoint per tenant
|
||||
|
||||
public ServerHealth getHealth(String serverEndpoint) { ... }
|
||||
public void pushLicenseToken(String serverEndpoint, String token) { ... }
|
||||
public void pushOidcConfig(String serverEndpoint, OidcConfigRequest config) { ... }
|
||||
public ServerUsage getUsage(String serverEndpoint) { ... }
|
||||
}
|
||||
```
|
||||
|
||||
The `serverEndpoint` is resolved per tenant: `http://cameleer-server-${slug}:8081` (Docker-internal DNS).
|
||||
|
||||
### Health & Usage Data
|
||||
|
||||
**ServerHealth** (from server's `/actuator/health` + `/api/admin/status`):
|
||||
- Server status: UP/DOWN
|
||||
- Connected agents: count
|
||||
- Active applications: count
|
||||
- Error rate (last hour)
|
||||
|
||||
**ServerUsage** (from server API — new endpoint or existing data):
|
||||
- Agent count vs license limit
|
||||
- Environment count vs license limit
|
||||
- Which features are actively used (topology, lineage, etc.)
|
||||
|
||||
The SaaS caches health data per tenant (refresh every 30s for the fleet view, on-demand for detail pages).
|
||||
|
||||
### SSO Bridge
|
||||
|
||||
**Initial state** (before customer OIDC): The tenant's server trusts Logto. The tenant admin has a Logto account. "Open Server Dashboard" navigates to `/t/{slug}/` — the server's OIDC flow detects the existing Logto session and authenticates the user.
|
||||
|
||||
**After customer OIDC**: The SaaS pushes the customer's OIDC config to the server via `ServerApiClient.pushOidcConfig()`. The server switches to trusting the customer's provider. The tenant admin authenticates via their company's OIDC when accessing the server.
|
||||
|
||||
---
|
||||
|
||||
## 5. Backend API Design
|
||||
|
||||
### Vendor Endpoints (platform:admin required)
|
||||
|
||||
| Method | Path | Purpose |
|
||||
|--------|------|---------|
|
||||
| `GET` | `/api/vendor/tenants` | List all tenants with health summary |
|
||||
| `POST` | `/api/vendor/tenants` | Create tenant (triggers full provisioning flow) |
|
||||
| `GET` | `/api/vendor/tenants/{id}` | Tenant detail with server status |
|
||||
| `PATCH` | `/api/vendor/tenants/{id}` | Update tenant metadata (name, tier) |
|
||||
| `POST` | `/api/vendor/tenants/{id}/suspend` | Suspend tenant |
|
||||
| `POST` | `/api/vendor/tenants/{id}/activate` | Reactivate tenant |
|
||||
| `DELETE` | `/api/vendor/tenants/{id}` | Offboard tenant |
|
||||
| `POST` | `/api/vendor/tenants/{id}/license` | Generate/renew license |
|
||||
| `GET` | `/api/vendor/tenants/{id}/health` | Server health check (on-demand) |
|
||||
|
||||
### Tenant Endpoints (org-scoped, tenant from JWT)
|
||||
|
||||
| Method | Path | Purpose |
|
||||
|--------|------|---------|
|
||||
| `GET` | `/api/tenant/dashboard` | Aggregated health + license usage |
|
||||
| `GET` | `/api/tenant/license` | License details with usage data |
|
||||
| `GET` | `/api/tenant/oidc` | Current OIDC configuration |
|
||||
| `PUT` | `/api/tenant/oidc` | Update OIDC config (push to server) |
|
||||
| `GET` | `/api/tenant/team` | Team members (from Logto org) |
|
||||
| `POST` | `/api/tenant/team/invite` | Invite member |
|
||||
| `PATCH` | `/api/tenant/team/{userId}/role` | Change member role |
|
||||
| `DELETE` | `/api/tenant/team/{userId}` | Remove member |
|
||||
| `GET` | `/api/tenant/settings` | Org settings |
|
||||
|
||||
### Existing Endpoints to Modify
|
||||
|
||||
| Current | Change |
|
||||
|---------|--------|
|
||||
| `GET /api/tenants` | Move to `/api/vendor/tenants`, add health data |
|
||||
| `POST /api/tenants` | Move to `/api/vendor/tenants`, add provisioning |
|
||||
| `GET /api/tenants/{id}` | Keep for backward compat, also available at `/api/vendor/tenants/{id}` |
|
||||
| `GET /api/tenants/{id}/license` | Keep, also available at `/api/tenant/license` |
|
||||
| `POST /api/tenants/{id}/license` | Move to `/api/vendor/tenants/{id}/license` |
|
||||
| `GET /api/me` | Keep (used by OrgResolver) |
|
||||
| `GET /api/config` | Keep (used by frontend bootstrap) |
|
||||
|
||||
---
|
||||
|
||||
## 6. Frontend Design
|
||||
|
||||
### Vendor Console
|
||||
|
||||
**Tenant List** (`/vendor/tenants`):
|
||||
- DataTable with columns: Name, Slug, Tier (Badge), Status (Badge), Server (health indicator), Agents (used/limit), License (expiry or "None"), Created
|
||||
- Row click → tenant detail
|
||||
- "+ Create Tenant" button in header
|
||||
- Status badges: ACTIVE (green), PROVISIONING (blue), SUSPENDED (amber), DELETED (gray)
|
||||
- Server health: green dot (UP), red dot (DOWN), gray dot (no server)
|
||||
|
||||
**Create Tenant** (`/vendor/tenants/new`):
|
||||
- Form with: Name, Slug (auto-generated from name, editable), Tier (dropdown: LOW/MID/HIGH/BUSINESS)
|
||||
- On submit: shows provisioning progress (creating record → creating org → generating license → starting server → health check → done)
|
||||
- Progress displayed as a step indicator or timeline
|
||||
- On success: redirect to tenant detail
|
||||
|
||||
**Tenant Detail** (`/vendor/tenants/:id`):
|
||||
- Header: Tenant name + tier badge + status badge
|
||||
- KPI strip: Server Status, Agents (used/limit), Environments (used/limit), License (days remaining)
|
||||
- Sections:
|
||||
- **Server**: Status, endpoint URL, start/stop/restart actions
|
||||
- **License**: Current license details, "Renew" button
|
||||
- **Info**: Slug, created date, Logto org ID
|
||||
- Actions: Suspend/Activate toggle, Delete (with confirmation)
|
||||
|
||||
### Tenant Portal
|
||||
|
||||
**Dashboard** (`/tenant/`):
|
||||
- KPI strip: Server Status, Agents (used/limit), Environments (used/limit), License (days remaining)
|
||||
- Quick links: "Open Server Dashboard", "View License", "Configure OIDC"
|
||||
- If server is DOWN: prominent alert banner
|
||||
|
||||
**License** (`/tenant/license`):
|
||||
- Reuses existing LicensePage layout
|
||||
- Adds usage indicators: "2 of 3 agents", "1 of 1 environments"
|
||||
- Progress bars for limits approaching capacity
|
||||
- License token section (show/hide + copy)
|
||||
|
||||
**OIDC Configuration** (`/tenant/oidc`):
|
||||
- Form: Issuer URI, Client ID, Client Secret (masked), Audience, Roles Claim
|
||||
- Current status: "Using Logto (default)" or "External OIDC configured"
|
||||
- Save pushes config to server via SaaS API
|
||||
- "Test Connection" button (calls server's OIDC discovery endpoint)
|
||||
- "Reset to Logto" button (reverts to default)
|
||||
|
||||
**Team Management** (`/tenant/team`):
|
||||
- DataTable: Name, Email, Role (dropdown: Owner/Operator/Viewer), Actions (Remove)
|
||||
- "+ Invite Member" button → form with email + role
|
||||
- Role changes update Logto org membership
|
||||
- Cannot remove the last owner
|
||||
|
||||
**Settings** (`/tenant/settings`):
|
||||
- Read-only info: Name, Slug, Tier, Status, Created
|
||||
- Server endpoint URL
|
||||
- "Contact support to change tier" message (tier changes go through vendor)
|
||||
|
||||
### Shared Components
|
||||
|
||||
- **ServerStatusBadge**: Green dot + "Running", Red dot + "Stopped", Gray dot + "Provisioning"
|
||||
- **UsageIndicator**: "2 / 3 agents" with progress bar, color-coded (green < 80%, amber < 100%, red = 100%)
|
||||
- **ProvisioningProgress**: Step indicator for tenant creation flow
|
||||
|
||||
### Layout Changes
|
||||
|
||||
- Remove TopBar server controls (status filters, time range, auto-refresh) — these are not relevant to the SaaS platform. Use a simplified TopBar with breadcrumb, theme toggle, and user menu only.
|
||||
- Sidebar: persona-aware navigation (vendor vs customer sections)
|
||||
- Sidebar footer: "Open Server Dashboard" link with tenant-specific URL (`/t/{slug}/`)
|
||||
|
||||
---
|
||||
|
||||
## 7. Files to Create/Modify
|
||||
|
||||
### New Backend Files
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `provisioning/TenantProvisioner.java` | Pluggable provisioning interface |
|
||||
| `provisioning/TenantProvisionRequest.java` | Provision request record |
|
||||
| `provisioning/ProvisionResult.java` | Provision result record |
|
||||
| `provisioning/ServerStatus.java` | Server health status record |
|
||||
| `provisioning/DockerTenantProvisioner.java` | Docker implementation |
|
||||
| `provisioning/DisabledTenantProvisioner.java` | No-op fallback |
|
||||
| `provisioning/TenantProvisionerAutoConfig.java` | Auto-detection config |
|
||||
| `vendor/VendorTenantController.java` | Vendor API endpoints |
|
||||
| `vendor/VendorTenantService.java` | Vendor business logic (orchestrates provisioning + license + Logto) |
|
||||
| `tenant/TenantPortalController.java` | Customer API endpoints |
|
||||
| `tenant/TenantPortalService.java` | Customer business logic (reads from server, manages team) |
|
||||
|
||||
### Modified Backend Files
|
||||
|
||||
| File | Changes |
|
||||
|------|---------|
|
||||
| `identity/ServerApiClient.java` | Add per-tenant endpoint support, health/usage/OIDC methods |
|
||||
| `identity/LogtoManagementClient.java` | Add user invite, role management, list org members |
|
||||
| `tenant/TenantEntity.java` | Add `serverEndpoint` field, `provisionError` field |
|
||||
| `tenant/TenantService.java` | Keep existing methods, used by VendorTenantService |
|
||||
| `license/LicenseService.java` | Keep existing, add revoke method |
|
||||
| `config/SecurityConfig.java` | Add vendor/tenant endpoint security rules |
|
||||
| `config/TenantIsolationInterceptor.java` | Handle `/api/tenant/*` (resolve from JWT, no path variable) |
|
||||
|
||||
### New Frontend Files
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `pages/vendor/VendorTenantsPage.tsx` | Tenant list with fleet health |
|
||||
| `pages/vendor/CreateTenantPage.tsx` | Create tenant wizard |
|
||||
| `pages/vendor/TenantDetailPage.tsx` | Tenant detail with actions |
|
||||
| `pages/tenant/TenantDashboardPage.tsx` | Customer dashboard (evolves from DashboardPage) |
|
||||
| `pages/tenant/TenantLicensePage.tsx` | License with usage (evolves from LicensePage) |
|
||||
| `pages/tenant/OidcConfigPage.tsx` | External OIDC configuration |
|
||||
| `pages/tenant/TeamPage.tsx` | Team management |
|
||||
| `pages/tenant/SettingsPage.tsx` | Organization settings |
|
||||
| `components/ServerStatusBadge.tsx` | Shared server status indicator |
|
||||
| `components/UsageIndicator.tsx` | License usage progress bar |
|
||||
| `api/vendor-hooks.ts` | React Query hooks for vendor API |
|
||||
| `api/tenant-hooks.ts` | React Query hooks for tenant API |
|
||||
|
||||
### Modified Frontend Files
|
||||
|
||||
| File | Changes |
|
||||
|------|---------|
|
||||
| `router.tsx` | Restructure routes: `/vendor/*`, `/tenant/*` |
|
||||
| `components/Layout.tsx` | Persona-aware sidebar, simplified TopBar, tenant-specific server link |
|
||||
| `auth/OrgResolver.tsx` | Handle vendor landing (redirect to `/vendor/tenants`) |
|
||||
| `types/api.ts` | Add vendor/tenant API types |
|
||||
| `api/client.ts` | No changes needed (generic fetch wrapper) |
|
||||
|
||||
### Files to Remove
|
||||
|
||||
| File | Reason |
|
||||
|------|--------|
|
||||
| `pages/DashboardPage.tsx` | Replaced by `tenant/TenantDashboardPage.tsx` |
|
||||
| `pages/LicensePage.tsx` | Replaced by `tenant/TenantLicensePage.tsx` |
|
||||
| `pages/AdminTenantsPage.tsx` | Replaced by `vendor/VendorTenantsPage.tsx` |
|
||||
|
||||
### Docker Changes
|
||||
|
||||
| File | Changes |
|
||||
|------|---------|
|
||||
| `docker-compose.yml` | Mount Docker socket into cameleer-saas container |
|
||||
| `docker-compose.dev.yml` | Add Docker socket mount, group_add for Docker access |
|
||||
|
||||
### Database Migration
|
||||
|
||||
New migration `V011` (NOTE(review): the certificate-management spec below also claims `V011__certificates.sql` — Flyway requires unique versions, so one of the two must be renumbered):
|
||||
- Add `server_endpoint` column to `tenants` (nullable VARCHAR, stores Docker-internal URL)
|
||||
- Add `provision_error` column to `tenants` (nullable TEXT, stores last error message)
|
||||
- Add `DELETED` to status enum (for soft-delete offboarding)
|
||||
|
||||
---
|
||||
|
||||
## 8. Existing Compose Stack Changes
|
||||
|
||||
The default `cameleer3-server` and `cameleer3-server-ui` containers in docker-compose.yml become the "bootstrap" server for the `default` tenant. When provisioning is enabled, new tenants get their own dynamically-created containers.
|
||||
|
||||
The existing compose stack continues to work as-is for development. The provisioner creates additional containers alongside the compose-managed ones.
|
||||
|
||||
For the `default` tenant (created by bootstrap), the SaaS recognizes the existing compose-managed server and doesn't try to provision a new one. This is detected by checking if a container named `cameleer-server-default` (or the compose-managed `cameleer3-server`) already exists.
|
||||
|
||||
---
|
||||
|
||||
## 9. Out of Scope
|
||||
|
||||
- **Kubernetes provisioning** — interface defined, implementation deferred
|
||||
- **Billing/Stripe** — fields exist in DB, no integration in this spec
|
||||
- **Mobile responsiveness** — deferred
|
||||
- **Self-service signup** — tenants created by vendor only
|
||||
- **Custom domains** — deferred
|
||||
- **Email notifications** — deferred
|
||||
- **Usage-based metering** — deferred (license limits are checked but not metered)
|
||||
|
||||
---
|
||||
|
||||
## 10. Related Issues
|
||||
|
||||
| Issue | Relevance |
|
||||
|-------|-----------|
|
||||
| #1 | Epic: SaaS Management Platform |
|
||||
| #3 | Tenant Provisioning & Lifecycle |
|
||||
| #25 | K8s Operational Layer (deferred) |
|
||||
| #29 | Billing & Metering (deferred) |
|
||||
| #37 | Admin: Tenant creation UI — superseded by this spec |
|
||||
| #38 | Cross-app session management — addressed by SSO bridge |
|
||||
@@ -0,0 +1,242 @@
|
||||
# Certificate Management Design
|
||||
|
||||
## Problem
|
||||
|
||||
The platform currently generates a self-signed TLS certificate at bootstrap time via an Alpine init container. There is no way to supply a real certificate at bootstrap, replace it at runtime, or manage CA trust bundles for tenant enterprise SSO providers. Internal services bypass TLS verification with hardcoded flags (`CAMELEER_OIDC_TLS_SKIP_VERIFY=true`, `NODE_TLS_REJECT_UNAUTHORIZED=0`).
|
||||
|
||||
## Goals
|
||||
|
||||
1. Supply a cert+key at bootstrap time (env vars pointing to files)
|
||||
2. Replace the platform TLS certificate at runtime via vendor UI
|
||||
3. Manage a CA trust bundle (`ca.pem`) aggregating platform CA + tenant enterprise CAs
|
||||
4. Stage certificates before activation (shadow certs)
|
||||
5. Roll back to the previous certificate if activation causes issues
|
||||
6. Flag tenants that need restart after CA bundle changes
|
||||
7. Provider-based architecture: Docker now, K8s later
|
||||
|
||||
## Non-Goals
|
||||
|
||||
- ACME/Let's Encrypt integration (separate future work)
|
||||
- Per-tenant TLS certificates (all tenants share the platform cert via Traefik)
|
||||
- Client certificate authentication (mTLS)
|
||||
|
||||
## Architecture
|
||||
|
||||
### Provider Interface
|
||||
|
||||
```java
|
||||
package net.siegeln.cameleer.saas.certificate;
|
||||
|
||||
public interface CertificateManager {
|
||||
boolean isAvailable();
|
||||
|
||||
CertificateInfo getActive();
|
||||
CertificateInfo getStaged();
|
||||
CertificateInfo getArchived();
|
||||
|
||||
CertValidationResult stage(byte[] certPem, byte[] keyPem, byte[] caBundlePem);
|
||||
void activate();
|
||||
void restore();
|
||||
void discardStaged();
|
||||
|
||||
void generateSelfSigned(String hostname);
|
||||
byte[] getCaBundle();
|
||||
}
|
||||
```
|
||||
|
||||
Lives in `net.siegeln.cameleer.saas.certificate`. Implementation in `net.siegeln.cameleer.saas.provisioning` alongside `DockerTenantProvisioner`.
|
||||
|
||||
`DockerCertificateManager` writes to the Docker `certs` volume. Future `K8sCertificateManager` would manage K8s TLS Secrets + cert-manager CRDs.
|
||||
|
||||
### Records
|
||||
|
||||
```java
|
||||
public record CertificateInfo(
|
||||
String subject, String issuer, Instant notBefore, Instant notAfter,
|
||||
boolean hasCaBundle, boolean selfSigned, String fingerprint
|
||||
) {}
|
||||
|
||||
public record CertValidationResult(
|
||||
boolean valid, List<String> errors, CertificateInfo info
|
||||
) {}
|
||||
```
|
||||
|
||||
### File Layout (Docker Volume)
|
||||
|
||||
```
|
||||
/certs/
|
||||
cert.pem <- ACTIVE platform cert (Traefik reads)
|
||||
key.pem <- ACTIVE private key
|
||||
ca.pem <- aggregated CA bundle (platform CA + tenant CAs)
|
||||
meta.json <- bootstrap metadata for DB seeding
|
||||
staged/
|
||||
cert.pem <- STAGED cert
|
||||
key.pem <- STAGED key
|
||||
ca.pem <- STAGED CA bundle
|
||||
prev/
|
||||
cert.pem <- ARCHIVED (one previous)
|
||||
key.pem
|
||||
ca.pem
|
||||
```
|
||||
|
||||
Atomic swap pattern: write to `*.wip`, validate, rename to final path.
|
||||
|
||||
### Database
|
||||
|
||||
```sql
|
||||
-- V011__certificates.sql
|
||||
CREATE TABLE certificates (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
status VARCHAR(10) NOT NULL CHECK (status IN ('ACTIVE', 'STAGED', 'ARCHIVED')),
|
||||
subject VARCHAR(500),
|
||||
issuer VARCHAR(500),
|
||||
not_before TIMESTAMPTZ,
|
||||
not_after TIMESTAMPTZ,
|
||||
fingerprint VARCHAR(128),
|
||||
has_ca BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
self_signed BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
uploaded_by UUID,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
||||
activated_at TIMESTAMPTZ,
|
||||
archived_at TIMESTAMPTZ
|
||||
);
|
||||
```
|
||||
|
||||
At most 3 rows: one per status. Activation sequence: first delete the existing ARCHIVED row, then demote the current ACTIVE row to ARCHIVED, then promote the STAGED row to ACTIVE.
|
||||
|
||||
Tenant staleness tracked via `ca_applied_at` column on `tenants` table:
|
||||
|
||||
```sql
|
||||
-- in same migration
|
||||
ALTER TABLE tenants ADD COLUMN ca_applied_at TIMESTAMPTZ;
|
||||
```
|
||||
|
||||
Tenants with `ca_applied_at < (active cert's activated_at)` are stale.
|
||||
|
||||
### State Transitions
|
||||
|
||||
```
|
||||
Upload -> STAGED -> activate -> ACTIVE -> (next activate) -> ARCHIVED
                                  ^                              |
                                  +---------- restore -----------+
|
||||
```
|
||||
|
||||
- **Activate staged**: delete ARCHIVED row+files, ACTIVE -> ARCHIVED (move files to prev/), STAGED -> ACTIVE (move files to root)
|
||||
- **Restore archived**: swap ACTIVE <-> ARCHIVED (swap files and DB statuses)
|
||||
- **Discard staged**: delete STAGED row + staged/ files
|
||||
|
||||
### Bootstrap Flow
|
||||
|
||||
The `traefik-certs` init container gains env var support:
|
||||
|
||||
```
|
||||
1. cert.pem + key.pem exist in volume?
|
||||
-> Yes: skip (idempotent)
|
||||
-> No: continue
|
||||
|
||||
2. CERT_FILE + KEY_FILE env vars set?
|
||||
-> Yes: copy to volume, validate (PEM parseable, key matches cert)
|
||||
If CA_FILE set, copy as ca.pem
|
||||
-> No: generate self-signed (current behavior)
|
||||
|
||||
3. Write /certs/meta.json with subject, fingerprint, self_signed flag
|
||||
```
|
||||
|
||||
SaaS app reads `meta.json` on startup to seed the certificates DB table if no ACTIVE row exists.
|
||||
|
||||
### REST API
|
||||
|
||||
All under `platform:admin` scope:
|
||||
|
||||
| Method | Path | Description |
|
||||
|--------|------|-------------|
|
||||
| GET | `/api/vendor/certificates` | List active, staged, archived |
|
||||
| POST | `/api/vendor/certificates/stage` | Upload cert+key+ca (multipart) |
|
||||
| POST | `/api/vendor/certificates/activate` | Promote staged -> active |
|
||||
| POST | `/api/vendor/certificates/restore` | Swap archived <-> active |
|
||||
| DELETE | `/api/vendor/certificates/staged` | Discard staged |
|
||||
| GET | `/api/vendor/certificates/stale-tenants` | Tenants needing restart for CA |
|
||||
|
||||
### Service Layer
|
||||
|
||||
`CertificateService` orchestrates:
|
||||
- Validation (PEM parsing, key-cert match, chain building, expiry check)
|
||||
- Delegates file operations to `CertificateManager` (provider)
|
||||
- Manages DB metadata
|
||||
- Computes tenant CA staleness
|
||||
|
||||
### CA Bundle Management
|
||||
|
||||
`ca.pem` is a concatenation of:
|
||||
- Platform cert's CA (if from a private CA, supplied at bootstrap or upload)
|
||||
- Tenant-supplied CAs (for enterprise SSO with private IdPs)
|
||||
|
||||
On any CA change (platform cert upload with CA, tenant CA add/remove):
|
||||
1. Rebuild: concatenate all CAs into `ca.wip`
|
||||
2. Validate: parse all PEM entries, verify structure
|
||||
3. Atomic swap: `mv ca.wip ca.pem`
|
||||
4. Update `activated_at` on ACTIVE cert row
|
||||
5. Flag tenants as stale
|
||||
|
||||
### Tenant CA Distribution
|
||||
|
||||
At provisioning time (`DockerTenantProvisioner`):
|
||||
- Mount `certs` volume read-only at `/certs` in tenant containers
|
||||
- Java servers: JVM truststore import at entrypoint or `JAVA_OPTS` with custom truststore
|
||||
- Node containers: `NODE_EXTRA_CA_CERTS=/certs/ca.pem`
|
||||
- Set `ca_applied_at = now()` on tenant record
|
||||
- Remove TLS skip flags when `ca.pem` exists
|
||||
|
||||
On tenant restart (manual, after CA change):
|
||||
- Container picks up current `ca.pem` from volume mount
|
||||
- Update `ca_applied_at` on tenant
|
||||
|
||||
### Vendor UI
|
||||
|
||||
New "Certificates" page in vendor sidebar:
|
||||
|
||||
- **Active cert card**: subject, issuer, expiry, fingerprint, self-signed badge, activated date
|
||||
- **Staged cert card** (conditional): same metadata + Activate / Discard buttons, validation errors if any
|
||||
- **Archived cert card** (conditional): same metadata + Restore button (disabled if expired)
|
||||
- **Upload area**: file inputs for cert.pem (required), key.pem (required), ca.pem (optional)
|
||||
- **Stale tenants banner**: "CA bundle updated - N tenants need restart" with restart action
|
||||
|
||||
### React Hooks
|
||||
|
||||
```typescript
|
||||
useVendorCertificates() // GET /vendor/certificates
|
||||
useStageCertificate() // POST multipart
|
||||
useActivateCertificate() // POST activate
|
||||
useRestoreCertificate() // POST restore
|
||||
useDiscardStaged() // DELETE staged
|
||||
useStaleTenants() // GET stale-tenants
|
||||
```
|
||||
|
||||
## File Inventory
|
||||
|
||||
### New Files
|
||||
|
||||
| File | Description |
|
||||
|------|-------------|
|
||||
| `src/.../certificate/CertificateManager.java` | Provider interface |
|
||||
| `src/.../certificate/CertificateInfo.java` | Cert metadata record |
|
||||
| `src/.../certificate/CertValidationResult.java` | Validation result record |
|
||||
| `src/.../certificate/CertificateEntity.java` | JPA entity |
|
||||
| `src/.../certificate/CertificateRepository.java` | Spring Data repo |
|
||||
| `src/.../certificate/CertificateService.java` | Business logic |
|
||||
| `src/.../certificate/CertificateController.java` | REST endpoints |
|
||||
| `src/.../provisioning/DockerCertificateManager.java` | Docker volume implementation |
|
||||
| `src/main/resources/db/migration/V011__certificates.sql` | Migration |
|
||||
| `ui/src/api/certificate-hooks.ts` | React Query hooks |
|
||||
| `ui/src/pages/vendor/CertificatesPage.tsx` | Vendor UI page |
|
||||
|
||||
### Modified Files
|
||||
|
||||
| File | Change |
|
||||
|------|--------|
|
||||
| `docker-compose.yml` | Add CERT_FILE/KEY_FILE/CA_FILE env vars to init container |
|
||||
| `traefik.yml` | No change (already reads from /certs/) |
|
||||
| `src/.../provisioning/DockerTenantProvisioner.java` | Mount certs volume, set CA env vars, remove TLS skip flags |
|
||||
| `ui/src/components/Layout.tsx` | Add Certificates sidebar item |
|
||||
| `ui/src/router.tsx` | Add certificates route |
|
||||
| `ui/src/api/vendor-hooks.ts` | Or new file for cert hooks |
|
||||
444
docs/superpowers/specs/2026-04-13-install-script-design.md
Normal file
444
docs/superpowers/specs/2026-04-13-install-script-design.md
Normal file
@@ -0,0 +1,444 @@
|
||||
# Cameleer SaaS Install Script Design
|
||||
|
||||
## Overview
|
||||
|
||||
A professional installer for the Cameleer SaaS platform, distributed as two native scripts (`install.sh` for Linux, `install.ps1` for Windows). The installer downloads nothing — it embeds compose templates and generates all configuration from user input. All service initialization logic is baked into Docker images, configured via environment variables.
|
||||
|
||||
Distribution model: `curl -sfL https://install.cameleer.io | bash` (Linux), `irm https://install.cameleer.io/windows | iex` (Windows).
|
||||
|
||||
## Platform Simplification (Prerequisites)
|
||||
|
||||
The current architecture uses 7 services with 10+ bind-mounted config files. This design consolidates everything into 5 services with zero bind mounts (except Docker socket and optional user-supplied TLS certs).
|
||||
|
||||
### Image Consolidation
|
||||
|
||||
| Image | Base | Bakes in |
|
||||
|---|---|---|
|
||||
| `cameleer-traefik` | `traefik:v3` | Static/dynamic Traefik config (uses Traefik env var substitution for dynamic values like ports), cert generation entrypoint (`openssl`), self-signed cert logic |
|
||||
| `cameleer-postgres` | `postgres:16-alpine` | `init-databases.sh` (creates `cameleer_saas`, `logto` databases) |
|
||||
| `cameleer-clickhouse` | `clickhouse/clickhouse-server` | Init SQL (`CREATE DATABASE cameleer`), `clickhouse-users.xml`, `clickhouse-config.xml` (Prometheus metrics) |
|
||||
| `cameleer-logto` | `ghcr.io/logto-io/logto` | Custom sign-in UI, bootstrap logic (app/user/role/scope creation), vendor seed (env-var gated). Replaces the separate `logto-bootstrap` init container. |
|
||||
| `cameleer-saas` | `eclipse-temurin:21-jre-alpine` | Spring Boot app + React SPA (already exists, no changes) |
|
||||
|
||||
All images published to `gitea.siegeln.net/cameleer/`.
|
||||
|
||||
### Service Reduction
|
||||
|
||||
| Before | After |
|
||||
|---|---|
|
||||
| traefik-certs (init container) | Merged into `cameleer-traefik` entrypoint |
|
||||
| traefik | `cameleer-traefik` |
|
||||
| postgres + bind-mounted init script | `cameleer-postgres` |
|
||||
| clickhouse + 3 bind-mounted config files | `cameleer-clickhouse` |
|
||||
| logto | `cameleer-logto` (with bootstrap) |
|
||||
| logto-bootstrap (init container) | Merged into `cameleer-logto` entrypoint |
|
||||
| cameleer-saas + bind-mounted UI | `cameleer-saas` |
|
||||
|
||||
**Result: 7 services → 5 services. 10+ bind-mounted files → 0.**
|
||||
|
||||
### Bootstrap Merge
|
||||
|
||||
The `logto-bootstrap` init container logic moves into `cameleer-logto`'s entrypoint as an idempotent startup step:
|
||||
|
||||
1. Logto starts and seeds its own database (`npm run cli db seed -- --swe`)
|
||||
2. Entrypoint runs bootstrap logic (create apps, users, roles, scopes, branding)
|
||||
3. Bootstrap checks for cached results in a Docker volume — skips if already done
|
||||
4. Writes `logto-bootstrap.json` to shared volume
|
||||
5. If `VENDOR_SEED_ENABLED=true`, creates vendor user and global role
|
||||
6. Logto server starts normally
|
||||
|
||||
The `cameleer-saas` service uses `depends_on: logto (healthy)` and reads bootstrap results from the shared volume on startup — same as today.
|
||||
|
||||
## Installer Architecture
|
||||
|
||||
### Distribution
|
||||
|
||||
- Linux: `curl -sfL https://install.cameleer.io | bash`
|
||||
- Windows: `irm https://install.cameleer.io/windows | iex`
|
||||
|
||||
The scripts are self-contained. They embed docker-compose templates and generate all files locally. No secondary downloads.
|
||||
|
||||
### Scripts
|
||||
|
||||
- `install.sh` — Bash, targets Linux with Docker Engine
|
||||
- `install.ps1` — PowerShell, targets Windows with Docker Desktop (WSL2 backend)
|
||||
|
||||
Both implement identical logic and produce identical output. They share a config file format (`cameleer.conf`) so configurations are portable between platforms.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
The installer checks (does not install) these prerequisites:
|
||||
|
||||
- Docker Engine 24+ (Linux) or Docker Desktop 4.25+ (Windows)
|
||||
- Docker Compose v2 (`docker compose` subcommand)
|
||||
- `openssl` (Linux, for password generation) — PowerShell uses `[System.Security.Cryptography.RandomNumberGenerator]`
|
||||
- Ports 80, 443, 3002 are free (or custom ports if specified)
|
||||
- Docker socket accessible
|
||||
|
||||
If any prerequisite is missing, the script prints a clear error message with a link to installation instructions and exits.
|
||||
|
||||
## Installation Modes
|
||||
|
||||
### Simple Mode (default)
|
||||
|
||||
Asks 6 essential questions:
|
||||
|
||||
1. Install directory (default: `./cameleer`)
|
||||
2. Public hostname (auto-detected, default: `localhost`)
|
||||
3. Admin username (default: `admin`)
|
||||
4. Admin password (default: auto-generated)
|
||||
5. Use custom TLS certificates? (default: no → self-signed)
|
||||
- If yes: paths to cert.pem, key.pem, optional ca.pem
|
||||
6. Connect to a monitoring network? (default: none)
|
||||
|
||||
Everything else uses secure defaults. All passwords auto-generated.
|
||||
|
||||
### Expert Mode (`--expert` or chosen at interactive prompt)
|
||||
|
||||
Adds these options, grouped by category:
|
||||
|
||||
**Credentials:**
|
||||
- PostgreSQL password (default: generated)
|
||||
- ClickHouse password (default: generated)
|
||||
- Vendor account enable + username + password
|
||||
|
||||
**Networking:**
|
||||
- HTTP port (default: 80)
|
||||
- HTTPS port (default: 443)
|
||||
- Logto admin console port (default: 3002)
|
||||
|
||||
**Docker:**
|
||||
- Image version/tag (default: `latest`)
|
||||
- Compose project name (default: `cameleer-saas`)
|
||||
- Docker socket path (auto-detected)
|
||||
|
||||
**TLS:**
|
||||
- CA bundle path
|
||||
- `NODE_TLS_REJECT_UNAUTHORIZED` setting
|
||||
|
||||
**Logto:**
|
||||
- Admin console external exposure (default: yes)
|
||||
|
||||
### Silent Mode (`--silent`)
|
||||
|
||||
No interactive prompts. Uses defaults plus overrides.
|
||||
|
||||
**Config precedence:** CLI flags > environment variables > config file (`--config`) > defaults.
|
||||
|
||||
## Configuration Reference
|
||||
|
||||
| Config key | CLI flag | Env var | Default | Simple | Expert |
|
||||
|---|---|---|---|---|---|
|
||||
| `install_dir` | `--install-dir` | `CAMELEER_INSTALL_DIR` | `./cameleer` | yes | yes |
|
||||
| `public_host` | `--public-host` | `PUBLIC_HOST` | auto-detect | yes | yes |
|
||||
| `public_protocol` | `--public-protocol` | `PUBLIC_PROTOCOL` | `https` | no | yes |
|
||||
| `admin_user` | `--admin-user` | `SAAS_ADMIN_USER` | `admin` | yes | yes |
|
||||
| `admin_password` | `--admin-password` | `SAAS_ADMIN_PASS` | generated | yes | yes |
|
||||
| `tls_mode` | `--tls-mode` | `TLS_MODE` | `self-signed` | yes | yes |
|
||||
| `cert_file` | `--cert-file` | `CERT_FILE` | none | yes* | yes |
|
||||
| `key_file` | `--key-file` | `KEY_FILE` | none | yes* | yes |
|
||||
| `ca_file` | `--ca-file` | `CA_FILE` | none | no | yes |
|
||||
| `monitoring_network` | `--monitoring-network` | `MONITORING_NETWORK` | none | yes | yes |
|
||||
| `postgres_password` | `--postgres-password` | `POSTGRES_PASSWORD` | generated | no | yes |
|
||||
| `clickhouse_password` | `--clickhouse-password` | `CLICKHOUSE_PASSWORD` | generated | no | yes |
|
||||
| `http_port` | `--http-port` | `HTTP_PORT` | `80` | no | yes |
|
||||
| `https_port` | `--https-port` | `HTTPS_PORT` | `443` | no | yes |
|
||||
| `logto_console_port` | `--logto-console-port` | `LOGTO_CONSOLE_PORT` | `3002` | no | yes |
|
||||
| `logto_console_exposed` | `--logto-console-exposed` | `LOGTO_CONSOLE_EXPOSED` | `true` | no | yes |
|
||||
| `vendor_enabled` | `--vendor-enabled` | `VENDOR_ENABLED` | `false` | no | yes |
|
||||
| `vendor_user` | `--vendor-user` | `VENDOR_USER` | `vendor` | no | yes |
|
||||
| `vendor_password` | `--vendor-password` | `VENDOR_PASS` | generated | no | yes |
|
||||
| `version` | `--version` | `CAMELEER_VERSION` | `latest` | no | yes |
|
||||
| `compose_project` | `--compose-project` | `COMPOSE_PROJECT` | `cameleer-saas` | no | yes |
|
||||
| `docker_socket` | `--docker-socket` | `DOCKER_SOCKET` | auto-detect | no | yes |
|
||||
| `node_tls_reject` | `--node-tls-reject` | `NODE_TLS_REJECT` | `0` (self-signed) / `1` (custom) | no | yes |
|
||||
|
||||
*\* Only asked in simple mode if the user chooses custom TLS.*
|
||||
|
||||
### Config File Format (`cameleer.conf`)
|
||||
|
||||
```ini
|
||||
# Cameleer installation config
|
||||
# Generated by installer v1.0.0 on 2026-04-13
|
||||
|
||||
install_dir=./cameleer
|
||||
public_host=cameleer.example.com
|
||||
public_protocol=https
|
||||
admin_user=my-admin
|
||||
version=1.0.0
|
||||
tls_mode=custom
|
||||
https_port=443
|
||||
monitoring_network=prometheus
|
||||
```
|
||||
|
||||
Plain `key=value`, `#` comments. Portable between Linux and Windows.
|
||||
|
||||
## Auto-Detection
|
||||
|
||||
The installer auto-detects sensible defaults:
|
||||
|
||||
| Value | Linux | Windows |
|
||||
|---|---|---|
|
||||
| Public hostname | `hostname -f`, reverse DNS of primary IP, fallback `localhost` | `[System.Net.Dns]::GetHostEntry`, fallback `localhost` |
|
||||
| Docker socket | `/var/run/docker.sock` | `//./pipe/docker_engine` |
|
||||
| Port availability | `ss -tlnp` or `netstat` check on 80, 443, 3002 | `Test-NetConnection` on 80, 443, 3002 |
|
||||
| Existing install | Check for `cameleer.conf` in install directory | Same |
|
||||
|
||||
## Output Files
|
||||
|
||||
The installer generates the following in the install directory:
|
||||
|
||||
```
|
||||
./cameleer/
|
||||
docker-compose.yml # Generated from embedded template
|
||||
.env # All service configuration
|
||||
.env.bak # Snapshot of .env at install time
|
||||
cameleer.conf # Installer config (for re-runs, cloning)
|
||||
credentials.txt # All generated passwords in plain text
|
||||
INSTALL.md # Tailored documentation
|
||||
certs/ # Only if user supplies custom TLS certs
|
||||
cert.pem
|
||||
key.pem
|
||||
ca.pem
|
||||
```
|
||||
|
||||
### docker-compose.yml (generated)
|
||||
|
||||
The compose file is generated from a template embedded in the script, with values substituted from the user's configuration. Key characteristics:
|
||||
|
||||
- All services use `${VARIABLE}` references to `.env`
|
||||
- No bind mounts except Docker socket and optional `certs/` directory
|
||||
- Shared volumes: `pgdata`, `chdata`, `bootstrapdata`, `certs`
|
||||
- Networks: `cameleer` (internal), `cameleer-traefik` (for dynamic tenant routing)
|
||||
- Optional external `monitoring_network` with Prometheus labels on services
|
||||
- Health checks on all services
|
||||
- `depends_on` with health conditions for startup ordering
|
||||
|
||||
### credentials.txt
|
||||
|
||||
```
|
||||
===========================================
|
||||
CAMELEER PLATFORM CREDENTIALS
|
||||
Generated: 2026-04-13 14:32:00 UTC
|
||||
|
||||
SECURE THIS FILE AND DELETE AFTER NOTING
|
||||
THESE CREDENTIALS CANNOT BE RECOVERED
|
||||
===========================================
|
||||
|
||||
Admin Console: https://cameleer.example.com/platform/
|
||||
Admin User: my-admin
|
||||
Admin Password: aB3x...generated...9Zq
|
||||
|
||||
PostgreSQL: cameleer / Kx8m...generated...Wp2
|
||||
ClickHouse: default / Rm4n...generated...Ht7
|
||||
|
||||
Vendor User: acme-admin (not enabled)
|
||||
|
||||
Logto Console: https://cameleer.example.com:3002
|
||||
```
|
||||
|
||||
Printed to terminal once at the end of installation. Never displayed again on re-runs.
|
||||
|
||||
### INSTALL.md (generated)
|
||||
|
||||
Tailored to the actual installation values. Sections:
|
||||
|
||||
1. **Installation Summary** — version, date, mode, install directory
|
||||
2. **Service URLs** — platform UI, Logto admin console, API endpoint
|
||||
3. **First Steps** — log in as admin, create first tenant
|
||||
4. **Architecture Overview** — containers running, purpose of each
|
||||
5. **Networking** — ports, monitoring network, Docker networks
|
||||
6. **TLS** — self-signed or custom, cert location, how to replace via vendor UI
|
||||
7. **Data & Backups** — Docker volume names, backup commands (pg_dump, clickhouse-backup)
|
||||
8. **Upgrading** — re-run installer with `--version`, what gets preserved
|
||||
9. **Troubleshooting** — common issues with `docker compose logs` commands
|
||||
10. **Uninstalling** — clean removal steps
|
||||
|
||||
## Password Generation
|
||||
|
||||
When no password is provided, the script generates cryptographically secure random passwords:
|
||||
|
||||
- Linux: `openssl rand -base64 24` (32 characters)
|
||||
- Windows: `[System.Security.Cryptography.RandomNumberGenerator]` → Base64
|
||||
|
||||
### Passwords Generated
|
||||
|
||||
| Credential | Config key | Consumers |
|
||||
|---|---|---|
|
||||
| PostgreSQL password | `postgres_password` | postgres, logto, cameleer-saas |
|
||||
| ClickHouse password | `clickhouse_password` | clickhouse, cameleer-saas (tenant provisioning) |
|
||||
| Admin password | `admin_password` | Logto admin user |
|
||||
| Vendor password | `vendor_password` | Logto vendor user (only if enabled) |
|
||||
|
||||
### Credential Lifecycle
|
||||
|
||||
1. Generated (or user-provided) during install
|
||||
2. Written to `.env` (consumed by Docker Compose)
|
||||
3. Written to `credentials.txt` in plain text
|
||||
4. Printed to terminal once at end of installation
|
||||
5. Never shown again — re-runs preserve existing credentials without displaying them
|
||||
|
||||
## Monitoring Network Integration
|
||||
|
||||
When a monitoring network is configured (simple or expert mode):
|
||||
|
||||
1. The script verifies the network exists via `docker network inspect`
|
||||
- If missing in interactive mode: asks whether to create it or skip
|
||||
- If missing in silent mode: creates it automatically
|
||||
2. The network is added as an external network in the generated `docker-compose.yml`
|
||||
3. Services are attached to it and labeled for Prometheus Docker SD:
|
||||
|
||||
```yaml
|
||||
cameleer-saas:
|
||||
labels:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8080"
|
||||
prometheus.io/path: "/platform/actuator/prometheus"
|
||||
|
||||
cameleer-traefik:
|
||||
labels:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "8082"
|
||||
prometheus.io/path: "/metrics"
|
||||
|
||||
cameleer-clickhouse:
|
||||
labels:
|
||||
prometheus.io/scrape: "true"
|
||||
prometheus.io/port: "9363"
|
||||
prometheus.io/path: "/metrics"
|
||||
```
|
||||
|
||||
No Prometheus configuration needed on the customer's side — Docker service discovery picks up the labels automatically.
|
||||
|
||||
## Idempotent Re-run & Upgrade
|
||||
|
||||
### Detection
|
||||
|
||||
The script checks for `cameleer.conf` in the install directory. If found, it's a re-run.
|
||||
|
||||
### Interactive Re-run Menu
|
||||
|
||||
```
|
||||
Existing Cameleer installation detected (v1.0.0)
|
||||
Install directory: ./cameleer
|
||||
Public host: cameleer.example.com
|
||||
|
||||
[1] Upgrade to v1.1.0 (pull new images, update compose)
|
||||
[2] Reconfigure (re-run interactive setup, preserve data)
|
||||
[3] Reinstall (fresh install, WARNING: destroys data volumes)
|
||||
[4] Cancel
|
||||
```
|
||||
|
||||
### Re-run Behavior
|
||||
|
||||
| Action | Preserve | Regenerate | Pull images |
|
||||
|---|---|---|---|
|
||||
| Upgrade | `.env`, `cameleer.conf`, `credentials.txt`, `certs/`, volumes | `docker-compose.yml`, `INSTALL.md` | yes (new version) |
|
||||
| Reconfigure | Data volumes, `credentials.txt` (unless passwords changed) | `.env`, `docker-compose.yml`, `cameleer.conf`, `INSTALL.md` | optional |
|
||||
| Reinstall | Nothing | Everything | yes |
|
||||
|
||||
### Silent Re-run
|
||||
|
||||
Defaults to upgrade. Override with `--reconfigure` or `--reinstall`.
|
||||
|
||||
### Safety
|
||||
|
||||
- Data volumes (`pgdata`, `chdata`, `bootstrapdata`) are never removed unless `--reinstall` is explicitly chosen
|
||||
- `--reinstall` requires double opt-in: `--reinstall --confirm-destroy`
|
||||
- The script never runs `docker volume rm` without this confirmation
|
||||
|
||||
## Health Verification
|
||||
|
||||
After `docker compose up -d`, the script polls services in dependency order:
|
||||
|
||||
| Step | Service | Check | Timeout |
|
||||
|---|---|---|---|
|
||||
| 1 | PostgreSQL | `pg_isready` via `docker compose exec` | 120s |
|
||||
| 2 | ClickHouse | `clickhouse-client` query via `docker compose exec` | 120s |
|
||||
| 3 | Logto | GET `/oidc/.well-known/openid-configuration` | 120s |
|
||||
| 4 | Bootstrap | Check `logto-bootstrap.json` exists in volume | 120s |
|
||||
| 5 | Cameleer SaaS | GET `/platform/api/config` | 120s |
|
||||
| 6 | Traefik | GET `https://{PUBLIC_HOST}/` (expect redirect) | 120s |
|
||||
|
||||
**Polling interval:** 5 seconds. **Total timeout:** 12 minutes worst case (the sum of the six per-step 120 s timeouts); a healthy stack typically passes all checks within 1–2 minutes.
|
||||
|
||||
### Output
|
||||
|
||||
```
|
||||
Verifying installation...
|
||||
[ok] PostgreSQL ready (3s)
|
||||
[ok] ClickHouse ready (5s)
|
||||
[ok] Logto ready (18s)
|
||||
[ok] Bootstrap complete (0s)
|
||||
[ok] Cameleer SaaS ready (8s)
|
||||
[ok] Traefik routing ready (1s)
|
||||
|
||||
Installation complete!
|
||||
```
|
||||
|
||||
### Failure
|
||||
|
||||
- Failing service marked with `[FAIL]` and a hint (e.g., "check `docker compose logs logto`")
|
||||
- Remaining checks skipped
|
||||
- Stack left running for inspection
|
||||
- Script exits with code 1
|
||||
|
||||
## Script Structure (both platforms)
|
||||
|
||||
```
|
||||
main()
|
||||
parse_args()
|
||||
detect_existing_install()
|
||||
if existing → show_rerun_menu()
|
||||
check_prerequisites()
|
||||
auto_detect_defaults()
|
||||
select_mode() # simple / expert / silent
|
||||
if interactive → run_prompts()
|
||||
merge_config() # CLI > env > config file > defaults
|
||||
validate_config()
|
||||
generate_passwords() # for any not provided
|
||||
if custom_certs → copy_certs()
|
||||
generate_env_file()
|
||||
generate_compose_file()
|
||||
write_config_file() # cameleer.conf
|
||||
docker_compose_pull()
|
||||
docker_compose_up()
|
||||
verify_health()
|
||||
generate_credentials_file()
|
||||
generate_install_doc()
|
||||
print_credentials()
|
||||
print_summary()
|
||||
```
|
||||
|
||||
Each function has a direct equivalent in both bash and PowerShell. The logic, prompts, and output are identical across platforms.
|
||||
|
||||
## TLS Certificate Flow (Simplified)
|
||||
|
||||
With the `traefik-certs` init container merged into `cameleer-traefik`, the certificate flow works as follows:
|
||||
|
||||
**Shared `certs` Docker volume** remains the mechanism for sharing TLS state between `cameleer-traefik` and `cameleer-saas` (which mounts it read-only for per-tenant server provisioning).
|
||||
|
||||
**Self-signed mode (default):**
|
||||
1. `cameleer-traefik` entrypoint checks if `/certs/cert.pem` exists in the volume
|
||||
2. If not, generates a self-signed cert for `${PUBLIC_HOST}` with wildcard SAN using `openssl`
|
||||
3. Writes `cert.pem`, `key.pem`, `meta.json` to the `certs` volume
|
||||
4. Starts Traefik normally
|
||||
|
||||
**Custom cert mode:**
|
||||
1. The installer copies user-supplied cert files to `./cameleer/certs/` on the host
|
||||
2. The generated `docker-compose.yml` bind-mounts `./certs/:/user-certs:ro` on the `cameleer-traefik` service
|
||||
3. `cameleer-traefik` entrypoint detects `CERT_FILE=/user-certs/cert.pem` and `KEY_FILE=/user-certs/key.pem`
|
||||
4. Validates and copies them to the shared `certs` Docker volume
|
||||
5. Writes `meta.json` with certificate metadata
|
||||
6. Starts Traefik normally
|
||||
|
||||
**Runtime cert replacement** (via vendor UI) continues to work unchanged — `cameleer-saas` writes to the `certs` volume's `staged/` directory and performs atomic swaps.
|
||||
|
||||
## Docker Socket Path
|
||||
|
||||
The generated `docker-compose.yml` uses the platform-appropriate Docker socket path:
|
||||
|
||||
- Linux: `/var/run/docker.sock:/var/run/docker.sock`
|
||||
- Windows (Docker Desktop): `//./pipe/docker_engine://./pipe/docker_engine`
|
||||
|
||||
The installer detects the platform and generates the correct bind mount. The `docker_socket` config key allows overriding this in expert mode.
|
||||
@@ -0,0 +1,147 @@
|
||||
# Per-Tenant PostgreSQL Isolation
|
||||
|
||||
**Date:** 2026-04-15
|
||||
**Status:** Approved
|
||||
|
||||
## Context
|
||||
|
||||
The cameleer3-server team introduced `currentSchema` and `ApplicationName` JDBC parameters (commit `7a63135`) to scope admin diagnostic queries to a single tenant's connections. Previously, all tenant servers shared one PostgreSQL user and connected to the `cameleer3` database without schema isolation — a tenant's server could theoretically see SQL text from other tenants via `pg_stat_activity`.
|
||||
|
||||
This spec adds per-tenant PostgreSQL users and schemas so each tenant server can only access its own data at the database level.
|
||||
|
||||
## Architecture
|
||||
|
||||
### Current State
|
||||
|
||||
- All tenant servers connect as the shared admin PG user to `cameleer3` database, `public` schema.
|
||||
- No per-tenant schemas exist — the server's Flyway runs in `public`.
|
||||
- `TenantDataCleanupService` already attempts `DROP SCHEMA tenant_<slug>` on delete (no-op today since schemas don't exist).
|
||||
- Standalone mode sets `currentSchema=tenant_default` in the compose file and is unaffected by this change.
|
||||
|
||||
### Target State
|
||||
|
||||
- Each tenant gets a dedicated PG user (`tenant_<slug>`) and schema (`tenant_<slug>`).
|
||||
- The tenant user owns only its schema. `REVOKE ALL ON SCHEMA public` prevents cross-tenant access.
|
||||
- The server's Flyway runs inside `tenant_<slug>` via the `currentSchema` JDBC parameter.
|
||||
- `ApplicationName=tenant_<slug>` scopes `pg_stat_activity` visibility per the server team's convention.
|
||||
- On tenant delete, both schema and user are dropped.
|
||||
|
||||
## New Component: `TenantDatabaseService`
|
||||
|
||||
A focused service with two methods:
|
||||
|
||||
```java
|
||||
@Service
|
||||
public class TenantDatabaseService {
|
||||
void createTenantDatabase(String slug, String password);
|
||||
void dropTenantDatabase(String slug);
|
||||
}
|
||||
```
|
||||
|
||||
### `createTenantDatabase(slug, password)`
|
||||
|
||||
Connects to `cameleer3` using the admin PG credentials from `ProvisioningProperties`. Executes:
|
||||
|
||||
1. Validate slug against `^[a-z0-9-]+$` (reject unexpected characters).
|
||||
2. `CREATE USER "tenant_<slug>" WITH PASSWORD '<password>'` (skip if user already exists — idempotent for re-provisioning).
|
||||
3. `CREATE SCHEMA "tenant_<slug>" AUTHORIZATION "tenant_<slug>"` (skip if schema already exists).
|
||||
4. `REVOKE ALL ON SCHEMA public FROM "tenant_<slug>"`.
|
||||
|
||||
All identifiers are double-quoted. The password is a 32-character random alphanumeric string generated by the same `SecureRandom` utility used for other credential generation; because it is strictly alphanumeric, embedding it as a SQL string literal cannot break out of the quotes, so the `CREATE USER` statement carries no injection risk.
|
||||
|
||||
### `dropTenantDatabase(slug)`
|
||||
|
||||
1. `DROP SCHEMA IF EXISTS "tenant_<slug>" CASCADE`
|
||||
2. `DROP USER IF EXISTS "tenant_<slug>"`
|
||||
|
||||
Schema must be dropped first (with `CASCADE`) because PG won't drop a user that owns objects.
|
||||
|
||||
## Entity Change
|
||||
|
||||
**New Flyway migration:** `V014__add_tenant_db_password.sql`
|
||||
|
||||
```sql
|
||||
ALTER TABLE tenants ADD COLUMN db_password VARCHAR(255);
|
||||
```
|
||||
|
||||
Nullable — existing tenants won't have it. Code checks for null and falls back to shared credentials for backwards compatibility.
|
||||
|
||||
**TenantEntity:** new `dbPassword` field with JPA `@Column` mapping.
|
||||
|
||||
## Provisioning Flow Changes
|
||||
|
||||
### `VendorTenantService.provisionAsync()` — new steps before container creation
|
||||
|
||||
```
|
||||
1. Generate 32-char random password
|
||||
2. tenantDatabaseService.createTenantDatabase(slug, password)
|
||||
3. entity.setDbPassword(password)
|
||||
4. tenantRepository.save(entity)
|
||||
5. tenantProvisioner.provision(request) ← request now includes dbPassword
|
||||
6. ... rest unchanged (health check, license push, OIDC push)
|
||||
```
|
||||
|
||||
If step 2 fails, provisioning aborts with a stored error — no orphaned containers.
|
||||
|
||||
### `DockerTenantProvisioner` — JDBC URL construction
|
||||
|
||||
The `ProvisionRequest` record gains `dbPassword` field.
|
||||
|
||||
**When `dbPassword` is present** (new tenants):
|
||||
|
||||
```
|
||||
SPRING_DATASOURCE_URL=jdbc:postgresql://cameleer-postgres:5432/cameleer3?currentSchema=tenant_<slug>&ApplicationName=tenant_<slug>
|
||||
SPRING_DATASOURCE_USERNAME=tenant_<slug>
|
||||
SPRING_DATASOURCE_PASSWORD=<generated>
|
||||
```
|
||||
|
||||
**When `dbPassword` is null** (pre-existing tenants, backwards compat):
|
||||
|
||||
```
|
||||
SPRING_DATASOURCE_URL=<props.datasourceUrl()> (no currentSchema/ApplicationName)
|
||||
SPRING_DATASOURCE_USERNAME=<props.datasourceUsername()>
|
||||
SPRING_DATASOURCE_PASSWORD=<props.datasourcePassword()>
|
||||
```
|
||||
|
||||
Server restart/upgrade re-creates containers via `provisionAsync()`, which re-reads `dbPassword` from the entity. A tenant that already has a stored `dbPassword` therefore picks up its isolated credentials automatically on restart; a pre-existing tenant whose `dbPassword` is still null keeps using the shared credentials until it is re-provisioned.
|
||||
|
||||
## Delete Flow Changes
|
||||
|
||||
### `VendorTenantService.delete()`
|
||||
|
||||
```
|
||||
1. tenantProvisioner.remove(slug) ← existing
|
||||
2. licenseService.revokeLicense(...) ← existing
|
||||
3. logtoClient.deleteOrganization(...) ← existing
|
||||
4. tenantDatabaseService.dropTenantDatabase(slug) ← replaces TenantDataCleanupService PG logic
|
||||
5. dataCleanupService.cleanupClickHouse(slug) ← ClickHouse cleanup stays separate
|
||||
6. entity.setStatus(DELETED) ← existing
|
||||
```
|
||||
|
||||
`TenantDataCleanupService` loses its PostgreSQL cleanup responsibility (delegated to `TenantDatabaseService`). It keeps only the ClickHouse cleanup. Rename method to `cleanupClickHouse(slug)` for clarity.
|
||||
|
||||
## Backwards Compatibility
|
||||
|
||||
| Scenario | Behavior |
|
||||
|----------|----------|
|
||||
| **Standalone mode** | Unaffected. Server is in compose, not provisioned by SaaS. Defaults to `tenant_default`. |
|
||||
| **Existing SaaS tenants** (dbPassword=null) | Shared credentials, no `currentSchema`. Same as before. |
|
||||
| **Existing tenants after restart/upgrade** | Still use shared credentials until re-provisioned with new code. |
|
||||
| **New tenants** | Isolated user+schema+JDBC URL. Full isolation. |
|
||||
| **Delete of pre-existing tenant** | `DROP USER IF EXISTS` is a no-op (user doesn't exist). Schema drop unchanged. |
|
||||
|
||||
## InfrastructureService
|
||||
|
||||
No changes needed. Already queries `information_schema.schemata WHERE schema_name LIKE 'tenant_%'`. (NOTE: `_` is a single-character LIKE wildcard, so this pattern also matches e.g. `tenantX...` — harmless in practice since only `tenant_`-prefixed schemas are ever created, but `tenant\_%` with an escape would be the exact-prefix form.) With per-tenant schemas now created, the PostgreSQL tenant table on the Infrastructure page will populate automatically.
|
||||
|
||||
## Files Changed
|
||||
|
||||
| File | Change |
|
||||
|------|--------|
|
||||
| `TenantDatabaseService.java` | **New** — create/drop PG user+schema |
|
||||
| `TenantEntity.java` | Add `dbPassword` field |
|
||||
| `V014__add_tenant_db_password.sql` | **New** — nullable column |
|
||||
| `VendorTenantService.java` | Call `createTenantDatabase` in provision, `dropTenantDatabase` in delete |
|
||||
| `DockerTenantProvisioner.java` | Construct per-tenant JDBC URL, username, password |
|
||||
| `ProvisionRequest` record | Add `dbPassword` field |
|
||||
| `TenantDataCleanupService.java` | Remove PG logic, keep ClickHouse only, rename method |
|
||||
@@ -435,18 +435,13 @@ Copy `.env.example` to `.env` and configure as needed:
|
||||
| `POSTGRES_USER` | PostgreSQL username | `cameleer` |
|
||||
| `POSTGRES_PASSWORD` | PostgreSQL password | `change_me_in_production` |
|
||||
| `POSTGRES_DB` | PostgreSQL database name | `cameleer_saas` |
|
||||
| `LOGTO_ENDPOINT` | Internal Logto URL (container-to-container) | `http://logto:3001` |
|
||||
| `LOGTO_PUBLIC_ENDPOINT` | Public-facing Logto URL | `http://localhost:3001` |
|
||||
| `LOGTO_ISSUER_URI` | OIDC issuer URI | `http://localhost:3001/oidc` |
|
||||
| `LOGTO_JWK_SET_URI` | OIDC JWK set URI | `http://logto:3001/oidc/jwks` |
|
||||
| `LOGTO_M2M_CLIENT_ID` | Machine-to-machine client ID (auto-set by bootstrap) | _(empty)_ |
|
||||
| `LOGTO_M2M_CLIENT_SECRET` | Machine-to-machine client secret (auto-set by bootstrap) | _(empty)_ |
|
||||
| `LOGTO_SPA_CLIENT_ID` | SPA client ID for the frontend | _(empty)_ |
|
||||
| `CAMELEER_AUTH_TOKEN` | Bootstrap token for agent registration | `change_me_bootstrap_token` |
|
||||
| `CAMELEER_CONTAINER_MEMORY_LIMIT` | Memory limit for deployed containers | `512m` |
|
||||
| `CAMELEER_CONTAINER_CPU_SHARES` | CPU shares for deployed containers | `512` |
|
||||
| `CAMELEER_TENANT_SLUG` | Default tenant slug | `default` |
|
||||
| `DOMAIN` | Domain for Traefik TLS and route URLs | `localhost` |
|
||||
| `CAMELEER_SAAS_IDENTITY_LOGTOENDPOINT` | Internal Logto URL (container-to-container) | `http://cameleer-logto:3001` |
|
||||
| `CAMELEER_SAAS_IDENTITY_LOGTOPUBLICENDPOINT` | Public-facing Logto URL | `http://localhost:3001` |
|
||||
| `CAMELEER_SAAS_IDENTITY_M2MCLIENTID` | Machine-to-machine client ID (auto-set by bootstrap) | _(empty)_ |
|
||||
| `CAMELEER_SAAS_IDENTITY_M2MCLIENTSECRET` | Machine-to-machine client secret (auto-set by bootstrap) | _(empty)_ |
|
||||
| `CAMELEER_SAAS_IDENTITY_SPACLIENTID` | SPA client ID for the frontend | _(empty)_ |
|
||||
| `PUBLIC_HOST` | Public hostname for Traefik, Logto, and SaaS routing | `localhost` |
|
||||
| `PUBLIC_PROTOCOL` | Public protocol (`http` or `https`) | `https` |
|
||||
| `SAAS_ADMIN_USER` | Platform admin username | `admin` |
|
||||
| `SAAS_ADMIN_PASS` | Platform admin password | `admin` |
|
||||
| `TENANT_ADMIN_USER` | Tenant admin username | `camel` |
|
||||
@@ -550,7 +545,7 @@ The Cameleer SaaS application itself does not need any changes -- all identity c
|
||||
**Resolution:**
|
||||
|
||||
1. Check backend logs: `docker compose logs cameleer-saas`.
|
||||
2. Verify that `LOGTO_ISSUER_URI` and `LOGTO_JWK_SET_URI` in `.env` are correct.
|
||||
2. Verify that `CAMELEER_SAAS_IDENTITY_LOGTOENDPOINT` in `.env` is correct (the OIDC issuer and JWK set URIs are derived from it automatically).
|
||||
3. If the issue persists, restart the services: `docker compose restart cameleer-saas logto`.
|
||||
|
||||
### Deployment Stuck in BUILDING
|
||||
@@ -577,14 +572,14 @@ The Cameleer SaaS application itself does not need any changes -- all identity c
|
||||
**Possible causes:**
|
||||
|
||||
- The agent cannot reach the cameleer3-server endpoint. Check network connectivity between the deployed container and the observability server.
|
||||
- The bootstrap token does not match. The agent uses `CAMELEER_AUTH_TOKEN` to register with the server.
|
||||
- The bootstrap token does not match. The agent uses `CAMELEER_SERVER_SECURITY_BOOTSTRAPTOKEN` to register with the server.
|
||||
- The cameleer3-server is not healthy.
|
||||
|
||||
**Resolution:**
|
||||
|
||||
1. Check cameleer3-server health: `docker compose logs cameleer3-server`.
|
||||
2. Verify the app container's logs for agent connection errors (use the Logs tab on the app detail page).
|
||||
3. Confirm that `CAMELEER_AUTH_TOKEN` is the same in both the `cameleer-saas` and `cameleer3-server` service configurations.
|
||||
3. Confirm that `CAMELEER_SERVER_SECURITY_BOOTSTRAPTOKEN` is the same in both the `cameleer-saas` and `cameleer3-server` service configurations.
|
||||
|
||||
### Container Health Check Failing
|
||||
|
||||
@@ -600,7 +595,7 @@ The Cameleer SaaS application itself does not need any changes -- all identity c
|
||||
|
||||
1. Check the container logs from the Logs tab on the app detail page.
|
||||
2. If the app crashes immediately, verify the JAR file is a valid executable Spring Boot or Camel application.
|
||||
3. To increase memory limits, set `CAMELEER_CONTAINER_MEMORY_LIMIT` to a higher value (e.g., `1g`) in `.env` and restart the stack.
|
||||
3. To increase memory limits, set `CAMELEER_SERVER_RUNTIME_CONTAINER_MEMORYLIMIT` to a higher value (e.g., `1g`) on the per-tenant server container and restart it.
|
||||
|
||||
### Bootstrap Script Errors
|
||||
|
||||
|
||||
158
installer/cameleer/docker-compose.yml
Normal file
158
installer/cameleer/docker-compose.yml
Normal file
@@ -0,0 +1,158 @@
|
||||
# Cameleer SaaS Platform
|
||||
# Generated by Cameleer installer — do not edit manually
|
||||
|
||||
services:
|
||||
cameleer-traefik:
|
||||
image: ${TRAEFIK_IMAGE:-gitea.siegeln.net/cameleer/cameleer-traefik}:${VERSION:-latest}
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "${HTTP_PORT:-80}:80"
|
||||
- "${HTTPS_PORT:-443}:443"
|
||||
- "${LOGTO_CONSOLE_PORT:-3002}:3002"
|
||||
environment:
|
||||
PUBLIC_HOST: ${PUBLIC_HOST:-localhost}
|
||||
CERT_FILE: ${CERT_FILE:-}
|
||||
KEY_FILE: ${KEY_FILE:-}
|
||||
CA_FILE: ${CA_FILE:-}
|
||||
volumes:
|
||||
- cameleer-certs:/certs
|
||||
- ${DOCKER_SOCKET:-/var/run/docker.sock}:/var/run/docker.sock:ro
|
||||
networks:
|
||||
- cameleer
|
||||
- cameleer-traefik
|
||||
|
||||
cameleer-postgres:
|
||||
image: ${POSTGRES_IMAGE:-gitea.siegeln.net/cameleer/cameleer-postgres}:${VERSION:-latest}
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
POSTGRES_DB: cameleer_saas
|
||||
POSTGRES_USER: ${POSTGRES_USER:-cameleer}
|
||||
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
volumes:
|
||||
- cameleer-pgdata:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U $${POSTGRES_USER:-cameleer} -d cameleer_saas"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
networks:
|
||||
- cameleer
|
||||
|
||||
cameleer-clickhouse:
|
||||
image: ${CLICKHOUSE_IMAGE:-gitea.siegeln.net/cameleer/cameleer-clickhouse}:${VERSION:-latest}
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD}
|
||||
volumes:
|
||||
- cameleer-chdata:/var/lib/clickhouse
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "clickhouse-client --password $${CLICKHOUSE_PASSWORD} --query 'SELECT 1'"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
networks:
|
||||
- cameleer
|
||||
|
||||
cameleer-logto:
|
||||
image: ${LOGTO_IMAGE:-gitea.siegeln.net/cameleer/cameleer-logto}:${VERSION:-latest}
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
cameleer-postgres:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
DB_URL: postgres://${POSTGRES_USER:-cameleer}:${POSTGRES_PASSWORD}@cameleer-postgres:5432/logto
|
||||
ENDPOINT: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}
|
||||
ADMIN_ENDPOINT: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}:${LOGTO_CONSOLE_PORT:-3002}
|
||||
TRUST_PROXY_HEADER: 1
|
||||
NODE_TLS_REJECT_UNAUTHORIZED: "${NODE_TLS_REJECT:-0}"
|
||||
LOGTO_ENDPOINT: http://cameleer-logto:3001
|
||||
LOGTO_ADMIN_ENDPOINT: http://cameleer-logto:3002
|
||||
LOGTO_PUBLIC_ENDPOINT: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}
|
||||
PUBLIC_HOST: ${PUBLIC_HOST:-localhost}
|
||||
PUBLIC_PROTOCOL: ${PUBLIC_PROTOCOL:-https}
|
||||
PG_HOST: cameleer-postgres
|
||||
PG_USER: ${POSTGRES_USER:-cameleer}
|
||||
PG_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
PG_DB_SAAS: cameleer_saas
|
||||
SAAS_ADMIN_USER: ${SAAS_ADMIN_USER:-admin}
|
||||
SAAS_ADMIN_PASS: ${SAAS_ADMIN_PASS:-admin}
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "node -e \"require('http').get('http://localhost:3001/oidc/.well-known/openid-configuration', r => process.exit(r.statusCode === 200 ? 0 : 1)).on('error', () => process.exit(1))\" && test -f /data/logto-bootstrap.json"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 60
|
||||
start_period: 30s
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.http.routers.cameleer-logto.rule=PathPrefix(`/`)
|
||||
- traefik.http.routers.cameleer-logto.priority=1
|
||||
- traefik.http.routers.cameleer-logto.entrypoints=websecure
|
||||
- traefik.http.routers.cameleer-logto.tls=true
|
||||
- traefik.http.routers.cameleer-logto.service=cameleer-logto
|
||||
- traefik.http.routers.cameleer-logto.middlewares=cameleer-logto-cors
|
||||
- "traefik.http.middlewares.cameleer-logto-cors.headers.accessControlAllowOriginList=${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}:${LOGTO_CONSOLE_PORT:-3002}"
|
||||
- traefik.http.middlewares.cameleer-logto-cors.headers.accessControlAllowMethods=GET,POST,PUT,PATCH,DELETE,OPTIONS
|
||||
- traefik.http.middlewares.cameleer-logto-cors.headers.accessControlAllowHeaders=Authorization,Content-Type
|
||||
- traefik.http.middlewares.cameleer-logto-cors.headers.accessControlAllowCredentials=true
|
||||
- traefik.http.services.cameleer-logto.loadbalancer.server.port=3001
|
||||
- traefik.http.routers.cameleer-logto-console.rule=PathPrefix(`/`)
|
||||
- traefik.http.routers.cameleer-logto-console.entrypoints=admin-console
|
||||
- traefik.http.routers.cameleer-logto-console.tls=true
|
||||
- traefik.http.routers.cameleer-logto-console.service=cameleer-logto-console
|
||||
- traefik.http.services.cameleer-logto-console.loadbalancer.server.port=3002
|
||||
volumes:
|
||||
- cameleer-bootstrapdata:/data
|
||||
networks:
|
||||
- cameleer
|
||||
|
||||
cameleer-saas:
|
||||
image: ${CAMELEER_IMAGE:-gitea.siegeln.net/cameleer/cameleer-saas}:${VERSION:-latest}
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
cameleer-logto:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
# SaaS database
|
||||
SPRING_DATASOURCE_URL: jdbc:postgresql://cameleer-postgres:5432/cameleer_saas
|
||||
SPRING_DATASOURCE_USERNAME: ${POSTGRES_USER:-cameleer}
|
||||
SPRING_DATASOURCE_PASSWORD: ${POSTGRES_PASSWORD}
|
||||
# Identity (Logto)
|
||||
CAMELEER_SAAS_IDENTITY_LOGTOENDPOINT: http://cameleer-logto:3001
|
||||
CAMELEER_SAAS_IDENTITY_LOGTOPUBLICENDPOINT: ${PUBLIC_PROTOCOL:-https}://${PUBLIC_HOST:-localhost}
|
||||
# Provisioning — passed to per-tenant server containers
|
||||
CAMELEER_SAAS_PROVISIONING_PUBLICHOST: ${PUBLIC_HOST:-localhost}
|
||||
CAMELEER_SAAS_PROVISIONING_PUBLICPROTOCOL: ${PUBLIC_PROTOCOL:-https}
|
||||
CAMELEER_SAAS_PROVISIONING_NETWORKNAME: ${COMPOSE_PROJECT_NAME:-cameleer-saas}_cameleer
|
||||
CAMELEER_SAAS_PROVISIONING_TRAEFIKNETWORK: cameleer-traefik
|
||||
CAMELEER_SAAS_PROVISIONING_DATASOURCEUSERNAME: ${POSTGRES_USER:-cameleer}
|
||||
CAMELEER_SAAS_PROVISIONING_DATASOURCEPASSWORD: ${POSTGRES_PASSWORD}
|
||||
CAMELEER_SAAS_PROVISIONING_CLICKHOUSEPASSWORD: ${CLICKHOUSE_PASSWORD}
|
||||
CAMELEER_SAAS_PROVISIONING_SERVERIMAGE: ${CAMELEER_SAAS_PROVISIONING_SERVERIMAGE:-gitea.siegeln.net/cameleer/cameleer3-server:latest}
|
||||
CAMELEER_SAAS_PROVISIONING_SERVERUIIMAGE: ${CAMELEER_SAAS_PROVISIONING_SERVERUIIMAGE:-gitea.siegeln.net/cameleer/cameleer3-server-ui:latest}
|
||||
labels:
|
||||
- traefik.enable=true
|
||||
- traefik.http.routers.saas.rule=PathPrefix(`/platform`)
|
||||
- traefik.http.routers.saas.entrypoints=websecure
|
||||
- traefik.http.routers.saas.tls=true
|
||||
- traefik.http.services.saas.loadbalancer.server.port=8080
|
||||
volumes:
|
||||
- cameleer-bootstrapdata:/data/bootstrap:ro
|
||||
- cameleer-certs:/certs
|
||||
- ${DOCKER_SOCKET:-/var/run/docker.sock}:/var/run/docker.sock
|
||||
networks:
|
||||
- cameleer
|
||||
group_add:
|
||||
- "1001"
|
||||
|
||||
volumes:
|
||||
cameleer-pgdata:
|
||||
cameleer-chdata:
|
||||
cameleer-certs:
|
||||
cameleer-bootstrapdata:
|
||||
|
||||
networks:
|
||||
cameleer:
|
||||
driver: bridge
|
||||
cameleer-traefik:
|
||||
name: cameleer-traefik
|
||||
driver: bridge
|
||||
1812
installer/install.ps1
Normal file
1812
installer/install.ps1
Normal file
File diff suppressed because it is too large
Load Diff
1810
installer/install.sh
Normal file
1810
installer/install.sh
Normal file
File diff suppressed because it is too large
Load Diff
20
pom.xml
20
pom.xml
@@ -80,6 +80,26 @@
|
||||
<artifactId>spring-boot-starter-actuator</artifactId>
|
||||
</dependency>
|
||||
|
||||
<!-- ClickHouse JDBC (tenant data cleanup on delete) -->
|
||||
<dependency>
|
||||
<groupId>com.clickhouse</groupId>
|
||||
<artifactId>clickhouse-jdbc</artifactId>
|
||||
<version>0.9.7</version>
|
||||
<classifier>all</classifier>
|
||||
</dependency>
|
||||
|
||||
<!-- Docker Java (tenant provisioning) -->
|
||||
<dependency>
|
||||
<groupId>com.github.docker-java</groupId>
|
||||
<artifactId>docker-java-core</artifactId>
|
||||
<version>3.4.1</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.github.docker-java</groupId>
|
||||
<artifactId>docker-java-transport-zerodep</artifactId>
|
||||
<version>3.4.1</version>
|
||||
</dependency>
|
||||
|
||||
<!-- Test -->
|
||||
<dependency>
|
||||
<groupId>org.springframework.boot</groupId>
|
||||
|
||||
38
src/main/java/net/siegeln/cameleer/saas/audit/AuditDto.java
Normal file
38
src/main/java/net/siegeln/cameleer/saas/audit/AuditDto.java
Normal file
@@ -0,0 +1,38 @@
|
||||
package net.siegeln.cameleer.saas.audit;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
public final class AuditDto {
|
||||
|
||||
private AuditDto() {}
|
||||
|
||||
public record AuditLogEntry(
|
||||
UUID id,
|
||||
String actorEmail,
|
||||
UUID tenantId,
|
||||
String action,
|
||||
String resource,
|
||||
String environment,
|
||||
String result,
|
||||
String sourceIp,
|
||||
Instant createdAt
|
||||
) {
|
||||
public static AuditLogEntry from(AuditEntity e) {
|
||||
return new AuditLogEntry(
|
||||
e.getId(), e.getActorEmail(), e.getTenantId(),
|
||||
e.getAction(), e.getResource(), e.getEnvironment(),
|
||||
e.getResult(), e.getSourceIp(), e.getCreatedAt()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
public record AuditLogPage(
|
||||
List<AuditLogEntry> content,
|
||||
int page,
|
||||
int size,
|
||||
long totalElements,
|
||||
int totalPages
|
||||
) {}
|
||||
}
|
||||
@@ -8,7 +8,7 @@ import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
@Repository
|
||||
public interface AuditRepository extends JpaRepository<AuditEntity, UUID> {
|
||||
public interface AuditRepository extends JpaRepository<AuditEntity, UUID>, AuditRepositoryCustom {
|
||||
|
||||
List<AuditEntity> findByTenantIdAndCreatedAtBetween(UUID tenantId, Instant from, Instant to);
|
||||
|
||||
|
||||
@@ -0,0 +1,14 @@
|
||||
package net.siegeln.cameleer.saas.audit;
|
||||
|
||||
import org.springframework.data.domain.Page;
|
||||
import org.springframework.data.domain.Pageable;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.UUID;
|
||||
|
||||
public interface AuditRepositoryCustom {
|
||||
|
||||
Page<AuditDto.AuditLogEntry> findFiltered(UUID tenantId, String action, String result,
|
||||
Instant from, Instant to, String search,
|
||||
Pageable pageable);
|
||||
}
|
||||
@@ -0,0 +1,88 @@
|
||||
package net.siegeln.cameleer.saas.audit;
|
||||
|
||||
import org.springframework.data.domain.Page;
|
||||
import org.springframework.data.domain.PageImpl;
|
||||
import org.springframework.data.domain.Pageable;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Timestamp;
|
||||
import java.time.Instant;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
@Component
|
||||
public class AuditRepositoryImpl implements AuditRepositoryCustom {
|
||||
|
||||
private final JdbcTemplate jdbc;
|
||||
|
||||
public AuditRepositoryImpl(JdbcTemplate jdbc) {
|
||||
this.jdbc = jdbc;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Page<AuditDto.AuditLogEntry> findFiltered(UUID tenantId, String action, String result,
|
||||
Instant from, Instant to, String search,
|
||||
Pageable pageable) {
|
||||
StringBuilder where = new StringBuilder("WHERE 1=1");
|
||||
List<Object> params = new ArrayList<>();
|
||||
|
||||
if (tenantId != null) {
|
||||
where.append(" AND tenant_id = ?");
|
||||
params.add(tenantId);
|
||||
}
|
||||
if (action != null && !action.isBlank()) {
|
||||
where.append(" AND action = ?");
|
||||
params.add(action);
|
||||
}
|
||||
if (result != null && !result.isBlank()) {
|
||||
where.append(" AND result = ?");
|
||||
params.add(result);
|
||||
}
|
||||
if (from != null) {
|
||||
where.append(" AND created_at >= ?");
|
||||
params.add(Timestamp.from(from));
|
||||
}
|
||||
if (to != null) {
|
||||
where.append(" AND created_at <= ?");
|
||||
params.add(Timestamp.from(to));
|
||||
}
|
||||
if (search != null && !search.isBlank()) {
|
||||
where.append(" AND (actor_email ILIKE ? OR resource ILIKE ?)");
|
||||
String like = "%" + search + "%";
|
||||
params.add(like);
|
||||
params.add(like);
|
||||
}
|
||||
|
||||
String countSql = "SELECT COUNT(*) FROM audit_log " + where;
|
||||
Long total = jdbc.queryForObject(countSql, Long.class, params.toArray());
|
||||
long totalCount = total != null ? total : 0;
|
||||
|
||||
String dataSql = "SELECT * FROM audit_log " + where
|
||||
+ " ORDER BY created_at DESC LIMIT ? OFFSET ?";
|
||||
List<Object> dataParams = new ArrayList<>(params);
|
||||
dataParams.add(pageable.getPageSize());
|
||||
dataParams.add(pageable.getOffset());
|
||||
|
||||
List<AuditDto.AuditLogEntry> items = jdbc.query(dataSql, (rs, rowNum) -> mapRow(rs), dataParams.toArray());
|
||||
return new PageImpl<>(items, pageable, totalCount);
|
||||
}
|
||||
|
||||
private AuditDto.AuditLogEntry mapRow(ResultSet rs) throws SQLException {
|
||||
Timestamp ts = rs.getTimestamp("created_at");
|
||||
return new AuditDto.AuditLogEntry(
|
||||
rs.getObject("id", UUID.class),
|
||||
rs.getString("actor_email"),
|
||||
rs.getObject("tenant_id", UUID.class),
|
||||
rs.getString("action"),
|
||||
rs.getString("resource"),
|
||||
rs.getString("environment"),
|
||||
rs.getString("result"),
|
||||
rs.getString("source_ip"),
|
||||
ts != null ? ts.toInstant() : null
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,23 +1,38 @@
|
||||
package net.siegeln.cameleer.saas.audit;
|
||||
|
||||
import net.siegeln.cameleer.saas.identity.LogtoManagementClient;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.data.domain.Page;
|
||||
import org.springframework.data.domain.Pageable;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
|
||||
@Service
|
||||
public class AuditService {
|
||||
|
||||
private final AuditRepository auditRepository;
|
||||
private static final Logger log = LoggerFactory.getLogger(AuditService.class);
|
||||
|
||||
public AuditService(AuditRepository auditRepository) {
|
||||
private final AuditRepository auditRepository;
|
||||
private final LogtoManagementClient logtoClient;
|
||||
private final ConcurrentHashMap<String, String> userNameCache = new ConcurrentHashMap<>();
|
||||
|
||||
public AuditService(AuditRepository auditRepository, LogtoManagementClient logtoClient) {
|
||||
this.auditRepository = auditRepository;
|
||||
this.logtoClient = logtoClient;
|
||||
}
|
||||
|
||||
public void log(UUID actorId, String actorEmail, UUID tenantId,
|
||||
AuditAction action, String resource,
|
||||
String environment, String sourceIp,
|
||||
String result, Map<String, Object> metadata) {
|
||||
if (actorEmail == null && actorId != null) {
|
||||
actorEmail = resolveActorName(actorId.toString());
|
||||
}
|
||||
var entry = new AuditEntity();
|
||||
entry.setActorId(actorId);
|
||||
entry.setActorEmail(actorEmail);
|
||||
@@ -30,4 +45,29 @@ public class AuditService {
|
||||
entry.setMetadata(metadata);
|
||||
auditRepository.save(entry);
|
||||
}
|
||||
|
||||
public Page<AuditDto.AuditLogEntry> search(UUID tenantId, String action, String result,
|
||||
Instant from, Instant to, String search,
|
||||
Pageable pageable) {
|
||||
return auditRepository.findFiltered(tenantId, action, result, from, to, search, pageable);
|
||||
}
|
||||
|
||||
private String resolveActorName(String userId) {
|
||||
return userNameCache.computeIfAbsent(userId, id -> {
|
||||
try {
|
||||
var user = logtoClient.getUser(id);
|
||||
if (user == null) return id;
|
||||
var username = user.get("username");
|
||||
if (username != null && !username.toString().isBlank()) return username.toString();
|
||||
var name = user.get("name");
|
||||
if (name != null && !name.toString().isBlank()) return name.toString();
|
||||
var email = user.get("primaryEmail");
|
||||
if (email != null && !email.toString().isBlank()) return email.toString();
|
||||
return id;
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to resolve actor name for {}: {}", id, e.getMessage());
|
||||
return id;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,17 @@
|
||||
package net.siegeln.cameleer.saas.certificate;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
public record CertValidationResult(
|
||||
boolean valid,
|
||||
List<String> errors,
|
||||
CertificateInfo info
|
||||
) {
|
||||
public static CertValidationResult ok(CertificateInfo info) {
|
||||
return new CertValidationResult(true, List.of(), info);
|
||||
}
|
||||
|
||||
public static CertValidationResult fail(List<String> errors) {
|
||||
return new CertValidationResult(false, errors, null);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,138 @@
|
||||
package net.siegeln.cameleer.saas.certificate;
|
||||
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.security.core.annotation.AuthenticationPrincipal;
|
||||
import org.springframework.security.oauth2.jwt.Jwt;
|
||||
import org.springframework.web.bind.annotation.DeleteMapping;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.PostMapping;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RequestParam;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
import org.springframework.web.multipart.MultipartFile;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/vendor/certificates")
|
||||
@PreAuthorize("hasAuthority('SCOPE_platform:admin')")
|
||||
public class CertificateController {
|
||||
|
||||
private final CertificateService certificateService;
|
||||
|
||||
public CertificateController(CertificateService certificateService) {
|
||||
this.certificateService = certificateService;
|
||||
}
|
||||
|
||||
// --- Response types ---
|
||||
|
||||
public record CertificateResponse(
|
||||
UUID id, String status, String subject, String issuer,
|
||||
Instant notBefore, Instant notAfter, String fingerprint,
|
||||
boolean hasCa, boolean selfSigned, Instant activatedAt, Instant archivedAt
|
||||
) {
|
||||
public static CertificateResponse from(CertificateEntity e) {
|
||||
if (e == null) return null;
|
||||
return new CertificateResponse(
|
||||
e.getId(), e.getStatus().name(), e.getSubject(), e.getIssuer(),
|
||||
e.getNotBefore(), e.getNotAfter(), e.getFingerprint(),
|
||||
e.isHasCa(), e.isSelfSigned(), e.getActivatedAt(), e.getArchivedAt()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
public record OverviewResponse(
|
||||
CertificateResponse active,
|
||||
CertificateResponse staged,
|
||||
CertificateResponse archived,
|
||||
long staleTenantCount
|
||||
) {}
|
||||
|
||||
public record StageResponse(
|
||||
boolean valid,
|
||||
List<String> errors,
|
||||
CertificateResponse certificate
|
||||
) {}
|
||||
|
||||
// --- Endpoints ---
|
||||
|
||||
@GetMapping
|
||||
public ResponseEntity<OverviewResponse> getOverview() {
|
||||
var overview = certificateService.getOverview();
|
||||
long stale = certificateService.countStaleTenants();
|
||||
return ResponseEntity.ok(new OverviewResponse(
|
||||
CertificateResponse.from(overview.active()),
|
||||
CertificateResponse.from(overview.staged()),
|
||||
CertificateResponse.from(overview.archived()),
|
||||
stale
|
||||
));
|
||||
}
|
||||
|
||||
@PostMapping("/stage")
|
||||
public ResponseEntity<StageResponse> stage(
|
||||
@RequestParam("cert") MultipartFile certFile,
|
||||
@RequestParam("key") MultipartFile keyFile,
|
||||
@RequestParam(value = "ca", required = false) MultipartFile caFile,
|
||||
@RequestParam(value = "password", required = false) String keyPassword,
|
||||
@AuthenticationPrincipal Jwt jwt) {
|
||||
try {
|
||||
byte[] certPem = certFile.getBytes();
|
||||
byte[] keyPem = keyFile.getBytes();
|
||||
byte[] caPem = caFile != null ? caFile.getBytes() : null;
|
||||
UUID actorId = resolveActorId(jwt);
|
||||
|
||||
CertValidationResult result = certificateService.stage(certPem, keyPem, caPem, keyPassword, actorId);
|
||||
|
||||
if (!result.valid()) {
|
||||
return ResponseEntity.badRequest().body(
|
||||
new StageResponse(false, result.errors(), null));
|
||||
}
|
||||
|
||||
var overview = certificateService.getOverview();
|
||||
return ResponseEntity.ok(new StageResponse(
|
||||
true, List.of(), CertificateResponse.from(overview.staged())));
|
||||
} catch (Exception e) {
|
||||
return ResponseEntity.badRequest().body(
|
||||
new StageResponse(false, List.of(e.getMessage()), null));
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/activate")
|
||||
public ResponseEntity<Void> activate() {
|
||||
certificateService.activate();
|
||||
return ResponseEntity.noContent().build();
|
||||
}
|
||||
|
||||
@PostMapping("/restore")
|
||||
public ResponseEntity<Void> restore() {
|
||||
try {
|
||||
certificateService.restore();
|
||||
return ResponseEntity.noContent().build();
|
||||
} catch (IllegalStateException e) {
|
||||
return ResponseEntity.badRequest().body(null);
|
||||
}
|
||||
}
|
||||
|
||||
@DeleteMapping("/staged")
|
||||
public ResponseEntity<Void> discardStaged() {
|
||||
certificateService.discardStaged();
|
||||
return ResponseEntity.noContent().build();
|
||||
}
|
||||
|
||||
@GetMapping("/stale-tenants")
|
||||
public ResponseEntity<Map<String, Long>> staleTenants() {
|
||||
return ResponseEntity.ok(Map.of("count", certificateService.countStaleTenants()));
|
||||
}
|
||||
|
||||
private UUID resolveActorId(Jwt jwt) {
|
||||
try {
|
||||
return UUID.fromString(jwt.getSubject());
|
||||
} catch (Exception e) {
|
||||
return UUID.nameUUIDFromBytes(jwt.getSubject().getBytes());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,119 @@
|
||||
package net.siegeln.cameleer.saas.certificate;
|
||||
|
||||
import jakarta.persistence.Column;
|
||||
import jakarta.persistence.Entity;
|
||||
import jakarta.persistence.EnumType;
|
||||
import jakarta.persistence.Enumerated;
|
||||
import jakarta.persistence.GeneratedValue;
|
||||
import jakarta.persistence.GenerationType;
|
||||
import jakarta.persistence.Id;
|
||||
import jakarta.persistence.PrePersist;
|
||||
import jakarta.persistence.Table;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.UUID;
|
||||
|
||||
@Entity
|
||||
@Table(name = "certificates")
|
||||
public class CertificateEntity {
|
||||
|
||||
public enum Status { ACTIVE, STAGED, ARCHIVED }
|
||||
|
||||
@Id
|
||||
@GeneratedValue(strategy = GenerationType.UUID)
|
||||
private UUID id;
|
||||
|
||||
@Enumerated(EnumType.STRING)
|
||||
@Column(name = "status", nullable = false, length = 10)
|
||||
private Status status;
|
||||
|
||||
@Column(name = "subject", length = 500)
|
||||
private String subject;
|
||||
|
||||
@Column(name = "issuer", length = 500)
|
||||
private String issuer;
|
||||
|
||||
@Column(name = "not_before")
|
||||
private Instant notBefore;
|
||||
|
||||
@Column(name = "not_after")
|
||||
private Instant notAfter;
|
||||
|
||||
@Column(name = "fingerprint", length = 128)
|
||||
private String fingerprint;
|
||||
|
||||
@Column(name = "has_ca", nullable = false)
|
||||
private boolean hasCa;
|
||||
|
||||
@Column(name = "self_signed", nullable = false)
|
||||
private boolean selfSigned;
|
||||
|
||||
@Column(name = "uploaded_by")
|
||||
private UUID uploadedBy;
|
||||
|
||||
@Column(name = "created_at", nullable = false, updatable = false)
|
||||
private Instant createdAt;
|
||||
|
||||
@Column(name = "activated_at")
|
||||
private Instant activatedAt;
|
||||
|
||||
@Column(name = "archived_at")
|
||||
private Instant archivedAt;
|
||||
|
||||
@PrePersist
|
||||
protected void onCreate() {
|
||||
if (createdAt == null) createdAt = Instant.now();
|
||||
}
|
||||
|
||||
// --- Getters and setters ---
|
||||
|
||||
public UUID getId() { return id; }
|
||||
|
||||
public Status getStatus() { return status; }
|
||||
public void setStatus(Status status) { this.status = status; }
|
||||
|
||||
public String getSubject() { return subject; }
|
||||
public void setSubject(String subject) { this.subject = subject; }
|
||||
|
||||
public String getIssuer() { return issuer; }
|
||||
public void setIssuer(String issuer) { this.issuer = issuer; }
|
||||
|
||||
public Instant getNotBefore() { return notBefore; }
|
||||
public void setNotBefore(Instant notBefore) { this.notBefore = notBefore; }
|
||||
|
||||
public Instant getNotAfter() { return notAfter; }
|
||||
public void setNotAfter(Instant notAfter) { this.notAfter = notAfter; }
|
||||
|
||||
public String getFingerprint() { return fingerprint; }
|
||||
public void setFingerprint(String fingerprint) { this.fingerprint = fingerprint; }
|
||||
|
||||
public boolean isHasCa() { return hasCa; }
|
||||
public void setHasCa(boolean hasCa) { this.hasCa = hasCa; }
|
||||
|
||||
public boolean isSelfSigned() { return selfSigned; }
|
||||
public void setSelfSigned(boolean selfSigned) { this.selfSigned = selfSigned; }
|
||||
|
||||
public UUID getUploadedBy() { return uploadedBy; }
|
||||
public void setUploadedBy(UUID uploadedBy) { this.uploadedBy = uploadedBy; }
|
||||
|
||||
public Instant getCreatedAt() { return createdAt; }
|
||||
|
||||
public Instant getActivatedAt() { return activatedAt; }
|
||||
public void setActivatedAt(Instant activatedAt) { this.activatedAt = activatedAt; }
|
||||
|
||||
public Instant getArchivedAt() { return archivedAt; }
|
||||
public void setArchivedAt(Instant archivedAt) { this.archivedAt = archivedAt; }
|
||||
|
||||
public static CertificateEntity fromInfo(CertificateInfo info, Status status) {
|
||||
var entity = new CertificateEntity();
|
||||
entity.setStatus(status);
|
||||
entity.setSubject(info.subject());
|
||||
entity.setIssuer(info.issuer());
|
||||
entity.setNotBefore(info.notBefore());
|
||||
entity.setNotAfter(info.notAfter());
|
||||
entity.setFingerprint(info.fingerprint());
|
||||
entity.setHasCa(info.hasCaBundle());
|
||||
entity.setSelfSigned(info.selfSigned());
|
||||
return entity;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,13 @@
|
||||
package net.siegeln.cameleer.saas.certificate;
|
||||
|
||||
import java.time.Instant;
|
||||
|
||||
public record CertificateInfo(
|
||||
String subject,
|
||||
String issuer,
|
||||
Instant notBefore,
|
||||
Instant notAfter,
|
||||
boolean hasCaBundle,
|
||||
boolean selfSigned,
|
||||
String fingerprint
|
||||
) {}
|
||||
@@ -0,0 +1,42 @@
|
||||
package net.siegeln.cameleer.saas.certificate;
|
||||
|
||||
/**
|
||||
* Provider interface for certificate file management.
|
||||
* Docker implementation writes to the certs volume.
|
||||
* K8s implementation would manage TLS Secrets.
|
||||
*/
|
||||
public interface CertificateManager {
|
||||
|
||||
boolean isAvailable();
|
||||
|
||||
/** Read metadata of the active certificate from the provider storage. */
|
||||
CertificateInfo getActive();
|
||||
|
||||
/** Read metadata of the staged certificate, or null. */
|
||||
CertificateInfo getStaged();
|
||||
|
||||
/** Read metadata of the archived certificate, or null. */
|
||||
CertificateInfo getArchived();
|
||||
|
||||
/**
|
||||
* Write cert+key+ca to staging area and validate.
|
||||
* Does NOT activate — call {@link #activate()} to promote.
|
||||
* @param keyPassword optional password for encrypted private keys (null if unencrypted)
|
||||
*/
|
||||
CertValidationResult stage(byte[] certPem, byte[] keyPem, byte[] caBundlePem, String keyPassword);
|
||||
|
||||
/** Promote staged -> active. Moves current active to archive (deleting previous archive). */
|
||||
void activate();
|
||||
|
||||
/** Swap archived <-> active. */
|
||||
void restore();
|
||||
|
||||
/** Delete staged files. */
|
||||
void discardStaged();
|
||||
|
||||
/** Generate a self-signed certificate for the given hostname and store as active. */
|
||||
void generateSelfSigned(String hostname);
|
||||
|
||||
/** Read the current CA bundle bytes, or null if none exists. */
|
||||
byte[] getCaBundle();
|
||||
}
|
||||
@@ -0,0 +1,13 @@
|
||||
package net.siegeln.cameleer.saas.certificate;
|
||||
|
||||
import org.springframework.data.jpa.repository.JpaRepository;
|
||||
|
||||
import java.util.Optional;
|
||||
import java.util.UUID;
|
||||
|
||||
public interface CertificateRepository extends JpaRepository<CertificateEntity, UUID> {
|
||||
|
||||
Optional<CertificateEntity> findByStatus(CertificateEntity.Status status);
|
||||
|
||||
void deleteByStatus(CertificateEntity.Status status);
|
||||
}
|
||||
@@ -0,0 +1,155 @@
|
||||
package net.siegeln.cameleer.saas.certificate;
|
||||
|
||||
import net.siegeln.cameleer.saas.tenant.TenantRepository;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.transaction.annotation.Transactional;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
/**
 * Manages the platform TLS certificate lifecycle across three slots:
 * ACTIVE (in use), STAGED (uploaded, awaiting activation) and ARCHIVED
 * (previous active, kept for rollback).
 *
 * Each lifecycle operation performs the filesystem change via
 * {@link CertificateManager} first and then mirrors it in the database,
 * inside one transaction. NOTE(review): the file operations themselves are
 * not transactional — if a DB write fails after certManager has moved files,
 * filesystem and DB can diverge until the next successful operation.
 */
@Service
public class CertificateService {

    private static final Logger log = LoggerFactory.getLogger(CertificateService.class);

    private final CertificateManager certManager;
    private final CertificateRepository certRepository;
    private final TenantRepository tenantRepository;

    public CertificateService(CertificateManager certManager,
                              CertificateRepository certRepository,
                              TenantRepository tenantRepository) {
        this.certManager = certManager;
        this.certRepository = certRepository;
        this.tenantRepository = tenantRepository;
    }

    /**
     * Snapshot of all three certificate slots; any field may be null when
     * that slot is empty.
     */
    public record CertificateOverview(
            CertificateEntity active,
            CertificateEntity staged,
            CertificateEntity archived
    ) {}

    /** Returns the current occupant of each slot (null where empty). */
    public CertificateOverview getOverview() {
        return new CertificateOverview(
                certRepository.findByStatus(CertificateEntity.Status.ACTIVE).orElse(null),
                certRepository.findByStatus(CertificateEntity.Status.STAGED).orElse(null),
                certRepository.findByStatus(CertificateEntity.Status.ARCHIVED).orElse(null)
        );
    }

    /**
     * Stages a new certificate: validates it via the cert manager and, on
     * success, records its metadata with STAGED status. Any previously staged
     * certificate row is discarded first.
     *
     * @param certPem     server certificate (PEM bytes)
     * @param keyPem      private key (PEM bytes)
     * @param caBundlePem optional CA bundle (PEM bytes)
     * @param keyPassword password for an encrypted key, or null
     * @param actorId     user performing the upload, recorded on the entity
     * @return validation result; {@code valid() == false} means nothing was saved
     */
    @Transactional
    public CertValidationResult stage(byte[] certPem, byte[] keyPem, byte[] caBundlePem, String keyPassword, UUID actorId) {
        if (!certManager.isAvailable()) {
            return CertValidationResult.fail(List.of("Certificate management is not available"));
        }

        // Discard any existing staged cert
        certRepository.findByStatus(CertificateEntity.Status.STAGED).ifPresent(certRepository::delete);

        // Stage files and validate
        CertValidationResult result = certManager.stage(certPem, keyPem, caBundlePem, keyPassword);
        if (!result.valid()) {
            // Early return still commits the delete above: a failed upload
            // leaves no stale staged row behind.
            return result;
        }

        // Save metadata to DB
        var entity = CertificateEntity.fromInfo(result.info(), CertificateEntity.Status.STAGED);
        entity.setUploadedBy(actorId);
        certRepository.save(entity);

        log.info("Certificate staged by actor {}: subject={}", actorId, result.info().subject());
        return result;
    }

    /**
     * Promotes the staged certificate to active. The previous active
     * certificate becomes the archive (replacing any existing archive).
     *
     * @throws IllegalStateException when no staged certificate exists
     */
    @Transactional
    public void activate() {
        var staged = certRepository.findByStatus(CertificateEntity.Status.STAGED)
                .orElseThrow(() -> new IllegalStateException("No staged certificate to activate"));

        // File operations: delete archive files, move active -> archive, move staged -> active
        certManager.activate();

        // DB: delete archived, active -> archived, staged -> active
        certRepository.findByStatus(CertificateEntity.Status.ARCHIVED).ifPresent(certRepository::delete);

        certRepository.findByStatus(CertificateEntity.Status.ACTIVE).ifPresent(active -> {
            active.setStatus(CertificateEntity.Status.ARCHIVED);
            active.setArchivedAt(Instant.now());
            certRepository.save(active);
        });

        staged.setStatus(CertificateEntity.Status.ACTIVE);
        staged.setActivatedAt(Instant.now());
        certRepository.save(staged);

        log.info("Certificate activated: subject={}", staged.getSubject());
    }

    /**
     * Rolls back to the archived certificate, swapping it with the current
     * active one. Refuses to restore an expired archive.
     *
     * @throws IllegalStateException when there is no archive or it has expired
     */
    @Transactional
    public void restore() {
        var archived = certRepository.findByStatus(CertificateEntity.Status.ARCHIVED)
                .orElseThrow(() -> new IllegalStateException("No archived certificate to restore"));

        if (archived.getNotAfter() != null && archived.getNotAfter().isBefore(Instant.now())) {
            throw new IllegalStateException("Archived certificate has expired and cannot be restored");
        }

        // File operations: swap active <-> archive
        certManager.restore();

        // DB: swap statuses
        var active = certRepository.findByStatus(CertificateEntity.Status.ACTIVE).orElse(null);

        archived.setStatus(CertificateEntity.Status.ACTIVE);
        archived.setActivatedAt(Instant.now());
        archived.setArchivedAt(null);
        certRepository.save(archived);

        if (active != null) {
            active.setStatus(CertificateEntity.Status.ARCHIVED);
            active.setArchivedAt(Instant.now());
            certRepository.save(active);
        }

        log.info("Certificate restored from archive: subject={}", archived.getSubject());
    }

    /** Removes the staged certificate from both filesystem and database. */
    @Transactional
    public void discardStaged() {
        certManager.discardStaged();
        certRepository.findByStatus(CertificateEntity.Status.STAGED).ifPresent(certRepository::delete);
        log.info("Staged certificate discarded");
    }

    /**
     * Count tenants whose ca_applied_at is before the active cert's activated_at,
     * meaning they haven't picked up the latest CA bundle.
     *
     * @return 0 when there is no active cert, no activation timestamp, or no
     *         CA bundle to propagate
     */
    public long countStaleTenants() {
        var active = certRepository.findByStatus(CertificateEntity.Status.ACTIVE).orElse(null);
        if (active == null || active.getActivatedAt() == null) return 0;
        if (!active.isHasCa()) return 0; // no CA bundle to propagate
        return tenantRepository.countByCaAppliedAtBeforeOrCaAppliedAtIsNull(active.getActivatedAt());
    }

    /**
     * Seed the DB from the filesystem on startup (for bootstrap-generated certs).
     * No-op when an ACTIVE row already exists or the cert manager reports no
     * active certificate on disk.
     */
    @Transactional
    public void seedFromFilesystem() {
        if (certRepository.findByStatus(CertificateEntity.Status.ACTIVE).isPresent()) {
            return; // Already seeded
        }
        CertificateInfo activeInfo = certManager.getActive();
        if (activeInfo != null) {
            var entity = CertificateEntity.fromInfo(activeInfo, CertificateEntity.Status.ACTIVE);
            // activatedAt is set to "now", not the cert's real activation time,
            // which is unknown for a bootstrap-generated cert.
            entity.setActivatedAt(Instant.now());
            certRepository.save(entity);
            log.info("Seeded certificate metadata from filesystem: subject={}", activeInfo.subject());
        }
    }
}
|
||||
@@ -0,0 +1,20 @@
|
||||
package net.siegeln.cameleer.saas.certificate;
|
||||
|
||||
import org.springframework.boot.context.event.ApplicationReadyEvent;
|
||||
import org.springframework.context.event.EventListener;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
@Component
|
||||
public class CertificateStartupListener {
|
||||
|
||||
private final CertificateService certificateService;
|
||||
|
||||
public CertificateStartupListener(CertificateService certificateService) {
|
||||
this.certificateService = certificateService;
|
||||
}
|
||||
|
||||
@EventListener(ApplicationReadyEvent.class)
|
||||
public void onReady() {
|
||||
certificateService.seedFromFilesystem();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,82 @@
|
||||
package net.siegeln.cameleer.saas.certificate;
|
||||
|
||||
import jakarta.persistence.Column;
|
||||
import jakarta.persistence.Entity;
|
||||
import jakarta.persistence.EnumType;
|
||||
import jakarta.persistence.Enumerated;
|
||||
import jakarta.persistence.GeneratedValue;
|
||||
import jakarta.persistence.GenerationType;
|
||||
import jakarta.persistence.Id;
|
||||
import jakarta.persistence.PrePersist;
|
||||
import jakarta.persistence.Table;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.UUID;
|
||||
|
||||
/**
 * A tenant-supplied CA certificate, stored row-per-cert in
 * {@code tenant_ca_certs}. ACTIVE rows across all tenants are aggregated
 * into the shared ca.pem bundle (see TenantCaCertRepository#findAllActive);
 * STAGED rows are uploaded but not yet included.
 */
@Entity
@Table(name = "tenant_ca_certs")
public class TenantCaCertEntity {

    // ACTIVE = included in the aggregated CA bundle; STAGED = uploaded only.
    public enum Status { ACTIVE, STAGED }

    @Id
    @GeneratedValue(strategy = GenerationType.UUID)
    private UUID id;

    // Owning tenant; plain UUID column, no FK association mapped here.
    @Column(name = "tenant_id", nullable = false)
    private UUID tenantId;

    @Enumerated(EnumType.STRING)
    @Column(name = "status", nullable = false, length = 10)
    private Status status;

    // Optional human-readable name chosen by the uploader.
    @Column(name = "label", length = 200)
    private String label;

    // X.500 subject DN extracted from the certificate at upload time.
    @Column(name = "subject", length = 500)
    private String subject;

    // X.500 issuer DN extracted from the certificate at upload time.
    @Column(name = "issuer", length = 500)
    private String issuer;

    // SHA-256 of the DER encoding, colon-delimited hex (see TenantCaCertService.stage).
    @Column(name = "fingerprint", length = 128)
    private String fingerprint;

    @Column(name = "not_before")
    private Instant notBefore;

    @Column(name = "not_after")
    private Instant notAfter;

    // Full PEM text of the certificate; source material for bundle rebuilds.
    @Column(name = "cert_pem", nullable = false, columnDefinition = "TEXT")
    private String certPem;

    @Column(name = "created_at", nullable = false, updatable = false)
    private Instant createdAt;

    // Stamp creation time on first persist unless the caller set one explicitly.
    @PrePersist
    protected void onCreate() {
        if (createdAt == null) createdAt = Instant.now();
    }

    // --- accessors (id and createdAt are read-only: generated / @PrePersist) ---

    public UUID getId() { return id; }
    public UUID getTenantId() { return tenantId; }
    public void setTenantId(UUID tenantId) { this.tenantId = tenantId; }
    public Status getStatus() { return status; }
    public void setStatus(Status status) { this.status = status; }
    public String getLabel() { return label; }
    public void setLabel(String label) { this.label = label; }
    public String getSubject() { return subject; }
    public void setSubject(String subject) { this.subject = subject; }
    public String getIssuer() { return issuer; }
    public void setIssuer(String issuer) { this.issuer = issuer; }
    public String getFingerprint() { return fingerprint; }
    public void setFingerprint(String fingerprint) { this.fingerprint = fingerprint; }
    public Instant getNotBefore() { return notBefore; }
    public void setNotBefore(Instant notBefore) { this.notBefore = notBefore; }
    public Instant getNotAfter() { return notAfter; }
    public void setNotAfter(Instant notAfter) { this.notAfter = notAfter; }
    public String getCertPem() { return certPem; }
    public void setCertPem(String certPem) { this.certPem = certPem; }
    public Instant getCreatedAt() { return createdAt; }
}
|
||||
@@ -0,0 +1,18 @@
|
||||
package net.siegeln.cameleer.saas.certificate;
|
||||
|
||||
import org.springframework.data.jpa.repository.JpaRepository;
|
||||
import org.springframework.data.jpa.repository.Query;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
/**
 * Spring Data repository for {@link TenantCaCertEntity}. The two derived
 * queries are scoped to one tenant; {@link #findAllActive()} deliberately
 * crosses tenants to feed the aggregated CA bundle.
 */
public interface TenantCaCertRepository extends JpaRepository<TenantCaCertEntity, UUID> {

    /** All CA certs for one tenant, newest upload first. */
    List<TenantCaCertEntity> findByTenantIdOrderByCreatedAtDesc(UUID tenantId);

    /** CA certs for one tenant filtered by lifecycle status. */
    List<TenantCaCertEntity> findByTenantIdAndStatus(UUID tenantId, TenantCaCertEntity.Status status);

    /** All active CAs across all tenants — used to rebuild the aggregated ca.pem. */
    @Query("SELECT c FROM TenantCaCertEntity c WHERE c.status = 'ACTIVE'")
    List<TenantCaCertEntity> findAllActive();
}
|
||||
@@ -0,0 +1,196 @@
|
||||
package net.siegeln.cameleer.saas.certificate;
|
||||
|
||||
import net.siegeln.cameleer.saas.provisioning.DockerCertificateManager;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.transaction.annotation.Transactional;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.security.MessageDigest;
|
||||
import java.security.cert.CertificateFactory;
|
||||
import java.security.cert.X509Certificate;
|
||||
import java.util.ArrayList;
|
||||
import java.util.HexFormat;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
@Service
|
||||
public class TenantCaCertService {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(TenantCaCertService.class);
|
||||
|
||||
private final TenantCaCertRepository caCertRepository;
|
||||
private final CertificateManager certManager;
|
||||
|
||||
public TenantCaCertService(TenantCaCertRepository caCertRepository, CertificateManager certManager) {
|
||||
this.caCertRepository = caCertRepository;
|
||||
this.certManager = certManager;
|
||||
}
|
||||
|
||||
public List<TenantCaCertEntity> listForTenant(UUID tenantId) {
|
||||
return caCertRepository.findByTenantIdOrderByCreatedAtDesc(tenantId);
|
||||
}
|
||||
|
||||
@Transactional
|
||||
public TenantCaCertEntity stage(UUID tenantId, String label, byte[] certPem) {
|
||||
// Parse and validate
|
||||
X509Certificate cert;
|
||||
try {
|
||||
var cf = CertificateFactory.getInstance("X.509");
|
||||
cert = (X509Certificate) cf.generateCertificate(new ByteArrayInputStream(certPem));
|
||||
} catch (Exception e) {
|
||||
throw new IllegalArgumentException("Invalid CA certificate PEM: " + e.getMessage());
|
||||
}
|
||||
|
||||
String fingerprint;
|
||||
try {
|
||||
fingerprint = HexFormat.ofDelimiter(":").formatHex(
|
||||
MessageDigest.getInstance("SHA-256").digest(cert.getEncoded()));
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException("Failed to compute fingerprint", e);
|
||||
}
|
||||
|
||||
var entity = new TenantCaCertEntity();
|
||||
entity.setTenantId(tenantId);
|
||||
entity.setStatus(TenantCaCertEntity.Status.STAGED);
|
||||
entity.setLabel(label);
|
||||
entity.setSubject(cert.getSubjectX500Principal().getName());
|
||||
entity.setIssuer(cert.getIssuerX500Principal().getName());
|
||||
entity.setFingerprint(fingerprint);
|
||||
entity.setNotBefore(cert.getNotBefore().toInstant());
|
||||
entity.setNotAfter(cert.getNotAfter().toInstant());
|
||||
entity.setCertPem(new String(certPem));
|
||||
|
||||
var saved = caCertRepository.save(entity);
|
||||
log.info("Staged tenant CA cert for tenant {}: subject={}", tenantId, entity.getSubject());
|
||||
return saved;
|
||||
}
|
||||
|
||||
@Transactional
|
||||
public TenantCaCertEntity activate(UUID tenantId, UUID certId) {
|
||||
var entity = caCertRepository.findById(certId)
|
||||
.orElseThrow(() -> new IllegalArgumentException("CA certificate not found"));
|
||||
if (!entity.getTenantId().equals(tenantId)) {
|
||||
throw new IllegalArgumentException("CA certificate does not belong to this tenant");
|
||||
}
|
||||
if (entity.getStatus() != TenantCaCertEntity.Status.STAGED) {
|
||||
throw new IllegalStateException("Only staged certificates can be activated");
|
||||
}
|
||||
|
||||
entity.setStatus(TenantCaCertEntity.Status.ACTIVE);
|
||||
caCertRepository.save(entity);
|
||||
|
||||
rebuildCaBundle();
|
||||
log.info("Activated tenant CA cert {} for tenant {}", certId, tenantId);
|
||||
return entity;
|
||||
}
|
||||
|
||||
@Transactional
|
||||
public void delete(UUID tenantId, UUID certId) {
|
||||
var entity = caCertRepository.findById(certId)
|
||||
.orElseThrow(() -> new IllegalArgumentException("CA certificate not found"));
|
||||
if (!entity.getTenantId().equals(tenantId)) {
|
||||
throw new IllegalArgumentException("CA certificate does not belong to this tenant");
|
||||
}
|
||||
|
||||
boolean wasActive = entity.getStatus() == TenantCaCertEntity.Status.ACTIVE;
|
||||
caCertRepository.delete(entity);
|
||||
|
||||
if (wasActive) {
|
||||
rebuildCaBundle();
|
||||
}
|
||||
log.info("Deleted tenant CA cert {} for tenant {}", certId, tenantId);
|
||||
}
|
||||
|
||||
/**
|
||||
* Rebuild the aggregated ca.pem from all active tenant CAs + platform CA.
|
||||
* Uses the .wip atomic swap pattern via CertificateManager.
|
||||
*/
|
||||
public void rebuildCaBundle() {
|
||||
if (!certManager.isAvailable()) {
|
||||
log.warn("Certificate manager not available — skipping CA bundle rebuild");
|
||||
return;
|
||||
}
|
||||
|
||||
List<TenantCaCertEntity> allActive = caCertRepository.findAllActive();
|
||||
|
||||
// Collect all PEM certs
|
||||
var parts = new ArrayList<String>();
|
||||
|
||||
// Platform CA (from existing ca.pem staged with platform cert, if any)
|
||||
// We read the current platform cert's CA from the active cert's staged ca
|
||||
// Actually, the platform CA is managed separately by CertificateService.
|
||||
// We only aggregate tenant CAs here + whatever platform CA exists.
|
||||
byte[] existingPlatformCa = readPlatformCa();
|
||||
if (existingPlatformCa != null) {
|
||||
parts.add(new String(existingPlatformCa).trim());
|
||||
}
|
||||
|
||||
for (var cert : allActive) {
|
||||
parts.add(cert.getCertPem().trim());
|
||||
}
|
||||
|
||||
if (parts.isEmpty()) {
|
||||
// No CAs at all — remove ca.pem
|
||||
try {
|
||||
var certsDir = getCertsPath();
|
||||
if (certsDir != null) {
|
||||
java.nio.file.Files.deleteIfExists(certsDir.resolve("ca.pem"));
|
||||
log.info("Removed ca.pem — no active CA certificates");
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to remove ca.pem: {}", e.getMessage());
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
byte[] bundleBytes = String.join("\n", parts).concat("\n").getBytes();
|
||||
|
||||
// Validate the bundle is parseable
|
||||
try {
|
||||
var cf = CertificateFactory.getInstance("X.509");
|
||||
var certs = cf.generateCertificates(new ByteArrayInputStream(bundleBytes));
|
||||
if (certs.isEmpty()) {
|
||||
log.error("Rebuilt CA bundle contains no valid certificates — aborting");
|
||||
return;
|
||||
}
|
||||
log.info("CA bundle rebuilt with {} certificate(s)", certs.size());
|
||||
} catch (Exception e) {
|
||||
log.error("Rebuilt CA bundle failed validation — aborting: {}", e.getMessage());
|
||||
return;
|
||||
}
|
||||
|
||||
// Atomic write via .wip pattern
|
||||
try {
|
||||
var certsDir = getCertsPath();
|
||||
if (certsDir == null) return;
|
||||
var wipPath = certsDir.resolve("ca.wip");
|
||||
var targetPath = certsDir.resolve("ca.pem");
|
||||
java.nio.file.Files.write(wipPath, bundleBytes);
|
||||
java.nio.file.Files.move(wipPath, targetPath,
|
||||
java.nio.file.StandardCopyOption.REPLACE_EXISTING,
|
||||
java.nio.file.StandardCopyOption.ATOMIC_MOVE);
|
||||
log.info("CA bundle written to {}", targetPath);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to write CA bundle: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
/** Read the platform CA portion (uploaded with the platform cert, not tenant CAs). */
|
||||
private byte[] readPlatformCa() {
|
||||
// The platform CA is stored alongside the platform cert by CertificateService.
|
||||
// We read it from the cert manager, but we need to distinguish it from the
|
||||
// aggregated bundle. For now, we don't separate platform CA from tenant CAs
|
||||
// in the file — the rebuild always produces the full bundle.
|
||||
// Platform CA would be stored separately if vendor uploaded one with their cert.
|
||||
return null; // TODO: track platform CA separately if needed
|
||||
}
|
||||
|
||||
private java.nio.file.Path getCertsPath() {
|
||||
if (certManager instanceof DockerCertificateManager dcm) {
|
||||
return dcm.getCertsDir();
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
@@ -1,106 +0,0 @@
|
||||
package net.siegeln.cameleer.saas.config;
|
||||
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import net.siegeln.cameleer.saas.tenant.TenantEntity;
|
||||
import net.siegeln.cameleer.saas.tenant.TenantRepository;
|
||||
import net.siegeln.cameleer.saas.tenant.TenantStatus;
|
||||
import net.siegeln.cameleer.saas.tenant.Tier;
|
||||
import net.siegeln.cameleer.saas.license.LicenseEntity;
|
||||
import net.siegeln.cameleer.saas.license.LicenseRepository;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.boot.ApplicationArguments;
|
||||
import org.springframework.boot.ApplicationRunner;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.io.File;
|
||||
import java.time.Instant;
|
||||
import java.time.temporal.ChronoUnit;
|
||||
import java.util.Map;
|
||||
|
||||
/**
 * One-shot data seeder: on application start, reads the Logto bootstrap file
 * and, if the bootstrap tenant does not exist yet, creates it together with a
 * default LOW-tier license. Idempotent via the slug-existence check; any
 * failure is logged and swallowed so startup is never blocked.
 */
@Component
public class BootstrapDataSeeder implements ApplicationRunner {

    private static final Logger log = LoggerFactory.getLogger(BootstrapDataSeeder.class);
    private static final String BOOTSTRAP_FILE = "/data/bootstrap/logto-bootstrap.json";

    private final TenantRepository tenantRepository;
    private final LicenseRepository licenseRepository;
    private final ObjectMapper objectMapper = new ObjectMapper();

    public BootstrapDataSeeder(TenantRepository tenantRepository,
                               LicenseRepository licenseRepository) {
        this.tenantRepository = tenantRepository;
        this.licenseRepository = licenseRepository;
    }

    /**
     * Runs once at startup. Skips silently when the bootstrap file is absent,
     * lacks required fields (organizationId, tenantSlug), or the tenant
     * already exists.
     */
    @Override
    public void run(ApplicationArguments args) {
        File file = new File(BOOTSTRAP_FILE);
        if (!file.exists()) {
            log.info("No bootstrap file found at {} — skipping data seeding", BOOTSTRAP_FILE);
            return;
        }

        try {
            JsonNode config = objectMapper.readTree(file);
            String orgId = getField(config, "organizationId");
            String tenantName = getField(config, "tenantName");
            String tenantSlug = getField(config, "tenantSlug");

            if (orgId == null || tenantSlug == null) {
                log.info("Bootstrap file missing organizationId or tenantSlug — skipping");
                return;
            }

            // Check if tenant already exists — makes the seeder idempotent
            if (tenantRepository.existsBySlug(tenantSlug)) {
                log.info("Tenant '{}' already exists — skipping bootstrap seeding", tenantSlug);
                return;
            }

            log.info("Seeding bootstrap tenant '{}'...", tenantSlug);

            // Create tenant (tenantName is optional; fall back to a placeholder)
            TenantEntity tenant = new TenantEntity();
            tenant.setName(tenantName != null ? tenantName : "Example Tenant");
            tenant.setSlug(tenantSlug);
            tenant.setTier(Tier.LOW);
            tenant.setStatus(TenantStatus.ACTIVE);
            tenant.setLogtoOrgId(orgId);
            tenant = tenantRepository.save(tenant);
            log.info("Created tenant: {} ({})", tenant.getSlug(), tenant.getId());

            // Create license: hard-coded LOW-tier defaults, valid for one year.
            // NOTE(review): license tier is a String here while the tenant uses
            // the Tier enum — presumably LicenseEntity stores tier as text.
            LicenseEntity license = new LicenseEntity();
            license.setTenantId(tenant.getId());
            license.setTier("LOW");
            license.setFeatures(Map.of(
                    "topology", true,
                    "lineage", false,
                    "correlation", false,
                    "debugger", false,
                    "replay", false
            ));
            license.setLimits(Map.of(
                    "max_agents", 3,
                    "retention_days", 7,
                    "max_environments", 1
            ));
            license.setIssuedAt(Instant.now());
            license.setExpiresAt(Instant.now().plus(365, ChronoUnit.DAYS));
            license.setToken("bootstrap-license");
            licenseRepository.save(license);
            log.info("Created license for tenant '{}'", tenantSlug);

            log.info("Bootstrap data seeding complete.");
        } catch (Exception e) {
            // Deliberate best-effort: a seeding failure must not abort startup.
            log.error("Failed to seed bootstrap data: {}", e.getMessage(), e);
        }
    }

    /** Returns the field's text value, or null when the field is absent. */
    private String getField(JsonNode node, String field) {
        return node.has(field) ? node.get(field).asText() : null;
    }
}
|
||||
@@ -18,10 +18,10 @@ public class PublicConfigController {
|
||||
private static final Logger log = LoggerFactory.getLogger(PublicConfigController.class);
|
||||
private static final String BOOTSTRAP_FILE = "/data/bootstrap/logto-bootstrap.json";
|
||||
|
||||
@Value("${cameleer.identity.logto-public-endpoint:${cameleer.identity.logto-endpoint:}}")
|
||||
@Value("${cameleer.saas.identity.logtopublicendpoint:${cameleer.saas.identity.logtoendpoint:}}")
|
||||
private String logtoPublicEndpoint;
|
||||
|
||||
@Value("${cameleer.identity.spa-client-id:}")
|
||||
@Value("${cameleer.saas.identity.spaclientid:}")
|
||||
private String spaClientId;
|
||||
|
||||
private final ObjectMapper objectMapper = new ObjectMapper();
|
||||
|
||||
@@ -44,8 +44,11 @@ public class SecurityConfig {
|
||||
.requestMatchers("/actuator/health").permitAll()
|
||||
.requestMatchers("/api/config").permitAll()
|
||||
.requestMatchers("/", "/index.html", "/login", "/callback",
|
||||
"/vendor/**", "/tenant/**",
|
||||
"/environments/**", "/license", "/admin/**").permitAll()
|
||||
.requestMatchers("/_app/**", "/favicon.ico", "/favicon.svg", "/logo.svg", "/logo-dark.svg").permitAll()
|
||||
.requestMatchers("/api/vendor/**").hasAuthority("SCOPE_platform:admin")
|
||||
.requestMatchers("/api/tenant/**").authenticated()
|
||||
.anyRequest().authenticated()
|
||||
)
|
||||
.oauth2ResourceServer(oauth2 -> oauth2.jwt(jwt ->
|
||||
@@ -77,7 +80,7 @@ public class SecurityConfig {
|
||||
public JwtDecoder jwtDecoder(
|
||||
@Value("${spring.security.oauth2.resourceserver.jwt.jwk-set-uri}") String jwkSetUri,
|
||||
@Value("${spring.security.oauth2.resourceserver.jwt.issuer-uri:}") String issuerUri,
|
||||
@Value("${cameleer.identity.audience:}") String audience) throws Exception {
|
||||
@Value("${cameleer.saas.identity.audience:}") String audience) throws Exception {
|
||||
var jwkSource = JWKSourceBuilder.create(new URL(jwkSetUri)).build();
|
||||
var keySelector = new JWSVerificationKeySelector<SecurityContext>(
|
||||
JWSAlgorithm.ES384, jwkSource);
|
||||
|
||||
@@ -1,13 +1,16 @@
|
||||
package net.siegeln.cameleer.saas.config;
|
||||
|
||||
import org.springframework.stereotype.Controller;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
|
||||
@Controller
|
||||
public class SpaController {
|
||||
|
||||
@GetMapping(value = {"/", "/login", "/callback", "/environments/**", "/license"})
|
||||
public String spa() {
|
||||
@RequestMapping(value = {
|
||||
"/", "/login", "/callback",
|
||||
"/vendor/**", "/tenant/**"
|
||||
})
|
||||
public String forward() {
|
||||
return "forward:/index.html";
|
||||
}
|
||||
}
|
||||
|
||||
@@ -34,7 +34,19 @@ public class TenantIsolationInterceptor implements HandlerInterceptor {
|
||||
var authentication = SecurityContextHolder.getContext().getAuthentication();
|
||||
if (!(authentication instanceof JwtAuthenticationToken jwtAuth)) return true;
|
||||
|
||||
// 1. Resolve: JWT organization_id -> TenantContext
|
||||
// Strip context-path prefix to get the application-relative path.
|
||||
// getServletPath() returns empty string in MockMvc, so use getRequestURI() minus contextPath.
|
||||
String contextPath = request.getContextPath();
|
||||
String uri = request.getRequestURI();
|
||||
String path = (contextPath != null && !contextPath.isEmpty() && uri.startsWith(contextPath))
|
||||
? uri.substring(contextPath.length()) : uri;
|
||||
|
||||
// Vendor endpoints: platform:admin already enforced by Spring Security
|
||||
if (path.startsWith("/api/vendor/")) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// 1. Resolve: JWT organization_id -> TenantContext (applies to all non-vendor paths)
|
||||
Jwt jwt = jwtAuth.getToken();
|
||||
String orgId = jwt.getClaimAsString("organization_id");
|
||||
if (orgId != null) {
|
||||
@@ -42,6 +54,15 @@ public class TenantIsolationInterceptor implements HandlerInterceptor {
|
||||
.ifPresent(tenant -> TenantContext.setTenantId(tenant.getId()));
|
||||
}
|
||||
|
||||
// Tenant portal endpoints: tenant resolved from JWT org context (no path variable)
|
||||
if (path.startsWith("/api/tenant/")) {
|
||||
if (TenantContext.getTenantId() == null) {
|
||||
response.sendError(HttpServletResponse.SC_FORBIDDEN, "No organization context");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// 2. Validate: read path variables from Spring's HandlerMapping
|
||||
@SuppressWarnings("unchecked")
|
||||
Map<String, String> pathVars = (Map<String, String>) request.getAttribute(
|
||||
|
||||
@@ -16,23 +16,26 @@ public class LogtoConfig {
|
||||
private static final Logger log = LoggerFactory.getLogger(LogtoConfig.class);
|
||||
private static final String BOOTSTRAP_FILE = "/data/bootstrap/logto-bootstrap.json";
|
||||
|
||||
@Value("${cameleer.identity.logto-endpoint:}")
|
||||
@Value("${cameleer.saas.identity.logtoendpoint:}")
|
||||
private String logtoEndpoint;
|
||||
|
||||
@Value("${cameleer.identity.m2m-client-id:}")
|
||||
@Value("${cameleer.saas.identity.m2mclientid:}")
|
||||
private String m2mClientId;
|
||||
|
||||
@Value("${cameleer.identity.m2m-client-secret:}")
|
||||
@Value("${cameleer.saas.identity.m2mclientsecret:}")
|
||||
private String m2mClientSecret;
|
||||
|
||||
@Value("${cameleer.identity.server-endpoint:http://cameleer3-server:8081}")
|
||||
@Value("${cameleer.saas.identity.serverendpoint:http://cameleer3-server:8081}")
|
||||
private String serverEndpoint;
|
||||
|
||||
private String tradAppId;
|
||||
private String tradAppSecret;
|
||||
|
||||
@PostConstruct
|
||||
public void init() {
|
||||
if (isConfigured()) return;
|
||||
|
||||
// Fall back to bootstrap file for M2M credentials
|
||||
// Fall back to bootstrap file for M2M credentials + trad app
|
||||
try {
|
||||
File file = new File(BOOTSTRAP_FILE);
|
||||
if (file.exists()) {
|
||||
@@ -43,6 +46,12 @@ public class LogtoConfig {
|
||||
if ((m2mClientSecret == null || m2mClientSecret.isEmpty()) && node.has("m2mClientSecret")) {
|
||||
m2mClientSecret = node.get("m2mClientSecret").asText();
|
||||
}
|
||||
if (node.has("tradAppId")) {
|
||||
tradAppId = node.get("tradAppId").asText();
|
||||
}
|
||||
if (node.has("tradAppSecret")) {
|
||||
tradAppSecret = node.get("tradAppSecret").asText();
|
||||
}
|
||||
log.info("Loaded M2M credentials from bootstrap file");
|
||||
}
|
||||
} catch (Exception e) {
|
||||
@@ -54,6 +63,8 @@ public class LogtoConfig {
|
||||
public String getM2mClientId() { return m2mClientId; }
|
||||
public String getM2mClientSecret() { return m2mClientSecret; }
|
||||
public String getServerEndpoint() { return serverEndpoint; }
|
||||
public String getTradAppId() { return tradAppId; }
|
||||
public String getTradAppSecret() { return tradAppSecret; }
|
||||
|
||||
public boolean isConfigured() {
|
||||
return logtoEndpoint != null && !logtoEndpoint.isEmpty()
|
||||
|
||||
@@ -75,6 +75,50 @@ public class LogtoManagementClient {
|
||||
.toBodilessEntity();
|
||||
}
|
||||
|
||||
/** Add redirect URIs to a Logto application (for OIDC callback registration). */
|
||||
@SuppressWarnings("unchecked")
|
||||
public void addAppRedirectUris(String appId, List<String> redirectUris, List<String> postLogoutUris) {
|
||||
if (!isAvailable() || appId == null) return;
|
||||
try {
|
||||
String token = getAccessToken();
|
||||
// GET current app config
|
||||
var app = (Map<String, Object>) restClient.get()
|
||||
.uri(config.getLogtoEndpoint() + "/api/applications/" + appId)
|
||||
.header("Authorization", "Bearer " + token)
|
||||
.retrieve()
|
||||
.body(Map.class);
|
||||
if (app == null) return;
|
||||
|
||||
var metadata = (Map<String, Object>) app.get("oidcClientMetadata");
|
||||
if (metadata == null) return;
|
||||
|
||||
// Merge new URIs with existing
|
||||
var existingRedirects = new ArrayList<>((List<String>) metadata.getOrDefault("redirectUris", List.of()));
|
||||
var existingPostLogout = new ArrayList<>((List<String>) metadata.getOrDefault("postLogoutRedirectUris", List.of()));
|
||||
for (String uri : redirectUris) {
|
||||
if (!existingRedirects.contains(uri)) existingRedirects.add(uri);
|
||||
}
|
||||
for (String uri : postLogoutUris) {
|
||||
if (!existingPostLogout.contains(uri)) existingPostLogout.add(uri);
|
||||
}
|
||||
|
||||
// PATCH app with updated URIs
|
||||
restClient.patch()
|
||||
.uri(config.getLogtoEndpoint() + "/api/applications/" + appId)
|
||||
.header("Authorization", "Bearer " + token)
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(Map.of("oidcClientMetadata", Map.of(
|
||||
"redirectUris", existingRedirects,
|
||||
"postLogoutRedirectUris", existingPostLogout
|
||||
)))
|
||||
.retrieve()
|
||||
.toBodilessEntity();
|
||||
log.info("Updated redirect URIs for app {}: added {}", appId, redirectUris);
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to update redirect URIs for app {}: {}", appId, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public List<Map<String, String>> getUserOrganizations(String userId) {
|
||||
if (!isAvailable()) return List.of();
|
||||
|
||||
@@ -101,6 +145,287 @@ public class LogtoManagementClient {
|
||||
}
|
||||
}
|
||||
|
||||
/** List members of a Logto organization. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public List<Map<String, Object>> listOrganizationMembers(String orgId) {
|
||||
if (!isAvailable()) return List.of();
|
||||
try {
|
||||
var resp = restClient.get()
|
||||
.uri(config.getLogtoEndpoint() + "/api/organizations/" + orgId + "/users")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.retrieve()
|
||||
.body(List.class);
|
||||
return resp != null ? resp : List.of();
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to list org members for {}: {}", orgId, e.getMessage());
|
||||
return List.of();
|
||||
}
|
||||
}
|
||||
|
||||
/** Get roles assigned to a user within an organization. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public List<Map<String, Object>> getUserOrganizationRoles(String orgId, String userId) {
|
||||
if (!isAvailable()) return List.of();
|
||||
try {
|
||||
var resp = restClient.get()
|
||||
.uri(config.getLogtoEndpoint() + "/api/organizations/" + orgId + "/users/" + userId + "/roles")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.retrieve()
|
||||
.body(List.class);
|
||||
return resp != null ? resp : List.of();
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to get user roles: {}", e.getMessage());
|
||||
return List.of();
|
||||
}
|
||||
}
|
||||
|
||||
/** Assign a role to a user in an organization. */
|
||||
public void assignOrganizationRole(String orgId, String userId, String roleId) {
|
||||
if (!isAvailable()) return;
|
||||
try {
|
||||
restClient.post()
|
||||
.uri(config.getLogtoEndpoint() + "/api/organizations/" + orgId + "/users/" + userId + "/roles")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(Map.of("organizationRoleIds", List.of(roleId)))
|
||||
.retrieve()
|
||||
.toBodilessEntity();
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to assign role: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
/** Remove a user from an organization. */
|
||||
public void removeUserFromOrganization(String orgId, String userId) {
|
||||
if (!isAvailable()) return;
|
||||
try {
|
||||
restClient.delete()
|
||||
.uri(config.getLogtoEndpoint() + "/api/organizations/" + orgId + "/users/" + userId)
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.retrieve()
|
||||
.toBodilessEntity();
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to remove user from org: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
/** Create a user in Logto and add to organization with role. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public String createAndInviteUser(String email, String orgId, String roleId) {
|
||||
if (!isAvailable()) return null;
|
||||
try {
|
||||
var userResp = (Map<String, Object>) restClient.post()
|
||||
.uri(config.getLogtoEndpoint() + "/api/users")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(Map.of("primaryEmail", email, "name", email.split("@")[0]))
|
||||
.retrieve()
|
||||
.body(Map.class);
|
||||
String userId = String.valueOf(userResp.get("id"));
|
||||
addUserToOrganization(orgId, userId);
|
||||
if (roleId != null) {
|
||||
assignOrganizationRole(orgId, userId, roleId);
|
||||
}
|
||||
return userId;
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to create and invite user: {}", e.getMessage());
|
||||
throw new RuntimeException("Invite failed: " + e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
/** Create a user with username/password and add to org with role. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public String createUserWithPassword(String username, String password, String orgId, String roleId) {
|
||||
if (!isAvailable()) return null;
|
||||
try {
|
||||
var userResp = (Map<String, Object>) restClient.post()
|
||||
.uri(config.getLogtoEndpoint() + "/api/users")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(Map.of("username", username, "password", password, "name", username))
|
||||
.retrieve()
|
||||
.body(Map.class);
|
||||
String userId = String.valueOf(userResp.get("id"));
|
||||
addUserToOrganization(orgId, userId);
|
||||
if (roleId != null) {
|
||||
assignOrganizationRole(orgId, userId, roleId);
|
||||
}
|
||||
log.info("Created user '{}' and added to org {} with role {}", username, orgId, roleId);
|
||||
return userId;
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to create user '{}': {}", username, e.getMessage());
|
||||
throw new RuntimeException("User creation failed: " + e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
/** Find org role ID by name (e.g., "owner", "operator", "viewer"). */
|
||||
@SuppressWarnings("unchecked")
|
||||
public String findOrgRoleIdByName(String roleName) {
|
||||
var roles = listOrganizationRoles();
|
||||
return roles.stream()
|
||||
.filter(r -> roleName.equals(r.get("name")))
|
||||
.map(r -> String.valueOf(r.get("id")))
|
||||
.findFirst()
|
||||
.orElse(null);
|
||||
}
|
||||
|
||||
/** List available organization roles. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public List<Map<String, Object>> listOrganizationRoles() {
|
||||
if (!isAvailable()) return List.of();
|
||||
try {
|
||||
var resp = restClient.get()
|
||||
.uri(config.getLogtoEndpoint() + "/api/organization-roles")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.retrieve()
|
||||
.body(List.class);
|
||||
return resp != null ? resp : List.of();
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to list org roles: {}", e.getMessage());
|
||||
return List.of();
|
||||
}
|
||||
}
|
||||
|
||||
// --- SSO Connector Management ---
|
||||
|
||||
/** List all SSO connectors. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public List<Map<String, Object>> listSsoConnectors() {
|
||||
if (!isAvailable()) return List.of();
|
||||
try {
|
||||
var resp = restClient.get()
|
||||
.uri(config.getLogtoEndpoint() + "/api/sso-connectors?page=1&page_size=100")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.retrieve()
|
||||
.body(List.class);
|
||||
return resp != null ? resp : List.of();
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to list SSO connectors: {}", e.getMessage());
|
||||
return List.of();
|
||||
}
|
||||
}
|
||||
|
||||
/** Create an SSO connector. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public Map<String, Object> createSsoConnector(String providerName, String connectorName,
|
||||
Map<String, Object> connectorConfig, List<String> domains) {
|
||||
if (!isAvailable()) return null;
|
||||
var body = new java.util.HashMap<String, Object>();
|
||||
body.put("providerName", providerName);
|
||||
body.put("connectorName", connectorName);
|
||||
if (connectorConfig != null && !connectorConfig.isEmpty()) body.put("config", connectorConfig);
|
||||
if (domains != null && !domains.isEmpty()) body.put("domains", domains);
|
||||
|
||||
return (Map<String, Object>) restClient.post()
|
||||
.uri(config.getLogtoEndpoint() + "/api/sso-connectors")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(body)
|
||||
.retrieve()
|
||||
.body(Map.class);
|
||||
}
|
||||
|
||||
/** Get an SSO connector by ID. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public Map<String, Object> getSsoConnector(String connectorId) {
|
||||
if (!isAvailable()) return null;
|
||||
return (Map<String, Object>) restClient.get()
|
||||
.uri(config.getLogtoEndpoint() + "/api/sso-connectors/" + connectorId)
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.retrieve()
|
||||
.body(Map.class);
|
||||
}
|
||||
|
||||
/** Update an SSO connector (partial update). */
|
||||
@SuppressWarnings("unchecked")
|
||||
public Map<String, Object> updateSsoConnector(String connectorId, Map<String, Object> updates) {
|
||||
if (!isAvailable()) return null;
|
||||
return (Map<String, Object>) restClient.patch()
|
||||
.uri(config.getLogtoEndpoint() + "/api/sso-connectors/" + connectorId)
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(updates)
|
||||
.retrieve()
|
||||
.body(Map.class);
|
||||
}
|
||||
|
||||
/** Delete an SSO connector. */
|
||||
public void deleteSsoConnector(String connectorId) {
|
||||
if (!isAvailable()) return;
|
||||
restClient.delete()
|
||||
.uri(config.getLogtoEndpoint() + "/api/sso-connectors/" + connectorId)
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.retrieve()
|
||||
.toBodilessEntity();
|
||||
}
|
||||
|
||||
/** List SSO connectors linked to an organization via JIT provisioning. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public List<Map<String, Object>> getOrgJitSsoConnectors(String orgId) {
|
||||
if (!isAvailable()) return List.of();
|
||||
try {
|
||||
var resp = restClient.get()
|
||||
.uri(config.getLogtoEndpoint() + "/api/organizations/" + orgId + "/jit/sso-connectors?page=1&page_size=100")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.retrieve()
|
||||
.body(List.class);
|
||||
return resp != null ? resp : List.of();
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to list org JIT SSO connectors for {}: {}", orgId, e.getMessage());
|
||||
return List.of();
|
||||
}
|
||||
}
|
||||
|
||||
/** Link an SSO connector to an organization for JIT provisioning. */
|
||||
public void linkSsoConnectorToOrg(String orgId, String connectorId) {
|
||||
if (!isAvailable()) return;
|
||||
restClient.post()
|
||||
.uri(config.getLogtoEndpoint() + "/api/organizations/" + orgId + "/jit/sso-connectors")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(Map.of("ssoConnectorIds", List.of(connectorId)))
|
||||
.retrieve()
|
||||
.toBodilessEntity();
|
||||
}
|
||||
|
||||
/** Unlink an SSO connector from an organization's JIT provisioning. */
|
||||
public void unlinkSsoConnectorFromOrg(String orgId, String connectorId) {
|
||||
if (!isAvailable()) return;
|
||||
restClient.delete()
|
||||
.uri(config.getLogtoEndpoint() + "/api/organizations/" + orgId + "/jit/sso-connectors/" + connectorId)
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.retrieve()
|
||||
.toBodilessEntity();
|
||||
}
|
||||
|
||||
/** Update a user's password. */
|
||||
public void updateUserPassword(String userId, String newPassword) {
|
||||
if (!isAvailable()) throw new IllegalStateException("Logto not configured");
|
||||
restClient.patch()
|
||||
.uri(config.getLogtoEndpoint() + "/api/users/" + userId + "/password")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(Map.of("password", newPassword))
|
||||
.retrieve()
|
||||
.toBodilessEntity();
|
||||
}
|
||||
|
||||
/** Get a user by ID. Returns username, primaryEmail, name. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public Map<String, Object> getUser(String userId) {
|
||||
if (!isAvailable() || userId == null) return null;
|
||||
try {
|
||||
return (Map<String, Object>) restClient.get()
|
||||
.uri(config.getLogtoEndpoint() + "/api/users/" + userId)
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.retrieve()
|
||||
.body(Map.class);
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to get user {}: {}", userId, e.getMessage());
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
||||
private static final String MGMT_API_RESOURCE = "https://default.logto.app/api";
|
||||
|
||||
private synchronized String getAccessToken() {
|
||||
|
||||
@@ -72,18 +72,105 @@ public class ServerApiClient {
|
||||
.toBodilessEntity();
|
||||
}
|
||||
|
||||
/**
|
||||
* Check server health.
|
||||
*/
|
||||
/** Health check for a specific tenant's server. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public Map<String, Object> getHealth(String serverEndpoint) {
|
||||
return RestClient.create(serverEndpoint)
|
||||
.get()
|
||||
.uri("/api/v1/health")
|
||||
public ServerHealthResponse getHealth(String serverEndpoint) {
|
||||
try {
|
||||
String url = serverEndpoint + "/api/v1/health";
|
||||
var resp = RestClient.create().get().uri(url)
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.header("X-Cameleer-Protocol-Version", "1")
|
||||
.retrieve()
|
||||
.body(Map.class);
|
||||
String status = resp != null ? String.valueOf(resp.get("status")) : "UNKNOWN";
|
||||
return new ServerHealthResponse("UP".equals(status), status);
|
||||
} catch (Exception e) {
|
||||
log.warn("Health check failed for {}: {}", serverEndpoint, e.getMessage());
|
||||
return new ServerHealthResponse(false, "DOWN");
|
||||
}
|
||||
}
|
||||
|
||||
/** Push OIDC configuration to a tenant's server. */
|
||||
public void pushOidcConfig(String serverEndpoint, Map<String, Object> oidcConfig) {
|
||||
try {
|
||||
RestClient.create().put()
|
||||
.uri(serverEndpoint + "/api/v1/admin/oidc")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.header("X-Cameleer-Protocol-Version", "1")
|
||||
.header("Content-Type", "application/json")
|
||||
.body(oidcConfig)
|
||||
.retrieve()
|
||||
.toBodilessEntity();
|
||||
log.info("Pushed OIDC config to {}", serverEndpoint);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to push OIDC config to {}: {}", serverEndpoint, e.getMessage());
|
||||
throw new RuntimeException("OIDC config push failed: " + e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
/** Get OIDC configuration from a tenant's server. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public Map<String, Object> getOidcConfig(String serverEndpoint) {
|
||||
try {
|
||||
return RestClient.create().get()
|
||||
.uri(serverEndpoint + "/api/v1/admin/oidc")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.header("X-Cameleer-Protocol-Version", "1")
|
||||
.retrieve()
|
||||
.body(Map.class);
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to get OIDC config from {}: {}", serverEndpoint, e.getMessage());
|
||||
return Map.of();
|
||||
}
|
||||
}
|
||||
|
||||
/** Fetch agent count from a tenant's server. */
|
||||
public int getAgentCount(String serverEndpoint) {
|
||||
try {
|
||||
var resp = RestClient.create().get()
|
||||
.uri(serverEndpoint + "/api/v1/agents")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.header("X-Cameleer-Protocol-Version", "1")
|
||||
.retrieve()
|
||||
.body(java.util.List.class);
|
||||
return resp != null ? resp.size() : 0;
|
||||
} catch (Exception e) {
|
||||
log.warn("Agent count fetch failed for {}: {}", serverEndpoint, e.getMessage());
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/** Fetch environment count from a tenant's server. */
|
||||
public int getEnvironmentCount(String serverEndpoint) {
|
||||
try {
|
||||
var resp = RestClient.create().get()
|
||||
.uri(serverEndpoint + "/api/v1/admin/environments")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.header("X-Cameleer-Protocol-Version", "1")
|
||||
.retrieve()
|
||||
.body(java.util.List.class);
|
||||
return resp != null ? resp.size() : 0;
|
||||
} catch (Exception e) {
|
||||
log.warn("Environment count fetch failed for {}: {}", serverEndpoint, e.getMessage());
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/** Reset the built-in admin password on a tenant's server. */
|
||||
public void resetServerAdminPassword(String serverEndpoint, String newPassword) {
|
||||
RestClient.create(serverEndpoint)
|
||||
.post()
|
||||
.uri("/api/v1/admin/users/user:admin/password")
|
||||
.header("Authorization", "Bearer " + getAccessToken())
|
||||
.header("X-Cameleer-Protocol-Version", "1")
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(Map.of("password", newPassword))
|
||||
.retrieve()
|
||||
.toBodilessEntity();
|
||||
}
|
||||
|
||||
public record ServerHealthResponse(boolean healthy, String status) {}
|
||||
|
||||
private synchronized String getAccessToken() {
|
||||
if (cachedToken != null && Instant.now().isBefore(tokenExpiry.minusSeconds(60))) {
|
||||
return cachedToken;
|
||||
|
||||
@@ -52,6 +52,17 @@ public class LicenseService {
|
||||
return licenseRepository.findFirstByTenantIdAndRevokedAtIsNullOrderByCreatedAtDesc(tenantId);
|
||||
}
|
||||
|
||||
public void revokeLicense(UUID tenantId, UUID actorId) {
|
||||
licenseRepository.findFirstByTenantIdAndRevokedAtIsNullOrderByCreatedAtDesc(tenantId)
|
||||
.ifPresent(license -> {
|
||||
license.setRevokedAt(Instant.now());
|
||||
licenseRepository.save(license);
|
||||
auditService.log(actorId, null, tenantId,
|
||||
AuditAction.LICENSE_REVOKE, "license",
|
||||
null, null, null, Map.of("licenseId", license.getId().toString()));
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Verifies a license token by checking its existence and validity in the database.
|
||||
* Returns the license entity's metadata as a map if found and not expired/revoked,
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
package net.siegeln.cameleer.saas.license.dto;
|
||||
|
||||
import net.siegeln.cameleer.saas.license.LicenseEntity;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
@@ -13,4 +15,13 @@ public record LicenseResponse(
|
||||
Instant issuedAt,
|
||||
Instant expiresAt,
|
||||
String token
|
||||
) {}
|
||||
) {
|
||||
public static LicenseResponse from(LicenseEntity e) {
|
||||
return new LicenseResponse(
|
||||
e.getId(), e.getTenantId(), e.getTier(),
|
||||
e.getFeatures(), e.getLimits(),
|
||||
e.getIssuedAt(), e.getExpiresAt(),
|
||||
e.getToken()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,45 @@
|
||||
package net.siegeln.cameleer.saas.portal;
|
||||
|
||||
import net.siegeln.cameleer.saas.audit.AuditDto.AuditLogPage;
|
||||
import net.siegeln.cameleer.saas.audit.AuditService;
|
||||
import net.siegeln.cameleer.saas.config.TenantContext;
|
||||
import org.springframework.data.domain.PageRequest;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RequestParam;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.UUID;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/tenant/audit")
|
||||
public class TenantAuditController {
|
||||
|
||||
private final AuditService auditService;
|
||||
|
||||
public TenantAuditController(AuditService auditService) {
|
||||
this.auditService = auditService;
|
||||
}
|
||||
|
||||
@GetMapping
|
||||
public ResponseEntity<AuditLogPage> list(
|
||||
@RequestParam(required = false) String action,
|
||||
@RequestParam(required = false) String result,
|
||||
@RequestParam(required = false) String search,
|
||||
@RequestParam(required = false) Instant from,
|
||||
@RequestParam(required = false) Instant to,
|
||||
@RequestParam(defaultValue = "0") int page,
|
||||
@RequestParam(defaultValue = "25") int size) {
|
||||
|
||||
UUID tenantId = TenantContext.getTenantId();
|
||||
size = Math.min(size, 100);
|
||||
var pageResult = auditService.search(tenantId, action, result, from, to, search,
|
||||
PageRequest.of(page, size));
|
||||
|
||||
return ResponseEntity.ok(new AuditLogPage(
|
||||
pageResult.getContent(), pageResult.getNumber(), pageResult.getSize(),
|
||||
pageResult.getTotalElements(), pageResult.getTotalPages()));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,195 @@
|
||||
package net.siegeln.cameleer.saas.portal;
|
||||
|
||||
import net.siegeln.cameleer.saas.certificate.TenantCaCertEntity;
|
||||
import net.siegeln.cameleer.saas.certificate.TenantCaCertService;
|
||||
import net.siegeln.cameleer.saas.config.TenantContext;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.core.annotation.AuthenticationPrincipal;
|
||||
import org.springframework.security.oauth2.jwt.Jwt;
|
||||
import org.springframework.web.bind.annotation.DeleteMapping;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.PatchMapping;
|
||||
import org.springframework.web.bind.annotation.PathVariable;
|
||||
import org.springframework.web.bind.annotation.PostMapping;
|
||||
import org.springframework.web.bind.annotation.PutMapping;
|
||||
import org.springframework.web.bind.annotation.RequestBody;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RequestParam;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
import org.springframework.web.multipart.MultipartFile;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/tenant")
|
||||
public class TenantPortalController {
|
||||
|
||||
private final TenantPortalService portalService;
|
||||
private final TenantCaCertService caCertService;
|
||||
|
||||
public TenantPortalController(TenantPortalService portalService, TenantCaCertService caCertService) {
|
||||
this.portalService = portalService;
|
||||
this.caCertService = caCertService;
|
||||
}
|
||||
|
||||
// --- Request bodies ---
|
||||
|
||||
public record InviteRequest(String email, String roleId) {}
|
||||
|
||||
public record RoleChangeRequest(String roleId) {}
|
||||
|
||||
public record PasswordChangeRequest(String password) {}
|
||||
|
||||
// --- Endpoints ---
|
||||
|
||||
@GetMapping("/dashboard")
|
||||
public ResponseEntity<TenantPortalService.DashboardData> getDashboard() {
|
||||
return ResponseEntity.ok(portalService.getDashboard());
|
||||
}
|
||||
|
||||
@GetMapping("/license")
|
||||
public ResponseEntity<TenantPortalService.LicenseData> getLicense() {
|
||||
var license = portalService.getLicense();
|
||||
if (license == null) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
return ResponseEntity.ok(license);
|
||||
}
|
||||
|
||||
@GetMapping("/team")
|
||||
public ResponseEntity<List<Map<String, Object>>> listTeamMembers() {
|
||||
return ResponseEntity.ok(portalService.listTeamMembers());
|
||||
}
|
||||
|
||||
@PostMapping("/team/invite")
|
||||
public ResponseEntity<Map<String, String>> inviteTeamMember(@RequestBody InviteRequest body) {
|
||||
String userId = portalService.inviteTeamMember(body.email(), body.roleId());
|
||||
return ResponseEntity.ok(Map.of("userId", userId != null ? userId : ""));
|
||||
}
|
||||
|
||||
@DeleteMapping("/team/{userId}")
|
||||
public ResponseEntity<Void> removeTeamMember(@PathVariable String userId) {
|
||||
portalService.removeTeamMember(userId);
|
||||
return ResponseEntity.noContent().build();
|
||||
}
|
||||
|
||||
@PatchMapping("/team/{userId}/role")
|
||||
public ResponseEntity<Void> changeTeamMemberRole(@PathVariable String userId,
|
||||
@RequestBody RoleChangeRequest body) {
|
||||
portalService.changeTeamMemberRole(userId, body.roleId());
|
||||
return ResponseEntity.ok().build();
|
||||
}
|
||||
|
||||
@PostMapping("/server/admin-password")
|
||||
public ResponseEntity<Void> resetServerAdminPassword(@RequestBody PasswordChangeRequest body) {
|
||||
try {
|
||||
portalService.resetServerAdminPassword(body.password());
|
||||
return ResponseEntity.noContent().build();
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.badRequest().build();
|
||||
} catch (IllegalStateException e) {
|
||||
return ResponseEntity.badRequest().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/password")
|
||||
public ResponseEntity<Void> changeOwnPassword(@AuthenticationPrincipal Jwt jwt,
|
||||
@RequestBody PasswordChangeRequest body) {
|
||||
try {
|
||||
portalService.changePassword(jwt.getSubject(), body.password());
|
||||
return ResponseEntity.noContent().build();
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.badRequest().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/team/{userId}/password")
|
||||
public ResponseEntity<Void> resetTeamMemberPassword(@PathVariable String userId,
|
||||
@RequestBody PasswordChangeRequest body) {
|
||||
try {
|
||||
portalService.resetTeamMemberPassword(userId, body.password());
|
||||
return ResponseEntity.noContent().build();
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.badRequest().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/server/restart")
|
||||
public ResponseEntity<Void> restartServer() {
|
||||
portalService.restartServer();
|
||||
return ResponseEntity.noContent().build();
|
||||
}
|
||||
|
||||
@PostMapping("/server/upgrade")
|
||||
public ResponseEntity<Void> upgradeServer() {
|
||||
portalService.upgradeServer();
|
||||
return ResponseEntity.noContent().build();
|
||||
}
|
||||
|
||||
@GetMapping("/settings")
|
||||
public ResponseEntity<TenantPortalService.TenantSettingsData> getSettings() {
|
||||
return ResponseEntity.ok(portalService.getSettings());
|
||||
}
|
||||
|
||||
// --- CA Certificate management ---
|
||||
|
||||
public record CaCertResponse(
|
||||
UUID id, String status, String label, String subject, String issuer,
|
||||
String fingerprint, Instant notBefore, Instant notAfter, Instant createdAt
|
||||
) {
|
||||
public static CaCertResponse from(TenantCaCertEntity e) {
|
||||
return new CaCertResponse(
|
||||
e.getId(), e.getStatus().name(), e.getLabel(), e.getSubject(), e.getIssuer(),
|
||||
e.getFingerprint(), e.getNotBefore(), e.getNotAfter(), e.getCreatedAt()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/ca")
|
||||
public ResponseEntity<List<CaCertResponse>> listCaCerts() {
|
||||
UUID tenantId = TenantContext.getTenantId();
|
||||
return ResponseEntity.ok(
|
||||
caCertService.listForTenant(tenantId).stream().map(CaCertResponse::from).toList()
|
||||
);
|
||||
}
|
||||
|
||||
@PostMapping("/ca")
|
||||
public ResponseEntity<CaCertResponse> stageCaCert(
|
||||
@RequestParam("cert") MultipartFile certFile,
|
||||
@RequestParam(value = "label", required = false) String label) {
|
||||
try {
|
||||
UUID tenantId = TenantContext.getTenantId();
|
||||
var entity = caCertService.stage(tenantId, label, certFile.getBytes());
|
||||
return ResponseEntity.ok(CaCertResponse.from(entity));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.badRequest().build();
|
||||
} catch (Exception e) {
|
||||
return ResponseEntity.internalServerError().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/ca/{id}/activate")
|
||||
public ResponseEntity<CaCertResponse> activateCaCert(@PathVariable UUID id) {
|
||||
try {
|
||||
UUID tenantId = TenantContext.getTenantId();
|
||||
var entity = caCertService.activate(tenantId, id);
|
||||
return ResponseEntity.ok(CaCertResponse.from(entity));
|
||||
} catch (IllegalArgumentException | IllegalStateException e) {
|
||||
return ResponseEntity.badRequest().build();
|
||||
}
|
||||
}
|
||||
|
||||
@DeleteMapping("/ca/{id}")
|
||||
public ResponseEntity<Void> deleteCaCert(@PathVariable UUID id) {
|
||||
try {
|
||||
UUID tenantId = TenantContext.getTenantId();
|
||||
caCertService.delete(tenantId, id);
|
||||
return ResponseEntity.noContent().build();
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,260 @@
|
||||
package net.siegeln.cameleer.saas.portal;
|
||||
|
||||
import net.siegeln.cameleer.saas.config.TenantContext;
|
||||
import net.siegeln.cameleer.saas.identity.LogtoManagementClient;
|
||||
import net.siegeln.cameleer.saas.identity.ServerApiClient;
|
||||
import net.siegeln.cameleer.saas.license.LicenseEntity;
|
||||
import net.siegeln.cameleer.saas.license.LicenseService;
|
||||
import net.siegeln.cameleer.saas.provisioning.ProvisioningProperties;
|
||||
import net.siegeln.cameleer.saas.provisioning.TenantProvisioner;
|
||||
import net.siegeln.cameleer.saas.tenant.TenantEntity;
|
||||
import net.siegeln.cameleer.saas.tenant.TenantService;
|
||||
import net.siegeln.cameleer.saas.vendor.VendorTenantService;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.context.annotation.Lazy;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.time.temporal.ChronoUnit;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
@Service
|
||||
public class TenantPortalService {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(TenantPortalService.class);
|
||||
|
||||
private final TenantService tenantService;
|
||||
private final LicenseService licenseService;
|
||||
private final ServerApiClient serverApiClient;
|
||||
private final LogtoManagementClient logtoClient;
|
||||
private final TenantProvisioner tenantProvisioner;
|
||||
private final ProvisioningProperties provisioningProps;
|
||||
private final VendorTenantService vendorTenantService;
|
||||
|
||||
// VendorTenantService is injected @Lazy to break a constructor cycle between
// the portal and vendor services.
public TenantPortalService(TenantService tenantService,
        LicenseService licenseService,
        ServerApiClient serverApiClient,
        LogtoManagementClient logtoClient,
        TenantProvisioner tenantProvisioner,
        ProvisioningProperties provisioningProps,
        @Lazy VendorTenantService vendorTenantService) {
    this.vendorTenantService = vendorTenantService;
    this.provisioningProps = provisioningProps;
    this.tenantProvisioner = tenantProvisioner;
    this.logtoClient = logtoClient;
    this.serverApiClient = serverApiClient;
    this.licenseService = licenseService;
    this.tenantService = tenantService;
}
|
||||
|
||||
// --- Inner records ---
|
||||
|
||||
/**
 * Aggregated dashboard view: tenant identity, live server health,
 * license summary, and basic usage counters.
 */
public record DashboardData(
    String name, String slug, String tier, String status,
    boolean serverHealthy, String serverStatus, String serverEndpoint,
    String licenseTier, long licenseDaysRemaining,
    Map<String, Object> limits, Map<String, Object> features,
    int agentCount, int environmentCount
) {}
|
||||
|
||||
/** Active-license details exposed to the tenant portal, including the raw token. */
public record LicenseData(
    UUID id, String tier, Map<String, Object> features, Map<String, Object> limits,
    Instant issuedAt, Instant expiresAt, String token, long daysRemaining
) {}
|
||||
|
||||
/** Read-only tenant settings shown on the portal settings page. */
public record TenantSettingsData(
    String name, String slug, String tier, String status,
    String serverEndpoint, Instant createdAt
) {}
|
||||
|
||||
// --- Helpers ---
|
||||
|
||||
private TenantEntity resolveTenant() {
|
||||
UUID tenantId = TenantContext.getTenantId();
|
||||
return tenantService.getById(tenantId)
|
||||
.orElseThrow(() -> new IllegalStateException("Tenant not found: " + tenantId));
|
||||
}
|
||||
|
||||
private long daysUntil(Instant instant) {
|
||||
if (instant == null) return 0;
|
||||
long days = ChronoUnit.DAYS.between(Instant.now(), instant);
|
||||
return Math.max(0, days);
|
||||
}
|
||||
|
||||
// --- Service methods ---
|
||||
|
||||
public DashboardData getDashboard() {
|
||||
TenantEntity tenant = resolveTenant();
|
||||
String endpoint = tenant.getServerEndpoint();
|
||||
|
||||
boolean serverHealthy = false;
|
||||
String serverStatus = "NO_ENDPOINT";
|
||||
int agentCount = 0;
|
||||
int environmentCount = 0;
|
||||
if (endpoint != null && !endpoint.isBlank()) {
|
||||
var health = serverApiClient.getHealth(endpoint);
|
||||
serverHealthy = health.healthy();
|
||||
serverStatus = health.status();
|
||||
if (serverHealthy) {
|
||||
agentCount = serverApiClient.getAgentCount(endpoint);
|
||||
environmentCount = serverApiClient.getEnvironmentCount(endpoint);
|
||||
}
|
||||
}
|
||||
|
||||
String licenseTier = null;
|
||||
long licenseDaysRemaining = 0;
|
||||
Map<String, Object> limits = Map.of();
|
||||
Map<String, Object> features = Map.of();
|
||||
|
||||
var licenseOpt = licenseService.getActiveLicense(tenant.getId());
|
||||
if (licenseOpt.isPresent()) {
|
||||
LicenseEntity lic = licenseOpt.get();
|
||||
licenseTier = lic.getTier();
|
||||
licenseDaysRemaining = daysUntil(lic.getExpiresAt());
|
||||
limits = lic.getLimits() != null ? lic.getLimits() : Map.of();
|
||||
features = lic.getFeatures() != null ? lic.getFeatures() : Map.of();
|
||||
}
|
||||
|
||||
return new DashboardData(
|
||||
tenant.getName(), tenant.getSlug(),
|
||||
tenant.getTier().name(), tenant.getStatus().name(),
|
||||
serverHealthy, serverStatus, endpoint,
|
||||
licenseTier, licenseDaysRemaining,
|
||||
limits, features, agentCount, environmentCount
|
||||
);
|
||||
}
|
||||
|
||||
public LicenseData getLicense() {
|
||||
TenantEntity tenant = resolveTenant();
|
||||
return licenseService.getActiveLicense(tenant.getId())
|
||||
.map(lic -> new LicenseData(
|
||||
lic.getId(), lic.getTier(),
|
||||
lic.getFeatures() != null ? lic.getFeatures() : Map.of(),
|
||||
lic.getLimits() != null ? lic.getLimits() : Map.of(),
|
||||
lic.getIssuedAt(), lic.getExpiresAt(),
|
||||
lic.getToken(), daysUntil(lic.getExpiresAt())
|
||||
))
|
||||
.orElse(null);
|
||||
}
|
||||
|
||||
public List<Map<String, Object>> listTeamMembers() {
|
||||
TenantEntity tenant = resolveTenant();
|
||||
String orgId = tenant.getLogtoOrgId();
|
||||
if (orgId == null || orgId.isBlank()) {
|
||||
return List.of();
|
||||
}
|
||||
return logtoClient.listOrganizationMembers(orgId);
|
||||
}
|
||||
|
||||
public String inviteTeamMember(String email, String roleId) {
|
||||
TenantEntity tenant = resolveTenant();
|
||||
String orgId = tenant.getLogtoOrgId();
|
||||
if (orgId == null || orgId.isBlank()) {
|
||||
throw new IllegalStateException("Tenant has no Logto organization configured");
|
||||
}
|
||||
return logtoClient.createAndInviteUser(email, orgId, roleId);
|
||||
}
|
||||
|
||||
public void removeTeamMember(String userId) {
|
||||
TenantEntity tenant = resolveTenant();
|
||||
String orgId = tenant.getLogtoOrgId();
|
||||
if (orgId == null || orgId.isBlank()) {
|
||||
throw new IllegalStateException("Tenant has no Logto organization configured");
|
||||
}
|
||||
logtoClient.removeUserFromOrganization(orgId, userId);
|
||||
}
|
||||
|
||||
public void changeTeamMemberRole(String userId, String roleId) {
|
||||
TenantEntity tenant = resolveTenant();
|
||||
String orgId = tenant.getLogtoOrgId();
|
||||
if (orgId == null || orgId.isBlank()) {
|
||||
throw new IllegalStateException("Tenant has no Logto organization configured");
|
||||
}
|
||||
logtoClient.assignOrganizationRole(orgId, userId, roleId);
|
||||
}
|
||||
|
||||
public void resetServerAdminPassword(String newPassword) {
|
||||
TenantEntity tenant = resolveTenant();
|
||||
String endpoint = tenant.getServerEndpoint();
|
||||
if (endpoint == null || endpoint.isBlank()) {
|
||||
throw new IllegalStateException("Server not provisioned yet");
|
||||
}
|
||||
if (newPassword == null || newPassword.length() < 8) {
|
||||
throw new IllegalArgumentException("Password must be at least 8 characters");
|
||||
}
|
||||
serverApiClient.resetServerAdminPassword(endpoint, newPassword);
|
||||
}
|
||||
|
||||
public void changePassword(String userId, String newPassword) {
|
||||
if (newPassword == null || newPassword.length() < 8) {
|
||||
throw new IllegalArgumentException("Password must be at least 8 characters");
|
||||
}
|
||||
logtoClient.updateUserPassword(userId, newPassword);
|
||||
}
|
||||
|
||||
public void resetTeamMemberPassword(String userId, String newPassword) {
|
||||
TenantEntity tenant = resolveTenant();
|
||||
String orgId = tenant.getLogtoOrgId();
|
||||
if (orgId == null || orgId.isBlank()) {
|
||||
throw new IllegalStateException("Tenant has no Logto organization configured");
|
||||
}
|
||||
// Verify the target user belongs to this tenant's org
|
||||
var members = logtoClient.listOrganizationMembers(orgId);
|
||||
boolean isMember = members.stream()
|
||||
.anyMatch(m -> userId.equals(String.valueOf(m.get("id"))));
|
||||
if (!isMember) {
|
||||
throw new IllegalArgumentException("User is not a member of this organization");
|
||||
}
|
||||
if (newPassword == null || newPassword.length() < 8) {
|
||||
throw new IllegalArgumentException("Password must be at least 8 characters");
|
||||
}
|
||||
logtoClient.updateUserPassword(userId, newPassword);
|
||||
}
|
||||
|
||||
public TenantSettingsData getSettings() {
|
||||
TenantEntity tenant = resolveTenant();
|
||||
String publicEndpoint = provisioningProps.publicProtocol() + "://"
|
||||
+ provisioningProps.publicHost() + "/t/" + tenant.getSlug() + "/";
|
||||
return new TenantSettingsData(
|
||||
tenant.getName(), tenant.getSlug(),
|
||||
tenant.getTier().name(), tenant.getStatus().name(),
|
||||
publicEndpoint, tenant.getCreatedAt()
|
||||
);
|
||||
}
|
||||
|
||||
/**
 * Restarts the current tenant's server containers (stop, then start).
 * If the start fails because the containers no longer exist, the tenant is
 * fully removed and re-provisioned asynchronously instead.
 * No-op when no provisioner backend is available.
 */
public void restartServer() {
    TenantEntity tenant = resolveTenant();
    if (!tenantProvisioner.isAvailable()) return;

    tenantProvisioner.stop(tenant.getSlug());
    try {
        tenantProvisioner.start(tenant.getSlug());
    } catch (RuntimeException e) {
        // NOTE(review): recovery path is selected by matching the exception
        // *message* for "re-provision required" — fragile if the provisioner's
        // wording changes; a dedicated exception type would be safer. Confirm.
        if (e.getMessage() != null && e.getMessage().contains("re-provision required")) {
            log.info("Containers missing for '{}' — re-provisioning", tenant.getSlug());
            // Tear down any leftovers, then kick off a full async re-provision
            // using the tenant's current license token (empty if none active).
            tenantProvisioner.remove(tenant.getSlug());
            var license = licenseService.getActiveLicense(tenant.getId()).orElse(null);
            String token = license != null ? license.getToken() : "";
            vendorTenantService.provisionAsync(
                tenant.getId(), tenant.getSlug(), tenant.getTier().name(), token, null);
            return;
        }
        // Any other failure propagates to the caller unchanged.
        throw e;
    }
}
|
||||
|
||||
public void upgradeServer() {
|
||||
TenantEntity tenant = resolveTenant();
|
||||
if (!tenantProvisioner.isAvailable()) return;
|
||||
|
||||
tenantProvisioner.upgrade(tenant.getSlug());
|
||||
|
||||
var license = licenseService.getActiveLicense(tenant.getId()).orElse(null);
|
||||
String token = license != null ? license.getToken() : "";
|
||||
vendorTenantService.provisionAsync(
|
||||
tenant.getId(), tenant.getSlug(), tenant.getTier().name(), token, null);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,68 @@
|
||||
package net.siegeln.cameleer.saas.portal;
|
||||
|
||||
import org.springframework.http.HttpStatus;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.web.bind.annotation.DeleteMapping;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.PatchMapping;
|
||||
import org.springframework.web.bind.annotation.PathVariable;
|
||||
import org.springframework.web.bind.annotation.PostMapping;
|
||||
import org.springframework.web.bind.annotation.RequestBody;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/tenant/sso")
|
||||
public class TenantSsoController {
|
||||
|
||||
private final TenantSsoService ssoService;
|
||||
|
||||
public TenantSsoController(TenantSsoService ssoService) {
|
||||
this.ssoService = ssoService;
|
||||
}
|
||||
|
||||
public record CreateSsoConnectorRequest(
|
||||
String providerName,
|
||||
String connectorName,
|
||||
Map<String, Object> config,
|
||||
List<String> domains
|
||||
) {}
|
||||
|
||||
@GetMapping
|
||||
public ResponseEntity<List<Map<String, Object>>> list() {
|
||||
return ResponseEntity.ok(ssoService.listConnectors());
|
||||
}
|
||||
|
||||
@PostMapping
|
||||
public ResponseEntity<Map<String, Object>> create(@RequestBody CreateSsoConnectorRequest request) {
|
||||
var connector = ssoService.createConnector(
|
||||
request.providerName(), request.connectorName(),
|
||||
request.config(), request.domains());
|
||||
return ResponseEntity.status(HttpStatus.CREATED).body(connector);
|
||||
}
|
||||
|
||||
@GetMapping("/{connectorId}")
|
||||
public ResponseEntity<Map<String, Object>> get(@PathVariable String connectorId) {
|
||||
return ResponseEntity.ok(ssoService.getConnector(connectorId));
|
||||
}
|
||||
|
||||
@PatchMapping("/{connectorId}")
|
||||
public ResponseEntity<Map<String, Object>> update(@PathVariable String connectorId,
|
||||
@RequestBody Map<String, Object> updates) {
|
||||
return ResponseEntity.ok(ssoService.updateConnector(connectorId, updates));
|
||||
}
|
||||
|
||||
@DeleteMapping("/{connectorId}")
|
||||
public ResponseEntity<Void> delete(@PathVariable String connectorId) {
|
||||
ssoService.deleteConnector(connectorId);
|
||||
return ResponseEntity.noContent().build();
|
||||
}
|
||||
|
||||
@PostMapping("/{connectorId}/test")
|
||||
public ResponseEntity<Map<String, Object>> test(@PathVariable String connectorId) {
|
||||
return ResponseEntity.ok(ssoService.testConnector(connectorId));
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,124 @@
|
||||
package net.siegeln.cameleer.saas.portal;
|
||||
|
||||
import net.siegeln.cameleer.saas.config.TenantContext;
|
||||
import net.siegeln.cameleer.saas.identity.LogtoManagementClient;
|
||||
import net.siegeln.cameleer.saas.tenant.TenantEntity;
|
||||
import net.siegeln.cameleer.saas.tenant.TenantService;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.web.server.ResponseStatusException;
|
||||
import org.springframework.http.HttpStatus;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@Service
|
||||
public class TenantSsoService {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(TenantSsoService.class);
|
||||
|
||||
private final LogtoManagementClient logtoClient;
|
||||
private final TenantService tenantService;
|
||||
|
||||
public TenantSsoService(LogtoManagementClient logtoClient, TenantService tenantService) {
|
||||
this.logtoClient = logtoClient;
|
||||
this.tenantService = tenantService;
|
||||
}
|
||||
|
||||
/** List SSO connectors linked to the current tenant's organization. */
|
||||
@SuppressWarnings("unchecked")
|
||||
public List<Map<String, Object>> listConnectors() {
|
||||
String orgId = resolveOrgId();
|
||||
List<Map<String, Object>> jitConnectors = logtoClient.getOrgJitSsoConnectors(orgId);
|
||||
Set<String> linkedIds = jitConnectors.stream()
|
||||
.map(c -> String.valueOf(c.get("id")))
|
||||
.collect(Collectors.toSet());
|
||||
|
||||
if (linkedIds.isEmpty()) return List.of();
|
||||
|
||||
// Enrich with full connector details
|
||||
List<Map<String, Object>> allConnectors = logtoClient.listSsoConnectors();
|
||||
return allConnectors.stream()
|
||||
.filter(c -> linkedIds.contains(String.valueOf(c.get("id"))))
|
||||
.toList();
|
||||
}
|
||||
|
||||
/** Create an SSO connector and link it to the tenant's organization. */
|
||||
public Map<String, Object> createConnector(String providerName, String connectorName,
|
||||
Map<String, Object> config, List<String> domains) {
|
||||
String orgId = resolveOrgId();
|
||||
var connector = logtoClient.createSsoConnector(providerName, connectorName, config, domains);
|
||||
if (connector == null) {
|
||||
throw new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR, "Failed to create SSO connector");
|
||||
}
|
||||
String connectorId = String.valueOf(connector.get("id"));
|
||||
logtoClient.linkSsoConnectorToOrg(orgId, connectorId);
|
||||
log.info("Created SSO connector '{}' ({}) and linked to org {}", connectorName, connectorId, orgId);
|
||||
return connector;
|
||||
}
|
||||
|
||||
/** Get a single SSO connector (validates it belongs to this tenant). */
|
||||
public Map<String, Object> getConnector(String connectorId) {
|
||||
validateConnectorBelongsToTenant(connectorId);
|
||||
return logtoClient.getSsoConnector(connectorId);
|
||||
}
|
||||
|
||||
/** Update an SSO connector (validates it belongs to this tenant). */
|
||||
public Map<String, Object> updateConnector(String connectorId, Map<String, Object> updates) {
|
||||
validateConnectorBelongsToTenant(connectorId);
|
||||
return logtoClient.updateSsoConnector(connectorId, updates);
|
||||
}
|
||||
|
||||
/** Delete an SSO connector (unlinks from org and deletes). */
|
||||
public void deleteConnector(String connectorId) {
|
||||
String orgId = resolveOrgId();
|
||||
validateConnectorBelongsToTenant(connectorId);
|
||||
logtoClient.unlinkSsoConnectorFromOrg(orgId, connectorId);
|
||||
logtoClient.deleteSsoConnector(connectorId);
|
||||
log.info("Deleted SSO connector {} from org {}", connectorId, orgId);
|
||||
}
|
||||
|
||||
/** Test an SSO connector by fetching its details (validates provider metadata). */
|
||||
public Map<String, Object> testConnector(String connectorId) {
|
||||
validateConnectorBelongsToTenant(connectorId);
|
||||
var connector = logtoClient.getSsoConnector(connectorId);
|
||||
if (connector == null) {
|
||||
throw new ResponseStatusException(HttpStatus.NOT_FOUND, "Connector not found");
|
||||
}
|
||||
// Logto resolves providerConfig (OIDC discovery / SAML metadata) when fetching.
|
||||
// If providerConfig is present and non-empty, the IdP is reachable.
|
||||
@SuppressWarnings("unchecked")
|
||||
var providerConfig = (Map<String, Object>) connector.get("providerConfig");
|
||||
boolean reachable = providerConfig != null && !providerConfig.isEmpty();
|
||||
return Map.of(
|
||||
"status", reachable ? "ok" : "unreachable",
|
||||
"providerName", String.valueOf(connector.get("providerName")),
|
||||
"connectorName", String.valueOf(connector.get("connectorName"))
|
||||
);
|
||||
}
|
||||
|
||||
private String resolveOrgId() {
|
||||
UUID tenantId = TenantContext.getTenantId();
|
||||
TenantEntity tenant = tenantService.getById(tenantId)
|
||||
.orElseThrow(() -> new ResponseStatusException(HttpStatus.NOT_FOUND, "Tenant not found"));
|
||||
String orgId = tenant.getLogtoOrgId();
|
||||
if (orgId == null || orgId.isBlank()) {
|
||||
throw new ResponseStatusException(HttpStatus.PRECONDITION_FAILED, "Tenant has no Logto organization");
|
||||
}
|
||||
return orgId;
|
||||
}
|
||||
|
||||
private void validateConnectorBelongsToTenant(String connectorId) {
|
||||
String orgId = resolveOrgId();
|
||||
List<Map<String, Object>> jitConnectors = logtoClient.getOrgJitSsoConnectors(orgId);
|
||||
boolean linked = jitConnectors.stream()
|
||||
.anyMatch(c -> connectorId.equals(String.valueOf(c.get("id"))));
|
||||
if (!linked) {
|
||||
throw new ResponseStatusException(HttpStatus.FORBIDDEN, "SSO connector does not belong to this tenant");
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,29 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
import net.siegeln.cameleer.saas.certificate.CertificateManager;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
|
||||
@Configuration
|
||||
public class CertificateManagerAutoConfig {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(CertificateManagerAutoConfig.class);
|
||||
|
||||
@Bean
|
||||
CertificateManager certificateManager(
|
||||
@Value("${cameleer.saas.certs.path:/certs}") String certsPath) {
|
||||
Path path = Path.of(certsPath);
|
||||
if (Files.isDirectory(path)) {
|
||||
log.info("Certs directory found at {} — enabling Docker certificate manager", certsPath);
|
||||
return new DockerCertificateManager(path);
|
||||
}
|
||||
log.info("No certs directory at {} — certificate management disabled", certsPath);
|
||||
return new DisabledCertificateManager();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,45 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
import net.siegeln.cameleer.saas.certificate.CertificateInfo;
|
||||
import net.siegeln.cameleer.saas.certificate.CertificateManager;
|
||||
import net.siegeln.cameleer.saas.certificate.CertValidationResult;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* No-op certificate manager when certs directory is not available.
|
||||
*/
|
||||
public class DisabledCertificateManager implements CertificateManager {
|
||||
|
||||
@Override
|
||||
public boolean isAvailable() { return false; }
|
||||
|
||||
@Override
|
||||
public CertificateInfo getActive() { return null; }
|
||||
|
||||
@Override
|
||||
public CertificateInfo getStaged() { return null; }
|
||||
|
||||
@Override
|
||||
public CertificateInfo getArchived() { return null; }
|
||||
|
||||
@Override
|
||||
public CertValidationResult stage(byte[] certPem, byte[] keyPem, byte[] caBundlePem, String keyPassword) {
|
||||
return CertValidationResult.fail(List.of("Certificate management is not available"));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void activate() {}
|
||||
|
||||
@Override
|
||||
public void restore() {}
|
||||
|
||||
@Override
|
||||
public void discardStaged() {}
|
||||
|
||||
@Override
|
||||
public void generateSelfSigned(String hostname) {}
|
||||
|
||||
@Override
|
||||
public byte[] getCaBundle() { return null; }
|
||||
}
|
||||
@@ -0,0 +1,20 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
public class DisabledTenantProvisioner implements TenantProvisioner {
|
||||
private static final Logger log = LoggerFactory.getLogger(DisabledTenantProvisioner.class);
|
||||
|
||||
@Override public boolean isAvailable() { return false; }
|
||||
@Override public ProvisionResult provision(TenantProvisionRequest request) {
|
||||
log.warn("Provisioning disabled — no Docker socket or K8s detected");
|
||||
return ProvisionResult.fail("Provisioning not available");
|
||||
}
|
||||
@Override public void start(String slug) { log.warn("Cannot start: provisioning disabled"); }
|
||||
@Override public void stop(String slug) { log.warn("Cannot stop: provisioning disabled"); }
|
||||
@Override public void remove(String slug) { log.warn("Cannot remove: provisioning disabled"); }
|
||||
@Override public void upgrade(String slug) { log.warn("Cannot upgrade: provisioning disabled"); }
|
||||
@Override public ServerStatus getStatus(String slug) { return ServerStatus.notFound(); }
|
||||
@Override public String getServerEndpoint(String slug) { return null; }
|
||||
}
|
||||
@@ -0,0 +1,388 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
import net.siegeln.cameleer.saas.certificate.CertificateInfo;
|
||||
import net.siegeln.cameleer.saas.certificate.CertificateManager;
|
||||
import net.siegeln.cameleer.saas.certificate.CertValidationResult;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
import java.security.KeyFactory;
|
||||
import java.security.MessageDigest;
|
||||
import java.security.cert.CertificateFactory;
|
||||
import java.security.cert.X509Certificate;
|
||||
import java.security.interfaces.RSAPrivateKey;
|
||||
import java.security.interfaces.RSAPublicKey;
|
||||
import java.security.spec.PKCS8EncodedKeySpec;
|
||||
import java.time.Instant;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Base64;
|
||||
import java.util.HexFormat;
|
||||
import java.util.List;
|
||||
|
||||
/**
 * File-system backed {@link CertificateManager} for Docker deployments.
 *
 * Layout inside {@code certsDir}:
 *   cert.pem / key.pem / ca.pem          — active certificate material
 *   staged/…                             — validated-but-not-yet-active upload
 *   prev/…                               — archive of the previously active set
 *
 * Writes go through a ".wip" temp file followed by an atomic rename so a
 * reader (e.g. Traefik watching the directory) never sees a half-written file.
 */
public class DockerCertificateManager implements CertificateManager {

    private static final Logger log = LoggerFactory.getLogger(DockerCertificateManager.class);

    // Root directory holding active, staged/, and prev/ certificate files.
    private final Path certsDir;

    public DockerCertificateManager(Path certsDir) {
        this.certsDir = certsDir;
    }

    /** Root certs directory (exposed for tests/diagnostics). */
    public Path getCertsDir() {
        return certsDir;
    }

    /** Available only when the certs directory exists and is writable. */
    @Override
    public boolean isAvailable() {
        return Files.isDirectory(certsDir) && Files.isWritable(certsDir);
    }

    /** Info about the active certificate, or null if none is installed. */
    @Override
    public CertificateInfo getActive() {
        return readCertInfo(certsDir.resolve("cert.pem"), certsDir.resolve("ca.pem"));
    }

    /** Info about the staged (uploaded, not yet activated) certificate, or null. */
    @Override
    public CertificateInfo getStaged() {
        return readCertInfo(certsDir.resolve("staged/cert.pem"), certsDir.resolve("staged/ca.pem"));
    }

    /** Info about the archived (previously active) certificate, or null. */
    @Override
    public CertificateInfo getArchived() {
        return readCertInfo(certsDir.resolve("prev/cert.pem"), certsDir.resolve("prev/ca.pem"));
    }

    /**
     * Validates the uploaded cert/key/CA material and, if valid, writes it to
     * the staged/ directory. All validation errors are accumulated and returned
     * together rather than failing on the first one (except an unparseable
     * certificate, which aborts immediately since later checks need it).
     *
     * @param certPem     leaf certificate, PEM
     * @param keyPem      private key, PEM; may be an encrypted PKCS#8 key
     * @param caBundlePem optional CA chain, PEM; may be null/empty
     * @param keyPassword password for an encrypted key; ignored otherwise
     */
    @Override
    public CertValidationResult stage(byte[] certPem, byte[] keyPem, byte[] caBundlePem, String keyPassword) {
        List<String> errors = new ArrayList<>();

        // Parse certificate — abort early; the key/cert match check below needs it.
        X509Certificate cert;
        try {
            cert = parseCertificate(certPem);
        } catch (Exception e) {
            errors.add("Invalid certificate PEM: " + e.getMessage());
            return CertValidationResult.fail(errors);
        }

        // Parse private key (may be password-protected).
        java.security.PrivateKey privateKey;
        byte[] decryptedKeyPem = keyPem;
        try {
            privateKey = parsePrivateKey(keyPem, keyPassword);
            // Re-encode as unencrypted PKCS8 PEM for Traefik (which needs cleartext).
            if (isEncryptedKey(keyPem)) {
                decryptedKeyPem = encodePrivateKeyPem(privateKey);
            }
            // Verify key matches cert by comparing RSA moduli.
            // NOTE(review): this match check only runs for RSA key pairs; other
            // key types (EC etc.) skip it silently — confirm that is intended.
            if (cert.getPublicKey() instanceof RSAPublicKey rsaPub
                    && privateKey instanceof RSAPrivateKey rsaPriv) {
                if (!rsaPub.getModulus().equals(rsaPriv.getModulus())) {
                    errors.add("Private key does not match certificate");
                }
            }
        } catch (Exception e) {
            // privateKey stays unassigned here, but it is never read afterwards:
            // errors is non-empty, so we return before any use.
            errors.add("Invalid private key PEM: " + e.getMessage());
        }

        // Parse CA bundle if provided.
        if (caBundlePem != null && caBundlePem.length > 0) {
            try {
                var certs = parseCertificates(caBundlePem);
                if (certs.isEmpty()) {
                    errors.add("CA bundle contains no valid certificates");
                }
            } catch (Exception e) {
                errors.add("Invalid CA bundle PEM: " + e.getMessage());
            }
        }

        if (!errors.isEmpty()) {
            return CertValidationResult.fail(errors);
        }

        // Write to staged directory; delete any leftover ca.pem from a prior
        // staging attempt when this upload has no CA bundle.
        try {
            Path stagedDir = certsDir.resolve("staged");
            Files.createDirectories(stagedDir);

            writeAtomic(stagedDir.resolve("cert.pem"), certPem);
            writeAtomicRestricted(stagedDir.resolve("key.pem"), decryptedKeyPem);
            if (caBundlePem != null && caBundlePem.length > 0) {
                writeAtomic(stagedDir.resolve("ca.pem"), caBundlePem);
            } else {
                Files.deleteIfExists(stagedDir.resolve("ca.pem"));
            }
        } catch (IOException e) {
            log.error("Failed to write staged certificate files", e);
            return CertValidationResult.fail(List.of("Failed to write staged files: " + e.getMessage()));
        }

        var info = toCertInfo(cert, caBundlePem != null && caBundlePem.length > 0, false);
        return CertValidationResult.ok(info);
    }

    /**
     * Promotes the staged certificate to active, archiving the current active
     * set to prev/. Statement order is load-bearing: archive first, then
     * overwrite active, then delete staged.
     */
    @Override
    public void activate() {
        try {
            Path stagedDir = certsDir.resolve("staged");
            Path prevDir = certsDir.resolve("prev");

            if (!Files.exists(stagedDir.resolve("cert.pem"))) {
                throw new IllegalStateException("No staged certificate to activate");
            }

            // Delete existing archive — only one generation of history is kept.
            deleteDirectory(prevDir);

            // Move current active -> prev (archive). Note cert/key are *moved*
            // but ca.pem is *copied*.
            // NOTE(review): if the staged set has no ca.pem, the old active
            // ca.pem is left in place next to the new cert (a possibly stale
            // CA chain) — restore() deletes it in the analogous case. Confirm
            // whether activate() should delete it too.
            if (Files.exists(certsDir.resolve("cert.pem"))) {
                Files.createDirectories(prevDir);
                moveFile(certsDir.resolve("cert.pem"), prevDir.resolve("cert.pem"));
                moveFile(certsDir.resolve("key.pem"), prevDir.resolve("key.pem"));
                if (Files.exists(certsDir.resolve("ca.pem"))) {
                    Files.copy(certsDir.resolve("ca.pem"), prevDir.resolve("ca.pem"));
                }
            }

            // Move staged -> active (atomic swap via .wip).
            writeAtomic(certsDir.resolve("cert.pem"), Files.readAllBytes(stagedDir.resolve("cert.pem")));
            writeAtomicRestricted(certsDir.resolve("key.pem"), Files.readAllBytes(stagedDir.resolve("key.pem")));
            if (Files.exists(stagedDir.resolve("ca.pem"))) {
                writeAtomic(certsDir.resolve("ca.pem"), Files.readAllBytes(stagedDir.resolve("ca.pem")));
            }

            // Clean up staged.
            deleteDirectory(stagedDir);
            log.info("Certificate activated successfully");
        } catch (IOException e) {
            throw new RuntimeException("Failed to activate certificate", e);
        }
    }

    /**
     * Swaps the active certificate set with the archived one in prev/ using a
     * temporary directory, so a failed restore can still be recovered manually.
     */
    @Override
    public void restore() {
        try {
            Path prevDir = certsDir.resolve("prev");
            if (!Files.exists(prevDir.resolve("cert.pem"))) {
                throw new IllegalStateException("No archived certificate to restore");
            }

            // Swap: active <-> prev using a temp dir.
            Path tempDir = certsDir.resolve("swap-tmp");
            Files.createDirectories(tempDir);

            // Current active -> temp.
            moveFile(certsDir.resolve("cert.pem"), tempDir.resolve("cert.pem"));
            moveFile(certsDir.resolve("key.pem"), tempDir.resolve("key.pem"));
            if (Files.exists(certsDir.resolve("ca.pem"))) {
                moveFile(certsDir.resolve("ca.pem"), tempDir.resolve("ca.pem"));
            }

            // Prev -> active; when the archive has no ca.pem, remove any
            // lingering active one so the restored set is self-consistent.
            writeAtomic(certsDir.resolve("cert.pem"), Files.readAllBytes(prevDir.resolve("cert.pem")));
            writeAtomicRestricted(certsDir.resolve("key.pem"), Files.readAllBytes(prevDir.resolve("key.pem")));
            if (Files.exists(prevDir.resolve("ca.pem"))) {
                writeAtomic(certsDir.resolve("ca.pem"), Files.readAllBytes(prevDir.resolve("ca.pem")));
            } else {
                Files.deleteIfExists(certsDir.resolve("ca.pem"));
            }

            // Temp -> prev, so a second restore() swaps back again.
            deleteDirectory(prevDir);
            Files.createDirectories(prevDir);
            moveFile(tempDir.resolve("cert.pem"), prevDir.resolve("cert.pem"));
            moveFile(tempDir.resolve("key.pem"), prevDir.resolve("key.pem"));
            if (Files.exists(tempDir.resolve("ca.pem"))) {
                moveFile(tempDir.resolve("ca.pem"), prevDir.resolve("ca.pem"));
            }

            deleteDirectory(tempDir);
            log.info("Certificate restored from archive");
        } catch (IOException e) {
            throw new RuntimeException("Failed to restore certificate", e);
        }
    }

    /** Deletes the staged/ directory, abandoning an uploaded-but-unactivated cert. */
    @Override
    public void discardStaged() {
        try {
            deleteDirectory(certsDir.resolve("staged"));
            log.info("Staged certificate discarded");
        } catch (IOException e) {
            throw new RuntimeException("Failed to discard staged certificate", e);
        }
    }

    /**
     * Intentionally does nothing beyond logging: self-signed generation is
     * normally handled by the traefik-certs init container. (The catch block
     * is effectively unreachable since only a log call is wrapped.)
     */
    @Override
    public void generateSelfSigned(String hostname) {
        try {
            // Use keytool to generate a self-signed cert, then export to PEM.
            // This is a fallback — the init container normally handles this.
            log.warn("generateSelfSigned called on DockerCertificateManager — " +
                "this is typically handled by the traefik-certs init container");
        } catch (Exception e) {
            throw new RuntimeException("Failed to generate self-signed certificate", e);
        }
    }

    /** Raw bytes of the active CA bundle, or null when absent/unreadable. */
    @Override
    public byte[] getCaBundle() {
        try {
            Path caPath = certsDir.resolve("ca.pem");
            return Files.exists(caPath) ? Files.readAllBytes(caPath) : null;
        } catch (IOException e) {
            log.warn("Failed to read CA bundle: {}", e.getMessage());
            return null;
        }
    }

    // --- Internal helpers ---

    /**
     * Parses the cert at {@code certPath} into a {@link CertificateInfo};
     * returns null when the file is missing or unparseable (logged, not thrown).
     */
    private CertificateInfo readCertInfo(Path certPath, Path caPath) {
        if (!Files.exists(certPath)) return null;
        try {
            byte[] certBytes = Files.readAllBytes(certPath);
            X509Certificate cert = parseCertificate(certBytes);
            boolean hasCa = Files.exists(caPath);
            // Self-signed when issuer and subject DNs are identical.
            boolean selfSigned = cert.getIssuerX500Principal().equals(cert.getSubjectX500Principal());
            return toCertInfo(cert, hasCa, selfSigned);
        } catch (Exception e) {
            log.warn("Failed to read cert info from {}: {}", certPath, e.getMessage());
            return null;
        }
    }

    /**
     * Builds a {@link CertificateInfo} DTO (subject, issuer, validity window,
     * SHA-256 fingerprint). Self-signed detection is re-derived here as well,
     * so callers may pass {@code selfSigned=false} safely.
     */
    private CertificateInfo toCertInfo(X509Certificate cert, boolean hasCa, boolean selfSigned) {
        try {
            String fingerprint = HexFormat.ofDelimiter(":").formatHex(
                MessageDigest.getInstance("SHA-256").digest(cert.getEncoded()));
            // Auto-detect self-signed.
            if (cert.getIssuerX500Principal().equals(cert.getSubjectX500Principal())) {
                selfSigned = true;
            }
            return new CertificateInfo(
                cert.getSubjectX500Principal().getName(),
                cert.getIssuerX500Principal().getName(),
                cert.getNotBefore().toInstant(),
                cert.getNotAfter().toInstant(),
                hasCa,
                selfSigned,
                fingerprint
            );
        } catch (Exception e) {
            throw new RuntimeException("Failed to extract cert info", e);
        }
    }

    /** Parses a single X.509 certificate from PEM bytes. */
    static X509Certificate parseCertificate(byte[] pem) throws Exception {
        CertificateFactory cf = CertificateFactory.getInstance("X.509");
        return (X509Certificate) cf.generateCertificate(new ByteArrayInputStream(pem));
    }

    /** Parses all X.509 certificates in a PEM bundle (e.g. a CA chain). */
    static List<X509Certificate> parseCertificates(byte[] pem) throws Exception {
        CertificateFactory cf = CertificateFactory.getInstance("X.509");
        var certs = cf.generateCertificates(new ByteArrayInputStream(pem));
        return certs.stream().map(c -> (X509Certificate) c).toList();
    }

    /** Detects an encrypted PKCS#8 key by its PEM header text. */
    static boolean isEncryptedKey(byte[] pem) {
        String s = new String(pem);
        return s.contains("ENCRYPTED PRIVATE KEY");
    }

    /**
     * Parses a PEM private key, decrypting PKCS#8 keys with {@code password}
     * when the "ENCRYPTED PRIVATE KEY" header is present.
     *
     * NOTE(review): both branches use KeyFactory.getInstance("RSA"), so non-RSA
     * keys (EC, Ed25519) will fail to parse — confirm RSA-only is intended.
     */
    static java.security.PrivateKey parsePrivateKey(byte[] pem, String password) throws Exception {
        String pemStr = new String(pem);

        // Encrypted PKCS#8 key.
        if (pemStr.contains("BEGIN ENCRYPTED PRIVATE KEY")) {
            if (password == null || password.isEmpty()) {
                throw new IllegalArgumentException("Private key is encrypted but no password was provided");
            }
            var matcher = java.util.regex.Pattern
                .compile("-----BEGIN ENCRYPTED PRIVATE KEY-----(.+?)-----END ENCRYPTED PRIVATE KEY-----",
                    java.util.regex.Pattern.DOTALL)
                .matcher(pemStr);
            if (!matcher.find()) {
                throw new IllegalArgumentException("Malformed encrypted private key PEM");
            }
            String base64 = matcher.group(1).replaceAll("\\s+", "");
            byte[] decoded = Base64.getDecoder().decode(base64);

            // Decrypt via the JCA PBE scheme named in the EncryptedPrivateKeyInfo.
            var encryptedInfo = new javax.crypto.EncryptedPrivateKeyInfo(decoded);
            var pbeKeySpec = new javax.crypto.spec.PBEKeySpec(password.toCharArray());
            var keyFactory = javax.crypto.SecretKeyFactory.getInstance(encryptedInfo.getAlgName());
            var secretKey = keyFactory.generateSecret(pbeKeySpec);

            var cipher = javax.crypto.Cipher.getInstance(encryptedInfo.getAlgName());
            cipher.init(javax.crypto.Cipher.DECRYPT_MODE, secretKey, encryptedInfo.getAlgParameters());

            var pkcs8Spec = encryptedInfo.getKeySpec(cipher);
            return KeyFactory.getInstance("RSA").generatePrivate(pkcs8Spec);
        }

        // Unencrypted key — extract base64 between PEM markers (handles Bag Attributes etc.)
        var matcher = java.util.regex.Pattern
            .compile("-----BEGIN (?:RSA )?PRIVATE KEY-----(.+?)-----END (?:RSA )?PRIVATE KEY-----",
                java.util.regex.Pattern.DOTALL)
            .matcher(pemStr);
        if (!matcher.find()) {
            throw new IllegalArgumentException("No private key PEM block found");
        }
        String base64 = matcher.group(1).replaceAll("\\s+", "");
        byte[] decoded = Base64.getDecoder().decode(base64);
        PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(decoded);
        return KeyFactory.getInstance("RSA").generatePrivate(spec);
    }

    /**
     * Encodes a private key as an unencrypted PKCS#8 PEM block (64-char lines).
     * NOTE(review): getBytes() uses the platform default charset — content is
     * ASCII-safe, but an explicit StandardCharsets.US_ASCII would be stricter.
     */
    static byte[] encodePrivateKeyPem(java.security.PrivateKey key) {
        var sb = new StringBuilder();
        sb.append("-----BEGIN PRIVATE KEY-----\n");
        sb.append(Base64.getMimeEncoder(64, "\n".getBytes()).encodeToString(key.getEncoded()));
        sb.append("\n-----END PRIVATE KEY-----\n");
        return sb.toString().getBytes();
    }

    /** Writes via a ".wip" sibling then atomically renames over the target. */
    private void writeAtomic(Path target, byte[] data) throws IOException {
        Path wip = target.resolveSibling(target.getFileName() + ".wip");
        Files.write(wip, data);
        Files.move(wip, target, StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.ATOMIC_MOVE);
    }

    /** Write with owner-only permissions (0600) for private key files. */
    private void writeAtomicRestricted(Path target, byte[] data) throws IOException {
        writeAtomic(target, data);
        try {
            var posix = Files.getFileAttributeView(target, java.nio.file.attribute.PosixFileAttributeView.class);
            if (posix != null) {
                posix.setPermissions(java.util.Set.of(
                    java.nio.file.attribute.PosixFilePermission.OWNER_READ,
                    java.nio.file.attribute.PosixFilePermission.OWNER_WRITE
                ));
            }
        } catch (UnsupportedOperationException e) {
            // Non-POSIX filesystem (e.g., Windows) — skip.
            log.debug("Cannot set POSIX permissions on {}: {}", target, e.getMessage());
        }
    }

    /** Move that overwrites an existing target (not guaranteed atomic). */
    private void moveFile(Path source, Path target) throws IOException {
        Files.move(source, target, StandardCopyOption.REPLACE_EXISTING);
    }

    /** Recursively deletes a directory tree; per-file IO errors are ignored. */
    private void deleteDirectory(Path dir) throws IOException {
        if (!Files.exists(dir)) return;
        try (var walk = Files.walk(dir)) {
            walk.sorted(java.util.Comparator.reverseOrder())
                .forEach(p -> {
                    try { Files.deleteIfExists(p); } catch (IOException ignored) {}
                });
        }
    }
}
|
||||
@@ -0,0 +1,414 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
import com.github.dockerjava.api.DockerClient;
|
||||
import com.github.dockerjava.api.command.CreateContainerResponse;
|
||||
import com.github.dockerjava.api.command.InspectContainerResponse;
|
||||
import com.github.dockerjava.api.exception.NotFoundException;
|
||||
import com.github.dockerjava.api.model.*;
|
||||
import com.github.dockerjava.core.DockerClientConfig;
|
||||
import com.github.dockerjava.core.DockerClientImpl;
|
||||
import com.github.dockerjava.transport.DockerHttpClient;
|
||||
import com.github.dockerjava.zerodep.ZerodepDockerHttpClient;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
public class DockerTenantProvisioner implements TenantProvisioner {
|
||||
private static final Logger log = LoggerFactory.getLogger(DockerTenantProvisioner.class);
|
||||
|
||||
private final DockerClient docker;
|
||||
private final ProvisioningProperties props;
|
||||
|
||||
/**
 * Builds a docker-java client for the configured Docker host using the
 * zerodep HTTP transport, with conservative connection limits and timeouts.
 */
public DockerTenantProvisioner(DockerClientConfig config, ProvisioningProperties props) {
    this.props = props;
    DockerHttpClient httpClient = new ZerodepDockerHttpClient.Builder()
            .dockerHost(config.getDockerHost())
            .maxConnections(10) // provisioning is low-concurrency; small pool suffices
            .connectionTimeout(Duration.ofSeconds(5))
            .responseTimeout(Duration.ofSeconds(30))
            .build();
    this.docker = DockerClientImpl.getInstance(config, httpClient);
}
|
||||
|
||||
/** Always true: this implementation is only constructed when a Docker socket was detected (see TenantProvisionerAutoConfig). */
@Override
public boolean isAvailable() { return true; }
|
||||
|
||||
/**
 * Provisions a tenant end-to-end: pulls images if absent, creates an isolated
 * tenant network, creates and starts the server and UI containers, then waits
 * for the server's Docker health check. All failures are converted into a
 * {@code ProvisionResult.fail(...)} — this method never throws.
 */
@Override
public ProvisionResult provision(TenantProvisionRequest req) {
    String serverName = serverContainerName(req.slug());
    String uiName = uiContainerName(req.slug());
    String tenantNetwork = tenantNetworkName(req.slug());
    // Endpoint is the container's DNS name on the shared network (no host port mapping).
    String endpoint = "http://" + serverName + ":8081";

    try {
        pullIfMissing(props.serverImage());
        pullIfMissing(props.serverUiImage());

        // Create isolated tenant network
        ensureNetwork(tenantNetwork);

        createServerContainer(req, serverName, tenantNetwork);
        docker.startContainerCmd(serverName).exec();

        createUiContainer(req.slug(), uiName, serverName, tenantNetwork);
        docker.startContainerCmd(uiName).exec();

        if (!waitForHealth(serverName, 60)) {
            // NOTE(review): containers are left running on health timeout —
            // confirm whether callers clean up via remove() in this case.
            return ProvisionResult.fail("Server did not become healthy within 60s");
        }

        log.info("Provisioned tenant '{}': server={}, ui={}", req.slug(), serverName, uiName);
        return ProvisionResult.ok(endpoint);
    } catch (Exception e) {
        log.error("Failed to provision tenant '{}'", req.slug(), e);
        return ProvisionResult.fail(e.getMessage());
    }
}
|
||||
|
||||
/**
 * Starts an already-provisioned tenant's server and UI containers.
 *
 * @throws RuntimeException if the containers no longer exist (caller must
 *         re-provision) or if Docker refuses to start them
 */
@Override
public void start(String slug) {
    try {
        docker.startContainerCmd(serverContainerName(slug)).exec();
        docker.startContainerCmd(uiContainerName(slug)).exec();
    } catch (NotFoundException e) {
        log.warn("Containers for '{}' not found — cannot start (re-provision needed)", slug);
        throw new RuntimeException("Containers not found for '" + slug + "' — re-provision required", e);
    } catch (Exception e) {
        log.error("Failed to start containers for '{}'", slug, e);
        throw new RuntimeException("Start failed: " + e.getMessage(), e);
    }
}
|
||||
|
||||
/**
 * Stops the tenant's server and UI containers (without removing them).
 * Missing containers are tolerated by stopIfRunning; other Docker errors
 * are wrapped and rethrown.
 */
@Override
public void stop(String slug) {
    try {
        stopIfRunning(serverContainerName(slug));
        stopIfRunning(uiContainerName(slug));
    } catch (Exception e) {
        log.error("Failed to stop containers for '{}'", slug, e);
        throw new RuntimeException("Stop failed: " + e.getMessage(), e);
    }
}
|
||||
|
||||
/**
 * Tears down everything belonging to a tenant: every container carrying the
 * tenant label (app containers plus server and UI), per-environment networks,
 * the tenant network, and the JAR volume. Each step is best-effort — failures
 * are logged and the remaining steps still run.
 */
@Override
public void remove(String slug) {
    // 1. Remove ALL containers labeled for this tenant (app containers + server + UI)
    try {
        var containers = docker.listContainersCmd()
                .withLabelFilter(List.of("cameleer.tenant=" + slug))
                .withShowAll(true) // include stopped containers
                .exec();
        for (var container : containers) {
            try {
                docker.removeContainerCmd(container.getId()).withForce(true).exec();
                log.info("Removed tenant container: {} ({})", container.getNames(), container.getId().substring(0, 12));
            } catch (NotFoundException ignored) {}
        }
    } catch (Exception e) {
        log.warn("Failed to list/remove tenant containers for '{}': {}", slug, e.getMessage());
        // Fall back to named removal for server/UI
        removeContainer(uiContainerName(slug));
        removeContainer(serverContainerName(slug));
    }

    // 2. Remove per-environment networks (cameleer-env-{slug}-*)
    try {
        String envNetPrefix = "cameleer-env-" + slug + "-";
        var networks = docker.listNetworksCmd().exec();
        for (var network : networks) {
            if (network.getName().startsWith(envNetPrefix)) {
                removeNetwork(network.getName());
            }
        }
    } catch (Exception e) {
        log.warn("Failed to clean up env networks for '{}': {}", slug, e.getMessage());
    }

    // 3. Remove tenant network
    removeNetwork(tenantNetworkName(slug));

    // 4. Remove JAR volume
    try {
        docker.removeVolumeCmd("cameleer-jars-" + slug).exec();
        log.info("Removed JAR volume: cameleer-jars-{}", slug);
    } catch (NotFoundException ignored) {
    } catch (Exception e) {
        log.warn("Failed to remove JAR volume for '{}': {}", slug, e.getMessage());
    }
}
|
||||
|
||||
/**
 * Prepares a tenant for an image upgrade: stops and removes the server and UI
 * containers (app containers, volumes and networks are preserved) and
 * force-pulls the latest images.
 *
 * NOTE(review): this method does not recreate or restart the containers —
 * presumably the caller re-runs provision() afterwards; confirm.
 */
@Override
public void upgrade(String slug) {
    // 1. Stop and remove server + UI containers (preserve app containers, volumes, networks)
    stopIfRunning(serverContainerName(slug));
    stopIfRunning(uiContainerName(slug));
    removeContainer(serverContainerName(slug));
    removeContainer(uiContainerName(slug));

    // 2. Force pull latest images
    forcePull(props.serverImage());
    forcePull(props.serverUiImage());
}
|
||||
|
||||
/**
 * Reports the Docker state of the tenant's server container, mapped onto the
 * coarse ServerStatus model: "running" → RUNNING, any other Docker state
 * (created, exited, paused, ...) → STOPPED, missing container → NOT_FOUND,
 * any other Docker error → ERROR.
 */
@Override
public ServerStatus getStatus(String slug) {
    try {
        InspectContainerResponse info = docker.inspectContainerCmd(serverContainerName(slug)).exec();
        String state = info.getState().getStatus();
        String id = info.getId();
        if ("running".equals(state)) return ServerStatus.running(id);
        return ServerStatus.stopped(id);
    } catch (NotFoundException e) {
        return ServerStatus.notFound();
    } catch (Exception e) {
        return ServerStatus.error(e.getMessage());
    }
}
|
||||
|
||||
@Override
|
||||
public String getServerEndpoint(String slug) {
|
||||
return "http://" + serverContainerName(slug) + ":8081";
|
||||
}
|
||||
|
||||
/**
 * Creates (but does not start) the tenant's server container: Traefik routing
 * labels under /t/{slug}, per-tenant or shared datasource credentials, a
 * curl-based Docker health check, and membership in three networks — the
 * isolated tenant network (primary), the shared services network, and the
 * Traefik network.
 */
private void createServerContainer(TenantProvisionRequest req, String name, String tenantNetwork) {
    String slug = req.slug();
    String prefix = "/t/" + slug;

    // Traefik labels — need >10 entries, use HashMap
    var labels = new java.util.HashMap<String, String>();
    labels.put("traefik.enable", "true");
    // Only API and actuator paths route to the server; everything else under
    // the prefix goes to the UI container (lower priority router there).
    labels.put("traefik.http.routers.server-" + slug + ".rule",
            "PathPrefix(`" + prefix + "/api`) || PathPrefix(`" + prefix + "/actuator`)");
    labels.put("traefik.http.routers.server-" + slug + ".tls", "true");
    labels.put("traefik.http.routers.server-" + slug + ".priority", "10");
    labels.put("traefik.http.middlewares.server-strip-" + slug + ".stripprefix.prefixes", prefix);
    labels.put("traefik.http.routers.server-" + slug + ".middlewares", "server-strip-" + slug);
    labels.put("traefik.http.services.server-" + slug + ".loadbalancer.server.port", "8081");
    labels.put("traefik.docker.network", props.traefikNetwork());
    // Tenant/role labels are what remove() uses to find all tenant containers.
    labels.put("cameleer.tenant", slug);
    labels.put("cameleer.role", "server");
    labels.put("prometheus.scrape", "true");
    labels.put("prometheus.path", "/api/v1/prometheus");
    labels.put("prometheus.port", "8081");

    // Per-tenant DB isolation: dedicated user+schema when dbPassword is set,
    // shared credentials for backwards compatibility with pre-isolation tenants.
    String dsUrl;
    String dsUser;
    String dsPass;
    if (req.dbPassword() != null) {
        dsUrl = props.datasourceUrl() + "?currentSchema=tenant_" + slug + "&ApplicationName=tenant_" + slug;
        dsUser = "tenant_" + slug;
        dsPass = req.dbPassword();
    } else {
        dsUrl = props.datasourceUrl();
        dsUser = props.datasourceUsername();
        dsPass = props.datasourcePassword();
    }
    var env = new java.util.ArrayList<>(List.of(
        "SPRING_DATASOURCE_URL=" + dsUrl,
        "SPRING_DATASOURCE_USERNAME=" + dsUser,
        "SPRING_DATASOURCE_PASSWORD=" + dsPass,
        "CAMELEER_SERVER_CLICKHOUSE_URL=jdbc:clickhouse://cameleer-clickhouse:8123/cameleer",
        "CAMELEER_SERVER_CLICKHOUSE_USERNAME=" + props.clickhouseUser(),
        "CAMELEER_SERVER_CLICKHOUSE_PASSWORD=" + props.clickhousePassword(),
        "CAMELEER_SERVER_TENANT_ID=" + slug,
        "CAMELEER_SERVER_SECURITY_BOOTSTRAPTOKEN=" + req.licenseToken(),
        // SECURITY(review): hard-coded JWT secret shared by every tenant —
        // replace with a per-tenant generated secret before production use.
        "CAMELEER_SERVER_SECURITY_JWTSECRET=cameleer-dev-jwt-secret-change-in-production",
        "CAMELEER_SERVER_SECURITY_OIDC_ISSUERURI=" + props.oidcIssuerUri(),
        "CAMELEER_SERVER_SECURITY_OIDC_JWKSETURI=" + props.oidcJwkSetUri(),
        "CAMELEER_SERVER_SECURITY_OIDC_AUDIENCE=https://api.cameleer.local",
        "CAMELEER_SERVER_SECURITY_CORSALLOWEDORIGINS=" + props.corsOrigins(),
        "CAMELEER_SERVER_LICENSE_TOKEN=" + req.licenseToken(),
        "CAMELEER_SERVER_RUNTIME_ENABLED=true",
        "CAMELEER_SERVER_RUNTIME_SERVERURL=http://" + name + ":8081",
        "CAMELEER_SERVER_RUNTIME_ROUTINGDOMAIN=" + props.publicHost(),
        "CAMELEER_SERVER_RUNTIME_ROUTINGMODE=path",
        "CAMELEER_SERVER_RUNTIME_JARSTORAGEPATH=/data/jars",
        // Apps deployed by this server join the tenant network (isolated)
        "CAMELEER_SERVER_RUNTIME_DOCKERNETWORK=" + tenantNetwork,
        "CAMELEER_SERVER_RUNTIME_JARDOCKERVOLUME=cameleer-jars-" + slug,
        "CAMELEER_SERVER_SECURITY_INFRASTRUCTUREENDPOINTS=false"
    ));
    // If no CA bundle exists, fall back to TLS skip for OIDC (self-signed dev)
    if (!java.nio.file.Files.exists(java.nio.file.Path.of("/certs/ca.pem"))) {
        env.add("CAMELEER_SERVER_SECURITY_OIDC_TLSSKIPVERIFY=true");
    }

    // Primary network = tenant-isolated network
    // SECURITY(review): mounting /var/run/docker.sock gives this tenant's
    // server full control of the host Docker daemon — confirm this trust
    // boundary is intended.
    HostConfig hostConfig = HostConfig.newHostConfig()
            .withRestartPolicy(RestartPolicy.unlessStoppedRestart())
            .withNetworkMode(tenantNetwork)
            .withBinds(
                new Bind("/var/run/docker.sock", new Volume("/var/run/docker.sock")),
                new Bind("cameleer-jars-" + slug, new Volume("/data/jars")),
                new Bind("cameleer-saas_certs", new Volume("/certs"), AccessMode.ro)
            )
            .withGroupAdd(List.of("0")); // root group for docker.sock access

    CreateContainerResponse resp = docker.createContainerCmd(props.serverImage())
            .withName(name)
            .withLabels(labels)
            .withEnv(env)
            .withHostConfig(hostConfig)
            .withHealthcheck(new HealthCheck()
                // Durations are nanoseconds (Docker API units): 5s interval/timeout, 15s start period.
                .withTest(List.of("CMD-SHELL", "curl -sf http://localhost:8081/api/v1/health || exit 1"))
                .withInterval(5_000_000_000L)
                .withTimeout(5_000_000_000L)
                .withRetries(30)
                .withStartPeriod(15_000_000_000L))
            .exec();

    String containerId = resp.getId();

    // Connect to shared services network (postgres, clickhouse, logto)
    docker.connectToNetworkCmd()
            .withNetworkId(props.networkName())
            .withContainerId(containerId)
            .withContainerNetwork(new ContainerNetwork().withAliases(List.of(name)))
            .exec();

    // Connect to traefik network for routing
    docker.connectToNetworkCmd()
            .withNetworkId(props.traefikNetwork())
            .withContainerId(containerId)
            .withContainerNetwork(new ContainerNetwork().withAliases(List.of(name)))
            .exec();
}
|
||||
|
||||
/**
 * Creates (but does not start) the tenant's UI container. It routes at lower
 * Traefik priority (5) than the server router (10), so /t/{slug}/api and
 * /actuator go to the server while all other /t/{slug} paths land here.
 */
private void createUiContainer(String slug, String uiName, String serverName, String tenantNetwork) {
    String prefix = "/t/" + slug;

    var labels = new java.util.HashMap<String, String>();
    labels.put("traefik.enable", "true");
    labels.put("traefik.http.routers.ui-" + slug + ".rule", "PathPrefix(`" + prefix + "`)");
    labels.put("traefik.http.routers.ui-" + slug + ".tls", "true");
    labels.put("traefik.http.routers.ui-" + slug + ".priority", "5");
    labels.put("traefik.http.middlewares.ui-strip-" + slug + ".stripprefix.prefixes", prefix);
    labels.put("traefik.http.routers.ui-" + slug + ".middlewares", "ui-strip-" + slug);
    labels.put("traefik.http.services.ui-" + slug + ".loadbalancer.server.port", "80");
    labels.put("traefik.docker.network", props.traefikNetwork());
    labels.put("cameleer.tenant", slug); // used by remove() to find tenant containers
    labels.put("cameleer.role", "server-ui");

    List<String> env = List.of(
        "BASE_PATH=" + prefix,
        "CAMELEER_API_URL=http://" + serverName + ":8081"
    );

    // Primary network = tenant network (can reach server via DNS)
    HostConfig hostConfig = HostConfig.newHostConfig()
            .withRestartPolicy(RestartPolicy.unlessStoppedRestart())
            .withNetworkMode(tenantNetwork);

    CreateContainerResponse resp = docker.createContainerCmd(props.serverUiImage())
            .withName(uiName)
            .withLabels(labels)
            .withEnv(env)
            .withHostConfig(hostConfig)
            .exec();

    // Connect to traefik for routing
    docker.connectToNetworkCmd()
            .withNetworkId(props.traefikNetwork())
            .withContainerId(resp.getId())
            .exec();
}
|
||||
|
||||
private boolean waitForHealth(String containerName, int timeoutSeconds) {
|
||||
long deadline = System.currentTimeMillis() + timeoutSeconds * 1000L;
|
||||
while (System.currentTimeMillis() < deadline) {
|
||||
try {
|
||||
InspectContainerResponse info = docker.inspectContainerCmd(containerName).exec();
|
||||
InspectContainerResponse.ContainerState state = info.getState();
|
||||
if (state.getHealth() != null && "healthy".equals(state.getHealth().getStatus())) {
|
||||
return true;
|
||||
}
|
||||
Thread.sleep(2000);
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
return false;
|
||||
} catch (Exception e) {
|
||||
log.debug("Health check poll for '{}': {}", containerName, e.getMessage());
|
||||
try { Thread.sleep(2000); } catch (InterruptedException ie) {
|
||||
Thread.currentThread().interrupt();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
 * Pulls the image unconditionally, blocking until the pull completes.
 * Unlike pullIfMissing, a pull failure here is fatal and rethrown.
 */
private void forcePull(String image) {
    log.info("Force pulling image: {}", image);
    try {
        docker.pullImageCmd(image).start().awaitCompletion();
    } catch (Exception e) {
        throw new RuntimeException("Failed to pull image " + image + ": " + e.getMessage(), e);
    }
}
|
||||
|
||||
/**
 * Pulls the image only if it is not already present locally. Pull failures
 * are logged and swallowed — container creation will then fail with a clearer
 * error if the image is truly unavailable.
 */
private void pullIfMissing(String image) {
    try {
        docker.inspectImageCmd(image).exec(); // cheap existence check
    } catch (NotFoundException e) {
        log.info("Pulling image: {}", image);
        try {
            docker.pullImageCmd(image).start().awaitCompletion();
        } catch (Exception ex) {
            log.warn("Failed to pull {}: {}", image, ex.getMessage());
        }
    }
}
|
||||
|
||||
/**
 * Stops the named container with a 30-second grace period; missing containers
 * are ignored.
 * NOTE(review): stopping an already-stopped container may raise a
 * NotModified-style exception from docker-java which is not caught here —
 * confirm intended behavior.
 */
private void stopIfRunning(String name) {
    try {
        docker.stopContainerCmd(name).withTimeout(30).exec();
    } catch (NotFoundException ignored) {}
}
|
||||
|
||||
/** Force-removes the named container (kills it if running); missing containers are ignored. */
private void removeContainer(String name) {
    try {
        docker.removeContainerCmd(name).withForce(true).exec();
    } catch (NotFoundException ignored) {}
}
|
||||
|
||||
/**
 * Creates the tenant's bridge network if it does not already exist. The
 * network is created "internal", so containers on it have no external
 * egress — isolation between tenants and from the internet.
 */
private void ensureNetwork(String networkName) {
    try {
        docker.inspectNetworkCmd().withNetworkId(networkName).exec();
        log.debug("Network '{}' already exists", networkName);
    } catch (NotFoundException e) {
        docker.createNetworkCmd()
                .withName(networkName)
                .withDriver("bridge")
                .withInternal(true) // no external access — isolated
                .exec();
        log.info("Created isolated tenant network: {}", networkName);
    }
}
|
||||
|
||||
/**
 * Removes a Docker network, best-effort: a missing network is silently
 * ignored, any other failure (e.g. containers still attached) is logged.
 */
private void removeNetwork(String networkName) {
    try {
        docker.removeNetworkCmd(networkName).exec();
        log.info("Removed tenant network: {}", networkName);
    } catch (NotFoundException ignored) {
    } catch (Exception e) {
        log.warn("Failed to remove network '{}': {}", networkName, e.getMessage());
    }
}
|
||||
|
||||
private String serverContainerName(String slug) {
|
||||
return "cameleer-server-" + slug;
|
||||
}
|
||||
|
||||
private String uiContainerName(String slug) {
|
||||
return "cameleer-server-ui-" + slug;
|
||||
}
|
||||
|
||||
private String tenantNetworkName(String slug) {
|
||||
return "cameleer-tenant-" + slug;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,14 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
/**
 * Outcome of one tenant provisioning attempt: on success carries the internal
 * server endpoint (error is null), on failure carries an error message
 * (serverEndpoint is null).
 */
public record ProvisionResult(
    boolean success,
    String serverEndpoint,
    String error
) {
    /** Successful result pointing at the provisioned server's endpoint. */
    public static ProvisionResult ok(String endpoint) {
        return new ProvisionResult(true, endpoint, null);
    }

    /** Failed result carrying a human-readable error description. */
    public static ProvisionResult fail(String error) {
        return new ProvisionResult(false, null, error);
    }
}
|
||||
@@ -0,0 +1,22 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
|
||||
/**
 * Static configuration for tenant provisioning, bound from
 * {@code cameleer.saas.provisioning.*} properties.
 */
@ConfigurationProperties(prefix = "cameleer.saas.provisioning")
public record ProvisioningProperties(
    String serverImage,         // Docker image for the per-tenant server
    String serverUiImage,       // Docker image for the per-tenant UI
    String networkName,         // shared services network (postgres, clickhouse, logto)
    String traefikNetwork,      // network Traefik watches for routing
    String publicHost,          // public routing domain handed to tenant servers
    String publicProtocol,
    String datasourceUrl,       // shared PostgreSQL JDBC URL
    String datasourceUsername,  // admin/shared PostgreSQL credentials
    String datasourcePassword,
    String clickhouseUrl,       // ClickHouse JDBC URL (used for GDPR cleanup)
    String clickhouseUser,
    String clickhousePassword,
    String oidcIssuerUri,
    String oidcJwkSetUri,
    String corsOrigins
) {}
|
||||
@@ -0,0 +1,22 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
/**
 * Coarse view of a tenant server container's state. containerId is present
 * for RUNNING/STOPPED, error text only for ERROR.
 */
public record ServerStatus(
    State state,
    String containerId,
    String error
) {
    /** Possible container states as seen by the provisioner. */
    public enum State { RUNNING, STOPPED, NOT_FOUND, ERROR }

    /** Container exists and is running. */
    public static ServerStatus running(String containerId) {
        return new ServerStatus(State.RUNNING, containerId, null);
    }

    /** Container exists but is not running (created/exited/paused/...). */
    public static ServerStatus stopped(String containerId) {
        return new ServerStatus(State.STOPPED, containerId, null);
    }

    /** No container exists for the tenant. */
    public static ServerStatus notFound() {
        return new ServerStatus(State.NOT_FOUND, null, null);
    }

    /** Docker could not be queried; message describes why. */
    public static ServerStatus error(String error) {
        return new ServerStatus(State.ERROR, null, error);
    }
}
|
||||
@@ -0,0 +1,70 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.Statement;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Deletes tenant data from ClickHouse tables when a tenant is deleted
|
||||
* (GDPR data erasure). PostgreSQL cleanup is handled by TenantDatabaseService.
|
||||
*/
|
||||
@Service
|
||||
public class TenantDataCleanupService {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(TenantDataCleanupService.class);
|
||||
|
||||
private final ProvisioningProperties props;
|
||||
|
||||
public TenantDataCleanupService(ProvisioningProperties props) {
|
||||
this.props = props;
|
||||
}
|
||||
|
||||
/**
|
||||
* Deletes tenant data from ClickHouse tables (GDPR data erasure).
|
||||
* PostgreSQL cleanup is handled by TenantDatabaseService.
|
||||
*/
|
||||
public void cleanupClickHouse(String slug) {
|
||||
deleteClickHouseData(slug);
|
||||
}
|
||||
|
||||
private void deleteClickHouseData(String slug) {
|
||||
String url = props.clickhouseUrl();
|
||||
if (url == null || url.isBlank()) {
|
||||
log.warn("No ClickHouse URL configured — skipping ClickHouse data cleanup");
|
||||
return;
|
||||
}
|
||||
|
||||
try (Connection conn = DriverManager.getConnection(url, props.clickhouseUser(), props.clickhousePassword());
|
||||
Statement stmt = conn.createStatement()) {
|
||||
|
||||
// Find all tables with a tenant_id column
|
||||
List<String> tables = new ArrayList<>();
|
||||
try (ResultSet rs = stmt.executeQuery(
|
||||
"SELECT DISTINCT table FROM system.columns " +
|
||||
"WHERE database = currentDatabase() AND name = 'tenant_id'")) {
|
||||
while (rs.next()) {
|
||||
tables.add(rs.getString(1));
|
||||
}
|
||||
}
|
||||
|
||||
for (String table : tables) {
|
||||
try {
|
||||
stmt.execute("ALTER TABLE `" + table + "` DELETE WHERE tenant_id = '" + slug + "'");
|
||||
log.info("Deleted ClickHouse data for tenant '{}' from table '{}'", slug, table);
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to delete from ClickHouse table '{}' for tenant '{}': {}",
|
||||
table, slug, e.getMessage());
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to clean up ClickHouse data for tenant '{}': {}", slug, e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,104 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.Statement;
|
||||
|
||||
@Service
|
||||
public class TenantDatabaseService {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(TenantDatabaseService.class);
|
||||
|
||||
private final ProvisioningProperties props;
|
||||
|
||||
public TenantDatabaseService(ProvisioningProperties props) {
|
||||
this.props = props;
|
||||
}
|
||||
|
||||
public void createTenantDatabase(String slug, String password) {
|
||||
validateSlug(slug);
|
||||
|
||||
String url = props.datasourceUrl();
|
||||
if (url == null || url.isBlank()) {
|
||||
log.warn("No datasource URL configured — skipping tenant DB setup");
|
||||
return;
|
||||
}
|
||||
|
||||
String user = "tenant_" + slug;
|
||||
String schema = "tenant_" + slug;
|
||||
|
||||
try (Connection conn = DriverManager.getConnection(url, props.datasourceUsername(), props.datasourcePassword());
|
||||
Statement stmt = conn.createStatement()) {
|
||||
|
||||
boolean userExists;
|
||||
try (ResultSet rs = stmt.executeQuery(
|
||||
"SELECT 1 FROM pg_roles WHERE rolname = '" + user + "'")) {
|
||||
userExists = rs.next();
|
||||
}
|
||||
if (!userExists) {
|
||||
stmt.execute("CREATE USER \"" + user + "\" WITH PASSWORD '" + escapePassword(password) + "'");
|
||||
log.info("Created PostgreSQL user: {}", user);
|
||||
} else {
|
||||
stmt.execute("ALTER USER \"" + user + "\" WITH PASSWORD '" + escapePassword(password) + "'");
|
||||
log.info("Updated password for existing PostgreSQL user: {}", user);
|
||||
}
|
||||
|
||||
boolean schemaExists;
|
||||
try (ResultSet rs = stmt.executeQuery(
|
||||
"SELECT 1 FROM information_schema.schemata WHERE schema_name = '" + schema + "'")) {
|
||||
schemaExists = rs.next();
|
||||
}
|
||||
if (!schemaExists) {
|
||||
stmt.execute("CREATE SCHEMA \"" + schema + "\" AUTHORIZATION \"" + user + "\"");
|
||||
log.info("Created PostgreSQL schema: {}", schema);
|
||||
} else {
|
||||
stmt.execute("ALTER SCHEMA \"" + schema + "\" OWNER TO \"" + user + "\"");
|
||||
log.info("Schema {} already exists — ensured ownership", schema);
|
||||
}
|
||||
|
||||
stmt.execute("REVOKE ALL ON SCHEMA public FROM \"" + user + "\"");
|
||||
|
||||
} catch (Exception e) {
|
||||
throw new RuntimeException("Failed to create tenant database for '" + slug + "': " + e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
|
||||
public void dropTenantDatabase(String slug) {
|
||||
validateSlug(slug);
|
||||
|
||||
String url = props.datasourceUrl();
|
||||
if (url == null || url.isBlank()) {
|
||||
log.warn("No datasource URL configured — skipping tenant DB cleanup");
|
||||
return;
|
||||
}
|
||||
|
||||
String user = "tenant_" + slug;
|
||||
String schema = "tenant_" + slug;
|
||||
|
||||
try (Connection conn = DriverManager.getConnection(url, props.datasourceUsername(), props.datasourcePassword());
|
||||
Statement stmt = conn.createStatement()) {
|
||||
stmt.execute("DROP SCHEMA IF EXISTS \"" + schema + "\" CASCADE");
|
||||
log.info("Dropped PostgreSQL schema: {}", schema);
|
||||
|
||||
stmt.execute("DROP USER IF EXISTS \"" + user + "\"");
|
||||
log.info("Dropped PostgreSQL user: {}", user);
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to drop tenant database for '{}': {}", slug, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
private void validateSlug(String slug) {
|
||||
if (slug == null || !slug.matches("^[a-z0-9-]+$")) {
|
||||
throw new IllegalArgumentException("Invalid tenant slug: " + slug);
|
||||
}
|
||||
}
|
||||
|
||||
private String escapePassword(String password) {
|
||||
return password.replace("'", "''");
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,11 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
import java.util.UUID;
|
||||
|
||||
/**
 * Immutable input for provisioning one tenant.
 */
public record TenantProvisionRequest(
    UUID tenantId,
    String slug,          // identifier-safe tenant key, used in container/schema/network names
    String tier,
    String licenseToken,  // also injected as the server's bootstrap token
    String dbPassword     // null => legacy shared datasource credentials (pre-isolation tenants)
) {}
|
||||
@@ -0,0 +1,12 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
/**
 * Lifecycle operations for a tenant's dedicated server/UI instance.
 * Selected at startup by TenantProvisionerAutoConfig (Docker-backed when a
 * Docker socket exists, otherwise a disabled implementation).
 */
public interface TenantProvisioner {
    /** Whether this provisioner can actually manage tenants in this environment. */
    boolean isAvailable();
    /** Creates and starts all tenant resources; failures are reported in the result. */
    ProvisionResult provision(TenantProvisionRequest request);
    /** Starts an already-provisioned tenant's containers. */
    void start(String slug);
    /** Stops the tenant's containers without removing them. */
    void stop(String slug);
    /** Removes all containers, networks and volumes belonging to the tenant. */
    void remove(String slug);
    /** Tears down server/UI containers and pulls fresh images for re-provisioning. */
    void upgrade(String slug);
    /** Current container state of the tenant's server. */
    ServerStatus getStatus(String slug);
    /** Internal URL where the tenant's server is reachable. */
    String getServerEndpoint(String slug);
}
|
||||
@@ -0,0 +1,31 @@
|
||||
package net.siegeln.cameleer.saas.provisioning;
|
||||
|
||||
import com.github.dockerjava.core.DefaultDockerClientConfig;
|
||||
import com.github.dockerjava.core.DockerClientConfig;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
|
||||
@Configuration
@EnableConfigurationProperties(ProvisioningProperties.class)
public class TenantProvisionerAutoConfig {
    private static final Logger log = LoggerFactory.getLogger(TenantProvisionerAutoConfig.class);

    /**
     * Selects the provisioner implementation at startup: Docker-backed when
     * the standard Docker socket is mounted into this container, otherwise a
     * disabled no-op so the rest of the app still boots.
     */
    @Bean
    TenantProvisioner tenantProvisioner(ProvisioningProperties props) {
        if (Files.exists(Path.of("/var/run/docker.sock"))) {
            log.info("Docker socket detected — enabling Docker tenant provisioner");
            DockerClientConfig config = DefaultDockerClientConfig.createDefaultConfigBuilder()
                    .withDockerHost("unix:///var/run/docker.sock")
                    .build();
            return new DockerTenantProvisioner(config, props);
        }
        log.info("No Docker socket — tenant provisioning disabled");
        return new DisabledTenantProvisioner();
    }
}
|
||||
@@ -70,14 +70,6 @@ public class TenantController {
|
||||
}
|
||||
|
||||
private TenantResponse toResponse(TenantEntity entity) {
|
||||
return new TenantResponse(
|
||||
entity.getId(),
|
||||
entity.getName(),
|
||||
entity.getSlug(),
|
||||
entity.getTier().name(),
|
||||
entity.getStatus().name(),
|
||||
entity.getCreatedAt(),
|
||||
entity.getUpdatedAt()
|
||||
);
|
||||
return TenantResponse.from(entity);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -52,6 +52,18 @@ public class TenantEntity {
|
||||
// Free-form per-tenant settings stored as jsonb; defaults to an empty map.
@Column(name = "settings", columnDefinition = "jsonb")
private Map<String, Object> settings = Map.of();

// Internal URL of the tenant's provisioned server (null until provisioned).
@Column(name = "server_endpoint", length = 512)
private String serverEndpoint;

// Message from the last failed provisioning attempt, if any.
@Column(name = "provision_error", columnDefinition = "TEXT")
private String provisionError;

// SECURITY(review): per-tenant DB password stored in plaintext — consider
// encrypting at rest or storing a secret reference instead.
@Column(name = "db_password")
private String dbPassword;

// Presumably when the CA bundle was last applied to this tenant (null = never) — confirm.
@Column(name = "ca_applied_at")
private Instant caAppliedAt;

@Column(name = "created_at", nullable = false, updatable = false)
private Instant createdAt;
|
||||
|
||||
@@ -87,6 +99,14 @@ public class TenantEntity {
|
||||
public void setStripeSubscriptionId(String stripeSubscriptionId) { this.stripeSubscriptionId = stripeSubscriptionId; }
public Map<String, Object> getSettings() { return settings; }
public void setSettings(Map<String, Object> settings) { this.settings = settings; }
public String getServerEndpoint() { return serverEndpoint; }
public void setServerEndpoint(String serverEndpoint) { this.serverEndpoint = serverEndpoint; }
public String getProvisionError() { return provisionError; }
public void setProvisionError(String provisionError) { this.provisionError = provisionError; }
// SECURITY(review): exposes the plaintext per-tenant DB password — ensure it
// never leaks into API responses or logs.
public String getDbPassword() { return dbPassword; }
public void setDbPassword(String dbPassword) { this.dbPassword = dbPassword; }
public Instant getCaAppliedAt() { return caAppliedAt; }
public void setCaAppliedAt(Instant caAppliedAt) { this.caAppliedAt = caAppliedAt; }
// createdAt/updatedAt are read-only: managed by the persistence layer, no setters.
public Instant getCreatedAt() { return createdAt; }
public Instant getUpdatedAt() { return updatedAt; }
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package net.siegeln.cameleer.saas.tenant;
|
||||
import org.springframework.data.jpa.repository.JpaRepository;
|
||||
import org.springframework.stereotype.Repository;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.List;
|
||||
import java.util.Optional;
|
||||
import java.util.UUID;
|
||||
@@ -13,4 +14,6 @@ public interface TenantRepository extends JpaRepository<TenantEntity, UUID> {
|
||||
Optional<TenantEntity> findByLogtoOrgId(String logtoOrgId);
List<TenantEntity> findByStatus(TenantStatus status);
boolean existsBySlug(String slug);
// Slug-uniqueness check ignoring tenants in a given status (e.g. DELETED),
// so slugs of deleted tenants can be reused.
boolean existsBySlugAndStatusNot(String slug, TenantStatus status);
// Tenants whose CA bundle is stale (applied before threshold) or never applied.
long countByCaAppliedAtBeforeOrCaAppliedAtIsNull(Instant threshold);
}
|
||||
|
||||
@@ -24,7 +24,7 @@ public class TenantService {
|
||||
}
|
||||
|
||||
public TenantEntity create(CreateTenantRequest request, UUID actorId) {
|
||||
if (tenantRepository.existsBySlug(request.slug())) {
|
||||
if (tenantRepository.existsBySlugAndStatusNot(request.slug(), TenantStatus.DELETED)) {
|
||||
throw new IllegalArgumentException("Slug already taken");
|
||||
}
|
||||
|
||||
|
||||
@@ -7,5 +7,7 @@ import jakarta.validation.constraints.Size;
|
||||
public record CreateTenantRequest(
|
||||
@NotBlank @Size(max = 255) String name,
|
||||
@NotBlank @Size(max = 100) @Pattern(regexp = "^[a-z0-9][a-z0-9-]*[a-z0-9]$", message = "Slug must be lowercase alphanumeric with hyphens") String slug,
|
||||
String tier
|
||||
String tier,
|
||||
String adminUsername,
|
||||
String adminPassword
|
||||
) {}
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
package net.siegeln.cameleer.saas.tenant.dto;
|
||||
|
||||
import net.siegeln.cameleer.saas.tenant.TenantEntity;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.UUID;
|
||||
|
||||
@@ -9,6 +11,17 @@ public record TenantResponse(
|
||||
String slug,
|
||||
String tier,
|
||||
String status,
|
||||
String serverEndpoint,
|
||||
String provisionError,
|
||||
Instant createdAt,
|
||||
Instant updatedAt
|
||||
) {}
|
||||
) {
|
||||
public static TenantResponse from(TenantEntity e) {
|
||||
return new TenantResponse(
|
||||
e.getId(), e.getName(), e.getSlug(),
|
||||
e.getTier().name(), e.getStatus().name(),
|
||||
e.getServerEndpoint(), e.getProvisionError(),
|
||||
e.getCreatedAt(), e.getUpdatedAt()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
57
src/main/java/net/siegeln/cameleer/saas/vendor/InfrastructureController.java
vendored
Normal file
57
src/main/java/net/siegeln/cameleer/saas/vendor/InfrastructureController.java
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
package net.siegeln.cameleer.saas.vendor;
|
||||
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.web.bind.annotation.*;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/vendor/infrastructure")
|
||||
@PreAuthorize("hasAuthority('SCOPE_platform:admin')")
|
||||
public class InfrastructureController {
|
||||
|
||||
private final InfrastructureService infraService;
|
||||
|
||||
public InfrastructureController(InfrastructureService infraService) {
|
||||
this.infraService = infraService;
|
||||
}
|
||||
|
||||
public record InfraOverviewResponse(
|
||||
InfrastructureService.PostgresOverview postgres,
|
||||
InfrastructureService.ClickHouseOverview clickhouse) {}
|
||||
|
||||
@GetMapping
|
||||
public ResponseEntity<InfraOverviewResponse> overview() {
|
||||
return ResponseEntity.ok(new InfraOverviewResponse(
|
||||
infraService.getPostgresOverview(),
|
||||
infraService.getClickHouseOverview()));
|
||||
}
|
||||
|
||||
@GetMapping("/postgres")
|
||||
public ResponseEntity<Map<String, Object>> postgres() {
|
||||
return ResponseEntity.ok(Map.of(
|
||||
"overview", infraService.getPostgresOverview(),
|
||||
"tenants", infraService.getPostgresTenantStats()));
|
||||
}
|
||||
|
||||
@GetMapping("/postgres/{slug}")
|
||||
public ResponseEntity<List<InfrastructureService.TableStats>> postgresDetail(
|
||||
@PathVariable String slug) {
|
||||
return ResponseEntity.ok(infraService.getPostgresTenantDetail(slug));
|
||||
}
|
||||
|
||||
@GetMapping("/clickhouse")
|
||||
public ResponseEntity<Map<String, Object>> clickhouse() {
|
||||
return ResponseEntity.ok(Map.of(
|
||||
"overview", infraService.getClickHouseOverview(),
|
||||
"tenants", infraService.getClickHouseTenantStats()));
|
||||
}
|
||||
|
||||
@GetMapping("/clickhouse/{tenantId}")
|
||||
public ResponseEntity<List<InfrastructureService.ChTableStats>> clickhouseDetail(
|
||||
@PathVariable String tenantId) {
|
||||
return ResponseEntity.ok(infraService.getClickHouseTenantDetail(tenantId));
|
||||
}
|
||||
}
|
||||
280
src/main/java/net/siegeln/cameleer/saas/vendor/InfrastructureService.java
vendored
Normal file
280
src/main/java/net/siegeln/cameleer/saas/vendor/InfrastructureService.java
vendored
Normal file
@@ -0,0 +1,280 @@
|
||||
package net.siegeln.cameleer.saas.vendor;
|
||||
|
||||
import net.siegeln.cameleer.saas.provisioning.ProvisioningProperties;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.math.BigDecimal;
|
||||
import java.math.RoundingMode;
|
||||
import java.sql.Connection;
|
||||
import java.sql.DriverManager;
|
||||
import java.sql.PreparedStatement;
|
||||
import java.sql.ResultSet;
|
||||
import java.sql.SQLException;
|
||||
import java.sql.Statement;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
@Service
|
||||
public class InfrastructureService {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(InfrastructureService.class);
|
||||
|
||||
private static final String[] CH_TABLES = {
|
||||
"executions", "processor_executions", "logs", "agent_events", "usage_events"
|
||||
};
|
||||
|
||||
private final ProvisioningProperties props;
|
||||
|
||||
public InfrastructureService(ProvisioningProperties props) {
|
||||
this.props = props;
|
||||
}
|
||||
|
||||
// --- Response records ---
|
||||
|
||||
public record PostgresOverview(String version, long databaseSizeBytes, int activeConnections) {}
|
||||
|
||||
public record TenantPgStats(String slug, long schemaSizeBytes, int tableCount, long totalRows) {}
|
||||
|
||||
public record TableStats(String tableName, long rowCount, long dataSizeBytes, long indexSizeBytes) {}
|
||||
|
||||
public record ClickHouseOverview(
|
||||
String version,
|
||||
long uptimeSeconds,
|
||||
long totalDiskBytes,
|
||||
long totalUncompressedBytes,
|
||||
double compressionRatio,
|
||||
long totalRows,
|
||||
int activeMerges
|
||||
) {}
|
||||
|
||||
public record TenantChStats(String tenantId, long totalRows, Map<String, Long> rowsByTable) {}
|
||||
|
||||
public record ChTableStats(String tableName, long rowCount) {}
|
||||
|
||||
// --- PostgreSQL methods ---
|
||||
|
||||
public PostgresOverview getPostgresOverview() {
|
||||
try (Connection conn = pgConnection();
|
||||
Statement stmt = conn.createStatement()) {
|
||||
|
||||
String version;
|
||||
try (ResultSet rs = stmt.executeQuery("SELECT version()")) {
|
||||
rs.next();
|
||||
version = rs.getString(1);
|
||||
}
|
||||
|
||||
long dbSize;
|
||||
try (ResultSet rs = stmt.executeQuery("SELECT pg_database_size(current_database())")) {
|
||||
rs.next();
|
||||
dbSize = rs.getLong(1);
|
||||
}
|
||||
|
||||
int activeConnections;
|
||||
try (ResultSet rs = stmt.executeQuery(
|
||||
"SELECT count(*) FROM pg_stat_activity WHERE datname = current_database()")) {
|
||||
rs.next();
|
||||
activeConnections = rs.getInt(1);
|
||||
}
|
||||
|
||||
return new PostgresOverview(version, dbSize, activeConnections);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to get PostgreSQL overview: {}", e.getMessage(), e);
|
||||
throw new RuntimeException("Failed to get PostgreSQL overview", e);
|
||||
}
|
||||
}
|
||||
|
||||
public List<TenantPgStats> getPostgresTenantStats() {
|
||||
String sql = """
|
||||
SELECT
|
||||
s.schema_name,
|
||||
coalesce(sum(pg_total_relation_size(quote_ident(s.schema_name) || '.' || quote_ident(t.table_name))), 0) AS schema_size,
|
||||
count(t.table_name) AS table_count,
|
||||
coalesce(sum(st.n_live_tup), 0) AS total_rows
|
||||
FROM information_schema.schemata s
|
||||
LEFT JOIN information_schema.tables t
|
||||
ON t.table_schema = s.schema_name AND t.table_type = 'BASE TABLE'
|
||||
LEFT JOIN pg_stat_user_tables st
|
||||
ON st.schemaname = s.schema_name AND st.relname = t.table_name
|
||||
WHERE s.schema_name LIKE 'tenant_%'
|
||||
GROUP BY s.schema_name
|
||||
ORDER BY schema_size DESC
|
||||
""";
|
||||
|
||||
try (Connection conn = pgConnection();
|
||||
Statement stmt = conn.createStatement();
|
||||
ResultSet rs = stmt.executeQuery(sql)) {
|
||||
|
||||
List<TenantPgStats> result = new ArrayList<>();
|
||||
while (rs.next()) {
|
||||
String schemaName = rs.getString("schema_name");
|
||||
String slug = schemaName.substring("tenant_".length());
|
||||
long schemaSize = rs.getLong("schema_size");
|
||||
int tableCount = rs.getInt("table_count");
|
||||
long totalRows = rs.getLong("total_rows");
|
||||
result.add(new TenantPgStats(slug, schemaSize, tableCount, totalRows));
|
||||
}
|
||||
return result;
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to get PostgreSQL tenant stats: {}", e.getMessage(), e);
|
||||
throw new RuntimeException("Failed to get PostgreSQL tenant stats", e);
|
||||
}
|
||||
}
|
||||
|
||||
public List<TableStats> getPostgresTenantDetail(String slug) {
|
||||
String sql = """
|
||||
SELECT
|
||||
st.relname AS table_name,
|
||||
st.n_live_tup AS row_count,
|
||||
pg_table_size(quote_ident(st.schemaname) || '.' || quote_ident(st.relname)) AS data_size,
|
||||
pg_indexes_size(quote_ident(st.schemaname) || '.' || quote_ident(st.relname)) AS index_size
|
||||
FROM pg_stat_user_tables st
|
||||
WHERE st.schemaname = ?
|
||||
ORDER BY data_size DESC
|
||||
""";
|
||||
|
||||
String schema = "tenant_" + slug;
|
||||
try (Connection conn = pgConnection();
|
||||
PreparedStatement ps = conn.prepareStatement(sql)) {
|
||||
|
||||
ps.setString(1, schema);
|
||||
try (ResultSet rs = ps.executeQuery()) {
|
||||
List<TableStats> result = new ArrayList<>();
|
||||
while (rs.next()) {
|
||||
result.add(new TableStats(
|
||||
rs.getString("table_name"),
|
||||
rs.getLong("row_count"),
|
||||
rs.getLong("data_size"),
|
||||
rs.getLong("index_size")
|
||||
));
|
||||
}
|
||||
return result;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to get PostgreSQL tenant detail for '{}': {}", slug, e.getMessage(), e);
|
||||
throw new RuntimeException("Failed to get PostgreSQL tenant detail for: " + slug, e);
|
||||
}
|
||||
}
|
||||
|
||||
// --- ClickHouse methods ---
|
||||
|
||||
public ClickHouseOverview getClickHouseOverview() {
|
||||
try (Connection conn = chConnection();
|
||||
Statement stmt = conn.createStatement()) {
|
||||
|
||||
String version;
|
||||
long uptimeSeconds;
|
||||
try (ResultSet rs = stmt.executeQuery("SELECT version(), uptime()")) {
|
||||
rs.next();
|
||||
version = rs.getString(1);
|
||||
uptimeSeconds = rs.getLong(2);
|
||||
}
|
||||
|
||||
long totalDiskBytes;
|
||||
long totalUncompressedBytes;
|
||||
long totalRows;
|
||||
try (ResultSet rs = stmt.executeQuery(
|
||||
"SELECT sum(bytes_on_disk), sum(data_uncompressed_bytes), sum(rows) " +
|
||||
"FROM system.parts WHERE database = currentDatabase() AND active")) {
|
||||
rs.next();
|
||||
totalDiskBytes = rs.getLong(1);
|
||||
totalUncompressedBytes = rs.getLong(2);
|
||||
totalRows = rs.getLong(3);
|
||||
}
|
||||
|
||||
double compressionRatio = totalDiskBytes == 0 ? 0.0
|
||||
: BigDecimal.valueOf((double) totalUncompressedBytes / totalDiskBytes)
|
||||
.setScale(2, RoundingMode.HALF_UP)
|
||||
.doubleValue();
|
||||
|
||||
int activeMerges;
|
||||
try (ResultSet rs = stmt.executeQuery(
|
||||
"SELECT count() FROM system.merges WHERE database = currentDatabase()")) {
|
||||
rs.next();
|
||||
activeMerges = rs.getInt(1);
|
||||
}
|
||||
|
||||
return new ClickHouseOverview(version, uptimeSeconds, totalDiskBytes,
|
||||
totalUncompressedBytes, compressionRatio, totalRows, activeMerges);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to get ClickHouse overview: {}", e.getMessage(), e);
|
||||
throw new RuntimeException("Failed to get ClickHouse overview", e);
|
||||
}
|
||||
}
|
||||
|
||||
public List<TenantChStats> getClickHouseTenantStats() {
|
||||
// tenantId -> tableName -> count
|
||||
Map<String, Map<String, Long>> aggregated = new HashMap<>();
|
||||
|
||||
try (Connection conn = chConnection();
|
||||
Statement stmt = conn.createStatement()) {
|
||||
|
||||
for (String table : CH_TABLES) {
|
||||
try (ResultSet rs = stmt.executeQuery(
|
||||
"SELECT tenant_id, count() AS cnt FROM " + table + " GROUP BY tenant_id")) {
|
||||
while (rs.next()) {
|
||||
String tenantId = rs.getString("tenant_id");
|
||||
long cnt = rs.getLong("cnt");
|
||||
aggregated
|
||||
.computeIfAbsent(tenantId, k -> new HashMap<>())
|
||||
.put(table, cnt);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to query ClickHouse table '{}' for tenant stats: {}", table, e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to get ClickHouse tenant stats: {}", e.getMessage(), e);
|
||||
throw new RuntimeException("Failed to get ClickHouse tenant stats", e);
|
||||
}
|
||||
|
||||
List<TenantChStats> result = new ArrayList<>();
|
||||
for (Map.Entry<String, Map<String, Long>> entry : aggregated.entrySet()) {
|
||||
String tenantId = entry.getKey();
|
||||
Map<String, Long> rowsByTable = entry.getValue();
|
||||
long totalRows = rowsByTable.values().stream().mapToLong(Long::longValue).sum();
|
||||
result.add(new TenantChStats(tenantId, totalRows, rowsByTable));
|
||||
}
|
||||
result.sort(Comparator.comparingLong(TenantChStats::totalRows).reversed());
|
||||
return result;
|
||||
}
|
||||
|
||||
public List<ChTableStats> getClickHouseTenantDetail(String tenantId) {
|
||||
List<ChTableStats> result = new ArrayList<>();
|
||||
|
||||
try (Connection conn = chConnection()) {
|
||||
for (String table : CH_TABLES) {
|
||||
String sql = "SELECT count() AS cnt FROM " + table + " WHERE tenant_id = ?";
|
||||
try (PreparedStatement ps = conn.prepareStatement(sql)) {
|
||||
ps.setString(1, tenantId);
|
||||
try (ResultSet rs = ps.executeQuery()) {
|
||||
rs.next();
|
||||
result.add(new ChTableStats(table, rs.getLong("cnt")));
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to query ClickHouse table '{}' for tenant '{}': {}",
|
||||
table, tenantId, e.getMessage(), e);
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to get ClickHouse tenant detail for '{}': {}", tenantId, e.getMessage(), e);
|
||||
throw new RuntimeException("Failed to get ClickHouse tenant detail for: " + tenantId, e);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
// --- Private helpers ---
|
||||
|
||||
private Connection pgConnection() throws SQLException {
|
||||
return DriverManager.getConnection(props.datasourceUrl(), props.datasourceUsername(), props.datasourcePassword());
|
||||
}
|
||||
|
||||
private Connection chConnection() throws SQLException {
|
||||
return DriverManager.getConnection(props.clickhouseUrl(), props.clickhouseUser(), props.clickhousePassword());
|
||||
}
|
||||
}
|
||||
46
src/main/java/net/siegeln/cameleer/saas/vendor/VendorAuditController.java
vendored
Normal file
46
src/main/java/net/siegeln/cameleer/saas/vendor/VendorAuditController.java
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
package net.siegeln.cameleer.saas.vendor;
|
||||
|
||||
import net.siegeln.cameleer.saas.audit.AuditDto.AuditLogPage;
|
||||
import net.siegeln.cameleer.saas.audit.AuditService;
|
||||
import org.springframework.data.domain.PageRequest;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RequestParam;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.UUID;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/vendor/audit")
|
||||
@PreAuthorize("hasAuthority('SCOPE_platform:admin')")
|
||||
public class VendorAuditController {
|
||||
|
||||
private final AuditService auditService;
|
||||
|
||||
public VendorAuditController(AuditService auditService) {
|
||||
this.auditService = auditService;
|
||||
}
|
||||
|
||||
@GetMapping
|
||||
public ResponseEntity<AuditLogPage> list(
|
||||
@RequestParam(required = false) UUID tenantId,
|
||||
@RequestParam(required = false) String action,
|
||||
@RequestParam(required = false) String result,
|
||||
@RequestParam(required = false) String search,
|
||||
@RequestParam(required = false) Instant from,
|
||||
@RequestParam(required = false) Instant to,
|
||||
@RequestParam(defaultValue = "0") int page,
|
||||
@RequestParam(defaultValue = "25") int size) {
|
||||
|
||||
size = Math.min(size, 100);
|
||||
var pageResult = auditService.search(tenantId, action, result, from, to, search,
|
||||
PageRequest.of(page, size));
|
||||
|
||||
return ResponseEntity.ok(new AuditLogPage(
|
||||
pageResult.getContent(), pageResult.getNumber(), pageResult.getSize(),
|
||||
pageResult.getTotalElements(), pageResult.getTotalPages()));
|
||||
}
|
||||
}
|
||||
219
src/main/java/net/siegeln/cameleer/saas/vendor/VendorTenantController.java
vendored
Normal file
219
src/main/java/net/siegeln/cameleer/saas/vendor/VendorTenantController.java
vendored
Normal file
@@ -0,0 +1,219 @@
|
||||
package net.siegeln.cameleer.saas.vendor;
|
||||
|
||||
import jakarta.validation.Valid;
|
||||
import net.siegeln.cameleer.saas.identity.ServerApiClient.ServerHealthResponse;
|
||||
import net.siegeln.cameleer.saas.license.dto.LicenseResponse;
|
||||
import net.siegeln.cameleer.saas.provisioning.ServerStatus;
|
||||
import net.siegeln.cameleer.saas.tenant.TenantEntity;
|
||||
import net.siegeln.cameleer.saas.tenant.dto.CreateTenantRequest;
|
||||
import net.siegeln.cameleer.saas.tenant.dto.TenantResponse;
|
||||
import org.springframework.http.HttpStatus;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.security.core.annotation.AuthenticationPrincipal;
|
||||
import org.springframework.security.oauth2.jwt.Jwt;
|
||||
import org.springframework.web.bind.annotation.DeleteMapping;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.PathVariable;
|
||||
import org.springframework.web.bind.annotation.PostMapping;
|
||||
import org.springframework.web.bind.annotation.RequestBody;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/vendor/tenants")
|
||||
@PreAuthorize("hasAuthority('SCOPE_platform:admin')")
|
||||
public class VendorTenantController {
|
||||
|
||||
private final VendorTenantService vendorTenantService;
|
||||
|
||||
public VendorTenantController(VendorTenantService vendorTenantService) {
|
||||
this.vendorTenantService = vendorTenantService;
|
||||
}
|
||||
|
||||
// --- Response types ---
|
||||
|
||||
public record VendorTenantSummary(
|
||||
UUID id,
|
||||
String name,
|
||||
String slug,
|
||||
String tier,
|
||||
String status,
|
||||
String serverState,
|
||||
String licenseExpiry,
|
||||
String provisionError,
|
||||
int agentCount,
|
||||
int environmentCount,
|
||||
int agentLimit
|
||||
) {}
|
||||
|
||||
public record VendorTenantDetail(
|
||||
TenantResponse tenant,
|
||||
String serverState,
|
||||
boolean serverHealthy,
|
||||
String serverStatus,
|
||||
LicenseResponse license
|
||||
) {}
|
||||
|
||||
// --- Endpoints ---
|
||||
|
||||
@GetMapping
|
||||
public ResponseEntity<List<VendorTenantSummary>> listAll() {
|
||||
var tenants = vendorTenantService.listAll();
|
||||
var futures = tenants.stream().map(tenant -> java.util.concurrent.CompletableFuture.supplyAsync(() -> {
|
||||
ServerStatus status = vendorTenantService.getServerStatus(tenant);
|
||||
String licenseExpiry = vendorTenantService
|
||||
.getLicenseForTenant(tenant.getId())
|
||||
.map(l -> l.getExpiresAt() != null ? l.getExpiresAt().toString() : null)
|
||||
.orElse(null);
|
||||
int agentCount = 0;
|
||||
int environmentCount = 0;
|
||||
int agentLimit = -1;
|
||||
String endpoint = tenant.getServerEndpoint();
|
||||
boolean isActive = "ACTIVE".equals(tenant.getStatus().name());
|
||||
if (isActive && endpoint != null && !endpoint.isBlank() && "RUNNING".equals(status.state().name())) {
|
||||
var serverApi = vendorTenantService.getServerApiClient();
|
||||
agentCount = serverApi.getAgentCount(endpoint);
|
||||
environmentCount = serverApi.getEnvironmentCount(endpoint);
|
||||
}
|
||||
var license = vendorTenantService.getLicenseForTenant(tenant.getId());
|
||||
if (license.isPresent() && license.get().getLimits() != null) {
|
||||
var limits = license.get().getLimits();
|
||||
if (limits.containsKey("agents")) {
|
||||
agentLimit = ((Number) limits.get("agents")).intValue();
|
||||
}
|
||||
}
|
||||
return new VendorTenantSummary(
|
||||
tenant.getId(), tenant.getName(), tenant.getSlug(),
|
||||
tenant.getTier().name(), tenant.getStatus().name(),
|
||||
status.state().name(), licenseExpiry, tenant.getProvisionError(),
|
||||
agentCount, environmentCount, agentLimit
|
||||
);
|
||||
})).toList();
|
||||
List<VendorTenantSummary> summaries = futures.stream()
|
||||
.map(java.util.concurrent.CompletableFuture::join)
|
||||
.toList();
|
||||
return ResponseEntity.ok(summaries);
|
||||
}
|
||||
|
||||
@PostMapping
|
||||
public ResponseEntity<TenantResponse> create(@Valid @RequestBody CreateTenantRequest request,
|
||||
@AuthenticationPrincipal Jwt jwt) {
|
||||
UUID actorId = resolveActorId(jwt);
|
||||
try {
|
||||
TenantEntity tenant = vendorTenantService.createAndProvision(request, actorId);
|
||||
return ResponseEntity.status(HttpStatus.CREATED).body(TenantResponse.from(tenant));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.status(HttpStatus.CONFLICT).build();
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/{id}")
|
||||
public ResponseEntity<VendorTenantDetail> getById(@PathVariable UUID id) {
|
||||
return vendorTenantService.getById(id)
|
||||
.map(tenant -> {
|
||||
ServerStatus serverStatus = vendorTenantService.getServerStatus(tenant);
|
||||
ServerHealthResponse health = vendorTenantService.getServerHealth(tenant);
|
||||
LicenseResponse license = vendorTenantService
|
||||
.getLicenseForTenant(id)
|
||||
.map(LicenseResponse::from)
|
||||
.orElse(null);
|
||||
return ResponseEntity.ok(new VendorTenantDetail(
|
||||
TenantResponse.from(tenant),
|
||||
serverStatus.state().name(),
|
||||
health.healthy(),
|
||||
health.status(),
|
||||
license
|
||||
));
|
||||
})
|
||||
.orElse(ResponseEntity.notFound().build());
|
||||
}
|
||||
|
||||
@PostMapping("/{id}/restart")
|
||||
public ResponseEntity<Void> restart(@PathVariable UUID id) {
|
||||
try {
|
||||
vendorTenantService.restartServer(id);
|
||||
return ResponseEntity.noContent().build();
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/{id}/upgrade")
|
||||
public ResponseEntity<Void> upgrade(@PathVariable UUID id) {
|
||||
try {
|
||||
vendorTenantService.upgradeServer(id);
|
||||
return ResponseEntity.noContent().build();
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/{id}/suspend")
|
||||
public ResponseEntity<TenantResponse> suspend(@PathVariable UUID id,
|
||||
@AuthenticationPrincipal Jwt jwt) {
|
||||
UUID actorId = resolveActorId(jwt);
|
||||
try {
|
||||
TenantEntity tenant = vendorTenantService.suspend(id, actorId);
|
||||
return ResponseEntity.ok(TenantResponse.from(tenant));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/{id}/activate")
|
||||
public ResponseEntity<TenantResponse> activate(@PathVariable UUID id,
|
||||
@AuthenticationPrincipal Jwt jwt) {
|
||||
UUID actorId = resolveActorId(jwt);
|
||||
try {
|
||||
TenantEntity tenant = vendorTenantService.activate(id, actorId);
|
||||
return ResponseEntity.ok(TenantResponse.from(tenant));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@DeleteMapping("/{id}")
|
||||
public ResponseEntity<Void> delete(@PathVariable UUID id,
|
||||
@AuthenticationPrincipal Jwt jwt) {
|
||||
UUID actorId = resolveActorId(jwt);
|
||||
try {
|
||||
vendorTenantService.delete(id, actorId);
|
||||
return ResponseEntity.noContent().build();
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/{id}/license")
|
||||
public ResponseEntity<LicenseResponse> renewLicense(@PathVariable UUID id,
|
||||
@AuthenticationPrincipal Jwt jwt) {
|
||||
UUID actorId = resolveActorId(jwt);
|
||||
try {
|
||||
var license = vendorTenantService.renewLicense(id, actorId);
|
||||
return ResponseEntity.ok(LicenseResponse.from(license));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/{id}/health")
|
||||
public ResponseEntity<ServerHealthResponse> health(@PathVariable UUID id) {
|
||||
return vendorTenantService.getById(id)
|
||||
.map(tenant -> ResponseEntity.ok(vendorTenantService.getServerHealth(tenant)))
|
||||
.orElse(ResponseEntity.notFound().build());
|
||||
}
|
||||
|
||||
// --- Helpers ---
|
||||
|
||||
private UUID resolveActorId(Jwt jwt) {
|
||||
try {
|
||||
return UUID.fromString(jwt.getSubject());
|
||||
} catch (Exception e) {
|
||||
return UUID.nameUUIDFromBytes(jwt.getSubject().getBytes());
|
||||
}
|
||||
}
|
||||
}
|
||||
374
src/main/java/net/siegeln/cameleer/saas/vendor/VendorTenantService.java
vendored
Normal file
374
src/main/java/net/siegeln/cameleer/saas/vendor/VendorTenantService.java
vendored
Normal file
@@ -0,0 +1,374 @@
|
||||
package net.siegeln.cameleer.saas.vendor;
|
||||
|
||||
import net.siegeln.cameleer.saas.audit.AuditAction;
|
||||
import net.siegeln.cameleer.saas.audit.AuditService;
|
||||
import net.siegeln.cameleer.saas.identity.LogtoConfig;
|
||||
import net.siegeln.cameleer.saas.identity.LogtoManagementClient;
|
||||
import net.siegeln.cameleer.saas.identity.ServerApiClient;
|
||||
import net.siegeln.cameleer.saas.provisioning.ProvisioningProperties;
|
||||
import net.siegeln.cameleer.saas.provisioning.TenantDatabaseService;
|
||||
import net.siegeln.cameleer.saas.provisioning.TenantDataCleanupService;
|
||||
import net.siegeln.cameleer.saas.identity.ServerApiClient.ServerHealthResponse;
|
||||
import net.siegeln.cameleer.saas.license.LicenseEntity;
|
||||
import net.siegeln.cameleer.saas.license.LicenseService;
|
||||
import net.siegeln.cameleer.saas.provisioning.ProvisionResult;
|
||||
import net.siegeln.cameleer.saas.provisioning.ServerStatus;
|
||||
import net.siegeln.cameleer.saas.provisioning.TenantProvisionRequest;
|
||||
import net.siegeln.cameleer.saas.provisioning.TenantProvisioner;
|
||||
import net.siegeln.cameleer.saas.tenant.TenantEntity;
|
||||
import net.siegeln.cameleer.saas.tenant.TenantRepository;
|
||||
import net.siegeln.cameleer.saas.tenant.TenantService;
|
||||
import net.siegeln.cameleer.saas.tenant.TenantStatus;
|
||||
import net.siegeln.cameleer.saas.tenant.dto.CreateTenantRequest;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.scheduling.annotation.Async;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.transaction.annotation.Transactional;
|
||||
|
||||
import java.time.Duration;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.UUID;
|
||||
|
||||
@Service
|
||||
public class VendorTenantService {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(VendorTenantService.class);
|
||||
private static final Duration DEFAULT_LICENSE_VALIDITY = Duration.ofDays(365);
|
||||
|
||||
private final TenantService tenantService;
|
||||
private final TenantRepository tenantRepository;
|
||||
private final LicenseService licenseService;
|
||||
private final TenantProvisioner tenantProvisioner;
|
||||
private final ServerApiClient serverApiClient;
|
||||
private final LogtoManagementClient logtoClient;
|
||||
private final LogtoConfig logtoConfig;
|
||||
private final AuditService auditService;
|
||||
private final ProvisioningProperties provisioningProps;
|
||||
private final TenantDataCleanupService dataCleanupService;
|
||||
private final TenantDatabaseService tenantDatabaseService;
|
||||
|
||||
public VendorTenantService(TenantService tenantService,
|
||||
TenantRepository tenantRepository,
|
||||
LicenseService licenseService,
|
||||
TenantProvisioner tenantProvisioner,
|
||||
ServerApiClient serverApiClient,
|
||||
LogtoManagementClient logtoClient,
|
||||
LogtoConfig logtoConfig,
|
||||
AuditService auditService,
|
||||
ProvisioningProperties provisioningProps,
|
||||
TenantDataCleanupService dataCleanupService,
|
||||
TenantDatabaseService tenantDatabaseService) {
|
||||
this.tenantService = tenantService;
|
||||
this.tenantRepository = tenantRepository;
|
||||
this.licenseService = licenseService;
|
||||
this.tenantProvisioner = tenantProvisioner;
|
||||
this.serverApiClient = serverApiClient;
|
||||
this.logtoClient = logtoClient;
|
||||
this.logtoConfig = logtoConfig;
|
||||
this.auditService = auditService;
|
||||
this.provisioningProps = provisioningProps;
|
||||
this.dataCleanupService = dataCleanupService;
|
||||
this.tenantDatabaseService = tenantDatabaseService;
|
||||
}
|
||||
|
||||
@Transactional
|
||||
public TenantEntity createAndProvision(CreateTenantRequest request, UUID actorId) {
|
||||
// 1. Create tenant record (sets status = PROVISIONING) + Logto org
|
||||
TenantEntity tenant = tenantService.create(request, actorId);
|
||||
|
||||
// 2. Create initial admin user in Logto org (if credentials provided)
|
||||
if (tenant.getLogtoOrgId() != null && logtoClient.isAvailable()) {
|
||||
String ownerRoleId = logtoClient.findOrgRoleIdByName("owner");
|
||||
|
||||
// Create tenant admin
|
||||
if (request.adminUsername() != null && request.adminPassword() != null) {
|
||||
try {
|
||||
logtoClient.createUserWithPassword(
|
||||
request.adminUsername(), request.adminPassword(),
|
||||
tenant.getLogtoOrgId(), ownerRoleId);
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to create admin user for tenant {}: {}", tenant.getSlug(), e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// Register OIDC redirect URIs for the tenant's server in the Traditional Web App
|
||||
String tradAppId = logtoConfig.getTradAppId();
|
||||
if (tradAppId != null) {
|
||||
String base = provisioningProps.publicProtocol() + "://" + provisioningProps.publicHost();
|
||||
String slug = tenant.getSlug();
|
||||
logtoClient.addAppRedirectUris(tradAppId,
|
||||
List.of(base + "/t/" + slug + "/oidc/callback"),
|
||||
List.of(base + "/t/" + slug, base + "/t/" + slug + "/login?local"));
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Generate license
|
||||
LicenseEntity license = licenseService.generateLicense(tenant, DEFAULT_LICENSE_VALIDITY, actorId);
|
||||
|
||||
auditService.log(actorId, null, tenant.getId(),
|
||||
AuditAction.TENANT_CREATE, "provision:" + tenant.getSlug(),
|
||||
null, null, "SUCCESS", null);
|
||||
|
||||
// 4. Provision server asynchronously (Docker containers, health check, config push)
|
||||
if (tenantProvisioner.isAvailable()) {
|
||||
provisionAsync(tenant.getId(), tenant.getSlug(), tenant.getTier().name(), license.getToken(), actorId);
|
||||
}
|
||||
|
||||
return tenant;
|
||||
}
|
||||
|
||||
@Async
|
||||
public void provisionAsync(UUID tenantId, String slug, String tier, String licenseToken, UUID actorId) {
|
||||
try {
|
||||
// Create per-tenant PG user + schema
|
||||
String dbPassword = java.util.UUID.randomUUID().toString().replace("-", "")
|
||||
+ java.util.UUID.randomUUID().toString().replace("-", "").substring(0, 8);
|
||||
try {
|
||||
tenantDatabaseService.createTenantDatabase(slug, dbPassword);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to create tenant database for {}: {}", slug, e.getMessage(), e);
|
||||
tenantRepository.findById(tenantId).ifPresent(t -> {
|
||||
t.setProvisionError("Database setup failed: " + e.getMessage());
|
||||
tenantRepository.save(t);
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Store DB password on entity
|
||||
TenantEntity tenantForDb = tenantRepository.findById(tenantId).orElse(null);
|
||||
if (tenantForDb == null) {
|
||||
log.error("Tenant {} disappeared during provisioning", slug);
|
||||
return;
|
||||
}
|
||||
tenantForDb.setDbPassword(dbPassword);
|
||||
tenantRepository.save(tenantForDb);
|
||||
|
||||
var provisionRequest = new TenantProvisionRequest(tenantId, slug, tier, licenseToken, dbPassword);
|
||||
ProvisionResult result = tenantProvisioner.provision(provisionRequest);
|
||||
|
||||
TenantEntity tenant = tenantRepository.findById(tenantId).orElse(null);
|
||||
if (tenant == null) {
|
||||
log.error("Tenant {} disappeared during provisioning", slug);
|
||||
return;
|
||||
}
|
||||
|
||||
if (result.success()) {
|
||||
tenant.setServerEndpoint(result.serverEndpoint());
|
||||
tenant.setProvisionError(null);
|
||||
tenant.setStatus(TenantStatus.ACTIVE);
|
||||
tenantRepository.save(tenant);
|
||||
|
||||
// Push license to newly provisioned server
|
||||
try {
|
||||
serverApiClient.pushLicense(result.serverEndpoint(), licenseToken);
|
||||
} catch (Exception e) {
|
||||
log.warn("License push failed for tenant {}: {}", slug, e.getMessage());
|
||||
}
|
||||
|
||||
// Configure OIDC on the provisioned server (SSO via Logto)
|
||||
if (logtoConfig.getTradAppId() != null && logtoConfig.getTradAppSecret() != null) {
|
||||
try {
|
||||
String publicBase = provisioningProps.publicProtocol() + "://" + provisioningProps.publicHost();
|
||||
serverApiClient.pushOidcConfig(result.serverEndpoint(), Map.of(
|
||||
"enabled", true,
|
||||
"issuerUri", publicBase + "/oidc",
|
||||
"clientId", logtoConfig.getTradAppId(),
|
||||
"clientSecret", logtoConfig.getTradAppSecret(),
|
||||
"autoSignup", true,
|
||||
"defaultRoles", List.of("VIEWER"),
|
||||
"displayNameClaim", "name",
|
||||
"rolesClaim", "roles",
|
||||
"audience", "https://api.cameleer.local"
|
||||
));
|
||||
log.info("Pushed OIDC config to server for tenant {}", slug);
|
||||
} catch (Exception e) {
|
||||
log.warn("OIDC config push failed for tenant {}: {}", slug, e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
log.info("Tenant {} provisioned successfully", slug);
|
||||
} else {
|
||||
tenant.setProvisionError(result.error());
|
||||
tenantRepository.save(tenant);
|
||||
log.error("Provisioning failed for tenant {}: {}", slug, result.error());
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Unexpected error during async provisioning of tenant {}: {}", slug, e.getMessage(), e);
|
||||
tenantRepository.findById(tenantId).ifPresent(t -> {
|
||||
t.setProvisionError(e.getMessage());
|
||||
tenantRepository.save(t);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
public ServerApiClient getServerApiClient() {
|
||||
return serverApiClient;
|
||||
}
|
||||
|
||||
public List<TenantEntity> listAll() {
|
||||
return tenantService.findAll().stream()
|
||||
.filter(t -> t.getStatus() != TenantStatus.DELETED)
|
||||
.toList();
|
||||
}
|
||||
|
||||
public Optional<TenantEntity> getById(UUID id) {
|
||||
return tenantService.getById(id);
|
||||
}
|
||||
|
||||
public Optional<LicenseEntity> getLicenseForTenant(UUID tenantId) {
|
||||
return licenseService.getActiveLicense(tenantId);
|
||||
}
|
||||
|
||||
public ServerStatus getServerStatus(TenantEntity tenant) {
|
||||
if (!tenantProvisioner.isAvailable()) {
|
||||
return ServerStatus.notFound();
|
||||
}
|
||||
return tenantProvisioner.getStatus(tenant.getSlug());
|
||||
}
|
||||
|
||||
public ServerHealthResponse getServerHealth(TenantEntity tenant) {
|
||||
String endpoint = tenant.getServerEndpoint();
|
||||
if (endpoint == null || endpoint.isBlank()) {
|
||||
return new ServerHealthResponse(false, "NO_ENDPOINT");
|
||||
}
|
||||
return serverApiClient.getHealth(endpoint);
|
||||
}
|
||||
|
||||
public void restartServer(UUID tenantId) {
|
||||
TenantEntity tenant = tenantService.getById(tenantId)
|
||||
.orElseThrow(() -> new IllegalArgumentException("Tenant not found"));
|
||||
if (!tenantProvisioner.isAvailable()) return;
|
||||
|
||||
tenantProvisioner.stop(tenant.getSlug());
|
||||
try {
|
||||
tenantProvisioner.start(tenant.getSlug());
|
||||
} catch (RuntimeException e) {
|
||||
if (e.getMessage() != null && e.getMessage().contains("re-provision required")) {
|
||||
log.info("Containers missing for '{}' — re-provisioning", tenant.getSlug());
|
||||
tenantProvisioner.remove(tenant.getSlug());
|
||||
var license = licenseService.getActiveLicense(tenantId).orElse(null);
|
||||
String token = license != null ? license.getToken() : "";
|
||||
provisionAsync(tenantId, tenant.getSlug(), tenant.getTier().name(), token, null);
|
||||
return;
|
||||
}
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
public void upgradeServer(UUID tenantId) {
|
||||
TenantEntity tenant = tenantService.getById(tenantId)
|
||||
.orElseThrow(() -> new IllegalArgumentException("Tenant not found"));
|
||||
if (!tenantProvisioner.isAvailable()) return;
|
||||
|
||||
tenantProvisioner.upgrade(tenant.getSlug());
|
||||
|
||||
// Re-provision with freshly pulled images
|
||||
var license = licenseService.getActiveLicense(tenantId).orElse(null);
|
||||
String token = license != null ? license.getToken() : "";
|
||||
provisionAsync(tenantId, tenant.getSlug(), tenant.getTier().name(), token, null);
|
||||
}
|
||||
|
||||
@Transactional
|
||||
public TenantEntity suspend(UUID tenantId, UUID actorId) {
|
||||
TenantEntity tenant = tenantService.getById(tenantId)
|
||||
.orElseThrow(() -> new IllegalArgumentException("Tenant not found"));
|
||||
|
||||
if (tenantProvisioner.isAvailable()) {
|
||||
try {
|
||||
tenantProvisioner.stop(tenant.getSlug());
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to stop containers for tenant {}: {}", tenant.getSlug(), e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
return tenantService.suspend(tenantId, actorId);
|
||||
}
|
||||
|
||||
@Transactional
|
||||
public TenantEntity activate(UUID tenantId, UUID actorId) {
|
||||
TenantEntity tenant = tenantService.getById(tenantId)
|
||||
.orElseThrow(() -> new IllegalArgumentException("Tenant not found"));
|
||||
|
||||
if (tenantProvisioner.isAvailable()) {
|
||||
try {
|
||||
tenantProvisioner.start(tenant.getSlug());
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to start containers for tenant {}: {}", tenant.getSlug(), e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
return tenantService.activate(tenantId, actorId);
|
||||
}
|
||||
|
||||
@Transactional
|
||||
public void delete(UUID tenantId, UUID actorId) {
|
||||
TenantEntity tenant = tenantService.getById(tenantId)
|
||||
.orElseThrow(() -> new IllegalArgumentException("Tenant not found"));
|
||||
|
||||
// Remove containers
|
||||
if (tenantProvisioner.isAvailable()) {
|
||||
try {
|
||||
tenantProvisioner.remove(tenant.getSlug());
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to remove containers for tenant {}: {}", tenant.getSlug(), e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// Revoke license
|
||||
licenseService.revokeLicense(tenantId, actorId);
|
||||
|
||||
// Delete Logto org
|
||||
if (logtoClient.isAvailable() && tenant.getLogtoOrgId() != null) {
|
||||
try {
|
||||
logtoClient.deleteOrganization(tenant.getLogtoOrgId());
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to delete Logto org for tenant {}: {}", tenant.getSlug(), e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// Drop per-tenant PG user + database
|
||||
try {
|
||||
tenantDatabaseService.dropTenantDatabase(tenant.getSlug());
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to drop tenant database for {}: {}", tenant.getSlug(), e.getMessage());
|
||||
}
|
||||
|
||||
// Erase tenant data from ClickHouse (GDPR)
|
||||
dataCleanupService.cleanupClickHouse(tenant.getSlug());
|
||||
|
||||
// Soft-delete
|
||||
tenant.setStatus(TenantStatus.DELETED);
|
||||
tenantRepository.save(tenant);
|
||||
|
||||
auditService.log(actorId, null, tenantId,
|
||||
AuditAction.TENANT_DELETE, tenant.getSlug(),
|
||||
null, null, "SUCCESS", null);
|
||||
}
|
||||
|
||||
@Transactional
|
||||
public LicenseEntity renewLicense(UUID tenantId, UUID actorId) {
|
||||
TenantEntity tenant = tenantService.getById(tenantId)
|
||||
.orElseThrow(() -> new IllegalArgumentException("Tenant not found"));
|
||||
|
||||
// Revoke current license
|
||||
licenseService.revokeLicense(tenantId, actorId);
|
||||
|
||||
// Generate new license
|
||||
LicenseEntity newLicense = licenseService.generateLicense(tenant, DEFAULT_LICENSE_VALIDITY, actorId);
|
||||
|
||||
// Push to server
|
||||
String endpoint = tenant.getServerEndpoint();
|
||||
if (endpoint != null && !endpoint.isBlank()) {
|
||||
try {
|
||||
serverApiClient.pushLicense(endpoint, newLicense.getToken());
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to push renewed license to server for tenant {}: {}", tenant.getSlug(), e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
return newLicense;
|
||||
}
|
||||
}
|
||||
@@ -17,7 +17,9 @@ spring:
|
||||
jwk-set-uri: http://localhost:3001/oidc/jwks
|
||||
|
||||
cameleer:
|
||||
clickhouse:
|
||||
url: jdbc:clickhouse://localhost:8123/cameleer
|
||||
runtime:
|
||||
cameleer3-server-endpoint: http://localhost:8081
|
||||
saas:
|
||||
identity:
|
||||
logtoendpoint: http://localhost:3001
|
||||
serverendpoint: http://localhost:8081
|
||||
provisioning:
|
||||
clickhouseurl: jdbc:clickhouse://localhost:8123/cameleer
|
||||
|
||||
@@ -6,7 +6,7 @@ spring:
|
||||
application:
|
||||
name: cameleer-saas
|
||||
datasource:
|
||||
url: ${SPRING_DATASOURCE_URL:jdbc:postgresql://postgres:5432/cameleer_saas}
|
||||
url: ${SPRING_DATASOURCE_URL:jdbc:postgresql://cameleer-postgres:5432/cameleer_saas}
|
||||
username: ${SPRING_DATASOURCE_USERNAME:cameleer}
|
||||
password: ${SPRING_DATASOURCE_PASSWORD:cameleer_dev}
|
||||
jpa:
|
||||
@@ -20,8 +20,8 @@ spring:
|
||||
oauth2:
|
||||
resourceserver:
|
||||
jwt:
|
||||
issuer-uri: ${LOGTO_ISSUER_URI:}
|
||||
jwk-set-uri: ${LOGTO_JWK_SET_URI:}
|
||||
issuer-uri: ${cameleer.saas.provisioning.publicprotocol:https}://${cameleer.saas.provisioning.publichost:localhost}/oidc
|
||||
jwk-set-uri: ${cameleer.saas.identity.logtoendpoint:http://cameleer-logto:3001}/oidc/jwks
|
||||
|
||||
management:
|
||||
endpoints:
|
||||
@@ -33,11 +33,30 @@ management:
|
||||
show-details: when-authorized
|
||||
|
||||
cameleer:
|
||||
identity:
|
||||
logto-endpoint: ${LOGTO_ENDPOINT:}
|
||||
logto-public-endpoint: ${LOGTO_PUBLIC_ENDPOINT:}
|
||||
m2m-client-id: ${LOGTO_M2M_CLIENT_ID:}
|
||||
m2m-client-secret: ${LOGTO_M2M_CLIENT_SECRET:}
|
||||
spa-client-id: ${LOGTO_SPA_CLIENT_ID:}
|
||||
audience: ${CAMELEER_OIDC_AUDIENCE:https://api.cameleer.local}
|
||||
server-endpoint: ${CAMELEER3_SERVER_ENDPOINT:http://cameleer3-server:8081}
|
||||
saas:
|
||||
identity:
|
||||
logtoendpoint: ${CAMELEER_SAAS_IDENTITY_LOGTOENDPOINT:}
|
||||
logtopublicendpoint: ${CAMELEER_SAAS_IDENTITY_LOGTOPUBLICENDPOINT:}
|
||||
m2mclientid: ${CAMELEER_SAAS_IDENTITY_M2MCLIENTID:}
|
||||
m2mclientsecret: ${CAMELEER_SAAS_IDENTITY_M2MCLIENTSECRET:}
|
||||
spaclientid: ${CAMELEER_SAAS_IDENTITY_SPACLIENTID:}
|
||||
audience: ${CAMELEER_SAAS_IDENTITY_AUDIENCE:https://api.cameleer.local}
|
||||
serverendpoint: ${CAMELEER_SAAS_IDENTITY_SERVERENDPOINT:http://cameleer3-server:8081}
|
||||
provisioning:
|
||||
serverimage: ${CAMELEER_SAAS_PROVISIONING_SERVERIMAGE:gitea.siegeln.net/cameleer/cameleer3-server:latest}
|
||||
serveruiimage: ${CAMELEER_SAAS_PROVISIONING_SERVERUIIMAGE:gitea.siegeln.net/cameleer/cameleer3-server-ui:latest}
|
||||
networkname: ${CAMELEER_SAAS_PROVISIONING_NETWORKNAME:cameleer-saas_cameleer}
|
||||
traefiknetwork: ${CAMELEER_SAAS_PROVISIONING_TRAEFIKNETWORK:cameleer-traefik}
|
||||
publichost: ${CAMELEER_SAAS_PROVISIONING_PUBLICHOST:localhost}
|
||||
publicprotocol: ${CAMELEER_SAAS_PROVISIONING_PUBLICPROTOCOL:https}
|
||||
datasourceurl: ${CAMELEER_SAAS_PROVISIONING_DATASOURCEURL:jdbc:postgresql://cameleer-postgres:5432/cameleer3}
|
||||
datasourceusername: ${CAMELEER_SAAS_PROVISIONING_DATASOURCEUSERNAME:${POSTGRES_USER:cameleer}}
|
||||
datasourcepassword: ${CAMELEER_SAAS_PROVISIONING_DATASOURCEPASSWORD:${POSTGRES_PASSWORD:cameleer_dev}}
|
||||
clickhouseurl: ${CAMELEER_SAAS_PROVISIONING_CLICKHOUSEURL:jdbc:clickhouse://cameleer-clickhouse:8123/cameleer}
|
||||
clickhouseuser: ${CAMELEER_SAAS_PROVISIONING_CLICKHOUSEUSER:default}
|
||||
clickhousepassword: ${CAMELEER_SAAS_PROVISIONING_CLICKHOUSEPASSWORD:${CLICKHOUSE_PASSWORD:cameleer_ch}}
|
||||
oidcissueruri: ${cameleer.saas.provisioning.publicprotocol}://${cameleer.saas.provisioning.publichost}/oidc
|
||||
oidcjwkseturi: http://cameleer-logto:3001/oidc/jwks
|
||||
corsorigins: ${cameleer.saas.provisioning.publicprotocol}://${cameleer.saas.provisioning.publichost}
|
||||
certs:
|
||||
path: ${CAMELEER_SAAS_CERTS_PATH:/certs}
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
CREATE TABLE tenants (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
name VARCHAR(255) NOT NULL,
|
||||
slug VARCHAR(100) NOT NULL UNIQUE,
|
||||
tier VARCHAR(20) NOT NULL DEFAULT 'LOW',
|
||||
status VARCHAR(20) NOT NULL DEFAULT 'PROVISIONING',
|
||||
logto_org_id VARCHAR(255),
|
||||
stripe_customer_id VARCHAR(255),
|
||||
stripe_subscription_id VARCHAR(255),
|
||||
settings JSONB NOT NULL DEFAULT '{}',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX idx_tenants_slug ON tenants (slug);
|
||||
CREATE INDEX idx_tenants_status ON tenants (status);
|
||||
CREATE INDEX idx_tenants_logto_org_id ON tenants (logto_org_id);
|
||||
95
src/main/resources/db/migration/V001__init.sql
Normal file
95
src/main/resources/db/migration/V001__init.sql
Normal file
@@ -0,0 +1,95 @@
|
||||
-- Cameleer SaaS schema baseline
|
||||
-- Consolidated from V001-V015
|
||||
|
||||
-- Tenants
|
||||
CREATE TABLE tenants (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
name VARCHAR(255) NOT NULL,
|
||||
slug VARCHAR(100) NOT NULL,
|
||||
tier VARCHAR(20) NOT NULL DEFAULT 'LOW',
|
||||
status VARCHAR(20) NOT NULL DEFAULT 'PROVISIONING',
|
||||
logto_org_id VARCHAR(255),
|
||||
stripe_customer_id VARCHAR(255),
|
||||
stripe_subscription_id VARCHAR(255),
|
||||
settings JSONB NOT NULL DEFAULT '{}',
|
||||
server_endpoint VARCHAR(512),
|
||||
provision_error TEXT,
|
||||
db_password VARCHAR(255),
|
||||
ca_applied_at TIMESTAMPTZ,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX tenants_slug_active_key ON tenants (slug) WHERE status != 'DELETED';
|
||||
CREATE INDEX idx_tenants_status ON tenants (status);
|
||||
CREATE INDEX idx_tenants_logto_org_id ON tenants (logto_org_id);
|
||||
|
||||
-- Licenses
|
||||
CREATE TABLE licenses (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
|
||||
tier VARCHAR(20) NOT NULL,
|
||||
features JSONB NOT NULL DEFAULT '{}',
|
||||
limits JSONB NOT NULL DEFAULT '{}',
|
||||
issued_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
expires_at TIMESTAMPTZ NOT NULL,
|
||||
revoked_at TIMESTAMPTZ,
|
||||
token TEXT NOT NULL,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX idx_licenses_tenant_id ON licenses (tenant_id);
|
||||
CREATE INDEX idx_licenses_expires_at ON licenses (expires_at);
|
||||
|
||||
-- Audit log
|
||||
CREATE TABLE audit_log (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
actor_id UUID,
|
||||
actor_email VARCHAR(255),
|
||||
tenant_id UUID,
|
||||
action VARCHAR(100) NOT NULL,
|
||||
resource VARCHAR(500),
|
||||
environment VARCHAR(50),
|
||||
source_ip VARCHAR(45),
|
||||
result VARCHAR(20) NOT NULL DEFAULT 'SUCCESS',
|
||||
metadata JSONB,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT now()
|
||||
);
|
||||
|
||||
CREATE INDEX idx_audit_log_tenant ON audit_log (tenant_id, created_at DESC);
|
||||
CREATE INDEX idx_audit_log_actor ON audit_log (actor_id, created_at DESC);
|
||||
CREATE INDEX idx_audit_log_action ON audit_log (action, created_at DESC);
|
||||
|
||||
-- Platform TLS certificates
|
||||
CREATE TABLE certificates (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
status VARCHAR(10) NOT NULL CHECK (status IN ('ACTIVE', 'STAGED', 'ARCHIVED')),
|
||||
subject VARCHAR(500),
|
||||
issuer VARCHAR(500),
|
||||
not_before TIMESTAMPTZ,
|
||||
not_after TIMESTAMPTZ,
|
||||
fingerprint VARCHAR(128),
|
||||
has_ca BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
self_signed BOOLEAN NOT NULL DEFAULT FALSE,
|
||||
uploaded_by UUID,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
||||
activated_at TIMESTAMPTZ,
|
||||
archived_at TIMESTAMPTZ
|
||||
);
|
||||
|
||||
-- Per-tenant CA certificates
|
||||
CREATE TABLE tenant_ca_certs (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
|
||||
status VARCHAR(10) NOT NULL CHECK (status IN ('ACTIVE', 'STAGED')),
|
||||
label VARCHAR(200),
|
||||
subject VARCHAR(500),
|
||||
issuer VARCHAR(500),
|
||||
fingerprint VARCHAR(128),
|
||||
not_before TIMESTAMPTZ,
|
||||
not_after TIMESTAMPTZ,
|
||||
cert_pem TEXT NOT NULL,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT now()
|
||||
);
|
||||
|
||||
CREATE INDEX idx_tenant_ca_certs_tenant ON tenant_ca_certs(tenant_id);
|
||||
@@ -1,15 +0,0 @@
|
||||
CREATE TABLE licenses (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
|
||||
tier VARCHAR(20) NOT NULL,
|
||||
features JSONB NOT NULL DEFAULT '{}',
|
||||
limits JSONB NOT NULL DEFAULT '{}',
|
||||
issued_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
expires_at TIMESTAMPTZ NOT NULL,
|
||||
revoked_at TIMESTAMPTZ,
|
||||
token TEXT NOT NULL,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
||||
);
|
||||
|
||||
CREATE INDEX idx_licenses_tenant_id ON licenses (tenant_id);
|
||||
CREATE INDEX idx_licenses_expires_at ON licenses (expires_at);
|
||||
@@ -1,12 +0,0 @@
|
||||
CREATE TABLE environments (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
tenant_id UUID NOT NULL REFERENCES tenants(id) ON DELETE CASCADE,
|
||||
slug VARCHAR(100) NOT NULL,
|
||||
display_name VARCHAR(255) NOT NULL,
|
||||
status VARCHAR(20) NOT NULL DEFAULT 'ACTIVE',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE(tenant_id, slug)
|
||||
);
|
||||
|
||||
CREATE INDEX idx_environments_tenant_id ON environments(tenant_id);
|
||||
@@ -1,12 +0,0 @@
|
||||
CREATE TABLE api_keys (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
environment_id UUID NOT NULL REFERENCES environments(id) ON DELETE CASCADE,
|
||||
key_hash VARCHAR(64) NOT NULL,
|
||||
key_prefix VARCHAR(12) NOT NULL,
|
||||
status VARCHAR(20) NOT NULL DEFAULT 'ACTIVE',
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
|
||||
revoked_at TIMESTAMPTZ
|
||||
);
|
||||
|
||||
CREATE INDEX idx_api_keys_env ON api_keys(environment_id);
|
||||
CREATE INDEX idx_api_keys_hash ON api_keys(key_hash);
|
||||
@@ -1,18 +0,0 @@
|
||||
CREATE TABLE apps (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
environment_id UUID NOT NULL REFERENCES environments(id) ON DELETE CASCADE,
|
||||
slug VARCHAR(100) NOT NULL,
|
||||
display_name VARCHAR(255) NOT NULL,
|
||||
jar_storage_path VARCHAR(500),
|
||||
jar_checksum VARCHAR(64),
|
||||
jar_original_filename VARCHAR(255),
|
||||
jar_size_bytes BIGINT,
|
||||
exposed_port INTEGER,
|
||||
current_deployment_id UUID,
|
||||
previous_deployment_id UUID,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE(environment_id, slug)
|
||||
);
|
||||
|
||||
CREATE INDEX idx_apps_environment_id ON apps(environment_id);
|
||||
@@ -1,16 +0,0 @@
|
||||
CREATE TABLE deployments (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
app_id UUID NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
|
||||
version INTEGER NOT NULL,
|
||||
image_ref VARCHAR(500) NOT NULL,
|
||||
desired_status VARCHAR(20) NOT NULL DEFAULT 'RUNNING',
|
||||
observed_status VARCHAR(20) NOT NULL DEFAULT 'BUILDING',
|
||||
orchestrator_metadata JSONB DEFAULT '{}',
|
||||
error_message TEXT,
|
||||
deployed_at TIMESTAMPTZ,
|
||||
stopped_at TIMESTAMPTZ,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
UNIQUE(app_id, version)
|
||||
);
|
||||
|
||||
CREATE INDEX idx_deployments_app_id ON deployments(app_id);
|
||||
@@ -1,17 +0,0 @@
|
||||
CREATE TABLE audit_log (
|
||||
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
|
||||
actor_id UUID,
|
||||
actor_email VARCHAR(255),
|
||||
tenant_id UUID,
|
||||
action VARCHAR(100) NOT NULL,
|
||||
resource VARCHAR(500),
|
||||
environment VARCHAR(50),
|
||||
source_ip VARCHAR(45),
|
||||
result VARCHAR(20) NOT NULL DEFAULT 'SUCCESS',
|
||||
metadata JSONB,
|
||||
created_at TIMESTAMPTZ NOT NULL DEFAULT now()
|
||||
);
|
||||
|
||||
CREATE INDEX idx_audit_log_tenant ON audit_log (tenant_id, created_at DESC);
|
||||
CREATE INDEX idx_audit_log_actor ON audit_log (actor_id, created_at DESC);
|
||||
CREATE INDEX idx_audit_log_action ON audit_log (action, created_at DESC);
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user