Compare commits
109 Commits
0fc9c8cb4c
...
main
| Author | SHA1 | Date |
|---|---|---|
|
|
1809574fe6 | ||
|
|
858975f03f | ||
|
|
30db609aff | ||
|
|
45b5f473c9 | ||
|
|
71688dea16 | ||
|
|
b63b9aa4bb | ||
|
|
7565cdcf2f | ||
|
|
b7d390adf4 | ||
|
|
29769480be | ||
|
|
657281461d | ||
|
|
af53eca7f6 | ||
|
|
4f6e7ea4dc | ||
| 96fc55b932 | |||
|
|
cd92036f91 | ||
|
|
2f7c6aa005 | ||
|
|
f945d10d48 | ||
|
|
ddb18c4f17 | ||
|
|
f1aa1ea19f | ||
|
|
a3c0e9aa7f | ||
|
|
5216dab043 | ||
|
|
5864553fed | ||
|
|
140ea88460 | ||
|
|
581dc1ad13 | ||
|
|
e198c13e8a | ||
|
|
1e78439ddd | ||
|
|
1a307da6b2 | ||
|
|
885f2be16b | ||
|
|
945ecd78cf | ||
|
|
3f69e546e4 | ||
|
|
340d954fed | ||
|
|
484a55f4f4 | ||
|
|
cc5d88d708 | ||
|
|
046f08fe87 | ||
|
|
56bddcc747 | ||
|
|
71f3b70b86 | ||
|
|
5a579415a1 | ||
|
|
1ff30905f7 | ||
|
|
afdaee628b | ||
|
|
80dafe685b | ||
|
|
198811b752 | ||
|
|
8a64a9e04c | ||
|
|
f291d7c24d | ||
|
|
9b9b56043c | ||
|
|
4985348827 | ||
|
|
e98d790874 | ||
|
|
2bad9c3e48 | ||
|
|
6f658b6648 | ||
|
|
b95e80a24a | ||
|
|
6fbcf10ee4 | ||
|
|
2f75b2865b | ||
|
|
2e51deb511 | ||
|
|
20aefd5bf6 | ||
|
|
f6657f811b | ||
|
|
7300424a49 | ||
|
|
1ae5a1a27e | ||
|
|
896b7e6e91 | ||
|
|
0499a54ebc | ||
|
|
ddc0b686c3 | ||
|
|
cf84d80de7 | ||
|
|
2ebe4989bb | ||
|
|
551a7f12b5 | ||
|
|
ec51aef802 | ||
|
|
e0be6a069f | ||
|
|
0e512a3c0c | ||
|
|
f6b76b2d5e | ||
|
|
8e9ad47077 | ||
|
|
c5b6f2bbad | ||
| 83c3ac3ef3 | |||
| 7dd7317cb8 | |||
| 2654271494 | |||
|
|
888f589934 | ||
|
|
9aad2f3871 | ||
|
|
cbaac2bfa5 | ||
|
|
7529a9ce99 | ||
|
|
09309de982 | ||
|
|
56c41814fc | ||
|
|
68704e15b4 | ||
|
|
510206c752 | ||
|
|
58e9695b4c | ||
|
|
f27a0044f1 | ||
|
|
5c9323cfed | ||
|
|
2dcbd5a772 | ||
|
|
f9b5f235cc | ||
|
|
0b419db9f1 | ||
|
|
5f6f9e523d | ||
|
|
35319dc666 | ||
|
|
3c2409ed6e | ||
|
|
ca401363ec | ||
|
|
b5ee9e1d1f | ||
|
|
75a41929c4 | ||
|
|
d58c8cde2e | ||
|
|
64608a7677 | ||
|
|
48ce75bf38 | ||
|
|
0bbe5d6623 | ||
|
|
e1ac896a6e | ||
|
|
58009d7c23 | ||
|
|
b799d55835 | ||
|
|
166568edea | ||
|
|
f049a0a6a0 | ||
|
|
f8e382c217 | ||
|
|
c7e5c7fa2d | ||
|
|
0995ab35c4 | ||
|
|
480a53c80c | ||
|
|
d3ce5e861b | ||
|
|
e5c8fff0f9 | ||
|
|
21db92ff00 | ||
|
|
165c9f10e3 | ||
|
|
ade1733418 | ||
|
|
0cf64b2928 |
@@ -57,14 +57,14 @@ Env-scoped read-path controllers (`AlertController`, `AlertRuleController`, `Ale
|
|||||||
- `DeploymentController` — `/api/v1/environments/{envSlug}/apps/{appSlug}/deployments`. GET list / POST create (body `{ appVersionId }`) / POST `{id}/stop` / POST `{id}/promote` (body `{ targetEnvironment: slug }` — target app slug must exist in target env) / GET `{id}/logs`. All lifecycle ops (`POST /` deploy, `POST /{id}/stop`, `POST /{id}/promote`) audited under `AuditCategory.DEPLOYMENT`. Action codes: `deploy_app`, `stop_deployment`, `promote_deployment`. Acting user resolved via the `user:` prefix-strip convention; both SUCCESS and FAILURE branches write audit rows. `created_by` (TEXT, nullable) populated from `SecurityContextHolder` and surfaced on the `Deployment` DTO.
|
- `DeploymentController` — `/api/v1/environments/{envSlug}/apps/{appSlug}/deployments`. GET list / POST create (body `{ appVersionId }`) / POST `{id}/stop` / POST `{id}/promote` (body `{ targetEnvironment: slug }` — target app slug must exist in target env) / GET `{id}/logs`. All lifecycle ops (`POST /` deploy, `POST /{id}/stop`, `POST /{id}/promote`) audited under `AuditCategory.DEPLOYMENT`. Action codes: `deploy_app`, `stop_deployment`, `promote_deployment`. Acting user resolved via the `user:` prefix-strip convention; both SUCCESS and FAILURE branches write audit rows. `created_by` (TEXT, nullable) populated from `SecurityContextHolder` and surfaced on the `Deployment` DTO.
|
||||||
- `ApplicationConfigController` — `/api/v1/environments/{envSlug}`. GET `/config` (list), GET/PUT `/apps/{appSlug}/config`, GET `/apps/{appSlug}/processor-routes`, POST `/apps/{appSlug}/config/test-expression`. PUT accepts `?apply=staged|live` (default `live`). `live` saves to DB and pushes `CONFIG_UPDATE` SSE to live agents in this env (existing behavior); `staged` saves to DB only, skipping the SSE push — used by the unified app deployment page. Audit action is `stage_app_config` for staged writes, `update_app_config` for live. Invalid `apply` values return 400.
|
- `ApplicationConfigController` — `/api/v1/environments/{envSlug}`. GET `/config` (list), GET/PUT `/apps/{appSlug}/config`, GET `/apps/{appSlug}/processor-routes`, POST `/apps/{appSlug}/config/test-expression`. PUT accepts `?apply=staged|live` (default `live`). `live` saves to DB and pushes `CONFIG_UPDATE` SSE to live agents in this env (existing behavior); `staged` saves to DB only, skipping the SSE push — used by the unified app deployment page. Audit action is `stage_app_config` for staged writes, `update_app_config` for live. Invalid `apply` values return 400.
|
||||||
- `AppSettingsController` — `/api/v1/environments/{envSlug}`. GET `/app-settings` (list), GET/PUT/DELETE `/apps/{appSlug}/settings`. ADMIN/OPERATOR only.
|
- `AppSettingsController` — `/api/v1/environments/{envSlug}`. GET `/app-settings` (list), GET/PUT/DELETE `/apps/{appSlug}/settings`. ADMIN/OPERATOR only.
|
||||||
- `SearchController` — `/api/v1/environments/{envSlug}`. GET `/executions`, POST `/executions/search`, GET `/stats`, `/stats/timeseries`, `/stats/timeseries/by-app`, `/stats/timeseries/by-route`, `/stats/punchcard`, `/attributes/keys`, `/errors/top`.
|
- `SearchController` — `/api/v1/environments/{envSlug}`. GET `/executions`, POST `/executions/search`, GET `/stats`, `/stats/timeseries`, `/stats/timeseries/by-app`, `/stats/timeseries/by-route`, `/stats/punchcard`, `/attributes/keys`, `/errors/top`. GET `/executions` accepts repeat `attr` query params: `attr=order` (key-exists), `attr=order:47` (exact), `attr=order:4*` (wildcard — `*` maps to SQL LIKE `%`). First `:` splits key/value; later colons stay in the value. Invalid keys → 400. POST `/executions/search` accepts the same filters via `SearchRequest.attributeFilters` in the body.
|
||||||
- `LogQueryController` — GET `/api/v1/environments/{envSlug}/logs` (filters: source (multi, comma-split, OR-joined), level (multi, comma-split, OR-joined), application, agentId, exchangeId, logger, q, time range, instanceIds (multi, comma-split, AND-joined as WHERE instance_id IN (...) — used by the Checkpoint detail drawer to scope logs to a deployment's replicas); sort asc/desc). Cursor-paginated, returns `{ data, nextCursor, hasMore, levelCounts }`; cursor is base64url of `"{timestampIso}|{insert_id_uuid}"` — same-millisecond tiebreak via the `insert_id` UUID column on `logs`.
|
- `LogQueryController` — GET `/api/v1/environments/{envSlug}/logs` (filters: source (multi, comma-split, OR-joined), level (multi, comma-split, OR-joined), application, agentId, exchangeId, logger, q, time range, instanceIds (multi, comma-split, AND-joined as WHERE instance_id IN (...) — used by the Checkpoint detail drawer to scope logs to a deployment's replicas); sort asc/desc). Cursor-paginated, returns `{ data, nextCursor, hasMore, levelCounts }`; cursor is base64url of `"{timestampIso}|{insert_id_uuid}"` — same-millisecond tiebreak via the `insert_id` UUID column on `logs`.
|
||||||
- `RouteCatalogController` — GET `/api/v1/environments/{envSlug}/routes` (merged route catalog from registry + ClickHouse; env filter unconditional).
|
- `RouteCatalogController` — GET `/api/v1/environments/{envSlug}/routes` (merged route catalog from registry + ClickHouse; env filter unconditional).
|
||||||
- `RouteMetricsController` — GET `/api/v1/environments/{envSlug}/routes/metrics`, GET `/api/v1/environments/{envSlug}/routes/metrics/processors`.
|
- `RouteMetricsController` — GET `/api/v1/environments/{envSlug}/routes/metrics`, GET `/api/v1/environments/{envSlug}/routes/metrics/processors`.
|
||||||
- `AgentListController` — GET `/api/v1/environments/{envSlug}/agents` (registered agents with runtime metrics, filtered to env).
|
- `AgentListController` — GET `/api/v1/environments/{envSlug}/agents` (registered agents with runtime metrics, filtered to env).
|
||||||
- `AgentEventsController` — GET `/api/v1/environments/{envSlug}/agents/events` (lifecycle events; cursor-paginated, returns `{ data, nextCursor, hasMore }`; order `(timestamp DESC, insert_id DESC)`; cursor is base64url of `"{timestampIso}|{insert_id_uuid}"` — `insert_id` is a stable UUID column used as a same-millisecond tiebreak).
|
- `AgentEventsController` — GET `/api/v1/environments/{envSlug}/agents/events` (lifecycle events; cursor-paginated, returns `{ data, nextCursor, hasMore }`; order `(timestamp DESC, insert_id DESC)`; cursor is base64url of `"{timestampIso}|{insert_id_uuid}"` — `insert_id` is a stable UUID column used as a same-millisecond tiebreak).
|
||||||
- `AgentMetricsController` — GET `/api/v1/environments/{envSlug}/agents/{agentId}/metrics` (JVM/Camel metrics). Rejects cross-env agents (404) as defence-in-depth.
|
- `AgentMetricsController` — GET `/api/v1/environments/{envSlug}/agents/{agentId}/metrics` (JVM/Camel metrics). Rejects cross-env agents (404) as defence-in-depth.
|
||||||
- `DiagramRenderController` — GET `/api/v1/environments/{envSlug}/apps/{appSlug}/routes/{routeId}/diagram` (env-scoped lookup). Also GET `/api/v1/diagrams/{contentHash}/render` (flat — content hashes are globally unique).
|
- `DiagramRenderController` — GET `/api/v1/environments/{envSlug}/apps/{appSlug}/routes/{routeId}/diagram` returns the most recent diagram for (app, env, route) via `DiagramStore.findLatestContentHashForAppRoute`. Registry-independent — routes whose publishing agents were removed still resolve. Also GET `/api/v1/diagrams/{contentHash}/render` (flat — content hashes are globally unique), the point-in-time path consumed by the exchange viewer via `ExecutionDetail.diagramContentHash`.
|
||||||
- `AlertRuleController` — `/api/v1/environments/{envSlug}/alerts/rules`. GET list / POST create / GET `{id}` / PUT `{id}` / DELETE `{id}` / POST `{id}/enable` / POST `{id}/disable` / POST `{id}/render-preview` / POST `{id}/test-evaluate`. OPERATOR+ for mutations, VIEWER+ for reads. CRITICAL: attribute keys in `ExchangeMatchCondition.filter.attributes` are validated at rule-save time against `^[a-zA-Z0-9._-]+$` — they are later inlined into ClickHouse SQL. `AgentLifecycleCondition` is allowlist-only — the `AgentLifecycleEventType` enum (REGISTERED / RE_REGISTERED / DEREGISTERED / WENT_STALE / WENT_DEAD / RECOVERED) plus the record compact ctor (non-empty `eventTypes`, `withinSeconds ≥ 1`) do the validation; custom agent-emitted event types are tracked in backlog issue #145. Webhook validation: verifies `outboundConnectionId` exists and `isAllowedInEnvironment`. Null notification templates default to `""` (NOT NULL constraint). Audit: `ALERT_RULE_CHANGE`.
|
- `AlertRuleController` — `/api/v1/environments/{envSlug}/alerts/rules`. GET list / POST create / GET `{id}` / PUT `{id}` / DELETE `{id}` / POST `{id}/enable` / POST `{id}/disable` / POST `{id}/render-preview` / POST `{id}/test-evaluate`. OPERATOR+ for mutations, VIEWER+ for reads. CRITICAL: attribute keys in `ExchangeMatchCondition.filter.attributes` are validated at rule-save time against `^[a-zA-Z0-9._-]+$` — they are later inlined into ClickHouse SQL. `AgentLifecycleCondition` is allowlist-only — the `AgentLifecycleEventType` enum (REGISTERED / RE_REGISTERED / DEREGISTERED / WENT_STALE / WENT_DEAD / RECOVERED) plus the record compact ctor (non-empty `eventTypes`, `withinSeconds ≥ 1`) do the validation; custom agent-emitted event types are tracked in backlog issue #145. Webhook validation: verifies `outboundConnectionId` exists and `isAllowedInEnvironment`. Null notification templates default to `""` (NOT NULL constraint). Audit: `ALERT_RULE_CHANGE`.
|
||||||
- `AlertController` — `/api/v1/environments/{envSlug}/alerts`. GET list (inbox filtered by userId/groupIds/roleNames via `InAppInboxQuery`; optional multi-value `state`, `severity`, tri-state `acked`, tri-state `read` query params; soft-deleted rows always excluded) / GET `/unread-count` / GET `{id}` / POST `{id}/ack` / POST `{id}/read` / POST `/bulk-read` / POST `/bulk-ack` (VIEWER+) / DELETE `{id}` (OPERATOR+, soft-delete) / POST `/bulk-delete` (OPERATOR+) / POST `{id}/restore` (OPERATOR+, clears `deleted_at`). `requireLiveInstance` helper returns 404 on soft-deleted rows; `restore` explicitly fetches regardless of `deleted_at`. `BulkIdsRequest` is the shared body for bulk-read/ack/delete (`{ instanceIds }`). `AlertDto` includes `readAt`; `deletedAt` is intentionally NOT on the wire. Inbox SQL: `? = ANY(target_user_ids) OR target_group_ids && ? OR target_role_names && ?` — requires at least one matching target (no broadcast concept).
|
- `AlertController` — `/api/v1/environments/{envSlug}/alerts`. GET list (inbox filtered by userId/groupIds/roleNames via `InAppInboxQuery`; optional multi-value `state`, `severity`, tri-state `acked`, tri-state `read` query params; soft-deleted rows always excluded) / GET `/unread-count` / GET `{id}` / POST `{id}/ack` / POST `{id}/read` / POST `/bulk-read` / POST `/bulk-ack` (VIEWER+) / DELETE `{id}` (OPERATOR+, soft-delete) / POST `/bulk-delete` (OPERATOR+) / POST `{id}/restore` (OPERATOR+, clears `deleted_at`). `requireLiveInstance` helper returns 404 on soft-deleted rows; `restore` explicitly fetches regardless of `deleted_at`. `BulkIdsRequest` is the shared body for bulk-read/ack/delete (`{ instanceIds }`). `AlertDto` includes `readAt`; `deletedAt` is intentionally NOT on the wire. Inbox SQL: `? = ANY(target_user_ids) OR target_group_ids && ? OR target_role_names && ?` — requires at least one matching target (no broadcast concept).
|
||||||
- `AlertSilenceController` — `/api/v1/environments/{envSlug}/alerts/silences`. GET list / POST create / DELETE `{id}`. 422 if `endsAt <= startsAt`. OPERATOR+ for mutations, VIEWER+ for list. Audit: `ALERT_SILENCE_CHANGE`.
|
- `AlertSilenceController` — `/api/v1/environments/{envSlug}/alerts/silences`. GET list / POST create / DELETE `{id}`. 422 if `endsAt <= startsAt`. OPERATOR+ for mutations, VIEWER+ for list. Audit: `ALERT_SILENCE_CHANGE`.
|
||||||
@@ -102,13 +102,21 @@ Env-scoped read-path controllers (`AlertController`, `AlertRuleController`, `Ale
|
|||||||
- `OutboundConnectionAdminController` — `/api/v1/admin/outbound-connections`. GET list / POST create / GET `{id}` / PUT `{id}` / DELETE `{id}` / POST `{id}/test` / GET `{id}/usage`. RBAC: list/get/usage ADMIN|OPERATOR; mutations + test ADMIN.
|
- `OutboundConnectionAdminController` — `/api/v1/admin/outbound-connections`. GET list / POST create / GET `{id}` / PUT `{id}` / DELETE `{id}` / POST `{id}/test` / GET `{id}/usage`. RBAC: list/get/usage ADMIN|OPERATOR; mutations + test ADMIN.
|
||||||
- `SensitiveKeysAdminController` — GET/PUT `/api/v1/admin/sensitive-keys`. GET returns 200 or 204 if not configured. PUT accepts `{ keys: [...] }` with optional `?pushToAgents=true`. Fan-out iterates every distinct `(application, environment)` slice — intentional global baseline + per-env overrides.
|
- `SensitiveKeysAdminController` — GET/PUT `/api/v1/admin/sensitive-keys`. GET returns 200 or 204 if not configured. PUT accepts `{ keys: [...] }` with optional `?pushToAgents=true`. Fan-out iterates every distinct `(application, environment)` slice — intentional global baseline + per-env overrides.
|
||||||
- `ClaimMappingAdminController` — CRUD `/api/v1/admin/claim-mappings`, POST `/test`.
|
- `ClaimMappingAdminController` — CRUD `/api/v1/admin/claim-mappings`, POST `/test`.
|
||||||
- `LicenseAdminController` — GET/POST `/api/v1/admin/license`.
|
- `LicenseAdminController` — GET/POST `/api/v1/admin/license`. ADMIN only. GET returns `{state, invalidReason, envelope, lastValidatedAt?}` — the raw token is deliberately omitted; only the parsed `LicenseInfo` envelope is exposed. POST delegates to `LicenseService.install(token, userId, "api")` (acting userId resolved via the `user:` prefix-strip convention) — install/replace/reject all flow through `LicenseService` so audit, persistence, and `LicenseChangedEvent` publishing are uniform.
|
||||||
|
- `LicenseUsageController` — GET `/api/v1/admin/license/usage`. Returns license `state`, `expiresAt`/`daysRemaining`/`gracePeriodDays`/`tenantId`/`label`/`lastValidatedAt`, the `LicenseMessageRenderer.forState(...)` message, and a `limits[]` array (`{key, current, cap, source}`) covering every effective-limits key. `source` is `"license"` when the cap came from the license override map, `"default"` otherwise. `max_agents` reads from `AgentRegistryService.liveCount()`; all other counts come from `LicenseUsageReader.snapshot()`.
|
||||||
- `ThresholdAdminController` — CRUD `/api/v1/admin/thresholds`.
|
- `ThresholdAdminController` — CRUD `/api/v1/admin/thresholds`.
|
||||||
- `AuditLogController` — GET `/api/v1/admin/audit`.
|
- `AuditLogController` — GET `/api/v1/admin/audit`.
|
||||||
- `RbacStatsController` — GET `/api/v1/admin/rbac/stats`.
|
- `RbacStatsController` — GET `/api/v1/admin/rbac/stats`.
|
||||||
- `UsageAnalyticsController` — GET `/api/v1/admin/usage` (ClickHouse `usage_events`).
|
- `UsageAnalyticsController` — GET `/api/v1/admin/usage` (ClickHouse `usage_events`).
|
||||||
- `ClickHouseAdminController` — GET `/api/v1/admin/clickhouse/**` (conditional on `infrastructureendpoints` flag).
|
- `ClickHouseAdminController` — GET `/api/v1/admin/clickhouse/**` (conditional on `infrastructureendpoints` flag).
|
||||||
- `DatabaseAdminController` — GET `/api/v1/admin/database/**` (conditional on `infrastructureendpoints` flag).
|
- `DatabaseAdminController` — GET `/api/v1/admin/database/**` (conditional on `infrastructureendpoints` flag).
|
||||||
|
- `ServerMetricsAdminController` — `/api/v1/admin/server-metrics/**`. GET `/catalog`, GET `/instances`, POST `/query`. Generic read API over the `server_metrics` ClickHouse table so SaaS dashboards don't need direct CH access. Delegates to `ServerMetricsQueryStore` (impl `ClickHouseServerMetricsQueryStore`). Visibility matches ClickHouse/Database admin: `@ConditionalOnProperty(infrastructureendpoints, matchIfMissing=true)` + class-level `@PreAuthorize("hasRole('ADMIN')")`. Validation: metric/tag regex `^[a-zA-Z0-9._]+$`, statistic regex `^[a-z_]+$`, `to - from ≤ 31 days`, stepSeconds ∈ [10, 3600], response capped at 500 series. `IllegalArgumentException` → 400. `/query` supports `raw` + `delta` modes (delta does per-`server_instance_id` positive-clipped differences, then aggregates across instances). Derived `statistic=mean` for timers computes `sum(total|total_time)/sum(count)` per bucket.
|
||||||
|
|
||||||
|
### Auth (flat)
|
||||||
|
|
||||||
|
- `UiAuthController` — `/api/v1/auth` (login, refresh, me). Local username/password against env-var admin or DB BCrypt hash. Lockout after 5 failed attempts.
|
||||||
|
- `OidcAuthController` — `/api/v1/auth/oidc` (config, callback). Code → token exchange. Roles via custom JWT claim, claim mapping rules, or default roles.
|
||||||
|
- `AuthCapabilitiesController` — `GET /api/v1/auth/capabilities` (unauthenticated). Reports `{oidc:{enabled, providerName, primary}, localAccounts:{enabled, adminRecoveryOnly}}` so the SPA renders the login page deterministically. `oidc.primary == oidc.enabled`; `localAccounts.adminRecoveryOnly == oidc.primary`. `providerName` is best-effort label via `OidcProviderNameDeriver` (Logto / Keycloak / Auth0 / Okta / Single Sign-On). The SPA hides the local form behind `?local` when `adminRecoveryOnly` is true.
|
||||||
|
|
||||||
### Other (flat)
|
### Other (flat)
|
||||||
|
|
||||||
@@ -118,7 +126,7 @@ Env-scoped read-path controllers (`AlertController`, `AlertRuleController`, `Ale
|
|||||||
## runtime/ — Docker orchestration
|
## runtime/ — Docker orchestration
|
||||||
|
|
||||||
- `DockerRuntimeOrchestrator` — implements RuntimeOrchestrator; Docker Java client (zerodep transport), container lifecycle
|
- `DockerRuntimeOrchestrator` — implements RuntimeOrchestrator; Docker Java client (zerodep transport), container lifecycle
|
||||||
- `DeploymentExecutor` — @Async staged deploy: PRE_FLIGHT -> PULL_IMAGE -> CREATE_NETWORK -> START_REPLICAS -> HEALTH_CHECK -> SWAP_TRAFFIC -> COMPLETE. Container names are `{tenantId}-{envSlug}-{appSlug}-{replicaIndex}-{generation}`, where `generation` is the first 8 chars of the deployment UUID — old and new replicas coexist during a blue/green swap. Per-replica `CAMELEER_AGENT_INSTANCEID` env var is `{envSlug}-{appSlug}-{replicaIndex}-{generation}`. Branches on `DeploymentStrategy.fromWire(config.deploymentStrategy())`: **blue-green** (default) starts all N → waits for all healthy → stops old (partial health = FAILED, preserves old untouched); **rolling** replaces replicas one at a time with rollback only for in-flight new containers (already-replaced old stay stopped; un-replaced old keep serving). DEGRADED is now only set by `DockerEventMonitor` post-deploy, never by the executor.
|
- `DeploymentExecutor` — @Async staged deploy: PRE_FLIGHT -> PULL_IMAGE -> CREATE_NETWORK -> START_REPLICAS -> HEALTH_CHECK -> SWAP_TRAFFIC -> COMPLETE. Container names are `{tenantId}-{envSlug}-{appSlug}-{replicaIndex}-{generation}`, where `generation` is the first 8 chars of the deployment UUID — old and new replicas coexist during a blue/green swap. Per-replica `CAMELEER_AGENT_INSTANCEID` env var is `{envSlug}-{appSlug}-{replicaIndex}-{generation}`. Branches on `DeploymentStrategy.fromWire(config.deploymentStrategy())`: **blue-green** (default) starts all N → waits for all healthy → stops old (partial health = FAILED, preserves old untouched); **rolling** replaces replicas one at a time with rollback only for in-flight new containers (already-replaced old stay stopped; un-replaced old keep serving). DEGRADED is now only set by `DockerEventMonitor` post-deploy, never by the executor. **License compute caps**: at PRE_FLIGHT (after `ConfigMerger.resolve`, before image pull / container creation) the executor consults `LicenseUsageReader.computeUsage()` (PG aggregate over non-stopped deployments) and runs three `LicenseEnforcer.assertWithinCap(...)` checks for `max_total_cpu_millis`, `max_total_memory_mb`, and `max_total_replicas`. A `LicenseCapExceededException` propagates to the surrounding `try/catch` which marks the deployment FAILED with the cap message in `deployments.error_message`.
|
||||||
- `DockerNetworkManager` — ensures bridge networks (cameleer-traefik, cameleer-env-{slug}), connects containers
|
- `DockerNetworkManager` — ensures bridge networks (cameleer-traefik, cameleer-env-{slug}), connects containers
|
||||||
- `DockerEventMonitor` — persistent Docker event stream listener (die, oom, start, stop), updates deployment status
|
- `DockerEventMonitor` — persistent Docker event stream listener (die, oom, start, stop), updates deployment status
|
||||||
- `TraefikLabelBuilder` — generates Traefik Docker labels for path-based or subdomain routing. Per-container identity labels: `cameleer.replica` (index), `cameleer.generation` (deployment-scoped 8-char id — for Prometheus/Grafana deploy-boundary annotations), `cameleer.instance-id` (`{envSlug}-{appSlug}-{replicaIndex}-{generation}`). Router/service label keys are generation-agnostic so load balancing spans old + new replicas during a blue/green overlap.
|
- `TraefikLabelBuilder` — generates Traefik Docker labels for path-based or subdomain routing. Per-container identity labels: `cameleer.replica` (index), `cameleer.generation` (deployment-scoped 8-char id — for Prometheus/Grafana deploy-boundary annotations), `cameleer.instance-id` (`{envSlug}-{appSlug}-{replicaIndex}-{generation}`). Router/service label keys are generation-agnostic so load balancing spans old + new replicas during a blue/green overlap.
|
||||||
@@ -129,6 +137,8 @@ Env-scoped read-path controllers (`AlertController`, `AlertRuleController`, `Ale
|
|||||||
## metrics/ — Prometheus observability
|
## metrics/ — Prometheus observability
|
||||||
|
|
||||||
- `ServerMetrics` — centralized business metrics: gauges (agents by state, SSE connections, buffer depths), counters (ingestion drops, agent transitions, deployment outcomes, auth failures), timers (flush duration, deployment duration). Exposed via `/api/v1/prometheus`.
|
- `ServerMetrics` — centralized business metrics: gauges (agents by state, SSE connections, buffer depths), counters (ingestion drops, agent transitions, deployment outcomes, auth failures), timers (flush duration, deployment duration). Exposed via `/api/v1/prometheus`.
|
||||||
|
- `ServerInstanceIdConfig` — `@Configuration`, exposes `@Bean("serverInstanceId") String`. Resolution precedence: `cameleer.server.instance-id` property → `HOSTNAME` env → `InetAddress.getLocalHost()` → random UUID. Fixed at boot; rotates across restarts so counters restart cleanly.
|
||||||
|
- `ServerMetricsSnapshotScheduler` — `@Scheduled(fixedDelayString = "${cameleer.server.self-metrics.interval-ms:60000}")`. Walks `MeterRegistry.getMeters()` each tick, emits one `ServerMetricSample` per `Measurement` (Timer/DistributionSummary produce multiple rows per meter — one per Micrometer `Statistic`). Skips non-finite values; logs and swallows store failures. Disabled via `cameleer.server.self-metrics.enabled=false` (`@ConditionalOnProperty`). Write-only — no query endpoint yet; inspect via `/api/v1/admin/clickhouse/query`.
|
||||||
|
|
||||||
## storage/ — PostgreSQL repositories (JdbcTemplate)
|
## storage/ — PostgreSQL repositories (JdbcTemplate)
|
||||||
|
|
||||||
@@ -145,6 +155,8 @@ Env-scoped read-path controllers (`AlertController`, `AlertRuleController`, `Ale
|
|||||||
- `ClickHouseDiagramStore`, `ClickHouseAgentEventRepository`
|
- `ClickHouseDiagramStore`, `ClickHouseAgentEventRepository`
|
||||||
- `ClickHouseUsageTracker` — usage_events for billing
|
- `ClickHouseUsageTracker` — usage_events for billing
|
||||||
- `ClickHouseRouteCatalogStore` — persistent route catalog with first_seen cache, warm-loaded on startup
|
- `ClickHouseRouteCatalogStore` — persistent route catalog with first_seen cache, warm-loaded on startup
|
||||||
|
- `ClickHouseServerMetricsStore` — periodic dumps of the server's own Micrometer registry into the `server_metrics` table. Tenant-stamped (bound at the scheduler, not the bean); no `environment` column (server straddles envs). Batch-insert via `JdbcTemplate.batchUpdate` with `Map(String, String)` tag binding. Written by `ServerMetricsSnapshotScheduler`.
|
||||||
|
- `ClickHouseServerMetricsQueryStore` — read side of `server_metrics` for dashboards. Implements `ServerMetricsQueryStore`. `catalog(from,to)` returns name+type+statistics+tagKeys, `listInstances(from,to)` returns server_instance_ids with first/last seen, `query(request)` builds bucketed time-series with `raw` or `delta` mode and supports a derived `mean` statistic for timers. All identifier inputs regex-validated; tenant_id always bound; max range 31 days; series count capped at 500. Exposed via `ServerMetricsAdminController`.
|
||||||
|
|
||||||
## search/ — ClickHouse search and log stores
|
## search/ — ClickHouse search and log stores
|
||||||
|
|
||||||
@@ -196,10 +208,27 @@ Env-scoped read-path controllers (`AlertController`, `AlertRuleController`, `Ale
|
|||||||
- `dto/OutboundConnectionTestResult` — result of POST `/{id}/test`: status, latencyMs, responseSnippet (first 512 chars), tlsProtocol/cipherSuite/peerCertSubject (protocol is "TLS" stub; enriched in Plan 02 follow-up), error (nullable).
|
- `dto/OutboundConnectionTestResult` — result of POST `/{id}/test`: status, latencyMs, responseSnippet (first 512 chars), tlsProtocol/cipherSuite/peerCertSubject (protocol is "TLS" stub; enriched in Plan 02 follow-up), error (nullable).
|
||||||
- `config/OutboundBeanConfig` — registers `OutboundConnectionRepository`, `SecretCipher`, `OutboundConnectionService` beans.
|
- `config/OutboundBeanConfig` — registers `OutboundConnectionRepository`, `SecretCipher`, `OutboundConnectionService` beans.
|
||||||
|
|
||||||
|
## license/ — License enforcement & lifecycle
|
||||||
|
|
||||||
|
- `LicenseService` — install / replace / revalidate mediator. `install(token, installedBy, source)` validates via `LicenseValidator`, on failure marks the gate INVALID + audits `reject_license` + publishes `LicenseChangedEvent` and rethrows; on success persists via `LicenseRepository.upsert(...)`, mutates `LicenseGate`, audits `install_license` or `replace_license` (detects existing row), and publishes `LicenseChangedEvent`. `loadInitial(envToken, fileToken)` boot precedence env > file > DB; ABSENT publishes a `LicenseChangedEvent(ABSENT, null)`. `revalidate()` re-runs validation against the persisted token, on success bumps `last_validated_at`; on failure marks INVALID and audits `revalidate_license` FAILURE. `getTenantId()` exposes the tenant for downstream lookups.
|
||||||
|
- `LicenseRepository` — interface in `app/license`. `Optional<LicenseRecord> findByTenantId(String)`, `void upsert(LicenseRecord)`, `int touchValidated(String tenantId, Instant)`, `int delete(String)`.
|
||||||
|
- `LicenseRecord` — record persisted in PG `license` table: `(String tenantId, String token, UUID licenseId, Instant installedAt, String installedBy, Instant expiresAt, Instant lastValidatedAt)`.
|
||||||
|
- `PostgresLicenseRepository` — JdbcTemplate impl of `LicenseRepository`. Targets PG `license` table (V5). Upsert via `INSERT ... ON CONFLICT (tenant_id) DO UPDATE`.
|
||||||
|
- `LicenseChangedEvent` — Spring application event: `(LicenseState state, LicenseInfo current)`. Published on every install / replace / revalidate / boot-time ABSENT path so downstream listeners (retention policy, metrics, etc.) react uniformly.
|
||||||
|
- `LicenseEnforcer` — `@Component`. `assertWithinCap(String limitKey, long currentUsage, long requestedDelta)` consults `LicenseGate.getEffectiveLimits()`. On overflow increments `cameleer_license_cap_rejections_total{limit=...}`, emits an `AuditCategory.LICENSE / cap_exceeded` audit row when `AuditService` is wired (try/catch + log.warn so audit-write failures don't suppress the 403), and throws `LicenseCapExceededException`. Unknown limit keys propagate `IllegalArgumentException` from `LicenseLimits.get(...)` (programmer error, not a 403).
|
||||||
|
- `LicenseUsageReader` — `@Component` over PG. `snapshot()` returns a `Map<String,Long>` of (max_environments, max_apps, max_users, max_outbound_connections, max_alert_rules, max_total_cpu_millis, max_total_memory_mb, max_total_replicas) from PG row counts and a SUM over non-stopped deployments' `deployed_config_snapshot.containerConfig` (replicas × cpuLimit / memoryLimitMb). `computeUsage()` returns the typed `ComputeUsage(cpuMillis, memoryMb, replicas)` tuple consumed by `DeploymentExecutor` PRE_FLIGHT cap checks. `agentCount(int)` echoes a registry-supplied live count (registry is in-memory; not stored in PG).
|
||||||
|
- `LicenseCapExceededException` — typed `RuntimeException(limitKey, current, cap)` with accessors. Mapped to HTTP 403 by `LicenseExceptionAdvice`.
|
||||||
|
- `LicenseExceptionAdvice` — `@ControllerAdvice` mapping `LicenseCapExceededException` → 403 with body `{error:"license cap reached", limit, current, cap, state, message}` where `message` is `LicenseMessageRenderer.forCap(state, info, limit, current, cap, invalidReason)`.
|
||||||
|
- `LicenseMessageRenderer` — pure formatter (utility class, no DI). `forCap(state, info, limit, current, cap[, invalidReason])` per-state human messages for cap-rejection responses; `forState(state, info[, invalidReason])` shorter state-only messages for the `/usage` endpoint and metrics surfaces.
|
||||||
|
- `RetentionPolicyApplier` — `@EventListener(LicenseChangedEvent.class) @Async`. For each environment × table in the static `SPECS` list (`executions`, `processor_executions`, `logs`, `agent_metrics`, `agent_events`) computes `effective = min(licenseCap, env.configuredRetentionDays)` and emits `ALTER TABLE <t> MODIFY TTL toDateTime(<col>) + INTERVAL <n> DAY DELETE WHERE environment = '<slug>'`. ClickHouse failures are logged and swallowed (best-effort; never propagates to the originating license install/revalidate). `route_diagrams` (no TTL clause) and `server_metrics` (no environment column) are intentionally excluded.
|
||||||
|
- `LicenseRevalidationJob` — `@Component`. `@Scheduled(cron = "0 0 3 * * *")` daily revalidation; `@EventListener(ApplicationReadyEvent.class) @Async` 60-second post-startup tick to catch ABSENT→ACTIVE when a license was inserted between server starts. Both paths call `LicenseService.revalidate()` and swallow scheduler-thread crashes.
|
||||||
|
- `LicenseMetrics` — `@Component`. Registers Micrometer gauges: `cameleer_license_state{state=...}` (one-hot per `LicenseState`), `cameleer_license_days_remaining` (negative when ABSENT/INVALID), `cameleer_license_last_validated_age_seconds` (0 when no DB row). Refreshed eagerly on `LicenseChangedEvent` via `@EventListener` and lazily every 60s via `@Scheduled(fixedDelay = 60_000)`.
|
||||||
|
|
||||||
## config/ — Spring beans
|
## config/ — Spring beans
|
||||||
|
|
||||||
- `RuntimeOrchestratorAutoConfig` — conditional Docker/Disabled orchestrator + NetworkManager + EventMonitor
|
- `RuntimeOrchestratorAutoConfig` — conditional Docker/Disabled orchestrator + NetworkManager + EventMonitor
|
||||||
- `RuntimeBeanConfig` — DeploymentExecutor, AppService, EnvironmentService
|
- `RuntimeBeanConfig` — DeploymentExecutor, AppService, EnvironmentService. Wires `CreateGuard` instances per service from `LicenseEnforcer.assertWithinCap(...)` so creation paths (Environment, App, Agent) consult license caps without core depending on the app module.
|
||||||
- `SecurityBeanConfig` — JwtService, Ed25519, BootstrapTokenValidator
|
- `SecurityBeanConfig` — JwtService, Ed25519, BootstrapTokenValidator
|
||||||
- `StorageBeanConfig` — all repositories
|
- `StorageBeanConfig` — all repositories
|
||||||
- `ClickHouseConfig` — ClickHouse JdbcTemplate, schema initializer
|
- `ClickHouseConfig` — ClickHouse JdbcTemplate, schema initializer
|
||||||
|
- `LicenseBeanConfig` — license bean topology in dependency order: `LicenseGate` → `LicenseValidator` (when `cameleer.server.license.publickey` is unset, an always-failing override is returned so any loaded token still routes through `install()` and is audited as INVALID, never silently dropped) → `LicenseService` → `LicenseBootLoader` (`@PostConstruct` drives `loadInitial(envToken, fileToken)` once the context is ready; resolution order env var > license file > persisted DB row).
|
||||||
|
|||||||
@@ -26,7 +26,7 @@ paths:
|
|||||||
|
|
||||||
- `App` — record: id, environmentId, slug, displayName, containerConfig (JSONB)
|
- `App` — record: id, environmentId, slug, displayName, containerConfig (JSONB)
|
||||||
- `AppVersion` — record: id, appId, version, jarPath, detectedRuntimeType, detectedMainClass
|
- `AppVersion` — record: id, appId, version, jarPath, detectedRuntimeType, detectedMainClass
|
||||||
- `Environment` — record: id, slug, displayName, production, enabled, defaultContainerConfig, jarRetentionCount, color, createdAt. `color` is one of the 8 preset palette values validated by `EnvironmentColor.VALUES` and CHECK-constrained in PostgreSQL (V2 migration).
|
- `Environment` — record: id, slug, displayName, production, enabled, defaultContainerConfig, jarRetentionCount, color, createdAt, executionRetentionDays, logRetentionDays, metricRetentionDays. `color` is one of the 8 preset palette values validated by `EnvironmentColor.VALUES` and CHECK-constrained in PostgreSQL (V2 migration). The 3 retention day fields (V5) are `int`-typed (not nullable, since unlimited has no use-case), default to 1 day per the V5 `NOT NULL DEFAULT 1`, validated >= 1 in the canonical constructor.
|
||||||
- `EnvironmentColor` — constants: `DEFAULT = "slate"`, `VALUES = {slate,red,amber,green,teal,blue,purple,pink}`, `isValid(String)`.
|
- `EnvironmentColor` — constants: `DEFAULT = "slate"`, `VALUES = {slate,red,amber,green,teal,blue,purple,pink}`, `isValid(String)`.
|
||||||
- `Deployment` — record: id, appId, appVersionId, environmentId, status, targetState, deploymentStrategy, replicaStates (JSONB), deployStage, containerId, containerName, createdBy (String, user_id reference; nullable for pre-V4 historical rows)
|
- `Deployment` — record: id, appId, appVersionId, environmentId, status, targetState, deploymentStrategy, replicaStates (JSONB), deployStage, containerId, containerName, createdBy (String, user_id reference; nullable for pre-V4 historical rows)
|
||||||
- `DeploymentStatus` — enum: STOPPED, STARTING, RUNNING, DEGRADED, STOPPING, FAILED. `DEGRADED` is reserved for post-deploy drift (a replica died after RUNNING); `DeploymentExecutor` now marks partial-healthy deploys FAILED, not DEGRADED.
|
- `DeploymentStatus` — enum: STOPPED, STARTING, RUNNING, DEGRADED, STOPPING, FAILED. `DEGRADED` is reserved for post-deploy drift (a replica died after RUNNING); `DeploymentExecutor` now marks partial-healthy deploys FAILED, not DEGRADED.
|
||||||
@@ -37,24 +37,41 @@ paths:
|
|||||||
- `RuntimeDetector` — probes JAR files at upload time: detects runtime from manifest Main-Class (Spring Boot loader, Quarkus entry point, plain Java) or native binary (non-ZIP magic bytes)
|
- `RuntimeDetector` — probes JAR files at upload time: detects runtime from manifest Main-Class (Spring Boot loader, Quarkus entry point, plain Java) or native binary (non-ZIP magic bytes)
|
||||||
- `ContainerRequest` — record: 20 fields for Docker container creation (includes runtimeType, customArgs, mainClass)
|
- `ContainerRequest` — record: 20 fields for Docker container creation (includes runtimeType, customArgs, mainClass)
|
||||||
- `ContainerStatus` — record: state, running, exitCode, error
|
- `ContainerStatus` — record: state, running, exitCode, error
|
||||||
- `ResolvedContainerConfig` — record: typed config with memoryLimitMb, memoryReserveMb, cpuRequest, cpuLimit, appPort, exposedPorts, customEnvVars, stripPathPrefix, sslOffloading, routingMode, routingDomain, serverUrl, replicas, deploymentStrategy, routeControlEnabled, replayEnabled, runtimeType, customArgs, extraNetworks
|
- `ResolvedContainerConfig` — record: typed config with memoryLimitMb, memoryReserveMb, cpuRequest, cpuLimit, appPort, exposedPorts, customEnvVars, stripPathPrefix, sslOffloading, routingMode, routingDomain, serverUrl, replicas, deploymentStrategy, routeControlEnabled, replayEnabled, runtimeType, customArgs, extraNetworks, externalRouting (default `true`; when `false`, `TraefikLabelBuilder` strips all `traefik.*` labels so the container is not publicly routed), certResolver (server-wide, sourced from `CAMELEER_SERVER_RUNTIME_CERTRESOLVER`; when blank the `tls.certresolver` label is omitted — use for dev installs with a static TLS store)
|
||||||
- `RoutingMode` — enum for routing strategies
|
- `RoutingMode` — enum for routing strategies
|
||||||
- `ConfigMerger` — pure function: resolve(globalDefaults, envConfig, appConfig) -> ResolvedContainerConfig
|
- `ConfigMerger` — pure function: resolve(globalDefaults, envConfig, appConfig) -> ResolvedContainerConfig
|
||||||
- `RuntimeOrchestrator` — interface: startContainer, stopContainer, getContainerStatus, getLogs, startLogCapture, stopLogCapture
|
- `RuntimeOrchestrator` — interface: startContainer, stopContainer, getContainerStatus, getLogs, startLogCapture, stopLogCapture
|
||||||
- `AppRepository`, `AppVersionRepository`, `EnvironmentRepository`, `DeploymentRepository` — repository interfaces
|
- `AppRepository`, `AppVersionRepository`, `EnvironmentRepository`, `DeploymentRepository` — repository interfaces
|
||||||
- `AppService`, `EnvironmentService` — domain services
|
- `AppService`, `EnvironmentService` — domain services
|
||||||
|
- `CreateGuard` — `@FunctionalInterface`. `void check(long current)` — implementations throw to abort creation. `NOOP` constant is the default. Consulted by `EnvironmentService.create`, `AppService.createApp`, and `AgentRegistryService.register` so license caps can be enforced from the app module without leaking Spring or app-only types into core. Wired in `RuntimeBeanConfig` to a `LicenseEnforcer.assertWithinCap(...)` call per limit key.
|
||||||
|
|
||||||
|
## license/ — License domain (signed-token tier system)
|
||||||
|
|
||||||
|
The pure license **contract types** live in the separate `cameleer-license-api` module under package `com.cameleer.license` (no Spring, no server-runtime deps) so consumers like `cameleer-license-minter` and `cameleer-saas` can use them without inheriting server internals. Server-core only contains the runtime state holder (`LicenseGate`).
|
||||||
|
|
||||||
|
Contract types in `cameleer-license-api` (package `com.cameleer.license`):
|
||||||
|
- `LicenseInfo` — record: `(UUID licenseId, String tenantId, String label, Map<String,Integer> limits, Instant issuedAt, Instant expiresAt, int gracePeriodDays)`. `isExpired()` true once `now > expiresAt + gracePeriodDays`; `isAfterRawExpiry()` true once `now > expiresAt`. Constructed via `LicenseValidator`; canonical ctor null-checks all required fields and rejects blank tenantId / negative grace.
|
||||||
|
- `LicenseLimits` — typed limits container backed by `Map<String,Integer>`. `defaultsOnly()` returns the `DefaultTierLimits.DEFAULTS` view; `mergeOverDefaults(overrides)` produces the license-overrides UNION default tier. `get(String key)` returns the cap; throws `IllegalArgumentException` for unknown keys (programmer error). `isDefaultSourced(key, license)` reports whether a key fell through to the default tier.
|
||||||
|
- `DefaultTierLimits` — unmodifiable, insertion-ordered (`LinkedHashMap`-backed) map of constants for the no-license fallback tier: `max_environments=1, max_apps=3, max_agents=5, max_users=3, max_outbound_connections=1, max_alert_rules=2, max_total_cpu_millis=2000, max_total_memory_mb=2048, max_total_replicas=5, max_execution_retention_days=1, max_log_retention_days=1, max_metric_retention_days=1, max_jar_retention_count=3`.
|
||||||
|
- `LicenseValidator` — verifies signed token. Constructor `(String publicKeyBase64, String expectedTenantId)` decodes an X.509 Ed25519 public key. `validate(String token)` splits `payload.signature`, verifies the Ed25519 signature, parses the JSON payload, enforces `tenantId == expectedTenantId`, and returns `LicenseInfo`. Throws `SecurityException` on signature mismatch, and `IllegalArgumentException` on parse failure or an expired payload.
|
||||||
|
- `LicenseStateMachine` — pure classifier. `classify(LicenseInfo, String invalidReason)` returns `INVALID` if a reason is set, `ABSENT` if no license, `ACTIVE` if `now <= expiresAt`, `GRACE` if expired but within grace window, `EXPIRED` otherwise.
|
||||||
|
- `LicenseState` — enum: `ABSENT, ACTIVE, GRACE, EXPIRED, INVALID`.
|
||||||
|
|
||||||
|
Runtime state holder in server-core (package `com.cameleer.server.core.license`):
|
||||||
|
- `LicenseGate` — runtime state holder (thread-safe via `AtomicReference<Snapshot>`). `getCurrent()` returns the current `LicenseInfo` (null when ABSENT/INVALID); `getState()` delegates to `LicenseStateMachine.classify(...)`; `getEffectiveLimits()` returns license-overrides UNION defaults in `ACTIVE`/`GRACE`, defaults-only otherwise. `getInvalidReason()`, `load(LicenseInfo)`, `markInvalid(String reason)`, `clear()` are the mutators. `getLimit(key, defaultValue)` shorthand swallows unknown-key errors.
|
||||||
|
|
||||||
## search/ — Execution search and stats
|
## search/ — Execution search and stats
|
||||||
|
|
||||||
- `SearchService` — search, count, stats, statsForApp, statsForRoute, timeseries, timeseriesForApp, timeseriesForRoute, timeseriesGroupedByApp, timeseriesGroupedByRoute, slaCompliance, slaCountsByApp, slaCountsByRoute, topErrors, activeErrorTypes, punchcard, distinctAttributeKeys. `statsForRoute`/`timeseriesForRoute` take `(routeId, applicationId)` — app filter is applied to `stats_1m_route`.
|
- `SearchService` — search, count, stats, statsForApp, statsForRoute, timeseries, timeseriesForApp, timeseriesForRoute, timeseriesGroupedByApp, timeseriesGroupedByRoute, slaCompliance, slaCountsByApp, slaCountsByRoute, topErrors, activeErrorTypes, punchcard, distinctAttributeKeys. `statsForRoute`/`timeseriesForRoute` take `(routeId, applicationId)` — app filter is applied to `stats_1m_route`.
|
||||||
- `SearchRequest` / `SearchResult` — search DTOs
|
- `SearchRequest` / `SearchResult` — search DTOs. `SearchRequest.attributeFilters: List<AttributeFilter>` carries structured facet filters for execution attributes — key-only (exists), exact (key=value), or wildcard (`*` in value). The 21-arg legacy ctor is preserved to avoid call-site churn; the compact ctor normalises null → `List.of()`.
|
||||||
|
- `AttributeFilter(key, value)` — record with key regex `^[a-zA-Z0-9._-]+$` (inlined into SQL, same constraint as alerting), `value == null` means key-exists, `value` containing `*` becomes a SQL LIKE pattern via `toLikePattern()`.
|
||||||
- `ExecutionStats`, `ExecutionSummary` — stats aggregation records
|
- `ExecutionStats`, `ExecutionSummary` — stats aggregation records
|
||||||
- `StatsTimeseries`, `TopError` — timeseries and error DTOs
|
- `StatsTimeseries`, `TopError` — timeseries and error DTOs
|
||||||
- `LogSearchRequest` / `LogSearchResponse` — log search DTOs. `LogSearchRequest.sources` / `levels` are `List<String>` (null-normalized, multi-value OR); `cursor` + `limit` + `sort` drive keyset pagination. Response carries `nextCursor` + `hasMore` + per-level `levelCounts`.
|
- `LogSearchRequest` / `LogSearchResponse` — log search DTOs. `LogSearchRequest.sources` / `levels` are `List<String>` (null-normalized, multi-value OR); `cursor` + `limit` + `sort` drive keyset pagination. Response carries `nextCursor` + `hasMore` + per-level `levelCounts`.
|
||||||
|
|
||||||
## storage/ — Storage abstractions
|
## storage/ — Storage abstractions
|
||||||
|
|
||||||
- `ExecutionStore`, `MetricsStore`, `MetricsQueryStore`, `StatsStore`, `DiagramStore`, `RouteCatalogStore`, `SearchIndex`, `LogIndex` — interfaces
|
- `ExecutionStore`, `MetricsStore`, `MetricsQueryStore`, `StatsStore`, `DiagramStore`, `RouteCatalogStore`, `SearchIndex`, `LogIndex` — interfaces. `DiagramStore.findLatestContentHashForAppRoute(appId, routeId, env)` resolves the latest diagram by (app, env, route) without consulting the agent registry, so routes whose publishing agents were removed between app versions still resolve. `findContentHashForRoute(route, instance)` is retained for the ingestion path that stamps a per-execution `diagramContentHash` at ingest time (point-in-time link from `ExecutionDetail`/`ExecutionSummary`).
|
||||||
- `RouteCatalogEntry` — record: applicationId, routeId, environment, firstSeen, lastSeen
|
- `RouteCatalogEntry` — record: applicationId, routeId, environment, firstSeen, lastSeen
|
||||||
- `LogEntryResult` — log query result record
|
- `LogEntryResult` — log query result record
|
||||||
- `model/` — `ExecutionDocument`, `MetricTimeSeries`, `MetricsSnapshot`
|
- `model/` — `ExecutionDocument`, `MetricTimeSeries`, `MetricsSnapshot`
|
||||||
@@ -80,7 +97,7 @@ paths:
|
|||||||
- `AppSettings`, `AppSettingsRepository` — per-app-per-env settings config and persistence. Record carries `(applicationId, environment, …)`; repository methods are `findByApplicationAndEnvironment`, `findByEnvironment`, `save`, `delete(appId, env)`. `AppSettings.defaults(appId, env)` produces a default instance scoped to an environment.
|
- `AppSettings`, `AppSettingsRepository` — per-app-per-env settings config and persistence. Record carries `(applicationId, environment, …)`; repository methods are `findByApplicationAndEnvironment`, `findByEnvironment`, `save`, `delete(appId, env)`. `AppSettings.defaults(appId, env)` produces a default instance scoped to an environment.
|
||||||
- `ThresholdConfig`, `ThresholdRepository` — alerting threshold config and persistence
|
- `ThresholdConfig`, `ThresholdRepository` — alerting threshold config and persistence
|
||||||
- `AuditService` — audit logging facade
|
- `AuditService` — audit logging facade
|
||||||
- `AuditRecord`, `AuditResult`, `AuditCategory` (enum: `INFRA, AUTH, USER_MGMT, CONFIG, RBAC, AGENT, OUTBOUND_CONNECTION_CHANGE, OUTBOUND_HTTP_TRUST_CHANGE, ALERT_RULE_CHANGE, ALERT_SILENCE_CHANGE, DEPLOYMENT`), `AuditRepository` — audit trail records and persistence
|
- `AuditRecord`, `AuditResult`, `AuditCategory` (enum: `INFRA, AUTH, USER_MGMT, CONFIG, RBAC, AGENT, OUTBOUND_CONNECTION_CHANGE, OUTBOUND_HTTP_TRUST_CHANGE, ALERT_RULE_CHANGE, ALERT_SILENCE_CHANGE, DEPLOYMENT, LICENSE`), `AuditRepository` — audit trail records and persistence
|
||||||
|
|
||||||
## http/ — Outbound HTTP primitives (cross-cutting)
|
## http/ — Outbound HTTP primitives (cross-cutting)
|
||||||
|
|
||||||
|
|||||||
@@ -13,7 +13,7 @@ paths:
|
|||||||
When deployed via the cameleer-saas platform, this server orchestrates customer app containers using Docker. Key components:
|
When deployed via the cameleer-saas platform, this server orchestrates customer app containers using Docker. Key components:
|
||||||
|
|
||||||
- **ConfigMerger** (`core/runtime/ConfigMerger.java`) — pure function: resolve(globalDefaults, envConfig, appConfig) -> ResolvedContainerConfig. Three-layer merge: global (application.yml) -> environment (defaultContainerConfig JSONB) -> app (containerConfig JSONB). Includes `runtimeType` (default `"auto"`) and `customArgs` (default `""`).
|
- **ConfigMerger** (`core/runtime/ConfigMerger.java`) — pure function: resolve(globalDefaults, envConfig, appConfig) -> ResolvedContainerConfig. Three-layer merge: global (application.yml) -> environment (defaultContainerConfig JSONB) -> app (containerConfig JSONB). Includes `runtimeType` (default `"auto"`) and `customArgs` (default `""`).
|
||||||
- **TraefikLabelBuilder** (`app/runtime/TraefikLabelBuilder.java`) — generates Traefik Docker labels for path-based (`/{envSlug}/{appSlug}/`) or subdomain-based (`{appSlug}-{envSlug}.{domain}`) routing. Supports strip-prefix and SSL offloading toggles. Per-replica identity labels: `cameleer.replica` (index), `cameleer.generation` (8-char deployment UUID prefix — pin Prometheus/Grafana deploy boundaries with this), `cameleer.instance-id` (`{envSlug}-{appSlug}-{replicaIndex}-{generation}`). Traefik router/service keys deliberately omit the generation so load balancing spans old + new replicas during a blue/green overlap.
|
- **TraefikLabelBuilder** (`app/runtime/TraefikLabelBuilder.java`) — generates Traefik Docker labels for path-based (`/{envSlug}/{appSlug}/`) or subdomain-based (`{appSlug}-{envSlug}.{domain}`) routing. Supports strip-prefix and SSL offloading toggles. Per-replica identity labels: `cameleer.replica` (index), `cameleer.generation` (8-char deployment UUID prefix — pin Prometheus/Grafana deploy boundaries with this), `cameleer.instance-id` (`{envSlug}-{appSlug}-{replicaIndex}-{generation}`). Traefik router/service keys deliberately omit the generation so load balancing spans old + new replicas during a blue/green overlap. When `ResolvedContainerConfig.externalRouting()` is `false` (UI: Resources → External Routing, default `true`), the builder emits ONLY the identity labels (`managed-by`, `cameleer.*`) and skips every `traefik.*` label — the container stays on `cameleer-traefik` and the per-env network (so sibling containers can still reach it via Docker DNS) but is invisible to Traefik. The `tls.certresolver` label is emitted only when `CAMELEER_SERVER_RUNTIME_CERTRESOLVER` is set to a non-blank resolver name (matching a resolver configured in the Traefik static config). When unset (dev installs backed by a static TLS store) only `tls=true` is emitted and Traefik serves the default cert from the TLS store.
|
||||||
- **PrometheusLabelBuilder** (`app/runtime/PrometheusLabelBuilder.java`) — generates Prometheus `docker_sd_configs` labels per resolved runtime type: Spring Boot `/actuator/prometheus:8081`, Quarkus/native `/q/metrics:9000`, plain Java `/metrics:9464`. Labels merged into container metadata alongside Traefik labels at deploy time.
|
- **PrometheusLabelBuilder** (`app/runtime/PrometheusLabelBuilder.java`) — generates Prometheus `docker_sd_configs` labels per resolved runtime type: Spring Boot `/actuator/prometheus:8081`, Quarkus/native `/q/metrics:9000`, plain Java `/metrics:9464`. Labels merged into container metadata alongside Traefik labels at deploy time.
|
||||||
- **DockerNetworkManager** (`app/runtime/DockerNetworkManager.java`) — manages two Docker network tiers:
|
- **DockerNetworkManager** (`app/runtime/DockerNetworkManager.java`) — manages two Docker network tiers:
|
||||||
- `cameleer-traefik` — shared network; Traefik, server, and all app containers attach here. Server joined via docker-compose with `cameleer-server` DNS alias.
|
- `cameleer-traefik` — shared network; Traefik, server, and all app containers attach here. Server joined via docker-compose with `cameleer-server` DNS alias.
|
||||||
@@ -23,6 +23,18 @@ When deployed via the cameleer-saas platform, this server orchestrates customer
|
|||||||
- **ContainerLogForwarder** (`app/runtime/ContainerLogForwarder.java`) — streams Docker container stdout/stderr to ClickHouse `logs` table with `source='container'`. Uses `docker logs --follow` per container, batches lines every 2s or 50 lines. Parses Docker timestamp prefix, infers log level via regex. `DeploymentExecutor` starts capture after each replica launches with the replica's `instanceId` (`{envSlug}-{appSlug}-{replicaIndex}-{generation}`); `DockerEventMonitor` stops capture on die/oom. 60-second max capture timeout with 30s cleanup scheduler. Thread pool of 10 daemon threads. Container logs use the same `instanceId` as the agent (set via `CAMELEER_AGENT_INSTANCEID` env var) for unified log correlation at the instance level. Instance-id changes per deployment — cross-deploy queries aggregate on `application + environment` (and optionally `replica_index`).
|
- **ContainerLogForwarder** (`app/runtime/ContainerLogForwarder.java`) — streams Docker container stdout/stderr to ClickHouse `logs` table with `source='container'`. Uses `docker logs --follow` per container, batches lines every 2s or 50 lines. Parses Docker timestamp prefix, infers log level via regex. `DeploymentExecutor` starts capture after each replica launches with the replica's `instanceId` (`{envSlug}-{appSlug}-{replicaIndex}-{generation}`); `DockerEventMonitor` stops capture on die/oom. 60-second max capture timeout with 30s cleanup scheduler. Thread pool of 10 daemon threads. Container logs use the same `instanceId` as the agent (set via `CAMELEER_AGENT_INSTANCEID` env var) for unified log correlation at the instance level. Instance-id changes per deployment — cross-deploy queries aggregate on `application + environment` (and optionally `replica_index`).
|
||||||
- **StartupLogPanel** (`ui/src/components/StartupLogPanel.tsx`) — collapsible log panel rendered below `DeploymentProgress`. Queries `/api/v1/logs?source=container&application={appSlug}&environment={envSlug}`. Auto-polls every 3s while deployment is STARTING; shows green "live" badge during polling, red "stopped" badge on FAILED. Uses `useStartupLogs` hook and `LogViewer` (design system).
|
- **StartupLogPanel** (`ui/src/components/StartupLogPanel.tsx`) — collapsible log panel rendered below `DeploymentProgress`. Queries `/api/v1/logs?source=container&application={appSlug}&environment={envSlug}`. Auto-polls every 3s while deployment is STARTING; shows green "live" badge during polling, red "stopped" badge on FAILED. Uses `useStartupLogs` hook and `LogViewer` (design system).
|
||||||
|
|
||||||
|
## Container Hardening (issue #152)
|
||||||
|
|
||||||
|
`DockerRuntimeOrchestrator.startContainer` applies an unconditional hardening contract to every tenant container — Java 17 has no SecurityManager so the JVM is not a security boundary, and isolation must live below it. Defaults are fail-closed and have no opt-out:
|
||||||
|
|
||||||
|
- `cap_drop` = every `Capability.values()` (effectively ALL — docker-java's enum has no `ALL` constant). Outbound TCP still works (no caps needed); raw sockets, ptrace, mounts, and bind <1024 are denied.
|
||||||
|
- `security_opt`: `no-new-privileges:true`, `apparmor=docker-default`. Default seccomp profile is applied implicitly when `seccomp=` is absent.
|
||||||
|
- `read_only` rootfs = true.
|
||||||
|
- `pids_limit` = 512 (`PIDS_LIMIT` constant).
|
||||||
|
- `tmpfs` mount: `/tmp` with `rw,nosuid,size=256m`. **No `noexec`** — Netty/tcnative, Snappy, LZ4, Zstd dlopen native libs from `/tmp` via `mmap(PROT_EXEC)` which `noexec` blocks. Issue #153 will add per-app `writeableVolumes` for stateful tenants (Kafka Streams etc.).
|
||||||
|
|
||||||
|
**Sandboxed runtime auto-detect**: at construction the orchestrator calls `dockerClient.infoCmd().exec().getRuntimes()` and uses `runsc` (gVisor) when present. Override with `cameleer.server.runtime.dockerruntime` (e.g. `kata` to force Kata Containers, or any other registered runtime). Empty/blank = auto. The override always wins over auto-detect. The `DockerRuntimeOrchestrator(DockerClient, String)` constructor is the canonical entry point; the single-arg constructor exists only as a convenience for tests that don't need an override.
|
||||||
|
|
||||||
## DeploymentExecutor Details
|
## DeploymentExecutor Details
|
||||||
|
|
||||||
Primary network for app containers is set via `CAMELEER_SERVER_RUNTIME_DOCKERNETWORK` env var (in SaaS mode: `cameleer-tenant-{slug}`); apps also connect to `cameleer-traefik` (routing) and `cameleer-env-{tenantId}-{envSlug}` (per-environment discovery) as additional networks. Resolves `runtimeType: auto` to concrete type from `AppVersion.detectedRuntimeType` at PRE_FLIGHT (fails deployment if unresolvable). Builds Docker entrypoint per runtime type (all JVM types use `-javaagent:/app/agent.jar -jar`, plain Java uses `-cp` with main class, native runs binary directly). Sets per-replica `CAMELEER_AGENT_INSTANCEID` env var to `{envSlug}-{appSlug}-{replicaIndex}-{generation}` so container logs and agent logs share the same instance identity. Sets `CAMELEER_AGENT_*` env vars from `ResolvedContainerConfig` (routeControlEnabled, replayEnabled, health port). These are startup-only agent properties — changing them requires redeployment.
|
Primary network for app containers is set via `CAMELEER_SERVER_RUNTIME_DOCKERNETWORK` env var (in SaaS mode: `cameleer-tenant-{slug}`); apps also connect to `cameleer-traefik` (routing) and `cameleer-env-{tenantId}-{envSlug}` (per-environment discovery) as additional networks. Resolves `runtimeType: auto` to concrete type from `AppVersion.detectedRuntimeType` at PRE_FLIGHT (fails deployment if unresolvable). Builds Docker entrypoint per runtime type (all JVM types use `-javaagent:/app/agent.jar -jar`, plain Java uses `-cp` with main class, native runs binary directly). Sets per-replica `CAMELEER_AGENT_INSTANCEID` env var to `{envSlug}-{appSlug}-{replicaIndex}-{generation}` so container logs and agent logs share the same instance identity. Sets `CAMELEER_AGENT_*` env vars from `ResolvedContainerConfig` (routeControlEnabled, replayEnabled, health port). These are startup-only agent properties — changing them requires redeployment.
|
||||||
|
|||||||
@@ -8,7 +8,9 @@ paths:
|
|||||||
|
|
||||||
# Prometheus Metrics
|
# Prometheus Metrics
|
||||||
|
|
||||||
Server exposes `/api/v1/prometheus` (unauthenticated, Prometheus text format). Spring Boot Actuator provides JVM, GC, thread pool, and `http.server.requests` metrics automatically. Business metrics via `ServerMetrics` component:
|
Server exposes `/api/v1/prometheus` (unauthenticated, Prometheus text format). Spring Boot Actuator provides JVM, GC, thread pool, and `http.server.requests` metrics automatically. Business metrics via `ServerMetrics` component.
|
||||||
|
|
||||||
|
The same `MeterRegistry` is also snapshotted to ClickHouse every 60 s by `ServerMetricsSnapshotScheduler` (see "Server self-metrics persistence" at the bottom of this file) — so historical server-health data survives restarts without an external Prometheus.
|
||||||
|
|
||||||
## Gauges (auto-polled)
|
## Gauges (auto-polled)
|
||||||
|
|
||||||
@@ -83,3 +85,23 @@ Mean processing time = `camel.route.policy.total_time / camel.route.policy.count
|
|||||||
| `cameleer.sse.reconnects.count` | counter | `instanceId` |
|
| `cameleer.sse.reconnects.count` | counter | `instanceId` |
|
||||||
| `cameleer.taps.evaluated.count` | counter | `instanceId` |
|
| `cameleer.taps.evaluated.count` | counter | `instanceId` |
|
||||||
| `cameleer.metrics.exported.count` | counter | `instanceId` |
|
| `cameleer.metrics.exported.count` | counter | `instanceId` |
|
||||||
|
|
||||||
|
## Server self-metrics persistence
|
||||||
|
|
||||||
|
`ServerMetricsSnapshotScheduler` walks `MeterRegistry.getMeters()` every 60 s (configurable via `cameleer.server.self-metrics.interval-ms`) and writes one row per Micrometer `Measurement` to the ClickHouse `server_metrics` table. Full registry is captured — Spring Boot Actuator series (`jvm.*`, `process.*`, `http.server.requests`, `hikaricp.*`, `jdbc.*`, `tomcat.*`, `logback.events`, `system.*`) plus `cameleer.*` and `alerting_*`.
|
||||||
|
|
||||||
|
**Table** (`cameleer-server-app/src/main/resources/clickhouse/init.sql`):
|
||||||
|
|
||||||
|
```
|
||||||
|
server_metrics(tenant_id, collected_at, server_instance_id,
|
||||||
|
metric_name, metric_type, statistic, metric_value,
|
||||||
|
tags Map(String,String), server_received_at)
|
||||||
|
```
|
||||||
|
|
||||||
|
- `metric_type` — lowercase Micrometer `Meter.Type` (counter, gauge, timer, distribution_summary, long_task_timer, other)
|
||||||
|
- `statistic` — Micrometer `Statistic.getTagValueRepresentation()` (value, count, total, total_time, max, mean, active_tasks, duration). Timers emit 3 rows per tick (count + total_time + max); gauges/counters emit 1 (`statistic='value'` or `'count'`).
|
||||||
|
- No `environment` column — the server is env-agnostic.
|
||||||
|
- `tenant_id` threaded from `cameleer.server.tenant.id` (single-tenant per server).
|
||||||
|
- `server_instance_id` resolved once at boot by `ServerInstanceIdConfig` (property → HOSTNAME → localhost → UUID fallback). Rotates across restarts so counter resets are unambiguous.
|
||||||
|
- TTL: 90 days (vs 365 for `agent_metrics`). Write-only in v1 — no query endpoint or UI page. Inspect via ClickHouse admin: `/api/v1/admin/clickhouse/query` or direct SQL.
|
||||||
|
- Toggle off entirely with `cameleer.server.self-metrics.enabled=false` (uses `@ConditionalOnProperty`).
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ The UI has 4 main tabs: **Exchanges**, **Dashboard**, **Runtime**, **Deployments
|
|||||||
|
|
||||||
**Admin pages** (ADMIN-only, under `/admin/`):
|
**Admin pages** (ADMIN-only, under `/admin/`):
|
||||||
- **Sensitive Keys** (`ui/src/pages/Admin/SensitiveKeysPage.tsx`) — global sensitive key masking config. Shows agent built-in defaults as outlined Badge reference, editable Tag pills for custom keys, amber-highlighted push-to-agents toggle. Keys add to (not replace) agent defaults. Per-app sensitive key additions managed via `ApplicationConfigController` API. Note: `AppConfigDetailPage.tsx` exists but is not routed in `router.tsx`.
|
- **Sensitive Keys** (`ui/src/pages/Admin/SensitiveKeysPage.tsx`) — global sensitive key masking config. Shows agent built-in defaults as outlined Badge reference, editable Tag pills for custom keys, amber-highlighted push-to-agents toggle. Keys add to (not replace) agent defaults. Per-app sensitive key additions managed via `ApplicationConfigController` API. Note: `AppConfigDetailPage.tsx` exists but is not routed in `router.tsx`.
|
||||||
|
- **Server Metrics** (`ui/src/pages/Admin/ServerMetricsAdminPage.tsx`) — dashboard over the `server_metrics` ClickHouse table. Visibility matches Database/ClickHouse pages: gated on `capabilities.infrastructureEndpoints` in `buildAdminTreeNodes`; backend is `@ConditionalOnProperty(infrastructureendpoints) + @PreAuthorize('hasRole(ADMIN)')`. Uses the generic `/api/v1/admin/server-metrics/{catalog,instances,query}` API via `ui/src/api/queries/admin/serverMetrics.ts` hooks (`useServerMetricsCatalog`, `useServerMetricsInstances`, `useServerMetricsSeries`), all three of which take a `ServerMetricsRange = { from: Date; to: Date }`. Time range is driven by the global TopBar picker via `useGlobalFilters()` — no page-local selector; bucket size auto-scales through `stepSecondsFor(windowSeconds)` (10 s up to 1 h buckets). Toolbar is just server-instance badges. Sections: Server health (agents/ingestion/auth), JVM (memory/CPU/GC/threads), HTTP & DB pools, Alerting (conditional on catalog), Deployments (conditional on catalog). Each panel is a `ThemedChart` with `Line`/`Area` children from the design system; multi-series responses are flattened into overlap rows by bucket timestamp. Alerting and Deployments rows are hidden when their metrics aren't in the catalog (zero-deploy / alerting-disabled installs).
|
||||||
|
|
||||||
## Key UI Files
|
## Key UI Files
|
||||||
|
|
||||||
|
|||||||
@@ -84,6 +84,12 @@ jobs:
|
|||||||
- name: Build and Test
|
- name: Build and Test
|
||||||
run: mvn clean verify -DskipITs -U --batch-mode
|
run: mvn clean verify -DskipITs -U --batch-mode
|
||||||
|
|
||||||
|
- name: Deploy minter to Maven registry
|
||||||
|
if: github.event_name == 'push'
|
||||||
|
run: mvn deploy -DskipTests -DskipITs --batch-mode -pl .,cameleer-license-api,cameleer-server-core,cameleer-license-minter
|
||||||
|
env:
|
||||||
|
REGISTRY_TOKEN: ${{ secrets.REGISTRY_TOKEN }}
|
||||||
|
|
||||||
docker:
|
docker:
|
||||||
needs: build
|
needs: build
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
<!-- gitnexus:start -->
|
<!-- gitnexus:start -->
|
||||||
# GitNexus — Code Intelligence
|
# GitNexus — Code Intelligence
|
||||||
|
|
||||||
This project is indexed by GitNexus as **cameleer-server** (9321 symbols, 24004 relationships, 300 execution flows). Use the GitNexus MCP tools to understand code, assess impact, and navigate safely.
|
This project is indexed by GitNexus as **cameleer-server** (9731 symbols, 24987 relationships, 300 execution flows). Use the GitNexus MCP tools to understand code, assess impact, and navigate safely.
|
||||||
|
|
||||||
> If any GitNexus tool warns the index is stale, run `npx gitnexus analyze` in terminal first.
|
> If any GitNexus tool warns the index is stale, run `npx gitnexus analyze` in terminal first.
|
||||||
|
|
||||||
|
|||||||
@@ -14,8 +14,10 @@ Cameleer Server — observability server that receives, stores, and serves Camel
|
|||||||
|
|
||||||
## Modules
|
## Modules
|
||||||
|
|
||||||
|
- `cameleer-license-api` — pure license contract types (`LicenseInfo`, `LicenseValidator`, `LicenseState`, `LicenseStateMachine`, `LicenseLimits`, `DefaultTierLimits`) under package `com.cameleer.license`. No Spring or server-runtime deps; consumed by `cameleer-server-core` (validation/runtime gate) and `cameleer-license-minter` (vendor signing) — and transitively by `cameleer-saas` via the minter — without inheriting server internals.
|
||||||
- `cameleer-server-core` — domain logic, storage interfaces, services (no Spring dependencies)
|
- `cameleer-server-core` — domain logic, storage interfaces, services (no Spring dependencies)
|
||||||
- `cameleer-server-app` — Spring Boot web app, REST controllers, SSE, persistence, Docker orchestration
|
- `cameleer-server-app` — Spring Boot web app, REST controllers, SSE, persistence, Docker orchestration
|
||||||
|
- `cameleer-license-minter` — vendor-only Ed25519 license signing library + CLI. Depends only on `cameleer-license-api` so consumers don't pull in `cameleer-server-core`.
|
||||||
|
|
||||||
## Build Commands
|
## Build Commands
|
||||||
|
|
||||||
@@ -59,6 +61,7 @@ java -jar cameleer-server-app/target/cameleer-server-app-1.0-SNAPSHOT.jar
|
|||||||
- Log processor correlation: The agent sets `cameleer.processorId` in MDC, identifying which processor node emitted a log line.
|
- Log processor correlation: The agent sets `cameleer.processorId` in MDC, identifying which processor node emitted a log line.
|
||||||
- Logging: ClickHouse JDBC set to INFO (`com.clickhouse`), HTTP client to WARN (`org.apache.hc.client5`) in application.yml
|
- Logging: ClickHouse JDBC set to INFO (`com.clickhouse`), HTTP client to WARN (`org.apache.hc.client5`) in application.yml
|
||||||
- Security: JWT auth with RBAC (AGENT/VIEWER/OPERATOR/ADMIN roles), Ed25519 config signing (key derived deterministically from JWT secret via HMAC-SHA256), bootstrap token for registration. CORS: `CAMELEER_SERVER_SECURITY_CORSALLOWEDORIGINS` (comma-separated) overrides `CAMELEER_SERVER_SECURITY_UIORIGIN` for multi-origin setups. Infrastructure access: `CAMELEER_SERVER_SECURITY_INFRASTRUCTUREENDPOINTS=false` disables Database and ClickHouse admin endpoints. Last-ADMIN guard: system prevents removal of the last ADMIN role (409 Conflict). Password policy: min 12 chars, 3-of-4 character classes, no username match. Brute-force protection: 5 failed attempts -> 15 min lockout. Token revocation: `token_revoked_before` column on users, checked in `JwtAuthenticationFilter`, set on password change.
|
- Security: JWT auth with RBAC (AGENT/VIEWER/OPERATOR/ADMIN roles), Ed25519 config signing (key derived deterministically from JWT secret via HMAC-SHA256), bootstrap token for registration. CORS: `CAMELEER_SERVER_SECURITY_CORSALLOWEDORIGINS` (comma-separated) overrides `CAMELEER_SERVER_SECURITY_UIORIGIN` for multi-origin setups. Infrastructure access: `CAMELEER_SERVER_SECURITY_INFRASTRUCTUREENDPOINTS=false` disables Database and ClickHouse admin endpoints. Last-ADMIN guard: system prevents removal of the last ADMIN role (409 Conflict). Password policy: min 12 chars, 3-of-4 character classes, no username match. Brute-force protection: 5 failed attempts -> 15 min lockout. Token revocation: `token_revoked_before` column on users, checked in `JwtAuthenticationFilter`, set on password change.
|
||||||
|
- Login routing: `GET /api/v1/auth/capabilities` (unauthenticated) tells the SPA whether OIDC is the primary entry point. When OIDC is configured, the SSO button is the primary CTA and the local form is hidden behind `?local` (admin-recovery escape hatch). Per RFC 9700 §4.4 we do **not** use `prompt=none` for primary login — that returns `login_required` for first-time users and traps them on a local form.
|
||||||
- OIDC: Optional external identity provider support (token exchange pattern). Configured via admin API/UI, stored in database (`server_config` table). Resource server mode: accepts external access tokens (Logto M2M) via JWKS validation when `CAMELEER_SERVER_SECURITY_OIDCISSUERURI` is set. Scope-based role mapping via `SystemRole.normalizeScope()`. System roles synced on every OIDC login via `applyClaimMappings()` in `OidcAuthController` (calls `clearManagedAssignments` + `assignManagedRole` on `RbacService`) — always overwrites managed role assignments; uses managed assignment origin to avoid touching group-inherited or directly-assigned roles. Supports ES384, ES256, RS256.
|
- OIDC: Optional external identity provider support (token exchange pattern). Configured via admin API/UI, stored in database (`server_config` table). Resource server mode: accepts external access tokens (Logto M2M) via JWKS validation when `CAMELEER_SERVER_SECURITY_OIDCISSUERURI` is set. Scope-based role mapping via `SystemRole.normalizeScope()`. System roles synced on every OIDC login via `applyClaimMappings()` in `OidcAuthController` (calls `clearManagedAssignments` + `assignManagedRole` on `RbacService`) — always overwrites managed role assignments; uses managed assignment origin to avoid touching group-inherited or directly-assigned roles. Supports ES384, ES256, RS256.
|
||||||
- OIDC role extraction: `OidcTokenExchanger` reads roles from the **access_token** first (JWT with `at+jwt` type), then falls back to id_token. `OidcConfig` includes `audience` (RFC 8707 resource indicator) and `additionalScopes`. All provider-specific configuration is external — no provider-specific code in the server.
|
- OIDC role extraction: `OidcTokenExchanger` reads roles from the **access_token** first (JWT with `at+jwt` type), then falls back to id_token. `OidcConfig` includes `audience` (RFC 8707 resource indicator) and `additionalScopes`. All provider-specific configuration is external — no provider-specific code in the server.
|
||||||
- Sensitive keys: Global enforced baseline for masking sensitive data in agent payloads. Merge rule: `final = global UNION per-app` (case-insensitive dedup, per-app can only add, never remove global keys).
|
- Sensitive keys: Global enforced baseline for masking sensitive data in agent payloads. Merge rule: `final = global UNION per-app` (case-insensitive dedup, per-app can only add, never remove global keys).
|
||||||
@@ -96,7 +99,7 @@ When adding, removing, or renaming classes, controllers, endpoints, UI component
|
|||||||
<!-- gitnexus:start -->
|
<!-- gitnexus:start -->
|
||||||
# GitNexus — Code Intelligence
|
# GitNexus — Code Intelligence
|
||||||
|
|
||||||
This project is indexed by GitNexus as **cameleer-server** (9321 symbols, 24004 relationships, 300 execution flows). Use the GitNexus MCP tools to understand code, assess impact, and navigate safely.
|
This project is indexed by GitNexus as **cameleer-server** (10530 symbols, 27383 relationships, 300 execution flows). Use the GitNexus MCP tools to understand code, assess impact, and navigate safely.
|
||||||
|
|
||||||
> If any GitNexus tool warns the index is stale, run `npx gitnexus analyze` in terminal first.
|
> If any GitNexus tool warns the index is stale, run `npx gitnexus analyze` in terminal first.
|
||||||
|
|
||||||
|
|||||||
2
HOWTO.md
2
HOWTO.md
@@ -494,11 +494,13 @@ Key settings in `cameleer-server-app/src/main/resources/application.yml`. All cu
|
|||||||
| `cameleer.server.runtime.enabled` | `true` | `CAMELEER_SERVER_RUNTIME_ENABLED` | Enable Docker orchestration |
|
| `cameleer.server.runtime.enabled` | `true` | `CAMELEER_SERVER_RUNTIME_ENABLED` | Enable Docker orchestration |
|
||||||
| `cameleer.server.runtime.baseimage` | `cameleer-runtime-base:latest` | `CAMELEER_SERVER_RUNTIME_BASEIMAGE` | Base Docker image for app containers |
|
| `cameleer.server.runtime.baseimage` | `cameleer-runtime-base:latest` | `CAMELEER_SERVER_RUNTIME_BASEIMAGE` | Base Docker image for app containers |
|
||||||
| `cameleer.server.runtime.dockernetwork` | `cameleer` | `CAMELEER_SERVER_RUNTIME_DOCKERNETWORK` | Primary Docker network |
|
| `cameleer.server.runtime.dockernetwork` | `cameleer` | `CAMELEER_SERVER_RUNTIME_DOCKERNETWORK` | Primary Docker network |
|
||||||
|
| `cameleer.server.runtime.dockerruntime` | *(empty = auto)* | `CAMELEER_SERVER_RUNTIME_DOCKERRUNTIME` | Container runtime override. Empty auto-detects gVisor (`runsc`) when registered with the daemon and falls back to the daemon default. Set to e.g. `kata` to force a specific runtime, or `runc` to force the default even if `runsc` is installed. |
|
||||||
| `cameleer.server.runtime.jarstoragepath` | `/data/jars` | `CAMELEER_SERVER_RUNTIME_JARSTORAGEPATH` | JAR file storage directory |
|
| `cameleer.server.runtime.jarstoragepath` | `/data/jars` | `CAMELEER_SERVER_RUNTIME_JARSTORAGEPATH` | JAR file storage directory |
|
||||||
| `cameleer.server.runtime.jardockervolume` | *(empty)* | `CAMELEER_SERVER_RUNTIME_JARDOCKERVOLUME` | Docker volume for JAR sharing |
|
| `cameleer.server.runtime.jardockervolume` | *(empty)* | `CAMELEER_SERVER_RUNTIME_JARDOCKERVOLUME` | Docker volume for JAR sharing |
|
||||||
| `cameleer.server.runtime.routingmode` | `path` | `CAMELEER_SERVER_RUNTIME_ROUTINGMODE` | `path` or `subdomain` Traefik routing |
|
| `cameleer.server.runtime.routingmode` | `path` | `CAMELEER_SERVER_RUNTIME_ROUTINGMODE` | `path` or `subdomain` Traefik routing |
|
||||||
| `cameleer.server.runtime.routingdomain` | `localhost` | `CAMELEER_SERVER_RUNTIME_ROUTINGDOMAIN` | Domain for Traefik routing labels |
|
| `cameleer.server.runtime.routingdomain` | `localhost` | `CAMELEER_SERVER_RUNTIME_ROUTINGDOMAIN` | Domain for Traefik routing labels |
|
||||||
| `cameleer.server.runtime.serverurl` | *(empty)* | `CAMELEER_SERVER_RUNTIME_SERVERURL` | Server URL injected into app containers |
|
| `cameleer.server.runtime.serverurl` | *(empty)* | `CAMELEER_SERVER_RUNTIME_SERVERURL` | Server URL injected into app containers |
|
||||||
|
| `cameleer.server.runtime.certresolver` | *(empty)* | `CAMELEER_SERVER_RUNTIME_CERTRESOLVER` | Traefik TLS cert resolver name (e.g. `letsencrypt`). Blank = omit the `tls.certresolver` label and let Traefik serve the default TLS-store cert |
|
||||||
| `cameleer.server.runtime.agenthealthport` | `9464` | `CAMELEER_SERVER_RUNTIME_AGENTHEALTHPORT` | Agent health check port |
|
| `cameleer.server.runtime.agenthealthport` | `9464` | `CAMELEER_SERVER_RUNTIME_AGENTHEALTHPORT` | Agent health check port |
|
||||||
| `cameleer.server.runtime.healthchecktimeout` | `60` | `CAMELEER_SERVER_RUNTIME_HEALTHCHECKTIMEOUT` | Health check timeout (seconds) |
|
| `cameleer.server.runtime.healthchecktimeout` | `60` | `CAMELEER_SERVER_RUNTIME_HEALTHCHECKTIMEOUT` | Health check timeout (seconds) |
|
||||||
| `cameleer.server.runtime.container.memorylimit` | `512m` | `CAMELEER_SERVER_RUNTIME_CONTAINER_MEMORYLIMIT` | Default memory limit for app containers |
|
| `cameleer.server.runtime.container.memorylimit` | `512m` | `CAMELEER_SERVER_RUNTIME_CONTAINER_MEMORYLIMIT` | Default memory limit for app containers |
|
||||||
|
|||||||
54
cameleer-license-api/pom.xml
Normal file
54
cameleer-license-api/pom.xml
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||||
|
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||||
|
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
|
||||||
|
<parent>
|
||||||
|
<groupId>com.cameleer</groupId>
|
||||||
|
<artifactId>cameleer-server-parent</artifactId>
|
||||||
|
<version>1.0-SNAPSHOT</version>
|
||||||
|
</parent>
|
||||||
|
|
||||||
|
<artifactId>cameleer-license-api</artifactId>
|
||||||
|
<name>Cameleer License API</name>
|
||||||
|
<description>Pure license contract types — LicenseInfo, LicenseValidator, LicenseState, LicenseStateMachine, LicenseLimits, DefaultTierLimits. Shared by server-core (validation/runtime gate) and cameleer-license-minter (vendor-side signing). Has no Spring or server-runtime dependencies so consumers like cameleer-saas can depend on the minter without inheriting server internals.</description>
|
||||||
|
|
||||||
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>com.fasterxml.jackson.core</groupId>
|
||||||
|
<artifactId>jackson-databind</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.slf4j</groupId>
|
||||||
|
<artifactId>slf4j-api</artifactId>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.junit.jupiter</groupId>
|
||||||
|
<artifactId>junit-jupiter</artifactId>
|
||||||
|
<scope>test</scope>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.assertj</groupId>
|
||||||
|
<artifactId>assertj-core</artifactId>
|
||||||
|
<scope>test</scope>
|
||||||
|
</dependency>
|
||||||
|
</dependencies>
|
||||||
|
|
||||||
|
<build>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.springframework.boot</groupId>
|
||||||
|
<artifactId>spring-boot-maven-plugin</artifactId>
|
||||||
|
<executions>
|
||||||
|
<!-- Plain library JAR — no repackage. -->
|
||||||
|
<execution>
|
||||||
|
<id>repackage</id>
|
||||||
|
<phase>none</phase>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
|
</project>
|
||||||
@@ -0,0 +1,30 @@
|
|||||||
|
package com.cameleer.license;
|
||||||
|
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.LinkedHashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
public final class DefaultTierLimits {
|
||||||
|
|
||||||
|
public static final Map<String, Integer> DEFAULTS;
|
||||||
|
|
||||||
|
static {
|
||||||
|
Map<String, Integer> m = new LinkedHashMap<>();
|
||||||
|
m.put("max_environments", 1);
|
||||||
|
m.put("max_apps", 3);
|
||||||
|
m.put("max_agents", 5);
|
||||||
|
m.put("max_users", 3);
|
||||||
|
m.put("max_outbound_connections", 1);
|
||||||
|
m.put("max_alert_rules", 2);
|
||||||
|
m.put("max_total_cpu_millis", 2000);
|
||||||
|
m.put("max_total_memory_mb", 2048);
|
||||||
|
m.put("max_total_replicas", 5);
|
||||||
|
m.put("max_execution_retention_days", 1);
|
||||||
|
m.put("max_log_retention_days", 1);
|
||||||
|
m.put("max_metric_retention_days", 1);
|
||||||
|
m.put("max_jar_retention_count", 3);
|
||||||
|
DEFAULTS = Collections.unmodifiableMap(m);
|
||||||
|
}
|
||||||
|
|
||||||
|
private DefaultTierLimits() {}
|
||||||
|
}
|
||||||
@@ -0,0 +1,46 @@
|
|||||||
|
package com.cameleer.license;
|
||||||
|
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Objects;
|
||||||
|
import java.util.UUID;
|
||||||
|
|
||||||
|
/** A parsed and signature-verified license. Construct via {@link LicenseValidator}. */
|
||||||
|
public record LicenseInfo(
|
||||||
|
UUID licenseId,
|
||||||
|
String tenantId,
|
||||||
|
String label,
|
||||||
|
Map<String, Integer> limits,
|
||||||
|
Instant issuedAt,
|
||||||
|
Instant expiresAt,
|
||||||
|
int gracePeriodDays
|
||||||
|
) {
|
||||||
|
public LicenseInfo {
|
||||||
|
Objects.requireNonNull(licenseId, "licenseId is required");
|
||||||
|
Objects.requireNonNull(tenantId, "tenantId is required");
|
||||||
|
Objects.requireNonNull(limits, "limits is required");
|
||||||
|
Objects.requireNonNull(issuedAt, "issuedAt is required");
|
||||||
|
Objects.requireNonNull(expiresAt, "expiresAt is required");
|
||||||
|
if (tenantId.isBlank()) {
|
||||||
|
throw new IllegalArgumentException("tenantId must not be blank");
|
||||||
|
}
|
||||||
|
if (gracePeriodDays < 0) {
|
||||||
|
throw new IllegalArgumentException("gracePeriodDays must be >= 0");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** True iff now > expiresAt + gracePeriodDays. */
|
||||||
|
public boolean isExpired() {
|
||||||
|
Instant deadline = expiresAt.plusSeconds((long) gracePeriodDays * 86400);
|
||||||
|
return Instant.now().isAfter(deadline);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** True iff now > expiresAt (regardless of grace). Used by the state machine to distinguish ACTIVE from GRACE. */
|
||||||
|
public boolean isAfterRawExpiry() {
|
||||||
|
return Instant.now().isAfter(expiresAt);
|
||||||
|
}
|
||||||
|
|
||||||
|
public int getLimit(String key, int defaultValue) {
|
||||||
|
return limits.getOrDefault(key, defaultValue);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,36 @@
|
|||||||
|
package com.cameleer.license;
|
||||||
|
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.LinkedHashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Objects;
|
||||||
|
|
||||||
|
public record LicenseLimits(Map<String, Integer> values) {
|
||||||
|
|
||||||
|
public LicenseLimits {
|
||||||
|
Objects.requireNonNull(values, "values");
|
||||||
|
}
|
||||||
|
|
||||||
|
public static LicenseLimits defaultsOnly() {
|
||||||
|
return new LicenseLimits(DefaultTierLimits.DEFAULTS);
|
||||||
|
}
|
||||||
|
|
||||||
|
public static LicenseLimits mergeOverDefaults(Map<String, Integer> overrides) {
|
||||||
|
Map<String, Integer> merged = new LinkedHashMap<>(DefaultTierLimits.DEFAULTS);
|
||||||
|
if (overrides != null) merged.putAll(overrides);
|
||||||
|
return new LicenseLimits(Collections.unmodifiableMap(merged));
|
||||||
|
}
|
||||||
|
|
||||||
|
public int get(String key) {
|
||||||
|
Integer v = values.get(key);
|
||||||
|
if (v == null) {
|
||||||
|
throw new IllegalArgumentException("Unknown license limit key: " + key);
|
||||||
|
}
|
||||||
|
return v;
|
||||||
|
}
|
||||||
|
|
||||||
|
public boolean isDefaultSourced(String key, LicenseInfo license) {
|
||||||
|
if (license == null) return true;
|
||||||
|
return !license.limits().containsKey(key);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,9 @@
|
|||||||
|
package com.cameleer.license;
|
||||||
|
|
||||||
|
public enum LicenseState {
|
||||||
|
ABSENT,
|
||||||
|
ACTIVE,
|
||||||
|
GRACE,
|
||||||
|
EXPIRED,
|
||||||
|
INVALID
|
||||||
|
}
|
||||||
@@ -0,0 +1,26 @@
|
|||||||
|
package com.cameleer.license;
|
||||||
|
|
||||||
|
public final class LicenseStateMachine {
|
||||||
|
|
||||||
|
private LicenseStateMachine() {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @param license parsed license, or null if no license is loaded
|
||||||
|
* @param invalidReason non-null if the last validation attempt failed
|
||||||
|
*/
|
||||||
|
public static LicenseState classify(LicenseInfo license, String invalidReason) {
|
||||||
|
if (invalidReason != null) {
|
||||||
|
return LicenseState.INVALID;
|
||||||
|
}
|
||||||
|
if (license == null) {
|
||||||
|
return LicenseState.ABSENT;
|
||||||
|
}
|
||||||
|
if (!license.isAfterRawExpiry()) {
|
||||||
|
return LicenseState.ACTIVE;
|
||||||
|
}
|
||||||
|
if (!license.isExpired()) {
|
||||||
|
return LicenseState.GRACE;
|
||||||
|
}
|
||||||
|
return LicenseState.EXPIRED;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,14 +1,20 @@
|
|||||||
package com.cameleer.server.core.license;
|
package com.cameleer.license;
|
||||||
|
|
||||||
import com.fasterxml.jackson.databind.JsonNode;
|
import com.fasterxml.jackson.databind.JsonNode;
|
||||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
import java.security.*;
|
import java.security.KeyFactory;
|
||||||
|
import java.security.PublicKey;
|
||||||
|
import java.security.Signature;
|
||||||
import java.security.spec.X509EncodedKeySpec;
|
import java.security.spec.X509EncodedKeySpec;
|
||||||
import java.time.Instant;
|
import java.time.Instant;
|
||||||
import java.util.*;
|
import java.util.Base64;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Objects;
|
||||||
|
import java.util.UUID;
|
||||||
|
|
||||||
public class LicenseValidator {
|
public class LicenseValidator {
|
||||||
|
|
||||||
@@ -16,8 +22,13 @@ public class LicenseValidator {
|
|||||||
private static final ObjectMapper objectMapper = new ObjectMapper();
|
private static final ObjectMapper objectMapper = new ObjectMapper();
|
||||||
|
|
||||||
private final PublicKey publicKey;
|
private final PublicKey publicKey;
|
||||||
|
private final String expectedTenantId;
|
||||||
|
|
||||||
public LicenseValidator(String publicKeyBase64) {
|
public LicenseValidator(String publicKeyBase64, String expectedTenantId) {
|
||||||
|
Objects.requireNonNull(expectedTenantId, "expectedTenantId is required");
|
||||||
|
if (expectedTenantId.isBlank()) {
|
||||||
|
throw new IllegalArgumentException("expectedTenantId must not be blank");
|
||||||
|
}
|
||||||
try {
|
try {
|
||||||
byte[] keyBytes = Base64.getDecoder().decode(publicKeyBase64);
|
byte[] keyBytes = Base64.getDecoder().decode(publicKeyBase64);
|
||||||
KeyFactory kf = KeyFactory.getInstance("Ed25519");
|
KeyFactory kf = KeyFactory.getInstance("Ed25519");
|
||||||
@@ -25,6 +36,7 @@ public class LicenseValidator {
|
|||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
throw new IllegalStateException("Failed to load license public key", e);
|
throw new IllegalStateException("Failed to load license public key", e);
|
||||||
}
|
}
|
||||||
|
this.expectedTenantId = expectedTenantId;
|
||||||
}
|
}
|
||||||
|
|
||||||
public LicenseInfo validate(String token) {
|
public LicenseInfo validate(String token) {
|
||||||
@@ -36,7 +48,6 @@ public class LicenseValidator {
|
|||||||
byte[] payloadBytes = Base64.getDecoder().decode(parts[0]);
|
byte[] payloadBytes = Base64.getDecoder().decode(parts[0]);
|
||||||
byte[] signatureBytes = Base64.getDecoder().decode(parts[1]);
|
byte[] signatureBytes = Base64.getDecoder().decode(parts[1]);
|
||||||
|
|
||||||
// Verify signature
|
|
||||||
try {
|
try {
|
||||||
Signature verifier = Signature.getInstance("Ed25519");
|
Signature verifier = Signature.getInstance("Ed25519");
|
||||||
verifier.initVerify(publicKey);
|
verifier.initVerify(publicKey);
|
||||||
@@ -50,23 +61,25 @@ public class LicenseValidator {
|
|||||||
throw new SecurityException("License signature verification failed", e);
|
throw new SecurityException("License signature verification failed", e);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parse payload
|
|
||||||
try {
|
try {
|
||||||
JsonNode root = objectMapper.readTree(payloadBytes);
|
JsonNode root = objectMapper.readTree(payloadBytes);
|
||||||
|
|
||||||
String tier = root.get("tier").asText();
|
String licenseIdStr = textOrThrow(root, "licenseId");
|
||||||
|
UUID licenseId;
|
||||||
Set<Feature> features = new HashSet<>();
|
try {
|
||||||
if (root.has("features")) {
|
licenseId = UUID.fromString(licenseIdStr);
|
||||||
for (JsonNode f : root.get("features")) {
|
} catch (IllegalArgumentException e) {
|
||||||
try {
|
throw new IllegalArgumentException("licenseId is not a valid UUID: " + licenseIdStr);
|
||||||
features.add(Feature.valueOf(f.asText()));
|
|
||||||
} catch (IllegalArgumentException e) {
|
|
||||||
log.warn("Unknown feature in license: {}", f.asText());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
String tenantId = textOrThrow(root, "tenantId");
|
||||||
|
if (!tenantId.equals(expectedTenantId)) {
|
||||||
|
throw new IllegalArgumentException(
|
||||||
|
"License tenantId '" + tenantId + "' does not match server tenant '" + expectedTenantId + "'");
|
||||||
|
}
|
||||||
|
|
||||||
|
String label = root.has("label") ? root.get("label").asText() : null;
|
||||||
|
|
||||||
Map<String, Integer> limits = new HashMap<>();
|
Map<String, Integer> limits = new HashMap<>();
|
||||||
if (root.has("limits")) {
|
if (root.has("limits")) {
|
||||||
root.get("limits").fields().forEachRemaining(entry ->
|
root.get("limits").fields().forEachRemaining(entry ->
|
||||||
@@ -74,12 +87,17 @@ public class LicenseValidator {
|
|||||||
}
|
}
|
||||||
|
|
||||||
Instant issuedAt = root.has("iat") ? Instant.ofEpochSecond(root.get("iat").asLong()) : Instant.now();
|
Instant issuedAt = root.has("iat") ? Instant.ofEpochSecond(root.get("iat").asLong()) : Instant.now();
|
||||||
Instant expiresAt = root.has("exp") ? Instant.ofEpochSecond(root.get("exp").asLong()) : null;
|
if (!root.has("exp")) {
|
||||||
|
throw new IllegalArgumentException("exp is required");
|
||||||
|
}
|
||||||
|
Instant expiresAt = Instant.ofEpochSecond(root.get("exp").asLong());
|
||||||
|
int gracePeriodDays = root.has("gracePeriodDays") ? root.get("gracePeriodDays").asInt() : 0;
|
||||||
|
|
||||||
LicenseInfo info = new LicenseInfo(tier, features, limits, issuedAt, expiresAt);
|
LicenseInfo info = new LicenseInfo(licenseId, tenantId, label, limits, issuedAt, expiresAt, gracePeriodDays);
|
||||||
|
|
||||||
if (info.isExpired()) {
|
if (info.isExpired()) {
|
||||||
throw new IllegalArgumentException("License expired at " + expiresAt);
|
throw new IllegalArgumentException("License expired at " + expiresAt
|
||||||
|
+ " (grace period " + gracePeriodDays + " days)");
|
||||||
}
|
}
|
||||||
|
|
||||||
return info;
|
return info;
|
||||||
@@ -89,4 +107,11 @@ public class LicenseValidator {
|
|||||||
throw new IllegalArgumentException("Failed to parse license payload", e);
|
throw new IllegalArgumentException("Failed to parse license payload", e);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private static String textOrThrow(JsonNode root, String field) {
|
||||||
|
if (!root.has(field) || root.get(field).asText().isBlank()) {
|
||||||
|
throw new IllegalArgumentException(field + " is required");
|
||||||
|
}
|
||||||
|
return root.get(field).asText();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
@@ -0,0 +1,30 @@
|
|||||||
|
package com.cameleer.license;
|
||||||
|
|
||||||
|
import org.junit.jupiter.api.Test;
|
||||||
|
|
||||||
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
|
|
||||||
|
class DefaultTierLimitsTest {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void allDocumentedKeysHaveDefaults() {
|
||||||
|
for (String key : new String[]{
|
||||||
|
"max_environments", "max_apps", "max_agents", "max_users",
|
||||||
|
"max_outbound_connections", "max_alert_rules",
|
||||||
|
"max_total_cpu_millis", "max_total_memory_mb", "max_total_replicas",
|
||||||
|
"max_execution_retention_days", "max_log_retention_days",
|
||||||
|
"max_metric_retention_days", "max_jar_retention_count"
|
||||||
|
}) {
|
||||||
|
assertThat(DefaultTierLimits.DEFAULTS).containsKey(key);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void specificValues() {
|
||||||
|
assertThat(DefaultTierLimits.DEFAULTS.get("max_environments")).isEqualTo(1);
|
||||||
|
assertThat(DefaultTierLimits.DEFAULTS.get("max_apps")).isEqualTo(3);
|
||||||
|
assertThat(DefaultTierLimits.DEFAULTS.get("max_agents")).isEqualTo(5);
|
||||||
|
assertThat(DefaultTierLimits.DEFAULTS.get("max_total_cpu_millis")).isEqualTo(2000);
|
||||||
|
assertThat(DefaultTierLimits.DEFAULTS.get("max_log_retention_days")).isEqualTo(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,64 @@
|
|||||||
|
package com.cameleer.license;
|
||||||
|
|
||||||
|
import org.junit.jupiter.api.Test;
|
||||||
|
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.UUID;
|
||||||
|
|
||||||
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
|
import static org.assertj.core.api.Assertions.assertThatThrownBy;
|
||||||
|
|
||||||
|
class LicenseInfoTest {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void requiresLicenseId() {
|
||||||
|
assertThatThrownBy(() -> new LicenseInfo(
|
||||||
|
null, "acme", "label",
|
||||||
|
Map.of(), Instant.now(), Instant.now().plusSeconds(60), 0))
|
||||||
|
.isInstanceOf(NullPointerException.class)
|
||||||
|
.hasMessageContaining("licenseId");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void requiresTenantId() {
|
||||||
|
assertThatThrownBy(() -> new LicenseInfo(
|
||||||
|
UUID.randomUUID(), null, "label",
|
||||||
|
Map.of(), Instant.now(), Instant.now().plusSeconds(60), 0))
|
||||||
|
.isInstanceOf(NullPointerException.class)
|
||||||
|
.hasMessageContaining("tenantId");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void emptyTenantIdRejected() {
|
||||||
|
assertThatThrownBy(() -> new LicenseInfo(
|
||||||
|
UUID.randomUUID(), " ", "label",
|
||||||
|
Map.of(), Instant.now(), Instant.now().plusSeconds(60), 0))
|
||||||
|
.isInstanceOf(IllegalArgumentException.class);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void getLimit_returnsDefaultWhenMissing() {
|
||||||
|
LicenseInfo info = new LicenseInfo(
|
||||||
|
UUID.randomUUID(), "acme", null,
|
||||||
|
Map.of("max_apps", 5), Instant.now(),
|
||||||
|
Instant.now().plusSeconds(60), 0);
|
||||||
|
assertThat(info.getLimit("max_apps", 99)).isEqualTo(5);
|
||||||
|
assertThat(info.getLimit("max_users", 99)).isEqualTo(99);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void isExpired_honoursGracePeriod() {
|
||||||
|
Instant pastByTen = Instant.now().minusSeconds(10 * 86400);
|
||||||
|
LicenseInfo withinGrace = new LicenseInfo(
|
||||||
|
UUID.randomUUID(), "acme", null, Map.of(),
|
||||||
|
Instant.now().minusSeconds(40 * 86400),
|
||||||
|
pastByTen, 30);
|
||||||
|
assertThat(withinGrace.isExpired()).isFalse(); // 10 days into a 30-day grace
|
||||||
|
LicenseInfo pastGrace = new LicenseInfo(
|
||||||
|
UUID.randomUUID(), "acme", null, Map.of(),
|
||||||
|
Instant.now().minusSeconds(40 * 86400),
|
||||||
|
pastByTen, 5);
|
||||||
|
assertThat(pastGrace.isExpired()).isTrue(); // 10 days is past the 5-day grace
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,57 @@
|
|||||||
|
package com.cameleer.license;
|
||||||
|
|
||||||
|
import org.junit.jupiter.api.Test;
|
||||||
|
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.UUID;
|
||||||
|
|
||||||
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
|
|
||||||
|
class LicenseStateMachineTest {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void noLicense_isAbsent() {
|
||||||
|
assertThat(LicenseStateMachine.classify(null, null)).isEqualTo(LicenseState.ABSENT);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void invalidReason_isInvalid() {
|
||||||
|
assertThat(LicenseStateMachine.classify(null, "signature failed")).isEqualTo(LicenseState.INVALID);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void activeBeforeExp() {
|
||||||
|
LicenseInfo info = info(Instant.now().plusSeconds(86400), 0);
|
||||||
|
assertThat(LicenseStateMachine.classify(info, null)).isEqualTo(LicenseState.ACTIVE);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void graceWithinGracePeriod() {
|
||||||
|
LicenseInfo info = info(Instant.now().minusSeconds(86400), 7);
|
||||||
|
assertThat(LicenseStateMachine.classify(info, null)).isEqualTo(LicenseState.GRACE);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void expiredAfterGrace() {
|
||||||
|
LicenseInfo info = info(Instant.now().minusSeconds(8L * 86400), 7);
|
||||||
|
assertThat(LicenseStateMachine.classify(info, null)).isEqualTo(LicenseState.EXPIRED);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void expiredImmediatelyWithZeroGrace() {
|
||||||
|
LicenseInfo info = info(Instant.now().minusSeconds(60), 0);
|
||||||
|
assertThat(LicenseStateMachine.classify(info, null)).isEqualTo(LicenseState.EXPIRED);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void invalidWinsOverPresentLicense() {
|
||||||
|
LicenseInfo info = info(Instant.now().plusSeconds(86400), 0);
|
||||||
|
assertThat(LicenseStateMachine.classify(info, "tenant mismatch")).isEqualTo(LicenseState.INVALID);
|
||||||
|
}
|
||||||
|
|
||||||
|
private LicenseInfo info(Instant exp, int graceDays) {
|
||||||
|
return new LicenseInfo(UUID.randomUUID(), "acme", null, Map.of(),
|
||||||
|
Instant.now().minusSeconds(3600), exp, graceDays);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,141 @@
|
|||||||
|
package com.cameleer.license;

import org.junit.jupiter.api.Test;

import java.nio.charset.StandardCharsets;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.PrivateKey;
import java.security.Signature;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.Base64;
import java.util.UUID;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
|
||||||
|
|
||||||
|
class LicenseValidatorTest {
|
||||||
|
|
||||||
|
private KeyPair generateKeyPair() throws Exception {
|
||||||
|
KeyPairGenerator kpg = KeyPairGenerator.getInstance("Ed25519");
|
||||||
|
return kpg.generateKeyPair();
|
||||||
|
}
|
||||||
|
|
||||||
|
private String sign(PrivateKey key, String payload) throws Exception {
|
||||||
|
Signature signer = Signature.getInstance("Ed25519");
|
||||||
|
signer.initSign(key);
|
||||||
|
signer.update(payload.getBytes());
|
||||||
|
return Base64.getEncoder().encodeToString(signer.sign());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void validate_validLicense_returnsLicenseInfo() throws Exception {
|
||||||
|
KeyPair kp = generateKeyPair();
|
||||||
|
String publicKeyBase64 = Base64.getEncoder().encodeToString(kp.getPublic().getEncoded());
|
||||||
|
LicenseValidator validator = new LicenseValidator(publicKeyBase64, "acme");
|
||||||
|
|
||||||
|
Instant expires = Instant.now().plus(365, ChronoUnit.DAYS);
|
||||||
|
String payload = """
|
||||||
|
{"licenseId":"%s","tenantId":"acme","label":"HIGH","tier":"HIGH","limits":{"max_agents":50,"retention_days":90},"iat":%d,"exp":%d,"gracePeriodDays":7}
|
||||||
|
""".formatted(UUID.randomUUID(), Instant.now().getEpochSecond(), expires.getEpochSecond()).trim();
|
||||||
|
String signature = sign(kp.getPrivate(), payload);
|
||||||
|
String token = Base64.getEncoder().encodeToString(payload.getBytes()) + "." + signature;
|
||||||
|
|
||||||
|
LicenseInfo info = validator.validate(token);
|
||||||
|
|
||||||
|
assertThat(info.label()).isEqualTo("HIGH");
|
||||||
|
assertThat(info.getLimit("max_agents", 0)).isEqualTo(50);
|
||||||
|
assertThat(info.isExpired()).isFalse();
|
||||||
|
assertThat(info.tenantId()).isEqualTo("acme");
|
||||||
|
assertThat(info.gracePeriodDays()).isEqualTo(7);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void validate_expiredLicense_throwsException() throws Exception {
|
||||||
|
KeyPair kp = generateKeyPair();
|
||||||
|
String publicKeyBase64 = Base64.getEncoder().encodeToString(kp.getPublic().getEncoded());
|
||||||
|
LicenseValidator validator = new LicenseValidator(publicKeyBase64, "acme");
|
||||||
|
|
||||||
|
Instant past = Instant.now().minus(1, ChronoUnit.DAYS);
|
||||||
|
String payload = """
|
||||||
|
{"licenseId":"%s","tenantId":"acme","tier":"LOW","limits":{},"iat":%d,"exp":%d}
|
||||||
|
""".formatted(UUID.randomUUID(), past.minus(30, ChronoUnit.DAYS).getEpochSecond(), past.getEpochSecond()).trim();
|
||||||
|
String signature = sign(kp.getPrivate(), payload);
|
||||||
|
String token = Base64.getEncoder().encodeToString(payload.getBytes()) + "." + signature;
|
||||||
|
|
||||||
|
assertThatThrownBy(() -> validator.validate(token))
|
||||||
|
.isInstanceOf(IllegalArgumentException.class)
|
||||||
|
.hasMessageContaining("expired");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void validate_tamperedPayload_throwsException() throws Exception {
|
||||||
|
KeyPair kp = generateKeyPair();
|
||||||
|
String publicKeyBase64 = Base64.getEncoder().encodeToString(kp.getPublic().getEncoded());
|
||||||
|
LicenseValidator validator = new LicenseValidator(publicKeyBase64, "acme");
|
||||||
|
|
||||||
|
String payload = """
|
||||||
|
{"licenseId":"%s","tenantId":"acme","tier":"LOW","limits":{},"iat":0,"exp":9999999999}
|
||||||
|
""".formatted(UUID.randomUUID()).trim();
|
||||||
|
String signature = sign(kp.getPrivate(), payload);
|
||||||
|
|
||||||
|
// Tamper with payload
|
||||||
|
String tampered = payload.replace("LOW", "BUSINESS");
|
||||||
|
String token = Base64.getEncoder().encodeToString(tampered.getBytes()) + "." + signature;
|
||||||
|
|
||||||
|
assertThatThrownBy(() -> validator.validate(token))
|
||||||
|
.isInstanceOf(SecurityException.class)
|
||||||
|
.hasMessageContaining("signature");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void validate_missingTenantId_throws() throws Exception {
|
||||||
|
KeyPair kp = generateKeyPair();
|
||||||
|
String publicKeyBase64 = Base64.getEncoder().encodeToString(kp.getPublic().getEncoded());
|
||||||
|
LicenseValidator validator = new LicenseValidator(publicKeyBase64, "acme");
|
||||||
|
|
||||||
|
Instant exp = Instant.now().plus(30, ChronoUnit.DAYS);
|
||||||
|
String payload = """
|
||||||
|
{"licenseId":"%s","tier":"X","limits":{},"iat":%d,"exp":%d}
|
||||||
|
""".formatted(UUID.randomUUID(), Instant.now().getEpochSecond(), exp.getEpochSecond()).trim();
|
||||||
|
String token = Base64.getEncoder().encodeToString(payload.getBytes()) + "." + sign(kp.getPrivate(), payload);
|
||||||
|
|
||||||
|
assertThatThrownBy(() -> validator.validate(token))
|
||||||
|
.isInstanceOf(IllegalArgumentException.class)
|
||||||
|
.hasMessageContaining("tenantId");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void validate_tenantIdMismatch_throws() throws Exception {
|
||||||
|
KeyPair kp = generateKeyPair();
|
||||||
|
String publicKeyBase64 = Base64.getEncoder().encodeToString(kp.getPublic().getEncoded());
|
||||||
|
LicenseValidator validator = new LicenseValidator(publicKeyBase64, "beta");
|
||||||
|
|
||||||
|
Instant exp = Instant.now().plus(30, ChronoUnit.DAYS);
|
||||||
|
String payload = """
|
||||||
|
{"licenseId":"%s","tenantId":"acme","tier":"X","limits":{},"iat":%d,"exp":%d}
|
||||||
|
""".formatted(UUID.randomUUID(), Instant.now().getEpochSecond(), exp.getEpochSecond()).trim();
|
||||||
|
String token = Base64.getEncoder().encodeToString(payload.getBytes()) + "." + sign(kp.getPrivate(), payload);
|
||||||
|
|
||||||
|
assertThatThrownBy(() -> validator.validate(token))
|
||||||
|
.isInstanceOf(IllegalArgumentException.class)
|
||||||
|
.hasMessageContaining("tenantId");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void validate_missingLicenseId_throws() throws Exception {
|
||||||
|
KeyPair kp = generateKeyPair();
|
||||||
|
String publicKeyBase64 = Base64.getEncoder().encodeToString(kp.getPublic().getEncoded());
|
||||||
|
LicenseValidator validator = new LicenseValidator(publicKeyBase64, "acme");
|
||||||
|
|
||||||
|
Instant exp = Instant.now().plus(30, ChronoUnit.DAYS);
|
||||||
|
String payload = """
|
||||||
|
{"tenantId":"acme","tier":"X","limits":{},"iat":%d,"exp":%d}
|
||||||
|
""".formatted(Instant.now().getEpochSecond(), exp.getEpochSecond()).trim();
|
||||||
|
String token = Base64.getEncoder().encodeToString(payload.getBytes()) + "." + sign(kp.getPrivate(), payload);
|
||||||
|
|
||||||
|
assertThatThrownBy(() -> validator.validate(token))
|
||||||
|
.isInstanceOf(IllegalArgumentException.class)
|
||||||
|
.hasMessageContaining("licenseId");
|
||||||
|
}
|
||||||
|
}
|
||||||
287
cameleer-license-minter/README.md
Normal file
287
cameleer-license-minter/README.md
Normal file
@@ -0,0 +1,287 @@
|
|||||||
|
# cameleer-license-minter
|
||||||
|
|
||||||
|
Standalone vendor-side tool for producing signed Ed25519 license tokens consumed by `cameleer-server`. The minter is intentionally **not** a runtime or compile-scope dependency of the server — the server only ships with the matching public key and validates tokens via `LicenseValidator`. The private signing key never leaves the vendor's environment.
|
||||||
|
|
||||||
|
- Module GAV: `com.cameleer:cameleer-license-minter:1.0-SNAPSHOT`
|
||||||
|
- Maven coordinates of the runtime server (does **not** transitively pull this module): `com.cameleer:cameleer-server-app:1.0-SNAPSHOT`
|
||||||
|
- Build artifacts (after `mvn -pl cameleer-license-minter package`):
|
||||||
|
- `target/cameleer-license-minter-1.0-SNAPSHOT.jar` — plain library JAR (consumable as a Maven `test` dependency or via the `LicenseMinter` API in custom tooling)
|
||||||
|
- `target/cameleer-license-minter-1.0-SNAPSHOT-cli.jar` — fat CLI JAR with main class `com.cameleer.license.minter.cli.LicenseMinterCli`
|
||||||
|
|
||||||
|
## Table of contents

- [Audience](#audience)
- [Build](#build)
- [Public Java API](#public-java-api)
- [CLI usage](#cli-usage)
- [Token format](#token-format)
- [LicenseInfo schema](#licenseinfo-schema)
- [Limits dictionary](#limits-dictionary)
- [Generating an Ed25519 key pair](#generating-an-ed25519-key-pair)
- [Worked example](#worked-example)
- [Security guidance](#security-guidance)
- [Compatibility / runtime separation](#compatibility--runtime-separation)

---
|
||||||
|
|
||||||
|
## Audience
|
||||||
|
|
||||||
|
Vendors / SaaS operators issuing licenses to customers who run `cameleer-server`. End-customer operators looking for *how to install* a token should read `docs/license-enforcement.md` instead.
|
||||||
|
|
||||||
|
## Build
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# From the repo root
|
||||||
|
mvn -pl cameleer-license-minter package
|
||||||
|
```
|
||||||
|
|
||||||
|
Two JARs land in `cameleer-license-minter/target/`:
|
||||||
|
|
||||||
|
| Artifact | Purpose |
|
||||||
|
|---|---|
|
||||||
|
| `cameleer-license-minter-1.0-SNAPSHOT.jar` | Plain library (the `repackage` execution for the main artifact is disabled; see `pom.xml:50-54`). Use this when embedding the minter inside your own tooling or a unit test that needs a fresh signed token. |
|
||||||
|
| `cameleer-license-minter-1.0-SNAPSHOT-cli.jar` | Fat CLI JAR. Repackaged by Spring Boot's `spring-boot-maven-plugin` with classifier `cli`; main class is `com.cameleer.license.minter.cli.LicenseMinterCli`. |
|
||||||
|
|
||||||
|
## Public Java API
|
||||||
|
|
||||||
|
`com.cameleer.license.minter.LicenseMinter` is the only entry point for the library. It is a final, stateless utility class:
|
||||||
|
|
||||||
|
```java
|
||||||
|
import com.cameleer.license.minter.LicenseMinter;
|
||||||
|
import com.cameleer.license.LicenseInfo;
|
||||||
|
|
||||||
|
LicenseInfo info = new LicenseInfo(
|
||||||
|
java.util.UUID.randomUUID(),
|
||||||
|
"acme-prod", // tenantId — must match server's CAMELEER_SERVER_TENANT_ID
|
||||||
|
"Acme Production (Tier B)", // human label, optional
|
||||||
|
java.util.Map.of(
|
||||||
|
"max_environments", 3,
|
||||||
|
"max_apps", 25,
|
||||||
|
"max_agents", 50,
|
||||||
|
"max_users", 20,
|
||||||
|
"max_total_replicas", 30
|
||||||
|
),
|
||||||
|
java.time.Instant.now(), // issuedAt
|
||||||
|
java.time.Instant.parse("2027-01-01T00:00:00Z"), // expiresAt
|
||||||
|
7 // gracePeriodDays
|
||||||
|
);
|
||||||
|
|
||||||
|
String token = LicenseMinter.mint(info, ed25519PrivateKey);
|
||||||
|
```
|
||||||
|
|
||||||
|
Source: `cameleer-license-minter/src/main/java/com/cameleer/license/minter/LicenseMinter.java:20`.
|
||||||
|
|
||||||
|
The method is thread-safe; the underlying Jackson `ObjectMapper` is configured once with `ORDER_MAP_ENTRIES_BY_KEYS` so canonical-JSON serialization is deterministic across runs and process boundaries.
|
||||||
|
|
||||||
|
`LicenseMinter.mint` will throw `IllegalStateException` if the JCE provider rejects the private key or the payload cannot be serialized.
|
||||||
|
|
||||||
|
## CLI usage
|
||||||
|
|
||||||
|
The CLI entry point is `com.cameleer.license.minter.cli.LicenseMinterCli`. Run it from the fat JAR produced by the build:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
java -jar cameleer-license-minter/target/cameleer-license-minter-1.0-SNAPSHOT-cli.jar \
|
||||||
|
--private-key=/secure/keys/cameleer-license-priv.pem \
|
||||||
|
--tenant=acme-prod \
|
||||||
|
--label="Acme Production (Tier B)" \
|
||||||
|
--expires=2027-01-01 \
|
||||||
|
--grace-days=7 \
|
||||||
|
--max-environments=3 \
|
||||||
|
--max-apps=25 \
|
||||||
|
--max-agents=50 \
|
||||||
|
--max-users=20 \
|
||||||
|
--max-total-replicas=30 \
|
||||||
|
--output=/secure/out/acme-prod.lic \
|
||||||
|
--public-key=/secure/keys/cameleer-license-pub.b64 \
|
||||||
|
--verify
|
||||||
|
```
|
||||||
|
|
||||||
|
### Flag reference
|
||||||
|
|
||||||
|
Source of truth: `cameleer-license-minter/src/main/java/com/cameleer/license/minter/cli/LicenseMinterCli.java:26`.
|
||||||
|
|
||||||
|
| Flag | Required | Meaning |
|
||||||
|
|---|---|---|
|
||||||
|
| `--private-key=<path>` | yes | Path to a PKCS#8-encoded Ed25519 private key. Both PEM (`-----BEGIN PRIVATE KEY-----`) and raw base64 are accepted (`LicenseMinterCli.readEd25519PrivateKey`). |
|
||||||
|
| `--tenant=<tenantId>` | yes | The exact `tenantId` the server will compare against `CAMELEER_SERVER_TENANT_ID`. Mismatch causes the validator to throw at install / revalidation. |
|
||||||
|
| `--expires=<YYYY-MM-DD>` | yes | Expiration date interpreted as midnight UTC. The validator considers tokens expired once `now > exp + gracePeriodDays`. |
|
||||||
|
| `--label=<text>` | no | Human-readable label, surfaced via `GET /api/v1/admin/license` and `/api/v1/admin/license/usage`. |
|
||||||
|
| `--grace-days=<int>` | no | Number of days the license stays usable after `--expires`. Defaults to `0`. |
|
||||||
|
| `--max-<limitkey>=<int>` | no, repeatable | Each `--max-foo-bar` flag becomes the limit key `max_foo_bar`. See the limits dictionary below. Unknown keys are accepted by the minter (the server side ignores keys it does not understand and falls through to defaults). |
|
||||||
|
| `--output=<path>` | no | Write the token to a file. When omitted, the token is printed to stdout. On `--verify` failure the file is deleted. |
|
||||||
|
| `--public-key=<path>` | no, required for `--verify` | Path to the matching base64 X.509 SPKI public key file (one line, no PEM markers). |
|
||||||
|
| `--verify` | no | After minting, parse + signature-check the token using `--public-key` and `--tenant`. Exits non-zero if verification fails. |
|
||||||
|
|
||||||
|
Exit codes: `0` on success, `1` on minting / IO failure, `2` on argument validation failure, `3` on `--verify` failure.
|
||||||
|
|
||||||
|
## Token format
|
||||||
|
|
||||||
|
A token is the concatenation of two **standard** base64 segments joined by a literal `.`:
|
||||||
|
|
||||||
|
```
|
||||||
|
base64(canonicalJson) + "." + base64(ed25519Signature)
|
||||||
|
```
|
||||||
|
|
||||||
|
- The canonical JSON payload is produced by `LicenseMinter.canonicalPayload(...)` with keys sorted lexicographically and `limits` rendered as a sorted object. This makes the byte sequence deterministic given a fixed `LicenseInfo`.
|
||||||
|
- The signature is computed with `Signature.getInstance("Ed25519")` over the canonical payload bytes (not over the base64-encoded form).
|
||||||
|
- Encoding is `Base64.getEncoder()` (RFC 4648 §4 — *not* base64url). The validator decodes with the matching `Base64.getDecoder()`.
|
||||||
|
|
||||||
|
`LicenseValidator.validate(...)` (`cameleer-license-api/src/main/java/com/cameleer/license/LicenseValidator.java:42`) splits on the first `.`, decodes both halves, verifies the signature, then deserializes the payload.
|
||||||
|
|
||||||
|
## LicenseInfo schema
|
||||||
|
|
||||||
|
Source: `cameleer-license-api/src/main/java/com/cameleer/license/LicenseInfo.java`. Field-by-field:
|
||||||
|
|
||||||
|
| Field | Type | Required | Semantics |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `licenseId` | `UUID` | yes | Stable identifier for this token. The server's audit trail records install/replace transitions by license id; renewals must use a fresh UUID so audit history is non-ambiguous. |
|
||||||
|
| `tenantId` | `String` | yes | Must equal the server's `CAMELEER_SERVER_TENANT_ID`. The validator throws `IllegalArgumentException` on mismatch. Blank values are rejected by the canonical record constructor. |
|
||||||
|
| `label` | `String` | no | Free-form human label. Surfaced on the admin/usage endpoints and the operator UI. Has no enforcement semantics. |
|
||||||
|
| `limits` | `Map<String,Integer>` | yes (may be empty) | License-specific overrides. Any key that appears here is unioned over `DefaultTierLimits.DEFAULTS` to form the effective caps in `ACTIVE` / `GRACE` states. Keys not present fall through to defaults. |
|
||||||
|
| `issuedAt` | `Instant` (epoch seconds in JSON `iat`) | yes | Stamped by the minter; not currently consulted by the validator beyond informational logging. |
|
||||||
|
| `expiresAt` | `Instant` (epoch seconds in JSON `exp`) | yes | The validator throws if `now > expiresAt + gracePeriodDays * 86400` at install or revalidation. |
|
||||||
|
| `gracePeriodDays` | `int` | yes (>= 0) | Window after `expiresAt` during which the gate transitions to `GRACE` (license still grants its caps) before flipping to `EXPIRED`. Negative values are rejected at construction. |
|
||||||
|
|
||||||
|
## Limits dictionary
|
||||||
|
|
||||||
|
Canonical key set: `cameleer-license-api/src/main/java/com/cameleer/license/DefaultTierLimits.java`. Any key not listed here is silently ignored by the server's `LicenseGate.getEffectiveLimits()`.
|
||||||
|
|
||||||
|
| CLI flag | Key | Default | What the server enforces |
|
||||||
|
|---|---|---|---|
|
||||||
|
| `--max-environments` | `max_environments` | 1 | `EnvironmentService.create(...)` consults `LicenseEnforcer.assertWithinCap("max_environments", currentCount, 1)`. |
|
||||||
|
| `--max-apps` | `max_apps` | 3 | `AppService.createApp(...)` checks total app count across all envs. |
|
||||||
|
| `--max-agents` | `max_agents` | 5 | `AgentRegistryService.register(...)` checks live agent count. |
|
||||||
|
| `--max-users` | `max_users` | 3 | User creation paths (`UserAdminController`, `UiAuthController` self-signup, `OidcAuthController` first-login). |
|
||||||
|
| `--max-outbound-connections` | `max_outbound_connections` | 1 | `OutboundConnectionServiceImpl.create(...)`. |
|
||||||
|
| `--max-alert-rules` | `max_alert_rules` | 2 | `AlertRuleController.create(...)`. |
|
||||||
|
| `--max-total-cpu-millis` | `max_total_cpu_millis` | 2000 | `DeploymentExecutor` PRE_FLIGHT compute cap (sum of `replicas * cpuLimit` over non-stopped deployments). |
|
||||||
|
| `--max-total-memory-mb` | `max_total_memory_mb` | 2048 | `DeploymentExecutor` PRE_FLIGHT compute cap (sum of `replicas * memoryLimitMb`). |
|
||||||
|
| `--max-total-replicas` | `max_total_replicas` | 5 | `DeploymentExecutor` PRE_FLIGHT compute cap (sum of `replicas`). |
|
||||||
|
| `--max-execution-retention-days` | `max_execution_retention_days` | 1 | ClickHouse TTL cap for `executions`, `processor_executions`. Effective TTL = `min(cap, env.executionRetentionDays)`. |
|
||||||
|
| `--max-log-retention-days` | `max_log_retention_days` | 1 | ClickHouse TTL cap for `logs`. |
|
||||||
|
| `--max-metric-retention-days` | `max_metric_retention_days` | 1 | ClickHouse TTL cap for `agent_metrics`, `agent_events`. |
|
||||||
|
| `--max-jar-retention-count` | `max_jar_retention_count` | 3 | `EnvironmentAdminController` PUT `/{envSlug}/jar-retention` rejects requests above this cap. Also bounds the daily `JarRetentionJob`. |
|
||||||
|
|
||||||
|
## Generating an Ed25519 key pair
|
||||||
|
|
||||||
|
The minter and validator both rely on the JCE `Ed25519` algorithm shipped with JDK 17+. No external crypto library is needed.
|
||||||
|
|
||||||
|
```java
|
||||||
|
import java.security.KeyPair;
|
||||||
|
import java.security.KeyPairGenerator;
|
||||||
|
import java.util.Base64;
|
||||||
|
|
||||||
|
KeyPair kp = KeyPairGenerator.getInstance("Ed25519").generateKeyPair();
|
||||||
|
|
||||||
|
// 32-byte public key, X.509 SubjectPublicKeyInfo wrapped — this is what the server expects.
|
||||||
|
String publicKeyB64 = Base64.getEncoder().encodeToString(kp.getPublic().getEncoded());
|
||||||
|
|
||||||
|
// PKCS#8 private key — the CLI's --private-key reader accepts this either as raw base64
|
||||||
|
// or PEM-wrapped (`-----BEGIN PRIVATE KEY-----`).
|
||||||
|
String privateKeyB64 = Base64.getEncoder().encodeToString(kp.getPrivate().getEncoded());
|
||||||
|
```
|
||||||
|
|
||||||
|
A one-liner using the JDK's `keytool` is **not** sufficient — `keytool` cannot produce raw Ed25519 PKCS#8 in a directly-usable shape for our reader. Generating via the API above (or `openssl genpkey -algorithm ed25519`) is the supported path.
|
||||||
|
|
||||||
|
For OpenSSL:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
openssl genpkey -algorithm ed25519 -out cameleer-license-priv.pem
|
||||||
|
openssl pkey -in cameleer-license-priv.pem -pubout -outform DER \
|
||||||
|
| base64 -w0 > cameleer-license-pub.b64
|
||||||
|
```
|
||||||
|
|
||||||
|
The resulting `cameleer-license-pub.b64` is the value to put into `CAMELEER_SERVER_LICENSE_PUBLICKEY`.
|
||||||
|
|
||||||
|
## Worked example
|
||||||
|
|
||||||
|
End-to-end: generate a key pair, mint a license, install it on a running server, verify enforcement.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Vendor side — generate the keypair
|
||||||
|
openssl genpkey -algorithm ed25519 -out /secrets/cameleer-priv.pem
|
||||||
|
openssl pkey -in /secrets/cameleer-priv.pem -pubout -outform DER \
|
||||||
|
| base64 -w0 > /secrets/cameleer-pub.b64
|
||||||
|
|
||||||
|
# 2. Vendor side — distribute the public key (commit to deployment config / Vault / k8s Secret)
|
||||||
|
cat /secrets/cameleer-pub.b64
|
||||||
|
# MCowBQYDK2VwAyEAxxxxx...
|
||||||
|
|
||||||
|
# 3. Vendor side — mint a license for a customer tenant
|
||||||
|
mvn -pl cameleer-license-minter package -DskipTests
|
||||||
|
java -jar cameleer-license-minter/target/cameleer-license-minter-1.0-SNAPSHOT-cli.jar \
|
||||||
|
--private-key=/secrets/cameleer-priv.pem \
|
||||||
|
--public-key=/secrets/cameleer-pub.b64 \
|
||||||
|
--tenant=acme-prod \
|
||||||
|
--label="Acme Production" \
|
||||||
|
--expires=2027-01-01 \
|
||||||
|
--grace-days=14 \
|
||||||
|
--max-environments=3 \
|
||||||
|
--max-apps=25 \
|
||||||
|
--max-agents=50 \
|
||||||
|
--max-users=20 \
|
||||||
|
--max-total-replicas=30 \
|
||||||
|
--max-total-cpu-millis=15000 \
|
||||||
|
--max-total-memory-mb=16384 \
|
||||||
|
--max-execution-retention-days=30 \
|
||||||
|
--max-log-retention-days=14 \
|
||||||
|
--max-metric-retention-days=14 \
|
||||||
|
--max-jar-retention-count=10 \
|
||||||
|
--output=/tmp/acme.lic \
|
||||||
|
--verify
|
||||||
|
|
||||||
|
# 4. Customer side — server boots with public key + tenant id matching the mint
|
||||||
|
export CAMELEER_SERVER_TENANT_ID=acme-prod
|
||||||
|
export CAMELEER_SERVER_LICENSE_PUBLICKEY=$(cat /secrets/cameleer-pub.b64)
|
||||||
|
|
||||||
|
# 5. Customer side — install via the admin API after boot
|
||||||
|
curl -X POST https://server.example.com/api/v1/admin/license \
|
||||||
|
-H "Authorization: Bearer ${ADMIN_JWT}" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d "{\"token\": \"$(cat /tmp/acme.lic)\"}"
|
||||||
|
|
||||||
|
# 6. Customer side — verify it was accepted
|
||||||
|
curl https://server.example.com/api/v1/admin/license \
|
||||||
|
-H "Authorization: Bearer ${ADMIN_JWT}"
|
||||||
|
# {"state":"ACTIVE","invalidReason":null,"envelope":{...},"lastValidatedAt":"..."}
|
||||||
|
|
||||||
|
curl https://server.example.com/api/v1/admin/license/usage \
|
||||||
|
-H "Authorization: Bearer ${ADMIN_JWT}"
|
||||||
|
# Shows current/cap/source per limit key
|
||||||
|
```
|
||||||
|
|
||||||
|
For boot-time installation (preferred for SaaS-managed deployments), set `CAMELEER_SERVER_LICENSE_TOKEN` instead of POSTing — see `docs/license-enforcement.md`.
|
||||||
|
|
||||||
|
## Security guidance
|
||||||
|
|
||||||
|
- **The Ed25519 private key is the trust root.** Anyone who holds it can mint licenses for any tenant. Treat it like a code-signing key.
|
||||||
|
- **Storage.** Production private keys belong in an HSM, KMS (e.g. AWS KMS / GCP KMS with non-exportable signing), or a sealed Vault transit backend. A sealed file on a laptop is acceptable for low-volume / pre-production minting only and should never be committed to git or shared via chat.
|
||||||
|
- **Rotation.** Rotation is destructive: every customer running with the *old* public key will reject all new tokens signed with the *new* private key. The pragmatic procedure is:
|
||||||
|
1. Generate the new keypair.
|
||||||
|
2. Distribute the new public key (`CAMELEER_SERVER_LICENSE_PUBLICKEY`) to every tenant's server config.
|
||||||
|
3. Once tenants confirm they are running with the new public key, re-mint and re-issue every active license under the new key.
|
||||||
|
4. Decommission the old private key.
|
||||||
|
Practical revocation flows through expiry — keep license terms short enough (12 months or less) that planned rotations stay aligned with renewal cadence.
|
||||||
|
- **Auditing.** The server records every install/replace/reject under `AuditCategory.LICENSE`. The minter itself does not write audit rows; if you need a vendor-side audit trail of mint operations, wrap `LicenseMinter.mint(...)` in your own ticketing pipeline.
|
||||||
|
- **Never commit private keys.** `.gitignore` does not block them by name — use a `secrets/` directory excluded by your repository's policy, or store them entirely outside the working tree.
|
||||||
|
|
||||||
|
## Compatibility / runtime separation
|
||||||
|
|
||||||
|
The minter is intentionally absent from `cameleer-server-app`'s production classpath. To verify after a build:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mvn -pl cameleer-server-app dependency:tree | grep license-minter
|
||||||
|
# expected: empty output (or, in development branches, a single line scoped 'test')
|
||||||
|
```
|
||||||
|
|
||||||
|
`cameleer-license-minter/pom.xml` depends on `cameleer-license-api` for the pure license contract types (`LicenseInfo`, `LicenseValidator`) used by mint + `--verify`. It deliberately does **not** depend on `cameleer-server-core`, so consumers of the minter (e.g. `cameleer-saas`) do not inherit server-runtime types onto their classpath. The server app intentionally does not depend on the minter — vendors mint outside the customer-deployed runtime, and a compromised customer cannot leverage server code to forge tokens.
|
||||||
69
cameleer-license-minter/pom.xml
Normal file
69
cameleer-license-minter/pom.xml
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
<!-- Vendor-only license minting module. Builds two artifacts: the plain
     library JAR (main artifact, repackage disabled below) and a Spring Boot
     fat CLI JAR under classifier "cli". -->
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <parent>
        <groupId>com.cameleer</groupId>
        <artifactId>cameleer-server-parent</artifactId>
        <version>1.0-SNAPSHOT</version>
    </parent>

    <artifactId>cameleer-license-minter</artifactId>
    <name>Cameleer License Minter</name>
    <description>Vendor-only Ed25519 license signing library + CLI</description>

    <dependencies>
        <!-- Pure license contract types (LicenseInfo, LicenseValidator). -->
        <dependency>
            <groupId>com.cameleer</groupId>
            <artifactId>cameleer-license-api</artifactId>
        </dependency>
        <!-- Canonical-JSON serialization of the signed payload. -->
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
        </dependency>
        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
        </dependency>

        <dependency>
            <groupId>org.junit.jupiter</groupId>
            <artifactId>junit-jupiter</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.assertj</groupId>
            <artifactId>assertj-core</artifactId>
            <scope>test</scope>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
                <executions>
                    <!-- Disable the default repackage so the main artifact stays as a plain library
                         JAR consumable as a Maven test-scope dependency by cameleer-server-app. -->
                    <execution>
                        <id>repackage</id>
                        <phase>none</phase>
                    </execution>
                    <!-- Separate execution builds the executable fat JAR with the "cli" classifier. -->
                    <execution>
                        <id>repackage-cli</id>
                        <goals>
                            <goal>repackage</goal>
                        </goals>
                        <configuration>
                            <classifier>cli</classifier>
                            <mainClass>com.cameleer.license.minter.cli.LicenseMinterCli</mainClass>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</project>
|
||||||
@@ -0,0 +1,52 @@
|
|||||||
|
package com.cameleer.license.minter;

import com.cameleer.license.LicenseInfo;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.databind.node.ObjectNode;

import java.security.GeneralSecurityException;
import java.security.PrivateKey;
import java.security.Signature;
import java.util.Base64;
import java.util.TreeMap;
||||||
|
|
||||||
|
public final class LicenseMinter {
|
||||||
|
|
||||||
|
private static final ObjectMapper MAPPER = new ObjectMapper()
|
||||||
|
.configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true);
|
||||||
|
|
||||||
|
private LicenseMinter() {}
|
||||||
|
|
||||||
|
public static String mint(LicenseInfo info, PrivateKey ed25519PrivateKey) {
|
||||||
|
byte[] payload = canonicalPayload(info);
|
||||||
|
try {
|
||||||
|
Signature signer = Signature.getInstance("Ed25519");
|
||||||
|
signer.initSign(ed25519PrivateKey);
|
||||||
|
signer.update(payload);
|
||||||
|
byte[] sig = signer.sign();
|
||||||
|
return Base64.getEncoder().encodeToString(payload) + "." + Base64.getEncoder().encodeToString(sig);
|
||||||
|
} catch (Exception e) {
|
||||||
|
throw new IllegalStateException("Failed to sign license", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static byte[] canonicalPayload(LicenseInfo info) {
|
||||||
|
ObjectNode root = MAPPER.createObjectNode();
|
||||||
|
root.put("exp", info.expiresAt().getEpochSecond());
|
||||||
|
root.put("gracePeriodDays", info.gracePeriodDays());
|
||||||
|
root.put("iat", info.issuedAt().getEpochSecond());
|
||||||
|
if (info.label() != null) {
|
||||||
|
root.put("label", info.label());
|
||||||
|
}
|
||||||
|
root.put("licenseId", info.licenseId().toString());
|
||||||
|
ObjectNode limits = MAPPER.createObjectNode();
|
||||||
|
new TreeMap<>(info.limits()).forEach(limits::put);
|
||||||
|
root.set("limits", limits);
|
||||||
|
root.put("tenantId", info.tenantId());
|
||||||
|
try {
|
||||||
|
return MAPPER.writeValueAsBytes(root);
|
||||||
|
} catch (Exception e) {
|
||||||
|
throw new IllegalStateException("Failed to serialize license payload", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,136 @@
|
|||||||
|
package com.cameleer.license.minter.cli;
|
||||||
|
|
||||||
|
import com.cameleer.license.minter.LicenseMinter;
|
||||||
|
import com.cameleer.license.LicenseInfo;
|
||||||
|
|
||||||
|
import java.io.PrintStream;
|
||||||
|
import java.nio.file.Files;
|
||||||
|
import java.nio.file.Path;
|
||||||
|
import java.security.KeyFactory;
|
||||||
|
import java.security.PrivateKey;
|
||||||
|
import java.security.spec.PKCS8EncodedKeySpec;
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.time.LocalDate;
|
||||||
|
import java.time.ZoneOffset;
|
||||||
|
import java.util.Base64;
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.HashSet;
|
||||||
|
import java.util.LinkedHashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
|
import java.util.TreeMap;
|
||||||
|
import java.util.UUID;
|
||||||
|
|
||||||
|
public final class LicenseMinterCli {
|
||||||
|
|
||||||
|
private static final Set<String> KNOWN_FLAGS = Set.of(
|
||||||
|
"--private-key", "--public-key", "--tenant", "--label",
|
||||||
|
"--expires", "--grace-days", "--output", "--verify"
|
||||||
|
);
|
||||||
|
|
||||||
|
public static void main(String[] args) {
|
||||||
|
System.exit(run(args));
|
||||||
|
}
|
||||||
|
|
||||||
|
public static int run(String[] args) {
|
||||||
|
return run(args, System.out, System.err);
|
||||||
|
}
|
||||||
|
|
||||||
|
public static int run(String[] args, PrintStream out, PrintStream err) {
|
||||||
|
Map<String, String> flags = new LinkedHashMap<>();
|
||||||
|
Set<String> bool = new HashSet<>();
|
||||||
|
Map<String, Integer> limits = new TreeMap<>();
|
||||||
|
for (String arg : args) {
|
||||||
|
if (!arg.startsWith("--")) {
|
||||||
|
err.println("unexpected positional argument: " + arg);
|
||||||
|
return 2;
|
||||||
|
}
|
||||||
|
int eq = arg.indexOf('=');
|
||||||
|
String key = eq < 0 ? arg : arg.substring(0, eq);
|
||||||
|
String value = eq < 0 ? null : arg.substring(eq + 1);
|
||||||
|
if (key.startsWith("--max-")) {
|
||||||
|
String limitKey = "max_" + key.substring("--max-".length()).replace('-', '_');
|
||||||
|
if (value == null) {
|
||||||
|
err.println("missing value for " + key);
|
||||||
|
return 2;
|
||||||
|
}
|
||||||
|
limits.put(limitKey, Integer.parseInt(value));
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if (!KNOWN_FLAGS.contains(key)) {
|
||||||
|
err.println("unknown flag: " + key);
|
||||||
|
return 2;
|
||||||
|
}
|
||||||
|
if (value == null) {
|
||||||
|
bool.add(key);
|
||||||
|
} else {
|
||||||
|
flags.put(key, value);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
String privPath = flags.get("--private-key");
|
||||||
|
String tenant = flags.get("--tenant");
|
||||||
|
String expiresIso = flags.get("--expires");
|
||||||
|
if (privPath == null || tenant == null || expiresIso == null) {
|
||||||
|
err.println("required: --private-key --tenant --expires");
|
||||||
|
return 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
PrivateKey privateKey = readEd25519PrivateKey(Path.of(privPath));
|
||||||
|
int graceDays = Integer.parseInt(flags.getOrDefault("--grace-days", "0"));
|
||||||
|
Instant exp = LocalDate.parse(expiresIso).atStartOfDay(ZoneOffset.UTC).toInstant();
|
||||||
|
LicenseInfo info = new LicenseInfo(
|
||||||
|
UUID.randomUUID(),
|
||||||
|
tenant,
|
||||||
|
flags.get("--label"),
|
||||||
|
Collections.unmodifiableMap(limits),
|
||||||
|
Instant.now(),
|
||||||
|
exp,
|
||||||
|
graceDays
|
||||||
|
);
|
||||||
|
String token = LicenseMinter.mint(info, privateKey);
|
||||||
|
|
||||||
|
String outPath = flags.get("--output");
|
||||||
|
if (outPath != null) {
|
||||||
|
Files.writeString(Path.of(outPath), token);
|
||||||
|
out.println("wrote " + outPath);
|
||||||
|
} else {
|
||||||
|
out.println(token);
|
||||||
|
}
|
||||||
|
if (bool.contains("--verify")) {
|
||||||
|
String pubPath = flags.get("--public-key");
|
||||||
|
if (pubPath == null) {
|
||||||
|
err.println("--verify requires --public-key");
|
||||||
|
if (outPath != null) Files.deleteIfExists(Path.of(outPath));
|
||||||
|
return 2;
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
String pubB64 = Files.readString(Path.of(pubPath)).trim();
|
||||||
|
new com.cameleer.license.LicenseValidator(pubB64, tenant).validate(token);
|
||||||
|
out.println("verified ok");
|
||||||
|
} catch (Exception ve) {
|
||||||
|
err.println("VERIFY FAILED: " + ve.getMessage());
|
||||||
|
if (outPath != null) Files.deleteIfExists(Path.of(outPath));
|
||||||
|
return 3;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0;
|
||||||
|
} catch (Exception e) {
|
||||||
|
err.println("ERROR: " + e.getMessage());
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static PrivateKey readEd25519PrivateKey(Path path) throws Exception {
|
||||||
|
String s = Files.readString(path).trim();
|
||||||
|
if (s.startsWith("-----BEGIN")) {
|
||||||
|
s = s.replaceAll("-----BEGIN [A-Z ]+-----", "")
|
||||||
|
.replaceAll("-----END [A-Z ]+-----", "")
|
||||||
|
.replaceAll("\\s", "");
|
||||||
|
}
|
||||||
|
byte[] der = Base64.getDecoder().decode(s);
|
||||||
|
return KeyFactory.getInstance("Ed25519")
|
||||||
|
.generatePrivate(new PKCS8EncodedKeySpec(der));
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,53 @@
|
|||||||
|
package com.cameleer.license.minter;
|
||||||
|
|
||||||
|
import com.cameleer.license.LicenseInfo;
|
||||||
|
import com.cameleer.license.LicenseValidator;
|
||||||
|
import org.junit.jupiter.api.Test;
|
||||||
|
|
||||||
|
import java.security.KeyPair;
|
||||||
|
import java.security.KeyPairGenerator;
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.util.Base64;
|
||||||
|
import java.util.LinkedHashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.UUID;
|
||||||
|
|
||||||
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
|
|
||||||
|
class LicenseMinterTest {
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void roundTrip_validatorAcceptsMintedToken() throws Exception {
|
||||||
|
KeyPair kp = KeyPairGenerator.getInstance("Ed25519").generateKeyPair();
|
||||||
|
String publicB64 = Base64.getEncoder().encodeToString(kp.getPublic().getEncoded());
|
||||||
|
|
||||||
|
LicenseInfo info = new LicenseInfo(
|
||||||
|
UUID.randomUUID(), "acme", "ACME prod",
|
||||||
|
Map.of("max_apps", 50, "max_agents", 100),
|
||||||
|
Instant.now(), Instant.now().plusSeconds(86400), 7);
|
||||||
|
|
||||||
|
String token = LicenseMinter.mint(info, kp.getPrivate());
|
||||||
|
|
||||||
|
LicenseInfo parsed = new LicenseValidator(publicB64, "acme").validate(token);
|
||||||
|
assertThat(parsed.licenseId()).isEqualTo(info.licenseId());
|
||||||
|
assertThat(parsed.tenantId()).isEqualTo("acme");
|
||||||
|
assertThat(parsed.limits().get("max_apps")).isEqualTo(50);
|
||||||
|
assertThat(parsed.gracePeriodDays()).isEqualTo(7);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void canonicalJson_isStableAcrossRuns() throws Exception {
|
||||||
|
KeyPair kp = KeyPairGenerator.getInstance("Ed25519").generateKeyPair();
|
||||||
|
UUID id = UUID.randomUUID();
|
||||||
|
Instant now = Instant.parse("2026-04-25T10:00:00Z");
|
||||||
|
Instant exp = Instant.parse("2027-04-25T10:00:00Z");
|
||||||
|
LinkedHashMap<String, Integer> limits = new LinkedHashMap<>();
|
||||||
|
limits.put("max_apps", 5);
|
||||||
|
limits.put("max_agents", 10);
|
||||||
|
LicenseInfo info = new LicenseInfo(id, "acme", "label", limits, now, exp, 0);
|
||||||
|
|
||||||
|
String t1 = LicenseMinter.mint(info, kp.getPrivate());
|
||||||
|
String t2 = LicenseMinter.mint(info, kp.getPrivate());
|
||||||
|
assertThat(t1).isEqualTo(t2);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,112 @@
|
|||||||
|
package com.cameleer.license.minter.cli;
|
||||||
|
|
||||||
|
import com.cameleer.license.LicenseValidator;
|
||||||
|
import org.junit.jupiter.api.Test;
|
||||||
|
import org.junit.jupiter.api.io.TempDir;
|
||||||
|
|
||||||
|
import java.nio.file.Files;
|
||||||
|
import java.nio.file.Path;
|
||||||
|
import java.security.KeyPair;
|
||||||
|
import java.security.KeyPairGenerator;
|
||||||
|
import java.util.Base64;
|
||||||
|
|
||||||
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
|
|
||||||
|
class LicenseMinterCliTest {
|
||||||
|
|
||||||
|
@TempDir Path tmp;
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void mints_validToken_validatorAccepts() throws Exception {
|
||||||
|
KeyPair kp = KeyPairGenerator.getInstance("Ed25519").generateKeyPair();
|
||||||
|
Path priv = tmp.resolve("priv.b64");
|
||||||
|
Path pub = tmp.resolve("pub.b64");
|
||||||
|
Files.writeString(priv, Base64.getEncoder().encodeToString(kp.getPrivate().getEncoded()));
|
||||||
|
Files.writeString(pub, Base64.getEncoder().encodeToString(kp.getPublic().getEncoded()));
|
||||||
|
Path out = tmp.resolve("license.tok");
|
||||||
|
|
||||||
|
int code = LicenseMinterCli.run(new String[]{
|
||||||
|
"--private-key=" + priv,
|
||||||
|
"--tenant=acme",
|
||||||
|
"--label=ACME",
|
||||||
|
"--expires=2099-12-31",
|
||||||
|
"--grace-days=30",
|
||||||
|
"--max-apps=50",
|
||||||
|
"--output=" + out
|
||||||
|
});
|
||||||
|
|
||||||
|
assertThat(code).isEqualTo(0);
|
||||||
|
String token = Files.readString(out).trim();
|
||||||
|
var info = new LicenseValidator(Files.readString(pub).trim(), "acme").validate(token);
|
||||||
|
assertThat(info.tenantId()).isEqualTo("acme");
|
||||||
|
assertThat(info.limits().get("max_apps")).isEqualTo(50);
|
||||||
|
assertThat(info.gracePeriodDays()).isEqualTo(30);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void unknownFlag_failsFast() {
|
||||||
|
int code = LicenseMinterCli.run(new String[]{"--frobnicate=yes"});
|
||||||
|
assertThat(code).isNotZero();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void verify_happyPath_succeeds() throws Exception {
|
||||||
|
KeyPair kp = KeyPairGenerator.getInstance("Ed25519").generateKeyPair();
|
||||||
|
Path priv = tmp.resolve("priv.b64");
|
||||||
|
Path pub = tmp.resolve("pub.b64");
|
||||||
|
Files.writeString(priv, Base64.getEncoder().encodeToString(kp.getPrivate().getEncoded()));
|
||||||
|
Files.writeString(pub, Base64.getEncoder().encodeToString(kp.getPublic().getEncoded()));
|
||||||
|
Path out = tmp.resolve("license.tok");
|
||||||
|
|
||||||
|
int code = LicenseMinterCli.run(new String[]{
|
||||||
|
"--private-key=" + priv,
|
||||||
|
"--public-key=" + pub,
|
||||||
|
"--tenant=acme",
|
||||||
|
"--expires=2099-12-31",
|
||||||
|
"--output=" + out,
|
||||||
|
"--verify"
|
||||||
|
});
|
||||||
|
|
||||||
|
assertThat(code).isEqualTo(0);
|
||||||
|
assertThat(out).exists();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void verify_wrongPublicKey_deletesOutputAndExitsNonZero() throws Exception {
|
||||||
|
KeyPair signing = KeyPairGenerator.getInstance("Ed25519").generateKeyPair();
|
||||||
|
KeyPair other = KeyPairGenerator.getInstance("Ed25519").generateKeyPair();
|
||||||
|
Path priv = tmp.resolve("priv.b64");
|
||||||
|
Path pub = tmp.resolve("pub.b64");
|
||||||
|
Files.writeString(priv, Base64.getEncoder().encodeToString(signing.getPrivate().getEncoded()));
|
||||||
|
Files.writeString(pub, Base64.getEncoder().encodeToString(other.getPublic().getEncoded()));
|
||||||
|
Path out = tmp.resolve("license.tok");
|
||||||
|
|
||||||
|
int code = LicenseMinterCli.run(new String[]{
|
||||||
|
"--private-key=" + priv,
|
||||||
|
"--public-key=" + pub,
|
||||||
|
"--tenant=acme",
|
||||||
|
"--expires=2099-12-31",
|
||||||
|
"--output=" + out,
|
||||||
|
"--verify"
|
||||||
|
});
|
||||||
|
|
||||||
|
assertThat(code).isNotZero();
|
||||||
|
assertThat(out).doesNotExist();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void verify_withoutPublicKey_fails() throws Exception {
|
||||||
|
KeyPair kp = KeyPairGenerator.getInstance("Ed25519").generateKeyPair();
|
||||||
|
Path priv = tmp.resolve("priv.b64");
|
||||||
|
Files.writeString(priv, Base64.getEncoder().encodeToString(kp.getPrivate().getEncoded()));
|
||||||
|
|
||||||
|
int code = LicenseMinterCli.run(new String[]{
|
||||||
|
"--private-key=" + priv,
|
||||||
|
"--tenant=acme",
|
||||||
|
"--expires=2099-12-31",
|
||||||
|
"--verify"
|
||||||
|
});
|
||||||
|
|
||||||
|
assertThat(code).isNotZero();
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -19,6 +19,12 @@
|
|||||||
<groupId>com.cameleer</groupId>
|
<groupId>com.cameleer</groupId>
|
||||||
<artifactId>cameleer-server-core</artifactId>
|
<artifactId>cameleer-server-core</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>com.cameleer</groupId>
|
||||||
|
<artifactId>cameleer-license-minter</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
<scope>test</scope>
|
||||||
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.springframework.boot</groupId>
|
<groupId>org.springframework.boot</groupId>
|
||||||
<artifactId>spring-boot-starter-web</artifactId>
|
<artifactId>spring-boot-starter-web</artifactId>
|
||||||
|
|||||||
@@ -12,6 +12,7 @@ import com.cameleer.server.app.alerting.eval.EvalContext;
|
|||||||
import com.cameleer.server.app.alerting.eval.EvalResult;
|
import com.cameleer.server.app.alerting.eval.EvalResult;
|
||||||
import com.cameleer.server.app.alerting.eval.TickCache;
|
import com.cameleer.server.app.alerting.eval.TickCache;
|
||||||
import com.cameleer.server.app.alerting.notify.MustacheRenderer;
|
import com.cameleer.server.app.alerting.notify.MustacheRenderer;
|
||||||
|
import com.cameleer.server.app.license.LicenseEnforcer;
|
||||||
import com.cameleer.server.app.web.EnvPath;
|
import com.cameleer.server.app.web.EnvPath;
|
||||||
import com.cameleer.server.core.admin.AuditCategory;
|
import com.cameleer.server.core.admin.AuditCategory;
|
||||||
import com.cameleer.server.core.admin.AuditResult;
|
import com.cameleer.server.core.admin.AuditResult;
|
||||||
@@ -78,6 +79,7 @@ public class AlertRuleController {
|
|||||||
private final Map<ConditionKind, ConditionEvaluator<?>> evaluators;
|
private final Map<ConditionKind, ConditionEvaluator<?>> evaluators;
|
||||||
private final Clock clock;
|
private final Clock clock;
|
||||||
private final String tenantId;
|
private final String tenantId;
|
||||||
|
private final LicenseEnforcer licenseEnforcer;
|
||||||
|
|
||||||
@SuppressWarnings("SpringJavaInjectionPointsAutowiringInspection")
|
@SuppressWarnings("SpringJavaInjectionPointsAutowiringInspection")
|
||||||
public AlertRuleController(AlertRuleRepository ruleRepo,
|
public AlertRuleController(AlertRuleRepository ruleRepo,
|
||||||
@@ -86,7 +88,8 @@ public class AlertRuleController {
|
|||||||
MustacheRenderer renderer,
|
MustacheRenderer renderer,
|
||||||
List<ConditionEvaluator<?>> evaluatorList,
|
List<ConditionEvaluator<?>> evaluatorList,
|
||||||
Clock alertingClock,
|
Clock alertingClock,
|
||||||
@Value("${cameleer.server.tenant.id:default}") String tenantId) {
|
@Value("${cameleer.server.tenant.id:default}") String tenantId,
|
||||||
|
LicenseEnforcer licenseEnforcer) {
|
||||||
this.ruleRepo = ruleRepo;
|
this.ruleRepo = ruleRepo;
|
||||||
this.connectionService = connectionService;
|
this.connectionService = connectionService;
|
||||||
this.auditService = auditService;
|
this.auditService = auditService;
|
||||||
@@ -97,6 +100,7 @@ public class AlertRuleController {
|
|||||||
}
|
}
|
||||||
this.clock = alertingClock;
|
this.clock = alertingClock;
|
||||||
this.tenantId = tenantId;
|
this.tenantId = tenantId;
|
||||||
|
this.licenseEnforcer = licenseEnforcer;
|
||||||
}
|
}
|
||||||
|
|
||||||
// -------------------------------------------------------------------------
|
// -------------------------------------------------------------------------
|
||||||
@@ -126,6 +130,8 @@ public class AlertRuleController {
|
|||||||
@Valid @RequestBody AlertRuleRequest req,
|
@Valid @RequestBody AlertRuleRequest req,
|
||||||
HttpServletRequest httpRequest) {
|
HttpServletRequest httpRequest) {
|
||||||
|
|
||||||
|
licenseEnforcer.assertWithinCap("max_alert_rules", ruleRepo.count(), 1);
|
||||||
|
|
||||||
validateAttributeKeys(req.condition());
|
validateAttributeKeys(req.condition());
|
||||||
validateBusinessRules(req);
|
validateBusinessRules(req);
|
||||||
validateWebhooks(req.webhooks(), env.id());
|
validateWebhooks(req.webhooks(), env.id());
|
||||||
|
|||||||
@@ -113,6 +113,12 @@ public class PostgresAlertRuleRepository implements AlertRuleRepository {
|
|||||||
jdbc.update("DELETE FROM alert_rules WHERE id = ?", id);
|
jdbc.update("DELETE FROM alert_rules WHERE id = ?", id);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long count() {
|
||||||
|
Long n = jdbc.queryForObject("SELECT COUNT(*) FROM alert_rules", Long.class);
|
||||||
|
return n == null ? 0L : n;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public List<AlertRule> claimDueRules(String instanceId, int batchSize, int claimTtlSeconds) {
|
public List<AlertRule> claimDueRules(String instanceId, int batchSize, int claimTtlSeconds) {
|
||||||
String sql = """
|
String sql = """
|
||||||
|
|||||||
@@ -17,11 +17,13 @@ import org.springframework.context.annotation.Configuration;
|
|||||||
public class AgentRegistryBeanConfig {
|
public class AgentRegistryBeanConfig {
|
||||||
|
|
||||||
@Bean
|
@Bean
|
||||||
public AgentRegistryService agentRegistryService(AgentRegistryConfig config) {
|
public AgentRegistryService agentRegistryService(AgentRegistryConfig config,
|
||||||
|
com.cameleer.server.app.license.LicenseEnforcer enforcer) {
|
||||||
return new AgentRegistryService(
|
return new AgentRegistryService(
|
||||||
config.getStaleThresholdMs(),
|
config.getStaleThresholdMs(),
|
||||||
config.getDeadThresholdMs(),
|
config.getDeadThresholdMs(),
|
||||||
config.getCommandExpiryMs()
|
config.getCommandExpiryMs(),
|
||||||
|
current -> enforcer.assertWithinCap("max_agents", current, 1)
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,22 +1,48 @@
|
|||||||
package com.cameleer.server.app.config;
|
package com.cameleer.server.app.config;
|
||||||
|
|
||||||
|
import com.cameleer.server.app.license.LicenseRepository;
|
||||||
|
import com.cameleer.server.app.license.LicenseService;
|
||||||
|
import com.cameleer.server.core.admin.AuditService;
|
||||||
import com.cameleer.server.core.license.LicenseGate;
|
import com.cameleer.server.core.license.LicenseGate;
|
||||||
import com.cameleer.server.core.license.LicenseInfo;
|
import com.cameleer.license.LicenseInfo;
|
||||||
import com.cameleer.server.core.license.LicenseValidator;
|
import com.cameleer.license.LicenseValidator;
|
||||||
|
import jakarta.annotation.PostConstruct;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
import org.springframework.beans.factory.annotation.Value;
|
import org.springframework.beans.factory.annotation.Value;
|
||||||
|
import org.springframework.context.ApplicationEventPublisher;
|
||||||
import org.springframework.context.annotation.Bean;
|
import org.springframework.context.annotation.Bean;
|
||||||
import org.springframework.context.annotation.Configuration;
|
import org.springframework.context.annotation.Configuration;
|
||||||
|
|
||||||
import java.nio.file.Files;
|
import java.nio.file.Files;
|
||||||
import java.nio.file.Path;
|
import java.nio.file.Path;
|
||||||
|
import java.security.KeyPair;
|
||||||
|
import java.security.KeyPairGenerator;
|
||||||
|
import java.util.Base64;
|
||||||
|
import java.util.Optional;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* License bean topology (4 beans, in dependency order):
|
||||||
|
*
|
||||||
|
* <ol>
|
||||||
|
* <li>{@link LicenseGate} — always present, mutated by {@link LicenseService}.</li>
|
||||||
|
* <li>{@link LicenseValidator} — always present. When no public key is configured, returns an
|
||||||
|
* always-failing override so any loaded token routes through {@code install()} and is
|
||||||
|
* audited as INVALID rather than silently ignored.</li>
|
||||||
|
* <li>{@link LicenseService} — single mediation point for install / replace / revalidate;
|
||||||
|
* audits + persists + publishes {@code LicenseChangedEvent}.</li>
|
||||||
|
* <li>{@link LicenseBootLoader} — {@code @PostConstruct} drives {@code loadInitial} after the
|
||||||
|
* Spring context is ready. Resolution order: env var > license file > persisted DB row.</li>
|
||||||
|
* </ol>
|
||||||
|
*/
|
||||||
@Configuration
|
@Configuration
|
||||||
public class LicenseBeanConfig {
|
public class LicenseBeanConfig {
|
||||||
|
|
||||||
private static final Logger log = LoggerFactory.getLogger(LicenseBeanConfig.class);
|
private static final Logger log = LoggerFactory.getLogger(LicenseBeanConfig.class);
|
||||||
|
|
||||||
|
@Value("${cameleer.server.tenant.id:default}")
|
||||||
|
private String tenantId;
|
||||||
|
|
||||||
@Value("${cameleer.server.license.token:}")
|
@Value("${cameleer.server.license.token:}")
|
||||||
private String licenseToken;
|
private String licenseToken;
|
||||||
|
|
||||||
@@ -28,41 +54,77 @@ public class LicenseBeanConfig {
|
|||||||
|
|
||||||
@Bean
|
@Bean
|
||||||
public LicenseGate licenseGate() {
|
public LicenseGate licenseGate() {
|
||||||
LicenseGate gate = new LicenseGate();
|
return new LicenseGate();
|
||||||
|
|
||||||
String token = resolveLicenseToken();
|
|
||||||
if (token == null || token.isBlank()) {
|
|
||||||
log.info("No license configured — running in open mode (all features enabled)");
|
|
||||||
return gate;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (licensePublicKey == null || licensePublicKey.isBlank()) {
|
|
||||||
log.warn("License token provided but no public key configured (CAMELEER_SERVER_LICENSE_PUBLICKEY). Running in open mode.");
|
|
||||||
return gate;
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
LicenseValidator validator = new LicenseValidator(licensePublicKey);
|
|
||||||
LicenseInfo info = validator.validate(token);
|
|
||||||
gate.load(info);
|
|
||||||
} catch (Exception e) {
|
|
||||||
log.error("Failed to validate license: {}. Running in open mode.", e.getMessage());
|
|
||||||
}
|
|
||||||
|
|
||||||
return gate;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private String resolveLicenseToken() {
|
@Bean
|
||||||
if (licenseToken != null && !licenseToken.isBlank()) {
|
public LicenseValidator licenseValidator() {
|
||||||
return licenseToken;
|
if (licensePublicKey == null || licensePublicKey.isBlank()) {
|
||||||
}
|
log.warn("CAMELEER_SERVER_LICENSE_PUBLICKEY not set — all licenses will be rejected as INVALID");
|
||||||
if (licenseFile != null && !licenseFile.isBlank()) {
|
// Generate a throwaway, structurally-valid Ed25519 keypair just to satisfy the
|
||||||
|
// parent constructor's X.509 SubjectPublicKeyInfo decode + Ed25519 point validation.
|
||||||
|
// The overridden validate(...) always throws, so the dummy key is never used to
|
||||||
|
// verify anything — it only exists so the bean is constructable in misconfigured
|
||||||
|
// installs and any token that is loaded routes to INVALID via install()'s catch.
|
||||||
try {
|
try {
|
||||||
return Files.readString(Path.of(licenseFile)).trim();
|
KeyPair throwaway = KeyPairGenerator.getInstance("Ed25519").generateKeyPair();
|
||||||
|
String dummyPub = Base64.getEncoder().encodeToString(throwaway.getPublic().getEncoded());
|
||||||
|
return new LicenseValidator(dummyPub, tenantId) {
|
||||||
|
@Override
|
||||||
|
public LicenseInfo validate(String token) {
|
||||||
|
throw new IllegalStateException("license public key not configured");
|
||||||
|
}
|
||||||
|
};
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
log.warn("Failed to read license file {}: {}", licenseFile, e.getMessage());
|
throw new IllegalStateException("Failed to construct fallback license validator", e);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return null;
|
return new LicenseValidator(licensePublicKey, tenantId);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Bean
|
||||||
|
public LicenseService licenseService(LicenseRepository repo,
|
||||||
|
LicenseGate gate,
|
||||||
|
LicenseValidator validator,
|
||||||
|
AuditService audit,
|
||||||
|
ApplicationEventPublisher events) {
|
||||||
|
return new LicenseService(tenantId, repo, gate, validator, audit, events);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Bean
|
||||||
|
public LicenseBootLoader licenseBootLoader(LicenseService svc) {
|
||||||
|
return new LicenseBootLoader(svc, licenseToken, licenseFile);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* {@code @PostConstruct} bridge that converts env-var/file values into the
|
||||||
|
* {@code Optional<String>} pair {@link LicenseService#loadInitial} expects, so
|
||||||
|
* env-var, file, and DB paths share the same audit + event-publish code path.
|
||||||
|
*/
|
||||||
|
public static class LicenseBootLoader {
|
||||||
|
private final LicenseService svc;
|
||||||
|
private final String envToken;
|
||||||
|
private final String filePath;
|
||||||
|
|
||||||
|
public LicenseBootLoader(LicenseService svc, String envToken, String filePath) {
|
||||||
|
this.svc = svc;
|
||||||
|
this.envToken = envToken;
|
||||||
|
this.filePath = filePath;
|
||||||
|
}
|
||||||
|
|
||||||
|
@PostConstruct
|
||||||
|
public void load() {
|
||||||
|
Optional<String> env = (envToken != null && !envToken.isBlank())
|
||||||
|
? Optional.of(envToken) : Optional.empty();
|
||||||
|
Optional<String> file = Optional.empty();
|
||||||
|
if (filePath != null && !filePath.isBlank()) {
|
||||||
|
try {
|
||||||
|
file = Optional.of(Files.readString(Path.of(filePath)).trim());
|
||||||
|
} catch (Exception e) {
|
||||||
|
log.warn("Failed to read license file {}: {}", filePath, e.getMessage());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
svc.loadInitial(env, file);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -50,14 +50,18 @@ public class RuntimeBeanConfig {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Bean
|
@Bean
|
||||||
public EnvironmentService environmentService(EnvironmentRepository repo) {
|
public EnvironmentService environmentService(EnvironmentRepository repo,
|
||||||
return new EnvironmentService(repo);
|
com.cameleer.server.app.license.LicenseEnforcer enforcer) {
|
||||||
|
return new EnvironmentService(repo, current ->
|
||||||
|
enforcer.assertWithinCap("max_environments", current, 1));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Bean
|
@Bean
|
||||||
public AppService appService(AppRepository appRepo, AppVersionRepository versionRepo,
|
public AppService appService(AppRepository appRepo, AppVersionRepository versionRepo,
|
||||||
@Value("${cameleer.server.runtime.jarstoragepath:/data/jars}") String jarStoragePath) {
|
@Value("${cameleer.server.runtime.jarstoragepath:/data/jars}") String jarStoragePath,
|
||||||
return new AppService(appRepo, versionRepo, jarStoragePath);
|
com.cameleer.server.app.license.LicenseEnforcer enforcer) {
|
||||||
|
return new AppService(appRepo, versionRepo, jarStoragePath,
|
||||||
|
current -> enforcer.assertWithinCap("max_apps", current, 1));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Bean
|
@Bean
|
||||||
|
|||||||
@@ -9,6 +9,8 @@ import com.cameleer.server.app.storage.ClickHouseRouteCatalogStore;
|
|||||||
import com.cameleer.server.core.storage.RouteCatalogStore;
|
import com.cameleer.server.core.storage.RouteCatalogStore;
|
||||||
import com.cameleer.server.app.storage.ClickHouseMetricsQueryStore;
|
import com.cameleer.server.app.storage.ClickHouseMetricsQueryStore;
|
||||||
import com.cameleer.server.app.storage.ClickHouseMetricsStore;
|
import com.cameleer.server.app.storage.ClickHouseMetricsStore;
|
||||||
|
import com.cameleer.server.app.storage.ClickHouseServerMetricsQueryStore;
|
||||||
|
import com.cameleer.server.app.storage.ClickHouseServerMetricsStore;
|
||||||
import com.cameleer.server.app.storage.ClickHouseStatsStore;
|
import com.cameleer.server.app.storage.ClickHouseStatsStore;
|
||||||
import com.cameleer.server.core.admin.AuditRepository;
|
import com.cameleer.server.core.admin.AuditRepository;
|
||||||
import com.cameleer.server.core.admin.AuditService;
|
import com.cameleer.server.core.admin.AuditService;
|
||||||
@@ -67,6 +69,19 @@ public class StorageBeanConfig {
|
|||||||
return new ClickHouseMetricsQueryStore(tenantProperties.getId(), clickHouseJdbc);
|
return new ClickHouseMetricsQueryStore(tenantProperties.getId(), clickHouseJdbc);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Bean
|
||||||
|
public ServerMetricsStore clickHouseServerMetricsStore(
|
||||||
|
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc) {
|
||||||
|
return new ClickHouseServerMetricsStore(clickHouseJdbc);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Bean
|
||||||
|
public ServerMetricsQueryStore clickHouseServerMetricsQueryStore(
|
||||||
|
TenantProperties tenantProperties,
|
||||||
|
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc) {
|
||||||
|
return new ClickHouseServerMetricsQueryStore(tenantProperties.getId(), clickHouseJdbc);
|
||||||
|
}
|
||||||
|
|
||||||
// ── Execution Store ──────────────────────────────────────────────────
|
// ── Execution Store ──────────────────────────────────────────────────
|
||||||
|
|
||||||
@Bean
|
@Bean
|
||||||
@@ -188,4 +203,12 @@ public class StorageBeanConfig {
|
|||||||
ClickHouseUsageTracker usageTracker) {
|
ClickHouseUsageTracker usageTracker) {
|
||||||
return new com.cameleer.server.app.analytics.UsageFlushScheduler(usageTracker);
|
return new com.cameleer.server.app.analytics.UsageFlushScheduler(usageTracker);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ── License Repository ───────────────────────────────────────────
|
||||||
|
|
||||||
|
@Bean
|
||||||
|
public com.cameleer.server.app.license.LicenseRepository licenseRepository(
|
||||||
|
JdbcTemplate jdbcTemplate) {
|
||||||
|
return new com.cameleer.server.app.license.PostgresLicenseRepository(jdbcTemplate);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -196,7 +196,16 @@ public class CatalogController {
|
|||||||
}
|
}
|
||||||
|
|
||||||
Set<String> routeIds = routesByApp.getOrDefault(slug, Set.of());
|
Set<String> routeIds = routesByApp.getOrDefault(slug, Set.of());
|
||||||
List<String> agentIds = agents.stream().map(AgentInfo::instanceId).toList();
|
|
||||||
|
// Resolve the env slug for this row early so fromUri can survive
|
||||||
|
// cross-env queries (env==null) against managed apps.
|
||||||
|
String rowEnvSlug = envSlug;
|
||||||
|
if (app != null && rowEnvSlug.isEmpty()) {
|
||||||
|
try {
|
||||||
|
rowEnvSlug = envService.getById(app.environmentId()).slug();
|
||||||
|
} catch (Exception ignored) {}
|
||||||
|
}
|
||||||
|
final String resolvedEnvSlug = rowEnvSlug;
|
||||||
|
|
||||||
// Routes
|
// Routes
|
||||||
List<RouteSummary> routeSummaries = routeIds.stream()
|
List<RouteSummary> routeSummaries = routeIds.stream()
|
||||||
@@ -204,7 +213,7 @@ public class CatalogController {
|
|||||||
String key = slug + "/" + routeId;
|
String key = slug + "/" + routeId;
|
||||||
long count = routeExchangeCounts.getOrDefault(key, 0L);
|
long count = routeExchangeCounts.getOrDefault(key, 0L);
|
||||||
Instant lastSeen = routeLastSeen.get(key);
|
Instant lastSeen = routeLastSeen.get(key);
|
||||||
String fromUri = resolveFromEndpointUri(routeId, agentIds);
|
String fromUri = resolveFromEndpointUri(slug, routeId, resolvedEnvSlug);
|
||||||
String state = routeStateRegistry.getState(slug, routeId).name().toLowerCase();
|
String state = routeStateRegistry.getState(slug, routeId).name().toLowerCase();
|
||||||
String routeState = "started".equals(state) ? null : state;
|
String routeState = "started".equals(state) ? null : state;
|
||||||
return new RouteSummary(routeId, count, lastSeen, fromUri, routeState);
|
return new RouteSummary(routeId, count, lastSeen, fromUri, routeState);
|
||||||
@@ -258,15 +267,9 @@ public class CatalogController {
|
|||||||
String healthTooltip = buildHealthTooltip(app != null, deployStatus, agentHealth, agents.size());
|
String healthTooltip = buildHealthTooltip(app != null, deployStatus, agentHealth, agents.size());
|
||||||
|
|
||||||
String displayName = app != null ? app.displayName() : slug;
|
String displayName = app != null ? app.displayName() : slug;
|
||||||
String appEnvSlug = envSlug;
|
|
||||||
if (app != null && appEnvSlug.isEmpty()) {
|
|
||||||
try {
|
|
||||||
appEnvSlug = envService.getById(app.environmentId()).slug();
|
|
||||||
} catch (Exception ignored) {}
|
|
||||||
}
|
|
||||||
|
|
||||||
catalog.add(new CatalogApp(
|
catalog.add(new CatalogApp(
|
||||||
slug, displayName, app != null, appEnvSlug,
|
slug, displayName, app != null, resolvedEnvSlug,
|
||||||
health, healthTooltip, agents.size(), routeSummaries, agentSummaries,
|
health, healthTooltip, agents.size(), routeSummaries, agentSummaries,
|
||||||
totalExchanges, deploymentSummary
|
totalExchanges, deploymentSummary
|
||||||
));
|
));
|
||||||
@@ -275,8 +278,11 @@ public class CatalogController {
|
|||||||
return ResponseEntity.ok(catalog);
|
return ResponseEntity.ok(catalog);
|
||||||
}
|
}
|
||||||
|
|
||||||
private String resolveFromEndpointUri(String routeId, List<String> agentIds) {
|
private String resolveFromEndpointUri(String applicationId, String routeId, String environment) {
|
||||||
return diagramStore.findContentHashForRouteByAgents(routeId, agentIds)
|
if (environment == null || environment.isBlank()) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
return diagramStore.findLatestContentHashForAppRoute(applicationId, routeId, environment)
|
||||||
.flatMap(diagramStore::findByContentHash)
|
.flatMap(diagramStore::findByContentHash)
|
||||||
.map(RouteGraph::getRoot)
|
.map(RouteGraph::getRoot)
|
||||||
.map(root -> root.getEndpointUri())
|
.map(root -> root.getEndpointUri())
|
||||||
|
|||||||
@@ -2,8 +2,6 @@ package com.cameleer.server.app.controller;
|
|||||||
|
|
||||||
import com.cameleer.common.graph.RouteGraph;
|
import com.cameleer.common.graph.RouteGraph;
|
||||||
import com.cameleer.server.app.web.EnvPath;
|
import com.cameleer.server.app.web.EnvPath;
|
||||||
import com.cameleer.server.core.agent.AgentInfo;
|
|
||||||
import com.cameleer.server.core.agent.AgentRegistryService;
|
|
||||||
import com.cameleer.server.core.diagram.DiagramLayout;
|
import com.cameleer.server.core.diagram.DiagramLayout;
|
||||||
import com.cameleer.server.core.diagram.DiagramRenderer;
|
import com.cameleer.server.core.diagram.DiagramRenderer;
|
||||||
import com.cameleer.server.core.runtime.Environment;
|
import com.cameleer.server.core.runtime.Environment;
|
||||||
@@ -21,7 +19,6 @@ import org.springframework.web.bind.annotation.PathVariable;
|
|||||||
import org.springframework.web.bind.annotation.RequestParam;
|
import org.springframework.web.bind.annotation.RequestParam;
|
||||||
import org.springframework.web.bind.annotation.RestController;
|
import org.springframework.web.bind.annotation.RestController;
|
||||||
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Optional;
|
import java.util.Optional;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -42,14 +39,11 @@ public class DiagramRenderController {
|
|||||||
|
|
||||||
private final DiagramStore diagramStore;
|
private final DiagramStore diagramStore;
|
||||||
private final DiagramRenderer diagramRenderer;
|
private final DiagramRenderer diagramRenderer;
|
||||||
private final AgentRegistryService registryService;
|
|
||||||
|
|
||||||
public DiagramRenderController(DiagramStore diagramStore,
|
public DiagramRenderController(DiagramStore diagramStore,
|
||||||
DiagramRenderer diagramRenderer,
|
DiagramRenderer diagramRenderer) {
|
||||||
AgentRegistryService registryService) {
|
|
||||||
this.diagramStore = diagramStore;
|
this.diagramStore = diagramStore;
|
||||||
this.diagramRenderer = diagramRenderer;
|
this.diagramRenderer = diagramRenderer;
|
||||||
this.registryService = registryService;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@GetMapping("/api/v1/diagrams/{contentHash}/render")
|
@GetMapping("/api/v1/diagrams/{contentHash}/render")
|
||||||
@@ -90,8 +84,8 @@ public class DiagramRenderController {
|
|||||||
|
|
||||||
@GetMapping("/api/v1/environments/{envSlug}/apps/{appSlug}/routes/{routeId}/diagram")
|
@GetMapping("/api/v1/environments/{envSlug}/apps/{appSlug}/routes/{routeId}/diagram")
|
||||||
@Operation(summary = "Find the latest diagram for this app's route in this environment",
|
@Operation(summary = "Find the latest diagram for this app's route in this environment",
|
||||||
description = "Resolves agents in this env for this app, then looks up the latest diagram for the route "
|
description = "Returns the most recently stored diagram for (app, env, route). Independent of the "
|
||||||
+ "they reported. Env scope prevents a dev route from returning a prod diagram.")
|
+ "agent registry, so routes removed from the current app version still resolve.")
|
||||||
@ApiResponse(responseCode = "200", description = "Diagram layout returned")
|
@ApiResponse(responseCode = "200", description = "Diagram layout returned")
|
||||||
@ApiResponse(responseCode = "404", description = "No diagram found")
|
@ApiResponse(responseCode = "404", description = "No diagram found")
|
||||||
public ResponseEntity<DiagramLayout> findByAppAndRoute(
|
public ResponseEntity<DiagramLayout> findByAppAndRoute(
|
||||||
@@ -99,15 +93,7 @@ public class DiagramRenderController {
|
|||||||
@PathVariable String appSlug,
|
@PathVariable String appSlug,
|
||||||
@PathVariable String routeId,
|
@PathVariable String routeId,
|
||||||
@RequestParam(defaultValue = "LR") String direction) {
|
@RequestParam(defaultValue = "LR") String direction) {
|
||||||
List<String> agentIds = registryService.findByApplicationAndEnvironment(appSlug, env.slug()).stream()
|
Optional<String> contentHash = diagramStore.findLatestContentHashForAppRoute(appSlug, routeId, env.slug());
|
||||||
.map(AgentInfo::instanceId)
|
|
||||||
.toList();
|
|
||||||
|
|
||||||
if (agentIds.isEmpty()) {
|
|
||||||
return ResponseEntity.notFound().build();
|
|
||||||
}
|
|
||||||
|
|
||||||
Optional<String> contentHash = diagramStore.findContentHashForRouteByAgents(routeId, agentIds);
|
|
||||||
if (contentHash.isEmpty()) {
|
if (contentHash.isEmpty()) {
|
||||||
return ResponseEntity.notFound().build();
|
return ResponseEntity.notFound().build();
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
package com.cameleer.server.app.controller;
|
package com.cameleer.server.app.controller;
|
||||||
|
|
||||||
|
import com.cameleer.server.core.license.LicenseGate;
|
||||||
import com.cameleer.server.core.runtime.Environment;
|
import com.cameleer.server.core.runtime.Environment;
|
||||||
import com.cameleer.server.core.runtime.EnvironmentColor;
|
import com.cameleer.server.core.runtime.EnvironmentColor;
|
||||||
import com.cameleer.server.core.runtime.EnvironmentService;
|
import com.cameleer.server.core.runtime.EnvironmentService;
|
||||||
@@ -7,9 +8,11 @@ import com.cameleer.server.core.runtime.RuntimeType;
|
|||||||
import io.swagger.v3.oas.annotations.Operation;
|
import io.swagger.v3.oas.annotations.Operation;
|
||||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||||
|
import org.springframework.http.HttpStatus;
|
||||||
import org.springframework.http.ResponseEntity;
|
import org.springframework.http.ResponseEntity;
|
||||||
import org.springframework.security.access.prepost.PreAuthorize;
|
import org.springframework.security.access.prepost.PreAuthorize;
|
||||||
import org.springframework.web.bind.annotation.*;
|
import org.springframework.web.bind.annotation.*;
|
||||||
|
import org.springframework.web.server.ResponseStatusException;
|
||||||
|
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
@@ -21,9 +24,11 @@ import java.util.Map;
|
|||||||
public class EnvironmentAdminController {
|
public class EnvironmentAdminController {
|
||||||
|
|
||||||
private final EnvironmentService environmentService;
|
private final EnvironmentService environmentService;
|
||||||
|
private final LicenseGate licenseGate;
|
||||||
|
|
||||||
public EnvironmentAdminController(EnvironmentService environmentService) {
|
public EnvironmentAdminController(EnvironmentService environmentService, LicenseGate licenseGate) {
|
||||||
this.environmentService = environmentService;
|
this.environmentService = environmentService;
|
||||||
|
this.licenseGate = licenseGate;
|
||||||
}
|
}
|
||||||
|
|
||||||
@GetMapping
|
@GetMapping
|
||||||
@@ -141,11 +146,24 @@ public class EnvironmentAdminController {
|
|||||||
@Operation(summary = "Update JAR retention policy for an environment")
|
@Operation(summary = "Update JAR retention policy for an environment")
|
||||||
@ApiResponse(responseCode = "200", description = "Retention policy updated")
|
@ApiResponse(responseCode = "200", description = "Retention policy updated")
|
||||||
@ApiResponse(responseCode = "404", description = "Environment not found")
|
@ApiResponse(responseCode = "404", description = "Environment not found")
|
||||||
|
@ApiResponse(responseCode = "422", description = "jarRetentionCount exceeds license cap")
|
||||||
public ResponseEntity<?> updateJarRetention(@PathVariable String envSlug,
|
public ResponseEntity<?> updateJarRetention(@PathVariable String envSlug,
|
||||||
@RequestBody JarRetentionRequest request) {
|
@RequestBody JarRetentionRequest request) {
|
||||||
try {
|
try {
|
||||||
Environment current = environmentService.getBySlug(envSlug);
|
Environment current = environmentService.getBySlug(envSlug);
|
||||||
environmentService.updateJarRetentionCount(current.id(), request.jarRetentionCount());
|
// License cap check: only fires when a non-null value is supplied (null = unlimited).
|
||||||
|
// 422 (not 403) because this is a value-out-of-range, not a creation-quota rejection;
|
||||||
|
// therefore we do NOT route through LicenseEnforcer / LicenseExceptionAdvice.
|
||||||
|
Integer requested = request.jarRetentionCount();
|
||||||
|
if (requested != null) {
|
||||||
|
int cap = licenseGate.getEffectiveLimits().get("max_jar_retention_count");
|
||||||
|
if (requested > cap) {
|
||||||
|
throw new ResponseStatusException(HttpStatus.UNPROCESSABLE_ENTITY,
|
||||||
|
"jarRetentionCount " + requested + " exceeds license cap "
|
||||||
|
+ cap + " (max_jar_retention_count)");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
environmentService.updateJarRetentionCount(current.id(), requested);
|
||||||
return ResponseEntity.ok(environmentService.getBySlug(envSlug));
|
return ResponseEntity.ok(environmentService.getBySlug(envSlug));
|
||||||
} catch (IllegalArgumentException e) {
|
} catch (IllegalArgumentException e) {
|
||||||
if (e.getMessage().contains("not found")) {
|
if (e.getMessage().contains("not found")) {
|
||||||
|
|||||||
@@ -1,51 +1,71 @@
|
|||||||
package com.cameleer.server.app.controller;
|
package com.cameleer.server.app.controller;
|
||||||
|
|
||||||
|
import com.cameleer.server.app.license.LicenseRepository;
|
||||||
|
import com.cameleer.server.app.license.LicenseService;
|
||||||
import com.cameleer.server.core.license.LicenseGate;
|
import com.cameleer.server.core.license.LicenseGate;
|
||||||
import com.cameleer.server.core.license.LicenseInfo;
|
import com.cameleer.license.LicenseInfo;
|
||||||
import com.cameleer.server.core.license.LicenseValidator;
|
|
||||||
import io.swagger.v3.oas.annotations.Operation;
|
import io.swagger.v3.oas.annotations.Operation;
|
||||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||||
import org.springframework.beans.factory.annotation.Value;
|
|
||||||
import org.springframework.http.ResponseEntity;
|
import org.springframework.http.ResponseEntity;
|
||||||
import org.springframework.security.access.prepost.PreAuthorize;
|
import org.springframework.security.access.prepost.PreAuthorize;
|
||||||
import org.springframework.web.bind.annotation.*;
|
import org.springframework.security.core.Authentication;
|
||||||
|
import org.springframework.web.bind.annotation.GetMapping;
|
||||||
|
import org.springframework.web.bind.annotation.PostMapping;
|
||||||
|
import org.springframework.web.bind.annotation.RequestBody;
|
||||||
|
import org.springframework.web.bind.annotation.RequestMapping;
|
||||||
|
import org.springframework.web.bind.annotation.RestController;
|
||||||
|
|
||||||
|
import java.util.LinkedHashMap;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* License management for ADMIN users. All mutation goes through {@link LicenseService} so that
|
||||||
|
* install / replace flows are uniformly audited, persisted, and published to listeners (retention
|
||||||
|
* policy, license metrics, etc.).
|
||||||
|
*
|
||||||
|
* <p>GET returns {@code {state, invalidReason, envelope, lastValidatedAt?}}. The raw JWT-style
|
||||||
|
* token is deliberately omitted from the response — only the parsed {@link LicenseInfo} is
|
||||||
|
* exposed.</p>
|
||||||
|
*/
|
||||||
@RestController
|
@RestController
|
||||||
@RequestMapping("/api/v1/admin/license")
|
@RequestMapping("/api/v1/admin/license")
|
||||||
@PreAuthorize("hasRole('ADMIN')")
|
@PreAuthorize("hasRole('ADMIN')")
|
||||||
@Tag(name = "License Admin", description = "License management")
|
@Tag(name = "License Admin", description = "License management")
|
||||||
public class LicenseAdminController {
|
public class LicenseAdminController {
|
||||||
|
|
||||||
private final LicenseGate licenseGate;
|
private final LicenseService licenseService;
|
||||||
private final String licensePublicKey;
|
private final LicenseGate gate;
|
||||||
|
private final LicenseRepository repo;
|
||||||
|
|
||||||
public LicenseAdminController(LicenseGate licenseGate,
|
public LicenseAdminController(LicenseService svc, LicenseGate gate, LicenseRepository repo) {
|
||||||
@Value("${cameleer.server.license.publickey:}") String licensePublicKey) {
|
this.licenseService = svc;
|
||||||
this.licenseGate = licenseGate;
|
this.gate = gate;
|
||||||
this.licensePublicKey = licensePublicKey;
|
this.repo = repo;
|
||||||
}
|
}
|
||||||
|
|
||||||
@GetMapping
|
@GetMapping
|
||||||
@Operation(summary = "Get current license info")
|
@Operation(summary = "Get current license state, invalid reason, and parsed envelope")
|
||||||
public ResponseEntity<LicenseInfo> getCurrent() {
|
public ResponseEntity<Map<String, Object>> getCurrent() {
|
||||||
return ResponseEntity.ok(licenseGate.getCurrent());
|
Map<String, Object> body = new LinkedHashMap<>();
|
||||||
|
body.put("state", gate.getState().name());
|
||||||
|
body.put("invalidReason", gate.getInvalidReason());
|
||||||
|
body.put("envelope", gate.getCurrent()); // null when ABSENT/INVALID; raw token deliberately omitted
|
||||||
|
repo.findByTenantId(licenseService.getTenantId()).ifPresent(rec ->
|
||||||
|
body.put("lastValidatedAt", rec.lastValidatedAt().toString()));
|
||||||
|
return ResponseEntity.ok(body);
|
||||||
}
|
}
|
||||||
|
|
||||||
record UpdateLicenseRequest(String token) {}
|
public record UpdateLicenseRequest(String token) {}
|
||||||
|
|
||||||
@PostMapping
|
@PostMapping
|
||||||
@Operation(summary = "Update license token at runtime")
|
@Operation(summary = "Install or replace the license token at runtime")
|
||||||
public ResponseEntity<?> update(@RequestBody UpdateLicenseRequest request) {
|
public ResponseEntity<?> update(@RequestBody UpdateLicenseRequest request, Authentication auth) {
|
||||||
if (licensePublicKey == null || licensePublicKey.isBlank()) {
|
String userId = auth == null ? "system" : auth.getName().replaceFirst("^user:", "");
|
||||||
return ResponseEntity.badRequest().body(Map.of("error", "No license public key configured"));
|
|
||||||
}
|
|
||||||
try {
|
try {
|
||||||
LicenseValidator validator = new LicenseValidator(licensePublicKey);
|
LicenseInfo info = licenseService.install(request.token(), userId, "api");
|
||||||
LicenseInfo info = validator.validate(request.token());
|
return ResponseEntity.ok(Map.of(
|
||||||
licenseGate.load(info);
|
"state", gate.getState().name(),
|
||||||
return ResponseEntity.ok(info);
|
"envelope", info));
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
return ResponseEntity.badRequest().body(Map.of("error", e.getMessage()));
|
return ResponseEntity.badRequest().body(Map.of("error", e.getMessage()));
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,97 @@
|
|||||||
|
package com.cameleer.server.app.controller;
|
||||||
|
|
||||||
|
import com.cameleer.server.app.license.LicenseMessageRenderer;
|
||||||
|
import com.cameleer.server.app.license.LicenseRepository;
|
||||||
|
import com.cameleer.server.app.license.LicenseService;
|
||||||
|
import com.cameleer.server.app.license.LicenseUsageReader;
|
||||||
|
import com.cameleer.server.core.agent.AgentRegistryService;
|
||||||
|
import com.cameleer.server.core.license.LicenseGate;
|
||||||
|
import org.springframework.http.ResponseEntity;
|
||||||
|
import org.springframework.security.access.prepost.PreAuthorize;
|
||||||
|
import org.springframework.web.bind.annotation.GetMapping;
|
||||||
|
import org.springframework.web.bind.annotation.RequestMapping;
|
||||||
|
import org.springframework.web.bind.annotation.RestController;
|
||||||
|
|
||||||
|
import java.time.Duration;
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.util.ArrayList;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.LinkedHashMap;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Read-only operator surface returning current license state, key timestamps, the
|
||||||
|
* human-readable message produced by {@link LicenseMessageRenderer}, and a per-limit
|
||||||
|
* usage/cap/source table covering every key exposed by the effective limits map.
|
||||||
|
*
|
||||||
|
* <p>Each limit row carries:
|
||||||
|
* <ul>
|
||||||
|
* <li>{@code key} — the limit key (e.g. {@code max_apps})</li>
|
||||||
|
* <li>{@code current} — current usage (0 when not measured server-side)</li>
|
||||||
|
* <li>{@code cap} — effective cap (license override or default-tier value)</li>
|
||||||
|
* <li>{@code source} — {@code "license"} when the cap came from the license override map,
|
||||||
|
* {@code "default"} otherwise</li>
|
||||||
|
* </ul>
|
||||||
|
*
|
||||||
|
* <p>{@code max_agents} is sourced from the in-memory {@link AgentRegistryService} since the
|
||||||
|
* registry is not persisted; all other counts come from PostgreSQL via
|
||||||
|
* {@link LicenseUsageReader#snapshot()}.</p>
|
||||||
|
*/
|
||||||
|
@RestController
|
||||||
|
@RequestMapping("/api/v1/admin/license/usage")
|
||||||
|
@PreAuthorize("hasRole('ADMIN')")
|
||||||
|
public class LicenseUsageController {
|
||||||
|
|
||||||
|
private final LicenseGate gate;
|
||||||
|
private final LicenseUsageReader reader;
|
||||||
|
private final AgentRegistryService agents;
|
||||||
|
private final LicenseService svc;
|
||||||
|
private final LicenseRepository repo;
|
||||||
|
|
||||||
|
public LicenseUsageController(LicenseGate gate,
|
||||||
|
LicenseUsageReader reader,
|
||||||
|
AgentRegistryService agents,
|
||||||
|
LicenseService svc,
|
||||||
|
LicenseRepository repo) {
|
||||||
|
this.gate = gate;
|
||||||
|
this.reader = reader;
|
||||||
|
this.agents = agents;
|
||||||
|
this.svc = svc;
|
||||||
|
this.repo = repo;
|
||||||
|
}
|
||||||
|
|
||||||
|
@GetMapping
|
||||||
|
public ResponseEntity<Map<String, Object>> get() {
|
||||||
|
var state = gate.getState();
|
||||||
|
var info = gate.getCurrent();
|
||||||
|
var effective = gate.getEffectiveLimits();
|
||||||
|
|
||||||
|
Map<String, Long> usage = new HashMap<>(reader.snapshot());
|
||||||
|
usage.put("max_agents", (long) agents.liveCount());
|
||||||
|
|
||||||
|
List<Map<String, Object>> limitRows = new ArrayList<>();
|
||||||
|
for (var key : effective.values().keySet()) {
|
||||||
|
Map<String, Object> row = new LinkedHashMap<>();
|
||||||
|
row.put("key", key);
|
||||||
|
row.put("current", usage.getOrDefault(key, 0L));
|
||||||
|
row.put("cap", effective.get(key));
|
||||||
|
row.put("source", info != null && info.limits().containsKey(key) ? "license" : "default");
|
||||||
|
limitRows.add(row);
|
||||||
|
}
|
||||||
|
|
||||||
|
Map<String, Object> body = new LinkedHashMap<>();
|
||||||
|
body.put("state", state.name());
|
||||||
|
body.put("expiresAt", info == null ? null : info.expiresAt().toString());
|
||||||
|
body.put("daysRemaining", info == null ? null
|
||||||
|
: Duration.between(Instant.now(), info.expiresAt()).toDays());
|
||||||
|
body.put("gracePeriodDays", info == null ? 0 : info.gracePeriodDays());
|
||||||
|
body.put("tenantId", info == null ? null : info.tenantId());
|
||||||
|
body.put("label", info == null ? null : info.label());
|
||||||
|
repo.findByTenantId(svc.getTenantId()).ifPresent(rec ->
|
||||||
|
body.put("lastValidatedAt", rec.lastValidatedAt().toString()));
|
||||||
|
body.put("message", LicenseMessageRenderer.forState(state, info, gate.getInvalidReason()));
|
||||||
|
body.put("limits", limitRows);
|
||||||
|
return ResponseEntity.ok(body);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -132,13 +132,12 @@ public class RouteCatalogController {
|
|||||||
List<AgentInfo> agents = agentsByApp.getOrDefault(appId, List.of());
|
List<AgentInfo> agents = agentsByApp.getOrDefault(appId, List.of());
|
||||||
|
|
||||||
Set<String> routeIds = routesByApp.getOrDefault(appId, Set.of());
|
Set<String> routeIds = routesByApp.getOrDefault(appId, Set.of());
|
||||||
List<String> agentIds = agents.stream().map(AgentInfo::instanceId).toList();
|
|
||||||
List<RouteSummary> routeSummaries = routeIds.stream()
|
List<RouteSummary> routeSummaries = routeIds.stream()
|
||||||
.map(routeId -> {
|
.map(routeId -> {
|
||||||
String key = appId + "/" + routeId;
|
String key = appId + "/" + routeId;
|
||||||
long count = routeExchangeCounts.getOrDefault(key, 0L);
|
long count = routeExchangeCounts.getOrDefault(key, 0L);
|
||||||
Instant lastSeen = routeLastSeen.get(key);
|
Instant lastSeen = routeLastSeen.get(key);
|
||||||
String fromUri = resolveFromEndpointUri(routeId, agentIds);
|
String fromUri = resolveFromEndpointUri(appId, routeId, envSlug);
|
||||||
String state = routeStateRegistry.getState(appId, routeId).name().toLowerCase();
|
String state = routeStateRegistry.getState(appId, routeId).name().toLowerCase();
|
||||||
String routeState = "started".equals(state) ? null : state;
|
String routeState = "started".equals(state) ? null : state;
|
||||||
return new RouteSummary(routeId, count, lastSeen, fromUri, routeState);
|
return new RouteSummary(routeId, count, lastSeen, fromUri, routeState);
|
||||||
@@ -160,8 +159,8 @@ public class RouteCatalogController {
|
|||||||
return ResponseEntity.ok(catalog);
|
return ResponseEntity.ok(catalog);
|
||||||
}
|
}
|
||||||
|
|
||||||
private String resolveFromEndpointUri(String routeId, List<String> agentIds) {
|
private String resolveFromEndpointUri(String applicationId, String routeId, String environment) {
|
||||||
return diagramStore.findContentHashForRouteByAgents(routeId, agentIds)
|
return diagramStore.findLatestContentHashForAppRoute(applicationId, routeId, environment)
|
||||||
.flatMap(diagramStore::findByContentHash)
|
.flatMap(diagramStore::findByContentHash)
|
||||||
.map(RouteGraph::getRoot)
|
.map(RouteGraph::getRoot)
|
||||||
.map(root -> root.getEndpointUri())
|
.map(root -> root.getEndpointUri())
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import com.cameleer.server.app.web.EnvPath;
|
|||||||
import com.cameleer.server.core.admin.AppSettings;
|
import com.cameleer.server.core.admin.AppSettings;
|
||||||
import com.cameleer.server.core.admin.AppSettingsRepository;
|
import com.cameleer.server.core.admin.AppSettingsRepository;
|
||||||
import com.cameleer.server.core.runtime.Environment;
|
import com.cameleer.server.core.runtime.Environment;
|
||||||
|
import com.cameleer.server.core.search.AttributeFilter;
|
||||||
import com.cameleer.server.core.search.ExecutionStats;
|
import com.cameleer.server.core.search.ExecutionStats;
|
||||||
import com.cameleer.server.core.search.ExecutionSummary;
|
import com.cameleer.server.core.search.ExecutionSummary;
|
||||||
import com.cameleer.server.core.search.SearchRequest;
|
import com.cameleer.server.core.search.SearchRequest;
|
||||||
@@ -14,6 +15,7 @@ import com.cameleer.server.core.search.TopError;
|
|||||||
import com.cameleer.server.core.storage.StatsStore;
|
import com.cameleer.server.core.storage.StatsStore;
|
||||||
import io.swagger.v3.oas.annotations.Operation;
|
import io.swagger.v3.oas.annotations.Operation;
|
||||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||||
|
import org.springframework.http.HttpStatus;
|
||||||
import org.springframework.http.ResponseEntity;
|
import org.springframework.http.ResponseEntity;
|
||||||
import org.springframework.web.bind.annotation.GetMapping;
|
import org.springframework.web.bind.annotation.GetMapping;
|
||||||
import org.springframework.web.bind.annotation.PostMapping;
|
import org.springframework.web.bind.annotation.PostMapping;
|
||||||
@@ -21,8 +23,10 @@ import org.springframework.web.bind.annotation.RequestBody;
|
|||||||
import org.springframework.web.bind.annotation.RequestMapping;
|
import org.springframework.web.bind.annotation.RequestMapping;
|
||||||
import org.springframework.web.bind.annotation.RequestParam;
|
import org.springframework.web.bind.annotation.RequestParam;
|
||||||
import org.springframework.web.bind.annotation.RestController;
|
import org.springframework.web.bind.annotation.RestController;
|
||||||
|
import org.springframework.web.server.ResponseStatusException;
|
||||||
|
|
||||||
import java.time.Instant;
|
import java.time.Instant;
|
||||||
|
import java.util.ArrayList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
|
||||||
@@ -57,11 +61,19 @@ public class SearchController {
|
|||||||
@RequestParam(name = "agentId", required = false) String instanceId,
|
@RequestParam(name = "agentId", required = false) String instanceId,
|
||||||
@RequestParam(required = false) String processorType,
|
@RequestParam(required = false) String processorType,
|
||||||
@RequestParam(required = false) String application,
|
@RequestParam(required = false) String application,
|
||||||
|
@RequestParam(name = "attr", required = false) List<String> attr,
|
||||||
@RequestParam(defaultValue = "0") int offset,
|
@RequestParam(defaultValue = "0") int offset,
|
||||||
@RequestParam(defaultValue = "50") int limit,
|
@RequestParam(defaultValue = "50") int limit,
|
||||||
@RequestParam(required = false) String sortField,
|
@RequestParam(required = false) String sortField,
|
||||||
@RequestParam(required = false) String sortDir) {
|
@RequestParam(required = false) String sortDir) {
|
||||||
|
|
||||||
|
List<AttributeFilter> attributeFilters;
|
||||||
|
try {
|
||||||
|
attributeFilters = parseAttrParams(attr);
|
||||||
|
} catch (IllegalArgumentException e) {
|
||||||
|
throw new ResponseStatusException(HttpStatus.BAD_REQUEST, e.getMessage(), e);
|
||||||
|
}
|
||||||
|
|
||||||
SearchRequest request = new SearchRequest(
|
SearchRequest request = new SearchRequest(
|
||||||
status, timeFrom, timeTo,
|
status, timeFrom, timeTo,
|
||||||
null, null,
|
null, null,
|
||||||
@@ -72,12 +84,36 @@ public class SearchController {
|
|||||||
offset, limit,
|
offset, limit,
|
||||||
sortField, sortDir,
|
sortField, sortDir,
|
||||||
null,
|
null,
|
||||||
env.slug()
|
env.slug(),
|
||||||
|
attributeFilters
|
||||||
);
|
);
|
||||||
|
|
||||||
return ResponseEntity.ok(searchService.search(request));
|
return ResponseEntity.ok(searchService.search(request));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Parses {@code attr} query params of the form {@code key} (key-only) or {@code key:value}
|
||||||
|
* (exact or wildcard via {@code *}). Splits on the first {@code :}; later colons are part of
|
||||||
|
* the value. Blank / null list → empty result. Key validation is delegated to
|
||||||
|
* {@link AttributeFilter}'s compact constructor, which throws {@link IllegalArgumentException}
|
||||||
|
* on invalid keys (mapped to 400 by the caller).
|
||||||
|
*/
|
||||||
|
static List<AttributeFilter> parseAttrParams(List<String> raw) {
|
||||||
|
if (raw == null || raw.isEmpty()) return List.of();
|
||||||
|
List<AttributeFilter> out = new ArrayList<>(raw.size());
|
||||||
|
for (String entry : raw) {
|
||||||
|
if (entry == null || entry.isBlank()) continue;
|
||||||
|
int colon = entry.indexOf(':');
|
||||||
|
if (colon < 0) {
|
||||||
|
out.add(new AttributeFilter(entry.trim(), null));
|
||||||
|
} else {
|
||||||
|
out.add(new AttributeFilter(entry.substring(0, colon).trim(),
|
||||||
|
entry.substring(colon + 1)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return out;
|
||||||
|
}
|
||||||
|
|
||||||
@PostMapping("/executions/search")
|
@PostMapping("/executions/search")
|
||||||
@Operation(summary = "Advanced search with all filters",
|
@Operation(summary = "Advanced search with all filters",
|
||||||
description = "Env from the path overrides any environment field in the body.")
|
description = "Env from the path overrides any environment field in the body.")
|
||||||
|
|||||||
@@ -0,0 +1,148 @@
|
|||||||
|
package com.cameleer.server.app.controller;
|
||||||
|
|
||||||
|
import com.cameleer.server.core.storage.ServerMetricsQueryStore;
|
||||||
|
import com.cameleer.server.core.storage.model.ServerInstanceInfo;
|
||||||
|
import com.cameleer.server.core.storage.model.ServerMetricCatalogEntry;
|
||||||
|
import com.cameleer.server.core.storage.model.ServerMetricQueryRequest;
|
||||||
|
import com.cameleer.server.core.storage.model.ServerMetricQueryResponse;
|
||||||
|
import io.swagger.v3.oas.annotations.Operation;
|
||||||
|
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||||
|
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
|
||||||
|
import org.springframework.http.ResponseEntity;
|
||||||
|
import org.springframework.security.access.prepost.PreAuthorize;
|
||||||
|
import org.springframework.web.bind.annotation.ExceptionHandler;
|
||||||
|
import org.springframework.web.bind.annotation.GetMapping;
|
||||||
|
import org.springframework.web.bind.annotation.PostMapping;
|
||||||
|
import org.springframework.web.bind.annotation.RequestBody;
|
||||||
|
import org.springframework.web.bind.annotation.RequestMapping;
|
||||||
|
import org.springframework.web.bind.annotation.RequestParam;
|
||||||
|
import org.springframework.web.bind.annotation.RestController;
|
||||||
|
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Generic read API over the ClickHouse {@code server_metrics} table. Lets
|
||||||
|
* SaaS control planes build server-health dashboards without requiring direct
|
||||||
|
* ClickHouse access.
|
||||||
|
*
|
||||||
|
* <p>Three endpoints cover all 17 panels in {@code docs/server-self-metrics.md}:
|
||||||
|
* <ul>
|
||||||
|
* <li>{@code GET /catalog} — discover available metric names, types, statistics, and tags</li>
|
||||||
|
* <li>{@code POST /query} — generic time-series query with aggregation, grouping, filtering, and counter-delta mode</li>
|
||||||
|
* <li>{@code GET /instances} — list server instances (useful for partitioning counter math)</li>
|
||||||
|
* </ul>
|
||||||
|
*
|
||||||
|
* <p>Visibility matches {@code ClickHouseAdminController} / {@code DatabaseAdminController}:
|
||||||
|
* <ul>
|
||||||
|
* <li>Conditional on {@code cameleer.server.security.infrastructureendpoints=true} (default).</li>
|
||||||
|
* <li>Class-level {@code @PreAuthorize("hasRole('ADMIN')")} on top of the
|
||||||
|
* {@code /api/v1/admin/**} catch-all in {@code SecurityConfig}.</li>
|
||||||
|
* </ul>
|
||||||
|
*/
|
||||||
|
@ConditionalOnProperty(
|
||||||
|
name = "cameleer.server.security.infrastructureendpoints",
|
||||||
|
havingValue = "true",
|
||||||
|
matchIfMissing = true
|
||||||
|
)
|
||||||
|
@RestController
|
||||||
|
@RequestMapping("/api/v1/admin/server-metrics")
|
||||||
|
@PreAuthorize("hasRole('ADMIN')")
|
||||||
|
@Tag(name = "Server Self-Metrics",
|
||||||
|
description = "Read API over the server's own Micrometer registry snapshots (ADMIN only)")
|
||||||
|
public class ServerMetricsAdminController {
|
||||||
|
|
||||||
|
/** Default lookback window for catalog/instances when from/to are omitted. */
|
||||||
|
private static final long DEFAULT_LOOKBACK_SECONDS = 3_600L;
|
||||||
|
|
||||||
|
private final ServerMetricsQueryStore store;
|
||||||
|
|
||||||
|
public ServerMetricsAdminController(ServerMetricsQueryStore store) {
|
||||||
|
this.store = store;
|
||||||
|
}
|
||||||
|
|
||||||
|
@GetMapping("/catalog")
|
||||||
|
@Operation(summary = "List metric names observed in the window",
|
||||||
|
description = "For each metric_name, returns metric_type, the set of statistics emitted, and the union of tag keys.")
|
||||||
|
public ResponseEntity<List<ServerMetricCatalogEntry>> catalog(
|
||||||
|
@RequestParam(required = false) String from,
|
||||||
|
@RequestParam(required = false) String to) {
|
||||||
|
Instant[] window = resolveWindow(from, to);
|
||||||
|
return ResponseEntity.ok(store.catalog(window[0], window[1]));
|
||||||
|
}
|
||||||
|
|
||||||
|
@GetMapping("/instances")
|
||||||
|
@Operation(summary = "List server_instance_id values observed in the window",
|
||||||
|
description = "Returns first/last seen timestamps — use to partition counter-delta computations.")
|
||||||
|
public ResponseEntity<List<ServerInstanceInfo>> instances(
|
||||||
|
@RequestParam(required = false) String from,
|
||||||
|
@RequestParam(required = false) String to) {
|
||||||
|
Instant[] window = resolveWindow(from, to);
|
||||||
|
return ResponseEntity.ok(store.listInstances(window[0], window[1]));
|
||||||
|
}
|
||||||
|
|
||||||
|
@PostMapping("/query")
|
||||||
|
@Operation(summary = "Generic time-series query",
|
||||||
|
description = "Returns bucketed series for a single metric_name. Supports aggregation (avg/sum/max/min/latest), group-by-tag, filter-by-tag, counter delta mode, and a derived 'mean' statistic for timers.")
|
||||||
|
public ResponseEntity<ServerMetricQueryResponse> query(@RequestBody QueryBody body) {
|
||||||
|
ServerMetricQueryRequest request = new ServerMetricQueryRequest(
|
||||||
|
body.metric(),
|
||||||
|
body.statistic(),
|
||||||
|
parseInstant(body.from(), "from"),
|
||||||
|
parseInstant(body.to(), "to"),
|
||||||
|
body.stepSeconds(),
|
||||||
|
body.groupByTags(),
|
||||||
|
body.filterTags(),
|
||||||
|
body.aggregation(),
|
||||||
|
body.mode(),
|
||||||
|
body.serverInstanceIds());
|
||||||
|
return ResponseEntity.ok(store.query(request));
|
||||||
|
}
|
||||||
|
|
||||||
|
@ExceptionHandler(IllegalArgumentException.class)
|
||||||
|
public ResponseEntity<Map<String, String>> handleBadRequest(IllegalArgumentException e) {
|
||||||
|
return ResponseEntity.badRequest().body(Map.of("error", e.getMessage()));
|
||||||
|
}
|
||||||
|
|
||||||
|
private static Instant[] resolveWindow(String from, String to) {
|
||||||
|
Instant toI = to != null ? parseInstant(to, "to") : Instant.now();
|
||||||
|
Instant fromI = from != null
|
||||||
|
? parseInstant(from, "from")
|
||||||
|
: toI.minusSeconds(DEFAULT_LOOKBACK_SECONDS);
|
||||||
|
if (!fromI.isBefore(toI)) {
|
||||||
|
throw new IllegalArgumentException("from must be strictly before to");
|
||||||
|
}
|
||||||
|
return new Instant[]{fromI, toI};
|
||||||
|
}
|
||||||
|
|
||||||
|
private static Instant parseInstant(String raw, String field) {
|
||||||
|
if (raw == null || raw.isBlank()) {
|
||||||
|
throw new IllegalArgumentException(field + " is required");
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
return Instant.parse(raw);
|
||||||
|
} catch (Exception e) {
|
||||||
|
throw new IllegalArgumentException(
|
||||||
|
field + " must be an ISO-8601 instant (e.g. 2026-04-23T10:00:00Z)");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Request body for {@link #query(QueryBody)}. Uses ISO-8601 strings on
|
||||||
|
* the wire so the OpenAPI schema stays language-neutral.
|
||||||
|
*/
|
||||||
|
public record QueryBody(
|
||||||
|
String metric,
|
||||||
|
String statistic,
|
||||||
|
String from,
|
||||||
|
String to,
|
||||||
|
Integer stepSeconds,
|
||||||
|
List<String> groupByTags,
|
||||||
|
Map<String, String> filterTags,
|
||||||
|
String aggregation,
|
||||||
|
String mode,
|
||||||
|
List<String> serverInstanceIds
|
||||||
|
) {
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
package com.cameleer.server.app.controller;
|
package com.cameleer.server.app.controller;
|
||||||
|
|
||||||
import com.cameleer.server.app.dto.SetPasswordRequest;
|
import com.cameleer.server.app.dto.SetPasswordRequest;
|
||||||
|
import com.cameleer.server.app.license.LicenseEnforcer;
|
||||||
import com.cameleer.server.core.admin.AuditCategory;
|
import com.cameleer.server.core.admin.AuditCategory;
|
||||||
import com.cameleer.server.core.admin.AuditResult;
|
import com.cameleer.server.core.admin.AuditResult;
|
||||||
import com.cameleer.server.core.admin.AuditService;
|
import com.cameleer.server.core.admin.AuditService;
|
||||||
@@ -52,13 +53,16 @@ public class UserAdminController {
|
|||||||
private final RbacService rbacService;
|
private final RbacService rbacService;
|
||||||
private final UserRepository userRepository;
|
private final UserRepository userRepository;
|
||||||
private final AuditService auditService;
|
private final AuditService auditService;
|
||||||
|
private final LicenseEnforcer licenseEnforcer;
|
||||||
private final boolean oidcEnabled;
|
private final boolean oidcEnabled;
|
||||||
|
|
||||||
public UserAdminController(RbacService rbacService, UserRepository userRepository,
|
public UserAdminController(RbacService rbacService, UserRepository userRepository,
|
||||||
AuditService auditService, SecurityProperties securityProperties) {
|
AuditService auditService, SecurityProperties securityProperties,
|
||||||
|
LicenseEnforcer licenseEnforcer) {
|
||||||
this.rbacService = rbacService;
|
this.rbacService = rbacService;
|
||||||
this.userRepository = userRepository;
|
this.userRepository = userRepository;
|
||||||
this.auditService = auditService;
|
this.auditService = auditService;
|
||||||
|
this.licenseEnforcer = licenseEnforcer;
|
||||||
String issuer = securityProperties.getOidc().getIssuerUri();
|
String issuer = securityProperties.getOidc().getIssuerUri();
|
||||||
this.oidcEnabled = issuer != null && !issuer.isBlank();
|
this.oidcEnabled = issuer != null && !issuer.isBlank();
|
||||||
}
|
}
|
||||||
@@ -89,6 +93,9 @@ public class UserAdminController {
|
|||||||
@ApiResponse(responseCode = "400", description = "Disabled in OIDC mode")
|
@ApiResponse(responseCode = "400", description = "Disabled in OIDC mode")
|
||||||
public ResponseEntity<?> createUser(@RequestBody CreateUserRequest request,
|
public ResponseEntity<?> createUser(@RequestBody CreateUserRequest request,
|
||||||
HttpServletRequest httpRequest) {
|
HttpServletRequest httpRequest) {
|
||||||
|
// License cap fires first so over-cap creates short-circuit before any other validation.
|
||||||
|
// Audit emission for the rejection is handled inside LicenseEnforcer (3-arg ctor wires AuditService).
|
||||||
|
licenseEnforcer.assertWithinCap("max_users", userRepository.count(), 1);
|
||||||
if (oidcEnabled) {
|
if (oidcEnabled) {
|
||||||
return ResponseEntity.badRequest()
|
return ResponseEntity.badRequest()
|
||||||
.body(Map.of("error", "Local user creation is disabled when OIDC is enabled. Users are provisioned automatically via SSO."));
|
.body(Map.of("error", "Local user creation is disabled when OIDC is enabled. Users are provisioned automatically via SSO."));
|
||||||
|
|||||||
@@ -0,0 +1,23 @@
|
|||||||
|
package com.cameleer.server.app.dto;
|
||||||
|
|
||||||
|
import io.swagger.v3.oas.annotations.media.Schema;
|
||||||
|
|
||||||
|
/**
 * Wire DTO describing which login mechanisms are available. The SPA uses this to render
 * the login page deterministically instead of probing endpoints.
 */
@Schema(description = "Authentication capabilities reported to the SPA so it can render the login page deterministically")
public record AuthCapabilitiesResponse(
        @Schema(description = "OIDC interactive login capability") Oidc oidc,
        @Schema(description = "Local username/password account capability") LocalAccounts localAccounts
) {

    /** OIDC capability flags; {@code primary} controls whether the SPA hides the local form. */
    @Schema(description = "OIDC interactive login")
    public record Oidc(
            @Schema(description = "Whether OIDC is configured AND enabled") boolean enabled,
            @Schema(description = "Best-effort display label, e.g. \"Logto\", \"Keycloak\", \"Single Sign-On\"") String providerName,
            @Schema(description = "When true, OIDC is the canonical entry point and the SPA hides the local form unless ?local is set") boolean primary
    ) {}

    /** Local account capability flags; {@code adminRecoveryOnly} gates the form behind ?local. */
    @Schema(description = "Local username/password accounts")
    public record LocalAccounts(
            @Schema(description = "Whether the local form is reachable at all") boolean enabled,
            @Schema(description = "When true, the SPA gates the local form behind ?local with an admin-recovery banner") boolean adminRecoveryOnly
    ) {}
}
|
||||||
@@ -6,8 +6,10 @@ import com.cameleer.server.core.admin.AuditService;
|
|||||||
import jakarta.servlet.http.HttpServletRequest;
|
import jakarta.servlet.http.HttpServletRequest;
|
||||||
import jakarta.servlet.http.HttpServletResponse;
|
import jakarta.servlet.http.HttpServletResponse;
|
||||||
import org.springframework.stereotype.Component;
|
import org.springframework.stereotype.Component;
|
||||||
|
import org.springframework.util.AntPathMatcher;
|
||||||
import org.springframework.web.servlet.HandlerInterceptor;
|
import org.springframework.web.servlet.HandlerInterceptor;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
|
|
||||||
@@ -22,7 +24,9 @@ import java.util.Set;
|
|||||||
public class AuditInterceptor implements HandlerInterceptor {
|
public class AuditInterceptor implements HandlerInterceptor {
|
||||||
|
|
||||||
private static final Set<String> AUDITABLE_METHODS = Set.of("POST", "PUT", "DELETE");
|
private static final Set<String> AUDITABLE_METHODS = Set.of("POST", "PUT", "DELETE");
|
||||||
private static final Set<String> EXCLUDED_PATHS = Set.of("/api/v1/search/executions");
|
private static final List<String> EXCLUDED_PATH_PATTERNS = List.of(
|
||||||
|
"/api/v1/environments/*/executions/search");
|
||||||
|
private static final AntPathMatcher PATH_MATCHER = new AntPathMatcher();
|
||||||
|
|
||||||
private final AuditService auditService;
|
private final AuditService auditService;
|
||||||
|
|
||||||
@@ -41,8 +45,10 @@ public class AuditInterceptor implements HandlerInterceptor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
String path = request.getRequestURI();
|
String path = request.getRequestURI();
|
||||||
if (EXCLUDED_PATHS.contains(path)) {
|
for (String pattern : EXCLUDED_PATH_PATTERNS) {
|
||||||
return;
|
if (PATH_MATCHER.match(pattern, path)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
AuditResult result = response.getStatus() < 400 ? AuditResult.SUCCESS : AuditResult.FAILURE;
|
AuditResult result = response.getStatus() < 400 ? AuditResult.SUCCESS : AuditResult.FAILURE;
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,18 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
/**
 * Thrown when a requested operation would push usage past a license cap.
 *
 * <p>Carries the limit key, the projected usage, and the cap so HTTP advice layers
 * (see {@code LicenseExceptionAdvice}) can render a structured 403 payload.</p>
 */
public class LicenseCapExceededException extends RuntimeException {

    private final String limit;
    private final long projectedUsage;
    private final long maximum;

    public LicenseCapExceededException(String limitKey, long current, long cap) {
        super(describe(limitKey, current, cap));
        this.limit = limitKey;
        this.projectedUsage = current;
        this.maximum = cap;
    }

    private static String describe(String limitKey, long current, long cap) {
        return "license cap reached: " + limitKey + " current=" + current + " cap=" + cap;
    }

    /** The license limit key that was exceeded (e.g. {@code max_users}). */
    public String limitKey() {
        return limit;
    }

    /** The usage figure that triggered the rejection. */
    public long current() {
        return projectedUsage;
    }

    /** The effective cap in force at rejection time. */
    public long cap() {
        return maximum;
    }
}
|
||||||
@@ -0,0 +1,12 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
import com.cameleer.license.LicenseInfo;
|
||||||
|
import com.cameleer.license.LicenseState;
|
||||||
|
|
||||||
|
import java.util.Objects;
|
||||||
|
|
||||||
|
public record LicenseChangedEvent(LicenseState state, LicenseInfo current) {
|
||||||
|
public LicenseChangedEvent {
|
||||||
|
Objects.requireNonNull(state);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,80 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
import com.cameleer.server.core.admin.AuditCategory;
|
||||||
|
import com.cameleer.server.core.admin.AuditResult;
|
||||||
|
import com.cameleer.server.core.admin.AuditService;
|
||||||
|
import com.cameleer.license.LicenseLimits;
|
||||||
|
import com.cameleer.server.core.license.LicenseGate;
|
||||||
|
import io.micrometer.core.instrument.Counter;
|
||||||
|
import io.micrometer.core.instrument.MeterRegistry;
|
||||||
|
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import java.util.LinkedHashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.concurrent.ConcurrentHashMap;
|
||||||
|
import java.util.concurrent.ConcurrentMap;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Single entry point for license cap enforcement (spec §4).
|
||||||
|
*
|
||||||
|
* <p>Consults {@link LicenseGate#getEffectiveLimits()} (license-overrides UNION default tier when
|
||||||
|
* ACTIVE/GRACE; defaults-only otherwise) and rejects calls whose projected usage would exceed the
|
||||||
|
* cap. Rejections increment a per-limit Micrometer counter and, when an {@link AuditService} is
|
||||||
|
* wired, emit an {@link AuditCategory#LICENSE} {@code cap_exceeded} audit row.</p>
|
||||||
|
*
|
||||||
|
* <p>Unknown limit keys are treated as programmer errors and surface as
|
||||||
|
* {@link IllegalArgumentException} (propagated from {@link LicenseLimits#get(String)}), not
|
||||||
|
* {@link LicenseCapExceededException}.</p>
|
||||||
|
*/
|
||||||
|
@Component
|
||||||
|
public class LicenseEnforcer {
|
||||||
|
|
||||||
|
private static final Logger log = LoggerFactory.getLogger(LicenseEnforcer.class);
|
||||||
|
private static final String COUNTER_NAME = "cameleer_license_cap_rejections_total";
|
||||||
|
|
||||||
|
private final LicenseGate gate;
|
||||||
|
private final MeterRegistry meters;
|
||||||
|
private final AuditService audit;
|
||||||
|
private final ConcurrentMap<String, Counter> rejectionCounters = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
|
@Autowired
|
||||||
|
public LicenseEnforcer(LicenseGate gate, MeterRegistry meters, AuditService audit) {
|
||||||
|
this.gate = gate;
|
||||||
|
this.meters = meters;
|
||||||
|
this.audit = audit;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Test-only ctor with no metrics or audit. */
|
||||||
|
public LicenseEnforcer(LicenseGate gate) {
|
||||||
|
this(gate, new SimpleMeterRegistry(), null);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void assertWithinCap(String limitKey, long currentUsage, long requestedDelta) {
|
||||||
|
LicenseLimits effective = gate.getEffectiveLimits();
|
||||||
|
int cap = effective.get(limitKey); // throws IllegalArgumentException if unknown key
|
||||||
|
long projected = currentUsage + requestedDelta;
|
||||||
|
if (projected > cap) {
|
||||||
|
rejectionCounters.computeIfAbsent(limitKey, k -> Counter.builder(COUNTER_NAME)
|
||||||
|
.tag("limit", k).register(meters)).increment();
|
||||||
|
if (audit != null) {
|
||||||
|
try {
|
||||||
|
Map<String, Object> detail = new LinkedHashMap<>();
|
||||||
|
detail.put("limit", limitKey);
|
||||||
|
detail.put("current", currentUsage);
|
||||||
|
detail.put("requested", requestedDelta);
|
||||||
|
detail.put("cap", cap);
|
||||||
|
detail.put("state", gate.getState().name());
|
||||||
|
audit.log("system", "cap_exceeded", AuditCategory.LICENSE, limitKey, detail, AuditResult.FAILURE, null);
|
||||||
|
} catch (RuntimeException e) {
|
||||||
|
// Audit storage degraded; log and continue so the cap rejection still surfaces as 403.
|
||||||
|
log.warn("Failed to write cap_exceeded audit row for limit={}: {}", limitKey, e.toString());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
throw new LicenseCapExceededException(limitKey, projected, cap);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,36 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
import com.cameleer.license.LicenseInfo;
|
||||||
|
import com.cameleer.server.core.license.LicenseGate;
|
||||||
|
import org.springframework.http.HttpStatus;
|
||||||
|
import org.springframework.http.ResponseEntity;
|
||||||
|
import org.springframework.web.bind.annotation.ControllerAdvice;
|
||||||
|
import org.springframework.web.bind.annotation.ExceptionHandler;
|
||||||
|
|
||||||
|
import java.util.LinkedHashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
@ControllerAdvice
|
||||||
|
public class LicenseExceptionAdvice {
|
||||||
|
|
||||||
|
private final LicenseGate gate;
|
||||||
|
|
||||||
|
public LicenseExceptionAdvice(LicenseGate gate) {
|
||||||
|
this.gate = gate;
|
||||||
|
}
|
||||||
|
|
||||||
|
@ExceptionHandler(LicenseCapExceededException.class)
|
||||||
|
public ResponseEntity<Map<String, Object>> handle(LicenseCapExceededException e) {
|
||||||
|
var state = gate.getState();
|
||||||
|
LicenseInfo info = gate.getCurrent();
|
||||||
|
String reason = gate.getInvalidReason();
|
||||||
|
Map<String, Object> body = new LinkedHashMap<>();
|
||||||
|
body.put("error", "license cap reached");
|
||||||
|
body.put("limit", e.limitKey());
|
||||||
|
body.put("current", e.current());
|
||||||
|
body.put("cap", e.cap());
|
||||||
|
body.put("state", state.name());
|
||||||
|
body.put("message", LicenseMessageRenderer.forCap(state, info, e.limitKey(), e.current(), e.cap(), reason));
|
||||||
|
return ResponseEntity.status(HttpStatus.FORBIDDEN).body(body);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,83 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
import com.cameleer.license.LicenseInfo;
|
||||||
|
import com.cameleer.license.LicenseState;
|
||||||
|
|
||||||
|
import java.time.Duration;
|
||||||
|
import java.time.Instant;
|
||||||
|
|
||||||
|
/**
 * Stateless renderer for operator-facing license messages. Centralizes the phrasing used by
 * cap-rejection responses ({@link #forCap}) and state-only surfaces such as the /usage
 * endpoint ({@link #forState}) so the two stay consistent.
 */
public final class LicenseMessageRenderer {

    // Utility class — no instances.
    private LicenseMessageRenderer() {}

    /** Overload of {@link #forCap(LicenseState, LicenseInfo, String, long, long, String)} with no invalid-reason. */
    public static String forCap(LicenseState state, LicenseInfo info, String limit, long current, long cap) {
        return forCap(state, info, limit, current, cap, null);
    }

    /**
     * Renders the message shown when a cap rejection occurs, phrased per license state.
     *
     * @param info          the loaded license; may be {@code null} (every branch guards for it) —
     *                      GRACE/EXPIRED then fall back to 0-day figures
     * @param invalidReason used only by the INVALID branch; {@code null} renders as "unknown reason"
     */
    public static String forCap(LicenseState state, LicenseInfo info, String limit, long current, long cap, String invalidReason) {
        switch (state) {
            case ABSENT:
                return "No license installed: default tier applies (cap = " + cap + " for " + limit
                        + "). Install a license to raise this.";
            case ACTIVE:
                return "License cap reached: " + limit + " = " + cap + ". Current usage is " + current
                        + ". Contact your vendor to raise the cap.";
            case GRACE: {
                long expiredDaysAgo = info == null ? 0 : daysSince(info.expiresAt());
                // Remaining grace = configured grace window minus days already past expiry, floored at 0.
                long graceRemaining = info == null ? 0
                        : Math.max(0, info.gracePeriodDays() - expiredDaysAgo);
                return "License expired " + expiredDaysAgo + " day(s) ago and is in its grace period "
                        + "(ends in " + graceRemaining + " days). Cap unchanged at " + cap
                        + ". Renew before grace ends.";
            }
            case EXPIRED: {
                long expiredDaysAgo = info == null ? 0 : daysSince(info.expiresAt());
                return "License expired " + expiredDaysAgo + " days ago: system reverted to default tier (cap = "
                        + cap + " for " + limit + "). Current usage is " + current
                        + ". Renew the license to lift the cap.";
            }
            case INVALID:
                return "License rejected (" + (invalidReason == null ? "unknown reason" : invalidReason)
                        + "): default tier applies (cap = " + cap + " for " + limit + "). Fix the license to raise this.";
            default:
                // Fallback for any future LicenseState value not handled above.
                return "License cap reached: " + limit + " = " + cap;
        }
    }

    /**
     * State-only message used by the /usage endpoint and metrics surfaces where no specific
     * cap is being checked. Mirrors forCap() phrasing but omits limit/current/cap details.
     */
    public static String forState(LicenseState state, LicenseInfo info) {
        return forState(state, info, null);
    }

    /**
     * State-only message variant that can name the validator's rejection reason.
     *
     * @param invalidReason used only by the INVALID branch; {@code null} renders as "unknown reason"
     */
    public static String forState(LicenseState state, LicenseInfo info, String invalidReason) {
        switch (state) {
            case ABSENT:
                return "No license installed: default tier applies. Install a license to raise the caps.";
            case ACTIVE:
                return "License is active.";
            case GRACE: {
                long expiredDaysAgo = info == null ? 0 : daysSince(info.expiresAt());
                long graceRemaining = info == null ? 0
                        : Math.max(0, info.gracePeriodDays() - expiredDaysAgo);
                return "License expired " + expiredDaysAgo + " day(s) ago and is in its grace period "
                        + "(ends in " + graceRemaining + " days). Renew before grace ends.";
            }
            case EXPIRED: {
                long expiredDaysAgo = info == null ? 0 : daysSince(info.expiresAt());
                return "License expired " + expiredDaysAgo + " days ago: system reverted to default tier. Renew the license to lift the caps.";
            }
            case INVALID:
                return "License rejected (" + (invalidReason == null ? "unknown reason" : invalidReason)
                        + "): default tier applies. Fix the license to raise the caps.";
            default:
                return "License state: " + state.name();
        }
    }

    /** Whole days elapsed since {@code t}, clamped at 0 so future instants never read negative. */
    private static long daysSince(Instant t) {
        return Math.max(0, Duration.between(t, Instant.now()).toDays());
    }
}
|
||||||
@@ -0,0 +1,77 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
import com.cameleer.server.core.license.LicenseGate;
|
||||||
|
import com.cameleer.license.LicenseState;
|
||||||
|
import io.micrometer.core.instrument.Gauge;
|
||||||
|
import io.micrometer.core.instrument.MeterRegistry;
|
||||||
|
import org.springframework.beans.factory.annotation.Value;
|
||||||
|
import org.springframework.context.event.EventListener;
|
||||||
|
import org.springframework.scheduling.annotation.Scheduled;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import java.time.Duration;
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.util.EnumMap;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.concurrent.atomic.AtomicReference;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Prometheus gauges that track the live license posture.
|
||||||
|
*
|
||||||
|
* <ul>
|
||||||
|
* <li>{@code cameleer_license_state{state=...}} — one-hot per {@link LicenseState}, exactly
|
||||||
|
* one tag value carries 1.0 at any time.</li>
|
||||||
|
* <li>{@code cameleer_license_days_remaining} — days until {@code expiresAt}; negative
|
||||||
|
* (-1.0) when ABSENT/INVALID (no license loaded).</li>
|
||||||
|
* <li>{@code cameleer_license_last_validated_age_seconds} — seconds since the persisted
|
||||||
|
* {@code last_validated_at}; 0 when there is no DB row.</li>
|
||||||
|
* </ul>
|
||||||
|
*
|
||||||
|
* <p>Refreshed eagerly on {@link LicenseChangedEvent} and lazily every 60 seconds so values
|
||||||
|
* stay current even without explicit state changes (e.g. days_remaining ticks down across
|
||||||
|
* the day, validated_age grows monotonically).</p>
|
||||||
|
*/
|
||||||
|
@Component
|
||||||
|
public class LicenseMetrics {
|
||||||
|
|
||||||
|
private final LicenseGate gate;
|
||||||
|
private final LicenseRepository repo;
|
||||||
|
private final String tenantId;
|
||||||
|
|
||||||
|
private final Map<LicenseState, AtomicReference<Double>> stateGauges = new EnumMap<>(LicenseState.class);
|
||||||
|
private final AtomicReference<Double> daysRemaining = new AtomicReference<>(0.0);
|
||||||
|
private final AtomicReference<Double> validatedAge = new AtomicReference<>(0.0);
|
||||||
|
|
||||||
|
public LicenseMetrics(LicenseGate gate, LicenseRepository repo, MeterRegistry meters,
|
||||||
|
@Value("${cameleer.server.tenant.id:default}") String tenantId) {
|
||||||
|
this.gate = gate;
|
||||||
|
this.repo = repo;
|
||||||
|
this.tenantId = tenantId;
|
||||||
|
for (var s : LicenseState.values()) {
|
||||||
|
var ref = new AtomicReference<>(0.0);
|
||||||
|
stateGauges.put(s, ref);
|
||||||
|
Gauge.builder("cameleer_license_state", ref, AtomicReference::get)
|
||||||
|
.tag("state", s.name())
|
||||||
|
.register(meters);
|
||||||
|
}
|
||||||
|
Gauge.builder("cameleer_license_days_remaining", daysRemaining, AtomicReference::get)
|
||||||
|
.register(meters);
|
||||||
|
Gauge.builder("cameleer_license_last_validated_age_seconds", validatedAge, AtomicReference::get)
|
||||||
|
.register(meters);
|
||||||
|
}
|
||||||
|
|
||||||
|
@EventListener(LicenseChangedEvent.class)
|
||||||
|
@Scheduled(fixedDelay = 60_000)
|
||||||
|
public void refresh() {
|
||||||
|
var state = gate.getState();
|
||||||
|
for (var s : LicenseState.values()) {
|
||||||
|
stateGauges.get(s).set(s == state ? 1.0 : 0.0);
|
||||||
|
}
|
||||||
|
var info = gate.getCurrent();
|
||||||
|
daysRemaining.set(info == null
|
||||||
|
? -1.0
|
||||||
|
: (double) Duration.between(Instant.now(), info.expiresAt()).toDays());
|
||||||
|
repo.findByTenantId(tenantId).ifPresent(rec ->
|
||||||
|
validatedAge.set((double) Duration.between(rec.lastValidatedAt(), Instant.now()).toSeconds()));
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,14 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.util.UUID;
|
||||||
|
|
||||||
|
/**
 * Persisted license row (one per tenant), read and written through {@code LicenseRepository}.
 *
 * @param tenantId        owning tenant (upsert key)
 * @param token           the raw license token as installed
 * @param licenseId       id extracted from the validated token
 * @param installedAt     when the token was written
 * @param installedBy     principal that installed the token
 * @param expiresAt       token expiry taken from the validated license
 * @param lastValidatedAt timestamp of the most recent successful validation
 */
public record LicenseRecord(
        String tenantId,
        String token,
        UUID licenseId,
        Instant installedAt,
        String installedBy,
        Instant expiresAt,
        Instant lastValidatedAt
) {}
|
||||||
@@ -0,0 +1,17 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.util.Optional;
|
||||||
|
|
||||||
|
/**
 * Persistence port for the per-tenant license row; one row per {@code tenantId}.
 * Backed by PostgreSQL in production deployments — TODO confirm against the implementation.
 */
public interface LicenseRepository {

    /** Returns the license row for {@code tenantId}, or empty when none is installed. */
    Optional<LicenseRecord> findByTenantId(String tenantId);

    /** Insert or replace the row for {@code tenantId}. */
    void upsert(LicenseRecord record);

    /** Update {@code last_validated_at} to {@code now} and return rows affected (0 = no row). */
    int touchValidated(String tenantId, Instant now);

    /** Delete the row (used when the operator clears a license; not a public API in v1). */
    int delete(String tenantId);
}
|
||||||
@@ -0,0 +1,58 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
import org.springframework.boot.context.event.ApplicationReadyEvent;
|
||||||
|
import org.springframework.context.event.EventListener;
|
||||||
|
import org.springframework.scheduling.annotation.Async;
|
||||||
|
import org.springframework.scheduling.annotation.Scheduled;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Daily revalidation cron + on-startup revalidation 60s after {@link ApplicationReadyEvent}.
|
||||||
|
*
|
||||||
|
* <p>The startup tick catches ABSENT->ACTIVE transitions when the license was written to
|
||||||
|
* PostgreSQL between server starts (e.g. SaaS provisioning), and gives slow downstream
|
||||||
|
* components time to come up before the first license event fires. The daily cron ensures
|
||||||
|
* expirations and clock drift are caught even in long-running deployments.</p>
|
||||||
|
*
|
||||||
|
* <p>Both invocations call {@link LicenseService#revalidate()} which is internally idempotent
|
||||||
|
* and exception-safe; this class additionally swallows any escape so a misbehaving validator
|
||||||
|
* cannot crash the scheduler thread.</p>
|
||||||
|
*/
|
||||||
|
@Component
|
||||||
|
public class LicenseRevalidationJob {
|
||||||
|
|
||||||
|
private static final Logger log = LoggerFactory.getLogger(LicenseRevalidationJob.class);
|
||||||
|
|
||||||
|
private final LicenseService svc;
|
||||||
|
|
||||||
|
public LicenseRevalidationJob(LicenseService svc) {
|
||||||
|
this.svc = svc;
|
||||||
|
}
|
||||||
|
|
||||||
|
@EventListener(ApplicationReadyEvent.class)
|
||||||
|
@Async
|
||||||
|
public void onStartup() {
|
||||||
|
try {
|
||||||
|
Thread.sleep(60_000);
|
||||||
|
} catch (InterruptedException e) {
|
||||||
|
Thread.currentThread().interrupt();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
revalidate();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Scheduled(cron = "0 0 3 * * *")
|
||||||
|
public void daily() {
|
||||||
|
revalidate();
|
||||||
|
}
|
||||||
|
|
||||||
|
private void revalidate() {
|
||||||
|
try {
|
||||||
|
svc.revalidate();
|
||||||
|
} catch (Exception e) {
|
||||||
|
log.error("Revalidation crashed: {}", e.getMessage());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,133 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
import com.cameleer.server.core.admin.AuditCategory;
|
||||||
|
import com.cameleer.server.core.admin.AuditResult;
|
||||||
|
import com.cameleer.server.core.admin.AuditService;
|
||||||
|
import com.cameleer.license.LicenseInfo;
|
||||||
|
import com.cameleer.license.LicenseValidator;
|
||||||
|
import com.cameleer.server.core.license.LicenseGate;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
import org.springframework.context.ApplicationEventPublisher;
|
||||||
|
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.util.LinkedHashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Optional;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Single mediation point for license token install / replace / revalidate.
|
||||||
|
*
|
||||||
|
* <p>Audits under {@link AuditCategory#LICENSE}, persists to PostgreSQL via
|
||||||
|
* {@link LicenseRepository}, mutates the in-memory {@link LicenseGate}, and publishes a
|
||||||
|
* {@link LicenseChangedEvent} so downstream listeners (retention policy, license metrics,
|
||||||
|
* etc.) react uniformly to every state change.</p>
|
||||||
|
*/
|
||||||
|
public class LicenseService {
|
||||||
|
|
||||||
|
private static final Logger log = LoggerFactory.getLogger(LicenseService.class);
|
||||||
|
|
||||||
|
private final String tenantId;
|
||||||
|
private final LicenseRepository repo;
|
||||||
|
private final LicenseGate gate;
|
||||||
|
private final LicenseValidator validator;
|
||||||
|
private final AuditService audit;
|
||||||
|
private final ApplicationEventPublisher events;
|
||||||
|
|
||||||
|
public LicenseService(String tenantId, LicenseRepository repo, LicenseGate gate,
|
||||||
|
LicenseValidator validator, AuditService audit,
|
||||||
|
ApplicationEventPublisher events) {
|
||||||
|
this.tenantId = tenantId;
|
||||||
|
this.repo = repo;
|
||||||
|
this.gate = gate;
|
||||||
|
this.validator = validator;
|
||||||
|
this.audit = audit;
|
||||||
|
this.events = events;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Install a token from any source (env, file, api, db). */
|
||||||
|
public LicenseInfo install(String token, String installedBy, String source) {
|
||||||
|
LicenseInfo info;
|
||||||
|
try {
|
||||||
|
info = validator.validate(token);
|
||||||
|
} catch (Exception e) {
|
||||||
|
String reason = e.getMessage();
|
||||||
|
gate.markInvalid(reason);
|
||||||
|
Map<String, Object> detail = new LinkedHashMap<>();
|
||||||
|
detail.put("reason", reason);
|
||||||
|
detail.put("source", source);
|
||||||
|
audit.log(installedBy, "reject_license", AuditCategory.LICENSE,
|
||||||
|
tenantId, detail, AuditResult.FAILURE, null);
|
||||||
|
events.publishEvent(new LicenseChangedEvent(gate.getState(), gate.getCurrent()));
|
||||||
|
throw e instanceof RuntimeException re ? re : new IllegalArgumentException(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
Optional<LicenseRecord> existing = repo.findByTenantId(tenantId);
|
||||||
|
Instant now = Instant.now();
|
||||||
|
repo.upsert(new LicenseRecord(
|
||||||
|
tenantId, token, info.licenseId(),
|
||||||
|
now, installedBy, info.expiresAt(), now));
|
||||||
|
gate.load(info);
|
||||||
|
|
||||||
|
Map<String, Object> detail = new LinkedHashMap<>();
|
||||||
|
detail.put("licenseId", info.licenseId().toString());
|
||||||
|
detail.put("expiresAt", info.expiresAt().toString());
|
||||||
|
detail.put("installedBy", installedBy);
|
||||||
|
detail.put("source", source);
|
||||||
|
if (existing.isPresent()) {
|
||||||
|
detail.put("previousLicenseId", existing.get().licenseId().toString());
|
||||||
|
audit.log(installedBy, "replace_license", AuditCategory.LICENSE,
|
||||||
|
info.licenseId().toString(), detail, AuditResult.SUCCESS, null);
|
||||||
|
} else {
|
||||||
|
audit.log(installedBy, "install_license", AuditCategory.LICENSE,
|
||||||
|
info.licenseId().toString(), detail, AuditResult.SUCCESS, null);
|
||||||
|
}
|
||||||
|
|
||||||
|
events.publishEvent(new LicenseChangedEvent(gate.getState(), info));
|
||||||
|
return info;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Boot-time load: prefer env/file overrides; falls back to DB; ABSENT if none. */
|
||||||
|
public void loadInitial(Optional<String> envToken, Optional<String> fileToken) {
|
||||||
|
if (envToken.isPresent()) {
|
||||||
|
try { install(envToken.get(), "system", "env"); return; }
|
||||||
|
catch (Exception e) { log.error("env-var license rejected: {}", e.getMessage()); }
|
||||||
|
}
|
||||||
|
if (fileToken.isPresent()) {
|
||||||
|
try { install(fileToken.get(), "system", "file"); return; }
|
||||||
|
catch (Exception e) { log.error("file license rejected: {}", e.getMessage()); }
|
||||||
|
}
|
||||||
|
Optional<LicenseRecord> persisted = repo.findByTenantId(tenantId);
|
||||||
|
if (persisted.isPresent()) {
|
||||||
|
try { install(persisted.get().token(), persisted.get().installedBy(), "db"); }
|
||||||
|
catch (Exception e) { log.error("DB license rejected: {}", e.getMessage()); }
|
||||||
|
} else {
|
||||||
|
log.info("No license configured - running in default tier");
|
||||||
|
events.publishEvent(new LicenseChangedEvent(gate.getState(), null));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Re-run validation against the persisted token (daily job). */
|
||||||
|
public void revalidate() {
|
||||||
|
Optional<LicenseRecord> persisted = repo.findByTenantId(tenantId);
|
||||||
|
if (persisted.isEmpty()) return;
|
||||||
|
try {
|
||||||
|
LicenseInfo info = validator.validate(persisted.get().token());
|
||||||
|
repo.touchValidated(tenantId, Instant.now());
|
||||||
|
gate.load(info);
|
||||||
|
events.publishEvent(new LicenseChangedEvent(gate.getState(), info));
|
||||||
|
} catch (Exception e) {
|
||||||
|
String reason = e.getMessage();
|
||||||
|
gate.markInvalid(reason);
|
||||||
|
Map<String, Object> detail = new LinkedHashMap<>();
|
||||||
|
detail.put("licenseId", persisted.get().licenseId().toString());
|
||||||
|
detail.put("reason", reason);
|
||||||
|
audit.log("system", "revalidate_license", AuditCategory.LICENSE,
|
||||||
|
persisted.get().licenseId().toString(), detail, AuditResult.FAILURE, null);
|
||||||
|
events.publishEvent(new LicenseChangedEvent(gate.getState(), null));
|
||||||
|
log.error("Revalidation failed: {}", reason);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public String getTenantId() { return tenantId; }
|
||||||
|
}
|
||||||
@@ -0,0 +1,88 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
import org.springframework.jdbc.core.JdbcTemplate;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import java.util.LinkedHashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Read-side usage snapshot used by the /api/v1/admin/license/usage endpoint and license metrics.
|
||||||
|
*
|
||||||
|
* <p>Counts come straight from PostgreSQL row counts; compute aggregates SUM over
|
||||||
|
* non-stopped deployments and read replica/cpu/memory from the
|
||||||
|
* {@code deployed_config_snapshot.containerConfig} JSONB sub-object. Pre-RUNNING deployments
|
||||||
|
* (STARTING with no snapshot yet) contribute defaults (1 replica, 0 cpu, 0 memory) until they
|
||||||
|
* roll forward.</p>
|
||||||
|
*
|
||||||
|
* <p>{@code max_agents} is not in PG — the registry is in-memory; callers feed the live count
|
||||||
|
* into {@link #agentCount(int)} which echoes it for assembly into the snapshot map.</p>
|
||||||
|
*/
|
||||||
|
@Component
|
||||||
|
public class LicenseUsageReader {
|
||||||
|
|
||||||
|
private final JdbcTemplate jdbc;
|
||||||
|
|
||||||
|
public LicenseUsageReader(JdbcTemplate jdbc) {
|
||||||
|
this.jdbc = jdbc;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Map<String, Long> snapshot() {
|
||||||
|
Map<String, Long> out = new LinkedHashMap<>();
|
||||||
|
out.put("max_environments", count("environments"));
|
||||||
|
out.put("max_apps", count("apps"));
|
||||||
|
out.put("max_users", count("users"));
|
||||||
|
out.put("max_outbound_connections", count("outbound_connections"));
|
||||||
|
out.put("max_alert_rules", count("alert_rules"));
|
||||||
|
Map<String, Long> compute = jdbc.queryForObject(
|
||||||
|
"SELECT " +
|
||||||
|
" COALESCE(SUM(replicas * cpu_millis), 0) AS cpu, " +
|
||||||
|
" COALESCE(SUM(replicas * memory_mb), 0) AS mem, " +
|
||||||
|
" COALESCE(SUM(replicas), 0) AS reps " +
|
||||||
|
"FROM ( " +
|
||||||
|
" SELECT " +
|
||||||
|
" COALESCE((d.deployed_config_snapshot->'containerConfig'->>'replicas')::int, 1) AS replicas, " +
|
||||||
|
" COALESCE((d.deployed_config_snapshot->'containerConfig'->>'cpuLimit')::int, 0) AS cpu_millis, " +
|
||||||
|
" COALESCE((d.deployed_config_snapshot->'containerConfig'->>'memoryLimitMb')::int, 0) AS memory_mb " +
|
||||||
|
" FROM deployments d " +
|
||||||
|
" WHERE d.status IN ('STARTING','RUNNING','DEGRADED','STOPPING') " +
|
||||||
|
") s",
|
||||||
|
(rs, n) -> Map.of(
|
||||||
|
"max_total_cpu_millis", rs.getLong("cpu"),
|
||||||
|
"max_total_memory_mb", rs.getLong("mem"),
|
||||||
|
"max_total_replicas", rs.getLong("reps")
|
||||||
|
));
|
||||||
|
out.putAll(compute);
|
||||||
|
return out;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Compute-cap usage tuple consumed by {@code DeploymentExecutor} pre-flight enforcement.
|
||||||
|
* Sums over all non-stopped deployments.
|
||||||
|
*/
|
||||||
|
public record ComputeUsage(long cpuMillis, long memoryMb, long replicas) {}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convenience accessor over {@link #snapshot()} that returns just the three compute
|
||||||
|
* aggregates as a typed tuple. Used by {@code DeploymentExecutor.executeAsync} to feed
|
||||||
|
* {@code LicenseEnforcer.assertWithinCap} for the {@code max_total_cpu_millis} /
|
||||||
|
* {@code max_total_memory_mb} / {@code max_total_replicas} caps. Each call re-reads PG
|
||||||
|
* — there is no caching, so cap checks always see the latest committed state.
|
||||||
|
*/
|
||||||
|
public ComputeUsage computeUsage() {
|
||||||
|
Map<String, Long> snap = snapshot();
|
||||||
|
return new ComputeUsage(
|
||||||
|
snap.getOrDefault("max_total_cpu_millis", 0L),
|
||||||
|
snap.getOrDefault("max_total_memory_mb", 0L),
|
||||||
|
snap.getOrDefault("max_total_replicas", 0L));
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Echoes the live agent count fed in by the controller (registry is in-memory). */
|
||||||
|
public long agentCount(int liveAgents) {
|
||||||
|
return liveAgents;
|
||||||
|
}
|
||||||
|
|
||||||
|
private long count(String table) {
|
||||||
|
return jdbc.queryForObject("SELECT COUNT(*) FROM " + table, Long.class);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,66 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
import org.springframework.jdbc.core.JdbcTemplate;
|
||||||
|
import org.springframework.jdbc.core.RowMapper;
|
||||||
|
|
||||||
|
import java.sql.Timestamp;
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.util.Optional;
|
||||||
|
import java.util.UUID;
|
||||||
|
|
||||||
|
public class PostgresLicenseRepository implements LicenseRepository {
|
||||||
|
|
||||||
|
private final JdbcTemplate jdbc;
|
||||||
|
|
||||||
|
public PostgresLicenseRepository(JdbcTemplate jdbc) {
|
||||||
|
this.jdbc = jdbc;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static final RowMapper<LicenseRecord> MAPPER = (rs, n) -> new LicenseRecord(
|
||||||
|
rs.getString("tenant_id"),
|
||||||
|
rs.getString("token"),
|
||||||
|
(UUID) rs.getObject("license_id"),
|
||||||
|
rs.getTimestamp("installed_at").toInstant(),
|
||||||
|
rs.getString("installed_by"),
|
||||||
|
rs.getTimestamp("expires_at").toInstant(),
|
||||||
|
rs.getTimestamp("last_validated_at").toInstant()
|
||||||
|
);
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Optional<LicenseRecord> findByTenantId(String tenantId) {
|
||||||
|
return jdbc.query(
|
||||||
|
"SELECT tenant_id, token, license_id, installed_at, installed_by, expires_at, last_validated_at " +
|
||||||
|
"FROM license WHERE tenant_id = ?",
|
||||||
|
MAPPER, tenantId).stream().findFirst();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void upsert(LicenseRecord r) {
|
||||||
|
jdbc.update(
|
||||||
|
"INSERT INTO license (tenant_id, token, license_id, installed_at, installed_by, expires_at, last_validated_at) " +
|
||||||
|
"VALUES (?, ?, ?, ?, ?, ?, ?) " +
|
||||||
|
"ON CONFLICT (tenant_id) DO UPDATE SET " +
|
||||||
|
" token = EXCLUDED.token, " +
|
||||||
|
" license_id = EXCLUDED.license_id, " +
|
||||||
|
" installed_at = EXCLUDED.installed_at, " +
|
||||||
|
" installed_by = EXCLUDED.installed_by, " +
|
||||||
|
" expires_at = EXCLUDED.expires_at, " +
|
||||||
|
" last_validated_at = EXCLUDED.last_validated_at",
|
||||||
|
r.tenantId(), r.token(), r.licenseId(),
|
||||||
|
Timestamp.from(r.installedAt()), r.installedBy(),
|
||||||
|
Timestamp.from(r.expiresAt()), Timestamp.from(r.lastValidatedAt())
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int touchValidated(String tenantId, Instant now) {
|
||||||
|
return jdbc.update(
|
||||||
|
"UPDATE license SET last_validated_at = ? WHERE tenant_id = ?",
|
||||||
|
Timestamp.from(now), tenantId);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int delete(String tenantId) {
|
||||||
|
return jdbc.update("DELETE FROM license WHERE tenant_id = ?", tenantId);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,119 @@
|
|||||||
|
package com.cameleer.server.app.license;
|
||||||
|
|
||||||
|
import com.cameleer.server.core.license.LicenseGate;
|
||||||
|
import com.cameleer.license.LicenseLimits;
|
||||||
|
import com.cameleer.server.core.runtime.Environment;
|
||||||
|
import com.cameleer.server.core.runtime.EnvironmentRepository;
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
import org.springframework.beans.factory.annotation.Qualifier;
|
||||||
|
import org.springframework.context.event.EventListener;
|
||||||
|
import org.springframework.jdbc.core.JdbcTemplate;
|
||||||
|
import org.springframework.scheduling.annotation.Async;
|
||||||
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Recomputes ClickHouse per-environment TTL on every {@link LicenseChangedEvent}.
|
||||||
|
*
|
||||||
|
* <p>Spec §4.3 — when a license is installed, replaced, or expires, the effective
|
||||||
|
* retention cap may change. For each (table, env) pair this listener emits one
|
||||||
|
* {@code ALTER TABLE … MODIFY TTL <expr> WHERE environment = '<slug>'} statement
|
||||||
|
* with {@code effective = min(licenseCap, env.configuredRetentionDays)}.</p>
|
||||||
|
*
|
||||||
|
* <p>ClickHouse 22.3+ supports per-row TTL via the {@code WHERE} predicate; the
|
||||||
|
* project's CH version (24.12) is well above that floor. ClickHouse failures are
|
||||||
|
* logged and swallowed — TTL recompute is best-effort and must not propagate
|
||||||
|
* to the originating license install/revalidate path.</p>
|
||||||
|
*
|
||||||
|
* <p>NOTE: {@code route_diagrams} has no TTL clause in {@code init.sql} — it's a
|
||||||
|
* {@code ReplacingMergeTree} keyed on content_hash, not a time-series table —
|
||||||
|
* so it is intentionally excluded here. {@code server_metrics} has no
|
||||||
|
* {@code environment} column (server-wide) so it is also excluded; its 90-day
|
||||||
|
* cap is fixed in the schema.</p>
|
||||||
|
*/
|
||||||
|
@Component
|
||||||
|
public class RetentionPolicyApplier {
|
||||||
|
|
||||||
|
private static final Logger log = LoggerFactory.getLogger(RetentionPolicyApplier.class);
|
||||||
|
|
||||||
|
/** (table, time column, license cap key, env-configured-days extractor). */
|
||||||
|
private record TableSpec(String table, String timeCol, String capKey, Extractor extractor) {}
|
||||||
|
|
||||||
|
@FunctionalInterface
|
||||||
|
private interface Extractor {
|
||||||
|
int days(Environment env);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Tables with a TTL clause AND an {@code environment} column in {@code init.sql}.
|
||||||
|
* Verified against the schema at task time — keep in sync if new retention-bound
|
||||||
|
* tables are added.
|
||||||
|
*/
|
||||||
|
static final List<TableSpec> SPECS = List.of(
|
||||||
|
new TableSpec("executions", "start_time", "max_execution_retention_days", Environment::executionRetentionDays),
|
||||||
|
new TableSpec("processor_executions", "start_time", "max_execution_retention_days", Environment::executionRetentionDays),
|
||||||
|
new TableSpec("logs", "timestamp", "max_log_retention_days", Environment::logRetentionDays),
|
||||||
|
new TableSpec("agent_metrics", "collected_at", "max_metric_retention_days", Environment::metricRetentionDays),
|
||||||
|
new TableSpec("agent_events", "timestamp", "max_metric_retention_days", Environment::metricRetentionDays)
|
||||||
|
);
|
||||||
|
|
||||||
|
private final LicenseGate gate;
|
||||||
|
private final EnvironmentRepository envRepo;
|
||||||
|
private final JdbcTemplate clickhouseJdbc;
|
||||||
|
|
||||||
|
public RetentionPolicyApplier(LicenseGate gate,
|
||||||
|
EnvironmentRepository envRepo,
|
||||||
|
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickhouseJdbc) {
|
||||||
|
this.gate = gate;
|
||||||
|
this.envRepo = envRepo;
|
||||||
|
this.clickhouseJdbc = clickhouseJdbc;
|
||||||
|
}
|
||||||
|
|
||||||
|
@EventListener(LicenseChangedEvent.class)
|
||||||
|
@Async
|
||||||
|
public void onLicenseChanged(LicenseChangedEvent event) {
|
||||||
|
LicenseLimits limits;
|
||||||
|
try {
|
||||||
|
limits = gate.getEffectiveLimits();
|
||||||
|
} catch (Exception e) {
|
||||||
|
log.warn("Skipping TTL recompute — could not read effective limits: {}", e.getMessage());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
List<Environment> envs;
|
||||||
|
try {
|
||||||
|
envs = envRepo.findAll();
|
||||||
|
} catch (Exception e) {
|
||||||
|
log.warn("Skipping TTL recompute — could not load environments: {}", e.getMessage());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
log.info("License changed (state={}) — recomputing TTL across {} environment(s) and {} table(s)",
|
||||||
|
event.state(), envs.size(), SPECS.size());
|
||||||
|
|
||||||
|
for (Environment env : envs) {
|
||||||
|
for (TableSpec spec : SPECS) {
|
||||||
|
int cap = limits.get(spec.capKey);
|
||||||
|
int configured = spec.extractor.days(env);
|
||||||
|
int effective = Math.min(cap, configured);
|
||||||
|
// Slugs are regex-validated `^[a-z0-9][a-z0-9-]{0,63}$`, so the replacement
|
||||||
|
// is defense-in-depth — single quotes can never be present.
|
||||||
|
String envLiteral = env.slug().replace("'", "''");
|
||||||
|
String sql = "ALTER TABLE " + spec.table
|
||||||
|
+ " MODIFY TTL toDateTime(" + spec.timeCol
|
||||||
|
+ ") + INTERVAL " + effective + " DAY DELETE"
|
||||||
|
+ " WHERE environment = '" + envLiteral + "'";
|
||||||
|
try {
|
||||||
|
clickhouseJdbc.execute(sql);
|
||||||
|
log.info("Applied TTL: table={} env={} days={} (cap={}, configured={})",
|
||||||
|
spec.table, env.slug(), effective, cap, configured);
|
||||||
|
} catch (Exception e) {
|
||||||
|
log.warn("Failed to apply TTL for table={} env={}: {}",
|
||||||
|
spec.table, env.slug(), e.getMessage());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,63 @@
|
|||||||
|
package com.cameleer.server.app.metrics;
|
||||||
|
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
import org.springframework.beans.factory.annotation.Value;
|
||||||
|
import org.springframework.context.annotation.Bean;
|
||||||
|
import org.springframework.context.annotation.Configuration;
|
||||||
|
|
||||||
|
import java.net.InetAddress;
|
||||||
|
import java.net.UnknownHostException;
|
||||||
|
import java.util.UUID;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Resolves a stable identifier for this server process, used as the
|
||||||
|
* {@code server_instance_id} on every server_metrics sample. The value is
|
||||||
|
* fixed at boot, so counters restart cleanly whenever the id rotates.
|
||||||
|
*
|
||||||
|
* <p>Precedence:
|
||||||
|
* <ol>
|
||||||
|
* <li>{@code cameleer.server.instance-id} property / {@code CAMELEER_SERVER_INSTANCE_ID} env
|
||||||
|
* <li>{@code HOSTNAME} env (populated by Docker/Kubernetes)
|
||||||
|
* <li>{@link InetAddress#getLocalHost()} hostname
|
||||||
|
* <li>Random UUID (fallback — only hit when DNS and env are both silent)
|
||||||
|
* </ol>
|
||||||
|
*/
|
||||||
|
@Configuration
|
||||||
|
public class ServerInstanceIdConfig {
|
||||||
|
|
||||||
|
private static final Logger log = LoggerFactory.getLogger(ServerInstanceIdConfig.class);
|
||||||
|
|
||||||
|
@Bean("serverInstanceId")
|
||||||
|
public String serverInstanceId(
|
||||||
|
@Value("${cameleer.server.instance-id:}") String configuredId) {
|
||||||
|
if (!isBlank(configuredId)) {
|
||||||
|
log.info("Server instance id resolved from configuration: {}", configuredId);
|
||||||
|
return configuredId;
|
||||||
|
}
|
||||||
|
|
||||||
|
String hostnameEnv = System.getenv("HOSTNAME");
|
||||||
|
if (!isBlank(hostnameEnv)) {
|
||||||
|
log.info("Server instance id resolved from HOSTNAME env: {}", hostnameEnv);
|
||||||
|
return hostnameEnv;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
String localHost = InetAddress.getLocalHost().getHostName();
|
||||||
|
if (!isBlank(localHost)) {
|
||||||
|
log.info("Server instance id resolved from localhost lookup: {}", localHost);
|
||||||
|
return localHost;
|
||||||
|
}
|
||||||
|
} catch (UnknownHostException e) {
|
||||||
|
log.debug("InetAddress.getLocalHost() failed, falling back to UUID: {}", e.getMessage());
|
||||||
|
}
|
||||||
|
|
||||||
|
String fallback = UUID.randomUUID().toString();
|
||||||
|
log.warn("Server instance id could not be resolved; using random UUID {}", fallback);
|
||||||
|
return fallback;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static boolean isBlank(String s) {
|
||||||
|
return s == null || s.isBlank();
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,106 @@
|
|||||||
|
package com.cameleer.server.app.metrics;
|
||||||
|
|
||||||
|
import com.cameleer.server.core.storage.ServerMetricsStore;
import com.cameleer.server.core.storage.model.ServerMetricSample;
import io.micrometer.core.instrument.Measurement;
import io.micrometer.core.instrument.Meter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import java.time.Instant;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Periodically snapshots every meter in the server's {@link MeterRegistry}
|
||||||
|
* and writes the result to ClickHouse via {@link ServerMetricsStore}. This
|
||||||
|
* gives us historical server-health data (buffer depths, agent transitions,
|
||||||
|
* flush latency, JVM memory, HTTP response counts, etc.) without requiring
|
||||||
|
* an external Prometheus.
|
||||||
|
*
|
||||||
|
* <p>Each Micrometer {@link Meter#measure() measurement} becomes one row, so
|
||||||
|
* a single Timer produces rows for {@code count}, {@code total_time}, and
|
||||||
|
* {@code max} each tick. Counter values are cumulative since meter
|
||||||
|
* registration (Prometheus convention) — callers compute rate() themselves.
|
||||||
|
*
|
||||||
|
* <p>Disabled via {@code cameleer.server.self-metrics.enabled=false}.
|
||||||
|
*/
|
||||||
|
@Component
|
||||||
|
@ConditionalOnProperty(
|
||||||
|
prefix = "cameleer.server.self-metrics",
|
||||||
|
name = "enabled",
|
||||||
|
havingValue = "true",
|
||||||
|
matchIfMissing = true)
|
||||||
|
public class ServerMetricsSnapshotScheduler {
|
||||||
|
|
||||||
|
private static final Logger log = LoggerFactory.getLogger(ServerMetricsSnapshotScheduler.class);
|
||||||
|
|
||||||
|
private final MeterRegistry registry;
|
||||||
|
private final ServerMetricsStore store;
|
||||||
|
private final String tenantId;
|
||||||
|
private final String serverInstanceId;
|
||||||
|
|
||||||
|
public ServerMetricsSnapshotScheduler(
|
||||||
|
MeterRegistry registry,
|
||||||
|
ServerMetricsStore store,
|
||||||
|
@Value("${cameleer.server.tenant.id:default}") String tenantId,
|
||||||
|
@Qualifier("serverInstanceId") String serverInstanceId) {
|
||||||
|
this.registry = registry;
|
||||||
|
this.store = store;
|
||||||
|
this.tenantId = tenantId;
|
||||||
|
this.serverInstanceId = serverInstanceId;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Scheduled(fixedDelayString = "${cameleer.server.self-metrics.interval-ms:60000}",
|
||||||
|
initialDelayString = "${cameleer.server.self-metrics.interval-ms:60000}")
|
||||||
|
public void snapshot() {
|
||||||
|
try {
|
||||||
|
Instant now = Instant.now();
|
||||||
|
List<ServerMetricSample> batch = new ArrayList<>();
|
||||||
|
|
||||||
|
for (Meter meter : registry.getMeters()) {
|
||||||
|
Meter.Id id = meter.getId();
|
||||||
|
Map<String, String> tags = flattenTags(id.getTagsAsIterable());
|
||||||
|
String type = id.getType().name().toLowerCase();
|
||||||
|
|
||||||
|
for (Measurement m : meter.measure()) {
|
||||||
|
double v = m.getValue();
|
||||||
|
if (!Double.isFinite(v)) continue;
|
||||||
|
batch.add(new ServerMetricSample(
|
||||||
|
tenantId,
|
||||||
|
now,
|
||||||
|
serverInstanceId,
|
||||||
|
id.getName(),
|
||||||
|
type,
|
||||||
|
m.getStatistic().getTagValueRepresentation(),
|
||||||
|
v,
|
||||||
|
tags));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!batch.isEmpty()) {
|
||||||
|
store.insertBatch(batch);
|
||||||
|
log.debug("Persisted {} server self-metric samples", batch.size());
|
||||||
|
}
|
||||||
|
} catch (Exception e) {
|
||||||
|
log.warn("Server self-metrics snapshot failed: {}", e.getMessage());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static Map<String, String> flattenTags(Iterable<Tag> tags) {
|
||||||
|
Map<String, String> out = new LinkedHashMap<>();
|
||||||
|
for (Tag t : tags) {
|
||||||
|
out.put(t.getKey(), t.getValue());
|
||||||
|
}
|
||||||
|
return out;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,5 +1,6 @@
|
|||||||
package com.cameleer.server.app.outbound;
|
package com.cameleer.server.app.outbound;
|
||||||
|
|
||||||
|
import com.cameleer.server.app.license.LicenseEnforcer;
|
||||||
import com.cameleer.server.core.alerting.AlertRuleRepository;
|
import com.cameleer.server.core.alerting.AlertRuleRepository;
|
||||||
import com.cameleer.server.core.outbound.OutboundConnection;
|
import com.cameleer.server.core.outbound.OutboundConnection;
|
||||||
import com.cameleer.server.core.outbound.OutboundConnectionRepository;
|
import com.cameleer.server.core.outbound.OutboundConnectionRepository;
|
||||||
@@ -18,21 +19,25 @@ public class OutboundConnectionServiceImpl implements OutboundConnectionService
|
|||||||
private final OutboundConnectionRepository repo;
|
private final OutboundConnectionRepository repo;
|
||||||
private final AlertRuleRepository ruleRepo;
|
private final AlertRuleRepository ruleRepo;
|
||||||
private final SsrfGuard ssrfGuard;
|
private final SsrfGuard ssrfGuard;
|
||||||
|
private final LicenseEnforcer licenseEnforcer;
|
||||||
private final String tenantId;
|
private final String tenantId;
|
||||||
|
|
||||||
public OutboundConnectionServiceImpl(
|
public OutboundConnectionServiceImpl(
|
||||||
OutboundConnectionRepository repo,
|
OutboundConnectionRepository repo,
|
||||||
AlertRuleRepository ruleRepo,
|
AlertRuleRepository ruleRepo,
|
||||||
SsrfGuard ssrfGuard,
|
SsrfGuard ssrfGuard,
|
||||||
|
LicenseEnforcer licenseEnforcer,
|
||||||
String tenantId) {
|
String tenantId) {
|
||||||
this.repo = repo;
|
this.repo = repo;
|
||||||
this.ruleRepo = ruleRepo;
|
this.ruleRepo = ruleRepo;
|
||||||
this.ssrfGuard = ssrfGuard;
|
this.ssrfGuard = ssrfGuard;
|
||||||
|
this.licenseEnforcer = licenseEnforcer;
|
||||||
this.tenantId = tenantId;
|
this.tenantId = tenantId;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public OutboundConnection create(OutboundConnection draft, String actingUserId) {
|
public OutboundConnection create(OutboundConnection draft, String actingUserId) {
|
||||||
|
licenseEnforcer.assertWithinCap("max_outbound_connections", repo.listByTenant(tenantId).size(), 1);
|
||||||
assertNameUnique(draft.name(), null);
|
assertNameUnique(draft.name(), null);
|
||||||
validateUrl(draft.url());
|
validateUrl(draft.url());
|
||||||
OutboundConnection c = new OutboundConnection(
|
OutboundConnection c = new OutboundConnection(
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
package com.cameleer.server.app.outbound.config;
|
package com.cameleer.server.app.outbound.config;
|
||||||
|
|
||||||
|
import com.cameleer.server.app.license.LicenseEnforcer;
|
||||||
import com.cameleer.server.app.outbound.OutboundConnectionServiceImpl;
|
import com.cameleer.server.app.outbound.OutboundConnectionServiceImpl;
|
||||||
import com.cameleer.server.app.outbound.SsrfGuard;
|
import com.cameleer.server.app.outbound.SsrfGuard;
|
||||||
import com.cameleer.server.app.outbound.crypto.SecretCipher;
|
import com.cameleer.server.app.outbound.crypto.SecretCipher;
|
||||||
@@ -33,7 +34,8 @@ public class OutboundBeanConfig {
|
|||||||
OutboundConnectionRepository repo,
|
OutboundConnectionRepository repo,
|
||||||
AlertRuleRepository ruleRepo,
|
AlertRuleRepository ruleRepo,
|
||||||
SsrfGuard ssrfGuard,
|
SsrfGuard ssrfGuard,
|
||||||
|
LicenseEnforcer licenseEnforcer,
|
||||||
@Value("${cameleer.server.tenant.id:default}") String tenantId) {
|
@Value("${cameleer.server.tenant.id:default}") String tenantId) {
|
||||||
return new OutboundConnectionServiceImpl(repo, ruleRepo, ssrfGuard, tenantId);
|
return new OutboundConnectionServiceImpl(repo, ruleRepo, ssrfGuard, licenseEnforcer, tenantId);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,8 @@
|
|||||||
package com.cameleer.server.app.runtime;
|
package com.cameleer.server.app.runtime;
|
||||||
|
|
||||||
import com.cameleer.common.model.ApplicationConfig;
|
import com.cameleer.common.model.ApplicationConfig;
|
||||||
|
import com.cameleer.server.app.license.LicenseEnforcer;
|
||||||
|
import com.cameleer.server.app.license.LicenseUsageReader;
|
||||||
import com.cameleer.server.app.metrics.ServerMetrics;
|
import com.cameleer.server.app.metrics.ServerMetrics;
|
||||||
import com.cameleer.server.app.storage.PostgresApplicationConfigRepository;
|
import com.cameleer.server.app.storage.PostgresApplicationConfigRepository;
|
||||||
import com.cameleer.server.app.storage.PostgresDeploymentRepository;
|
import com.cameleer.server.app.storage.PostgresDeploymentRepository;
|
||||||
@@ -28,6 +30,8 @@ public class DeploymentExecutor {
|
|||||||
private final DeploymentRepository deploymentRepository;
|
private final DeploymentRepository deploymentRepository;
|
||||||
private final PostgresDeploymentRepository pgDeployRepo;
|
private final PostgresDeploymentRepository pgDeployRepo;
|
||||||
private final PostgresApplicationConfigRepository applicationConfigRepository;
|
private final PostgresApplicationConfigRepository applicationConfigRepository;
|
||||||
|
private final LicenseEnforcer licenseEnforcer;
|
||||||
|
private final LicenseUsageReader licenseUsageReader;
|
||||||
|
|
||||||
@Autowired(required = false)
|
@Autowired(required = false)
|
||||||
private DockerNetworkManager networkManager;
|
private DockerNetworkManager networkManager;
|
||||||
@@ -62,6 +66,9 @@ public class DeploymentExecutor {
|
|||||||
@Value("${cameleer.server.runtime.serverurl:}")
|
@Value("${cameleer.server.runtime.serverurl:}")
|
||||||
private String globalServerUrl;
|
private String globalServerUrl;
|
||||||
|
|
||||||
|
@Value("${cameleer.server.runtime.certresolver:}")
|
||||||
|
private String globalCertResolver;
|
||||||
|
|
||||||
@Value("${cameleer.server.runtime.jardockervolume:}")
|
@Value("${cameleer.server.runtime.jardockervolume:}")
|
||||||
private String jarDockerVolume;
|
private String jarDockerVolume;
|
||||||
|
|
||||||
@@ -79,7 +86,9 @@ public class DeploymentExecutor {
|
|||||||
AppService appService,
|
AppService appService,
|
||||||
EnvironmentService envService,
|
EnvironmentService envService,
|
||||||
DeploymentRepository deploymentRepository,
|
DeploymentRepository deploymentRepository,
|
||||||
PostgresApplicationConfigRepository applicationConfigRepository) {
|
PostgresApplicationConfigRepository applicationConfigRepository,
|
||||||
|
LicenseEnforcer licenseEnforcer,
|
||||||
|
LicenseUsageReader licenseUsageReader) {
|
||||||
this.orchestrator = orchestrator;
|
this.orchestrator = orchestrator;
|
||||||
this.deploymentService = deploymentService;
|
this.deploymentService = deploymentService;
|
||||||
this.appService = appService;
|
this.appService = appService;
|
||||||
@@ -87,6 +96,8 @@ public class DeploymentExecutor {
|
|||||||
this.deploymentRepository = deploymentRepository;
|
this.deploymentRepository = deploymentRepository;
|
||||||
this.pgDeployRepo = (PostgresDeploymentRepository) deploymentRepository;
|
this.pgDeployRepo = (PostgresDeploymentRepository) deploymentRepository;
|
||||||
this.applicationConfigRepository = applicationConfigRepository;
|
this.applicationConfigRepository = applicationConfigRepository;
|
||||||
|
this.licenseEnforcer = licenseEnforcer;
|
||||||
|
this.licenseUsageReader = licenseUsageReader;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Deployment-scoped id suffix — distinguishes container names and
|
/** Deployment-scoped id suffix — distinguishes container names and
|
||||||
@@ -131,7 +142,8 @@ public class DeploymentExecutor {
|
|||||||
globalCpuShares,
|
globalCpuShares,
|
||||||
globalRoutingMode,
|
globalRoutingMode,
|
||||||
globalRoutingDomain,
|
globalRoutingDomain,
|
||||||
globalServerUrl.isBlank() ? "http://cameleer-server:8081" : globalServerUrl
|
globalServerUrl.isBlank() ? "http://cameleer-server:8081" : globalServerUrl,
|
||||||
|
globalCertResolver.isBlank() ? null : globalCertResolver
|
||||||
);
|
);
|
||||||
ResolvedContainerConfig config = ConfigMerger.resolve(
|
ResolvedContainerConfig config = ConfigMerger.resolve(
|
||||||
globalDefaults, env.defaultContainerConfig(), app.containerConfig());
|
globalDefaults, env.defaultContainerConfig(), app.containerConfig());
|
||||||
@@ -143,6 +155,19 @@ public class DeploymentExecutor {
|
|||||||
updateStage(deployment.id(), DeployStage.PRE_FLIGHT);
|
updateStage(deployment.id(), DeployStage.PRE_FLIGHT);
|
||||||
preFlightChecks(jarPath, config);
|
preFlightChecks(jarPath, config);
|
||||||
|
|
||||||
|
// === LICENSE COMPUTE CAPS ===
|
||||||
|
// Spec §4.1: sum cpu/memory/replicas across non-stopped deployments + new request
|
||||||
|
// must fit within the effective tier caps. Throws LicenseCapExceededException, which
|
||||||
|
// the surrounding try/catch turns into a FAILED deployment with the cap message
|
||||||
|
// landing in deployments.error_message.
|
||||||
|
int reqCpu = config.cpuLimit() == null ? 0 : config.cpuLimit();
|
||||||
|
int reqMem = config.memoryLimitMb();
|
||||||
|
int reqReps = config.replicas();
|
||||||
|
LicenseUsageReader.ComputeUsage usage = licenseUsageReader.computeUsage();
|
||||||
|
licenseEnforcer.assertWithinCap("max_total_cpu_millis", usage.cpuMillis(), (long) reqCpu * reqReps);
|
||||||
|
licenseEnforcer.assertWithinCap("max_total_memory_mb", usage.memoryMb(), (long) reqMem * reqReps);
|
||||||
|
licenseEnforcer.assertWithinCap("max_total_replicas", usage.replicas(), reqReps);
|
||||||
|
|
||||||
// Resolve runtime type
|
// Resolve runtime type
|
||||||
String resolvedRuntimeType = config.runtimeType();
|
String resolvedRuntimeType = config.runtimeType();
|
||||||
String mainClass = null;
|
String mainClass = null;
|
||||||
@@ -605,6 +630,10 @@ public class DeploymentExecutor {
|
|||||||
map.put("runtimeType", config.runtimeType());
|
map.put("runtimeType", config.runtimeType());
|
||||||
map.put("customArgs", config.customArgs());
|
map.put("customArgs", config.customArgs());
|
||||||
map.put("extraNetworks", config.extraNetworks());
|
map.put("extraNetworks", config.extraNetworks());
|
||||||
|
map.put("externalRouting", config.externalRouting());
|
||||||
|
if (config.certResolver() != null) {
|
||||||
|
map.put("certResolver", config.certResolver());
|
||||||
|
}
|
||||||
return map;
|
return map;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import com.github.dockerjava.api.DockerClient;
|
|||||||
import com.github.dockerjava.api.async.ResultCallback;
|
import com.github.dockerjava.api.async.ResultCallback;
|
||||||
import com.github.dockerjava.api.model.AccessMode;
|
import com.github.dockerjava.api.model.AccessMode;
|
||||||
import com.github.dockerjava.api.model.Bind;
|
import com.github.dockerjava.api.model.Bind;
|
||||||
|
import com.github.dockerjava.api.model.Capability;
|
||||||
import com.github.dockerjava.api.model.Frame;
|
import com.github.dockerjava.api.model.Frame;
|
||||||
import com.github.dockerjava.api.model.HealthCheck;
|
import com.github.dockerjava.api.model.HealthCheck;
|
||||||
import com.github.dockerjava.api.model.HostConfig;
|
import com.github.dockerjava.api.model.HostConfig;
|
||||||
@@ -25,12 +26,58 @@ import java.util.stream.Stream;
|
|||||||
public class DockerRuntimeOrchestrator implements RuntimeOrchestrator {
|
public class DockerRuntimeOrchestrator implements RuntimeOrchestrator {
|
||||||
|
|
||||||
private static final Logger log = LoggerFactory.getLogger(DockerRuntimeOrchestrator.class);
|
private static final Logger log = LoggerFactory.getLogger(DockerRuntimeOrchestrator.class);
|
||||||
|
|
||||||
|
/** Sandboxed runtime we prefer when the daemon has it registered. */
|
||||||
|
private static final String SANDBOX_RUNTIME = "runsc";
|
||||||
|
|
||||||
|
/** Hard cap on processes/threads per tenant container. Spring Boot + Camel
|
||||||
|
* + a Kafka client comfortably fits in 512; raise via daemon-wide limits if
|
||||||
|
* a tenant legitimately needs more (and revisit the multi-tenancy threat
|
||||||
|
* model when that happens). */
|
||||||
|
private static final long PIDS_LIMIT = 512L;
|
||||||
|
|
||||||
|
/** /tmp must be writeable for JVM tmpdir, JIT scratch, and JNI native lib
|
||||||
|
* unpacking (Netty tcnative, Snappy, LZ4, Zstd all dlopen from here).
|
||||||
|
* `noexec` would block dlopen via mmap(PROT_EXEC) — keep it off. */
|
||||||
|
private static final String TMPFS_TMP_OPTS = "rw,nosuid,size=256m";
|
||||||
|
|
||||||
private final DockerClient dockerClient;
|
private final DockerClient dockerClient;
|
||||||
|
private final String dockerRuntime;
|
||||||
|
|
||||||
private ContainerLogForwarder logForwarder;
|
private ContainerLogForwarder logForwarder;
|
||||||
|
|
||||||
public DockerRuntimeOrchestrator(DockerClient dockerClient) {
|
public DockerRuntimeOrchestrator(DockerClient dockerClient) {
|
||||||
|
this(dockerClient, "");
|
||||||
|
}
|
||||||
|
|
||||||
|
public DockerRuntimeOrchestrator(DockerClient dockerClient, String runtimeOverride) {
|
||||||
this.dockerClient = dockerClient;
|
this.dockerClient = dockerClient;
|
||||||
|
this.dockerRuntime = resolveRuntime(runtimeOverride);
|
||||||
|
}
|
||||||
|
|
||||||
|
private String resolveRuntime(String override) {
|
||||||
|
if (override != null && !override.isBlank()) {
|
||||||
|
log.info("Container runtime forced to '{}' via cameleer.server.runtime.dockerruntime", override);
|
||||||
|
return override;
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
Map<String, ?> runtimes = dockerClient.infoCmd().exec().getRuntimes();
|
||||||
|
if (runtimes != null && runtimes.containsKey(SANDBOX_RUNTIME)) {
|
||||||
|
log.info("gVisor ({}) detected — sandboxed runtime will be used for tenant containers",
|
||||||
|
SANDBOX_RUNTIME);
|
||||||
|
return SANDBOX_RUNTIME;
|
||||||
|
}
|
||||||
|
} catch (Exception e) {
|
||||||
|
log.warn("Could not query Docker runtimes: {} — falling back to daemon default", e.getMessage());
|
||||||
|
}
|
||||||
|
log.info("No sandboxed runtime detected — using Docker default (runc). Install gVisor on the host "
|
||||||
|
+ "for tenant kernel isolation; see issue #152.");
|
||||||
|
return "";
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Visible for tests / introspection. Empty string = let Docker pick its default. */
|
||||||
|
String getDockerRuntime() {
|
||||||
|
return dockerRuntime;
|
||||||
}
|
}
|
||||||
|
|
||||||
public void setLogForwarder(ContainerLogForwarder logForwarder) {
|
public void setLogForwarder(ContainerLogForwarder logForwarder) {
|
||||||
@@ -68,12 +115,36 @@ public class DockerRuntimeOrchestrator implements RuntimeOrchestrator {
|
|||||||
List<String> envList = request.envVars().entrySet().stream()
|
List<String> envList = request.envVars().entrySet().stream()
|
||||||
.map(e -> e.getKey() + "=" + e.getValue()).toList();
|
.map(e -> e.getKey() + "=" + e.getValue()).toList();
|
||||||
|
|
||||||
|
// Tenant containers run untrusted user JVMs — every tenant JAR can call
|
||||||
|
// Runtime.exec, reflective bean dispatch, MVEL/Groovy templating. Java 17
|
||||||
|
// has no SecurityManager, so isolation MUST live below the JVM.
|
||||||
|
// See issue #152 for the full threat model. Defaults are fail-closed:
|
||||||
|
// - cap_drop ALL: outbound TCP still works (no caps needed); raw sockets,
|
||||||
|
// ptrace, mounts, and bind <1024 are all denied.
|
||||||
|
// - no-new-privileges: setuid binaries cannot escalate.
|
||||||
|
// - apparmor=docker-default: Docker's stock MAC profile.
|
||||||
|
// Daemon's default seccomp profile is applied implicitly when no
|
||||||
|
// `seccomp=` override is set — no need to declare it.
|
||||||
|
// - readonly rootfs + /tmp tmpfs: persistence-via-write defeated; apps
|
||||||
|
// needing durable state declare writeableVolumes (issue #153).
|
||||||
|
// - pids-limit: fork bombs cannot exhaust the host PID namespace.
|
||||||
HostConfig hostConfig = HostConfig.newHostConfig()
|
HostConfig hostConfig = HostConfig.newHostConfig()
|
||||||
.withMemory(request.memoryLimitBytes())
|
.withMemory(request.memoryLimitBytes())
|
||||||
.withMemorySwap(request.memoryLimitBytes())
|
.withMemorySwap(request.memoryLimitBytes())
|
||||||
.withCpuShares(request.cpuShares())
|
.withCpuShares(request.cpuShares())
|
||||||
.withNetworkMode(request.network())
|
.withNetworkMode(request.network())
|
||||||
.withRestartPolicy(RestartPolicy.onFailureRestart(request.restartPolicyMaxRetries()));
|
.withRestartPolicy(RestartPolicy.onFailureRestart(request.restartPolicyMaxRetries()))
|
||||||
|
.withCapDrop(Capability.values())
|
||||||
|
.withSecurityOpts(List.of(
|
||||||
|
"no-new-privileges:true",
|
||||||
|
"apparmor=docker-default"))
|
||||||
|
.withReadonlyRootfs(true)
|
||||||
|
.withPidsLimit(PIDS_LIMIT)
|
||||||
|
.withTmpFs(Map.of("/tmp", TMPFS_TMP_OPTS));
|
||||||
|
|
||||||
|
if (!dockerRuntime.isBlank()) {
|
||||||
|
hostConfig.withRuntime(dockerRuntime);
|
||||||
|
}
|
||||||
|
|
||||||
// JAR mounting: volume mount (Docker-in-Docker) or bind mount (host path)
|
// JAR mounting: volume mount (Docker-in-Docker) or bind mount (host path)
|
||||||
if (request.jarVolumeName() != null && !request.jarVolumeName().isBlank()) {
|
if (request.jarVolumeName() != null && !request.jarVolumeName().isBlank()) {
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import com.github.dockerjava.zerodep.ZerodepDockerHttpClient;
|
|||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
|
import org.springframework.beans.factory.annotation.Value;
|
||||||
import org.springframework.context.annotation.Bean;
|
import org.springframework.context.annotation.Bean;
|
||||||
import org.springframework.context.annotation.Configuration;
|
import org.springframework.context.annotation.Configuration;
|
||||||
|
|
||||||
@@ -41,10 +42,12 @@ public class RuntimeOrchestratorAutoConfig {
|
|||||||
@Bean
|
@Bean
|
||||||
public RuntimeOrchestrator runtimeOrchestrator(
|
public RuntimeOrchestrator runtimeOrchestrator(
|
||||||
@Autowired(required = false) DockerClient dockerClient,
|
@Autowired(required = false) DockerClient dockerClient,
|
||||||
@Autowired(required = false) ContainerLogForwarder logForwarder) {
|
@Autowired(required = false) ContainerLogForwarder logForwarder,
|
||||||
|
@Value("${cameleer.server.runtime.dockerruntime:}") String dockerRuntimeOverride) {
|
||||||
if (dockerClient != null) {
|
if (dockerClient != null) {
|
||||||
log.info("Docker socket detected - enabling Docker runtime orchestrator");
|
log.info("Docker socket detected - enabling Docker runtime orchestrator");
|
||||||
DockerRuntimeOrchestrator orchestrator = new DockerRuntimeOrchestrator(dockerClient);
|
DockerRuntimeOrchestrator orchestrator =
|
||||||
|
new DockerRuntimeOrchestrator(dockerClient, dockerRuntimeOverride);
|
||||||
if (logForwarder != null) {
|
if (logForwarder != null) {
|
||||||
orchestrator.setLogForwarder(logForwarder);
|
orchestrator.setLogForwarder(logForwarder);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,7 +19,6 @@ public final class TraefikLabelBuilder {
|
|||||||
String instanceId = envSlug + "-" + appSlug + "-" + replicaIndex + "-" + generation;
|
String instanceId = envSlug + "-" + appSlug + "-" + replicaIndex + "-" + generation;
|
||||||
Map<String, String> labels = new LinkedHashMap<>();
|
Map<String, String> labels = new LinkedHashMap<>();
|
||||||
|
|
||||||
labels.put("traefik.enable", "true");
|
|
||||||
labels.put("managed-by", "cameleer-server");
|
labels.put("managed-by", "cameleer-server");
|
||||||
labels.put("cameleer.tenant", tenantId);
|
labels.put("cameleer.tenant", tenantId);
|
||||||
labels.put("cameleer.app", appSlug);
|
labels.put("cameleer.app", appSlug);
|
||||||
@@ -28,6 +27,11 @@ public final class TraefikLabelBuilder {
|
|||||||
labels.put("cameleer.generation", generation);
|
labels.put("cameleer.generation", generation);
|
||||||
labels.put("cameleer.instance-id", instanceId);
|
labels.put("cameleer.instance-id", instanceId);
|
||||||
|
|
||||||
|
if (!config.externalRouting()) {
|
||||||
|
return labels;
|
||||||
|
}
|
||||||
|
|
||||||
|
labels.put("traefik.enable", "true");
|
||||||
labels.put("traefik.http.services." + svc + ".loadbalancer.server.port",
|
labels.put("traefik.http.services." + svc + ".loadbalancer.server.port",
|
||||||
String.valueOf(config.appPort()));
|
String.valueOf(config.appPort()));
|
||||||
|
|
||||||
@@ -51,7 +55,10 @@ public final class TraefikLabelBuilder {
|
|||||||
|
|
||||||
if (config.sslOffloading()) {
|
if (config.sslOffloading()) {
|
||||||
labels.put("traefik.http.routers." + svc + ".tls", "true");
|
labels.put("traefik.http.routers." + svc + ".tls", "true");
|
||||||
labels.put("traefik.http.routers." + svc + ".tls.certresolver", "default");
|
if (config.certResolver() != null && !config.certResolver().isBlank()) {
|
||||||
|
labels.put("traefik.http.routers." + svc + ".tls.certresolver",
|
||||||
|
config.certResolver());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return labels;
|
return labels;
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package com.cameleer.server.app.search;
|
package com.cameleer.server.app.search;
|
||||||
|
|
||||||
import com.cameleer.server.core.alerting.AlertMatchSpec;
|
import com.cameleer.server.core.alerting.AlertMatchSpec;
|
||||||
|
import com.cameleer.server.core.search.AttributeFilter;
|
||||||
import com.cameleer.server.core.search.ExecutionSummary;
|
import com.cameleer.server.core.search.ExecutionSummary;
|
||||||
import com.cameleer.server.core.search.SearchRequest;
|
import com.cameleer.server.core.search.SearchRequest;
|
||||||
import com.cameleer.server.core.search.SearchResult;
|
import com.cameleer.server.core.search.SearchResult;
|
||||||
@@ -256,6 +257,23 @@ public class ClickHouseSearchIndex implements SearchIndex {
|
|||||||
params.add(likeTerm);
|
params.add(likeTerm);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Structured attribute filters. Keys were validated at AttributeFilter construction
|
||||||
|
// time against ^[a-zA-Z0-9._-]+$ so they are safe to single-quote-inline; the JSON path
|
||||||
|
// argument of JSONExtractString does not accept a ? placeholder in ClickHouse JDBC
|
||||||
|
// (same constraint as countExecutionsForAlerting below). Values are parameter-bound.
|
||||||
|
for (AttributeFilter filter : request.attributeFilters()) {
|
||||||
|
String escapedKey = filter.key().replace("'", "\\'");
|
||||||
|
if (filter.isKeyOnly()) {
|
||||||
|
conditions.add("JSONHas(attributes, '" + escapedKey + "')");
|
||||||
|
} else if (filter.isWildcard()) {
|
||||||
|
conditions.add("JSONExtractString(attributes, '" + escapedKey + "') LIKE ?");
|
||||||
|
params.add(filter.toLikePattern());
|
||||||
|
} else {
|
||||||
|
conditions.add("JSONExtractString(attributes, '" + escapedKey + "') = ?");
|
||||||
|
params.add(filter.value());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return String.join(" AND ", conditions);
|
return String.join(" AND ", conditions);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,52 @@
|
|||||||
|
package com.cameleer.server.app.security;
|
||||||
|
|
||||||
|
import com.cameleer.server.app.dto.AuthCapabilitiesResponse;
|
||||||
|
import com.cameleer.server.core.security.OidcConfig;
|
||||||
|
import com.cameleer.server.core.security.OidcConfigRepository;
|
||||||
|
import io.swagger.v3.oas.annotations.Operation;
|
||||||
|
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||||
|
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||||
|
import org.springframework.http.ResponseEntity;
|
||||||
|
import org.springframework.web.bind.annotation.GetMapping;
|
||||||
|
import org.springframework.web.bind.annotation.RequestMapping;
|
||||||
|
import org.springframework.web.bind.annotation.RestController;
|
||||||
|
|
||||||
|
import java.util.Optional;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reports auth capabilities so the SPA renders the login page deterministically
|
||||||
|
* instead of inferring from {@code GET /api/v1/auth/oidc/config} 200/404.
|
||||||
|
*
|
||||||
|
* <p>Unauthenticated by design — the SPA calls this before any sign-in attempt.
|
||||||
|
* Inherits permit-all from the {@code /api/v1/auth/**} matcher in
|
||||||
|
* {@link SecurityConfig}.
|
||||||
|
*
|
||||||
|
* <p>Future deferred work (issue #154) extends this same payload with MFA
|
||||||
|
* enrollment URL and password-reset URL fields.
|
||||||
|
*/
|
||||||
|
@RestController
|
||||||
|
@RequestMapping("/api/v1/auth")
|
||||||
|
@Tag(name = "Authentication", description = "Login and token refresh endpoints")
|
||||||
|
public class AuthCapabilitiesController {
|
||||||
|
|
||||||
|
private final OidcConfigRepository oidcConfigRepository;
|
||||||
|
|
||||||
|
public AuthCapabilitiesController(OidcConfigRepository oidcConfigRepository) {
|
||||||
|
this.oidcConfigRepository = oidcConfigRepository;
|
||||||
|
}
|
||||||
|
|
||||||
|
@GetMapping("/capabilities")
|
||||||
|
@Operation(summary = "Auth capabilities for the SPA login page")
|
||||||
|
@ApiResponse(responseCode = "200", description = "Capabilities resolved")
|
||||||
|
public ResponseEntity<AuthCapabilitiesResponse> getCapabilities() {
|
||||||
|
Optional<OidcConfig> config = oidcConfigRepository.find();
|
||||||
|
boolean oidcEnabled = config.isPresent() && config.get().enabled();
|
||||||
|
String providerName = oidcEnabled
|
||||||
|
? OidcProviderNameDeriver.deriveName(config.get().issuerUri())
|
||||||
|
: "";
|
||||||
|
|
||||||
|
var oidc = new AuthCapabilitiesResponse.Oidc(oidcEnabled, providerName, oidcEnabled);
|
||||||
|
var local = new AuthCapabilitiesResponse.LocalAccounts(true, oidcEnabled);
|
||||||
|
return ResponseEntity.ok(new AuthCapabilitiesResponse(oidc, local));
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -3,6 +3,7 @@ package com.cameleer.server.app.security;
|
|||||||
import com.cameleer.server.app.dto.AuthTokenResponse;
|
import com.cameleer.server.app.dto.AuthTokenResponse;
|
||||||
import com.cameleer.server.app.dto.ErrorResponse;
|
import com.cameleer.server.app.dto.ErrorResponse;
|
||||||
import com.cameleer.server.app.dto.OidcPublicConfigResponse;
|
import com.cameleer.server.app.dto.OidcPublicConfigResponse;
|
||||||
|
import com.cameleer.server.app.license.LicenseEnforcer;
|
||||||
import com.cameleer.server.core.admin.AuditCategory;
|
import com.cameleer.server.core.admin.AuditCategory;
|
||||||
import com.cameleer.server.core.admin.AuditResult;
|
import com.cameleer.server.core.admin.AuditResult;
|
||||||
import com.cameleer.server.core.admin.AuditService;
|
import com.cameleer.server.core.admin.AuditService;
|
||||||
@@ -63,6 +64,7 @@ public class OidcAuthController {
|
|||||||
private final ClaimMappingService claimMappingService;
|
private final ClaimMappingService claimMappingService;
|
||||||
private final ClaimMappingRepository claimMappingRepository;
|
private final ClaimMappingRepository claimMappingRepository;
|
||||||
private final GroupRepository groupRepository;
|
private final GroupRepository groupRepository;
|
||||||
|
private final LicenseEnforcer licenseEnforcer;
|
||||||
|
|
||||||
public OidcAuthController(OidcTokenExchanger tokenExchanger,
|
public OidcAuthController(OidcTokenExchanger tokenExchanger,
|
||||||
OidcConfigRepository configRepository,
|
OidcConfigRepository configRepository,
|
||||||
@@ -72,7 +74,8 @@ public class OidcAuthController {
|
|||||||
RbacService rbacService,
|
RbacService rbacService,
|
||||||
ClaimMappingService claimMappingService,
|
ClaimMappingService claimMappingService,
|
||||||
ClaimMappingRepository claimMappingRepository,
|
ClaimMappingRepository claimMappingRepository,
|
||||||
GroupRepository groupRepository) {
|
GroupRepository groupRepository,
|
||||||
|
LicenseEnforcer licenseEnforcer) {
|
||||||
this.tokenExchanger = tokenExchanger;
|
this.tokenExchanger = tokenExchanger;
|
||||||
this.configRepository = configRepository;
|
this.configRepository = configRepository;
|
||||||
this.jwtService = jwtService;
|
this.jwtService = jwtService;
|
||||||
@@ -82,6 +85,7 @@ public class OidcAuthController {
|
|||||||
this.claimMappingService = claimMappingService;
|
this.claimMappingService = claimMappingService;
|
||||||
this.claimMappingRepository = claimMappingRepository;
|
this.claimMappingRepository = claimMappingRepository;
|
||||||
this.groupRepository = groupRepository;
|
this.groupRepository = groupRepository;
|
||||||
|
this.licenseEnforcer = licenseEnforcer;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -154,6 +158,13 @@ public class OidcAuthController {
|
|||||||
"Account not provisioned. Contact your administrator.");
|
"Account not provisioned. Contact your administrator.");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Auto-signup branch: when the user does not yet exist and the IdP is allowed to
|
||||||
|
// provision new accounts, enforce the max_users license cap before persisting.
|
||||||
|
// The global LicenseExceptionAdvice maps this to a structured 403 envelope.
|
||||||
|
if (existingUser.isEmpty() && config.get().autoSignup()) {
|
||||||
|
licenseEnforcer.assertWithinCap("max_users", userRepository.count(), 1);
|
||||||
|
}
|
||||||
|
|
||||||
userRepository.upsert(new UserInfo(
|
userRepository.upsert(new UserInfo(
|
||||||
userId, provider, oidcUser.email(), oidcUser.name(), Instant.now()));
|
userId, provider, oidcUser.email(), oidcUser.name(), Instant.now()));
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,41 @@
|
|||||||
|
package com.cameleer.server.app.security;
|
||||||
|
|
||||||
|
import java.net.URI;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Pure utility — derives a display label for an OIDC provider from its issuer URI.
|
||||||
|
* Used by {@link AuthCapabilitiesController} so the SPA can render
|
||||||
|
* "Sign in with {providerName}" on the login page.
|
||||||
|
*
|
||||||
|
* <p>Pattern-match only — never network-discover. If the issuer doesn't match a
|
||||||
|
* known vendor pattern, we return the generic "Single Sign-On" label rather than
|
||||||
|
* leaking hostnames into the UI.
|
||||||
|
*/
|
||||||
|
public final class OidcProviderNameDeriver {
|
||||||
|
|
||||||
|
private static final String GENERIC = "Single Sign-On";
|
||||||
|
|
||||||
|
private OidcProviderNameDeriver() {}
|
||||||
|
|
||||||
|
public static String deriveName(String issuerUri) {
|
||||||
|
if (issuerUri == null || issuerUri.isBlank()) {
|
||||||
|
return GENERIC;
|
||||||
|
}
|
||||||
|
String host;
|
||||||
|
try {
|
||||||
|
URI uri = URI.create(issuerUri.trim());
|
||||||
|
host = uri.getHost();
|
||||||
|
} catch (IllegalArgumentException e) {
|
||||||
|
return GENERIC;
|
||||||
|
}
|
||||||
|
if (host == null || host.isBlank()) {
|
||||||
|
return GENERIC;
|
||||||
|
}
|
||||||
|
String h = host.toLowerCase();
|
||||||
|
if (h.contains("logto")) return "Logto";
|
||||||
|
if (h.contains("keycloak")) return "Keycloak";
|
||||||
|
if (h.endsWith("auth0.com")) return "Auth0";
|
||||||
|
if (h.endsWith("okta.com") || h.endsWith("oktapreview.com")) return "Okta";
|
||||||
|
return GENERIC;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -16,8 +16,6 @@ import java.security.MessageDigest;
|
|||||||
import java.security.NoSuchAlgorithmException;
|
import java.security.NoSuchAlgorithmException;
|
||||||
import java.sql.Timestamp;
|
import java.sql.Timestamp;
|
||||||
import java.time.Instant;
|
import java.time.Instant;
|
||||||
import java.util.ArrayList;
|
|
||||||
import java.util.Collections;
|
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
import java.util.HexFormat;
|
import java.util.HexFormat;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
@@ -57,6 +55,12 @@ public class ClickHouseDiagramStore implements DiagramStore {
|
|||||||
ORDER BY created_at DESC LIMIT 1
|
ORDER BY created_at DESC LIMIT 1
|
||||||
""";
|
""";
|
||||||
|
|
||||||
|
private static final String SELECT_HASH_FOR_APP_ROUTE = """
|
||||||
|
SELECT content_hash FROM route_diagrams
|
||||||
|
WHERE tenant_id = ? AND application_id = ? AND environment = ? AND route_id = ?
|
||||||
|
ORDER BY created_at DESC LIMIT 1
|
||||||
|
""";
|
||||||
|
|
||||||
private static final String SELECT_DEFINITIONS_FOR_APP = """
|
private static final String SELECT_DEFINITIONS_FOR_APP = """
|
||||||
SELECT DISTINCT route_id, definition FROM route_diagrams
|
SELECT DISTINCT route_id, definition FROM route_diagrams
|
||||||
WHERE tenant_id = ? AND application_id = ? AND environment = ?
|
WHERE tenant_id = ? AND application_id = ? AND environment = ?
|
||||||
@@ -68,6 +72,8 @@ public class ClickHouseDiagramStore implements DiagramStore {
|
|||||||
|
|
||||||
// (routeId + "\0" + instanceId) → contentHash
|
// (routeId + "\0" + instanceId) → contentHash
|
||||||
private final ConcurrentHashMap<String, String> hashCache = new ConcurrentHashMap<>();
|
private final ConcurrentHashMap<String, String> hashCache = new ConcurrentHashMap<>();
|
||||||
|
// (applicationId + "\0" + environment + "\0" + routeId) → most recent contentHash
|
||||||
|
private final ConcurrentHashMap<String, String> appRouteHashCache = new ConcurrentHashMap<>();
|
||||||
// contentHash → deserialized RouteGraph
|
// contentHash → deserialized RouteGraph
|
||||||
private final ConcurrentHashMap<String, RouteGraph> graphCache = new ConcurrentHashMap<>();
|
private final ConcurrentHashMap<String, RouteGraph> graphCache = new ConcurrentHashMap<>();
|
||||||
|
|
||||||
@@ -92,12 +98,37 @@ public class ClickHouseDiagramStore implements DiagramStore {
|
|||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
log.warn("Failed to warm diagram hash cache — lookups will fall back to ClickHouse: {}", e.getMessage());
|
log.warn("Failed to warm diagram hash cache — lookups will fall back to ClickHouse: {}", e.getMessage());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
jdbc.query(
|
||||||
|
"SELECT application_id, environment, route_id, " +
|
||||||
|
"argMax(content_hash, created_at) AS content_hash " +
|
||||||
|
"FROM route_diagrams WHERE tenant_id = ? " +
|
||||||
|
"GROUP BY application_id, environment, route_id",
|
||||||
|
rs -> {
|
||||||
|
String key = appRouteCacheKey(
|
||||||
|
rs.getString("application_id"),
|
||||||
|
rs.getString("environment"),
|
||||||
|
rs.getString("route_id"));
|
||||||
|
appRouteHashCache.put(key, rs.getString("content_hash"));
|
||||||
|
},
|
||||||
|
tenantId);
|
||||||
|
log.info("Diagram app-route cache warmed: {} entries", appRouteHashCache.size());
|
||||||
|
} catch (Exception e) {
|
||||||
|
log.warn("Failed to warm diagram app-route cache — lookups will fall back to ClickHouse: {}", e.getMessage());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private static String cacheKey(String routeId, String instanceId) {
|
private static String cacheKey(String routeId, String instanceId) {
|
||||||
return routeId + "\0" + instanceId;
|
return routeId + "\0" + instanceId;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private static String appRouteCacheKey(String applicationId, String environment, String routeId) {
|
||||||
|
return (applicationId != null ? applicationId : "") + "\0"
|
||||||
|
+ (environment != null ? environment : "") + "\0"
|
||||||
|
+ (routeId != null ? routeId : "");
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void store(TaggedDiagram diagram) {
|
public void store(TaggedDiagram diagram) {
|
||||||
try {
|
try {
|
||||||
@@ -122,6 +153,7 @@ public class ClickHouseDiagramStore implements DiagramStore {
|
|||||||
|
|
||||||
// Update caches
|
// Update caches
|
||||||
hashCache.put(cacheKey(routeId, agentId), contentHash);
|
hashCache.put(cacheKey(routeId, agentId), contentHash);
|
||||||
|
appRouteHashCache.put(appRouteCacheKey(applicationId, environment, routeId), contentHash);
|
||||||
graphCache.put(contentHash, graph);
|
graphCache.put(contentHash, graph);
|
||||||
|
|
||||||
log.debug("Stored diagram for route={} agent={} with hash={}", routeId, agentId, contentHash);
|
log.debug("Stored diagram for route={} agent={} with hash={}", routeId, agentId, contentHash);
|
||||||
@@ -170,33 +202,29 @@ public class ClickHouseDiagramStore implements DiagramStore {
|
|||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Optional<String> findContentHashForRouteByAgents(String routeId, List<String> agentIds) {
|
public Optional<String> findLatestContentHashForAppRoute(String applicationId,
|
||||||
if (agentIds == null || agentIds.isEmpty()) {
|
String routeId,
|
||||||
|
String environment) {
|
||||||
|
if (applicationId == null || applicationId.isBlank()
|
||||||
|
|| routeId == null || routeId.isBlank()
|
||||||
|
|| environment == null || environment.isBlank()) {
|
||||||
return Optional.empty();
|
return Optional.empty();
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try cache first — return first hit
|
String key = appRouteCacheKey(applicationId, environment, routeId);
|
||||||
for (String agentId : agentIds) {
|
String cached = appRouteHashCache.get(key);
|
||||||
String cached = hashCache.get(cacheKey(routeId, agentId));
|
if (cached != null) {
|
||||||
if (cached != null) {
|
return Optional.of(cached);
|
||||||
return Optional.of(cached);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Fall back to ClickHouse
|
List<Map<String, Object>> rows = jdbc.queryForList(
|
||||||
String placeholders = String.join(", ", Collections.nCopies(agentIds.size(), "?"));
|
SELECT_HASH_FOR_APP_ROUTE, tenantId, applicationId, environment, routeId);
|
||||||
String sql = "SELECT content_hash FROM route_diagrams " +
|
|
||||||
"WHERE tenant_id = ? AND route_id = ? AND instance_id IN (" + placeholders + ") " +
|
|
||||||
"ORDER BY created_at DESC LIMIT 1";
|
|
||||||
var params = new ArrayList<Object>();
|
|
||||||
params.add(tenantId);
|
|
||||||
params.add(routeId);
|
|
||||||
params.addAll(agentIds);
|
|
||||||
List<Map<String, Object>> rows = jdbc.queryForList(sql, params.toArray());
|
|
||||||
if (rows.isEmpty()) {
|
if (rows.isEmpty()) {
|
||||||
return Optional.empty();
|
return Optional.empty();
|
||||||
}
|
}
|
||||||
return Optional.of((String) rows.get(0).get("content_hash"));
|
String hash = (String) rows.get(0).get("content_hash");
|
||||||
|
appRouteHashCache.put(key, hash);
|
||||||
|
return Optional.of(hash);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
|||||||
@@ -0,0 +1,408 @@
|
|||||||
|
package com.cameleer.server.app.storage;
|
||||||
|
|
||||||
|
import com.cameleer.server.core.storage.ServerMetricsQueryStore;
import com.cameleer.server.core.storage.model.ServerInstanceInfo;
import com.cameleer.server.core.storage.model.ServerMetricCatalogEntry;
import com.cameleer.server.core.storage.model.ServerMetricPoint;
import com.cameleer.server.core.storage.model.ServerMetricQueryRequest;
import com.cameleer.server.core.storage.model.ServerMetricQueryResponse;
import com.cameleer.server.core.storage.model.ServerMetricSeries;
import org.springframework.jdbc.core.JdbcTemplate;

import java.sql.Array;
import java.sql.Timestamp;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.regex.Pattern;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ClickHouse-backed {@link ServerMetricsQueryStore}.
|
||||||
|
*
|
||||||
|
* <p>Safety rules for every query:
|
||||||
|
* <ul>
|
||||||
|
* <li>tenant_id always bound as a parameter — no cross-tenant reads.</li>
|
||||||
|
* <li>Identifier-like inputs (metric name, statistic, tag keys,
|
||||||
|
* aggregation, mode) are regex-validated. Tag keys flow through the
|
||||||
|
* query as JDBC parameter-bound values of {@code tags[?]} map lookups,
|
||||||
|
* so even with a "safe" regex they cannot inject SQL.</li>
|
||||||
|
* <li>Literal values ({@code from}, {@code to}, tag filter values,
|
||||||
|
* server_instance_id allow-list) always go through {@code ?}.</li>
|
||||||
|
* <li>The time range is capped at {@link #MAX_RANGE}.</li>
|
||||||
|
* <li>Result cardinality is capped at {@link #MAX_SERIES} series.</li>
|
||||||
|
* </ul>
|
||||||
|
*/
|
||||||
|
public class ClickHouseServerMetricsQueryStore implements ServerMetricsQueryStore {
|
||||||
|
|
||||||
|
private static final Pattern SAFE_IDENTIFIER = Pattern.compile("^[a-zA-Z0-9._]+$");
|
||||||
|
private static final Pattern SAFE_STATISTIC = Pattern.compile("^[a-z_]+$");
|
||||||
|
|
||||||
|
private static final Set<String> AGGREGATIONS = Set.of("avg", "sum", "max", "min", "latest");
|
||||||
|
private static final Set<String> MODES = Set.of("raw", "delta");
|
||||||
|
|
||||||
|
/** Maximum {@code to - from} window accepted by the API. */
|
||||||
|
static final Duration MAX_RANGE = Duration.ofDays(31);
|
||||||
|
|
||||||
|
/** Clamp bounds and default for {@code stepSeconds}. */
|
||||||
|
static final int MIN_STEP = 10;
|
||||||
|
static final int MAX_STEP = 3600;
|
||||||
|
static final int DEFAULT_STEP = 60;
|
||||||
|
|
||||||
|
/** Defence against group-by explosion — limit the series count per response. */
|
||||||
|
static final int MAX_SERIES = 500;
|
||||||
|
|
||||||
|
private final String tenantId;
|
||||||
|
private final JdbcTemplate jdbc;
|
||||||
|
|
||||||
|
public ClickHouseServerMetricsQueryStore(String tenantId, JdbcTemplate jdbc) {
|
||||||
|
this.tenantId = tenantId;
|
||||||
|
this.jdbc = jdbc;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── catalog ─────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<ServerMetricCatalogEntry> catalog(Instant from, Instant to) {
|
||||||
|
requireRange(from, to);
|
||||||
|
String sql = """
|
||||||
|
SELECT
|
||||||
|
metric_name,
|
||||||
|
any(metric_type) AS metric_type,
|
||||||
|
arraySort(groupUniqArray(statistic)) AS statistics,
|
||||||
|
arraySort(arrayDistinct(arrayFlatten(groupArray(mapKeys(tags))))) AS tag_keys
|
||||||
|
FROM server_metrics
|
||||||
|
WHERE tenant_id = ?
|
||||||
|
AND collected_at >= ?
|
||||||
|
AND collected_at < ?
|
||||||
|
GROUP BY metric_name
|
||||||
|
ORDER BY metric_name
|
||||||
|
""";
|
||||||
|
return jdbc.query(sql, (rs, n) -> new ServerMetricCatalogEntry(
|
||||||
|
rs.getString("metric_name"),
|
||||||
|
rs.getString("metric_type"),
|
||||||
|
arrayToStringList(rs.getArray("statistics")),
|
||||||
|
arrayToStringList(rs.getArray("tag_keys"))
|
||||||
|
), tenantId, Timestamp.from(from), Timestamp.from(to));
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── instances ───────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public List<ServerInstanceInfo> listInstances(Instant from, Instant to) {
|
||||||
|
requireRange(from, to);
|
||||||
|
String sql = """
|
||||||
|
SELECT
|
||||||
|
server_instance_id,
|
||||||
|
min(collected_at) AS first_seen,
|
||||||
|
max(collected_at) AS last_seen
|
||||||
|
FROM server_metrics
|
||||||
|
WHERE tenant_id = ?
|
||||||
|
AND collected_at >= ?
|
||||||
|
AND collected_at < ?
|
||||||
|
GROUP BY server_instance_id
|
||||||
|
ORDER BY last_seen DESC
|
||||||
|
""";
|
||||||
|
return jdbc.query(sql, (rs, n) -> new ServerInstanceInfo(
|
||||||
|
rs.getString("server_instance_id"),
|
||||||
|
rs.getTimestamp("first_seen").toInstant(),
|
||||||
|
rs.getTimestamp("last_seen").toInstant()
|
||||||
|
), tenantId, Timestamp.from(from), Timestamp.from(to));
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── query ───────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public ServerMetricQueryResponse query(ServerMetricQueryRequest request) {
|
||||||
|
if (request == null) throw new IllegalArgumentException("request is required");
|
||||||
|
String metric = requireSafeIdentifier(request.metric(), "metric");
|
||||||
|
requireRange(request.from(), request.to());
|
||||||
|
|
||||||
|
String aggregation = request.aggregation() != null ? request.aggregation().toLowerCase() : "avg";
|
||||||
|
if (!AGGREGATIONS.contains(aggregation)) {
|
||||||
|
throw new IllegalArgumentException("aggregation must be one of " + AGGREGATIONS);
|
||||||
|
}
|
||||||
|
|
||||||
|
String mode = request.mode() != null ? request.mode().toLowerCase() : "raw";
|
||||||
|
if (!MODES.contains(mode)) {
|
||||||
|
throw new IllegalArgumentException("mode must be one of " + MODES);
|
||||||
|
}
|
||||||
|
|
||||||
|
int step = request.stepSeconds() != null ? request.stepSeconds() : DEFAULT_STEP;
|
||||||
|
if (step < MIN_STEP || step > MAX_STEP) {
|
||||||
|
throw new IllegalArgumentException(
|
||||||
|
"stepSeconds must be in [" + MIN_STEP + "," + MAX_STEP + "]");
|
||||||
|
}
|
||||||
|
|
||||||
|
String statistic = request.statistic();
|
||||||
|
if (statistic != null && !SAFE_STATISTIC.matcher(statistic).matches()) {
|
||||||
|
throw new IllegalArgumentException("statistic contains unsafe characters");
|
||||||
|
}
|
||||||
|
|
||||||
|
List<String> groupByTags = request.groupByTags() != null
|
||||||
|
? request.groupByTags() : List.of();
|
||||||
|
for (String t : groupByTags) requireSafeIdentifier(t, "groupByTag");
|
||||||
|
|
||||||
|
Map<String, String> filterTags = request.filterTags() != null
|
||||||
|
? request.filterTags() : Map.of();
|
||||||
|
for (String t : filterTags.keySet()) requireSafeIdentifier(t, "filterTag key");
|
||||||
|
|
||||||
|
List<String> instanceAllowList = request.serverInstanceIds() != null
|
||||||
|
? request.serverInstanceIds() : List.of();
|
||||||
|
|
||||||
|
boolean isDelta = "delta".equals(mode);
|
||||||
|
boolean isMean = "mean".equals(statistic);
|
||||||
|
|
||||||
|
String sql = isDelta
|
||||||
|
? buildDeltaSql(step, groupByTags, filterTags, instanceAllowList, statistic, isMean)
|
||||||
|
: buildRawSql(step, groupByTags, filterTags, instanceAllowList,
|
||||||
|
statistic, aggregation, isMean);
|
||||||
|
|
||||||
|
List<Object> params = buildParams(groupByTags, metric, statistic, isMean,
|
||||||
|
request.from(), request.to(),
|
||||||
|
filterTags, instanceAllowList);
|
||||||
|
|
||||||
|
List<Row> rows = jdbc.query(sql, (rs, n) -> {
|
||||||
|
int idx = 1;
|
||||||
|
Instant bucket = rs.getTimestamp(idx++).toInstant();
|
||||||
|
List<String> tagValues = new ArrayList<>(groupByTags.size());
|
||||||
|
for (int g = 0; g < groupByTags.size(); g++) {
|
||||||
|
tagValues.add(rs.getString(idx++));
|
||||||
|
}
|
||||||
|
double value = rs.getDouble(idx);
|
||||||
|
return new Row(bucket, tagValues, value);
|
||||||
|
}, params.toArray());
|
||||||
|
|
||||||
|
return assembleSeries(rows, metric, statistic, aggregation, mode, step, groupByTags);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── SQL builders ────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Builds a single-pass SQL for raw mode:
|
||||||
|
* <pre>{@code
|
||||||
|
* SELECT bucket, tag0, ..., <agg>(metric_value) AS value
|
||||||
|
* FROM server_metrics WHERE ...
|
||||||
|
* GROUP BY bucket, tag0, ...
|
||||||
|
* ORDER BY bucket, tag0, ...
|
||||||
|
* }</pre>
|
||||||
|
* For {@code statistic=mean}, replaces the aggregate with
|
||||||
|
* {@code sumIf(value, statistic IN ('total','total_time')) / nullIf(sumIf(value, statistic='count'), 0)}.
|
||||||
|
*/
|
||||||
|
private String buildRawSql(int step, List<String> groupByTags,
|
||||||
|
Map<String, String> filterTags,
|
||||||
|
List<String> instanceAllowList,
|
||||||
|
String statistic, String aggregation, boolean isMean) {
|
||||||
|
StringBuilder s = new StringBuilder(512);
|
||||||
|
s.append("SELECT\n toDateTime64(toStartOfInterval(collected_at, INTERVAL ")
|
||||||
|
.append(step).append(" SECOND), 3) AS bucket");
|
||||||
|
for (int i = 0; i < groupByTags.size(); i++) {
|
||||||
|
s.append(",\n tags[?] AS tag").append(i);
|
||||||
|
}
|
||||||
|
s.append(",\n ").append(isMean ? meanExpr() : scalarAggExpr(aggregation))
|
||||||
|
.append(" AS value\nFROM server_metrics\n");
|
||||||
|
appendWhereClause(s, filterTags, instanceAllowList, statistic, isMean);
|
||||||
|
s.append("GROUP BY bucket");
|
||||||
|
for (int i = 0; i < groupByTags.size(); i++) s.append(", tag").append(i);
|
||||||
|
s.append("\nORDER BY bucket");
|
||||||
|
for (int i = 0; i < groupByTags.size(); i++) s.append(", tag").append(i);
|
||||||
|
return s.toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Builds a three-level SQL for delta mode. Inner fills one
|
||||||
|
* (bucket, instance, tag-group) row via {@code max(metric_value)};
|
||||||
|
* middle computes positive-clipped per-instance differences via a
|
||||||
|
* window function; outer sums across instances.
|
||||||
|
*/
|
||||||
|
private String buildDeltaSql(int step, List<String> groupByTags,
|
||||||
|
Map<String, String> filterTags,
|
||||||
|
List<String> instanceAllowList,
|
||||||
|
String statistic, boolean isMean) {
|
||||||
|
StringBuilder s = new StringBuilder(1024);
|
||||||
|
s.append("SELECT bucket");
|
||||||
|
for (int i = 0; i < groupByTags.size(); i++) s.append(", tag").append(i);
|
||||||
|
s.append(", sum(delta) AS value FROM (\n");
|
||||||
|
|
||||||
|
// Middle: per-instance positive-clipped delta using window.
|
||||||
|
s.append(" SELECT bucket");
|
||||||
|
for (int i = 0; i < groupByTags.size(); i++) s.append(", tag").append(i);
|
||||||
|
s.append(", server_instance_id, greatest(0, value - coalesce(any(value) OVER (")
|
||||||
|
.append("PARTITION BY server_instance_id");
|
||||||
|
for (int i = 0; i < groupByTags.size(); i++) s.append(", tag").append(i);
|
||||||
|
s.append(" ORDER BY bucket ROWS BETWEEN 1 PRECEDING AND 1 PRECEDING), value)) AS delta FROM (\n");
|
||||||
|
|
||||||
|
// Inner: one representative value per (bucket, instance, tag-group).
|
||||||
|
s.append(" SELECT\n toDateTime64(toStartOfInterval(collected_at, INTERVAL ")
|
||||||
|
.append(step).append(" SECOND), 3) AS bucket,\n server_instance_id");
|
||||||
|
for (int i = 0; i < groupByTags.size(); i++) {
|
||||||
|
s.append(",\n tags[?] AS tag").append(i);
|
||||||
|
}
|
||||||
|
s.append(",\n ").append(isMean ? meanExpr() : "max(metric_value)")
|
||||||
|
.append(" AS value\n FROM server_metrics\n");
|
||||||
|
appendWhereClause(s, filterTags, instanceAllowList, statistic, isMean);
|
||||||
|
s.append(" GROUP BY bucket, server_instance_id");
|
||||||
|
for (int i = 0; i < groupByTags.size(); i++) s.append(", tag").append(i);
|
||||||
|
s.append("\n ) AS bucketed\n) AS deltas\n");
|
||||||
|
|
||||||
|
s.append("GROUP BY bucket");
|
||||||
|
for (int i = 0; i < groupByTags.size(); i++) s.append(", tag").append(i);
|
||||||
|
s.append("\nORDER BY bucket");
|
||||||
|
for (int i = 0; i < groupByTags.size(); i++) s.append(", tag").append(i);
|
||||||
|
return s.toString();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* WHERE clause shared by both raw and delta SQL shapes. Appended at the
|
||||||
|
* correct indent under either the single {@code FROM server_metrics}
|
||||||
|
* (raw) or the innermost one (delta).
|
||||||
|
*/
|
||||||
|
private void appendWhereClause(StringBuilder s, Map<String, String> filterTags,
|
||||||
|
List<String> instanceAllowList,
|
||||||
|
String statistic, boolean isMean) {
|
||||||
|
s.append(" WHERE tenant_id = ?\n")
|
||||||
|
.append(" AND metric_name = ?\n");
|
||||||
|
if (isMean) {
|
||||||
|
s.append(" AND statistic IN ('count', 'total', 'total_time')\n");
|
||||||
|
} else if (statistic != null) {
|
||||||
|
s.append(" AND statistic = ?\n");
|
||||||
|
}
|
||||||
|
s.append(" AND collected_at >= ?\n")
|
||||||
|
.append(" AND collected_at < ?\n");
|
||||||
|
for (int i = 0; i < filterTags.size(); i++) {
|
||||||
|
s.append(" AND tags[?] = ?\n");
|
||||||
|
}
|
||||||
|
if (!instanceAllowList.isEmpty()) {
|
||||||
|
s.append(" AND server_instance_id IN (")
|
||||||
|
.append("?,".repeat(instanceAllowList.size() - 1)).append("?)\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* SQL-positional params for both raw and delta queries (same relative
|
||||||
|
* order because the WHERE clause is emitted by {@link #appendWhereClause}
|
||||||
|
* only once, with the {@code tags[?]} select-list placeholders appearing
|
||||||
|
* earlier in the SQL text).
|
||||||
|
*/
|
||||||
|
private List<Object> buildParams(List<String> groupByTags, String metric,
|
||||||
|
String statistic, boolean isMean,
|
||||||
|
Instant from, Instant to,
|
||||||
|
Map<String, String> filterTags,
|
||||||
|
List<String> instanceAllowList) {
|
||||||
|
List<Object> params = new ArrayList<>();
|
||||||
|
// SELECT-list tags[?] placeholders
|
||||||
|
params.addAll(groupByTags);
|
||||||
|
// WHERE
|
||||||
|
params.add(tenantId);
|
||||||
|
params.add(metric);
|
||||||
|
if (!isMean && statistic != null) params.add(statistic);
|
||||||
|
params.add(Timestamp.from(from));
|
||||||
|
params.add(Timestamp.from(to));
|
||||||
|
for (Map.Entry<String, String> e : filterTags.entrySet()) {
|
||||||
|
params.add(e.getKey());
|
||||||
|
params.add(e.getValue());
|
||||||
|
}
|
||||||
|
params.addAll(instanceAllowList);
|
||||||
|
return params;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static String scalarAggExpr(String aggregation) {
|
||||||
|
return switch (aggregation) {
|
||||||
|
case "avg" -> "avg(metric_value)";
|
||||||
|
case "sum" -> "sum(metric_value)";
|
||||||
|
case "max" -> "max(metric_value)";
|
||||||
|
case "min" -> "min(metric_value)";
|
||||||
|
case "latest" -> "argMax(metric_value, collected_at)";
|
||||||
|
default -> throw new IllegalStateException("unreachable: " + aggregation);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
private static String meanExpr() {
|
||||||
|
return "sumIf(metric_value, statistic IN ('total', 'total_time'))"
|
||||||
|
+ " / nullIf(sumIf(metric_value, statistic = 'count'), 0)";
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── response assembly ───────────────────────────────────────────────
|
||||||
|
|
||||||
|
private ServerMetricQueryResponse assembleSeries(
|
||||||
|
List<Row> rows, String metric, String statistic,
|
||||||
|
String aggregation, String mode, int step, List<String> groupByTags) {
|
||||||
|
|
||||||
|
Map<List<String>, List<ServerMetricPoint>> bySignature = new LinkedHashMap<>();
|
||||||
|
for (Row r : rows) {
|
||||||
|
if (Double.isNaN(r.value) || Double.isInfinite(r.value)) continue;
|
||||||
|
bySignature.computeIfAbsent(r.tagValues, k -> new ArrayList<>())
|
||||||
|
.add(new ServerMetricPoint(r.bucket, r.value));
|
||||||
|
}
|
||||||
|
|
||||||
|
if (bySignature.size() > MAX_SERIES) {
|
||||||
|
throw new IllegalArgumentException(
|
||||||
|
"query produced " + bySignature.size()
|
||||||
|
+ " series; reduce groupByTags or tighten filterTags (max "
|
||||||
|
+ MAX_SERIES + ")");
|
||||||
|
}
|
||||||
|
|
||||||
|
List<ServerMetricSeries> series = new ArrayList<>(bySignature.size());
|
||||||
|
for (Map.Entry<List<String>, List<ServerMetricPoint>> e : bySignature.entrySet()) {
|
||||||
|
Map<String, String> tags = new LinkedHashMap<>();
|
||||||
|
for (int i = 0; i < groupByTags.size(); i++) {
|
||||||
|
tags.put(groupByTags.get(i), e.getKey().get(i));
|
||||||
|
}
|
||||||
|
series.add(new ServerMetricSeries(Collections.unmodifiableMap(tags), e.getValue()));
|
||||||
|
}
|
||||||
|
|
||||||
|
return new ServerMetricQueryResponse(metric,
|
||||||
|
statistic != null ? statistic : "value",
|
||||||
|
aggregation, mode, step, series);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── helpers ─────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
private static void requireRange(Instant from, Instant to) {
|
||||||
|
if (from == null || to == null) {
|
||||||
|
throw new IllegalArgumentException("from and to are required");
|
||||||
|
}
|
||||||
|
if (!from.isBefore(to)) {
|
||||||
|
throw new IllegalArgumentException("from must be strictly before to");
|
||||||
|
}
|
||||||
|
if (Duration.between(from, to).compareTo(MAX_RANGE) > 0) {
|
||||||
|
throw new IllegalArgumentException(
|
||||||
|
"time range exceeds maximum of " + MAX_RANGE.toDays() + " days");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private static String requireSafeIdentifier(String value, String field) {
|
||||||
|
if (value == null || value.isBlank()) {
|
||||||
|
throw new IllegalArgumentException(field + " is required");
|
||||||
|
}
|
||||||
|
if (!SAFE_IDENTIFIER.matcher(value).matches()) {
|
||||||
|
throw new IllegalArgumentException(
|
||||||
|
field + " contains unsafe characters (allowed: [a-zA-Z0-9._])");
|
||||||
|
}
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static List<String> arrayToStringList(Array array) {
|
||||||
|
if (array == null) return List.of();
|
||||||
|
try {
|
||||||
|
Object[] values = (Object[]) array.getArray();
|
||||||
|
Set<String> sorted = new TreeSet<>();
|
||||||
|
for (Object v : values) {
|
||||||
|
if (v != null) sorted.add(v.toString());
|
||||||
|
}
|
||||||
|
return List.copyOf(sorted);
|
||||||
|
} catch (Exception e) {
|
||||||
|
return List.of();
|
||||||
|
} finally {
|
||||||
|
try { array.free(); } catch (Exception ignore) { }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private record Row(Instant bucket, List<String> tagValues, double value) {
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,46 @@
|
|||||||
|
package com.cameleer.server.app.storage;
|
||||||
|
|
||||||
|
import com.cameleer.server.core.storage.ServerMetricsStore;
|
||||||
|
import com.cameleer.server.core.storage.model.ServerMetricSample;
|
||||||
|
import org.springframework.jdbc.core.JdbcTemplate;
|
||||||
|
|
||||||
|
import java.sql.Timestamp;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
public class ClickHouseServerMetricsStore implements ServerMetricsStore {
|
||||||
|
|
||||||
|
private final JdbcTemplate jdbc;
|
||||||
|
|
||||||
|
public ClickHouseServerMetricsStore(JdbcTemplate jdbc) {
|
||||||
|
this.jdbc = jdbc;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void insertBatch(List<ServerMetricSample> samples) {
|
||||||
|
if (samples.isEmpty()) return;
|
||||||
|
|
||||||
|
jdbc.batchUpdate("""
|
||||||
|
INSERT INTO server_metrics
|
||||||
|
(tenant_id, collected_at, server_instance_id, metric_name,
|
||||||
|
metric_type, statistic, metric_value, tags)
|
||||||
|
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
||||||
|
""",
|
||||||
|
samples.stream().map(s -> new Object[]{
|
||||||
|
s.tenantId(),
|
||||||
|
Timestamp.from(s.collectedAt()),
|
||||||
|
s.serverInstanceId(),
|
||||||
|
s.metricName(),
|
||||||
|
s.metricType(),
|
||||||
|
s.statistic(),
|
||||||
|
s.value(),
|
||||||
|
tagsToClickHouseMap(s.tags())
|
||||||
|
}).toList());
|
||||||
|
}
|
||||||
|
|
||||||
|
private Map<String, String> tagsToClickHouseMap(Map<String, String> tags) {
|
||||||
|
if (tags == null || tags.isEmpty()) return new HashMap<>();
|
||||||
|
return new HashMap<>(tags);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -70,6 +70,12 @@ public class PostgresAppRepository implements AppRepository {
|
|||||||
(rs, rowNum) -> mapRow(rs));
|
(rs, rowNum) -> mapRow(rs));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long count() {
|
||||||
|
Long n = jdbc.queryForObject("SELECT COUNT(*) FROM apps", Long.class);
|
||||||
|
return n == null ? 0L : n;
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void updateContainerConfig(UUID id, Map<String, Object> containerConfig) {
|
public void updateContainerConfig(UUID id, Map<String, Object> containerConfig) {
|
||||||
try {
|
try {
|
||||||
|
|||||||
@@ -26,7 +26,8 @@ public class PostgresEnvironmentRepository implements EnvironmentRepository {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private static final String SELECT_COLS =
|
private static final String SELECT_COLS =
|
||||||
"id, slug, display_name, production, enabled, default_container_config, jar_retention_count, color, created_at";
|
"id, slug, display_name, production, enabled, default_container_config, jar_retention_count, color, created_at, "
|
||||||
|
+ "execution_retention_days, log_retention_days, metric_retention_days";
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public List<Environment> findAll() {
|
public List<Environment> findAll() {
|
||||||
@@ -35,6 +36,11 @@ public class PostgresEnvironmentRepository implements EnvironmentRepository {
|
|||||||
(rs, rowNum) -> mapRow(rs));
|
(rs, rowNum) -> mapRow(rs));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long count() {
|
||||||
|
return jdbc.queryForObject("SELECT COUNT(*) FROM environments", Long.class);
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Optional<Environment> findById(UUID id) {
|
public Optional<Environment> findById(UUID id) {
|
||||||
var results = jdbc.query(
|
var results = jdbc.query(
|
||||||
@@ -108,7 +114,10 @@ public class PostgresEnvironmentRepository implements EnvironmentRepository {
|
|||||||
config,
|
config,
|
||||||
jarRetentionCount,
|
jarRetentionCount,
|
||||||
color,
|
color,
|
||||||
rs.getTimestamp("created_at").toInstant()
|
rs.getTimestamp("created_at").toInstant(),
|
||||||
|
rs.getInt("execution_retention_days"),
|
||||||
|
rs.getInt("log_retention_days"),
|
||||||
|
rs.getInt("metric_retention_days")
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -101,6 +101,12 @@ public class PostgresUserRepository implements UserRepository {
|
|||||||
java.sql.Timestamp.from(timestamp), userId);
|
java.sql.Timestamp.from(timestamp), userId);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long count() {
|
||||||
|
Long n = jdbc.queryForObject("SELECT COUNT(*) FROM users", Long.class);
|
||||||
|
return n == null ? 0L : n;
|
||||||
|
}
|
||||||
|
|
||||||
private UserInfo mapUser(java.sql.ResultSet rs) throws java.sql.SQLException {
|
private UserInfo mapUser(java.sql.ResultSet rs) throws java.sql.SQLException {
|
||||||
java.sql.Timestamp ts = rs.getTimestamp("created_at");
|
java.sql.Timestamp ts = rs.getTimestamp("created_at");
|
||||||
java.time.Instant createdAt = ts != null ? ts.toInstant() : null;
|
java.time.Instant createdAt = ts != null ? ts.toInstant() : null;
|
||||||
|
|||||||
@@ -47,6 +47,11 @@ cameleer:
|
|||||||
jarstoragepath: ${CAMELEER_SERVER_RUNTIME_JARSTORAGEPATH:/data/jars}
|
jarstoragepath: ${CAMELEER_SERVER_RUNTIME_JARSTORAGEPATH:/data/jars}
|
||||||
baseimage: ${CAMELEER_SERVER_RUNTIME_BASEIMAGE:gitea.siegeln.net/cameleer/cameleer-runtime-base:latest}
|
baseimage: ${CAMELEER_SERVER_RUNTIME_BASEIMAGE:gitea.siegeln.net/cameleer/cameleer-runtime-base:latest}
|
||||||
dockernetwork: ${CAMELEER_SERVER_RUNTIME_DOCKERNETWORK:cameleer}
|
dockernetwork: ${CAMELEER_SERVER_RUNTIME_DOCKERNETWORK:cameleer}
|
||||||
|
# Container runtime override. Empty (default) auto-detects: uses runsc
|
||||||
|
# (gVisor) if the daemon has it registered, otherwise the daemon default
|
||||||
|
# (runc). Set to a registered runtime name (e.g. "kata", "runc") to
|
||||||
|
# force a specific runtime. See issue #152 for the threat model.
|
||||||
|
dockerruntime: ${CAMELEER_SERVER_RUNTIME_DOCKERRUNTIME:}
|
||||||
agenthealthport: 9464
|
agenthealthport: 9464
|
||||||
healthchecktimeout: 60
|
healthchecktimeout: 60
|
||||||
container:
|
container:
|
||||||
@@ -55,6 +60,7 @@ cameleer:
|
|||||||
routingmode: ${CAMELEER_SERVER_RUNTIME_ROUTINGMODE:path}
|
routingmode: ${CAMELEER_SERVER_RUNTIME_ROUTINGMODE:path}
|
||||||
routingdomain: ${CAMELEER_SERVER_RUNTIME_ROUTINGDOMAIN:localhost}
|
routingdomain: ${CAMELEER_SERVER_RUNTIME_ROUTINGDOMAIN:localhost}
|
||||||
serverurl: ${CAMELEER_SERVER_RUNTIME_SERVERURL:}
|
serverurl: ${CAMELEER_SERVER_RUNTIME_SERVERURL:}
|
||||||
|
certresolver: ${CAMELEER_SERVER_RUNTIME_CERTRESOLVER:}
|
||||||
jardockervolume: ${CAMELEER_SERVER_RUNTIME_JARDOCKERVOLUME:}
|
jardockervolume: ${CAMELEER_SERVER_RUNTIME_JARDOCKERVOLUME:}
|
||||||
indexer:
|
indexer:
|
||||||
debouncems: ${CAMELEER_SERVER_INDEXER_DEBOUNCEMS:2000}
|
debouncems: ${CAMELEER_SERVER_INDEXER_DEBOUNCEMS:2000}
|
||||||
@@ -111,6 +117,10 @@ cameleer:
|
|||||||
url: ${CAMELEER_SERVER_CLICKHOUSE_URL:jdbc:clickhouse://localhost:8123/cameleer}
|
url: ${CAMELEER_SERVER_CLICKHOUSE_URL:jdbc:clickhouse://localhost:8123/cameleer}
|
||||||
username: ${CAMELEER_SERVER_CLICKHOUSE_USERNAME:default}
|
username: ${CAMELEER_SERVER_CLICKHOUSE_USERNAME:default}
|
||||||
password: ${CAMELEER_SERVER_CLICKHOUSE_PASSWORD:}
|
password: ${CAMELEER_SERVER_CLICKHOUSE_PASSWORD:}
|
||||||
|
self-metrics:
|
||||||
|
enabled: ${CAMELEER_SERVER_SELFMETRICS_ENABLED:true}
|
||||||
|
interval-ms: ${CAMELEER_SERVER_SELFMETRICS_INTERVALMS:60000}
|
||||||
|
instance-id: ${CAMELEER_SERVER_INSTANCE_ID:}
|
||||||
|
|
||||||
springdoc:
|
springdoc:
|
||||||
api-docs:
|
api-docs:
|
||||||
|
|||||||
@@ -401,6 +401,29 @@ CREATE TABLE IF NOT EXISTS route_catalog (
|
|||||||
ENGINE = ReplacingMergeTree(last_seen)
|
ENGINE = ReplacingMergeTree(last_seen)
|
||||||
ORDER BY (tenant_id, environment, application_id, route_id);
|
ORDER BY (tenant_id, environment, application_id, route_id);
|
||||||
|
|
||||||
|
-- ── Server Self-Metrics ────────────────────────────────────────────────
-- Periodic snapshot of the server's own Micrometer registry (written by
-- ServerMetricsSnapshotScheduler). No `environment` column — the server
-- straddles environments. `statistic` distinguishes Timer/DistributionSummary
-- sub-measurements (count, total_time, max, mean) from plain counter/gauge values.

CREATE TABLE IF NOT EXISTS server_metrics (
    tenant_id LowCardinality(String) DEFAULT 'default',
    collected_at DateTime64(3),          -- sample time on the server (ms precision)
    server_instance_id LowCardinality(String),
    metric_name LowCardinality(String),
    metric_type LowCardinality(String),
    statistic LowCardinality(String) DEFAULT 'value',
    metric_value Float64,
    tags Map(String, String) DEFAULT map(),
    server_received_at DateTime64(3) DEFAULT now64(3)  -- ingest time, for lag diagnostics
)
ENGINE = MergeTree()
-- Partition per tenant-month so TTL drops and tenant scans stay cheap.
PARTITION BY (tenant_id, toYYYYMM(collected_at))
-- Sort key matches the query store's WHERE shape: tenant + time range first.
ORDER BY (tenant_id, collected_at, server_instance_id, metric_name, statistic)
-- NOTE(review): 90-day hard TTL here; per-env metric_retention_days is
-- enforced elsewhere — confirm the two policies are meant to coexist.
TTL toDateTime(collected_at) + INTERVAL 90 DAY DELETE
SETTINGS index_granularity = 8192;
||||||
|
|
||||||
-- insert_id tiebreak for keyset pagination (fixes same-millisecond cursor collision).
|
-- insert_id tiebreak for keyset pagination (fixes same-millisecond cursor collision).
|
||||||
-- IF NOT EXISTS on ADD COLUMN is idempotent. MATERIALIZE COLUMN is a background mutation,
|
-- IF NOT EXISTS on ADD COLUMN is idempotent. MATERIALIZE COLUMN is a background mutation,
|
||||||
-- effectively a no-op once all parts are already materialized.
|
-- effectively a no-op once all parts are already materialized.
|
||||||
|
|||||||
@@ -0,0 +1,17 @@
|
|||||||
|
-- Per-tenant license row (one server = one tenant)
CREATE TABLE license (
    tenant_id TEXT PRIMARY KEY,          -- one row per tenant
    token TEXT NOT NULL,                 -- raw license token as installed
    license_id UUID NOT NULL,
    installed_at TIMESTAMPTZ NOT NULL,
    installed_by TEXT NOT NULL,          -- who installed the license
    expires_at TIMESTAMPTZ NOT NULL,
    last_validated_at TIMESTAMPTZ NOT NULL
);

-- Per-env retention; defaults to default-tier values (1 day) so a fresh
-- server lands inside the cap without operator intervention.
ALTER TABLE environments
    ADD COLUMN execution_retention_days INTEGER NOT NULL DEFAULT 1,
    ADD COLUMN log_retention_days INTEGER NOT NULL DEFAULT 1,
    ADD COLUMN metric_retention_days INTEGER NOT NULL DEFAULT 1;
||||||
@@ -1,13 +1,19 @@
|
|||||||
package com.cameleer.server.app;
|
package com.cameleer.server.app;
|
||||||
|
|
||||||
import com.cameleer.server.core.agent.AgentRegistryService;
|
import com.cameleer.server.core.agent.AgentRegistryService;
|
||||||
|
import com.cameleer.server.core.license.LicenseGate;
|
||||||
|
import com.cameleer.license.LicenseInfo;
|
||||||
import com.cameleer.server.core.security.JwtService;
|
import com.cameleer.server.core.security.JwtService;
|
||||||
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
import org.springframework.http.HttpHeaders;
|
import org.springframework.http.HttpHeaders;
|
||||||
import org.springframework.http.MediaType;
|
import org.springframework.http.MediaType;
|
||||||
import org.springframework.stereotype.Component;
|
import org.springframework.stereotype.Component;
|
||||||
|
|
||||||
|
import java.time.Instant;
|
||||||
|
import java.time.temporal.ChronoUnit;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
import java.util.UUID;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Test utility for creating JWT-authenticated requests in integration tests.
|
* Test utility for creating JWT-authenticated requests in integration tests.
|
||||||
@@ -20,10 +26,39 @@ public class TestSecurityHelper {
|
|||||||
|
|
||||||
private final JwtService jwtService;
|
private final JwtService jwtService;
|
||||||
private final AgentRegistryService agentRegistryService;
|
private final AgentRegistryService agentRegistryService;
|
||||||
|
private final LicenseGate licenseGate;
|
||||||
|
|
||||||
public TestSecurityHelper(JwtService jwtService, AgentRegistryService agentRegistryService) {
|
@Autowired
|
||||||
|
public TestSecurityHelper(JwtService jwtService,
|
||||||
|
AgentRegistryService agentRegistryService,
|
||||||
|
LicenseGate licenseGate) {
|
||||||
this.jwtService = jwtService;
|
this.jwtService = jwtService;
|
||||||
this.agentRegistryService = agentRegistryService;
|
this.agentRegistryService = agentRegistryService;
|
||||||
|
this.licenseGate = licenseGate;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Loads a synthetic, signature-bypassing license into {@link LicenseGate} so the test can
|
||||||
|
* exercise paths that would otherwise be rejected by default-tier caps. The license is
|
||||||
|
* always-ACTIVE (1 day from now, no grace) and limits are merged over defaults — only
|
||||||
|
* supply the keys you want to lift. Use this from {@code @BeforeEach} in ITs that need to
|
||||||
|
* create more than the default-tier allowance of envs/apps/users/etc.
|
||||||
|
*/
|
||||||
|
public void installSyntheticUnsignedLicense(Map<String, Integer> caps) {
|
||||||
|
LicenseInfo info = new LicenseInfo(
|
||||||
|
UUID.randomUUID(),
|
||||||
|
"default",
|
||||||
|
"test-license",
|
||||||
|
Map.copyOf(caps),
|
||||||
|
Instant.now(),
|
||||||
|
Instant.now().plus(1, ChronoUnit.DAYS),
|
||||||
|
0);
|
||||||
|
licenseGate.load(info);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Clears any test license previously installed via {@link #installSyntheticUnsignedLicense}. */
|
||||||
|
public void clearTestLicense() {
|
||||||
|
licenseGate.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|||||||
@@ -105,6 +105,11 @@ class AlertingFullLifecycleIT extends AbstractPostgresIT {
|
|||||||
.dynamicHttpsPort());
|
.dynamicHttpsPort());
|
||||||
wm.start();
|
wm.start();
|
||||||
|
|
||||||
|
// Lift the default-tier max_alert_rules cap (=2). This lifecycle test creates
|
||||||
|
// multiple rules via REST + repo across @Test methods (PER_CLASS lifecycle) and
|
||||||
|
// is not exercising the license cap. Synthetic license is ACTIVE-state.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(java.util.Map.of("max_alert_rules", 100));
|
||||||
|
|
||||||
// Default clock behaviour: delegate to simulatedNow
|
// Default clock behaviour: delegate to simulatedNow
|
||||||
stubClock();
|
stubClock();
|
||||||
|
|
||||||
@@ -145,6 +150,7 @@ class AlertingFullLifecycleIT extends AbstractPostgresIT {
|
|||||||
|
|
||||||
@AfterAll
|
@AfterAll
|
||||||
void cleanupFixtures() {
|
void cleanupFixtures() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
if (wm != null) wm.stop();
|
if (wm != null) wm.stop();
|
||||||
jdbcTemplate.update("DELETE FROM alert_silences WHERE environment_id = ?", envId);
|
jdbcTemplate.update("DELETE FROM alert_silences WHERE environment_id = ?", envId);
|
||||||
jdbcTemplate.update("DELETE FROM alert_notifications WHERE alert_instance_id IN (SELECT id FROM alert_instances WHERE environment_id = ?)", envId);
|
jdbcTemplate.update("DELETE FROM alert_notifications WHERE alert_instance_id IN (SELECT id FROM alert_instances WHERE environment_id = ?)", envId);
|
||||||
|
|||||||
@@ -56,6 +56,13 @@ class OutboundConnectionAllowedEnvIT extends AbstractPostgresIT {
|
|||||||
void setUp() throws Exception {
|
void setUp() throws Exception {
|
||||||
when(agentRegistryService.findAll()).thenReturn(List.of());
|
when(agentRegistryService.findAll()).thenReturn(List.of());
|
||||||
|
|
||||||
|
// Lift caps so this connection-allowed-env test, which creates one alert rule per
|
||||||
|
// method, is never gated by the default-tier max_alert_rules=2 + sibling residue.
|
||||||
|
// Also lift max_outbound_connections (default=1) — every test creates one connection.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(java.util.Map.of(
|
||||||
|
"max_alert_rules", 100,
|
||||||
|
"max_outbound_connections", 100));
|
||||||
|
|
||||||
adminJwt = securityHelper.adminToken();
|
adminJwt = securityHelper.adminToken();
|
||||||
operatorJwt = securityHelper.operatorToken();
|
operatorJwt = securityHelper.operatorToken();
|
||||||
|
|
||||||
@@ -93,6 +100,7 @@ class OutboundConnectionAllowedEnvIT extends AbstractPostgresIT {
|
|||||||
|
|
||||||
@AfterEach
|
@AfterEach
|
||||||
void cleanUp() {
|
void cleanUp() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
jdbcTemplate.update("DELETE FROM alert_rules WHERE environment_id IN (?, ?, ?)", envIdA, envIdB, envIdC);
|
jdbcTemplate.update("DELETE FROM alert_rules WHERE environment_id IN (?, ?, ?)", envIdA, envIdB, envIdC);
|
||||||
jdbcTemplate.update("DELETE FROM outbound_connections WHERE id = ?", connId);
|
jdbcTemplate.update("DELETE FROM outbound_connections WHERE id = ?", connId);
|
||||||
jdbcTemplate.update("DELETE FROM environments WHERE id IN (?, ?, ?)", envIdA, envIdB, envIdC);
|
jdbcTemplate.update("DELETE FROM environments WHERE id IN (?, ?, ?)", envIdA, envIdB, envIdC);
|
||||||
|
|||||||
@@ -44,6 +44,11 @@ class AlertRuleControllerIT extends AbstractPostgresIT {
|
|||||||
seedUser("test-operator");
|
seedUser("test-operator");
|
||||||
seedUser("test-viewer");
|
seedUser("test-viewer");
|
||||||
|
|
||||||
|
// Lift the default-tier max_alert_rules cap (=2) so this suite — which exercises rule
|
||||||
|
// creation independent of the cap — is not gated by sibling-test residue in the
|
||||||
|
// shared Spring context's Postgres tables. The synthetic license is ACTIVE-state.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(java.util.Map.of("max_alert_rules", 100));
|
||||||
|
|
||||||
// Create a test environment
|
// Create a test environment
|
||||||
envSlug = "test-env-" + UUID.randomUUID().toString().substring(0, 8);
|
envSlug = "test-env-" + UUID.randomUUID().toString().substring(0, 8);
|
||||||
envId = UUID.randomUUID();
|
envId = UUID.randomUUID();
|
||||||
@@ -54,6 +59,7 @@ class AlertRuleControllerIT extends AbstractPostgresIT {
|
|||||||
|
|
||||||
@AfterEach
|
@AfterEach
|
||||||
void cleanUp() {
|
void cleanUp() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
jdbcTemplate.update("DELETE FROM alert_rules WHERE environment_id = ?", envId);
|
jdbcTemplate.update("DELETE FROM alert_rules WHERE environment_id = ?", envId);
|
||||||
jdbcTemplate.update("DELETE FROM environments WHERE id = ?", envId);
|
jdbcTemplate.update("DELETE FROM environments WHERE id = ?", envId);
|
||||||
jdbcTemplate.update("DELETE FROM users WHERE user_id IN ('test-operator','test-viewer')");
|
jdbcTemplate.update("DELETE FROM users WHERE user_id IN ('test-operator','test-viewer')");
|
||||||
|
|||||||
@@ -37,7 +37,7 @@ class AgentLifecycleEvaluatorTest {
|
|||||||
events = mock(AgentEventRepository.class);
|
events = mock(AgentEventRepository.class);
|
||||||
envRepo = mock(EnvironmentRepository.class);
|
envRepo = mock(EnvironmentRepository.class);
|
||||||
when(envRepo.findById(ENV_ID)).thenReturn(Optional.of(
|
when(envRepo.findById(ENV_ID)).thenReturn(Optional.of(
|
||||||
new Environment(ENV_ID, ENV_SLUG, "Prod", true, true, Map.of(), 5, "slate", Instant.EPOCH)));
|
new Environment(ENV_ID, ENV_SLUG, "Prod", true, true, Map.of(), 5, "slate", Instant.EPOCH, 1, 1, 1)));
|
||||||
eval = new AgentLifecycleEvaluator(events, envRepo);
|
eval = new AgentLifecycleEvaluator(events, envRepo);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -41,7 +41,7 @@ class ExchangeMatchEvaluatorTest {
|
|||||||
null, null, null, null, null, null, null, null, null, null, null, null, null, null);
|
null, null, null, null, null, null, null, null, null, null, null, null, null, null);
|
||||||
eval = new ExchangeMatchEvaluator(searchIndex, envRepo, props);
|
eval = new ExchangeMatchEvaluator(searchIndex, envRepo, props);
|
||||||
|
|
||||||
var env = new Environment(ENV_ID, "prod", "Production", false, true, null, null, "slate", null);
|
var env = new Environment(ENV_ID, "prod", "Production", false, true, null, null, "slate", null, 1, 1, 1);
|
||||||
when(envRepo.findById(ENV_ID)).thenReturn(Optional.of(env));
|
when(envRepo.findById(ENV_ID)).thenReturn(Optional.of(env));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -35,7 +35,7 @@ class LogPatternEvaluatorTest {
|
|||||||
envRepo = mock(EnvironmentRepository.class);
|
envRepo = mock(EnvironmentRepository.class);
|
||||||
eval = new LogPatternEvaluator(logStore, envRepo);
|
eval = new LogPatternEvaluator(logStore, envRepo);
|
||||||
|
|
||||||
var env = new Environment(ENV_ID, "prod", "Production", false, true, null, null, "slate", null);
|
var env = new Environment(ENV_ID, "prod", "Production", false, true, null, null, "slate", null, 1, 1, 1);
|
||||||
when(envRepo.findById(ENV_ID)).thenReturn(Optional.of(env));
|
when(envRepo.findById(ENV_ID)).thenReturn(Optional.of(env));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ class RouteMetricEvaluatorTest {
|
|||||||
envRepo = mock(EnvironmentRepository.class);
|
envRepo = mock(EnvironmentRepository.class);
|
||||||
eval = new RouteMetricEvaluator(statsStore, envRepo);
|
eval = new RouteMetricEvaluator(statsStore, envRepo);
|
||||||
|
|
||||||
var env = new Environment(ENV_ID, "prod", "Production", false, true, null, null, "slate", null);
|
var env = new Environment(ENV_ID, "prod", "Production", false, true, null, null, "slate", null, 1, 1, 1);
|
||||||
when(envRepo.findById(ENV_ID)).thenReturn(Optional.of(env));
|
when(envRepo.findById(ENV_ID)).thenReturn(Optional.of(env));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -28,7 +28,7 @@ class NotificationContextBuilderTest {
|
|||||||
// ---- helpers ----
|
// ---- helpers ----
|
||||||
|
|
||||||
private Environment env() {
|
private Environment env() {
|
||||||
return new Environment(ENV_ID, "prod", "Production", true, true, Map.of(), 5, "slate", Instant.EPOCH);
|
return new Environment(ENV_ID, "prod", "Production", true, true, Map.of(), 5, "slate", Instant.EPOCH, 1, 1, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
private AlertRule rule(ConditionKind kind) {
|
private AlertRule rule(ConditionKind kind) {
|
||||||
|
|||||||
@@ -115,6 +115,91 @@ class SchemaBootstrapIT extends AbstractPostgresIT {
|
|||||||
assertThat(isUnique).isTrue();
|
assertThat(isUnique).isTrue();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void licenseTableExists() {
|
||||||
|
// V5 migration: per-tenant license row, PK on tenant_id (one server = one tenant).
|
||||||
|
var rows = jdbcTemplate.queryForList("""
|
||||||
|
SELECT column_name, data_type, is_nullable
|
||||||
|
FROM information_schema.columns
|
||||||
|
WHERE table_name = 'license'
|
||||||
|
AND table_schema = current_schema()
|
||||||
|
""");
|
||||||
|
var byName = new java.util.HashMap<String, java.util.Map<String, Object>>();
|
||||||
|
for (var row : rows) {
|
||||||
|
byName.put((String) row.get("column_name"), row);
|
||||||
|
}
|
||||||
|
|
||||||
|
assertThat(byName).containsKeys(
|
||||||
|
"tenant_id", "license_id", "token", "installed_at",
|
||||||
|
"installed_by", "expires_at", "last_validated_at");
|
||||||
|
|
||||||
|
assertThat(byName.get("tenant_id").get("data_type")).isEqualTo("text");
|
||||||
|
assertThat(byName.get("tenant_id").get("is_nullable")).isEqualTo("NO");
|
||||||
|
|
||||||
|
assertThat(byName.get("license_id").get("data_type")).isEqualTo("uuid");
|
||||||
|
assertThat(byName.get("license_id").get("is_nullable")).isEqualTo("NO");
|
||||||
|
|
||||||
|
assertThat(byName.get("token").get("data_type")).isEqualTo("text");
|
||||||
|
assertThat(byName.get("token").get("is_nullable")).isEqualTo("NO");
|
||||||
|
|
||||||
|
assertThat(byName.get("installed_at").get("data_type"))
|
||||||
|
.isEqualTo("timestamp with time zone");
|
||||||
|
assertThat(byName.get("installed_at").get("is_nullable")).isEqualTo("NO");
|
||||||
|
|
||||||
|
assertThat(byName.get("installed_by").get("data_type")).isEqualTo("text");
|
||||||
|
assertThat(byName.get("installed_by").get("is_nullable")).isEqualTo("NO");
|
||||||
|
|
||||||
|
assertThat(byName.get("expires_at").get("data_type"))
|
||||||
|
.isEqualTo("timestamp with time zone");
|
||||||
|
assertThat(byName.get("expires_at").get("is_nullable")).isEqualTo("NO");
|
||||||
|
|
||||||
|
assertThat(byName.get("last_validated_at").get("data_type"))
|
||||||
|
.isEqualTo("timestamp with time zone");
|
||||||
|
assertThat(byName.get("last_validated_at").get("is_nullable")).isEqualTo("NO");
|
||||||
|
|
||||||
|
// PK: tenant_id (one row per tenant).
|
||||||
|
var pkCols = jdbcTemplate.queryForList("""
|
||||||
|
SELECT a.attname AS column_name
|
||||||
|
FROM pg_index i
|
||||||
|
JOIN pg_class c ON c.oid = i.indrelid
|
||||||
|
JOIN pg_namespace n ON n.oid = c.relnamespace
|
||||||
|
JOIN pg_attribute a ON a.attrelid = c.oid AND a.attnum = ANY(i.indkey)
|
||||||
|
WHERE c.relname = 'license'
|
||||||
|
AND n.nspname = current_schema()
|
||||||
|
AND i.indisprimary
|
||||||
|
""", String.class);
|
||||||
|
assertThat(pkCols).containsExactly("tenant_id");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void environmentsHasRetentionColumns() {
|
||||||
|
// V5 migration adds three retention day columns, NOT NULL DEFAULT 1.
|
||||||
|
var rows = jdbcTemplate.queryForList("""
|
||||||
|
SELECT column_name, data_type, is_nullable, column_default
|
||||||
|
FROM information_schema.columns
|
||||||
|
WHERE table_name = 'environments'
|
||||||
|
AND table_schema = current_schema()
|
||||||
|
AND column_name IN
|
||||||
|
('execution_retention_days','log_retention_days','metric_retention_days')
|
||||||
|
""");
|
||||||
|
var byName = new java.util.HashMap<String, java.util.Map<String, Object>>();
|
||||||
|
for (var row : rows) {
|
||||||
|
byName.put((String) row.get("column_name"), row);
|
||||||
|
}
|
||||||
|
assertThat(byName).containsKeys(
|
||||||
|
"execution_retention_days", "log_retention_days", "metric_retention_days");
|
||||||
|
|
||||||
|
for (var col : java.util.List.of(
|
||||||
|
"execution_retention_days", "log_retention_days", "metric_retention_days")) {
|
||||||
|
assertThat(byName.get(col).get("data_type"))
|
||||||
|
.as("%s data_type", col).isEqualTo("integer");
|
||||||
|
assertThat(byName.get(col).get("is_nullable"))
|
||||||
|
.as("%s is_nullable", col).isEqualTo("NO");
|
||||||
|
assertThat((String) byName.get(col).get("column_default"))
|
||||||
|
.as("%s column_default", col).isEqualTo("1");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
void deleting_environment_cascades_alerting_rows() {
|
void deleting_environment_cascades_alerting_rows() {
|
||||||
testEnvId = UUID.randomUUID();
|
testEnvId = UUID.randomUUID();
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import com.cameleer.server.app.AbstractPostgresIT;
|
|||||||
import com.cameleer.server.app.TestSecurityHelper;
|
import com.cameleer.server.app.TestSecurityHelper;
|
||||||
import com.fasterxml.jackson.databind.JsonNode;
|
import com.fasterxml.jackson.databind.JsonNode;
|
||||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||||
|
import org.junit.jupiter.api.AfterEach;
|
||||||
import org.junit.jupiter.api.BeforeEach;
|
import org.junit.jupiter.api.BeforeEach;
|
||||||
import org.junit.jupiter.api.Test;
|
import org.junit.jupiter.api.Test;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
@@ -13,6 +14,7 @@ import org.springframework.http.HttpMethod;
|
|||||||
import org.springframework.http.HttpStatus;
|
import org.springframework.http.HttpStatus;
|
||||||
import org.springframework.http.ResponseEntity;
|
import org.springframework.http.ResponseEntity;
|
||||||
|
|
||||||
|
import java.util.Map;
|
||||||
import java.util.UUID;
|
import java.util.UUID;
|
||||||
|
|
||||||
import static org.assertj.core.api.Assertions.assertThat;
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
@@ -33,10 +35,18 @@ class AgentCommandControllerIT extends AbstractPostgresIT {
|
|||||||
|
|
||||||
@BeforeEach
|
@BeforeEach
|
||||||
void setUp() {
|
void setUp() {
|
||||||
|
// Lift max_agents cap so this IT (which registers many agents per test) isn't gated
|
||||||
|
// by license enforcement. Cap behaviour itself is exercised by AgentCapEnforcementIT.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(Map.of("max_agents", 100));
|
||||||
agentJwt = securityHelper.registerTestAgent("test-agent-command-it");
|
agentJwt = securityHelper.registerTestAgent("test-agent-command-it");
|
||||||
operatorJwt = securityHelper.operatorToken();
|
operatorJwt = securityHelper.operatorToken();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@AfterEach
|
||||||
|
void tearDown() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
|
}
|
||||||
|
|
||||||
private ResponseEntity<String> registerAgent(String agentId, String name, String application) {
|
private ResponseEntity<String> registerAgent(String agentId, String name, String application) {
|
||||||
String json = """
|
String json = """
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import com.cameleer.server.app.AbstractPostgresIT;
|
|||||||
import com.cameleer.server.app.TestSecurityHelper;
|
import com.cameleer.server.app.TestSecurityHelper;
|
||||||
import com.fasterxml.jackson.databind.JsonNode;
|
import com.fasterxml.jackson.databind.JsonNode;
|
||||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||||
|
import org.junit.jupiter.api.AfterEach;
|
||||||
import org.junit.jupiter.api.BeforeEach;
|
import org.junit.jupiter.api.BeforeEach;
|
||||||
import org.junit.jupiter.api.Test;
|
import org.junit.jupiter.api.Test;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
@@ -13,6 +14,8 @@ import org.springframework.http.HttpMethod;
|
|||||||
import org.springframework.http.HttpStatus;
|
import org.springframework.http.HttpStatus;
|
||||||
import org.springframework.http.ResponseEntity;
|
import org.springframework.http.ResponseEntity;
|
||||||
|
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
import static org.assertj.core.api.Assertions.assertThat;
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
|
|
||||||
class AgentRegistrationControllerIT extends AbstractPostgresIT {
|
class AgentRegistrationControllerIT extends AbstractPostgresIT {
|
||||||
@@ -31,10 +34,18 @@ class AgentRegistrationControllerIT extends AbstractPostgresIT {
|
|||||||
|
|
||||||
@BeforeEach
|
@BeforeEach
|
||||||
void setUp() {
|
void setUp() {
|
||||||
|
// Lift max_agents cap so this IT (which registers many agents per test) isn't gated
|
||||||
|
// by license enforcement. Cap behaviour itself is exercised by AgentCapEnforcementIT.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(Map.of("max_agents", 100));
|
||||||
jwt = securityHelper.registerTestAgent("test-agent-registration-it");
|
jwt = securityHelper.registerTestAgent("test-agent-registration-it");
|
||||||
viewerJwt = securityHelper.viewerToken();
|
viewerJwt = securityHelper.viewerToken();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@AfterEach
|
||||||
|
void tearDown() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
|
}
|
||||||
|
|
||||||
private ResponseEntity<String> registerAgent(String agentId, String name) {
|
private ResponseEntity<String> registerAgent(String agentId, String name) {
|
||||||
String json = """
|
String json = """
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package com.cameleer.server.app.controller;
|
|||||||
import com.cameleer.server.app.AbstractPostgresIT;
|
import com.cameleer.server.app.AbstractPostgresIT;
|
||||||
import com.cameleer.server.app.TestSecurityHelper;
|
import com.cameleer.server.app.TestSecurityHelper;
|
||||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||||
|
import org.junit.jupiter.api.AfterEach;
|
||||||
import org.junit.jupiter.api.BeforeEach;
|
import org.junit.jupiter.api.BeforeEach;
|
||||||
import org.junit.jupiter.api.Test;
|
import org.junit.jupiter.api.Test;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
@@ -20,6 +21,7 @@ import java.net.http.HttpResponse;
|
|||||||
import java.time.Duration;
|
import java.time.Duration;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
import java.util.UUID;
|
import java.util.UUID;
|
||||||
import java.util.concurrent.CompletableFuture;
|
import java.util.concurrent.CompletableFuture;
|
||||||
import java.util.concurrent.CountDownLatch;
|
import java.util.concurrent.CountDownLatch;
|
||||||
@@ -48,10 +50,18 @@ class AgentSseControllerIT extends AbstractPostgresIT {
|
|||||||
|
|
||||||
@BeforeEach
|
@BeforeEach
|
||||||
void setUp() {
|
void setUp() {
|
||||||
|
// Lift max_agents cap so this IT (which registers many agents per test) isn't gated
|
||||||
|
// by license enforcement. Cap behaviour itself is exercised by AgentCapEnforcementIT.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(Map.of("max_agents", 100));
|
||||||
jwt = securityHelper.registerTestAgent("test-agent-sse-it");
|
jwt = securityHelper.registerTestAgent("test-agent-sse-it");
|
||||||
operatorJwt = securityHelper.operatorToken();
|
operatorJwt = securityHelper.operatorToken();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@AfterEach
|
||||||
|
void tearDown() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
|
}
|
||||||
|
|
||||||
private ResponseEntity<String> registerAgent(String agentId, String name, String application) {
|
private ResponseEntity<String> registerAgent(String agentId, String name, String application) {
|
||||||
String json = """
|
String json = """
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -0,0 +1,96 @@
|
|||||||
|
package com.cameleer.server.app.controller;
|
||||||
|
|
||||||
|
import com.cameleer.server.app.AbstractPostgresIT;
|
||||||
|
import com.cameleer.server.app.dto.AuthCapabilitiesResponse;
|
||||||
|
import com.cameleer.server.core.security.OidcConfig;
|
||||||
|
import com.cameleer.server.core.security.OidcConfigRepository;
|
||||||
|
import org.junit.jupiter.api.BeforeEach;
|
||||||
|
import org.junit.jupiter.api.Test;
|
||||||
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
|
import org.springframework.boot.test.mock.mockito.MockBean;
|
||||||
|
import org.springframework.boot.test.web.client.TestRestTemplate;
|
||||||
|
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Optional;
|
||||||
|
|
||||||
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
|
import static org.mockito.Mockito.when;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Integration tests for {@link com.cameleer.server.app.security.AuthCapabilitiesController}.
|
||||||
|
* Mocks {@link OidcConfigRepository} so each test controls the OIDC state it observes.
|
||||||
|
*/
|
||||||
|
class AuthCapabilitiesControllerIT extends AbstractPostgresIT {
|
||||||
|
|
||||||
|
@Autowired private TestRestTemplate restTemplate;
|
||||||
|
@MockBean private OidcConfigRepository oidcConfigRepository;
|
||||||
|
|
||||||
|
@BeforeEach
|
||||||
|
void resetMock() {
|
||||||
|
when(oidcConfigRepository.find()).thenReturn(Optional.empty());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void noOidcConfig_returnsLocalOnlyCaps() {
|
||||||
|
var resp = restTemplate.getForEntity("/api/v1/auth/capabilities", AuthCapabilitiesResponse.class);
|
||||||
|
|
||||||
|
assertThat(resp.getStatusCode().value()).isEqualTo(200);
|
||||||
|
assertThat(resp.getBody()).isNotNull();
|
||||||
|
assertThat(resp.getBody().oidc().enabled()).isFalse();
|
||||||
|
assertThat(resp.getBody().oidc().providerName()).isEqualTo("");
|
||||||
|
assertThat(resp.getBody().oidc().primary()).isFalse();
|
||||||
|
assertThat(resp.getBody().localAccounts().enabled()).isTrue();
|
||||||
|
assertThat(resp.getBody().localAccounts().adminRecoveryOnly()).isFalse();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void oidcDisabledRow_behavesLikeAbsent() {
|
||||||
|
OidcConfig disabled = new OidcConfig(false, "https://auth.logto.example/", "client-id", "secret",
|
||||||
|
"roles", List.of("VIEWER"), true, "name", "sub", "", List.of());
|
||||||
|
when(oidcConfigRepository.find()).thenReturn(Optional.of(disabled));
|
||||||
|
|
||||||
|
var resp = restTemplate.getForEntity("/api/v1/auth/capabilities", AuthCapabilitiesResponse.class);
|
||||||
|
|
||||||
|
assertThat(resp.getStatusCode().value()).isEqualTo(200);
|
||||||
|
assertThat(resp.getBody().oidc().enabled()).isFalse();
|
||||||
|
assertThat(resp.getBody().oidc().providerName()).isEqualTo("");
|
||||||
|
assertThat(resp.getBody().oidc().primary()).isFalse();
|
||||||
|
assertThat(resp.getBody().localAccounts().adminRecoveryOnly()).isFalse();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void oidcEnabledLogto_returnsOidcPrimaryWithProviderName() {
|
||||||
|
OidcConfig enabled = new OidcConfig(true, "https://auth.logto.example/", "client-id", "secret",
|
||||||
|
"roles", List.of("VIEWER"), true, "name", "sub", "", List.of());
|
||||||
|
when(oidcConfigRepository.find()).thenReturn(Optional.of(enabled));
|
||||||
|
|
||||||
|
var resp = restTemplate.getForEntity("/api/v1/auth/capabilities", AuthCapabilitiesResponse.class);
|
||||||
|
|
||||||
|
assertThat(resp.getStatusCode().value()).isEqualTo(200);
|
||||||
|
assertThat(resp.getBody().oidc().enabled()).isTrue();
|
||||||
|
assertThat(resp.getBody().oidc().providerName()).isEqualTo("Logto");
|
||||||
|
assertThat(resp.getBody().oidc().primary()).isTrue();
|
||||||
|
assertThat(resp.getBody().localAccounts().enabled()).isTrue();
|
||||||
|
assertThat(resp.getBody().localAccounts().adminRecoveryOnly()).isTrue();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void oidcEnabledUnknownProvider_returnsGenericProviderName() {
|
||||||
|
OidcConfig enabled = new OidcConfig(true, "https://idp.example.com/", "client-id", "secret",
|
||||||
|
"roles", List.of("VIEWER"), true, "name", "sub", "", List.of());
|
||||||
|
when(oidcConfigRepository.find()).thenReturn(Optional.of(enabled));
|
||||||
|
|
||||||
|
var resp = restTemplate.getForEntity("/api/v1/auth/capabilities", AuthCapabilitiesResponse.class);
|
||||||
|
|
||||||
|
assertThat(resp.getStatusCode().value()).isEqualTo(200);
|
||||||
|
assertThat(resp.getBody().oidc().providerName()).isEqualTo("Single Sign-On");
|
||||||
|
assertThat(resp.getBody().oidc().primary()).isTrue();
|
||||||
|
assertThat(resp.getBody().localAccounts().adminRecoveryOnly()).isTrue();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void endpointIsUnauthenticated() {
|
||||||
|
var resp = restTemplate.getForEntity("/api/v1/auth/capabilities", String.class);
|
||||||
|
assertThat(resp.getStatusCode().value()).isEqualTo(200);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -3,6 +3,7 @@ package com.cameleer.server.app.controller;
|
|||||||
import com.cameleer.server.app.AbstractPostgresIT;
|
import com.cameleer.server.app.AbstractPostgresIT;
|
||||||
import com.cameleer.server.app.TestSecurityHelper;
|
import com.cameleer.server.app.TestSecurityHelper;
|
||||||
import com.cameleer.server.core.ingestion.IngestionService;
|
import com.cameleer.server.core.ingestion.IngestionService;
|
||||||
|
import org.junit.jupiter.api.AfterEach;
|
||||||
import org.junit.jupiter.api.BeforeEach;
|
import org.junit.jupiter.api.BeforeEach;
|
||||||
import org.junit.jupiter.api.Test;
|
import org.junit.jupiter.api.Test;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
@@ -13,6 +14,8 @@ import org.springframework.http.HttpStatus;
|
|||||||
import org.springframework.http.ResponseEntity;
|
import org.springframework.http.ResponseEntity;
|
||||||
import org.springframework.test.context.TestPropertySource;
|
import org.springframework.test.context.TestPropertySource;
|
||||||
|
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
import static org.assertj.core.api.Assertions.assertThat;
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -45,10 +48,18 @@ class BackpressureIT extends AbstractPostgresIT {
|
|||||||
|
|
||||||
@BeforeEach
|
@BeforeEach
|
||||||
void setUp() {
|
void setUp() {
|
||||||
|
// Lift max_agents cap so this IT (which registers an agent) isn't gated by license
|
||||||
|
// enforcement. Cap behaviour itself is exercised by AgentCapEnforcementIT.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(Map.of("max_agents", 100));
|
||||||
String jwt = securityHelper.registerTestAgent("test-agent-backpressure-it");
|
String jwt = securityHelper.registerTestAgent("test-agent-backpressure-it");
|
||||||
authHeaders = securityHelper.authHeaders(jwt);
|
authHeaders = securityHelper.authHeaders(jwt);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@AfterEach
|
||||||
|
void tearDown() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
void whenMetricsBufferFull_returns503WithRetryAfter() {
|
void whenMetricsBufferFull_returns503WithRetryAfter() {
|
||||||
// Fill the metrics buffer completely with a batch of 5
|
// Fill the metrics buffer completely with a batch of 5
|
||||||
|
|||||||
@@ -46,6 +46,16 @@ class DeploymentControllerAuditIT extends AbstractPostgresIT {
|
|||||||
aliceJwt = securityHelper.createToken("user:alice", "user", List.of("OPERATOR"));
|
aliceJwt = securityHelper.createToken("user:alice", "user", List.of("OPERATOR"));
|
||||||
adminJwt = securityHelper.adminToken();
|
adminJwt = securityHelper.adminToken();
|
||||||
|
|
||||||
|
// Lift default-tier caps so the promote-target env + apps can be created via the API,
|
||||||
|
// and lift compute caps so the async DeploymentExecutor PRE_FLIGHT cap check (T24)
|
||||||
|
// doesn't fail the deployment before audit assertions complete on long-running runs.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(Map.of(
|
||||||
|
"max_environments", 100,
|
||||||
|
"max_apps", 100,
|
||||||
|
"max_total_cpu_millis", 100_000,
|
||||||
|
"max_total_memory_mb", 100_000,
|
||||||
|
"max_total_replicas", 100));
|
||||||
|
|
||||||
// Clean up deployment-related tables and test-created environments
|
// Clean up deployment-related tables and test-created environments
|
||||||
jdbcTemplate.update("DELETE FROM deployments");
|
jdbcTemplate.update("DELETE FROM deployments");
|
||||||
jdbcTemplate.update("DELETE FROM app_versions");
|
jdbcTemplate.update("DELETE FROM app_versions");
|
||||||
@@ -90,6 +100,11 @@ class DeploymentControllerAuditIT extends AbstractPostgresIT {
|
|||||||
versionId = objectMapper.readTree(versionResponse.getBody()).path("id").asText();
|
versionId = objectMapper.readTree(versionResponse.getBody()).path("id").asText();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@org.junit.jupiter.api.AfterEach
|
||||||
|
void tearDown() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
void deploy_writes_audit_row_with_DEPLOYMENT_category_and_alice_actor() throws Exception {
|
void deploy_writes_audit_row_with_DEPLOYMENT_category_and_alice_actor() throws Exception {
|
||||||
String json = String.format("""
|
String json = String.format("""
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import com.cameleer.server.app.AbstractPostgresIT;
|
|||||||
import com.cameleer.server.app.TestSecurityHelper;
|
import com.cameleer.server.app.TestSecurityHelper;
|
||||||
import com.fasterxml.jackson.databind.JsonNode;
|
import com.fasterxml.jackson.databind.JsonNode;
|
||||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||||
|
import org.junit.jupiter.api.AfterAll;
|
||||||
import org.junit.jupiter.api.BeforeAll;
|
import org.junit.jupiter.api.BeforeAll;
|
||||||
import org.junit.jupiter.api.Test;
|
import org.junit.jupiter.api.Test;
|
||||||
import org.junit.jupiter.api.TestInstance;
|
import org.junit.jupiter.api.TestInstance;
|
||||||
@@ -15,6 +16,8 @@ import org.springframework.http.HttpMethod;
|
|||||||
import org.springframework.http.HttpStatus;
|
import org.springframework.http.HttpStatus;
|
||||||
import org.springframework.http.ResponseEntity;
|
import org.springframework.http.ResponseEntity;
|
||||||
|
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||||
import static org.assertj.core.api.Assertions.assertThat;
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
import static org.awaitility.Awaitility.await;
|
import static org.awaitility.Awaitility.await;
|
||||||
@@ -49,6 +52,9 @@ class DetailControllerIT extends AbstractPostgresIT {
|
|||||||
*/
|
*/
|
||||||
@BeforeAll
|
@BeforeAll
|
||||||
void seedTestData() {
|
void seedTestData() {
|
||||||
|
// Lift max_agents cap so this IT (which registers an agent) isn't gated by license
|
||||||
|
// enforcement. Cap behaviour itself is exercised by AgentCapEnforcementIT.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(Map.of("max_agents", 100));
|
||||||
jwt = securityHelper.registerTestAgent("test-agent-detail-it");
|
jwt = securityHelper.registerTestAgent("test-agent-detail-it");
|
||||||
viewerJwt = securityHelper.viewerToken();
|
viewerJwt = securityHelper.viewerToken();
|
||||||
|
|
||||||
@@ -231,4 +237,9 @@ class DetailControllerIT extends AbstractPostgresIT {
|
|||||||
new HttpEntity<>(headers),
|
new HttpEntity<>(headers),
|
||||||
String.class);
|
String.class);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@AfterAll
|
||||||
|
void tearDown() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package com.cameleer.server.app.controller;
|
|||||||
|
|
||||||
import com.cameleer.server.app.AbstractPostgresIT;
|
import com.cameleer.server.app.AbstractPostgresIT;
|
||||||
import com.cameleer.server.app.TestSecurityHelper;
|
import com.cameleer.server.app.TestSecurityHelper;
|
||||||
|
import org.junit.jupiter.api.AfterEach;
|
||||||
import org.junit.jupiter.api.BeforeEach;
|
import org.junit.jupiter.api.BeforeEach;
|
||||||
import org.junit.jupiter.api.Test;
|
import org.junit.jupiter.api.Test;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
@@ -12,6 +13,8 @@ import org.springframework.http.HttpMethod;
|
|||||||
import org.springframework.http.HttpStatus;
|
import org.springframework.http.HttpStatus;
|
||||||
import org.springframework.http.ResponseEntity;
|
import org.springframework.http.ResponseEntity;
|
||||||
|
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||||
import static org.assertj.core.api.Assertions.assertThat;
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
import static org.awaitility.Awaitility.await;
|
import static org.awaitility.Awaitility.await;
|
||||||
@@ -29,11 +32,19 @@ class DiagramControllerIT extends AbstractPostgresIT {
|
|||||||
|
|
||||||
@BeforeEach
|
@BeforeEach
|
||||||
void setUp() {
|
void setUp() {
|
||||||
|
// Lift max_agents cap so this IT (which registers an agent) isn't gated by license
|
||||||
|
// enforcement. Cap behaviour itself is exercised by AgentCapEnforcementIT.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(Map.of("max_agents", 100));
|
||||||
String jwt = securityHelper.registerTestAgent("test-agent-diagram-it");
|
String jwt = securityHelper.registerTestAgent("test-agent-diagram-it");
|
||||||
authHeaders = securityHelper.authHeaders(jwt);
|
authHeaders = securityHelper.authHeaders(jwt);
|
||||||
viewerHeaders = securityHelper.authHeadersNoBody(securityHelper.viewerToken());
|
viewerHeaders = securityHelper.authHeadersNoBody(securityHelper.viewerToken());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@AfterEach
|
||||||
|
void tearDown() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
void postSingleDiagram_returns202() {
|
void postSingleDiagram_returns202() {
|
||||||
String json = """
|
String json = """
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import com.cameleer.server.app.AbstractPostgresIT;
|
|||||||
import com.cameleer.server.app.TestSecurityHelper;
|
import com.cameleer.server.app.TestSecurityHelper;
|
||||||
import com.fasterxml.jackson.databind.JsonNode;
|
import com.fasterxml.jackson.databind.JsonNode;
|
||||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||||
|
import org.junit.jupiter.api.AfterEach;
|
||||||
import org.junit.jupiter.api.BeforeEach;
|
import org.junit.jupiter.api.BeforeEach;
|
||||||
import org.junit.jupiter.api.Test;
|
import org.junit.jupiter.api.Test;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
@@ -14,6 +15,8 @@ import org.springframework.http.HttpMethod;
|
|||||||
import org.springframework.http.HttpStatus;
|
import org.springframework.http.HttpStatus;
|
||||||
import org.springframework.http.ResponseEntity;
|
import org.springframework.http.ResponseEntity;
|
||||||
|
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||||
import static org.assertj.core.api.Assertions.assertThat;
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
import static org.awaitility.Awaitility.await;
|
import static org.awaitility.Awaitility.await;
|
||||||
@@ -41,6 +44,9 @@ class DiagramRenderControllerIT extends AbstractPostgresIT {
|
|||||||
|
|
||||||
@BeforeEach
|
@BeforeEach
|
||||||
void seedDiagram() {
|
void seedDiagram() {
|
||||||
|
// Lift max_agents cap so this IT (which registers an agent) isn't gated by license
|
||||||
|
// enforcement. Cap behaviour itself is exercised by AgentCapEnforcementIT.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(Map.of("max_agents", 100));
|
||||||
jwt = securityHelper.registerTestAgent("test-agent-diagram-render-it");
|
jwt = securityHelper.registerTestAgent("test-agent-diagram-render-it");
|
||||||
viewerJwt = securityHelper.viewerToken();
|
viewerJwt = securityHelper.viewerToken();
|
||||||
|
|
||||||
@@ -115,6 +121,11 @@ class DiagramRenderControllerIT extends AbstractPostgresIT {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@AfterEach
|
||||||
|
void tearDown() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
void getSvg_withAcceptHeader_returnsSvg() {
|
void getSvg_withAcceptHeader_returnsSvg() {
|
||||||
HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt);
|
HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt);
|
||||||
@@ -166,6 +177,157 @@ class DiagramRenderControllerIT extends AbstractPostgresIT {
|
|||||||
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND);
|
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void findByAppAndRoute_returnsLatestDiagram_noLiveAgentPrereq() {
|
||||||
|
// The env-scoped /routes/{routeId}/diagram endpoint no longer depends
|
||||||
|
// on the agent registry — routes whose publishing agents have been
|
||||||
|
// removed must still resolve. The seed step stored a diagram for
|
||||||
|
// route "render-test-route" under app "test-group" / env "default",
|
||||||
|
// so the same lookup must succeed even though the registry-driven
|
||||||
|
// "find agents for app" path used to be a hard 404 prerequisite.
|
||||||
|
HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt);
|
||||||
|
headers.set("Accept", "application/json");
|
||||||
|
|
||||||
|
ResponseEntity<String> response = restTemplate.exchange(
|
||||||
|
"/api/v1/environments/default/apps/test-group/routes/render-test-route/diagram",
|
||||||
|
HttpMethod.GET,
|
||||||
|
new HttpEntity<>(headers),
|
||||||
|
String.class);
|
||||||
|
|
||||||
|
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.OK);
|
||||||
|
assertThat(response.getBody()).contains("nodes");
|
||||||
|
assertThat(response.getBody()).contains("edges");
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void findByAppAndRoute_returns404ForUnknownRoute() {
|
||||||
|
HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt);
|
||||||
|
headers.set("Accept", "application/json");
|
||||||
|
|
||||||
|
ResponseEntity<String> response = restTemplate.exchange(
|
||||||
|
"/api/v1/environments/default/apps/test-group/routes/nonexistent-route/diagram",
|
||||||
|
HttpMethod.GET,
|
||||||
|
new HttpEntity<>(headers),
|
||||||
|
String.class);
|
||||||
|
|
||||||
|
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void exchangeDiagramHash_pinsPointInTimeEvenAfterNewerVersion() throws Exception {
|
||||||
|
// Point-in-time guarantee: an execution's stored diagramContentHash
|
||||||
|
// must keep resolving to the route shape captured at execution time,
|
||||||
|
// even after a newer diagram version for the same route is stored.
|
||||||
|
// Content-hash addressing + never-delete of route_diagrams makes this
|
||||||
|
// automatic — this test locks the invariant in.
|
||||||
|
HttpHeaders viewerHeaders = securityHelper.authHeadersNoBody(viewerJwt);
|
||||||
|
viewerHeaders.set("Accept", "application/json");
|
||||||
|
|
||||||
|
// Snapshot the pinned v1 render via the flat content-hash endpoint
|
||||||
|
// BEFORE a newer version is stored, so the post-v2 fetch can compare
|
||||||
|
// byte-for-byte.
|
||||||
|
ResponseEntity<String> pinnedBefore = restTemplate.exchange(
|
||||||
|
"/api/v1/diagrams/{hash}/render",
|
||||||
|
HttpMethod.GET,
|
||||||
|
new HttpEntity<>(viewerHeaders),
|
||||||
|
String.class,
|
||||||
|
contentHash);
|
||||||
|
assertThat(pinnedBefore.getStatusCode()).isEqualTo(HttpStatus.OK);
|
||||||
|
|
||||||
|
// Also snapshot the by-route "latest" render for the same route.
|
||||||
|
ResponseEntity<String> latestBefore = restTemplate.exchange(
|
||||||
|
"/api/v1/environments/default/apps/test-group/routes/render-test-route/diagram",
|
||||||
|
HttpMethod.GET,
|
||||||
|
new HttpEntity<>(viewerHeaders),
|
||||||
|
String.class);
|
||||||
|
assertThat(latestBefore.getStatusCode()).isEqualTo(HttpStatus.OK);
|
||||||
|
|
||||||
|
// Store a materially different v2 for the same (app, env, route).
|
||||||
|
// The renderer walks the `root` tree (not the legacy flat `nodes`
|
||||||
|
// list that the seed payload uses), so v2 uses the tree shape and
|
||||||
|
// will render non-empty output — letting us detect the version flip.
|
||||||
|
String newerDiagramJson = """
|
||||||
|
{
|
||||||
|
"routeId": "render-test-route",
|
||||||
|
"description": "v2 with extra step",
|
||||||
|
"version": 2,
|
||||||
|
"root": {
|
||||||
|
"id": "n1",
|
||||||
|
"type": "ENDPOINT",
|
||||||
|
"label": "timer:tick-v2",
|
||||||
|
"children": [
|
||||||
|
{
|
||||||
|
"id": "n2",
|
||||||
|
"type": "BEAN",
|
||||||
|
"label": "myBeanV2",
|
||||||
|
"children": [
|
||||||
|
{
|
||||||
|
"id": "n3",
|
||||||
|
"type": "TO",
|
||||||
|
"label": "log:out-v2",
|
||||||
|
"children": [
|
||||||
|
{"id": "n4", "type": "TO", "label": "log:audit"}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"edges": [
|
||||||
|
{"source": "n1", "target": "n2", "edgeType": "FLOW"},
|
||||||
|
{"source": "n2", "target": "n3", "edgeType": "FLOW"},
|
||||||
|
{"source": "n3", "target": "n4", "edgeType": "FLOW"}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
""";
|
||||||
|
restTemplate.postForEntity(
|
||||||
|
"/api/v1/data/diagrams",
|
||||||
|
new HttpEntity<>(newerDiagramJson, securityHelper.authHeaders(jwt)),
|
||||||
|
String.class);
|
||||||
|
|
||||||
|
// Invariant 1: The execution's stored diagramContentHash must not
|
||||||
|
// drift — exchanges stay pinned to the version captured at ingest.
|
||||||
|
ResponseEntity<String> detailAfter = restTemplate.exchange(
|
||||||
|
"/api/v1/environments/default/executions?correlationId=render-probe-corr",
|
||||||
|
HttpMethod.GET,
|
||||||
|
new HttpEntity<>(viewerHeaders),
|
||||||
|
String.class);
|
||||||
|
JsonNode search = objectMapper.readTree(detailAfter.getBody());
|
||||||
|
String execId = search.get("data").get(0).get("executionId").asText();
|
||||||
|
ResponseEntity<String> exec = restTemplate.exchange(
|
||||||
|
"/api/v1/executions/" + execId,
|
||||||
|
HttpMethod.GET,
|
||||||
|
new HttpEntity<>(viewerHeaders),
|
||||||
|
String.class);
|
||||||
|
JsonNode execBody = objectMapper.readTree(exec.getBody());
|
||||||
|
assertThat(execBody.path("diagramContentHash").asText()).isEqualTo(contentHash);
|
||||||
|
|
||||||
|
// Invariant 2: The pinned render (by H1) must be byte-identical
|
||||||
|
// before and after v2 is stored — content-hash addressing is stable.
|
||||||
|
ResponseEntity<String> pinnedAfter = restTemplate.exchange(
|
||||||
|
"/api/v1/diagrams/{hash}/render",
|
||||||
|
HttpMethod.GET,
|
||||||
|
new HttpEntity<>(viewerHeaders),
|
||||||
|
String.class,
|
||||||
|
contentHash);
|
||||||
|
assertThat(pinnedAfter.getStatusCode()).isEqualTo(HttpStatus.OK);
|
||||||
|
assertThat(pinnedAfter.getBody()).isEqualTo(pinnedBefore.getBody());
|
||||||
|
|
||||||
|
// Invariant 3: The by-route "latest" endpoint must now surface v2,
|
||||||
|
// so its body differs from the pre-v2 snapshot. Retry briefly to
|
||||||
|
// absorb the diagram-ingest flush path.
|
||||||
|
await().atMost(20, SECONDS).untilAsserted(() -> {
|
||||||
|
ResponseEntity<String> latestAfter = restTemplate.exchange(
|
||||||
|
"/api/v1/environments/default/apps/test-group/routes/render-test-route/diagram",
|
||||||
|
HttpMethod.GET,
|
||||||
|
new HttpEntity<>(viewerHeaders),
|
||||||
|
String.class);
|
||||||
|
assertThat(latestAfter.getStatusCode()).isEqualTo(HttpStatus.OK);
|
||||||
|
assertThat(latestAfter.getBody()).isNotEqualTo(latestBefore.getBody());
|
||||||
|
assertThat(latestAfter.getBody()).contains("myBeanV2");
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
void getWithNoAcceptHeader_defaultsToSvg() {
|
void getWithNoAcceptHeader_defaultsToSvg() {
|
||||||
HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt);
|
HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt);
|
||||||
|
|||||||
@@ -35,8 +35,21 @@ class EnvironmentAdminControllerIT extends AbstractPostgresIT {
|
|||||||
adminJwt = securityHelper.adminToken();
|
adminJwt = securityHelper.adminToken();
|
||||||
viewerJwt = securityHelper.viewerToken();
|
viewerJwt = securityHelper.viewerToken();
|
||||||
operatorJwt = securityHelper.operatorToken();
|
operatorJwt = securityHelper.operatorToken();
|
||||||
// Clean up test environments (keep default)
|
// Clean up test environments (keep default). Strip dependents first — sibling ITs
|
||||||
|
// (e.g., DeploymentControllerAuditIT) may have left deployments/apps that FK back to
|
||||||
|
// their non-default envs when the testcontainer is reused across runs.
|
||||||
|
jdbcTemplate.update("DELETE FROM deployments");
|
||||||
|
jdbcTemplate.update("DELETE FROM app_versions");
|
||||||
|
jdbcTemplate.update("DELETE FROM apps");
|
||||||
jdbcTemplate.update("DELETE FROM environments WHERE slug != 'default'");
|
jdbcTemplate.update("DELETE FROM environments WHERE slug != 'default'");
|
||||||
|
// Lift max_environments cap so existing IT scenarios that POST envs through the
|
||||||
|
// controller succeed; the cap itself is exercised by EnvironmentCapEnforcementIT.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(java.util.Map.of("max_environments", 100));
|
||||||
|
}
|
||||||
|
|
||||||
|
@org.junit.jupiter.api.AfterEach
|
||||||
|
void tearDown() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
@@ -92,6 +105,25 @@ class EnvironmentAdminControllerIT extends AbstractPostgresIT {
|
|||||||
assertThat(body.has("id")).isTrue();
|
assertThat(body.has("id")).isTrue();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
void createEnvironment_surfacesRetentionDefaults() throws Exception {
|
||||||
|
// V5 columns default to 1 (matching the default-tier license cap). T26 surfaces
|
||||||
|
// them as int fields on the Environment record; the read DTO must expose them.
|
||||||
|
String json = """
|
||||||
|
{"slug": "retention-defaults", "displayName": "Retention", "production": false}
|
||||||
|
""";
|
||||||
|
ResponseEntity<String> response = restTemplate.exchange(
|
||||||
|
"/api/v1/admin/environments", HttpMethod.POST,
|
||||||
|
new HttpEntity<>(json, securityHelper.authHeaders(adminJwt)),
|
||||||
|
String.class);
|
||||||
|
|
||||||
|
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.CREATED);
|
||||||
|
JsonNode body = objectMapper.readTree(response.getBody());
|
||||||
|
assertThat(body.path("executionRetentionDays").asInt()).isEqualTo(1);
|
||||||
|
assertThat(body.path("logRetentionDays").asInt()).isEqualTo(1);
|
||||||
|
assertThat(body.path("metricRetentionDays").asInt()).isEqualTo(1);
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
void updateEnvironment_withValidColor_persists() throws Exception {
|
void updateEnvironment_withValidColor_persists() throws Exception {
|
||||||
restTemplate.exchange(
|
restTemplate.exchange(
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import com.cameleer.server.app.AbstractPostgresIT;
|
|||||||
import com.cameleer.server.app.TestSecurityHelper;
|
import com.cameleer.server.app.TestSecurityHelper;
|
||||||
import com.fasterxml.jackson.databind.JsonNode;
|
import com.fasterxml.jackson.databind.JsonNode;
|
||||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||||
|
import org.junit.jupiter.api.AfterEach;
|
||||||
import org.junit.jupiter.api.BeforeEach;
|
import org.junit.jupiter.api.BeforeEach;
|
||||||
import org.junit.jupiter.api.Test;
|
import org.junit.jupiter.api.Test;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
@@ -14,6 +15,8 @@ import org.springframework.http.HttpMethod;
|
|||||||
import org.springframework.http.HttpStatus;
|
import org.springframework.http.HttpStatus;
|
||||||
import org.springframework.http.ResponseEntity;
|
import org.springframework.http.ResponseEntity;
|
||||||
|
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
import static java.util.concurrent.TimeUnit.SECONDS;
|
import static java.util.concurrent.TimeUnit.SECONDS;
|
||||||
import static org.assertj.core.api.Assertions.assertThat;
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
import static org.awaitility.Awaitility.await;
|
import static org.awaitility.Awaitility.await;
|
||||||
@@ -38,11 +41,19 @@ class ExecutionControllerIT extends AbstractPostgresIT {
|
|||||||
|
|
||||||
@BeforeEach
|
@BeforeEach
|
||||||
void setUp() {
|
void setUp() {
|
||||||
|
// Lift max_agents cap so this IT (which registers an agent) isn't gated by license
|
||||||
|
// enforcement. Cap behaviour itself is exercised by AgentCapEnforcementIT.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(Map.of("max_agents", 100));
|
||||||
String jwt = securityHelper.registerTestAgent("test-agent-execution-it");
|
String jwt = securityHelper.registerTestAgent("test-agent-execution-it");
|
||||||
authHeaders = securityHelper.authHeaders(jwt);
|
authHeaders = securityHelper.authHeaders(jwt);
|
||||||
viewerHeaders = securityHelper.authHeadersNoBody(securityHelper.viewerToken());
|
viewerHeaders = securityHelper.authHeadersNoBody(securityHelper.viewerToken());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@AfterEach
|
||||||
|
void tearDown() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
void postSingleExecution_returns202() {
|
void postSingleExecution_returns202() {
|
||||||
String json = """
|
String json = """
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package com.cameleer.server.app.controller;
|
|||||||
|
|
||||||
import com.cameleer.server.app.AbstractPostgresIT;
|
import com.cameleer.server.app.AbstractPostgresIT;
|
||||||
import com.cameleer.server.app.TestSecurityHelper;
|
import com.cameleer.server.app.TestSecurityHelper;
|
||||||
|
import org.junit.jupiter.api.AfterEach;
|
||||||
import org.junit.jupiter.api.BeforeEach;
|
import org.junit.jupiter.api.BeforeEach;
|
||||||
import org.junit.jupiter.api.Test;
|
import org.junit.jupiter.api.Test;
|
||||||
import org.springframework.beans.factory.annotation.Autowired;
|
import org.springframework.beans.factory.annotation.Autowired;
|
||||||
@@ -10,6 +11,8 @@ import org.springframework.http.HttpEntity;
|
|||||||
import org.springframework.http.HttpHeaders;
|
import org.springframework.http.HttpHeaders;
|
||||||
import org.springframework.http.HttpMethod;
|
import org.springframework.http.HttpMethod;
|
||||||
|
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
import static org.assertj.core.api.Assertions.assertThat;
|
import static org.assertj.core.api.Assertions.assertThat;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -28,9 +31,17 @@ class ForwardCompatIT extends AbstractPostgresIT {
|
|||||||
|
|
||||||
@BeforeEach
|
@BeforeEach
|
||||||
void setUp() {
|
void setUp() {
|
||||||
|
// Lift max_agents cap so this IT (which registers an agent) isn't gated by license
|
||||||
|
// enforcement. Cap behaviour itself is exercised by AgentCapEnforcementIT.
|
||||||
|
securityHelper.installSyntheticUnsignedLicense(Map.of("max_agents", 100));
|
||||||
jwt = securityHelper.registerTestAgent("test-agent-forward-compat-it");
|
jwt = securityHelper.registerTestAgent("test-agent-forward-compat-it");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@AfterEach
|
||||||
|
void tearDown() {
|
||||||
|
securityHelper.clearTestLicense();
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
void unknownFieldsInRequestBodyDoNotCauseError() {
|
void unknownFieldsInRequestBodyDoNotCauseError() {
|
||||||
// Valid ExecutionChunk plus extra fields a future agent version
|
// Valid ExecutionChunk plus extra fields a future agent version
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user