chore: rename cameleer3 to cameleer
Rename Java packages from com.cameleer3 to com.cameleer, module directories from cameleer3-* to cameleer-*, and all references throughout workflows, Dockerfiles, docs, migrations, and pom.xml. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
106
cameleer-server-app/src/main/resources/application.yml
Normal file
106
cameleer-server-app/src/main/resources/application.yml
Normal file
@@ -0,0 +1,106 @@
|
||||
# Cameleer Server configuration.
# Every externally tunable value takes the form ${ENV_VAR:default}; the nesting
# below is reconstructed to match the env-var key paths (CAMELEER_SERVER_...).
server:
  port: 8081

spring:
  servlet:
    multipart:
      # 200MB request caps — presumably sized for application JAR uploads; confirm.
      max-file-size: 200MB
      max-request-size: 200MB
  datasource:
    # Per-tenant Postgres schema: schema name and ApplicationName both derive
    # from cameleer.server.tenant.id (see below).
    url: ${SPRING_DATASOURCE_URL:jdbc:postgresql://localhost:5432/cameleer?currentSchema=tenant_${cameleer.server.tenant.id}&ApplicationName=tenant_${cameleer.server.tenant.id}}
    username: ${SPRING_DATASOURCE_USERNAME:cameleer}
    password: ${SPRING_DATASOURCE_PASSWORD:cameleer_dev}
    driver-class-name: org.postgresql.Driver
  flyway:
    enabled: true
    locations: classpath:db/migration
    create-schemas: true
  mvc:
    async:
      # -1 disables the async request timeout (long-lived streaming endpoints).
      request-timeout: -1
  jackson:
    serialization:
      write-dates-as-timestamps: false
    deserialization:
      fail-on-unknown-properties: false

cameleer:
  server:
    tenant:
      id: ${CAMELEER_SERVER_TENANT_ID:default}
    agentregistry:
      # Agent liveness thresholds, all in milliseconds.
      heartbeatintervalms: 30000
      stalethresholdms: 90000
      deadthresholdms: 300000
      pingintervalms: 15000
      commandexpiryms: 60000
      lifecyclecheckintervalms: 10000
    ingestion:
      buffercapacity: 50000
      batchsize: 5000
      flushintervalms: 5000
      bodysizelimit: ${CAMELEER_SERVER_INGESTION_BODYSIZELIMIT:16384}
    runtime:
      enabled: ${CAMELEER_SERVER_RUNTIME_ENABLED:true}
      jarstoragepath: ${CAMELEER_SERVER_RUNTIME_JARSTORAGEPATH:/data/jars}
      baseimage: ${CAMELEER_SERVER_RUNTIME_BASEIMAGE:gitea.siegeln.net/cameleer/cameleer-runtime-base:latest}
      dockernetwork: ${CAMELEER_SERVER_RUNTIME_DOCKERNETWORK:cameleer}
      agenthealthport: 9464
      healthchecktimeout: 60
      container:
        memorylimit: ${CAMELEER_SERVER_RUNTIME_CONTAINER_MEMORYLIMIT:512m}
        cpushares: ${CAMELEER_SERVER_RUNTIME_CONTAINER_CPUSHARES:512}
      routingmode: ${CAMELEER_SERVER_RUNTIME_ROUTINGMODE:path}
      routingdomain: ${CAMELEER_SERVER_RUNTIME_ROUTINGDOMAIN:localhost}
      serverurl: ${CAMELEER_SERVER_RUNTIME_SERVERURL:}
      jardockervolume: ${CAMELEER_SERVER_RUNTIME_JARDOCKERVOLUME:}
    indexer:
      debouncems: ${CAMELEER_SERVER_INDEXER_DEBOUNCEMS:2000}
      queuesize: ${CAMELEER_SERVER_INDEXER_QUEUESIZE:10000}
    catalog:
      discoveryttldays: ${CAMELEER_SERVER_CATALOG_DISCOVERYTTLDAYS:7}
    license:
      token: ${CAMELEER_SERVER_LICENSE_TOKEN:}
      file: ${CAMELEER_SERVER_LICENSE_FILE:}
      publickey: ${CAMELEER_SERVER_LICENSE_PUBLICKEY:}
    security:
      accesstokenexpiryms: 3600000     # 1 hour
      refreshtokenexpiryms: 604800000  # 7 days
      bootstraptoken: ${CAMELEER_SERVER_SECURITY_BOOTSTRAPTOKEN:}
      bootstraptokenprevious: ${CAMELEER_SERVER_SECURITY_BOOTSTRAPTOKENPREVIOUS:}
      # NOTE(review): admin/admin defaults are dev-only; must be overridden in production.
      uiuser: ${CAMELEER_SERVER_SECURITY_UIUSER:admin}
      uipassword: ${CAMELEER_SERVER_SECURITY_UIPASSWORD:admin}
      uiorigin: ${CAMELEER_SERVER_SECURITY_UIORIGIN:http://localhost:5173}
      jwtsecret: ${CAMELEER_SERVER_SECURITY_JWTSECRET:}
      corsallowedorigins: ${CAMELEER_SERVER_SECURITY_CORSALLOWEDORIGINS:}
      infrastructureendpoints: ${CAMELEER_SERVER_SECURITY_INFRASTRUCTUREENDPOINTS:true}
      oidc:
        issueruri: ${CAMELEER_SERVER_SECURITY_OIDC_ISSUERURI:}
        jwkseturi: ${CAMELEER_SERVER_SECURITY_OIDC_JWKSETURI:}
        audience: ${CAMELEER_SERVER_SECURITY_OIDC_AUDIENCE:}
        tlsskipverify: ${CAMELEER_SERVER_SECURITY_OIDC_TLSSKIPVERIFY:false}
    clickhouse:
      url: ${CAMELEER_SERVER_CLICKHOUSE_URL:jdbc:clickhouse://localhost:8123/cameleer}
      username: ${CAMELEER_SERVER_CLICKHOUSE_USERNAME:default}
      password: ${CAMELEER_SERVER_CLICKHOUSE_PASSWORD:}

springdoc:
  api-docs:
    path: /api/v1/api-docs
  swagger-ui:
    path: /api/v1/swagger-ui

logging:
  level:
    com.clickhouse: INFO
    org.apache.hc.client5: WARN

management:
  endpoints:
    web:
      base-path: /api/v1
      exposure:
        include: health,prometheus
  endpoint:
    health:
      show-details: always
|
||||
387
cameleer-server-app/src/main/resources/clickhouse/init.sql
Normal file
387
cameleer-server-app/src/main/resources/clickhouse/init.sql
Normal file
@@ -0,0 +1,387 @@
|
||||
-- ClickHouse schema initialization (single file, idempotent).
-- All statements use IF NOT EXISTS / IF EXISTS for safe re-execution on every startup.
-- No DROP or INSERT statements -- this file is safe for repeated runs.

-- ── Agent Metrics ───────────────────────────────────────────────────────
-- One row per (instance, metric, sample time). Hot filter columns are
-- LowCardinality(String) to dictionary-encode their small value sets.

CREATE TABLE IF NOT EXISTS agent_metrics (
    tenant_id          LowCardinality(String) DEFAULT 'default',
    collected_at       DateTime64(3),                       -- sample time reported by the agent
    environment        LowCardinality(String) DEFAULT 'default',
    instance_id        LowCardinality(String),
    metric_name        LowCardinality(String),
    metric_value       Float64,
    tags               Map(String, String) DEFAULT map(),
    server_received_at DateTime64(3) DEFAULT now64(3)       -- ingest time, for lag diagnostics
)
ENGINE = MergeTree()
PARTITION BY (tenant_id, toYYYYMM(collected_at))
ORDER BY (tenant_id, collected_at, environment, instance_id, metric_name)
TTL toDateTime(collected_at) + INTERVAL 365 DAY DELETE
SETTINGS index_granularity = 8192;
|
||||
|
||||
-- ── Executions ──────────────────────────────────────────────────────────
-- One row per route execution. ReplacingMergeTree(_version) lets the server
-- re-send an execution (e.g. RUNNING -> FAILED) and keep only the latest version.

CREATE TABLE IF NOT EXISTS executions (
    tenant_id            LowCardinality(String) DEFAULT 'default',
    execution_id         String,
    start_time           DateTime64(3),
    _version             UInt64 DEFAULT 1,        -- dedup key for ReplacingMergeTree
    route_id             LowCardinality(String),
    instance_id          LowCardinality(String),
    application_id       LowCardinality(String),
    environment          LowCardinality(String) DEFAULT 'default',
    status               LowCardinality(String),
    correlation_id       String DEFAULT '',
    exchange_id          String DEFAULT '',
    end_time             Nullable(DateTime64(3)),
    duration_ms          Nullable(Int64),
    error_message        String DEFAULT '',
    error_stacktrace     String DEFAULT '',
    error_type           LowCardinality(String) DEFAULT '',
    error_category       LowCardinality(String) DEFAULT '',
    root_cause_type      String DEFAULT '',
    root_cause_message   String DEFAULT '',
    diagram_content_hash String DEFAULT '',       -- links to route_diagrams.content_hash
    engine_level         LowCardinality(String) DEFAULT '',
    input_body           String DEFAULT '',
    output_body          String DEFAULT '',
    input_headers        String DEFAULT '',
    output_headers       String DEFAULT '',
    attributes           String DEFAULT '',
    trace_id             String DEFAULT '',
    span_id              String DEFAULT '',
    has_trace_data       Bool DEFAULT false,
    is_replay            Bool DEFAULT false,
    original_exchange_id String DEFAULT '',
    replay_exchange_id   String DEFAULT '',

    -- Concatenated free-text haystack; MATERIALIZED so it costs insert time,
    -- not query time, and can carry the n-gram index below.
    _search_text String MATERIALIZED
        concat(execution_id, ' ', correlation_id, ' ', exchange_id, ' ', route_id,
               ' ', error_message, ' ', error_stacktrace, ' ', attributes,
               ' ', input_body, ' ', output_body, ' ', input_headers,
               ' ', output_headers, ' ', root_cause_message),

    -- Skip indexes: ngram bloom filters for substring search, set/bloom for point filters.
    INDEX idx_search  _search_text TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4,
    INDEX idx_error   error_message TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4,
    INDEX idx_bodies  concat(input_body, ' ', output_body) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4,
    INDEX idx_headers concat(input_headers, ' ', output_headers) TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4,
    INDEX idx_status  status TYPE set(10) GRANULARITY 1,
    INDEX idx_corr    correlation_id TYPE bloom_filter(0.01) GRANULARITY 4
)
ENGINE = ReplacingMergeTree(_version)
PARTITION BY (tenant_id, toYYYYMM(start_time))
ORDER BY (tenant_id, start_time, environment, application_id, route_id, execution_id)
TTL toDateTime(start_time) + INTERVAL 365 DAY DELETE
SETTINGS index_granularity = 8192;
|
||||
|
||||
-- ── Processor Executions ────────────────────────────────────────────────
-- One row per processor step within an execution; (execution_id, seq) identifies
-- a step, parent_seq/parent_processor_id encode the step tree.

CREATE TABLE IF NOT EXISTS processor_executions (
    tenant_id             LowCardinality(String) DEFAULT 'default',
    execution_id          String,
    seq                   UInt32,
    parent_seq            Nullable(UInt32),
    parent_processor_id   String DEFAULT '',
    processor_id          String,
    processor_type        LowCardinality(String),
    start_time            DateTime64(3),
    route_id              LowCardinality(String),
    application_id        LowCardinality(String),
    environment           LowCardinality(String) DEFAULT 'default',
    iteration             Nullable(Int32),        -- loop/split iteration index, when applicable
    iteration_size        Nullable(Int32),
    status                LowCardinality(String),
    end_time              Nullable(DateTime64(3)),
    duration_ms           Nullable(Int64),
    error_message         String DEFAULT '',
    error_stacktrace      String DEFAULT '',
    error_type            LowCardinality(String) DEFAULT '',
    error_category        LowCardinality(String) DEFAULT '',
    root_cause_type       String DEFAULT '',
    root_cause_message    String DEFAULT '',
    input_body            String DEFAULT '',
    output_body           String DEFAULT '',
    input_headers         String DEFAULT '',
    output_headers        String DEFAULT '',
    attributes            String DEFAULT '',
    resolved_endpoint_uri String DEFAULT '',
    circuit_breaker_state LowCardinality(String) DEFAULT '',
    fallback_triggered    Bool DEFAULT false,
    filter_matched        Bool DEFAULT false,
    duplicate_message     Bool DEFAULT false,

    -- Insert-time search haystack (see executions._search_text for rationale).
    _search_text String MATERIALIZED
        concat(error_message, ' ', error_stacktrace, ' ', attributes,
               ' ', input_body, ' ', output_body, ' ', input_headers, ' ', output_headers),

    INDEX idx_search  _search_text TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4,
    INDEX idx_exec_id execution_id TYPE bloom_filter(0.01) GRANULARITY 4
)
ENGINE = MergeTree()
PARTITION BY (tenant_id, toYYYYMM(start_time))
ORDER BY (tenant_id, start_time, environment, application_id, route_id, execution_id, seq)
TTL toDateTime(start_time) + INTERVAL 365 DAY DELETE
SETTINGS index_granularity = 8192;
|
||||
|
||||
-- ── Stats: Materialized Views + AggregatingMergeTree ────────────────────
-- Counts use uniq(execution_id) to deduplicate chunk retries.
-- Processor counts use uniq(concat(execution_id, seq)) to also preserve loop iterations.

-- stats_1m_all: global per-minute rollup.

CREATE TABLE IF NOT EXISTS stats_1m_all (
    tenant_id     LowCardinality(String),
    bucket        DateTime,                                   -- minute bucket (toStartOfMinute)
    environment   LowCardinality(String) DEFAULT 'default',
    total_count   AggregateFunction(uniq, String),
    failed_count  AggregateFunction(uniqIf, String, UInt8),
    running_count AggregateFunction(uniqIf, String, UInt8),
    duration_sum  AggregateFunction(sum, Nullable(Int64)),
    duration_max  AggregateFunction(max, Nullable(Int64)),
    p99_duration  AggregateFunction(quantile(0.99), Nullable(Int64))
)
ENGINE = AggregatingMergeTree()
PARTITION BY (tenant_id, toYYYYMM(bucket))
ORDER BY (tenant_id, bucket, environment)
TTL bucket + INTERVAL 365 DAY DELETE;

CREATE MATERIALIZED VIEW IF NOT EXISTS stats_1m_all_mv TO stats_1m_all AS
SELECT
    tenant_id,
    toStartOfMinute(start_time) AS bucket,
    environment,
    uniqState(execution_id) AS total_count,
    uniqIfState(execution_id, status = 'FAILED') AS failed_count,
    uniqIfState(execution_id, status = 'RUNNING') AS running_count,
    sumState(duration_ms) AS duration_sum,
    maxState(duration_ms) AS duration_max,
    quantileState(0.99)(duration_ms) AS p99_duration
FROM executions
GROUP BY tenant_id, bucket, environment;
|
||||
|
||||
-- stats_1m_app: per-application per-minute rollup (same shape as stats_1m_all
-- plus the application_id dimension).

CREATE TABLE IF NOT EXISTS stats_1m_app (
    tenant_id      LowCardinality(String),
    application_id LowCardinality(String),
    bucket         DateTime,
    environment    LowCardinality(String) DEFAULT 'default',
    total_count    AggregateFunction(uniq, String),
    failed_count   AggregateFunction(uniqIf, String, UInt8),
    running_count  AggregateFunction(uniqIf, String, UInt8),
    duration_sum   AggregateFunction(sum, Nullable(Int64)),
    duration_max   AggregateFunction(max, Nullable(Int64)),
    p99_duration   AggregateFunction(quantile(0.99), Nullable(Int64))
)
ENGINE = AggregatingMergeTree()
PARTITION BY (tenant_id, toYYYYMM(bucket))
ORDER BY (tenant_id, bucket, environment, application_id)
TTL bucket + INTERVAL 365 DAY DELETE;

CREATE MATERIALIZED VIEW IF NOT EXISTS stats_1m_app_mv TO stats_1m_app AS
SELECT
    tenant_id,
    application_id,
    toStartOfMinute(start_time) AS bucket,
    environment,
    uniqState(execution_id) AS total_count,
    uniqIfState(execution_id, status = 'FAILED') AS failed_count,
    uniqIfState(execution_id, status = 'RUNNING') AS running_count,
    sumState(duration_ms) AS duration_sum,
    maxState(duration_ms) AS duration_max,
    quantileState(0.99)(duration_ms) AS p99_duration
FROM executions
GROUP BY tenant_id, application_id, bucket, environment;
|
||||
|
||||
-- stats_1m_route: per-route per-minute rollup.

CREATE TABLE IF NOT EXISTS stats_1m_route (
    tenant_id      LowCardinality(String),
    application_id LowCardinality(String),
    route_id       LowCardinality(String),
    bucket         DateTime,
    environment    LowCardinality(String) DEFAULT 'default',
    total_count    AggregateFunction(uniq, String),
    failed_count   AggregateFunction(uniqIf, String, UInt8),
    running_count  AggregateFunction(uniqIf, String, UInt8),
    duration_sum   AggregateFunction(sum, Nullable(Int64)),
    duration_max   AggregateFunction(max, Nullable(Int64)),
    p99_duration   AggregateFunction(quantile(0.99), Nullable(Int64))
)
ENGINE = AggregatingMergeTree()
PARTITION BY (tenant_id, toYYYYMM(bucket))
ORDER BY (tenant_id, bucket, environment, application_id, route_id)
TTL bucket + INTERVAL 365 DAY DELETE;

CREATE MATERIALIZED VIEW IF NOT EXISTS stats_1m_route_mv TO stats_1m_route AS
SELECT
    tenant_id,
    application_id,
    route_id,
    toStartOfMinute(start_time) AS bucket,
    environment,
    uniqState(execution_id) AS total_count,
    uniqIfState(execution_id, status = 'FAILED') AS failed_count,
    uniqIfState(execution_id, status = 'RUNNING') AS running_count,
    sumState(duration_ms) AS duration_sum,
    maxState(duration_ms) AS duration_max,
    quantileState(0.99)(duration_ms) AS p99_duration
FROM executions
GROUP BY tenant_id, application_id, route_id, bucket, environment;
|
||||
|
||||
-- stats_1m_processor: per-processor-type per-minute rollup, sourced from
-- processor_executions. Keys on concat(execution_id, seq) so repeated loop
-- iterations of the same processor are each counted once.

CREATE TABLE IF NOT EXISTS stats_1m_processor (
    tenant_id      LowCardinality(String),
    application_id LowCardinality(String),
    processor_type LowCardinality(String),
    bucket         DateTime,
    environment    LowCardinality(String) DEFAULT 'default',
    total_count    AggregateFunction(uniq, String),
    failed_count   AggregateFunction(uniqIf, String, UInt8),
    duration_sum   AggregateFunction(sum, Nullable(Int64)),
    duration_max   AggregateFunction(max, Nullable(Int64)),
    p99_duration   AggregateFunction(quantile(0.99), Nullable(Int64))
)
ENGINE = AggregatingMergeTree()
PARTITION BY (tenant_id, toYYYYMM(bucket))
ORDER BY (tenant_id, bucket, environment, application_id, processor_type)
TTL bucket + INTERVAL 365 DAY DELETE;

CREATE MATERIALIZED VIEW IF NOT EXISTS stats_1m_processor_mv TO stats_1m_processor AS
SELECT
    tenant_id,
    application_id,
    processor_type,
    toStartOfMinute(start_time) AS bucket,
    environment,
    uniqState(concat(execution_id, toString(seq))) AS total_count,
    uniqIfState(concat(execution_id, toString(seq)), status = 'FAILED') AS failed_count,
    sumState(duration_ms) AS duration_sum,
    maxState(duration_ms) AS duration_max,
    quantileState(0.99)(duration_ms) AS p99_duration
FROM processor_executions
GROUP BY tenant_id, application_id, processor_type, bucket, environment;
|
||||
|
||||
-- stats_1m_processor_detail: per-processor-id per-minute rollup (finest
-- processor granularity; adds route_id and processor_id dimensions).

CREATE TABLE IF NOT EXISTS stats_1m_processor_detail (
    tenant_id      LowCardinality(String),
    application_id LowCardinality(String),
    route_id       LowCardinality(String),
    processor_id   String,
    processor_type LowCardinality(String),
    bucket         DateTime,
    environment    LowCardinality(String) DEFAULT 'default',
    total_count    AggregateFunction(uniq, String),
    failed_count   AggregateFunction(uniqIf, String, UInt8),
    duration_sum   AggregateFunction(sum, Nullable(Int64)),
    duration_max   AggregateFunction(max, Nullable(Int64)),
    p99_duration   AggregateFunction(quantile(0.99), Nullable(Int64))
)
ENGINE = AggregatingMergeTree()
PARTITION BY (tenant_id, toYYYYMM(bucket))
ORDER BY (tenant_id, bucket, environment, application_id, route_id, processor_id, processor_type)
TTL bucket + INTERVAL 365 DAY DELETE;

CREATE MATERIALIZED VIEW IF NOT EXISTS stats_1m_processor_detail_mv TO stats_1m_processor_detail AS
SELECT
    tenant_id,
    application_id,
    route_id,
    processor_id,
    processor_type,
    toStartOfMinute(start_time) AS bucket,
    environment,
    uniqState(concat(execution_id, toString(seq))) AS total_count,
    uniqIfState(concat(execution_id, toString(seq)), status = 'FAILED') AS failed_count,
    sumState(duration_ms) AS duration_sum,
    maxState(duration_ms) AS duration_max,
    quantileState(0.99)(duration_ms) AS p99_duration
FROM processor_executions
GROUP BY tenant_id, application_id, route_id, processor_id, processor_type, bucket, environment;
|
||||
|
||||
-- ── Route Diagrams ──────────────────────────────────────────────────────
-- Content-addressed route definitions; ReplacingMergeTree(created_at) keeps
-- the newest row per (tenant, env, route, instance, hash). No TTL: diagrams
-- are small and referenced by executions.diagram_content_hash.

CREATE TABLE IF NOT EXISTS route_diagrams (
    tenant_id      LowCardinality(String) DEFAULT 'default',
    content_hash   String,
    route_id       LowCardinality(String),
    instance_id    LowCardinality(String),
    application_id LowCardinality(String),
    environment    LowCardinality(String) DEFAULT 'default',
    definition     String,
    created_at     DateTime64(3) DEFAULT now64(3)
)
ENGINE = ReplacingMergeTree(created_at)
ORDER BY (tenant_id, environment, route_id, instance_id, content_hash)
SETTINGS index_granularity = 8192;
|
||||
|
||||
-- ── Agent Events ────────────────────────────────────────────────────────
-- Lifecycle events emitted for agents (event_type + free-form detail).

CREATE TABLE IF NOT EXISTS agent_events (
    tenant_id      LowCardinality(String) DEFAULT 'default',
    timestamp      DateTime64(3) DEFAULT now64(3),
    environment    LowCardinality(String) DEFAULT 'default',
    instance_id    LowCardinality(String),
    application_id LowCardinality(String),
    event_type     LowCardinality(String),
    detail         String DEFAULT ''
)
ENGINE = MergeTree()
PARTITION BY (tenant_id, toYYYYMM(timestamp))
ORDER BY (tenant_id, timestamp, environment, instance_id)
TTL toDateTime(timestamp) + INTERVAL 365 DAY DELETE;
|
||||
|
||||
-- ── Logs ────────────────────────────────────────────────────────────────
-- Application log lines shipped by agents; n-gram indexes give substring
-- search over message and stack_trace.

CREATE TABLE IF NOT EXISTS logs (
    tenant_id   LowCardinality(String) DEFAULT 'default',
    timestamp   DateTime64(3),
    environment LowCardinality(String) DEFAULT 'default',
    application LowCardinality(String),
    instance_id LowCardinality(String),
    level       LowCardinality(String),
    logger_name LowCardinality(String) DEFAULT '',
    message     String,
    thread_name LowCardinality(String) DEFAULT '',
    stack_trace String DEFAULT '',
    exchange_id String DEFAULT '',
    mdc         Map(String, String) DEFAULT map(),

    INDEX idx_msg   message TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4,
    INDEX idx_stack stack_trace TYPE ngrambf_v1(3, 256, 2, 0) GRANULARITY 4,
    INDEX idx_level level TYPE set(10) GRANULARITY 1
)
ENGINE = MergeTree()
PARTITION BY (tenant_id, toYYYYMM(timestamp))
ORDER BY (tenant_id, timestamp, environment, application, instance_id)
TTL toDateTime(timestamp) + INTERVAL 365 DAY DELETE
SETTINGS index_granularity = 8192;

-- Additive, idempotent evolution of an already-deployed table (this file is
-- re-run on every startup, so the column is added via ALTER, not in CREATE).
ALTER TABLE logs ADD COLUMN IF NOT EXISTS source LowCardinality(String) DEFAULT 'app';
|
||||
|
||||
-- ── Exchange Properties (added for agent protocol v2) ──────────────────
-- Idempotent additive columns on already-deployed tables (safe to re-run).
ALTER TABLE executions ADD COLUMN IF NOT EXISTS input_properties  String DEFAULT '';
ALTER TABLE executions ADD COLUMN IF NOT EXISTS output_properties String DEFAULT '';
ALTER TABLE processor_executions ADD COLUMN IF NOT EXISTS input_properties  String DEFAULT '';
ALTER TABLE processor_executions ADD COLUMN IF NOT EXISTS output_properties String DEFAULT '';
|
||||
|
||||
-- ── Usage Events ────────────────────────────────────────────────────────
-- One row per authenticated API request (who called what, outcome, latency).

CREATE TABLE IF NOT EXISTS usage_events (
    tenant_id    LowCardinality(String) DEFAULT 'default',
    timestamp    DateTime64(3) DEFAULT now64(3),
    environment  LowCardinality(String) DEFAULT 'default',
    username     LowCardinality(String),
    method       LowCardinality(String),
    path         String,
    normalized   LowCardinality(String),   -- presumably the templated route path; confirm against the interceptor
    status_code  UInt16,
    duration_ms  UInt32,
    query_params String DEFAULT ''
)
ENGINE = MergeTree()
-- Monthly partitioning added for consistency with every other time-series
-- table in this file; it makes TTL expiry drop whole parts cheaply.
-- (CREATE IF NOT EXISTS means existing deployments keep their old layout.)
PARTITION BY (tenant_id, toYYYYMM(timestamp))
ORDER BY (tenant_id, timestamp, environment, username, normalized)
-- Explicit DELETE action (was implicit), matching the rest of this file.
TTL toDateTime(timestamp) + INTERVAL 90 DAY DELETE;
|
||||
@@ -0,0 +1,2 @@
|
||||
-- Record the runtime type and main class detected during JAR inspection.
ALTER TABLE app_versions ADD COLUMN detected_runtime_type VARCHAR;
ALTER TABLE app_versions ADD COLUMN detected_main_class VARCHAR;
|
||||
125
cameleer-server-app/src/main/resources/db/migration/V1__init.sql
Normal file
125
cameleer-server-app/src/main/resources/db/migration/V1__init.sql
Normal file
@@ -0,0 +1,125 @@
|
||||
-- V1__init.sql — PostgreSQL schema for Cameleer Server
-- PostgreSQL stores RBAC, configuration, and audit data only.
-- All observability data (executions, metrics, diagrams, logs, stats) is in ClickHouse.

-- =============================================================
-- RBAC
-- =============================================================

CREATE TABLE users (
    user_id       TEXT PRIMARY KEY,
    provider      TEXT NOT NULL,          -- auth provider the account came from
    email         TEXT,
    display_name  TEXT,
    password_hash TEXT,                   -- NULL for externally-authenticated users
    created_at    TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at    TIMESTAMPTZ NOT NULL DEFAULT now()
);

CREATE TABLE roles (
    id          UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    name        TEXT NOT NULL UNIQUE,
    description TEXT NOT NULL DEFAULT '',
    scope       TEXT NOT NULL DEFAULT 'custom',
    system      BOOLEAN NOT NULL DEFAULT false,   -- built-in roles cannot be removed
    created_at  TIMESTAMPTZ NOT NULL DEFAULT now()
);

-- Seed the four built-in roles with fixed UUIDs so later migrations and
-- code can reference them deterministically.
INSERT INTO roles (id, name, description, scope, system) VALUES
    ('00000000-0000-0000-0000-000000000001', 'AGENT',    'Agent registration and data ingestion',              'system-wide', true),
    ('00000000-0000-0000-0000-000000000002', 'VIEWER',   'Read-only access to dashboards and data',            'system-wide', true),
    ('00000000-0000-0000-0000-000000000003', 'OPERATOR', 'Operational commands (start/stop/configure agents)', 'system-wide', true),
    ('00000000-0000-0000-0000-000000000004', 'ADMIN',    'Full administrative access',                         'system-wide', true);
|
||||
|
||||
CREATE TABLE groups (
    id              UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    name            TEXT NOT NULL UNIQUE,
    -- Self-referencing hierarchy; deleting a parent detaches its children.
    parent_group_id UUID REFERENCES groups(id) ON DELETE SET NULL,
    created_at      TIMESTAMPTZ NOT NULL DEFAULT now()
);

-- Seed a built-in Admins group with a fixed UUID.
INSERT INTO groups (id, name) VALUES
    ('00000000-0000-0000-0000-000000000010', 'Admins');

CREATE TABLE group_roles (
    group_id UUID NOT NULL REFERENCES groups(id) ON DELETE CASCADE,
    role_id  UUID NOT NULL REFERENCES roles(id) ON DELETE CASCADE,
    PRIMARY KEY (group_id, role_id)
);

-- Admins group gets the built-in ADMIN role.
INSERT INTO group_roles (group_id, role_id) VALUES
    ('00000000-0000-0000-0000-000000000010', '00000000-0000-0000-0000-000000000004');

CREATE TABLE user_groups (
    user_id  TEXT NOT NULL REFERENCES users(user_id) ON DELETE CASCADE,
    group_id UUID NOT NULL REFERENCES groups(id) ON DELETE CASCADE,
    PRIMARY KEY (user_id, group_id)
);

CREATE TABLE user_roles (
    user_id TEXT NOT NULL REFERENCES users(user_id) ON DELETE CASCADE,
    role_id UUID NOT NULL REFERENCES roles(id) ON DELETE CASCADE,
    PRIMARY KEY (user_id, role_id)
);

-- Lookup paths used when resolving a user's effective roles.
CREATE INDEX idx_user_roles_user_id  ON user_roles(user_id);
CREATE INDEX idx_user_groups_user_id ON user_groups(user_id);
CREATE INDEX idx_group_roles_group_id ON group_roles(group_id);
CREATE INDEX idx_groups_parent ON groups(parent_group_id);
|
||||
|
||||
-- =============================================================
-- Server configuration
-- =============================================================

-- Key/value store for server-wide settings; values are arbitrary JSON.
CREATE TABLE server_config (
    config_key TEXT PRIMARY KEY,
    config_val JSONB NOT NULL,
    updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_by TEXT
);

-- =============================================================
-- Application configuration
-- =============================================================

-- Per-application JSON config with an optimistic-concurrency version counter.
CREATE TABLE application_config (
    application TEXT PRIMARY KEY,
    config_val  JSONB NOT NULL,
    version     INTEGER NOT NULL DEFAULT 1,
    updated_at  TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_by  TEXT
);

-- Per-application health/SLA thresholds (warn/crit pairs are percentages;
-- sla_threshold_ms is a latency bound in milliseconds).
CREATE TABLE app_settings (
    application_id    TEXT PRIMARY KEY,
    sla_threshold_ms  INTEGER NOT NULL DEFAULT 300,
    health_error_warn DOUBLE PRECISION NOT NULL DEFAULT 1.0,
    health_error_crit DOUBLE PRECISION NOT NULL DEFAULT 5.0,
    health_sla_warn   DOUBLE PRECISION NOT NULL DEFAULT 99.0,
    health_sla_crit   DOUBLE PRECISION NOT NULL DEFAULT 95.0,
    created_at        TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at        TIMESTAMPTZ NOT NULL DEFAULT now()
);
|
||||
|
||||
-- =============================================================
-- Audit log
-- =============================================================

-- Append-only record of administrative actions.
CREATE TABLE audit_log (
    id         BIGSERIAL PRIMARY KEY,
    timestamp  TIMESTAMPTZ NOT NULL DEFAULT now(),
    username   TEXT NOT NULL,
    action     TEXT NOT NULL,
    category   TEXT NOT NULL,
    target     TEXT,
    detail     JSONB,
    result     TEXT NOT NULL,
    ip_address TEXT,
    user_agent TEXT
);

-- DESC on timestamp matches the dominant "most recent first" listing query.
CREATE INDEX idx_audit_log_timestamp ON audit_log (timestamp DESC);
CREATE INDEX idx_audit_log_username  ON audit_log (username);
CREATE INDEX idx_audit_log_category  ON audit_log (category);
CREATE INDEX idx_audit_log_action    ON audit_log (action);
CREATE INDEX idx_audit_log_target    ON audit_log (target);
|
||||
@@ -0,0 +1,39 @@
|
||||
-- V2__claim_mapping.sql
-- Add origin tracking to assignment tables so OIDC-claim-driven ("managed")
-- assignments can coexist with direct ones.

ALTER TABLE user_roles ADD COLUMN origin TEXT NOT NULL DEFAULT 'direct';
ALTER TABLE user_roles ADD COLUMN mapping_id UUID;

ALTER TABLE user_groups ADD COLUMN origin TEXT NOT NULL DEFAULT 'direct';
ALTER TABLE user_groups ADD COLUMN mapping_id UUID;

-- Drop old primary keys (they don't include origin); the same (user, target)
-- pair may now exist once per origin.
ALTER TABLE user_roles DROP CONSTRAINT user_roles_pkey;
ALTER TABLE user_roles ADD PRIMARY KEY (user_id, role_id, origin);

ALTER TABLE user_groups DROP CONSTRAINT user_groups_pkey;
ALTER TABLE user_groups ADD PRIMARY KEY (user_id, group_id, origin);

-- Claim mapping rules: "when claim <claim> <match_type>-matches <match_value>,
-- perform <action> on <target>"; priority orders rule evaluation.
CREATE TABLE claim_mapping_rules (
    id          UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    claim       TEXT NOT NULL,
    match_type  TEXT NOT NULL,
    match_value TEXT NOT NULL,
    action      TEXT NOT NULL,
    target      TEXT NOT NULL,
    priority    INT NOT NULL DEFAULT 0,
    created_at  TIMESTAMPTZ NOT NULL DEFAULT now(),
    CONSTRAINT chk_match_type CHECK (match_type IN ('equals', 'contains', 'regex')),
    CONSTRAINT chk_action CHECK (action IN ('assignRole', 'addToGroup'))
);

-- Deleting a rule cascades away the assignments it produced.
ALTER TABLE user_roles ADD CONSTRAINT fk_user_roles_mapping
    FOREIGN KEY (mapping_id) REFERENCES claim_mapping_rules(id) ON DELETE CASCADE;
ALTER TABLE user_groups ADD CONSTRAINT fk_user_groups_mapping
    FOREIGN KEY (mapping_id) REFERENCES claim_mapping_rules(id) ON DELETE CASCADE;

-- Index for fast managed-assignment cleanup on login re-sync.
CREATE INDEX idx_user_roles_origin  ON user_roles(user_id, origin);
CREATE INDEX idx_user_groups_origin ON user_groups(user_id, origin);
|
||||
@@ -0,0 +1,54 @@
|
||||
-- V3__runtime_management.sql
-- Runtime management: environments, apps, app versions, deployments.

CREATE TABLE environments (
    id           UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    slug         VARCHAR(100) NOT NULL UNIQUE,   -- URL-safe identifier
    display_name VARCHAR(255) NOT NULL,
    status       VARCHAR(20) NOT NULL DEFAULT 'ACTIVE',
    created_at   TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at   TIMESTAMPTZ NOT NULL DEFAULT now()
);

CREATE TABLE apps (
    id             UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    environment_id UUID NOT NULL REFERENCES environments(id) ON DELETE CASCADE,
    slug           VARCHAR(100) NOT NULL,
    display_name   VARCHAR(255) NOT NULL,
    created_at     TIMESTAMPTZ NOT NULL DEFAULT now(),
    updated_at     TIMESTAMPTZ NOT NULL DEFAULT now(),
    -- slug is only unique within its environment
    UNIQUE(environment_id, slug)
);
CREATE INDEX idx_apps_environment_id ON apps(environment_id);
|
||||
|
||||
-- Uploaded JAR artifacts, one row per (app, version).
CREATE TABLE app_versions (
    id             UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    app_id         UUID NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
    version        INTEGER NOT NULL,
    jar_path       VARCHAR(500) NOT NULL,
    jar_checksum   VARCHAR(64) NOT NULL,    -- 64 hex chars; presumably SHA-256 — confirm
    jar_filename   VARCHAR(255),
    jar_size_bytes BIGINT,
    uploaded_at    TIMESTAMPTZ NOT NULL DEFAULT now(),
    UNIQUE(app_id, version)
);
CREATE INDEX idx_app_versions_app_id ON app_versions(app_id);

-- A deployment binds an app version to an environment and tracks its container.
CREATE TABLE deployments (
    id             UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    app_id         UUID NOT NULL REFERENCES apps(id) ON DELETE CASCADE,
    app_version_id UUID NOT NULL REFERENCES app_versions(id),
    environment_id UUID NOT NULL REFERENCES environments(id),
    status         VARCHAR(20) NOT NULL DEFAULT 'STARTING',
    container_id   VARCHAR(100),
    container_name VARCHAR(255),
    error_message  TEXT,
    deployed_at    TIMESTAMPTZ,
    stopped_at     TIMESTAMPTZ,
    created_at     TIMESTAMPTZ NOT NULL DEFAULT now()
);
CREATE INDEX idx_deployments_app_id ON deployments(app_id);
CREATE INDEX idx_deployments_env_id ON deployments(environment_id);

-- Default environment (standalone mode always has at least one).
INSERT INTO environments (slug, display_name) VALUES ('default', 'Default');
|
||||
@@ -0,0 +1,6 @@
|
||||
-- V4__environment_config.sql
-- Add production flag and enabled flag to environments; drop unused status column.

ALTER TABLE environments ADD COLUMN production BOOLEAN NOT NULL DEFAULT false;
ALTER TABLE environments ADD COLUMN enabled BOOLEAN NOT NULL DEFAULT true;
ALTER TABLE environments DROP COLUMN status;
|
||||
@@ -0,0 +1,4 @@
|
||||
-- Add container config to apps, plus per-environment defaults.
-- Both are JSONB objects; '{}' means "nothing overridden".
ALTER TABLE apps ADD COLUMN container_config JSONB NOT NULL DEFAULT '{}';

ALTER TABLE environments ADD COLUMN default_container_config JSONB NOT NULL DEFAULT '{}';
|
||||
@@ -0,0 +1 @@
|
||||
-- How many uploaded JAR versions to keep per app in this environment (nullable = unbounded).
ALTER TABLE environments ADD COLUMN jar_retention_count INTEGER DEFAULT 5;
|
||||
@@ -0,0 +1,12 @@
|
||||
-- Deployment orchestration: status model, replicas, strategies, progress tracking.

ALTER TABLE deployments ADD COLUMN target_state VARCHAR(20) NOT NULL DEFAULT 'RUNNING';
ALTER TABLE deployments ADD COLUMN deployment_strategy VARCHAR(20) NOT NULL DEFAULT 'BLUE_GREEN';
ALTER TABLE deployments ADD COLUMN replica_states JSONB NOT NULL DEFAULT '[]';
ALTER TABLE deployments ADD COLUMN deploy_stage VARCHAR(30);

-- Backfill existing deployments: deliberately a whole-table UPDATE (no WHERE) —
-- every pre-existing row needs a target_state derived from its current status.
UPDATE deployments SET target_state = CASE
    WHEN status = 'STOPPED' THEN 'STOPPED'
    ELSE 'RUNNING'
END;
|
||||
@@ -0,0 +1 @@
|
||||
-- Snapshot of the effective container config captured at deploy time (nullable for old rows).
ALTER TABLE deployments ADD COLUMN resolved_config JSONB;
|
||||
@@ -0,0 +1,3 @@
|
||||
-- Account lockout and token revocation support; IF NOT EXISTS keeps the
-- migration safe if the columns were already hot-fixed in place.
ALTER TABLE users ADD COLUMN IF NOT EXISTS failed_login_attempts INTEGER NOT NULL DEFAULT 0;
ALTER TABLE users ADD COLUMN IF NOT EXISTS locked_until TIMESTAMPTZ;
ALTER TABLE users ADD COLUMN IF NOT EXISTS token_revoked_before TIMESTAMPTZ;
|
||||
Reference in New Issue
Block a user