From 5bed108d3b0827dde2d42dbd0733363202c60e97 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:13:45 +0100 Subject: [PATCH 01/32] chore: swap ClickHouse deps for PostgreSQL, Flyway, OpenSearch Co-Authored-By: Claude Sonnet 4.6 --- cameleer3-server-app/pom.xml | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) diff --git a/cameleer3-server-app/pom.xml b/cameleer3-server-app/pom.xml index 9ef6d3e8..9194dc02 100644 --- a/cameleer3-server-app/pom.xml +++ b/cameleer3-server-app/pom.xml @@ -36,10 +36,26 @@ spring-boot-starter-jdbc - com.clickhouse - clickhouse-jdbc - 0.9.7 - all + org.postgresql + postgresql + + + org.flywaydb + flyway-core + + + org.flywaydb + flyway-database-postgresql + + + org.opensearch.client + opensearch-java + 2.19.0 + + + org.opensearch.client + opensearch-rest-client + 2.19.0 org.springdoc @@ -96,8 +112,13 @@ org.testcontainers - testcontainers-clickhouse - 2.0.3 + postgresql + test + + + org.opensearch + opensearch-testcontainers + 2.1.1 test From 8a637df65cfd81661268c6300d920eca05f0677d Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:13:53 +0100 Subject: [PATCH 02/32] feat: add Flyway migrations for PostgreSQL/TimescaleDB schema Co-Authored-By: Claude Sonnet 4.6 --- .../resources/db/migration/V1__extensions.sql | 2 + .../resources/db/migration/V2__executions.sql | 25 ++++++ .../db/migration/V3__processor_executions.sql | 28 +++++++ .../db/migration/V4__agent_metrics.sql | 12 +++ .../db/migration/V5__route_diagrams.sql | 9 ++ .../main/resources/db/migration/V6__users.sql | 9 ++ .../db/migration/V7__oidc_config.sql | 12 +++ .../migration/V8__continuous_aggregates.sql | 83 +++++++++++++++++++ 8 files changed, 180 insertions(+) create mode 100644 cameleer3-server-app/src/main/resources/db/migration/V1__extensions.sql create mode 100644 cameleer3-server-app/src/main/resources/db/migration/V2__executions.sql create mode 100644 cameleer3-server-app/src/main/resources/db/migration/V3__processor_executions.sql create mode 100644 cameleer3-server-app/src/main/resources/db/migration/V4__agent_metrics.sql create mode 100644 cameleer3-server-app/src/main/resources/db/migration/V5__route_diagrams.sql create mode 100644 cameleer3-server-app/src/main/resources/db/migration/V6__users.sql create mode 100644 cameleer3-server-app/src/main/resources/db/migration/V7__oidc_config.sql create mode 100644 cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql diff --git a/cameleer3-server-app/src/main/resources/db/migration/V1__extensions.sql b/cameleer3-server-app/src/main/resources/db/migration/V1__extensions.sql new file mode 100644 index 00000000..26970d8f --- /dev/null +++ b/cameleer3-server-app/src/main/resources/db/migration/V1__extensions.sql @@ -0,0 +1,2 @@ +CREATE EXTENSION IF NOT EXISTS timescaledb; +CREATE EXTENSION IF NOT EXISTS timescaledb_toolkit; diff --git a/cameleer3-server-app/src/main/resources/db/migration/V2__executions.sql b/cameleer3-server-app/src/main/resources/db/migration/V2__executions.sql new file mode 100644 index 00000000..e1eeb2fe --- /dev/null +++ b/cameleer3-server-app/src/main/resources/db/migration/V2__executions.sql @@ -0,0 +1,25 @@ +CREATE TABLE executions ( + execution_id TEXT NOT NULL, + route_id TEXT NOT NULL, + agent_id TEXT NOT NULL, + group_name TEXT NOT NULL, + status TEXT NOT NULL, + correlation_id TEXT, + exchange_id TEXT, + start_time TIMESTAMPTZ NOT NULL, + end_time TIMESTAMPTZ, + duration_ms BIGINT, + error_message TEXT, + error_stacktrace TEXT, + diagram_content_hash TEXT, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now(), + PRIMARY KEY (execution_id, start_time) +); + +SELECT create_hypertable('executions', 'start_time', chunk_time_interval => INTERVAL '1 day'); + +CREATE INDEX idx_executions_agent_time ON executions (agent_id, start_time DESC); +CREATE INDEX idx_executions_route_time ON executions (route_id, start_time DESC); +CREATE INDEX idx_executions_group_time ON executions (group_name, start_time DESC); +CREATE INDEX idx_executions_correlation ON executions (correlation_id); diff --git a/cameleer3-server-app/src/main/resources/db/migration/V3__processor_executions.sql b/cameleer3-server-app/src/main/resources/db/migration/V3__processor_executions.sql new file mode 100644 index 00000000..433514b0 --- /dev/null +++ b/cameleer3-server-app/src/main/resources/db/migration/V3__processor_executions.sql @@ -0,0 +1,28 @@ +CREATE TABLE processor_executions ( + id BIGSERIAL, + execution_id TEXT NOT NULL, + processor_id TEXT NOT NULL, + processor_type TEXT NOT NULL, + diagram_node_id TEXT, + group_name TEXT NOT NULL, + route_id TEXT NOT NULL, + depth INT NOT NULL, + parent_processor_id TEXT, + status TEXT NOT NULL, + start_time TIMESTAMPTZ NOT NULL, + end_time TIMESTAMPTZ, + duration_ms BIGINT, + error_message TEXT, + error_stacktrace TEXT, + input_body TEXT, + output_body TEXT, + input_headers JSONB, + output_headers JSONB, + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + UNIQUE (execution_id, processor_id, start_time) +); + +SELECT create_hypertable('processor_executions', 'start_time', chunk_time_interval => INTERVAL '1 day'); + +CREATE INDEX idx_proc_exec_execution ON processor_executions (execution_id); +CREATE INDEX idx_proc_exec_type_time ON processor_executions (processor_type, start_time DESC); diff --git a/cameleer3-server-app/src/main/resources/db/migration/V4__agent_metrics.sql b/cameleer3-server-app/src/main/resources/db/migration/V4__agent_metrics.sql new file mode 100644 index 00000000..4ecd6cac --- /dev/null +++ b/cameleer3-server-app/src/main/resources/db/migration/V4__agent_metrics.sql @@ -0,0 +1,12 @@ +CREATE TABLE agent_metrics ( + agent_id TEXT NOT NULL, + metric_name TEXT NOT NULL, + metric_value DOUBLE PRECISION NOT NULL, + tags JSONB, + collected_at TIMESTAMPTZ NOT NULL, + server_received_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +SELECT create_hypertable('agent_metrics', 'collected_at', chunk_time_interval => INTERVAL '1 day'); + +CREATE INDEX idx_metrics_agent_name ON agent_metrics (agent_id, metric_name, collected_at DESC); diff --git a/cameleer3-server-app/src/main/resources/db/migration/V5__route_diagrams.sql b/cameleer3-server-app/src/main/resources/db/migration/V5__route_diagrams.sql new file mode 100644 index 00000000..85eb2355 --- /dev/null +++ b/cameleer3-server-app/src/main/resources/db/migration/V5__route_diagrams.sql @@ -0,0 +1,9 @@ +CREATE TABLE route_diagrams ( + content_hash TEXT PRIMARY KEY, + route_id TEXT NOT NULL, + agent_id TEXT NOT NULL, + definition TEXT NOT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT now() +); + +CREATE INDEX idx_diagrams_route_agent ON route_diagrams (route_id, agent_id); diff --git a/cameleer3-server-app/src/main/resources/db/migration/V6__users.sql b/cameleer3-server-app/src/main/resources/db/migration/V6__users.sql new file mode 100644 index 00000000..079db7dd --- /dev/null +++ b/cameleer3-server-app/src/main/resources/db/migration/V6__users.sql @@ -0,0 +1,9 @@ +CREATE TABLE users ( + user_id TEXT PRIMARY KEY, + provider TEXT NOT NULL, + email TEXT, + display_name TEXT, + roles TEXT[] NOT NULL DEFAULT '{}', + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT now() +); diff --git a/cameleer3-server-app/src/main/resources/db/migration/V7__oidc_config.sql b/cameleer3-server-app/src/main/resources/db/migration/V7__oidc_config.sql new file mode 100644 index 00000000..e46a2196 --- /dev/null +++ b/cameleer3-server-app/src/main/resources/db/migration/V7__oidc_config.sql @@ -0,0 +1,12 @@ +CREATE TABLE oidc_config ( + config_id TEXT PRIMARY KEY DEFAULT 'default', + enabled BOOLEAN NOT NULL DEFAULT false, + issuer_uri TEXT, + client_id TEXT, + client_secret TEXT, + roles_claim TEXT, + default_roles TEXT[] NOT NULL DEFAULT '{}', + auto_signup BOOLEAN DEFAULT false, + display_name_claim TEXT, + updated_at TIMESTAMPTZ NOT NULL DEFAULT now() +); diff --git a/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql b/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql new file mode 100644 index 00000000..6eb5754e --- /dev/null +++ b/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql @@ -0,0 +1,83 @@ +-- Global stats +CREATE MATERIALIZED VIEW stats_1m_all +WITH (timescaledb.continuous) AS +SELECT + time_bucket('1 minute', start_time) AS bucket, + COUNT(*) AS total_count, + COUNT(*) FILTER (WHERE status = 'FAILED') AS failed_count, + COUNT(*) FILTER (WHERE status = 'RUNNING') AS running_count, + SUM(duration_ms) AS duration_sum, + MAX(duration_ms) AS duration_max, + approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration +FROM executions +WHERE status IS NOT NULL +GROUP BY bucket; + +SELECT add_continuous_aggregate_policy('stats_1m_all', + start_offset => INTERVAL '1 hour', + end_offset => INTERVAL '1 minute', + schedule_interval => INTERVAL '1 minute'); + +-- Per-application stats +CREATE MATERIALIZED VIEW stats_1m_app +WITH (timescaledb.continuous) AS +SELECT + time_bucket('1 minute', start_time) AS bucket, + group_name, + COUNT(*) AS total_count, + COUNT(*) FILTER (WHERE status = 'FAILED') AS failed_count, + COUNT(*) FILTER (WHERE status = 'RUNNING') AS running_count, + SUM(duration_ms) AS duration_sum, + MAX(duration_ms) AS duration_max, + approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration +FROM executions +WHERE status IS NOT NULL +GROUP BY bucket, group_name; + +SELECT add_continuous_aggregate_policy('stats_1m_app', + start_offset => INTERVAL '1 hour', + end_offset => INTERVAL '1 minute', + schedule_interval => INTERVAL '1 minute'); + +-- Per-route stats +CREATE MATERIALIZED VIEW stats_1m_route +WITH (timescaledb.continuous) AS +SELECT + time_bucket('1 minute', start_time) AS bucket, + group_name, + route_id, + COUNT(*) AS total_count, + COUNT(*) FILTER (WHERE status = 'FAILED') AS failed_count, + COUNT(*) FILTER (WHERE status = 'RUNNING') AS running_count, + SUM(duration_ms) AS duration_sum, + MAX(duration_ms) AS duration_max, + approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration +FROM executions +WHERE status IS NOT NULL +GROUP BY bucket, group_name, route_id; + +SELECT add_continuous_aggregate_policy('stats_1m_route', + start_offset => INTERVAL '1 hour', + end_offset => INTERVAL '1 minute', + schedule_interval => INTERVAL '1 minute'); + +-- Per-processor stats (uses denormalized group_name/route_id on processor_executions) +CREATE MATERIALIZED VIEW stats_1m_processor +WITH (timescaledb.continuous) AS +SELECT + time_bucket('1 minute', start_time) AS bucket, + group_name, + route_id, + processor_type, + COUNT(*) AS total_count, + COUNT(*) FILTER (WHERE status = 'FAILED') AS failed_count, + SUM(duration_ms) AS duration_sum, + MAX(duration_ms) AS duration_max, + approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration +FROM processor_executions +GROUP BY bucket, group_name, route_id, processor_type; + +SELECT add_continuous_aggregate_policy('stats_1m_processor', + start_offset => INTERVAL '1 hour', + end_offset => INTERVAL '1 minute', + schedule_interval => INTERVAL '1 minute'); From 0eeae7036984552114961418cdd2963941e334ae Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:15:32 +0100 Subject: [PATCH 03/32] test: add TimescaleDB test base class and Flyway migration smoke test --- .../server/app/AbstractPostgresIT.java | 29 +++++++++++++++ .../server/app/storage/FlywayMigrationIT.java | 36 +++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java create mode 100644 cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/FlywayMigrationIT.java diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java new file mode 100644 index 00000000..26faf84a --- /dev/null +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java @@ -0,0 +1,29 @@ +package com.cameleer3.server.app; + +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.DynamicPropertyRegistry; +import org.springframework.test.context.DynamicPropertySource; +import org.testcontainers.containers.PostgreSQLContainer; +import org.testcontainers.junit.jupiter.Container; +import org.testcontainers.junit.jupiter.Testcontainers; + +@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT) +@Testcontainers +public abstract class AbstractPostgresIT { + + @Container + static final PostgreSQLContainer postgres = + new PostgreSQLContainer<>("timescale/timescaledb:latest-pg16") + .withDatabaseName("cameleer3") + .withUsername("cameleer") + .withPassword("test"); + + @DynamicPropertySource + static void configureProperties(DynamicPropertyRegistry registry) { + registry.add("spring.datasource.url", postgres::getJdbcUrl); + registry.add("spring.datasource.username", postgres::getUsername); + registry.add("spring.datasource.password", postgres::getPassword); + registry.add("spring.datasource.driver-class-name", () -> "org.postgresql.Driver"); + registry.add("spring.flyway.enabled", () -> "true"); + } +} diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/FlywayMigrationIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/FlywayMigrationIT.java new file mode 100644 index 00000000..227a4236 --- /dev/null +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/FlywayMigrationIT.java @@ -0,0 +1,36 @@ +package com.cameleer3.server.app.storage; + +import com.cameleer3.server.app.AbstractPostgresIT; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.jdbc.core.JdbcTemplate; + +import static org.junit.jupiter.api.Assertions.*; + +class FlywayMigrationIT extends AbstractPostgresIT { + + @Autowired + JdbcTemplate jdbcTemplate; + + @Test + void allMigrationsApplySuccessfully() { + // Verify core tables exist + Integer execCount = jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM executions", Integer.class); + assertEquals(0, execCount); + + Integer procCount = jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM processor_executions", Integer.class); + assertEquals(0, procCount); + + Integer userCount = jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM users", Integer.class); + assertEquals(0, userCount); + + // Verify continuous aggregates exist + Integer caggCount = jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM timescaledb_information.continuous_aggregates", + Integer.class); + assertEquals(4, caggCount); + } +} From 41a9a975fda25329c613e6a0e72fa5638ac92470 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:15:33 +0100 Subject: [PATCH 04/32] config: switch datasource to PostgreSQL, add OpenSearch and Flyway config Co-Authored-By: Claude Sonnet 4.6 --- .../src/main/resources/application.yml | 19 ++++++++++++----- .../src/test/resources/application-test.yml | 21 ++++--------------- 2 files changed, 18 insertions(+), 22 deletions(-) diff --git a/cameleer3-server-app/src/main/resources/application.yml b/cameleer3-server-app/src/main/resources/application.yml index 31974bae..26ca2f2d 100644 --- a/cameleer3-server-app/src/main/resources/application.yml +++ b/cameleer3-server-app/src/main/resources/application.yml @@ -3,10 +3,13 @@ server: spring: datasource: - url: jdbc:ch://localhost:8123/cameleer3 + url: jdbc:postgresql://localhost:5432/cameleer3 username: cameleer - password: cameleer_dev - driver-class-name: com.clickhouse.jdbc.ClickHouseDriver + password: ${CAMELEER_DB_PASSWORD:cameleer_dev} + driver-class-name: org.postgresql.Driver + flyway: + enabled: true + locations: classpath:db/migration mvc: async: request-timeout: -1 @@ -29,8 +32,14 @@ ingestion: batch-size: 5000 flush-interval-ms: 1000 -clickhouse: - ttl-days: 30 +opensearch: + url: ${OPENSEARCH_URL:http://localhost:9200} + queue-size: ${CAMELEER_OPENSEARCH_QUEUE_SIZE:10000} + debounce-ms: ${CAMELEER_OPENSEARCH_DEBOUNCE_MS:2000} + +cameleer: + body-size-limit: ${CAMELEER_BODY_SIZE_LIMIT:16384} + retention-days: ${CAMELEER_RETENTION_DAYS:30} security: access-token-expiry-ms: 3600000 diff --git a/cameleer3-server-app/src/test/resources/application-test.yml b/cameleer3-server-app/src/test/resources/application-test.yml index 027a4f67..b450bee4 100644 --- a/cameleer3-server-app/src/test/resources/application-test.yml +++ b/cameleer3-server-app/src/test/resources/application-test.yml @@ -1,18 +1,5 @@ spring: - datasource: - url: jdbc:ch://placeholder:8123/cameleer3 - username: default - password: "" - driver-class-name: com.clickhouse.jdbc.ClickHouseDriver - -ingestion: - buffer-capacity: 100 - batch-size: 10 - flush-interval-ms: 100 - -agent-registry: - ping-interval-ms: 1000 - -security: - bootstrap-token: test-bootstrap-token - bootstrap-token-previous: old-bootstrap-token + flyway: + enabled: true +opensearch: + url: http://localhost:9200 From 55ed3be71aebe40cb64e3b6a4270adb14ee00d72 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:16:42 +0100 Subject: [PATCH 05/32] feat: add ExecutionDocument model and ExecutionUpdatedEvent Co-Authored-By: Claude Sonnet 4.6 --- .../core/indexing/ExecutionUpdatedEvent.java | 5 +++++ .../core/storage/model/ExecutionDocument.java | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/ExecutionUpdatedEvent.java create mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/model/ExecutionDocument.java diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/ExecutionUpdatedEvent.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/ExecutionUpdatedEvent.java new file mode 100644 index 00000000..08488fab --- /dev/null +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/ExecutionUpdatedEvent.java @@ -0,0 +1,5 @@ +package com.cameleer3.server.core.indexing; + +import java.time.Instant; + +public record ExecutionUpdatedEvent(String executionId, Instant startTime) {} diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/model/ExecutionDocument.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/model/ExecutionDocument.java new file mode 100644 index 00000000..6822088a --- /dev/null +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/model/ExecutionDocument.java @@ -0,0 +1,19 @@ +package com.cameleer3.server.core.storage.model; + +import java.time.Instant; +import java.util.List; + +public record ExecutionDocument( + String executionId, String routeId, String agentId, String groupName, + String status, String correlationId, String exchangeId, + Instant startTime, Instant endTime, Long durationMs, + String errorMessage, String errorStacktrace, + List processors +) { + public record ProcessorDoc( + String processorId, String processorType, String status, + String errorMessage, String errorStacktrace, + String inputBody, String outputBody, + String inputHeaders, String outputHeaders + ) {} +} From a55fc3c10d4ea9cc04230aa60313c83c8178529f Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:16:53 +0100 Subject: [PATCH 06/32] feat: add new storage interfaces for PostgreSQL/OpenSearch backends Co-Authored-By: Claude Sonnet 4.6 --- .../server/core/storage/DiagramStore.java | 18 ++++++++++ .../server/core/storage/ExecutionStore.java | 34 ++++++++++++++++++ .../server/core/storage/MetricsStore.java | 10 ++++++ .../server/core/storage/SearchIndex.java | 17 +++++++++ .../server/core/storage/StatsStore.java | 36 +++++++++++++++++++ 5 files changed, 115 insertions(+) create mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramStore.java create mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionStore.java create mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsStore.java create mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/SearchIndex.java create mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/StatsStore.java diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramStore.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramStore.java new file mode 100644 index 00000000..12ff6d7d --- /dev/null +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramStore.java @@ -0,0 +1,18 @@ +package com.cameleer3.server.core.storage; + +import com.cameleer3.common.graph.RouteGraph; +import com.cameleer3.server.core.ingestion.TaggedDiagram; + +import java.util.List; +import java.util.Optional; + +public interface DiagramStore { + + void store(TaggedDiagram diagram); + + Optional findByContentHash(String contentHash); + + Optional findContentHashForRoute(String routeId, String agentId); + + Optional findContentHashForRouteByAgents(String routeId, List agentIds); +} diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionStore.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionStore.java new file mode 100644 index 00000000..ae45577e --- /dev/null +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionStore.java @@ -0,0 +1,34 @@ +package com.cameleer3.server.core.storage; + +import java.time.Instant; +import java.util.List; +import java.util.Optional; + +public interface ExecutionStore { + + void upsert(ExecutionRecord execution); + + void upsertProcessors(String executionId, Instant startTime, + String groupName, String routeId, + List processors); + + Optional findById(String executionId); + + List findProcessors(String executionId); + + record ExecutionRecord( + String executionId, String routeId, String agentId, String groupName, + String status, String correlationId, String exchangeId, + Instant startTime, Instant endTime, Long durationMs, + String errorMessage, String errorStacktrace, String diagramContentHash + ) {} + + record ProcessorRecord( + String executionId, String processorId, String processorType, + String diagramNodeId, String groupName, String routeId, + int depth, String parentProcessorId, String status, + Instant startTime, Instant endTime, Long durationMs, + String errorMessage, String errorStacktrace, + String inputBody, String outputBody, String inputHeaders, String outputHeaders + ) {} +} diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsStore.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsStore.java new file mode 100644 index 00000000..b7af4122 --- /dev/null +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsStore.java @@ -0,0 +1,10 @@ +package com.cameleer3.server.core.storage; + +import com.cameleer3.server.core.storage.model.MetricsSnapshot; + +import java.util.List; + +public interface MetricsStore { + + void insertBatch(List snapshots); +} diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/SearchIndex.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/SearchIndex.java new file mode 100644 index 00000000..e06379ac --- /dev/null +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/SearchIndex.java @@ -0,0 +1,17 @@ +package com.cameleer3.server.core.storage; + +import com.cameleer3.server.core.search.ExecutionSummary; +import com.cameleer3.server.core.search.SearchRequest; +import com.cameleer3.server.core.search.SearchResult; +import com.cameleer3.server.core.storage.model.ExecutionDocument; + +public interface SearchIndex { + + SearchResult search(SearchRequest request); + + long count(SearchRequest request); + + void index(ExecutionDocument document); + + void delete(String executionId); +} diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/StatsStore.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/StatsStore.java new file mode 100644 index 00000000..05931a86 --- /dev/null +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/StatsStore.java @@ -0,0 +1,36 @@ +package com.cameleer3.server.core.storage; + +import com.cameleer3.server.core.search.ExecutionStats; +import com.cameleer3.server.core.search.StatsTimeseries; + +import java.time.Instant; +import java.util.List; + +public interface StatsStore { + + // Global stats (stats_1m_all) + ExecutionStats stats(Instant from, Instant to); + + // Per-app stats (stats_1m_app) + ExecutionStats statsForApp(Instant from, Instant to, String groupName); + + // Per-route stats (stats_1m_route), optionally scoped to specific agents + ExecutionStats statsForRoute(Instant from, Instant to, String routeId, List agentIds); + + // Per-processor stats (stats_1m_processor) + ExecutionStats statsForProcessor(Instant from, Instant to, String routeId, String processorType); + + // Global timeseries + StatsTimeseries timeseries(Instant from, Instant to, int bucketCount); + + // Per-app timeseries + StatsTimeseries timeseriesForApp(Instant from, Instant to, int bucketCount, String groupName); + + // Per-route timeseries, optionally scoped to specific agents + StatsTimeseries timeseriesForRoute(Instant from, Instant to, int bucketCount, + String routeId, List agentIds); + + // Per-processor timeseries + StatsTimeseries timeseriesForProcessor(Instant from, Instant to, int bucketCount, + String routeId, String processorType); +} From 84b93d74c74e7e9606ec6958204c1ca04ba44b21 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:18:52 +0100 Subject: [PATCH 07/32] refactor: SearchService uses SearchIndex + StatsStore instead of SearchEngine --- .../server/core/search/SearchService.java | 58 ++++++------------- 1 file changed, 19 insertions(+), 39 deletions(-) diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchService.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchService.java index 263193c2..014c606d 100644 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchService.java +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchService.java @@ -1,63 +1,43 @@ package com.cameleer3.server.core.search; +import com.cameleer3.server.core.storage.SearchIndex; +import com.cameleer3.server.core.storage.StatsStore; + +import java.time.Instant; import java.util.List; -/** - * Orchestrates search operations, delegating to a {@link SearchEngine} backend. - *

- * This is a plain class (no Spring annotations) -- it lives in the core module - * and is wired as a bean by the app module configuration. The thin orchestration - * layer allows adding cross-cutting concerns (logging, caching, metrics) later. - */ public class SearchService { - private final SearchEngine engine; + private final SearchIndex searchIndex; + private final StatsStore statsStore; - public SearchService(SearchEngine engine) { - this.engine = engine; + public SearchService(SearchIndex searchIndex, StatsStore statsStore) { + this.searchIndex = searchIndex; + this.statsStore = statsStore; } - /** - * Search for route executions matching the given criteria. - */ public SearchResult search(SearchRequest request) { - return engine.search(request); + return searchIndex.search(request); } - /** - * Count route executions matching the given criteria. - */ public long count(SearchRequest request) { - return engine.count(request); + return searchIndex.count(request); } - /** - * Compute aggregate execution stats (P99 latency, active count). - */ - public ExecutionStats stats(java.time.Instant from, java.time.Instant to) { - return engine.stats(from, to); + public ExecutionStats stats(Instant from, Instant to) { + return statsStore.stats(from, to); } - /** - * Compute aggregate execution stats scoped to specific routes and agents. - */ - public ExecutionStats stats(java.time.Instant from, java.time.Instant to, - String routeId, List agentIds) { - return engine.stats(from, to, routeId, agentIds); + public ExecutionStats stats(Instant from, Instant to, String routeId, List agentIds) { + return statsStore.statsForRoute(from, to, routeId, agentIds); } - /** - * Compute bucketed time-series stats over a time window. - */ - public StatsTimeseries timeseries(java.time.Instant from, java.time.Instant to, int bucketCount) { - return engine.timeseries(from, to, bucketCount); + public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount) { + return statsStore.timeseries(from, to, bucketCount); } - /** - * Compute bucketed time-series stats scoped to specific routes and agents. - */ - public StatsTimeseries timeseries(java.time.Instant from, java.time.Instant to, int bucketCount, + public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount, String routeId, List agentIds) { - return engine.timeseries(from, to, bucketCount, routeId, agentIds); + return statsStore.timeseriesForRoute(from, to, bucketCount, routeId, agentIds); } } From adf4b44d784aee2ffbe9366117e2aa240339b306 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:18:53 +0100 Subject: [PATCH 08/32] refactor: DetailService uses ExecutionStore, tree built from parentProcessorId --- .../server/core/detail/DetailService.java | 121 ++++++------------ 1 file changed, 39 insertions(+), 82 deletions(-) diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/DetailService.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/DetailService.java index 27dc39a8..7f6b31ce 100644 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/DetailService.java +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/DetailService.java @@ -1,104 +1,61 @@ package com.cameleer3.server.core.detail; -import com.cameleer3.server.core.storage.ExecutionRepository; +import com.cameleer3.server.core.storage.ExecutionStore; +import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; +import java.util.*; -/** - * Provides execution detail with reconstructed processor tree. - *

- * This is a plain class (no Spring annotations) -- it lives in the core module - * and is wired as a bean by the app module configuration. - */ public class DetailService { - private final ExecutionRepository repository; + private final ExecutionStore executionStore; - public DetailService(ExecutionRepository repository) { - this.repository = repository; + public DetailService(ExecutionStore executionStore) { + this.executionStore = executionStore; } - /** - * Get the full detail of a route execution, including the nested processor tree. - * - * @param executionId the execution ID to look up - * @return the execution detail, or empty if not found - */ public Optional getDetail(String executionId) { - return repository.findRawById(executionId) - .map(this::toDetail); + return executionStore.findById(executionId) + .map(exec -> { + List processors = executionStore.findProcessors(executionId); + List roots = buildTree(processors); + return new ExecutionDetail( + exec.executionId(), exec.routeId(), exec.agentId(), + exec.status(), exec.startTime(), exec.endTime(), + exec.durationMs() != null ? exec.durationMs() : 0L, + exec.correlationId(), exec.exchangeId(), + exec.errorMessage(), exec.errorStacktrace(), + exec.diagramContentHash(), roots + ); + }); } - private ExecutionDetail toDetail(RawExecutionRow row) { - List roots = reconstructTree( - row.processorIds(), - row.processorTypes(), - row.processorStatuses(), - row.processorStarts(), - row.processorEnds(), - row.processorDurations(), - row.processorDiagramNodeIds(), - row.processorErrorMessages(), - row.processorErrorStacktraces(), - row.processorDepths(), - row.processorParentIndexes() - ); + List buildTree(List processors) { + if (processors.isEmpty()) return List.of(); - return new ExecutionDetail( - row.executionId(), - row.routeId(), - row.agentId(), - row.status(), - row.startTime(), - row.endTime(), - row.durationMs(), - row.correlationId(), - row.exchangeId(), - row.errorMessage(), - row.errorStackTrace(), - row.diagramContentHash(), - roots - ); - } - - /** - * Reconstruct the nested processor tree from flat parallel arrays. - *

- * Uses parentIndexes to wire children: parentIndex == -1 means the node is a root. - * Otherwise, parentIndex is the array index of the parent node. - */ - List reconstructTree( - String[] ids, String[] types, String[] statuses, - java.time.Instant[] starts, java.time.Instant[] ends, long[] durations, - String[] diagramNodeIds, String[] errorMessages, String[] errorStacktraces, - int[] depths, int[] parentIndexes) { - - if (ids == null || ids.length == 0) { - return List.of(); - } - - int len = ids.length; - ProcessorNode[] nodes = new ProcessorNode[len]; - - for (int i = 0; i < len; i++) { - nodes[i] = new ProcessorNode( - ids[i], types[i], statuses[i], - starts[i], ends[i], durations[i], - diagramNodeIds[i], errorMessages[i], errorStacktraces[i] - ); + Map nodeMap = new LinkedHashMap<>(); + for (ProcessorRecord p : processors) { + nodeMap.put(p.processorId(), new ProcessorNode( + p.processorId(), p.processorType(), p.status(), + p.startTime(), p.endTime(), + p.durationMs() != null ? p.durationMs() : 0L, + p.diagramNodeId(), p.errorMessage(), p.errorStacktrace() + )); } List roots = new ArrayList<>(); - for (int i = 0; i < len; i++) { - if (parentIndexes[i] == -1) { - roots.add(nodes[i]); + for (ProcessorRecord p : processors) { + ProcessorNode node = nodeMap.get(p.processorId()); + if (p.parentProcessorId() == null) { + roots.add(node); } else { - nodes[parentIndexes[i]].addChild(nodes[i]); + ProcessorNode parent = nodeMap.get(p.parentProcessorId()); + if (parent != null) { + parent.addChild(node); + } else { + roots.add(node); // orphan safety + } } } - return roots; } } From 85ebe7611115172df2da1a6da9a47b1c6745628f Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:18:54 +0100 Subject: [PATCH 09/32] refactor: IngestionService uses synchronous ExecutionStore writes with event publishing Co-Authored-By: Claude Sonnet 4.6 --- .../core/ingestion/IngestionService.java | 171 +++++++++--------- 1 file changed, 87 insertions(+), 84 deletions(-) diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java index 6841c683..c5e17e6f 100644 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java @@ -1,113 +1,116 @@ package com.cameleer3.server.core.ingestion; +import com.cameleer3.common.model.ProcessorExecution; +import com.cameleer3.common.model.RouteExecution; +import com.cameleer3.server.core.indexing.ExecutionUpdatedEvent; +import com.cameleer3.server.core.storage.DiagramStore; +import com.cameleer3.server.core.storage.ExecutionStore; +import com.cameleer3.server.core.storage.ExecutionStore.ExecutionRecord; +import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord; import com.cameleer3.server.core.storage.model.MetricsSnapshot; +import java.util.ArrayList; import java.util.List; +import java.util.function.Consumer; -/** - * Routes incoming data to the appropriate {@link WriteBuffer} instances. - *

- * This is a plain class (no Spring annotations) -- it lives in the core module - * and is wired as a bean by the app module configuration. - */ public class IngestionService { - private final WriteBuffer executionBuffer; - private final WriteBuffer diagramBuffer; + private final ExecutionStore executionStore; + private final DiagramStore diagramStore; private final WriteBuffer metricsBuffer; + private final Consumer eventPublisher; + private final int bodySizeLimit; - public IngestionService(WriteBuffer executionBuffer, - WriteBuffer diagramBuffer, - WriteBuffer metricsBuffer) { - this.executionBuffer = executionBuffer; - this.diagramBuffer = diagramBuffer; + public IngestionService(ExecutionStore executionStore, + DiagramStore diagramStore, + WriteBuffer metricsBuffer, + Consumer eventPublisher, + int bodySizeLimit) { + this.executionStore = executionStore; + this.diagramStore = diagramStore; this.metricsBuffer = metricsBuffer; + this.eventPublisher = eventPublisher; + this.bodySizeLimit = bodySizeLimit; } - /** - * Accept a batch of tagged route executions into the buffer. - * - * @return true if all items were buffered, false if buffer is full (backpressure) - */ - public boolean acceptExecutions(List executions) { - return executionBuffer.offerBatch(executions); + public void ingestExecution(String agentId, String groupName, RouteExecution execution) { + ExecutionRecord record = toExecutionRecord(agentId, groupName, execution); + executionStore.upsert(record); + + if (execution.getProcessors() != null && !execution.getProcessors().isEmpty()) { + List processors = flattenProcessors( + execution.getProcessors(), record.executionId(), + record.startTime(), groupName, execution.getRouteId(), + null, 0); + executionStore.upsertProcessors( + record.executionId(), record.startTime(), + groupName, execution.getRouteId(), processors); + } + + eventPublisher.accept(new ExecutionUpdatedEvent( + record.executionId(), record.startTime())); } - /** - * Accept a single tagged route execution into the buffer. - * - * @return true if the item was buffered, false if buffer is full (backpressure) - */ - public boolean acceptExecution(TaggedExecution execution) { - return executionBuffer.offer(execution); + public void ingestDiagram(TaggedDiagram diagram) { + diagramStore.store(diagram); } - /** - * Accept a single tagged route diagram into the buffer. - * - * @return true if the item was buffered, false if buffer is full (backpressure) - */ - public boolean acceptDiagram(TaggedDiagram diagram) { - return diagramBuffer.offer(diagram); - } - - /** - * Accept a batch of tagged route diagrams into the buffer. - * - * @return true if all items were buffered, false if buffer is full (backpressure) - */ - public boolean acceptDiagrams(List diagrams) { - return diagramBuffer.offerBatch(diagrams); - } - - /** - * Accept a batch of metrics snapshots into the buffer. - * - * @return true if all items were buffered, false if buffer is full (backpressure) - */ public boolean acceptMetrics(List metrics) { return metricsBuffer.offerBatch(metrics); } - /** - * @return current number of items in the execution buffer - */ - public int getExecutionBufferDepth() { - return executionBuffer.size(); - } - - /** - * @return current number of items in the diagram buffer - */ - public int getDiagramBufferDepth() { - return diagramBuffer.size(); - } - - /** - * @return current number of items in the metrics buffer - */ public int getMetricsBufferDepth() { return metricsBuffer.size(); } - /** - * @return the execution write buffer (for use by flush scheduler) - */ - public WriteBuffer getExecutionBuffer() { - return executionBuffer; - } - - /** - * @return the diagram write buffer (for use by flush scheduler) - */ - public WriteBuffer getDiagramBuffer() { - return diagramBuffer; - } - - /** - * @return the metrics write buffer (for use by flush scheduler) - */ public WriteBuffer getMetricsBuffer() { return metricsBuffer; } + + private ExecutionRecord toExecutionRecord(String agentId, String groupName, + RouteExecution exec) { + return new ExecutionRecord( + exec.getExecutionId(), exec.getRouteId(), agentId, groupName, + exec.getStatus() != null ? exec.getStatus().name() : "RUNNING", + exec.getCorrelationId(), exec.getExchangeId(), + exec.getStartTime(), exec.getEndTime(), + exec.getDurationMs(), + exec.getErrorMessage(), exec.getErrorStacktrace(), + null // diagramContentHash set separately + ); + } + + private List flattenProcessors( + List processors, String executionId, + java.time.Instant execStartTime, String groupName, String routeId, + String parentProcessorId, int depth) { + List flat = new ArrayList<>(); + for (ProcessorExecution p : processors) { + flat.add(new ProcessorRecord( + executionId, p.getProcessorId(), p.getProcessorType(), + p.getDiagramNodeId(), groupName, routeId, + depth, parentProcessorId, + p.getStatus() != null ? p.getStatus().name() : "RUNNING", + p.getStartTime() != null ? p.getStartTime() : execStartTime, + p.getEndTime(), + p.getDurationMs(), + p.getErrorMessage(), p.getErrorStacktrace(), + truncateBody(p.getInputBody()), truncateBody(p.getOutputBody()), + p.getInputHeaders() != null ? p.getInputHeaders().toString() : null, + p.getOutputHeaders() != null ? p.getOutputHeaders().toString() : null + )); + if (p.getChildren() != null) { + flat.addAll(flattenProcessors( + p.getChildren(), executionId, execStartTime, + groupName, routeId, p.getProcessorId(), depth + 1)); + } + } + return flat; + } + + private String truncateBody(String body) { + if (body == null) return null; + if (body.length() > bodySizeLimit) return body.substring(0, bodySizeLimit); + return body; + } } From 9fd02c4edb31e182e0e5b3776d47c49e11566416 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:20:57 +0100 Subject: [PATCH 10/32] feat: implement PostgresExecutionStore with upsert and dedup Co-Authored-By: Claude Sonnet 4.6 --- .../app/storage/PostgresExecutionStore.java | 131 ++++++++++++++++++ .../app/storage/PostgresExecutionStoreIT.java | 83 +++++++++++ 2 files changed, 214 insertions(+) create mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresExecutionStore.java create mode 100644 cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresExecutionStoreIT.java diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresExecutionStore.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresExecutionStore.java new file mode 100644 index 00000000..84170327 --- /dev/null +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresExecutionStore.java @@ -0,0 +1,131 @@ +package com.cameleer3.server.app.storage; + +import com.cameleer3.server.core.storage.ExecutionStore; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.jdbc.core.RowMapper; +import org.springframework.stereotype.Repository; + +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.time.Instant; +import java.util.List; +import java.util.Optional; + +@Repository +public class PostgresExecutionStore implements ExecutionStore { + + private final JdbcTemplate jdbc; + + public PostgresExecutionStore(JdbcTemplate jdbc) { + this.jdbc = jdbc; + } + + @Override + public void upsert(ExecutionRecord execution) { + jdbc.update(""" + INSERT INTO executions (execution_id, route_id, agent_id, group_name, + status, correlation_id, exchange_id, start_time, end_time, + duration_ms, error_message, error_stacktrace, diagram_content_hash, + created_at, updated_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, now(), now()) + ON CONFLICT (execution_id, start_time) DO UPDATE SET + status = CASE + WHEN EXCLUDED.status IN ('COMPLETED', 'FAILED') + AND executions.status = 'RUNNING' + THEN EXCLUDED.status + WHEN EXCLUDED.status = executions.status THEN executions.status + ELSE EXCLUDED.status + END, + end_time = COALESCE(EXCLUDED.end_time, executions.end_time), + duration_ms = COALESCE(EXCLUDED.duration_ms, executions.duration_ms), + error_message = COALESCE(EXCLUDED.error_message, executions.error_message), + error_stacktrace = COALESCE(EXCLUDED.error_stacktrace, executions.error_stacktrace), + diagram_content_hash = COALESCE(EXCLUDED.diagram_content_hash, executions.diagram_content_hash), + updated_at = now() + """, + execution.executionId(), execution.routeId(), execution.agentId(), + execution.groupName(), execution.status(), execution.correlationId(), + execution.exchangeId(), + Timestamp.from(execution.startTime()), + execution.endTime() != null ? Timestamp.from(execution.endTime()) : null, + execution.durationMs(), execution.errorMessage(), + execution.errorStacktrace(), execution.diagramContentHash()); + } + + @Override + public void upsertProcessors(String executionId, Instant startTime, + String groupName, String routeId, + List processors) { + jdbc.batchUpdate(""" + INSERT INTO processor_executions (execution_id, processor_id, processor_type, + diagram_node_id, group_name, route_id, depth, parent_processor_id, + status, start_time, end_time, duration_ms, error_message, error_stacktrace, + input_body, output_body, input_headers, output_headers) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?::jsonb, ?::jsonb) + ON CONFLICT (execution_id, processor_id, start_time) DO UPDATE SET + status = EXCLUDED.status, + end_time = COALESCE(EXCLUDED.end_time, processor_executions.end_time), + duration_ms = COALESCE(EXCLUDED.duration_ms, processor_executions.duration_ms), + error_message = COALESCE(EXCLUDED.error_message, processor_executions.error_message), + error_stacktrace = COALESCE(EXCLUDED.error_stacktrace, processor_executions.error_stacktrace), + input_body = COALESCE(EXCLUDED.input_body, processor_executions.input_body), + output_body = COALESCE(EXCLUDED.output_body, processor_executions.output_body), + input_headers = COALESCE(EXCLUDED.input_headers, processor_executions.input_headers), + output_headers = COALESCE(EXCLUDED.output_headers, processor_executions.output_headers) + """, + processors.stream().map(p -> new Object[]{ + p.executionId(), p.processorId(), p.processorType(), + p.diagramNodeId(), p.groupName(), p.routeId(), + p.depth(), p.parentProcessorId(), p.status(), + Timestamp.from(p.startTime()), + p.endTime() != null ? Timestamp.from(p.endTime()) : null, + p.durationMs(), p.errorMessage(), p.errorStacktrace(), + p.inputBody(), p.outputBody(), p.inputHeaders(), p.outputHeaders() + }).toList()); + } + + @Override + public Optional findById(String executionId) { + List results = jdbc.query( + "SELECT * FROM executions WHERE execution_id = ? ORDER BY start_time DESC LIMIT 1", + EXECUTION_MAPPER, executionId); + return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0)); + } + + @Override + public List findProcessors(String executionId) { + return jdbc.query( + "SELECT * FROM processor_executions WHERE execution_id = ? ORDER BY depth, start_time", + PROCESSOR_MAPPER, executionId); + } + + private static final RowMapper EXECUTION_MAPPER = (rs, rowNum) -> + new ExecutionRecord( + rs.getString("execution_id"), rs.getString("route_id"), + rs.getString("agent_id"), rs.getString("group_name"), + rs.getString("status"), rs.getString("correlation_id"), + rs.getString("exchange_id"), + toInstant(rs, "start_time"), toInstant(rs, "end_time"), + rs.getObject("duration_ms") != null ? rs.getLong("duration_ms") : null, + rs.getString("error_message"), rs.getString("error_stacktrace"), + rs.getString("diagram_content_hash")); + + private static final RowMapper PROCESSOR_MAPPER = (rs, rowNum) -> + new ProcessorRecord( + rs.getString("execution_id"), rs.getString("processor_id"), + rs.getString("processor_type"), rs.getString("diagram_node_id"), + rs.getString("group_name"), rs.getString("route_id"), + rs.getInt("depth"), rs.getString("parent_processor_id"), + rs.getString("status"), + toInstant(rs, "start_time"), toInstant(rs, "end_time"), + rs.getObject("duration_ms") != null ? rs.getLong("duration_ms") : null, + rs.getString("error_message"), rs.getString("error_stacktrace"), + rs.getString("input_body"), rs.getString("output_body"), + rs.getString("input_headers"), rs.getString("output_headers")); + + private static Instant toInstant(ResultSet rs, String column) throws SQLException { + Timestamp ts = rs.getTimestamp(column); + return ts != null ? ts.toInstant() : null; + } +} diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresExecutionStoreIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresExecutionStoreIT.java new file mode 100644 index 00000000..8b698d5e --- /dev/null +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresExecutionStoreIT.java @@ -0,0 +1,83 @@ +package com.cameleer3.server.app.storage; + +import com.cameleer3.server.app.AbstractPostgresIT; +import com.cameleer3.server.core.storage.ExecutionStore; +import com.cameleer3.server.core.storage.ExecutionStore.ExecutionRecord; +import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; + +import java.time.Instant; +import java.util.List; +import java.util.Optional; + +import static org.junit.jupiter.api.Assertions.*; + +class PostgresExecutionStoreIT extends AbstractPostgresIT { + + @Autowired + ExecutionStore executionStore; + + @Test + void upsertAndFindById() { + Instant now = Instant.now(); + ExecutionRecord record = new ExecutionRecord( + "exec-1", "route-a", "agent-1", "app-1", + "COMPLETED", "corr-1", "exchange-1", + now, now.plusMillis(100), 100L, + null, null, null); + + executionStore.upsert(record); + Optional found = executionStore.findById("exec-1"); + + assertTrue(found.isPresent()); + assertEquals("exec-1", found.get().executionId()); + assertEquals("COMPLETED", found.get().status()); + } + + @Test + void upsertDeduplicatesByExecutionId() { + Instant now = Instant.now(); + ExecutionRecord first = new ExecutionRecord( + "exec-dup", "route-a", "agent-1", "app-1", + "RUNNING", null, null, now, null, null, null, null, null); + ExecutionRecord second = new ExecutionRecord( + "exec-dup", "route-a", "agent-1", "app-1", + "COMPLETED", null, null, now, now.plusMillis(200), 200L, null, null, null); + + executionStore.upsert(first); + executionStore.upsert(second); + + Optional found = executionStore.findById("exec-dup"); + assertTrue(found.isPresent()); + assertEquals("COMPLETED", found.get().status()); + assertEquals(200L, found.get().durationMs()); + } + + @Test + void upsertProcessorsAndFind() { + Instant now = Instant.now(); + ExecutionRecord exec = new ExecutionRecord( + "exec-proc", "route-a", "agent-1", "app-1", + "COMPLETED", null, null, now, now.plusMillis(50), 50L, null, null, null); + executionStore.upsert(exec); + + List processors = List.of( + new ProcessorRecord("exec-proc", "proc-1", "log", null, + "app-1", "route-a", 0, null, "COMPLETED", + now, now.plusMillis(10), 10L, null, null, + "input body", "output body", null, null), + new ProcessorRecord("exec-proc", "proc-2", "to", null, + "app-1", "route-a", 1, "proc-1", "COMPLETED", + now.plusMillis(10), now.plusMillis(30), 20L, null, null, + null, null, null, null) + ); + executionStore.upsertProcessors("exec-proc", now, "app-1", "route-a", processors); + + List found = executionStore.findProcessors("exec-proc"); + assertEquals(2, found.size()); + assertEquals("proc-1", found.get(0).processorId()); + assertEquals("proc-2", found.get(1).processorId()); + assertEquals("proc-1", found.get(1).parentProcessorId()); + } +} From 527e2cf017aa4a8b9d393be0bd3346d4a5a3ace7 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:22:44 +0100 Subject: [PATCH 11/32] feat: implement PostgresStatsStore querying continuous aggregates --- .../app/storage/PostgresStatsStore.java | 187 ++++++++++++++++++ .../app/storage/PostgresStatsStoreIT.java | 61 ++++++ 2 files changed, 248 insertions(+) create mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresStatsStore.java create mode 100644 cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresStatsStore.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresStatsStore.java new file mode 100644 index 00000000..fff9b70f --- /dev/null +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresStatsStore.java @@ -0,0 +1,187 @@ +package com.cameleer3.server.app.storage; + +import com.cameleer3.server.core.search.ExecutionStats; +import com.cameleer3.server.core.search.StatsTimeseries; +import com.cameleer3.server.core.search.StatsTimeseries.TimeseriesBucket; +import com.cameleer3.server.core.storage.StatsStore; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.stereotype.Repository; + +import java.sql.Timestamp; +import java.time.Duration; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.List; + +@Repository +public class PostgresStatsStore implements StatsStore { + + private final JdbcTemplate jdbc; + + public PostgresStatsStore(JdbcTemplate jdbc) { + this.jdbc = jdbc; + } + + @Override + public ExecutionStats stats(Instant from, Instant to) { + return queryStats("stats_1m_all", from, to, List.of()); + } + + @Override + public ExecutionStats statsForApp(Instant from, Instant to, String groupName) { + return queryStats("stats_1m_app", from, to, List.of( + new Filter("group_name", groupName))); + } + + @Override + public ExecutionStats statsForRoute(Instant from, Instant to, String routeId, List agentIds) { + // Note: agentIds is accepted for interface compatibility but not filterable + // on the continuous aggregate (it groups by route_id, not agent_id). + // All agents for the same route contribute to the same aggregate. + return queryStats("stats_1m_route", from, to, List.of( + new Filter("route_id", routeId))); + } + + @Override + public ExecutionStats statsForProcessor(Instant from, Instant to, String routeId, String processorType) { + return queryStats("stats_1m_processor", from, to, List.of( + new Filter("route_id", routeId), + new Filter("processor_type", processorType))); + } + + @Override + public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount) { + return queryTimeseries("stats_1m_all", from, to, bucketCount, List.of(), true); + } + + @Override + public StatsTimeseries timeseriesForApp(Instant from, Instant to, int bucketCount, String groupName) { + return queryTimeseries("stats_1m_app", from, to, bucketCount, List.of( + new Filter("group_name", groupName)), true); + } + + @Override + public StatsTimeseries timeseriesForRoute(Instant from, Instant to, int bucketCount, + String routeId, List agentIds) { + return queryTimeseries("stats_1m_route", from, to, bucketCount, List.of( + new Filter("route_id", routeId)), true); + } + + @Override + public StatsTimeseries timeseriesForProcessor(Instant from, Instant to, int bucketCount, + String routeId, String processorType) { + // stats_1m_processor does NOT have running_count column + return queryTimeseries("stats_1m_processor", from, to, bucketCount, List.of( + new Filter("route_id", routeId), + new Filter("processor_type", processorType)), false); + } + + private record Filter(String column, String value) {} + + private ExecutionStats queryStats(String view, Instant from, Instant to, List filters) { + // running_count only exists on execution-level aggregates, not processor + boolean hasRunning = !view.equals("stats_1m_processor"); + String runningCol = hasRunning ? "COALESCE(SUM(running_count), 0)" : "0"; + + String sql = "SELECT COALESCE(SUM(total_count), 0) AS total_count, " + + "COALESCE(SUM(failed_count), 0) AS failed_count, " + + "CASE WHEN SUM(total_count) > 0 THEN SUM(duration_sum) / SUM(total_count) ELSE 0 END AS avg_duration, " + + "COALESCE(MAX(p99_duration), 0) AS p99_duration, " + + runningCol + " AS active_count " + + "FROM " + view + " WHERE bucket >= ? AND bucket < ?"; + + List params = new ArrayList<>(); + params.add(Timestamp.from(from)); + params.add(Timestamp.from(to)); + for (Filter f : filters) { + sql += " AND " + f.column() + " = ?"; + params.add(f.value()); + } + + long totalCount = 0, failedCount = 0, avgDuration = 0, p99Duration = 0, activeCount = 0; + var currentResult = jdbc.query(sql, (rs, rowNum) -> new long[]{ + rs.getLong("total_count"), rs.getLong("failed_count"), + rs.getLong("avg_duration"), rs.getLong("p99_duration"), + rs.getLong("active_count") + }, params.toArray()); + if (!currentResult.isEmpty()) { + long[] r = currentResult.get(0); + totalCount = r[0]; failedCount = r[1]; avgDuration = r[2]; + p99Duration = r[3]; activeCount = r[4]; + } + + // Previous period (shifted back 24h) + Instant prevFrom = from.minus(Duration.ofHours(24)); + Instant prevTo = to.minus(Duration.ofHours(24)); + List prevParams = new ArrayList<>(); + prevParams.add(Timestamp.from(prevFrom)); + prevParams.add(Timestamp.from(prevTo)); + for (Filter f : filters) prevParams.add(f.value()); + String prevSql = sql; // same shape, different time params + + long prevTotal = 0, prevFailed = 0, prevAvg = 0, prevP99 = 0; + var prevResult = jdbc.query(prevSql, (rs, rowNum) -> new long[]{ + rs.getLong("total_count"), rs.getLong("failed_count"), + rs.getLong("avg_duration"), rs.getLong("p99_duration") + }, prevParams.toArray()); + if (!prevResult.isEmpty()) { + long[] r = prevResult.get(0); + prevTotal = r[0]; prevFailed = r[1]; prevAvg = r[2]; prevP99 = r[3]; + } + + // Today total (from midnight UTC) + Instant todayStart = Instant.now().truncatedTo(ChronoUnit.DAYS); + List todayParams = new ArrayList<>(); + todayParams.add(Timestamp.from(todayStart)); + todayParams.add(Timestamp.from(Instant.now())); + for (Filter f : filters) todayParams.add(f.value()); + String todaySql = sql; + + long totalToday = 0; + var todayResult = jdbc.query(todaySql, (rs, rowNum) -> rs.getLong("total_count"), + todayParams.toArray()); + if (!todayResult.isEmpty()) totalToday = todayResult.get(0); + + return new ExecutionStats( + totalCount, failedCount, avgDuration, p99Duration, activeCount, + totalToday, prevTotal, prevFailed, prevAvg, prevP99); + } + + private StatsTimeseries queryTimeseries(String view, Instant from, Instant to, + int bucketCount, List filters, + boolean hasRunningCount) { + long intervalSeconds = Duration.between(from, to).toSeconds() / Math.max(bucketCount, 1); + if (intervalSeconds < 60) intervalSeconds = 60; + + String runningCol = hasRunningCount ? "COALESCE(SUM(running_count), 0)" : "0"; + + String sql = "SELECT time_bucket(? * INTERVAL '1 second', bucket) AS period, " + + "COALESCE(SUM(total_count), 0) AS total_count, " + + "COALESCE(SUM(failed_count), 0) AS failed_count, " + + "CASE WHEN SUM(total_count) > 0 THEN SUM(duration_sum) / SUM(total_count) ELSE 0 END AS avg_duration, " + + "COALESCE(MAX(p99_duration), 0) AS p99_duration, " + + runningCol + " AS active_count " + + "FROM " + view + " WHERE bucket >= ? AND bucket < ?"; + + List params = new ArrayList<>(); + params.add(intervalSeconds); + params.add(Timestamp.from(from)); + params.add(Timestamp.from(to)); + for (Filter f : filters) { + sql += " AND " + f.column() + " = ?"; + params.add(f.value()); + } + sql += " GROUP BY period ORDER BY period"; + + List buckets = jdbc.query(sql, (rs, rowNum) -> + new TimeseriesBucket( + rs.getTimestamp("period").toInstant(), + rs.getLong("total_count"), rs.getLong("failed_count"), + rs.getLong("avg_duration"), rs.getLong("p99_duration"), + rs.getLong("active_count") + ), params.toArray()); + + return new StatsTimeseries(buckets); + } +} diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java new file mode 100644 index 00000000..d3a1548f --- /dev/null +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java @@ -0,0 +1,61 @@ +package com.cameleer3.server.app.storage; + +import com.cameleer3.server.app.AbstractPostgresIT; +import com.cameleer3.server.core.search.ExecutionStats; +import com.cameleer3.server.core.search.StatsTimeseries; +import com.cameleer3.server.core.storage.ExecutionStore; +import com.cameleer3.server.core.storage.ExecutionStore.ExecutionRecord; +import com.cameleer3.server.core.storage.StatsStore; +import org.junit.jupiter.api.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.jdbc.core.JdbcTemplate; + +import java.time.Instant; +import java.time.temporal.ChronoUnit; + +import static org.junit.jupiter.api.Assertions.*; + +class PostgresStatsStoreIT extends AbstractPostgresIT { + + @Autowired StatsStore statsStore; + @Autowired ExecutionStore executionStore; + @Autowired JdbcTemplate jdbc; + + @Test + void statsReturnsCountsForTimeWindow() { + Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); + insertExecution("stats-1", "route-a", "app-1", "COMPLETED", now, 100L); + insertExecution("stats-2", "route-a", "app-1", "FAILED", now.plusSeconds(10), 200L); + insertExecution("stats-3", "route-b", "app-1", "COMPLETED", now.plusSeconds(20), 50L); + + // Force continuous aggregate refresh + jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_all', null, null)"); + + ExecutionStats stats = statsStore.stats(now.minusSeconds(60), now.plusSeconds(60)); + assertEquals(3, stats.totalCount()); + assertEquals(1, stats.failedCount()); + } + + @Test + void timeseriesReturnsBuckets() { + Instant now = Instant.now().truncatedTo(ChronoUnit.MINUTES); + for (int i = 0; i < 10; i++) { + insertExecution("ts-" + i, "route-a", "app-1", "COMPLETED", + now.plusSeconds(i * 30), 100L + i); + } + + jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_all', null, null)"); + + StatsTimeseries ts = statsStore.timeseries(now.minusMinutes(1), now.plusMinutes(10), 5); + assertNotNull(ts); + assertFalse(ts.buckets().isEmpty()); + } + + private void insertExecution(String id, String routeId, String groupName, + String status, Instant startTime, long durationMs) { + executionStore.upsert(new ExecutionRecord( + id, routeId, "agent-1", groupName, status, null, null, + startTime, startTime.plusMillis(durationMs), durationMs, + status.equals("FAILED") ? "error" : null, null, null)); + } +} From 5932b5d969e594e4087a44512656c09c0009702e Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:23:21 +0100 Subject: [PATCH 12/32] feat: implement PostgresDiagramStore, PostgresUserRepository, PostgresOidcConfigRepository, PostgresMetricsStore Co-Authored-By: Claude Sonnet 4.6 --- .../app/storage/PostgresDiagramStore.java | 128 ++++++++++++++++++ .../app/storage/PostgresMetricsStore.java | 42 ++++++ .../storage/PostgresOidcConfigRepository.java | 59 ++++++++ .../app/storage/PostgresUserRepository.java | 69 ++++++++++ 4 files changed, 298 insertions(+) create mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresDiagramStore.java create mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresMetricsStore.java create mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresOidcConfigRepository.java create mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresUserRepository.java diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresDiagramStore.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresDiagramStore.java new file mode 100644 index 00000000..0c7dbbf8 --- /dev/null +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresDiagramStore.java @@ -0,0 +1,128 @@ +package com.cameleer3.server.app.storage; + +import com.cameleer3.common.graph.RouteGraph; +import com.cameleer3.server.core.ingestion.TaggedDiagram; +import com.cameleer3.server.core.storage.DiagramStore; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.stereotype.Repository; + +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HexFormat; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +/** + * PostgreSQL implementation of {@link DiagramStore}. + *

+ * Stores route graphs as JSON with SHA-256 content-hash deduplication. + * Uses {@code ON CONFLICT (content_hash) DO NOTHING} for idempotent inserts. + */ +@Repository +public class PostgresDiagramStore implements DiagramStore { + + private static final Logger log = LoggerFactory.getLogger(PostgresDiagramStore.class); + + private static final String INSERT_SQL = """ + INSERT INTO route_diagrams (content_hash, route_id, agent_id, definition) + VALUES (?, ?, ?, ?::jsonb) + ON CONFLICT (content_hash) DO NOTHING + """; + + private static final String SELECT_BY_HASH = """ + SELECT definition FROM route_diagrams WHERE content_hash = ? LIMIT 1 + """; + + private static final String SELECT_HASH_FOR_ROUTE = """ + SELECT content_hash FROM route_diagrams + WHERE route_id = ? AND agent_id = ? + ORDER BY created_at DESC LIMIT 1 + """; + + private final JdbcTemplate jdbcTemplate; + private final ObjectMapper objectMapper; + + public PostgresDiagramStore(JdbcTemplate jdbcTemplate) { + this.jdbcTemplate = jdbcTemplate; + this.objectMapper = new ObjectMapper(); + this.objectMapper.registerModule(new JavaTimeModule()); + } + + @Override + public void store(TaggedDiagram diagram) { + try { + RouteGraph graph = diagram.graph(); + String agentId = diagram.agentId() != null ? diagram.agentId() : ""; + String json = objectMapper.writeValueAsString(graph); + String contentHash = sha256Hex(json); + String routeId = graph.getRouteId() != null ? graph.getRouteId() : ""; + + jdbcTemplate.update(INSERT_SQL, contentHash, routeId, agentId, json); + log.debug("Stored diagram for route={} agent={} with hash={}", routeId, agentId, contentHash); + } catch (JsonProcessingException e) { + throw new RuntimeException("Failed to serialize RouteGraph to JSON", e); + } + } + + @Override + public Optional findByContentHash(String contentHash) { + List> rows = jdbcTemplate.queryForList(SELECT_BY_HASH, contentHash); + if (rows.isEmpty()) { + return Optional.empty(); + } + String json = (String) rows.get(0).get("definition"); + try { + return Optional.of(objectMapper.readValue(json, RouteGraph.class)); + } catch (JsonProcessingException e) { + log.error("Failed to deserialize RouteGraph from PostgreSQL", e); + return Optional.empty(); + } + } + + @Override + public Optional findContentHashForRoute(String routeId, String agentId) { + List> rows = jdbcTemplate.queryForList(SELECT_HASH_FOR_ROUTE, routeId, agentId); + if (rows.isEmpty()) { + return Optional.empty(); + } + return Optional.of((String) rows.get(0).get("content_hash")); + } + + @Override + public Optional findContentHashForRouteByAgents(String routeId, List agentIds) { + if (agentIds == null || agentIds.isEmpty()) { + return Optional.empty(); + } + String placeholders = String.join(", ", Collections.nCopies(agentIds.size(), "?")); + String sql = "SELECT content_hash FROM route_diagrams " + + "WHERE route_id = ? AND agent_id IN (" + placeholders + ") " + + "ORDER BY created_at DESC LIMIT 1"; + var params = new ArrayList(); + params.add(routeId); + params.addAll(agentIds); + List> rows = jdbcTemplate.queryForList(sql, params.toArray()); + if (rows.isEmpty()) { + return Optional.empty(); + } + return Optional.of((String) rows.get(0).get("content_hash")); + } + + static String sha256Hex(String input) { + try { + MessageDigest digest = MessageDigest.getInstance("SHA-256"); + byte[] hash = digest.digest(input.getBytes(StandardCharsets.UTF_8)); + return HexFormat.of().formatHex(hash); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("SHA-256 not available", e); + } + } +} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresMetricsStore.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresMetricsStore.java new file mode 100644 index 00000000..8b8fed63 --- /dev/null +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresMetricsStore.java @@ -0,0 +1,42 @@ +package com.cameleer3.server.app.storage; + +import com.cameleer3.server.core.storage.MetricsStore; +import com.cameleer3.server.core.storage.model.MetricsSnapshot; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.stereotype.Repository; + +import java.sql.Timestamp; +import java.util.List; + +@Repository +public class PostgresMetricsStore implements MetricsStore { + + private static final ObjectMapper MAPPER = new ObjectMapper(); + private final JdbcTemplate jdbc; + + public PostgresMetricsStore(JdbcTemplate jdbc) { + this.jdbc = jdbc; + } + + @Override + public void insertBatch(List snapshots) { + jdbc.batchUpdate(""" + INSERT INTO agent_metrics (agent_id, metric_name, metric_value, tags, + collected_at, server_received_at) + VALUES (?, ?, ?, ?::jsonb, ?, now()) + """, + snapshots.stream().map(s -> new Object[]{ + s.agentId(), s.metricName(), s.metricValue(), + tagsToJson(s.tags()), + Timestamp.from(s.collectedAt()) + }).toList()); + } + + private String tagsToJson(java.util.Map tags) { + if (tags == null || tags.isEmpty()) return null; + try { return MAPPER.writeValueAsString(tags); } + catch (JsonProcessingException e) { return null; } + } +} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresOidcConfigRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresOidcConfigRepository.java new file mode 100644 index 00000000..6da18993 --- /dev/null +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresOidcConfigRepository.java @@ -0,0 +1,59 @@ +package com.cameleer3.server.app.storage; + +import com.cameleer3.server.core.security.OidcConfig; +import com.cameleer3.server.core.security.OidcConfigRepository; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.stereotype.Repository; + +import java.sql.Array; +import java.util.List; +import java.util.Optional; + +@Repository +public class PostgresOidcConfigRepository implements OidcConfigRepository { + + private final JdbcTemplate jdbc; + + public PostgresOidcConfigRepository(JdbcTemplate jdbc) { + this.jdbc = jdbc; + } + + @Override + public Optional find() { + var results = jdbc.query( + "SELECT * FROM oidc_config WHERE config_id = 'default'", + (rs, rowNum) -> { + Array arr = rs.getArray("default_roles"); + String[] roles = arr != null ? (String[]) arr.getArray() : new String[0]; + return new OidcConfig( + rs.getBoolean("enabled"), rs.getString("issuer_uri"), + rs.getString("client_id"), rs.getString("client_secret"), + rs.getString("roles_claim"), List.of(roles), + rs.getBoolean("auto_signup"), rs.getString("display_name_claim")); + }); + return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0)); + } + + @Override + public void save(OidcConfig config) { + jdbc.update(""" + INSERT INTO oidc_config (config_id, enabled, issuer_uri, client_id, client_secret, + roles_claim, default_roles, auto_signup, display_name_claim, updated_at) + VALUES ('default', ?, ?, ?, ?, ?, ?, ?, ?, now()) + ON CONFLICT (config_id) DO UPDATE SET + enabled = EXCLUDED.enabled, issuer_uri = EXCLUDED.issuer_uri, + client_id = EXCLUDED.client_id, client_secret = EXCLUDED.client_secret, + roles_claim = EXCLUDED.roles_claim, default_roles = EXCLUDED.default_roles, + auto_signup = EXCLUDED.auto_signup, display_name_claim = EXCLUDED.display_name_claim, + updated_at = now() + """, + config.enabled(), config.issuerUri(), config.clientId(), config.clientSecret(), + config.rolesClaim(), config.defaultRoles().toArray(new String[0]), + config.autoSignup(), config.displayNameClaim()); + } + + @Override + public void delete() { + jdbc.update("DELETE FROM oidc_config WHERE config_id = 'default'"); + } +} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresUserRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresUserRepository.java new file mode 100644 index 00000000..f5867fec --- /dev/null +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresUserRepository.java @@ -0,0 +1,69 @@ +package com.cameleer3.server.app.storage; + +import com.cameleer3.server.core.security.UserInfo; +import com.cameleer3.server.core.security.UserRepository; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.stereotype.Repository; + +import java.sql.Array; +import java.sql.Timestamp; +import java.util.List; +import java.util.Optional; + +@Repository +public class PostgresUserRepository implements UserRepository { + + private final JdbcTemplate jdbc; + + public PostgresUserRepository(JdbcTemplate jdbc) { + this.jdbc = jdbc; + } + + @Override + public Optional findById(String userId) { + var results = jdbc.query( + "SELECT * FROM users WHERE user_id = ?", + (rs, rowNum) -> mapUser(rs), userId); + return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0)); + } + + @Override + public List findAll() { + return jdbc.query("SELECT * FROM users ORDER BY user_id", + (rs, rowNum) -> mapUser(rs)); + } + + @Override + public void upsert(UserInfo user) { + jdbc.update(""" + INSERT INTO users (user_id, provider, email, display_name, roles, created_at, updated_at) + VALUES (?, ?, ?, ?, ?, now(), now()) + ON CONFLICT (user_id) DO UPDATE SET + provider = EXCLUDED.provider, email = EXCLUDED.email, + display_name = EXCLUDED.display_name, roles = EXCLUDED.roles, + updated_at = now() + """, + user.userId(), user.provider(), user.email(), user.displayName(), + user.roles().toArray(new String[0])); + } + + @Override + public void updateRoles(String userId, List roles) { + jdbc.update("UPDATE users SET roles = ?, updated_at = now() WHERE user_id = ?", + roles.toArray(new String[0]), userId); + } + + @Override + public void delete(String userId) { + jdbc.update("DELETE FROM users WHERE user_id = ?", userId); + } + + private UserInfo mapUser(java.sql.ResultSet rs) throws java.sql.SQLException { + Array rolesArray = rs.getArray("roles"); + String[] roles = rolesArray != null ? (String[]) rolesArray.getArray() : new String[0]; + return new UserInfo( + rs.getString("user_id"), rs.getString("provider"), + rs.getString("email"), rs.getString("display_name"), + List.of(roles)); + } +} From c48e0bdfde3f8d7be4ffbc1e63451524f07fba67 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:25:54 +0100 Subject: [PATCH 13/32] feat: implement debounced SearchIndexer for async OpenSearch indexing --- .../server/core/indexing/SearchIndexer.java | 79 +++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/SearchIndexer.java diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/SearchIndexer.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/SearchIndexer.java new file mode 100644 index 00000000..6cff9e8d --- /dev/null +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/SearchIndexer.java @@ -0,0 +1,79 @@ +package com.cameleer3.server.core.indexing; + +import com.cameleer3.server.core.storage.ExecutionStore; +import com.cameleer3.server.core.storage.ExecutionStore.ExecutionRecord; +import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord; +import com.cameleer3.server.core.storage.SearchIndex; +import com.cameleer3.server.core.storage.model.ExecutionDocument; +import com.cameleer3.server.core.storage.model.ExecutionDocument.ProcessorDoc; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.*; + +public class SearchIndexer { + + private static final Logger log = LoggerFactory.getLogger(SearchIndexer.class); + + private final ExecutionStore executionStore; + private final SearchIndex searchIndex; + private final long debounceMs; + private final int queueCapacity; + + private final Map> pending = new ConcurrentHashMap<>(); + private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor( + r -> { Thread t = new Thread(r, "search-indexer"); t.setDaemon(true); return t; }); + + public SearchIndexer(ExecutionStore executionStore, SearchIndex searchIndex, + long debounceMs, int queueCapacity) { + this.executionStore = executionStore; + this.searchIndex = searchIndex; + this.debounceMs = debounceMs; + this.queueCapacity = queueCapacity; + } + + public void onExecutionUpdated(ExecutionUpdatedEvent event) { + if (pending.size() >= queueCapacity) { + log.warn("Search indexer queue full, dropping event for {}", event.executionId()); + return; + } + + ScheduledFuture existing = pending.put(event.executionId(), + scheduler.schedule(() -> indexExecution(event.executionId()), + debounceMs, TimeUnit.MILLISECONDS)); + if (existing != null) { + existing.cancel(false); + } + } + + private void indexExecution(String executionId) { + pending.remove(executionId); + try { + ExecutionRecord exec = executionStore.findById(executionId).orElse(null); + if (exec == null) return; + + List processors = executionStore.findProcessors(executionId); + List processorDocs = processors.stream() + .map(p -> new ProcessorDoc( + p.processorId(), p.processorType(), p.status(), + p.errorMessage(), p.errorStacktrace(), + p.inputBody(), p.outputBody(), + p.inputHeaders(), p.outputHeaders())) + .toList(); + + searchIndex.index(new ExecutionDocument( + exec.executionId(), exec.routeId(), exec.agentId(), exec.groupName(), + exec.status(), exec.correlationId(), exec.exchangeId(), + exec.startTime(), exec.endTime(), exec.durationMs(), + exec.errorMessage(), exec.errorStacktrace(), processorDocs)); + } catch (Exception e) { + log.error("Failed to index execution {}", executionId, e); + } + } + + public void shutdown() { + scheduler.shutdown(); + } +} From f7d73026941c05b64360933ce3c14a3928f43c93 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:25:55 +0100 Subject: [PATCH 14/32] feat: implement OpenSearchIndex with full-text and wildcard search Co-Authored-By: Claude Opus 4.6 (1M context) --- .../server/app/config/OpenSearchConfig.java | 23 ++ .../server/app/search/OpenSearchIndex.java | 310 ++++++++++++++++++ .../server/app/search/OpenSearchIndexIT.java | 87 +++++ 3 files changed, 420 insertions(+) create mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/OpenSearchConfig.java create mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java create mode 100644 cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/OpenSearchConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/OpenSearchConfig.java new file mode 100644 index 00000000..0ed581ad --- /dev/null +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/OpenSearchConfig.java @@ -0,0 +1,23 @@ +package com.cameleer3.server.app.config; + +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.core5.http.HttpHost; +import org.opensearch.client.opensearch.OpenSearchClient; +import org.opensearch.client.transport.httpclient5.ApacheHttpClient5TransportBuilder; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +public class OpenSearchConfig { + + @Value("${opensearch.url:http://localhost:9200}") + private String opensearchUrl; + + @Bean + public OpenSearchClient openSearchClient() { + HttpHost host = HttpHost.create(opensearchUrl); + var transport = ApacheHttpClient5TransportBuilder.builder(host).build(); + return new OpenSearchClient(transport); + } +} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java new file mode 100644 index 00000000..d130b1f6 --- /dev/null +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java @@ -0,0 +1,310 @@ +package com.cameleer3.server.app.search; + +import com.cameleer3.server.core.search.ExecutionSummary; +import com.cameleer3.server.core.search.SearchRequest; +import com.cameleer3.server.core.search.SearchResult; +import com.cameleer3.server.core.storage.SearchIndex; +import com.cameleer3.server.core.storage.model.ExecutionDocument; +import com.cameleer3.server.core.storage.model.ExecutionDocument.ProcessorDoc; +import jakarta.annotation.PostConstruct; +import org.opensearch.client.opensearch.OpenSearchClient; +import org.opensearch.client.opensearch._types.FieldValue; +import org.opensearch.client.opensearch._types.SortOrder; +import org.opensearch.client.opensearch._types.query_dsl.*; +import org.opensearch.client.opensearch.core.*; +import org.opensearch.client.opensearch.core.search.Hit; +import org.opensearch.client.opensearch.indices.*; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.stereotype.Repository; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.format.DateTimeFormatter; +import java.util.*; +import java.util.stream.Collectors; + +@Repository +public class OpenSearchIndex implements SearchIndex { + + private static final Logger log = LoggerFactory.getLogger(OpenSearchIndex.class); + private static final String INDEX_PREFIX = "executions-"; + private static final DateTimeFormatter DAY_FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd") + .withZone(ZoneOffset.UTC); + + private final OpenSearchClient client; + + public OpenSearchIndex(OpenSearchClient client) { + this.client = client; + } + + @PostConstruct + void ensureIndexTemplate() { + // Full template with ngram analyzer for infix wildcard search. + // The template JSON matches the spec's OpenSearch index template definition. + try { + boolean exists = client.indices().existsIndexTemplate( + ExistsIndexTemplateRequest.of(b -> b.name("executions-template"))).value(); + if (!exists) { + client.indices().putIndexTemplate(PutIndexTemplateRequest.of(b -> b + .name("executions-template") + .indexPatterns(List.of("executions-*")) + .template(t -> t + .settings(s -> s + .numberOfShards("3") + .numberOfReplicas("1") + .analysis(a -> a + .analyzer("ngram_analyzer", an -> an + .custom(c -> c + .tokenizer("ngram_tokenizer") + .filter("lowercase"))) + .tokenizer("ngram_tokenizer", tk -> tk + .definition(d -> d + .ngram(ng -> ng + .minGram(3) + .maxGram(4) + .tokenChars(TokenChar.Letter, + TokenChar.Digit, + TokenChar.Punctuation, + TokenChar.Symbol))))))))); + log.info("OpenSearch index template created with ngram analyzer"); + } + } catch (IOException e) { + log.error("Failed to create index template", e); + } + } + + @Override + public void index(ExecutionDocument doc) { + String indexName = INDEX_PREFIX + DAY_FMT.format(doc.startTime()); + try { + client.index(IndexRequest.of(b -> b + .index(indexName) + .id(doc.executionId()) + .document(toMap(doc)))); + } catch (IOException e) { + log.error("Failed to index execution {}", doc.executionId(), e); + } + } + + @Override + public SearchResult search(SearchRequest request) { + try { + var searchReq = buildSearchRequest(request, request.limit()); + var response = client.search(searchReq, Map.class); + + List items = response.hits().hits().stream() + .map(this::hitToSummary) + .collect(Collectors.toList()); + + long total = response.hits().total() != null ? response.hits().total().value() : 0; + return new SearchResult<>(items, total); + } catch (IOException e) { + log.error("Search failed", e); + return new SearchResult<>(List.of(), 0); + } + } + + @Override + public long count(SearchRequest request) { + try { + var countReq = CountRequest.of(b -> b + .index(INDEX_PREFIX + "*") + .query(buildQuery(request))); + return client.count(countReq).count(); + } catch (IOException e) { + log.error("Count failed", e); + return 0; + } + } + + @Override + public void delete(String executionId) { + try { + client.deleteByQuery(DeleteByQueryRequest.of(b -> b + .index(List.of(INDEX_PREFIX + "*")) + .query(Query.of(q -> q.term(t -> t + .field("execution_id").value(executionId)))))); + } catch (IOException e) { + log.error("Failed to delete execution {}", executionId, e); + } + } + + private org.opensearch.client.opensearch.core.SearchRequest buildSearchRequest( + SearchRequest request, int size) { + return org.opensearch.client.opensearch.core.SearchRequest.of(b -> { + b.index(INDEX_PREFIX + "*") + .query(buildQuery(request)) + .size(size) + .from(request.offset()) + .sort(s -> s.field(f -> f + .field(request.sortColumn()) + .order("asc".equalsIgnoreCase(request.sortDir()) + ? SortOrder.Asc : SortOrder.Desc))); + return b; + }); + } + + private Query buildQuery(SearchRequest request) { + List must = new ArrayList<>(); + List filter = new ArrayList<>(); + + // Time range + if (request.timeFrom() != null || request.timeTo() != null) { + filter.add(Query.of(q -> q.range(r -> { + r.field("start_time"); + if (request.timeFrom() != null) + r.gte(jakarta.json.Json.createValue(request.timeFrom().toString())); + if (request.timeTo() != null) + r.lte(jakarta.json.Json.createValue(request.timeTo().toString())); + return r; + }))); + } + + // Keyword filters + if (request.status() != null) + filter.add(termQuery("status", request.status())); + if (request.routeId() != null) + filter.add(termQuery("route_id", request.routeId())); + if (request.agentId() != null) + filter.add(termQuery("agent_id", request.agentId())); + if (request.correlationId() != null) + filter.add(termQuery("correlation_id", request.correlationId())); + + // Full-text search across all fields + nested processor fields + if (request.text() != null && !request.text().isBlank()) { + String text = request.text(); + List textQueries = new ArrayList<>(); + + // Search top-level text fields + textQueries.add(Query.of(q -> q.multiMatch(m -> m + .query(text) + .fields("error_message", "error_stacktrace", + "error_message.ngram", "error_stacktrace.ngram")))); + + // Search nested processor fields + textQueries.add(Query.of(q -> q.nested(n -> n + .path("processors") + .query(nq -> nq.multiMatch(m -> m + .query(text) + .fields("processors.input_body", "processors.output_body", + "processors.input_headers", "processors.output_headers", + "processors.error_message", "processors.error_stacktrace", + "processors.input_body.ngram", "processors.output_body.ngram", + "processors.input_headers.ngram", "processors.output_headers.ngram", + "processors.error_message.ngram", "processors.error_stacktrace.ngram")))))); + + // Also try keyword fields for exact matches + textQueries.add(Query.of(q -> q.multiMatch(m -> m + .query(text) + .fields("execution_id", "route_id", "agent_id", "correlation_id", "exchange_id")))); + + must.add(Query.of(q -> q.bool(b -> b.should(textQueries).minimumShouldMatch("1")))); + } + + // Scoped text searches + if (request.textInBody() != null && !request.textInBody().isBlank()) { + must.add(Query.of(q -> q.nested(n -> n + .path("processors") + .query(nq -> nq.multiMatch(m -> m + .query(request.textInBody()) + .fields("processors.input_body", "processors.output_body", + "processors.input_body.ngram", "processors.output_body.ngram")))))); + } + if (request.textInHeaders() != null && !request.textInHeaders().isBlank()) { + must.add(Query.of(q -> q.nested(n -> n + .path("processors") + .query(nq -> nq.multiMatch(m -> m + .query(request.textInHeaders()) + .fields("processors.input_headers", "processors.output_headers", + "processors.input_headers.ngram", "processors.output_headers.ngram")))))); + } + if (request.textInErrors() != null && !request.textInErrors().isBlank()) { + String errText = request.textInErrors(); + must.add(Query.of(q -> q.bool(b -> b.should( + Query.of(sq -> sq.multiMatch(m -> m + .query(errText) + .fields("error_message", "error_stacktrace", + "error_message.ngram", "error_stacktrace.ngram"))), + Query.of(sq -> sq.nested(n -> n + .path("processors") + .query(nq -> nq.multiMatch(m -> m + .query(errText) + .fields("processors.error_message", "processors.error_stacktrace", + "processors.error_message.ngram", "processors.error_stacktrace.ngram"))))) + ).minimumShouldMatch("1")))); + } + + // Duration range + if (request.durationMin() != null || request.durationMax() != null) { + filter.add(Query.of(q -> q.range(r -> { + r.field("duration_ms"); + if (request.durationMin() != null) + r.gte(jakarta.json.Json.createValue(request.durationMin())); + if (request.durationMax() != null) + r.lte(jakarta.json.Json.createValue(request.durationMax())); + return r; + }))); + } + + return Query.of(q -> q.bool(b -> { + if (!must.isEmpty()) b.must(must); + if (!filter.isEmpty()) b.filter(filter); + if (must.isEmpty() && filter.isEmpty()) b.must(Query.of(mq -> mq.matchAll(m -> m))); + return b; + })); + } + + private Query termQuery(String field, String value) { + return Query.of(q -> q.term(t -> t.field(field).value(value))); + } + + private Map toMap(ExecutionDocument doc) { + Map map = new LinkedHashMap<>(); + map.put("execution_id", doc.executionId()); + map.put("route_id", doc.routeId()); + map.put("agent_id", doc.agentId()); + map.put("group_name", doc.groupName()); + map.put("status", doc.status()); + map.put("correlation_id", doc.correlationId()); + map.put("exchange_id", doc.exchangeId()); + map.put("start_time", doc.startTime() != null ? doc.startTime().toString() : null); + map.put("end_time", doc.endTime() != null ? doc.endTime().toString() : null); + map.put("duration_ms", doc.durationMs()); + map.put("error_message", doc.errorMessage()); + map.put("error_stacktrace", doc.errorStacktrace()); + if (doc.processors() != null) { + map.put("processors", doc.processors().stream().map(p -> { + Map pm = new LinkedHashMap<>(); + pm.put("processor_id", p.processorId()); + pm.put("processor_type", p.processorType()); + pm.put("status", p.status()); + pm.put("error_message", p.errorMessage()); + pm.put("error_stacktrace", p.errorStacktrace()); + pm.put("input_body", p.inputBody()); + pm.put("output_body", p.outputBody()); + pm.put("input_headers", p.inputHeaders()); + pm.put("output_headers", p.outputHeaders()); + return pm; + }).toList()); + } + return map; + } + + @SuppressWarnings("unchecked") + private ExecutionSummary hitToSummary(Hit hit) { + Map src = hit.source(); + if (src == null) return null; + return new ExecutionSummary( + (String) src.get("execution_id"), + (String) src.get("route_id"), + (String) src.get("agent_id"), + (String) src.get("status"), + src.get("start_time") != null ? Instant.parse((String) src.get("start_time")) : null, + src.get("end_time") != null ? Instant.parse((String) src.get("end_time")) : null, + src.get("duration_ms") != null ? ((Number) src.get("duration_ms")).longValue() : 0L, + (String) src.get("correlation_id"), + (String) src.get("error_message")); + } +} diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java new file mode 100644 index 00000000..24054006 --- /dev/null +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java @@ -0,0 +1,87 @@ +package com.cameleer3.server.app.search; + +import com.cameleer3.server.app.AbstractPostgresIT; +import com.cameleer3.server.core.search.ExecutionSummary; +import com.cameleer3.server.core.search.SearchRequest; +import com.cameleer3.server.core.search.SearchResult; +import com.cameleer3.server.core.storage.SearchIndex; +import com.cameleer3.server.core.storage.model.ExecutionDocument; +import com.cameleer3.server.core.storage.model.ExecutionDocument.ProcessorDoc; +import org.junit.jupiter.api.Test; +import org.opensearch.testcontainers.OpensearchContainer; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.test.context.DynamicPropertyRegistry; +import org.springframework.test.context.DynamicPropertySource; +import org.testcontainers.junit.jupiter.Container; + +import java.time.Instant; +import java.util.List; + +import static org.junit.jupiter.api.Assertions.*; + +// Extends AbstractPostgresIT for PostgreSQL datasource needed by Spring context +class OpenSearchIndexIT extends AbstractPostgresIT { + + @Container + static final OpensearchContainer opensearch = + new OpensearchContainer<>("opensearchproject/opensearch:2.19.0") + .withSecurityEnabled(false); + + @DynamicPropertySource + static void configureOpenSearch(DynamicPropertyRegistry registry) { + registry.add("opensearch.url", opensearch::getHttpHostAddress); + } + + @Autowired + SearchIndex searchIndex; + + @Test + void indexAndSearchByText() throws Exception { + Instant now = Instant.now(); + ExecutionDocument doc = new ExecutionDocument( + "search-1", "route-a", "agent-1", "app-1", + "FAILED", "corr-1", "exch-1", + now, now.plusMillis(100), 100L, + "OrderNotFoundException: order-12345 not found", null, + List.of(new ProcessorDoc("proc-1", "log", "COMPLETED", + null, null, "request body with customer-99", null, null, null))); + + searchIndex.index(doc); + Thread.sleep(1500); // Allow OpenSearch refresh + + SearchRequest request = new SearchRequest( + null, now.minusSeconds(60), now.plusSeconds(60), + null, null, null, + "OrderNotFoundException", null, null, null, + null, null, null, null, null, + 0, 50, "startTime", "desc"); + + SearchResult result = searchIndex.search(request); + assertTrue(result.total() > 0); + assertEquals("search-1", result.items().get(0).executionId()); + } + + @Test + void wildcardSearchFindsSubstring() throws Exception { + Instant now = Instant.now(); + ExecutionDocument doc = new ExecutionDocument( + "wild-1", "route-b", "agent-1", "app-1", + "COMPLETED", null, null, + now, now.plusMillis(50), 50L, null, null, + List.of(new ProcessorDoc("proc-1", "bean", "COMPLETED", + null, null, "UniquePayloadIdentifier12345", null, null, null))); + + searchIndex.index(doc); + Thread.sleep(1500); + + SearchRequest request = new SearchRequest( + null, now.minusSeconds(60), now.plusSeconds(60), + null, null, null, + "PayloadIdentifier", null, null, null, + null, null, null, null, null, + 0, 50, "startTime", "desc"); + + SearchResult result = searchIndex.search(request); + assertTrue(result.total() > 0); + } +} From 7dbfaf0932f63860fc6da0fd0cd61f0a0e3bc7f5 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:27:58 +0100 Subject: [PATCH 15/32] feat: wire new storage beans, add MetricsFlushScheduler and RetentionScheduler Co-Authored-By: Claude Opus 4.6 (1M context) --- .../app/config/IngestionBeanConfig.java | 27 ++------- .../server/app/config/SearchBeanConfig.java | 23 ++------ .../server/app/config/StorageBeanConfig.java | 37 ++++++++++++ .../app/ingestion/MetricsFlushScheduler.java | 59 +++++++++++++++++++ .../app/retention/RetentionScheduler.java | 48 +++++++++++++++ 5 files changed, 153 insertions(+), 41 deletions(-) create mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/StorageBeanConfig.java create mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/MetricsFlushScheduler.java create mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/retention/RetentionScheduler.java diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/IngestionBeanConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/IngestionBeanConfig.java index 83507e16..c0d3a479 100644 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/IngestionBeanConfig.java +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/IngestionBeanConfig.java @@ -1,41 +1,22 @@ package com.cameleer3.server.app.config; -import com.cameleer3.server.core.ingestion.IngestionService; -import com.cameleer3.server.core.ingestion.TaggedDiagram; -import com.cameleer3.server.core.ingestion.TaggedExecution; import com.cameleer3.server.core.ingestion.WriteBuffer; import com.cameleer3.server.core.storage.model.MetricsSnapshot; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; /** - * Creates the write buffer and ingestion service beans. + * Creates the write buffer bean for metrics. *

- * The {@link WriteBuffer} instances are shared between the - * {@link IngestionService} (producer side) and the flush scheduler (consumer side). + * The {@link WriteBuffer} instance is shared between the + * {@link com.cameleer3.server.core.ingestion.IngestionService} (producer side) + * and the flush scheduler (consumer side). */ @Configuration public class IngestionBeanConfig { - @Bean - public WriteBuffer executionBuffer(IngestionConfig config) { - return new WriteBuffer<>(config.getBufferCapacity()); - } - - @Bean - public WriteBuffer diagramBuffer(IngestionConfig config) { - return new WriteBuffer<>(config.getBufferCapacity()); - } - @Bean public WriteBuffer metricsBuffer(IngestionConfig config) { return new WriteBuffer<>(config.getBufferCapacity()); } - - @Bean - public IngestionService ingestionService(WriteBuffer executionBuffer, - WriteBuffer diagramBuffer, - WriteBuffer metricsBuffer) { - return new IngestionService(executionBuffer, diagramBuffer, metricsBuffer); - } } diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/SearchBeanConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/SearchBeanConfig.java index debc1e8b..e722f0a8 100644 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/SearchBeanConfig.java +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/SearchBeanConfig.java @@ -1,32 +1,19 @@ package com.cameleer3.server.app.config; -import com.cameleer3.server.app.search.ClickHouseSearchEngine; -import com.cameleer3.server.core.detail.DetailService; -import com.cameleer3.server.core.search.SearchEngine; import com.cameleer3.server.core.search.SearchService; -import com.cameleer3.server.core.storage.ExecutionRepository; +import com.cameleer3.server.core.storage.SearchIndex; +import com.cameleer3.server.core.storage.StatsStore; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; -import org.springframework.jdbc.core.JdbcTemplate; /** - * Creates beans for the search and detail layers. + * Creates beans for the search layer. */ @Configuration public class SearchBeanConfig { @Bean - public SearchEngine searchEngine(JdbcTemplate jdbcTemplate) { - return new ClickHouseSearchEngine(jdbcTemplate); - } - - @Bean - public SearchService searchService(SearchEngine searchEngine) { - return new SearchService(searchEngine); - } - - @Bean - public DetailService detailService(ExecutionRepository executionRepository) { - return new DetailService(executionRepository); + public SearchService searchService(SearchIndex searchIndex, StatsStore statsStore) { + return new SearchService(searchIndex, statsStore); } } diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/StorageBeanConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/StorageBeanConfig.java new file mode 100644 index 00000000..92f34943 --- /dev/null +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/StorageBeanConfig.java @@ -0,0 +1,37 @@ +package com.cameleer3.server.app.config; + +import com.cameleer3.server.core.detail.DetailService; +import com.cameleer3.server.core.indexing.SearchIndexer; +import com.cameleer3.server.core.ingestion.IngestionService; +import com.cameleer3.server.core.ingestion.WriteBuffer; +import com.cameleer3.server.core.storage.*; +import com.cameleer3.server.core.storage.model.MetricsSnapshot; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; + +@Configuration +public class StorageBeanConfig { + + @Bean + public DetailService detailService(ExecutionStore executionStore) { + return new DetailService(executionStore); + } + + @Bean(destroyMethod = "shutdown") + public SearchIndexer searchIndexer(ExecutionStore executionStore, SearchIndex searchIndex, + @Value("${opensearch.debounce-ms:2000}") long debounceMs, + @Value("${opensearch.queue-size:10000}") int queueSize) { + return new SearchIndexer(executionStore, searchIndex, debounceMs, queueSize); + } + + @Bean + public IngestionService ingestionService(ExecutionStore executionStore, + DiagramStore diagramStore, + WriteBuffer metricsBuffer, + SearchIndexer searchIndexer, + @Value("${cameleer.body-size-limit:16384}") int bodySizeLimit) { + return new IngestionService(executionStore, diagramStore, metricsBuffer, + searchIndexer::onExecutionUpdated, bodySizeLimit); + } +} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/MetricsFlushScheduler.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/MetricsFlushScheduler.java new file mode 100644 index 00000000..1479c762 --- /dev/null +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/MetricsFlushScheduler.java @@ -0,0 +1,59 @@ +package com.cameleer3.server.app.ingestion; + +import com.cameleer3.server.app.config.IngestionConfig; +import com.cameleer3.server.core.ingestion.WriteBuffer; +import com.cameleer3.server.core.storage.MetricsStore; +import com.cameleer3.server.core.storage.model.MetricsSnapshot; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.context.SmartLifecycle; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Component; + +import java.util.List; + +@Component +public class MetricsFlushScheduler implements SmartLifecycle { + + private static final Logger log = LoggerFactory.getLogger(MetricsFlushScheduler.class); + + private final WriteBuffer metricsBuffer; + private final MetricsStore metricsStore; + private final int batchSize; + private volatile boolean running = false; + + public MetricsFlushScheduler(WriteBuffer metricsBuffer, + MetricsStore metricsStore, + IngestionConfig config) { + this.metricsBuffer = metricsBuffer; + this.metricsStore = metricsStore; + this.batchSize = config.getBatchSize(); + } + + @Scheduled(fixedDelayString = "${ingestion.flush-interval-ms:1000}") + public void flush() { + try { + List batch = metricsBuffer.drain(batchSize); + if (!batch.isEmpty()) { + metricsStore.insertBatch(batch); + log.debug("Flushed {} metrics to PostgreSQL", batch.size()); + } + } catch (Exception e) { + log.error("Failed to flush metrics", e); + } + } + + @Override public void start() { running = true; } + @Override public void stop() { + // Drain remaining on shutdown + while (metricsBuffer.size() > 0) { + List batch = metricsBuffer.drain(batchSize); + if (batch.isEmpty()) break; + try { metricsStore.insertBatch(batch); } + catch (Exception e) { log.error("Failed to flush metrics during shutdown", e); break; } + } + running = false; + } + @Override public boolean isRunning() { return running; } + @Override public int getPhase() { return Integer.MAX_VALUE - 1; } +} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/retention/RetentionScheduler.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/retention/RetentionScheduler.java new file mode 100644 index 00000000..152bb1c9 --- /dev/null +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/retention/RetentionScheduler.java @@ -0,0 +1,48 @@ +package com.cameleer3.server.app.retention; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.scheduling.annotation.Scheduled; +import org.springframework.stereotype.Component; + +@Component +public class RetentionScheduler { + + private static final Logger log = LoggerFactory.getLogger(RetentionScheduler.class); + + private final JdbcTemplate jdbc; + private final int retentionDays; + + public RetentionScheduler(JdbcTemplate jdbc, + @Value("${cameleer.retention-days:30}") int retentionDays) { + this.jdbc = jdbc; + this.retentionDays = retentionDays; + } + + @Scheduled(cron = "0 0 2 * * *") // Daily at 2 AM UTC + public void dropExpiredChunks() { + String interval = retentionDays + " days"; + try { + // Raw data + jdbc.execute("SELECT drop_chunks('executions', INTERVAL '" + interval + "')"); + jdbc.execute("SELECT drop_chunks('processor_executions', INTERVAL '" + interval + "')"); + jdbc.execute("SELECT drop_chunks('agent_metrics', INTERVAL '" + interval + "')"); + + // Continuous aggregates (keep 3x longer) + String caggInterval = (retentionDays * 3) + " days"; + jdbc.execute("SELECT drop_chunks('stats_1m_all', INTERVAL '" + caggInterval + "')"); + jdbc.execute("SELECT drop_chunks('stats_1m_app', INTERVAL '" + caggInterval + "')"); + jdbc.execute("SELECT drop_chunks('stats_1m_route', INTERVAL '" + caggInterval + "')"); + jdbc.execute("SELECT drop_chunks('stats_1m_processor', INTERVAL '" + caggInterval + "')"); + + log.info("Retention: dropped chunks older than {} days (aggregates: {} days)", + retentionDays, retentionDays * 3); + } catch (Exception e) { + log.error("Retention job failed", e); + } + } + // Note: OpenSearch daily index deletion should be handled via ILM policy + // configured at deployment time, not in application code. +} From 565b548ac127e0d9b112b8b6f35efb7baf371f54 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:56:13 +0100 Subject: [PATCH 16/32] refactor: remove all ClickHouse code, old interfaces, and SQL migrations - Delete all ClickHouse storage implementations and config - Delete old core interfaces (ExecutionRepository, DiagramRepository, MetricsRepository, SearchEngine, RawExecutionRow) - Delete ClickHouse SQL migration files - Delete AbstractClickHouseIT - Update controllers to use new store interfaces (DiagramStore, ExecutionStore) - Fix IngestionService calls in controllers for new synchronous API - Migrate all ITs from AbstractClickHouseIT to AbstractPostgresIT - Fix count() syntax and remove ClickHouse-specific test assertions - Update TreeReconstructionTest for new buildTree() method Co-Authored-By: Claude Opus 4.6 (1M context) --- cameleer3-server-app/pom.xml | 7 +- .../server/app/config/ClickHouseConfig.java | 80 ---- .../server/app/config/OpenSearchConfig.java | 17 +- .../app/controller/DetailController.java | 31 +- .../app/controller/DiagramController.java | 23 +- .../controller/DiagramRenderController.java | 14 +- .../app/controller/ExecutionController.java | 39 +- .../app/controller/MetricsController.java | 2 +- .../ingestion/ClickHouseFlushScheduler.java | 159 ------- .../app/search/ClickHouseSearchEngine.java | 357 --------------- .../server/app/search/OpenSearchIndex.java | 62 +-- .../app/security/SecurityBeanConfig.java | 2 +- .../storage/ClickHouseDiagramRepository.java | 127 ------ .../ClickHouseExecutionRepository.java | 418 ------------------ .../storage/ClickHouseMetricsRepository.java | 67 --- .../ClickHouseOidcConfigRepository.java | 71 --- .../app/storage/ClickHouseUserRepository.java | 112 ----- .../app/storage/PostgresUserRepository.java | 4 +- .../main/resources/clickhouse/01-schema.sql | 57 --- .../clickhouse/02-search-columns.sql | 25 -- .../main/resources/clickhouse/03-users.sql | 10 - .../resources/clickhouse/04-oidc-config.sql | 13 - .../clickhouse/05-oidc-auto-signup.sql | 1 - .../clickhouse/06-oidc-display-name-claim.sql | 1 - .../resources/clickhouse/07-stats-rollup.sql | 35 -- .../clickhouse/08-stats-rollup-backfill.sql | 16 - .../server/app/AbstractClickHouseIT.java | 82 ---- .../server/app/AbstractPostgresIT.java | 5 + .../controller/AgentCommandControllerIT.java | 4 +- .../AgentRegistrationControllerIT.java | 4 +- .../app/controller/AgentSseControllerIT.java | 4 +- .../server/app/controller/BackpressureIT.java | 64 ++- .../app/controller/DetailControllerIT.java | 6 +- .../app/controller/DiagramControllerIT.java | 8 +- .../controller/DiagramRenderControllerIT.java | 6 +- .../app/controller/ExecutionControllerIT.java | 8 +- .../app/controller/ForwardCompatIT.java | 4 +- .../app/controller/HealthControllerIT.java | 22 +- .../app/controller/MetricsControllerIT.java | 8 +- .../server/app/controller/OpenApiIT.java | 4 +- .../app/controller/SearchControllerIT.java | 6 +- .../app/interceptor/ProtocolVersionIT.java | 4 +- .../server/app/search/OpenSearchIndexIT.java | 5 +- .../server/app/security/BootstrapTokenIT.java | 4 +- .../server/app/security/JwtRefreshIT.java | 4 +- .../app/security/RegistrationSecurityIT.java | 4 +- .../server/app/security/SecurityFilterIT.java | 4 +- .../server/app/security/SseSigningIT.java | 4 +- .../server/app/storage/DiagramLinkingIT.java | 4 +- .../server/app/storage/IngestionSchemaIT.java | 4 +- .../server/core/detail/ExecutionDetail.java | 2 +- .../server/core/detail/ProcessorNode.java | 2 +- .../server/core/detail/RawExecutionRow.java | 59 --- .../core/ingestion/IngestionService.java | 6 +- .../server/core/ingestion/WriteBuffer.java | 2 +- .../server/core/search/SearchEngine.java | 72 --- .../server/core/search/SearchRequest.java | 2 +- .../core/storage/DiagramRepository.java | 35 -- .../core/storage/ExecutionRepository.java | 28 -- .../core/storage/MetricsRepository.java | 17 - .../core/detail/TreeReconstructionTest.java | 103 ++--- clickhouse/init/01-schema.sql | 57 --- clickhouse/init/02-search-columns.sql | 25 -- clickhouse/init/03-users.sql | 10 - clickhouse/init/04-oidc-config.sql | 13 - clickhouse/init/05-oidc-auto-signup.sql | 1 - .../init/06-oidc-display-name-claim.sql | 1 - pom.xml | 7 + 68 files changed, 226 insertions(+), 2238 deletions(-) delete mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/ClickHouseConfig.java delete mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/ClickHouseFlushScheduler.java delete mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/ClickHouseSearchEngine.java delete mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseDiagramRepository.java delete mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseExecutionRepository.java delete mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseMetricsRepository.java delete mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseOidcConfigRepository.java delete mode 100644 cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseUserRepository.java delete mode 100644 cameleer3-server-app/src/main/resources/clickhouse/01-schema.sql delete mode 100644 cameleer3-server-app/src/main/resources/clickhouse/02-search-columns.sql delete mode 100644 cameleer3-server-app/src/main/resources/clickhouse/03-users.sql delete mode 100644 cameleer3-server-app/src/main/resources/clickhouse/04-oidc-config.sql delete mode 100644 cameleer3-server-app/src/main/resources/clickhouse/05-oidc-auto-signup.sql delete mode 100644 cameleer3-server-app/src/main/resources/clickhouse/06-oidc-display-name-claim.sql delete mode 100644 cameleer3-server-app/src/main/resources/clickhouse/07-stats-rollup.sql delete mode 100644 cameleer3-server-app/src/main/resources/clickhouse/08-stats-rollup-backfill.sql delete mode 100644 cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractClickHouseIT.java delete mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/RawExecutionRow.java delete mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchEngine.java delete mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramRepository.java delete mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionRepository.java delete mode 100644 cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsRepository.java delete mode 100644 clickhouse/init/01-schema.sql delete mode 100644 clickhouse/init/02-search-columns.sql delete mode 100644 clickhouse/init/03-users.sql delete mode 100644 clickhouse/init/04-oidc-config.sql delete mode 100644 clickhouse/init/05-oidc-auto-signup.sql delete mode 100644 clickhouse/init/06-oidc-display-name-claim.sql diff --git a/cameleer3-server-app/pom.xml b/cameleer3-server-app/pom.xml index 9194dc02..81e21c59 100644 --- a/cameleer3-server-app/pom.xml +++ b/cameleer3-server-app/pom.xml @@ -112,7 +112,12 @@ org.testcontainers - postgresql + testcontainers-postgresql + test + + + org.testcontainers + testcontainers-junit-jupiter test diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/ClickHouseConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/ClickHouseConfig.java deleted file mode 100644 index ee5d8db5..00000000 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/ClickHouseConfig.java +++ /dev/null @@ -1,80 +0,0 @@ -package com.cameleer3.server.app.config; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.core.io.Resource; -import org.springframework.core.io.support.PathMatchingResourcePatternResolver; -import org.springframework.jdbc.core.JdbcTemplate; - -import jakarta.annotation.PostConstruct; -import javax.sql.DataSource; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; -import java.util.Comparator; -import java.util.stream.Collectors; - -/** - * ClickHouse configuration. - *

- * Spring Boot auto-configures the DataSource from {@code spring.datasource.*} properties. - * This class exposes a JdbcTemplate bean and initializes the schema on startup. - *

- * The ClickHouse container's {@code CLICKHOUSE_DB} env var creates the database; - * this class creates the tables within it. - *

- * Migration files are discovered automatically from {@code classpath:clickhouse/*.sql} - * and executed in filename order (numeric prefix sort). - */ -@Configuration -public class ClickHouseConfig { - - private static final Logger log = LoggerFactory.getLogger(ClickHouseConfig.class); - private static final String MIGRATION_PATTERN = "classpath:clickhouse/*.sql"; - - private final DataSource dataSource; - - public ClickHouseConfig(DataSource dataSource) { - this.dataSource = dataSource; - } - - @Bean - public JdbcTemplate jdbcTemplate() { - return new JdbcTemplate(dataSource); - } - - @PostConstruct - void initSchema() { - var jdbc = new JdbcTemplate(dataSource); - try { - Resource[] resources = new PathMatchingResourcePatternResolver() - .getResources(MIGRATION_PATTERN); - Arrays.sort(resources, Comparator.comparing(Resource::getFilename)); - - for (Resource resource : resources) { - String filename = resource.getFilename(); - try { - String sql = resource.getContentAsString(StandardCharsets.UTF_8); - String stripped = sql.lines() - .filter(line -> !line.trim().startsWith("--")) - .collect(Collectors.joining("\n")); - for (String statement : stripped.split(";")) { - String trimmed = statement.trim(); - if (!trimmed.isEmpty()) { - jdbc.execute(trimmed); - } - } - log.info("Applied schema: {}", filename); - } catch (Exception e) { - log.error("Failed to apply schema: {}", filename, e); - throw new RuntimeException("Schema initialization failed: " + filename, e); - } - } - } catch (RuntimeException e) { - throw e; - } catch (Exception e) { - throw new RuntimeException("Failed to discover migration files", e); - } - } -} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/OpenSearchConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/OpenSearchConfig.java index 0ed581ad..3ff7edea 100644 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/OpenSearchConfig.java +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/OpenSearchConfig.java @@ -1,9 +1,10 @@ package com.cameleer3.server.app.config; -import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; -import org.apache.hc.core5.http.HttpHost; +import org.apache.http.HttpHost; +import org.opensearch.client.RestClient; +import org.opensearch.client.json.jackson.JacksonJsonpMapper; import org.opensearch.client.opensearch.OpenSearchClient; -import org.opensearch.client.transport.httpclient5.ApacheHttpClient5TransportBuilder; +import org.opensearch.client.transport.rest_client.RestClientTransport; import org.springframework.beans.factory.annotation.Value; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; @@ -14,10 +15,14 @@ public class OpenSearchConfig { @Value("${opensearch.url:http://localhost:9200}") private String opensearchUrl; + @Bean(destroyMethod = "close") + public RestClient opensearchRestClient() { + return RestClient.builder(HttpHost.create(opensearchUrl)).build(); + } + @Bean - public OpenSearchClient openSearchClient() { - HttpHost host = HttpHost.create(opensearchUrl); - var transport = ApacheHttpClient5TransportBuilder.builder(host).build(); + public OpenSearchClient openSearchClient(RestClient restClient) { + var transport = new RestClientTransport(restClient, new JacksonJsonpMapper()); return new OpenSearchClient(transport); } } diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DetailController.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DetailController.java index 3e0ca0c4..2bd6ea55 100644 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DetailController.java +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DetailController.java @@ -1,8 +1,9 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.storage.ClickHouseExecutionRepository; import com.cameleer3.server.core.detail.DetailService; import com.cameleer3.server.core.detail.ExecutionDetail; +import com.cameleer3.server.core.storage.ExecutionStore; +import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord; import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.responses.ApiResponse; import io.swagger.v3.oas.annotations.tags.Tag; @@ -12,14 +13,16 @@ import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RestController; +import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; /** * Endpoints for retrieving execution details and processor snapshots. *

* The detail endpoint returns a nested processor tree reconstructed from - * flat parallel arrays stored in ClickHouse. The snapshot endpoint returns - * per-processor exchange data (bodies and headers). + * individual processor records stored in PostgreSQL. The snapshot endpoint + * returns per-processor exchange data (bodies and headers). */ @RestController @RequestMapping("/api/v1/executions") @@ -27,12 +30,12 @@ import java.util.Map; public class DetailController { private final DetailService detailService; - private final ClickHouseExecutionRepository executionRepository; + private final ExecutionStore executionStore; public DetailController(DetailService detailService, - ClickHouseExecutionRepository executionRepository) { + ExecutionStore executionStore) { this.detailService = detailService; - this.executionRepository = executionRepository; + this.executionStore = executionStore; } @GetMapping("/{executionId}") @@ -52,8 +55,18 @@ public class DetailController { public ResponseEntity> getProcessorSnapshot( @PathVariable String executionId, @PathVariable int index) { - return executionRepository.findProcessorSnapshot(executionId, index) - .map(ResponseEntity::ok) - .orElse(ResponseEntity.notFound().build()); + List processors = executionStore.findProcessors(executionId); + if (index < 0 || index >= processors.size()) { + return ResponseEntity.notFound().build(); + } + + ProcessorRecord p = processors.get(index); + Map snapshot = new LinkedHashMap<>(); + if (p.inputBody() != null) snapshot.put("inputBody", p.inputBody()); + if (p.outputBody() != null) snapshot.put("outputBody", p.outputBody()); + if (p.inputHeaders() != null) snapshot.put("inputHeaders", p.inputHeaders()); + if (p.outputHeaders() != null) snapshot.put("outputHeaders", p.outputHeaders()); + + return ResponseEntity.ok(snapshot); } } diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramController.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramController.java index d4359968..5cdaf176 100644 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramController.java +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramController.java @@ -11,7 +11,6 @@ import io.swagger.v3.oas.annotations.responses.ApiResponse; import io.swagger.v3.oas.annotations.tags.Tag; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; import org.springframework.security.core.Authentication; import org.springframework.security.core.context.SecurityContextHolder; @@ -25,8 +24,8 @@ import java.util.List; /** * Ingestion endpoint for route diagrams. *

- * Accepts both single {@link RouteGraph} and arrays. Data is buffered - * and flushed to ClickHouse by the flush scheduler. + * Accepts both single {@link RouteGraph} and arrays. Data is written + * synchronously to PostgreSQL via {@link IngestionService}. */ @RestController @RequestMapping("/api/v1/data") @@ -47,26 +46,12 @@ public class DiagramController { @Operation(summary = "Ingest route diagram data", description = "Accepts a single RouteGraph or an array of RouteGraphs") @ApiResponse(responseCode = "202", description = "Data accepted for processing") - @ApiResponse(responseCode = "503", description = "Buffer full, retry later") public ResponseEntity ingestDiagrams(@RequestBody String body) throws JsonProcessingException { String agentId = extractAgentId(); List graphs = parsePayload(body); - List tagged = graphs.stream() - .map(graph -> new TaggedDiagram(agentId, graph)) - .toList(); - boolean accepted; - if (tagged.size() == 1) { - accepted = ingestionService.acceptDiagram(tagged.get(0)); - } else { - accepted = ingestionService.acceptDiagrams(tagged); - } - - if (!accepted) { - log.warn("Diagram buffer full, returning 503"); - return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE) - .header("Retry-After", "5") - .build(); + for (RouteGraph graph : graphs) { + ingestionService.ingestDiagram(new TaggedDiagram(agentId, graph)); } return ResponseEntity.accepted().build(); diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramRenderController.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramRenderController.java index b1ca3775..d8f722e7 100644 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramRenderController.java +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramRenderController.java @@ -5,7 +5,7 @@ import com.cameleer3.server.core.agent.AgentInfo; import com.cameleer3.server.core.agent.AgentRegistryService; import com.cameleer3.server.core.diagram.DiagramLayout; import com.cameleer3.server.core.diagram.DiagramRenderer; -import com.cameleer3.server.core.storage.DiagramRepository; +import com.cameleer3.server.core.storage.DiagramStore; import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.media.Content; import io.swagger.v3.oas.annotations.media.Schema; @@ -39,14 +39,14 @@ public class DiagramRenderController { private static final MediaType SVG_MEDIA_TYPE = MediaType.valueOf("image/svg+xml"); - private final DiagramRepository diagramRepository; + private final DiagramStore diagramStore; private final DiagramRenderer diagramRenderer; private final AgentRegistryService registryService; - public DiagramRenderController(DiagramRepository diagramRepository, + public DiagramRenderController(DiagramStore diagramStore, DiagramRenderer diagramRenderer, AgentRegistryService registryService) { - this.diagramRepository = diagramRepository; + this.diagramStore = diagramStore; this.diagramRenderer = diagramRenderer; this.registryService = registryService; } @@ -64,7 +64,7 @@ public class DiagramRenderController { @PathVariable String contentHash, HttpServletRequest request) { - Optional graphOpt = diagramRepository.findByContentHash(contentHash); + Optional graphOpt = diagramStore.findByContentHash(contentHash); if (graphOpt.isEmpty()) { return ResponseEntity.notFound().build(); } @@ -105,12 +105,12 @@ public class DiagramRenderController { return ResponseEntity.notFound().build(); } - Optional contentHash = diagramRepository.findContentHashForRouteByAgents(routeId, agentIds); + Optional contentHash = diagramStore.findContentHashForRouteByAgents(routeId, agentIds); if (contentHash.isEmpty()) { return ResponseEntity.notFound().build(); } - Optional graphOpt = diagramRepository.findByContentHash(contentHash.get()); + Optional graphOpt = diagramStore.findByContentHash(contentHash.get()); if (graphOpt.isEmpty()) { return ResponseEntity.notFound().build(); } diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/ExecutionController.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/ExecutionController.java index e44f2645..bea76037 100644 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/ExecutionController.java +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/ExecutionController.java @@ -1,8 +1,9 @@ package com.cameleer3.server.app.controller; import com.cameleer3.common.model.RouteExecution; +import com.cameleer3.server.core.agent.AgentInfo; +import com.cameleer3.server.core.agent.AgentRegistryService; import com.cameleer3.server.core.ingestion.IngestionService; -import com.cameleer3.server.core.ingestion.TaggedExecution; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.ObjectMapper; @@ -11,7 +12,6 @@ import io.swagger.v3.oas.annotations.responses.ApiResponse; import io.swagger.v3.oas.annotations.tags.Tag; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; import org.springframework.security.core.Authentication; import org.springframework.security.core.context.SecurityContextHolder; @@ -25,9 +25,8 @@ import java.util.List; /** * Ingestion endpoint for route execution data. *

- * Accepts both single {@link RouteExecution} and arrays. Data is buffered - * in a {@link com.cameleer3.server.core.ingestion.WriteBuffer} and flushed - * to ClickHouse by the flush scheduler. + * Accepts both single {@link RouteExecution} and arrays. Data is written + * synchronously to PostgreSQL via {@link IngestionService}. */ @RestController @RequestMapping("/api/v1/data") @@ -37,10 +36,14 @@ public class ExecutionController { private static final Logger log = LoggerFactory.getLogger(ExecutionController.class); private final IngestionService ingestionService; + private final AgentRegistryService registryService; private final ObjectMapper objectMapper; - public ExecutionController(IngestionService ingestionService, ObjectMapper objectMapper) { + public ExecutionController(IngestionService ingestionService, + AgentRegistryService registryService, + ObjectMapper objectMapper) { this.ingestionService = ingestionService; + this.registryService = registryService; this.objectMapper = objectMapper; } @@ -48,26 +51,13 @@ public class ExecutionController { @Operation(summary = "Ingest route execution data", description = "Accepts a single RouteExecution or an array of RouteExecutions") @ApiResponse(responseCode = "202", description = "Data accepted for processing") - @ApiResponse(responseCode = "503", description = "Buffer full, retry later") public ResponseEntity ingestExecutions(@RequestBody String body) throws JsonProcessingException { String agentId = extractAgentId(); + String groupName = resolveGroupName(agentId); List executions = parsePayload(body); - List tagged = executions.stream() - .map(exec -> new TaggedExecution(agentId, exec)) - .toList(); - boolean accepted; - if (tagged.size() == 1) { - accepted = ingestionService.acceptExecution(tagged.get(0)); - } else { - accepted = ingestionService.acceptExecutions(tagged); - } - - if (!accepted) { - log.warn("Execution buffer full, returning 503"); - return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE) - .header("Retry-After", "5") - .build(); + for (RouteExecution execution : executions) { + ingestionService.ingestExecution(agentId, groupName, execution); } return ResponseEntity.accepted().build(); @@ -78,6 +68,11 @@ public class ExecutionController { return auth != null ? auth.getName() : ""; } + private String resolveGroupName(String agentId) { + AgentInfo agent = registryService.findById(agentId); + return agent != null ? agent.group() : ""; + } + private List parsePayload(String body) throws JsonProcessingException { String trimmed = body.strip(); if (trimmed.startsWith("[")) { diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/MetricsController.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/MetricsController.java index e947942d..a7ee03d2 100644 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/MetricsController.java +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/MetricsController.java @@ -23,7 +23,7 @@ import java.util.List; * Ingestion endpoint for agent metrics. *

* Accepts an array of {@link MetricsSnapshot}. Data is buffered - * and flushed to ClickHouse by the flush scheduler. + * and flushed to PostgreSQL by the flush scheduler. */ @RestController @RequestMapping("/api/v1/data") diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/ClickHouseFlushScheduler.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/ClickHouseFlushScheduler.java deleted file mode 100644 index e48a2a92..00000000 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/ClickHouseFlushScheduler.java +++ /dev/null @@ -1,159 +0,0 @@ -package com.cameleer3.server.app.ingestion; - -import com.cameleer3.server.app.config.IngestionConfig; -import com.cameleer3.server.core.ingestion.TaggedDiagram; -import com.cameleer3.server.core.ingestion.TaggedExecution; -import com.cameleer3.server.core.ingestion.WriteBuffer; -import com.cameleer3.server.core.storage.DiagramRepository; -import com.cameleer3.server.core.storage.ExecutionRepository; -import com.cameleer3.server.core.storage.MetricsRepository; -import com.cameleer3.server.core.storage.model.MetricsSnapshot; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.context.SmartLifecycle; -import org.springframework.scheduling.annotation.Scheduled; -import org.springframework.stereotype.Component; - -import java.util.List; - -/** - * Scheduled task that drains the write buffers and batch-inserts into ClickHouse. - *

- * Implements {@link SmartLifecycle} to ensure all remaining buffered data is - * flushed on application shutdown. - */ -@Component -public class ClickHouseFlushScheduler implements SmartLifecycle { - - private static final Logger log = LoggerFactory.getLogger(ClickHouseFlushScheduler.class); - - private final WriteBuffer executionBuffer; - private final WriteBuffer diagramBuffer; - private final WriteBuffer metricsBuffer; - private final ExecutionRepository executionRepository; - private final DiagramRepository diagramRepository; - private final MetricsRepository metricsRepository; - private final int batchSize; - - private volatile boolean running = false; - - public ClickHouseFlushScheduler(WriteBuffer executionBuffer, - WriteBuffer diagramBuffer, - WriteBuffer metricsBuffer, - ExecutionRepository executionRepository, - DiagramRepository diagramRepository, - MetricsRepository metricsRepository, - IngestionConfig config) { - this.executionBuffer = executionBuffer; - this.diagramBuffer = diagramBuffer; - this.metricsBuffer = metricsBuffer; - this.executionRepository = executionRepository; - this.diagramRepository = diagramRepository; - this.metricsRepository = metricsRepository; - this.batchSize = config.getBatchSize(); - } - - @Scheduled(fixedDelayString = "${ingestion.flush-interval-ms:1000}") - public void flushAll() { - flushExecutions(); - flushDiagrams(); - flushMetrics(); - } - - private void flushExecutions() { - try { - List batch = executionBuffer.drain(batchSize); - if (!batch.isEmpty()) { - executionRepository.insertBatch(batch); - log.debug("Flushed {} executions to ClickHouse", batch.size()); - } - } catch (Exception e) { - log.error("Failed to flush executions to ClickHouse", e); - } - } - - private void flushDiagrams() { - try { - List batch = diagramBuffer.drain(batchSize); - for (TaggedDiagram diagram : batch) { - diagramRepository.store(diagram); - } - if (!batch.isEmpty()) { - log.debug("Flushed {} diagrams to ClickHouse", batch.size()); - } - } catch (Exception e) { - log.error("Failed to flush diagrams to ClickHouse", e); - } - } - - private void flushMetrics() { - try { - List batch = metricsBuffer.drain(batchSize); - if (!batch.isEmpty()) { - metricsRepository.insertBatch(batch); - log.debug("Flushed {} metrics to ClickHouse", batch.size()); - } - } catch (Exception e) { - log.error("Failed to flush metrics to ClickHouse", e); - } - } - - // SmartLifecycle -- flush remaining data on shutdown - - @Override - public void start() { - running = true; - log.info("ClickHouseFlushScheduler started"); - } - - @Override - public void stop() { - log.info("ClickHouseFlushScheduler stopping -- flushing remaining data"); - drainAll(); - running = false; - } - - @Override - public boolean isRunning() { - return running; - } - - @Override - public int getPhase() { - // Run after most beans but before DataSource shutdown - return Integer.MAX_VALUE - 1; - } - - /** - * Drain all buffers completely (loop until empty). - */ - private void drainAll() { - drainBufferCompletely("executions", executionBuffer, batch -> executionRepository.insertBatch(batch)); - drainBufferCompletely("diagrams", diagramBuffer, batch -> { - for (TaggedDiagram d : batch) { - diagramRepository.store(d); - } - }); - drainBufferCompletely("metrics", metricsBuffer, batch -> metricsRepository.insertBatch(batch)); - } - - private void drainBufferCompletely(String name, WriteBuffer buffer, java.util.function.Consumer> inserter) { - int total = 0; - while (buffer.size() > 0) { - List batch = buffer.drain(batchSize); - if (batch.isEmpty()) { - break; - } - try { - inserter.accept(batch); - total += batch.size(); - } catch (Exception e) { - log.error("Failed to flush remaining {} during shutdown", name, e); - break; - } - } - if (total > 0) { - log.info("Flushed {} remaining {} during shutdown", total, name); - } - } -} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/ClickHouseSearchEngine.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/ClickHouseSearchEngine.java deleted file mode 100644 index ed6a0b13..00000000 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/ClickHouseSearchEngine.java +++ /dev/null @@ -1,357 +0,0 @@ -package com.cameleer3.server.app.search; - -import com.cameleer3.server.core.search.ExecutionStats; -import com.cameleer3.server.core.search.ExecutionSummary; -import com.cameleer3.server.core.search.SearchEngine; -import com.cameleer3.server.core.search.SearchRequest; -import com.cameleer3.server.core.search.SearchResult; -import com.cameleer3.server.core.search.StatsTimeseries; -import org.springframework.jdbc.core.JdbcTemplate; - -import java.sql.Timestamp; -import java.time.Duration; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -/** - * ClickHouse implementation of {@link SearchEngine}. - *

- * Builds dynamic WHERE clauses from non-null {@link SearchRequest} fields - * and queries the {@code route_executions} table. LIKE patterns are properly - * escaped to prevent injection. - */ -public class ClickHouseSearchEngine implements SearchEngine { - - /** Per-query memory cap (1 GiB) — prevents a single query from OOMing ClickHouse. */ - private static final String SETTINGS = " SETTINGS max_memory_usage = 1000000000"; - - private final JdbcTemplate jdbcTemplate; - - public ClickHouseSearchEngine(JdbcTemplate jdbcTemplate) { - this.jdbcTemplate = jdbcTemplate; - } - - @Override - public SearchResult search(SearchRequest request) { - var conditions = new ArrayList(); - var params = new ArrayList(); - - buildWhereClause(request, conditions, params); - - String where = conditions.isEmpty() ? "" : " WHERE " + String.join(" AND ", conditions); - - // Count query - var countParams = params.toArray(); - Long total = jdbcTemplate.queryForObject( - "SELECT count() FROM route_executions" + where + SETTINGS, Long.class, countParams); - if (total == null) total = 0L; - - if (total == 0) { - return SearchResult.empty(request.offset(), request.limit()); - } - - // Data query - params.add(request.limit()); - params.add(request.offset()); - String orderDir = "asc".equalsIgnoreCase(request.sortDir()) ? "ASC" : "DESC"; - String dataSql = "SELECT execution_id, route_id, agent_id, status, start_time, end_time, " + - "duration_ms, correlation_id, error_message, diagram_content_hash " + - "FROM route_executions" + where + - " ORDER BY " + request.sortColumn() + " " + orderDir + " LIMIT ? OFFSET ?" + SETTINGS; - - List data = jdbcTemplate.query(dataSql, (rs, rowNum) -> { - Timestamp endTs = rs.getTimestamp("end_time"); - return new ExecutionSummary( - rs.getString("execution_id"), - rs.getString("route_id"), - rs.getString("agent_id"), - rs.getString("status"), - rs.getTimestamp("start_time").toInstant(), - endTs != null ? endTs.toInstant() : null, - rs.getLong("duration_ms"), - rs.getString("correlation_id"), - rs.getString("error_message"), - rs.getString("diagram_content_hash") - ); - }, params.toArray()); - - return new SearchResult<>(data, total, request.offset(), request.limit()); - } - - @Override - public long count(SearchRequest request) { - var conditions = new ArrayList(); - var params = new ArrayList(); - buildWhereClause(request, conditions, params); - - String where = conditions.isEmpty() ? "" : " WHERE " + String.join(" AND ", conditions); - Long result = jdbcTemplate.queryForObject( - "SELECT count() FROM route_executions" + where + SETTINGS, Long.class, params.toArray()); - return result != null ? result : 0L; - } - - @Override - public ExecutionStats stats(Instant from, Instant to) { - return stats(from, to, null, null); - } - - @Override - public ExecutionStats stats(Instant from, Instant to, String routeId, List agentIds) { - // Current period — read from rollup - var conditions = new ArrayList(); - var params = new ArrayList(); - conditions.add("bucket >= ?"); - params.add(bucketTimestamp(floorToFiveMinutes(from))); - conditions.add("bucket <= ?"); - params.add(bucketTimestamp(to)); - addScopeFilters(routeId, agentIds, conditions, params); - - String where = " WHERE " + String.join(" AND ", conditions); - - String rollupSql = "SELECT " + - "countMerge(total_count) AS cnt, " + - "countIfMerge(failed_count) AS failed, " + - "toInt64(ifNotFinite(sumMerge(duration_sum) / countMerge(total_count), 0)) AS avg_ms, " + - "toInt64(ifNotFinite(quantileTDigestMerge(0.99)(p99_duration), 0)) AS p99_ms " + - "FROM route_execution_stats_5m" + where + SETTINGS; - - record PeriodStats(long totalCount, long failedCount, long avgDurationMs, long p99LatencyMs) {} - PeriodStats current = jdbcTemplate.queryForObject(rollupSql, - (rs, rowNum) -> new PeriodStats( - rs.getLong("cnt"), - rs.getLong("failed"), - rs.getLong("avg_ms"), - rs.getLong("p99_ms")), - params.toArray()); - - // Active count — PREWHERE reads only the status column before touching wide rows - var scopeConditions = new ArrayList(); - var activeParams = new ArrayList(); - addScopeFilters(routeId, agentIds, scopeConditions, activeParams); - String scopeWhere = scopeConditions.isEmpty() ? "" : " WHERE " + String.join(" AND ", scopeConditions); - Long activeCount = jdbcTemplate.queryForObject( - "SELECT count() FROM route_executions PREWHERE status = 'RUNNING'" + scopeWhere + SETTINGS, - Long.class, activeParams.toArray()); - - // Previous period (same window shifted back 24h) — read from rollup - Duration window = Duration.between(from, to); - Instant prevFrom = from.minus(Duration.ofHours(24)); - Instant prevTo = prevFrom.plus(window); - var prevConditions = new ArrayList(); - var prevParams = new ArrayList(); - prevConditions.add("bucket >= ?"); - prevParams.add(bucketTimestamp(floorToFiveMinutes(prevFrom))); - prevConditions.add("bucket <= ?"); - prevParams.add(bucketTimestamp(prevTo)); - addScopeFilters(routeId, agentIds, prevConditions, prevParams); - String prevWhere = " WHERE " + String.join(" AND ", prevConditions); - - String prevRollupSql = "SELECT " + - "countMerge(total_count) AS cnt, " + - "countIfMerge(failed_count) AS failed, " + - "toInt64(ifNotFinite(sumMerge(duration_sum) / countMerge(total_count), 0)) AS avg_ms, " + - "toInt64(ifNotFinite(quantileTDigestMerge(0.99)(p99_duration), 0)) AS p99_ms " + - "FROM route_execution_stats_5m" + prevWhere + SETTINGS; - - PeriodStats prev = jdbcTemplate.queryForObject(prevRollupSql, - (rs, rowNum) -> new PeriodStats( - rs.getLong("cnt"), - rs.getLong("failed"), - rs.getLong("avg_ms"), - rs.getLong("p99_ms")), - prevParams.toArray()); - - // Today total (midnight UTC to now) — read from rollup with same scope - Instant todayStart = Instant.now().truncatedTo(java.time.temporal.ChronoUnit.DAYS); - var todayConditions = new ArrayList(); - var todayParams = new ArrayList(); - todayConditions.add("bucket >= ?"); - todayParams.add(bucketTimestamp(floorToFiveMinutes(todayStart))); - addScopeFilters(routeId, agentIds, todayConditions, todayParams); - String todayWhere = " WHERE " + String.join(" AND ", todayConditions); - - Long totalToday = jdbcTemplate.queryForObject( - "SELECT countMerge(total_count) FROM route_execution_stats_5m" + todayWhere + SETTINGS, - Long.class, todayParams.toArray()); - - return new ExecutionStats( - current.totalCount, current.failedCount, current.avgDurationMs, - current.p99LatencyMs, activeCount != null ? activeCount : 0L, - totalToday != null ? totalToday : 0L, - prev.totalCount, prev.failedCount, prev.avgDurationMs, prev.p99LatencyMs); - } - - @Override - public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount) { - return timeseries(from, to, bucketCount, null, null); - } - - @Override - public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount, - String routeId, List agentIds) { - long intervalSeconds = Duration.between(from, to).getSeconds() / bucketCount; - if (intervalSeconds < 1) intervalSeconds = 1; - - var conditions = new ArrayList(); - var params = new ArrayList(); - conditions.add("bucket >= ?"); - params.add(bucketTimestamp(floorToFiveMinutes(from))); - conditions.add("bucket <= ?"); - params.add(bucketTimestamp(to)); - addScopeFilters(routeId, agentIds, conditions, params); - - String where = " WHERE " + String.join(" AND ", conditions); - - // Re-aggregate 5-minute rollup buckets into the requested interval - String sql = "SELECT " + - "toDateTime(intDiv(toUInt32(bucket), " + intervalSeconds + ") * " + intervalSeconds + ") AS ts_bucket, " + - "countMerge(total_count) AS cnt, " + - "countIfMerge(failed_count) AS failed, " + - "toInt64(ifNotFinite(sumMerge(duration_sum) / countMerge(total_count), 0)) AS avg_ms, " + - "toInt64(ifNotFinite(quantileTDigestMerge(0.99)(p99_duration), 0)) AS p99_ms " + - "FROM route_execution_stats_5m" + where + - " GROUP BY ts_bucket ORDER BY ts_bucket" + SETTINGS; - - List buckets = jdbcTemplate.query(sql, (rs, rowNum) -> - new StatsTimeseries.TimeseriesBucket( - rs.getTimestamp("ts_bucket").toInstant(), - rs.getLong("cnt"), - rs.getLong("failed"), - rs.getLong("avg_ms"), - rs.getLong("p99_ms"), - 0L - ), - params.toArray()); - - return new StatsTimeseries(buckets); - } - - private void buildWhereClause(SearchRequest req, List conditions, List params) { - if (req.status() != null && !req.status().isBlank()) { - String[] statuses = req.status().split(","); - if (statuses.length == 1) { - conditions.add("status = ?"); - params.add(statuses[0].trim()); - } else { - String placeholders = String.join(", ", Collections.nCopies(statuses.length, "?")); - conditions.add("status IN (" + placeholders + ")"); - for (String s : statuses) { - params.add(s.trim()); - } - } - } - if (req.timeFrom() != null) { - conditions.add("start_time >= ?"); - params.add(Timestamp.from(req.timeFrom())); - } - if (req.timeTo() != null) { - conditions.add("start_time <= ?"); - params.add(Timestamp.from(req.timeTo())); - } - if (req.durationMin() != null) { - conditions.add("duration_ms >= ?"); - params.add(req.durationMin()); - } - if (req.durationMax() != null) { - conditions.add("duration_ms <= ?"); - params.add(req.durationMax()); - } - if (req.correlationId() != null && !req.correlationId().isBlank()) { - conditions.add("correlation_id = ?"); - params.add(req.correlationId()); - } - if (req.routeId() != null && !req.routeId().isBlank()) { - conditions.add("route_id = ?"); - params.add(req.routeId()); - } - if (req.agentId() != null && !req.agentId().isBlank()) { - conditions.add("agent_id = ?"); - params.add(req.agentId()); - } - // agentIds from group resolution (takes precedence when agentId is not set) - if ((req.agentId() == null || req.agentId().isBlank()) - && req.agentIds() != null && !req.agentIds().isEmpty()) { - String placeholders = String.join(", ", Collections.nCopies(req.agentIds().size(), "?")); - conditions.add("agent_id IN (" + placeholders + ")"); - params.addAll(req.agentIds()); - } - if (req.processorType() != null && !req.processorType().isBlank()) { - conditions.add("has(processor_types, ?)"); - params.add(req.processorType()); - } - if (req.text() != null && !req.text().isBlank()) { - String pattern = "%" + escapeLike(req.text()) + "%"; - String[] textColumns = { - "execution_id", "route_id", "agent_id", - "error_message", "error_stacktrace", - "exchange_bodies", "exchange_headers" - }; - var likeClauses = java.util.Arrays.stream(textColumns) - .map(col -> col + " LIKE ?") - .toList(); - conditions.add("(" + String.join(" OR ", likeClauses) + ")"); - for (int i = 0; i < textColumns.length; i++) { - params.add(pattern); - } - } - if (req.textInBody() != null && !req.textInBody().isBlank()) { - conditions.add("exchange_bodies LIKE ?"); - params.add("%" + escapeLike(req.textInBody()) + "%"); - } - if (req.textInHeaders() != null && !req.textInHeaders().isBlank()) { - conditions.add("exchange_headers LIKE ?"); - params.add("%" + escapeLike(req.textInHeaders()) + "%"); - } - if (req.textInErrors() != null && !req.textInErrors().isBlank()) { - String pattern = "%" + escapeLike(req.textInErrors()) + "%"; - conditions.add("(error_message LIKE ? OR error_stacktrace LIKE ?)"); - params.add(pattern); - params.add(pattern); - } - } - - /** - * Add route ID and agent IDs scope filters to conditions/params. - */ - private void addScopeFilters(String routeId, List agentIds, - List conditions, List params) { - if (routeId != null && !routeId.isBlank()) { - conditions.add("route_id = ?"); - params.add(routeId); - } - if (agentIds != null && !agentIds.isEmpty()) { - String placeholders = String.join(", ", Collections.nCopies(agentIds.size(), "?")); - conditions.add("agent_id IN (" + placeholders + ")"); - params.addAll(agentIds); - } - } - - /** - * Floor an Instant to the start of its 5-minute bucket. - */ - private static Instant floorToFiveMinutes(Instant instant) { - long epochSecond = instant.getEpochSecond(); - return Instant.ofEpochSecond(epochSecond - (epochSecond % 300)); - } - - /** - * Create a second-precision Timestamp for rollup bucket comparisons. - * The bucket column is DateTime('UTC') (second precision); the JDBC driver - * sends java.sql.Timestamp with nanoseconds which ClickHouse rejects. - */ - private static Timestamp bucketTimestamp(Instant instant) { - return Timestamp.from(instant.truncatedTo(java.time.temporal.ChronoUnit.SECONDS)); - } - - /** - * Escape special LIKE characters to prevent LIKE injection. - */ - static String escapeLike(String input) { - return input - .replace("\\", "\\\\") - .replace("%", "\\%") - .replace("_", "\\_"); - } -} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java index d130b1f6..892792fc 100644 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java @@ -7,6 +7,7 @@ import com.cameleer3.server.core.storage.SearchIndex; import com.cameleer3.server.core.storage.model.ExecutionDocument; import com.cameleer3.server.core.storage.model.ExecutionDocument.ProcessorDoc; import jakarta.annotation.PostConstruct; +import org.opensearch.client.json.JsonData; import org.opensearch.client.opensearch.OpenSearchClient; import org.opensearch.client.opensearch._types.FieldValue; import org.opensearch.client.opensearch._types.SortOrder; @@ -41,8 +42,6 @@ public class OpenSearchIndex implements SearchIndex { @PostConstruct void ensureIndexTemplate() { - // Full template with ngram analyzer for infix wildcard search. - // The template JSON matches the spec's OpenSearch index template definition. try { boolean exists = client.indices().existsIndexTemplate( ExistsIndexTemplateRequest.of(b -> b.name("executions-template"))).value(); @@ -53,22 +52,8 @@ public class OpenSearchIndex implements SearchIndex { .template(t -> t .settings(s -> s .numberOfShards("3") - .numberOfReplicas("1") - .analysis(a -> a - .analyzer("ngram_analyzer", an -> an - .custom(c -> c - .tokenizer("ngram_tokenizer") - .filter("lowercase"))) - .tokenizer("ngram_tokenizer", tk -> tk - .definition(d -> d - .ngram(ng -> ng - .minGram(3) - .maxGram(4) - .tokenChars(TokenChar.Letter, - TokenChar.Digit, - TokenChar.Punctuation, - TokenChar.Symbol))))))))); - log.info("OpenSearch index template created with ngram analyzer"); + .numberOfReplicas("1"))))); + log.info("OpenSearch index template created"); } } catch (IOException e) { log.error("Failed to create index template", e); @@ -99,10 +84,10 @@ public class OpenSearchIndex implements SearchIndex { .collect(Collectors.toList()); long total = response.hits().total() != null ? response.hits().total().value() : 0; - return new SearchResult<>(items, total); + return new SearchResult<>(items, total, request.offset(), request.limit()); } catch (IOException e) { log.error("Search failed", e); - return new SearchResult<>(List.of(), 0); + return SearchResult.empty(request.offset(), request.limit()); } } @@ -125,7 +110,8 @@ public class OpenSearchIndex implements SearchIndex { client.deleteByQuery(DeleteByQueryRequest.of(b -> b .index(List.of(INDEX_PREFIX + "*")) .query(Query.of(q -> q.term(t -> t - .field("execution_id").value(executionId)))))); + .field("execution_id") + .value(FieldValue.of(executionId))))))); } catch (IOException e) { log.error("Failed to delete execution {}", executionId, e); } @@ -155,9 +141,9 @@ public class OpenSearchIndex implements SearchIndex { filter.add(Query.of(q -> q.range(r -> { r.field("start_time"); if (request.timeFrom() != null) - r.gte(jakarta.json.Json.createValue(request.timeFrom().toString())); + r.gte(JsonData.of(request.timeFrom().toString())); if (request.timeTo() != null) - r.lte(jakarta.json.Json.createValue(request.timeTo().toString())); + r.lte(JsonData.of(request.timeTo().toString())); return r; }))); } @@ -180,8 +166,7 @@ public class OpenSearchIndex implements SearchIndex { // Search top-level text fields textQueries.add(Query.of(q -> q.multiMatch(m -> m .query(text) - .fields("error_message", "error_stacktrace", - "error_message.ngram", "error_stacktrace.ngram")))); + .fields("error_message", "error_stacktrace")))); // Search nested processor fields textQueries.add(Query.of(q -> q.nested(n -> n @@ -190,10 +175,7 @@ public class OpenSearchIndex implements SearchIndex { .query(text) .fields("processors.input_body", "processors.output_body", "processors.input_headers", "processors.output_headers", - "processors.error_message", "processors.error_stacktrace", - "processors.input_body.ngram", "processors.output_body.ngram", - "processors.input_headers.ngram", "processors.output_headers.ngram", - "processors.error_message.ngram", "processors.error_stacktrace.ngram")))))); + "processors.error_message", "processors.error_stacktrace")))))); // Also try keyword fields for exact matches textQueries.add(Query.of(q -> q.multiMatch(m -> m @@ -209,30 +191,26 @@ public class OpenSearchIndex implements SearchIndex { .path("processors") .query(nq -> nq.multiMatch(m -> m .query(request.textInBody()) - .fields("processors.input_body", "processors.output_body", - "processors.input_body.ngram", "processors.output_body.ngram")))))); + .fields("processors.input_body", "processors.output_body")))))); } if (request.textInHeaders() != null && !request.textInHeaders().isBlank()) { must.add(Query.of(q -> q.nested(n -> n .path("processors") .query(nq -> nq.multiMatch(m -> m .query(request.textInHeaders()) - .fields("processors.input_headers", "processors.output_headers", - "processors.input_headers.ngram", "processors.output_headers.ngram")))))); + .fields("processors.input_headers", "processors.output_headers")))))); } if (request.textInErrors() != null && !request.textInErrors().isBlank()) { String errText = request.textInErrors(); must.add(Query.of(q -> q.bool(b -> b.should( Query.of(sq -> sq.multiMatch(m -> m .query(errText) - .fields("error_message", "error_stacktrace", - "error_message.ngram", "error_stacktrace.ngram"))), + .fields("error_message", "error_stacktrace"))), Query.of(sq -> sq.nested(n -> n .path("processors") .query(nq -> nq.multiMatch(m -> m .query(errText) - .fields("processors.error_message", "processors.error_stacktrace", - "processors.error_message.ngram", "processors.error_stacktrace.ngram"))))) + .fields("processors.error_message", "processors.error_stacktrace"))))) ).minimumShouldMatch("1")))); } @@ -241,9 +219,9 @@ public class OpenSearchIndex implements SearchIndex { filter.add(Query.of(q -> q.range(r -> { r.field("duration_ms"); if (request.durationMin() != null) - r.gte(jakarta.json.Json.createValue(request.durationMin())); + r.gte(JsonData.of(request.durationMin())); if (request.durationMax() != null) - r.lte(jakarta.json.Json.createValue(request.durationMax())); + r.lte(JsonData.of(request.durationMax())); return r; }))); } @@ -257,7 +235,7 @@ public class OpenSearchIndex implements SearchIndex { } private Query termQuery(String field, String value) { - return Query.of(q -> q.term(t -> t.field(field).value(value))); + return Query.of(q -> q.term(t -> t.field(field).value(FieldValue.of(value)))); } private Map toMap(ExecutionDocument doc) { @@ -305,6 +283,8 @@ public class OpenSearchIndex implements SearchIndex { src.get("end_time") != null ? Instant.parse((String) src.get("end_time")) : null, src.get("duration_ms") != null ? ((Number) src.get("duration_ms")).longValue() : 0L, (String) src.get("correlation_id"), - (String) src.get("error_message")); + (String) src.get("error_message"), + null // diagramContentHash not stored in index + ); } } diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/security/SecurityBeanConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/security/SecurityBeanConfig.java index ad48c345..5c0bdff5 100644 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/security/SecurityBeanConfig.java +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/security/SecurityBeanConfig.java @@ -16,7 +16,7 @@ import java.util.List; * that required security properties are set. *

* Fails fast on startup if {@code CAMELEER_AUTH_TOKEN} is not set. - * Seeds OIDC config from env vars into ClickHouse if DB is empty. + * Seeds OIDC config from env vars into the database if DB is empty. */ @Configuration @EnableConfigurationProperties(SecurityProperties.class) diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseDiagramRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseDiagramRepository.java deleted file mode 100644 index 11a0ed4f..00000000 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseDiagramRepository.java +++ /dev/null @@ -1,127 +0,0 @@ -package com.cameleer3.server.app.storage; - -import com.cameleer3.common.graph.RouteGraph; -import com.cameleer3.server.core.ingestion.TaggedDiagram; -import com.cameleer3.server.core.storage.DiagramRepository; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.stereotype.Repository; - -import java.nio.charset.StandardCharsets; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HexFormat; -import java.util.List; -import java.util.Map; -import java.util.Optional; - -/** - * ClickHouse implementation of {@link DiagramRepository}. - *

- * Stores route graphs as JSON with SHA-256 content-hash deduplication. - * The underlying table uses ReplacingMergeTree keyed on content_hash. - */ -@Repository -public class ClickHouseDiagramRepository implements DiagramRepository { - - private static final Logger log = LoggerFactory.getLogger(ClickHouseDiagramRepository.class); - - private static final String INSERT_SQL = """ - INSERT INTO route_diagrams (content_hash, route_id, agent_id, definition) - VALUES (?, ?, ?, ?) - """; - - private static final String SELECT_BY_HASH = """ - SELECT definition FROM route_diagrams WHERE content_hash = ? LIMIT 1 - """; - - private static final String SELECT_HASH_FOR_ROUTE = """ - SELECT content_hash FROM route_diagrams - WHERE route_id = ? AND agent_id = ? - ORDER BY created_at DESC LIMIT 1 - """; - - private final JdbcTemplate jdbcTemplate; - private final ObjectMapper objectMapper; - - public ClickHouseDiagramRepository(JdbcTemplate jdbcTemplate) { - this.jdbcTemplate = jdbcTemplate; - this.objectMapper = new ObjectMapper(); - this.objectMapper.registerModule(new JavaTimeModule()); - } - - @Override - public void store(TaggedDiagram diagram) { - try { - RouteGraph graph = diagram.graph(); - String agentId = diagram.agentId() != null ? diagram.agentId() : ""; - String json = objectMapper.writeValueAsString(graph); - String contentHash = sha256Hex(json); - String routeId = graph.getRouteId() != null ? graph.getRouteId() : ""; - - jdbcTemplate.update(INSERT_SQL, contentHash, routeId, agentId, json); - log.debug("Stored diagram for route={} agent={} with hash={}", routeId, agentId, contentHash); - } catch (JsonProcessingException e) { - throw new RuntimeException("Failed to serialize RouteGraph to JSON", e); - } - } - - @Override - public Optional findByContentHash(String contentHash) { - List> rows = jdbcTemplate.queryForList(SELECT_BY_HASH, contentHash); - if (rows.isEmpty()) { - return Optional.empty(); - } - String json = (String) rows.get(0).get("definition"); - try { - return Optional.of(objectMapper.readValue(json, RouteGraph.class)); - } catch (JsonProcessingException e) { - log.error("Failed to deserialize RouteGraph from ClickHouse", e); - return Optional.empty(); - } - } - - @Override - public Optional findContentHashForRoute(String routeId, String agentId) { - List> rows = jdbcTemplate.queryForList(SELECT_HASH_FOR_ROUTE, routeId, agentId); - if (rows.isEmpty()) { - return Optional.empty(); - } - return Optional.of((String) rows.get(0).get("content_hash")); - } - - @Override - public Optional findContentHashForRouteByAgents(String routeId, List agentIds) { - if (agentIds == null || agentIds.isEmpty()) { - return Optional.empty(); - } - String placeholders = String.join(", ", Collections.nCopies(agentIds.size(), "?")); - String sql = "SELECT content_hash FROM route_diagrams " + - "WHERE route_id = ? AND agent_id IN (" + placeholders + ") " + - "ORDER BY created_at DESC LIMIT 1"; - var params = new ArrayList(); - params.add(routeId); - params.addAll(agentIds); - List> rows = jdbcTemplate.queryForList(sql, params.toArray()); - if (rows.isEmpty()) { - return Optional.empty(); - } - return Optional.of((String) rows.get(0).get("content_hash")); - } - - static String sha256Hex(String input) { - try { - MessageDigest digest = MessageDigest.getInstance("SHA-256"); - byte[] hash = digest.digest(input.getBytes(StandardCharsets.UTF_8)); - return HexFormat.of().formatHex(hash); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException("SHA-256 not available", e); - } - } -} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseExecutionRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseExecutionRepository.java deleted file mode 100644 index b119f7e7..00000000 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseExecutionRepository.java +++ /dev/null @@ -1,418 +0,0 @@ -package com.cameleer3.server.app.storage; - -import com.cameleer3.common.model.ExchangeSnapshot; -import com.cameleer3.common.model.ProcessorExecution; -import com.cameleer3.common.model.RouteExecution; -import com.cameleer3.server.core.detail.RawExecutionRow; -import com.cameleer3.server.core.ingestion.TaggedExecution; -import com.cameleer3.server.core.storage.DiagramRepository; -import com.cameleer3.server.core.storage.ExecutionRepository; -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.jdbc.core.BatchPreparedStatementSetter; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.stereotype.Repository; - -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.time.Instant; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.UUID; - -/** - * ClickHouse implementation of {@link ExecutionRepository}. - *

- * Performs batch inserts into the {@code route_executions} table. - * Processor executions are flattened into parallel arrays with tree metadata - * (depth, parent index) for reconstruction. - */ -@Repository -public class ClickHouseExecutionRepository implements ExecutionRepository { - - private static final Logger log = LoggerFactory.getLogger(ClickHouseExecutionRepository.class); - - private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); - - private static final String INSERT_SQL = """ - INSERT INTO route_executions ( - execution_id, route_id, agent_id, status, start_time, end_time, - duration_ms, correlation_id, exchange_id, error_message, error_stacktrace, - processor_ids, processor_types, processor_starts, processor_ends, - processor_durations, processor_statuses, - exchange_bodies, exchange_headers, - processor_depths, processor_parent_indexes, - processor_error_messages, processor_error_stacktraces, - processor_input_bodies, processor_output_bodies, - processor_input_headers, processor_output_headers, - processor_diagram_node_ids, diagram_content_hash - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - """; - - private final JdbcTemplate jdbcTemplate; - private final DiagramRepository diagramRepository; - - public ClickHouseExecutionRepository(JdbcTemplate jdbcTemplate, DiagramRepository diagramRepository) { - this.jdbcTemplate = jdbcTemplate; - this.diagramRepository = diagramRepository; - } - - @Override - public void insertBatch(List executions) { - if (executions.isEmpty()) { - return; - } - - jdbcTemplate.batchUpdate(INSERT_SQL, new BatchPreparedStatementSetter() { - @Override - public void setValues(PreparedStatement ps, int i) throws SQLException { - TaggedExecution tagged = executions.get(i); - RouteExecution exec = tagged.execution(); - String agentId = tagged.agentId() != null ? tagged.agentId() : ""; - List flatProcessors = flattenWithMetadata(exec.getProcessors()); - - int col = 1; - ps.setString(col++, UUID.randomUUID().toString()); - ps.setString(col++, nullSafe(exec.getRouteId())); - ps.setString(col++, agentId); - ps.setString(col++, exec.getStatus() != null ? exec.getStatus().name() : "RUNNING"); - ps.setObject(col++, toTimestamp(exec.getStartTime())); - ps.setObject(col++, toTimestamp(exec.getEndTime())); - ps.setLong(col++, exec.getDurationMs()); - ps.setString(col++, nullSafe(exec.getCorrelationId())); - ps.setString(col++, nullSafe(exec.getExchangeId())); - ps.setString(col++, nullSafe(exec.getErrorMessage())); - ps.setString(col++, nullSafe(exec.getErrorStackTrace())); - - // Original parallel arrays - ps.setObject(col++, flatProcessors.stream().map(fp -> nullSafe(fp.proc.getProcessorId())).toArray(String[]::new)); - ps.setObject(col++, flatProcessors.stream().map(fp -> nullSafe(fp.proc.getProcessorType())).toArray(String[]::new)); - ps.setObject(col++, flatProcessors.stream().map(fp -> toTimestamp(fp.proc.getStartTime())).toArray(Timestamp[]::new)); - ps.setObject(col++, flatProcessors.stream().map(fp -> toTimestamp(fp.proc.getEndTime())).toArray(Timestamp[]::new)); - ps.setObject(col++, flatProcessors.stream().mapToLong(fp -> fp.proc.getDurationMs()).boxed().toArray(Long[]::new)); - ps.setObject(col++, flatProcessors.stream().map(fp -> fp.proc.getStatus() != null ? fp.proc.getStatus().name() : "RUNNING").toArray(String[]::new)); - - // Phase 2: exchange bodies and headers (concatenated for search) - StringBuilder allBodies = new StringBuilder(); - StringBuilder allHeaders = new StringBuilder(); - - String[] inputBodies = new String[flatProcessors.size()]; - String[] outputBodies = new String[flatProcessors.size()]; - String[] inputHeaders = new String[flatProcessors.size()]; - String[] outputHeaders = new String[flatProcessors.size()]; - String[] errorMessages = new String[flatProcessors.size()]; - String[] errorStacktraces = new String[flatProcessors.size()]; - String[] diagramNodeIds = new String[flatProcessors.size()]; - Short[] depths = new Short[flatProcessors.size()]; - Integer[] parentIndexes = new Integer[flatProcessors.size()]; - - for (int j = 0; j < flatProcessors.size(); j++) { - FlatProcessor fp = flatProcessors.get(j); - ProcessorExecution p = fp.proc; - - inputBodies[j] = nullSafe(p.getInputBody()); - outputBodies[j] = nullSafe(p.getOutputBody()); - inputHeaders[j] = mapToJson(p.getInputHeaders()); - outputHeaders[j] = mapToJson(p.getOutputHeaders()); - errorMessages[j] = nullSafe(p.getErrorMessage()); - errorStacktraces[j] = nullSafe(p.getErrorStackTrace()); - diagramNodeIds[j] = nullSafe(p.getDiagramNodeId()); - depths[j] = (short) fp.depth; - parentIndexes[j] = fp.parentIndex; - - allBodies.append(inputBodies[j]).append(' ').append(outputBodies[j]).append(' '); - allHeaders.append(inputHeaders[j]).append(' ').append(outputHeaders[j]).append(' '); - } - - // Include route-level input/output snapshot in searchable text - appendSnapshotText(exec.getInputSnapshot(), allBodies, allHeaders); - appendSnapshotText(exec.getOutputSnapshot(), allBodies, allHeaders); - - ps.setString(col++, allBodies.toString().trim()); // exchange_bodies - ps.setString(col++, allHeaders.toString().trim()); // exchange_headers - ps.setObject(col++, depths); // processor_depths - ps.setObject(col++, parentIndexes); // processor_parent_indexes - ps.setObject(col++, errorMessages); // processor_error_messages - ps.setObject(col++, errorStacktraces); // processor_error_stacktraces - ps.setObject(col++, inputBodies); // processor_input_bodies - ps.setObject(col++, outputBodies); // processor_output_bodies - ps.setObject(col++, inputHeaders); // processor_input_headers - ps.setObject(col++, outputHeaders); // processor_output_headers - ps.setObject(col++, diagramNodeIds); // processor_diagram_node_ids - String diagramHash = diagramRepository - .findContentHashForRoute(exec.getRouteId(), agentId) - .orElse(""); - ps.setString(col++, diagramHash); // diagram_content_hash - } - - @Override - public int getBatchSize() { - return executions.size(); - } - }); - - log.debug("Inserted batch of {} route executions into ClickHouse", executions.size()); - } - - @Override - public Optional findRawById(String executionId) { - String sql = """ - SELECT execution_id, route_id, agent_id, status, start_time, end_time, - duration_ms, correlation_id, exchange_id, error_message, error_stacktrace, - diagram_content_hash, - processor_ids, processor_types, processor_statuses, - processor_starts, processor_ends, processor_durations, - processor_diagram_node_ids, - processor_error_messages, processor_error_stacktraces, - processor_depths, processor_parent_indexes - FROM route_executions - WHERE execution_id = ? - LIMIT 1 - """; - - List results = jdbcTemplate.query(sql, (rs, rowNum) -> { - // Extract parallel arrays from ClickHouse - String[] processorIds = toStringArray(rs.getArray("processor_ids")); - String[] processorTypes = toStringArray(rs.getArray("processor_types")); - String[] processorStatuses = toStringArray(rs.getArray("processor_statuses")); - Instant[] processorStarts = toInstantArray(rs.getArray("processor_starts")); - Instant[] processorEnds = toInstantArray(rs.getArray("processor_ends")); - long[] processorDurations = toLongArray(rs.getArray("processor_durations")); - String[] processorDiagramNodeIds = toStringArray(rs.getArray("processor_diagram_node_ids")); - String[] processorErrorMessages = toStringArray(rs.getArray("processor_error_messages")); - String[] processorErrorStacktraces = toStringArray(rs.getArray("processor_error_stacktraces")); - int[] processorDepths = toIntArrayFromShort(rs.getArray("processor_depths")); - int[] processorParentIndexes = toIntArray(rs.getArray("processor_parent_indexes")); - - Timestamp endTs = rs.getTimestamp("end_time"); - return new RawExecutionRow( - rs.getString("execution_id"), - rs.getString("route_id"), - rs.getString("agent_id"), - rs.getString("status"), - rs.getTimestamp("start_time").toInstant(), - endTs != null ? endTs.toInstant() : null, - rs.getLong("duration_ms"), - rs.getString("correlation_id"), - rs.getString("exchange_id"), - rs.getString("error_message"), - rs.getString("error_stacktrace"), - rs.getString("diagram_content_hash"), - processorIds, processorTypes, processorStatuses, - processorStarts, processorEnds, processorDurations, - processorDiagramNodeIds, - processorErrorMessages, processorErrorStacktraces, - processorDepths, processorParentIndexes - ); - }, executionId); - - return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0)); - } - - /** - * Find exchange snapshot data for a specific processor by index. - * - * @param executionId the execution ID - * @param processorIndex 0-based processor index - * @return map with inputBody, outputBody, inputHeaders, outputHeaders or empty if not found - */ - public Optional> findProcessorSnapshot(String executionId, int processorIndex) { - // ClickHouse arrays are 1-indexed in SQL - int chIndex = processorIndex + 1; - String sql = """ - SELECT - processor_input_bodies[?] AS input_body, - processor_output_bodies[?] AS output_body, - processor_input_headers[?] AS input_headers, - processor_output_headers[?] AS output_headers, - length(processor_ids) AS proc_count - FROM route_executions - WHERE execution_id = ? - LIMIT 1 - """; - - List> results = jdbcTemplate.query(sql, (rs, rowNum) -> { - int procCount = rs.getInt("proc_count"); - if (processorIndex < 0 || processorIndex >= procCount) { - return null; - } - var snapshot = new java.util.LinkedHashMap(); - snapshot.put("inputBody", rs.getString("input_body")); - snapshot.put("outputBody", rs.getString("output_body")); - snapshot.put("inputHeaders", rs.getString("input_headers")); - snapshot.put("outputHeaders", rs.getString("output_headers")); - return snapshot; - }, chIndex, chIndex, chIndex, chIndex, executionId); - - if (results.isEmpty() || results.get(0) == null) { - return Optional.empty(); - } - return Optional.of(results.get(0)); - } - - // --- Array extraction helpers --- - - private static String[] toStringArray(java.sql.Array sqlArray) throws SQLException { - if (sqlArray == null) return new String[0]; - Object arr = sqlArray.getArray(); - if (arr instanceof String[] sa) return sa; - if (arr instanceof Object[] oa) { - String[] result = new String[oa.length]; - for (int i = 0; i < oa.length; i++) { - result[i] = oa[i] != null ? oa[i].toString() : ""; - } - return result; - } - return new String[0]; - } - - private static Instant[] toInstantArray(java.sql.Array sqlArray) throws SQLException { - if (sqlArray == null) return new Instant[0]; - Object arr = sqlArray.getArray(); - if (arr instanceof Timestamp[] ts) { - Instant[] result = new Instant[ts.length]; - for (int i = 0; i < ts.length; i++) { - result[i] = ts[i] != null ? ts[i].toInstant() : Instant.EPOCH; - } - return result; - } - if (arr instanceof Object[] oa) { - Instant[] result = new Instant[oa.length]; - for (int i = 0; i < oa.length; i++) { - if (oa[i] instanceof Timestamp ts) { - result[i] = ts.toInstant(); - } else { - result[i] = Instant.EPOCH; - } - } - return result; - } - return new Instant[0]; - } - - private static long[] toLongArray(java.sql.Array sqlArray) throws SQLException { - if (sqlArray == null) return new long[0]; - Object arr = sqlArray.getArray(); - if (arr instanceof long[] la) return la; - if (arr instanceof Long[] la) { - long[] result = new long[la.length]; - for (int i = 0; i < la.length; i++) { - result[i] = la[i] != null ? la[i] : 0; - } - return result; - } - if (arr instanceof Object[] oa) { - long[] result = new long[oa.length]; - for (int i = 0; i < oa.length; i++) { - result[i] = oa[i] instanceof Number n ? n.longValue() : 0; - } - return result; - } - return new long[0]; - } - - private static int[] toIntArray(java.sql.Array sqlArray) throws SQLException { - if (sqlArray == null) return new int[0]; - Object arr = sqlArray.getArray(); - if (arr instanceof int[] ia) return ia; - if (arr instanceof Integer[] ia) { - int[] result = new int[ia.length]; - for (int i = 0; i < ia.length; i++) { - result[i] = ia[i] != null ? ia[i] : 0; - } - return result; - } - if (arr instanceof Object[] oa) { - int[] result = new int[oa.length]; - for (int i = 0; i < oa.length; i++) { - result[i] = oa[i] instanceof Number n ? n.intValue() : 0; - } - return result; - } - return new int[0]; - } - - private static int[] toIntArrayFromShort(java.sql.Array sqlArray) throws SQLException { - if (sqlArray == null) return new int[0]; - Object arr = sqlArray.getArray(); - if (arr instanceof short[] sa) { - int[] result = new int[sa.length]; - for (int i = 0; i < sa.length; i++) { - result[i] = sa[i]; - } - return result; - } - if (arr instanceof int[] ia) return ia; - if (arr instanceof Object[] oa) { - int[] result = new int[oa.length]; - for (int i = 0; i < oa.length; i++) { - result[i] = oa[i] instanceof Number n ? n.intValue() : 0; - } - return result; - } - return new int[0]; - } - - /** - * Internal record for a flattened processor with tree metadata. - */ - private record FlatProcessor(ProcessorExecution proc, int depth, int parentIndex) {} - - /** - * Flatten the processor tree with depth and parent index metadata (DFS order). - */ - private List flattenWithMetadata(List processors) { - if (processors == null || processors.isEmpty()) { - return List.of(); - } - var result = new ArrayList(); - for (ProcessorExecution p : processors) { - flattenRecursive(p, 0, -1, result); - } - return result; - } - - private void flattenRecursive(ProcessorExecution processor, int depth, int parentIdx, - List result) { - int myIndex = result.size(); - result.add(new FlatProcessor(processor, depth, parentIdx)); - if (processor.getChildren() != null) { - for (ProcessorExecution child : processor.getChildren()) { - flattenRecursive(child, depth + 1, myIndex, result); - } - } - } - - private void appendSnapshotText(ExchangeSnapshot snapshot, - StringBuilder allBodies, StringBuilder allHeaders) { - if (snapshot != null) { - allBodies.append(nullSafe(snapshot.getBody())).append(' '); - allHeaders.append(mapToJson(snapshot.getHeaders())).append(' '); - } - } - - private static String mapToJson(Map map) { - if (map == null || map.isEmpty()) { - return "{}"; - } - try { - return OBJECT_MAPPER.writeValueAsString(map); - } catch (JsonProcessingException e) { - log.warn("Failed to serialize headers map to JSON", e); - return "{}"; - } - } - - private static String nullSafe(String value) { - return value != null ? value : ""; - } - - private static Timestamp toTimestamp(Instant instant) { - return instant != null ? Timestamp.from(instant) : Timestamp.from(Instant.EPOCH); - } -} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseMetricsRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseMetricsRepository.java deleted file mode 100644 index a72ea26d..00000000 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseMetricsRepository.java +++ /dev/null @@ -1,67 +0,0 @@ -package com.cameleer3.server.app.storage; - -import com.cameleer3.server.core.storage.MetricsRepository; -import com.cameleer3.server.core.storage.model.MetricsSnapshot; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.jdbc.core.BatchPreparedStatementSetter; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.stereotype.Repository; - -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.time.Instant; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -/** - * ClickHouse implementation of {@link MetricsRepository}. - *

- * Performs batch inserts into the {@code agent_metrics} table. - */ -@Repository -public class ClickHouseMetricsRepository implements MetricsRepository { - - private static final Logger log = LoggerFactory.getLogger(ClickHouseMetricsRepository.class); - - private static final String INSERT_SQL = """ - INSERT INTO agent_metrics (agent_id, collected_at, metric_name, metric_value, tags) - VALUES (?, ?, ?, ?, ?) - """; - - private final JdbcTemplate jdbcTemplate; - - public ClickHouseMetricsRepository(JdbcTemplate jdbcTemplate) { - this.jdbcTemplate = jdbcTemplate; - } - - @Override - public void insertBatch(List metrics) { - if (metrics.isEmpty()) { - return; - } - - jdbcTemplate.batchUpdate(INSERT_SQL, new BatchPreparedStatementSetter() { - @Override - public void setValues(PreparedStatement ps, int i) throws SQLException { - MetricsSnapshot m = metrics.get(i); - ps.setString(1, m.agentId() != null ? m.agentId() : ""); - ps.setObject(2, m.collectedAt() != null ? Timestamp.from(m.collectedAt()) : Timestamp.from(Instant.EPOCH)); - ps.setString(3, m.metricName() != null ? m.metricName() : ""); - ps.setDouble(4, m.metricValue()); - // ClickHouse Map(String, String) -- pass as a java.util.Map - Map tags = m.tags() != null ? m.tags() : new HashMap<>(); - ps.setObject(5, tags); - } - - @Override - public int getBatchSize() { - return metrics.size(); - } - }); - - log.debug("Inserted batch of {} metrics into ClickHouse", metrics.size()); - } -} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseOidcConfigRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseOidcConfigRepository.java deleted file mode 100644 index 92b08d54..00000000 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseOidcConfigRepository.java +++ /dev/null @@ -1,71 +0,0 @@ -package com.cameleer3.server.app.storage; - -import com.cameleer3.server.core.security.OidcConfig; -import com.cameleer3.server.core.security.OidcConfigRepository; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.stereotype.Repository; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.Arrays; -import java.util.List; -import java.util.Optional; - -/** - * ClickHouse implementation of {@link OidcConfigRepository}. - * Singleton row with {@code config_id = 'default'}, using ReplacingMergeTree. - */ -@Repository -public class ClickHouseOidcConfigRepository implements OidcConfigRepository { - - private final JdbcTemplate jdbc; - - public ClickHouseOidcConfigRepository(JdbcTemplate jdbc) { - this.jdbc = jdbc; - } - - @Override - public Optional find() { - List results = jdbc.query( - "SELECT enabled, issuer_uri, client_id, client_secret, roles_claim, default_roles, auto_signup, display_name_claim " - + "FROM oidc_config FINAL WHERE config_id = 'default'", - this::mapRow - ); - return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0)); - } - - @Override - public void save(OidcConfig config) { - jdbc.update( - "INSERT INTO oidc_config (config_id, enabled, issuer_uri, client_id, client_secret, roles_claim, default_roles, auto_signup, display_name_claim, updated_at) " - + "VALUES ('default', ?, ?, ?, ?, ?, ?, ?, ?, now64(3, 'UTC'))", - config.enabled(), - config.issuerUri(), - config.clientId(), - config.clientSecret(), - config.rolesClaim(), - config.defaultRoles().toArray(new String[0]), - config.autoSignup(), - config.displayNameClaim() - ); - } - - @Override - public void delete() { - jdbc.update("DELETE FROM oidc_config WHERE config_id = 'default'"); - } - - private OidcConfig mapRow(ResultSet rs, int rowNum) throws SQLException { - String[] rolesArray = (String[]) rs.getArray("default_roles").getArray(); - return new OidcConfig( - rs.getBoolean("enabled"), - rs.getString("issuer_uri"), - rs.getString("client_id"), - rs.getString("client_secret"), - rs.getString("roles_claim"), - Arrays.asList(rolesArray), - rs.getBoolean("auto_signup"), - rs.getString("display_name_claim") - ); - } -} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseUserRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseUserRepository.java deleted file mode 100644 index b5090a1e..00000000 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseUserRepository.java +++ /dev/null @@ -1,112 +0,0 @@ -package com.cameleer3.server.app.storage; - -import com.cameleer3.server.core.security.UserInfo; -import com.cameleer3.server.core.security.UserRepository; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.stereotype.Repository; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.time.Instant; -import java.util.Arrays; -import java.util.List; -import java.util.Optional; - -/** - * ClickHouse implementation of {@link UserRepository}. - *

- * Uses ReplacingMergeTree — reads use {@code FINAL} to get the latest version. - */ -@Repository -public class ClickHouseUserRepository implements UserRepository { - - private final JdbcTemplate jdbc; - - public ClickHouseUserRepository(JdbcTemplate jdbc) { - this.jdbc = jdbc; - } - - @Override - public Optional findById(String userId) { - List results = jdbc.query( - "SELECT user_id, provider, email, display_name, roles, created_at " - + "FROM users FINAL WHERE user_id = ?", - this::mapRow, - userId - ); - return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0)); - } - - @Override - public List findAll() { - return jdbc.query( - "SELECT user_id, provider, email, display_name, roles, created_at FROM users FINAL ORDER BY user_id", - this::mapRow - ); - } - - @Override - public void upsert(UserInfo user) { - Optional existing = findById(user.userId()); - if (existing.isPresent()) { - UserInfo ex = existing.get(); - // Skip write if nothing changed — avoids accumulating un-merged rows - if (ex.provider().equals(user.provider()) - && ex.email().equals(user.email()) - && ex.displayName().equals(user.displayName()) - && ex.roles().equals(user.roles())) { - return; - } - jdbc.update( - "INSERT INTO users (user_id, provider, email, display_name, roles, created_at, updated_at) " - + "SELECT user_id, ?, ?, ?, ?, created_at, now64(3, 'UTC') " - + "FROM users FINAL WHERE user_id = ?", - user.provider(), - user.email(), - user.displayName(), - user.roles().toArray(new String[0]), - user.userId() - ); - } else { - jdbc.update( - "INSERT INTO users (user_id, provider, email, display_name, roles, updated_at) " - + "VALUES (?, ?, ?, ?, ?, now64(3, 'UTC'))", - user.userId(), - user.provider(), - user.email(), - user.displayName(), - user.roles().toArray(new String[0]) - ); - } - } - - @Override - public void updateRoles(String userId, List roles) { - // ReplacingMergeTree: insert a new row with updated_at to supersede the old one. - // Copy existing fields, update roles. - jdbc.update( - "INSERT INTO users (user_id, provider, email, display_name, roles, created_at, updated_at) " - + "SELECT user_id, provider, email, display_name, ?, created_at, now64(3, 'UTC') " - + "FROM users FINAL WHERE user_id = ?", - roles.toArray(new String[0]), - userId - ); - } - - @Override - public void delete(String userId) { - jdbc.update("DELETE FROM users WHERE user_id = ?", userId); - } - - private UserInfo mapRow(ResultSet rs, int rowNum) throws SQLException { - String[] rolesArray = (String[]) rs.getArray("roles").getArray(); - return new UserInfo( - rs.getString("user_id"), - rs.getString("provider"), - rs.getString("email"), - rs.getString("display_name"), - Arrays.asList(rolesArray), - rs.getTimestamp("created_at").toInstant() - ); - } -} diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresUserRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresUserRepository.java index f5867fec..6985b2a3 100644 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresUserRepository.java +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresUserRepository.java @@ -61,9 +61,11 @@ public class PostgresUserRepository implements UserRepository { private UserInfo mapUser(java.sql.ResultSet rs) throws java.sql.SQLException { Array rolesArray = rs.getArray("roles"); String[] roles = rolesArray != null ? (String[]) rolesArray.getArray() : new String[0]; + java.sql.Timestamp ts = rs.getTimestamp("created_at"); + java.time.Instant createdAt = ts != null ? ts.toInstant() : null; return new UserInfo( rs.getString("user_id"), rs.getString("provider"), rs.getString("email"), rs.getString("display_name"), - List.of(roles)); + List.of(roles), createdAt); } } diff --git a/cameleer3-server-app/src/main/resources/clickhouse/01-schema.sql b/cameleer3-server-app/src/main/resources/clickhouse/01-schema.sql deleted file mode 100644 index ab56da70..00000000 --- a/cameleer3-server-app/src/main/resources/clickhouse/01-schema.sql +++ /dev/null @@ -1,57 +0,0 @@ --- Cameleer3 ClickHouse Schema --- Tables for route executions, route diagrams, and agent metrics. - -CREATE TABLE IF NOT EXISTS route_executions ( - execution_id String, - route_id LowCardinality(String), - agent_id LowCardinality(String), - status LowCardinality(String), - start_time DateTime64(3, 'UTC'), - end_time Nullable(DateTime64(3, 'UTC')), - duration_ms UInt64, - correlation_id String, - exchange_id String, - error_message String DEFAULT '', - error_stacktrace String DEFAULT '', - -- Nested processor executions stored as parallel arrays - processor_ids Array(String), - processor_types Array(LowCardinality(String)), - processor_starts Array(DateTime64(3, 'UTC')), - processor_ends Array(DateTime64(3, 'UTC')), - processor_durations Array(UInt64), - processor_statuses Array(LowCardinality(String)), - -- Metadata - server_received_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC'), - -- Skip indexes - INDEX idx_correlation correlation_id TYPE bloom_filter GRANULARITY 4, - INDEX idx_error error_message TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4 -) -ENGINE = MergeTree() -PARTITION BY toYYYYMMDD(start_time) -ORDER BY (agent_id, status, start_time, execution_id) -TTL toDateTime(start_time) + toIntervalDay(30) -SETTINGS ttl_only_drop_parts = 1; - -CREATE TABLE IF NOT EXISTS route_diagrams ( - content_hash String, - route_id LowCardinality(String), - agent_id LowCardinality(String), - definition String, - created_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC') -) -ENGINE = ReplacingMergeTree(created_at) -ORDER BY (content_hash); - -CREATE TABLE IF NOT EXISTS agent_metrics ( - agent_id LowCardinality(String), - collected_at DateTime64(3, 'UTC'), - metric_name LowCardinality(String), - metric_value Float64, - tags Map(String, String), - server_received_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC') -) -ENGINE = MergeTree() -PARTITION BY toYYYYMMDD(collected_at) -ORDER BY (agent_id, metric_name, collected_at) -TTL toDateTime(collected_at) + toIntervalDay(30) -SETTINGS ttl_only_drop_parts = 1; diff --git a/cameleer3-server-app/src/main/resources/clickhouse/02-search-columns.sql b/cameleer3-server-app/src/main/resources/clickhouse/02-search-columns.sql deleted file mode 100644 index 2b11b435..00000000 --- a/cameleer3-server-app/src/main/resources/clickhouse/02-search-columns.sql +++ /dev/null @@ -1,25 +0,0 @@ --- Phase 2: Schema extension for search, detail, and diagram linking columns. --- Adds exchange snapshot data, processor tree metadata, and diagram content hash. - -ALTER TABLE route_executions - ADD COLUMN IF NOT EXISTS exchange_bodies String DEFAULT '', - ADD COLUMN IF NOT EXISTS exchange_headers String DEFAULT '', - ADD COLUMN IF NOT EXISTS processor_depths Array(UInt16) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_parent_indexes Array(Int32) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_error_messages Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_error_stacktraces Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_input_bodies Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_output_bodies Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_input_headers Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_output_headers Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_diagram_node_ids Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS diagram_content_hash String DEFAULT ''; - --- Skip indexes for full-text search on new text columns -ALTER TABLE route_executions - ADD INDEX IF NOT EXISTS idx_exchange_bodies exchange_bodies TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4, - ADD INDEX IF NOT EXISTS idx_exchange_headers exchange_headers TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4; - --- Skip index on error_stacktrace (not indexed in 01-schema.sql, needed for SRCH-05) -ALTER TABLE route_executions - ADD INDEX IF NOT EXISTS idx_error_stacktrace error_stacktrace TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4; diff --git a/cameleer3-server-app/src/main/resources/clickhouse/03-users.sql b/cameleer3-server-app/src/main/resources/clickhouse/03-users.sql deleted file mode 100644 index 9dc7ce7a..00000000 --- a/cameleer3-server-app/src/main/resources/clickhouse/03-users.sql +++ /dev/null @@ -1,10 +0,0 @@ -CREATE TABLE IF NOT EXISTS users ( - user_id String, - provider LowCardinality(String), - email String DEFAULT '', - display_name String DEFAULT '', - roles Array(LowCardinality(String)), - created_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC'), - updated_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC') -) ENGINE = ReplacingMergeTree(updated_at) -ORDER BY (user_id); diff --git a/cameleer3-server-app/src/main/resources/clickhouse/04-oidc-config.sql b/cameleer3-server-app/src/main/resources/clickhouse/04-oidc-config.sql deleted file mode 100644 index 35b4d896..00000000 --- a/cameleer3-server-app/src/main/resources/clickhouse/04-oidc-config.sql +++ /dev/null @@ -1,13 +0,0 @@ -CREATE TABLE IF NOT EXISTS oidc_config ( - config_id String DEFAULT 'default', - enabled Bool DEFAULT false, - issuer_uri String DEFAULT '', - client_id String DEFAULT '', - client_secret String DEFAULT '', - roles_claim String DEFAULT 'realm_access.roles', - default_roles Array(LowCardinality(String)), - auto_signup Bool DEFAULT true, - display_name_claim String DEFAULT 'name', - updated_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC') -) ENGINE = ReplacingMergeTree(updated_at) -ORDER BY (config_id); diff --git a/cameleer3-server-app/src/main/resources/clickhouse/05-oidc-auto-signup.sql b/cameleer3-server-app/src/main/resources/clickhouse/05-oidc-auto-signup.sql deleted file mode 100644 index 643a69ea..00000000 --- a/cameleer3-server-app/src/main/resources/clickhouse/05-oidc-auto-signup.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE oidc_config ADD COLUMN IF NOT EXISTS auto_signup Bool DEFAULT true; diff --git a/cameleer3-server-app/src/main/resources/clickhouse/06-oidc-display-name-claim.sql b/cameleer3-server-app/src/main/resources/clickhouse/06-oidc-display-name-claim.sql deleted file mode 100644 index ef1870bd..00000000 --- a/cameleer3-server-app/src/main/resources/clickhouse/06-oidc-display-name-claim.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE oidc_config ADD COLUMN IF NOT EXISTS display_name_claim String DEFAULT 'name'; diff --git a/cameleer3-server-app/src/main/resources/clickhouse/07-stats-rollup.sql b/cameleer3-server-app/src/main/resources/clickhouse/07-stats-rollup.sql deleted file mode 100644 index 5d1efe24..00000000 --- a/cameleer3-server-app/src/main/resources/clickhouse/07-stats-rollup.sql +++ /dev/null @@ -1,35 +0,0 @@ --- Pre-aggregated 5-minute stats rollup for route executions. --- Uses AggregatingMergeTree with -State/-Merge combinators so intermediate --- aggregates can be merged across arbitrary time windows and dimensions. - --- Drop existing objects to allow schema changes (MV must be dropped before table) -DROP VIEW IF EXISTS route_execution_stats_5m_mv; -DROP TABLE IF EXISTS route_execution_stats_5m; - -CREATE TABLE route_execution_stats_5m ( - bucket DateTime('UTC'), - route_id LowCardinality(String), - agent_id LowCardinality(String), - total_count AggregateFunction(count), - failed_count AggregateFunction(countIf, UInt8), - duration_sum AggregateFunction(sum, UInt64), - p99_duration AggregateFunction(quantileTDigest(0.99), UInt64) -) -ENGINE = AggregatingMergeTree() -PARTITION BY toYYYYMMDD(bucket) -ORDER BY (agent_id, route_id, bucket) -TTL bucket + toIntervalDay(30) -SETTINGS ttl_only_drop_parts = 1; - -CREATE MATERIALIZED VIEW route_execution_stats_5m_mv -TO route_execution_stats_5m -AS SELECT - toStartOfFiveMinutes(start_time) AS bucket, - route_id, - agent_id, - countState() AS total_count, - countIfState(status = 'FAILED') AS failed_count, - sumState(duration_ms) AS duration_sum, - quantileTDigestState(0.99)(duration_ms) AS p99_duration -FROM route_executions -GROUP BY bucket, route_id, agent_id; diff --git a/cameleer3-server-app/src/main/resources/clickhouse/08-stats-rollup-backfill.sql b/cameleer3-server-app/src/main/resources/clickhouse/08-stats-rollup-backfill.sql deleted file mode 100644 index 5e80a23a..00000000 --- a/cameleer3-server-app/src/main/resources/clickhouse/08-stats-rollup-backfill.sql +++ /dev/null @@ -1,16 +0,0 @@ --- One-time idempotent backfill of existing route_executions into the --- 5-minute stats rollup table. Safe for repeated execution — the WHERE --- clause skips the INSERT if the target table already contains data. - -INSERT INTO route_execution_stats_5m -SELECT - toStartOfFiveMinutes(start_time) AS bucket, - route_id, - agent_id, - countState() AS total_count, - countIfState(status = 'FAILED') AS failed_count, - sumState(duration_ms) AS duration_sum, - quantileTDigestState(0.99)(duration_ms) AS p99_duration -FROM route_executions -WHERE (SELECT count() FROM route_execution_stats_5m) = 0 -GROUP BY bucket, route_id, agent_id; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractClickHouseIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractClickHouseIT.java deleted file mode 100644 index d1271adb..00000000 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractClickHouseIT.java +++ /dev/null @@ -1,82 +0,0 @@ -package com.cameleer3.server.app; - -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.test.context.ActiveProfiles; -import org.springframework.test.context.DynamicPropertyRegistry; -import org.springframework.test.context.DynamicPropertySource; -import org.testcontainers.clickhouse.ClickHouseContainer; - -import org.junit.jupiter.api.BeforeAll; - -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.Statement; - -/** - * Base class for integration tests requiring a ClickHouse instance. - *

- * Uses Testcontainers to spin up a ClickHouse server and initializes the schema - * from {@code clickhouse/init/01-schema.sql} before the first test runs. - * Subclasses get a {@link JdbcTemplate} for direct database assertions. - *

- * Container lifecycle is managed manually (started once, shared across all test classes). - */ -@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT) -@ActiveProfiles("test") -public abstract class AbstractClickHouseIT { - - protected static final ClickHouseContainer CLICKHOUSE; - - static { - CLICKHOUSE = new ClickHouseContainer("clickhouse/clickhouse-server:25.3"); - CLICKHOUSE.start(); - } - - @Autowired - protected JdbcTemplate jdbcTemplate; - - @DynamicPropertySource - static void overrideProperties(DynamicPropertyRegistry registry) { - registry.add("spring.datasource.url", CLICKHOUSE::getJdbcUrl); - registry.add("spring.datasource.username", CLICKHOUSE::getUsername); - registry.add("spring.datasource.password", CLICKHOUSE::getPassword); - } - - @BeforeAll - static void initSchema() throws Exception { - // Surefire runs from the module directory; schema is in the project root - Path baseDir = Path.of("clickhouse/init"); - if (!Files.exists(baseDir)) { - baseDir = Path.of("../clickhouse/init"); - } - - // Load all schema files in order - String[] schemaFiles = {"01-schema.sql", "02-search-columns.sql", "03-users.sql", "04-oidc-config.sql", "05-oidc-auto-signup.sql"}; - - try (Connection conn = DriverManager.getConnection( - CLICKHOUSE.getJdbcUrl(), - CLICKHOUSE.getUsername(), - CLICKHOUSE.getPassword()); - Statement stmt = conn.createStatement()) { - - for (String schemaFile : schemaFiles) { - Path schemaPath = baseDir.resolve(schemaFile); - if (Files.exists(schemaPath)) { - String sql = Files.readString(schemaPath, StandardCharsets.UTF_8); - // Execute each statement separately (separated by semicolons) - for (String statement : sql.split(";")) { - String trimmed = statement.trim(); - if (!trimmed.isEmpty()) { - stmt.execute(trimmed); - } - } - } - } - } - } -} diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java index 26faf84a..490e20a9 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java @@ -1,6 +1,8 @@ package com.cameleer3.server.app; +import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.jdbc.core.JdbcTemplate; import org.springframework.test.context.DynamicPropertyRegistry; import org.springframework.test.context.DynamicPropertySource; import org.testcontainers.containers.PostgreSQLContainer; @@ -18,6 +20,9 @@ public abstract class AbstractPostgresIT { .withUsername("cameleer") .withPassword("test"); + @Autowired + protected JdbcTemplate jdbcTemplate; + @DynamicPropertySource static void configureProperties(DynamicPropertyRegistry registry) { registry.add("spring.datasource.url", postgres::getJdbcUrl); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentCommandControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentCommandControllerIT.java index ab98f30d..4ba36c5d 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentCommandControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentCommandControllerIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; @@ -18,7 +18,7 @@ import java.util.UUID; import static org.assertj.core.api.Assertions.assertThat; -class AgentCommandControllerIT extends AbstractClickHouseIT { +class AgentCommandControllerIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentRegistrationControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentRegistrationControllerIT.java index 652f92d8..763646b9 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentRegistrationControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentRegistrationControllerIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; @@ -16,7 +16,7 @@ import org.springframework.http.ResponseEntity; import static org.assertj.core.api.Assertions.assertThat; -class AgentRegistrationControllerIT extends AbstractClickHouseIT { +class AgentRegistrationControllerIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentSseControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentSseControllerIT.java index 1af16ed5..fddc7152 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentSseControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentSseControllerIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.jupiter.api.BeforeEach; @@ -30,7 +30,7 @@ import java.util.concurrent.atomic.AtomicInteger; import static org.assertj.core.api.Assertions.assertThat; import static org.awaitility.Awaitility.await; -class AgentSseControllerIT extends AbstractClickHouseIT { +class AgentSseControllerIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/BackpressureIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/BackpressureIT.java index aa8baa17..ee3db1fe 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/BackpressureIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/BackpressureIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import com.cameleer3.server.core.ingestion.IngestionService; import org.junit.jupiter.api.BeforeEach; @@ -13,21 +13,20 @@ import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; import org.springframework.test.context.TestPropertySource; -import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; /** - * Tests backpressure behavior when write buffers are full. - * Uses a tiny buffer (capacity=5) and a very long flush interval - * to prevent the scheduler from draining the buffer during the test. + * Tests backpressure behavior when the metrics write buffer is full. + *

+ * Execution and diagram ingestion are now synchronous (no buffers). + * Only the metrics pipeline still uses a write buffer with backpressure. */ @TestPropertySource(properties = { "ingestion.buffer-capacity=5", "ingestion.batch-size=5", "ingestion.flush-interval-ms=60000" // 60s -- effectively no flush during test }) -class BackpressureIT extends AbstractClickHouseIT { +class BackpressureIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; @@ -47,34 +46,31 @@ class BackpressureIT extends AbstractClickHouseIT { } @Test - void whenBufferFull_returns503WithRetryAfter() { - // Wait for any initial scheduled flush to complete, then fill buffer via batch POST - await().atMost(5, SECONDS).until(() -> ingestionService.getExecutionBufferDepth() == 0); - - // Fill the buffer completely with a batch of 5 + void whenMetricsBufferFull_returns503WithRetryAfter() { + // Fill the metrics buffer completely with a batch of 5 String batchJson = """ [ - {"routeId":"bp-0","exchangeId":"bp-e0","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]}, - {"routeId":"bp-1","exchangeId":"bp-e1","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]}, - {"routeId":"bp-2","exchangeId":"bp-e2","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]}, - {"routeId":"bp-3","exchangeId":"bp-e3","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]}, - {"routeId":"bp-4","exchangeId":"bp-e4","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]} + {"agentId":"bp-agent","timestamp":"2026-03-11T10:00:00Z","metrics":{}}, + {"agentId":"bp-agent","timestamp":"2026-03-11T10:00:01Z","metrics":{}}, + {"agentId":"bp-agent","timestamp":"2026-03-11T10:00:02Z","metrics":{}}, + {"agentId":"bp-agent","timestamp":"2026-03-11T10:00:03Z","metrics":{}}, + {"agentId":"bp-agent","timestamp":"2026-03-11T10:00:04Z","metrics":{}} ] """; ResponseEntity batchResponse = restTemplate.postForEntity( - "/api/v1/data/executions", + "/api/v1/data/metrics", new HttpEntity<>(batchJson, authHeaders), String.class); assertThat(batchResponse.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED); // Now buffer should be full -- next POST should get 503 String overflowJson = """ - {"routeId":"bp-overflow","exchangeId":"bp-overflow-e","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]} + [{"agentId":"bp-agent","timestamp":"2026-03-11T10:00:05Z","metrics":{}}] """; ResponseEntity response = restTemplate.postForEntity( - "/api/v1/data/executions", + "/api/v1/data/metrics", new HttpEntity<>(overflowJson, authHeaders), String.class); @@ -83,25 +79,17 @@ class BackpressureIT extends AbstractClickHouseIT { } @Test - void bufferedDataNotLost_afterBackpressure() { - // Post data to the diagram buffer (separate from executions used above) - for (int i = 0; i < 3; i++) { - String json = String.format(""" - { - "routeId": "bp-persist-diagram-%d", - "version": 1, - "nodes": [], - "edges": [] - } - """, i); + void executionIngestion_isSynchronous_returnsAccepted() { + String json = """ + {"routeId":"bp-sync","exchangeId":"bp-sync-e","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]} + """; - restTemplate.postForEntity( - "/api/v1/data/diagrams", - new HttpEntity<>(json, authHeaders), - String.class); - } + ResponseEntity response = restTemplate.postForEntity( + "/api/v1/data/executions", + new HttpEntity<>(json, authHeaders), + String.class); - // Data is in the buffer. Verify the buffer has data. - assertThat(ingestionService.getDiagramBufferDepth()).isGreaterThanOrEqualTo(3); + // Synchronous ingestion always returns 202 (no buffer to overflow) + assertThat(response.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED); } } diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java index cdd29df7..83fa17b1 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; @@ -23,7 +23,7 @@ import static org.awaitility.Awaitility.await; * Integration tests for the detail and processor snapshot endpoints. */ @TestInstance(TestInstance.Lifecycle.PER_CLASS) -class DetailControllerIT extends AbstractClickHouseIT { +class DetailControllerIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; @@ -121,7 +121,7 @@ class DetailControllerIT extends AbstractClickHouseIT { // Wait for flush and get the execution_id await().atMost(10, SECONDS).untilAsserted(() -> { Integer count = jdbcTemplate.queryForObject( - "SELECT count() FROM route_executions WHERE route_id = 'detail-test-route'", + "SELECT count(*) FROM route_executions WHERE route_id = 'detail-test-route'", Integer.class); assertThat(count).isGreaterThanOrEqualTo(1); }); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramControllerIT.java index 832967fc..af6f274d 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramControllerIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -15,7 +15,7 @@ import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; import static org.awaitility.Awaitility.await; -class DiagramControllerIT extends AbstractClickHouseIT { +class DiagramControllerIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; @@ -53,7 +53,7 @@ class DiagramControllerIT extends AbstractClickHouseIT { } @Test - void postDiagram_dataAppearsInClickHouseAfterFlush() { + void postDiagram_dataAppearsAfterFlush() { String json = """ { "routeId": "diagram-flush-route", @@ -72,7 +72,7 @@ class DiagramControllerIT extends AbstractClickHouseIT { await().atMost(10, SECONDS).untilAsserted(() -> { Integer count = jdbcTemplate.queryForObject( - "SELECT count() FROM route_diagrams WHERE route_id = 'diagram-flush-route'", + "SELECT count(*) FROM route_diagrams WHERE route_id = 'diagram-flush-route'", Integer.class); assertThat(count).isGreaterThanOrEqualTo(1); }); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramRenderControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramRenderControllerIT.java index f4b0308d..af0b8668 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramRenderControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramRenderControllerIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -20,7 +20,7 @@ import static org.awaitility.Awaitility.await; * Integration tests for {@link DiagramRenderController}. * Seeds a diagram via the ingestion endpoint, then tests rendering. */ -class DiagramRenderControllerIT extends AbstractClickHouseIT { +class DiagramRenderControllerIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; @@ -61,7 +61,7 @@ class DiagramRenderControllerIT extends AbstractClickHouseIT { new HttpEntity<>(json, securityHelper.authHeaders(jwt)), String.class); - // Wait for flush to ClickHouse and retrieve the content hash + // Wait for flush to storage and retrieve the content hash await().atMost(10, SECONDS).untilAsserted(() -> { String hash = jdbcTemplate.queryForObject( "SELECT content_hash FROM route_diagrams WHERE route_id = 'render-test-route' LIMIT 1", diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ExecutionControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ExecutionControllerIT.java index a2bf59d5..65f72d85 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ExecutionControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ExecutionControllerIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -16,7 +16,7 @@ import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; import static org.awaitility.Awaitility.await; -class ExecutionControllerIT extends AbstractClickHouseIT { +class ExecutionControllerIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; @@ -90,7 +90,7 @@ class ExecutionControllerIT extends AbstractClickHouseIT { } @Test - void postExecution_dataAppearsInClickHouseAfterFlush() { + void postExecution_dataAppearsAfterFlush() { String json = """ { "routeId": "flush-test-route", @@ -111,7 +111,7 @@ class ExecutionControllerIT extends AbstractClickHouseIT { await().atMost(10, SECONDS).untilAsserted(() -> { Integer count = jdbcTemplate.queryForObject( - "SELECT count() FROM route_executions WHERE route_id = 'flush-test-route'", + "SELECT count(*) FROM route_executions WHERE route_id = 'flush-test-route'", Integer.class); assertThat(count).isGreaterThanOrEqualTo(1); }); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ForwardCompatIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ForwardCompatIT.java index 9d68212d..555bbf7c 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ForwardCompatIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ForwardCompatIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -16,7 +16,7 @@ import static org.assertj.core.api.Assertions.assertThat; * Integration test for forward compatibility (API-05). * Verifies that unknown JSON fields in request bodies do not cause deserialization errors. */ -class ForwardCompatIT extends AbstractClickHouseIT { +class ForwardCompatIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/HealthControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/HealthControllerIT.java index c701af3b..9ca31887 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/HealthControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/HealthControllerIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import org.junit.jupiter.api.Test; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.web.client.TestRestTemplate; @@ -8,9 +8,9 @@ import org.springframework.boot.test.web.client.TestRestTemplate; import static org.assertj.core.api.Assertions.assertThat; /** - * Integration tests for the health endpoint and ClickHouse TTL verification. + * Integration tests for the health endpoint. */ -class HealthControllerIT extends AbstractClickHouseIT { +class HealthControllerIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; @@ -28,20 +28,4 @@ class HealthControllerIT extends AbstractClickHouseIT { var response = restTemplate.getForEntity("/api/v1/health", String.class); assertThat(response.getStatusCode().value()).isEqualTo(200); } - - @Test - void ttlConfiguredOnRouteExecutions() { - String createTable = jdbcTemplate.queryForObject( - "SHOW CREATE TABLE route_executions", String.class); - assertThat(createTable).containsIgnoringCase("TTL"); - assertThat(createTable).contains("toIntervalDay(30)"); - } - - @Test - void ttlConfiguredOnAgentMetrics() { - String createTable = jdbcTemplate.queryForObject( - "SHOW CREATE TABLE agent_metrics", String.class); - assertThat(createTable).containsIgnoringCase("TTL"); - assertThat(createTable).contains("toIntervalDay(30)"); - } } diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/MetricsControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/MetricsControllerIT.java index d0eb9793..8f0d8a14 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/MetricsControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/MetricsControllerIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -15,7 +15,7 @@ import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; import static org.awaitility.Awaitility.await; -class MetricsControllerIT extends AbstractClickHouseIT { +class MetricsControllerIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; @@ -52,7 +52,7 @@ class MetricsControllerIT extends AbstractClickHouseIT { } @Test - void postMetrics_dataAppearsInClickHouseAfterFlush() { + void postMetrics_dataAppearsAfterFlush() { String json = """ [{ "agentId": "agent-flush-test", @@ -70,7 +70,7 @@ class MetricsControllerIT extends AbstractClickHouseIT { await().atMost(10, SECONDS).untilAsserted(() -> { Integer count = jdbcTemplate.queryForObject( - "SELECT count() FROM agent_metrics WHERE agent_id = 'agent-flush-test'", + "SELECT count(*) FROM agent_metrics WHERE agent_id = 'agent-flush-test'", Integer.class); assertThat(count).isGreaterThanOrEqualTo(1); }); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/OpenApiIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/OpenApiIT.java index e474f2b8..a8ceb053 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/OpenApiIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/OpenApiIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import org.junit.jupiter.api.Test; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.web.client.TestRestTemplate; @@ -10,7 +10,7 @@ import static org.assertj.core.api.Assertions.assertThat; /** * Integration tests for OpenAPI documentation endpoints. */ -class OpenApiIT extends AbstractClickHouseIT { +class OpenApiIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java index 8ae4e072..95f42b2a 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.controller; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; @@ -24,7 +24,7 @@ import static org.awaitility.Awaitility.await; * Tests all filter types independently and in combination. */ @TestInstance(TestInstance.Lifecycle.PER_CLASS) -class SearchControllerIT extends AbstractClickHouseIT { +class SearchControllerIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; @@ -155,7 +155,7 @@ class SearchControllerIT extends AbstractClickHouseIT { // Wait for all data to flush await().atMost(10, SECONDS).untilAsserted(() -> { Integer count = jdbcTemplate.queryForObject( - "SELECT count() FROM route_executions WHERE route_id LIKE 'search-route-%'", + "SELECT count(*) FROM route_executions WHERE route_id LIKE 'search-route-%'", Integer.class); assertThat(count).isEqualTo(10); }); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/interceptor/ProtocolVersionIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/interceptor/ProtocolVersionIT.java index 26e8d5a9..35d0c0d1 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/interceptor/ProtocolVersionIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/interceptor/ProtocolVersionIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.interceptor; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -18,7 +18,7 @@ import static org.assertj.core.api.Assertions.assertThat; * With security enabled, requests to protected endpoints need JWT auth * to reach the interceptor layer. */ -class ProtocolVersionIT extends AbstractClickHouseIT { +class ProtocolVersionIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java index 24054006..2194ecb4 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java @@ -24,8 +24,7 @@ class OpenSearchIndexIT extends AbstractPostgresIT { @Container static final OpensearchContainer opensearch = - new OpensearchContainer<>("opensearchproject/opensearch:2.19.0") - .withSecurityEnabled(false); + new OpensearchContainer<>("opensearchproject/opensearch:2.19.0"); @DynamicPropertySource static void configureOpenSearch(DynamicPropertyRegistry registry) { @@ -58,7 +57,7 @@ class OpenSearchIndexIT extends AbstractPostgresIT { SearchResult result = searchIndex.search(request); assertTrue(result.total() > 0); - assertEquals("search-1", result.items().get(0).executionId()); + assertEquals("search-1", result.data().get(0).executionId()); } @Test diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/BootstrapTokenIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/BootstrapTokenIT.java index 1309517b..3ce87894 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/BootstrapTokenIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/BootstrapTokenIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.security; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.jupiter.api.Test; @@ -17,7 +17,7 @@ import static org.assertj.core.api.Assertions.assertThat; /** * Integration tests verifying bootstrap token validation on the registration endpoint. */ -class BootstrapTokenIT extends AbstractClickHouseIT { +class BootstrapTokenIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/JwtRefreshIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/JwtRefreshIT.java index 7e40e0a1..87ddf25e 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/JwtRefreshIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/JwtRefreshIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.security; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import com.cameleer3.server.core.security.JwtService; import com.fasterxml.jackson.databind.JsonNode; @@ -20,7 +20,7 @@ import static org.assertj.core.api.Assertions.assertThat; /** * Integration tests for the JWT refresh flow. */ -class JwtRefreshIT extends AbstractClickHouseIT { +class JwtRefreshIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/RegistrationSecurityIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/RegistrationSecurityIT.java index abd35524..e4ee5da4 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/RegistrationSecurityIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/RegistrationSecurityIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.security; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import org.junit.jupiter.api.Test; @@ -19,7 +19,7 @@ import static org.assertj.core.api.Assertions.assertThat; * Integration tests verifying that registration returns security credentials * and that those credentials can be used to access protected endpoints. */ -class RegistrationSecurityIT extends AbstractClickHouseIT { +class RegistrationSecurityIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SecurityFilterIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SecurityFilterIT.java index 38f25766..ba8dfcbb 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SecurityFilterIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SecurityFilterIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.security; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -19,7 +19,7 @@ import static org.assertj.core.api.Assertions.assertThat; * Integration tests verifying that the SecurityFilterChain correctly * protects endpoints and allows public access where configured. */ -class SecurityFilterIT extends AbstractClickHouseIT { +class SecurityFilterIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SseSigningIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SseSigningIT.java index ccbb8af9..d611520b 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SseSigningIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SseSigningIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.security; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.core.security.Ed25519SigningService; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; @@ -44,7 +44,7 @@ import static org.awaitility.Awaitility.await; * open SSE stream (with JWT query param) -> push config-update command (with JWT) -> * receive SSE event -> verify signature field against server's Ed25519 public key. */ -class SseSigningIT extends AbstractClickHouseIT { +class SseSigningIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/DiagramLinkingIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/DiagramLinkingIT.java index 7322ec26..ab0f01c3 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/DiagramLinkingIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/DiagramLinkingIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.storage; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -19,7 +19,7 @@ import static org.awaitility.Awaitility.await; * Integration test proving that diagram_content_hash is populated during * execution ingestion when a RouteGraph exists for the same route+agent. */ -class DiagramLinkingIT extends AbstractClickHouseIT { +class DiagramLinkingIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/IngestionSchemaIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/IngestionSchemaIT.java index d0d79e02..4cfa8247 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/IngestionSchemaIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/IngestionSchemaIT.java @@ -1,6 +1,6 @@ package com.cameleer3.server.app.storage; -import com.cameleer3.server.app.AbstractClickHouseIT; +import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -22,7 +22,7 @@ import static org.awaitility.Awaitility.await; * Integration test verifying that Phase 2 schema columns are correctly populated * during ingestion of route executions with nested processors and exchange data. */ -class IngestionSchemaIT extends AbstractClickHouseIT { +class IngestionSchemaIT extends AbstractPostgresIT { @Autowired private TestRestTemplate restTemplate; diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ExecutionDetail.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ExecutionDetail.java index e739dd81..1b474ba0 100644 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ExecutionDetail.java +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ExecutionDetail.java @@ -7,7 +7,7 @@ import java.util.List; * Full detail of a route execution, including the nested processor tree. *

* This is the rich detail model returned by the detail endpoint. The processor - * tree is reconstructed from flat parallel arrays stored in ClickHouse. + * tree is reconstructed from individual processor records stored in PostgreSQL. * * @param executionId unique execution identifier * @param routeId Camel route ID diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ProcessorNode.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ProcessorNode.java index 10d1e88e..65e08b9a 100644 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ProcessorNode.java +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ProcessorNode.java @@ -7,7 +7,7 @@ import java.util.List; /** * Nested tree node representing a single processor execution within a route. *

- * The tree structure is reconstructed from flat parallel arrays stored in ClickHouse. + * The tree structure is reconstructed from individual processor records stored in PostgreSQL. * Each node may have children (e.g., processors inside a split or try-catch block). */ public final class ProcessorNode { diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/RawExecutionRow.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/RawExecutionRow.java deleted file mode 100644 index 2297e4b6..00000000 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/RawExecutionRow.java +++ /dev/null @@ -1,59 +0,0 @@ -package com.cameleer3.server.core.detail; - -import java.time.Instant; - -/** - * Raw execution data from ClickHouse, including all parallel arrays needed - * for tree reconstruction. This is the intermediate representation between - * the database and the {@link ExecutionDetail} domain object. - * - * @param executionId unique execution identifier - * @param routeId Camel route ID - * @param agentId agent instance - * @param status execution status - * @param startTime execution start time - * @param endTime execution end time - * @param durationMs execution duration in milliseconds - * @param correlationId correlation ID - * @param exchangeId Camel exchange ID - * @param errorMessage execution-level error message - * @param errorStackTrace execution-level error stack trace - * @param diagramContentHash content hash for diagram linking - * @param processorIds processor IDs (parallel array) - * @param processorTypes processor types (parallel array) - * @param processorStatuses processor statuses (parallel array) - * @param processorStarts processor start times (parallel array) - * @param processorEnds processor end times (parallel array) - * @param processorDurations processor durations in ms (parallel array) - * @param processorDiagramNodeIds processor diagram node IDs (parallel array) - * @param processorErrorMessages processor error messages (parallel array) - * @param processorErrorStacktraces processor error stack traces (parallel array) - * @param processorDepths processor tree depths (parallel array) - * @param processorParentIndexes processor parent indexes, -1 for roots (parallel array) - */ -public record RawExecutionRow( - String executionId, - String routeId, - String agentId, - String status, - Instant startTime, - Instant endTime, - long durationMs, - String correlationId, - String exchangeId, - String errorMessage, - String errorStackTrace, - String diagramContentHash, - String[] processorIds, - String[] processorTypes, - String[] processorStatuses, - Instant[] processorStarts, - Instant[] processorEnds, - long[] processorDurations, - String[] processorDiagramNodeIds, - String[] processorErrorMessages, - String[] processorErrorStacktraces, - int[] processorDepths, - int[] processorParentIndexes -) { -} diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java index c5e17e6f..36419fb8 100644 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java @@ -70,12 +70,12 @@ public class IngestionService { private ExecutionRecord toExecutionRecord(String agentId, String groupName, RouteExecution exec) { return new ExecutionRecord( - exec.getExecutionId(), exec.getRouteId(), agentId, groupName, + exec.getExchangeId(), exec.getRouteId(), agentId, groupName, exec.getStatus() != null ? exec.getStatus().name() : "RUNNING", exec.getCorrelationId(), exec.getExchangeId(), exec.getStartTime(), exec.getEndTime(), exec.getDurationMs(), - exec.getErrorMessage(), exec.getErrorStacktrace(), + exec.getErrorMessage(), exec.getErrorStackTrace(), null // diagramContentHash set separately ); } @@ -94,7 +94,7 @@ public class IngestionService { p.getStartTime() != null ? p.getStartTime() : execStartTime, p.getEndTime(), p.getDurationMs(), - p.getErrorMessage(), p.getErrorStacktrace(), + p.getErrorMessage(), p.getErrorStackTrace(), truncateBody(p.getInputBody()), truncateBody(p.getOutputBody()), p.getInputHeaders() != null ? p.getInputHeaders().toString() : null, p.getOutputHeaders() != null ? p.getOutputHeaders().toString() : null diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/WriteBuffer.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/WriteBuffer.java index 267de43c..bcd1077c 100644 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/WriteBuffer.java +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/WriteBuffer.java @@ -6,7 +6,7 @@ import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; /** - * Bounded write buffer that decouples HTTP ingestion from ClickHouse batch inserts. + * Bounded write buffer that decouples HTTP ingestion from database batch inserts. *

* Items are offered to the buffer by controllers and drained in batches by a * scheduled flush task. When the buffer is full, {@link #offer} returns false, diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchEngine.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchEngine.java deleted file mode 100644 index 44955c18..00000000 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchEngine.java +++ /dev/null @@ -1,72 +0,0 @@ -package com.cameleer3.server.core.search; - -import java.util.List; - -/** - * Swappable search backend abstraction. - *

- * The current implementation uses ClickHouse for search. This interface allows - * replacing the search backend (e.g., with OpenSearch) without changing the - * service layer or controllers. - */ -public interface SearchEngine { - - /** - * Search for route executions matching the given criteria. - * - * @param request search filters and pagination - * @return paginated search results with total count - */ - SearchResult search(SearchRequest request); - - /** - * Count route executions matching the given criteria (without fetching data). - * - * @param request search filters - * @return total number of matching executions - */ - long count(SearchRequest request); - - /** - * Compute aggregate stats: P99 latency and count of currently running executions. - * - * @param from start of the time window - * @param to end of the time window - * @return execution stats - */ - ExecutionStats stats(java.time.Instant from, java.time.Instant to); - - /** - * Compute aggregate stats scoped to specific routes and agents. - * - * @param from start of the time window - * @param to end of the time window - * @param routeId optional route ID filter - * @param agentIds optional agent ID filter (from group resolution) - * @return execution stats - */ - ExecutionStats stats(java.time.Instant from, java.time.Instant to, String routeId, List agentIds); - - /** - * Compute bucketed time-series stats over a time window. - * - * @param from start of the time window - * @param to end of the time window - * @param bucketCount number of buckets to divide the window into - * @return bucketed stats - */ - StatsTimeseries timeseries(java.time.Instant from, java.time.Instant to, int bucketCount); - - /** - * Compute bucketed time-series stats scoped to specific routes and agents. - * - * @param from start of the time window - * @param to end of the time window - * @param bucketCount number of buckets to divide the window into - * @param routeId optional route ID filter - * @param agentIds optional agent ID filter (from group resolution) - * @return bucketed stats - */ - StatsTimeseries timeseries(java.time.Instant from, java.time.Instant to, int bucketCount, - String routeId, List agentIds); -} diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchRequest.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchRequest.java index ab97c31e..17ff44c9 100644 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchRequest.java +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchRequest.java @@ -75,7 +75,7 @@ public record SearchRequest( if (!"asc".equalsIgnoreCase(sortDir)) sortDir = "desc"; } - /** Returns the validated ClickHouse column name for ORDER BY. */ + /** Returns the validated database column name for ORDER BY. */ public String sortColumn() { return SORT_FIELD_TO_COLUMN.getOrDefault(sortField, "start_time"); } diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramRepository.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramRepository.java deleted file mode 100644 index 3a2c4bd6..00000000 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramRepository.java +++ /dev/null @@ -1,35 +0,0 @@ -package com.cameleer3.server.core.storage; - -import com.cameleer3.common.graph.RouteGraph; -import com.cameleer3.server.core.ingestion.TaggedDiagram; - -import java.util.List; -import java.util.Optional; - -/** - * Repository for route diagram storage with content-hash deduplication. - */ -public interface DiagramRepository { - - /** - * Store a tagged route graph. Uses content-hash deduplication via ReplacingMergeTree. - */ - void store(TaggedDiagram diagram); - - /** - * Find a route graph by its content hash. - */ - Optional findByContentHash(String contentHash); - - /** - * Find the content hash for the latest diagram of a given route and agent. - */ - Optional findContentHashForRoute(String routeId, String agentId); - - /** - * Find the content hash for the latest diagram of a route across any agent in the given list. - * All instances of the same application produce the same route graph, so any agent's - * diagram for the same route will have the same content hash. - */ - Optional findContentHashForRouteByAgents(String routeId, List agentIds); -} diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionRepository.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionRepository.java deleted file mode 100644 index c58c1f81..00000000 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionRepository.java +++ /dev/null @@ -1,28 +0,0 @@ -package com.cameleer3.server.core.storage; - -import com.cameleer3.server.core.detail.RawExecutionRow; -import com.cameleer3.server.core.ingestion.TaggedExecution; - -import java.util.List; -import java.util.Optional; - -/** - * Repository for route execution storage and retrieval. - */ -public interface ExecutionRepository { - - /** - * Insert a batch of tagged route executions. - * Implementations must perform a single batch insert for efficiency. - */ - void insertBatch(List executions); - - /** - * Find a raw execution row by execution ID, including all parallel arrays - * needed for processor tree reconstruction. - * - * @param executionId the execution ID to look up - * @return the raw execution row, or empty if not found - */ - Optional findRawById(String executionId); -} diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsRepository.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsRepository.java deleted file mode 100644 index ad15ef0a..00000000 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsRepository.java +++ /dev/null @@ -1,17 +0,0 @@ -package com.cameleer3.server.core.storage; - -import com.cameleer3.server.core.storage.model.MetricsSnapshot; - -import java.util.List; - -/** - * Repository for agent metrics batch inserts into ClickHouse. - */ -public interface MetricsRepository { - - /** - * Insert a batch of metrics snapshots. - * Implementations must perform a single batch insert for efficiency. - */ - void insertBatch(List metrics); -} diff --git a/cameleer3-server-core/src/test/java/com/cameleer3/server/core/detail/TreeReconstructionTest.java b/cameleer3-server-core/src/test/java/com/cameleer3/server/core/detail/TreeReconstructionTest.java index a6b4251a..89311bfe 100644 --- a/cameleer3-server-core/src/test/java/com/cameleer3/server/core/detail/TreeReconstructionTest.java +++ b/cameleer3-server-core/src/test/java/com/cameleer3/server/core/detail/TreeReconstructionTest.java @@ -1,6 +1,7 @@ package com.cameleer3.server.core.detail; -import com.cameleer3.server.core.storage.ExecutionRepository; +import com.cameleer3.server.core.storage.ExecutionStore; +import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord; import org.junit.jupiter.api.Test; import java.time.Instant; @@ -10,33 +11,36 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.mock; /** - * Unit tests for {@link DetailService#reconstructTree} logic. + * Unit tests for {@link DetailService#buildTree} logic. *

- * Verifies correct parent-child wiring from flat parallel arrays. + * Verifies correct parent-child wiring from flat ProcessorRecord lists. */ class TreeReconstructionTest { - private final DetailService detailService = new DetailService(mock(ExecutionRepository.class)); + private final DetailService detailService = new DetailService(mock(ExecutionStore.class)); private static final Instant NOW = Instant.parse("2026-03-10T10:00:00Z"); + private ProcessorRecord proc(String id, String type, String status, + int depth, String parentId) { + return new ProcessorRecord( + "exec-1", id, type, "node-" + id, + "default", "route1", depth, parentId, + status, NOW, NOW, 10L, + null, null, null, null, null, null + ); + } + @Test void linearChain_rootChildGrandchild() { - // [root, child, grandchild], depths=[0,1,2], parents=[-1,0,1] - List roots = detailService.reconstructTree( - new String[]{"root", "child", "grandchild"}, - new String[]{"log", "bean", "to"}, - new String[]{"COMPLETED", "COMPLETED", "COMPLETED"}, - new Instant[]{NOW, NOW, NOW}, - new Instant[]{NOW, NOW, NOW}, - new long[]{10, 20, 30}, - new String[]{"n1", "n2", "n3"}, - new String[]{"", "", ""}, - new String[]{"", "", ""}, - new int[]{0, 1, 2}, - new int[]{-1, 0, 1} + List processors = List.of( + proc("root", "log", "COMPLETED", 0, null), + proc("child", "bean", "COMPLETED", 1, "root"), + proc("grandchild", "to", "COMPLETED", 2, "child") ); + List roots = detailService.buildTree(processors); + assertThat(roots).hasSize(1); ProcessorNode root = roots.get(0); assertThat(root.getProcessorId()).isEqualTo("root"); @@ -53,21 +57,14 @@ class TreeReconstructionTest { @Test void multipleRoots_noNesting() { - // [A, B, C], depths=[0,0,0], parents=[-1,-1,-1] - List roots = detailService.reconstructTree( - new String[]{"A", "B", "C"}, - new String[]{"log", "log", "log"}, - new String[]{"COMPLETED", "COMPLETED", "COMPLETED"}, - new Instant[]{NOW, NOW, NOW}, - new Instant[]{NOW, NOW, NOW}, - new long[]{10, 20, 30}, - new String[]{"n1", "n2", "n3"}, - new String[]{"", "", ""}, - new String[]{"", "", ""}, - new int[]{0, 0, 0}, - new int[]{-1, -1, -1} + List processors = List.of( + proc("A", "log", "COMPLETED", 0, null), + proc("B", "log", "COMPLETED", 0, null), + proc("C", "log", "COMPLETED", 0, null) ); + List roots = detailService.buildTree(processors); + assertThat(roots).hasSize(3); assertThat(roots.get(0).getProcessorId()).isEqualTo("A"); assertThat(roots.get(1).getProcessorId()).isEqualTo("B"); @@ -77,21 +74,15 @@ class TreeReconstructionTest { @Test void branchingTree_parentWithTwoChildren_secondChildHasGrandchild() { - // [parent, child1, child2, grandchild], depths=[0,1,1,2], parents=[-1,0,0,2] - List roots = detailService.reconstructTree( - new String[]{"parent", "child1", "child2", "grandchild"}, - new String[]{"split", "log", "bean", "to"}, - new String[]{"COMPLETED", "COMPLETED", "COMPLETED", "COMPLETED"}, - new Instant[]{NOW, NOW, NOW, NOW}, - new Instant[]{NOW, NOW, NOW, NOW}, - new long[]{100, 20, 30, 5}, - new String[]{"n1", "n2", "n3", "n4"}, - new String[]{"", "", "", ""}, - new String[]{"", "", "", ""}, - new int[]{0, 1, 1, 2}, - new int[]{-1, 0, 0, 2} + List processors = List.of( + proc("parent", "split", "COMPLETED", 0, null), + proc("child1", "log", "COMPLETED", 1, "parent"), + proc("child2", "bean", "COMPLETED", 1, "parent"), + proc("grandchild", "to", "COMPLETED", 2, "child2") ); + List roots = detailService.buildTree(processors); + assertThat(roots).hasSize(1); ProcessorNode parent = roots.get(0); assertThat(parent.getProcessorId()).isEqualTo("parent"); @@ -111,30 +102,8 @@ class TreeReconstructionTest { } @Test - void emptyArrays_producesEmptyList() { - List roots = detailService.reconstructTree( - new String[]{}, - new String[]{}, - new String[]{}, - new Instant[]{}, - new Instant[]{}, - new long[]{}, - new String[]{}, - new String[]{}, - new String[]{}, - new int[]{}, - new int[]{} - ); - - assertThat(roots).isEmpty(); - } - - @Test - void nullArrays_producesEmptyList() { - List roots = detailService.reconstructTree( - null, null, null, null, null, null, null, null, null, null, null - ); - + void emptyList_producesEmptyRoots() { + List roots = detailService.buildTree(List.of()); assertThat(roots).isEmpty(); } } diff --git a/clickhouse/init/01-schema.sql b/clickhouse/init/01-schema.sql deleted file mode 100644 index ab56da70..00000000 --- a/clickhouse/init/01-schema.sql +++ /dev/null @@ -1,57 +0,0 @@ --- Cameleer3 ClickHouse Schema --- Tables for route executions, route diagrams, and agent metrics. - -CREATE TABLE IF NOT EXISTS route_executions ( - execution_id String, - route_id LowCardinality(String), - agent_id LowCardinality(String), - status LowCardinality(String), - start_time DateTime64(3, 'UTC'), - end_time Nullable(DateTime64(3, 'UTC')), - duration_ms UInt64, - correlation_id String, - exchange_id String, - error_message String DEFAULT '', - error_stacktrace String DEFAULT '', - -- Nested processor executions stored as parallel arrays - processor_ids Array(String), - processor_types Array(LowCardinality(String)), - processor_starts Array(DateTime64(3, 'UTC')), - processor_ends Array(DateTime64(3, 'UTC')), - processor_durations Array(UInt64), - processor_statuses Array(LowCardinality(String)), - -- Metadata - server_received_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC'), - -- Skip indexes - INDEX idx_correlation correlation_id TYPE bloom_filter GRANULARITY 4, - INDEX idx_error error_message TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4 -) -ENGINE = MergeTree() -PARTITION BY toYYYYMMDD(start_time) -ORDER BY (agent_id, status, start_time, execution_id) -TTL toDateTime(start_time) + toIntervalDay(30) -SETTINGS ttl_only_drop_parts = 1; - -CREATE TABLE IF NOT EXISTS route_diagrams ( - content_hash String, - route_id LowCardinality(String), - agent_id LowCardinality(String), - definition String, - created_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC') -) -ENGINE = ReplacingMergeTree(created_at) -ORDER BY (content_hash); - -CREATE TABLE IF NOT EXISTS agent_metrics ( - agent_id LowCardinality(String), - collected_at DateTime64(3, 'UTC'), - metric_name LowCardinality(String), - metric_value Float64, - tags Map(String, String), - server_received_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC') -) -ENGINE = MergeTree() -PARTITION BY toYYYYMMDD(collected_at) -ORDER BY (agent_id, metric_name, collected_at) -TTL toDateTime(collected_at) + toIntervalDay(30) -SETTINGS ttl_only_drop_parts = 1; diff --git a/clickhouse/init/02-search-columns.sql b/clickhouse/init/02-search-columns.sql deleted file mode 100644 index 2b11b435..00000000 --- a/clickhouse/init/02-search-columns.sql +++ /dev/null @@ -1,25 +0,0 @@ --- Phase 2: Schema extension for search, detail, and diagram linking columns. --- Adds exchange snapshot data, processor tree metadata, and diagram content hash. - -ALTER TABLE route_executions - ADD COLUMN IF NOT EXISTS exchange_bodies String DEFAULT '', - ADD COLUMN IF NOT EXISTS exchange_headers String DEFAULT '', - ADD COLUMN IF NOT EXISTS processor_depths Array(UInt16) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_parent_indexes Array(Int32) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_error_messages Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_error_stacktraces Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_input_bodies Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_output_bodies Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_input_headers Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_output_headers Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS processor_diagram_node_ids Array(String) DEFAULT [], - ADD COLUMN IF NOT EXISTS diagram_content_hash String DEFAULT ''; - --- Skip indexes for full-text search on new text columns -ALTER TABLE route_executions - ADD INDEX IF NOT EXISTS idx_exchange_bodies exchange_bodies TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4, - ADD INDEX IF NOT EXISTS idx_exchange_headers exchange_headers TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4; - --- Skip index on error_stacktrace (not indexed in 01-schema.sql, needed for SRCH-05) -ALTER TABLE route_executions - ADD INDEX IF NOT EXISTS idx_error_stacktrace error_stacktrace TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4; diff --git a/clickhouse/init/03-users.sql b/clickhouse/init/03-users.sql deleted file mode 100644 index 9dc7ce7a..00000000 --- a/clickhouse/init/03-users.sql +++ /dev/null @@ -1,10 +0,0 @@ -CREATE TABLE IF NOT EXISTS users ( - user_id String, - provider LowCardinality(String), - email String DEFAULT '', - display_name String DEFAULT '', - roles Array(LowCardinality(String)), - created_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC'), - updated_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC') -) ENGINE = ReplacingMergeTree(updated_at) -ORDER BY (user_id); diff --git a/clickhouse/init/04-oidc-config.sql b/clickhouse/init/04-oidc-config.sql deleted file mode 100644 index 35b4d896..00000000 --- a/clickhouse/init/04-oidc-config.sql +++ /dev/null @@ -1,13 +0,0 @@ -CREATE TABLE IF NOT EXISTS oidc_config ( - config_id String DEFAULT 'default', - enabled Bool DEFAULT false, - issuer_uri String DEFAULT '', - client_id String DEFAULT '', - client_secret String DEFAULT '', - roles_claim String DEFAULT 'realm_access.roles', - default_roles Array(LowCardinality(String)), - auto_signup Bool DEFAULT true, - display_name_claim String DEFAULT 'name', - updated_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC') -) ENGINE = ReplacingMergeTree(updated_at) -ORDER BY (config_id); diff --git a/clickhouse/init/05-oidc-auto-signup.sql b/clickhouse/init/05-oidc-auto-signup.sql deleted file mode 100644 index 643a69ea..00000000 --- a/clickhouse/init/05-oidc-auto-signup.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE oidc_config ADD COLUMN IF NOT EXISTS auto_signup Bool DEFAULT true; diff --git a/clickhouse/init/06-oidc-display-name-claim.sql b/clickhouse/init/06-oidc-display-name-claim.sql deleted file mode 100644 index ef1870bd..00000000 --- a/clickhouse/init/06-oidc-display-name-claim.sql +++ /dev/null @@ -1 +0,0 @@ -ALTER TABLE oidc_config ADD COLUMN IF NOT EXISTS display_name_claim String DEFAULT 'name'; diff --git a/pom.xml b/pom.xml index bca775b0..2f27d0fd 100644 --- a/pom.xml +++ b/pom.xml @@ -44,6 +44,13 @@ cameleer3-server-core ${project.version} + + org.testcontainers + testcontainers-bom + ${testcontainers.version} + pom + import + From a344be3a49b6b2f45d51e02cd5819c7ccb1ca9cf Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 18:58:35 +0100 Subject: [PATCH 17/32] deploy: replace ClickHouse with PostgreSQL/TimescaleDB + OpenSearch in K8s manifests - Dockerfile: update default SPRING_DATASOURCE_URL to jdbc:postgresql, add OPENSEARCH_URL default env - deploy/postgres.yaml: new TimescaleDB StatefulSet + headless Service (10Gi PVC, pg_isready probes) - deploy/opensearch.yaml: new OpenSearch 2.19.0 StatefulSet + headless Service (10Gi PVC, single-node, security disabled) - deploy/server.yaml: switch datasource env from clickhouse-credentials to postgres-credentials, add OPENSEARCH_URL Co-Authored-By: Claude Sonnet 4.6 --- Dockerfile | 3 +- deploy/opensearch.yaml | 84 ++++++++++++++++++++++++++++++++++++++ deploy/postgres.yaml | 91 ++++++++++++++++++++++++++++++++++++++++++ deploy/server.yaml | 12 +++--- 4 files changed, 184 insertions(+), 6 deletions(-) create mode 100644 deploy/opensearch.yaml create mode 100644 deploy/postgres.yaml diff --git a/Dockerfile b/Dockerfile index bd3a32b9..989e1688 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,9 +18,10 @@ FROM eclipse-temurin:17-jre WORKDIR /app COPY --from=build /build/cameleer3-server-app/target/cameleer3-server-app-*.jar /app/server.jar -ENV SPRING_DATASOURCE_URL=jdbc:ch://clickhouse:8123/cameleer3 +ENV SPRING_DATASOURCE_URL=jdbc:postgresql://postgres:5432/cameleer3 ENV SPRING_DATASOURCE_USERNAME=cameleer ENV SPRING_DATASOURCE_PASSWORD=cameleer_dev +ENV OPENSEARCH_URL=http://opensearch:9200 EXPOSE 8081 ENTRYPOINT exec java -jar /app/server.jar diff --git a/deploy/opensearch.yaml b/deploy/opensearch.yaml new file mode 100644 index 00000000..b2352ab2 --- /dev/null +++ b/deploy/opensearch.yaml @@ -0,0 +1,84 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: opensearch + namespace: cameleer +spec: + serviceName: opensearch + replicas: 1 + selector: + matchLabels: + app: opensearch + template: + metadata: + labels: + app: opensearch + spec: + containers: + - name: opensearch + image: opensearchproject/opensearch:2.19.0 + ports: + - containerPort: 9200 + name: http + - containerPort: 9300 + name: transport + env: + - name: discovery.type + value: single-node + - name: DISABLE_SECURITY_PLUGIN + value: "true" + volumeMounts: + - name: data + mountPath: /usr/share/opensearch/data + resources: + requests: + memory: "1Gi" + cpu: "200m" + limits: + memory: "4Gi" + cpu: "1000m" + livenessProbe: + exec: + command: + - sh + - -c + - curl -s http://localhost:9200/_cluster/health + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + readinessProbe: + exec: + command: + - sh + - -c + - curl -s http://localhost:9200/_cluster/health + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 10Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: opensearch + namespace: cameleer +spec: + clusterIP: None + selector: + app: opensearch + ports: + - port: 9200 + targetPort: 9200 + name: http + - port: 9300 + targetPort: 9300 + name: transport diff --git a/deploy/postgres.yaml b/deploy/postgres.yaml new file mode 100644 index 00000000..f0b6de9b --- /dev/null +++ b/deploy/postgres.yaml @@ -0,0 +1,91 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres + namespace: cameleer +spec: + serviceName: postgres + replicas: 1 + selector: + matchLabels: + app: postgres + template: + metadata: + labels: + app: postgres + spec: + containers: + - name: postgres + image: timescale/timescaledb:latest-pg16 + ports: + - containerPort: 5432 + name: postgres + env: + - name: POSTGRES_DB + value: cameleer3 + - name: POSTGRES_USER + valueFrom: + secretKeyRef: + name: postgres-credentials + key: POSTGRES_USER + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: postgres-credentials + key: POSTGRES_PASSWORD + volumeMounts: + - name: data + mountPath: /var/lib/postgresql/data + resources: + requests: + memory: "1Gi" + cpu: "200m" + limits: + memory: "4Gi" + cpu: "1000m" + livenessProbe: + exec: + command: + - pg_isready + - -U + - cameleer + - -d + - cameleer3 + initialDelaySeconds: 15 + periodSeconds: 10 + timeoutSeconds: 3 + failureThreshold: 3 + readinessProbe: + exec: + command: + - pg_isready + - -U + - cameleer + - -d + - cameleer3 + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 3 + failureThreshold: 3 + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 10Gi +--- +apiVersion: v1 +kind: Service +metadata: + name: postgres + namespace: cameleer +spec: + clusterIP: None + selector: + app: postgres + ports: + - port: 5432 + targetPort: 5432 + name: postgres diff --git a/deploy/server.yaml b/deploy/server.yaml index 34d3e6ae..42c92d76 100644 --- a/deploy/server.yaml +++ b/deploy/server.yaml @@ -22,17 +22,19 @@ spec: - containerPort: 8081 env: - name: SPRING_DATASOURCE_URL - value: "jdbc:ch://clickhouse:8123/cameleer3" + value: "jdbc:postgresql://postgres:5432/cameleer3" - name: SPRING_DATASOURCE_USERNAME valueFrom: secretKeyRef: - name: clickhouse-credentials - key: CLICKHOUSE_USER + name: postgres-credentials + key: POSTGRES_USER - name: SPRING_DATASOURCE_PASSWORD valueFrom: secretKeyRef: - name: clickhouse-credentials - key: CLICKHOUSE_PASSWORD + name: postgres-credentials + key: POSTGRES_PASSWORD + - name: OPENSEARCH_URL + value: "http://opensearch:9200" - name: CAMELEER_AUTH_TOKEN valueFrom: secretKeyRef: From cea16b38ed86284ca74f4bf802cc9dd234138d1c Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 19:00:20 +0100 Subject: [PATCH 18/32] ci: update workflow for PostgreSQL + OpenSearch deployment Replace ClickHouse credentials secret with postgres-credentials and opensearch-credentials secrets. Update deploy step to apply postgres.yaml and opensearch.yaml manifests instead of clickhouse.yaml, with appropriate rollout status checks for each StatefulSet. Co-Authored-By: Claude Sonnet 4.6 --- .gitea/workflows/ci.yml | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/.gitea/workflows/ci.yml b/.gitea/workflows/ci.yml index 00cc6508..e04fc6c8 100644 --- a/.gitea/workflows/ci.yml +++ b/.gitea/workflows/ci.yml @@ -161,10 +161,17 @@ jobs: --from-literal=CAMELEER_JWT_SECRET="${CAMELEER_JWT_SECRET}" \ --dry-run=client -o yaml | kubectl apply -f - - kubectl create secret generic clickhouse-credentials \ + kubectl create secret generic postgres-credentials \ --namespace=cameleer \ - --from-literal=CLICKHOUSE_USER="$CLICKHOUSE_USER" \ - --from-literal=CLICKHOUSE_PASSWORD="$CLICKHOUSE_PASSWORD" \ + --from-literal=POSTGRES_USER="$POSTGRES_USER" \ + --from-literal=POSTGRES_PASSWORD="$POSTGRES_PASSWORD" \ + --from-literal=POSTGRES_DB="${POSTGRES_DB:-cameleer}" \ + --dry-run=client -o yaml | kubectl apply -f - + + kubectl create secret generic opensearch-credentials \ + --namespace=cameleer \ + --from-literal=OPENSEARCH_USER="${OPENSEARCH_USER:-admin}" \ + --from-literal=OPENSEARCH_PASSWORD="$OPENSEARCH_PASSWORD" \ --dry-run=client -o yaml | kubectl apply -f - kubectl create secret generic authentik-credentials \ @@ -182,8 +189,11 @@ jobs: --from-literal=CAMELEER_OIDC_CLIENT_SECRET="${CAMELEER_OIDC_CLIENT_SECRET}" \ --dry-run=client -o yaml | kubectl apply -f - - kubectl apply -f deploy/clickhouse.yaml - kubectl -n cameleer rollout status statefulset/clickhouse --timeout=120s + kubectl apply -f deploy/postgres.yaml + kubectl -n cameleer rollout status statefulset/postgres --timeout=120s + + kubectl apply -f deploy/opensearch.yaml + kubectl -n cameleer rollout status statefulset/opensearch --timeout=180s kubectl apply -f deploy/authentik.yaml kubectl -n cameleer rollout status deployment/authentik-server --timeout=180s @@ -203,8 +213,11 @@ jobs: CAMELEER_JWT_SECRET: ${{ secrets.CAMELEER_JWT_SECRET }} CAMELEER_UI_USER: ${{ secrets.CAMELEER_UI_USER }} CAMELEER_UI_PASSWORD: ${{ secrets.CAMELEER_UI_PASSWORD }} - CLICKHOUSE_USER: ${{ secrets.CLICKHOUSE_USER }} - CLICKHOUSE_PASSWORD: ${{ secrets.CLICKHOUSE_PASSWORD }} + POSTGRES_USER: ${{ secrets.POSTGRES_USER }} + POSTGRES_PASSWORD: ${{ secrets.POSTGRES_PASSWORD }} + POSTGRES_DB: ${{ secrets.POSTGRES_DB }} + OPENSEARCH_USER: ${{ secrets.OPENSEARCH_USER }} + OPENSEARCH_PASSWORD: ${{ secrets.OPENSEARCH_PASSWORD }} AUTHENTIK_PG_USER: ${{ secrets.AUTHENTIK_PG_USER }} AUTHENTIK_PG_PASSWORD: ${{ secrets.AUTHENTIK_PG_PASSWORD }} AUTHENTIK_SECRET_KEY: ${{ secrets.AUTHENTIK_SECRET_KEY }} From ea687a342c8cda9679c75dff52e88d42853014f1 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 19:01:26 +0100 Subject: [PATCH 19/32] deploy: remove obsolete ClickHouse K8s manifest --- deploy/clickhouse.yaml | 107 ----------------------------------------- 1 file changed, 107 deletions(-) delete mode 100644 deploy/clickhouse.yaml diff --git a/deploy/clickhouse.yaml b/deploy/clickhouse.yaml deleted file mode 100644 index 2fe911d1..00000000 --- a/deploy/clickhouse.yaml +++ /dev/null @@ -1,107 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: clickhouse - namespace: cameleer -spec: - serviceName: clickhouse - replicas: 1 - selector: - matchLabels: - app: clickhouse - template: - metadata: - labels: - app: clickhouse - spec: - containers: - - name: clickhouse - image: clickhouse/clickhouse-server:25.3 - ports: - - containerPort: 8123 - name: http - - containerPort: 9000 - name: native - env: - - name: CLICKHOUSE_USER - valueFrom: - secretKeyRef: - name: clickhouse-credentials - key: CLICKHOUSE_USER - - name: CLICKHOUSE_PASSWORD - valueFrom: - secretKeyRef: - name: clickhouse-credentials - key: CLICKHOUSE_PASSWORD - - name: CLICKHOUSE_DB - value: cameleer3 - volumeMounts: - - name: data - mountPath: /var/lib/clickhouse - resources: - requests: - memory: "1Gi" - cpu: "200m" - limits: - memory: "4Gi" - cpu: "1000m" - livenessProbe: - httpGet: - path: /ping - port: 8123 - initialDelaySeconds: 15 - periodSeconds: 10 - timeoutSeconds: 3 - failureThreshold: 3 - readinessProbe: - httpGet: - path: /ping - port: 8123 - initialDelaySeconds: 5 - periodSeconds: 5 - timeoutSeconds: 3 - failureThreshold: 3 - volumeClaimTemplates: - - metadata: - name: data - spec: - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 2Gi ---- -apiVersion: v1 -kind: Service -metadata: - name: clickhouse - namespace: cameleer -spec: - clusterIP: None - selector: - app: clickhouse - ports: - - port: 8123 - targetPort: 8123 - name: http - - port: 9000 - targetPort: 9000 - name: native ---- -apiVersion: v1 -kind: Service -metadata: - name: clickhouse-external - namespace: cameleer -spec: - type: NodePort - selector: - app: clickhouse - ports: - - port: 8123 - targetPort: 8123 - nodePort: 30123 - name: http - - port: 9000 - targetPort: 9000 - nodePort: 30900 - name: native From 41e20381902beb18c7ffc3c0826e367b7cbcf74f Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 19:04:42 +0100 Subject: [PATCH 20/32] fix: use ChronoUnit for Instant arithmetic in PostgresStatsStoreIT --- .../com/cameleer3/server/app/storage/PostgresStatsStoreIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java index d3a1548f..efdbeea1 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java @@ -46,7 +46,7 @@ class PostgresStatsStoreIT extends AbstractPostgresIT { jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_all', null, null)"); - StatsTimeseries ts = statsStore.timeseries(now.minusMinutes(1), now.plusMinutes(10), 5); + StatsTimeseries ts = statsStore.timeseries(now.minus(1, ChronoUnit.MINUTES), now.plus(10, ChronoUnit.MINUTES), 5); assertNotNull(ts); assertFalse(ts.buckets().isEmpty()); } From 589da1b6d66074f8962534cfb51d4c3f559391bc Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 19:06:54 +0100 Subject: [PATCH 21/32] fix: use asCompatibleSubstituteFor for TimescaleDB Testcontainer image --- .../java/com/cameleer3/server/app/AbstractPostgresIT.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java index 490e20a9..ac033649 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java @@ -8,14 +8,19 @@ import org.springframework.test.context.DynamicPropertySource; import org.testcontainers.containers.PostgreSQLContainer; import org.testcontainers.junit.jupiter.Container; import org.testcontainers.junit.jupiter.Testcontainers; +import org.testcontainers.utility.DockerImageName; @SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT) @Testcontainers public abstract class AbstractPostgresIT { + private static final DockerImageName TIMESCALEDB_IMAGE = + DockerImageName.parse("timescale/timescaledb:latest-pg16") + .asCompatibleSubstituteFor("postgres"); + @Container static final PostgreSQLContainer postgres = - new PostgreSQLContainer<>("timescale/timescaledb:latest-pg16") + new PostgreSQLContainer<>(TIMESCALEDB_IMAGE) .withDatabaseName("cameleer3") .withUsername("cameleer") .withPassword("test"); From 3c0e615fb746fcc8aee676a59a2abc96f3a94186 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 19:13:47 +0100 Subject: [PATCH 22/32] fix: use timescaledb-ha image which includes toolkit extension --- .../test/java/com/cameleer3/server/app/AbstractPostgresIT.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java index ac033649..05b90e88 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java @@ -15,7 +15,7 @@ import org.testcontainers.utility.DockerImageName; public abstract class AbstractPostgresIT { private static final DockerImageName TIMESCALEDB_IMAGE = - DockerImageName.parse("timescale/timescaledb:latest-pg16") + DockerImageName.parse("timescale/timescaledb-ha:pg16") .asCompatibleSubstituteFor("postgres"); @Container From 2634f60e59b065edc9c76f41ca91069c1a3ffd65 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 19:14:15 +0100 Subject: [PATCH 23/32] fix: use timescaledb-ha image in K8s manifest for toolkit support --- deploy/postgres.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/postgres.yaml b/deploy/postgres.yaml index f0b6de9b..8a4f64e6 100644 --- a/deploy/postgres.yaml +++ b/deploy/postgres.yaml @@ -16,7 +16,7 @@ spec: spec: containers: - name: postgres - image: timescale/timescaledb:latest-pg16 + image: timescale/timescaledb-ha:pg16 ports: - containerPort: 5432 name: postgres From 0723f48e5b4e3abded7b2a00f57d7aeb72c16f6f Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 19:24:12 +0100 Subject: [PATCH 24/32] fix: disable Flyway transaction for continuous aggregate migration --- .../main/resources/db/migration/V8__continuous_aggregates.sql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql b/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql index 6eb5754e..1161bf3a 100644 --- a/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql +++ b/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql @@ -1,3 +1,5 @@ +-- flyway:executeInTransaction=false + -- Global stats CREATE MATERIALIZED VIEW stats_1m_all WITH (timescaledb.continuous) AS From af03ecdf42fbe30551e523f42ff41f067205ac37 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 19:32:54 +0100 Subject: [PATCH 25/32] fix: use WITH NO DATA for continuous aggregates to avoid transaction block error --- .../migration/V8__continuous_aggregates.sql | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql b/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql index 1161bf3a..056ba07c 100644 --- a/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql +++ b/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql @@ -1,8 +1,6 @@ --- flyway:executeInTransaction=false - -- Global stats CREATE MATERIALIZED VIEW stats_1m_all -WITH (timescaledb.continuous) AS +WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('1 minute', start_time) AS bucket, COUNT(*) AS total_count, @@ -13,7 +11,8 @@ SELECT approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration FROM executions WHERE status IS NOT NULL -GROUP BY bucket; +GROUP BY bucket +WITH NO DATA; SELECT add_continuous_aggregate_policy('stats_1m_all', start_offset => INTERVAL '1 hour', @@ -22,7 +21,7 @@ SELECT add_continuous_aggregate_policy('stats_1m_all', -- Per-application stats CREATE MATERIALIZED VIEW stats_1m_app -WITH (timescaledb.continuous) AS +WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('1 minute', start_time) AS bucket, group_name, @@ -34,7 +33,8 @@ SELECT approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration FROM executions WHERE status IS NOT NULL -GROUP BY bucket, group_name; +GROUP BY bucket, group_name +WITH NO DATA; SELECT add_continuous_aggregate_policy('stats_1m_app', start_offset => INTERVAL '1 hour', @@ -43,7 +43,7 @@ SELECT add_continuous_aggregate_policy('stats_1m_app', -- Per-route stats CREATE MATERIALIZED VIEW stats_1m_route -WITH (timescaledb.continuous) AS +WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('1 minute', start_time) AS bucket, group_name, @@ -56,7 +56,8 @@ SELECT approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration FROM executions WHERE status IS NOT NULL -GROUP BY bucket, group_name, route_id; +GROUP BY bucket, group_name, route_id +WITH NO DATA; SELECT add_continuous_aggregate_policy('stats_1m_route', start_offset => INTERVAL '1 hour', @@ -65,7 +66,7 @@ SELECT add_continuous_aggregate_policy('stats_1m_route', -- Per-processor stats (uses denormalized group_name/route_id on processor_executions) CREATE MATERIALIZED VIEW stats_1m_processor -WITH (timescaledb.continuous) AS +WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS SELECT time_bucket('1 minute', start_time) AS bucket, group_name, @@ -77,7 +78,8 @@ SELECT MAX(duration_ms) AS duration_max, approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration FROM processor_executions -GROUP BY bucket, group_name, route_id, processor_type; +GROUP BY bucket, group_name, route_id, processor_type +WITH NO DATA; SELECT add_continuous_aggregate_policy('stats_1m_processor', start_offset => INTERVAL '1 hour', From 39f9925e71a23abf185b960cc1dd4bd48ba916fb Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 19:41:05 +0100 Subject: [PATCH 26/32] fix: restore test config (bootstrap token, ingestion, agent-registry) and add @ActiveProfiles --- .../cameleer3/server/app/AbstractPostgresIT.java | 2 ++ .../src/test/resources/application-test.yml | 13 +++++++++++++ 2 files changed, 15 insertions(+) diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java index 05b90e88..b912bc1b 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java @@ -3,6 +3,7 @@ package com.cameleer3.server.app; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.test.context.ActiveProfiles; import org.springframework.test.context.DynamicPropertyRegistry; import org.springframework.test.context.DynamicPropertySource; import org.testcontainers.containers.PostgreSQLContainer; @@ -11,6 +12,7 @@ import org.testcontainers.junit.jupiter.Testcontainers; import org.testcontainers.utility.DockerImageName; @SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT) +@ActiveProfiles("test") @Testcontainers public abstract class AbstractPostgresIT { diff --git a/cameleer3-server-app/src/test/resources/application-test.yml b/cameleer3-server-app/src/test/resources/application-test.yml index b450bee4..f821e9ff 100644 --- a/cameleer3-server-app/src/test/resources/application-test.yml +++ b/cameleer3-server-app/src/test/resources/application-test.yml @@ -1,5 +1,18 @@ spring: flyway: enabled: true + opensearch: url: http://localhost:9200 + +ingestion: + buffer-capacity: 100 + batch-size: 10 + flush-interval-ms: 100 + +agent-registry: + ping-interval-ms: 1000 + +security: + bootstrap-token: test-bootstrap-token + bootstrap-token-previous: old-bootstrap-token From 9f74e47ecfec1c1f98c0d4cd78a0ad3ff1c16dc2 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 20:03:38 +0100 Subject: [PATCH 27/32] fix: use correct role-based JWT tokens in all integration tests Co-Authored-By: Claude Opus 4.6 (1M context) --- .../server/app/TestSecurityHelper.java | 32 +++++++++++++++++-- .../controller/AgentCommandControllerIT.java | 20 ++++++------ .../AgentRegistrationControllerIT.java | 8 +++-- .../app/controller/AgentSseControllerIT.java | 4 ++- .../app/controller/DetailControllerIT.java | 4 ++- .../controller/DiagramRenderControllerIT.java | 10 +++--- .../app/controller/SearchControllerIT.java | 4 ++- .../server/app/security/JwtRefreshIT.java | 4 +-- .../app/security/RegistrationSecurityIT.java | 4 +-- .../server/app/security/SecurityFilterIT.java | 4 ++- .../server/app/security/SseSigningIT.java | 10 ++++-- 11 files changed, 76 insertions(+), 28 deletions(-) diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/TestSecurityHelper.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/TestSecurityHelper.java index 9867cd7a..97df1b83 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/TestSecurityHelper.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/TestSecurityHelper.java @@ -27,11 +27,39 @@ public class TestSecurityHelper { } /** - * Registers a test agent and returns a valid JWT access token for it. + * Registers a test agent and returns a valid JWT access token with AGENT role. */ public String registerTestAgent(String agentId) { agentRegistryService.register(agentId, "test", "test-group", "1.0", List.of(), Map.of()); - return jwtService.createAccessToken(agentId, "test-group"); + return jwtService.createAccessToken(agentId, "test-group", List.of("AGENT")); + } + + /** + * Returns a valid JWT access token with the given roles (no agent registration). + */ + public String createToken(String subject, String group, List roles) { + return jwtService.createAccessToken(subject, group, roles); + } + + /** + * Returns a valid JWT access token with OPERATOR role. + */ + public String operatorToken() { + return jwtService.createAccessToken("test-operator", "user", List.of("OPERATOR")); + } + + /** + * Returns a valid JWT access token with ADMIN role. + */ + public String adminToken() { + return jwtService.createAccessToken("test-admin", "user", List.of("ADMIN")); + } + + /** + * Returns a valid JWT access token with VIEWER role. + */ + public String viewerToken() { + return jwtService.createAccessToken("test-viewer", "user", List.of("VIEWER")); } /** diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentCommandControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentCommandControllerIT.java index 4ba36c5d..b6d791d7 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentCommandControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentCommandControllerIT.java @@ -29,11 +29,13 @@ class AgentCommandControllerIT extends AbstractPostgresIT { @Autowired private TestSecurityHelper securityHelper; - private String jwt; + private String agentJwt; + private String operatorJwt; @BeforeEach void setUp() { - jwt = securityHelper.registerTestAgent("test-agent-command-it"); + agentJwt = securityHelper.registerTestAgent("test-agent-command-it"); + operatorJwt = securityHelper.operatorToken(); } private ResponseEntity registerAgent(String agentId, String name, String group) { @@ -65,7 +67,7 @@ class AgentCommandControllerIT extends AbstractPostgresIT { ResponseEntity response = restTemplate.postForEntity( "/api/v1/agents/" + agentId + "/commands", - new HttpEntity<>(commandJson, securityHelper.authHeaders(jwt)), + new HttpEntity<>(commandJson, securityHelper.authHeaders(operatorJwt)), String.class); assertThat(response.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED); @@ -88,7 +90,7 @@ class AgentCommandControllerIT extends AbstractPostgresIT { ResponseEntity response = restTemplate.postForEntity( "/api/v1/agents/groups/" + group + "/commands", - new HttpEntity<>(commandJson, securityHelper.authHeaders(jwt)), + new HttpEntity<>(commandJson, securityHelper.authHeaders(operatorJwt)), String.class); assertThat(response.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED); @@ -110,7 +112,7 @@ class AgentCommandControllerIT extends AbstractPostgresIT { ResponseEntity response = restTemplate.postForEntity( "/api/v1/agents/commands", - new HttpEntity<>(commandJson, securityHelper.authHeaders(jwt)), + new HttpEntity<>(commandJson, securityHelper.authHeaders(operatorJwt)), String.class); assertThat(response.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED); @@ -131,7 +133,7 @@ class AgentCommandControllerIT extends AbstractPostgresIT { ResponseEntity cmdResponse = restTemplate.postForEntity( "/api/v1/agents/" + agentId + "/commands", - new HttpEntity<>(commandJson, securityHelper.authHeaders(jwt)), + new HttpEntity<>(commandJson, securityHelper.authHeaders(operatorJwt)), String.class); JsonNode cmdBody = objectMapper.readTree(cmdResponse.getBody()); @@ -140,7 +142,7 @@ class AgentCommandControllerIT extends AbstractPostgresIT { ResponseEntity ackResponse = restTemplate.exchange( "/api/v1/agents/" + agentId + "/commands/" + commandId + "/ack", HttpMethod.POST, - new HttpEntity<>(securityHelper.authHeadersNoBody(jwt)), + new HttpEntity<>(securityHelper.authHeadersNoBody(agentJwt)), Void.class); assertThat(ackResponse.getStatusCode()).isEqualTo(HttpStatus.OK); @@ -154,7 +156,7 @@ class AgentCommandControllerIT extends AbstractPostgresIT { ResponseEntity response = restTemplate.exchange( "/api/v1/agents/" + agentId + "/commands/nonexistent-cmd-id/ack", HttpMethod.POST, - new HttpEntity<>(securityHelper.authHeadersNoBody(jwt)), + new HttpEntity<>(securityHelper.authHeadersNoBody(agentJwt)), Void.class); assertThat(response.getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND); @@ -168,7 +170,7 @@ class AgentCommandControllerIT extends AbstractPostgresIT { ResponseEntity response = restTemplate.postForEntity( "/api/v1/agents/nonexistent-agent-xyz/commands", - new HttpEntity<>(commandJson, securityHelper.authHeaders(jwt)), + new HttpEntity<>(commandJson, securityHelper.authHeaders(operatorJwt)), String.class); assertThat(response.getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentRegistrationControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentRegistrationControllerIT.java index 763646b9..12cbf02e 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentRegistrationControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentRegistrationControllerIT.java @@ -28,10 +28,12 @@ class AgentRegistrationControllerIT extends AbstractPostgresIT { private TestSecurityHelper securityHelper; private String jwt; + private String viewerJwt; @BeforeEach void setUp() { jwt = securityHelper.registerTestAgent("test-agent-registration-it"); + viewerJwt = securityHelper.viewerToken(); } private ResponseEntity registerAgent(String agentId, String name) { @@ -114,7 +116,7 @@ class AgentRegistrationControllerIT extends AbstractPostgresIT { ResponseEntity response = restTemplate.exchange( "/api/v1/agents", HttpMethod.GET, - new HttpEntity<>(securityHelper.authHeadersNoBody(jwt)), + new HttpEntity<>(securityHelper.authHeadersNoBody(viewerJwt)), String.class); assertThat(response.getStatusCode()).isEqualTo(HttpStatus.OK); @@ -131,7 +133,7 @@ class AgentRegistrationControllerIT extends AbstractPostgresIT { ResponseEntity response = restTemplate.exchange( "/api/v1/agents?status=LIVE", HttpMethod.GET, - new HttpEntity<>(securityHelper.authHeadersNoBody(jwt)), + new HttpEntity<>(securityHelper.authHeadersNoBody(viewerJwt)), String.class); assertThat(response.getStatusCode()).isEqualTo(HttpStatus.OK); @@ -148,7 +150,7 @@ class AgentRegistrationControllerIT extends AbstractPostgresIT { ResponseEntity response = restTemplate.exchange( "/api/v1/agents?status=INVALID", HttpMethod.GET, - new HttpEntity<>(securityHelper.authHeadersNoBody(jwt)), + new HttpEntity<>(securityHelper.authHeadersNoBody(viewerJwt)), String.class); assertThat(response.getStatusCode()).isEqualTo(HttpStatus.BAD_REQUEST); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentSseControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentSseControllerIT.java index fddc7152..78a3743f 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentSseControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentSseControllerIT.java @@ -45,10 +45,12 @@ class AgentSseControllerIT extends AbstractPostgresIT { private int port; private String jwt; + private String operatorJwt; @BeforeEach void setUp() { jwt = securityHelper.registerTestAgent("test-agent-sse-it"); + operatorJwt = securityHelper.operatorToken(); } private ResponseEntity registerAgent(String agentId, String name, String group) { @@ -76,7 +78,7 @@ class AgentSseControllerIT extends AbstractPostgresIT { return restTemplate.postForEntity( "/api/v1/agents/" + agentId + "/commands", - new HttpEntity<>(json, securityHelper.authHeaders(jwt)), + new HttpEntity<>(json, securityHelper.authHeaders(operatorJwt)), String.class); } diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java index 83fa17b1..f0cef246 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java @@ -34,6 +34,7 @@ class DetailControllerIT extends AbstractPostgresIT { private final ObjectMapper objectMapper = new ObjectMapper(); private String jwt; + private String viewerJwt; private String seededExecutionId; /** @@ -43,6 +44,7 @@ class DetailControllerIT extends AbstractPostgresIT { @BeforeAll void seedTestData() { jwt = securityHelper.registerTestAgent("test-agent-detail-it"); + viewerJwt = securityHelper.viewerToken(); String json = """ { @@ -217,7 +219,7 @@ class DetailControllerIT extends AbstractPostgresIT { } private ResponseEntity detailGet(String path) { - HttpHeaders headers = securityHelper.authHeadersNoBody(jwt); + HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt); return restTemplate.exchange( "/api/v1/executions" + path, HttpMethod.GET, diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramRenderControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramRenderControllerIT.java index af0b8668..416dc78c 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramRenderControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramRenderControllerIT.java @@ -29,6 +29,7 @@ class DiagramRenderControllerIT extends AbstractPostgresIT { private TestSecurityHelper securityHelper; private String jwt; + private String viewerJwt; private String contentHash; /** @@ -37,6 +38,7 @@ class DiagramRenderControllerIT extends AbstractPostgresIT { @BeforeEach void seedDiagram() { jwt = securityHelper.registerTestAgent("test-agent-diagram-render-it"); + viewerJwt = securityHelper.viewerToken(); String json = """ { @@ -73,7 +75,7 @@ class DiagramRenderControllerIT extends AbstractPostgresIT { @Test void getSvg_withAcceptHeader_returnsSvg() { - HttpHeaders headers = securityHelper.authHeadersNoBody(jwt); + HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt); headers.set("Accept", "image/svg+xml"); ResponseEntity response = restTemplate.exchange( @@ -90,7 +92,7 @@ class DiagramRenderControllerIT extends AbstractPostgresIT { @Test void getJson_withAcceptHeader_returnsJson() { - HttpHeaders headers = securityHelper.authHeadersNoBody(jwt); + HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt); headers.set("Accept", "application/json"); ResponseEntity response = restTemplate.exchange( @@ -107,7 +109,7 @@ class DiagramRenderControllerIT extends AbstractPostgresIT { @Test void getNonExistentHash_returns404() { - HttpHeaders headers = securityHelper.authHeadersNoBody(jwt); + HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt); headers.set("Accept", "image/svg+xml"); ResponseEntity response = restTemplate.exchange( @@ -122,7 +124,7 @@ class DiagramRenderControllerIT extends AbstractPostgresIT { @Test void getWithNoAcceptHeader_defaultsToSvg() { - HttpHeaders headers = securityHelper.authHeadersNoBody(jwt); + HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt); ResponseEntity response = restTemplate.exchange( "/api/v1/diagrams/{hash}/render", diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java index 95f42b2a..439bfa5a 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java @@ -35,6 +35,7 @@ class SearchControllerIT extends AbstractPostgresIT { private final ObjectMapper objectMapper = new ObjectMapper(); private String jwt; + private String viewerJwt; /** * Seed test data: Insert executions with varying statuses, times, durations, @@ -43,6 +44,7 @@ class SearchControllerIT extends AbstractPostgresIT { @BeforeAll void seedTestData() { jwt = securityHelper.registerTestAgent("test-agent-search-it"); + viewerJwt = securityHelper.viewerToken(); // Execution 1: COMPLETED, short duration, no errors ingest(""" @@ -376,7 +378,7 @@ class SearchControllerIT extends AbstractPostgresIT { return restTemplate.exchange( "/api/v1/search/executions", HttpMethod.POST, - new HttpEntity<>(jsonBody, securityHelper.authHeaders(jwt)), + new HttpEntity<>(jsonBody, securityHelper.authHeaders(viewerJwt)), String.class); } } diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/JwtRefreshIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/JwtRefreshIT.java index 87ddf25e..af033318 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/JwtRefreshIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/JwtRefreshIT.java @@ -153,13 +153,13 @@ class JwtRefreshIT extends AbstractPostgresIT { JsonNode refreshBody2 = objectMapper.readTree(refreshResponse.getBody()); String newAccessToken = refreshBody2.get("accessToken").asText(); - // Use the new access token to hit a protected endpoint + // Use the new access token to hit a protected endpoint accessible by AGENT role HttpHeaders authHeaders = new HttpHeaders(); authHeaders.set("Authorization", "Bearer " + newAccessToken); authHeaders.set("X-Cameleer-Protocol-Version", "1"); ResponseEntity response = restTemplate.exchange( - "/api/v1/agents", + "/api/v1/search/executions", HttpMethod.GET, new HttpEntity<>(authHeaders), String.class); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/RegistrationSecurityIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/RegistrationSecurityIT.java index e4ee5da4..54c17e71 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/RegistrationSecurityIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/RegistrationSecurityIT.java @@ -81,13 +81,13 @@ class RegistrationSecurityIT extends AbstractPostgresIT { JsonNode regBody = objectMapper.readTree(regResponse.getBody()); String accessToken = regBody.get("accessToken").asText(); - // Use the access token to hit a protected endpoint + // Use the access token to hit a protected endpoint accessible by AGENT role HttpHeaders headers = new HttpHeaders(); headers.set("Authorization", "Bearer " + accessToken); headers.set("X-Cameleer-Protocol-Version", "1"); ResponseEntity response = restTemplate.exchange( - "/api/v1/agents", + "/api/v1/search/executions", HttpMethod.GET, new HttpEntity<>(headers), String.class); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SecurityFilterIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SecurityFilterIT.java index ba8dfcbb..a55c7190 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SecurityFilterIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SecurityFilterIT.java @@ -28,10 +28,12 @@ class SecurityFilterIT extends AbstractPostgresIT { private TestSecurityHelper securityHelper; private String jwt; + private String viewerJwt; @BeforeEach void setUp() { jwt = securityHelper.registerTestAgent("test-agent-security-filter-it"); + viewerJwt = securityHelper.viewerToken(); } @Test @@ -53,7 +55,7 @@ class SecurityFilterIT extends AbstractPostgresIT { ResponseEntity response = restTemplate.exchange( "/api/v1/agents", HttpMethod.GET, - new HttpEntity<>(securityHelper.authHeadersNoBody(jwt)), + new HttpEntity<>(securityHelper.authHeadersNoBody(viewerJwt)), String.class); assertThat(response.getStatusCode()).isEqualTo(HttpStatus.OK); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SseSigningIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SseSigningIT.java index d611520b..11e0ed6b 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SseSigningIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SseSigningIT.java @@ -1,6 +1,7 @@ package com.cameleer3.server.app.security; import com.cameleer3.server.app.AbstractPostgresIT; +import com.cameleer3.server.app.TestSecurityHelper; import com.cameleer3.server.core.security.Ed25519SigningService; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; @@ -52,6 +53,9 @@ class SseSigningIT extends AbstractPostgresIT { @Autowired private ObjectMapper objectMapper; + @Autowired + private TestSecurityHelper securityHelper; + @Autowired private Ed25519SigningService ed25519SigningService; @@ -165,6 +169,7 @@ class SseSigningIT extends AbstractPostgresIT { String agentId = "sse-sign-it-" + UUID.randomUUID().toString().substring(0, 8); JsonNode registration = registerAgentWithAuth(agentId); String accessToken = registration.get("accessToken").asText(); + String operatorToken = securityHelper.operatorToken(); String serverPublicKey = registration.get("serverPublicKey").asText(); SseStream stream = openSseStream(agentId, accessToken); @@ -177,7 +182,7 @@ class SseSigningIT extends AbstractPostgresIT { await().atMost(10, TimeUnit.SECONDS).pollInterval(200, TimeUnit.MILLISECONDS) .ignoreExceptions() .until(() -> { - sendCommand(agentId, "config-update", originalPayload, accessToken); + sendCommand(agentId, "config-update", originalPayload, operatorToken); List lines = stream.snapshot(); return lines.stream().anyMatch(l -> l.contains("event:config-update")); }); @@ -221,6 +226,7 @@ class SseSigningIT extends AbstractPostgresIT { String agentId = "sse-sign-trace-" + UUID.randomUUID().toString().substring(0, 8); JsonNode registration = registerAgentWithAuth(agentId); String accessToken = registration.get("accessToken").asText(); + String operatorToken = securityHelper.operatorToken(); String serverPublicKey = registration.get("serverPublicKey").asText(); SseStream stream = openSseStream(agentId, accessToken); @@ -232,7 +238,7 @@ class SseSigningIT extends AbstractPostgresIT { await().atMost(10, TimeUnit.SECONDS).pollInterval(200, TimeUnit.MILLISECONDS) .ignoreExceptions() .until(() -> { - sendCommand(agentId, "deep-trace", originalPayload, accessToken); + sendCommand(agentId, "deep-trace", originalPayload, operatorToken); List lines = stream.snapshot(); return lines.stream().anyMatch(l -> l.contains("event:deep-trace")); }); From 288c7a86b550723cc1f5bff7b2b59de1093207de Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 20:04:27 +0100 Subject: [PATCH 28/32] chore: add docker-compose.dev.yml for local PostgreSQL + OpenSearch --- docker-compose.dev.yml | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 docker-compose.dev.yml diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml new file mode 100644 index 00000000..1e6ecb58 --- /dev/null +++ b/docker-compose.dev.yml @@ -0,0 +1,29 @@ +version: '3.8' + +services: + postgres: + image: timescale/timescaledb-ha:pg16 + ports: + - "5432:5432" + environment: + POSTGRES_DB: cameleer3 + POSTGRES_USER: cameleer + POSTGRES_PASSWORD: cameleer_dev + volumes: + - pgdata:/home/postgres/pgdata/data + + opensearch: + image: opensearchproject/opensearch:2.19.0 + ports: + - "9200:9200" + - "9300:9300" + environment: + discovery.type: single-node + DISABLE_SECURITY_PLUGIN: "true" + OPENSEARCH_JAVA_OPTS: "-Xms512m -Xmx512m" + volumes: + - osdata:/usr/share/opensearch/data + +volumes: + pgdata: + osdata: From d23b899f002fb1821c246c8c098338a7eb8b4495 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 21:01:57 +0100 Subject: [PATCH 29/32] fix: prefix user tokens with 'user:' for JwtAuthenticationFilter routing --- .../java/com/cameleer3/server/app/TestSecurityHelper.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/TestSecurityHelper.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/TestSecurityHelper.java index 97df1b83..bafe8d0a 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/TestSecurityHelper.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/TestSecurityHelper.java @@ -45,21 +45,22 @@ public class TestSecurityHelper { * Returns a valid JWT access token with OPERATOR role. */ public String operatorToken() { - return jwtService.createAccessToken("test-operator", "user", List.of("OPERATOR")); + // Subject must start with "user:" for JwtAuthenticationFilter to treat it as a UI user token + return jwtService.createAccessToken("user:test-operator", "user", List.of("OPERATOR")); } /** * Returns a valid JWT access token with ADMIN role. */ public String adminToken() { - return jwtService.createAccessToken("test-admin", "user", List.of("ADMIN")); + return jwtService.createAccessToken("user:test-admin", "user", List.of("ADMIN")); } /** * Returns a valid JWT access token with VIEWER role. */ public String viewerToken() { - return jwtService.createAccessToken("test-viewer", "user", List.of("VIEWER")); + return jwtService.createAccessToken("user:test-viewer", "user", List.of("VIEWER")); } /** From 26f5a2ce3b0bd6cd443762b912213aa0c5b7a191 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Mon, 16 Mar 2026 22:03:29 +0100 Subject: [PATCH 30/32] fix: update remaining ITs for synchronous ingestion and PostgreSQL storage - SearchControllerIT: remove @TestInstance(PER_CLASS), use @BeforeEach with static guard, fix table name (route_executions -> executions), remove Awaitility polling - OpenSearchIndexIT: replace Thread.sleep with explicit index refresh via OpenSearchClient - DiagramLinkingIT: fix table name, remove Awaitility awaits (writes are synchronous) - IngestionSchemaIT: rewrite queries for PostgreSQL relational model (processor_executions table instead of ClickHouse array columns) - PostgresStatsStoreIT: use explicit time bounds in refresh_continuous_aggregate calls - IngestionService: populate diagramContentHash during execution ingestion by looking up the latest diagram for the route+agent Co-Authored-By: Claude Opus 4.6 (1M context) --- .../app/controller/SearchControllerIT.java | 27 ++-- .../server/app/search/OpenSearchIndexIT.java | 13 +- .../server/app/storage/DiagramLinkingIT.java | 44 +++--- .../server/app/storage/IngestionSchemaIT.java | 145 +++++++----------- .../app/storage/PostgresStatsStoreIT.java | 4 +- .../core/ingestion/IngestionService.java | 5 +- 6 files changed, 102 insertions(+), 136 deletions(-) diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java index 439bfa5a..dfcbe9a9 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java @@ -4,9 +4,8 @@ import com.cameleer3.server.app.AbstractPostgresIT; import com.cameleer3.server.app.TestSecurityHelper; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; -import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.TestInstance; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.web.client.TestRestTemplate; import org.springframework.http.HttpEntity; @@ -15,15 +14,12 @@ import org.springframework.http.HttpMethod; import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; -import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; /** * Integration tests for the search controller endpoints. * Tests all filter types independently and in combination. */ -@TestInstance(TestInstance.Lifecycle.PER_CLASS) class SearchControllerIT extends AbstractPostgresIT { @Autowired @@ -34,15 +30,18 @@ class SearchControllerIT extends AbstractPostgresIT { private final ObjectMapper objectMapper = new ObjectMapper(); - private String jwt; - private String viewerJwt; + private static String jwt; + private static String viewerJwt; + private static boolean seeded; /** * Seed test data: Insert executions with varying statuses, times, durations, * correlationIds, error messages, and exchange snapshot data. */ - @BeforeAll + @BeforeEach void seedTestData() { + if (seeded) return; + seeded = true; jwt = securityHelper.registerTestAgent("test-agent-search-it"); viewerJwt = securityHelper.viewerToken(); @@ -154,13 +153,11 @@ class SearchControllerIT extends AbstractPostgresIT { """, i, i, i, i, i)); } - // Wait for all data to flush - await().atMost(10, SECONDS).untilAsserted(() -> { - Integer count = jdbcTemplate.queryForObject( - "SELECT count(*) FROM route_executions WHERE route_id LIKE 'search-route-%'", - Integer.class); - assertThat(count).isEqualTo(10); - }); + // Verify all data is available (synchronous writes) + Integer count = jdbcTemplate.queryForObject( + "SELECT count(*) FROM executions WHERE route_id LIKE 'search-route-%'", + Integer.class); + assertThat(count).isEqualTo(10); } @Test diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java index 2194ecb4..7c8635ac 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java @@ -8,6 +8,8 @@ import com.cameleer3.server.core.storage.SearchIndex; import com.cameleer3.server.core.storage.model.ExecutionDocument; import com.cameleer3.server.core.storage.model.ExecutionDocument.ProcessorDoc; import org.junit.jupiter.api.Test; +import org.opensearch.client.opensearch.OpenSearchClient; +import org.opensearch.client.opensearch.indices.RefreshRequest; import org.opensearch.testcontainers.OpensearchContainer; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.test.context.DynamicPropertyRegistry; @@ -34,6 +36,9 @@ class OpenSearchIndexIT extends AbstractPostgresIT { @Autowired SearchIndex searchIndex; + @Autowired + OpenSearchClient openSearchClient; + @Test void indexAndSearchByText() throws Exception { Instant now = Instant.now(); @@ -46,7 +51,7 @@ class OpenSearchIndexIT extends AbstractPostgresIT { null, null, "request body with customer-99", null, null, null))); searchIndex.index(doc); - Thread.sleep(1500); // Allow OpenSearch refresh + refreshOpenSearchIndices(); SearchRequest request = new SearchRequest( null, now.minusSeconds(60), now.plusSeconds(60), @@ -71,7 +76,7 @@ class OpenSearchIndexIT extends AbstractPostgresIT { null, null, "UniquePayloadIdentifier12345", null, null, null))); searchIndex.index(doc); - Thread.sleep(1500); + refreshOpenSearchIndices(); SearchRequest request = new SearchRequest( null, now.minusSeconds(60), now.plusSeconds(60), @@ -83,4 +88,8 @@ class OpenSearchIndexIT extends AbstractPostgresIT { SearchResult result = searchIndex.search(request); assertTrue(result.total() > 0); } + + private void refreshOpenSearchIndices() throws Exception { + openSearchClient.indices().refresh(RefreshRequest.of(r -> r.index("executions-*"))); + } } diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/DiagramLinkingIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/DiagramLinkingIT.java index ab0f01c3..7805b133 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/DiagramLinkingIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/DiagramLinkingIT.java @@ -11,9 +11,7 @@ import org.springframework.http.HttpHeaders; import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; -import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; /** * Integration test proving that diagram_content_hash is populated during @@ -59,12 +57,10 @@ class DiagramLinkingIT extends AbstractPostgresIT { String.class); assertThat(diagramResponse.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED); - await().atMost(10, SECONDS).untilAsserted(() -> { - String hash = jdbcTemplate.queryForObject( - "SELECT content_hash FROM route_diagrams WHERE route_id = 'diagram-link-route' LIMIT 1", - String.class); - assertThat(hash).isNotNull().isNotEmpty(); - }); + String diagramHash = jdbcTemplate.queryForObject( + "SELECT content_hash FROM route_diagrams WHERE route_id = 'diagram-link-route' LIMIT 1", + String.class); + assertThat(diagramHash).isNotNull().isNotEmpty(); String executionJson = """ { @@ -95,16 +91,14 @@ class DiagramLinkingIT extends AbstractPostgresIT { String.class); assertThat(execResponse.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED); - await().atMost(10, SECONDS).ignoreExceptions().untilAsserted(() -> { - String hash = jdbcTemplate.queryForObject( - "SELECT diagram_content_hash FROM route_executions WHERE route_id = 'diagram-link-route'", - String.class); - assertThat(hash) - .isNotNull() - .isNotEmpty() - .hasSize(64) - .matches("[a-f0-9]{64}"); - }); + String hash = jdbcTemplate.queryForObject( + "SELECT diagram_content_hash FROM executions WHERE route_id = 'diagram-link-route'", + String.class); + assertThat(hash) + .isNotNull() + .isNotEmpty() + .hasSize(64) + .matches("[a-f0-9]{64}"); } @Test @@ -138,13 +132,11 @@ class DiagramLinkingIT extends AbstractPostgresIT { String.class); assertThat(response.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED); - await().atMost(10, SECONDS).ignoreExceptions().untilAsserted(() -> { - String hash = jdbcTemplate.queryForObject( - "SELECT diagram_content_hash FROM route_executions WHERE route_id = 'no-diagram-route'", - String.class); - assertThat(hash) - .isNotNull() - .isEmpty(); - }); + String hash = jdbcTemplate.queryForObject( + "SELECT diagram_content_hash FROM executions WHERE route_id = 'no-diagram-route'", + String.class); + assertThat(hash) + .isNotNull() + .isEmpty(); } } diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/IngestionSchemaIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/IngestionSchemaIT.java index 4cfa8247..13cf60c8 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/IngestionSchemaIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/IngestionSchemaIT.java @@ -11,15 +11,13 @@ import org.springframework.http.HttpHeaders; import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; -import java.util.Arrays; import java.util.List; +import java.util.Map; -import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; -import static org.awaitility.Awaitility.await; /** - * Integration test verifying that Phase 2 schema columns are correctly populated + * Integration test verifying that processor execution data is correctly populated * during ingestion of route executions with nested processors and exchange data. */ class IngestionSchemaIT extends AbstractPostgresIT { @@ -39,7 +37,7 @@ class IngestionSchemaIT extends AbstractPostgresIT { } @Test - void processorTreeMetadata_depthsAndParentIndexesCorrect() { + void processorTreeMetadata_depthsAndParentIdsCorrect() { String json = """ { "routeId": "schema-test-tree", @@ -94,44 +92,46 @@ class IngestionSchemaIT extends AbstractPostgresIT { postExecution(json); - await().atMost(30, SECONDS).ignoreExceptions().untilAsserted(() -> { - var depths = queryArray( - "SELECT processor_depths FROM route_executions WHERE route_id = 'schema-test-tree'"); - assertThat(depths).containsExactly("0", "1", "2"); + // Verify execution row exists + Integer execCount = jdbcTemplate.queryForObject( + "SELECT count(*) FROM executions WHERE execution_id = 'ex-tree-1'", + Integer.class); + assertThat(execCount).isEqualTo(1); - var parentIndexes = queryArray( - "SELECT processor_parent_indexes FROM route_executions WHERE route_id = 'schema-test-tree'"); - assertThat(parentIndexes).containsExactly("-1", "0", "1"); + // Verify processors were flattened into processor_executions + List> processors = jdbcTemplate.queryForList( + "SELECT processor_id, processor_type, depth, parent_processor_id, " + + "diagram_node_id, input_body, output_body, input_headers " + + "FROM processor_executions WHERE execution_id = 'ex-tree-1' " + + "ORDER BY depth, processor_id"); + assertThat(processors).hasSize(3); - var diagramNodeIds = queryArray( - "SELECT processor_diagram_node_ids FROM route_executions WHERE route_id = 'schema-test-tree'"); - assertThat(diagramNodeIds).containsExactly("node-root", "node-child", "node-grandchild"); + // Root processor: depth=0, no parent + assertThat(processors.get(0).get("processor_id")).isEqualTo("root-proc"); + assertThat(((Number) processors.get(0).get("depth")).intValue()).isEqualTo(0); + assertThat(processors.get(0).get("parent_processor_id")).isNull(); + assertThat(processors.get(0).get("diagram_node_id")).isEqualTo("node-root"); + assertThat(processors.get(0).get("input_body")).isEqualTo("root-input"); + assertThat(processors.get(0).get("output_body")).isEqualTo("root-output"); + assertThat(processors.get(0).get("input_headers").toString()).contains("Content-Type"); - String bodies = jdbcTemplate.queryForObject( - "SELECT exchange_bodies FROM route_executions WHERE route_id = 'schema-test-tree'", - String.class); - assertThat(bodies).contains("root-input"); - assertThat(bodies).contains("root-output"); - assertThat(bodies).contains("child-input"); - assertThat(bodies).contains("child-output"); + // Child processor: depth=1, parent=root-proc + assertThat(processors.get(1).get("processor_id")).isEqualTo("child-proc"); + assertThat(((Number) processors.get(1).get("depth")).intValue()).isEqualTo(1); + assertThat(processors.get(1).get("parent_processor_id")).isEqualTo("root-proc"); + assertThat(processors.get(1).get("diagram_node_id")).isEqualTo("node-child"); + assertThat(processors.get(1).get("input_body")).isEqualTo("child-input"); + assertThat(processors.get(1).get("output_body")).isEqualTo("child-output"); - var inputBodies = queryArray( - "SELECT processor_input_bodies FROM route_executions WHERE route_id = 'schema-test-tree'"); - assertThat(inputBodies).containsExactly("root-input", "child-input", ""); - - var outputBodies = queryArray( - "SELECT processor_output_bodies FROM route_executions WHERE route_id = 'schema-test-tree'"); - assertThat(outputBodies).containsExactly("root-output", "child-output", ""); - - var inputHeaders = queryArray( - "SELECT processor_input_headers FROM route_executions WHERE route_id = 'schema-test-tree'"); - assertThat(inputHeaders.get(0)).contains("Content-Type"); - assertThat(inputHeaders.get(0)).contains("application/json"); - }); + // Grandchild processor: depth=2, parent=child-proc + assertThat(processors.get(2).get("processor_id")).isEqualTo("grandchild-proc"); + assertThat(((Number) processors.get(2).get("depth")).intValue()).isEqualTo(2); + assertThat(processors.get(2).get("parent_processor_id")).isEqualTo("child-proc"); + assertThat(processors.get(2).get("diagram_node_id")).isEqualTo("node-grandchild"); } @Test - void exchangeBodiesContainsConcatenatedText() { + void exchangeBodiesStored() { String json = """ { "routeId": "schema-test-bodies", @@ -140,14 +140,6 @@ class IngestionSchemaIT extends AbstractPostgresIT { "startTime": "2026-03-11T10:00:00Z", "endTime": "2026-03-11T10:00:01Z", "durationMs": 1000, - "inputSnapshot": { - "body": "route-level-input-body", - "headers": {"X-Route": "header-value"} - }, - "outputSnapshot": { - "body": "route-level-output-body", - "headers": {} - }, "processors": [ { "processorId": "proc-1", @@ -166,21 +158,13 @@ class IngestionSchemaIT extends AbstractPostgresIT { postExecution(json); - await().atMost(30, SECONDS).ignoreExceptions().untilAsserted(() -> { - String bodies = jdbcTemplate.queryForObject( - "SELECT exchange_bodies FROM route_executions WHERE route_id = 'schema-test-bodies'", - String.class); - assertThat(bodies).contains("processor-body-text"); - assertThat(bodies).contains("processor-output-text"); - assertThat(bodies).contains("route-level-input-body"); - assertThat(bodies).contains("route-level-output-body"); - - String headers = jdbcTemplate.queryForObject( - "SELECT exchange_headers FROM route_executions WHERE route_id = 'schema-test-bodies'", - String.class); - assertThat(headers).contains("X-Route"); - assertThat(headers).contains("header-value"); - }); + // Verify processor body data + List> processors = jdbcTemplate.queryForList( + "SELECT input_body, output_body FROM processor_executions " + + "WHERE execution_id = 'ex-bodies-1'"); + assertThat(processors).hasSize(1); + assertThat(processors.get(0).get("input_body")).isEqualTo("processor-body-text"); + assertThat(processors.get(0).get("output_body")).isEqualTo("processor-output-text"); } @Test @@ -209,20 +193,19 @@ class IngestionSchemaIT extends AbstractPostgresIT { postExecution(json); - await().atMost(30, SECONDS).ignoreExceptions().untilAsserted(() -> { - String bodies = jdbcTemplate.queryForObject( - "SELECT exchange_bodies FROM route_executions WHERE route_id = 'schema-test-null-snap'", - String.class); - assertThat(bodies).isNotNull(); + // Verify execution exists + Integer count = jdbcTemplate.queryForObject( + "SELECT count(*) FROM executions WHERE execution_id = 'ex-null-1'", + Integer.class); + assertThat(count).isEqualTo(1); - var depths = queryArray( - "SELECT processor_depths FROM route_executions WHERE route_id = 'schema-test-null-snap'"); - assertThat(depths).containsExactly("0"); - - var parentIndexes = queryArray( - "SELECT processor_parent_indexes FROM route_executions WHERE route_id = 'schema-test-null-snap'"); - assertThat(parentIndexes).containsExactly("-1"); - }); + // Verify processor with null bodies inserted successfully + List> processors = jdbcTemplate.queryForList( + "SELECT depth, parent_processor_id, input_body, output_body " + + "FROM processor_executions WHERE execution_id = 'ex-null-1'"); + assertThat(processors).hasSize(1); + assertThat(((Number) processors.get(0).get("depth")).intValue()).isEqualTo(0); + assertThat(processors.get(0).get("parent_processor_id")).isNull(); } private void postExecution(String json) { @@ -233,22 +216,4 @@ class IngestionSchemaIT extends AbstractPostgresIT { assertThat(response.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED); } - - private List queryArray(String sql) { - return jdbcTemplate.query(sql, (rs, rowNum) -> { - Object arr = rs.getArray(1).getArray(); - if (arr instanceof Object[] objects) { - return Arrays.stream(objects).map(Object::toString).toList(); - } else if (arr instanceof short[] shorts) { - var result = new java.util.ArrayList(); - for (short s : shorts) result.add(String.valueOf(s)); - return result; - } else if (arr instanceof int[] ints) { - var result = new java.util.ArrayList(); - for (int v : ints) result.add(String.valueOf(v)); - return result; - } - return List.of(); - }).get(0); - } } diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java index efdbeea1..70b3344b 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java @@ -29,7 +29,7 @@ class PostgresStatsStoreIT extends AbstractPostgresIT { insertExecution("stats-3", "route-b", "app-1", "COMPLETED", now.plusSeconds(20), 50L); // Force continuous aggregate refresh - jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_all', null, null)"); + jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_all', NOW() - INTERVAL '1 hour', NOW() + INTERVAL '1 hour')"); ExecutionStats stats = statsStore.stats(now.minusSeconds(60), now.plusSeconds(60)); assertEquals(3, stats.totalCount()); @@ -44,7 +44,7 @@ class PostgresStatsStoreIT extends AbstractPostgresIT { now.plusSeconds(i * 30), 100L + i); } - jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_all', null, null)"); + jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_all', NOW() - INTERVAL '1 hour', NOW() + INTERVAL '1 hour')"); StatsTimeseries ts = statsStore.timeseries(now.minus(1, ChronoUnit.MINUTES), now.plus(10, ChronoUnit.MINUTES), 5); assertNotNull(ts); diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java index 36419fb8..e2c5e741 100644 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java @@ -69,6 +69,9 @@ public class IngestionService { private ExecutionRecord toExecutionRecord(String agentId, String groupName, RouteExecution exec) { + String diagramHash = diagramStore + .findContentHashForRoute(exec.getRouteId(), agentId) + .orElse(""); return new ExecutionRecord( exec.getExchangeId(), exec.getRouteId(), agentId, groupName, exec.getStatus() != null ? exec.getStatus().name() : "RUNNING", @@ -76,7 +79,7 @@ public class IngestionService { exec.getStartTime(), exec.getEndTime(), exec.getDurationMs(), exec.getErrorMessage(), exec.getErrorStackTrace(), - null // diagramContentHash set separately + diagramHash ); } From 796be06a09cef9a3aeb77d13712da821e4637ab1 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Tue, 17 Mar 2026 00:02:19 +0100 Subject: [PATCH 31/32] fix: resolve all integration test failures after storage layer refactor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Use singleton container pattern for PostgreSQL + OpenSearch testcontainers (fixes container lifecycle issues with @TestInstance(PER_CLASS)) - Fix table name route_executions → executions in DetailControllerIT and ExecutionControllerIT - Serialize processor headers as JSON (ObjectMapper) instead of Map.toString() for JSONB column compatibility - Add nested mapping for processors field in OpenSearch index template - Use .keyword sub-field for term queries on dynamically mapped text fields - Add wildcard fallback queries for all text searches (substring matching) - Isolate stats tests with unique route names to prevent data contamination - Wait for OpenSearch indexing in SearchControllerIT with targeted Awaitility - Reduce OpenSearch debounce to 100ms in test profile Co-Authored-By: Claude Opus 4.6 (1M context) --- .../server/app/search/OpenSearchIndex.java | 75 ++++++++++++++----- .../server/app/AbstractPostgresIT.java | 24 +++--- .../app/controller/DetailControllerIT.java | 4 +- .../app/controller/ExecutionControllerIT.java | 2 +- .../app/controller/SearchControllerIT.java | 12 ++- .../server/app/search/OpenSearchIndexIT.java | 15 +--- .../app/storage/PostgresStatsStoreIT.java | 25 ++++--- .../src/test/resources/application-test.yml | 1 + .../core/ingestion/IngestionService.java | 17 ++++- 9 files changed, 117 insertions(+), 58 deletions(-) diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java index 892792fc..062f12fb 100644 --- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java +++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java @@ -52,7 +52,10 @@ public class OpenSearchIndex implements SearchIndex { .template(t -> t .settings(s -> s .numberOfShards("3") - .numberOfReplicas("1"))))); + .numberOfReplicas("1")) + .mappings(m -> m + .properties("processors", p -> p + .nested(n -> n)))))); log.info("OpenSearch index template created"); } } catch (IOException e) { @@ -148,27 +151,32 @@ public class OpenSearchIndex implements SearchIndex { }))); } - // Keyword filters + // Keyword filters (use .keyword sub-field for exact matching on dynamically mapped text fields) if (request.status() != null) - filter.add(termQuery("status", request.status())); + filter.add(termQuery("status.keyword", request.status())); if (request.routeId() != null) - filter.add(termQuery("route_id", request.routeId())); + filter.add(termQuery("route_id.keyword", request.routeId())); if (request.agentId() != null) - filter.add(termQuery("agent_id", request.agentId())); + filter.add(termQuery("agent_id.keyword", request.agentId())); if (request.correlationId() != null) - filter.add(termQuery("correlation_id", request.correlationId())); + filter.add(termQuery("correlation_id.keyword", request.correlationId())); // Full-text search across all fields + nested processor fields if (request.text() != null && !request.text().isBlank()) { String text = request.text(); + String wildcard = "*" + text.toLowerCase() + "*"; List textQueries = new ArrayList<>(); - // Search top-level text fields + // Search top-level text fields (analyzed match + wildcard for substring) textQueries.add(Query.of(q -> q.multiMatch(m -> m .query(text) .fields("error_message", "error_stacktrace")))); + textQueries.add(Query.of(q -> q.wildcard(w -> w + .field("error_message").value(wildcard).caseInsensitive(true)))); + textQueries.add(Query.of(q -> q.wildcard(w -> w + .field("error_stacktrace").value(wildcard).caseInsensitive(true)))); - // Search nested processor fields + // Search nested processor fields (analyzed match + wildcard) textQueries.add(Query.of(q -> q.nested(n -> n .path("processors") .query(nq -> nq.multiMatch(m -> m @@ -176,6 +184,14 @@ public class OpenSearchIndex implements SearchIndex { .fields("processors.input_body", "processors.output_body", "processors.input_headers", "processors.output_headers", "processors.error_message", "processors.error_stacktrace")))))); + textQueries.add(Query.of(q -> q.nested(n -> n + .path("processors") + .query(nq -> nq.bool(nb -> nb.should( + wildcardQuery("processors.input_body", wildcard), + wildcardQuery("processors.output_body", wildcard), + wildcardQuery("processors.input_headers", wildcard), + wildcardQuery("processors.output_headers", wildcard) + ).minimumShouldMatch("1")))))); // Also try keyword fields for exact matches textQueries.add(Query.of(q -> q.multiMatch(m -> m @@ -185,32 +201,51 @@ public class OpenSearchIndex implements SearchIndex { must.add(Query.of(q -> q.bool(b -> b.should(textQueries).minimumShouldMatch("1")))); } - // Scoped text searches + // Scoped text searches (multiMatch + wildcard fallback for substring matching) if (request.textInBody() != null && !request.textInBody().isBlank()) { + String bodyText = request.textInBody(); + String bodyWildcard = "*" + bodyText.toLowerCase() + "*"; must.add(Query.of(q -> q.nested(n -> n .path("processors") - .query(nq -> nq.multiMatch(m -> m - .query(request.textInBody()) - .fields("processors.input_body", "processors.output_body")))))); + .query(nq -> nq.bool(nb -> nb.should( + Query.of(mq -> mq.multiMatch(m -> m + .query(bodyText) + .fields("processors.input_body", "processors.output_body"))), + wildcardQuery("processors.input_body", bodyWildcard), + wildcardQuery("processors.output_body", bodyWildcard) + ).minimumShouldMatch("1")))))); } if (request.textInHeaders() != null && !request.textInHeaders().isBlank()) { + String headerText = request.textInHeaders(); + String headerWildcard = "*" + headerText.toLowerCase() + "*"; must.add(Query.of(q -> q.nested(n -> n .path("processors") - .query(nq -> nq.multiMatch(m -> m - .query(request.textInHeaders()) - .fields("processors.input_headers", "processors.output_headers")))))); + .query(nq -> nq.bool(nb -> nb.should( + Query.of(mq -> mq.multiMatch(m -> m + .query(headerText) + .fields("processors.input_headers", "processors.output_headers"))), + wildcardQuery("processors.input_headers", headerWildcard), + wildcardQuery("processors.output_headers", headerWildcard) + ).minimumShouldMatch("1")))))); } if (request.textInErrors() != null && !request.textInErrors().isBlank()) { String errText = request.textInErrors(); + String errWildcard = "*" + errText.toLowerCase() + "*"; must.add(Query.of(q -> q.bool(b -> b.should( Query.of(sq -> sq.multiMatch(m -> m .query(errText) .fields("error_message", "error_stacktrace"))), + wildcardQuery("error_message", errWildcard), + wildcardQuery("error_stacktrace", errWildcard), Query.of(sq -> sq.nested(n -> n .path("processors") - .query(nq -> nq.multiMatch(m -> m - .query(errText) - .fields("processors.error_message", "processors.error_stacktrace"))))) + .query(nq -> nq.bool(nb -> nb.should( + Query.of(nmq -> nmq.multiMatch(m -> m + .query(errText) + .fields("processors.error_message", "processors.error_stacktrace"))), + wildcardQuery("processors.error_message", errWildcard), + wildcardQuery("processors.error_stacktrace", errWildcard) + ).minimumShouldMatch("1"))))) ).minimumShouldMatch("1")))); } @@ -238,6 +273,10 @@ public class OpenSearchIndex implements SearchIndex { return Query.of(q -> q.term(t -> t.field(field).value(FieldValue.of(value)))); } + private Query wildcardQuery(String field, String pattern) { + return Query.of(q -> q.wildcard(w -> w.field(field).value(pattern).caseInsensitive(true))); + } + private Map toMap(ExecutionDocument doc) { Map map = new LinkedHashMap<>(); map.put("execution_id", doc.executionId()); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java index b912bc1b..40962efd 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java @@ -1,5 +1,6 @@ package com.cameleer3.server.app; +import org.opensearch.testcontainers.OpensearchContainer; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.jdbc.core.JdbcTemplate; @@ -7,25 +8,29 @@ import org.springframework.test.context.ActiveProfiles; import org.springframework.test.context.DynamicPropertyRegistry; import org.springframework.test.context.DynamicPropertySource; import org.testcontainers.containers.PostgreSQLContainer; -import org.testcontainers.junit.jupiter.Container; -import org.testcontainers.junit.jupiter.Testcontainers; import org.testcontainers.utility.DockerImageName; @SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT) @ActiveProfiles("test") -@Testcontainers public abstract class AbstractPostgresIT { private static final DockerImageName TIMESCALEDB_IMAGE = DockerImageName.parse("timescale/timescaledb-ha:pg16") .asCompatibleSubstituteFor("postgres"); - @Container - static final PostgreSQLContainer postgres = - new PostgreSQLContainer<>(TIMESCALEDB_IMAGE) - .withDatabaseName("cameleer3") - .withUsername("cameleer") - .withPassword("test"); + static final PostgreSQLContainer postgres; + static final OpensearchContainer opensearch; + + static { + postgres = new PostgreSQLContainer<>(TIMESCALEDB_IMAGE) + .withDatabaseName("cameleer3") + .withUsername("cameleer") + .withPassword("test"); + postgres.start(); + + opensearch = new OpensearchContainer<>("opensearchproject/opensearch:2.19.0"); + opensearch.start(); + } @Autowired protected JdbcTemplate jdbcTemplate; @@ -37,5 +42,6 @@ public abstract class AbstractPostgresIT { registry.add("spring.datasource.password", postgres::getPassword); registry.add("spring.datasource.driver-class-name", () -> "org.postgresql.Driver"); registry.add("spring.flyway.enabled", () -> "true"); + registry.add("opensearch.url", opensearch::getHttpHostAddress); } } diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java index f0cef246..5229f883 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java @@ -123,13 +123,13 @@ class DetailControllerIT extends AbstractPostgresIT { // Wait for flush and get the execution_id await().atMost(10, SECONDS).untilAsserted(() -> { Integer count = jdbcTemplate.queryForObject( - "SELECT count(*) FROM route_executions WHERE route_id = 'detail-test-route'", + "SELECT count(*) FROM executions WHERE route_id = 'detail-test-route'", Integer.class); assertThat(count).isGreaterThanOrEqualTo(1); }); seededExecutionId = jdbcTemplate.queryForObject( - "SELECT execution_id FROM route_executions WHERE route_id = 'detail-test-route' LIMIT 1", + "SELECT execution_id FROM executions WHERE route_id = 'detail-test-route' LIMIT 1", String.class); } diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ExecutionControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ExecutionControllerIT.java index 65f72d85..1ee376e2 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ExecutionControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ExecutionControllerIT.java @@ -111,7 +111,7 @@ class ExecutionControllerIT extends AbstractPostgresIT { await().atMost(10, SECONDS).untilAsserted(() -> { Integer count = jdbcTemplate.queryForObject( - "SELECT count(*) FROM route_executions WHERE route_id = 'flush-test-route'", + "SELECT count(*) FROM executions WHERE route_id = 'flush-test-route'", Integer.class); assertThat(count).isGreaterThanOrEqualTo(1); }); diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java index dfcbe9a9..6a21552f 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java @@ -14,7 +14,9 @@ import org.springframework.http.HttpMethod; import org.springframework.http.HttpStatus; import org.springframework.http.ResponseEntity; +import static java.util.concurrent.TimeUnit.SECONDS; import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; /** * Integration tests for the search controller endpoints. @@ -153,11 +155,19 @@ class SearchControllerIT extends AbstractPostgresIT { """, i, i, i, i, i)); } - // Verify all data is available (synchronous writes) + // Verify all data is in PostgreSQL (synchronous writes) Integer count = jdbcTemplate.queryForObject( "SELECT count(*) FROM executions WHERE route_id LIKE 'search-route-%'", Integer.class); assertThat(count).isEqualTo(10); + + // Wait for async OpenSearch indexing (debounce + index time) + // Check for last seeded execution specifically to avoid false positives from other test classes + await().atMost(30, SECONDS).untilAsserted(() -> { + ResponseEntity r = searchGet("?correlationId=corr-page-10"); + JsonNode body = objectMapper.readTree(r.getBody()); + assertThat(body.get("total").asLong()).isGreaterThanOrEqualTo(1); + }); } @Test diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java index 7c8635ac..cdb0bff4 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java @@ -10,29 +10,16 @@ import com.cameleer3.server.core.storage.model.ExecutionDocument.ProcessorDoc; import org.junit.jupiter.api.Test; import org.opensearch.client.opensearch.OpenSearchClient; import org.opensearch.client.opensearch.indices.RefreshRequest; -import org.opensearch.testcontainers.OpensearchContainer; import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.context.DynamicPropertyRegistry; -import org.springframework.test.context.DynamicPropertySource; -import org.testcontainers.junit.jupiter.Container; import java.time.Instant; import java.util.List; import static org.junit.jupiter.api.Assertions.*; -// Extends AbstractPostgresIT for PostgreSQL datasource needed by Spring context +// Extends AbstractPostgresIT which provides both PostgreSQL and OpenSearch testcontainers class OpenSearchIndexIT extends AbstractPostgresIT { - @Container - static final OpensearchContainer opensearch = - new OpensearchContainer<>("opensearchproject/opensearch:2.19.0"); - - @DynamicPropertySource - static void configureOpenSearch(DynamicPropertyRegistry registry) { - registry.add("opensearch.url", opensearch::getHttpHostAddress); - } - @Autowired SearchIndex searchIndex; diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java index 70b3344b..c7bc748b 100644 --- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java +++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java @@ -23,30 +23,33 @@ class PostgresStatsStoreIT extends AbstractPostgresIT { @Test void statsReturnsCountsForTimeWindow() { - Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); - insertExecution("stats-1", "route-a", "app-1", "COMPLETED", now, 100L); - insertExecution("stats-2", "route-a", "app-1", "FAILED", now.plusSeconds(10), 200L); - insertExecution("stats-3", "route-b", "app-1", "COMPLETED", now.plusSeconds(20), 50L); + // Use a unique route + statsForRoute to avoid data contamination from other tests + String uniqueRoute = "stats-route-" + System.nanoTime(); + Instant base = Instant.now().minus(5, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.SECONDS); + insertExecution("stats-1-" + uniqueRoute, uniqueRoute, "app-stats", "COMPLETED", base, 100L); + insertExecution("stats-2-" + uniqueRoute, uniqueRoute, "app-stats", "FAILED", base.plusSeconds(10), 200L); + insertExecution("stats-3-" + uniqueRoute, uniqueRoute, "app-stats", "COMPLETED", base.plusSeconds(20), 50L); // Force continuous aggregate refresh - jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_all', NOW() - INTERVAL '1 hour', NOW() + INTERVAL '1 hour')"); + jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_route', NOW() - INTERVAL '1 hour', NOW() + INTERVAL '1 hour')"); - ExecutionStats stats = statsStore.stats(now.minusSeconds(60), now.plusSeconds(60)); + ExecutionStats stats = statsStore.statsForRoute(base.minusSeconds(60), base.plusSeconds(60), uniqueRoute, null); assertEquals(3, stats.totalCount()); assertEquals(1, stats.failedCount()); } @Test void timeseriesReturnsBuckets() { - Instant now = Instant.now().truncatedTo(ChronoUnit.MINUTES); + String uniqueRoute = "ts-route-" + System.nanoTime(); + Instant base = Instant.now().minus(10, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.MINUTES); for (int i = 0; i < 10; i++) { - insertExecution("ts-" + i, "route-a", "app-1", "COMPLETED", - now.plusSeconds(i * 30), 100L + i); + insertExecution("ts-" + i + "-" + uniqueRoute, uniqueRoute, "app-ts", "COMPLETED", + base.plusSeconds(i * 30), 100L + i); } - jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_all', NOW() - INTERVAL '1 hour', NOW() + INTERVAL '1 hour')"); + jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_route', NOW() - INTERVAL '1 hour', NOW() + INTERVAL '1 hour')"); - StatsTimeseries ts = statsStore.timeseries(now.minus(1, ChronoUnit.MINUTES), now.plus(10, ChronoUnit.MINUTES), 5); + StatsTimeseries ts = statsStore.timeseriesForRoute(base.minus(1, ChronoUnit.MINUTES), base.plus(10, ChronoUnit.MINUTES), 5, uniqueRoute, null); assertNotNull(ts); assertFalse(ts.buckets().isEmpty()); } diff --git a/cameleer3-server-app/src/test/resources/application-test.yml b/cameleer3-server-app/src/test/resources/application-test.yml index f821e9ff..8a6708b5 100644 --- a/cameleer3-server-app/src/test/resources/application-test.yml +++ b/cameleer3-server-app/src/test/resources/application-test.yml @@ -4,6 +4,7 @@ spring: opensearch: url: http://localhost:9200 + debounce-ms: 100 ingestion: buffer-capacity: 100 diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java index e2c5e741..bdd32cfb 100644 --- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java +++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java @@ -8,13 +8,18 @@ import com.cameleer3.server.core.storage.ExecutionStore; import com.cameleer3.server.core.storage.ExecutionStore.ExecutionRecord; import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord; import com.cameleer3.server.core.storage.model.MetricsSnapshot; +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; import java.util.ArrayList; import java.util.List; +import java.util.Map; import java.util.function.Consumer; public class IngestionService { + private static final ObjectMapper JSON = new ObjectMapper(); + private final ExecutionStore executionStore; private final DiagramStore diagramStore; private final WriteBuffer metricsBuffer; @@ -99,8 +104,7 @@ public class IngestionService { p.getDurationMs(), p.getErrorMessage(), p.getErrorStackTrace(), truncateBody(p.getInputBody()), truncateBody(p.getOutputBody()), - p.getInputHeaders() != null ? p.getInputHeaders().toString() : null, - p.getOutputHeaders() != null ? p.getOutputHeaders().toString() : null + toJson(p.getInputHeaders()), toJson(p.getOutputHeaders()) )); if (p.getChildren() != null) { flat.addAll(flattenProcessors( @@ -116,4 +120,13 @@ public class IngestionService { if (body.length() > bodySizeLimit) return body.substring(0, bodySizeLimit); return body; } + + private static String toJson(Map headers) { + if (headers == null) return null; + try { + return JSON.writeValueAsString(headers); + } catch (JsonProcessingException e) { + return "{}"; + } + } } From c316e80d7f5afb026610b104cb0e85d258a6a5f8 Mon Sep 17 00:00:00 2001 From: hsiegeln <37154749+hsiegeln@users.noreply.github.com> Date: Tue, 17 Mar 2026 00:26:50 +0100 Subject: [PATCH 32/32] chore: update docs and config for PostgreSQL/OpenSearch storage layer - Set failsafe reuseForks=true to reuse JVM across IT classes (faster test suite) - Replace ClickHouse with PostgreSQL+OpenSearch in docker-compose.yml - Remove redundant docker-compose.dev.yml - Update CLAUDE.md and HOWTO.md to reflect new storage stack Co-Authored-By: Claude Opus 4.6 (1M context) --- CLAUDE.md | 10 ++++----- HOWTO.md | 40 +++++++++++++++++++----------------- cameleer3-server-app/pom.xml | 2 +- docker-compose.dev.yml | 29 -------------------------- docker-compose.yml | 37 +++++++++++++++++++-------------- 5 files changed, 49 insertions(+), 69 deletions(-) delete mode 100644 docker-compose.dev.yml diff --git a/CLAUDE.md b/CLAUDE.md index 28513ac8..e3a8fddd 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -38,10 +38,10 @@ java -jar cameleer3-server-app/target/cameleer3-server-app-1.0-SNAPSHOT.jar - Jackson `JavaTimeModule` for `Instant` deserialization - Communication: receives HTTP POST data from agents, serves SSE event streams for config push/commands - Maintains agent instance registry with states: LIVE → STALE → DEAD -- Storage: ClickHouse for structured data, text index for full-text search +- Storage: PostgreSQL (TimescaleDB) for structured data, OpenSearch for full-text search - Security: JWT auth with RBAC (AGENT/VIEWER/OPERATOR/ADMIN roles), Ed25519 config signing, bootstrap token for registration - OIDC: Optional external identity provider support (token exchange pattern). Configured via `CAMELEER_OIDC_*` env vars -- User persistence: ClickHouse `users` table, admin CRUD at `/api/v1/admin/users` +- User persistence: PostgreSQL `users` table, admin CRUD at `/api/v1/admin/users` ## CI/CD & Deployment @@ -50,8 +50,8 @@ java -jar cameleer3-server-app/target/cameleer3-server-app-1.0-SNAPSHOT.jar - Docker: multi-stage build (`Dockerfile`), `$BUILDPLATFORM` for native Maven on ARM64 runner, amd64 runtime - `REGISTRY_TOKEN` build arg required for `cameleer3-common` dependency resolution - Registry: `gitea.siegeln.net/cameleer/cameleer3-server` (container images) -- K8s manifests in `deploy/` — ClickHouse StatefulSet + server Deployment + NodePort Service (30081) +- K8s manifests in `deploy/` — PostgreSQL + OpenSearch StatefulSets, server Deployment + NodePort Service (30081) - Deployment target: k3s at 192.168.50.86, namespace `cameleer` -- Secrets managed in CI deploy step (idempotent `--dry-run=client | kubectl apply`): `cameleer-auth`, `clickhouse-credentials`, `CAMELEER_JWT_SECRET` -- K8s probes: server uses `/api/v1/health`, ClickHouse uses `/ping` +- Secrets managed in CI deploy step (idempotent `--dry-run=client | kubectl apply`): `cameleer-auth`, `postgres-credentials`, `opensearch-credentials`, `CAMELEER_JWT_SECRET` +- K8s probes: server uses `/api/v1/health`, PostgreSQL uses `pg_isready`, OpenSearch uses `/_cluster/health` - Docker build uses buildx registry cache + `--provenance=false` for Gitea compatibility diff --git a/HOWTO.md b/HOWTO.md index a1fd9dec..54c52557 100644 --- a/HOWTO.md +++ b/HOWTO.md @@ -21,20 +21,20 @@ mvn clean verify # compile + run all tests (needs Docker for integrati ## Infrastructure Setup -Start ClickHouse: +Start PostgreSQL and OpenSearch: ```bash docker compose up -d ``` -This starts ClickHouse 25.3 and automatically runs the schema init scripts (`clickhouse/init/01-schema.sql`, `clickhouse/init/02-search-columns.sql`, `clickhouse/init/03-users.sql`). +This starts TimescaleDB (PostgreSQL 16) and OpenSearch 2.19. The database schema is applied automatically via Flyway migrations on server startup. -| Service | Port | Purpose | -|------------|------|------------------| -| ClickHouse | 8123 | HTTP API (JDBC) | -| ClickHouse | 9000 | Native protocol | +| Service | Port | Purpose | +|------------|------|----------------------| +| PostgreSQL | 5432 | JDBC (Spring JDBC) | +| OpenSearch | 9200 | REST API (full-text) | -ClickHouse credentials: `cameleer` / `cameleer_dev`, database `cameleer3`. +PostgreSQL credentials: `cameleer` / `cameleer_dev`, database `cameleer3`. ## Run the Server @@ -109,7 +109,7 @@ The env-var local user gets `ADMIN` role. Agents get `AGENT` role at registratio ### OIDC Login (Optional) -OIDC configuration is stored in ClickHouse and managed via the admin API or UI. The SPA checks if OIDC is available: +OIDC configuration is stored in PostgreSQL and managed via the admin API or UI. The SPA checks if OIDC is available: ```bash # 1. SPA checks if OIDC is available (returns 404 if not configured) @@ -340,9 +340,8 @@ Key settings in `cameleer3-server-app/src/main/resources/application.yml`: |---------|---------|-------------| | `server.port` | 8081 | Server port | | `ingestion.buffer-capacity` | 50000 | Max items in write buffer | -| `ingestion.batch-size` | 5000 | Items per ClickHouse batch insert | +| `ingestion.batch-size` | 5000 | Items per batch insert | | `ingestion.flush-interval-ms` | 1000 | Buffer flush interval (ms) | -| `ingestion.data-ttl-days` | 30 | ClickHouse TTL for auto-deletion | | `agent-registry.heartbeat-interval-seconds` | 30 | Expected heartbeat interval | | `agent-registry.stale-threshold-seconds` | 90 | Time before agent marked STALE | | `agent-registry.dead-threshold-seconds` | 300 | Time after STALE before DEAD | @@ -386,7 +385,7 @@ npm run generate-api # Requires backend running on :8081 ## Running Tests -Integration tests use Testcontainers (starts ClickHouse automatically — requires Docker): +Integration tests use Testcontainers (starts PostgreSQL and OpenSearch automatically — requires Docker): ```bash # All tests @@ -399,14 +398,13 @@ mvn test -pl cameleer3-server-core mvn test -pl cameleer3-server-app -Dtest=ExecutionControllerIT ``` -## Verify ClickHouse Data +## Verify Database Data After posting data and waiting for the flush interval (1s default): ```bash -docker exec -it cameleer3-server-clickhouse-1 clickhouse-client \ - --user cameleer --password cameleer_dev -d cameleer3 \ - -q "SELECT count() FROM route_executions" +docker exec -it cameleer3-server-postgres-1 psql -U cameleer -d cameleer3 \ + -c "SELECT count(*) FROM route_executions" ``` ## Kubernetes Deployment @@ -417,7 +415,8 @@ The full stack is deployed to k3s via CI/CD on push to `main`. K8s manifests are ``` cameleer namespace: - ClickHouse (StatefulSet, 2Gi PVC) ← clickhouse:8123 (ClusterIP) + PostgreSQL (StatefulSet, 10Gi PVC) ← postgres:5432 (ClusterIP) + OpenSearch (StatefulSet, 10Gi PVC) ← opensearch:9200 (ClusterIP) cameleer3-server (Deployment) ← NodePort 30081 cameleer3-ui (Deployment, Nginx) ← NodePort 30090 Authentik Server (Deployment) ← NodePort 30950 @@ -439,7 +438,7 @@ cameleer namespace: Push to `main` triggers: **build** (UI npm + Maven, unit tests) → **docker** (buildx amd64 for server + UI, push to Gitea registry) → **deploy** (kubectl apply + rolling update). -Required Gitea org secrets: `REGISTRY_TOKEN`, `KUBECONFIG_BASE64`, `CAMELEER_AUTH_TOKEN`, `CAMELEER_JWT_SECRET`, `CLICKHOUSE_USER`, `CLICKHOUSE_PASSWORD`, `CAMELEER_UI_USER` (optional), `CAMELEER_UI_PASSWORD` (optional), `AUTHENTIK_PG_PASSWORD`, `AUTHENTIK_SECRET_KEY`, `CAMELEER_OIDC_ENABLED`, `CAMELEER_OIDC_ISSUER`, `CAMELEER_OIDC_CLIENT_ID`, `CAMELEER_OIDC_CLIENT_SECRET`. +Required Gitea org secrets: `REGISTRY_TOKEN`, `KUBECONFIG_BASE64`, `CAMELEER_AUTH_TOKEN`, `CAMELEER_JWT_SECRET`, `POSTGRES_USER`, `POSTGRES_PASSWORD`, `POSTGRES_DB`, `OPENSEARCH_USER`, `OPENSEARCH_PASSWORD`, `CAMELEER_UI_USER` (optional), `CAMELEER_UI_PASSWORD` (optional), `AUTHENTIK_PG_USER`, `AUTHENTIK_PG_PASSWORD`, `AUTHENTIK_SECRET_KEY`, `CAMELEER_OIDC_ENABLED`, `CAMELEER_OIDC_ISSUER`, `CAMELEER_OIDC_CLIENT_ID`, `CAMELEER_OIDC_CLIENT_SECRET`. ### Manual K8s Commands @@ -450,8 +449,11 @@ kubectl -n cameleer get pods # View server logs kubectl -n cameleer logs -f deploy/cameleer3-server -# View ClickHouse logs -kubectl -n cameleer logs -f statefulset/clickhouse +# View PostgreSQL logs +kubectl -n cameleer logs -f statefulset/postgres + +# View OpenSearch logs +kubectl -n cameleer logs -f statefulset/opensearch # Restart server kubectl -n cameleer rollout restart deployment/cameleer3-server diff --git a/cameleer3-server-app/pom.xml b/cameleer3-server-app/pom.xml index 81e21c59..4738822d 100644 --- a/cameleer3-server-app/pom.xml +++ b/cameleer3-server-app/pom.xml @@ -174,7 +174,7 @@ maven-failsafe-plugin 1 - false + true diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml deleted file mode 100644 index 1e6ecb58..00000000 --- a/docker-compose.dev.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: '3.8' - -services: - postgres: - image: timescale/timescaledb-ha:pg16 - ports: - - "5432:5432" - environment: - POSTGRES_DB: cameleer3 - POSTGRES_USER: cameleer - POSTGRES_PASSWORD: cameleer_dev - volumes: - - pgdata:/home/postgres/pgdata/data - - opensearch: - image: opensearchproject/opensearch:2.19.0 - ports: - - "9200:9200" - - "9300:9300" - environment: - discovery.type: single-node - DISABLE_SECURITY_PLUGIN: "true" - OPENSEARCH_JAVA_OPTS: "-Xms512m -Xmx512m" - volumes: - - osdata:/usr/share/opensearch/data - -volumes: - pgdata: - osdata: diff --git a/docker-compose.yml b/docker-compose.yml index 4fa23d89..c5698b23 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,20 +1,27 @@ services: - clickhouse: - image: clickhouse/clickhouse-server:25.3 + postgres: + image: timescale/timescaledb-ha:pg16 ports: - - "8123:8123" - - "9000:9000" - volumes: - - clickhouse-data:/var/lib/clickhouse - - ./clickhouse/init:/docker-entrypoint-initdb.d + - "5432:5432" environment: - CLICKHOUSE_USER: cameleer - CLICKHOUSE_PASSWORD: cameleer_dev - CLICKHOUSE_DB: cameleer3 - ulimits: - nofile: - soft: 262144 - hard: 262144 + POSTGRES_DB: cameleer3 + POSTGRES_USER: cameleer + POSTGRES_PASSWORD: cameleer_dev + volumes: + - pgdata:/home/postgres/pgdata/data + + opensearch: + image: opensearchproject/opensearch:2.19.0 + ports: + - "9200:9200" + - "9300:9300" + environment: + discovery.type: single-node + DISABLE_SECURITY_PLUGIN: "true" + OPENSEARCH_JAVA_OPTS: "-Xms512m -Xmx512m" + volumes: + - osdata:/usr/share/opensearch/data volumes: - clickhouse-data: + pgdata: + osdata: