diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/ClickHouseConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/ClickHouseConfig.java
deleted file mode 100644
index ee5d8db5..00000000
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/ClickHouseConfig.java
+++ /dev/null
@@ -1,80 +0,0 @@
-package com.cameleer3.server.app.config;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Configuration;
-import org.springframework.core.io.Resource;
-import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
-import org.springframework.jdbc.core.JdbcTemplate;
-
-import jakarta.annotation.PostConstruct;
-import javax.sql.DataSource;
-import java.nio.charset.StandardCharsets;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.stream.Collectors;
-
-/**
- * ClickHouse configuration.
- *
- * Spring Boot auto-configures the DataSource from {@code spring.datasource.*} properties.
- * This class exposes a JdbcTemplate bean and initializes the schema on startup.
- *
- * The ClickHouse container's {@code CLICKHOUSE_DB} env var creates the database;
- * this class creates the tables within it.
- *
- * Migration files are discovered automatically from {@code classpath:clickhouse/*.sql}
- * and executed in filename order (numeric prefix sort).
- */
-@Configuration
-public class ClickHouseConfig {
-
- private static final Logger log = LoggerFactory.getLogger(ClickHouseConfig.class);
- private static final String MIGRATION_PATTERN = "classpath:clickhouse/*.sql";
-
- private final DataSource dataSource;
-
- public ClickHouseConfig(DataSource dataSource) {
- this.dataSource = dataSource;
- }
-
- @Bean
- public JdbcTemplate jdbcTemplate() {
- return new JdbcTemplate(dataSource);
- }
-
- @PostConstruct
- void initSchema() {
- var jdbc = new JdbcTemplate(dataSource);
- try {
- Resource[] resources = new PathMatchingResourcePatternResolver()
- .getResources(MIGRATION_PATTERN);
- Arrays.sort(resources, Comparator.comparing(Resource::getFilename));
-
- for (Resource resource : resources) {
- String filename = resource.getFilename();
- try {
- String sql = resource.getContentAsString(StandardCharsets.UTF_8);
- String stripped = sql.lines()
- .filter(line -> !line.trim().startsWith("--"))
- .collect(Collectors.joining("\n"));
- for (String statement : stripped.split(";")) {
- String trimmed = statement.trim();
- if (!trimmed.isEmpty()) {
- jdbc.execute(trimmed);
- }
- }
- log.info("Applied schema: {}", filename);
- } catch (Exception e) {
- log.error("Failed to apply schema: {}", filename, e);
- throw new RuntimeException("Schema initialization failed: " + filename, e);
- }
- }
- } catch (RuntimeException e) {
- throw e;
- } catch (Exception e) {
- throw new RuntimeException("Failed to discover migration files", e);
- }
- }
-}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/IngestionBeanConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/IngestionBeanConfig.java
index 83507e16..c0d3a479 100644
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/IngestionBeanConfig.java
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/IngestionBeanConfig.java
@@ -1,41 +1,22 @@
package com.cameleer3.server.app.config;
-import com.cameleer3.server.core.ingestion.IngestionService;
-import com.cameleer3.server.core.ingestion.TaggedDiagram;
-import com.cameleer3.server.core.ingestion.TaggedExecution;
import com.cameleer3.server.core.ingestion.WriteBuffer;
import com.cameleer3.server.core.storage.model.MetricsSnapshot;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
/**
- * Creates the write buffer and ingestion service beans.
+ * Creates the write buffer bean for metrics.
*
- * The {@link WriteBuffer} instances are shared between the
- * {@link IngestionService} (producer side) and the flush scheduler (consumer side).
+ * The {@link WriteBuffer} instance is shared between the
+ * {@link com.cameleer3.server.core.ingestion.IngestionService} (producer side)
+ * and the flush scheduler (consumer side).
*/
@Configuration
public class IngestionBeanConfig {
- @Bean
- public WriteBuffer executionBuffer(IngestionConfig config) {
- return new WriteBuffer<>(config.getBufferCapacity());
- }
-
- @Bean
- public WriteBuffer diagramBuffer(IngestionConfig config) {
- return new WriteBuffer<>(config.getBufferCapacity());
- }
-
@Bean
public WriteBuffer metricsBuffer(IngestionConfig config) {
return new WriteBuffer<>(config.getBufferCapacity());
}
-
- @Bean
- public IngestionService ingestionService(WriteBuffer executionBuffer,
- WriteBuffer diagramBuffer,
- WriteBuffer metricsBuffer) {
- return new IngestionService(executionBuffer, diagramBuffer, metricsBuffer);
- }
}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/OpenSearchConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/OpenSearchConfig.java
new file mode 100644
index 00000000..3ff7edea
--- /dev/null
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/OpenSearchConfig.java
@@ -0,0 +1,28 @@
+package com.cameleer3.server.app.config;
+
+import org.apache.http.HttpHost;
+import org.opensearch.client.RestClient;
+import org.opensearch.client.json.jackson.JacksonJsonpMapper;
+import org.opensearch.client.opensearch.OpenSearchClient;
+import org.opensearch.client.transport.rest_client.RestClientTransport;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+public class OpenSearchConfig {
+
+ @Value("${opensearch.url:http://localhost:9200}")
+ private String opensearchUrl;
+
+ @Bean(destroyMethod = "close")
+ public RestClient opensearchRestClient() {
+ return RestClient.builder(HttpHost.create(opensearchUrl)).build();
+ }
+
+ @Bean
+ public OpenSearchClient openSearchClient(RestClient restClient) {
+ var transport = new RestClientTransport(restClient, new JacksonJsonpMapper());
+ return new OpenSearchClient(transport);
+ }
+}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/SearchBeanConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/SearchBeanConfig.java
index debc1e8b..e722f0a8 100644
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/SearchBeanConfig.java
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/SearchBeanConfig.java
@@ -1,32 +1,19 @@
package com.cameleer3.server.app.config;
-import com.cameleer3.server.app.search.ClickHouseSearchEngine;
-import com.cameleer3.server.core.detail.DetailService;
-import com.cameleer3.server.core.search.SearchEngine;
import com.cameleer3.server.core.search.SearchService;
-import com.cameleer3.server.core.storage.ExecutionRepository;
+import com.cameleer3.server.core.storage.SearchIndex;
+import com.cameleer3.server.core.storage.StatsStore;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
-import org.springframework.jdbc.core.JdbcTemplate;
/**
- * Creates beans for the search and detail layers.
+ * Creates beans for the search layer.
*/
@Configuration
public class SearchBeanConfig {
@Bean
- public SearchEngine searchEngine(JdbcTemplate jdbcTemplate) {
- return new ClickHouseSearchEngine(jdbcTemplate);
- }
-
- @Bean
- public SearchService searchService(SearchEngine searchEngine) {
- return new SearchService(searchEngine);
- }
-
- @Bean
- public DetailService detailService(ExecutionRepository executionRepository) {
- return new DetailService(executionRepository);
+ public SearchService searchService(SearchIndex searchIndex, StatsStore statsStore) {
+ return new SearchService(searchIndex, statsStore);
}
}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/StorageBeanConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/StorageBeanConfig.java
new file mode 100644
index 00000000..92f34943
--- /dev/null
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/config/StorageBeanConfig.java
@@ -0,0 +1,37 @@
+package com.cameleer3.server.app.config;
+
+import com.cameleer3.server.core.detail.DetailService;
+import com.cameleer3.server.core.indexing.SearchIndexer;
+import com.cameleer3.server.core.ingestion.IngestionService;
+import com.cameleer3.server.core.ingestion.WriteBuffer;
+import com.cameleer3.server.core.storage.*;
+import com.cameleer3.server.core.storage.model.MetricsSnapshot;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+
+@Configuration
+public class StorageBeanConfig {
+
+ @Bean
+ public DetailService detailService(ExecutionStore executionStore) {
+ return new DetailService(executionStore);
+ }
+
+ @Bean(destroyMethod = "shutdown")
+ public SearchIndexer searchIndexer(ExecutionStore executionStore, SearchIndex searchIndex,
+ @Value("${opensearch.debounce-ms:2000}") long debounceMs,
+ @Value("${opensearch.queue-size:10000}") int queueSize) {
+ return new SearchIndexer(executionStore, searchIndex, debounceMs, queueSize);
+ }
+
+ @Bean
+ public IngestionService ingestionService(ExecutionStore executionStore,
+ DiagramStore diagramStore,
+ WriteBuffer metricsBuffer,
+ SearchIndexer searchIndexer,
+ @Value("${cameleer.body-size-limit:16384}") int bodySizeLimit) {
+ return new IngestionService(executionStore, diagramStore, metricsBuffer,
+ searchIndexer::onExecutionUpdated, bodySizeLimit);
+ }
+}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DetailController.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DetailController.java
index 3e0ca0c4..2bd6ea55 100644
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DetailController.java
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DetailController.java
@@ -1,8 +1,9 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.storage.ClickHouseExecutionRepository;
import com.cameleer3.server.core.detail.DetailService;
import com.cameleer3.server.core.detail.ExecutionDetail;
+import com.cameleer3.server.core.storage.ExecutionStore;
+import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.tags.Tag;
@@ -12,14 +13,16 @@ import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
+import java.util.LinkedHashMap;
+import java.util.List;
import java.util.Map;
/**
* Endpoints for retrieving execution details and processor snapshots.
*
* The detail endpoint returns a nested processor tree reconstructed from
- * flat parallel arrays stored in ClickHouse. The snapshot endpoint returns
- * per-processor exchange data (bodies and headers).
+ * individual processor records stored in PostgreSQL. The snapshot endpoint
+ * returns per-processor exchange data (bodies and headers).
*/
@RestController
@RequestMapping("/api/v1/executions")
@@ -27,12 +30,12 @@ import java.util.Map;
public class DetailController {
private final DetailService detailService;
- private final ClickHouseExecutionRepository executionRepository;
+ private final ExecutionStore executionStore;
public DetailController(DetailService detailService,
- ClickHouseExecutionRepository executionRepository) {
+ ExecutionStore executionStore) {
this.detailService = detailService;
- this.executionRepository = executionRepository;
+ this.executionStore = executionStore;
}
@GetMapping("/{executionId}")
@@ -52,8 +55,18 @@ public class DetailController {
public ResponseEntity> getProcessorSnapshot(
@PathVariable String executionId,
@PathVariable int index) {
- return executionRepository.findProcessorSnapshot(executionId, index)
- .map(ResponseEntity::ok)
- .orElse(ResponseEntity.notFound().build());
+ List processors = executionStore.findProcessors(executionId);
+ if (index < 0 || index >= processors.size()) {
+ return ResponseEntity.notFound().build();
+ }
+
+ ProcessorRecord p = processors.get(index);
+ Map snapshot = new LinkedHashMap<>();
+ if (p.inputBody() != null) snapshot.put("inputBody", p.inputBody());
+ if (p.outputBody() != null) snapshot.put("outputBody", p.outputBody());
+ if (p.inputHeaders() != null) snapshot.put("inputHeaders", p.inputHeaders());
+ if (p.outputHeaders() != null) snapshot.put("outputHeaders", p.outputHeaders());
+
+ return ResponseEntity.ok(snapshot);
}
}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramController.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramController.java
index d4359968..5cdaf176 100644
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramController.java
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramController.java
@@ -11,7 +11,6 @@ import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
@@ -25,8 +24,8 @@ import java.util.List;
/**
* Ingestion endpoint for route diagrams.
*
- * Accepts both single {@link RouteGraph} and arrays. Data is buffered
- * and flushed to ClickHouse by the flush scheduler.
+ * Accepts both single {@link RouteGraph} and arrays. Data is written
+ * synchronously to PostgreSQL via {@link IngestionService}.
*/
@RestController
@RequestMapping("/api/v1/data")
@@ -47,26 +46,12 @@ public class DiagramController {
@Operation(summary = "Ingest route diagram data",
description = "Accepts a single RouteGraph or an array of RouteGraphs")
@ApiResponse(responseCode = "202", description = "Data accepted for processing")
- @ApiResponse(responseCode = "503", description = "Buffer full, retry later")
public ResponseEntity ingestDiagrams(@RequestBody String body) throws JsonProcessingException {
String agentId = extractAgentId();
List graphs = parsePayload(body);
- List tagged = graphs.stream()
- .map(graph -> new TaggedDiagram(agentId, graph))
- .toList();
- boolean accepted;
- if (tagged.size() == 1) {
- accepted = ingestionService.acceptDiagram(tagged.get(0));
- } else {
- accepted = ingestionService.acceptDiagrams(tagged);
- }
-
- if (!accepted) {
- log.warn("Diagram buffer full, returning 503");
- return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE)
- .header("Retry-After", "5")
- .build();
+ for (RouteGraph graph : graphs) {
+ ingestionService.ingestDiagram(new TaggedDiagram(agentId, graph));
}
return ResponseEntity.accepted().build();
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramRenderController.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramRenderController.java
index b1ca3775..d8f722e7 100644
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramRenderController.java
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/DiagramRenderController.java
@@ -5,7 +5,7 @@ import com.cameleer3.server.core.agent.AgentInfo;
import com.cameleer3.server.core.agent.AgentRegistryService;
import com.cameleer3.server.core.diagram.DiagramLayout;
import com.cameleer3.server.core.diagram.DiagramRenderer;
-import com.cameleer3.server.core.storage.DiagramRepository;
+import com.cameleer3.server.core.storage.DiagramStore;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
@@ -39,14 +39,14 @@ public class DiagramRenderController {
private static final MediaType SVG_MEDIA_TYPE = MediaType.valueOf("image/svg+xml");
- private final DiagramRepository diagramRepository;
+ private final DiagramStore diagramStore;
private final DiagramRenderer diagramRenderer;
private final AgentRegistryService registryService;
- public DiagramRenderController(DiagramRepository diagramRepository,
+ public DiagramRenderController(DiagramStore diagramStore,
DiagramRenderer diagramRenderer,
AgentRegistryService registryService) {
- this.diagramRepository = diagramRepository;
+ this.diagramStore = diagramStore;
this.diagramRenderer = diagramRenderer;
this.registryService = registryService;
}
@@ -64,7 +64,7 @@ public class DiagramRenderController {
@PathVariable String contentHash,
HttpServletRequest request) {
- Optional graphOpt = diagramRepository.findByContentHash(contentHash);
+ Optional graphOpt = diagramStore.findByContentHash(contentHash);
if (graphOpt.isEmpty()) {
return ResponseEntity.notFound().build();
}
@@ -105,12 +105,12 @@ public class DiagramRenderController {
return ResponseEntity.notFound().build();
}
- Optional contentHash = diagramRepository.findContentHashForRouteByAgents(routeId, agentIds);
+ Optional contentHash = diagramStore.findContentHashForRouteByAgents(routeId, agentIds);
if (contentHash.isEmpty()) {
return ResponseEntity.notFound().build();
}
- Optional graphOpt = diagramRepository.findByContentHash(contentHash.get());
+ Optional graphOpt = diagramStore.findByContentHash(contentHash.get());
if (graphOpt.isEmpty()) {
return ResponseEntity.notFound().build();
}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/ExecutionController.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/ExecutionController.java
index e44f2645..bea76037 100644
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/ExecutionController.java
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/ExecutionController.java
@@ -1,8 +1,9 @@
package com.cameleer3.server.app.controller;
import com.cameleer3.common.model.RouteExecution;
+import com.cameleer3.server.core.agent.AgentInfo;
+import com.cameleer3.server.core.agent.AgentRegistryService;
import com.cameleer3.server.core.ingestion.IngestionService;
-import com.cameleer3.server.core.ingestion.TaggedExecution;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -11,7 +12,6 @@ import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
@@ -25,9 +25,8 @@ import java.util.List;
/**
* Ingestion endpoint for route execution data.
*
- * Accepts both single {@link RouteExecution} and arrays. Data is buffered
- * in a {@link com.cameleer3.server.core.ingestion.WriteBuffer} and flushed
- * to ClickHouse by the flush scheduler.
+ * Accepts both single {@link RouteExecution} and arrays. Data is written
+ * synchronously to PostgreSQL via {@link IngestionService}.
*/
@RestController
@RequestMapping("/api/v1/data")
@@ -37,10 +36,14 @@ public class ExecutionController {
private static final Logger log = LoggerFactory.getLogger(ExecutionController.class);
private final IngestionService ingestionService;
+ private final AgentRegistryService registryService;
private final ObjectMapper objectMapper;
- public ExecutionController(IngestionService ingestionService, ObjectMapper objectMapper) {
+ public ExecutionController(IngestionService ingestionService,
+ AgentRegistryService registryService,
+ ObjectMapper objectMapper) {
this.ingestionService = ingestionService;
+ this.registryService = registryService;
this.objectMapper = objectMapper;
}
@@ -48,26 +51,13 @@ public class ExecutionController {
@Operation(summary = "Ingest route execution data",
description = "Accepts a single RouteExecution or an array of RouteExecutions")
@ApiResponse(responseCode = "202", description = "Data accepted for processing")
- @ApiResponse(responseCode = "503", description = "Buffer full, retry later")
public ResponseEntity ingestExecutions(@RequestBody String body) throws JsonProcessingException {
String agentId = extractAgentId();
+ String groupName = resolveGroupName(agentId);
List executions = parsePayload(body);
- List tagged = executions.stream()
- .map(exec -> new TaggedExecution(agentId, exec))
- .toList();
- boolean accepted;
- if (tagged.size() == 1) {
- accepted = ingestionService.acceptExecution(tagged.get(0));
- } else {
- accepted = ingestionService.acceptExecutions(tagged);
- }
-
- if (!accepted) {
- log.warn("Execution buffer full, returning 503");
- return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE)
- .header("Retry-After", "5")
- .build();
+ for (RouteExecution execution : executions) {
+ ingestionService.ingestExecution(agentId, groupName, execution);
}
return ResponseEntity.accepted().build();
@@ -78,6 +68,11 @@ public class ExecutionController {
return auth != null ? auth.getName() : "";
}
+ private String resolveGroupName(String agentId) {
+ AgentInfo agent = registryService.findById(agentId);
+ return agent != null ? agent.group() : "";
+ }
+
private List parsePayload(String body) throws JsonProcessingException {
String trimmed = body.strip();
if (trimmed.startsWith("[")) {
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/MetricsController.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/MetricsController.java
index e947942d..a7ee03d2 100644
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/MetricsController.java
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/controller/MetricsController.java
@@ -23,7 +23,7 @@ import java.util.List;
* Ingestion endpoint for agent metrics.
*
* Accepts an array of {@link MetricsSnapshot}. Data is buffered
- * and flushed to ClickHouse by the flush scheduler.
+ * and flushed to PostgreSQL by the flush scheduler.
*/
@RestController
@RequestMapping("/api/v1/data")
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/ClickHouseFlushScheduler.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/ClickHouseFlushScheduler.java
deleted file mode 100644
index e48a2a92..00000000
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/ClickHouseFlushScheduler.java
+++ /dev/null
@@ -1,159 +0,0 @@
-package com.cameleer3.server.app.ingestion;
-
-import com.cameleer3.server.app.config.IngestionConfig;
-import com.cameleer3.server.core.ingestion.TaggedDiagram;
-import com.cameleer3.server.core.ingestion.TaggedExecution;
-import com.cameleer3.server.core.ingestion.WriteBuffer;
-import com.cameleer3.server.core.storage.DiagramRepository;
-import com.cameleer3.server.core.storage.ExecutionRepository;
-import com.cameleer3.server.core.storage.MetricsRepository;
-import com.cameleer3.server.core.storage.model.MetricsSnapshot;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.context.SmartLifecycle;
-import org.springframework.scheduling.annotation.Scheduled;
-import org.springframework.stereotype.Component;
-
-import java.util.List;
-
-/**
- * Scheduled task that drains the write buffers and batch-inserts into ClickHouse.
- *
- * Implements {@link SmartLifecycle} to ensure all remaining buffered data is
- * flushed on application shutdown.
- */
-@Component
-public class ClickHouseFlushScheduler implements SmartLifecycle {
-
- private static final Logger log = LoggerFactory.getLogger(ClickHouseFlushScheduler.class);
-
- private final WriteBuffer executionBuffer;
- private final WriteBuffer diagramBuffer;
- private final WriteBuffer metricsBuffer;
- private final ExecutionRepository executionRepository;
- private final DiagramRepository diagramRepository;
- private final MetricsRepository metricsRepository;
- private final int batchSize;
-
- private volatile boolean running = false;
-
- public ClickHouseFlushScheduler(WriteBuffer executionBuffer,
- WriteBuffer diagramBuffer,
- WriteBuffer metricsBuffer,
- ExecutionRepository executionRepository,
- DiagramRepository diagramRepository,
- MetricsRepository metricsRepository,
- IngestionConfig config) {
- this.executionBuffer = executionBuffer;
- this.diagramBuffer = diagramBuffer;
- this.metricsBuffer = metricsBuffer;
- this.executionRepository = executionRepository;
- this.diagramRepository = diagramRepository;
- this.metricsRepository = metricsRepository;
- this.batchSize = config.getBatchSize();
- }
-
- @Scheduled(fixedDelayString = "${ingestion.flush-interval-ms:1000}")
- public void flushAll() {
- flushExecutions();
- flushDiagrams();
- flushMetrics();
- }
-
- private void flushExecutions() {
- try {
- List batch = executionBuffer.drain(batchSize);
- if (!batch.isEmpty()) {
- executionRepository.insertBatch(batch);
- log.debug("Flushed {} executions to ClickHouse", batch.size());
- }
- } catch (Exception e) {
- log.error("Failed to flush executions to ClickHouse", e);
- }
- }
-
- private void flushDiagrams() {
- try {
- List batch = diagramBuffer.drain(batchSize);
- for (TaggedDiagram diagram : batch) {
- diagramRepository.store(diagram);
- }
- if (!batch.isEmpty()) {
- log.debug("Flushed {} diagrams to ClickHouse", batch.size());
- }
- } catch (Exception e) {
- log.error("Failed to flush diagrams to ClickHouse", e);
- }
- }
-
- private void flushMetrics() {
- try {
- List batch = metricsBuffer.drain(batchSize);
- if (!batch.isEmpty()) {
- metricsRepository.insertBatch(batch);
- log.debug("Flushed {} metrics to ClickHouse", batch.size());
- }
- } catch (Exception e) {
- log.error("Failed to flush metrics to ClickHouse", e);
- }
- }
-
- // SmartLifecycle -- flush remaining data on shutdown
-
- @Override
- public void start() {
- running = true;
- log.info("ClickHouseFlushScheduler started");
- }
-
- @Override
- public void stop() {
- log.info("ClickHouseFlushScheduler stopping -- flushing remaining data");
- drainAll();
- running = false;
- }
-
- @Override
- public boolean isRunning() {
- return running;
- }
-
- @Override
- public int getPhase() {
- // Run after most beans but before DataSource shutdown
- return Integer.MAX_VALUE - 1;
- }
-
- /**
- * Drain all buffers completely (loop until empty).
- */
- private void drainAll() {
- drainBufferCompletely("executions", executionBuffer, batch -> executionRepository.insertBatch(batch));
- drainBufferCompletely("diagrams", diagramBuffer, batch -> {
- for (TaggedDiagram d : batch) {
- diagramRepository.store(d);
- }
- });
- drainBufferCompletely("metrics", metricsBuffer, batch -> metricsRepository.insertBatch(batch));
- }
-
- private void drainBufferCompletely(String name, WriteBuffer buffer, java.util.function.Consumer> inserter) {
- int total = 0;
- while (buffer.size() > 0) {
- List batch = buffer.drain(batchSize);
- if (batch.isEmpty()) {
- break;
- }
- try {
- inserter.accept(batch);
- total += batch.size();
- } catch (Exception e) {
- log.error("Failed to flush remaining {} during shutdown", name, e);
- break;
- }
- }
- if (total > 0) {
- log.info("Flushed {} remaining {} during shutdown", total, name);
- }
- }
-}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/MetricsFlushScheduler.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/MetricsFlushScheduler.java
new file mode 100644
index 00000000..1479c762
--- /dev/null
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/ingestion/MetricsFlushScheduler.java
@@ -0,0 +1,59 @@
+package com.cameleer3.server.app.ingestion;
+
+import com.cameleer3.server.app.config.IngestionConfig;
+import com.cameleer3.server.core.ingestion.WriteBuffer;
+import com.cameleer3.server.core.storage.MetricsStore;
+import com.cameleer3.server.core.storage.model.MetricsSnapshot;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.context.SmartLifecycle;
+import org.springframework.scheduling.annotation.Scheduled;
+import org.springframework.stereotype.Component;
+
+import java.util.List;
+
+@Component
+public class MetricsFlushScheduler implements SmartLifecycle {
+
+ private static final Logger log = LoggerFactory.getLogger(MetricsFlushScheduler.class);
+
+ private final WriteBuffer metricsBuffer;
+ private final MetricsStore metricsStore;
+ private final int batchSize;
+ private volatile boolean running = false;
+
+ public MetricsFlushScheduler(WriteBuffer metricsBuffer,
+ MetricsStore metricsStore,
+ IngestionConfig config) {
+ this.metricsBuffer = metricsBuffer;
+ this.metricsStore = metricsStore;
+ this.batchSize = config.getBatchSize();
+ }
+
+ @Scheduled(fixedDelayString = "${ingestion.flush-interval-ms:1000}")
+ public void flush() {
+ try {
+ List batch = metricsBuffer.drain(batchSize);
+ if (!batch.isEmpty()) {
+ metricsStore.insertBatch(batch);
+ log.debug("Flushed {} metrics to PostgreSQL", batch.size());
+ }
+ } catch (Exception e) {
+ log.error("Failed to flush metrics", e);
+ }
+ }
+
+ @Override public void start() { running = true; }
+ @Override public void stop() {
+ // Drain remaining on shutdown
+ while (metricsBuffer.size() > 0) {
+ List batch = metricsBuffer.drain(batchSize);
+ if (batch.isEmpty()) break;
+ try { metricsStore.insertBatch(batch); }
+ catch (Exception e) { log.error("Failed to flush metrics during shutdown", e); break; }
+ }
+ running = false;
+ }
+ @Override public boolean isRunning() { return running; }
+ @Override public int getPhase() { return Integer.MAX_VALUE - 1; }
+}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/retention/RetentionScheduler.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/retention/RetentionScheduler.java
new file mode 100644
index 00000000..152bb1c9
--- /dev/null
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/retention/RetentionScheduler.java
@@ -0,0 +1,48 @@
+package com.cameleer3.server.app.retention;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.scheduling.annotation.Scheduled;
+import org.springframework.stereotype.Component;
+
+@Component
+public class RetentionScheduler {
+
+ private static final Logger log = LoggerFactory.getLogger(RetentionScheduler.class);
+
+ private final JdbcTemplate jdbc;
+ private final int retentionDays;
+
+ public RetentionScheduler(JdbcTemplate jdbc,
+ @Value("${cameleer.retention-days:30}") int retentionDays) {
+ this.jdbc = jdbc;
+ this.retentionDays = retentionDays;
+ }
+
+ @Scheduled(cron = "0 0 2 * * *") // Daily at 2 AM UTC
+ public void dropExpiredChunks() {
+ String interval = retentionDays + " days";
+ try {
+ // Raw data
+ jdbc.execute("SELECT drop_chunks('executions', INTERVAL '" + interval + "')");
+ jdbc.execute("SELECT drop_chunks('processor_executions', INTERVAL '" + interval + "')");
+ jdbc.execute("SELECT drop_chunks('agent_metrics', INTERVAL '" + interval + "')");
+
+ // Continuous aggregates (keep 3x longer)
+ String caggInterval = (retentionDays * 3) + " days";
+ jdbc.execute("SELECT drop_chunks('stats_1m_all', INTERVAL '" + caggInterval + "')");
+ jdbc.execute("SELECT drop_chunks('stats_1m_app', INTERVAL '" + caggInterval + "')");
+ jdbc.execute("SELECT drop_chunks('stats_1m_route', INTERVAL '" + caggInterval + "')");
+ jdbc.execute("SELECT drop_chunks('stats_1m_processor', INTERVAL '" + caggInterval + "')");
+
+ log.info("Retention: dropped chunks older than {} days (aggregates: {} days)",
+ retentionDays, retentionDays * 3);
+ } catch (Exception e) {
+ log.error("Retention job failed", e);
+ }
+ }
+ // Note: OpenSearch daily index deletion should be handled via ILM policy
+ // configured at deployment time, not in application code.
+}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/ClickHouseSearchEngine.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/ClickHouseSearchEngine.java
deleted file mode 100644
index ed6a0b13..00000000
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/ClickHouseSearchEngine.java
+++ /dev/null
@@ -1,357 +0,0 @@
-package com.cameleer3.server.app.search;
-
-import com.cameleer3.server.core.search.ExecutionStats;
-import com.cameleer3.server.core.search.ExecutionSummary;
-import com.cameleer3.server.core.search.SearchEngine;
-import com.cameleer3.server.core.search.SearchRequest;
-import com.cameleer3.server.core.search.SearchResult;
-import com.cameleer3.server.core.search.StatsTimeseries;
-import org.springframework.jdbc.core.JdbcTemplate;
-
-import java.sql.Timestamp;
-import java.time.Duration;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-
-/**
- * ClickHouse implementation of {@link SearchEngine}.
- *
- * Builds dynamic WHERE clauses from non-null {@link SearchRequest} fields
- * and queries the {@code route_executions} table. LIKE patterns are properly
- * escaped to prevent injection.
- */
-public class ClickHouseSearchEngine implements SearchEngine {
-
- /** Per-query memory cap (1 GiB) — prevents a single query from OOMing ClickHouse. */
- private static final String SETTINGS = " SETTINGS max_memory_usage = 1000000000";
-
- private final JdbcTemplate jdbcTemplate;
-
- public ClickHouseSearchEngine(JdbcTemplate jdbcTemplate) {
- this.jdbcTemplate = jdbcTemplate;
- }
-
- @Override
- public SearchResult search(SearchRequest request) {
- var conditions = new ArrayList();
- var params = new ArrayList();
-
- buildWhereClause(request, conditions, params);
-
- String where = conditions.isEmpty() ? "" : " WHERE " + String.join(" AND ", conditions);
-
- // Count query
- var countParams = params.toArray();
- Long total = jdbcTemplate.queryForObject(
- "SELECT count() FROM route_executions" + where + SETTINGS, Long.class, countParams);
- if (total == null) total = 0L;
-
- if (total == 0) {
- return SearchResult.empty(request.offset(), request.limit());
- }
-
- // Data query
- params.add(request.limit());
- params.add(request.offset());
- String orderDir = "asc".equalsIgnoreCase(request.sortDir()) ? "ASC" : "DESC";
- String dataSql = "SELECT execution_id, route_id, agent_id, status, start_time, end_time, " +
- "duration_ms, correlation_id, error_message, diagram_content_hash " +
- "FROM route_executions" + where +
- " ORDER BY " + request.sortColumn() + " " + orderDir + " LIMIT ? OFFSET ?" + SETTINGS;
-
- List data = jdbcTemplate.query(dataSql, (rs, rowNum) -> {
- Timestamp endTs = rs.getTimestamp("end_time");
- return new ExecutionSummary(
- rs.getString("execution_id"),
- rs.getString("route_id"),
- rs.getString("agent_id"),
- rs.getString("status"),
- rs.getTimestamp("start_time").toInstant(),
- endTs != null ? endTs.toInstant() : null,
- rs.getLong("duration_ms"),
- rs.getString("correlation_id"),
- rs.getString("error_message"),
- rs.getString("diagram_content_hash")
- );
- }, params.toArray());
-
- return new SearchResult<>(data, total, request.offset(), request.limit());
- }
-
- @Override
- public long count(SearchRequest request) {
- var conditions = new ArrayList();
- var params = new ArrayList();
- buildWhereClause(request, conditions, params);
-
- String where = conditions.isEmpty() ? "" : " WHERE " + String.join(" AND ", conditions);
- Long result = jdbcTemplate.queryForObject(
- "SELECT count() FROM route_executions" + where + SETTINGS, Long.class, params.toArray());
- return result != null ? result : 0L;
- }
-
- @Override
- public ExecutionStats stats(Instant from, Instant to) {
- return stats(from, to, null, null);
- }
-
- @Override
- public ExecutionStats stats(Instant from, Instant to, String routeId, List agentIds) {
- // Current period — read from rollup
- var conditions = new ArrayList();
- var params = new ArrayList();
- conditions.add("bucket >= ?");
- params.add(bucketTimestamp(floorToFiveMinutes(from)));
- conditions.add("bucket <= ?");
- params.add(bucketTimestamp(to));
- addScopeFilters(routeId, agentIds, conditions, params);
-
- String where = " WHERE " + String.join(" AND ", conditions);
-
- String rollupSql = "SELECT " +
- "countMerge(total_count) AS cnt, " +
- "countIfMerge(failed_count) AS failed, " +
- "toInt64(ifNotFinite(sumMerge(duration_sum) / countMerge(total_count), 0)) AS avg_ms, " +
- "toInt64(ifNotFinite(quantileTDigestMerge(0.99)(p99_duration), 0)) AS p99_ms " +
- "FROM route_execution_stats_5m" + where + SETTINGS;
-
- record PeriodStats(long totalCount, long failedCount, long avgDurationMs, long p99LatencyMs) {}
- PeriodStats current = jdbcTemplate.queryForObject(rollupSql,
- (rs, rowNum) -> new PeriodStats(
- rs.getLong("cnt"),
- rs.getLong("failed"),
- rs.getLong("avg_ms"),
- rs.getLong("p99_ms")),
- params.toArray());
-
- // Active count — PREWHERE reads only the status column before touching wide rows
- var scopeConditions = new ArrayList();
- var activeParams = new ArrayList();
- addScopeFilters(routeId, agentIds, scopeConditions, activeParams);
- String scopeWhere = scopeConditions.isEmpty() ? "" : " WHERE " + String.join(" AND ", scopeConditions);
- Long activeCount = jdbcTemplate.queryForObject(
- "SELECT count() FROM route_executions PREWHERE status = 'RUNNING'" + scopeWhere + SETTINGS,
- Long.class, activeParams.toArray());
-
- // Previous period (same window shifted back 24h) — read from rollup
- Duration window = Duration.between(from, to);
- Instant prevFrom = from.minus(Duration.ofHours(24));
- Instant prevTo = prevFrom.plus(window);
- var prevConditions = new ArrayList();
- var prevParams = new ArrayList();
- prevConditions.add("bucket >= ?");
- prevParams.add(bucketTimestamp(floorToFiveMinutes(prevFrom)));
- prevConditions.add("bucket <= ?");
- prevParams.add(bucketTimestamp(prevTo));
- addScopeFilters(routeId, agentIds, prevConditions, prevParams);
- String prevWhere = " WHERE " + String.join(" AND ", prevConditions);
-
- String prevRollupSql = "SELECT " +
- "countMerge(total_count) AS cnt, " +
- "countIfMerge(failed_count) AS failed, " +
- "toInt64(ifNotFinite(sumMerge(duration_sum) / countMerge(total_count), 0)) AS avg_ms, " +
- "toInt64(ifNotFinite(quantileTDigestMerge(0.99)(p99_duration), 0)) AS p99_ms " +
- "FROM route_execution_stats_5m" + prevWhere + SETTINGS;
-
- PeriodStats prev = jdbcTemplate.queryForObject(prevRollupSql,
- (rs, rowNum) -> new PeriodStats(
- rs.getLong("cnt"),
- rs.getLong("failed"),
- rs.getLong("avg_ms"),
- rs.getLong("p99_ms")),
- prevParams.toArray());
-
- // Today total (midnight UTC to now) — read from rollup with same scope
- Instant todayStart = Instant.now().truncatedTo(java.time.temporal.ChronoUnit.DAYS);
- var todayConditions = new ArrayList();
- var todayParams = new ArrayList();
- todayConditions.add("bucket >= ?");
- todayParams.add(bucketTimestamp(floorToFiveMinutes(todayStart)));
- addScopeFilters(routeId, agentIds, todayConditions, todayParams);
- String todayWhere = " WHERE " + String.join(" AND ", todayConditions);
-
- Long totalToday = jdbcTemplate.queryForObject(
- "SELECT countMerge(total_count) FROM route_execution_stats_5m" + todayWhere + SETTINGS,
- Long.class, todayParams.toArray());
-
- return new ExecutionStats(
- current.totalCount, current.failedCount, current.avgDurationMs,
- current.p99LatencyMs, activeCount != null ? activeCount : 0L,
- totalToday != null ? totalToday : 0L,
- prev.totalCount, prev.failedCount, prev.avgDurationMs, prev.p99LatencyMs);
- }
-
- @Override
- public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount) {
- return timeseries(from, to, bucketCount, null, null);
- }
-
- @Override
- public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount,
- String routeId, List agentIds) {
- long intervalSeconds = Duration.between(from, to).getSeconds() / bucketCount;
- if (intervalSeconds < 1) intervalSeconds = 1;
-
- var conditions = new ArrayList();
- var params = new ArrayList();
- conditions.add("bucket >= ?");
- params.add(bucketTimestamp(floorToFiveMinutes(from)));
- conditions.add("bucket <= ?");
- params.add(bucketTimestamp(to));
- addScopeFilters(routeId, agentIds, conditions, params);
-
- String where = " WHERE " + String.join(" AND ", conditions);
-
- // Re-aggregate 5-minute rollup buckets into the requested interval
- String sql = "SELECT " +
- "toDateTime(intDiv(toUInt32(bucket), " + intervalSeconds + ") * " + intervalSeconds + ") AS ts_bucket, " +
- "countMerge(total_count) AS cnt, " +
- "countIfMerge(failed_count) AS failed, " +
- "toInt64(ifNotFinite(sumMerge(duration_sum) / countMerge(total_count), 0)) AS avg_ms, " +
- "toInt64(ifNotFinite(quantileTDigestMerge(0.99)(p99_duration), 0)) AS p99_ms " +
- "FROM route_execution_stats_5m" + where +
- " GROUP BY ts_bucket ORDER BY ts_bucket" + SETTINGS;
-
- List buckets = jdbcTemplate.query(sql, (rs, rowNum) ->
- new StatsTimeseries.TimeseriesBucket(
- rs.getTimestamp("ts_bucket").toInstant(),
- rs.getLong("cnt"),
- rs.getLong("failed"),
- rs.getLong("avg_ms"),
- rs.getLong("p99_ms"),
- 0L
- ),
- params.toArray());
-
- return new StatsTimeseries(buckets);
- }
-
- private void buildWhereClause(SearchRequest req, List conditions, List params) {
- if (req.status() != null && !req.status().isBlank()) {
- String[] statuses = req.status().split(",");
- if (statuses.length == 1) {
- conditions.add("status = ?");
- params.add(statuses[0].trim());
- } else {
- String placeholders = String.join(", ", Collections.nCopies(statuses.length, "?"));
- conditions.add("status IN (" + placeholders + ")");
- for (String s : statuses) {
- params.add(s.trim());
- }
- }
- }
- if (req.timeFrom() != null) {
- conditions.add("start_time >= ?");
- params.add(Timestamp.from(req.timeFrom()));
- }
- if (req.timeTo() != null) {
- conditions.add("start_time <= ?");
- params.add(Timestamp.from(req.timeTo()));
- }
- if (req.durationMin() != null) {
- conditions.add("duration_ms >= ?");
- params.add(req.durationMin());
- }
- if (req.durationMax() != null) {
- conditions.add("duration_ms <= ?");
- params.add(req.durationMax());
- }
- if (req.correlationId() != null && !req.correlationId().isBlank()) {
- conditions.add("correlation_id = ?");
- params.add(req.correlationId());
- }
- if (req.routeId() != null && !req.routeId().isBlank()) {
- conditions.add("route_id = ?");
- params.add(req.routeId());
- }
- if (req.agentId() != null && !req.agentId().isBlank()) {
- conditions.add("agent_id = ?");
- params.add(req.agentId());
- }
- // agentIds from group resolution (takes precedence when agentId is not set)
- if ((req.agentId() == null || req.agentId().isBlank())
- && req.agentIds() != null && !req.agentIds().isEmpty()) {
- String placeholders = String.join(", ", Collections.nCopies(req.agentIds().size(), "?"));
- conditions.add("agent_id IN (" + placeholders + ")");
- params.addAll(req.agentIds());
- }
- if (req.processorType() != null && !req.processorType().isBlank()) {
- conditions.add("has(processor_types, ?)");
- params.add(req.processorType());
- }
- if (req.text() != null && !req.text().isBlank()) {
- String pattern = "%" + escapeLike(req.text()) + "%";
- String[] textColumns = {
- "execution_id", "route_id", "agent_id",
- "error_message", "error_stacktrace",
- "exchange_bodies", "exchange_headers"
- };
- var likeClauses = java.util.Arrays.stream(textColumns)
- .map(col -> col + " LIKE ?")
- .toList();
- conditions.add("(" + String.join(" OR ", likeClauses) + ")");
- for (int i = 0; i < textColumns.length; i++) {
- params.add(pattern);
- }
- }
- if (req.textInBody() != null && !req.textInBody().isBlank()) {
- conditions.add("exchange_bodies LIKE ?");
- params.add("%" + escapeLike(req.textInBody()) + "%");
- }
- if (req.textInHeaders() != null && !req.textInHeaders().isBlank()) {
- conditions.add("exchange_headers LIKE ?");
- params.add("%" + escapeLike(req.textInHeaders()) + "%");
- }
- if (req.textInErrors() != null && !req.textInErrors().isBlank()) {
- String pattern = "%" + escapeLike(req.textInErrors()) + "%";
- conditions.add("(error_message LIKE ? OR error_stacktrace LIKE ?)");
- params.add(pattern);
- params.add(pattern);
- }
- }
-
- /**
- * Add route ID and agent IDs scope filters to conditions/params.
- */
- private void addScopeFilters(String routeId, List agentIds,
- List conditions, List params) {
- if (routeId != null && !routeId.isBlank()) {
- conditions.add("route_id = ?");
- params.add(routeId);
- }
- if (agentIds != null && !agentIds.isEmpty()) {
- String placeholders = String.join(", ", Collections.nCopies(agentIds.size(), "?"));
- conditions.add("agent_id IN (" + placeholders + ")");
- params.addAll(agentIds);
- }
- }
-
- /**
- * Floor an Instant to the start of its 5-minute bucket.
- */
- private static Instant floorToFiveMinutes(Instant instant) {
- long epochSecond = instant.getEpochSecond();
- return Instant.ofEpochSecond(epochSecond - (epochSecond % 300));
- }
-
- /**
- * Create a second-precision Timestamp for rollup bucket comparisons.
- * The bucket column is DateTime('UTC') (second precision); the JDBC driver
- * sends java.sql.Timestamp with nanoseconds which ClickHouse rejects.
- */
- private static Timestamp bucketTimestamp(Instant instant) {
- return Timestamp.from(instant.truncatedTo(java.time.temporal.ChronoUnit.SECONDS));
- }
-
- /**
- * Escape special LIKE characters to prevent LIKE injection.
- */
- static String escapeLike(String input) {
- return input
- .replace("\\", "\\\\")
- .replace("%", "\\%")
- .replace("_", "\\_");
- }
-}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java
new file mode 100644
index 00000000..062f12fb
--- /dev/null
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/search/OpenSearchIndex.java
@@ -0,0 +1,329 @@
+package com.cameleer3.server.app.search;
+
+import com.cameleer3.server.core.search.ExecutionSummary;
+import com.cameleer3.server.core.search.SearchRequest;
+import com.cameleer3.server.core.search.SearchResult;
+import com.cameleer3.server.core.storage.SearchIndex;
+import com.cameleer3.server.core.storage.model.ExecutionDocument;
+import com.cameleer3.server.core.storage.model.ExecutionDocument.ProcessorDoc;
+import jakarta.annotation.PostConstruct;
+import org.opensearch.client.json.JsonData;
+import org.opensearch.client.opensearch.OpenSearchClient;
+import org.opensearch.client.opensearch._types.FieldValue;
+import org.opensearch.client.opensearch._types.SortOrder;
+import org.opensearch.client.opensearch._types.query_dsl.*;
+import org.opensearch.client.opensearch.core.*;
+import org.opensearch.client.opensearch.core.search.Hit;
+import org.opensearch.client.opensearch.indices.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.stereotype.Repository;
+
+import java.io.IOException;
+import java.time.Instant;
+import java.time.ZoneOffset;
+import java.time.format.DateTimeFormatter;
+import java.util.*;
+import java.util.stream.Collectors;
+
+@Repository
+public class OpenSearchIndex implements SearchIndex {
+
+ private static final Logger log = LoggerFactory.getLogger(OpenSearchIndex.class);
+ private static final String INDEX_PREFIX = "executions-";
+ private static final DateTimeFormatter DAY_FMT = DateTimeFormatter.ofPattern("yyyy-MM-dd")
+ .withZone(ZoneOffset.UTC);
+
+ private final OpenSearchClient client;
+
+ public OpenSearchIndex(OpenSearchClient client) {
+ this.client = client;
+ }
+
+ @PostConstruct
+ void ensureIndexTemplate() {
+ try {
+ boolean exists = client.indices().existsIndexTemplate(
+ ExistsIndexTemplateRequest.of(b -> b.name("executions-template"))).value();
+ if (!exists) {
+ client.indices().putIndexTemplate(PutIndexTemplateRequest.of(b -> b
+ .name("executions-template")
+ .indexPatterns(List.of("executions-*"))
+ .template(t -> t
+ .settings(s -> s
+ .numberOfShards("3")
+ .numberOfReplicas("1"))
+ .mappings(m -> m
+ .properties("processors", p -> p
+ .nested(n -> n))))));
+ log.info("OpenSearch index template created");
+ }
+ } catch (IOException e) {
+ log.error("Failed to create index template", e);
+ }
+ }
+
+ @Override
+ public void index(ExecutionDocument doc) {
+ String indexName = INDEX_PREFIX + DAY_FMT.format(doc.startTime());
+ try {
+ client.index(IndexRequest.of(b -> b
+ .index(indexName)
+ .id(doc.executionId())
+ .document(toMap(doc))));
+ } catch (IOException e) {
+ log.error("Failed to index execution {}", doc.executionId(), e);
+ }
+ }
+
+ @Override
+ public SearchResult search(SearchRequest request) {
+ try {
+ var searchReq = buildSearchRequest(request, request.limit());
+ var response = client.search(searchReq, Map.class);
+
+ List items = response.hits().hits().stream()
+ .map(this::hitToSummary)
+ .collect(Collectors.toList());
+
+ long total = response.hits().total() != null ? response.hits().total().value() : 0;
+ return new SearchResult<>(items, total, request.offset(), request.limit());
+ } catch (IOException e) {
+ log.error("Search failed", e);
+ return SearchResult.empty(request.offset(), request.limit());
+ }
+ }
+
+ @Override
+ public long count(SearchRequest request) {
+ try {
+ var countReq = CountRequest.of(b -> b
+ .index(INDEX_PREFIX + "*")
+ .query(buildQuery(request)));
+ return client.count(countReq).count();
+ } catch (IOException e) {
+ log.error("Count failed", e);
+ return 0;
+ }
+ }
+
+ @Override
+ public void delete(String executionId) {
+ try {
+ client.deleteByQuery(DeleteByQueryRequest.of(b -> b
+ .index(List.of(INDEX_PREFIX + "*"))
+ .query(Query.of(q -> q.term(t -> t
+ .field("execution_id")
+ .value(FieldValue.of(executionId)))))));
+ } catch (IOException e) {
+ log.error("Failed to delete execution {}", executionId, e);
+ }
+ }
+
+ private org.opensearch.client.opensearch.core.SearchRequest buildSearchRequest(
+ SearchRequest request, int size) {
+ return org.opensearch.client.opensearch.core.SearchRequest.of(b -> {
+ b.index(INDEX_PREFIX + "*")
+ .query(buildQuery(request))
+ .size(size)
+ .from(request.offset())
+ .sort(s -> s.field(f -> f
+ .field(request.sortColumn())
+ .order("asc".equalsIgnoreCase(request.sortDir())
+ ? SortOrder.Asc : SortOrder.Desc)));
+ return b;
+ });
+ }
+
+ private Query buildQuery(SearchRequest request) {
+ List must = new ArrayList<>();
+ List filter = new ArrayList<>();
+
+ // Time range
+ if (request.timeFrom() != null || request.timeTo() != null) {
+ filter.add(Query.of(q -> q.range(r -> {
+ r.field("start_time");
+ if (request.timeFrom() != null)
+ r.gte(JsonData.of(request.timeFrom().toString()));
+ if (request.timeTo() != null)
+ r.lte(JsonData.of(request.timeTo().toString()));
+ return r;
+ })));
+ }
+
+ // Keyword filters (use .keyword sub-field for exact matching on dynamically mapped text fields)
+ if (request.status() != null)
+ filter.add(termQuery("status.keyword", request.status()));
+ if (request.routeId() != null)
+ filter.add(termQuery("route_id.keyword", request.routeId()));
+ if (request.agentId() != null)
+ filter.add(termQuery("agent_id.keyword", request.agentId()));
+ if (request.correlationId() != null)
+ filter.add(termQuery("correlation_id.keyword", request.correlationId()));
+
+ // Full-text search across all fields + nested processor fields
+ if (request.text() != null && !request.text().isBlank()) {
+ String text = request.text();
+ String wildcard = "*" + text.toLowerCase() + "*";
+ List textQueries = new ArrayList<>();
+
+ // Search top-level text fields (analyzed match + wildcard for substring)
+ textQueries.add(Query.of(q -> q.multiMatch(m -> m
+ .query(text)
+ .fields("error_message", "error_stacktrace"))));
+ textQueries.add(Query.of(q -> q.wildcard(w -> w
+ .field("error_message").value(wildcard).caseInsensitive(true))));
+ textQueries.add(Query.of(q -> q.wildcard(w -> w
+ .field("error_stacktrace").value(wildcard).caseInsensitive(true))));
+
+ // Search nested processor fields (analyzed match + wildcard)
+ textQueries.add(Query.of(q -> q.nested(n -> n
+ .path("processors")
+ .query(nq -> nq.multiMatch(m -> m
+ .query(text)
+ .fields("processors.input_body", "processors.output_body",
+ "processors.input_headers", "processors.output_headers",
+ "processors.error_message", "processors.error_stacktrace"))))));
+ textQueries.add(Query.of(q -> q.nested(n -> n
+ .path("processors")
+ .query(nq -> nq.bool(nb -> nb.should(
+ wildcardQuery("processors.input_body", wildcard),
+ wildcardQuery("processors.output_body", wildcard),
+ wildcardQuery("processors.input_headers", wildcard),
+ wildcardQuery("processors.output_headers", wildcard)
+ ).minimumShouldMatch("1"))))));
+
+ // Also try keyword fields for exact matches
+ textQueries.add(Query.of(q -> q.multiMatch(m -> m
+ .query(text)
+ .fields("execution_id", "route_id", "agent_id", "correlation_id", "exchange_id"))));
+
+ must.add(Query.of(q -> q.bool(b -> b.should(textQueries).minimumShouldMatch("1"))));
+ }
+
+ // Scoped text searches (multiMatch + wildcard fallback for substring matching)
+ if (request.textInBody() != null && !request.textInBody().isBlank()) {
+ String bodyText = request.textInBody();
+ String bodyWildcard = "*" + bodyText.toLowerCase() + "*";
+ must.add(Query.of(q -> q.nested(n -> n
+ .path("processors")
+ .query(nq -> nq.bool(nb -> nb.should(
+ Query.of(mq -> mq.multiMatch(m -> m
+ .query(bodyText)
+ .fields("processors.input_body", "processors.output_body"))),
+ wildcardQuery("processors.input_body", bodyWildcard),
+ wildcardQuery("processors.output_body", bodyWildcard)
+ ).minimumShouldMatch("1"))))));
+ }
+ if (request.textInHeaders() != null && !request.textInHeaders().isBlank()) {
+ String headerText = request.textInHeaders();
+ String headerWildcard = "*" + headerText.toLowerCase() + "*";
+ must.add(Query.of(q -> q.nested(n -> n
+ .path("processors")
+ .query(nq -> nq.bool(nb -> nb.should(
+ Query.of(mq -> mq.multiMatch(m -> m
+ .query(headerText)
+ .fields("processors.input_headers", "processors.output_headers"))),
+ wildcardQuery("processors.input_headers", headerWildcard),
+ wildcardQuery("processors.output_headers", headerWildcard)
+ ).minimumShouldMatch("1"))))));
+ }
+ if (request.textInErrors() != null && !request.textInErrors().isBlank()) {
+ String errText = request.textInErrors();
+ String errWildcard = "*" + errText.toLowerCase() + "*";
+ must.add(Query.of(q -> q.bool(b -> b.should(
+ Query.of(sq -> sq.multiMatch(m -> m
+ .query(errText)
+ .fields("error_message", "error_stacktrace"))),
+ wildcardQuery("error_message", errWildcard),
+ wildcardQuery("error_stacktrace", errWildcard),
+ Query.of(sq -> sq.nested(n -> n
+ .path("processors")
+ .query(nq -> nq.bool(nb -> nb.should(
+ Query.of(nmq -> nmq.multiMatch(m -> m
+ .query(errText)
+ .fields("processors.error_message", "processors.error_stacktrace"))),
+ wildcardQuery("processors.error_message", errWildcard),
+ wildcardQuery("processors.error_stacktrace", errWildcard)
+ ).minimumShouldMatch("1")))))
+ ).minimumShouldMatch("1"))));
+ }
+
+ // Duration range
+ if (request.durationMin() != null || request.durationMax() != null) {
+ filter.add(Query.of(q -> q.range(r -> {
+ r.field("duration_ms");
+ if (request.durationMin() != null)
+ r.gte(JsonData.of(request.durationMin()));
+ if (request.durationMax() != null)
+ r.lte(JsonData.of(request.durationMax()));
+ return r;
+ })));
+ }
+
+ return Query.of(q -> q.bool(b -> {
+ if (!must.isEmpty()) b.must(must);
+ if (!filter.isEmpty()) b.filter(filter);
+ if (must.isEmpty() && filter.isEmpty()) b.must(Query.of(mq -> mq.matchAll(m -> m)));
+ return b;
+ }));
+ }
+
+ private Query termQuery(String field, String value) {
+ return Query.of(q -> q.term(t -> t.field(field).value(FieldValue.of(value))));
+ }
+
+ private Query wildcardQuery(String field, String pattern) {
+ return Query.of(q -> q.wildcard(w -> w.field(field).value(pattern).caseInsensitive(true)));
+ }
+
+ private Map toMap(ExecutionDocument doc) {
+ Map map = new LinkedHashMap<>();
+ map.put("execution_id", doc.executionId());
+ map.put("route_id", doc.routeId());
+ map.put("agent_id", doc.agentId());
+ map.put("group_name", doc.groupName());
+ map.put("status", doc.status());
+ map.put("correlation_id", doc.correlationId());
+ map.put("exchange_id", doc.exchangeId());
+ map.put("start_time", doc.startTime() != null ? doc.startTime().toString() : null);
+ map.put("end_time", doc.endTime() != null ? doc.endTime().toString() : null);
+ map.put("duration_ms", doc.durationMs());
+ map.put("error_message", doc.errorMessage());
+ map.put("error_stacktrace", doc.errorStacktrace());
+ if (doc.processors() != null) {
+ map.put("processors", doc.processors().stream().map(p -> {
+ Map pm = new LinkedHashMap<>();
+ pm.put("processor_id", p.processorId());
+ pm.put("processor_type", p.processorType());
+ pm.put("status", p.status());
+ pm.put("error_message", p.errorMessage());
+ pm.put("error_stacktrace", p.errorStacktrace());
+ pm.put("input_body", p.inputBody());
+ pm.put("output_body", p.outputBody());
+ pm.put("input_headers", p.inputHeaders());
+ pm.put("output_headers", p.outputHeaders());
+ return pm;
+ }).toList());
+ }
+ return map;
+ }
+
+ @SuppressWarnings("unchecked")
+ private ExecutionSummary hitToSummary(Hit hit) {
+ Map src = hit.source();
+ if (src == null) return null;
+ return new ExecutionSummary(
+ (String) src.get("execution_id"),
+ (String) src.get("route_id"),
+ (String) src.get("agent_id"),
+ (String) src.get("status"),
+ src.get("start_time") != null ? Instant.parse((String) src.get("start_time")) : null,
+ src.get("end_time") != null ? Instant.parse((String) src.get("end_time")) : null,
+ src.get("duration_ms") != null ? ((Number) src.get("duration_ms")).longValue() : 0L,
+ (String) src.get("correlation_id"),
+ (String) src.get("error_message"),
+ null // diagramContentHash not stored in index
+ );
+ }
+}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/security/SecurityBeanConfig.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/security/SecurityBeanConfig.java
index ad48c345..5c0bdff5 100644
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/security/SecurityBeanConfig.java
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/security/SecurityBeanConfig.java
@@ -16,7 +16,7 @@ import java.util.List;
* that required security properties are set.
*
* Fails fast on startup if {@code CAMELEER_AUTH_TOKEN} is not set.
- * Seeds OIDC config from env vars into ClickHouse if DB is empty.
+ * Seeds OIDC config from env vars into the database if DB is empty.
*/
@Configuration
@EnableConfigurationProperties(SecurityProperties.class)
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseExecutionRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseExecutionRepository.java
deleted file mode 100644
index b119f7e7..00000000
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseExecutionRepository.java
+++ /dev/null
@@ -1,418 +0,0 @@
-package com.cameleer3.server.app.storage;
-
-import com.cameleer3.common.model.ExchangeSnapshot;
-import com.cameleer3.common.model.ProcessorExecution;
-import com.cameleer3.common.model.RouteExecution;
-import com.cameleer3.server.core.detail.RawExecutionRow;
-import com.cameleer3.server.core.ingestion.TaggedExecution;
-import com.cameleer3.server.core.storage.DiagramRepository;
-import com.cameleer3.server.core.storage.ExecutionRepository;
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.jdbc.core.BatchPreparedStatementSetter;
-import org.springframework.jdbc.core.JdbcTemplate;
-import org.springframework.stereotype.Repository;
-
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.sql.Timestamp;
-import java.time.Instant;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.UUID;
-
-/**
- * ClickHouse implementation of {@link ExecutionRepository}.
- *
- * Performs batch inserts into the {@code route_executions} table.
- * Processor executions are flattened into parallel arrays with tree metadata
- * (depth, parent index) for reconstruction.
- */
-@Repository
-public class ClickHouseExecutionRepository implements ExecutionRepository {
-
- private static final Logger log = LoggerFactory.getLogger(ClickHouseExecutionRepository.class);
-
- private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
-
- private static final String INSERT_SQL = """
- INSERT INTO route_executions (
- execution_id, route_id, agent_id, status, start_time, end_time,
- duration_ms, correlation_id, exchange_id, error_message, error_stacktrace,
- processor_ids, processor_types, processor_starts, processor_ends,
- processor_durations, processor_statuses,
- exchange_bodies, exchange_headers,
- processor_depths, processor_parent_indexes,
- processor_error_messages, processor_error_stacktraces,
- processor_input_bodies, processor_output_bodies,
- processor_input_headers, processor_output_headers,
- processor_diagram_node_ids, diagram_content_hash
- ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
- """;
-
- private final JdbcTemplate jdbcTemplate;
- private final DiagramRepository diagramRepository;
-
- public ClickHouseExecutionRepository(JdbcTemplate jdbcTemplate, DiagramRepository diagramRepository) {
- this.jdbcTemplate = jdbcTemplate;
- this.diagramRepository = diagramRepository;
- }
-
- @Override
- public void insertBatch(List executions) {
- if (executions.isEmpty()) {
- return;
- }
-
- jdbcTemplate.batchUpdate(INSERT_SQL, new BatchPreparedStatementSetter() {
- @Override
- public void setValues(PreparedStatement ps, int i) throws SQLException {
- TaggedExecution tagged = executions.get(i);
- RouteExecution exec = tagged.execution();
- String agentId = tagged.agentId() != null ? tagged.agentId() : "";
- List flatProcessors = flattenWithMetadata(exec.getProcessors());
-
- int col = 1;
- ps.setString(col++, UUID.randomUUID().toString());
- ps.setString(col++, nullSafe(exec.getRouteId()));
- ps.setString(col++, agentId);
- ps.setString(col++, exec.getStatus() != null ? exec.getStatus().name() : "RUNNING");
- ps.setObject(col++, toTimestamp(exec.getStartTime()));
- ps.setObject(col++, toTimestamp(exec.getEndTime()));
- ps.setLong(col++, exec.getDurationMs());
- ps.setString(col++, nullSafe(exec.getCorrelationId()));
- ps.setString(col++, nullSafe(exec.getExchangeId()));
- ps.setString(col++, nullSafe(exec.getErrorMessage()));
- ps.setString(col++, nullSafe(exec.getErrorStackTrace()));
-
- // Original parallel arrays
- ps.setObject(col++, flatProcessors.stream().map(fp -> nullSafe(fp.proc.getProcessorId())).toArray(String[]::new));
- ps.setObject(col++, flatProcessors.stream().map(fp -> nullSafe(fp.proc.getProcessorType())).toArray(String[]::new));
- ps.setObject(col++, flatProcessors.stream().map(fp -> toTimestamp(fp.proc.getStartTime())).toArray(Timestamp[]::new));
- ps.setObject(col++, flatProcessors.stream().map(fp -> toTimestamp(fp.proc.getEndTime())).toArray(Timestamp[]::new));
- ps.setObject(col++, flatProcessors.stream().mapToLong(fp -> fp.proc.getDurationMs()).boxed().toArray(Long[]::new));
- ps.setObject(col++, flatProcessors.stream().map(fp -> fp.proc.getStatus() != null ? fp.proc.getStatus().name() : "RUNNING").toArray(String[]::new));
-
- // Phase 2: exchange bodies and headers (concatenated for search)
- StringBuilder allBodies = new StringBuilder();
- StringBuilder allHeaders = new StringBuilder();
-
- String[] inputBodies = new String[flatProcessors.size()];
- String[] outputBodies = new String[flatProcessors.size()];
- String[] inputHeaders = new String[flatProcessors.size()];
- String[] outputHeaders = new String[flatProcessors.size()];
- String[] errorMessages = new String[flatProcessors.size()];
- String[] errorStacktraces = new String[flatProcessors.size()];
- String[] diagramNodeIds = new String[flatProcessors.size()];
- Short[] depths = new Short[flatProcessors.size()];
- Integer[] parentIndexes = new Integer[flatProcessors.size()];
-
- for (int j = 0; j < flatProcessors.size(); j++) {
- FlatProcessor fp = flatProcessors.get(j);
- ProcessorExecution p = fp.proc;
-
- inputBodies[j] = nullSafe(p.getInputBody());
- outputBodies[j] = nullSafe(p.getOutputBody());
- inputHeaders[j] = mapToJson(p.getInputHeaders());
- outputHeaders[j] = mapToJson(p.getOutputHeaders());
- errorMessages[j] = nullSafe(p.getErrorMessage());
- errorStacktraces[j] = nullSafe(p.getErrorStackTrace());
- diagramNodeIds[j] = nullSafe(p.getDiagramNodeId());
- depths[j] = (short) fp.depth;
- parentIndexes[j] = fp.parentIndex;
-
- allBodies.append(inputBodies[j]).append(' ').append(outputBodies[j]).append(' ');
- allHeaders.append(inputHeaders[j]).append(' ').append(outputHeaders[j]).append(' ');
- }
-
- // Include route-level input/output snapshot in searchable text
- appendSnapshotText(exec.getInputSnapshot(), allBodies, allHeaders);
- appendSnapshotText(exec.getOutputSnapshot(), allBodies, allHeaders);
-
- ps.setString(col++, allBodies.toString().trim()); // exchange_bodies
- ps.setString(col++, allHeaders.toString().trim()); // exchange_headers
- ps.setObject(col++, depths); // processor_depths
- ps.setObject(col++, parentIndexes); // processor_parent_indexes
- ps.setObject(col++, errorMessages); // processor_error_messages
- ps.setObject(col++, errorStacktraces); // processor_error_stacktraces
- ps.setObject(col++, inputBodies); // processor_input_bodies
- ps.setObject(col++, outputBodies); // processor_output_bodies
- ps.setObject(col++, inputHeaders); // processor_input_headers
- ps.setObject(col++, outputHeaders); // processor_output_headers
- ps.setObject(col++, diagramNodeIds); // processor_diagram_node_ids
- String diagramHash = diagramRepository
- .findContentHashForRoute(exec.getRouteId(), agentId)
- .orElse("");
- ps.setString(col++, diagramHash); // diagram_content_hash
- }
-
- @Override
- public int getBatchSize() {
- return executions.size();
- }
- });
-
- log.debug("Inserted batch of {} route executions into ClickHouse", executions.size());
- }
-
- @Override
- public Optional findRawById(String executionId) {
- String sql = """
- SELECT execution_id, route_id, agent_id, status, start_time, end_time,
- duration_ms, correlation_id, exchange_id, error_message, error_stacktrace,
- diagram_content_hash,
- processor_ids, processor_types, processor_statuses,
- processor_starts, processor_ends, processor_durations,
- processor_diagram_node_ids,
- processor_error_messages, processor_error_stacktraces,
- processor_depths, processor_parent_indexes
- FROM route_executions
- WHERE execution_id = ?
- LIMIT 1
- """;
-
- List results = jdbcTemplate.query(sql, (rs, rowNum) -> {
- // Extract parallel arrays from ClickHouse
- String[] processorIds = toStringArray(rs.getArray("processor_ids"));
- String[] processorTypes = toStringArray(rs.getArray("processor_types"));
- String[] processorStatuses = toStringArray(rs.getArray("processor_statuses"));
- Instant[] processorStarts = toInstantArray(rs.getArray("processor_starts"));
- Instant[] processorEnds = toInstantArray(rs.getArray("processor_ends"));
- long[] processorDurations = toLongArray(rs.getArray("processor_durations"));
- String[] processorDiagramNodeIds = toStringArray(rs.getArray("processor_diagram_node_ids"));
- String[] processorErrorMessages = toStringArray(rs.getArray("processor_error_messages"));
- String[] processorErrorStacktraces = toStringArray(rs.getArray("processor_error_stacktraces"));
- int[] processorDepths = toIntArrayFromShort(rs.getArray("processor_depths"));
- int[] processorParentIndexes = toIntArray(rs.getArray("processor_parent_indexes"));
-
- Timestamp endTs = rs.getTimestamp("end_time");
- return new RawExecutionRow(
- rs.getString("execution_id"),
- rs.getString("route_id"),
- rs.getString("agent_id"),
- rs.getString("status"),
- rs.getTimestamp("start_time").toInstant(),
- endTs != null ? endTs.toInstant() : null,
- rs.getLong("duration_ms"),
- rs.getString("correlation_id"),
- rs.getString("exchange_id"),
- rs.getString("error_message"),
- rs.getString("error_stacktrace"),
- rs.getString("diagram_content_hash"),
- processorIds, processorTypes, processorStatuses,
- processorStarts, processorEnds, processorDurations,
- processorDiagramNodeIds,
- processorErrorMessages, processorErrorStacktraces,
- processorDepths, processorParentIndexes
- );
- }, executionId);
-
- return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0));
- }
-
- /**
- * Find exchange snapshot data for a specific processor by index.
- *
- * @param executionId the execution ID
- * @param processorIndex 0-based processor index
- * @return map with inputBody, outputBody, inputHeaders, outputHeaders or empty if not found
- */
- public Optional> findProcessorSnapshot(String executionId, int processorIndex) {
- // ClickHouse arrays are 1-indexed in SQL
- int chIndex = processorIndex + 1;
- String sql = """
- SELECT
- processor_input_bodies[?] AS input_body,
- processor_output_bodies[?] AS output_body,
- processor_input_headers[?] AS input_headers,
- processor_output_headers[?] AS output_headers,
- length(processor_ids) AS proc_count
- FROM route_executions
- WHERE execution_id = ?
- LIMIT 1
- """;
-
- List> results = jdbcTemplate.query(sql, (rs, rowNum) -> {
- int procCount = rs.getInt("proc_count");
- if (processorIndex < 0 || processorIndex >= procCount) {
- return null;
- }
- var snapshot = new java.util.LinkedHashMap();
- snapshot.put("inputBody", rs.getString("input_body"));
- snapshot.put("outputBody", rs.getString("output_body"));
- snapshot.put("inputHeaders", rs.getString("input_headers"));
- snapshot.put("outputHeaders", rs.getString("output_headers"));
- return snapshot;
- }, chIndex, chIndex, chIndex, chIndex, executionId);
-
- if (results.isEmpty() || results.get(0) == null) {
- return Optional.empty();
- }
- return Optional.of(results.get(0));
- }
-
- // --- Array extraction helpers ---
-
- private static String[] toStringArray(java.sql.Array sqlArray) throws SQLException {
- if (sqlArray == null) return new String[0];
- Object arr = sqlArray.getArray();
- if (arr instanceof String[] sa) return sa;
- if (arr instanceof Object[] oa) {
- String[] result = new String[oa.length];
- for (int i = 0; i < oa.length; i++) {
- result[i] = oa[i] != null ? oa[i].toString() : "";
- }
- return result;
- }
- return new String[0];
- }
-
- private static Instant[] toInstantArray(java.sql.Array sqlArray) throws SQLException {
- if (sqlArray == null) return new Instant[0];
- Object arr = sqlArray.getArray();
- if (arr instanceof Timestamp[] ts) {
- Instant[] result = new Instant[ts.length];
- for (int i = 0; i < ts.length; i++) {
- result[i] = ts[i] != null ? ts[i].toInstant() : Instant.EPOCH;
- }
- return result;
- }
- if (arr instanceof Object[] oa) {
- Instant[] result = new Instant[oa.length];
- for (int i = 0; i < oa.length; i++) {
- if (oa[i] instanceof Timestamp ts) {
- result[i] = ts.toInstant();
- } else {
- result[i] = Instant.EPOCH;
- }
- }
- return result;
- }
- return new Instant[0];
- }
-
- private static long[] toLongArray(java.sql.Array sqlArray) throws SQLException {
- if (sqlArray == null) return new long[0];
- Object arr = sqlArray.getArray();
- if (arr instanceof long[] la) return la;
- if (arr instanceof Long[] la) {
- long[] result = new long[la.length];
- for (int i = 0; i < la.length; i++) {
- result[i] = la[i] != null ? la[i] : 0;
- }
- return result;
- }
- if (arr instanceof Object[] oa) {
- long[] result = new long[oa.length];
- for (int i = 0; i < oa.length; i++) {
- result[i] = oa[i] instanceof Number n ? n.longValue() : 0;
- }
- return result;
- }
- return new long[0];
- }
-
- private static int[] toIntArray(java.sql.Array sqlArray) throws SQLException {
- if (sqlArray == null) return new int[0];
- Object arr = sqlArray.getArray();
- if (arr instanceof int[] ia) return ia;
- if (arr instanceof Integer[] ia) {
- int[] result = new int[ia.length];
- for (int i = 0; i < ia.length; i++) {
- result[i] = ia[i] != null ? ia[i] : 0;
- }
- return result;
- }
- if (arr instanceof Object[] oa) {
- int[] result = new int[oa.length];
- for (int i = 0; i < oa.length; i++) {
- result[i] = oa[i] instanceof Number n ? n.intValue() : 0;
- }
- return result;
- }
- return new int[0];
- }
-
- private static int[] toIntArrayFromShort(java.sql.Array sqlArray) throws SQLException {
- if (sqlArray == null) return new int[0];
- Object arr = sqlArray.getArray();
- if (arr instanceof short[] sa) {
- int[] result = new int[sa.length];
- for (int i = 0; i < sa.length; i++) {
- result[i] = sa[i];
- }
- return result;
- }
- if (arr instanceof int[] ia) return ia;
- if (arr instanceof Object[] oa) {
- int[] result = new int[oa.length];
- for (int i = 0; i < oa.length; i++) {
- result[i] = oa[i] instanceof Number n ? n.intValue() : 0;
- }
- return result;
- }
- return new int[0];
- }
-
- /**
- * Internal record for a flattened processor with tree metadata.
- */
- private record FlatProcessor(ProcessorExecution proc, int depth, int parentIndex) {}
-
- /**
- * Flatten the processor tree with depth and parent index metadata (DFS order).
- */
- private List flattenWithMetadata(List processors) {
- if (processors == null || processors.isEmpty()) {
- return List.of();
- }
- var result = new ArrayList();
- for (ProcessorExecution p : processors) {
- flattenRecursive(p, 0, -1, result);
- }
- return result;
- }
-
- private void flattenRecursive(ProcessorExecution processor, int depth, int parentIdx,
- List result) {
- int myIndex = result.size();
- result.add(new FlatProcessor(processor, depth, parentIdx));
- if (processor.getChildren() != null) {
- for (ProcessorExecution child : processor.getChildren()) {
- flattenRecursive(child, depth + 1, myIndex, result);
- }
- }
- }
-
- private void appendSnapshotText(ExchangeSnapshot snapshot,
- StringBuilder allBodies, StringBuilder allHeaders) {
- if (snapshot != null) {
- allBodies.append(nullSafe(snapshot.getBody())).append(' ');
- allHeaders.append(mapToJson(snapshot.getHeaders())).append(' ');
- }
- }
-
- private static String mapToJson(Map map) {
- if (map == null || map.isEmpty()) {
- return "{}";
- }
- try {
- return OBJECT_MAPPER.writeValueAsString(map);
- } catch (JsonProcessingException e) {
- log.warn("Failed to serialize headers map to JSON", e);
- return "{}";
- }
- }
-
- private static String nullSafe(String value) {
- return value != null ? value : "";
- }
-
- private static Timestamp toTimestamp(Instant instant) {
- return instant != null ? Timestamp.from(instant) : Timestamp.from(Instant.EPOCH);
- }
-}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseMetricsRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseMetricsRepository.java
deleted file mode 100644
index a72ea26d..00000000
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseMetricsRepository.java
+++ /dev/null
@@ -1,67 +0,0 @@
-package com.cameleer3.server.app.storage;
-
-import com.cameleer3.server.core.storage.MetricsRepository;
-import com.cameleer3.server.core.storage.model.MetricsSnapshot;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.springframework.jdbc.core.BatchPreparedStatementSetter;
-import org.springframework.jdbc.core.JdbcTemplate;
-import org.springframework.stereotype.Repository;
-
-import java.sql.PreparedStatement;
-import java.sql.SQLException;
-import java.sql.Timestamp;
-import java.time.Instant;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-/**
- * ClickHouse implementation of {@link MetricsRepository}.
- *
- * Performs batch inserts into the {@code agent_metrics} table.
- */
-@Repository
-public class ClickHouseMetricsRepository implements MetricsRepository {
-
- private static final Logger log = LoggerFactory.getLogger(ClickHouseMetricsRepository.class);
-
- private static final String INSERT_SQL = """
- INSERT INTO agent_metrics (agent_id, collected_at, metric_name, metric_value, tags)
- VALUES (?, ?, ?, ?, ?)
- """;
-
- private final JdbcTemplate jdbcTemplate;
-
- public ClickHouseMetricsRepository(JdbcTemplate jdbcTemplate) {
- this.jdbcTemplate = jdbcTemplate;
- }
-
- @Override
- public void insertBatch(List metrics) {
- if (metrics.isEmpty()) {
- return;
- }
-
- jdbcTemplate.batchUpdate(INSERT_SQL, new BatchPreparedStatementSetter() {
- @Override
- public void setValues(PreparedStatement ps, int i) throws SQLException {
- MetricsSnapshot m = metrics.get(i);
- ps.setString(1, m.agentId() != null ? m.agentId() : "");
- ps.setObject(2, m.collectedAt() != null ? Timestamp.from(m.collectedAt()) : Timestamp.from(Instant.EPOCH));
- ps.setString(3, m.metricName() != null ? m.metricName() : "");
- ps.setDouble(4, m.metricValue());
- // ClickHouse Map(String, String) -- pass as a java.util.Map
- Map tags = m.tags() != null ? m.tags() : new HashMap<>();
- ps.setObject(5, tags);
- }
-
- @Override
- public int getBatchSize() {
- return metrics.size();
- }
- });
-
- log.debug("Inserted batch of {} metrics into ClickHouse", metrics.size());
- }
-}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseOidcConfigRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseOidcConfigRepository.java
deleted file mode 100644
index 92b08d54..00000000
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseOidcConfigRepository.java
+++ /dev/null
@@ -1,71 +0,0 @@
-package com.cameleer3.server.app.storage;
-
-import com.cameleer3.server.core.security.OidcConfig;
-import com.cameleer3.server.core.security.OidcConfigRepository;
-import org.springframework.jdbc.core.JdbcTemplate;
-import org.springframework.stereotype.Repository;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Optional;
-
-/**
- * ClickHouse implementation of {@link OidcConfigRepository}.
- * Singleton row with {@code config_id = 'default'}, using ReplacingMergeTree.
- */
-@Repository
-public class ClickHouseOidcConfigRepository implements OidcConfigRepository {
-
- private final JdbcTemplate jdbc;
-
- public ClickHouseOidcConfigRepository(JdbcTemplate jdbc) {
- this.jdbc = jdbc;
- }
-
- @Override
- public Optional find() {
- List results = jdbc.query(
- "SELECT enabled, issuer_uri, client_id, client_secret, roles_claim, default_roles, auto_signup, display_name_claim "
- + "FROM oidc_config FINAL WHERE config_id = 'default'",
- this::mapRow
- );
- return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0));
- }
-
- @Override
- public void save(OidcConfig config) {
- jdbc.update(
- "INSERT INTO oidc_config (config_id, enabled, issuer_uri, client_id, client_secret, roles_claim, default_roles, auto_signup, display_name_claim, updated_at) "
- + "VALUES ('default', ?, ?, ?, ?, ?, ?, ?, ?, now64(3, 'UTC'))",
- config.enabled(),
- config.issuerUri(),
- config.clientId(),
- config.clientSecret(),
- config.rolesClaim(),
- config.defaultRoles().toArray(new String[0]),
- config.autoSignup(),
- config.displayNameClaim()
- );
- }
-
- @Override
- public void delete() {
- jdbc.update("DELETE FROM oidc_config WHERE config_id = 'default'");
- }
-
- private OidcConfig mapRow(ResultSet rs, int rowNum) throws SQLException {
- String[] rolesArray = (String[]) rs.getArray("default_roles").getArray();
- return new OidcConfig(
- rs.getBoolean("enabled"),
- rs.getString("issuer_uri"),
- rs.getString("client_id"),
- rs.getString("client_secret"),
- rs.getString("roles_claim"),
- Arrays.asList(rolesArray),
- rs.getBoolean("auto_signup"),
- rs.getString("display_name_claim")
- );
- }
-}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseUserRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseUserRepository.java
deleted file mode 100644
index b5090a1e..00000000
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseUserRepository.java
+++ /dev/null
@@ -1,112 +0,0 @@
-package com.cameleer3.server.app.storage;
-
-import com.cameleer3.server.core.security.UserInfo;
-import com.cameleer3.server.core.security.UserRepository;
-import org.springframework.jdbc.core.JdbcTemplate;
-import org.springframework.stereotype.Repository;
-
-import java.sql.ResultSet;
-import java.sql.SQLException;
-import java.time.Instant;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Optional;
-
-/**
- * ClickHouse implementation of {@link UserRepository}.
- *
- * Uses ReplacingMergeTree — reads use {@code FINAL} to get the latest version.
- */
-@Repository
-public class ClickHouseUserRepository implements UserRepository {
-
- private final JdbcTemplate jdbc;
-
- public ClickHouseUserRepository(JdbcTemplate jdbc) {
- this.jdbc = jdbc;
- }
-
- @Override
- public Optional findById(String userId) {
- List results = jdbc.query(
- "SELECT user_id, provider, email, display_name, roles, created_at "
- + "FROM users FINAL WHERE user_id = ?",
- this::mapRow,
- userId
- );
- return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0));
- }
-
- @Override
- public List findAll() {
- return jdbc.query(
- "SELECT user_id, provider, email, display_name, roles, created_at FROM users FINAL ORDER BY user_id",
- this::mapRow
- );
- }
-
- @Override
- public void upsert(UserInfo user) {
- Optional existing = findById(user.userId());
- if (existing.isPresent()) {
- UserInfo ex = existing.get();
- // Skip write if nothing changed — avoids accumulating un-merged rows
- if (ex.provider().equals(user.provider())
- && ex.email().equals(user.email())
- && ex.displayName().equals(user.displayName())
- && ex.roles().equals(user.roles())) {
- return;
- }
- jdbc.update(
- "INSERT INTO users (user_id, provider, email, display_name, roles, created_at, updated_at) "
- + "SELECT user_id, ?, ?, ?, ?, created_at, now64(3, 'UTC') "
- + "FROM users FINAL WHERE user_id = ?",
- user.provider(),
- user.email(),
- user.displayName(),
- user.roles().toArray(new String[0]),
- user.userId()
- );
- } else {
- jdbc.update(
- "INSERT INTO users (user_id, provider, email, display_name, roles, updated_at) "
- + "VALUES (?, ?, ?, ?, ?, now64(3, 'UTC'))",
- user.userId(),
- user.provider(),
- user.email(),
- user.displayName(),
- user.roles().toArray(new String[0])
- );
- }
- }
-
- @Override
- public void updateRoles(String userId, List roles) {
- // ReplacingMergeTree: insert a new row with updated_at to supersede the old one.
- // Copy existing fields, update roles.
- jdbc.update(
- "INSERT INTO users (user_id, provider, email, display_name, roles, created_at, updated_at) "
- + "SELECT user_id, provider, email, display_name, ?, created_at, now64(3, 'UTC') "
- + "FROM users FINAL WHERE user_id = ?",
- roles.toArray(new String[0]),
- userId
- );
- }
-
- @Override
- public void delete(String userId) {
- jdbc.update("DELETE FROM users WHERE user_id = ?", userId);
- }
-
- private UserInfo mapRow(ResultSet rs, int rowNum) throws SQLException {
- String[] rolesArray = (String[]) rs.getArray("roles").getArray();
- return new UserInfo(
- rs.getString("user_id"),
- rs.getString("provider"),
- rs.getString("email"),
- rs.getString("display_name"),
- Arrays.asList(rolesArray),
- rs.getTimestamp("created_at").toInstant()
- );
- }
-}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseDiagramRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresDiagramStore.java
similarity index 88%
rename from cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseDiagramRepository.java
rename to cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresDiagramStore.java
index 11a0ed4f..0c7dbbf8 100644
--- a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/ClickHouseDiagramRepository.java
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresDiagramStore.java
@@ -2,7 +2,7 @@ package com.cameleer3.server.app.storage;
import com.cameleer3.common.graph.RouteGraph;
import com.cameleer3.server.core.ingestion.TaggedDiagram;
-import com.cameleer3.server.core.storage.DiagramRepository;
+import com.cameleer3.server.core.storage.DiagramStore;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
@@ -22,19 +22,20 @@ import java.util.Map;
import java.util.Optional;
/**
- * ClickHouse implementation of {@link DiagramRepository}.
+ * PostgreSQL implementation of {@link DiagramStore}.
*
* Stores route graphs as JSON with SHA-256 content-hash deduplication.
- * The underlying table uses ReplacingMergeTree keyed on content_hash.
+ * Uses {@code ON CONFLICT (content_hash) DO NOTHING} for idempotent inserts.
*/
@Repository
-public class ClickHouseDiagramRepository implements DiagramRepository {
+public class PostgresDiagramStore implements DiagramStore {
- private static final Logger log = LoggerFactory.getLogger(ClickHouseDiagramRepository.class);
+ private static final Logger log = LoggerFactory.getLogger(PostgresDiagramStore.class);
private static final String INSERT_SQL = """
INSERT INTO route_diagrams (content_hash, route_id, agent_id, definition)
- VALUES (?, ?, ?, ?)
+ VALUES (?, ?, ?, ?::jsonb)
+ ON CONFLICT (content_hash) DO NOTHING
""";
private static final String SELECT_BY_HASH = """
@@ -50,7 +51,7 @@ public class ClickHouseDiagramRepository implements DiagramRepository {
private final JdbcTemplate jdbcTemplate;
private final ObjectMapper objectMapper;
- public ClickHouseDiagramRepository(JdbcTemplate jdbcTemplate) {
+ public PostgresDiagramStore(JdbcTemplate jdbcTemplate) {
this.jdbcTemplate = jdbcTemplate;
this.objectMapper = new ObjectMapper();
this.objectMapper.registerModule(new JavaTimeModule());
@@ -82,7 +83,7 @@ public class ClickHouseDiagramRepository implements DiagramRepository {
try {
return Optional.of(objectMapper.readValue(json, RouteGraph.class));
} catch (JsonProcessingException e) {
- log.error("Failed to deserialize RouteGraph from ClickHouse", e);
+ log.error("Failed to deserialize RouteGraph from PostgreSQL", e);
return Optional.empty();
}
}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresExecutionStore.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresExecutionStore.java
new file mode 100644
index 00000000..84170327
--- /dev/null
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresExecutionStore.java
@@ -0,0 +1,131 @@
+package com.cameleer3.server.app.storage;
+
+import com.cameleer3.server.core.storage.ExecutionStore;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.core.RowMapper;
+import org.springframework.stereotype.Repository;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Timestamp;
+import java.time.Instant;
+import java.util.List;
+import java.util.Optional;
+
+@Repository
+public class PostgresExecutionStore implements ExecutionStore {
+
+ private final JdbcTemplate jdbc;
+
+ public PostgresExecutionStore(JdbcTemplate jdbc) {
+ this.jdbc = jdbc;
+ }
+
+ @Override
+ public void upsert(ExecutionRecord execution) {
+ jdbc.update("""
+ INSERT INTO executions (execution_id, route_id, agent_id, group_name,
+ status, correlation_id, exchange_id, start_time, end_time,
+ duration_ms, error_message, error_stacktrace, diagram_content_hash,
+ created_at, updated_at)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, now(), now())
+ ON CONFLICT (execution_id, start_time) DO UPDATE SET
+ status = CASE
+ WHEN EXCLUDED.status IN ('COMPLETED', 'FAILED')
+ AND executions.status = 'RUNNING'
+ THEN EXCLUDED.status
+ WHEN EXCLUDED.status = executions.status THEN executions.status
+ ELSE EXCLUDED.status
+ END,
+ end_time = COALESCE(EXCLUDED.end_time, executions.end_time),
+ duration_ms = COALESCE(EXCLUDED.duration_ms, executions.duration_ms),
+ error_message = COALESCE(EXCLUDED.error_message, executions.error_message),
+ error_stacktrace = COALESCE(EXCLUDED.error_stacktrace, executions.error_stacktrace),
+ diagram_content_hash = COALESCE(EXCLUDED.diagram_content_hash, executions.diagram_content_hash),
+ updated_at = now()
+ """,
+ execution.executionId(), execution.routeId(), execution.agentId(),
+ execution.groupName(), execution.status(), execution.correlationId(),
+ execution.exchangeId(),
+ Timestamp.from(execution.startTime()),
+ execution.endTime() != null ? Timestamp.from(execution.endTime()) : null,
+ execution.durationMs(), execution.errorMessage(),
+ execution.errorStacktrace(), execution.diagramContentHash());
+ }
+
+ @Override
+ public void upsertProcessors(String executionId, Instant startTime,
+ String groupName, String routeId,
+ List processors) {
+ jdbc.batchUpdate("""
+ INSERT INTO processor_executions (execution_id, processor_id, processor_type,
+ diagram_node_id, group_name, route_id, depth, parent_processor_id,
+ status, start_time, end_time, duration_ms, error_message, error_stacktrace,
+ input_body, output_body, input_headers, output_headers)
+ VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?::jsonb, ?::jsonb)
+ ON CONFLICT (execution_id, processor_id, start_time) DO UPDATE SET
+ status = EXCLUDED.status,
+ end_time = COALESCE(EXCLUDED.end_time, processor_executions.end_time),
+ duration_ms = COALESCE(EXCLUDED.duration_ms, processor_executions.duration_ms),
+ error_message = COALESCE(EXCLUDED.error_message, processor_executions.error_message),
+ error_stacktrace = COALESCE(EXCLUDED.error_stacktrace, processor_executions.error_stacktrace),
+ input_body = COALESCE(EXCLUDED.input_body, processor_executions.input_body),
+ output_body = COALESCE(EXCLUDED.output_body, processor_executions.output_body),
+ input_headers = COALESCE(EXCLUDED.input_headers, processor_executions.input_headers),
+ output_headers = COALESCE(EXCLUDED.output_headers, processor_executions.output_headers)
+ """,
+ processors.stream().map(p -> new Object[]{
+ p.executionId(), p.processorId(), p.processorType(),
+ p.diagramNodeId(), p.groupName(), p.routeId(),
+ p.depth(), p.parentProcessorId(), p.status(),
+ Timestamp.from(p.startTime()),
+ p.endTime() != null ? Timestamp.from(p.endTime()) : null,
+ p.durationMs(), p.errorMessage(), p.errorStacktrace(),
+ p.inputBody(), p.outputBody(), p.inputHeaders(), p.outputHeaders()
+ }).toList());
+ }
+
+ @Override
+ public Optional findById(String executionId) {
+ List results = jdbc.query(
+ "SELECT * FROM executions WHERE execution_id = ? ORDER BY start_time DESC LIMIT 1",
+ EXECUTION_MAPPER, executionId);
+ return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0));
+ }
+
+ @Override
+ public List findProcessors(String executionId) {
+ return jdbc.query(
+ "SELECT * FROM processor_executions WHERE execution_id = ? ORDER BY depth, start_time",
+ PROCESSOR_MAPPER, executionId);
+ }
+
+ private static final RowMapper EXECUTION_MAPPER = (rs, rowNum) ->
+ new ExecutionRecord(
+ rs.getString("execution_id"), rs.getString("route_id"),
+ rs.getString("agent_id"), rs.getString("group_name"),
+ rs.getString("status"), rs.getString("correlation_id"),
+ rs.getString("exchange_id"),
+ toInstant(rs, "start_time"), toInstant(rs, "end_time"),
+ rs.getObject("duration_ms") != null ? rs.getLong("duration_ms") : null,
+ rs.getString("error_message"), rs.getString("error_stacktrace"),
+ rs.getString("diagram_content_hash"));
+
+ private static final RowMapper PROCESSOR_MAPPER = (rs, rowNum) ->
+ new ProcessorRecord(
+ rs.getString("execution_id"), rs.getString("processor_id"),
+ rs.getString("processor_type"), rs.getString("diagram_node_id"),
+ rs.getString("group_name"), rs.getString("route_id"),
+ rs.getInt("depth"), rs.getString("parent_processor_id"),
+ rs.getString("status"),
+ toInstant(rs, "start_time"), toInstant(rs, "end_time"),
+ rs.getObject("duration_ms") != null ? rs.getLong("duration_ms") : null,
+ rs.getString("error_message"), rs.getString("error_stacktrace"),
+ rs.getString("input_body"), rs.getString("output_body"),
+ rs.getString("input_headers"), rs.getString("output_headers"));
+
+ private static Instant toInstant(ResultSet rs, String column) throws SQLException {
+ Timestamp ts = rs.getTimestamp(column);
+ return ts != null ? ts.toInstant() : null;
+ }
+}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresMetricsStore.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresMetricsStore.java
new file mode 100644
index 00000000..8b8fed63
--- /dev/null
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresMetricsStore.java
@@ -0,0 +1,42 @@
+package com.cameleer3.server.app.storage;
+
+import com.cameleer3.server.core.storage.MetricsStore;
+import com.cameleer3.server.core.storage.model.MetricsSnapshot;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.stereotype.Repository;
+
+import java.sql.Timestamp;
+import java.util.List;
+
+@Repository
+public class PostgresMetricsStore implements MetricsStore {
+
+ private static final ObjectMapper MAPPER = new ObjectMapper();
+ private final JdbcTemplate jdbc;
+
+ public PostgresMetricsStore(JdbcTemplate jdbc) {
+ this.jdbc = jdbc;
+ }
+
+ @Override
+ public void insertBatch(List snapshots) {
+ jdbc.batchUpdate("""
+ INSERT INTO agent_metrics (agent_id, metric_name, metric_value, tags,
+ collected_at, server_received_at)
+ VALUES (?, ?, ?, ?::jsonb, ?, now())
+ """,
+ snapshots.stream().map(s -> new Object[]{
+ s.agentId(), s.metricName(), s.metricValue(),
+ tagsToJson(s.tags()),
+ Timestamp.from(s.collectedAt())
+ }).toList());
+ }
+
+ private String tagsToJson(java.util.Map tags) {
+ if (tags == null || tags.isEmpty()) return null;
+ try { return MAPPER.writeValueAsString(tags); }
+ catch (JsonProcessingException e) { return null; }
+ }
+}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresOidcConfigRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresOidcConfigRepository.java
new file mode 100644
index 00000000..6da18993
--- /dev/null
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresOidcConfigRepository.java
@@ -0,0 +1,59 @@
+package com.cameleer3.server.app.storage;
+
+import com.cameleer3.server.core.security.OidcConfig;
+import com.cameleer3.server.core.security.OidcConfigRepository;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.stereotype.Repository;
+
+import java.sql.Array;
+import java.util.List;
+import java.util.Optional;
+
+@Repository
+public class PostgresOidcConfigRepository implements OidcConfigRepository {
+
+ private final JdbcTemplate jdbc;
+
+ public PostgresOidcConfigRepository(JdbcTemplate jdbc) {
+ this.jdbc = jdbc;
+ }
+
+ @Override
+ public Optional find() {
+ var results = jdbc.query(
+ "SELECT * FROM oidc_config WHERE config_id = 'default'",
+ (rs, rowNum) -> {
+ Array arr = rs.getArray("default_roles");
+ String[] roles = arr != null ? (String[]) arr.getArray() : new String[0];
+ return new OidcConfig(
+ rs.getBoolean("enabled"), rs.getString("issuer_uri"),
+ rs.getString("client_id"), rs.getString("client_secret"),
+ rs.getString("roles_claim"), List.of(roles),
+ rs.getBoolean("auto_signup"), rs.getString("display_name_claim"));
+ });
+ return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0));
+ }
+
+ @Override
+ public void save(OidcConfig config) {
+ jdbc.update("""
+ INSERT INTO oidc_config (config_id, enabled, issuer_uri, client_id, client_secret,
+ roles_claim, default_roles, auto_signup, display_name_claim, updated_at)
+ VALUES ('default', ?, ?, ?, ?, ?, ?, ?, ?, now())
+ ON CONFLICT (config_id) DO UPDATE SET
+ enabled = EXCLUDED.enabled, issuer_uri = EXCLUDED.issuer_uri,
+ client_id = EXCLUDED.client_id, client_secret = EXCLUDED.client_secret,
+ roles_claim = EXCLUDED.roles_claim, default_roles = EXCLUDED.default_roles,
+ auto_signup = EXCLUDED.auto_signup, display_name_claim = EXCLUDED.display_name_claim,
+ updated_at = now()
+ """,
+ config.enabled(), config.issuerUri(), config.clientId(), config.clientSecret(),
+ config.rolesClaim(), config.defaultRoles().toArray(new String[0]),
+ config.autoSignup(), config.displayNameClaim());
+ }
+
+ @Override
+ public void delete() {
+ jdbc.update("DELETE FROM oidc_config WHERE config_id = 'default'");
+ }
+}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresStatsStore.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresStatsStore.java
new file mode 100644
index 00000000..fff9b70f
--- /dev/null
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresStatsStore.java
@@ -0,0 +1,187 @@
+package com.cameleer3.server.app.storage;
+
+import com.cameleer3.server.core.search.ExecutionStats;
+import com.cameleer3.server.core.search.StatsTimeseries;
+import com.cameleer3.server.core.search.StatsTimeseries.TimeseriesBucket;
+import com.cameleer3.server.core.storage.StatsStore;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.stereotype.Repository;
+
+import java.sql.Timestamp;
+import java.time.Duration;
+import java.time.Instant;
+import java.time.temporal.ChronoUnit;
+import java.util.ArrayList;
+import java.util.List;
+
+@Repository
+public class PostgresStatsStore implements StatsStore {
+
+ private final JdbcTemplate jdbc;
+
+ public PostgresStatsStore(JdbcTemplate jdbc) {
+ this.jdbc = jdbc;
+ }
+
+ @Override
+ public ExecutionStats stats(Instant from, Instant to) {
+ return queryStats("stats_1m_all", from, to, List.of());
+ }
+
+ @Override
+ public ExecutionStats statsForApp(Instant from, Instant to, String groupName) {
+ return queryStats("stats_1m_app", from, to, List.of(
+ new Filter("group_name", groupName)));
+ }
+
+ @Override
+ public ExecutionStats statsForRoute(Instant from, Instant to, String routeId, List agentIds) {
+ // Note: agentIds is accepted for interface compatibility but not filterable
+ // on the continuous aggregate (it groups by route_id, not agent_id).
+ // All agents for the same route contribute to the same aggregate.
+ return queryStats("stats_1m_route", from, to, List.of(
+ new Filter("route_id", routeId)));
+ }
+
+ @Override
+ public ExecutionStats statsForProcessor(Instant from, Instant to, String routeId, String processorType) {
+ return queryStats("stats_1m_processor", from, to, List.of(
+ new Filter("route_id", routeId),
+ new Filter("processor_type", processorType)));
+ }
+
+ @Override
+ public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount) {
+ return queryTimeseries("stats_1m_all", from, to, bucketCount, List.of(), true);
+ }
+
+ @Override
+ public StatsTimeseries timeseriesForApp(Instant from, Instant to, int bucketCount, String groupName) {
+ return queryTimeseries("stats_1m_app", from, to, bucketCount, List.of(
+ new Filter("group_name", groupName)), true);
+ }
+
+ @Override
+ public StatsTimeseries timeseriesForRoute(Instant from, Instant to, int bucketCount,
+ String routeId, List agentIds) {
+ return queryTimeseries("stats_1m_route", from, to, bucketCount, List.of(
+ new Filter("route_id", routeId)), true);
+ }
+
+ @Override
+ public StatsTimeseries timeseriesForProcessor(Instant from, Instant to, int bucketCount,
+ String routeId, String processorType) {
+ // stats_1m_processor does NOT have running_count column
+ return queryTimeseries("stats_1m_processor", from, to, bucketCount, List.of(
+ new Filter("route_id", routeId),
+ new Filter("processor_type", processorType)), false);
+ }
+
+ private record Filter(String column, String value) {}
+
+ private ExecutionStats queryStats(String view, Instant from, Instant to, List filters) {
+ // running_count only exists on execution-level aggregates, not processor
+ boolean hasRunning = !view.equals("stats_1m_processor");
+ String runningCol = hasRunning ? "COALESCE(SUM(running_count), 0)" : "0";
+
+ String sql = "SELECT COALESCE(SUM(total_count), 0) AS total_count, " +
+ "COALESCE(SUM(failed_count), 0) AS failed_count, " +
+ "CASE WHEN SUM(total_count) > 0 THEN SUM(duration_sum) / SUM(total_count) ELSE 0 END AS avg_duration, " +
+ "COALESCE(MAX(p99_duration), 0) AS p99_duration, " +
+ runningCol + " AS active_count " +
+ "FROM " + view + " WHERE bucket >= ? AND bucket < ?";
+
+ List params = new ArrayList<>();
+ params.add(Timestamp.from(from));
+ params.add(Timestamp.from(to));
+ for (Filter f : filters) {
+ sql += " AND " + f.column() + " = ?";
+ params.add(f.value());
+ }
+
+ long totalCount = 0, failedCount = 0, avgDuration = 0, p99Duration = 0, activeCount = 0;
+ var currentResult = jdbc.query(sql, (rs, rowNum) -> new long[]{
+ rs.getLong("total_count"), rs.getLong("failed_count"),
+ rs.getLong("avg_duration"), rs.getLong("p99_duration"),
+ rs.getLong("active_count")
+ }, params.toArray());
+ if (!currentResult.isEmpty()) {
+ long[] r = currentResult.get(0);
+ totalCount = r[0]; failedCount = r[1]; avgDuration = r[2];
+ p99Duration = r[3]; activeCount = r[4];
+ }
+
+ // Previous period (shifted back 24h)
+ Instant prevFrom = from.minus(Duration.ofHours(24));
+ Instant prevTo = to.minus(Duration.ofHours(24));
+ List prevParams = new ArrayList<>();
+ prevParams.add(Timestamp.from(prevFrom));
+ prevParams.add(Timestamp.from(prevTo));
+ for (Filter f : filters) prevParams.add(f.value());
+ String prevSql = sql; // same shape, different time params
+
+ long prevTotal = 0, prevFailed = 0, prevAvg = 0, prevP99 = 0;
+ var prevResult = jdbc.query(prevSql, (rs, rowNum) -> new long[]{
+ rs.getLong("total_count"), rs.getLong("failed_count"),
+ rs.getLong("avg_duration"), rs.getLong("p99_duration")
+ }, prevParams.toArray());
+ if (!prevResult.isEmpty()) {
+ long[] r = prevResult.get(0);
+ prevTotal = r[0]; prevFailed = r[1]; prevAvg = r[2]; prevP99 = r[3];
+ }
+
+ // Today total (from midnight UTC)
+ Instant todayStart = Instant.now().truncatedTo(ChronoUnit.DAYS);
+ List todayParams = new ArrayList<>();
+ todayParams.add(Timestamp.from(todayStart));
+ todayParams.add(Timestamp.from(Instant.now()));
+ for (Filter f : filters) todayParams.add(f.value());
+ String todaySql = sql;
+
+ long totalToday = 0;
+ var todayResult = jdbc.query(todaySql, (rs, rowNum) -> rs.getLong("total_count"),
+ todayParams.toArray());
+ if (!todayResult.isEmpty()) totalToday = todayResult.get(0);
+
+ return new ExecutionStats(
+ totalCount, failedCount, avgDuration, p99Duration, activeCount,
+ totalToday, prevTotal, prevFailed, prevAvg, prevP99);
+ }
+
+ private StatsTimeseries queryTimeseries(String view, Instant from, Instant to,
+ int bucketCount, List filters,
+ boolean hasRunningCount) {
+ long intervalSeconds = Duration.between(from, to).toSeconds() / Math.max(bucketCount, 1);
+ if (intervalSeconds < 60) intervalSeconds = 60;
+
+ String runningCol = hasRunningCount ? "COALESCE(SUM(running_count), 0)" : "0";
+
+ String sql = "SELECT time_bucket(? * INTERVAL '1 second', bucket) AS period, " +
+ "COALESCE(SUM(total_count), 0) AS total_count, " +
+ "COALESCE(SUM(failed_count), 0) AS failed_count, " +
+ "CASE WHEN SUM(total_count) > 0 THEN SUM(duration_sum) / SUM(total_count) ELSE 0 END AS avg_duration, " +
+ "COALESCE(MAX(p99_duration), 0) AS p99_duration, " +
+ runningCol + " AS active_count " +
+ "FROM " + view + " WHERE bucket >= ? AND bucket < ?";
+
+ List params = new ArrayList<>();
+ params.add(intervalSeconds);
+ params.add(Timestamp.from(from));
+ params.add(Timestamp.from(to));
+ for (Filter f : filters) {
+ sql += " AND " + f.column() + " = ?";
+ params.add(f.value());
+ }
+ sql += " GROUP BY period ORDER BY period";
+
+ List buckets = jdbc.query(sql, (rs, rowNum) ->
+ new TimeseriesBucket(
+ rs.getTimestamp("period").toInstant(),
+ rs.getLong("total_count"), rs.getLong("failed_count"),
+ rs.getLong("avg_duration"), rs.getLong("p99_duration"),
+ rs.getLong("active_count")
+ ), params.toArray());
+
+ return new StatsTimeseries(buckets);
+ }
+}
diff --git a/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresUserRepository.java b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresUserRepository.java
new file mode 100644
index 00000000..6985b2a3
--- /dev/null
+++ b/cameleer3-server-app/src/main/java/com/cameleer3/server/app/storage/PostgresUserRepository.java
@@ -0,0 +1,71 @@
+package com.cameleer3.server.app.storage;
+
+import com.cameleer3.server.core.security.UserInfo;
+import com.cameleer3.server.core.security.UserRepository;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.stereotype.Repository;
+
+import java.sql.Array;
+import java.sql.Timestamp;
+import java.util.List;
+import java.util.Optional;
+
+@Repository
+public class PostgresUserRepository implements UserRepository {
+
+ private final JdbcTemplate jdbc;
+
+ public PostgresUserRepository(JdbcTemplate jdbc) {
+ this.jdbc = jdbc;
+ }
+
+ @Override
+ public Optional findById(String userId) {
+ var results = jdbc.query(
+ "SELECT * FROM users WHERE user_id = ?",
+ (rs, rowNum) -> mapUser(rs), userId);
+ return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0));
+ }
+
+ @Override
+ public List findAll() {
+ return jdbc.query("SELECT * FROM users ORDER BY user_id",
+ (rs, rowNum) -> mapUser(rs));
+ }
+
+ @Override
+ public void upsert(UserInfo user) {
+ jdbc.update("""
+ INSERT INTO users (user_id, provider, email, display_name, roles, created_at, updated_at)
+ VALUES (?, ?, ?, ?, ?, now(), now())
+ ON CONFLICT (user_id) DO UPDATE SET
+ provider = EXCLUDED.provider, email = EXCLUDED.email,
+ display_name = EXCLUDED.display_name, roles = EXCLUDED.roles,
+ updated_at = now()
+ """,
+ user.userId(), user.provider(), user.email(), user.displayName(),
+ user.roles().toArray(new String[0]));
+ }
+
+ @Override
+ public void updateRoles(String userId, List roles) {
+ jdbc.update("UPDATE users SET roles = ?, updated_at = now() WHERE user_id = ?",
+ roles.toArray(new String[0]), userId);
+ }
+
+ @Override
+ public void delete(String userId) {
+ jdbc.update("DELETE FROM users WHERE user_id = ?", userId);
+ }
+
+ private UserInfo mapUser(java.sql.ResultSet rs) throws java.sql.SQLException {
+ Array rolesArray = rs.getArray("roles");
+ String[] roles = rolesArray != null ? (String[]) rolesArray.getArray() : new String[0];
+ java.sql.Timestamp ts = rs.getTimestamp("created_at");
+ java.time.Instant createdAt = ts != null ? ts.toInstant() : null;
+ return new UserInfo(
+ rs.getString("user_id"), rs.getString("provider"),
+ rs.getString("email"), rs.getString("display_name"),
+ List.of(roles), createdAt);
+ }
+}
diff --git a/cameleer3-server-app/src/main/resources/application.yml b/cameleer3-server-app/src/main/resources/application.yml
index 31974bae..26ca2f2d 100644
--- a/cameleer3-server-app/src/main/resources/application.yml
+++ b/cameleer3-server-app/src/main/resources/application.yml
@@ -3,10 +3,13 @@ server:
spring:
datasource:
- url: jdbc:ch://localhost:8123/cameleer3
+ url: jdbc:postgresql://localhost:5432/cameleer3
username: cameleer
- password: cameleer_dev
- driver-class-name: com.clickhouse.jdbc.ClickHouseDriver
+ password: ${CAMELEER_DB_PASSWORD:cameleer_dev}
+ driver-class-name: org.postgresql.Driver
+ flyway:
+ enabled: true
+ locations: classpath:db/migration
mvc:
async:
request-timeout: -1
@@ -29,8 +32,14 @@ ingestion:
batch-size: 5000
flush-interval-ms: 1000
-clickhouse:
- ttl-days: 30
+opensearch:
+ url: ${OPENSEARCH_URL:http://localhost:9200}
+ queue-size: ${CAMELEER_OPENSEARCH_QUEUE_SIZE:10000}
+ debounce-ms: ${CAMELEER_OPENSEARCH_DEBOUNCE_MS:2000}
+
+cameleer:
+ body-size-limit: ${CAMELEER_BODY_SIZE_LIMIT:16384}
+ retention-days: ${CAMELEER_RETENTION_DAYS:30}
security:
access-token-expiry-ms: 3600000
diff --git a/cameleer3-server-app/src/main/resources/clickhouse/01-schema.sql b/cameleer3-server-app/src/main/resources/clickhouse/01-schema.sql
deleted file mode 100644
index ab56da70..00000000
--- a/cameleer3-server-app/src/main/resources/clickhouse/01-schema.sql
+++ /dev/null
@@ -1,57 +0,0 @@
--- Cameleer3 ClickHouse Schema
--- Tables for route executions, route diagrams, and agent metrics.
-
-CREATE TABLE IF NOT EXISTS route_executions (
- execution_id String,
- route_id LowCardinality(String),
- agent_id LowCardinality(String),
- status LowCardinality(String),
- start_time DateTime64(3, 'UTC'),
- end_time Nullable(DateTime64(3, 'UTC')),
- duration_ms UInt64,
- correlation_id String,
- exchange_id String,
- error_message String DEFAULT '',
- error_stacktrace String DEFAULT '',
- -- Nested processor executions stored as parallel arrays
- processor_ids Array(String),
- processor_types Array(LowCardinality(String)),
- processor_starts Array(DateTime64(3, 'UTC')),
- processor_ends Array(DateTime64(3, 'UTC')),
- processor_durations Array(UInt64),
- processor_statuses Array(LowCardinality(String)),
- -- Metadata
- server_received_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC'),
- -- Skip indexes
- INDEX idx_correlation correlation_id TYPE bloom_filter GRANULARITY 4,
- INDEX idx_error error_message TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4
-)
-ENGINE = MergeTree()
-PARTITION BY toYYYYMMDD(start_time)
-ORDER BY (agent_id, status, start_time, execution_id)
-TTL toDateTime(start_time) + toIntervalDay(30)
-SETTINGS ttl_only_drop_parts = 1;
-
-CREATE TABLE IF NOT EXISTS route_diagrams (
- content_hash String,
- route_id LowCardinality(String),
- agent_id LowCardinality(String),
- definition String,
- created_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC')
-)
-ENGINE = ReplacingMergeTree(created_at)
-ORDER BY (content_hash);
-
-CREATE TABLE IF NOT EXISTS agent_metrics (
- agent_id LowCardinality(String),
- collected_at DateTime64(3, 'UTC'),
- metric_name LowCardinality(String),
- metric_value Float64,
- tags Map(String, String),
- server_received_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC')
-)
-ENGINE = MergeTree()
-PARTITION BY toYYYYMMDD(collected_at)
-ORDER BY (agent_id, metric_name, collected_at)
-TTL toDateTime(collected_at) + toIntervalDay(30)
-SETTINGS ttl_only_drop_parts = 1;
diff --git a/cameleer3-server-app/src/main/resources/clickhouse/02-search-columns.sql b/cameleer3-server-app/src/main/resources/clickhouse/02-search-columns.sql
deleted file mode 100644
index 2b11b435..00000000
--- a/cameleer3-server-app/src/main/resources/clickhouse/02-search-columns.sql
+++ /dev/null
@@ -1,25 +0,0 @@
--- Phase 2: Schema extension for search, detail, and diagram linking columns.
--- Adds exchange snapshot data, processor tree metadata, and diagram content hash.
-
-ALTER TABLE route_executions
- ADD COLUMN IF NOT EXISTS exchange_bodies String DEFAULT '',
- ADD COLUMN IF NOT EXISTS exchange_headers String DEFAULT '',
- ADD COLUMN IF NOT EXISTS processor_depths Array(UInt16) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_parent_indexes Array(Int32) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_error_messages Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_error_stacktraces Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_input_bodies Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_output_bodies Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_input_headers Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_output_headers Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_diagram_node_ids Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS diagram_content_hash String DEFAULT '';
-
--- Skip indexes for full-text search on new text columns
-ALTER TABLE route_executions
- ADD INDEX IF NOT EXISTS idx_exchange_bodies exchange_bodies TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4,
- ADD INDEX IF NOT EXISTS idx_exchange_headers exchange_headers TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4;
-
--- Skip index on error_stacktrace (not indexed in 01-schema.sql, needed for SRCH-05)
-ALTER TABLE route_executions
- ADD INDEX IF NOT EXISTS idx_error_stacktrace error_stacktrace TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4;
diff --git a/cameleer3-server-app/src/main/resources/clickhouse/03-users.sql b/cameleer3-server-app/src/main/resources/clickhouse/03-users.sql
deleted file mode 100644
index 9dc7ce7a..00000000
--- a/cameleer3-server-app/src/main/resources/clickhouse/03-users.sql
+++ /dev/null
@@ -1,10 +0,0 @@
-CREATE TABLE IF NOT EXISTS users (
- user_id String,
- provider LowCardinality(String),
- email String DEFAULT '',
- display_name String DEFAULT '',
- roles Array(LowCardinality(String)),
- created_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC'),
- updated_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC')
-) ENGINE = ReplacingMergeTree(updated_at)
-ORDER BY (user_id);
diff --git a/cameleer3-server-app/src/main/resources/clickhouse/04-oidc-config.sql b/cameleer3-server-app/src/main/resources/clickhouse/04-oidc-config.sql
deleted file mode 100644
index 35b4d896..00000000
--- a/cameleer3-server-app/src/main/resources/clickhouse/04-oidc-config.sql
+++ /dev/null
@@ -1,13 +0,0 @@
-CREATE TABLE IF NOT EXISTS oidc_config (
- config_id String DEFAULT 'default',
- enabled Bool DEFAULT false,
- issuer_uri String DEFAULT '',
- client_id String DEFAULT '',
- client_secret String DEFAULT '',
- roles_claim String DEFAULT 'realm_access.roles',
- default_roles Array(LowCardinality(String)),
- auto_signup Bool DEFAULT true,
- display_name_claim String DEFAULT 'name',
- updated_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC')
-) ENGINE = ReplacingMergeTree(updated_at)
-ORDER BY (config_id);
diff --git a/cameleer3-server-app/src/main/resources/clickhouse/05-oidc-auto-signup.sql b/cameleer3-server-app/src/main/resources/clickhouse/05-oidc-auto-signup.sql
deleted file mode 100644
index 643a69ea..00000000
--- a/cameleer3-server-app/src/main/resources/clickhouse/05-oidc-auto-signup.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE oidc_config ADD COLUMN IF NOT EXISTS auto_signup Bool DEFAULT true;
diff --git a/cameleer3-server-app/src/main/resources/clickhouse/06-oidc-display-name-claim.sql b/cameleer3-server-app/src/main/resources/clickhouse/06-oidc-display-name-claim.sql
deleted file mode 100644
index ef1870bd..00000000
--- a/cameleer3-server-app/src/main/resources/clickhouse/06-oidc-display-name-claim.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE oidc_config ADD COLUMN IF NOT EXISTS display_name_claim String DEFAULT 'name';
diff --git a/cameleer3-server-app/src/main/resources/clickhouse/07-stats-rollup.sql b/cameleer3-server-app/src/main/resources/clickhouse/07-stats-rollup.sql
deleted file mode 100644
index 5d1efe24..00000000
--- a/cameleer3-server-app/src/main/resources/clickhouse/07-stats-rollup.sql
+++ /dev/null
@@ -1,35 +0,0 @@
--- Pre-aggregated 5-minute stats rollup for route executions.
--- Uses AggregatingMergeTree with -State/-Merge combinators so intermediate
--- aggregates can be merged across arbitrary time windows and dimensions.
-
--- Drop existing objects to allow schema changes (MV must be dropped before table)
-DROP VIEW IF EXISTS route_execution_stats_5m_mv;
-DROP TABLE IF EXISTS route_execution_stats_5m;
-
-CREATE TABLE route_execution_stats_5m (
- bucket DateTime('UTC'),
- route_id LowCardinality(String),
- agent_id LowCardinality(String),
- total_count AggregateFunction(count),
- failed_count AggregateFunction(countIf, UInt8),
- duration_sum AggregateFunction(sum, UInt64),
- p99_duration AggregateFunction(quantileTDigest(0.99), UInt64)
-)
-ENGINE = AggregatingMergeTree()
-PARTITION BY toYYYYMMDD(bucket)
-ORDER BY (agent_id, route_id, bucket)
-TTL bucket + toIntervalDay(30)
-SETTINGS ttl_only_drop_parts = 1;
-
-CREATE MATERIALIZED VIEW route_execution_stats_5m_mv
-TO route_execution_stats_5m
-AS SELECT
- toStartOfFiveMinutes(start_time) AS bucket,
- route_id,
- agent_id,
- countState() AS total_count,
- countIfState(status = 'FAILED') AS failed_count,
- sumState(duration_ms) AS duration_sum,
- quantileTDigestState(0.99)(duration_ms) AS p99_duration
-FROM route_executions
-GROUP BY bucket, route_id, agent_id;
diff --git a/cameleer3-server-app/src/main/resources/clickhouse/08-stats-rollup-backfill.sql b/cameleer3-server-app/src/main/resources/clickhouse/08-stats-rollup-backfill.sql
deleted file mode 100644
index 5e80a23a..00000000
--- a/cameleer3-server-app/src/main/resources/clickhouse/08-stats-rollup-backfill.sql
+++ /dev/null
@@ -1,16 +0,0 @@
--- One-time idempotent backfill of existing route_executions into the
--- 5-minute stats rollup table. Safe for repeated execution — the WHERE
--- clause skips the INSERT if the target table already contains data.
-
-INSERT INTO route_execution_stats_5m
-SELECT
- toStartOfFiveMinutes(start_time) AS bucket,
- route_id,
- agent_id,
- countState() AS total_count,
- countIfState(status = 'FAILED') AS failed_count,
- sumState(duration_ms) AS duration_sum,
- quantileTDigestState(0.99)(duration_ms) AS p99_duration
-FROM route_executions
-WHERE (SELECT count() FROM route_execution_stats_5m) = 0
-GROUP BY bucket, route_id, agent_id;
diff --git a/cameleer3-server-app/src/main/resources/db/migration/V1__extensions.sql b/cameleer3-server-app/src/main/resources/db/migration/V1__extensions.sql
new file mode 100644
index 00000000..26970d8f
--- /dev/null
+++ b/cameleer3-server-app/src/main/resources/db/migration/V1__extensions.sql
@@ -0,0 +1,2 @@
+CREATE EXTENSION IF NOT EXISTS timescaledb;
+CREATE EXTENSION IF NOT EXISTS timescaledb_toolkit;
diff --git a/cameleer3-server-app/src/main/resources/db/migration/V2__executions.sql b/cameleer3-server-app/src/main/resources/db/migration/V2__executions.sql
new file mode 100644
index 00000000..e1eeb2fe
--- /dev/null
+++ b/cameleer3-server-app/src/main/resources/db/migration/V2__executions.sql
@@ -0,0 +1,25 @@
+CREATE TABLE executions (
+ execution_id TEXT NOT NULL,
+ route_id TEXT NOT NULL,
+ agent_id TEXT NOT NULL,
+ group_name TEXT NOT NULL,
+ status TEXT NOT NULL,
+ correlation_id TEXT,
+ exchange_id TEXT,
+ start_time TIMESTAMPTZ NOT NULL,
+ end_time TIMESTAMPTZ,
+ duration_ms BIGINT,
+ error_message TEXT,
+ error_stacktrace TEXT,
+ diagram_content_hash TEXT,
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ PRIMARY KEY (execution_id, start_time)
+);
+
+SELECT create_hypertable('executions', 'start_time', chunk_time_interval => INTERVAL '1 day');
+
+CREATE INDEX idx_executions_agent_time ON executions (agent_id, start_time DESC);
+CREATE INDEX idx_executions_route_time ON executions (route_id, start_time DESC);
+CREATE INDEX idx_executions_group_time ON executions (group_name, start_time DESC);
+CREATE INDEX idx_executions_correlation ON executions (correlation_id);
diff --git a/cameleer3-server-app/src/main/resources/db/migration/V3__processor_executions.sql b/cameleer3-server-app/src/main/resources/db/migration/V3__processor_executions.sql
new file mode 100644
index 00000000..433514b0
--- /dev/null
+++ b/cameleer3-server-app/src/main/resources/db/migration/V3__processor_executions.sql
@@ -0,0 +1,28 @@
+CREATE TABLE processor_executions (
+ id BIGSERIAL,
+ execution_id TEXT NOT NULL,
+ processor_id TEXT NOT NULL,
+ processor_type TEXT NOT NULL,
+ diagram_node_id TEXT,
+ group_name TEXT NOT NULL,
+ route_id TEXT NOT NULL,
+ depth INT NOT NULL,
+ parent_processor_id TEXT,
+ status TEXT NOT NULL,
+ start_time TIMESTAMPTZ NOT NULL,
+ end_time TIMESTAMPTZ,
+ duration_ms BIGINT,
+ error_message TEXT,
+ error_stacktrace TEXT,
+ input_body TEXT,
+ output_body TEXT,
+ input_headers JSONB,
+ output_headers JSONB,
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ UNIQUE (execution_id, processor_id, start_time)
+);
+
+SELECT create_hypertable('processor_executions', 'start_time', chunk_time_interval => INTERVAL '1 day');
+
+CREATE INDEX idx_proc_exec_execution ON processor_executions (execution_id);
+CREATE INDEX idx_proc_exec_type_time ON processor_executions (processor_type, start_time DESC);
diff --git a/cameleer3-server-app/src/main/resources/db/migration/V4__agent_metrics.sql b/cameleer3-server-app/src/main/resources/db/migration/V4__agent_metrics.sql
new file mode 100644
index 00000000..4ecd6cac
--- /dev/null
+++ b/cameleer3-server-app/src/main/resources/db/migration/V4__agent_metrics.sql
@@ -0,0 +1,12 @@
+CREATE TABLE agent_metrics (
+ agent_id TEXT NOT NULL,
+ metric_name TEXT NOT NULL,
+ metric_value DOUBLE PRECISION NOT NULL,
+ tags JSONB,
+ collected_at TIMESTAMPTZ NOT NULL,
+ server_received_at TIMESTAMPTZ NOT NULL DEFAULT now()
+);
+
+SELECT create_hypertable('agent_metrics', 'collected_at', chunk_time_interval => INTERVAL '1 day');
+
+CREATE INDEX idx_metrics_agent_name ON agent_metrics (agent_id, metric_name, collected_at DESC);
diff --git a/cameleer3-server-app/src/main/resources/db/migration/V5__route_diagrams.sql b/cameleer3-server-app/src/main/resources/db/migration/V5__route_diagrams.sql
new file mode 100644
index 00000000..85eb2355
--- /dev/null
+++ b/cameleer3-server-app/src/main/resources/db/migration/V5__route_diagrams.sql
@@ -0,0 +1,9 @@
+CREATE TABLE route_diagrams (
+ content_hash TEXT PRIMARY KEY,
+ route_id TEXT NOT NULL,
+ agent_id TEXT NOT NULL,
+ definition TEXT NOT NULL,
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now()
+);
+
+CREATE INDEX idx_diagrams_route_agent ON route_diagrams (route_id, agent_id);
diff --git a/cameleer3-server-app/src/main/resources/db/migration/V6__users.sql b/cameleer3-server-app/src/main/resources/db/migration/V6__users.sql
new file mode 100644
index 00000000..079db7dd
--- /dev/null
+++ b/cameleer3-server-app/src/main/resources/db/migration/V6__users.sql
@@ -0,0 +1,9 @@
+CREATE TABLE users (
+ user_id TEXT PRIMARY KEY,
+ provider TEXT NOT NULL,
+ email TEXT,
+ display_name TEXT,
+ roles TEXT[] NOT NULL DEFAULT '{}',
+ created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
+);
diff --git a/cameleer3-server-app/src/main/resources/db/migration/V7__oidc_config.sql b/cameleer3-server-app/src/main/resources/db/migration/V7__oidc_config.sql
new file mode 100644
index 00000000..e46a2196
--- /dev/null
+++ b/cameleer3-server-app/src/main/resources/db/migration/V7__oidc_config.sql
@@ -0,0 +1,12 @@
+CREATE TABLE oidc_config (
+ config_id TEXT PRIMARY KEY DEFAULT 'default',
+ enabled BOOLEAN NOT NULL DEFAULT false,
+ issuer_uri TEXT,
+ client_id TEXT,
+ client_secret TEXT,
+ roles_claim TEXT,
+ default_roles TEXT[] NOT NULL DEFAULT '{}',
+ auto_signup BOOLEAN DEFAULT false,
+ display_name_claim TEXT,
+ updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
+);
diff --git a/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql b/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql
new file mode 100644
index 00000000..056ba07c
--- /dev/null
+++ b/cameleer3-server-app/src/main/resources/db/migration/V8__continuous_aggregates.sql
@@ -0,0 +1,87 @@
+-- Global stats
+CREATE MATERIALIZED VIEW stats_1m_all
+WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+SELECT
+ time_bucket('1 minute', start_time) AS bucket,
+ COUNT(*) AS total_count,
+ COUNT(*) FILTER (WHERE status = 'FAILED') AS failed_count,
+ COUNT(*) FILTER (WHERE status = 'RUNNING') AS running_count,
+ SUM(duration_ms) AS duration_sum,
+ MAX(duration_ms) AS duration_max,
+ approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration
+FROM executions
+WHERE status IS NOT NULL
+GROUP BY bucket
+WITH NO DATA;
+
+SELECT add_continuous_aggregate_policy('stats_1m_all',
+ start_offset => INTERVAL '1 hour',
+ end_offset => INTERVAL '1 minute',
+ schedule_interval => INTERVAL '1 minute');
+
+-- Per-application stats
+CREATE MATERIALIZED VIEW stats_1m_app
+WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+SELECT
+ time_bucket('1 minute', start_time) AS bucket,
+ group_name,
+ COUNT(*) AS total_count,
+ COUNT(*) FILTER (WHERE status = 'FAILED') AS failed_count,
+ COUNT(*) FILTER (WHERE status = 'RUNNING') AS running_count,
+ SUM(duration_ms) AS duration_sum,
+ MAX(duration_ms) AS duration_max,
+ approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration
+FROM executions
+WHERE status IS NOT NULL
+GROUP BY bucket, group_name
+WITH NO DATA;
+
+SELECT add_continuous_aggregate_policy('stats_1m_app',
+ start_offset => INTERVAL '1 hour',
+ end_offset => INTERVAL '1 minute',
+ schedule_interval => INTERVAL '1 minute');
+
+-- Per-route stats
+CREATE MATERIALIZED VIEW stats_1m_route
+WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+SELECT
+ time_bucket('1 minute', start_time) AS bucket,
+ group_name,
+ route_id,
+ COUNT(*) AS total_count,
+ COUNT(*) FILTER (WHERE status = 'FAILED') AS failed_count,
+ COUNT(*) FILTER (WHERE status = 'RUNNING') AS running_count,
+ SUM(duration_ms) AS duration_sum,
+ MAX(duration_ms) AS duration_max,
+ approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration
+FROM executions
+WHERE status IS NOT NULL
+GROUP BY bucket, group_name, route_id
+WITH NO DATA;
+
+SELECT add_continuous_aggregate_policy('stats_1m_route',
+ start_offset => INTERVAL '1 hour',
+ end_offset => INTERVAL '1 minute',
+ schedule_interval => INTERVAL '1 minute');
+
+-- Per-processor stats (uses denormalized group_name/route_id on processor_executions)
+CREATE MATERIALIZED VIEW stats_1m_processor
+WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
+SELECT
+ time_bucket('1 minute', start_time) AS bucket,
+ group_name,
+ route_id,
+ processor_type,
+ COUNT(*) AS total_count,
+ COUNT(*) FILTER (WHERE status = 'FAILED') AS failed_count,
+ SUM(duration_ms) AS duration_sum,
+ MAX(duration_ms) AS duration_max,
+ approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration
+FROM processor_executions
+GROUP BY bucket, group_name, route_id, processor_type
+WITH NO DATA;
+
+SELECT add_continuous_aggregate_policy('stats_1m_processor',
+ start_offset => INTERVAL '1 hour',
+ end_offset => INTERVAL '1 minute',
+ schedule_interval => INTERVAL '1 minute');
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractClickHouseIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractClickHouseIT.java
deleted file mode 100644
index d1271adb..00000000
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractClickHouseIT.java
+++ /dev/null
@@ -1,82 +0,0 @@
-package com.cameleer3.server.app;
-
-import org.springframework.beans.factory.annotation.Autowired;
-import org.springframework.boot.test.context.SpringBootTest;
-import org.springframework.jdbc.core.JdbcTemplate;
-import org.springframework.test.context.ActiveProfiles;
-import org.springframework.test.context.DynamicPropertyRegistry;
-import org.springframework.test.context.DynamicPropertySource;
-import org.testcontainers.clickhouse.ClickHouseContainer;
-
-import org.junit.jupiter.api.BeforeAll;
-
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.Statement;
-
-/**
- * Base class for integration tests requiring a ClickHouse instance.
- *
- * Uses Testcontainers to spin up a ClickHouse server and initializes the schema
- * from {@code clickhouse/init/01-schema.sql} before the first test runs.
- * Subclasses get a {@link JdbcTemplate} for direct database assertions.
- *
- * Container lifecycle is managed manually (started once, shared across all test classes).
- */
-@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
-@ActiveProfiles("test")
-public abstract class AbstractClickHouseIT {
-
- protected static final ClickHouseContainer CLICKHOUSE;
-
- static {
- CLICKHOUSE = new ClickHouseContainer("clickhouse/clickhouse-server:25.3");
- CLICKHOUSE.start();
- }
-
- @Autowired
- protected JdbcTemplate jdbcTemplate;
-
- @DynamicPropertySource
- static void overrideProperties(DynamicPropertyRegistry registry) {
- registry.add("spring.datasource.url", CLICKHOUSE::getJdbcUrl);
- registry.add("spring.datasource.username", CLICKHOUSE::getUsername);
- registry.add("spring.datasource.password", CLICKHOUSE::getPassword);
- }
-
- @BeforeAll
- static void initSchema() throws Exception {
- // Surefire runs from the module directory; schema is in the project root
- Path baseDir = Path.of("clickhouse/init");
- if (!Files.exists(baseDir)) {
- baseDir = Path.of("../clickhouse/init");
- }
-
- // Load all schema files in order
- String[] schemaFiles = {"01-schema.sql", "02-search-columns.sql", "03-users.sql", "04-oidc-config.sql", "05-oidc-auto-signup.sql"};
-
- try (Connection conn = DriverManager.getConnection(
- CLICKHOUSE.getJdbcUrl(),
- CLICKHOUSE.getUsername(),
- CLICKHOUSE.getPassword());
- Statement stmt = conn.createStatement()) {
-
- for (String schemaFile : schemaFiles) {
- Path schemaPath = baseDir.resolve(schemaFile);
- if (Files.exists(schemaPath)) {
- String sql = Files.readString(schemaPath, StandardCharsets.UTF_8);
- // Execute each statement separately (separated by semicolons)
- for (String statement : sql.split(";")) {
- String trimmed = statement.trim();
- if (!trimmed.isEmpty()) {
- stmt.execute(trimmed);
- }
- }
- }
- }
- }
- }
-}
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java
new file mode 100644
index 00000000..40962efd
--- /dev/null
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/AbstractPostgresIT.java
@@ -0,0 +1,47 @@
+package com.cameleer3.server.app;
+
+import org.opensearch.testcontainers.OpensearchContainer;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.context.SpringBootTest;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.test.context.ActiveProfiles;
+import org.springframework.test.context.DynamicPropertyRegistry;
+import org.springframework.test.context.DynamicPropertySource;
+import org.testcontainers.containers.PostgreSQLContainer;
+import org.testcontainers.utility.DockerImageName;
+
+@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT)
+@ActiveProfiles("test")
+public abstract class AbstractPostgresIT {
+
+ private static final DockerImageName TIMESCALEDB_IMAGE =
+ DockerImageName.parse("timescale/timescaledb-ha:pg16")
+ .asCompatibleSubstituteFor("postgres");
+
+ static final PostgreSQLContainer> postgres;
+ static final OpensearchContainer> opensearch;
+
+ static {
+ postgres = new PostgreSQLContainer<>(TIMESCALEDB_IMAGE)
+ .withDatabaseName("cameleer3")
+ .withUsername("cameleer")
+ .withPassword("test");
+ postgres.start();
+
+ opensearch = new OpensearchContainer<>("opensearchproject/opensearch:2.19.0");
+ opensearch.start();
+ }
+
+ @Autowired
+ protected JdbcTemplate jdbcTemplate;
+
+ @DynamicPropertySource
+ static void configureProperties(DynamicPropertyRegistry registry) {
+ registry.add("spring.datasource.url", postgres::getJdbcUrl);
+ registry.add("spring.datasource.username", postgres::getUsername);
+ registry.add("spring.datasource.password", postgres::getPassword);
+ registry.add("spring.datasource.driver-class-name", () -> "org.postgresql.Driver");
+ registry.add("spring.flyway.enabled", () -> "true");
+ registry.add("opensearch.url", opensearch::getHttpHostAddress);
+ }
+}
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/TestSecurityHelper.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/TestSecurityHelper.java
index 9867cd7a..bafe8d0a 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/TestSecurityHelper.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/TestSecurityHelper.java
@@ -27,11 +27,40 @@ public class TestSecurityHelper {
}
/**
- * Registers a test agent and returns a valid JWT access token for it.
+ * Registers a test agent and returns a valid JWT access token with AGENT role.
*/
public String registerTestAgent(String agentId) {
agentRegistryService.register(agentId, "test", "test-group", "1.0", List.of(), Map.of());
- return jwtService.createAccessToken(agentId, "test-group");
+ return jwtService.createAccessToken(agentId, "test-group", List.of("AGENT"));
+ }
+
+ /**
+ * Returns a valid JWT access token with the given roles (no agent registration).
+ */
+ public String createToken(String subject, String group, List roles) {
+ return jwtService.createAccessToken(subject, group, roles);
+ }
+
+ /**
+ * Returns a valid JWT access token with OPERATOR role.
+ */
+ public String operatorToken() {
+ // Subject must start with "user:" for JwtAuthenticationFilter to treat it as a UI user token
+ return jwtService.createAccessToken("user:test-operator", "user", List.of("OPERATOR"));
+ }
+
+ /**
+ * Returns a valid JWT access token with ADMIN role.
+ */
+ public String adminToken() {
+ return jwtService.createAccessToken("user:test-admin", "user", List.of("ADMIN"));
+ }
+
+ /**
+ * Returns a valid JWT access token with VIEWER role.
+ */
+ public String viewerToken() {
+ return jwtService.createAccessToken("user:test-viewer", "user", List.of("VIEWER"));
}
/**
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentCommandControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentCommandControllerIT.java
index ab98f30d..b6d791d7 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentCommandControllerIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentCommandControllerIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -18,7 +18,7 @@ import java.util.UUID;
import static org.assertj.core.api.Assertions.assertThat;
-class AgentCommandControllerIT extends AbstractClickHouseIT {
+class AgentCommandControllerIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -29,11 +29,13 @@ class AgentCommandControllerIT extends AbstractClickHouseIT {
@Autowired
private TestSecurityHelper securityHelper;
- private String jwt;
+ private String agentJwt;
+ private String operatorJwt;
@BeforeEach
void setUp() {
- jwt = securityHelper.registerTestAgent("test-agent-command-it");
+ agentJwt = securityHelper.registerTestAgent("test-agent-command-it");
+ operatorJwt = securityHelper.operatorToken();
}
private ResponseEntity registerAgent(String agentId, String name, String group) {
@@ -65,7 +67,7 @@ class AgentCommandControllerIT extends AbstractClickHouseIT {
ResponseEntity response = restTemplate.postForEntity(
"/api/v1/agents/" + agentId + "/commands",
- new HttpEntity<>(commandJson, securityHelper.authHeaders(jwt)),
+ new HttpEntity<>(commandJson, securityHelper.authHeaders(operatorJwt)),
String.class);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED);
@@ -88,7 +90,7 @@ class AgentCommandControllerIT extends AbstractClickHouseIT {
ResponseEntity response = restTemplate.postForEntity(
"/api/v1/agents/groups/" + group + "/commands",
- new HttpEntity<>(commandJson, securityHelper.authHeaders(jwt)),
+ new HttpEntity<>(commandJson, securityHelper.authHeaders(operatorJwt)),
String.class);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED);
@@ -110,7 +112,7 @@ class AgentCommandControllerIT extends AbstractClickHouseIT {
ResponseEntity response = restTemplate.postForEntity(
"/api/v1/agents/commands",
- new HttpEntity<>(commandJson, securityHelper.authHeaders(jwt)),
+ new HttpEntity<>(commandJson, securityHelper.authHeaders(operatorJwt)),
String.class);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED);
@@ -131,7 +133,7 @@ class AgentCommandControllerIT extends AbstractClickHouseIT {
ResponseEntity cmdResponse = restTemplate.postForEntity(
"/api/v1/agents/" + agentId + "/commands",
- new HttpEntity<>(commandJson, securityHelper.authHeaders(jwt)),
+ new HttpEntity<>(commandJson, securityHelper.authHeaders(operatorJwt)),
String.class);
JsonNode cmdBody = objectMapper.readTree(cmdResponse.getBody());
@@ -140,7 +142,7 @@ class AgentCommandControllerIT extends AbstractClickHouseIT {
ResponseEntity ackResponse = restTemplate.exchange(
"/api/v1/agents/" + agentId + "/commands/" + commandId + "/ack",
HttpMethod.POST,
- new HttpEntity<>(securityHelper.authHeadersNoBody(jwt)),
+ new HttpEntity<>(securityHelper.authHeadersNoBody(agentJwt)),
Void.class);
assertThat(ackResponse.getStatusCode()).isEqualTo(HttpStatus.OK);
@@ -154,7 +156,7 @@ class AgentCommandControllerIT extends AbstractClickHouseIT {
ResponseEntity response = restTemplate.exchange(
"/api/v1/agents/" + agentId + "/commands/nonexistent-cmd-id/ack",
HttpMethod.POST,
- new HttpEntity<>(securityHelper.authHeadersNoBody(jwt)),
+ new HttpEntity<>(securityHelper.authHeadersNoBody(agentJwt)),
Void.class);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND);
@@ -168,7 +170,7 @@ class AgentCommandControllerIT extends AbstractClickHouseIT {
ResponseEntity response = restTemplate.postForEntity(
"/api/v1/agents/nonexistent-agent-xyz/commands",
- new HttpEntity<>(commandJson, securityHelper.authHeaders(jwt)),
+ new HttpEntity<>(commandJson, securityHelper.authHeaders(operatorJwt)),
String.class);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.NOT_FOUND);
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentRegistrationControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentRegistrationControllerIT.java
index 652f92d8..12cbf02e 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentRegistrationControllerIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentRegistrationControllerIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -16,7 +16,7 @@ import org.springframework.http.ResponseEntity;
import static org.assertj.core.api.Assertions.assertThat;
-class AgentRegistrationControllerIT extends AbstractClickHouseIT {
+class AgentRegistrationControllerIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -28,10 +28,12 @@ class AgentRegistrationControllerIT extends AbstractClickHouseIT {
private TestSecurityHelper securityHelper;
private String jwt;
+ private String viewerJwt;
@BeforeEach
void setUp() {
jwt = securityHelper.registerTestAgent("test-agent-registration-it");
+ viewerJwt = securityHelper.viewerToken();
}
private ResponseEntity registerAgent(String agentId, String name) {
@@ -114,7 +116,7 @@ class AgentRegistrationControllerIT extends AbstractClickHouseIT {
ResponseEntity response = restTemplate.exchange(
"/api/v1/agents",
HttpMethod.GET,
- new HttpEntity<>(securityHelper.authHeadersNoBody(jwt)),
+ new HttpEntity<>(securityHelper.authHeadersNoBody(viewerJwt)),
String.class);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.OK);
@@ -131,7 +133,7 @@ class AgentRegistrationControllerIT extends AbstractClickHouseIT {
ResponseEntity response = restTemplate.exchange(
"/api/v1/agents?status=LIVE",
HttpMethod.GET,
- new HttpEntity<>(securityHelper.authHeadersNoBody(jwt)),
+ new HttpEntity<>(securityHelper.authHeadersNoBody(viewerJwt)),
String.class);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.OK);
@@ -148,7 +150,7 @@ class AgentRegistrationControllerIT extends AbstractClickHouseIT {
ResponseEntity response = restTemplate.exchange(
"/api/v1/agents?status=INVALID",
HttpMethod.GET,
- new HttpEntity<>(securityHelper.authHeadersNoBody(jwt)),
+ new HttpEntity<>(securityHelper.authHeadersNoBody(viewerJwt)),
String.class);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.BAD_REQUEST);
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentSseControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentSseControllerIT.java
index 1af16ed5..78a3743f 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentSseControllerIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/AgentSseControllerIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.junit.jupiter.api.BeforeEach;
@@ -30,7 +30,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.await;
-class AgentSseControllerIT extends AbstractClickHouseIT {
+class AgentSseControllerIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -45,10 +45,12 @@ class AgentSseControllerIT extends AbstractClickHouseIT {
private int port;
private String jwt;
+ private String operatorJwt;
@BeforeEach
void setUp() {
jwt = securityHelper.registerTestAgent("test-agent-sse-it");
+ operatorJwt = securityHelper.operatorToken();
}
private ResponseEntity registerAgent(String agentId, String name, String group) {
@@ -76,7 +78,7 @@ class AgentSseControllerIT extends AbstractClickHouseIT {
return restTemplate.postForEntity(
"/api/v1/agents/" + agentId + "/commands",
- new HttpEntity<>(json, securityHelper.authHeaders(jwt)),
+ new HttpEntity<>(json, securityHelper.authHeaders(operatorJwt)),
String.class);
}
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/BackpressureIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/BackpressureIT.java
index aa8baa17..ee3db1fe 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/BackpressureIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/BackpressureIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import com.cameleer3.server.core.ingestion.IngestionService;
import org.junit.jupiter.api.BeforeEach;
@@ -13,21 +13,20 @@ import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.test.context.TestPropertySource;
-import static java.util.concurrent.TimeUnit.SECONDS;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.awaitility.Awaitility.await;
/**
- * Tests backpressure behavior when write buffers are full.
- * Uses a tiny buffer (capacity=5) and a very long flush interval
- * to prevent the scheduler from draining the buffer during the test.
+ * Tests backpressure behavior when the metrics write buffer is full.
+ *
+ * Execution and diagram ingestion are now synchronous (no buffers).
+ * Only the metrics pipeline still uses a write buffer with backpressure.
*/
@TestPropertySource(properties = {
"ingestion.buffer-capacity=5",
"ingestion.batch-size=5",
"ingestion.flush-interval-ms=60000" // 60s -- effectively no flush during test
})
-class BackpressureIT extends AbstractClickHouseIT {
+class BackpressureIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -47,34 +46,31 @@ class BackpressureIT extends AbstractClickHouseIT {
}
@Test
- void whenBufferFull_returns503WithRetryAfter() {
- // Wait for any initial scheduled flush to complete, then fill buffer via batch POST
- await().atMost(5, SECONDS).until(() -> ingestionService.getExecutionBufferDepth() == 0);
-
- // Fill the buffer completely with a batch of 5
+ void whenMetricsBufferFull_returns503WithRetryAfter() {
+ // Fill the metrics buffer completely with a batch of 5
String batchJson = """
[
- {"routeId":"bp-0","exchangeId":"bp-e0","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]},
- {"routeId":"bp-1","exchangeId":"bp-e1","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]},
- {"routeId":"bp-2","exchangeId":"bp-e2","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]},
- {"routeId":"bp-3","exchangeId":"bp-e3","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]},
- {"routeId":"bp-4","exchangeId":"bp-e4","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]}
+ {"agentId":"bp-agent","timestamp":"2026-03-11T10:00:00Z","metrics":{}},
+ {"agentId":"bp-agent","timestamp":"2026-03-11T10:00:01Z","metrics":{}},
+ {"agentId":"bp-agent","timestamp":"2026-03-11T10:00:02Z","metrics":{}},
+ {"agentId":"bp-agent","timestamp":"2026-03-11T10:00:03Z","metrics":{}},
+ {"agentId":"bp-agent","timestamp":"2026-03-11T10:00:04Z","metrics":{}}
]
""";
ResponseEntity batchResponse = restTemplate.postForEntity(
- "/api/v1/data/executions",
+ "/api/v1/data/metrics",
new HttpEntity<>(batchJson, authHeaders),
String.class);
assertThat(batchResponse.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED);
// Now buffer should be full -- next POST should get 503
String overflowJson = """
- {"routeId":"bp-overflow","exchangeId":"bp-overflow-e","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]}
+ [{"agentId":"bp-agent","timestamp":"2026-03-11T10:00:05Z","metrics":{}}]
""";
ResponseEntity response = restTemplate.postForEntity(
- "/api/v1/data/executions",
+ "/api/v1/data/metrics",
new HttpEntity<>(overflowJson, authHeaders),
String.class);
@@ -83,25 +79,17 @@ class BackpressureIT extends AbstractClickHouseIT {
}
@Test
- void bufferedDataNotLost_afterBackpressure() {
- // Post data to the diagram buffer (separate from executions used above)
- for (int i = 0; i < 3; i++) {
- String json = String.format("""
- {
- "routeId": "bp-persist-diagram-%d",
- "version": 1,
- "nodes": [],
- "edges": []
- }
- """, i);
+ void executionIngestion_isSynchronous_returnsAccepted() {
+ String json = """
+ {"routeId":"bp-sync","exchangeId":"bp-sync-e","status":"COMPLETED","startTime":"2026-03-11T10:00:00Z","durationMs":100,"processors":[]}
+ """;
- restTemplate.postForEntity(
- "/api/v1/data/diagrams",
- new HttpEntity<>(json, authHeaders),
- String.class);
- }
+ ResponseEntity response = restTemplate.postForEntity(
+ "/api/v1/data/executions",
+ new HttpEntity<>(json, authHeaders),
+ String.class);
- // Data is in the buffer. Verify the buffer has data.
- assertThat(ingestionService.getDiagramBufferDepth()).isGreaterThanOrEqualTo(3);
+ // Synchronous ingestion always returns 202 (no buffer to overflow)
+ assertThat(response.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED);
}
}
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java
index cdd29df7..5229f883 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DetailControllerIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -23,7 +23,7 @@ import static org.awaitility.Awaitility.await;
* Integration tests for the detail and processor snapshot endpoints.
*/
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
-class DetailControllerIT extends AbstractClickHouseIT {
+class DetailControllerIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -34,6 +34,7 @@ class DetailControllerIT extends AbstractClickHouseIT {
private final ObjectMapper objectMapper = new ObjectMapper();
private String jwt;
+ private String viewerJwt;
private String seededExecutionId;
/**
@@ -43,6 +44,7 @@ class DetailControllerIT extends AbstractClickHouseIT {
@BeforeAll
void seedTestData() {
jwt = securityHelper.registerTestAgent("test-agent-detail-it");
+ viewerJwt = securityHelper.viewerToken();
String json = """
{
@@ -121,13 +123,13 @@ class DetailControllerIT extends AbstractClickHouseIT {
// Wait for flush and get the execution_id
await().atMost(10, SECONDS).untilAsserted(() -> {
Integer count = jdbcTemplate.queryForObject(
- "SELECT count() FROM route_executions WHERE route_id = 'detail-test-route'",
+ "SELECT count(*) FROM executions WHERE route_id = 'detail-test-route'",
Integer.class);
assertThat(count).isGreaterThanOrEqualTo(1);
});
seededExecutionId = jdbcTemplate.queryForObject(
- "SELECT execution_id FROM route_executions WHERE route_id = 'detail-test-route' LIMIT 1",
+ "SELECT execution_id FROM executions WHERE route_id = 'detail-test-route' LIMIT 1",
String.class);
}
@@ -217,7 +219,7 @@ class DetailControllerIT extends AbstractClickHouseIT {
}
private ResponseEntity detailGet(String path) {
- HttpHeaders headers = securityHelper.authHeadersNoBody(jwt);
+ HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt);
return restTemplate.exchange(
"/api/v1/executions" + path,
HttpMethod.GET,
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramControllerIT.java
index 832967fc..af6f274d 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramControllerIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramControllerIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -15,7 +15,7 @@ import static java.util.concurrent.TimeUnit.SECONDS;
import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.await;
-class DiagramControllerIT extends AbstractClickHouseIT {
+class DiagramControllerIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -53,7 +53,7 @@ class DiagramControllerIT extends AbstractClickHouseIT {
}
@Test
- void postDiagram_dataAppearsInClickHouseAfterFlush() {
+ void postDiagram_dataAppearsAfterFlush() {
String json = """
{
"routeId": "diagram-flush-route",
@@ -72,7 +72,7 @@ class DiagramControllerIT extends AbstractClickHouseIT {
await().atMost(10, SECONDS).untilAsserted(() -> {
Integer count = jdbcTemplate.queryForObject(
- "SELECT count() FROM route_diagrams WHERE route_id = 'diagram-flush-route'",
+ "SELECT count(*) FROM route_diagrams WHERE route_id = 'diagram-flush-route'",
Integer.class);
assertThat(count).isGreaterThanOrEqualTo(1);
});
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramRenderControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramRenderControllerIT.java
index f4b0308d..416dc78c 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramRenderControllerIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/DiagramRenderControllerIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -20,7 +20,7 @@ import static org.awaitility.Awaitility.await;
* Integration tests for {@link DiagramRenderController}.
* Seeds a diagram via the ingestion endpoint, then tests rendering.
*/
-class DiagramRenderControllerIT extends AbstractClickHouseIT {
+class DiagramRenderControllerIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -29,6 +29,7 @@ class DiagramRenderControllerIT extends AbstractClickHouseIT {
private TestSecurityHelper securityHelper;
private String jwt;
+ private String viewerJwt;
private String contentHash;
/**
@@ -37,6 +38,7 @@ class DiagramRenderControllerIT extends AbstractClickHouseIT {
@BeforeEach
void seedDiagram() {
jwt = securityHelper.registerTestAgent("test-agent-diagram-render-it");
+ viewerJwt = securityHelper.viewerToken();
String json = """
{
@@ -61,7 +63,7 @@ class DiagramRenderControllerIT extends AbstractClickHouseIT {
new HttpEntity<>(json, securityHelper.authHeaders(jwt)),
String.class);
- // Wait for flush to ClickHouse and retrieve the content hash
+ // Wait for flush to storage and retrieve the content hash
await().atMost(10, SECONDS).untilAsserted(() -> {
String hash = jdbcTemplate.queryForObject(
"SELECT content_hash FROM route_diagrams WHERE route_id = 'render-test-route' LIMIT 1",
@@ -73,7 +75,7 @@ class DiagramRenderControllerIT extends AbstractClickHouseIT {
@Test
void getSvg_withAcceptHeader_returnsSvg() {
- HttpHeaders headers = securityHelper.authHeadersNoBody(jwt);
+ HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt);
headers.set("Accept", "image/svg+xml");
ResponseEntity response = restTemplate.exchange(
@@ -90,7 +92,7 @@ class DiagramRenderControllerIT extends AbstractClickHouseIT {
@Test
void getJson_withAcceptHeader_returnsJson() {
- HttpHeaders headers = securityHelper.authHeadersNoBody(jwt);
+ HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt);
headers.set("Accept", "application/json");
ResponseEntity response = restTemplate.exchange(
@@ -107,7 +109,7 @@ class DiagramRenderControllerIT extends AbstractClickHouseIT {
@Test
void getNonExistentHash_returns404() {
- HttpHeaders headers = securityHelper.authHeadersNoBody(jwt);
+ HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt);
headers.set("Accept", "image/svg+xml");
ResponseEntity response = restTemplate.exchange(
@@ -122,7 +124,7 @@ class DiagramRenderControllerIT extends AbstractClickHouseIT {
@Test
void getWithNoAcceptHeader_defaultsToSvg() {
- HttpHeaders headers = securityHelper.authHeadersNoBody(jwt);
+ HttpHeaders headers = securityHelper.authHeadersNoBody(viewerJwt);
ResponseEntity response = restTemplate.exchange(
"/api/v1/diagrams/{hash}/render",
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ExecutionControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ExecutionControllerIT.java
index a2bf59d5..1ee376e2 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ExecutionControllerIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ExecutionControllerIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -16,7 +16,7 @@ import static java.util.concurrent.TimeUnit.SECONDS;
import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.await;
-class ExecutionControllerIT extends AbstractClickHouseIT {
+class ExecutionControllerIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -90,7 +90,7 @@ class ExecutionControllerIT extends AbstractClickHouseIT {
}
@Test
- void postExecution_dataAppearsInClickHouseAfterFlush() {
+ void postExecution_dataAppearsAfterFlush() {
String json = """
{
"routeId": "flush-test-route",
@@ -111,7 +111,7 @@ class ExecutionControllerIT extends AbstractClickHouseIT {
await().atMost(10, SECONDS).untilAsserted(() -> {
Integer count = jdbcTemplate.queryForObject(
- "SELECT count() FROM route_executions WHERE route_id = 'flush-test-route'",
+ "SELECT count(*) FROM executions WHERE route_id = 'flush-test-route'",
Integer.class);
assertThat(count).isGreaterThanOrEqualTo(1);
});
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ForwardCompatIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ForwardCompatIT.java
index 9d68212d..555bbf7c 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ForwardCompatIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/ForwardCompatIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -16,7 +16,7 @@ import static org.assertj.core.api.Assertions.assertThat;
* Integration test for forward compatibility (API-05).
* Verifies that unknown JSON fields in request bodies do not cause deserialization errors.
*/
-class ForwardCompatIT extends AbstractClickHouseIT {
+class ForwardCompatIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/HealthControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/HealthControllerIT.java
index c701af3b..9ca31887 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/HealthControllerIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/HealthControllerIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.web.client.TestRestTemplate;
@@ -8,9 +8,9 @@ import org.springframework.boot.test.web.client.TestRestTemplate;
import static org.assertj.core.api.Assertions.assertThat;
/**
- * Integration tests for the health endpoint and ClickHouse TTL verification.
+ * Integration tests for the health endpoint.
*/
-class HealthControllerIT extends AbstractClickHouseIT {
+class HealthControllerIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -28,20 +28,4 @@ class HealthControllerIT extends AbstractClickHouseIT {
var response = restTemplate.getForEntity("/api/v1/health", String.class);
assertThat(response.getStatusCode().value()).isEqualTo(200);
}
-
- @Test
- void ttlConfiguredOnRouteExecutions() {
- String createTable = jdbcTemplate.queryForObject(
- "SHOW CREATE TABLE route_executions", String.class);
- assertThat(createTable).containsIgnoringCase("TTL");
- assertThat(createTable).contains("toIntervalDay(30)");
- }
-
- @Test
- void ttlConfiguredOnAgentMetrics() {
- String createTable = jdbcTemplate.queryForObject(
- "SHOW CREATE TABLE agent_metrics", String.class);
- assertThat(createTable).containsIgnoringCase("TTL");
- assertThat(createTable).contains("toIntervalDay(30)");
- }
}
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/MetricsControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/MetricsControllerIT.java
index d0eb9793..8f0d8a14 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/MetricsControllerIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/MetricsControllerIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -15,7 +15,7 @@ import static java.util.concurrent.TimeUnit.SECONDS;
import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.await;
-class MetricsControllerIT extends AbstractClickHouseIT {
+class MetricsControllerIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -52,7 +52,7 @@ class MetricsControllerIT extends AbstractClickHouseIT {
}
@Test
- void postMetrics_dataAppearsInClickHouseAfterFlush() {
+ void postMetrics_dataAppearsAfterFlush() {
String json = """
[{
"agentId": "agent-flush-test",
@@ -70,7 +70,7 @@ class MetricsControllerIT extends AbstractClickHouseIT {
await().atMost(10, SECONDS).untilAsserted(() -> {
Integer count = jdbcTemplate.queryForObject(
- "SELECT count() FROM agent_metrics WHERE agent_id = 'agent-flush-test'",
+ "SELECT count(*) FROM agent_metrics WHERE agent_id = 'agent-flush-test'",
Integer.class);
assertThat(count).isGreaterThanOrEqualTo(1);
});
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/OpenApiIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/OpenApiIT.java
index e474f2b8..a8ceb053 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/OpenApiIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/OpenApiIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.web.client.TestRestTemplate;
@@ -10,7 +10,7 @@ import static org.assertj.core.api.Assertions.assertThat;
/**
* Integration tests for OpenAPI documentation endpoints.
*/
-class OpenApiIT extends AbstractClickHouseIT {
+class OpenApiIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java
index 8ae4e072..6a21552f 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/controller/SearchControllerIT.java
@@ -1,12 +1,11 @@
package com.cameleer3.server.app.controller;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
-import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestInstance;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.web.client.TestRestTemplate;
import org.springframework.http.HttpEntity;
@@ -23,8 +22,7 @@ import static org.awaitility.Awaitility.await;
* Integration tests for the search controller endpoints.
* Tests all filter types independently and in combination.
*/
-@TestInstance(TestInstance.Lifecycle.PER_CLASS)
-class SearchControllerIT extends AbstractClickHouseIT {
+class SearchControllerIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -34,15 +32,20 @@ class SearchControllerIT extends AbstractClickHouseIT {
private final ObjectMapper objectMapper = new ObjectMapper();
- private String jwt;
+ private static String jwt;
+ private static String viewerJwt;
+ private static boolean seeded;
/**
* Seed test data: Insert executions with varying statuses, times, durations,
* correlationIds, error messages, and exchange snapshot data.
*/
- @BeforeAll
+ @BeforeEach
void seedTestData() {
+ if (seeded) return;
+ seeded = true;
jwt = securityHelper.registerTestAgent("test-agent-search-it");
+ viewerJwt = securityHelper.viewerToken();
// Execution 1: COMPLETED, short duration, no errors
ingest("""
@@ -152,12 +155,18 @@ class SearchControllerIT extends AbstractClickHouseIT {
""", i, i, i, i, i));
}
- // Wait for all data to flush
- await().atMost(10, SECONDS).untilAsserted(() -> {
- Integer count = jdbcTemplate.queryForObject(
- "SELECT count() FROM route_executions WHERE route_id LIKE 'search-route-%'",
- Integer.class);
- assertThat(count).isEqualTo(10);
+ // Verify all data is in PostgreSQL (synchronous writes)
+ Integer count = jdbcTemplate.queryForObject(
+ "SELECT count(*) FROM executions WHERE route_id LIKE 'search-route-%'",
+ Integer.class);
+ assertThat(count).isEqualTo(10);
+
+ // Wait for async OpenSearch indexing (debounce + index time)
+ // Check for last seeded execution specifically to avoid false positives from other test classes
+ await().atMost(30, SECONDS).untilAsserted(() -> {
+ ResponseEntity r = searchGet("?correlationId=corr-page-10");
+ JsonNode body = objectMapper.readTree(r.getBody());
+ assertThat(body.get("total").asLong()).isGreaterThanOrEqualTo(1);
});
}
@@ -376,7 +385,7 @@ class SearchControllerIT extends AbstractClickHouseIT {
return restTemplate.exchange(
"/api/v1/search/executions",
HttpMethod.POST,
- new HttpEntity<>(jsonBody, securityHelper.authHeaders(jwt)),
+ new HttpEntity<>(jsonBody, securityHelper.authHeaders(viewerJwt)),
String.class);
}
}
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/interceptor/ProtocolVersionIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/interceptor/ProtocolVersionIT.java
index 26e8d5a9..35d0c0d1 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/interceptor/ProtocolVersionIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/interceptor/ProtocolVersionIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.interceptor;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -18,7 +18,7 @@ import static org.assertj.core.api.Assertions.assertThat;
* With security enabled, requests to protected endpoints need JWT auth
* to reach the interceptor layer.
*/
-class ProtocolVersionIT extends AbstractClickHouseIT {
+class ProtocolVersionIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java
new file mode 100644
index 00000000..cdb0bff4
--- /dev/null
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/search/OpenSearchIndexIT.java
@@ -0,0 +1,82 @@
+package com.cameleer3.server.app.search;
+
+import com.cameleer3.server.app.AbstractPostgresIT;
+import com.cameleer3.server.core.search.ExecutionSummary;
+import com.cameleer3.server.core.search.SearchRequest;
+import com.cameleer3.server.core.search.SearchResult;
+import com.cameleer3.server.core.storage.SearchIndex;
+import com.cameleer3.server.core.storage.model.ExecutionDocument;
+import com.cameleer3.server.core.storage.model.ExecutionDocument.ProcessorDoc;
+import org.junit.jupiter.api.Test;
+import org.opensearch.client.opensearch.OpenSearchClient;
+import org.opensearch.client.opensearch.indices.RefreshRequest;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import java.time.Instant;
+import java.util.List;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+// Extends AbstractPostgresIT which provides both PostgreSQL and OpenSearch testcontainers
+class OpenSearchIndexIT extends AbstractPostgresIT {
+
+ @Autowired
+ SearchIndex searchIndex;
+
+ @Autowired
+ OpenSearchClient openSearchClient;
+
+ @Test
+ void indexAndSearchByText() throws Exception {
+ Instant now = Instant.now();
+ ExecutionDocument doc = new ExecutionDocument(
+ "search-1", "route-a", "agent-1", "app-1",
+ "FAILED", "corr-1", "exch-1",
+ now, now.plusMillis(100), 100L,
+ "OrderNotFoundException: order-12345 not found", null,
+ List.of(new ProcessorDoc("proc-1", "log", "COMPLETED",
+ null, null, "request body with customer-99", null, null, null)));
+
+ searchIndex.index(doc);
+ refreshOpenSearchIndices();
+
+ SearchRequest request = new SearchRequest(
+ null, now.minusSeconds(60), now.plusSeconds(60),
+ null, null, null,
+ "OrderNotFoundException", null, null, null,
+ null, null, null, null, null,
+ 0, 50, "startTime", "desc");
+
+ SearchResult result = searchIndex.search(request);
+ assertTrue(result.total() > 0);
+ assertEquals("search-1", result.data().get(0).executionId());
+ }
+
+ @Test
+ void wildcardSearchFindsSubstring() throws Exception {
+ Instant now = Instant.now();
+ ExecutionDocument doc = new ExecutionDocument(
+ "wild-1", "route-b", "agent-1", "app-1",
+ "COMPLETED", null, null,
+ now, now.plusMillis(50), 50L, null, null,
+ List.of(new ProcessorDoc("proc-1", "bean", "COMPLETED",
+ null, null, "UniquePayloadIdentifier12345", null, null, null)));
+
+ searchIndex.index(doc);
+ refreshOpenSearchIndices();
+
+ SearchRequest request = new SearchRequest(
+ null, now.minusSeconds(60), now.plusSeconds(60),
+ null, null, null,
+ "PayloadIdentifier", null, null, null,
+ null, null, null, null, null,
+ 0, 50, "startTime", "desc");
+
+ SearchResult result = searchIndex.search(request);
+ assertTrue(result.total() > 0);
+ }
+
+ private void refreshOpenSearchIndices() throws Exception {
+ openSearchClient.indices().refresh(RefreshRequest.of(r -> r.index("executions-*")));
+ }
+}
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/BootstrapTokenIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/BootstrapTokenIT.java
index 1309517b..3ce87894 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/BootstrapTokenIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/BootstrapTokenIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.security;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.junit.jupiter.api.Test;
@@ -17,7 +17,7 @@ import static org.assertj.core.api.Assertions.assertThat;
/**
* Integration tests verifying bootstrap token validation on the registration endpoint.
*/
-class BootstrapTokenIT extends AbstractClickHouseIT {
+class BootstrapTokenIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/JwtRefreshIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/JwtRefreshIT.java
index 7e40e0a1..af033318 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/JwtRefreshIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/JwtRefreshIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.security;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import com.cameleer3.server.core.security.JwtService;
import com.fasterxml.jackson.databind.JsonNode;
@@ -20,7 +20,7 @@ import static org.assertj.core.api.Assertions.assertThat;
/**
* Integration tests for the JWT refresh flow.
*/
-class JwtRefreshIT extends AbstractClickHouseIT {
+class JwtRefreshIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -153,13 +153,13 @@ class JwtRefreshIT extends AbstractClickHouseIT {
JsonNode refreshBody2 = objectMapper.readTree(refreshResponse.getBody());
String newAccessToken = refreshBody2.get("accessToken").asText();
- // Use the new access token to hit a protected endpoint
+ // Use the new access token to hit a protected endpoint accessible by AGENT role
HttpHeaders authHeaders = new HttpHeaders();
authHeaders.set("Authorization", "Bearer " + newAccessToken);
authHeaders.set("X-Cameleer-Protocol-Version", "1");
ResponseEntity response = restTemplate.exchange(
- "/api/v1/agents",
+ "/api/v1/search/executions",
HttpMethod.GET,
new HttpEntity<>(authHeaders),
String.class);
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/RegistrationSecurityIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/RegistrationSecurityIT.java
index abd35524..54c17e71 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/RegistrationSecurityIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/RegistrationSecurityIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.security;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.junit.jupiter.api.Test;
@@ -19,7 +19,7 @@ import static org.assertj.core.api.Assertions.assertThat;
* Integration tests verifying that registration returns security credentials
* and that those credentials can be used to access protected endpoints.
*/
-class RegistrationSecurityIT extends AbstractClickHouseIT {
+class RegistrationSecurityIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -81,13 +81,13 @@ class RegistrationSecurityIT extends AbstractClickHouseIT {
JsonNode regBody = objectMapper.readTree(regResponse.getBody());
String accessToken = regBody.get("accessToken").asText();
- // Use the access token to hit a protected endpoint
+ // Use the access token to hit a protected endpoint accessible by AGENT role
HttpHeaders headers = new HttpHeaders();
headers.set("Authorization", "Bearer " + accessToken);
headers.set("X-Cameleer-Protocol-Version", "1");
ResponseEntity response = restTemplate.exchange(
- "/api/v1/agents",
+ "/api/v1/search/executions",
HttpMethod.GET,
new HttpEntity<>(headers),
String.class);
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SecurityFilterIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SecurityFilterIT.java
index 38f25766..a55c7190 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SecurityFilterIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SecurityFilterIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.security;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -19,7 +19,7 @@ import static org.assertj.core.api.Assertions.assertThat;
* Integration tests verifying that the SecurityFilterChain correctly
* protects endpoints and allows public access where configured.
*/
-class SecurityFilterIT extends AbstractClickHouseIT {
+class SecurityFilterIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -28,10 +28,12 @@ class SecurityFilterIT extends AbstractClickHouseIT {
private TestSecurityHelper securityHelper;
private String jwt;
+ private String viewerJwt;
@BeforeEach
void setUp() {
jwt = securityHelper.registerTestAgent("test-agent-security-filter-it");
+ viewerJwt = securityHelper.viewerToken();
}
@Test
@@ -53,7 +55,7 @@ class SecurityFilterIT extends AbstractClickHouseIT {
ResponseEntity response = restTemplate.exchange(
"/api/v1/agents",
HttpMethod.GET,
- new HttpEntity<>(securityHelper.authHeadersNoBody(jwt)),
+ new HttpEntity<>(securityHelper.authHeadersNoBody(viewerJwt)),
String.class);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.OK);
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SseSigningIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SseSigningIT.java
index ccbb8af9..11e0ed6b 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SseSigningIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/security/SseSigningIT.java
@@ -1,6 +1,7 @@
package com.cameleer3.server.app.security;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
+import com.cameleer3.server.app.TestSecurityHelper;
import com.cameleer3.server.core.security.Ed25519SigningService;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -44,7 +45,7 @@ import static org.awaitility.Awaitility.await;
* open SSE stream (with JWT query param) -> push config-update command (with JWT) ->
* receive SSE event -> verify signature field against server's Ed25519 public key.
*/
-class SseSigningIT extends AbstractClickHouseIT {
+class SseSigningIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -52,6 +53,9 @@ class SseSigningIT extends AbstractClickHouseIT {
@Autowired
private ObjectMapper objectMapper;
+ @Autowired
+ private TestSecurityHelper securityHelper;
+
@Autowired
private Ed25519SigningService ed25519SigningService;
@@ -165,6 +169,7 @@ class SseSigningIT extends AbstractClickHouseIT {
String agentId = "sse-sign-it-" + UUID.randomUUID().toString().substring(0, 8);
JsonNode registration = registerAgentWithAuth(agentId);
String accessToken = registration.get("accessToken").asText();
+ String operatorToken = securityHelper.operatorToken();
String serverPublicKey = registration.get("serverPublicKey").asText();
SseStream stream = openSseStream(agentId, accessToken);
@@ -177,7 +182,7 @@ class SseSigningIT extends AbstractClickHouseIT {
await().atMost(10, TimeUnit.SECONDS).pollInterval(200, TimeUnit.MILLISECONDS)
.ignoreExceptions()
.until(() -> {
- sendCommand(agentId, "config-update", originalPayload, accessToken);
+ sendCommand(agentId, "config-update", originalPayload, operatorToken);
List lines = stream.snapshot();
return lines.stream().anyMatch(l -> l.contains("event:config-update"));
});
@@ -221,6 +226,7 @@ class SseSigningIT extends AbstractClickHouseIT {
String agentId = "sse-sign-trace-" + UUID.randomUUID().toString().substring(0, 8);
JsonNode registration = registerAgentWithAuth(agentId);
String accessToken = registration.get("accessToken").asText();
+ String operatorToken = securityHelper.operatorToken();
String serverPublicKey = registration.get("serverPublicKey").asText();
SseStream stream = openSseStream(agentId, accessToken);
@@ -232,7 +238,7 @@ class SseSigningIT extends AbstractClickHouseIT {
await().atMost(10, TimeUnit.SECONDS).pollInterval(200, TimeUnit.MILLISECONDS)
.ignoreExceptions()
.until(() -> {
- sendCommand(agentId, "deep-trace", originalPayload, accessToken);
+ sendCommand(agentId, "deep-trace", originalPayload, operatorToken);
List lines = stream.snapshot();
return lines.stream().anyMatch(l -> l.contains("event:deep-trace"));
});
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/DiagramLinkingIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/DiagramLinkingIT.java
index 7322ec26..7805b133 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/DiagramLinkingIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/DiagramLinkingIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.storage;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -11,15 +11,13 @@ import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
-import static java.util.concurrent.TimeUnit.SECONDS;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.awaitility.Awaitility.await;
/**
* Integration test proving that diagram_content_hash is populated during
* execution ingestion when a RouteGraph exists for the same route+agent.
*/
-class DiagramLinkingIT extends AbstractClickHouseIT {
+class DiagramLinkingIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -59,12 +57,10 @@ class DiagramLinkingIT extends AbstractClickHouseIT {
String.class);
assertThat(diagramResponse.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED);
- await().atMost(10, SECONDS).untilAsserted(() -> {
- String hash = jdbcTemplate.queryForObject(
- "SELECT content_hash FROM route_diagrams WHERE route_id = 'diagram-link-route' LIMIT 1",
- String.class);
- assertThat(hash).isNotNull().isNotEmpty();
- });
+ String diagramHash = jdbcTemplate.queryForObject(
+ "SELECT content_hash FROM route_diagrams WHERE route_id = 'diagram-link-route' LIMIT 1",
+ String.class);
+ assertThat(diagramHash).isNotNull().isNotEmpty();
String executionJson = """
{
@@ -95,16 +91,14 @@ class DiagramLinkingIT extends AbstractClickHouseIT {
String.class);
assertThat(execResponse.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED);
- await().atMost(10, SECONDS).ignoreExceptions().untilAsserted(() -> {
- String hash = jdbcTemplate.queryForObject(
- "SELECT diagram_content_hash FROM route_executions WHERE route_id = 'diagram-link-route'",
- String.class);
- assertThat(hash)
- .isNotNull()
- .isNotEmpty()
- .hasSize(64)
- .matches("[a-f0-9]{64}");
- });
+ String hash = jdbcTemplate.queryForObject(
+ "SELECT diagram_content_hash FROM executions WHERE route_id = 'diagram-link-route'",
+ String.class);
+ assertThat(hash)
+ .isNotNull()
+ .isNotEmpty()
+ .hasSize(64)
+ .matches("[a-f0-9]{64}");
}
@Test
@@ -138,13 +132,11 @@ class DiagramLinkingIT extends AbstractClickHouseIT {
String.class);
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED);
- await().atMost(10, SECONDS).ignoreExceptions().untilAsserted(() -> {
- String hash = jdbcTemplate.queryForObject(
- "SELECT diagram_content_hash FROM route_executions WHERE route_id = 'no-diagram-route'",
- String.class);
- assertThat(hash)
- .isNotNull()
- .isEmpty();
- });
+ String hash = jdbcTemplate.queryForObject(
+ "SELECT diagram_content_hash FROM executions WHERE route_id = 'no-diagram-route'",
+ String.class);
+ assertThat(hash)
+ .isNotNull()
+ .isEmpty();
}
}
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/FlywayMigrationIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/FlywayMigrationIT.java
new file mode 100644
index 00000000..227a4236
--- /dev/null
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/FlywayMigrationIT.java
@@ -0,0 +1,36 @@
+package com.cameleer3.server.app.storage;
+
+import com.cameleer3.server.app.AbstractPostgresIT;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.jdbc.core.JdbcTemplate;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+class FlywayMigrationIT extends AbstractPostgresIT {
+
+ @Autowired
+ JdbcTemplate jdbcTemplate;
+
+ @Test
+ void allMigrationsApplySuccessfully() {
+ // Verify core tables exist
+ Integer execCount = jdbcTemplate.queryForObject(
+ "SELECT COUNT(*) FROM executions", Integer.class);
+ assertEquals(0, execCount);
+
+ Integer procCount = jdbcTemplate.queryForObject(
+ "SELECT COUNT(*) FROM processor_executions", Integer.class);
+ assertEquals(0, procCount);
+
+ Integer userCount = jdbcTemplate.queryForObject(
+ "SELECT COUNT(*) FROM users", Integer.class);
+ assertEquals(0, userCount);
+
+ // Verify continuous aggregates exist
+ Integer caggCount = jdbcTemplate.queryForObject(
+ "SELECT COUNT(*) FROM timescaledb_information.continuous_aggregates",
+ Integer.class);
+ assertEquals(4, caggCount);
+ }
+}
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/IngestionSchemaIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/IngestionSchemaIT.java
index d0d79e02..13cf60c8 100644
--- a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/IngestionSchemaIT.java
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/IngestionSchemaIT.java
@@ -1,6 +1,6 @@
package com.cameleer3.server.app.storage;
-import com.cameleer3.server.app.AbstractClickHouseIT;
+import com.cameleer3.server.app.AbstractPostgresIT;
import com.cameleer3.server.app.TestSecurityHelper;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
@@ -11,18 +11,16 @@ import org.springframework.http.HttpHeaders;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
-import java.util.Arrays;
import java.util.List;
+import java.util.Map;
-import static java.util.concurrent.TimeUnit.SECONDS;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.awaitility.Awaitility.await;
/**
- * Integration test verifying that Phase 2 schema columns are correctly populated
+ * Integration test verifying that processor execution data is correctly populated
* during ingestion of route executions with nested processors and exchange data.
*/
-class IngestionSchemaIT extends AbstractClickHouseIT {
+class IngestionSchemaIT extends AbstractPostgresIT {
@Autowired
private TestRestTemplate restTemplate;
@@ -39,7 +37,7 @@ class IngestionSchemaIT extends AbstractClickHouseIT {
}
@Test
- void processorTreeMetadata_depthsAndParentIndexesCorrect() {
+ void processorTreeMetadata_depthsAndParentIdsCorrect() {
String json = """
{
"routeId": "schema-test-tree",
@@ -94,44 +92,46 @@ class IngestionSchemaIT extends AbstractClickHouseIT {
postExecution(json);
- await().atMost(30, SECONDS).ignoreExceptions().untilAsserted(() -> {
- var depths = queryArray(
- "SELECT processor_depths FROM route_executions WHERE route_id = 'schema-test-tree'");
- assertThat(depths).containsExactly("0", "1", "2");
+ // Verify execution row exists
+ Integer execCount = jdbcTemplate.queryForObject(
+ "SELECT count(*) FROM executions WHERE execution_id = 'ex-tree-1'",
+ Integer.class);
+ assertThat(execCount).isEqualTo(1);
- var parentIndexes = queryArray(
- "SELECT processor_parent_indexes FROM route_executions WHERE route_id = 'schema-test-tree'");
- assertThat(parentIndexes).containsExactly("-1", "0", "1");
+ // Verify processors were flattened into processor_executions
+ List> processors = jdbcTemplate.queryForList(
+ "SELECT processor_id, processor_type, depth, parent_processor_id, " +
+ "diagram_node_id, input_body, output_body, input_headers " +
+ "FROM processor_executions WHERE execution_id = 'ex-tree-1' " +
+ "ORDER BY depth, processor_id");
+ assertThat(processors).hasSize(3);
- var diagramNodeIds = queryArray(
- "SELECT processor_diagram_node_ids FROM route_executions WHERE route_id = 'schema-test-tree'");
- assertThat(diagramNodeIds).containsExactly("node-root", "node-child", "node-grandchild");
+ // Root processor: depth=0, no parent
+ assertThat(processors.get(0).get("processor_id")).isEqualTo("root-proc");
+ assertThat(((Number) processors.get(0).get("depth")).intValue()).isEqualTo(0);
+ assertThat(processors.get(0).get("parent_processor_id")).isNull();
+ assertThat(processors.get(0).get("diagram_node_id")).isEqualTo("node-root");
+ assertThat(processors.get(0).get("input_body")).isEqualTo("root-input");
+ assertThat(processors.get(0).get("output_body")).isEqualTo("root-output");
+ assertThat(processors.get(0).get("input_headers").toString()).contains("Content-Type");
- String bodies = jdbcTemplate.queryForObject(
- "SELECT exchange_bodies FROM route_executions WHERE route_id = 'schema-test-tree'",
- String.class);
- assertThat(bodies).contains("root-input");
- assertThat(bodies).contains("root-output");
- assertThat(bodies).contains("child-input");
- assertThat(bodies).contains("child-output");
+ // Child processor: depth=1, parent=root-proc
+ assertThat(processors.get(1).get("processor_id")).isEqualTo("child-proc");
+ assertThat(((Number) processors.get(1).get("depth")).intValue()).isEqualTo(1);
+ assertThat(processors.get(1).get("parent_processor_id")).isEqualTo("root-proc");
+ assertThat(processors.get(1).get("diagram_node_id")).isEqualTo("node-child");
+ assertThat(processors.get(1).get("input_body")).isEqualTo("child-input");
+ assertThat(processors.get(1).get("output_body")).isEqualTo("child-output");
- var inputBodies = queryArray(
- "SELECT processor_input_bodies FROM route_executions WHERE route_id = 'schema-test-tree'");
- assertThat(inputBodies).containsExactly("root-input", "child-input", "");
-
- var outputBodies = queryArray(
- "SELECT processor_output_bodies FROM route_executions WHERE route_id = 'schema-test-tree'");
- assertThat(outputBodies).containsExactly("root-output", "child-output", "");
-
- var inputHeaders = queryArray(
- "SELECT processor_input_headers FROM route_executions WHERE route_id = 'schema-test-tree'");
- assertThat(inputHeaders.get(0)).contains("Content-Type");
- assertThat(inputHeaders.get(0)).contains("application/json");
- });
+ // Grandchild processor: depth=2, parent=child-proc
+ assertThat(processors.get(2).get("processor_id")).isEqualTo("grandchild-proc");
+ assertThat(((Number) processors.get(2).get("depth")).intValue()).isEqualTo(2);
+ assertThat(processors.get(2).get("parent_processor_id")).isEqualTo("child-proc");
+ assertThat(processors.get(2).get("diagram_node_id")).isEqualTo("node-grandchild");
}
@Test
- void exchangeBodiesContainsConcatenatedText() {
+ void exchangeBodiesStored() {
String json = """
{
"routeId": "schema-test-bodies",
@@ -140,14 +140,6 @@ class IngestionSchemaIT extends AbstractClickHouseIT {
"startTime": "2026-03-11T10:00:00Z",
"endTime": "2026-03-11T10:00:01Z",
"durationMs": 1000,
- "inputSnapshot": {
- "body": "route-level-input-body",
- "headers": {"X-Route": "header-value"}
- },
- "outputSnapshot": {
- "body": "route-level-output-body",
- "headers": {}
- },
"processors": [
{
"processorId": "proc-1",
@@ -166,21 +158,13 @@ class IngestionSchemaIT extends AbstractClickHouseIT {
postExecution(json);
- await().atMost(30, SECONDS).ignoreExceptions().untilAsserted(() -> {
- String bodies = jdbcTemplate.queryForObject(
- "SELECT exchange_bodies FROM route_executions WHERE route_id = 'schema-test-bodies'",
- String.class);
- assertThat(bodies).contains("processor-body-text");
- assertThat(bodies).contains("processor-output-text");
- assertThat(bodies).contains("route-level-input-body");
- assertThat(bodies).contains("route-level-output-body");
-
- String headers = jdbcTemplate.queryForObject(
- "SELECT exchange_headers FROM route_executions WHERE route_id = 'schema-test-bodies'",
- String.class);
- assertThat(headers).contains("X-Route");
- assertThat(headers).contains("header-value");
- });
+ // Verify processor body data
+ List> processors = jdbcTemplate.queryForList(
+ "SELECT input_body, output_body FROM processor_executions " +
+ "WHERE execution_id = 'ex-bodies-1'");
+ assertThat(processors).hasSize(1);
+ assertThat(processors.get(0).get("input_body")).isEqualTo("processor-body-text");
+ assertThat(processors.get(0).get("output_body")).isEqualTo("processor-output-text");
}
@Test
@@ -209,20 +193,19 @@ class IngestionSchemaIT extends AbstractClickHouseIT {
postExecution(json);
- await().atMost(30, SECONDS).ignoreExceptions().untilAsserted(() -> {
- String bodies = jdbcTemplate.queryForObject(
- "SELECT exchange_bodies FROM route_executions WHERE route_id = 'schema-test-null-snap'",
- String.class);
- assertThat(bodies).isNotNull();
+ // Verify execution exists
+ Integer count = jdbcTemplate.queryForObject(
+ "SELECT count(*) FROM executions WHERE execution_id = 'ex-null-1'",
+ Integer.class);
+ assertThat(count).isEqualTo(1);
- var depths = queryArray(
- "SELECT processor_depths FROM route_executions WHERE route_id = 'schema-test-null-snap'");
- assertThat(depths).containsExactly("0");
-
- var parentIndexes = queryArray(
- "SELECT processor_parent_indexes FROM route_executions WHERE route_id = 'schema-test-null-snap'");
- assertThat(parentIndexes).containsExactly("-1");
- });
+ // Verify processor with null bodies inserted successfully
+ List> processors = jdbcTemplate.queryForList(
+ "SELECT depth, parent_processor_id, input_body, output_body " +
+ "FROM processor_executions WHERE execution_id = 'ex-null-1'");
+ assertThat(processors).hasSize(1);
+ assertThat(((Number) processors.get(0).get("depth")).intValue()).isEqualTo(0);
+ assertThat(processors.get(0).get("parent_processor_id")).isNull();
}
private void postExecution(String json) {
@@ -233,22 +216,4 @@ class IngestionSchemaIT extends AbstractClickHouseIT {
assertThat(response.getStatusCode()).isEqualTo(HttpStatus.ACCEPTED);
}
-
- private List queryArray(String sql) {
- return jdbcTemplate.query(sql, (rs, rowNum) -> {
- Object arr = rs.getArray(1).getArray();
- if (arr instanceof Object[] objects) {
- return Arrays.stream(objects).map(Object::toString).toList();
- } else if (arr instanceof short[] shorts) {
- var result = new java.util.ArrayList();
- for (short s : shorts) result.add(String.valueOf(s));
- return result;
- } else if (arr instanceof int[] ints) {
- var result = new java.util.ArrayList();
- for (int v : ints) result.add(String.valueOf(v));
- return result;
- }
- return List.of();
- }).get(0);
- }
}
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresExecutionStoreIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresExecutionStoreIT.java
new file mode 100644
index 00000000..8b698d5e
--- /dev/null
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresExecutionStoreIT.java
@@ -0,0 +1,83 @@
+package com.cameleer3.server.app.storage;
+
+import com.cameleer3.server.app.AbstractPostgresIT;
+import com.cameleer3.server.core.storage.ExecutionStore;
+import com.cameleer3.server.core.storage.ExecutionStore.ExecutionRecord;
+import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+
+import java.time.Instant;
+import java.util.List;
+import java.util.Optional;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+class PostgresExecutionStoreIT extends AbstractPostgresIT {
+
+ @Autowired
+ ExecutionStore executionStore;
+
+ @Test
+ void upsertAndFindById() {
+ Instant now = Instant.now();
+ ExecutionRecord record = new ExecutionRecord(
+ "exec-1", "route-a", "agent-1", "app-1",
+ "COMPLETED", "corr-1", "exchange-1",
+ now, now.plusMillis(100), 100L,
+ null, null, null);
+
+ executionStore.upsert(record);
+ Optional found = executionStore.findById("exec-1");
+
+ assertTrue(found.isPresent());
+ assertEquals("exec-1", found.get().executionId());
+ assertEquals("COMPLETED", found.get().status());
+ }
+
+ @Test
+ void upsertDeduplicatesByExecutionId() {
+ Instant now = Instant.now();
+ ExecutionRecord first = new ExecutionRecord(
+ "exec-dup", "route-a", "agent-1", "app-1",
+ "RUNNING", null, null, now, null, null, null, null, null);
+ ExecutionRecord second = new ExecutionRecord(
+ "exec-dup", "route-a", "agent-1", "app-1",
+ "COMPLETED", null, null, now, now.plusMillis(200), 200L, null, null, null);
+
+ executionStore.upsert(first);
+ executionStore.upsert(second);
+
+ Optional found = executionStore.findById("exec-dup");
+ assertTrue(found.isPresent());
+ assertEquals("COMPLETED", found.get().status());
+ assertEquals(200L, found.get().durationMs());
+ }
+
+ @Test
+ void upsertProcessorsAndFind() {
+ Instant now = Instant.now();
+ ExecutionRecord exec = new ExecutionRecord(
+ "exec-proc", "route-a", "agent-1", "app-1",
+ "COMPLETED", null, null, now, now.plusMillis(50), 50L, null, null, null);
+ executionStore.upsert(exec);
+
+ List processors = List.of(
+ new ProcessorRecord("exec-proc", "proc-1", "log", null,
+ "app-1", "route-a", 0, null, "COMPLETED",
+ now, now.plusMillis(10), 10L, null, null,
+ "input body", "output body", null, null),
+ new ProcessorRecord("exec-proc", "proc-2", "to", null,
+ "app-1", "route-a", 1, "proc-1", "COMPLETED",
+ now.plusMillis(10), now.plusMillis(30), 20L, null, null,
+ null, null, null, null)
+ );
+ executionStore.upsertProcessors("exec-proc", now, "app-1", "route-a", processors);
+
+ List found = executionStore.findProcessors("exec-proc");
+ assertEquals(2, found.size());
+ assertEquals("proc-1", found.get(0).processorId());
+ assertEquals("proc-2", found.get(1).processorId());
+ assertEquals("proc-1", found.get(1).parentProcessorId());
+ }
+}
diff --git a/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java
new file mode 100644
index 00000000..c7bc748b
--- /dev/null
+++ b/cameleer3-server-app/src/test/java/com/cameleer3/server/app/storage/PostgresStatsStoreIT.java
@@ -0,0 +1,64 @@
+package com.cameleer3.server.app.storage;
+
+import com.cameleer3.server.app.AbstractPostgresIT;
+import com.cameleer3.server.core.search.ExecutionStats;
+import com.cameleer3.server.core.search.StatsTimeseries;
+import com.cameleer3.server.core.storage.ExecutionStore;
+import com.cameleer3.server.core.storage.ExecutionStore.ExecutionRecord;
+import com.cameleer3.server.core.storage.StatsStore;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.jdbc.core.JdbcTemplate;
+
+import java.time.Instant;
+import java.time.temporal.ChronoUnit;
+
+import static org.junit.jupiter.api.Assertions.*;
+
+class PostgresStatsStoreIT extends AbstractPostgresIT {
+
+ @Autowired StatsStore statsStore;
+ @Autowired ExecutionStore executionStore;
+ @Autowired JdbcTemplate jdbc;
+
+ @Test
+ void statsReturnsCountsForTimeWindow() {
+ // Use a unique route + statsForRoute to avoid data contamination from other tests
+ String uniqueRoute = "stats-route-" + System.nanoTime();
+ Instant base = Instant.now().minus(5, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.SECONDS);
+ insertExecution("stats-1-" + uniqueRoute, uniqueRoute, "app-stats", "COMPLETED", base, 100L);
+ insertExecution("stats-2-" + uniqueRoute, uniqueRoute, "app-stats", "FAILED", base.plusSeconds(10), 200L);
+ insertExecution("stats-3-" + uniqueRoute, uniqueRoute, "app-stats", "COMPLETED", base.plusSeconds(20), 50L);
+
+ // Force continuous aggregate refresh
+ jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_route', NOW() - INTERVAL '1 hour', NOW() + INTERVAL '1 hour')");
+
+ ExecutionStats stats = statsStore.statsForRoute(base.minusSeconds(60), base.plusSeconds(60), uniqueRoute, null);
+ assertEquals(3, stats.totalCount());
+ assertEquals(1, stats.failedCount());
+ }
+
+ @Test
+ void timeseriesReturnsBuckets() {
+ String uniqueRoute = "ts-route-" + System.nanoTime();
+ Instant base = Instant.now().minus(10, ChronoUnit.MINUTES).truncatedTo(ChronoUnit.MINUTES);
+ for (int i = 0; i < 10; i++) {
+ insertExecution("ts-" + i + "-" + uniqueRoute, uniqueRoute, "app-ts", "COMPLETED",
+ base.plusSeconds(i * 30), 100L + i);
+ }
+
+ jdbc.execute("CALL refresh_continuous_aggregate('stats_1m_route', NOW() - INTERVAL '1 hour', NOW() + INTERVAL '1 hour')");
+
+ StatsTimeseries ts = statsStore.timeseriesForRoute(base.minus(1, ChronoUnit.MINUTES), base.plus(10, ChronoUnit.MINUTES), 5, uniqueRoute, null);
+ assertNotNull(ts);
+ assertFalse(ts.buckets().isEmpty());
+ }
+
+ private void insertExecution(String id, String routeId, String groupName,
+ String status, Instant startTime, long durationMs) {
+ executionStore.upsert(new ExecutionRecord(
+ id, routeId, "agent-1", groupName, status, null, null,
+ startTime, startTime.plusMillis(durationMs), durationMs,
+ status.equals("FAILED") ? "error" : null, null, null));
+ }
+}
diff --git a/cameleer3-server-app/src/test/resources/application-test.yml b/cameleer3-server-app/src/test/resources/application-test.yml
index 027a4f67..8a6708b5 100644
--- a/cameleer3-server-app/src/test/resources/application-test.yml
+++ b/cameleer3-server-app/src/test/resources/application-test.yml
@@ -1,9 +1,10 @@
spring:
- datasource:
- url: jdbc:ch://placeholder:8123/cameleer3
- username: default
- password: ""
- driver-class-name: com.clickhouse.jdbc.ClickHouseDriver
+ flyway:
+ enabled: true
+
+opensearch:
+ url: http://localhost:9200
+ debounce-ms: 100
ingestion:
buffer-capacity: 100
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/DetailService.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/DetailService.java
index 27dc39a8..7f6b31ce 100644
--- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/DetailService.java
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/DetailService.java
@@ -1,104 +1,61 @@
package com.cameleer3.server.core.detail;
-import com.cameleer3.server.core.storage.ExecutionRepository;
+import com.cameleer3.server.core.storage.ExecutionStore;
+import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
+import java.util.*;
-/**
- * Provides execution detail with reconstructed processor tree.
- *
- * This is a plain class (no Spring annotations) -- it lives in the core module
- * and is wired as a bean by the app module configuration.
- */
public class DetailService {
- private final ExecutionRepository repository;
+ private final ExecutionStore executionStore;
- public DetailService(ExecutionRepository repository) {
- this.repository = repository;
+ public DetailService(ExecutionStore executionStore) {
+ this.executionStore = executionStore;
}
- /**
- * Get the full detail of a route execution, including the nested processor tree.
- *
- * @param executionId the execution ID to look up
- * @return the execution detail, or empty if not found
- */
public Optional getDetail(String executionId) {
- return repository.findRawById(executionId)
- .map(this::toDetail);
+ return executionStore.findById(executionId)
+ .map(exec -> {
+ List processors = executionStore.findProcessors(executionId);
+ List roots = buildTree(processors);
+ return new ExecutionDetail(
+ exec.executionId(), exec.routeId(), exec.agentId(),
+ exec.status(), exec.startTime(), exec.endTime(),
+ exec.durationMs() != null ? exec.durationMs() : 0L,
+ exec.correlationId(), exec.exchangeId(),
+ exec.errorMessage(), exec.errorStacktrace(),
+ exec.diagramContentHash(), roots
+ );
+ });
}
- private ExecutionDetail toDetail(RawExecutionRow row) {
- List roots = reconstructTree(
- row.processorIds(),
- row.processorTypes(),
- row.processorStatuses(),
- row.processorStarts(),
- row.processorEnds(),
- row.processorDurations(),
- row.processorDiagramNodeIds(),
- row.processorErrorMessages(),
- row.processorErrorStacktraces(),
- row.processorDepths(),
- row.processorParentIndexes()
- );
+ List buildTree(List processors) {
+ if (processors.isEmpty()) return List.of();
- return new ExecutionDetail(
- row.executionId(),
- row.routeId(),
- row.agentId(),
- row.status(),
- row.startTime(),
- row.endTime(),
- row.durationMs(),
- row.correlationId(),
- row.exchangeId(),
- row.errorMessage(),
- row.errorStackTrace(),
- row.diagramContentHash(),
- roots
- );
- }
-
- /**
- * Reconstruct the nested processor tree from flat parallel arrays.
- *
- * Uses parentIndexes to wire children: parentIndex == -1 means the node is a root.
- * Otherwise, parentIndex is the array index of the parent node.
- */
- List reconstructTree(
- String[] ids, String[] types, String[] statuses,
- java.time.Instant[] starts, java.time.Instant[] ends, long[] durations,
- String[] diagramNodeIds, String[] errorMessages, String[] errorStacktraces,
- int[] depths, int[] parentIndexes) {
-
- if (ids == null || ids.length == 0) {
- return List.of();
- }
-
- int len = ids.length;
- ProcessorNode[] nodes = new ProcessorNode[len];
-
- for (int i = 0; i < len; i++) {
- nodes[i] = new ProcessorNode(
- ids[i], types[i], statuses[i],
- starts[i], ends[i], durations[i],
- diagramNodeIds[i], errorMessages[i], errorStacktraces[i]
- );
+ Map nodeMap = new LinkedHashMap<>();
+ for (ProcessorRecord p : processors) {
+ nodeMap.put(p.processorId(), new ProcessorNode(
+ p.processorId(), p.processorType(), p.status(),
+ p.startTime(), p.endTime(),
+ p.durationMs() != null ? p.durationMs() : 0L,
+ p.diagramNodeId(), p.errorMessage(), p.errorStacktrace()
+ ));
}
List roots = new ArrayList<>();
- for (int i = 0; i < len; i++) {
- if (parentIndexes[i] == -1) {
- roots.add(nodes[i]);
+ for (ProcessorRecord p : processors) {
+ ProcessorNode node = nodeMap.get(p.processorId());
+ if (p.parentProcessorId() == null) {
+ roots.add(node);
} else {
- nodes[parentIndexes[i]].addChild(nodes[i]);
+ ProcessorNode parent = nodeMap.get(p.parentProcessorId());
+ if (parent != null) {
+ parent.addChild(node);
+ } else {
+ roots.add(node); // orphan safety
+ }
}
}
-
return roots;
}
}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ExecutionDetail.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ExecutionDetail.java
index e739dd81..1b474ba0 100644
--- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ExecutionDetail.java
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ExecutionDetail.java
@@ -7,7 +7,7 @@ import java.util.List;
* Full detail of a route execution, including the nested processor tree.
*
* This is the rich detail model returned by the detail endpoint. The processor
- * tree is reconstructed from flat parallel arrays stored in ClickHouse.
+ * tree is reconstructed from individual processor records stored in PostgreSQL.
*
* @param executionId unique execution identifier
* @param routeId Camel route ID
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ProcessorNode.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ProcessorNode.java
index 10d1e88e..65e08b9a 100644
--- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ProcessorNode.java
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/ProcessorNode.java
@@ -7,7 +7,7 @@ import java.util.List;
/**
* Nested tree node representing a single processor execution within a route.
*
- * The tree structure is reconstructed from flat parallel arrays stored in ClickHouse.
+ * The tree structure is reconstructed from individual processor records stored in PostgreSQL.
* Each node may have children (e.g., processors inside a split or try-catch block).
*/
public final class ProcessorNode {
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/RawExecutionRow.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/RawExecutionRow.java
deleted file mode 100644
index 2297e4b6..00000000
--- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/detail/RawExecutionRow.java
+++ /dev/null
@@ -1,59 +0,0 @@
-package com.cameleer3.server.core.detail;
-
-import java.time.Instant;
-
-/**
- * Raw execution data from ClickHouse, including all parallel arrays needed
- * for tree reconstruction. This is the intermediate representation between
- * the database and the {@link ExecutionDetail} domain object.
- *
- * @param executionId unique execution identifier
- * @param routeId Camel route ID
- * @param agentId agent instance
- * @param status execution status
- * @param startTime execution start time
- * @param endTime execution end time
- * @param durationMs execution duration in milliseconds
- * @param correlationId correlation ID
- * @param exchangeId Camel exchange ID
- * @param errorMessage execution-level error message
- * @param errorStackTrace execution-level error stack trace
- * @param diagramContentHash content hash for diagram linking
- * @param processorIds processor IDs (parallel array)
- * @param processorTypes processor types (parallel array)
- * @param processorStatuses processor statuses (parallel array)
- * @param processorStarts processor start times (parallel array)
- * @param processorEnds processor end times (parallel array)
- * @param processorDurations processor durations in ms (parallel array)
- * @param processorDiagramNodeIds processor diagram node IDs (parallel array)
- * @param processorErrorMessages processor error messages (parallel array)
- * @param processorErrorStacktraces processor error stack traces (parallel array)
- * @param processorDepths processor tree depths (parallel array)
- * @param processorParentIndexes processor parent indexes, -1 for roots (parallel array)
- */
-public record RawExecutionRow(
- String executionId,
- String routeId,
- String agentId,
- String status,
- Instant startTime,
- Instant endTime,
- long durationMs,
- String correlationId,
- String exchangeId,
- String errorMessage,
- String errorStackTrace,
- String diagramContentHash,
- String[] processorIds,
- String[] processorTypes,
- String[] processorStatuses,
- Instant[] processorStarts,
- Instant[] processorEnds,
- long[] processorDurations,
- String[] processorDiagramNodeIds,
- String[] processorErrorMessages,
- String[] processorErrorStacktraces,
- int[] processorDepths,
- int[] processorParentIndexes
-) {
-}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/ExecutionUpdatedEvent.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/ExecutionUpdatedEvent.java
new file mode 100644
index 00000000..08488fab
--- /dev/null
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/ExecutionUpdatedEvent.java
@@ -0,0 +1,5 @@
+package com.cameleer3.server.core.indexing;
+
+import java.time.Instant;
+
+public record ExecutionUpdatedEvent(String executionId, Instant startTime) {}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/SearchIndexer.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/SearchIndexer.java
new file mode 100644
index 00000000..6cff9e8d
--- /dev/null
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/indexing/SearchIndexer.java
@@ -0,0 +1,79 @@
+package com.cameleer3.server.core.indexing;
+
+import com.cameleer3.server.core.storage.ExecutionStore;
+import com.cameleer3.server.core.storage.ExecutionStore.ExecutionRecord;
+import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord;
+import com.cameleer3.server.core.storage.SearchIndex;
+import com.cameleer3.server.core.storage.model.ExecutionDocument;
+import com.cameleer3.server.core.storage.model.ExecutionDocument.ProcessorDoc;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.*;
+
+public class SearchIndexer {
+
+ private static final Logger log = LoggerFactory.getLogger(SearchIndexer.class);
+
+ private final ExecutionStore executionStore;
+ private final SearchIndex searchIndex;
+ private final long debounceMs;
+ private final int queueCapacity;
+
+ private final Map> pending = new ConcurrentHashMap<>();
+ private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(
+ r -> { Thread t = new Thread(r, "search-indexer"); t.setDaemon(true); return t; });
+
+ public SearchIndexer(ExecutionStore executionStore, SearchIndex searchIndex,
+ long debounceMs, int queueCapacity) {
+ this.executionStore = executionStore;
+ this.searchIndex = searchIndex;
+ this.debounceMs = debounceMs;
+ this.queueCapacity = queueCapacity;
+ }
+
+ public void onExecutionUpdated(ExecutionUpdatedEvent event) {
+ if (pending.size() >= queueCapacity) {
+ log.warn("Search indexer queue full, dropping event for {}", event.executionId());
+ return;
+ }
+
+ ScheduledFuture> existing = pending.put(event.executionId(),
+ scheduler.schedule(() -> indexExecution(event.executionId()),
+ debounceMs, TimeUnit.MILLISECONDS));
+ if (existing != null) {
+ existing.cancel(false);
+ }
+ }
+
+ private void indexExecution(String executionId) {
+ pending.remove(executionId);
+ try {
+ ExecutionRecord exec = executionStore.findById(executionId).orElse(null);
+ if (exec == null) return;
+
+ List processors = executionStore.findProcessors(executionId);
+ List processorDocs = processors.stream()
+ .map(p -> new ProcessorDoc(
+ p.processorId(), p.processorType(), p.status(),
+ p.errorMessage(), p.errorStacktrace(),
+ p.inputBody(), p.outputBody(),
+ p.inputHeaders(), p.outputHeaders()))
+ .toList();
+
+ searchIndex.index(new ExecutionDocument(
+ exec.executionId(), exec.routeId(), exec.agentId(), exec.groupName(),
+ exec.status(), exec.correlationId(), exec.exchangeId(),
+ exec.startTime(), exec.endTime(), exec.durationMs(),
+ exec.errorMessage(), exec.errorStacktrace(), processorDocs));
+ } catch (Exception e) {
+ log.error("Failed to index execution {}", executionId, e);
+ }
+ }
+
+ public void shutdown() {
+ scheduler.shutdown();
+ }
+}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java
index 6841c683..bdd32cfb 100644
--- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/IngestionService.java
@@ -1,113 +1,132 @@
package com.cameleer3.server.core.ingestion;
+import com.cameleer3.common.model.ProcessorExecution;
+import com.cameleer3.common.model.RouteExecution;
+import com.cameleer3.server.core.indexing.ExecutionUpdatedEvent;
+import com.cameleer3.server.core.storage.DiagramStore;
+import com.cameleer3.server.core.storage.ExecutionStore;
+import com.cameleer3.server.core.storage.ExecutionStore.ExecutionRecord;
+import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord;
import com.cameleer3.server.core.storage.model.MetricsSnapshot;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.util.ArrayList;
import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
-/**
- * Routes incoming data to the appropriate {@link WriteBuffer} instances.
- *
- * This is a plain class (no Spring annotations) -- it lives in the core module
- * and is wired as a bean by the app module configuration.
- */
public class IngestionService {
- private final WriteBuffer executionBuffer;
- private final WriteBuffer diagramBuffer;
+ private static final ObjectMapper JSON = new ObjectMapper();
+
+ private final ExecutionStore executionStore;
+ private final DiagramStore diagramStore;
private final WriteBuffer metricsBuffer;
+ private final Consumer eventPublisher;
+ private final int bodySizeLimit;
- public IngestionService(WriteBuffer executionBuffer,
- WriteBuffer diagramBuffer,
- WriteBuffer metricsBuffer) {
- this.executionBuffer = executionBuffer;
- this.diagramBuffer = diagramBuffer;
+ public IngestionService(ExecutionStore executionStore,
+ DiagramStore diagramStore,
+ WriteBuffer metricsBuffer,
+ Consumer eventPublisher,
+ int bodySizeLimit) {
+ this.executionStore = executionStore;
+ this.diagramStore = diagramStore;
this.metricsBuffer = metricsBuffer;
+ this.eventPublisher = eventPublisher;
+ this.bodySizeLimit = bodySizeLimit;
}
- /**
- * Accept a batch of tagged route executions into the buffer.
- *
- * @return true if all items were buffered, false if buffer is full (backpressure)
- */
- public boolean acceptExecutions(List executions) {
- return executionBuffer.offerBatch(executions);
+ public void ingestExecution(String agentId, String groupName, RouteExecution execution) {
+ ExecutionRecord record = toExecutionRecord(agentId, groupName, execution);
+ executionStore.upsert(record);
+
+ if (execution.getProcessors() != null && !execution.getProcessors().isEmpty()) {
+ List processors = flattenProcessors(
+ execution.getProcessors(), record.executionId(),
+ record.startTime(), groupName, execution.getRouteId(),
+ null, 0);
+ executionStore.upsertProcessors(
+ record.executionId(), record.startTime(),
+ groupName, execution.getRouteId(), processors);
+ }
+
+ eventPublisher.accept(new ExecutionUpdatedEvent(
+ record.executionId(), record.startTime()));
}
- /**
- * Accept a single tagged route execution into the buffer.
- *
- * @return true if the item was buffered, false if buffer is full (backpressure)
- */
- public boolean acceptExecution(TaggedExecution execution) {
- return executionBuffer.offer(execution);
+ public void ingestDiagram(TaggedDiagram diagram) {
+ diagramStore.store(diagram);
}
- /**
- * Accept a single tagged route diagram into the buffer.
- *
- * @return true if the item was buffered, false if buffer is full (backpressure)
- */
- public boolean acceptDiagram(TaggedDiagram diagram) {
- return diagramBuffer.offer(diagram);
- }
-
- /**
- * Accept a batch of tagged route diagrams into the buffer.
- *
- * @return true if all items were buffered, false if buffer is full (backpressure)
- */
- public boolean acceptDiagrams(List diagrams) {
- return diagramBuffer.offerBatch(diagrams);
- }
-
- /**
- * Accept a batch of metrics snapshots into the buffer.
- *
- * @return true if all items were buffered, false if buffer is full (backpressure)
- */
public boolean acceptMetrics(List metrics) {
return metricsBuffer.offerBatch(metrics);
}
- /**
- * @return current number of items in the execution buffer
- */
- public int getExecutionBufferDepth() {
- return executionBuffer.size();
- }
-
- /**
- * @return current number of items in the diagram buffer
- */
- public int getDiagramBufferDepth() {
- return diagramBuffer.size();
- }
-
- /**
- * @return current number of items in the metrics buffer
- */
public int getMetricsBufferDepth() {
return metricsBuffer.size();
}
- /**
- * @return the execution write buffer (for use by flush scheduler)
- */
- public WriteBuffer getExecutionBuffer() {
- return executionBuffer;
- }
-
- /**
- * @return the diagram write buffer (for use by flush scheduler)
- */
- public WriteBuffer getDiagramBuffer() {
- return diagramBuffer;
- }
-
- /**
- * @return the metrics write buffer (for use by flush scheduler)
- */
public WriteBuffer getMetricsBuffer() {
return metricsBuffer;
}
+
+ private ExecutionRecord toExecutionRecord(String agentId, String groupName,
+ RouteExecution exec) {
+ String diagramHash = diagramStore
+ .findContentHashForRoute(exec.getRouteId(), agentId)
+ .orElse("");
+ return new ExecutionRecord(
+ exec.getExchangeId(), exec.getRouteId(), agentId, groupName,
+ exec.getStatus() != null ? exec.getStatus().name() : "RUNNING",
+ exec.getCorrelationId(), exec.getExchangeId(),
+ exec.getStartTime(), exec.getEndTime(),
+ exec.getDurationMs(),
+ exec.getErrorMessage(), exec.getErrorStackTrace(),
+ diagramHash
+ );
+ }
+
+ private List flattenProcessors(
+ List processors, String executionId,
+ java.time.Instant execStartTime, String groupName, String routeId,
+ String parentProcessorId, int depth) {
+ List flat = new ArrayList<>();
+ for (ProcessorExecution p : processors) {
+ flat.add(new ProcessorRecord(
+ executionId, p.getProcessorId(), p.getProcessorType(),
+ p.getDiagramNodeId(), groupName, routeId,
+ depth, parentProcessorId,
+ p.getStatus() != null ? p.getStatus().name() : "RUNNING",
+ p.getStartTime() != null ? p.getStartTime() : execStartTime,
+ p.getEndTime(),
+ p.getDurationMs(),
+ p.getErrorMessage(), p.getErrorStackTrace(),
+ truncateBody(p.getInputBody()), truncateBody(p.getOutputBody()),
+ toJson(p.getInputHeaders()), toJson(p.getOutputHeaders())
+ ));
+ if (p.getChildren() != null) {
+ flat.addAll(flattenProcessors(
+ p.getChildren(), executionId, execStartTime,
+ groupName, routeId, p.getProcessorId(), depth + 1));
+ }
+ }
+ return flat;
+ }
+
+ private String truncateBody(String body) {
+ if (body == null) return null;
+ if (body.length() > bodySizeLimit) return body.substring(0, bodySizeLimit);
+ return body;
+ }
+
+ private static String toJson(Map headers) {
+ if (headers == null) return null;
+ try {
+ return JSON.writeValueAsString(headers);
+ } catch (JsonProcessingException e) {
+ return "{}";
+ }
+ }
}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/WriteBuffer.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/WriteBuffer.java
index 267de43c..bcd1077c 100644
--- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/WriteBuffer.java
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/ingestion/WriteBuffer.java
@@ -6,7 +6,7 @@ import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
/**
- * Bounded write buffer that decouples HTTP ingestion from ClickHouse batch inserts.
+ * Bounded write buffer that decouples HTTP ingestion from database batch inserts.
*
* Items are offered to the buffer by controllers and drained in batches by a
* scheduled flush task. When the buffer is full, {@link #offer} returns false,
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchEngine.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchEngine.java
deleted file mode 100644
index 44955c18..00000000
--- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchEngine.java
+++ /dev/null
@@ -1,72 +0,0 @@
-package com.cameleer3.server.core.search;
-
-import java.util.List;
-
-/**
- * Swappable search backend abstraction.
- *
- * The current implementation uses ClickHouse for search. This interface allows
- * replacing the search backend (e.g., with OpenSearch) without changing the
- * service layer or controllers.
- */
-public interface SearchEngine {
-
- /**
- * Search for route executions matching the given criteria.
- *
- * @param request search filters and pagination
- * @return paginated search results with total count
- */
- SearchResult search(SearchRequest request);
-
- /**
- * Count route executions matching the given criteria (without fetching data).
- *
- * @param request search filters
- * @return total number of matching executions
- */
- long count(SearchRequest request);
-
- /**
- * Compute aggregate stats: P99 latency and count of currently running executions.
- *
- * @param from start of the time window
- * @param to end of the time window
- * @return execution stats
- */
- ExecutionStats stats(java.time.Instant from, java.time.Instant to);
-
- /**
- * Compute aggregate stats scoped to specific routes and agents.
- *
- * @param from start of the time window
- * @param to end of the time window
- * @param routeId optional route ID filter
- * @param agentIds optional agent ID filter (from group resolution)
- * @return execution stats
- */
- ExecutionStats stats(java.time.Instant from, java.time.Instant to, String routeId, List agentIds);
-
- /**
- * Compute bucketed time-series stats over a time window.
- *
- * @param from start of the time window
- * @param to end of the time window
- * @param bucketCount number of buckets to divide the window into
- * @return bucketed stats
- */
- StatsTimeseries timeseries(java.time.Instant from, java.time.Instant to, int bucketCount);
-
- /**
- * Compute bucketed time-series stats scoped to specific routes and agents.
- *
- * @param from start of the time window
- * @param to end of the time window
- * @param bucketCount number of buckets to divide the window into
- * @param routeId optional route ID filter
- * @param agentIds optional agent ID filter (from group resolution)
- * @return bucketed stats
- */
- StatsTimeseries timeseries(java.time.Instant from, java.time.Instant to, int bucketCount,
- String routeId, List agentIds);
-}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchRequest.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchRequest.java
index ab97c31e..17ff44c9 100644
--- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchRequest.java
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchRequest.java
@@ -75,7 +75,7 @@ public record SearchRequest(
if (!"asc".equalsIgnoreCase(sortDir)) sortDir = "desc";
}
- /** Returns the validated ClickHouse column name for ORDER BY. */
+ /** Returns the validated database column name for ORDER BY. */
public String sortColumn() {
return SORT_FIELD_TO_COLUMN.getOrDefault(sortField, "start_time");
}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchService.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchService.java
index 263193c2..014c606d 100644
--- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchService.java
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/search/SearchService.java
@@ -1,63 +1,43 @@
package com.cameleer3.server.core.search;
+import com.cameleer3.server.core.storage.SearchIndex;
+import com.cameleer3.server.core.storage.StatsStore;
+
+import java.time.Instant;
import java.util.List;
-/**
- * Orchestrates search operations, delegating to a {@link SearchEngine} backend.
- *
- * This is a plain class (no Spring annotations) -- it lives in the core module
- * and is wired as a bean by the app module configuration. The thin orchestration
- * layer allows adding cross-cutting concerns (logging, caching, metrics) later.
- */
public class SearchService {
- private final SearchEngine engine;
+ private final SearchIndex searchIndex;
+ private final StatsStore statsStore;
- public SearchService(SearchEngine engine) {
- this.engine = engine;
+ public SearchService(SearchIndex searchIndex, StatsStore statsStore) {
+ this.searchIndex = searchIndex;
+ this.statsStore = statsStore;
}
- /**
- * Search for route executions matching the given criteria.
- */
public SearchResult search(SearchRequest request) {
- return engine.search(request);
+ return searchIndex.search(request);
}
- /**
- * Count route executions matching the given criteria.
- */
public long count(SearchRequest request) {
- return engine.count(request);
+ return searchIndex.count(request);
}
- /**
- * Compute aggregate execution stats (P99 latency, active count).
- */
- public ExecutionStats stats(java.time.Instant from, java.time.Instant to) {
- return engine.stats(from, to);
+ public ExecutionStats stats(Instant from, Instant to) {
+ return statsStore.stats(from, to);
}
- /**
- * Compute aggregate execution stats scoped to specific routes and agents.
- */
- public ExecutionStats stats(java.time.Instant from, java.time.Instant to,
- String routeId, List agentIds) {
- return engine.stats(from, to, routeId, agentIds);
+ public ExecutionStats stats(Instant from, Instant to, String routeId, List agentIds) {
+ return statsStore.statsForRoute(from, to, routeId, agentIds);
}
- /**
- * Compute bucketed time-series stats over a time window.
- */
- public StatsTimeseries timeseries(java.time.Instant from, java.time.Instant to, int bucketCount) {
- return engine.timeseries(from, to, bucketCount);
+ public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount) {
+ return statsStore.timeseries(from, to, bucketCount);
}
- /**
- * Compute bucketed time-series stats scoped to specific routes and agents.
- */
- public StatsTimeseries timeseries(java.time.Instant from, java.time.Instant to, int bucketCount,
+ public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount,
String routeId, List agentIds) {
- return engine.timeseries(from, to, bucketCount, routeId, agentIds);
+ return statsStore.timeseriesForRoute(from, to, bucketCount, routeId, agentIds);
}
}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramRepository.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramRepository.java
deleted file mode 100644
index 3a2c4bd6..00000000
--- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramRepository.java
+++ /dev/null
@@ -1,35 +0,0 @@
-package com.cameleer3.server.core.storage;
-
-import com.cameleer3.common.graph.RouteGraph;
-import com.cameleer3.server.core.ingestion.TaggedDiagram;
-
-import java.util.List;
-import java.util.Optional;
-
-/**
- * Repository for route diagram storage with content-hash deduplication.
- */
-public interface DiagramRepository {
-
- /**
- * Store a tagged route graph. Uses content-hash deduplication via ReplacingMergeTree.
- */
- void store(TaggedDiagram diagram);
-
- /**
- * Find a route graph by its content hash.
- */
- Optional findByContentHash(String contentHash);
-
- /**
- * Find the content hash for the latest diagram of a given route and agent.
- */
- Optional findContentHashForRoute(String routeId, String agentId);
-
- /**
- * Find the content hash for the latest diagram of a route across any agent in the given list.
- * All instances of the same application produce the same route graph, so any agent's
- * diagram for the same route will have the same content hash.
- */
- Optional findContentHashForRouteByAgents(String routeId, List agentIds);
-}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramStore.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramStore.java
new file mode 100644
index 00000000..12ff6d7d
--- /dev/null
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/DiagramStore.java
@@ -0,0 +1,18 @@
+package com.cameleer3.server.core.storage;
+
+import com.cameleer3.common.graph.RouteGraph;
+import com.cameleer3.server.core.ingestion.TaggedDiagram;
+
+import java.util.List;
+import java.util.Optional;
+
+public interface DiagramStore {
+
+ void store(TaggedDiagram diagram);
+
+ Optional findByContentHash(String contentHash);
+
+ Optional findContentHashForRoute(String routeId, String agentId);
+
+ Optional findContentHashForRouteByAgents(String routeId, List agentIds);
+}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionRepository.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionRepository.java
deleted file mode 100644
index c58c1f81..00000000
--- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionRepository.java
+++ /dev/null
@@ -1,28 +0,0 @@
-package com.cameleer3.server.core.storage;
-
-import com.cameleer3.server.core.detail.RawExecutionRow;
-import com.cameleer3.server.core.ingestion.TaggedExecution;
-
-import java.util.List;
-import java.util.Optional;
-
-/**
- * Repository for route execution storage and retrieval.
- */
-public interface ExecutionRepository {
-
- /**
- * Insert a batch of tagged route executions.
- * Implementations must perform a single batch insert for efficiency.
- */
- void insertBatch(List executions);
-
- /**
- * Find a raw execution row by execution ID, including all parallel arrays
- * needed for processor tree reconstruction.
- *
- * @param executionId the execution ID to look up
- * @return the raw execution row, or empty if not found
- */
- Optional findRawById(String executionId);
-}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionStore.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionStore.java
new file mode 100644
index 00000000..ae45577e
--- /dev/null
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/ExecutionStore.java
@@ -0,0 +1,34 @@
+package com.cameleer3.server.core.storage;
+
+import java.time.Instant;
+import java.util.List;
+import java.util.Optional;
+
+public interface ExecutionStore {
+
+ void upsert(ExecutionRecord execution);
+
+ void upsertProcessors(String executionId, Instant startTime,
+ String groupName, String routeId,
+ List processors);
+
+ Optional findById(String executionId);
+
+ List findProcessors(String executionId);
+
+ record ExecutionRecord(
+ String executionId, String routeId, String agentId, String groupName,
+ String status, String correlationId, String exchangeId,
+ Instant startTime, Instant endTime, Long durationMs,
+ String errorMessage, String errorStacktrace, String diagramContentHash
+ ) {}
+
+ record ProcessorRecord(
+ String executionId, String processorId, String processorType,
+ String diagramNodeId, String groupName, String routeId,
+ int depth, String parentProcessorId, String status,
+ Instant startTime, Instant endTime, Long durationMs,
+ String errorMessage, String errorStacktrace,
+ String inputBody, String outputBody, String inputHeaders, String outputHeaders
+ ) {}
+}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsRepository.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsRepository.java
deleted file mode 100644
index ad15ef0a..00000000
--- a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsRepository.java
+++ /dev/null
@@ -1,17 +0,0 @@
-package com.cameleer3.server.core.storage;
-
-import com.cameleer3.server.core.storage.model.MetricsSnapshot;
-
-import java.util.List;
-
-/**
- * Repository for agent metrics batch inserts into ClickHouse.
- */
-public interface MetricsRepository {
-
- /**
- * Insert a batch of metrics snapshots.
- * Implementations must perform a single batch insert for efficiency.
- */
- void insertBatch(List metrics);
-}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsStore.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsStore.java
new file mode 100644
index 00000000..b7af4122
--- /dev/null
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/MetricsStore.java
@@ -0,0 +1,10 @@
+package com.cameleer3.server.core.storage;
+
+import com.cameleer3.server.core.storage.model.MetricsSnapshot;
+
+import java.util.List;
+
+public interface MetricsStore {
+
+ void insertBatch(List snapshots);
+}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/SearchIndex.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/SearchIndex.java
new file mode 100644
index 00000000..e06379ac
--- /dev/null
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/SearchIndex.java
@@ -0,0 +1,17 @@
+package com.cameleer3.server.core.storage;
+
+import com.cameleer3.server.core.search.ExecutionSummary;
+import com.cameleer3.server.core.search.SearchRequest;
+import com.cameleer3.server.core.search.SearchResult;
+import com.cameleer3.server.core.storage.model.ExecutionDocument;
+
+public interface SearchIndex {
+
+ SearchResult search(SearchRequest request);
+
+ long count(SearchRequest request);
+
+ void index(ExecutionDocument document);
+
+ void delete(String executionId);
+}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/StatsStore.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/StatsStore.java
new file mode 100644
index 00000000..05931a86
--- /dev/null
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/StatsStore.java
@@ -0,0 +1,36 @@
+package com.cameleer3.server.core.storage;
+
+import com.cameleer3.server.core.search.ExecutionStats;
+import com.cameleer3.server.core.search.StatsTimeseries;
+
+import java.time.Instant;
+import java.util.List;
+
+public interface StatsStore {
+
+ // Global stats (stats_1m_all)
+ ExecutionStats stats(Instant from, Instant to);
+
+ // Per-app stats (stats_1m_app)
+ ExecutionStats statsForApp(Instant from, Instant to, String groupName);
+
+ // Per-route stats (stats_1m_route), optionally scoped to specific agents
+ ExecutionStats statsForRoute(Instant from, Instant to, String routeId, List agentIds);
+
+ // Per-processor stats (stats_1m_processor)
+ ExecutionStats statsForProcessor(Instant from, Instant to, String routeId, String processorType);
+
+ // Global timeseries
+ StatsTimeseries timeseries(Instant from, Instant to, int bucketCount);
+
+ // Per-app timeseries
+ StatsTimeseries timeseriesForApp(Instant from, Instant to, int bucketCount, String groupName);
+
+ // Per-route timeseries, optionally scoped to specific agents
+ StatsTimeseries timeseriesForRoute(Instant from, Instant to, int bucketCount,
+ String routeId, List agentIds);
+
+ // Per-processor timeseries
+ StatsTimeseries timeseriesForProcessor(Instant from, Instant to, int bucketCount,
+ String routeId, String processorType);
+}
diff --git a/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/model/ExecutionDocument.java b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/model/ExecutionDocument.java
new file mode 100644
index 00000000..6822088a
--- /dev/null
+++ b/cameleer3-server-core/src/main/java/com/cameleer3/server/core/storage/model/ExecutionDocument.java
@@ -0,0 +1,19 @@
+package com.cameleer3.server.core.storage.model;
+
+import java.time.Instant;
+import java.util.List;
+
+public record ExecutionDocument(
+ String executionId, String routeId, String agentId, String groupName,
+ String status, String correlationId, String exchangeId,
+ Instant startTime, Instant endTime, Long durationMs,
+ String errorMessage, String errorStacktrace,
+ List processors
+) {
+ public record ProcessorDoc(
+ String processorId, String processorType, String status,
+ String errorMessage, String errorStacktrace,
+ String inputBody, String outputBody,
+ String inputHeaders, String outputHeaders
+ ) {}
+}
diff --git a/cameleer3-server-core/src/test/java/com/cameleer3/server/core/detail/TreeReconstructionTest.java b/cameleer3-server-core/src/test/java/com/cameleer3/server/core/detail/TreeReconstructionTest.java
index a6b4251a..89311bfe 100644
--- a/cameleer3-server-core/src/test/java/com/cameleer3/server/core/detail/TreeReconstructionTest.java
+++ b/cameleer3-server-core/src/test/java/com/cameleer3/server/core/detail/TreeReconstructionTest.java
@@ -1,6 +1,7 @@
package com.cameleer3.server.core.detail;
-import com.cameleer3.server.core.storage.ExecutionRepository;
+import com.cameleer3.server.core.storage.ExecutionStore;
+import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord;
import org.junit.jupiter.api.Test;
import java.time.Instant;
@@ -10,33 +11,36 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
/**
- * Unit tests for {@link DetailService#reconstructTree} logic.
+ * Unit tests for {@link DetailService#buildTree} logic.
*
- * Verifies correct parent-child wiring from flat parallel arrays.
+ * Verifies correct parent-child wiring from flat ProcessorRecord lists.
*/
class TreeReconstructionTest {
- private final DetailService detailService = new DetailService(mock(ExecutionRepository.class));
+ private final DetailService detailService = new DetailService(mock(ExecutionStore.class));
private static final Instant NOW = Instant.parse("2026-03-10T10:00:00Z");
+ private ProcessorRecord proc(String id, String type, String status,
+ int depth, String parentId) {
+ return new ProcessorRecord(
+ "exec-1", id, type, "node-" + id,
+ "default", "route1", depth, parentId,
+ status, NOW, NOW, 10L,
+ null, null, null, null, null, null
+ );
+ }
+
@Test
void linearChain_rootChildGrandchild() {
- // [root, child, grandchild], depths=[0,1,2], parents=[-1,0,1]
- List roots = detailService.reconstructTree(
- new String[]{"root", "child", "grandchild"},
- new String[]{"log", "bean", "to"},
- new String[]{"COMPLETED", "COMPLETED", "COMPLETED"},
- new Instant[]{NOW, NOW, NOW},
- new Instant[]{NOW, NOW, NOW},
- new long[]{10, 20, 30},
- new String[]{"n1", "n2", "n3"},
- new String[]{"", "", ""},
- new String[]{"", "", ""},
- new int[]{0, 1, 2},
- new int[]{-1, 0, 1}
+ List processors = List.of(
+ proc("root", "log", "COMPLETED", 0, null),
+ proc("child", "bean", "COMPLETED", 1, "root"),
+ proc("grandchild", "to", "COMPLETED", 2, "child")
);
+ List roots = detailService.buildTree(processors);
+
assertThat(roots).hasSize(1);
ProcessorNode root = roots.get(0);
assertThat(root.getProcessorId()).isEqualTo("root");
@@ -53,21 +57,14 @@ class TreeReconstructionTest {
@Test
void multipleRoots_noNesting() {
- // [A, B, C], depths=[0,0,0], parents=[-1,-1,-1]
- List roots = detailService.reconstructTree(
- new String[]{"A", "B", "C"},
- new String[]{"log", "log", "log"},
- new String[]{"COMPLETED", "COMPLETED", "COMPLETED"},
- new Instant[]{NOW, NOW, NOW},
- new Instant[]{NOW, NOW, NOW},
- new long[]{10, 20, 30},
- new String[]{"n1", "n2", "n3"},
- new String[]{"", "", ""},
- new String[]{"", "", ""},
- new int[]{0, 0, 0},
- new int[]{-1, -1, -1}
+ List processors = List.of(
+ proc("A", "log", "COMPLETED", 0, null),
+ proc("B", "log", "COMPLETED", 0, null),
+ proc("C", "log", "COMPLETED", 0, null)
);
+ List roots = detailService.buildTree(processors);
+
assertThat(roots).hasSize(3);
assertThat(roots.get(0).getProcessorId()).isEqualTo("A");
assertThat(roots.get(1).getProcessorId()).isEqualTo("B");
@@ -77,21 +74,15 @@ class TreeReconstructionTest {
@Test
void branchingTree_parentWithTwoChildren_secondChildHasGrandchild() {
- // [parent, child1, child2, grandchild], depths=[0,1,1,2], parents=[-1,0,0,2]
- List roots = detailService.reconstructTree(
- new String[]{"parent", "child1", "child2", "grandchild"},
- new String[]{"split", "log", "bean", "to"},
- new String[]{"COMPLETED", "COMPLETED", "COMPLETED", "COMPLETED"},
- new Instant[]{NOW, NOW, NOW, NOW},
- new Instant[]{NOW, NOW, NOW, NOW},
- new long[]{100, 20, 30, 5},
- new String[]{"n1", "n2", "n3", "n4"},
- new String[]{"", "", "", ""},
- new String[]{"", "", "", ""},
- new int[]{0, 1, 1, 2},
- new int[]{-1, 0, 0, 2}
+ List processors = List.of(
+ proc("parent", "split", "COMPLETED", 0, null),
+ proc("child1", "log", "COMPLETED", 1, "parent"),
+ proc("child2", "bean", "COMPLETED", 1, "parent"),
+ proc("grandchild", "to", "COMPLETED", 2, "child2")
);
+ List roots = detailService.buildTree(processors);
+
assertThat(roots).hasSize(1);
ProcessorNode parent = roots.get(0);
assertThat(parent.getProcessorId()).isEqualTo("parent");
@@ -111,30 +102,8 @@ class TreeReconstructionTest {
}
@Test
- void emptyArrays_producesEmptyList() {
- List roots = detailService.reconstructTree(
- new String[]{},
- new String[]{},
- new String[]{},
- new Instant[]{},
- new Instant[]{},
- new long[]{},
- new String[]{},
- new String[]{},
- new String[]{},
- new int[]{},
- new int[]{}
- );
-
- assertThat(roots).isEmpty();
- }
-
- @Test
- void nullArrays_producesEmptyList() {
- List roots = detailService.reconstructTree(
- null, null, null, null, null, null, null, null, null, null, null
- );
-
+ void emptyList_producesEmptyRoots() {
+ List roots = detailService.buildTree(List.of());
assertThat(roots).isEmpty();
}
}
diff --git a/clickhouse/init/01-schema.sql b/clickhouse/init/01-schema.sql
deleted file mode 100644
index ab56da70..00000000
--- a/clickhouse/init/01-schema.sql
+++ /dev/null
@@ -1,57 +0,0 @@
--- Cameleer3 ClickHouse Schema
--- Tables for route executions, route diagrams, and agent metrics.
-
-CREATE TABLE IF NOT EXISTS route_executions (
- execution_id String,
- route_id LowCardinality(String),
- agent_id LowCardinality(String),
- status LowCardinality(String),
- start_time DateTime64(3, 'UTC'),
- end_time Nullable(DateTime64(3, 'UTC')),
- duration_ms UInt64,
- correlation_id String,
- exchange_id String,
- error_message String DEFAULT '',
- error_stacktrace String DEFAULT '',
- -- Nested processor executions stored as parallel arrays
- processor_ids Array(String),
- processor_types Array(LowCardinality(String)),
- processor_starts Array(DateTime64(3, 'UTC')),
- processor_ends Array(DateTime64(3, 'UTC')),
- processor_durations Array(UInt64),
- processor_statuses Array(LowCardinality(String)),
- -- Metadata
- server_received_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC'),
- -- Skip indexes
- INDEX idx_correlation correlation_id TYPE bloom_filter GRANULARITY 4,
- INDEX idx_error error_message TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4
-)
-ENGINE = MergeTree()
-PARTITION BY toYYYYMMDD(start_time)
-ORDER BY (agent_id, status, start_time, execution_id)
-TTL toDateTime(start_time) + toIntervalDay(30)
-SETTINGS ttl_only_drop_parts = 1;
-
-CREATE TABLE IF NOT EXISTS route_diagrams (
- content_hash String,
- route_id LowCardinality(String),
- agent_id LowCardinality(String),
- definition String,
- created_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC')
-)
-ENGINE = ReplacingMergeTree(created_at)
-ORDER BY (content_hash);
-
-CREATE TABLE IF NOT EXISTS agent_metrics (
- agent_id LowCardinality(String),
- collected_at DateTime64(3, 'UTC'),
- metric_name LowCardinality(String),
- metric_value Float64,
- tags Map(String, String),
- server_received_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC')
-)
-ENGINE = MergeTree()
-PARTITION BY toYYYYMMDD(collected_at)
-ORDER BY (agent_id, metric_name, collected_at)
-TTL toDateTime(collected_at) + toIntervalDay(30)
-SETTINGS ttl_only_drop_parts = 1;
diff --git a/clickhouse/init/02-search-columns.sql b/clickhouse/init/02-search-columns.sql
deleted file mode 100644
index 2b11b435..00000000
--- a/clickhouse/init/02-search-columns.sql
+++ /dev/null
@@ -1,25 +0,0 @@
--- Phase 2: Schema extension for search, detail, and diagram linking columns.
--- Adds exchange snapshot data, processor tree metadata, and diagram content hash.
-
-ALTER TABLE route_executions
- ADD COLUMN IF NOT EXISTS exchange_bodies String DEFAULT '',
- ADD COLUMN IF NOT EXISTS exchange_headers String DEFAULT '',
- ADD COLUMN IF NOT EXISTS processor_depths Array(UInt16) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_parent_indexes Array(Int32) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_error_messages Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_error_stacktraces Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_input_bodies Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_output_bodies Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_input_headers Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_output_headers Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS processor_diagram_node_ids Array(String) DEFAULT [],
- ADD COLUMN IF NOT EXISTS diagram_content_hash String DEFAULT '';
-
--- Skip indexes for full-text search on new text columns
-ALTER TABLE route_executions
- ADD INDEX IF NOT EXISTS idx_exchange_bodies exchange_bodies TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4,
- ADD INDEX IF NOT EXISTS idx_exchange_headers exchange_headers TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4;
-
--- Skip index on error_stacktrace (not indexed in 01-schema.sql, needed for SRCH-05)
-ALTER TABLE route_executions
- ADD INDEX IF NOT EXISTS idx_error_stacktrace error_stacktrace TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4;
diff --git a/clickhouse/init/03-users.sql b/clickhouse/init/03-users.sql
deleted file mode 100644
index 9dc7ce7a..00000000
--- a/clickhouse/init/03-users.sql
+++ /dev/null
@@ -1,10 +0,0 @@
-CREATE TABLE IF NOT EXISTS users (
- user_id String,
- provider LowCardinality(String),
- email String DEFAULT '',
- display_name String DEFAULT '',
- roles Array(LowCardinality(String)),
- created_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC'),
- updated_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC')
-) ENGINE = ReplacingMergeTree(updated_at)
-ORDER BY (user_id);
diff --git a/clickhouse/init/04-oidc-config.sql b/clickhouse/init/04-oidc-config.sql
deleted file mode 100644
index 35b4d896..00000000
--- a/clickhouse/init/04-oidc-config.sql
+++ /dev/null
@@ -1,13 +0,0 @@
-CREATE TABLE IF NOT EXISTS oidc_config (
- config_id String DEFAULT 'default',
- enabled Bool DEFAULT false,
- issuer_uri String DEFAULT '',
- client_id String DEFAULT '',
- client_secret String DEFAULT '',
- roles_claim String DEFAULT 'realm_access.roles',
- default_roles Array(LowCardinality(String)),
- auto_signup Bool DEFAULT true,
- display_name_claim String DEFAULT 'name',
- updated_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC')
-) ENGINE = ReplacingMergeTree(updated_at)
-ORDER BY (config_id);
diff --git a/clickhouse/init/05-oidc-auto-signup.sql b/clickhouse/init/05-oidc-auto-signup.sql
deleted file mode 100644
index 643a69ea..00000000
--- a/clickhouse/init/05-oidc-auto-signup.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE oidc_config ADD COLUMN IF NOT EXISTS auto_signup Bool DEFAULT true;
diff --git a/clickhouse/init/06-oidc-display-name-claim.sql b/clickhouse/init/06-oidc-display-name-claim.sql
deleted file mode 100644
index ef1870bd..00000000
--- a/clickhouse/init/06-oidc-display-name-claim.sql
+++ /dev/null
@@ -1 +0,0 @@
-ALTER TABLE oidc_config ADD COLUMN IF NOT EXISTS display_name_claim String DEFAULT 'name';
diff --git a/deploy/clickhouse.yaml b/deploy/clickhouse.yaml
deleted file mode 100644
index 2fe911d1..00000000
--- a/deploy/clickhouse.yaml
+++ /dev/null
@@ -1,107 +0,0 @@
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
- name: clickhouse
- namespace: cameleer
-spec:
- serviceName: clickhouse
- replicas: 1
- selector:
- matchLabels:
- app: clickhouse
- template:
- metadata:
- labels:
- app: clickhouse
- spec:
- containers:
- - name: clickhouse
- image: clickhouse/clickhouse-server:25.3
- ports:
- - containerPort: 8123
- name: http
- - containerPort: 9000
- name: native
- env:
- - name: CLICKHOUSE_USER
- valueFrom:
- secretKeyRef:
- name: clickhouse-credentials
- key: CLICKHOUSE_USER
- - name: CLICKHOUSE_PASSWORD
- valueFrom:
- secretKeyRef:
- name: clickhouse-credentials
- key: CLICKHOUSE_PASSWORD
- - name: CLICKHOUSE_DB
- value: cameleer3
- volumeMounts:
- - name: data
- mountPath: /var/lib/clickhouse
- resources:
- requests:
- memory: "1Gi"
- cpu: "200m"
- limits:
- memory: "4Gi"
- cpu: "1000m"
- livenessProbe:
- httpGet:
- path: /ping
- port: 8123
- initialDelaySeconds: 15
- periodSeconds: 10
- timeoutSeconds: 3
- failureThreshold: 3
- readinessProbe:
- httpGet:
- path: /ping
- port: 8123
- initialDelaySeconds: 5
- periodSeconds: 5
- timeoutSeconds: 3
- failureThreshold: 3
- volumeClaimTemplates:
- - metadata:
- name: data
- spec:
- accessModes: ["ReadWriteOnce"]
- resources:
- requests:
- storage: 2Gi
----
-apiVersion: v1
-kind: Service
-metadata:
- name: clickhouse
- namespace: cameleer
-spec:
- clusterIP: None
- selector:
- app: clickhouse
- ports:
- - port: 8123
- targetPort: 8123
- name: http
- - port: 9000
- targetPort: 9000
- name: native
----
-apiVersion: v1
-kind: Service
-metadata:
- name: clickhouse-external
- namespace: cameleer
-spec:
- type: NodePort
- selector:
- app: clickhouse
- ports:
- - port: 8123
- targetPort: 8123
- nodePort: 30123
- name: http
- - port: 9000
- targetPort: 9000
- nodePort: 30900
- name: native
diff --git a/deploy/opensearch.yaml b/deploy/opensearch.yaml
new file mode 100644
index 00000000..b2352ab2
--- /dev/null
+++ b/deploy/opensearch.yaml
@@ -0,0 +1,84 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: opensearch
+ namespace: cameleer
+spec:
+ serviceName: opensearch
+ replicas: 1
+ selector:
+ matchLabels:
+ app: opensearch
+ template:
+ metadata:
+ labels:
+ app: opensearch
+ spec:
+ containers:
+ - name: opensearch
+ image: opensearchproject/opensearch:2.19.0
+ ports:
+ - containerPort: 9200
+ name: http
+ - containerPort: 9300
+ name: transport
+ env:
+ - name: discovery.type
+ value: single-node
+ - name: DISABLE_SECURITY_PLUGIN
+ value: "true"
+ volumeMounts:
+ - name: data
+ mountPath: /usr/share/opensearch/data
+ resources:
+ requests:
+ memory: "1Gi"
+ cpu: "200m"
+ limits:
+ memory: "4Gi"
+ cpu: "1000m"
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ - curl -s http://localhost:9200/_cluster/health
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - -c
+ - curl -s http://localhost:9200/_cluster/health
+ initialDelaySeconds: 15
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 3
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 10Gi
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: opensearch
+ namespace: cameleer
+spec:
+ clusterIP: None
+ selector:
+ app: opensearch
+ ports:
+ - port: 9200
+ targetPort: 9200
+ name: http
+ - port: 9300
+ targetPort: 9300
+ name: transport
diff --git a/deploy/postgres.yaml b/deploy/postgres.yaml
new file mode 100644
index 00000000..8a4f64e6
--- /dev/null
+++ b/deploy/postgres.yaml
@@ -0,0 +1,91 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: postgres
+ namespace: cameleer
+spec:
+ serviceName: postgres
+ replicas: 1
+ selector:
+ matchLabels:
+ app: postgres
+ template:
+ metadata:
+ labels:
+ app: postgres
+ spec:
+ containers:
+ - name: postgres
+ image: timescale/timescaledb-ha:pg16
+ ports:
+ - containerPort: 5432
+ name: postgres
+ env:
+ - name: POSTGRES_DB
+ value: cameleer3
+ - name: POSTGRES_USER
+ valueFrom:
+ secretKeyRef:
+ name: postgres-credentials
+ key: POSTGRES_USER
+ - name: POSTGRES_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: postgres-credentials
+ key: POSTGRES_PASSWORD
+ volumeMounts:
+ - name: data
+ mountPath: /var/lib/postgresql/data
+ resources:
+ requests:
+ memory: "1Gi"
+ cpu: "200m"
+ limits:
+ memory: "4Gi"
+ cpu: "1000m"
+ livenessProbe:
+ exec:
+ command:
+ - pg_isready
+ - -U
+ - cameleer
+ - -d
+ - cameleer3
+ initialDelaySeconds: 15
+ periodSeconds: 10
+ timeoutSeconds: 3
+ failureThreshold: 3
+ readinessProbe:
+ exec:
+ command:
+ - pg_isready
+ - -U
+ - cameleer
+ - -d
+ - cameleer3
+ initialDelaySeconds: 5
+ periodSeconds: 5
+ timeoutSeconds: 3
+ failureThreshold: 3
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 10Gi
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: postgres
+ namespace: cameleer
+spec:
+ clusterIP: None
+ selector:
+ app: postgres
+ ports:
+ - port: 5432
+ targetPort: 5432
+ name: postgres
diff --git a/deploy/server.yaml b/deploy/server.yaml
index 34d3e6ae..42c92d76 100644
--- a/deploy/server.yaml
+++ b/deploy/server.yaml
@@ -22,17 +22,19 @@ spec:
- containerPort: 8081
env:
- name: SPRING_DATASOURCE_URL
- value: "jdbc:ch://clickhouse:8123/cameleer3"
+ value: "jdbc:postgresql://postgres:5432/cameleer3"
- name: SPRING_DATASOURCE_USERNAME
valueFrom:
secretKeyRef:
- name: clickhouse-credentials
- key: CLICKHOUSE_USER
+ name: postgres-credentials
+ key: POSTGRES_USER
- name: SPRING_DATASOURCE_PASSWORD
valueFrom:
secretKeyRef:
- name: clickhouse-credentials
- key: CLICKHOUSE_PASSWORD
+ name: postgres-credentials
+ key: POSTGRES_PASSWORD
+ - name: OPENSEARCH_URL
+ value: "http://opensearch:9200"
- name: CAMELEER_AUTH_TOKEN
valueFrom:
secretKeyRef:
diff --git a/docker-compose.yml b/docker-compose.yml
index 4fa23d89..c5698b23 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,20 +1,27 @@
services:
- clickhouse:
- image: clickhouse/clickhouse-server:25.3
+ postgres:
+ image: timescale/timescaledb-ha:pg16
ports:
- - "8123:8123"
- - "9000:9000"
- volumes:
- - clickhouse-data:/var/lib/clickhouse
- - ./clickhouse/init:/docker-entrypoint-initdb.d
+ - "5432:5432"
environment:
- CLICKHOUSE_USER: cameleer
- CLICKHOUSE_PASSWORD: cameleer_dev
- CLICKHOUSE_DB: cameleer3
- ulimits:
- nofile:
- soft: 262144
- hard: 262144
+ POSTGRES_DB: cameleer3
+ POSTGRES_USER: cameleer
+ POSTGRES_PASSWORD: cameleer_dev
+ volumes:
+ - pgdata:/home/postgres/pgdata/data
+
+ opensearch:
+ image: opensearchproject/opensearch:2.19.0
+ ports:
+ - "9200:9200"
+ - "9300:9300"
+ environment:
+ discovery.type: single-node
+ DISABLE_SECURITY_PLUGIN: "true"
+ OPENSEARCH_JAVA_OPTS: "-Xms512m -Xmx512m"
+ volumes:
+ - osdata:/usr/share/opensearch/data
volumes:
- clickhouse-data:
+ pgdata:
+ osdata:
diff --git a/pom.xml b/pom.xml
index bca775b0..2f27d0fd 100644
--- a/pom.xml
+++ b/pom.xml
@@ -44,6 +44,13 @@
cameleer3-server-core
${project.version}