refactor: remove all ClickHouse code, old interfaces, and SQL migrations

- Delete all ClickHouse storage implementations and config
- Delete old core interfaces (ExecutionRepository, DiagramRepository, MetricsRepository, SearchEngine, RawExecutionRow)
- Delete ClickHouse SQL migration files
- Delete AbstractClickHouseIT
- Update controllers to use new store interfaces (DiagramStore, ExecutionStore)
- Fix IngestionService calls in controllers for new synchronous API
- Migrate all integration tests (ITs) from AbstractClickHouseIT to AbstractPostgresIT
- Fix count() syntax and remove ClickHouse-specific test assertions
- Update TreeReconstructionTest for new buildTree() method

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
hsiegeln
2026-03-16 18:56:13 +01:00
parent 7dbfaf0932
commit 565b548ac1
68 changed files with 226 additions and 2238 deletions

View File

@@ -1,8 +1,9 @@
package com.cameleer3.server.app.controller;
import com.cameleer3.server.app.storage.ClickHouseExecutionRepository;
import com.cameleer3.server.core.detail.DetailService;
import com.cameleer3.server.core.detail.ExecutionDetail;
import com.cameleer3.server.core.storage.ExecutionStore;
import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.tags.Tag;
@@ -12,14 +13,16 @@ import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
/**
* Endpoints for retrieving execution details and processor snapshots.
* <p>
* The detail endpoint returns a nested processor tree reconstructed from
* flat parallel arrays stored in ClickHouse. The snapshot endpoint returns
* per-processor exchange data (bodies and headers).
* individual processor records stored in PostgreSQL. The snapshot endpoint
* returns per-processor exchange data (bodies and headers).
*/
@RestController
@RequestMapping("/api/v1/executions")
@@ -27,12 +30,12 @@ import java.util.Map;
public class DetailController {
private final DetailService detailService;
private final ClickHouseExecutionRepository executionRepository;
private final ExecutionStore executionStore;
public DetailController(DetailService detailService,
ClickHouseExecutionRepository executionRepository) {
ExecutionStore executionStore) {
this.detailService = detailService;
this.executionRepository = executionRepository;
this.executionStore = executionStore;
}
@GetMapping("/{executionId}")
@@ -52,8 +55,18 @@ public class DetailController {
public ResponseEntity<Map<String, String>> getProcessorSnapshot(
@PathVariable String executionId,
@PathVariable int index) {
return executionRepository.findProcessorSnapshot(executionId, index)
.map(ResponseEntity::ok)
.orElse(ResponseEntity.notFound().build());
List<ProcessorRecord> processors = executionStore.findProcessors(executionId);
if (index < 0 || index >= processors.size()) {
return ResponseEntity.notFound().build();
}
ProcessorRecord p = processors.get(index);
Map<String, String> snapshot = new LinkedHashMap<>();
if (p.inputBody() != null) snapshot.put("inputBody", p.inputBody());
if (p.outputBody() != null) snapshot.put("outputBody", p.outputBody());
if (p.inputHeaders() != null) snapshot.put("inputHeaders", p.inputHeaders());
if (p.outputHeaders() != null) snapshot.put("outputHeaders", p.outputHeaders());
return ResponseEntity.ok(snapshot);
}
}

View File

@@ -11,7 +11,6 @@ import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
@@ -25,8 +24,8 @@ import java.util.List;
/**
* Ingestion endpoint for route diagrams.
* <p>
* Accepts both single {@link RouteGraph} and arrays. Data is buffered
* and flushed to ClickHouse by the flush scheduler.
* Accepts both single {@link RouteGraph} and arrays. Data is written
* synchronously to PostgreSQL via {@link IngestionService}.
*/
@RestController
@RequestMapping("/api/v1/data")
@@ -47,26 +46,12 @@ public class DiagramController {
@Operation(summary = "Ingest route diagram data",
description = "Accepts a single RouteGraph or an array of RouteGraphs")
@ApiResponse(responseCode = "202", description = "Data accepted for processing")
@ApiResponse(responseCode = "503", description = "Buffer full, retry later")
public ResponseEntity<Void> ingestDiagrams(@RequestBody String body) throws JsonProcessingException {
String agentId = extractAgentId();
List<RouteGraph> graphs = parsePayload(body);
List<TaggedDiagram> tagged = graphs.stream()
.map(graph -> new TaggedDiagram(agentId, graph))
.toList();
boolean accepted;
if (tagged.size() == 1) {
accepted = ingestionService.acceptDiagram(tagged.get(0));
} else {
accepted = ingestionService.acceptDiagrams(tagged);
}
if (!accepted) {
log.warn("Diagram buffer full, returning 503");
return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE)
.header("Retry-After", "5")
.build();
for (RouteGraph graph : graphs) {
ingestionService.ingestDiagram(new TaggedDiagram(agentId, graph));
}
return ResponseEntity.accepted().build();

View File

@@ -5,7 +5,7 @@ import com.cameleer3.server.core.agent.AgentInfo;
import com.cameleer3.server.core.agent.AgentRegistryService;
import com.cameleer3.server.core.diagram.DiagramLayout;
import com.cameleer3.server.core.diagram.DiagramRenderer;
import com.cameleer3.server.core.storage.DiagramRepository;
import com.cameleer3.server.core.storage.DiagramStore;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.media.Content;
import io.swagger.v3.oas.annotations.media.Schema;
@@ -39,14 +39,14 @@ public class DiagramRenderController {
private static final MediaType SVG_MEDIA_TYPE = MediaType.valueOf("image/svg+xml");
private final DiagramRepository diagramRepository;
private final DiagramStore diagramStore;
private final DiagramRenderer diagramRenderer;
private final AgentRegistryService registryService;
public DiagramRenderController(DiagramRepository diagramRepository,
public DiagramRenderController(DiagramStore diagramStore,
DiagramRenderer diagramRenderer,
AgentRegistryService registryService) {
this.diagramRepository = diagramRepository;
this.diagramStore = diagramStore;
this.diagramRenderer = diagramRenderer;
this.registryService = registryService;
}
@@ -64,7 +64,7 @@ public class DiagramRenderController {
@PathVariable String contentHash,
HttpServletRequest request) {
Optional<RouteGraph> graphOpt = diagramRepository.findByContentHash(contentHash);
Optional<RouteGraph> graphOpt = diagramStore.findByContentHash(contentHash);
if (graphOpt.isEmpty()) {
return ResponseEntity.notFound().build();
}
@@ -105,12 +105,12 @@ public class DiagramRenderController {
return ResponseEntity.notFound().build();
}
Optional<String> contentHash = diagramRepository.findContentHashForRouteByAgents(routeId, agentIds);
Optional<String> contentHash = diagramStore.findContentHashForRouteByAgents(routeId, agentIds);
if (contentHash.isEmpty()) {
return ResponseEntity.notFound().build();
}
Optional<RouteGraph> graphOpt = diagramRepository.findByContentHash(contentHash.get());
Optional<RouteGraph> graphOpt = diagramStore.findByContentHash(contentHash.get());
if (graphOpt.isEmpty()) {
return ResponseEntity.notFound().build();
}

View File

@@ -1,8 +1,9 @@
package com.cameleer3.server.app.controller;
import com.cameleer3.common.model.RouteExecution;
import com.cameleer3.server.core.agent.AgentInfo;
import com.cameleer3.server.core.agent.AgentRegistryService;
import com.cameleer3.server.core.ingestion.IngestionService;
import com.cameleer3.server.core.ingestion.TaggedExecution;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
@@ -11,7 +12,6 @@ import io.swagger.v3.oas.annotations.responses.ApiResponse;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
@@ -25,9 +25,8 @@ import java.util.List;
/**
* Ingestion endpoint for route execution data.
* <p>
* Accepts both single {@link RouteExecution} and arrays. Data is buffered
* in a {@link com.cameleer3.server.core.ingestion.WriteBuffer} and flushed
* to ClickHouse by the flush scheduler.
* Accepts both single {@link RouteExecution} and arrays. Data is written
* synchronously to PostgreSQL via {@link IngestionService}.
*/
@RestController
@RequestMapping("/api/v1/data")
@@ -37,10 +36,14 @@ public class ExecutionController {
private static final Logger log = LoggerFactory.getLogger(ExecutionController.class);
private final IngestionService ingestionService;
private final AgentRegistryService registryService;
private final ObjectMapper objectMapper;
public ExecutionController(IngestionService ingestionService, ObjectMapper objectMapper) {
public ExecutionController(IngestionService ingestionService,
AgentRegistryService registryService,
ObjectMapper objectMapper) {
this.ingestionService = ingestionService;
this.registryService = registryService;
this.objectMapper = objectMapper;
}
@@ -48,26 +51,13 @@ public class ExecutionController {
@Operation(summary = "Ingest route execution data",
description = "Accepts a single RouteExecution or an array of RouteExecutions")
@ApiResponse(responseCode = "202", description = "Data accepted for processing")
@ApiResponse(responseCode = "503", description = "Buffer full, retry later")
public ResponseEntity<Void> ingestExecutions(@RequestBody String body) throws JsonProcessingException {
String agentId = extractAgentId();
String groupName = resolveGroupName(agentId);
List<RouteExecution> executions = parsePayload(body);
List<TaggedExecution> tagged = executions.stream()
.map(exec -> new TaggedExecution(agentId, exec))
.toList();
boolean accepted;
if (tagged.size() == 1) {
accepted = ingestionService.acceptExecution(tagged.get(0));
} else {
accepted = ingestionService.acceptExecutions(tagged);
}
if (!accepted) {
log.warn("Execution buffer full, returning 503");
return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE)
.header("Retry-After", "5")
.build();
for (RouteExecution execution : executions) {
ingestionService.ingestExecution(agentId, groupName, execution);
}
return ResponseEntity.accepted().build();
@@ -78,6 +68,11 @@ public class ExecutionController {
return auth != null ? auth.getName() : "";
}
private String resolveGroupName(String agentId) {
AgentInfo agent = registryService.findById(agentId);
return agent != null ? agent.group() : "";
}
private List<RouteExecution> parsePayload(String body) throws JsonProcessingException {
String trimmed = body.strip();
if (trimmed.startsWith("[")) {

View File

@@ -23,7 +23,7 @@ import java.util.List;
* Ingestion endpoint for agent metrics.
* <p>
* Accepts an array of {@link MetricsSnapshot}. Data is buffered
* and flushed to ClickHouse by the flush scheduler.
* and flushed to PostgreSQL by the flush scheduler.
*/
@RestController
@RequestMapping("/api/v1/data")