refactor: remove all ClickHouse code, old interfaces, and SQL migrations

- Delete all ClickHouse storage implementations and config
- Delete old core interfaces (ExecutionRepository, DiagramRepository, MetricsRepository, SearchEngine, RawExecutionRow)
- Delete ClickHouse SQL migration files
- Delete AbstractClickHouseIT
- Update controllers to use new store interfaces (DiagramStore, ExecutionStore)
- Fix IngestionService calls in controllers for new synchronous API
- Migrate all ITs from AbstractClickHouseIT to AbstractPostgresIT
- Fix count() syntax and remove ClickHouse-specific test assertions
- Update TreeReconstructionTest for new buildTree() method

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
hsiegeln
2026-03-16 18:56:13 +01:00
parent 7dbfaf0932
commit 565b548ac1
68 changed files with 226 additions and 2238 deletions

View File

@@ -7,7 +7,7 @@ import java.util.List;
* Full detail of a route execution, including the nested processor tree.
* <p>
* This is the rich detail model returned by the detail endpoint. The processor
* tree is reconstructed from individual processor records stored in PostgreSQL.
*
* @param executionId unique execution identifier
* @param routeId Camel route ID

View File

@@ -7,7 +7,7 @@ import java.util.List;
/**
* Nested tree node representing a single processor execution within a route.
* <p>
* The tree structure is reconstructed from individual processor records stored in PostgreSQL.
* Each node may have children (e.g., processors inside a split or try-catch block).
*/
public final class ProcessorNode {

View File

@@ -1,59 +0,0 @@
package com.cameleer3.server.core.detail;
import java.time.Instant;
/**
 * Raw execution data from ClickHouse, including all parallel arrays needed
 * for tree reconstruction. This is the intermediate representation between
 * the database and the {@link ExecutionDetail} domain object.
 * <p>
 * All {@code processor*} components are parallel arrays: index {@code i} of
 * each array describes the same processor execution, so the arrays are
 * presumably all the same length — TODO confirm the producer guarantees this.
 * <p>
 * NOTE(review): because several components are arrays, the compiler-generated
 * {@code equals}/{@code hashCode}/{@code toString} compare those components by
 * array reference identity, not by element contents. Do not use instances of
 * this record as map keys or in value-based comparisons; the accessors also
 * expose the mutable backing arrays directly.
 *
 * @param executionId unique execution identifier
 * @param routeId Camel route ID
 * @param agentId agent instance
 * @param status execution status
 * @param startTime execution start time
 * @param endTime execution end time
 * @param durationMs execution duration in milliseconds
 * @param correlationId correlation ID
 * @param exchangeId Camel exchange ID
 * @param errorMessage execution-level error message
 * @param errorStackTrace execution-level error stack trace
 * @param diagramContentHash content hash for diagram linking
 * @param processorIds processor IDs (parallel array)
 * @param processorTypes processor types (parallel array)
 * @param processorStatuses processor statuses (parallel array)
 * @param processorStarts processor start times (parallel array)
 * @param processorEnds processor end times (parallel array)
 * @param processorDurations processor durations in ms (parallel array)
 * @param processorDiagramNodeIds processor diagram node IDs (parallel array)
 * @param processorErrorMessages processor error messages (parallel array)
 * @param processorErrorStacktraces processor error stack traces (parallel array)
 * @param processorDepths processor tree depths (parallel array)
 * @param processorParentIndexes processor parent indexes, -1 for roots (parallel array)
 */
public record RawExecutionRow(
String executionId,
String routeId,
String agentId,
String status,
Instant startTime,
Instant endTime,
long durationMs,
String correlationId,
String exchangeId,
String errorMessage,
String errorStackTrace,
String diagramContentHash,
String[] processorIds,
String[] processorTypes,
String[] processorStatuses,
Instant[] processorStarts,
Instant[] processorEnds,
long[] processorDurations,
String[] processorDiagramNodeIds,
String[] processorErrorMessages,
String[] processorErrorStacktraces,
int[] processorDepths,
int[] processorParentIndexes
) {
}

View File

@@ -70,12 +70,12 @@ public class IngestionService {
/**
 * Maps an agent-reported {@link RouteExecution} to a storage-layer
 * {@link ExecutionRecord}.
 * <p>
 * NOTE(review): the exchange ID is used both as the execution identifier
 * (first argument) and again in its own position further down — this matches
 * the post-refactor diff, but confirm the double use is intended.
 * (The diff rendering had left both the old {@code getExecutionId()} and new
 * {@code getExchangeId()} first-argument lines in place, which would not
 * compile; the post-change lines are kept here.)
 *
 * @param agentId   reporting agent instance
 * @param groupName logical group the agent belongs to
 * @param exec      raw execution payload received from the agent
 * @return record ready for persistence; {@code diagramContentHash} is null
 *         here and set separately by the caller
 */
private ExecutionRecord toExecutionRecord(String agentId, String groupName,
RouteExecution exec) {
return new ExecutionRecord(
exec.getExchangeId(), exec.getRouteId(), agentId, groupName,
// Default to RUNNING when the agent has not reported a status yet.
exec.getStatus() != null ? exec.getStatus().name() : "RUNNING",
exec.getCorrelationId(), exec.getExchangeId(),
exec.getStartTime(), exec.getEndTime(),
exec.getDurationMs(),
exec.getErrorMessage(), exec.getErrorStackTrace(),
null // diagramContentHash set separately
);
}
@@ -94,7 +94,7 @@ public class IngestionService {
p.getStartTime() != null ? p.getStartTime() : execStartTime,
p.getEndTime(),
p.getDurationMs(),
p.getErrorMessage(), p.getErrorStacktrace(),
p.getErrorMessage(), p.getErrorStackTrace(),
truncateBody(p.getInputBody()), truncateBody(p.getOutputBody()),
p.getInputHeaders() != null ? p.getInputHeaders().toString() : null,
p.getOutputHeaders() != null ? p.getOutputHeaders().toString() : null

View File

@@ -6,7 +6,7 @@ import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
/**
* Bounded write buffer that decouples HTTP ingestion from database batch inserts.
* <p>
* Items are offered to the buffer by controllers and drained in batches by a
* scheduled flush task. When the buffer is full, {@link #offer} returns false,

View File

@@ -1,72 +0,0 @@
package com.cameleer3.server.core.search;
import java.util.List;
/**
 * Swappable search backend abstraction.
 * <p>
 * This interface decouples the service layer and controllers from the
 * concrete search backend, so the implementation (e.g., ClickHouse,
 * PostgreSQL, OpenSearch) can be replaced without changing callers.
 * <p>
 * NOTE(review): thread-safety is not specified here; implementations are
 * presumably safe for concurrent use by controllers — confirm per backend.
 */
public interface SearchEngine {
/**
 * Search for route executions matching the given criteria.
 *
 * @param request search filters and pagination
 * @return paginated search results with total count
 */
SearchResult<ExecutionSummary> search(SearchRequest request);
/**
 * Count route executions matching the given criteria (without fetching data).
 *
 * @param request search filters
 * @return total number of matching executions
 */
long count(SearchRequest request);
/**
 * Compute aggregate stats: P99 latency and count of currently running executions.
 *
 * @param from start of the time window
 * @param to end of the time window
 * @return execution stats
 */
ExecutionStats stats(java.time.Instant from, java.time.Instant to);
/**
 * Compute aggregate stats scoped to specific routes and agents.
 * Overload of {@link #stats(java.time.Instant, java.time.Instant)} with
 * optional route/agent scoping.
 *
 * @param from start of the time window
 * @param to end of the time window
 * @param routeId optional route ID filter
 * @param agentIds optional agent ID filter (from group resolution)
 * @return execution stats
 */
ExecutionStats stats(java.time.Instant from, java.time.Instant to, String routeId, List<String> agentIds);
/**
 * Compute bucketed time-series stats over a time window.
 *
 * @param from start of the time window
 * @param to end of the time window
 * @param bucketCount number of buckets to divide the window into
 * @return bucketed stats
 */
StatsTimeseries timeseries(java.time.Instant from, java.time.Instant to, int bucketCount);
/**
 * Compute bucketed time-series stats scoped to specific routes and agents.
 * Overload of {@link #timeseries(java.time.Instant, java.time.Instant, int)}
 * with optional route/agent scoping.
 *
 * @param from start of the time window
 * @param to end of the time window
 * @param bucketCount number of buckets to divide the window into
 * @param routeId optional route ID filter
 * @param agentIds optional agent ID filter (from group resolution)
 * @return bucketed stats
 */
StatsTimeseries timeseries(java.time.Instant from, java.time.Instant to, int bucketCount,
String routeId, List<String> agentIds);
}

View File

@@ -75,7 +75,7 @@ public record SearchRequest(
if (!"asc".equalsIgnoreCase(sortDir)) sortDir = "desc";
}
/** Returns the validated database column name for ORDER BY. */
public String sortColumn() {
return SORT_FIELD_TO_COLUMN.getOrDefault(sortField, "start_time");
}

View File

@@ -1,35 +0,0 @@
package com.cameleer3.server.core.storage;
import com.cameleer3.common.graph.RouteGraph;
import com.cameleer3.server.core.ingestion.TaggedDiagram;
import java.util.List;
import java.util.Optional;
/**
 * Repository for route diagram storage with content-hash deduplication.
 * <p>
 * The content hash identifies a diagram's structure independently of which
 * agent reported it, enabling lookup across agents of the same application.
 */
public interface DiagramRepository {
/**
 * Store a tagged route graph. Duplicate content hashes are collapsed by the
 * backing store (originally ClickHouse's ReplacingMergeTree — the exact
 * dedup mechanism is implementation-specific).
 */
void store(TaggedDiagram diagram);
/**
 * Find a route graph by its content hash.
 *
 * @return the graph, or empty if no diagram with that hash is stored
 */
Optional<RouteGraph> findByContentHash(String contentHash);
/**
 * Find the content hash for the latest diagram of a given route and agent.
 *
 * @return the hash, or empty if the route/agent pair has no diagram
 */
Optional<String> findContentHashForRoute(String routeId, String agentId);
/**
 * Find the content hash for the latest diagram of a route across any agent in the given list.
 * All instances of the same application produce the same route graph, so any agent's
 * diagram for the same route will have the same content hash.
 */
Optional<String> findContentHashForRouteByAgents(String routeId, List<String> agentIds);
}

View File

@@ -1,28 +0,0 @@
package com.cameleer3.server.core.storage;
import com.cameleer3.server.core.detail.RawExecutionRow;
import com.cameleer3.server.core.ingestion.TaggedExecution;
import java.util.List;
import java.util.Optional;
/**
 * Repository for route execution storage and retrieval.
 */
public interface ExecutionRepository {
/**
 * Insert a batch of tagged route executions.
 * Implementations must perform a single batch insert for efficiency.
 * NOTE(review): behavior for an empty list is unspecified here —
 * implementations should presumably treat it as a no-op; confirm.
 */
void insertBatch(List<TaggedExecution> executions);
/**
 * Find a raw execution row by execution ID, including all parallel arrays
 * needed for processor tree reconstruction.
 *
 * @param executionId the execution ID to look up
 * @return the raw execution row, or empty if not found
 */
Optional<RawExecutionRow> findRawById(String executionId);
}

View File

@@ -1,17 +0,0 @@
package com.cameleer3.server.core.storage;
import com.cameleer3.server.core.storage.model.MetricsSnapshot;
import java.util.List;
/**
 * Repository for agent metrics batch inserts (write-only: no read methods
 * are exposed here; metrics are queried through other components).
 */
public interface MetricsRepository {
/**
 * Insert a batch of metrics snapshots.
 * Implementations must perform a single batch insert for efficiency.
 */
void insertBatch(List<MetricsSnapshot> metrics);
}

View File

@@ -1,6 +1,7 @@
package com.cameleer3.server.core.detail;
import com.cameleer3.server.core.storage.ExecutionRepository;
import com.cameleer3.server.core.storage.ExecutionStore;
import com.cameleer3.server.core.storage.ExecutionStore.ProcessorRecord;
import org.junit.jupiter.api.Test;
import java.time.Instant;
@@ -10,33 +11,36 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
/**
 * Unit tests for {@link DetailService#buildTree} logic.
 * <p>
 * Verifies correct parent-child wiring from flat ProcessorRecord lists.
*/
class TreeReconstructionTest {
private final DetailService detailService = new DetailService(mock(ExecutionRepository.class));
private final DetailService detailService = new DetailService(mock(ExecutionStore.class));
private static final Instant NOW = Instant.parse("2026-03-10T10:00:00Z");
/**
 * Builds a minimal {@link ProcessorRecord} test fixture for execution
 * "exec-1": diagram node ID is derived as {@code "node-" + id}, duration is
 * fixed at 10ms, timestamps are {@code NOW}, and the trailing optional
 * fields (errors, bodies, headers) are left null.
 *
 * @param id       processor ID
 * @param type     processor type (e.g., "log", "bean")
 * @param status   execution status string
 * @param depth    tree depth (0 for roots)
 * @param parentId parent processor ID, or null for roots
 */
private ProcessorRecord proc(String id, String type, String status,
int depth, String parentId) {
return new ProcessorRecord(
"exec-1", id, type, "node-" + id,
"default", "route1", depth, parentId,
status, NOW, NOW, 10L,
null, null, null, null, null, null
);
}
@Test
void linearChain_rootChildGrandchild() {
// [root, child, grandchild], depths=[0,1,2], parents=[-1,0,1]
List<ProcessorNode> roots = detailService.reconstructTree(
new String[]{"root", "child", "grandchild"},
new String[]{"log", "bean", "to"},
new String[]{"COMPLETED", "COMPLETED", "COMPLETED"},
new Instant[]{NOW, NOW, NOW},
new Instant[]{NOW, NOW, NOW},
new long[]{10, 20, 30},
new String[]{"n1", "n2", "n3"},
new String[]{"", "", ""},
new String[]{"", "", ""},
new int[]{0, 1, 2},
new int[]{-1, 0, 1}
List<ProcessorRecord> processors = List.of(
proc("root", "log", "COMPLETED", 0, null),
proc("child", "bean", "COMPLETED", 1, "root"),
proc("grandchild", "to", "COMPLETED", 2, "child")
);
List<ProcessorNode> roots = detailService.buildTree(processors);
assertThat(roots).hasSize(1);
ProcessorNode root = roots.get(0);
assertThat(root.getProcessorId()).isEqualTo("root");
@@ -53,21 +57,14 @@ class TreeReconstructionTest {
@Test
void multipleRoots_noNesting() {
// [A, B, C], depths=[0,0,0], parents=[-1,-1,-1]
List<ProcessorNode> roots = detailService.reconstructTree(
new String[]{"A", "B", "C"},
new String[]{"log", "log", "log"},
new String[]{"COMPLETED", "COMPLETED", "COMPLETED"},
new Instant[]{NOW, NOW, NOW},
new Instant[]{NOW, NOW, NOW},
new long[]{10, 20, 30},
new String[]{"n1", "n2", "n3"},
new String[]{"", "", ""},
new String[]{"", "", ""},
new int[]{0, 0, 0},
new int[]{-1, -1, -1}
List<ProcessorRecord> processors = List.of(
proc("A", "log", "COMPLETED", 0, null),
proc("B", "log", "COMPLETED", 0, null),
proc("C", "log", "COMPLETED", 0, null)
);
List<ProcessorNode> roots = detailService.buildTree(processors);
assertThat(roots).hasSize(3);
assertThat(roots.get(0).getProcessorId()).isEqualTo("A");
assertThat(roots.get(1).getProcessorId()).isEqualTo("B");
@@ -77,21 +74,15 @@ class TreeReconstructionTest {
@Test
void branchingTree_parentWithTwoChildren_secondChildHasGrandchild() {
// [parent, child1, child2, grandchild], depths=[0,1,1,2], parents=[-1,0,0,2]
List<ProcessorNode> roots = detailService.reconstructTree(
new String[]{"parent", "child1", "child2", "grandchild"},
new String[]{"split", "log", "bean", "to"},
new String[]{"COMPLETED", "COMPLETED", "COMPLETED", "COMPLETED"},
new Instant[]{NOW, NOW, NOW, NOW},
new Instant[]{NOW, NOW, NOW, NOW},
new long[]{100, 20, 30, 5},
new String[]{"n1", "n2", "n3", "n4"},
new String[]{"", "", "", ""},
new String[]{"", "", "", ""},
new int[]{0, 1, 1, 2},
new int[]{-1, 0, 0, 2}
List<ProcessorRecord> processors = List.of(
proc("parent", "split", "COMPLETED", 0, null),
proc("child1", "log", "COMPLETED", 1, "parent"),
proc("child2", "bean", "COMPLETED", 1, "parent"),
proc("grandchild", "to", "COMPLETED", 2, "child2")
);
List<ProcessorNode> roots = detailService.buildTree(processors);
assertThat(roots).hasSize(1);
ProcessorNode parent = roots.get(0);
assertThat(parent.getProcessorId()).isEqualTo("parent");
@@ -111,30 +102,8 @@ class TreeReconstructionTest {
}
@Test
void emptyArrays_producesEmptyList() {
List<ProcessorNode> roots = detailService.reconstructTree(
new String[]{},
new String[]{},
new String[]{},
new Instant[]{},
new Instant[]{},
new long[]{},
new String[]{},
new String[]{},
new String[]{},
new int[]{},
new int[]{}
);
assertThat(roots).isEmpty();
}
@Test
void nullArrays_producesEmptyList() {
List<ProcessorNode> roots = detailService.reconstructTree(
null, null, null, null, null, null, null, null, null, null, null
);
void emptyList_producesEmptyRoots() {
List<ProcessorNode> roots = detailService.buildTree(List.of());
assertThat(roots).isEmpty();
}
}