refactor: remove all ClickHouse code, old interfaces, and SQL migrations
- Delete all ClickHouse storage implementations and config
- Delete old core interfaces (ExecutionRepository, DiagramRepository, MetricsRepository, SearchEngine, RawExecutionRow)
- Delete ClickHouse SQL migration files
- Delete AbstractClickHouseIT
- Update controllers to use new store interfaces (DiagramStore, ExecutionStore)
- Fix IngestionService calls in controllers for new synchronous API
- Migrate all ITs from AbstractClickHouseIT to AbstractPostgresIT
- Fix count() syntax and remove ClickHouse-specific test assertions
- Update TreeReconstructionTest for new buildTree() method

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -1,57 +0,0 @@
|
||||
-- Cameleer3 ClickHouse Schema
-- Tables for route executions, route diagrams, and agent metrics.

-- One row per route execution. Per-processor detail is denormalized into
-- parallel arrays: index i of each processor_* array describes processor i.
CREATE TABLE IF NOT EXISTS route_executions (
    execution_id     String,
    route_id         LowCardinality(String),
    agent_id         LowCardinality(String),
    status           LowCardinality(String),
    start_time       DateTime64(3, 'UTC'),
    end_time         Nullable(DateTime64(3, 'UTC')),
    duration_ms      UInt64,
    correlation_id   String,
    exchange_id      String,
    error_message    String DEFAULT '',
    error_stacktrace String DEFAULT '',

    -- Nested processor executions stored as parallel arrays
    processor_ids       Array(String),
    processor_types     Array(LowCardinality(String)),
    processor_starts    Array(DateTime64(3, 'UTC')),
    processor_ends      Array(DateTime64(3, 'UTC')),
    processor_durations Array(UInt64),
    processor_statuses  Array(LowCardinality(String)),

    -- Metadata: ingestion timestamp stamped server-side on insert
    server_received_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC'),

    -- Skip indexes: bloom filter for correlation-id point lookups,
    -- token bloom filter for text search in error messages
    INDEX idx_correlation correlation_id TYPE bloom_filter GRANULARITY 4,
    INDEX idx_error error_message TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4
)
ENGINE = MergeTree()
PARTITION BY toYYYYMMDD(start_time)
ORDER BY (agent_id, status, start_time, execution_id)
-- 30-day retention; ttl_only_drop_parts lets TTL expiry drop whole parts
-- instead of rewriting them
TTL toDateTime(start_time) + toIntervalDay(30)
SETTINGS ttl_only_drop_parts = 1;
|
||||
|
||||
-- Content-addressed route diagrams: content_hash is the sole key, so
-- identical definitions deduplicate; ReplacingMergeTree keeps the row with
-- the newest created_at per hash (after background merges / FINAL).
CREATE TABLE IF NOT EXISTS route_diagrams (
    content_hash String,
    route_id     LowCardinality(String),
    agent_id     LowCardinality(String),
    definition   String,
    created_at   DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC')
)
ENGINE = ReplacingMergeTree(created_at)
ORDER BY (content_hash);
|
||||
|
||||
-- Time-series agent metrics: one row per (agent, metric, sample), with
-- free-form dimension tags in a Map column.
CREATE TABLE IF NOT EXISTS agent_metrics (
    agent_id           LowCardinality(String),
    collected_at       DateTime64(3, 'UTC'),
    metric_name        LowCardinality(String),
    metric_value       Float64,
    tags               Map(String, String),
    server_received_at DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC')
)
ENGINE = MergeTree()
PARTITION BY toYYYYMMDD(collected_at)
ORDER BY (agent_id, metric_name, collected_at)
-- Same 30-day retention policy as route_executions
TTL toDateTime(collected_at) + toIntervalDay(30)
SETTINGS ttl_only_drop_parts = 1;
|
||||
@@ -1,25 +0,0 @@
|
||||
-- Phase 2: Schema extension for search, detail, and diagram linking columns.
-- Adds exchange snapshot data, processor tree metadata, and diagram content hash.
-- Every clause uses IF NOT EXISTS, so re-running the migration is a no-op.

ALTER TABLE route_executions
    ADD COLUMN IF NOT EXISTS exchange_bodies String DEFAULT '',
    ADD COLUMN IF NOT EXISTS exchange_headers String DEFAULT '',
    -- Parallel arrays aligned index-for-index with the processor_* arrays
    -- declared in 01-schema.sql
    ADD COLUMN IF NOT EXISTS processor_depths Array(UInt16) DEFAULT [],
    ADD COLUMN IF NOT EXISTS processor_parent_indexes Array(Int32) DEFAULT [],
    ADD COLUMN IF NOT EXISTS processor_error_messages Array(String) DEFAULT [],
    ADD COLUMN IF NOT EXISTS processor_error_stacktraces Array(String) DEFAULT [],
    ADD COLUMN IF NOT EXISTS processor_input_bodies Array(String) DEFAULT [],
    ADD COLUMN IF NOT EXISTS processor_output_bodies Array(String) DEFAULT [],
    ADD COLUMN IF NOT EXISTS processor_input_headers Array(String) DEFAULT [],
    ADD COLUMN IF NOT EXISTS processor_output_headers Array(String) DEFAULT [],
    ADD COLUMN IF NOT EXISTS processor_diagram_node_ids Array(String) DEFAULT [],
    ADD COLUMN IF NOT EXISTS diagram_content_hash String DEFAULT '';

-- Skip indexes for full-text search on new text columns
ALTER TABLE route_executions
    ADD INDEX IF NOT EXISTS idx_exchange_bodies exchange_bodies TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4,
    ADD INDEX IF NOT EXISTS idx_exchange_headers exchange_headers TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4;

-- Skip index on error_stacktrace (not indexed in 01-schema.sql, needed for SRCH-05)
ALTER TABLE route_executions
    ADD INDEX IF NOT EXISTS idx_error_stacktrace error_stacktrace TYPE tokenbf_v1(32768, 3, 0) GRANULARITY 4;
|
||||
@@ -1,10 +0,0 @@
|
||||
-- Application users. ReplacingMergeTree keyed on user_id keeps the row with
-- the newest updated_at after merges; readers should use FINAL or argMax to
-- see the latest version deterministically.
-- NOTE(review): ORDER BY (user_id) alone means the same user_id arriving from
-- two different providers collapses into one row — confirm that is intended.
CREATE TABLE IF NOT EXISTS users (
    user_id      String,
    provider     LowCardinality(String),
    email        String DEFAULT '',
    display_name String DEFAULT '',
    roles        Array(LowCardinality(String)),
    created_at   DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC'),
    updated_at   DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC')
) ENGINE = ReplacingMergeTree(updated_at)
ORDER BY (user_id);
|
||||
@@ -1,13 +0,0 @@
|
||||
-- Singleton OIDC configuration row (config_id defaults to 'default');
-- each update inserts a new version and ReplacingMergeTree(updated_at)
-- retains the newest one.
-- NOTE(review): client_secret is stored as plain text here — confirm
-- encryption at rest / access controls are handled at another layer.
CREATE TABLE IF NOT EXISTS oidc_config (
    config_id          String DEFAULT 'default',
    enabled            Bool DEFAULT false,
    issuer_uri         String DEFAULT '',
    client_id          String DEFAULT '',
    client_secret      String DEFAULT '',
    roles_claim        String DEFAULT 'realm_access.roles',
    default_roles      Array(LowCardinality(String)),
    auto_signup        Bool DEFAULT true,
    display_name_claim String DEFAULT 'name',
    updated_at         DateTime64(3, 'UTC') DEFAULT now64(3, 'UTC')
) ENGINE = ReplacingMergeTree(updated_at)
ORDER BY (config_id);
|
||||
@@ -1 +0,0 @@
|
||||
-- Idempotent migration adding auto_signup (defaults to enabled); presumably
-- gates automatic account creation on first OIDC login — confirm in auth code.
ALTER TABLE oidc_config ADD COLUMN IF NOT EXISTS auto_signup Bool DEFAULT true;
|
||||
@@ -1 +0,0 @@
|
||||
-- Idempotent migration adding display_name_claim; presumably selects which
-- OIDC token claim supplies the user's display name — confirm against auth code.
ALTER TABLE oidc_config ADD COLUMN IF NOT EXISTS display_name_claim String DEFAULT 'name';
|
||||
@@ -1,35 +0,0 @@
|
||||
-- Pre-aggregated 5-minute stats rollup for route executions.
-- Uses AggregatingMergeTree with -State/-Merge combinators so intermediate
-- aggregates can be merged across arbitrary time windows and dimensions.

-- Drop existing objects to allow schema changes (MV must be dropped before table)
DROP VIEW IF EXISTS route_execution_stats_5m_mv;
DROP TABLE IF EXISTS route_execution_stats_5m;

CREATE TABLE route_execution_stats_5m (
    bucket       DateTime('UTC'),
    route_id     LowCardinality(String),
    agent_id     LowCardinality(String),
    -- Partial-aggregation states; query time finalizes them with
    -- countMerge() / countIfMerge() / sumMerge() / quantileTDigestMerge()
    total_count  AggregateFunction(count),
    failed_count AggregateFunction(countIf, UInt8),
    duration_sum AggregateFunction(sum, UInt64),
    p99_duration AggregateFunction(quantileTDigest(0.99), UInt64)
)
ENGINE = AggregatingMergeTree()
PARTITION BY toYYYYMMDD(bucket)
ORDER BY (agent_id, route_id, bucket)
TTL bucket + toIntervalDay(30)
SETTINGS ttl_only_drop_parts = 1;

-- Feed the rollup from every insert into route_executions.
CREATE MATERIALIZED VIEW route_execution_stats_5m_mv
TO route_execution_stats_5m
AS SELECT
    toStartOfFiveMinutes(start_time) AS bucket,
    route_id,
    agent_id,
    countState() AS total_count,
    countIfState(status = 'FAILED') AS failed_count,
    sumState(duration_ms) AS duration_sum,
    quantileTDigestState(0.99)(duration_ms) AS p99_duration
FROM route_executions
GROUP BY bucket, route_id, agent_id;
|
||||
@@ -1,16 +0,0 @@
|
||||
-- One-time idempotent backfill of existing route_executions into the
-- 5-minute stats rollup table. Safe for repeated execution — the WHERE
-- clause skips the INSERT if the target table already contains data.
-- NOTE(review): the emptiness check and the insert are not atomic; running
-- this while the materialized view is receiving live traffic could
-- double-count — confirm it only runs during migration downtime.
INSERT INTO route_execution_stats_5m
SELECT
    toStartOfFiveMinutes(start_time) AS bucket,
    route_id,
    agent_id,
    countState() AS total_count,
    countIfState(status = 'FAILED') AS failed_count,
    sumState(duration_ms) AS duration_sum,
    quantileTDigestState(0.99)(duration_ms) AS p99_duration
FROM route_executions
-- Scalar subquery guard: evaluated once; filters out every source row when
-- the rollup already holds data, making the whole INSERT a no-op
WHERE (SELECT count() FROM route_execution_stats_5m) = 0
GROUP BY bucket, route_id, agent_id;
|
||||
Reference in New Issue
Block a user