refactor: rename group_name→application_name in DB, OpenSearch, SQL
Some checks failed
CI / build (push) Failing after 41s
CI / cleanup-branch (push) Has been skipped
CI / docker (push) Has been skipped
CI / deploy (push) Has been skipped
CI / deploy-feature (push) Has been skipped

Consolidate V1-V7 Flyway migrations into a single V1__init.sql with
all columns renamed from group_name to application_name. Requires a
fresh database (wipe flyway_schema_history and all existing data).

- DB columns: executions.group_name → application_name,
  processor_executions.group_name → application_name
- Continuous aggregates: all views updated to use application_name
- OpenSearch field: group_name → application_name in index/query
- All Java SQL strings updated to match new column names
- Delete V2-V7 migration files (folded into V1)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
hsiegeln
2026-03-23 21:24:19 +01:00
parent 8ad0016a8e
commit 6a5dba4eba
13 changed files with 115 additions and 159 deletions

View File

@@ -242,19 +242,19 @@ public class AgentRegistrationController {
Instant from1m = now.minus(1, ChronoUnit.MINUTES);
try {
jdbc.query(
"SELECT group_name, " +
"SELECT application_name, " +
"SUM(total_count) AS total, " +
"SUM(failed_count) AS failed, " +
"COUNT(DISTINCT route_id) AS active_routes " +
"FROM stats_1m_route WHERE bucket >= ? AND bucket < ? " +
"GROUP BY group_name",
"GROUP BY application_name",
rs -> {
long total = rs.getLong("total");
long failed = rs.getLong("failed");
double tps = total / 60.0;
double errorRate = total > 0 ? (double) failed / total : 0.0;
int activeRoutes = rs.getInt("active_routes");
result.put(rs.getString("group_name"), new double[]{tps, errorRate, activeRoutes});
result.put(rs.getString("application_name"), new double[]{tps, errorRate, activeRoutes});
},
Timestamp.from(from1m), Timestamp.from(now));
} catch (Exception e) {

View File

@@ -73,11 +73,11 @@ public class RouteCatalogController {
Map<String, Instant> routeLastSeen = new LinkedHashMap<>();
try {
jdbc.query(
"SELECT group_name, route_id, SUM(total_count) AS cnt, MAX(bucket) AS last_seen " +
"SELECT application_name, route_id, SUM(total_count) AS cnt, MAX(bucket) AS last_seen " +
"FROM stats_1m_route WHERE bucket >= ? AND bucket < ? " +
"GROUP BY group_name, route_id",
"GROUP BY application_name, route_id",
rs -> {
String key = rs.getString("group_name") + "/" + rs.getString("route_id");
String key = rs.getString("application_name") + "/" + rs.getString("route_id");
routeExchangeCounts.put(key, rs.getLong("cnt"));
Timestamp ts = rs.getTimestamp("last_seen");
if (ts != null) routeLastSeen.put(key, ts.toInstant());
@@ -91,9 +91,9 @@ public class RouteCatalogController {
Map<String, Double> agentTps = new LinkedHashMap<>();
try {
jdbc.query(
"SELECT group_name, SUM(total_count) AS cnt " +
"SELECT application_name, SUM(total_count) AS cnt " +
"FROM stats_1m_route WHERE bucket >= ? AND bucket < ? " +
"GROUP BY group_name",
"GROUP BY application_name",
rs -> {
// This gives per-app TPS; we'll distribute among agents below
},

View File

@@ -44,7 +44,7 @@ public class RouteMetricsController {
long windowSeconds = Duration.between(fromInstant, toInstant).toSeconds();
var sql = new StringBuilder(
"SELECT group_name, route_id, " +
"SELECT application_name, route_id, " +
"SUM(total_count) AS total, " +
"SUM(failed_count) AS failed, " +
"CASE WHEN SUM(total_count) > 0 THEN SUM(duration_sum) / SUM(total_count) ELSE 0 END AS avg_dur, " +
@@ -55,17 +55,17 @@ public class RouteMetricsController {
params.add(Timestamp.from(toInstant));
if (appId != null) {
sql.append(" AND group_name = ?");
sql.append(" AND application_name = ?");
params.add(appId);
}
sql.append(" GROUP BY group_name, route_id ORDER BY group_name, route_id");
sql.append(" GROUP BY application_name, route_id ORDER BY application_name, route_id");
// Key struct for sparkline lookup
record RouteKey(String appId, String routeId) {}
List<RouteKey> routeKeys = new ArrayList<>();
List<RouteMetrics> metrics = jdbc.query(sql.toString(), (rs, rowNum) -> {
String applicationName = rs.getString("group_name");
String applicationName = rs.getString("application_name");
String routeId = rs.getString("route_id");
long total = rs.getLong("total");
long failed = rs.getLong("failed");
@@ -93,7 +93,7 @@ public class RouteMetricsController {
"SELECT time_bucket(? * INTERVAL '1 second', bucket) AS period, " +
"COALESCE(SUM(total_count), 0) AS cnt " +
"FROM stats_1m_route WHERE bucket >= ? AND bucket < ? " +
"AND group_name = ? AND route_id = ? " +
"AND application_name = ? AND route_id = ? " +
"GROUP BY period ORDER BY period",
(rs, rowNum) -> rs.getDouble("cnt"),
bucketSeconds, Timestamp.from(fromInstant), Timestamp.from(toInstant),
@@ -124,7 +124,7 @@ public class RouteMetricsController {
Instant fromInstant = from != null ? from : toInstant.minus(24, ChronoUnit.HOURS);
var sql = new StringBuilder(
"SELECT processor_id, processor_type, route_id, group_name, " +
"SELECT processor_id, processor_type, route_id, application_name, " +
"SUM(total_count) AS total_count, " +
"SUM(failed_count) AS failed_count, " +
"CASE WHEN SUM(total_count) > 0 THEN SUM(duration_sum)::double precision / SUM(total_count) ELSE 0 END AS avg_duration_ms, " +
@@ -137,10 +137,10 @@ public class RouteMetricsController {
params.add(routeId);
if (appId != null) {
sql.append(" AND group_name = ?");
sql.append(" AND application_name = ?");
params.add(appId);
}
sql.append(" GROUP BY processor_id, processor_type, route_id, group_name");
sql.append(" GROUP BY processor_id, processor_type, route_id, application_name");
sql.append(" ORDER BY SUM(total_count) DESC");
List<ProcessorMetrics> metrics = jdbc.query(sql.toString(), (rs, rowNum) -> {
@@ -151,7 +151,7 @@ public class RouteMetricsController {
rs.getString("processor_id"),
rs.getString("processor_type"),
rs.getString("route_id"),
rs.getString("group_name"),
rs.getString("application_name"),
totalCount,
failedCount,
rs.getDouble("avg_duration_ms"),

View File

@@ -288,7 +288,7 @@ public class OpenSearchIndex implements SearchIndex {
map.put("execution_id", doc.executionId());
map.put("route_id", doc.routeId());
map.put("agent_id", doc.agentId());
map.put("group_name", doc.applicationName());
map.put("application_name", doc.applicationName());
map.put("status", doc.status());
map.put("correlation_id", doc.correlationId());
map.put("exchange_id", doc.exchangeId());
@@ -323,7 +323,7 @@ public class OpenSearchIndex implements SearchIndex {
(String) src.get("execution_id"),
(String) src.get("route_id"),
(String) src.get("agent_id"),
(String) src.get("group_name"),
(String) src.get("application_name"),
(String) src.get("status"),
src.get("start_time") != null ? Instant.parse((String) src.get("start_time")) : null,
src.get("end_time") != null ? Instant.parse((String) src.get("end_time")) : null,

View File

@@ -24,7 +24,7 @@ public class PostgresExecutionStore implements ExecutionStore {
@Override
public void upsert(ExecutionRecord execution) {
jdbc.update("""
INSERT INTO executions (execution_id, route_id, agent_id, group_name,
INSERT INTO executions (execution_id, route_id, agent_id, application_name,
status, correlation_id, exchange_id, start_time, end_time,
duration_ms, error_message, error_stacktrace, diagram_content_hash,
created_at, updated_at)
@@ -59,7 +59,7 @@ public class PostgresExecutionStore implements ExecutionStore {
List<ProcessorRecord> processors) {
jdbc.batchUpdate("""
INSERT INTO processor_executions (execution_id, processor_id, processor_type,
diagram_node_id, group_name, route_id, depth, parent_processor_id,
diagram_node_id, application_name, route_id, depth, parent_processor_id,
status, start_time, end_time, duration_ms, error_message, error_stacktrace,
input_body, output_body, input_headers, output_headers)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?::jsonb, ?::jsonb)
@@ -103,7 +103,7 @@ public class PostgresExecutionStore implements ExecutionStore {
private static final RowMapper<ExecutionRecord> EXECUTION_MAPPER = (rs, rowNum) ->
new ExecutionRecord(
rs.getString("execution_id"), rs.getString("route_id"),
rs.getString("agent_id"), rs.getString("group_name"),
rs.getString("agent_id"), rs.getString("application_name"),
rs.getString("status"), rs.getString("correlation_id"),
rs.getString("exchange_id"),
toInstant(rs, "start_time"), toInstant(rs, "end_time"),
@@ -115,7 +115,7 @@ public class PostgresExecutionStore implements ExecutionStore {
new ProcessorRecord(
rs.getString("execution_id"), rs.getString("processor_id"),
rs.getString("processor_type"), rs.getString("diagram_node_id"),
rs.getString("group_name"), rs.getString("route_id"),
rs.getString("application_name"), rs.getString("route_id"),
rs.getInt("depth"), rs.getString("parent_processor_id"),
rs.getString("status"),
toInstant(rs, "start_time"), toInstant(rs, "end_time"),

View File

@@ -31,7 +31,7 @@ public class PostgresStatsStore implements StatsStore {
@Override
public ExecutionStats statsForApp(Instant from, Instant to, String applicationName) {
return queryStats("stats_1m_app", from, to, List.of(
new Filter("group_name", applicationName)));
new Filter("application_name", applicationName)));
}
@Override
@@ -58,7 +58,7 @@ public class PostgresStatsStore implements StatsStore {
@Override
public StatsTimeseries timeseriesForApp(Instant from, Instant to, int bucketCount, String applicationName) {
return queryTimeseries("stats_1m_app", from, to, bucketCount, List.of(
new Filter("group_name", applicationName)), true);
new Filter("application_name", applicationName)), true);
}
@Override

View File

@@ -13,6 +13,7 @@ CREATE TABLE users (
provider TEXT NOT NULL,
email TEXT,
display_name TEXT,
password_hash TEXT,
created_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
@@ -39,12 +40,20 @@ CREATE TABLE groups (
created_at TIMESTAMPTZ NOT NULL DEFAULT now()
);
-- Built-in Admins group
INSERT INTO groups (id, name) VALUES
('00000000-0000-0000-0000-000000000010', 'Admins');
CREATE TABLE group_roles (
group_id UUID NOT NULL REFERENCES groups(id) ON DELETE CASCADE,
role_id UUID NOT NULL REFERENCES roles(id) ON DELETE CASCADE,
PRIMARY KEY (group_id, role_id)
);
-- Assign ADMIN role to Admins group
INSERT INTO group_roles (group_id, role_id) VALUES
('00000000-0000-0000-0000-000000000010', '00000000-0000-0000-0000-000000000004');
CREATE TABLE user_groups (
user_id TEXT NOT NULL REFERENCES users(user_id) ON DELETE CASCADE,
group_id UUID NOT NULL REFERENCES groups(id) ON DELETE CASCADE,
@@ -70,7 +79,7 @@ CREATE TABLE executions (
execution_id TEXT NOT NULL,
route_id TEXT NOT NULL,
agent_id TEXT NOT NULL,
group_name TEXT NOT NULL,
application_name TEXT NOT NULL,
status TEXT NOT NULL,
correlation_id TEXT,
exchange_id TEXT,
@@ -89,7 +98,7 @@ SELECT create_hypertable('executions', 'start_time', chunk_time_interval => INTE
CREATE INDEX idx_executions_agent_time ON executions (agent_id, start_time DESC);
CREATE INDEX idx_executions_route_time ON executions (route_id, start_time DESC);
CREATE INDEX idx_executions_group_time ON executions (group_name, start_time DESC);
CREATE INDEX idx_executions_app_time ON executions (application_name, start_time DESC);
CREATE INDEX idx_executions_correlation ON executions (correlation_id);
CREATE TABLE processor_executions (
@@ -98,7 +107,7 @@ CREATE TABLE processor_executions (
processor_id TEXT NOT NULL,
processor_type TEXT NOT NULL,
diagram_node_id TEXT,
group_name TEXT NOT NULL,
application_name TEXT NOT NULL,
route_id TEXT NOT NULL,
depth INT NOT NULL,
parent_processor_id TEXT,
@@ -138,6 +147,13 @@ SELECT create_hypertable('agent_metrics', 'collected_at', chunk_time_interval =>
CREATE INDEX idx_metrics_agent_name ON agent_metrics (agent_id, metric_name, collected_at DESC);
-- Retention: drop agent_metrics chunks older than 90 days
SELECT add_retention_policy('agent_metrics', INTERVAL '90 days', if_not_exists => true);
-- Compression: compress agent_metrics chunks older than 7 days
ALTER TABLE agent_metrics SET (timescaledb.compress);
SELECT add_compression_policy('agent_metrics', INTERVAL '7 days', if_not_exists => true);
-- =============================================================
-- Route diagrams
-- =============================================================
@@ -153,22 +169,56 @@ CREATE TABLE route_diagrams (
CREATE INDEX idx_diagrams_route_agent ON route_diagrams (route_id, agent_id);
-- =============================================================
-- OIDC configuration
-- Agent events
-- =============================================================
CREATE TABLE oidc_config (
config_id TEXT PRIMARY KEY DEFAULT 'default',
enabled BOOLEAN NOT NULL DEFAULT false,
issuer_uri TEXT,
client_id TEXT,
client_secret TEXT,
roles_claim TEXT,
default_roles TEXT[] NOT NULL DEFAULT '{}',
auto_signup BOOLEAN DEFAULT false,
display_name_claim TEXT,
updated_at TIMESTAMPTZ NOT NULL DEFAULT now()
CREATE TABLE agent_events (
id BIGSERIAL PRIMARY KEY,
agent_id TEXT NOT NULL,
app_id TEXT NOT NULL,
event_type TEXT NOT NULL,
detail TEXT,
timestamp TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_agent_events_agent ON agent_events(agent_id, timestamp DESC);
CREATE INDEX idx_agent_events_app ON agent_events(app_id, timestamp DESC);
CREATE INDEX idx_agent_events_time ON agent_events(timestamp DESC);
-- =============================================================
-- Server configuration
-- =============================================================
CREATE TABLE server_config (
config_key TEXT PRIMARY KEY,
config_val JSONB NOT NULL,
updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_by TEXT
);
-- =============================================================
-- Admin
-- =============================================================
CREATE TABLE audit_log (
id BIGSERIAL PRIMARY KEY,
timestamp TIMESTAMPTZ NOT NULL DEFAULT now(),
username TEXT NOT NULL,
action TEXT NOT NULL,
category TEXT NOT NULL,
target TEXT,
detail JSONB,
result TEXT NOT NULL,
ip_address TEXT,
user_agent TEXT
);
CREATE INDEX idx_audit_log_timestamp ON audit_log (timestamp DESC);
CREATE INDEX idx_audit_log_username ON audit_log (username);
CREATE INDEX idx_audit_log_category ON audit_log (category);
CREATE INDEX idx_audit_log_action ON audit_log (action);
CREATE INDEX idx_audit_log_target ON audit_log (target);
-- =============================================================
-- Continuous aggregates
-- =============================================================
@@ -197,7 +247,7 @@ CREATE MATERIALIZED VIEW stats_1m_app
WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
SELECT
time_bucket('1 minute', start_time) AS bucket,
group_name,
application_name,
COUNT(*) AS total_count,
COUNT(*) FILTER (WHERE status = 'FAILED') AS failed_count,
COUNT(*) FILTER (WHERE status = 'RUNNING') AS running_count,
@@ -206,7 +256,7 @@ SELECT
approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration
FROM executions
WHERE status IS NOT NULL
GROUP BY bucket, group_name
GROUP BY bucket, application_name
WITH NO DATA;
SELECT add_continuous_aggregate_policy('stats_1m_app',
@@ -218,7 +268,7 @@ CREATE MATERIALIZED VIEW stats_1m_route
WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
SELECT
time_bucket('1 minute', start_time) AS bucket,
group_name,
application_name,
route_id,
COUNT(*) AS total_count,
COUNT(*) FILTER (WHERE status = 'FAILED') AS failed_count,
@@ -228,7 +278,7 @@ SELECT
approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration
FROM executions
WHERE status IS NOT NULL
GROUP BY bucket, group_name, route_id
GROUP BY bucket, application_name, route_id
WITH NO DATA;
SELECT add_continuous_aggregate_policy('stats_1m_route',
@@ -240,7 +290,7 @@ CREATE MATERIALIZED VIEW stats_1m_processor
WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
SELECT
time_bucket('1 minute', start_time) AS bucket,
group_name,
application_name,
route_id,
processor_type,
COUNT(*) AS total_count,
@@ -249,7 +299,7 @@ SELECT
MAX(duration_ms) AS duration_max,
approx_percentile(0.99, percentile_agg(duration_ms::DOUBLE PRECISION)) AS p99_duration
FROM processor_executions
GROUP BY bucket, group_name, route_id, processor_type
GROUP BY bucket, application_name, route_id, processor_type
WITH NO DATA;
SELECT add_continuous_aggregate_policy('stats_1m_processor',
@@ -257,33 +307,23 @@ SELECT add_continuous_aggregate_policy('stats_1m_processor',
end_offset => INTERVAL '1 minute',
schedule_interval => INTERVAL '1 minute');
-- =============================================================
-- Admin
-- =============================================================
CREATE MATERIALIZED VIEW stats_1m_processor_detail
WITH (timescaledb.continuous) AS
SELECT
time_bucket('1 minute', start_time) AS bucket,
application_name,
route_id,
processor_id,
processor_type,
COUNT(*) AS total_count,
COUNT(*) FILTER (WHERE status = 'FAILED') AS failed_count,
SUM(duration_ms) AS duration_sum,
MAX(duration_ms) AS duration_max,
approx_percentile(0.99, percentile_agg(duration_ms)) AS p99_duration
FROM processor_executions
GROUP BY bucket, application_name, route_id, processor_id, processor_type;
CREATE TABLE admin_thresholds (
id INTEGER PRIMARY KEY DEFAULT 1,
config JSONB NOT NULL DEFAULT '{}',
updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_by TEXT NOT NULL,
CONSTRAINT single_row CHECK (id = 1)
);
CREATE TABLE audit_log (
id BIGSERIAL PRIMARY KEY,
timestamp TIMESTAMPTZ NOT NULL DEFAULT now(),
username TEXT NOT NULL,
action TEXT NOT NULL,
category TEXT NOT NULL,
target TEXT,
detail JSONB,
result TEXT NOT NULL,
ip_address TEXT,
user_agent TEXT
);
CREATE INDEX idx_audit_log_timestamp ON audit_log (timestamp DESC);
CREATE INDEX idx_audit_log_username ON audit_log (username);
CREATE INDEX idx_audit_log_category ON audit_log (category);
CREATE INDEX idx_audit_log_action ON audit_log (action);
CREATE INDEX idx_audit_log_target ON audit_log (target);
SELECT add_continuous_aggregate_policy('stats_1m_processor_detail',
start_offset => INTERVAL '1 hour',
end_offset => INTERVAL '1 minute',
schedule_interval => INTERVAL '1 minute');

View File

@@ -1,7 +0,0 @@
-- Built-in Admins group
INSERT INTO groups (id, name) VALUES
('00000000-0000-0000-0000-000000000010', 'Admins');
-- Assign ADMIN role to Admins group
INSERT INTO group_roles (group_id, role_id) VALUES
('00000000-0000-0000-0000-000000000010', '00000000-0000-0000-0000-000000000004');

View File

@@ -1 +0,0 @@
ALTER TABLE users ADD COLUMN password_hash TEXT;

View File

@@ -1,36 +0,0 @@
-- =============================================================
-- Consolidate oidc_config + admin_thresholds → server_config
-- =============================================================
CREATE TABLE server_config (
config_key TEXT PRIMARY KEY,
config_val JSONB NOT NULL,
updated_at TIMESTAMPTZ NOT NULL DEFAULT now(),
updated_by TEXT
);
-- Migrate existing oidc_config row (if any)
INSERT INTO server_config (config_key, config_val, updated_at)
SELECT 'oidc',
jsonb_build_object(
'enabled', enabled,
'issuerUri', issuer_uri,
'clientId', client_id,
'clientSecret', client_secret,
'rolesClaim', roles_claim,
'defaultRoles', to_jsonb(default_roles),
'autoSignup', auto_signup,
'displayNameClaim', display_name_claim
),
updated_at
FROM oidc_config
WHERE config_id = 'default';
-- Migrate existing admin_thresholds row (if any)
INSERT INTO server_config (config_key, config_val, updated_at, updated_by)
SELECT 'thresholds', config, updated_at, updated_by
FROM admin_thresholds
WHERE id = 1;
DROP TABLE oidc_config;
DROP TABLE admin_thresholds;

View File

@@ -1,13 +0,0 @@
-- Agent lifecycle events for tracking registration, state transitions, etc.
CREATE TABLE agent_events (
id BIGSERIAL PRIMARY KEY,
agent_id TEXT NOT NULL,
app_id TEXT NOT NULL,
event_type TEXT NOT NULL,
detail TEXT,
timestamp TIMESTAMPTZ NOT NULL DEFAULT NOW()
);
CREATE INDEX idx_agent_events_agent ON agent_events(agent_id, timestamp DESC);
CREATE INDEX idx_agent_events_app ON agent_events(app_id, timestamp DESC);
CREATE INDEX idx_agent_events_time ON agent_events(timestamp DESC);

View File

@@ -1,6 +0,0 @@
-- Retention: drop agent_metrics chunks older than 90 days
SELECT add_retention_policy('agent_metrics', INTERVAL '90 days', if_not_exists => true);
-- Compression: compress agent_metrics chunks older than 7 days
ALTER TABLE agent_metrics SET (timescaledb.compress);
SELECT add_compression_policy('agent_metrics', INTERVAL '7 days', if_not_exists => true);

View File

@@ -1,21 +0,0 @@
-- V7: Per-processor-id continuous aggregate for route detail page
CREATE MATERIALIZED VIEW stats_1m_processor_detail
WITH (timescaledb.continuous) AS
SELECT
time_bucket('1 minute', start_time) AS bucket,
group_name,
route_id,
processor_id,
processor_type,
COUNT(*) AS total_count,
COUNT(*) FILTER (WHERE status = 'FAILED') AS failed_count,
SUM(duration_ms) AS duration_sum,
MAX(duration_ms) AS duration_max,
approx_percentile(0.99, percentile_agg(duration_ms)) AS p99_duration
FROM processor_executions
GROUP BY bucket, group_name, route_id, processor_id, processor_type;
SELECT add_continuous_aggregate_policy('stats_1m_processor_detail',
start_offset => INTERVAL '1 hour',
end_offset => INTERVAL '1 minute',
schedule_interval => INTERVAL '1 minute');