Fix rollup queries: alias shadowed AggregateFunction column name
countMerge(total_count) in the avg expression resolved to the UInt64 alias 'total_count' instead of the AggregateFunction column. Rename the SELECT aliases to cnt, failed, avg_ms, and p99_ms so they no longer shadow the underlying AggregateFunction columns. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -108,19 +108,19 @@ public class ClickHouseSearchEngine implements SearchEngine {
|
|||||||
String where = " WHERE " + String.join(" AND ", conditions);
|
String where = " WHERE " + String.join(" AND ", conditions);
|
||||||
|
|
||||||
String rollupSql = "SELECT " +
|
String rollupSql = "SELECT " +
|
||||||
"countMerge(total_count) AS total_count, " +
|
"countMerge(total_count) AS cnt, " +
|
||||||
"countIfMerge(failed_count) AS failed_count, " +
|
"countIfMerge(failed_count) AS failed, " +
|
||||||
"toInt64(ifNotFinite(sumMerge(duration_sum) / countMerge(total_count), 0)) AS avg_duration_ms, " +
|
"toInt64(ifNotFinite(sumMerge(duration_sum) / countMerge(total_count), 0)) AS avg_ms, " +
|
||||||
"toInt64(ifNotFinite(quantileTDigestMerge(0.99)(p99_duration), 0)) AS p99_duration_ms " +
|
"toInt64(ifNotFinite(quantileTDigestMerge(0.99)(p99_duration), 0)) AS p99_ms " +
|
||||||
"FROM route_execution_stats_5m" + where;
|
"FROM route_execution_stats_5m" + where;
|
||||||
|
|
||||||
record PeriodStats(long totalCount, long failedCount, long avgDurationMs, long p99LatencyMs) {}
|
record PeriodStats(long totalCount, long failedCount, long avgDurationMs, long p99LatencyMs) {}
|
||||||
PeriodStats current = jdbcTemplate.queryForObject(rollupSql,
|
PeriodStats current = jdbcTemplate.queryForObject(rollupSql,
|
||||||
(rs, rowNum) -> new PeriodStats(
|
(rs, rowNum) -> new PeriodStats(
|
||||||
rs.getLong("total_count"),
|
rs.getLong("cnt"),
|
||||||
rs.getLong("failed_count"),
|
rs.getLong("failed"),
|
||||||
rs.getLong("avg_duration_ms"),
|
rs.getLong("avg_ms"),
|
||||||
rs.getLong("p99_duration_ms")),
|
rs.getLong("p99_ms")),
|
||||||
params.toArray());
|
params.toArray());
|
||||||
|
|
||||||
// Active count — lightweight real-time query on base table (RUNNING is transient)
|
// Active count — lightweight real-time query on base table (RUNNING is transient)
|
||||||
@@ -147,18 +147,18 @@ public class ClickHouseSearchEngine implements SearchEngine {
|
|||||||
String prevWhere = " WHERE " + String.join(" AND ", prevConditions);
|
String prevWhere = " WHERE " + String.join(" AND ", prevConditions);
|
||||||
|
|
||||||
String prevRollupSql = "SELECT " +
|
String prevRollupSql = "SELECT " +
|
||||||
"countMerge(total_count) AS total_count, " +
|
"countMerge(total_count) AS cnt, " +
|
||||||
"countIfMerge(failed_count) AS failed_count, " +
|
"countIfMerge(failed_count) AS failed, " +
|
||||||
"toInt64(ifNotFinite(sumMerge(duration_sum) / countMerge(total_count), 0)) AS avg_duration_ms, " +
|
"toInt64(ifNotFinite(sumMerge(duration_sum) / countMerge(total_count), 0)) AS avg_ms, " +
|
||||||
"toInt64(ifNotFinite(quantileTDigestMerge(0.99)(p99_duration), 0)) AS p99_duration_ms " +
|
"toInt64(ifNotFinite(quantileTDigestMerge(0.99)(p99_duration), 0)) AS p99_ms " +
|
||||||
"FROM route_execution_stats_5m" + prevWhere;
|
"FROM route_execution_stats_5m" + prevWhere;
|
||||||
|
|
||||||
PeriodStats prev = jdbcTemplate.queryForObject(prevRollupSql,
|
PeriodStats prev = jdbcTemplate.queryForObject(prevRollupSql,
|
||||||
(rs, rowNum) -> new PeriodStats(
|
(rs, rowNum) -> new PeriodStats(
|
||||||
rs.getLong("total_count"),
|
rs.getLong("cnt"),
|
||||||
rs.getLong("failed_count"),
|
rs.getLong("failed"),
|
||||||
rs.getLong("avg_duration_ms"),
|
rs.getLong("avg_ms"),
|
||||||
rs.getLong("p99_duration_ms")),
|
rs.getLong("p99_ms")),
|
||||||
prevParams.toArray());
|
prevParams.toArray());
|
||||||
|
|
||||||
// Today total (midnight UTC to now) — read from rollup with same scope
|
// Today total (midnight UTC to now) — read from rollup with same scope
|
||||||
@@ -205,20 +205,20 @@ public class ClickHouseSearchEngine implements SearchEngine {
|
|||||||
// Re-aggregate 5-minute rollup buckets into the requested interval
|
// Re-aggregate 5-minute rollup buckets into the requested interval
|
||||||
String sql = "SELECT " +
|
String sql = "SELECT " +
|
||||||
"toDateTime(intDiv(toUInt32(bucket), " + intervalSeconds + ") * " + intervalSeconds + ") AS ts_bucket, " +
|
"toDateTime(intDiv(toUInt32(bucket), " + intervalSeconds + ") * " + intervalSeconds + ") AS ts_bucket, " +
|
||||||
"countMerge(total_count) AS total_count, " +
|
"countMerge(total_count) AS cnt, " +
|
||||||
"countIfMerge(failed_count) AS failed_count, " +
|
"countIfMerge(failed_count) AS failed, " +
|
||||||
"toInt64(ifNotFinite(sumMerge(duration_sum) / countMerge(total_count), 0)) AS avg_duration_ms, " +
|
"toInt64(ifNotFinite(sumMerge(duration_sum) / countMerge(total_count), 0)) AS avg_ms, " +
|
||||||
"toInt64(ifNotFinite(quantileTDigestMerge(0.99)(p99_duration), 0)) AS p99_duration_ms " +
|
"toInt64(ifNotFinite(quantileTDigestMerge(0.99)(p99_duration), 0)) AS p99_ms " +
|
||||||
"FROM route_execution_stats_5m" + where +
|
"FROM route_execution_stats_5m" + where +
|
||||||
" GROUP BY ts_bucket ORDER BY ts_bucket";
|
" GROUP BY ts_bucket ORDER BY ts_bucket";
|
||||||
|
|
||||||
List<StatsTimeseries.TimeseriesBucket> buckets = jdbcTemplate.query(sql, (rs, rowNum) ->
|
List<StatsTimeseries.TimeseriesBucket> buckets = jdbcTemplate.query(sql, (rs, rowNum) ->
|
||||||
new StatsTimeseries.TimeseriesBucket(
|
new StatsTimeseries.TimeseriesBucket(
|
||||||
rs.getTimestamp("ts_bucket").toInstant(),
|
rs.getTimestamp("ts_bucket").toInstant(),
|
||||||
rs.getLong("total_count"),
|
rs.getLong("cnt"),
|
||||||
rs.getLong("failed_count"),
|
rs.getLong("failed"),
|
||||||
rs.getLong("avg_duration_ms"),
|
rs.getLong("avg_ms"),
|
||||||
rs.getLong("p99_duration_ms"),
|
rs.getLong("p99_ms"),
|
||||||
0L
|
0L
|
||||||
),
|
),
|
||||||
params.toArray());
|
params.toArray());
|
||||||
|
|||||||
Reference in New Issue
Block a user