Compare commits: v0.0.2...f24a5e5ff0 (356 commits)
.gitea/sanitize-branch.sh (new file, 11 lines)
@@ -0,0 +1,11 @@
#!/bin/sh
# Shared branch slug sanitization for CI jobs.
# Strips prefix (feature/, fix/, etc.), lowercases, replaces non-alphanum, truncates to 20 chars.
sanitize_branch() {
  echo "$1" | sed -E 's#^(feature|fix|feat|hotfix)/##' \
    | tr '[:upper:]' '[:lower:]' \
    | sed 's/[^a-z0-9-]/-/g' \
    | sed 's/--*/-/g; s/^-//; s/-$//' \
    | cut -c1-20 \
    | sed 's/-$//'
}
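For reference, a quick shell session showing what the helper produces; the branch names below are made up:

```bash
# Illustrative only: hypothetical branch names.
. .gitea/sanitize-branch.sh
sanitize_branch "feature/Add-OIDC_Login"                    # -> add-oidc-login
sanitize_branch "fix/JIRA-1234_update_sonar_and_clickhouse" # -> jira-1234-update-son (truncated to 20 chars)
```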
@@ -53,6 +53,7 @@ jobs:
npm run build
env:
REGISTRY_TOKEN: ${{ secrets.REGISTRY_TOKEN }}
VITE_APP_VERSION: ${{ github.sha }}

- name: Build and Test
run: mvn clean verify -DskipITs -U --batch-mode
@@ -78,14 +79,7 @@ jobs:
REGISTRY_TOKEN: ${{ secrets.REGISTRY_TOKEN }}
- name: Compute branch slug
run: |
sanitize_branch() {
echo "$1" | sed -E 's#^(feature|fix|feat|hotfix)/##' \
| tr '[:upper:]' '[:lower:]' \
| sed 's/[^a-z0-9-]/-/g' \
| sed 's/--*/-/g; s/^-//; s/-$//' \
| cut -c1-20 \
| sed 's/-$//'
}
. .gitea/sanitize-branch.sh
if [ "$GITHUB_REF_NAME" = "main" ]; then
echo "BRANCH_SLUG=main" >> "$GITHUB_ENV"
echo "IMAGE_TAGS=latest" >> "$GITHUB_ENV"
@@ -118,9 +112,11 @@ jobs:
for TAG in $IMAGE_TAGS; do
TAGS="$TAGS -t gitea.siegeln.net/cameleer/cameleer3-server-ui:$TAG"
done
SHORT_SHA=$(echo "${{ github.sha }}" | cut -c1-7)
docker buildx build --platform linux/amd64 \
-f ui/Dockerfile \
--build-arg REGISTRY_TOKEN="$REGISTRY_TOKEN" \
--build-arg VITE_APP_VERSION="$SHORT_SHA" \
$TAGS \
--cache-from type=registry,ref=gitea.siegeln.net/cameleer/cameleer3-server-ui:buildcache \
--cache-to type=registry,ref=gitea.siegeln.net/cameleer/cameleer3-server-ui:buildcache,mode=max \
@@ -209,27 +205,28 @@ jobs:
--from-literal=POSTGRES_DB="${POSTGRES_DB:-cameleer}" \
--dry-run=client -o yaml | kubectl apply -f -

kubectl create secret generic opensearch-credentials \
kubectl create secret generic logto-credentials \
--namespace=cameleer \
--from-literal=OPENSEARCH_USER="${OPENSEARCH_USER:-admin}" \
--from-literal=OPENSEARCH_PASSWORD="$OPENSEARCH_PASSWORD" \
--from-literal=PG_USER="${LOGTO_PG_USER:-logto}" \
--from-literal=PG_PASSWORD="${LOGTO_PG_PASSWORD}" \
--from-literal=ENDPOINT="${LOGTO_ENDPOINT}" \
--from-literal=ADMIN_ENDPOINT="${LOGTO_ADMIN_ENDPOINT}" \
--dry-run=client -o yaml | kubectl apply -f -

kubectl create secret generic authentik-credentials \
kubectl create secret generic clickhouse-credentials \
--namespace=cameleer \
--from-literal=PG_USER="${AUTHENTIK_PG_USER:-authentik}" \
--from-literal=PG_PASSWORD="${AUTHENTIK_PG_PASSWORD}" \
--from-literal=AUTHENTIK_SECRET_KEY="${AUTHENTIK_SECRET_KEY}" \
--from-literal=CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}" \
--from-literal=CLICKHOUSE_PASSWORD="$CLICKHOUSE_PASSWORD" \
--dry-run=client -o yaml | kubectl apply -f -

kubectl apply -f deploy/postgres.yaml
kubectl -n cameleer rollout status statefulset/postgres --timeout=120s

kubectl apply -f deploy/opensearch.yaml
kubectl -n cameleer rollout status statefulset/opensearch --timeout=180s
kubectl apply -f deploy/clickhouse.yaml
kubectl -n cameleer rollout status statefulset/clickhouse --timeout=180s

kubectl apply -f deploy/authentik.yaml
kubectl -n cameleer rollout status deployment/authentik-server --timeout=180s
kubectl apply -f deploy/logto.yaml
kubectl -n cameleer rollout status deployment/logto --timeout=180s

kubectl apply -k deploy/overlays/main
kubectl -n cameleer set image deployment/cameleer3-server \
@@ -248,11 +245,12 @@ jobs:
POSTGRES_USER: ${{ secrets.POSTGRES_USER }}
POSTGRES_PASSWORD: ${{ secrets.POSTGRES_PASSWORD }}
POSTGRES_DB: ${{ secrets.POSTGRES_DB }}
OPENSEARCH_USER: ${{ secrets.OPENSEARCH_USER }}
OPENSEARCH_PASSWORD: ${{ secrets.OPENSEARCH_PASSWORD }}
AUTHENTIK_PG_USER: ${{ secrets.AUTHENTIK_PG_USER }}
AUTHENTIK_PG_PASSWORD: ${{ secrets.AUTHENTIK_PG_PASSWORD }}
AUTHENTIK_SECRET_KEY: ${{ secrets.AUTHENTIK_SECRET_KEY }}
LOGTO_PG_USER: ${{ secrets.LOGTO_PG_USER }}
LOGTO_PG_PASSWORD: ${{ secrets.LOGTO_PG_PASSWORD }}
LOGTO_ENDPOINT: ${{ secrets.LOGTO_ENDPOINT }}
LOGTO_ADMIN_ENDPOINT: ${{ secrets.LOGTO_ADMIN_ENDPOINT }}
CLICKHOUSE_USER: ${{ secrets.CLICKHOUSE_USER }}
CLICKHOUSE_PASSWORD: ${{ secrets.CLICKHOUSE_PASSWORD }}

deploy-feature:
needs: docker
@@ -274,14 +272,7 @@ jobs:
KUBECONFIG_B64: ${{ secrets.KUBECONFIG_BASE64 }}
- name: Compute branch variables
run: |
sanitize_branch() {
echo "$1" | sed -E 's#^(feature|fix|feat|hotfix)/##' \
| tr '[:upper:]' '[:lower:]' \
| sed 's/[^a-z0-9-]/-/g' \
| sed 's/--*/-/g; s/^-//; s/-$//' \
| cut -c1-20 \
| sed 's/-$//'
}
. .gitea/sanitize-branch.sh
SLUG=$(sanitize_branch "$GITHUB_REF_NAME")
NS="cam-${SLUG}"
SCHEMA="cam_$(echo $SLUG | tr '-' '_')"
@@ -292,7 +283,7 @@ jobs:
run: kubectl create namespace "$BRANCH_NS" --dry-run=client -o yaml | kubectl apply -f -
- name: Copy secrets from cameleer namespace
run: |
for SECRET in gitea-registry postgres-credentials opensearch-credentials cameleer-auth; do
for SECRET in gitea-registry postgres-credentials clickhouse-credentials cameleer-auth; do
kubectl get secret "$SECRET" -n cameleer -o json \
| jq 'del(.metadata.namespace, .metadata.resourceVersion, .metadata.uid, .metadata.creationTimestamp, .metadata.managedFields)' \
| kubectl apply -n "$BRANCH_NS" -f -
@@ -372,15 +363,6 @@ jobs:
kubectl wait --for=condition=Ready pod/cleanup-schema-${BRANCH_SLUG} -n cameleer --timeout=30s || true
kubectl wait --for=jsonpath='{.status.phase}'=Succeeded pod/cleanup-schema-${BRANCH_SLUG} -n cameleer --timeout=60s || true
kubectl delete pod cleanup-schema-${BRANCH_SLUG} -n cameleer --ignore-not-found
- name: Delete OpenSearch indices
run: |
kubectl run cleanup-indices-${BRANCH_SLUG} \
--namespace=cameleer \
--image=curlimages/curl:latest \
--restart=Never \
--command -- curl -sf -X DELETE "http://opensearch:9200/cam-${BRANCH_SLUG}-*"
kubectl wait --for=jsonpath='{.status.phase}'=Succeeded pod/cleanup-indices-${BRANCH_SLUG} -n cameleer --timeout=60s || true
kubectl delete pod cleanup-indices-${BRANCH_SLUG} -n cameleer --ignore-not-found
- name: Cleanup Docker images
run: |
API="https://gitea.siegeln.net/api/v1"

@@ -42,9 +42,6 @@ jobs:
key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
restore-keys: ${{ runner.os }}-maven-

- name: Build and Test Java
run: mvn clean verify -DskipITs -U --batch-mode

- name: Install UI dependencies
working-directory: ui
run: |
@@ -57,33 +54,10 @@ jobs:
working-directory: ui
run: npm run lint -- --format json --output-file eslint-report.json || true

- name: Install sonar-scanner
- name: Build, Test and Analyze
run: |
SONAR_SCANNER_VERSION=6.2.1.4610
ARCH=$(uname -m)
case "$ARCH" in
aarch64|arm64) PLATFORM="linux-aarch64" ;;
*) PLATFORM="linux-x64" ;;
esac
curl -sSLo sonar-scanner.zip "https://binaries.sonarsource.com/Distribution/sonar-scanner-cli/sonar-scanner-cli-${SONAR_SCANNER_VERSION}-${PLATFORM}.zip"
unzip -q sonar-scanner.zip
ln -s "$(pwd)/sonar-scanner-${SONAR_SCANNER_VERSION}-${PLATFORM}/bin/sonar-scanner" /usr/local/bin/sonar-scanner

- name: SonarQube Analysis
run: |
sonar-scanner \
-Dsonar.host.url="$SONAR_HOST_URL" \
-Dsonar.token="$SONAR_TOKEN" \
mvn clean verify sonar:sonar -DskipITs -U --batch-mode \
-Dsonar.host.url=${{ secrets.SONAR_HOST_URL }} \
-Dsonar.token=${{ secrets.SONAR_TOKEN }} \
-Dsonar.projectKey=cameleer3-server \
-Dsonar.projectName="Cameleer3 Server" \
-Dsonar.sources=cameleer3-server-core/src/main/java,cameleer3-server-app/src/main/java,ui/src \
-Dsonar.tests=cameleer3-server-core/src/test/java,cameleer3-server-app/src/test/java \
-Dsonar.java.binaries=cameleer3-server-core/target/classes,cameleer3-server-app/target/classes \
-Dsonar.java.test.binaries=cameleer3-server-core/target/test-classes,cameleer3-server-app/target/test-classes \
-Dsonar.java.libraries="$HOME/.m2/repository/**/*.jar" \
-Dsonar.typescript.eslint.reportPaths=ui/eslint-report.json \
-Dsonar.eslint.reportPaths=ui/eslint-report.json \
-Dsonar.exclusions="ui/node_modules/**,ui/dist/**,**/target/**"
env:
SONAR_HOST_URL: ${{ secrets.SONAR_HOST_URL }}
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
-Dsonar.projectName="Cameleer3 Server"
.gitignore (vendored, 1 line changed)
@@ -40,3 +40,4 @@ logs/
# Claude
.claude/
.worktrees/
.gitnexus
Binary file not shown (before: 142 KiB).
Binary file not shown (before: 141 KiB).
Binary file not shown (before: 6.7 KiB).
Binary file not shown (before: 92 KiB).
Binary file not shown (before: 115 KiB).
CLAUDE.md (334 lines changed)
@@ -4,7 +4,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co

## Project

Cameleer3 Server — observability server that receives, stores, and serves Camel route execution data and route diagrams from Cameleer3 agents. Pushes config and commands to agents via SSE.
Cameleer3 Server — observability server that receives, stores, and serves Camel route execution data and route diagrams from Cameleer3 agents. Pushes config and commands to agents via SSE. Also orchestrates Docker container deployments when running under cameleer-saas.

## Related Project

@@ -14,8 +14,8 @@ Cameleer3 Server — observability server that receives, stores, and serves Came

## Modules

- `cameleer3-server-core` — domain logic, storage, agent registry
- `cameleer3-server-app` — Spring Boot web app, REST controllers, SSE, static resources
- `cameleer3-server-core` — domain logic, storage interfaces, services (no Spring dependencies)
- `cameleer3-server-app` — Spring Boot web app, REST controllers, SSE, persistence, Docker orchestration

## Build Commands

@@ -30,6 +30,116 @@ mvn clean verify # Full build with tests
java -jar cameleer3-server-app/target/cameleer3-server-app-1.0-SNAPSHOT.jar
```

## Key Classes by Package

### Core Module (`cameleer3-server-core/src/main/java/com/cameleer3/server/core/`)

**agent/** — Agent lifecycle and commands
- `AgentRegistryService` — in-memory registry (ConcurrentHashMap), register/heartbeat/lifecycle
- `AgentInfo` — record: id, name, application, environmentId, version, routeIds, capabilities, state
- `AgentCommand` — record: id, type, targetAgent, payload, createdAt, expiresAt
- `AgentEventService` — records agent state changes, heartbeats

**runtime/** — App/Environment/Deployment domain
- `App` — record: id, environmentId, slug, displayName, containerConfig (JSONB)
- `AppVersion` — record: id, appId, version, jarPath
- `Environment` — record: id, slug, jarRetentionCount
- `Deployment` — record: id, appId, appVersionId, environmentId, status, targetState, deploymentStrategy, replicaStates (JSONB), deployStage, containerId, containerName
- `DeploymentStatus` — enum: STOPPED, STARTING, RUNNING, DEGRADED, STOPPING, FAILED
- `DeployStage` — enum: PRE_FLIGHT, PULL_IMAGE, CREATE_NETWORK, START_REPLICAS, HEALTH_CHECK, SWAP_TRAFFIC, COMPLETE
- `DeploymentService` — createDeployment (deletes terminal deployments first), markRunning, markFailed, markStopped
- `ContainerRequest` — record: 17 fields for Docker container creation
- `ResolvedContainerConfig` — record: typed config with memoryLimitMb, cpuShares, cpuLimit, appPort, replicas, routingMode, etc.
- `ConfigMerger` — pure function: resolve(globalDefaults, envConfig, appConfig) -> ResolvedContainerConfig
- `RuntimeOrchestrator` — interface: startContainer, stopContainer, getContainerStatus, getLogs

**search/** — Execution search
- `SearchService` — search, topErrors, punchcard, distinctAttributeKeys
- `SearchRequest` / `SearchResult` — search DTOs

**storage/** — Storage abstractions
- `ExecutionStore`, `MetricsStore`, `DiagramStore`, `SearchIndex`, `LogIndex` — interfaces

**rbac/** — Role-based access control
- `RbacService` — getDirectRolesForUser, syncOidcRoles, assignRole
- `SystemRole` — enum: AGENT, VIEWER, OPERATOR, ADMIN; `normalizeScope()` maps scopes
- `UserDetail`, `RoleDetail`, `GroupDetail` — records

**security/** — Auth
- `JwtService` — interface: createAccessToken, validateAccessToken
- `Ed25519SigningService` — interface: sign, verify (config signing)
- `OidcConfig` — record: issuerUri, clientId, audience, rolesClaim, additionalScopes

**ingestion/** — Buffered data pipeline
- `IngestionService` — ingestExecution, ingestMetric, ingestLog, ingestDiagram
- `ChunkAccumulator` — batches data for efficient flush

### App Module (`cameleer3-server-app/src/main/java/com/cameleer3/server/app/`)

**controller/** — REST endpoints
- `AgentRegistrationController` — POST /register, POST /heartbeat, GET / (list), POST /refresh-token
- `AgentSseController` — GET /sse (Server-Sent Events connection)
- `AgentCommandController` — POST /broadcast, POST /{agentId}, POST /{agentId}/ack
- `AppController` — CRUD /api/v1/apps, POST /{appId}/upload-jar, GET /{appId}/versions
- `DeploymentController` — GET/POST /api/v1/apps/{appId}/deployments, POST /{id}/stop, POST /{id}/promote, GET /{id}/logs
- `EnvironmentAdminController` — CRUD /api/v1/admin/environments, PUT /{id}/jar-retention
- `ExecutionController` — GET /api/v1/executions (search + detail)
- `SearchController` — POST /api/v1/search, GET /routes, GET /top-errors, GET /punchcard
- `LogQueryController` — GET /api/v1/logs, GET /tail
- `ChunkIngestionController` — POST /api/v1/ingestion/chunk/{executions|metrics|diagrams}
- `UserAdminController` — CRUD /api/v1/admin/users, POST /{id}/roles, POST /{id}/set-password
- `RoleAdminController` — CRUD /api/v1/admin/roles
- `GroupAdminController` — CRUD /api/v1/admin/groups
- `OidcConfigAdminController` — GET/POST /api/v1/admin/oidc, POST /test
- `AuditLogController` — GET /api/v1/admin/audit
- `MetricsController` — GET /api/v1/metrics, GET /timeseries
- `DiagramController` — GET /api/v1/diagrams/{id}, POST /
- `DiagramRenderController` — POST /api/v1/diagrams/render (ELK layout)
- `LicenseAdminController` — GET/POST /api/v1/admin/license

**runtime/** — Docker orchestration
- `DockerRuntimeOrchestrator` — implements RuntimeOrchestrator; Docker Java client (zerodep transport), container lifecycle
- `DeploymentExecutor` — @Async staged deploy: PRE_FLIGHT -> PULL_IMAGE -> CREATE_NETWORK -> START_REPLICAS -> HEALTH_CHECK -> SWAP_TRAFFIC -> COMPLETE
- `DockerNetworkManager` — ensures bridge networks (cameleer-traefik, cameleer-env-{slug}), connects containers
- `DockerEventMonitor` — persistent Docker event stream listener (die, oom, start, stop), updates deployment status
- `TraefikLabelBuilder` — generates Traefik Docker labels for path-based or subdomain routing
- `DisabledRuntimeOrchestrator` — no-op when runtime not enabled

**storage/** — PostgreSQL repositories (JdbcTemplate)
- `PostgresAppRepository`, `PostgresAppVersionRepository`, `PostgresEnvironmentRepository`
- `PostgresDeploymentRepository` — includes JSONB replica_states, deploy_stage, findByContainerId
- `PostgresUserRepository`, `PostgresRoleRepository`, `PostgresGroupRepository`
- `PostgresAuditRepository`, `PostgresOidcConfigRepository`, `PostgresClaimMappingRepository`

**storage/** — ClickHouse stores
- `ClickHouseExecutionStore`, `ClickHouseMetricsStore`, `ClickHouseLogStore`
- `ClickHouseStatsStore` — pre-aggregated stats, punchcard
- `ClickHouseDiagramStore`, `ClickHouseAgentEventRepository`
- `ClickHouseSearchIndex` — full-text search
- `ClickHouseUsageTracker` — usage_events for billing

**security/** — Spring Security
- `SecurityConfig` — WebSecurityFilterChain, JWT filter, CORS, OIDC conditional
- `JwtAuthenticationFilter` — OncePerRequestFilter, validates Bearer tokens
- `JwtServiceImpl` — HMAC-SHA256 JWT (Nimbus JOSE)
- `OidcAuthController` — /api/v1/auth/oidc (login-uri, token-exchange, logout)
- `OidcTokenExchanger` — code -> tokens, role extraction from access_token then id_token
- `OidcProviderHelper` — OIDC discovery, JWK source cache

**agent/** — Agent lifecycle
- `SseConnectionManager` — manages per-agent SSE connections, delivers commands
- `AgentLifecycleMonitor` — @Scheduled 10s, LIVE->STALE->DEAD transitions

**retention/** — JAR cleanup
- `JarRetentionJob` — @Scheduled 03:00 daily, per-environment retention, skips deployed versions

**config/** — Spring beans
- `RuntimeOrchestratorAutoConfig` — conditional Docker/Disabled orchestrator + NetworkManager + EventMonitor
- `RuntimeBeanConfig` — DeploymentExecutor, AppService, EnvironmentService
- `SecurityBeanConfig` — JwtService, Ed25519, BootstrapTokenValidator
- `StorageBeanConfig` — all repositories
- `ClickHouseConfig` — ClickHouse JdbcTemplate, schema initializer

## Key Conventions

- Java 17+ required
@@ -37,30 +147,228 @@ java -jar cameleer3-server-app/target/cameleer3-server-app-1.0-SNAPSHOT.jar
- Depends on `com.cameleer3:cameleer3-common` from Gitea Maven registry
- Jackson `JavaTimeModule` for `Instant` deserialization
- Communication: receives HTTP POST data from agents (executions, diagrams, metrics, logs), serves SSE event streams for config push/commands (config-update, deep-trace, replay, route-control)
- Maintains agent instance registry with states: LIVE → STALE → DEAD
- Storage: PostgreSQL (TimescaleDB) for structured data, OpenSearch for full-text search and application log storage
- Security: JWT auth with RBAC (AGENT/VIEWER/OPERATOR/ADMIN roles), Ed25519 config signing, bootstrap token for registration
- OIDC: Optional external identity provider support (token exchange pattern). Configured via admin API, stored in database (`server_config` table)
- Environment filtering: all data queries (exchanges, dashboard stats, route metrics, agent events, correlation) filter by the selected environment. All commands (config-update, route-control, set-traced-processors, replay) target only agents in the selected environment when one is selected. `AgentRegistryService.findByApplicationAndEnvironment()` for environment-scoped command dispatch. Backend endpoints accept optional `environment` query parameter; null = all environments (backward compatible).
- Maintains agent instance registry (in-memory) with states: LIVE -> STALE -> DEAD. Auto-heals from JWT `env` claim + heartbeat body on heartbeat/SSE after server restart (priority: heartbeat `environmentId` > JWT `env` claim > `"default"`). Capabilities and route states updated on every heartbeat (protocol v2). Route catalog falls back to ClickHouse stats for route discovery when registry has incomplete data.
- Multi-tenancy: each server instance serves one tenant (configured via `CAMELEER_TENANT_ID`, default: `"default"`). Environments (dev/staging/prod) are first-class — agents send `environmentId` at registration and in heartbeats. JWT carries `env` claim for environment persistence across token refresh. PostgreSQL isolated via schema-per-tenant (`?currentSchema=tenant_{id}`). ClickHouse shared DB with `tenant_id` + `environment` columns, partitioned by `(tenant_id, toYYYYMM(timestamp))`. (See the example after this list.)
- Storage: PostgreSQL for RBAC, config, and audit; ClickHouse for all observability data (executions, search, logs, metrics, stats, diagrams). ClickHouse schema migrations in `clickhouse/*.sql`, run idempotently on startup by `ClickHouseSchemaInitializer`. Use `IF NOT EXISTS` for CREATE and ADD PROJECTION.
- Logging: ClickHouse JDBC set to INFO (`com.clickhouse`), HTTP client to WARN (`org.apache.hc.client5`) in application.yml
- Security: JWT auth with RBAC (AGENT/VIEWER/OPERATOR/ADMIN roles), Ed25519 config signing (key derived deterministically from JWT secret via HMAC-SHA256), bootstrap token for registration. CORS: `CAMELEER_CORS_ALLOWED_ORIGINS` (comma-separated) overrides `CAMELEER_UI_ORIGIN` for multi-origin setups (e.g., reverse proxy). UI role gating: Admin sidebar/routes hidden for non-ADMIN; diagram toolbar and route control hidden for VIEWER. Read-only for VIEWER, editable for OPERATOR+. Role helpers: `useIsAdmin()`, `useCanControl()` in `auth-store.ts`. Route guard: `RequireAdmin` in `auth/RequireAdmin.tsx`. Last-ADMIN guard: system prevents removal of the last ADMIN role (409 Conflict on role removal, user deletion, group role removal). Password policy: min 12 chars, 3-of-4 character classes, no username match (enforced on user creation and admin password reset). Brute-force protection: 5 failed attempts -> 15 min lockout (tracked via `failed_login_attempts` / `locked_until` on users table). Token revocation: `token_revoked_before` column on users, checked in `JwtAuthenticationFilter`, set on password change.
- OIDC: Optional external identity provider support (token exchange pattern). Configured via admin API/UI, stored in database (`server_config` table). Configurable `userIdClaim` (default `sub`) determines which id_token claim is used as the user identifier. Resource server mode: accepts external access tokens (Logto M2M) via JWKS validation when `CAMELEER_OIDC_ISSUER_URI` is set. `CAMELEER_OIDC_JWK_SET_URI` overrides JWKS discovery for container networking. `CAMELEER_OIDC_TLS_SKIP_VERIFY=true` disables TLS cert verification for OIDC calls (self-signed CAs). Scope-based role mapping via `SystemRole.normalizeScope()` (case-insensitive, strips `server:` prefix): `admin`/`server:admin` -> ADMIN, `operator`/`server:operator` -> OPERATOR, `viewer`/`server:viewer` -> VIEWER. SSO: when OIDC enabled, UI auto-redirects to provider with `prompt=none` for silent sign-in; falls back to `/login?local` on `login_required`, retries without `prompt=none` on `consent_required`. Logout always redirects to `/login?local` (via OIDC end_session or direct fallback) to prevent SSO re-login loops. Auto-signup provisions new OIDC users with default roles. System roles synced on every OIDC login via `syncOidcRoles` — always overwrites directly-assigned roles (falls back to `defaultRoles` when OIDC returns none); uses `getDirectRolesForUser` to avoid touching group-inherited roles. Group memberships are never touched. Supports ES384, ES256, RS256. Shared OIDC logic in `OidcProviderHelper` (discovery, JWK source, algorithm set).
- OIDC role extraction: `OidcTokenExchanger` reads roles from the **access_token** first (JWT with `at+jwt` type, decoded by a separate processor), then falls back to id_token. `OidcConfig` includes `audience` (RFC 8707 resource indicator — included in both authorization request and token exchange POST body to trigger JWT access tokens) and `additionalScopes` (extra scopes for the SPA to request). The `rolesClaim` config points to the claim name in the token (e.g., `"roles"` for Custom JWT claims, `"realm_access.roles"` for Keycloak). All provider-specific configuration is external — no provider-specific code in the server.
- User persistence: PostgreSQL `users` table, admin CRUD at `/api/v1/admin/users`
- Usage analytics: ClickHouse `usage_events` table tracks authenticated UI requests, flushed every 5s
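A small sketch of how the multi-tenancy, CORS, and environment-filtering conventions above look at runtime; tenant id, origins, and host are placeholders, not taken from a real deployment:

```bash
# Hypothetical values; illustrates the conventions above only.
export CAMELEER_TENANT_ID=acme
export SPRING_DATASOURCE_URL="jdbc:postgresql://postgres:5432/cameleer3?currentSchema=tenant_acme"
export CAMELEER_CORS_ALLOWED_ORIGINS="https://ui.example.com,https://ops.example.com"

# Optional environment query parameter; omit it to query all environments.
curl -s -H "Authorization: Bearer $TOKEN" \
  "http://localhost:8081/api/v1/executions?environment=staging"
```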
## Database Migrations

PostgreSQL (Flyway): `cameleer3-server-app/src/main/resources/db/migration/`
- V1 — RBAC (users, roles, groups, audit_log)
- V2 — Claim mappings (OIDC)
- V3 — Runtime management (apps, environments, deployments, app_versions)
- V4 — Environment config (default_container_config JSONB)
- V5 — App container config (container_config JSONB on apps)
- V6 — JAR retention policy (jar_retention_count on environments)
- V7 — Deployment orchestration (target_state, deployment_strategy, replica_states JSONB, deploy_stage)
- V8 — Deployment active config (resolved_config JSONB on deployments)
- V9 — Password hardening (failed_login_attempts, locked_until, token_revoked_before on users)

ClickHouse: `cameleer3-server-app/src/main/resources/clickhouse/init.sql` (run idempotently on startup)

## CI/CD & Deployment

- CI workflow: `.gitea/workflows/ci.yml` — build → docker → deploy on push to main or feature branches
- CI workflow: `.gitea/workflows/ci.yml` — build -> docker -> deploy on push to main or feature branches
- Build step skips integration tests (`-DskipITs`) — Testcontainers needs Docker daemon
- Docker: multi-stage build (`Dockerfile`), `$BUILDPLATFORM` for native Maven on ARM64 runner, amd64 runtime
- `REGISTRY_TOKEN` build arg required for `cameleer3-common` dependency resolution
- Registry: `gitea.siegeln.net/cameleer/cameleer3-server` (container images)
- K8s manifests in `deploy/` — Kustomize base + overlays (main/feature), shared infra (PostgreSQL, OpenSearch, Authentik) as top-level manifests
- K8s manifests in `deploy/` — Kustomize base + overlays (main/feature), shared infra (PostgreSQL, ClickHouse, Logto) as top-level manifests
- Deployment target: k3s at 192.168.50.86, namespace `cameleer` (main), `cam-<slug>` (feature branches)
- Feature branches: isolated namespace, PG schema, OpenSearch index prefix; Traefik Ingress at `<slug>-api.cameleer.siegeln.net`
- Secrets managed in CI deploy step (idempotent `--dry-run=client | kubectl apply`): `cameleer-auth`, `postgres-credentials`, `opensearch-credentials`
- K8s probes: server uses `/api/v1/health`, PostgreSQL uses `pg_isready`, OpenSearch uses `/_cluster/health`
- Feature branches: isolated namespace, PG schema; Traefik Ingress at `<slug>-api.cameleer.siegeln.net` (see the check after this list)
- Secrets managed in CI deploy step (idempotent `--dry-run=client | kubectl apply`): `cameleer-auth`, `postgres-credentials`, `clickhouse-credentials`
- K8s probes: server uses `/api/v1/health`, PostgreSQL uses `pg_isready -U "$POSTGRES_USER"` (env var, not hardcoded)
- K8s security: server and database pods run with `securityContext.runAsNonRoot`. UI (nginx) runs without securityContext (needs root for entrypoint setup).
- Docker: server Dockerfile has no default credentials — all DB config comes from env vars at runtime
- Docker build uses buildx registry cache + `--provenance=false` for Gitea compatibility
- CI: branch slug sanitization extracted to `.gitea/sanitize-branch.sh`, sourced by docker and deploy-feature jobs
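As a quick post-deploy check for a feature branch (the slug is hypothetical; CI derives the real one with `.gitea/sanitize-branch.sh`):

```bash
# Hypothetical slug "add-oidc-login"; substitute the slug CI computed for your branch.
kubectl get pods -n cam-add-oidc-login
curl -sk https://add-oidc-login-api.cameleer.siegeln.net/api/v1/health
```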
## UI Structure

The UI has 4 main tabs: **Exchanges**, **Dashboard**, **Runtime**, **Deployments**.

- **Exchanges** — route execution search and detail (`ui/src/pages/Exchanges/`)
- **Dashboard** — metrics and stats with L1/L2/L3 drill-down (`ui/src/pages/DashboardTab/`)
- **Runtime** — live agent status, logs, commands (`ui/src/pages/RuntimeTab/`)
- **Deployments** — app management, JAR upload, deployment lifecycle (`ui/src/pages/AppsTab/`)
- Config sub-tabs: **Variables | Monitoring | Traces & Taps | Route Recording | Resources**
- Create app: full page at `/apps/new` (not a modal)
- Deployment progress: `ui/src/components/DeploymentProgress.tsx` (7-stage step indicator)

### Key UI Files

- `ui/src/router.tsx` — React Router v6 routes
- `ui/src/config.ts` — apiBaseUrl, basePath
- `ui/src/auth/auth-store.ts` — Zustand: accessToken, user, roles, login/logout
- `ui/src/api/environment-store.ts` — Zustand: selected environment (localStorage)
- `ui/src/components/ContentTabs.tsx` — main tab switcher
- `ui/src/components/ExecutionDiagram/` — interactive trace view (canvas)
- `ui/src/components/ProcessDiagram/` — ELK-rendered route diagram
- `ui/src/hooks/useScope.ts` — TabKey type, scope inference

## UI Styling

- Always use `@cameleer/design-system` CSS variables for colors (`var(--amber)`, `var(--error)`, `var(--success)`, etc.) — never hardcode hex values. This applies to CSS modules, inline styles, and SVG `fill`/`stroke` attributes. SVG presentation attributes resolve `var()` correctly.
- Always use `@cameleer/design-system` CSS variables for colors (`var(--amber)`, `var(--error)`, `var(--success)`, etc.) — never hardcode hex values. This applies to CSS modules, inline styles, and SVG `fill`/`stroke` attributes. SVG presentation attributes resolve `var()` correctly. All colors use CSS variables (no hardcoded hex).
- Shared CSS modules in `ui/src/styles/` (table-section, log-panel, rate-colors, refresh-indicator, chart-card, section-card) — import these instead of duplicating patterns.
- Shared `PageLoader` component replaces copy-pasted spinner patterns.
- Design system components used consistently: `Select`, `Tabs`, `Toggle`, `Button`, `LogViewer`, `Label` — prefer DS components over raw HTML elements.
- Environment slugs are auto-computed from display name (read-only in UI).
- Brand assets: `@cameleer/design-system/assets/` provides `camel-logo.svg` (currentColor), `cameleer3-{16,32,48,192,512}.png`, and `cameleer3-logo.png`. Copied to `ui/public/` for use as favicon (`favicon-16.png`, `favicon-32.png`) and logo (`camel-logo.svg` — login dialog 36px, sidebar 28x24px).
- Sidebar generates `/exchanges/` paths directly (no legacy `/apps/` redirects). basePath is centralized in `ui/src/config.ts`; router.tsx imports it instead of re-reading `<base>` tag.
- Global user preferences (environment selection) use Zustand stores with localStorage persistence — never URL search params. URL params are for page-specific state only (e.g. `?text=` search query). Switching environment resets all filters and remounts pages.

## Docker Orchestration

When deployed via the cameleer-saas platform, this server orchestrates customer app containers using Docker. Key components:

- **ConfigMerger** (`core/runtime/ConfigMerger.java`) — pure function: resolve(globalDefaults, envConfig, appConfig) -> ResolvedContainerConfig. Three-layer merge: global (application.yml) -> environment (defaultContainerConfig JSONB) -> app (containerConfig JSONB).
- **TraefikLabelBuilder** (`app/runtime/TraefikLabelBuilder.java`) — generates Traefik Docker labels for path-based (`/{envSlug}/{appSlug}/`) or subdomain-based (`{appSlug}-{envSlug}.{domain}`) routing. Supports strip-prefix and SSL offloading toggles.
- **DockerNetworkManager** (`app/runtime/DockerNetworkManager.java`) — manages two Docker network tiers (see the sketch after this list):
  - `cameleer-traefik` — shared network; Traefik, server, and all app containers attach here. Server joined via docker-compose with `cameleer3-server` DNS alias.
  - `cameleer-env-{slug}` — per-environment isolated network; containers in the same environment discover each other via Docker DNS.
- **DockerEventMonitor** (`app/runtime/DockerEventMonitor.java`) — persistent Docker event stream listener for containers with `managed-by=cameleer3-server` label. Detects die/oom/start/stop events and updates deployment replica states. Periodic reconciliation (@Scheduled every 30s) inspects actual container state and corrects deployment status mismatches (fixes stale DEGRADED with all replicas healthy).
- **DeploymentProgress** (`ui/src/components/DeploymentProgress.tsx`) — UI step indicator showing 7 deploy stages with amber active/green completed styling.
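Roughly what the network manager ensures, expressed as manual Docker commands (a sketch only; the real component does this through the Docker Java client, and the container name here is hypothetical):

```bash
# Shared tier: Traefik, the server, and all app containers attach here.
docker network create cameleer-traefik
# Per-environment tier; "staging" stands in for the environment slug.
docker network create cameleer-env-staging
docker network connect cameleer-traefik     my-app-replica-1
docker network connect cameleer-env-staging my-app-replica-1
```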
### Deployment Status Model

Deployments move through these statuses:

| Status | Meaning |
|--------|---------|
| `STOPPED` | Intentionally stopped or initial state |
| `STARTING` | Deploy in progress |
| `RUNNING` | All replicas healthy and serving |
| `DEGRADED` | Some replicas healthy, some dead |
| `STOPPING` | Graceful shutdown in progress |
| `FAILED` | Terminal failure (pre-flight, health check, or crash) |

**Replica support**: deployments can specify a replica count. `DEGRADED` is used when at least one but not all replicas are healthy.

**Deploy stages** (`DeployStage`): PRE_FLIGHT -> PULL_IMAGE -> CREATE_NETWORK -> START_REPLICAS -> HEALTH_CHECK -> SWAP_TRAFFIC -> COMPLETE (or FAILED at any stage).

**Blue/green strategy**: when re-deploying, new replicas are started and health-checked before old ones are stopped, minimising downtime.

**Deployment uniqueness**: `DeploymentService.createDeployment()` deletes any STOPPED/FAILED deployments for the same app+environment before creating a new one, preventing duplicate rows.
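A hedged example of driving this lifecycle through the deployment endpoints listed under `DeploymentController`; the ids and request body shape below are illustrative, not the exact contract:

```bash
# Illustrative ids and payload; check DeploymentController for the real request body.
curl -s -X POST -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
  -d '{"appVersionId": 7, "environmentId": 1}' \
  http://localhost:8081/api/v1/apps/42/deployments

# Poll until status is RUNNING (or FAILED); deployStage shows the current stage.
curl -s -H "Authorization: Bearer $TOKEN" \
  http://localhost:8081/api/v1/apps/42/deployments
```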
### JAR Management

- **Retention policy** per environment: configurable maximum number of JAR versions to keep. Older JARs are deleted automatically.
- **Nightly cleanup job** (`JarRetentionJob`, Spring `@Scheduled` 03:00): purges JARs exceeding the retention limit and removes orphaned files not referenced by any app version. Skips versions currently deployed.
- **Volume-based JAR mounting** for Docker-in-Docker setups: set `CAMELEER_JAR_DOCKER_VOLUME` to the Docker volume name that contains the JAR storage directory. When set, the orchestrator mounts this volume into the container instead of bind-mounting the host path (required when the SaaS container itself runs inside Docker and the host path is not accessible from sibling containers).
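For example, when the server itself runs as a container, the JAR storage can live on a named volume shared with the app containers it starts. Volume name, mount path, and the Docker socket mount below are assumptions about a typical setup, not project defaults:

```bash
# Example only: volume name and JAR path are deployment-specific; the socket mount
# assumes the server talks to the local Docker daemon.
docker volume create cameleer-jars
docker run -d --name cameleer3-server \
  -v cameleer-jars:/data/jars \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -e CAMELEER_JAR_DOCKER_VOLUME=cameleer-jars \
  gitea.siegeln.net/cameleer/cameleer3-server:latest
```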
### nginx / Reverse Proxy

- `client_max_body_size 200m` is required in the nginx config to allow JAR uploads up to 200 MB. Without this, large JAR uploads return 413.
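A hedged upload example that exercises this limit; the app id, file path, host, and multipart field name are placeholders (the endpoint is the `upload-jar` route listed under `AppController`):

```bash
# Placeholder id/path/host; "file" as the form field name is an assumption.
curl -s -X POST -H "Authorization: Bearer $TOKEN" \
  -F "file=@target/my-app-1.0.jar" \
  https://cameleer.example.com/api/v1/apps/42/upload-jar
```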
## Disabled Skills

- Do NOT use any `gsd:*` skills in this project. This includes all `/gsd:` prefixed commands.

<!-- gitnexus:start -->
# GitNexus — Code Intelligence

This project is indexed by GitNexus as **cameleer3-server** (5509 symbols, 13919 relationships, 300 execution flows). Use the GitNexus MCP tools to understand code, assess impact, and navigate safely.

> If any GitNexus tool warns the index is stale, run `npx gitnexus analyze` in terminal first.

## Always Do

- **MUST run impact analysis before editing any symbol.** Before modifying a function, class, or method, run `gitnexus_impact({target: "symbolName", direction: "upstream"})` and report the blast radius (direct callers, affected processes, risk level) to the user.
- **MUST run `gitnexus_detect_changes()` before committing** to verify your changes only affect expected symbols and execution flows.
- **MUST warn the user** if impact analysis returns HIGH or CRITICAL risk before proceeding with edits.
- When exploring unfamiliar code, use `gitnexus_query({query: "concept"})` to find execution flows instead of grepping. It returns process-grouped results ranked by relevance.
- When you need full context on a specific symbol — callers, callees, which execution flows it participates in — use `gitnexus_context({name: "symbolName"})`.

## When Debugging

1. `gitnexus_query({query: "<error or symptom>"})` — find execution flows related to the issue
2. `gitnexus_context({name: "<suspect function>"})` — see all callers, callees, and process participation
3. `READ gitnexus://repo/cameleer3-server/process/{processName}` — trace the full execution flow step by step
4. For regressions: `gitnexus_detect_changes({scope: "compare", base_ref: "main"})` — see what your branch changed

## When Refactoring

- **Renaming**: MUST use `gitnexus_rename({symbol_name: "old", new_name: "new", dry_run: true})` first. Review the preview — graph edits are safe, text_search edits need manual review. Then run with `dry_run: false`.
- **Extracting/Splitting**: MUST run `gitnexus_context({name: "target"})` to see all incoming/outgoing refs, then `gitnexus_impact({target: "target", direction: "upstream"})` to find all external callers before moving code.
- After any refactor: run `gitnexus_detect_changes({scope: "all"})` to verify only expected files changed.

## Never Do

- NEVER edit a function, class, or method without first running `gitnexus_impact` on it.
- NEVER ignore HIGH or CRITICAL risk warnings from impact analysis.
- NEVER rename symbols with find-and-replace — use `gitnexus_rename` which understands the call graph.
- NEVER commit changes without running `gitnexus_detect_changes()` to check affected scope.

## Tools Quick Reference

| Tool | When to use | Command |
|------|-------------|---------|
| `query` | Find code by concept | `gitnexus_query({query: "auth validation"})` |
| `context` | 360-degree view of one symbol | `gitnexus_context({name: "validateUser"})` |
| `impact` | Blast radius before editing | `gitnexus_impact({target: "X", direction: "upstream"})` |
| `detect_changes` | Pre-commit scope check | `gitnexus_detect_changes({scope: "staged"})` |
| `rename` | Safe multi-file rename | `gitnexus_rename({symbol_name: "old", new_name: "new", dry_run: true})` |
| `cypher` | Custom graph queries | `gitnexus_cypher({query: "MATCH ..."})` |

## Impact Risk Levels

| Depth | Meaning | Action |
|-------|---------|--------|
| d=1 | WILL BREAK — direct callers/importers | MUST update these |
| d=2 | LIKELY AFFECTED — indirect deps | Should test |
| d=3 | MAY NEED TESTING — transitive | Test if critical path |

## Resources

| Resource | Use for |
|----------|---------|
| `gitnexus://repo/cameleer3-server/context` | Codebase overview, check index freshness |
| `gitnexus://repo/cameleer3-server/clusters` | All functional areas |
| `gitnexus://repo/cameleer3-server/processes` | All execution flows |
| `gitnexus://repo/cameleer3-server/process/{name}` | Step-by-step execution trace |

## Self-Check Before Finishing

Before completing any code modification task, verify:
1. `gitnexus_impact` was run for all modified symbols
2. No HIGH/CRITICAL risk warnings were ignored
3. `gitnexus_detect_changes()` confirms changes match expected scope
4. All d=1 (WILL BREAK) dependents were updated

## Keeping the Index Fresh

After committing code changes, the GitNexus index becomes stale. Re-run analyze to update it:

```bash
npx gitnexus analyze
```

If the index previously included embeddings, preserve them by adding `--embeddings`:

```bash
npx gitnexus analyze --embeddings
```

To check whether embeddings exist, inspect `.gitnexus/meta.json` — the `stats.embeddings` field shows the count (0 means no embeddings). **Running analyze without `--embeddings` will delete any previously generated embeddings.**
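For example, with `jq` installed:

```bash
# 0 means the index has no embeddings; pass --embeddings to keep any that exist.
jq '.stats.embeddings' .gitnexus/meta.json
```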
> Claude Code users: A PostToolUse hook handles this automatically after `git commit` and `git merge`.

## CLI

| Task | Read this skill file |
|------|---------------------|
| Understand architecture / "How does X work?" | `.claude/skills/gitnexus/gitnexus-exploring/SKILL.md` |
| Blast radius / "What breaks if I change X?" | `.claude/skills/gitnexus/gitnexus-impact-analysis/SKILL.md` |
| Trace bugs / "Why is X failing?" | `.claude/skills/gitnexus/gitnexus-debugging/SKILL.md` |
| Rename / extract / split / refactor | `.claude/skills/gitnexus/gitnexus-refactoring/SKILL.md` |
| Tools, resources, schema reference | `.claude/skills/gitnexus/gitnexus-guide/SKILL.md` |
| Index, status, clean, wiki CLI commands | `.claude/skills/gitnexus/gitnexus-cli/SKILL.md` |

<!-- gitnexus:end -->

@@ -18,10 +18,6 @@ FROM eclipse-temurin:17-jre
WORKDIR /app
COPY --from=build /build/cameleer3-server-app/target/cameleer3-server-app-*.jar /app/server.jar

ENV SPRING_DATASOURCE_URL=jdbc:postgresql://postgres:5432/cameleer3
ENV SPRING_DATASOURCE_USERNAME=cameleer
ENV SPRING_DATASOURCE_PASSWORD=cameleer_dev
ENV OPENSEARCH_URL=http://opensearch:9200

EXPOSE 8081
ENTRYPOINT exec java -jar /app/server.jar
ENV TZ=UTC
ENTRYPOINT exec java -Duser.timezone=UTC -jar /app/server.jar
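Since the image no longer ships datasource defaults, a standalone `docker run` has to pass them explicitly; the values below are the local-development defaults used elsewhere in this repo:

```bash
# Local-dev example only; use real credentials anywhere else.
docker run -d --name cameleer3-server -p 8081:8081 \
  -e SPRING_DATASOURCE_URL=jdbc:postgresql://postgres:5432/cameleer3 \
  -e SPRING_DATASOURCE_USERNAME=cameleer \
  -e SPRING_DATASOURCE_PASSWORD=cameleer_dev \
  -e CAMELEER_AUTH_TOKEN=my-secret-token \
  gitea.siegeln.net/cameleer/cameleer3-server:latest
```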
HOWTO.md (106 lines changed)
@@ -21,18 +21,17 @@ mvn clean verify # compile + run all tests (needs Docker for integrati

## Infrastructure Setup

Start PostgreSQL and OpenSearch:
Start PostgreSQL:

```bash
docker compose up -d
```

This starts TimescaleDB (PostgreSQL 16) and OpenSearch 2.19. The database schema is applied automatically via Flyway migrations on server startup.
This starts PostgreSQL 16. The database schema is applied automatically via Flyway migrations on server startup. ClickHouse tables are created by the schema initializer on startup.

| Service | Port | Purpose |
|------------|------|----------------------|
| PostgreSQL | 5432 | JDBC (Spring JDBC) |
| OpenSearch | 9200 | REST API (full-text) |

PostgreSQL credentials: `cameleer` / `cameleer_dev`, database `cameleer3`.

@@ -40,9 +39,15 @@ PostgreSQL credentials: `cameleer` / `cameleer_dev`, database `cameleer3`.

```bash
mvn clean package -DskipTests
CAMELEER_AUTH_TOKEN=my-secret-token java -jar cameleer3-server-app/target/cameleer3-server-app-1.0-SNAPSHOT.jar
SPRING_DATASOURCE_URL=jdbc:postgresql://localhost:5432/cameleer3 \
SPRING_DATASOURCE_USERNAME=cameleer \
SPRING_DATASOURCE_PASSWORD=cameleer_dev \
CAMELEER_AUTH_TOKEN=my-secret-token \
java -jar cameleer3-server-app/target/cameleer3-server-app-1.0-SNAPSHOT.jar
```

> **Note:** The Docker image no longer includes default database credentials. When running via `docker run`, pass `-e SPRING_DATASOURCE_URL=...` etc. The docker-compose setup provides these automatically.

The server starts on **port 8081**. The `CAMELEER_AUTH_TOKEN` environment variable is **required** — the server fails fast on startup if it is not set.

For token rotation without downtime, set `CAMELEER_AUTH_TOKEN_PREVIOUS` to the old token while rolling out the new one. The server accepts both during the overlap window.
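A rotation could look like this (both token values are placeholders):

```bash
# Overlap window: agents still presenting the old token keep working.
CAMELEER_AUTH_TOKEN=new-secret-token \
CAMELEER_AUTH_TOKEN_PREVIOUS=my-secret-token \
java -jar cameleer3-server-app/target/cameleer3-server-app-1.0-SNAPSHOT.jar
# Once every agent uses the new token, restart without CAMELEER_AUTH_TOKEN_PREVIOUS.
```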
@@ -101,12 +106,14 @@ JWTs carry a `roles` claim. Endpoints are restricted by role:
| Role | Access |
|------|--------|
| `AGENT` | Data ingestion (`/data/**` — executions, diagrams, metrics, logs), heartbeat, SSE events, command ack |
| `VIEWER` | Search, execution detail, diagrams, agent list |
| `OPERATOR` | VIEWER + send commands to agents |
| `ADMIN` | OPERATOR + user management (`/admin/**`) |
| `VIEWER` | Search, execution detail, diagrams, agent list, app config (read-only) |
| `OPERATOR` | VIEWER + send commands to agents, route control, replay, edit app config |
| `ADMIN` | OPERATOR + user management, audit log, OIDC config, database admin (`/admin/**`) |

The env-var local user gets `ADMIN` role. Agents get `AGENT` role at registration.

**UI role gating:** The sidebar hides the Admin section for non-ADMIN users. Admin routes (`/admin/*`) redirect to `/` for non-admin. The diagram node toolbar and route control bar are hidden for VIEWER. Config is a main tab (`/config` shows all apps, `/config/:appId` filters to one app with detail panel; sidebar clicks stay on config tab, route clicks resolve to parent app). VIEWER sees read-only, OPERATOR+ can edit.
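One hedged way to verify the role restrictions from the API side; the tokens are placeholders, and the exact status code for a denied request depends on the security configuration (typically 403):

```bash
# Expect 200 with an ADMIN token and a 4xx (typically 403) with a VIEWER token.
curl -s -o /dev/null -w '%{http_code}\n' -H "Authorization: Bearer $ADMIN_TOKEN" \
  http://localhost:8081/api/v1/admin/users
curl -s -o /dev/null -w '%{http_code}\n' -H "Authorization: Bearer $VIEWER_TOKEN" \
  http://localhost:8081/api/v1/admin/users
```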
### OIDC Login (Optional)
|
||||
|
||||
OIDC configuration is stored in PostgreSQL and managed via the admin API or UI. The SPA checks if OIDC is available:
|
||||
@@ -139,7 +146,7 @@ curl -s -X PUT http://localhost:8081/api/v1/admin/oidc \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
-d '{
|
||||
"enabled": true,
|
||||
"issuerUri": "http://authentik:9000/application/o/cameleer/",
|
||||
"issuerUri": "http://logto:3001/oidc",
|
||||
"clientId": "your-client-id",
|
||||
"clientSecret": "your-client-secret",
|
||||
"rolesClaim": "realm_access.roles",
|
||||
@@ -157,28 +164,48 @@ curl -s -X DELETE http://localhost:8081/api/v1/admin/oidc \
|
||||
|
||||
**Initial provisioning**: OIDC can also be seeded from `CAMELEER_OIDC_*` env vars on first startup (when DB is empty). After that, the admin API takes over.
|
||||
|
||||
### Authentik Setup (OIDC Provider)
|
||||
### Logto Setup (OIDC Provider)
|
||||
|
||||
Authentik is deployed alongside the Cameleer stack. After first deployment:
|
||||
Logto is deployed alongside the Cameleer stack. After first deployment:
|
||||
|
||||
1. **Initial setup**: Open `http://192.168.50.86:30950/if/flow/initial-setup/` and create the admin account
|
||||
2. **Create provider**: Admin Interface → Providers → Create → OAuth2/OpenID Provider
|
||||
- Name: `Cameleer`
|
||||
- Authorization flow: `default-provider-authorization-explicit-consent`
|
||||
- Client type: `Confidential`
|
||||
- Redirect URIs: `http://192.168.50.86:30090/callback` (or your UI URL)
|
||||
Logto is proxy-aware via `TRUST_PROXY_HEADER=1`. The `LOGTO_ENDPOINT` and `LOGTO_ADMIN_ENDPOINT` secrets define the public-facing URLs that Logto uses for OIDC discovery, issuer URI, and redirect URLs. When behind a reverse proxy (e.g., Traefik), set these to the external URLs (e.g., `https://auth.cameleer.my.domain`). Logto needs its own subdomain — it cannot be path-prefixed under another app.
|
||||
|
||||
1. **Initial setup**: Open the Logto admin console (the `LOGTO_ADMIN_ENDPOINT` URL) and create the admin account
|
||||
2. **Create SPA application**: Applications → Create → Single Page App
|
||||
- Name: `Cameleer UI`
|
||||
- Redirect URI: your UI URL + `/oidc/callback`
|
||||
- Note the **Client ID**
|
||||
3. **Create API Resource**: API Resources → Create
|
||||
- Name: `Cameleer Server API`
|
||||
- Indicator: your API URL (e.g., `https://cameleer.siegeln.net/api`)
|
||||
- Add permissions: `server:admin`, `server:operator`, `server:viewer`
|
||||
4. **Create M2M application** (for SaaS platform): Applications → Create → Machine-to-Machine
|
||||
- Name: `Cameleer SaaS`
|
||||
- Assign the API Resource created above with `server:admin` scope
|
||||
- Note the **Client ID** and **Client Secret**
|
||||
3. **Create application**: Admin Interface → Applications → Create
|
||||
- Name: `Cameleer`
|
||||
- Provider: select `Cameleer` (created above)
|
||||
4. **Configure roles** (optional): Create groups in Authentik and map them to Cameleer roles via the `roles-claim` config. Default claim path is `realm_access.roles`. For Authentik, you may need to customize the OIDC scope to include group claims.
|
||||
5. **Configure Cameleer**: Use the admin API (`PUT /api/v1/admin/oidc`) or set env vars for initial seeding:
|
||||
5. **Configure Cameleer OIDC login**: Use the admin API (`PUT /api/v1/admin/oidc`) or set env vars for initial seeding:
|
||||
```
CAMELEER_OIDC_ENABLED=true
CAMELEER_OIDC_ISSUER=http://authentik:9000/application/o/cameleer/
CAMELEER_OIDC_ISSUER=<LOGTO_ENDPOINT>/oidc
CAMELEER_OIDC_CLIENT_ID=<client-id-from-step-2>
CAMELEER_OIDC_CLIENT_SECRET=<client-secret-from-step-2>
CAMELEER_OIDC_CLIENT_SECRET=<not-needed-for-public-spa>
```
6. **Configure resource server** (for M2M token validation):
```
CAMELEER_OIDC_ISSUER_URI=<LOGTO_ENDPOINT>/oidc
CAMELEER_OIDC_JWK_SET_URI=http://logto:3001/oidc/jwks
CAMELEER_OIDC_AUDIENCE=<api-resource-indicator-from-step-3>
CAMELEER_OIDC_TLS_SKIP_VERIFY=true  # optional — skip cert verification for self-signed CAs
```
`JWK_SET_URI` is needed when the public issuer URL isn't reachable from inside containers — it fetches JWKS directly from the internal Logto service. `TLS_SKIP_VERIFY` disables certificate verification for all OIDC HTTP calls (discovery, token exchange, JWKS); use only when the provider has a self-signed CA.
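
For reference, a minimal sketch of how a Spring Security resource server could combine these three settings, assuming the standard Nimbus `JwtDecoder` APIs; the class and method names below are illustrative and not the actual Cameleer wiring:

```java
import org.springframework.security.oauth2.core.*;
import org.springframework.security.oauth2.jwt.*;

class ResourceServerJwtDecoderSketch {

    // issuerUri  = CAMELEER_OIDC_ISSUER_URI  (public URL, checked against the token's iss claim)
    // jwkSetUri  = CAMELEER_OIDC_JWK_SET_URI (internal URL used to fetch signing keys)
    // audience   = CAMELEER_OIDC_AUDIENCE    (API resource indicator expected in aud)
    static JwtDecoder buildDecoder(String issuerUri, String jwkSetUri, String audience) {
        // Fetch keys from the internal JWKS endpoint (reachable from inside the cluster)
        NimbusJwtDecoder decoder = NimbusJwtDecoder.withJwkSetUri(jwkSetUri).build();

        // Validate iss against the public issuer URI and require the API resource in aud
        OAuth2TokenValidator<Jwt> issuerCheck = JwtValidators.createDefaultWithIssuer(issuerUri);
        OAuth2TokenValidator<Jwt> audienceCheck = jwt ->
                jwt.getAudience() != null && jwt.getAudience().contains(audience)
                        ? OAuth2TokenValidatorResult.success()
                        : OAuth2TokenValidatorResult.failure(new OAuth2Error(
                                "invalid_token", "missing required audience " + audience, null));

        decoder.setJwtValidator(new DelegatingOAuth2TokenValidator<>(issuerCheck, audienceCheck));
        return decoder;
    }
}
```
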
### SSO Behavior

When OIDC is configured and enabled, the UI automatically redirects to the OIDC provider for silent SSO (`prompt=none`). Users with an active provider session are signed in without seeing a login form. On first login, the provider may show a consent screen (scopes), after which subsequent logins are seamless. If auto-signup is enabled, new users are automatically provisioned with the configured default roles.

- **Bypass SSO**: Navigate to `/login?local` to see the local login form
- **Subpath deployments**: The OIDC redirect_uri respects `BASE_PATH` (e.g., `https://host/server/oidc/callback`)
- **Role sync**: System roles (ADMIN/OPERATOR/VIEWER) are synced from OIDC scopes on every login — revoking a scope in the provider takes effect on next login. Manually assigned group memberships are preserved.

### User Management (ADMIN only)

@@ -344,10 +371,14 @@ curl -s -X POST http://localhost:8081/api/v1/agents/agent-1/commands/{commandId}

**Agent lifecycle:** LIVE (heartbeat within 90s) → STALE (missed 3 heartbeats) → DEAD (5min after STALE). DEAD agents kept indefinitely.

**Server restart resilience:** The agent registry is in-memory and lost on server restart. Agents auto-re-register on their next heartbeat or SSE connection — the server reconstructs registry entries from JWT claims (subject, application). Route catalog uses ClickHouse execution data as fallback until agents re-register with full route IDs. Agents should also handle 404 on heartbeat by triggering a full re-registration.
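
A rough agent-side sketch of that 404 handling, using only the heartbeat path mentioned in this document; the HTTP method, base URL, and class name are assumptions for illustration, not the actual agent implementation:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

class HeartbeatLoopSketch {

    private final HttpClient http = HttpClient.newHttpClient();
    private final String baseUrl;     // e.g. http://cameleer3-server:8081/api/v1 (assumed)
    private final String instanceId;

    HeartbeatLoopSketch(String baseUrl, String instanceId) {
        this.baseUrl = baseUrl;
        this.instanceId = instanceId;
    }

    void heartbeatOnce(String jwt) throws Exception {
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(baseUrl + "/agents/" + instanceId + "/heartbeat"))
                .header("Authorization", "Bearer " + jwt)
                .POST(HttpRequest.BodyPublishers.noBody())   // method is an assumption
                .build();
        HttpResponse<Void> response = http.send(request, HttpResponse.BodyHandlers.discarding());
        if (response.statusCode() == 404) {
            // Server restarted and lost its in-memory registry entry for this instance:
            // send the full registration (application, route IDs, ...) again.
            reRegister(jwt);
        }
    }

    private void reRegister(String jwt) {
        // Full re-registration payload omitted in this sketch.
    }
}
```
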
**SSE events:** `config-update`, `deep-trace`, `replay`, `route-control` commands pushed in real time. Server sends ping keepalive every 15s.

**Command expiry:** Unacknowledged commands expire after 60 seconds.

**Route control responses:** Route control commands return `CommandGroupResponse` with per-agent status, response count, and timed-out agent IDs.
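
Purely for orientation, that response shape could be modeled roughly like this; the field names are guesses based on the sentence above, not the actual `CommandGroupResponse` DTO in the codebase:

```java
import java.util.List;
import java.util.Map;

// Illustrative only; consult the real DTO for the exact field names.
record CommandGroupResponseSketch(
        Map<String, String> statusByAgent,   // per-agent command status
        int responseCount,                   // agents that replied before the timeout
        List<String> timedOutAgentIds) {     // agents that never acknowledged
}
```
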
### Backpressure

When the write buffer is full (default capacity: 50,000), ingestion endpoints return **503 Service Unavailable**. Already-buffered data is not lost.
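
A minimal sender-side sketch of how a client might react to that 503, assuming a plain JDK HTTP client; the ingestion URL is passed in by the caller, and the retry count and delays are arbitrary illustration values:

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.time.Duration;

class IngestionBackoffSketch {

    private final HttpClient http = HttpClient.newHttpClient();

    void sendWithRetry(String ingestUrl, String json) throws Exception {
        long delayMs = 500;                       // initial backoff
        for (int attempt = 0; attempt < 5; attempt++) {
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create(ingestUrl))
                    .header("Content-Type", "application/json")
                    .timeout(Duration.ofSeconds(5))
                    .POST(HttpRequest.BodyPublishers.ofString(json))
                    .build();
            int status = http.send(request, HttpResponse.BodyHandlers.discarding()).statusCode();
            if (status != 503) {
                return;                           // accepted (2xx) or a non-retryable error
            }
            Thread.sleep(delayMs);                // write buffer full: wait and retry
            delayMs = Math.min(delayMs * 2, 10_000);
        }
    }
}
```
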
@@ -374,6 +405,7 @@ Key settings in `cameleer3-server-app/src/main/resources/application.yml`:

| `security.ui-user` | `admin` | UI login username (`CAMELEER_UI_USER` env var) |
| `security.ui-password` | `admin` | UI login password (`CAMELEER_UI_PASSWORD` env var) |
| `security.ui-origin` | `http://localhost:5173` | CORS allowed origin for UI (`CAMELEER_UI_ORIGIN` env var) |
| `security.cors-allowed-origins` | *(empty)* | Comma-separated CORS origins (`CAMELEER_CORS_ALLOWED_ORIGINS`) — overrides `ui-origin` when set |
| `security.jwt-secret` | *(random)* | HMAC secret for JWT signing (`CAMELEER_JWT_SECRET`). If set, tokens survive restarts |
| `security.oidc.enabled` | `false` | Enable OIDC login (`CAMELEER_OIDC_ENABLED`) |
| `security.oidc.issuer-uri` | | OIDC provider issuer URL (`CAMELEER_OIDC_ISSUER`) |

@@ -381,8 +413,8 @@ Key settings in `cameleer3-server-app/src/main/resources/application.yml`:

| `security.oidc.client-secret` | | OAuth2 client secret (`CAMELEER_OIDC_CLIENT_SECRET`) |
| `security.oidc.roles-claim` | `realm_access.roles` | JSONPath to roles in OIDC id_token (`CAMELEER_OIDC_ROLES_CLAIM`) |
| `security.oidc.default-roles` | `VIEWER` | Default roles for new OIDC users (`CAMELEER_OIDC_DEFAULT_ROLES`) |
| `opensearch.log-index-prefix` | `logs-` | OpenSearch index prefix for application logs (`CAMELEER_LOG_INDEX_PREFIX`) |
| `opensearch.log-retention-days` | `7` | Days before log indices are deleted (`CAMELEER_LOG_RETENTION_DAYS`) |
| `cameleer.indexer.debounce-ms` | `2000` | Search indexer debounce delay (`CAMELEER_INDEXER_DEBOUNCE_MS`) |
| `cameleer.indexer.queue-size` | `10000` | Search indexer queue capacity (`CAMELEER_INDEXER_QUEUE_SIZE`) |

## Web UI Development

@@ -407,7 +439,7 @@ npm run generate-api # Requires backend running on :8081

## Running Tests

Integration tests use Testcontainers (starts PostgreSQL and OpenSearch automatically — requires Docker):
Integration tests use Testcontainers (starts PostgreSQL automatically — requires Docker):

```bash
# All tests
@@ -438,13 +470,15 @@ The full stack is deployed to k3s via CI/CD on push to `main`. K8s manifests are

```
cameleer namespace:
  PostgreSQL (StatefulSet, 10Gi PVC)      ← postgres:5432 (ClusterIP)
  OpenSearch (StatefulSet, 10Gi PVC)      ← opensearch:9200 (ClusterIP)
  ClickHouse (StatefulSet, 10Gi PVC)      ← clickhouse:8123 (ClusterIP)
  cameleer3-server (Deployment)           ← NodePort 30081
  cameleer3-ui (Deployment, Nginx)        ← NodePort 30090
  Authentik Server (Deployment)           ← NodePort 30950
  Authentik Worker (Deployment)
  Authentik PostgreSQL (StatefulSet, 1Gi) ← ClusterIP
  Authentik Redis (Deployment)            ← ClusterIP
  cameleer-deploy-demo (Deployment)       ← NodePort 30092
  Logto Server (Deployment)               ← NodePort 30951/30952
  Logto PostgreSQL (StatefulSet, 1Gi)     ← ClusterIP

cameleer-demo namespace:
  (deployed Camel applications — managed by cameleer-deploy-demo)
```

### Access (from your network)

@@ -454,13 +488,15 @@ cameleer namespace:

| Web UI | `http://192.168.50.86:30090` |
| Server API | `http://192.168.50.86:30081/api/v1/health` |
| Swagger UI | `http://192.168.50.86:30081/api/v1/swagger-ui.html` |
| Authentik | `http://192.168.50.86:30950` |
| Deploy Demo | `http://192.168.50.86:30092` |
| Logto API | `LOGTO_ENDPOINT` secret (NodePort 30951 direct, or behind reverse proxy) |
| Logto Admin | `LOGTO_ADMIN_ENDPOINT` secret (NodePort 30952 direct, or behind reverse proxy) |

### CI/CD Pipeline

Push to `main` triggers: **build** (UI npm + Maven, unit tests) → **docker** (buildx amd64 for server + UI, push to Gitea registry) → **deploy** (kubectl apply + rolling update).

Required Gitea org secrets: `REGISTRY_TOKEN`, `KUBECONFIG_BASE64`, `CAMELEER_AUTH_TOKEN`, `CAMELEER_JWT_SECRET`, `POSTGRES_USER`, `POSTGRES_PASSWORD`, `POSTGRES_DB`, `OPENSEARCH_USER`, `OPENSEARCH_PASSWORD`, `CAMELEER_UI_USER` (optional), `CAMELEER_UI_PASSWORD` (optional), `AUTHENTIK_PG_USER`, `AUTHENTIK_PG_PASSWORD`, `AUTHENTIK_SECRET_KEY`, `CAMELEER_OIDC_ENABLED`, `CAMELEER_OIDC_ISSUER`, `CAMELEER_OIDC_CLIENT_ID`, `CAMELEER_OIDC_CLIENT_SECRET`.
Required Gitea org secrets: `REGISTRY_TOKEN`, `KUBECONFIG_BASE64`, `CAMELEER_AUTH_TOKEN`, `CAMELEER_JWT_SECRET`, `POSTGRES_USER`, `POSTGRES_PASSWORD`, `POSTGRES_DB`, `CLICKHOUSE_USER`, `CLICKHOUSE_PASSWORD`, `CAMELEER_UI_USER` (optional), `CAMELEER_UI_PASSWORD` (optional), `LOGTO_PG_USER`, `LOGTO_PG_PASSWORD`, `LOGTO_ENDPOINT` (public-facing Logto URL, e.g., `https://auth.cameleer.my.domain`), `LOGTO_ADMIN_ENDPOINT` (admin console URL), `CAMELEER_OIDC_ISSUER_URI` (optional, for resource server M2M token validation), `CAMELEER_OIDC_AUDIENCE` (optional, API resource indicator), `CAMELEER_OIDC_TLS_SKIP_VERIFY` (optional, skip TLS cert verification for self-signed CAs).

### Manual K8s Commands

@@ -474,8 +510,8 @@ kubectl -n cameleer logs -f deploy/cameleer3-server

# View PostgreSQL logs
kubectl -n cameleer logs -f statefulset/postgres

# View OpenSearch logs
kubectl -n cameleer logs -f statefulset/opensearch
# View ClickHouse logs
kubectl -n cameleer logs -f statefulset/clickhouse

# Restart server
kubectl -n cameleer rollout restart deployment/cameleer3-server

UI-CONSISTENCY-AUDIT.md (new file, 259 lines)
@@ -0,0 +1,259 @@
> **Status: RESOLVED** — All phases (1-5) executed on 2026-04-09. Remaining: responsive design (separate initiative).

# UI Consistency Audit — cameleer3-server

**Date:** 2026-04-09
**Scope:** All files under `ui/src/` (26 CSS modules, ~45 TSX components, ~15 pages)
**Verdict:** ~55% design system adoption for interactive UI. Significant duplication and inline style debt.

---

## Executive Summary

| Dimension | Score | Key Issue |
|-----------|-------|-----------|
| Design system component adoption | 55% | 32 raw `<button>`, 12 raw `<select>`, 8 raw `<input>` should use DS |
| Color consistency | Poor | ~140 violations: 45 hardcoded hex in TSX, 13 naked hex in CSS, ~55 fallback hex in `var()` |
| Inline styles | Poor | 55 RED (static inline styles), 8 YELLOW, 14 GREEN (justified) |
| Layout consistency | Mixed | 3 different page padding values, mixed gap/margin approaches |
| CSS module duplication | 22% | ~135 of 618 classes are copy-pasted across files |
| Responsive design | None | Zero `@media` queries in entire UI |

---

## 1. Critical: Hardcoded Colors (CLAUDE.md violation)

The project rule states: *"Always use `@cameleer/design-system` CSS variables for colors — never hardcode hex values."*

### Worst offenders

| File | Violations | Severity |
|------|-----------|----------|
| `ProcessDiagram/DiagramNode.tsx` | ~20 hex values in SVG fill/stroke | Critical |
| `ExecutionDiagram/ExecutionDiagram.module.css` | 17 naked hex + ~40 hex fallbacks in `var()` | Critical |
| `ProcessDiagram/CompoundNode.tsx` | 8 hex values | Critical |
| `ProcessDiagram/DiagramEdge.tsx` | 3 hex values | High |
| `ProcessDiagram/ConfigBadge.tsx` | 3 hex values | High |
| `ProcessDiagram/ErrorSection.tsx` | 2 hex values | High |
| `ProcessDiagram/NodeToolbar.tsx` | 2 hex values | High |
| `ProcessDiagram/Minimap.tsx` | 3 hex values | High |
| `Dashboard/Dashboard.module.css` | `#5db866` (not even a DS color) | High |
| `AppsTab/AppsTab.module.css` | `var(--accent, #6c7aff)` (undefined DS variable) | Medium |

### Undefined CSS variables (not in design system)

| Variable | Files | Should be |
|----------|-------|-----------|
| `--accent` | EnvironmentSelector, AppsTab | `--amber` (or define in DS) |
| `--bg-base` | LoginPage | `--bg-body` |
| `--surface` | ContentTabs, ExchangeHeader | `--bg-surface` |
| `--bg-surface-raised` | AgentHealth | `--bg-raised` |

### Missing DS tokens needed

Several tint/background colors are used repeatedly but have no DS variable:
- `--error-bg` (used as `#FDF2F0`, `#F9E0DC`)
- `--success-bg` (used as `#F0F9F1`)
- `--amber-bg` / `--warning-bg` (used as `#FFF8F0`)
- `--bg-inverse` / `--text-inverse` (used as `#1A1612` / `#E4DFD8`)

---

## 2. Critical: CSS Module Duplication (~22%)

~135 of 618 class definitions are copy-pasted across files.

### Table section pattern — 5 files, ~35 duplicate classes

`.tableSection`, `.tableHeader`, `.tableTitle`, `.tableMeta`, `.tableRight` are **identical** in:
- `DashboardTab.module.css`
- `AuditLogPage.module.css`
- `ClickHouseAdminPage.module.css`
- `RoutesMetrics.module.css`
- `RouteDetail.module.css`

### Log viewer panel — 2 files, ~50 lines identical

`.logCard`, `.logHeader`, `.logToolbar`, `.logSearchWrap`, `.logSearchInput`, `.logSearchClear`, `.logClearFilters`, `.logEmpty`, `.sortBtn`, `.refreshBtn`, `.headerActions` — byte-for-byte identical in `AgentHealth.module.css` and `AgentInstance.module.css`.

### Tap modal form — 2 files, ~40 lines identical

`.typeSelector`, `.typeOption`, `.typeOptionActive`, `.testSection`, `.testTabs`, `.testTabBtn`, `.testTabBtnActive`, `.testBody`, `.testResult`, `.testSuccess`, `.testError` — identical in `TapConfigModal.module.css` and `RouteDetail.module.css`.

### Other duplicates

| Pattern | Files | Lines |
|---------|-------|-------|
| Rate color classes (`.rateGood/.rateWarn/.rateBad/.rateNeutral`) | DashboardTab, RouteDetail, RoutesMetrics | ~12 each |
| Refresh indicator + `@keyframes pulse` | DashboardTab, RoutesMetrics | ~15 each |
| Chart card (`.chartCard`) | AgentInstance, RouteDetail | ~6 each |
| Section card (`.section`) | AppConfigDetailPage, OidcConfigPage | ~7 each |
| Meta grid (`.metaGrid/.metaLabel/.metaValue`) | AboutMeDialog, UserManagement | ~9 each |

---

## 3. High: Inline Styles (55 RED violations)

### Files with zero CSS modules (all inline)

| File | Issue |
|------|-------|
| `pages/Admin/AdminLayout.tsx` | Entire layout wrapper is inline styled |
| `pages/Admin/DatabaseAdminPage.tsx` | All layout, typography, spacing inline — no CSS module |
| `auth/OidcCallback.tsx` | Full-page layout inline — no CSS module |

### Most inline violations

| File | RED count | Primary patterns |
|------|-----------|-----------------|
| `pages/AppsTab/AppsTab.tsx` | ~25 | Fixed-width inputs (`width: 50-90px` x18), visually-hidden pattern x2, table cell layouts |
| `components/LayoutShell.tsx` | 6 | StarredList sub-component, sidebar layout |
| `pages/Admin/EnvironmentsPage.tsx` | 8 | Raw `<select>` fully styled inline, save/cancel button rows |
| `pages/Routes/RouteDetail.tsx` | 5 | Heading styles, tab panel margins |

### Repeated inline patterns that need extraction

| Pattern | Occurrences | Fix |
|---------|-------------|-----|
| `style={{ display: 'flex', justifyContent: 'center', padding: '4rem' }}` (loading fallback) | 3 files | Create shared `<PageLoader>` |
| `style={{ position: 'absolute', width: 1, height: 1, clip: 'rect(0,0,0,0)' }}` (visually hidden) | 2 in AppsTab | Create `.visuallyHidden` utility class |
| `style={{ width: N }}` on `<Input>`/`<Select>` (fixed widths) | 18+ in AppsTab | Size classes or CSS module rules |
| `style={{ marginTop: 8, display: 'flex', gap: 8, justifyContent: 'flex-end' }}` (action row) | 3+ in EnvironmentsPage | Shared `.editActions` class |

---

## 4. High: Design System Component Adoption Gaps

### Native HTML that should use DS components

| Element | Instances | Files | DS Replacement |
|---------|-----------|-------|---------------|
| `<button>` | 32 | 8 files | `Button`, `SegmentedTabs` |
| `<select>` | 12 | 4 files | `Select` |
| `<input>` | 8 | 4 files | `Input`, `Toggle`, `Checkbox` |
| `<label>` | 9 | 2 files | `FormField`, `Label` |
| `<table>` (data) | 2 | 2 files | `DataTable`, `LogViewer` |

### Highest-priority replacements

1. **`EnvironmentSelector.tsx`** — zero DS imports, entire component is a bare `<select>`. Used globally in sidebar.
2. **`ExecutionDiagram/tabs/LogTab.tsx`** — reimplements LogViewer from scratch (raw table + input + button). AgentInstance and AgentHealth already use DS `LogViewer` correctly.
3. **`AppsTab.tsx` sub-tabs** — 3 instances of homegrown `<button>` tab bars. DS provides `SegmentedTabs` and `Tabs`.
4. **`AppConfigDetailPage.tsx`** — 4x `<select>`, 4x `<label>`, 2x `<input type="checkbox">`, 4x `<button>` — all have DS equivalents already used elsewhere.
5. **`AgentHealth.tsx`** — config bar uses `Toggle` (correct) alongside raw `<select>` and `<button>` (incorrect).

### Cross-page inconsistencies

| Pattern | Correct usage | Incorrect usage |
|---------|--------------|-----------------|
| Log viewer | AgentInstance, AgentHealth use DS `LogViewer` | LogTab rebuilds from scratch |
| Config edit form | Both pages render same 4 fields | AgentHealth uses `Toggle`, AppConfigDetail uses `<input type="checkbox">` |
| Sub-tabs | RbacPage uses DS `Tabs` | AppsTab uses homegrown `<button>` tabs with non-DS `--accent` color |
| Select dropdowns | AppsTab uses DS `Select` for some fields | Same file uses raw `<select>` for other fields |

---

## 5. Medium: Layout Inconsistencies

### Page padding (3 different values)

| Pages | Padding |
|-------|---------|
| AgentHealth, AgentInstance, AdminLayout | `20px 24px 40px` |
| AppsTab | `16px` (all sides) |
| DashboardTab, Dashboard | No padding (full-bleed) |

### Section gap spacing (mixed approaches)

| Approach | Pages |
|----------|-------|
| CSS `gap: 20px` on flex container | DashboardTab, RoutesMetrics |
| `margin-bottom: 20px` | AgentInstance |
| Mixed `margin-bottom: 16px` and `20px` on same page | AgentHealth, ClickHouseAdminPage |

### Typography inconsistencies

| Issue | Details |
|-------|---------|
| Card title weight | Most use `font-weight: 600`, RouteDetail `.paneTitle` uses `700` |
| Chart title style | RouteDetail: `12px/700/uppercase`, AgentHealth: `12px/600/uppercase` |
| Font units | ExchangeHeader + TabKpis use `rem`, everything else uses `px` |
| Raw headings | DatabaseAdminPage uses `<h2>`/`<h3>` with inline styles; all others use DS `SectionHeader` or CSS classes |
| Table header padding | Most: `12px 16px`, Dashboard: `8px 12px`, AgentHealth eventCard: `10px 16px` |

### Stat strip layouts

| Page | Layout | Gap |
|------|--------|-----|
| AgentHealth, AgentInstance, RbacPage | CSS grid `repeat(N, 1fr)` | `10px` |
| ClickHouseAdminPage | Flexbox (unequal widths) | `10px` |
| DatabaseAdminPage | Inline flex | `1rem` (16px) |

### Empty state patterns (5 different approaches)

1. DS `<EmptyState>` component (AgentInstance — correct)
2. `EntityList emptyMessage` prop (EnvironmentsPage, RbacPage)
3. `.logEmpty` CSS class, `12px`, `var(--text-faint)` (AgentHealth, AgentInstance)
4. `.emptyNote` CSS class, `12px`, `italic` (AppsTab)
5. Inline `0.875rem`, `var(--text-muted)` (ExchangesPage)

### Loading state patterns (3 different approaches)

1. `<Spinner size="lg">` in flex div with inline `padding: 4rem` — copy-pasted 3 times
2. `<Spinner size="md">` returned directly, no centering (EnvironmentsPage)
3. No loading UI, data simply absent (DashboardL1/L2/L3)

---

## 6. Low: Other Findings

- **`!important`**: 1 use in `RouteControlBar.module.css` — works around specificity conflict
- **Zero responsive design**: no `@media` queries anywhere
- **Z-index**: only 4 uses, all in diagram components (5 and 10), consistent
- **Naming convention**: all camelCase — consistent, no issues
- **Unused CSS classes**: ~11 likely unused in AppsTab (old create-modal classes) and TapConfigModal

---

## Recommended Fix Order

### Phase 1: Design system tokens (unblocks everything else)

1. Add missing DS variables: `--error-bg`, `--success-bg`, `--amber-bg`, `--bg-inverse`, `--text-inverse`
2. Fix undefined variables: `--accent` -> `--amber`, `--bg-base` -> `--bg-body`, `--surface` -> `--bg-surface`

### Phase 2: Eliminate CSS duplication (~22% of all classes)

3. Extract shared `tableSection` pattern to shared CSS module (saves ~140 duplicate lines across 5 files)
4. Extract shared log viewer CSS to shared module (saves ~50 lines across 2 files)
5. Remove duplicate tap modal CSS from RouteDetail (saves ~40 lines)
6. Extract shared rate/refresh/chart patterns

### Phase 3: Fix hardcoded colors

7. Replace all hex in `ProcessDiagram/*.tsx` SVG components (~45 values)
8. Replace all hex in `ExecutionDiagram.module.css` (~17 naked + strip ~40 fallbacks)
9. Fix remaining CSS hex violations (Dashboard, AppsTab, AgentHealth)

### Phase 4: Replace native HTML with DS components

10. `EnvironmentSelector` -> DS `Select`
11. `LogTab` -> DS `LogViewer`
12. `AppsTab` sub-tabs -> DS `SegmentedTabs`
13. `AppConfigDetailPage` form elements -> DS `Select`/`Toggle`/`FormField`/`Button`
14. Remaining `<button>` -> DS `Button`

### Phase 5: Eliminate inline styles

15. Create CSS modules for AdminLayout, DatabaseAdminPage, OidcCallback
16. Extract shared `<PageLoader>` component
17. Move AppsTab fixed-width inputs to CSS module size classes
18. Move remaining inline margins/flex patterns to CSS classes

### Phase 6: Standardize layout patterns

19. Unify page padding to `20px 24px 40px`
20. Standardize section gaps to `gap: 20px` on flex containers
21. Normalize font units to `px` throughout
22. Standardize empty state to DS `<EmptyState>`
23. Standardize loading state to shared `<PageLoader>`

@@ -48,14 +48,10 @@
            <artifactId>flyway-database-postgresql</artifactId>
        </dependency>
        <dependency>
            <groupId>org.opensearch.client</groupId>
            <artifactId>opensearch-java</artifactId>
            <version>2.19.0</version>
        </dependency>
        <dependency>
            <groupId>org.opensearch.client</groupId>
            <artifactId>opensearch-rest-client</artifactId>
            <version>2.19.0</version>
            <groupId>com.clickhouse</groupId>
            <artifactId>clickhouse-jdbc</artifactId>
            <version>0.9.7</version>
            <classifier>all</classifier>
        </dependency>
        <dependency>
            <groupId>org.springdoc</groupId>
@@ -90,6 +86,10 @@
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-security</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-oauth2-resource-server</artifactId>
        </dependency>
        <dependency>
            <groupId>com.nimbusds</groupId>
            <artifactId>nimbus-jose-jwt</artifactId>
@@ -121,11 +121,20 @@
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.opensearch</groupId>
            <artifactId>opensearch-testcontainers</artifactId>
            <version>2.1.1</version>
            <groupId>org.testcontainers</groupId>
            <artifactId>testcontainers-clickhouse</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>com.github.docker-java</groupId>
            <artifactId>docker-java-core</artifactId>
            <version>3.4.1</version>
        </dependency>
        <dependency>
            <groupId>com.github.docker-java</groupId>
            <artifactId>docker-java-transport-zerodep</artifactId>
            <version>3.4.1</version>
        </dependency>
        <dependency>
            <groupId>org.awaitility</groupId>
            <artifactId>awaitility</artifactId>

@@ -5,6 +5,7 @@ import com.cameleer3.server.app.config.IngestionConfig;
|
||||
import org.springframework.boot.SpringApplication;
|
||||
import org.springframework.boot.autoconfigure.SpringBootApplication;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.scheduling.annotation.EnableAsync;
|
||||
import org.springframework.scheduling.annotation.EnableScheduling;
|
||||
|
||||
/**
|
||||
@@ -16,6 +17,7 @@ import org.springframework.scheduling.annotation.EnableScheduling;
|
||||
"com.cameleer3.server.app",
|
||||
"com.cameleer3.server.core"
|
||||
})
|
||||
@EnableAsync
|
||||
@EnableScheduling
|
||||
@EnableConfigurationProperties({IngestionConfig.class, AgentRegistryConfig.class})
|
||||
public class Cameleer3ServerApplication {
|
||||
|
||||
@@ -39,7 +39,7 @@ public class AgentLifecycleMonitor {
|
||||
// Snapshot states before lifecycle check
|
||||
Map<String, AgentState> statesBefore = new HashMap<>();
|
||||
for (AgentInfo agent : registryService.findAll()) {
|
||||
statesBefore.put(agent.id(), agent.state());
|
||||
statesBefore.put(agent.instanceId(), agent.state());
|
||||
}
|
||||
|
||||
registryService.checkLifecycle();
|
||||
@@ -47,12 +47,12 @@ public class AgentLifecycleMonitor {
|
||||
|
||||
// Detect transitions and record events
|
||||
for (AgentInfo agent : registryService.findAll()) {
|
||||
AgentState before = statesBefore.get(agent.id());
|
||||
AgentState before = statesBefore.get(agent.instanceId());
|
||||
if (before != null && before != agent.state()) {
|
||||
String eventType = mapTransitionEvent(before, agent.state());
|
||||
if (eventType != null) {
|
||||
agentEventService.recordEvent(agent.id(), agent.application(), eventType,
|
||||
agent.name() + " " + before + " -> " + agent.state());
|
||||
agentEventService.recordEvent(agent.instanceId(), agent.applicationId(), eventType,
|
||||
agent.displayName() + " " + before + " -> " + agent.state());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,26 @@
|
||||
package com.cameleer3.server.app.analytics;
|
||||
|
||||
import com.cameleer3.server.app.storage.ClickHouseUsageTracker;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
|
||||
public class UsageFlushScheduler {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(UsageFlushScheduler.class);
|
||||
|
||||
private final ClickHouseUsageTracker tracker;
|
||||
|
||||
public UsageFlushScheduler(ClickHouseUsageTracker tracker) {
|
||||
this.tracker = tracker;
|
||||
}
|
||||
|
||||
@Scheduled(fixedDelayString = "${cameleer.usage.flush-interval-ms:5000}")
|
||||
public void flush() {
|
||||
try {
|
||||
tracker.flush();
|
||||
} catch (Exception e) {
|
||||
log.warn("Usage event flush failed: {}", e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,88 @@
|
||||
package com.cameleer3.server.app.analytics;
|
||||
|
||||
import com.cameleer3.server.core.analytics.UsageEvent;
|
||||
import com.cameleer3.server.core.analytics.UsageTracker;
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import jakarta.servlet.http.HttpServletResponse;
|
||||
import org.springframework.security.core.Authentication;
|
||||
import org.springframework.security.core.context.SecurityContextHolder;
|
||||
import org.springframework.web.servlet.HandlerInterceptor;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
/**
|
||||
* Tracks authenticated UI user requests for usage analytics.
|
||||
* Skips agent requests, health checks, data ingestion, and static assets.
|
||||
*/
|
||||
public class UsageTrackingInterceptor implements HandlerInterceptor {
|
||||
|
||||
private static final String START_ATTR = "usage.startNanos";
|
||||
|
||||
// Patterns for normalizing dynamic path segments
|
||||
private static final Pattern EXCHANGE_ID = Pattern.compile(
|
||||
"/[A-F0-9]{15,}-[A-F0-9]{16}(?=/|$)", Pattern.CASE_INSENSITIVE);
|
||||
private static final Pattern UUID = Pattern.compile(
|
||||
"/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}(?=/|$)", Pattern.CASE_INSENSITIVE);
|
||||
private static final Pattern HEX_HASH = Pattern.compile(
|
||||
"/[0-9a-f]{32,64}(?=/|$)", Pattern.CASE_INSENSITIVE);
|
||||
private static final Pattern NUMERIC_ID = Pattern.compile(
|
||||
"(?<=/)(\\d{2,})(?=/|$)");
|
||||
// Agent instance IDs like "cameleer3-sample-598867949d-g7nt4-1"
|
||||
private static final Pattern INSTANCE_ID = Pattern.compile(
|
||||
"(?<=/agents/)[^/]+(?=/)", Pattern.CASE_INSENSITIVE);
|
||||
|
||||
private final UsageTracker usageTracker;
|
||||
|
||||
public UsageTrackingInterceptor(UsageTracker usageTracker) {
|
||||
this.usageTracker = usageTracker;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean preHandle(HttpServletRequest request, HttpServletResponse response, Object handler) {
|
||||
request.setAttribute(START_ATTR, System.nanoTime());
|
||||
return true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void afterCompletion(HttpServletRequest request, HttpServletResponse response,
|
||||
Object handler, Exception ex) {
|
||||
String username = extractUsername();
|
||||
if (username == null) return; // unauthenticated or agent request
|
||||
|
||||
Long startNanos = (Long) request.getAttribute(START_ATTR);
|
||||
long durationMs = startNanos != null ? (System.nanoTime() - startNanos) / 1_000_000 : 0;
|
||||
|
||||
String path = request.getRequestURI();
|
||||
String queryString = request.getQueryString();
|
||||
|
||||
usageTracker.track(new UsageEvent(
|
||||
Instant.now(),
|
||||
username,
|
||||
request.getMethod(),
|
||||
path,
|
||||
normalizePath(path),
|
||||
response.getStatus(),
|
||||
durationMs,
|
||||
queryString
|
||||
));
|
||||
}
|
||||
|
||||
private String extractUsername() {
|
||||
Authentication auth = SecurityContextHolder.getContext().getAuthentication();
|
||||
if (auth == null || auth.getName() == null) return null;
|
||||
String name = auth.getName();
|
||||
// Only track UI users (user:admin), not agents
|
||||
if (!name.startsWith("user:")) return null;
|
||||
return name;
|
||||
}
|
||||
|
||||
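    /**
     * Example (hypothetical path, not taken from real traffic):
     *   "/api/v1/agents/cameleer3-sample-598867949d-g7nt4-1/routes/42"
     * normalizes to
     *   "/api/v1/agents/{id}/routes/{id}".
     */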
static String normalizePath(String path) {
|
||||
String normalized = EXCHANGE_ID.matcher(path).replaceAll("/{id}");
|
||||
normalized = UUID.matcher(normalized).replaceAll("/{id}");
|
||||
normalized = HEX_HASH.matcher(normalized).replaceAll("/{hash}");
|
||||
normalized = INSTANCE_ID.matcher(normalized).replaceAll("{id}");
|
||||
normalized = NUMERIC_ID.matcher(normalized).replaceAll("{id}");
|
||||
return normalized;
|
||||
}
|
||||
}
|
||||
@@ -3,11 +3,13 @@ package com.cameleer3.server.app.config;
|
||||
import com.cameleer3.server.core.agent.AgentEventRepository;
|
||||
import com.cameleer3.server.core.agent.AgentEventService;
|
||||
import com.cameleer3.server.core.agent.AgentRegistryService;
|
||||
import com.cameleer3.server.core.agent.RouteStateRegistry;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
/**
|
||||
* Creates the {@link AgentRegistryService} and {@link AgentEventService} beans.
|
||||
* Creates the {@link AgentRegistryService}, {@link AgentEventService},
|
||||
* and {@link RouteStateRegistry} beans.
|
||||
* <p>
|
||||
* Follows the established pattern: core module plain class, app module bean config.
|
||||
*/
|
||||
@@ -27,4 +29,9 @@ public class AgentRegistryBeanConfig {
|
||||
public AgentEventService agentEventService(AgentEventRepository repository) {
|
||||
return new AgentEventService(repository);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public RouteStateRegistry routeStateRegistry() {
|
||||
return new RouteStateRegistry();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,54 @@
|
||||
package com.cameleer3.server.app.config;
|
||||
|
||||
import com.zaxxer.hikari.HikariDataSource;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
|
||||
import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
|
||||
import org.springframework.boot.context.properties.EnableConfigurationProperties;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.context.annotation.Primary;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
|
||||
import javax.sql.DataSource;
|
||||
|
||||
@Configuration
|
||||
@EnableConfigurationProperties(ClickHouseProperties.class)
|
||||
@ConditionalOnProperty(name = "clickhouse.enabled", havingValue = "true")
|
||||
public class ClickHouseConfig {
|
||||
|
||||
/**
|
||||
* Explicit primary PG DataSource. Required because adding a second DataSource
|
||||
* (ClickHouse) prevents Spring Boot auto-configuration from creating the default one.
|
||||
*/
|
||||
@Bean
|
||||
@Primary
|
||||
public DataSource dataSource(DataSourceProperties properties) {
|
||||
return properties.initializeDataSourceBuilder().build();
|
||||
}
|
||||
|
||||
@Bean
|
||||
@Primary
|
||||
public JdbcTemplate jdbcTemplate(@Qualifier("dataSource") DataSource dataSource) {
|
||||
return new JdbcTemplate(dataSource);
|
||||
}
|
||||
|
||||
@Bean(name = "clickHouseDataSource")
|
||||
public DataSource clickHouseDataSource(ClickHouseProperties props) {
|
||||
HikariDataSource ds = new HikariDataSource();
|
||||
ds.setJdbcUrl(props.getUrl());
|
||||
ds.setUsername(props.getUsername());
|
||||
ds.setPassword(props.getPassword());
|
||||
ds.setMaximumPoolSize(props.getPoolSize());
|
||||
ds.setMinimumIdle(5);
|
||||
ds.setConnectionTimeout(5000);
|
||||
ds.setPoolName("clickhouse-pool");
|
||||
return ds;
|
||||
}
|
||||
|
||||
@Bean(name = "clickHouseJdbcTemplate")
|
||||
public JdbcTemplate clickHouseJdbcTemplate(
|
||||
@Qualifier("clickHouseDataSource") DataSource ds) {
|
||||
return new JdbcTemplate(ds);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,24 @@
|
||||
package com.cameleer3.server.app.config;
|
||||
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
|
||||
@ConfigurationProperties(prefix = "clickhouse")
|
||||
public class ClickHouseProperties {
|
||||
|
||||
private String url = "jdbc:clickhouse://localhost:8123/cameleer";
|
||||
private String username = "default";
|
||||
private String password = "";
|
||||
private int poolSize = 50;
|
||||
|
||||
public String getUrl() { return url; }
|
||||
public void setUrl(String url) { this.url = url; }
|
||||
|
||||
public String getUsername() { return username; }
|
||||
public void setUsername(String username) { this.username = username; }
|
||||
|
||||
public String getPassword() { return password; }
|
||||
public void setPassword(String password) { this.password = password; }
|
||||
|
||||
public int getPoolSize() { return poolSize; }
|
||||
public void setPoolSize(int poolSize) { this.poolSize = poolSize; }
|
||||
}
|
||||
@@ -0,0 +1,55 @@
|
||||
package com.cameleer3.server.app.config;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
|
||||
import org.springframework.boot.context.event.ApplicationReadyEvent;
|
||||
import org.springframework.context.event.EventListener;
|
||||
import org.springframework.core.io.Resource;
|
||||
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
|
||||
@Component
|
||||
@ConditionalOnProperty(name = "clickhouse.enabled", havingValue = "true")
|
||||
public class ClickHouseSchemaInitializer {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(ClickHouseSchemaInitializer.class);
|
||||
|
||||
private final JdbcTemplate clickHouseJdbc;
|
||||
|
||||
public ClickHouseSchemaInitializer(
|
||||
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc) {
|
||||
this.clickHouseJdbc = clickHouseJdbc;
|
||||
}
|
||||
|
||||
@EventListener(ApplicationReadyEvent.class)
|
||||
public void initializeSchema() {
|
||||
try {
|
||||
PathMatchingResourcePatternResolver resolver = new PathMatchingResourcePatternResolver();
|
||||
Resource script = resolver.getResource("classpath:clickhouse/init.sql");
|
||||
|
||||
String sql = script.getContentAsString(StandardCharsets.UTF_8);
|
||||
log.info("Executing ClickHouse schema: {}", script.getFilename());
|
||||
for (String statement : sql.split(";")) {
|
||||
String trimmed = statement.trim();
|
||||
// Skip empty segments and comment-only segments
|
||||
String withoutComments = trimmed.lines()
|
||||
.filter(line -> !line.stripLeading().startsWith("--"))
|
||||
.map(String::trim)
|
||||
.filter(line -> !line.isEmpty())
|
||||
.reduce("", (a, b) -> a + b);
|
||||
if (!withoutComments.isEmpty()) {
|
||||
clickHouseJdbc.execute(trimmed);
|
||||
}
|
||||
}
|
||||
|
||||
log.info("ClickHouse schema initialization complete");
|
||||
} catch (Exception e) {
|
||||
log.error("ClickHouse schema initialization failed — server will continue but ClickHouse features may not work", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,7 +1,11 @@
|
||||
package com.cameleer3.server.app.config;
|
||||
|
||||
import com.cameleer3.server.core.ingestion.BufferedLogEntry;
|
||||
import com.cameleer3.server.core.ingestion.ChunkAccumulator;
|
||||
import com.cameleer3.server.core.ingestion.MergedExecution;
|
||||
import com.cameleer3.server.core.ingestion.WriteBuffer;
|
||||
import com.cameleer3.server.core.storage.model.MetricsSnapshot;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
@@ -19,4 +23,22 @@ public class IngestionBeanConfig {
|
||||
public WriteBuffer<MetricsSnapshot> metricsBuffer(IngestionConfig config) {
|
||||
return new WriteBuffer<>(config.getBufferCapacity());
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnProperty(name = "clickhouse.enabled", havingValue = "true")
|
||||
public WriteBuffer<MergedExecution> executionBuffer(IngestionConfig config) {
|
||||
return new WriteBuffer<>(config.getBufferCapacity());
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnProperty(name = "clickhouse.enabled", havingValue = "true")
|
||||
public WriteBuffer<ChunkAccumulator.ProcessorBatch> processorBatchBuffer(IngestionConfig config) {
|
||||
return new WriteBuffer<>(config.getBufferCapacity());
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnProperty(name = "clickhouse.enabled", havingValue = "true")
|
||||
public WriteBuffer<BufferedLogEntry> logBuffer(IngestionConfig config) {
|
||||
return new WriteBuffer<>(config.getBufferCapacity());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,68 @@
|
||||
package com.cameleer3.server.app.config;
|
||||
|
||||
import com.cameleer3.server.core.license.LicenseGate;
|
||||
import com.cameleer3.server.core.license.LicenseInfo;
|
||||
import com.cameleer3.server.core.license.LicenseValidator;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
|
||||
@Configuration
|
||||
public class LicenseBeanConfig {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(LicenseBeanConfig.class);
|
||||
|
||||
@Value("${license.token:}")
|
||||
private String licenseToken;
|
||||
|
||||
@Value("${license.file:}")
|
||||
private String licenseFile;
|
||||
|
||||
@Value("${license.public-key:}")
|
||||
private String licensePublicKey;
|
||||
|
||||
@Bean
|
||||
public LicenseGate licenseGate() {
|
||||
LicenseGate gate = new LicenseGate();
|
||||
|
||||
String token = resolveLicenseToken();
|
||||
if (token == null || token.isBlank()) {
|
||||
log.info("No license configured — running in open mode (all features enabled)");
|
||||
return gate;
|
||||
}
|
||||
|
||||
if (licensePublicKey == null || licensePublicKey.isBlank()) {
|
||||
log.warn("License token provided but no public key configured (CAMELEER_LICENSE_PUBLIC_KEY). Running in open mode.");
|
||||
return gate;
|
||||
}
|
||||
|
||||
try {
|
||||
LicenseValidator validator = new LicenseValidator(licensePublicKey);
|
||||
LicenseInfo info = validator.validate(token);
|
||||
gate.load(info);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to validate license: {}. Running in open mode.", e.getMessage());
|
||||
}
|
||||
|
||||
return gate;
|
||||
}
|
||||
|
||||
private String resolveLicenseToken() {
|
||||
if (licenseToken != null && !licenseToken.isBlank()) {
|
||||
return licenseToken;
|
||||
}
|
||||
if (licenseFile != null && !licenseFile.isBlank()) {
|
||||
try {
|
||||
return Files.readString(Path.of(licenseFile)).trim();
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to read license file {}: {}", licenseFile, e.getMessage());
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
package com.cameleer3.server.app.config;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.opensearch.client.RestClient;
|
||||
import org.opensearch.client.json.jackson.JacksonJsonpMapper;
|
||||
import org.opensearch.client.opensearch.OpenSearchClient;
|
||||
import org.opensearch.client.transport.rest_client.RestClientTransport;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
|
||||
@Configuration
|
||||
public class OpenSearchConfig {
|
||||
|
||||
@Value("${opensearch.url:http://localhost:9200}")
|
||||
private String opensearchUrl;
|
||||
|
||||
@Bean(destroyMethod = "close")
|
||||
public RestClient opensearchRestClient() {
|
||||
return RestClient.builder(HttpHost.create(opensearchUrl)).build();
|
||||
}
|
||||
|
||||
@Bean
|
||||
public OpenSearchClient openSearchClient(RestClient restClient) {
|
||||
var transport = new RestClientTransport(restClient, new JacksonJsonpMapper());
|
||||
return new OpenSearchClient(transport);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,27 @@
|
||||
package com.cameleer3.server.app.config;
|
||||
|
||||
import com.cameleer3.server.app.storage.PostgresClaimMappingRepository;
|
||||
import com.cameleer3.server.core.rbac.ClaimMappingRepository;
|
||||
import com.cameleer3.server.core.rbac.ClaimMappingService;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
|
||||
/**
|
||||
* Creates the {@link ClaimMappingRepository} and {@link ClaimMappingService} beans.
|
||||
* <p>
|
||||
* Follows the established pattern: core module plain class, app module bean config.
|
||||
*/
|
||||
@Configuration
|
||||
public class RbacBeanConfig {
|
||||
|
||||
@Bean
|
||||
public ClaimMappingRepository claimMappingRepository(JdbcTemplate jdbcTemplate) {
|
||||
return new PostgresClaimMappingRepository(jdbcTemplate);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ClaimMappingService claimMappingService() {
|
||||
return new ClaimMappingService();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,77 @@
|
||||
package com.cameleer3.server.app.config;
|
||||
|
||||
import com.cameleer3.server.app.storage.PostgresAppRepository;
|
||||
import com.cameleer3.server.app.storage.PostgresAppVersionRepository;
|
||||
import com.cameleer3.server.app.storage.PostgresDeploymentRepository;
|
||||
import com.cameleer3.server.app.storage.PostgresEnvironmentRepository;
|
||||
import com.cameleer3.server.core.runtime.AppRepository;
|
||||
import com.cameleer3.server.core.runtime.AppService;
|
||||
import com.cameleer3.server.core.runtime.AppVersionRepository;
|
||||
import com.cameleer3.server.core.runtime.DeploymentRepository;
|
||||
import com.cameleer3.server.core.runtime.DeploymentService;
|
||||
import com.cameleer3.server.core.runtime.EnvironmentRepository;
|
||||
import com.cameleer3.server.core.runtime.EnvironmentService;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
|
||||
|
||||
import java.util.concurrent.Executor;
|
||||
|
||||
/**
|
||||
* Creates runtime management beans: repositories, services, and async executor.
|
||||
* <p>
|
||||
* Follows the established pattern: core module plain class, app module bean config.
|
||||
*/
|
||||
@Configuration
|
||||
public class RuntimeBeanConfig {
|
||||
|
||||
@Bean
|
||||
public EnvironmentRepository environmentRepository(JdbcTemplate jdbc, ObjectMapper objectMapper) {
|
||||
return new PostgresEnvironmentRepository(jdbc, objectMapper);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public AppRepository appRepository(JdbcTemplate jdbc, ObjectMapper objectMapper) {
|
||||
return new PostgresAppRepository(jdbc, objectMapper);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public AppVersionRepository appVersionRepository(JdbcTemplate jdbc) {
|
||||
return new PostgresAppVersionRepository(jdbc);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public DeploymentRepository deploymentRepository(JdbcTemplate jdbc, ObjectMapper objectMapper) {
|
||||
return new PostgresDeploymentRepository(jdbc, objectMapper);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public EnvironmentService environmentService(EnvironmentRepository repo) {
|
||||
return new EnvironmentService(repo);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public AppService appService(AppRepository appRepo, AppVersionRepository versionRepo,
|
||||
@Value("${cameleer.runtime.jar-storage-path:/data/jars}") String jarStoragePath) {
|
||||
return new AppService(appRepo, versionRepo, jarStoragePath);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public DeploymentService deploymentService(DeploymentRepository deployRepo, AppService appService, EnvironmentService envService) {
|
||||
return new DeploymentService(deployRepo, appService, envService);
|
||||
}
|
||||
|
||||
@Bean(name = "deploymentTaskExecutor")
|
||||
public Executor deploymentTaskExecutor() {
|
||||
ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
|
||||
executor.setCorePoolSize(4);
|
||||
executor.setMaxPoolSize(4);
|
||||
executor.setQueueCapacity(25);
|
||||
executor.setThreadNamePrefix("deploy-");
|
||||
executor.initialize();
|
||||
return executor;
|
||||
}
|
||||
}
|
||||
@@ -1,16 +1,37 @@
|
||||
package com.cameleer3.server.app.config;
|
||||
|
||||
import com.cameleer3.server.app.search.ClickHouseLogStore;
|
||||
import com.cameleer3.server.app.storage.ClickHouseAgentEventRepository;
|
||||
import com.cameleer3.server.app.storage.ClickHouseUsageTracker;
|
||||
import com.cameleer3.server.app.storage.ClickHouseDiagramStore;
|
||||
import com.cameleer3.server.app.storage.ClickHouseMetricsQueryStore;
|
||||
import com.cameleer3.server.app.storage.ClickHouseMetricsStore;
|
||||
import com.cameleer3.server.app.storage.ClickHouseStatsStore;
|
||||
import com.cameleer3.server.core.admin.AuditRepository;
|
||||
import com.cameleer3.server.core.admin.AuditService;
|
||||
import com.cameleer3.server.core.agent.AgentEventRepository;
|
||||
import com.cameleer3.server.core.agent.AgentInfo;
|
||||
import com.cameleer3.server.core.agent.AgentRegistryService;
|
||||
import com.cameleer3.server.core.detail.DetailService;
|
||||
import com.cameleer3.server.core.indexing.SearchIndexer;
|
||||
import com.cameleer3.server.app.ingestion.ExecutionFlushScheduler;
|
||||
import com.cameleer3.server.app.search.ClickHouseSearchIndex;
|
||||
import com.cameleer3.server.app.storage.ClickHouseExecutionStore;
|
||||
import com.cameleer3.server.core.ingestion.BufferedLogEntry;
|
||||
import com.cameleer3.server.core.ingestion.ChunkAccumulator;
|
||||
import com.cameleer3.server.core.ingestion.IngestionService;
|
||||
import com.cameleer3.server.core.ingestion.MergedExecution;
|
||||
import com.cameleer3.server.core.ingestion.WriteBuffer;
|
||||
import com.cameleer3.server.core.storage.*;
|
||||
import com.cameleer3.server.core.storage.LogIndex;
|
||||
import com.cameleer3.server.core.storage.StatsStore;
|
||||
import com.cameleer3.server.core.storage.model.MetricsSnapshot;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
|
||||
import org.springframework.context.annotation.Bean;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
|
||||
@Configuration
|
||||
public class StorageBeanConfig {
|
||||
@@ -22,8 +43,8 @@ public class StorageBeanConfig {
|
||||
|
||||
@Bean(destroyMethod = "shutdown")
|
||||
public SearchIndexer searchIndexer(ExecutionStore executionStore, SearchIndex searchIndex,
|
||||
@Value("${opensearch.debounce-ms:2000}") long debounceMs,
|
||||
@Value("${opensearch.queue-size:10000}") int queueSize) {
|
||||
@Value("${cameleer.indexer.debounce-ms:2000}") long debounceMs,
|
||||
@Value("${cameleer.indexer.queue-size:10000}") int queueSize) {
|
||||
return new SearchIndexer(executionStore, searchIndex, debounceMs, queueSize);
|
||||
}
|
||||
|
||||
@@ -41,4 +62,128 @@ public class StorageBeanConfig {
|
||||
return new IngestionService(executionStore, diagramStore, metricsBuffer,
|
||||
searchIndexer::onExecutionUpdated, bodySizeLimit);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public MetricsStore clickHouseMetricsStore(
|
||||
TenantProperties tenantProperties,
|
||||
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc) {
|
||||
return new ClickHouseMetricsStore(tenantProperties.getId(), clickHouseJdbc);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public MetricsQueryStore clickHouseMetricsQueryStore(
|
||||
TenantProperties tenantProperties,
|
||||
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc) {
|
||||
return new ClickHouseMetricsQueryStore(tenantProperties.getId(), clickHouseJdbc);
|
||||
}
|
||||
|
||||
// ── Execution Store ──────────────────────────────────────────────────
|
||||
|
||||
@Bean
|
||||
public ClickHouseExecutionStore clickHouseExecutionStore(
|
||||
TenantProperties tenantProperties,
|
||||
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc) {
|
||||
return new ClickHouseExecutionStore(tenantProperties.getId(), clickHouseJdbc);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ChunkAccumulator chunkAccumulator(
|
||||
TenantProperties tenantProperties,
|
||||
WriteBuffer<MergedExecution> executionBuffer,
|
||||
WriteBuffer<ChunkAccumulator.ProcessorBatch> processorBatchBuffer,
|
||||
DiagramStore diagramStore,
|
||||
AgentRegistryService registryService) {
|
||||
return new ChunkAccumulator(
|
||||
tenantProperties.getId(),
|
||||
executionBuffer::offerOrWarn,
|
||||
processorBatchBuffer::offerOrWarn,
|
||||
diagramStore,
|
||||
java.time.Duration.ofMinutes(5),
|
||||
instanceId -> {
|
||||
AgentInfo agent = registryService.findById(instanceId);
|
||||
return agent != null && agent.environmentId() != null
|
||||
? agent.environmentId() : "default";
|
||||
});
|
||||
}
|
||||
|
||||
@Bean
|
||||
public ExecutionFlushScheduler executionFlushScheduler(
|
||||
WriteBuffer<MergedExecution> executionBuffer,
|
||||
WriteBuffer<ChunkAccumulator.ProcessorBatch> processorBatchBuffer,
|
||||
WriteBuffer<BufferedLogEntry> logBuffer,
|
||||
ClickHouseExecutionStore executionStore,
|
||||
ClickHouseLogStore logStore,
|
||||
ChunkAccumulator accumulator,
|
||||
IngestionConfig config) {
|
||||
return new ExecutionFlushScheduler(executionBuffer, processorBatchBuffer,
|
||||
logBuffer, executionStore, logStore, accumulator, config);
|
||||
}
|
||||
|
||||
@Bean
|
||||
public SearchIndex clickHouseSearchIndex(
|
||||
TenantProperties tenantProperties,
|
||||
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc) {
|
||||
return new ClickHouseSearchIndex(tenantProperties.getId(), clickHouseJdbc);
|
||||
}
|
||||
|
||||
// ── ClickHouse Stats Store ─────────────────────────────────────────
|
||||
|
||||
@Bean
|
||||
public StatsStore clickHouseStatsStore(
|
||||
TenantProperties tenantProperties,
|
||||
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc) {
|
||||
return new ClickHouseStatsStore(tenantProperties.getId(), clickHouseJdbc);
|
||||
}
|
||||
|
||||
// ── ClickHouse Diagram Store ──────────────────────────────────────
|
||||
|
||||
@Bean
|
||||
public DiagramStore clickHouseDiagramStore(
|
||||
TenantProperties tenantProperties,
|
||||
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc) {
|
||||
return new ClickHouseDiagramStore(tenantProperties.getId(), clickHouseJdbc);
|
||||
}
|
||||
|
||||
// ── ClickHouse Agent Event Repository ─────────────────────────────
|
||||
|
||||
@Bean
|
||||
public AgentEventRepository clickHouseAgentEventRepository(
|
||||
TenantProperties tenantProperties,
|
||||
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc) {
|
||||
return new ClickHouseAgentEventRepository(tenantProperties.getId(), clickHouseJdbc);
|
||||
}
|
||||
|
||||
// ── ClickHouse Log Store ──────────────────────────────────────────
|
||||
|
||||
@Bean
|
||||
public ClickHouseLogStore clickHouseLogStore(
|
||||
TenantProperties tenantProperties,
|
||||
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc) {
|
||||
return new ClickHouseLogStore(tenantProperties.getId(), clickHouseJdbc);
|
||||
}
|
||||
|
||||
// ── Usage Analytics ──────────────────────────────────────────────
|
||||
|
||||
@Bean
|
||||
@ConditionalOnProperty(name = "clickhouse.enabled", havingValue = "true")
|
||||
public ClickHouseUsageTracker clickHouseUsageTracker(
|
||||
TenantProperties tenantProperties,
|
||||
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc) {
|
||||
return new ClickHouseUsageTracker(tenantProperties.getId(), clickHouseJdbc,
|
||||
new com.cameleer3.server.core.ingestion.WriteBuffer<>(5000));
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnProperty(name = "clickhouse.enabled", havingValue = "true")
|
||||
public com.cameleer3.server.app.analytics.UsageTrackingInterceptor usageTrackingInterceptor(
|
||||
ClickHouseUsageTracker usageTracker) {
|
||||
return new com.cameleer3.server.app.analytics.UsageTrackingInterceptor(usageTracker);
|
||||
}
|
||||
|
||||
@Bean
|
||||
@ConditionalOnProperty(name = "clickhouse.enabled", havingValue = "true")
|
||||
public com.cameleer3.server.app.analytics.UsageFlushScheduler usageFlushScheduler(
|
||||
ClickHouseUsageTracker usageTracker) {
|
||||
return new com.cameleer3.server.app.analytics.UsageFlushScheduler(usageTracker);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,19 @@
|
||||
package com.cameleer3.server.app.config;
|
||||
|
||||
import org.springframework.boot.context.properties.ConfigurationProperties;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
@Component
|
||||
@ConfigurationProperties(prefix = "cameleer.tenant")
|
||||
public class TenantProperties {
|
||||
|
||||
private String id = "default";
|
||||
|
||||
public String getId() {
|
||||
return id;
|
||||
}
|
||||
|
||||
public void setId(String id) {
|
||||
this.id = id;
|
||||
}
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
package com.cameleer3.server.app.config;
|
||||
|
||||
import com.cameleer3.server.app.analytics.UsageTrackingInterceptor;
|
||||
import com.cameleer3.server.app.interceptor.AuditInterceptor;
|
||||
import com.cameleer3.server.app.interceptor.ProtocolVersionInterceptor;
|
||||
import org.springframework.context.annotation.Configuration;
|
||||
@@ -14,11 +15,14 @@ public class WebConfig implements WebMvcConfigurer {
|
||||
|
||||
private final ProtocolVersionInterceptor protocolVersionInterceptor;
|
||||
private final AuditInterceptor auditInterceptor;
|
||||
private final UsageTrackingInterceptor usageTrackingInterceptor;
|
||||
|
||||
public WebConfig(ProtocolVersionInterceptor protocolVersionInterceptor,
|
||||
AuditInterceptor auditInterceptor) {
|
||||
AuditInterceptor auditInterceptor,
|
||||
@org.springframework.lang.Nullable UsageTrackingInterceptor usageTrackingInterceptor) {
|
||||
this.protocolVersionInterceptor = protocolVersionInterceptor;
|
||||
this.auditInterceptor = auditInterceptor;
|
||||
this.usageTrackingInterceptor = usageTrackingInterceptor;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -35,6 +39,18 @@ public class WebConfig implements WebMvcConfigurer {
|
||||
"/api/v1/agents/*/refresh"
|
||||
);
|
||||
|
||||
// Usage analytics: tracks authenticated UI user requests
|
||||
if (usageTrackingInterceptor != null) {
|
||||
registry.addInterceptor(usageTrackingInterceptor)
|
||||
.addPathPatterns("/api/v1/**")
|
||||
.excludePathPatterns(
|
||||
"/api/v1/data/**",
|
||||
"/api/v1/agents/*/heartbeat",
|
||||
"/api/v1/agents/*/events",
|
||||
"/api/v1/health"
|
||||
);
|
||||
}
|
||||
|
||||
// Safety-net audit: catches any unaudited POST/PUT/DELETE
|
||||
registry.addInterceptor(auditInterceptor)
|
||||
.addPathPatterns("/api/v1/**")
|
||||
|
||||
@@ -3,6 +3,7 @@ package com.cameleer3.server.app.controller;
|
||||
import com.cameleer3.server.app.agent.SseConnectionManager;
|
||||
import com.cameleer3.server.app.dto.CommandAckRequest;
|
||||
import com.cameleer3.server.app.dto.CommandBroadcastResponse;
|
||||
import com.cameleer3.server.app.dto.CommandGroupResponse;
|
||||
import com.cameleer3.server.app.dto.CommandRequest;
|
||||
import com.cameleer3.server.app.dto.CommandSingleResponse;
|
||||
import com.cameleer3.server.app.dto.ReplayRequest;
|
||||
@@ -31,6 +32,7 @@ import org.springframework.web.bind.annotation.PathVariable;
|
||||
import org.springframework.web.bind.annotation.PostMapping;
|
||||
import org.springframework.web.bind.annotation.RequestBody;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RequestParam;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
import org.springframework.web.server.ResponseStatusException;
|
||||
|
||||
@@ -109,32 +111,61 @@ public class AgentCommandController {
|
||||
|
||||
@PostMapping("/groups/{group}/commands")
|
||||
@Operation(summary = "Send command to all agents in a group",
|
||||
description = "Sends a command to all LIVE agents in the specified group")
|
||||
@ApiResponse(responseCode = "202", description = "Commands accepted")
|
||||
description = "Sends a command to all LIVE agents in the specified group and waits for responses")
|
||||
@ApiResponse(responseCode = "200", description = "Commands dispatched and responses collected")
|
||||
@ApiResponse(responseCode = "400", description = "Invalid command payload")
|
||||
public ResponseEntity<CommandBroadcastResponse> sendGroupCommand(@PathVariable String group,
|
||||
@RequestBody CommandRequest request,
|
||||
HttpServletRequest httpRequest) throws JsonProcessingException {
|
||||
public ResponseEntity<CommandGroupResponse> sendGroupCommand(@PathVariable String group,
|
||||
@RequestParam(required = false) String environment,
|
||||
@RequestBody CommandRequest request,
|
||||
HttpServletRequest httpRequest) throws JsonProcessingException {
|
||||
CommandType type = mapCommandType(request.type());
|
||||
String payloadJson = request.payload() != null ? objectMapper.writeValueAsString(request.payload()) : "{}";
|
||||
|
||||
List<AgentInfo> agents = registryService.findAll().stream()
|
||||
.filter(a -> a.state() == AgentState.LIVE)
|
||||
.filter(a -> group.equals(a.application()))
|
||||
.toList();
|
||||
Map<String, CompletableFuture<CommandReply>> futures =
|
||||
registryService.addGroupCommandWithReplies(group, environment, type, payloadJson);
|
||||
|
||||
List<String> commandIds = new ArrayList<>();
|
||||
for (AgentInfo agent : agents) {
|
||||
AgentCommand command = registryService.addCommand(agent.id(), type, payloadJson);
|
||||
commandIds.add(command.id());
|
||||
if (futures.isEmpty()) {
|
||||
auditService.log("broadcast_group_command", AuditCategory.AGENT, group,
|
||||
java.util.Map.of("type", request.type(), "agentCount", 0),
|
||||
AuditResult.SUCCESS, httpRequest);
|
||||
return ResponseEntity.ok(new CommandGroupResponse(true, 0, 0, List.of(), List.of()));
|
||||
}
|
||||
|
||||
// Wait with shared 10-second deadline
|
||||
long deadline = System.currentTimeMillis() + 10_000;
|
||||
List<CommandGroupResponse.AgentResponse> responses = new ArrayList<>();
|
||||
List<String> timedOut = new ArrayList<>();
|
||||
|
||||
for (var entry : futures.entrySet()) {
|
||||
long remaining = deadline - System.currentTimeMillis();
|
||||
if (remaining <= 0) {
|
||||
timedOut.add(entry.getKey());
|
||||
entry.getValue().cancel(false);
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
CommandReply reply = entry.getValue().get(remaining, TimeUnit.MILLISECONDS);
|
||||
responses.add(new CommandGroupResponse.AgentResponse(
|
||||
entry.getKey(), reply.status(), reply.message()));
|
||||
} catch (TimeoutException e) {
|
||||
timedOut.add(entry.getKey());
|
||||
entry.getValue().cancel(false);
|
||||
} catch (Exception e) {
|
||||
responses.add(new CommandGroupResponse.AgentResponse(
|
||||
entry.getKey(), "ERROR", e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
boolean allSuccess = timedOut.isEmpty() &&
|
||||
responses.stream().allMatch(r -> "SUCCESS".equals(r.status()));
|
||||
|
||||
auditService.log("broadcast_group_command", AuditCategory.AGENT, group,
|
||||
java.util.Map.of("type", request.type(), "agentCount", agents.size()),
|
||||
java.util.Map.of("type", request.type(), "agentCount", futures.size(),
|
||||
"responded", responses.size(), "timedOut", timedOut.size()),
|
||||
AuditResult.SUCCESS, httpRequest);
|
||||
|
||||
return ResponseEntity.status(HttpStatus.ACCEPTED)
|
||||
.body(new CommandBroadcastResponse(commandIds, agents.size()));
|
||||
return ResponseEntity.ok(new CommandGroupResponse(
|
||||
allSuccess, futures.size(), responses.size(), responses, timedOut));
|
||||
}
|
||||
|
||||
@PostMapping("/commands")
|
||||
@@ -142,16 +173,22 @@ public class AgentCommandController {
|
||||
description = "Sends a command to all agents currently in LIVE state")
|
||||
@ApiResponse(responseCode = "202", description = "Commands accepted")
|
||||
@ApiResponse(responseCode = "400", description = "Invalid command payload")
|
||||
public ResponseEntity<CommandBroadcastResponse> broadcastCommand(@RequestBody CommandRequest request,
|
||||
public ResponseEntity<CommandBroadcastResponse> broadcastCommand(@RequestParam(required = false) String environment,
|
||||
@RequestBody CommandRequest request,
|
||||
HttpServletRequest httpRequest) throws JsonProcessingException {
|
||||
CommandType type = mapCommandType(request.type());
|
||||
String payloadJson = request.payload() != null ? objectMapper.writeValueAsString(request.payload()) : "{}";
|
||||
|
||||
List<AgentInfo> liveAgents = registryService.findByState(AgentState.LIVE);
|
||||
if (environment != null) {
|
||||
liveAgents = liveAgents.stream()
|
||||
.filter(a -> environment.equals(a.environmentId()))
|
||||
.toList();
|
||||
}
|
||||
|
||||
List<String> commandIds = new ArrayList<>();
|
||||
for (AgentInfo agent : liveAgents) {
|
||||
AgentCommand command = registryService.addCommand(agent.id(), type, payloadJson);
|
||||
AgentCommand command = registryService.addCommand(agent.instanceId(), type, payloadJson);
|
||||
commandIds.add(command.id());
|
||||
}
|
||||
|
||||
@@ -185,7 +222,7 @@ public class AgentCommandController {
|
||||
// Record command result in agent event log
|
||||
if (body != null && body.status() != null) {
|
||||
AgentInfo agent = registryService.findById(id);
|
||||
String application = agent != null ? agent.application() : "unknown";
|
||||
String application = agent != null ? agent.applicationId() : "unknown";
|
||||
agentEventService.recordEvent(id, application, "COMMAND_" + body.status(),
|
||||
"Command " + commandId + ": " + body.message());
|
||||
log.debug("Command {} ack from agent {}: {} - {}", commandId, id, body.status(), body.message());
|
||||
|
||||
@@ -32,6 +32,7 @@ public class AgentEventsController {
|
||||
public ResponseEntity<List<AgentEventResponse>> getEvents(
|
||||
@RequestParam(required = false) String appId,
|
||||
@RequestParam(required = false) String agentId,
|
||||
@RequestParam(required = false) String environment,
|
||||
@RequestParam(required = false) String from,
|
||||
@RequestParam(required = false) String to,
|
||||
@RequestParam(defaultValue = "50") int limit) {
|
||||
@@ -39,7 +40,7 @@ public class AgentEventsController {
|
||||
Instant fromInstant = from != null ? Instant.parse(from) : null;
|
||||
Instant toInstant = to != null ? Instant.parse(to) : null;
|
||||
|
||||
var events = agentEventService.queryEvents(appId, agentId, fromInstant, toInstant, limit)
|
||||
var events = agentEventService.queryEvents(appId, agentId, environment, fromInstant, toInstant, limit)
|
||||
.stream()
|
||||
.map(AgentEventResponse::from)
|
||||
.toList();
|
||||
|
||||
@@ -2,22 +2,23 @@ package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.server.app.dto.AgentMetricsResponse;
|
||||
import com.cameleer3.server.app.dto.MetricBucket;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import com.cameleer3.server.core.storage.MetricsQueryStore;
|
||||
import com.cameleer3.server.core.storage.model.MetricTimeSeries;
|
||||
import org.springframework.web.bind.annotation.*;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
import java.time.Instant;
|
||||
import java.time.temporal.ChronoUnit;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/agents/{agentId}/metrics")
|
||||
public class AgentMetricsController {
|
||||
|
||||
private final JdbcTemplate jdbc;
|
||||
private final MetricsQueryStore metricsQueryStore;
|
||||
|
||||
public AgentMetricsController(JdbcTemplate jdbc) {
|
||||
this.jdbc = jdbc;
|
||||
public AgentMetricsController(MetricsQueryStore metricsQueryStore) {
|
||||
this.metricsQueryStore = metricsQueryStore;
|
||||
}
|
||||
|
||||
@GetMapping
|
||||
@@ -32,34 +33,18 @@ public class AgentMetricsController {
|
||||
if (to == null) to = Instant.now();
|
||||
|
||||
List<String> metricNames = Arrays.asList(names.split(","));
|
||||
long intervalMs = (to.toEpochMilli() - from.toEpochMilli()) / Math.max(buckets, 1);
|
||||
String intervalStr = intervalMs + " milliseconds";
|
||||
|
||||
Map<String, List<MetricBucket>> result = new LinkedHashMap<>();
|
||||
for (String name : metricNames) {
|
||||
result.put(name.trim(), new ArrayList<>());
|
||||
}
|
||||
Map<String, List<MetricTimeSeries.Bucket>> raw =
|
||||
metricsQueryStore.queryTimeSeries(agentId, metricNames, from, to, buckets);
|
||||
|
||||
String sql = """
|
||||
SELECT time_bucket(CAST(? AS interval), collected_at) AS bucket,
|
||||
metric_name,
|
||||
AVG(metric_value) AS avg_value
|
||||
FROM agent_metrics
|
||||
WHERE agent_id = ?
|
||||
AND collected_at >= ? AND collected_at < ?
|
||||
AND metric_name = ANY(?)
|
||||
GROUP BY bucket, metric_name
|
||||
ORDER BY bucket
|
||||
""";
|
||||
|
||||
String[] namesArray = metricNames.stream().map(String::trim).toArray(String[]::new);
|
||||
jdbc.query(sql, rs -> {
|
||||
String metricName = rs.getString("metric_name");
|
||||
Instant bucket = rs.getTimestamp("bucket").toInstant();
|
||||
double value = rs.getDouble("avg_value");
|
||||
result.computeIfAbsent(metricName, k -> new ArrayList<>())
|
||||
.add(new MetricBucket(bucket, value));
|
||||
}, intervalStr, agentId, Timestamp.from(from), Timestamp.from(to), namesArray);
|
||||
Map<String, List<MetricBucket>> result = raw.entrySet().stream()
|
||||
.collect(Collectors.toMap(
|
||||
Map.Entry::getKey,
|
||||
e -> e.getValue().stream()
|
||||
.map(b -> new MetricBucket(b.time(), b.value()))
|
||||
.toList(),
|
||||
(a, b) -> a,
|
||||
LinkedHashMap::new));
|
||||
|
||||
return new AgentMetricsResponse(result);
|
||||
}
|
||||
|
||||
@@ -7,7 +7,9 @@ import com.cameleer3.server.app.dto.AgentRefreshResponse;
|
||||
import com.cameleer3.server.app.dto.AgentRegistrationRequest;
|
||||
import com.cameleer3.server.app.dto.AgentRegistrationResponse;
|
||||
import com.cameleer3.server.app.dto.ErrorResponse;
|
||||
import com.cameleer3.common.model.HeartbeatRequest;
|
||||
import com.cameleer3.server.app.security.BootstrapTokenValidator;
|
||||
import com.cameleer3.server.app.security.JwtAuthenticationFilter;
|
||||
import com.cameleer3.server.core.admin.AuditCategory;
|
||||
import com.cameleer3.server.core.admin.AuditResult;
|
||||
import com.cameleer3.server.core.admin.AuditService;
|
||||
@@ -15,6 +17,7 @@ import com.cameleer3.server.core.agent.AgentEventService;
|
||||
import com.cameleer3.server.core.agent.AgentInfo;
|
||||
import com.cameleer3.server.core.agent.AgentRegistryService;
|
||||
import com.cameleer3.server.core.agent.AgentState;
|
||||
import com.cameleer3.server.core.agent.RouteStateRegistry;
|
||||
import com.cameleer3.server.core.security.Ed25519SigningService;
|
||||
import com.cameleer3.server.core.security.InvalidTokenException;
|
||||
import com.cameleer3.server.core.security.JwtService;
|
||||
@@ -25,6 +28,7 @@ import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import org.slf4j.Logger;
|
||||
import org.springframework.web.servlet.support.ServletUriComponentsBuilder;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
@@ -63,6 +67,7 @@ public class AgentRegistrationController {
|
||||
private final AgentEventService agentEventService;
|
||||
private final AuditService auditService;
|
||||
private final JdbcTemplate jdbc;
|
||||
private final RouteStateRegistry routeStateRegistry;
|
||||
|
||||
public AgentRegistrationController(AgentRegistryService registryService,
|
||||
AgentRegistryConfig config,
|
||||
@@ -71,7 +76,8 @@ public class AgentRegistrationController {
|
||||
Ed25519SigningService ed25519SigningService,
|
||||
AgentEventService agentEventService,
|
||||
AuditService auditService,
|
||||
JdbcTemplate jdbc) {
|
||||
@org.springframework.beans.factory.annotation.Qualifier("clickHouseJdbcTemplate") JdbcTemplate jdbc,
|
||||
RouteStateRegistry routeStateRegistry) {
|
||||
this.registryService = registryService;
|
||||
this.config = config;
|
||||
this.bootstrapTokenValidator = bootstrapTokenValidator;
|
||||
@@ -80,6 +86,7 @@ public class AgentRegistrationController {
|
||||
this.agentEventService = agentEventService;
|
||||
this.auditService = auditService;
|
||||
this.jdbc = jdbc;
|
||||
this.routeStateRegistry = routeStateRegistry;
|
||||
}
|
||||
|
||||
@PostMapping("/register")
|
||||
@@ -103,34 +110,41 @@ public class AgentRegistrationController {
|
||||
return ResponseEntity.status(401).build();
|
||||
}
|
||||
|
||||
if (request.agentId() == null || request.agentId().isBlank()
|
||||
|| request.name() == null || request.name().isBlank()) {
|
||||
if (request.instanceId() == null || request.instanceId().isBlank()
|
||||
|| request.displayName() == null || request.displayName().isBlank()) {
|
||||
return ResponseEntity.badRequest().build();
|
||||
}
|
||||
|
||||
String application = request.application() != null ? request.application() : "default";
|
||||
String application = request.applicationId() != null ? request.applicationId() : "default";
|
||||
String environmentId = request.environmentId() != null ? request.environmentId() : "default";
|
||||
List<String> routeIds = request.routeIds() != null ? request.routeIds() : List.of();
|
||||
var capabilities = request.capabilities() != null ? request.capabilities() : Collections.<String, Object>emptyMap();
|
||||
|
||||
AgentInfo agent = registryService.register(
|
||||
request.agentId(), request.name(), application, request.version(), routeIds, capabilities);
|
||||
log.info("Agent registered: {} (name={}, application={})", request.agentId(), request.name(), application);
|
||||
request.instanceId(), request.displayName(), application, environmentId,
|
||||
request.version(), routeIds, capabilities);
|
||||
log.info("Agent registered: {} (name={}, application={})", request.instanceId(), request.displayName(), application);
|
||||
|
||||
agentEventService.recordEvent(request.agentId(), application, "REGISTERED",
|
||||
"Agent registered: " + request.name());
|
||||
agentEventService.recordEvent(request.instanceId(), application, "REGISTERED",
|
||||
"Agent registered: " + request.displayName());
|
||||
|
||||
auditService.log(request.agentId(), "agent_register", AuditCategory.AGENT, request.agentId(),
|
||||
Map.of("application", application, "name", request.name()),
|
||||
auditService.log(request.instanceId(), "agent_register", AuditCategory.AGENT, request.instanceId(),
|
||||
Map.of("application", application, "name", request.displayName()),
|
||||
AuditResult.SUCCESS, httpRequest);
|
||||
|
||||
// Issue JWT tokens with AGENT role
|
||||
// Issue JWT tokens with AGENT role + environment
|
||||
List<String> roles = List.of("AGENT");
|
||||
String accessToken = jwtService.createAccessToken(request.agentId(), application, roles);
|
||||
String refreshToken = jwtService.createRefreshToken(request.agentId(), application, roles);
|
||||
String accessToken = jwtService.createAccessToken(request.instanceId(), application, environmentId, roles);
|
||||
String refreshToken = jwtService.createRefreshToken(request.instanceId(), application, environmentId, roles);
|
||||
|
||||
String sseEndpoint = ServletUriComponentsBuilder.fromCurrentContextPath()
|
||||
.path("/api/v1/agents/{id}/events")
|
||||
.buildAndExpand(agent.instanceId())
|
||||
.toUriString();
|
||||
|
||||
return ResponseEntity.ok(new AgentRegistrationResponse(
|
||||
agent.id(),
|
||||
"/api/v1/agents/" + agent.id() + "/events",
|
||||
agent.instanceId(),
|
||||
sseEndpoint,
|
||||
config.getHeartbeatIntervalMs(),
|
||||
ed25519SigningService.getPublicKeyBase64(),
|
||||
accessToken,
|
||||
@@ -168,17 +182,21 @@ public class AgentRegistrationController {
|
||||
return ResponseEntity.status(401).build();
|
||||
}
|
||||
|
||||
// Verify agent exists
|
||||
AgentInfo agent = registryService.findById(agentId);
|
||||
if (agent == null) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
|
||||
// Preserve roles from refresh token
|
||||
// Preserve roles and application from refresh token
|
||||
List<String> roles = result.roles().isEmpty()
|
||||
? List.of("AGENT") : result.roles();
|
||||
String newAccessToken = jwtService.createAccessToken(agentId, agent.application(), roles);
|
||||
String newRefreshToken = jwtService.createRefreshToken(agentId, agent.application(), roles);
|
||||
String application = result.application() != null ? result.application() : "default";
|
||||
|
||||
// Try to get application + environment from registry (agent may not be registered after server restart)
|
||||
String environment = result.environment() != null ? result.environment() : "default";
|
||||
AgentInfo agent = registryService.findById(agentId);
|
||||
if (agent != null) {
|
||||
application = agent.applicationId();
|
||||
environment = agent.environmentId();
|
||||
}
|
||||
|
||||
String newAccessToken = jwtService.createAccessToken(agentId, application, environment, roles);
|
||||
String newRefreshToken = jwtService.createRefreshToken(agentId, application, environment, roles);
|
||||
|
||||
auditService.log(agentId, "agent_token_refresh", AuditCategory.AUTH, agentId,
|
||||
null, AuditResult.SUCCESS, httpRequest);
|
||||
@@ -188,14 +206,72 @@ public class AgentRegistrationController {
|
||||
|
||||
@PostMapping("/{id}/heartbeat")
|
||||
@Operation(summary = "Agent heartbeat ping",
|
||||
description = "Updates the agent's last heartbeat timestamp")
|
||||
description = "Updates the agent's last heartbeat timestamp. Auto-registers the agent if not in registry (e.g. after server restart).")
|
||||
@ApiResponse(responseCode = "200", description = "Heartbeat accepted")
|
||||
@ApiResponse(responseCode = "404", description = "Agent not registered")
|
||||
public ResponseEntity<Void> heartbeat(@PathVariable String id) {
|
||||
boolean found = registryService.heartbeat(id);
|
||||
public ResponseEntity<Void> heartbeat(@PathVariable String id,
|
||||
@RequestBody(required = false) HeartbeatRequest request,
|
||||
HttpServletRequest httpRequest) {
|
||||
Map<String, Object> capabilities = request != null ? request.getCapabilities() : null;
|
||||
String heartbeatEnv = request != null ? request.getEnvironmentId() : null;
|
||||
boolean found = registryService.heartbeat(id, capabilities);
|
||||
if (!found) {
|
||||
// Auto-heal: re-register agent from heartbeat body + JWT claims after server restart
|
||||
var jwtResult = (JwtService.JwtValidationResult) httpRequest.getAttribute(
|
||||
JwtAuthenticationFilter.JWT_RESULT_ATTR);
|
||||
if (jwtResult != null) {
|
||||
String application = jwtResult.application() != null ? jwtResult.application() : "default";
|
||||
// Prefer environment from heartbeat body (most current), fall back to JWT claim
|
||||
String env = heartbeatEnv != null ? heartbeatEnv
|
||||
: jwtResult.environment() != null ? jwtResult.environment() : "default";
|
||||
Map<String, Object> caps = capabilities != null ? capabilities : Map.of();
|
||||
registryService.register(id, id, application, env, "unknown",
|
||||
List.of(), caps);
|
||||
registryService.heartbeat(id);
|
||||
log.info("Auto-registered agent {} (app={}, env={}) from heartbeat after server restart", id, application, env);
|
||||
} else {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
if (request != null && request.getRouteStates() != null && !request.getRouteStates().isEmpty()) {
|
||||
AgentInfo agent = registryService.findById(id);
|
||||
if (agent != null) {
|
||||
for (var entry : request.getRouteStates().entrySet()) {
|
||||
RouteStateRegistry.RouteState state = parseRouteState(entry.getValue());
|
||||
if (state != null) {
|
||||
routeStateRegistry.setState(agent.applicationId(), entry.getKey(), state);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ResponseEntity.ok().build();
|
||||
}
|
||||
|
||||
private RouteStateRegistry.RouteState parseRouteState(String state) {
|
||||
if (state == null) return null;
|
||||
return switch (state) {
|
||||
case "Started" -> RouteStateRegistry.RouteState.STARTED;
|
||||
case "Stopped" -> RouteStateRegistry.RouteState.STOPPED;
|
||||
case "Suspended" -> RouteStateRegistry.RouteState.SUSPENDED;
|
||||
default -> null;
|
||||
};
|
||||
}
|
||||
|
||||
@PostMapping("/{id}/deregister")
|
||||
@Operation(summary = "Deregister agent",
|
||||
description = "Removes the agent from the registry. Called by agents during graceful shutdown.")
|
||||
@ApiResponse(responseCode = "200", description = "Agent deregistered")
|
||||
@ApiResponse(responseCode = "404", description = "Agent not registered")
|
||||
public ResponseEntity<Void> deregister(@PathVariable String id, HttpServletRequest httpRequest) {
|
||||
AgentInfo agent = registryService.findById(id);
|
||||
if (agent == null) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
String applicationId = agent.applicationId();
|
||||
registryService.deregister(id);
|
||||
agentEventService.recordEvent(id, applicationId, "DEREGISTERED", "Agent deregistered");
|
||||
auditService.log(id, "agent_deregister", AuditCategory.AGENT, id, null, AuditResult.SUCCESS, httpRequest);
|
||||
return ResponseEntity.ok().build();
|
||||
}
|
||||
|
||||
@@ -207,7 +283,8 @@ public class AgentRegistrationController {
|
||||
content = @Content(schema = @Schema(implementation = ErrorResponse.class)))
|
||||
public ResponseEntity<List<AgentInstanceResponse>> listAgents(
|
||||
@RequestParam(required = false) String status,
|
||||
@RequestParam(required = false) String application) {
|
||||
@RequestParam(required = false) String application,
|
||||
@RequestParam(required = false) String environment) {
|
||||
List<AgentInfo> agents;
|
||||
|
||||
if (status != null) {
|
||||
@@ -224,7 +301,14 @@ public class AgentRegistrationController {
|
||||
// Apply application filter if specified
|
||||
if (application != null && !application.isBlank()) {
|
||||
agents = agents.stream()
|
||||
.filter(a -> application.equals(a.application()))
|
||||
.filter(a -> application.equals(a.applicationId()))
|
||||
.toList();
|
||||
}
|
||||
|
||||
// Apply environment filter if specified
|
||||
if (environment != null && !environment.isBlank()) {
|
||||
agents = agents.stream()
|
||||
.filter(a -> environment.equals(a.environmentId()))
|
||||
.toList();
|
||||
}
|
||||
|
||||
@@ -235,10 +319,10 @@ public class AgentRegistrationController {
|
||||
List<AgentInstanceResponse> response = finalAgents.stream()
|
||||
.map(a -> {
|
||||
AgentInstanceResponse dto = AgentInstanceResponse.from(a);
|
||||
double[] m = agentMetrics.get(a.application());
|
||||
double[] m = agentMetrics.get(a.applicationId());
|
||||
if (m != null) {
|
||||
long appAgentCount = finalAgents.stream()
|
||||
.filter(ag -> ag.application().equals(a.application())).count();
|
||||
.filter(ag -> ag.applicationId().equals(a.applicationId())).count();
|
||||
double agentTps = appAgentCount > 0 ? m[0] / appAgentCount : 0;
|
||||
double errorRate = m[1];
|
||||
int activeRoutes = (int) m[2];
|
||||
@@ -255,25 +339,33 @@ public class AgentRegistrationController {
|
||||
Instant now = Instant.now();
|
||||
Instant from1m = now.minus(1, ChronoUnit.MINUTES);
|
||||
try {
|
||||
// Literal SQL — ClickHouse JDBC driver wraps prepared statements in sub-queries
|
||||
// that strip AggregateFunction column types, breaking -Merge combinators
|
||||
jdbc.query(
|
||||
"SELECT application_name, " +
|
||||
"SUM(total_count) AS total, " +
|
||||
"SUM(failed_count) AS failed, " +
|
||||
"SELECT application_id, " +
|
||||
"countMerge(total_count) AS total, " +
|
||||
"countIfMerge(failed_count) AS failed, " +
|
||||
"COUNT(DISTINCT route_id) AS active_routes " +
|
||||
"FROM stats_1m_route WHERE bucket >= ? AND bucket < ? " +
|
||||
"GROUP BY application_name",
|
||||
"FROM stats_1m_route WHERE bucket >= " + lit(from1m) + " AND bucket < " + lit(now) +
|
||||
" GROUP BY application_id",
|
||||
rs -> {
|
||||
long total = rs.getLong("total");
|
||||
long failed = rs.getLong("failed");
|
||||
double tps = total / 60.0;
|
||||
double errorRate = total > 0 ? (double) failed / total : 0.0;
|
||||
int activeRoutes = rs.getInt("active_routes");
|
||||
result.put(rs.getString("application_name"), new double[]{tps, errorRate, activeRoutes});
|
||||
},
|
||||
Timestamp.from(from1m), Timestamp.from(now));
|
||||
result.put(rs.getString("application_id"), new double[]{tps, errorRate, activeRoutes});
|
||||
});
|
||||
} catch (Exception e) {
|
||||
log.debug("Could not query agent metrics: {}", e.getMessage());
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
/** Format an Instant as a ClickHouse DateTime literal. */
|
||||
private static String lit(Instant instant) {
|
||||
return "'" + java.time.format.DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")
|
||||
.withZone(java.time.ZoneOffset.UTC)
|
||||
.format(instant.truncatedTo(ChronoUnit.SECONDS)) + "'";
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,12 +1,15 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.server.app.agent.SseConnectionManager;
|
||||
import com.cameleer3.server.app.security.JwtAuthenticationFilter;
|
||||
import com.cameleer3.server.core.agent.AgentInfo;
|
||||
import com.cameleer3.server.core.agent.AgentRegistryService;
|
||||
import com.cameleer3.server.core.security.JwtService;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.Parameter;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.http.HttpStatus;
|
||||
@@ -19,6 +22,9 @@ import org.springframework.web.bind.annotation.RestController;
|
||||
import org.springframework.web.server.ResponseStatusException;
|
||||
import org.springframework.web.servlet.mvc.method.annotation.SseEmitter;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* SSE endpoint for real-time event streaming to agents.
|
||||
* <p>
|
||||
@@ -47,15 +53,26 @@ public class AgentSseController {
|
||||
+ "Commands (config-update, deep-trace, replay) are pushed as events. "
|
||||
+ "Ping keepalive comments sent every 15 seconds.")
|
||||
@ApiResponse(responseCode = "200", description = "SSE stream opened")
|
||||
@ApiResponse(responseCode = "404", description = "Agent not registered")
|
||||
@ApiResponse(responseCode = "404", description = "Agent not registered and cannot be auto-registered")
|
||||
public SseEmitter events(
|
||||
@PathVariable String id,
|
||||
@Parameter(description = "Last received event ID (no replay, acknowledged only)")
|
||||
@RequestHeader(value = "Last-Event-ID", required = false) String lastEventId) {
|
||||
@RequestHeader(value = "Last-Event-ID", required = false) String lastEventId,
|
||||
HttpServletRequest httpRequest) {
|
||||
|
||||
AgentInfo agent = registryService.findById(id);
|
||||
if (agent == null) {
|
||||
throw new ResponseStatusException(HttpStatus.NOT_FOUND, "Agent not found: " + id);
|
||||
// Auto-heal: re-register agent from JWT claims after server restart
|
||||
var jwtResult = (JwtService.JwtValidationResult) httpRequest.getAttribute(
|
||||
JwtAuthenticationFilter.JWT_RESULT_ATTR);
|
||||
if (jwtResult != null) {
|
||||
String application = jwtResult.application() != null ? jwtResult.application() : "default";
|
||||
String env = jwtResult.environment() != null ? jwtResult.environment() : "default";
|
||||
registryService.register(id, id, application, env, "unknown", List.of(), Map.of());
|
||||
log.info("Auto-registered agent {} (app={}, env={}) from SSE connect after server restart", id, application, env);
|
||||
} else {
|
||||
throw new ResponseStatusException(HttpStatus.NOT_FOUND, "Agent not found: " + id);
|
||||
}
|
||||
}
|
||||
|
||||
if (lastEventId != null) {
|
||||
|
||||
@@ -0,0 +1,136 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.server.core.runtime.App;
|
||||
import com.cameleer3.server.core.runtime.AppService;
|
||||
import com.cameleer3.server.core.runtime.AppVersion;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.springframework.http.MediaType;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.web.bind.annotation.DeleteMapping;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.PathVariable;
|
||||
import org.springframework.web.bind.annotation.PostMapping;
|
||||
import org.springframework.web.bind.annotation.PutMapping;
|
||||
import org.springframework.web.bind.annotation.RequestBody;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RequestParam;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
import org.springframework.web.multipart.MultipartFile;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
/**
|
||||
* App CRUD and JAR upload endpoints.
|
||||
* All app-scoped endpoints accept the app slug (not UUID) as path variable.
|
||||
* Protected by {@code ROLE_OPERATOR} or {@code ROLE_ADMIN}.
|
||||
*/
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/apps")
|
||||
@Tag(name = "App Management", description = "Application lifecycle and JAR uploads")
|
||||
@PreAuthorize("hasAnyRole('OPERATOR', 'ADMIN')")
|
||||
public class AppController {
|
||||
|
||||
private final AppService appService;
|
||||
|
||||
public AppController(AppService appService) {
|
||||
this.appService = appService;
|
||||
}
|
||||
|
||||
@GetMapping
|
||||
@Operation(summary = "List apps by environment")
|
||||
@ApiResponse(responseCode = "200", description = "App list returned")
|
||||
public ResponseEntity<List<App>> listApps(@RequestParam(required = false) UUID environmentId) {
|
||||
if (environmentId != null) {
|
||||
return ResponseEntity.ok(appService.listByEnvironment(environmentId));
|
||||
}
|
||||
return ResponseEntity.ok(appService.listAll());
|
||||
}
|
||||
|
||||
@GetMapping("/{appSlug}")
|
||||
@Operation(summary = "Get app by slug")
|
||||
@ApiResponse(responseCode = "200", description = "App found")
|
||||
@ApiResponse(responseCode = "404", description = "App not found")
|
||||
public ResponseEntity<App> getApp(@PathVariable String appSlug) {
|
||||
try {
|
||||
return ResponseEntity.ok(appService.getBySlug(appSlug));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping
|
||||
@Operation(summary = "Create a new app")
|
||||
@ApiResponse(responseCode = "201", description = "App created")
|
||||
@ApiResponse(responseCode = "400", description = "Slug already exists in environment")
|
||||
public ResponseEntity<App> createApp(@RequestBody CreateAppRequest request) {
|
||||
try {
|
||||
UUID id = appService.createApp(request.environmentId(), request.slug(), request.displayName());
|
||||
return ResponseEntity.status(201).body(appService.getById(id));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.badRequest().build();
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/{appSlug}/versions")
|
||||
@Operation(summary = "List app versions")
|
||||
@ApiResponse(responseCode = "200", description = "Version list returned")
|
||||
public ResponseEntity<List<AppVersion>> listVersions(@PathVariable String appSlug) {
|
||||
try {
|
||||
App app = appService.getBySlug(appSlug);
|
||||
return ResponseEntity.ok(appService.listVersions(app.id()));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping(value = "/{appSlug}/versions", consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
|
||||
@Operation(summary = "Upload a JAR for a new app version")
|
||||
@ApiResponse(responseCode = "201", description = "JAR uploaded and version created")
|
||||
@ApiResponse(responseCode = "404", description = "App not found")
|
||||
public ResponseEntity<AppVersion> uploadJar(@PathVariable String appSlug,
|
||||
@RequestParam("file") MultipartFile file) throws IOException {
|
||||
try {
|
||||
App app = appService.getBySlug(appSlug);
|
||||
AppVersion version = appService.uploadJar(app.id(), file.getOriginalFilename(), file.getInputStream(), file.getSize());
|
||||
return ResponseEntity.status(201).body(version);
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@DeleteMapping("/{appSlug}")
|
||||
@Operation(summary = "Delete an app")
|
||||
@ApiResponse(responseCode = "204", description = "App deleted")
|
||||
public ResponseEntity<Void> deleteApp(@PathVariable String appSlug) {
|
||||
try {
|
||||
App app = appService.getBySlug(appSlug);
|
||||
appService.deleteApp(app.id());
|
||||
return ResponseEntity.noContent().build();
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PutMapping("/{appSlug}/container-config")
|
||||
@Operation(summary = "Update container config for an app")
|
||||
@ApiResponse(responseCode = "200", description = "Container config updated")
|
||||
@ApiResponse(responseCode = "404", description = "App not found")
|
||||
public ResponseEntity<App> updateContainerConfig(@PathVariable String appSlug,
|
||||
@RequestBody Map<String, Object> containerConfig) {
|
||||
try {
|
||||
App app = appService.getBySlug(appSlug);
|
||||
appService.updateContainerConfig(app.id(), containerConfig);
|
||||
return ResponseEntity.ok(appService.getById(app.id()));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
public record CreateAppRequest(UUID environmentId, String slug, String displayName) {}
|
||||
}
|
||||
@@ -48,7 +48,7 @@ public class AppSettingsController {
|
||||
@GetMapping("/{appId}")
|
||||
@Operation(summary = "Get settings for a specific application (returns defaults if not configured)")
|
||||
public ResponseEntity<AppSettings> getByAppId(@PathVariable String appId) {
|
||||
AppSettings settings = repository.findByAppId(appId).orElse(AppSettings.defaults(appId));
|
||||
AppSettings settings = repository.findByApplicationId(appId).orElse(AppSettings.defaults(appId));
|
||||
return ResponseEntity.ok(settings);
|
||||
}
|
||||
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.common.model.ApplicationConfig;
|
||||
import com.cameleer3.server.app.dto.CommandGroupResponse;
|
||||
import com.cameleer3.server.app.dto.ConfigUpdateResponse;
|
||||
import com.cameleer3.server.app.dto.TestExpressionRequest;
|
||||
import com.cameleer3.server.app.dto.TestExpressionResponse;
|
||||
import com.cameleer3.server.app.storage.PostgresApplicationConfigRepository;
|
||||
import com.cameleer3.server.core.admin.AuditCategory;
|
||||
import com.cameleer3.server.core.admin.AuditResult;
|
||||
import com.cameleer3.server.core.admin.AuditService;
|
||||
import com.cameleer3.server.core.agent.AgentCommand;
|
||||
import com.cameleer3.server.core.agent.AgentInfo;
|
||||
import com.cameleer3.server.core.agent.AgentRegistryService;
|
||||
import com.cameleer3.server.core.agent.AgentState;
|
||||
@@ -27,6 +28,7 @@ import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.core.Authentication;
|
||||
import org.springframework.web.bind.annotation.*;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
@@ -88,23 +90,26 @@ public class ApplicationConfigController {
|
||||
@Operation(summary = "Update application config",
|
||||
description = "Saves config and pushes CONFIG_UPDATE to all LIVE agents of this application")
|
||||
@ApiResponse(responseCode = "200", description = "Config saved and pushed")
|
||||
public ResponseEntity<ApplicationConfig> updateConfig(@PathVariable String application,
|
||||
@RequestBody ApplicationConfig config,
|
||||
Authentication auth,
|
||||
HttpServletRequest httpRequest) {
|
||||
public ResponseEntity<ConfigUpdateResponse> updateConfig(@PathVariable String application,
|
||||
@RequestParam(required = false) String environment,
|
||||
@RequestBody ApplicationConfig config,
|
||||
Authentication auth,
|
||||
HttpServletRequest httpRequest) {
|
||||
String updatedBy = auth != null ? auth.getName() : "system";
|
||||
|
||||
config.setApplication(application);
|
||||
ApplicationConfig saved = configRepository.save(application, config, updatedBy);
|
||||
|
||||
int pushed = pushConfigToAgents(application, saved);
|
||||
log.info("Config v{} saved for '{}', pushed to {} agent(s)", saved.getVersion(), application, pushed);
|
||||
CommandGroupResponse pushResult = pushConfigToAgents(application, environment, saved);
|
||||
log.info("Config v{} saved for '{}', pushed to {} agent(s), {} responded",
|
||||
saved.getVersion(), application, pushResult.total(), pushResult.responded());
|
||||
|
||||
auditService.log("update_app_config", AuditCategory.CONFIG, application,
|
||||
Map.of("version", saved.getVersion(), "agentsPushed", pushed),
|
||||
Map.of("version", saved.getVersion(), "agentsPushed", pushResult.total(),
|
||||
"responded", pushResult.responded(), "timedOut", pushResult.timedOut().size()),
|
||||
AuditResult.SUCCESS, httpRequest);
|
||||
|
||||
return ResponseEntity.ok(saved);
|
||||
return ResponseEntity.ok(new ConfigUpdateResponse(saved, pushResult));
|
||||
}
|
||||
|
||||
@GetMapping("/{application}/processor-routes")
|
||||
@@ -122,13 +127,16 @@ public class ApplicationConfigController {
|
||||
@ApiResponse(responseCode = "504", description = "Agent did not respond in time")
|
||||
public ResponseEntity<TestExpressionResponse> testExpression(
|
||||
@PathVariable String application,
|
||||
@RequestParam(required = false) String environment,
|
||||
@RequestBody TestExpressionRequest request) {
|
||||
// Find a LIVE agent for this application
|
||||
AgentInfo agent = registryService.findAll().stream()
|
||||
.filter(a -> application.equals(a.application()))
|
||||
.filter(a -> a.state() == AgentState.LIVE)
|
||||
.findFirst()
|
||||
.orElse(null);
|
||||
// Find a LIVE agent for this application, optionally filtered by environment
|
||||
var candidates = registryService.findAll().stream()
|
||||
.filter(a -> application.equals(a.applicationId()))
|
||||
.filter(a -> a.state() == AgentState.LIVE);
|
||||
if (environment != null) {
|
||||
candidates = candidates.filter(a -> environment.equals(a.environmentId()));
|
||||
}
|
||||
AgentInfo agent = candidates.findFirst().orElse(null);
|
||||
|
||||
if (agent == null) {
|
||||
return ResponseEntity.status(HttpStatus.NOT_FOUND)
|
||||
@@ -152,7 +160,7 @@ public class ApplicationConfigController {
|
||||
|
||||
// Send command and await reply
|
||||
CompletableFuture<CommandReply> future = registryService.addCommandWithReply(
|
||||
agent.id(), CommandType.TEST_EXPRESSION, payloadJson);
|
||||
agent.instanceId(), CommandType.TEST_EXPRESSION, payloadJson);
|
||||
|
||||
try {
|
||||
CommandReply reply = future.orTimeout(5, TimeUnit.SECONDS).join();
|
||||
@@ -166,30 +174,56 @@ public class ApplicationConfigController {
|
||||
return ResponseEntity.status(HttpStatus.GATEWAY_TIMEOUT)
|
||||
.body(new TestExpressionResponse(null, "Agent did not respond within 5 seconds"));
|
||||
}
|
||||
log.error("Error awaiting test-expression reply from agent {}", agent.id(), e);
|
||||
log.error("Error awaiting test-expression reply from agent {}", agent.instanceId(), e);
|
||||
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR)
|
||||
.body(new TestExpressionResponse(null, "Internal error: " + e.getCause().getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
private int pushConfigToAgents(String application, ApplicationConfig config) {
|
||||
private CommandGroupResponse pushConfigToAgents(String application, String environment, ApplicationConfig config) {
|
||||
String payloadJson;
|
||||
try {
|
||||
payloadJson = objectMapper.writeValueAsString(config);
|
||||
} catch (JsonProcessingException e) {
|
||||
log.error("Failed to serialize config for push", e);
|
||||
return 0;
|
||||
return new CommandGroupResponse(false, 0, 0, List.of(), List.of());
|
||||
}
|
||||
|
||||
List<AgentInfo> agents = registryService.findAll().stream()
|
||||
.filter(a -> a.state() == AgentState.LIVE)
|
||||
.filter(a -> application.equals(a.application()))
|
||||
.toList();
|
||||
Map<String, CompletableFuture<CommandReply>> futures =
|
||||
registryService.addGroupCommandWithReplies(application, environment, CommandType.CONFIG_UPDATE, payloadJson);
|
||||
|
||||
for (AgentInfo agent : agents) {
|
||||
registryService.addCommand(agent.id(), CommandType.CONFIG_UPDATE, payloadJson);
|
||||
if (futures.isEmpty()) {
|
||||
return new CommandGroupResponse(true, 0, 0, List.of(), List.of());
|
||||
}
|
||||
return agents.size();
|
||||
|
||||
// Wait with shared 10-second deadline
|
||||
long deadline = System.currentTimeMillis() + 10_000;
|
||||
List<CommandGroupResponse.AgentResponse> responses = new ArrayList<>();
|
||||
List<String> timedOut = new ArrayList<>();
|
||||
|
||||
for (var entry : futures.entrySet()) {
|
||||
long remaining = deadline - System.currentTimeMillis();
|
||||
if (remaining <= 0) {
|
||||
timedOut.add(entry.getKey());
|
||||
entry.getValue().cancel(false);
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
CommandReply reply = entry.getValue().get(remaining, TimeUnit.MILLISECONDS);
|
||||
responses.add(new CommandGroupResponse.AgentResponse(
|
||||
entry.getKey(), reply.status(), reply.message()));
|
||||
} catch (TimeoutException e) {
|
||||
timedOut.add(entry.getKey());
|
||||
entry.getValue().cancel(false);
|
||||
} catch (Exception e) {
|
||||
responses.add(new CommandGroupResponse.AgentResponse(
|
||||
entry.getKey(), "ERROR", e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
boolean allSuccess = timedOut.isEmpty() &&
|
||||
responses.stream().allMatch(r -> "SUCCESS".equals(r.status()));
|
||||
return new CommandGroupResponse(allSuccess, futures.size(), responses.size(), responses, timedOut);
|
||||
}
|
||||
|
||||
private static ApplicationConfig defaultConfig(String application) {
|
||||
|
||||
@@ -0,0 +1,296 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.server.app.dto.AgentSummary;
|
||||
import com.cameleer3.server.app.dto.CatalogApp;
|
||||
import com.cameleer3.server.app.dto.RouteSummary;
|
||||
import com.cameleer3.common.graph.RouteGraph;
|
||||
import com.cameleer3.server.core.agent.AgentInfo;
|
||||
import com.cameleer3.server.core.agent.AgentRegistryService;
|
||||
import com.cameleer3.server.core.agent.AgentState;
|
||||
import com.cameleer3.server.core.agent.RouteStateRegistry;
|
||||
import com.cameleer3.server.core.runtime.*;
|
||||
import com.cameleer3.server.core.storage.DiagramStore;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RequestParam;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
|
||||
import java.sql.Timestamp;
|
||||
import java.time.Instant;
|
||||
import java.time.temporal.ChronoUnit;
|
||||
import java.util.*;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Unified catalog endpoint that merges App records (PostgreSQL) with live agent data
|
||||
* and ClickHouse stats. Replaces the separate RouteCatalogController.
|
||||
*/
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/catalog")
|
||||
@Tag(name = "Catalog", description = "Unified application catalog")
|
||||
public class CatalogController {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(CatalogController.class);
|
||||
|
||||
private final AgentRegistryService registryService;
|
||||
private final DiagramStore diagramStore;
|
||||
private final JdbcTemplate jdbc;
|
||||
private final RouteStateRegistry routeStateRegistry;
|
||||
private final AppService appService;
|
||||
private final EnvironmentService envService;
|
||||
private final DeploymentRepository deploymentRepo;
|
||||
|
||||
public CatalogController(AgentRegistryService registryService,
|
||||
DiagramStore diagramStore,
|
||||
@org.springframework.beans.factory.annotation.Qualifier("clickHouseJdbcTemplate") JdbcTemplate jdbc,
|
||||
RouteStateRegistry routeStateRegistry,
|
||||
AppService appService,
|
||||
EnvironmentService envService,
|
||||
DeploymentRepository deploymentRepo) {
|
||||
this.registryService = registryService;
|
||||
this.diagramStore = diagramStore;
|
||||
this.jdbc = jdbc;
|
||||
this.routeStateRegistry = routeStateRegistry;
|
||||
this.appService = appService;
|
||||
this.envService = envService;
|
||||
this.deploymentRepo = deploymentRepo;
|
||||
}
|
||||
|
||||
@GetMapping
|
||||
@Operation(summary = "Get unified catalog",
|
||||
description = "Returns all applications (managed + unmanaged) with live agent data, routes, and deployment status")
|
||||
@ApiResponse(responseCode = "200", description = "Catalog returned")
|
||||
public ResponseEntity<List<CatalogApp>> getCatalog(
|
||||
@RequestParam(required = false) String environment,
|
||||
@RequestParam(required = false) String from,
|
||||
@RequestParam(required = false) String to) {
|
||||
|
||||
// 1. Resolve environment
|
||||
Environment env = null;
|
||||
if (environment != null && !environment.isBlank()) {
|
||||
try {
|
||||
env = envService.getBySlug(environment);
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.ok(List.of());
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Get managed apps from PostgreSQL
|
||||
List<App> managedApps = env != null
|
||||
? appService.listByEnvironment(env.id())
|
||||
: appService.listAll();
|
||||
Map<String, App> appsBySlug = managedApps.stream()
|
||||
.collect(Collectors.toMap(App::slug, a -> a, (a, b) -> a));
|
||||
|
||||
// 3. Get active deployments for managed apps
|
||||
Map<UUID, Deployment> activeDeployments = new HashMap<>();
|
||||
for (App app : managedApps) {
|
||||
UUID envId = env != null ? env.id() : app.environmentId();
|
||||
deploymentRepo.findActiveByAppIdAndEnvironmentId(app.id(), envId)
|
||||
.ifPresent(d -> activeDeployments.put(app.id(), d));
|
||||
}
|
||||
|
||||
// 4. Get agents, filter by environment
|
||||
List<AgentInfo> allAgents = registryService.findAll();
|
||||
if (environment != null && !environment.isBlank()) {
|
||||
allAgents = allAgents.stream()
|
||||
.filter(a -> environment.equals(a.environmentId()))
|
||||
.toList();
|
||||
}
|
||||
Map<String, List<AgentInfo>> agentsByApp = allAgents.stream()
|
||||
.collect(Collectors.groupingBy(AgentInfo::applicationId, LinkedHashMap::new, Collectors.toList()));
|
||||
|
||||
// 5. Collect routes per app from agents
|
||||
Map<String, Set<String>> routesByApp = new LinkedHashMap<>();
|
||||
for (var entry : agentsByApp.entrySet()) {
|
||||
Set<String> routes = new LinkedHashSet<>();
|
||||
for (AgentInfo agent : entry.getValue()) {
|
||||
if (agent.routeIds() != null) routes.addAll(agent.routeIds());
|
||||
}
|
||||
routesByApp.put(entry.getKey(), routes);
|
||||
}
|
||||
|
||||
// 6. ClickHouse exchange counts
|
||||
Instant now = Instant.now();
|
||||
Instant rangeFrom = from != null ? Instant.parse(from) : now.minus(24, ChronoUnit.HOURS);
|
||||
Instant rangeTo = to != null ? Instant.parse(to) : now;
|
||||
Map<String, Long> routeExchangeCounts = new LinkedHashMap<>();
|
||||
Map<String, Instant> routeLastSeen = new LinkedHashMap<>();
|
||||
try {
|
||||
String envFilter = (environment != null && !environment.isBlank())
|
||||
? " AND environment = " + lit(environment) : "";
|
||||
jdbc.query(
|
||||
"SELECT application_id, route_id, countMerge(total_count) AS cnt, MAX(bucket) AS last_seen " +
|
||||
"FROM stats_1m_route WHERE bucket >= " + lit(rangeFrom) + " AND bucket < " + lit(rangeTo) +
|
||||
envFilter + " GROUP BY application_id, route_id",
|
||||
rs -> {
|
||||
String key = rs.getString("application_id") + "/" + rs.getString("route_id");
|
||||
routeExchangeCounts.put(key, rs.getLong("cnt"));
|
||||
Timestamp ts = rs.getTimestamp("last_seen");
|
||||
if (ts != null) routeLastSeen.put(key, ts.toInstant());
|
||||
});
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to query route exchange counts: {}", e.getMessage());
|
||||
}
|
||||
|
||||
// Merge ClickHouse routes into routesByApp
|
||||
for (var countEntry : routeExchangeCounts.entrySet()) {
|
||||
String[] parts = countEntry.getKey().split("/", 2);
|
||||
if (parts.length == 2) {
|
||||
routesByApp.computeIfAbsent(parts[0], k -> new LinkedHashSet<>()).add(parts[1]);
|
||||
}
|
||||
}
|
||||
|
||||
// 7. Build unified catalog
|
||||
Set<String> allSlugs = new LinkedHashSet<>(appsBySlug.keySet());
|
||||
allSlugs.addAll(agentsByApp.keySet());
|
||||
allSlugs.addAll(routesByApp.keySet());
|
||||
|
||||
String envSlug = env != null ? env.slug() : "";
|
||||
List<CatalogApp> catalog = new ArrayList<>();
|
||||
|
||||
for (String slug : allSlugs) {
|
||||
App app = appsBySlug.get(slug);
|
||||
List<AgentInfo> agents = agentsByApp.getOrDefault(slug, List.of());
|
||||
Set<String> routeIds = routesByApp.getOrDefault(slug, Set.of());
|
||||
List<String> agentIds = agents.stream().map(AgentInfo::instanceId).toList();
|
||||
|
||||
// Routes
|
||||
List<RouteSummary> routeSummaries = routeIds.stream()
|
||||
.map(routeId -> {
|
||||
String key = slug + "/" + routeId;
|
||||
long count = routeExchangeCounts.getOrDefault(key, 0L);
|
||||
Instant lastSeen = routeLastSeen.get(key);
|
||||
String fromUri = resolveFromEndpointUri(routeId, agentIds);
|
||||
String state = routeStateRegistry.getState(slug, routeId).name().toLowerCase();
|
||||
String routeState = "started".equals(state) ? null : state;
|
||||
return new RouteSummary(routeId, count, lastSeen, fromUri, routeState);
|
||||
})
|
||||
.toList();
|
||||
|
||||
// Agent summaries
|
||||
List<AgentSummary> agentSummaries = agents.stream()
|
||||
.map(a -> new AgentSummary(a.instanceId(), a.displayName(), a.state().name().toLowerCase(), 0.0))
|
||||
.toList();
|
||||
|
||||
// Agent health
|
||||
String agentHealth = agents.isEmpty() ? "offline" : computeWorstHealth(agents);
|
||||
|
||||
// Total exchanges
|
||||
long totalExchanges = routeSummaries.stream().mapToLong(RouteSummary::exchangeCount).sum();
|
||||
|
||||
// Deployment summary (managed apps only)
|
||||
CatalogApp.DeploymentSummary deploymentSummary = null;
|
||||
DeploymentStatus deployStatus = null;
|
||||
if (app != null) {
|
||||
Deployment dep = activeDeployments.get(app.id());
|
||||
if (dep != null) {
|
||||
deployStatus = dep.status();
|
||||
int healthy = 0, total = 0;
|
||||
if (dep.replicaStates() != null) {
|
||||
total = dep.replicaStates().size();
|
||||
healthy = (int) dep.replicaStates().stream()
|
||||
.filter(r -> "RUNNING".equals(r.get("status")))
|
||||
.count();
|
||||
}
|
||||
int version = 0;
|
||||
try {
|
||||
var versions = appService.listVersions(app.id());
|
||||
version = versions.stream()
|
||||
.filter(v -> v.id().equals(dep.appVersionId()))
|
||||
.map(AppVersion::version)
|
||||
.findFirst().orElse(0);
|
||||
} catch (Exception ignored) {}
|
||||
|
||||
deploymentSummary = new CatalogApp.DeploymentSummary(
|
||||
dep.status().name(),
|
||||
healthy + "/" + total,
|
||||
version
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Composite health + tooltip
|
||||
String health = compositeHealth(app != null ? deployStatus : null, agentHealth);
|
||||
String healthTooltip = buildHealthTooltip(app != null, deployStatus, agentHealth, agents.size());
|
||||
|
||||
String displayName = app != null ? app.displayName() : slug;
|
||||
String appEnvSlug = envSlug;
|
||||
if (app != null && appEnvSlug.isEmpty()) {
|
||||
try {
|
||||
appEnvSlug = envService.getById(app.environmentId()).slug();
|
||||
} catch (Exception ignored) {}
|
||||
}
|
||||
|
||||
catalog.add(new CatalogApp(
|
||||
slug, displayName, app != null, appEnvSlug,
|
||||
health, healthTooltip, agents.size(), routeSummaries, agentSummaries,
|
||||
totalExchanges, deploymentSummary
|
||||
));
|
||||
}
|
||||
|
||||
return ResponseEntity.ok(catalog);
|
||||
}
|
||||
|
||||
private String resolveFromEndpointUri(String routeId, List<String> agentIds) {
|
||||
return diagramStore.findContentHashForRouteByAgents(routeId, agentIds)
|
||||
.flatMap(diagramStore::findByContentHash)
|
||||
.map(RouteGraph::getRoot)
|
||||
.map(root -> root.getEndpointUri())
|
||||
.orElse(null);
|
||||
}
|
||||
|
||||
private static String lit(Instant instant) {
|
||||
return "'" + java.time.format.DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")
|
||||
.withZone(java.time.ZoneOffset.UTC)
|
||||
.format(instant.truncatedTo(ChronoUnit.SECONDS)) + "'";
|
||||
}
|
||||
|
||||
private static String lit(String value) {
|
||||
return "'" + value.replace("\\", "\\\\").replace("'", "\\'") + "'";
|
||||
}
|
||||
|
||||
private String computeWorstHealth(List<AgentInfo> agents) {
|
||||
boolean hasDead = false;
|
||||
boolean hasStale = false;
|
||||
for (AgentInfo a : agents) {
|
||||
if (a.state() == AgentState.DEAD) hasDead = true;
|
||||
if (a.state() == AgentState.STALE) hasStale = true;
|
||||
}
|
||||
if (hasDead) return "dead";
|
||||
if (hasStale) return "stale";
|
||||
return "live";
|
||||
}
|
||||
|
||||
private String compositeHealth(DeploymentStatus deployStatus, String agentHealth) {
|
||||
if (deployStatus == null) return agentHealth; // unmanaged or no deployment
|
||||
return switch (deployStatus) {
|
||||
case STARTING -> "running";
|
||||
case STOPPING, DEGRADED -> "stale";
|
||||
case STOPPED -> "dead";
|
||||
case FAILED -> "error";
|
||||
case RUNNING -> "offline".equals(agentHealth) ? "stale" : agentHealth;
|
||||
};
|
||||
}
|
||||
|
||||
private String buildHealthTooltip(boolean managed, DeploymentStatus deployStatus, String agentHealth, int agentCount) {
|
||||
if (!managed) {
|
||||
return "Agents: " + agentHealth + " (" + agentCount + " connected)";
|
||||
}
|
||||
if (deployStatus == null) {
|
||||
return "No deployment";
|
||||
}
|
||||
String depPart = "Deployment: " + deployStatus.name();
|
||||
if (deployStatus == DeploymentStatus.RUNNING || deployStatus == DeploymentStatus.DEGRADED) {
|
||||
return depPart + ", Agents: " + agentHealth + " (" + agentCount + " connected)";
|
||||
}
|
||||
return depPart;
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,70 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.server.core.ingestion.ChunkAccumulator;
|
||||
import com.cameleer3.common.model.ExecutionChunk;
|
||||
import com.fasterxml.jackson.core.type.TypeReference;
|
||||
import com.fasterxml.jackson.databind.DeserializationFeature;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.web.bind.annotation.PostMapping;
|
||||
import org.springframework.web.bind.annotation.RequestBody;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Ingestion endpoint for execution chunk data (ClickHouse pipeline).
|
||||
* <p>
|
||||
* Accepts single or array {@link ExecutionChunk} payloads and feeds them
|
||||
* into the {@link ChunkAccumulator}. Only active when
|
||||
* {@code clickhouse.enabled=true} (conditional on the accumulator bean).
|
||||
*/
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/data")
|
||||
@ConditionalOnBean(ChunkAccumulator.class)
|
||||
@Tag(name = "Ingestion", description = "Data ingestion endpoints")
|
||||
public class ChunkIngestionController {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(ChunkIngestionController.class);
|
||||
|
||||
private final ChunkAccumulator accumulator;
|
||||
private final ObjectMapper objectMapper;
|
||||
|
||||
public ChunkIngestionController(ChunkAccumulator accumulator) {
|
||||
this.accumulator = accumulator;
|
||||
this.objectMapper = new ObjectMapper();
|
||||
this.objectMapper.registerModule(new JavaTimeModule());
|
||||
this.objectMapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
|
||||
}
|
||||
|
||||
@PostMapping("/executions")
|
||||
@Operation(summary = "Ingest execution chunk")
|
||||
public ResponseEntity<Void> ingestChunks(@RequestBody String body) {
|
||||
try {
|
||||
String trimmed = body.strip();
|
||||
List<ExecutionChunk> chunks;
|
||||
if (trimmed.startsWith("[")) {
|
||||
chunks = objectMapper.readValue(trimmed, new TypeReference<List<ExecutionChunk>>() {});
|
||||
} else {
|
||||
ExecutionChunk single = objectMapper.readValue(trimmed, ExecutionChunk.class);
|
||||
chunks = List.of(single);
|
||||
}
|
||||
|
||||
for (ExecutionChunk chunk : chunks) {
|
||||
accumulator.onChunk(chunk);
|
||||
}
|
||||
|
||||
return ResponseEntity.accepted().build();
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to parse execution chunk payload: {}", e.getMessage());
|
||||
return ResponseEntity.badRequest().build();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,77 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.server.core.rbac.ClaimMappingRepository;
|
||||
import com.cameleer3.server.core.rbac.ClaimMappingRule;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.web.bind.annotation.*;
|
||||
|
||||
import java.net.URI;
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/admin/claim-mappings")
|
||||
@PreAuthorize("hasRole('ADMIN')")
|
||||
@Tag(name = "Claim Mapping Admin", description = "Manage OIDC claim-to-role/group mapping rules")
|
||||
public class ClaimMappingAdminController {
|
||||
|
||||
private final ClaimMappingRepository repository;
|
||||
|
||||
public ClaimMappingAdminController(ClaimMappingRepository repository) {
|
||||
this.repository = repository;
|
||||
}
|
||||
|
||||
@GetMapping
|
||||
@Operation(summary = "List all claim mapping rules")
|
||||
public List<ClaimMappingRule> list() {
|
||||
return repository.findAll();
|
||||
}
|
||||
|
||||
@GetMapping("/{id}")
|
||||
@Operation(summary = "Get a claim mapping rule by ID")
|
||||
public ResponseEntity<ClaimMappingRule> get(@PathVariable UUID id) {
|
||||
return repository.findById(id)
|
||||
.map(ResponseEntity::ok)
|
||||
.orElse(ResponseEntity.notFound().build());
|
||||
}
|
||||
|
||||
record CreateRuleRequest(String claim, String matchType, String matchValue,
|
||||
String action, String target, int priority) {}
|
||||
|
||||
@PostMapping
|
||||
@Operation(summary = "Create a claim mapping rule")
|
||||
public ResponseEntity<ClaimMappingRule> create(@RequestBody CreateRuleRequest request) {
|
||||
UUID id = repository.create(
|
||||
request.claim(), request.matchType(), request.matchValue(),
|
||||
request.action(), request.target(), request.priority());
|
||||
return repository.findById(id)
|
||||
.map(rule -> ResponseEntity.created(URI.create("/api/v1/admin/claim-mappings/" + id)).body(rule))
|
||||
.orElse(ResponseEntity.internalServerError().build());
|
||||
}
|
||||
|
||||
@PutMapping("/{id}")
|
||||
@Operation(summary = "Update a claim mapping rule")
|
||||
public ResponseEntity<ClaimMappingRule> update(@PathVariable UUID id, @RequestBody CreateRuleRequest request) {
|
||||
if (repository.findById(id).isEmpty()) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
repository.update(id, request.claim(), request.matchType(), request.matchValue(),
|
||||
request.action(), request.target(), request.priority());
|
||||
return repository.findById(id)
|
||||
.map(ResponseEntity::ok)
|
||||
.orElse(ResponseEntity.internalServerError().build());
|
||||
}
|
||||
|
||||
@DeleteMapping("/{id}")
|
||||
@Operation(summary = "Delete a claim mapping rule")
|
||||
public ResponseEntity<Void> delete(@PathVariable UUID id) {
|
||||
if (repository.findById(id).isEmpty()) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
repository.delete(id);
|
||||
return ResponseEntity.noContent().build();
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,166 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.server.app.dto.ClickHousePerformanceResponse;
|
||||
import com.cameleer3.server.app.dto.ClickHouseQueryInfo;
|
||||
import com.cameleer3.server.app.dto.ClickHouseStatusResponse;
|
||||
import com.cameleer3.server.app.dto.ClickHouseTableInfo;
|
||||
import com.cameleer3.server.app.dto.IndexerPipelineResponse;
|
||||
import com.cameleer3.server.core.indexing.SearchIndexerStats;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.springframework.beans.factory.annotation.Qualifier;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/admin/clickhouse")
|
||||
@PreAuthorize("hasRole('ADMIN')")
|
||||
@Tag(name = "ClickHouse Admin", description = "ClickHouse monitoring and diagnostics (ADMIN only)")
|
||||
public class ClickHouseAdminController {
|
||||
|
||||
private final JdbcTemplate clickHouseJdbc;
|
||||
private final SearchIndexerStats indexerStats;
|
||||
private final String clickHouseUrl;
|
||||
|
||||
public ClickHouseAdminController(
|
||||
@Qualifier("clickHouseJdbcTemplate") JdbcTemplate clickHouseJdbc,
|
||||
SearchIndexerStats indexerStats,
|
||||
@Value("${clickhouse.url:}") String clickHouseUrl) {
|
||||
this.clickHouseJdbc = clickHouseJdbc;
|
||||
this.indexerStats = indexerStats;
|
||||
this.clickHouseUrl = clickHouseUrl;
|
||||
}
|
||||
|
||||
@GetMapping("/status")
|
||||
@Operation(summary = "ClickHouse cluster status")
|
||||
public ClickHouseStatusResponse getStatus() {
|
||||
try {
|
||||
var row = clickHouseJdbc.queryForMap(
|
||||
"SELECT version() AS version, formatReadableTimeDelta(uptime()) AS uptime");
|
||||
return new ClickHouseStatusResponse(true,
|
||||
(String) row.get("version"),
|
||||
(String) row.get("uptime"),
|
||||
clickHouseUrl);
|
||||
} catch (Exception e) {
|
||||
return new ClickHouseStatusResponse(false, null, null, clickHouseUrl);
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/tables")
|
||||
@Operation(summary = "List ClickHouse tables with sizes")
|
||||
public List<ClickHouseTableInfo> getTables() {
|
||||
return clickHouseJdbc.query("""
|
||||
SELECT t.name, t.engine,
|
||||
t.total_rows AS row_count,
|
||||
formatReadableSize(t.total_bytes) AS data_size,
|
||||
t.total_bytes AS data_size_bytes,
|
||||
ifNull(p.partition_count, 0) AS partition_count
|
||||
FROM system.tables t
|
||||
LEFT JOIN (
|
||||
SELECT table, countDistinct(partition) AS partition_count
|
||||
FROM system.parts
|
||||
WHERE database = currentDatabase() AND active
|
||||
GROUP BY table
|
||||
) p ON t.name = p.table
|
||||
WHERE t.database = currentDatabase()
|
||||
ORDER BY t.total_bytes DESC NULLS LAST
|
||||
""",
|
||||
(rs, rowNum) -> new ClickHouseTableInfo(
|
||||
rs.getString("name"),
|
||||
rs.getString("engine"),
|
||||
rs.getLong("row_count"),
|
||||
rs.getString("data_size"),
|
||||
rs.getLong("data_size_bytes"),
|
||||
rs.getInt("partition_count")));
|
||||
}
|
||||
|
||||
@GetMapping("/performance")
|
||||
@Operation(summary = "ClickHouse storage and performance metrics")
|
||||
public ClickHousePerformanceResponse getPerformance() {
|
||||
try {
|
||||
var row = clickHouseJdbc.queryForMap("""
|
||||
SELECT
|
||||
formatReadableSize(sum(bytes_on_disk)) AS disk_size,
|
||||
formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size,
|
||||
if(sum(data_uncompressed_bytes) > 0,
|
||||
round(sum(bytes_on_disk) / sum(data_uncompressed_bytes), 3), 0) AS compression_ratio,
|
||||
sum(rows) AS total_rows,
|
||||
count() AS part_count
|
||||
FROM system.parts
|
||||
WHERE database = currentDatabase() AND active
|
||||
""");
|
||||
|
||||
String memory = "N/A";
|
||||
try {
|
||||
memory = clickHouseJdbc.queryForObject(
|
||||
"SELECT formatReadableSize(value) FROM system.metrics WHERE metric = 'MemoryTracking'",
|
||||
String.class);
|
||||
} catch (Exception ignored) {}
|
||||
|
||||
int currentQueries = 0;
|
||||
try {
|
||||
Integer q = clickHouseJdbc.queryForObject(
|
||||
"SELECT toInt32(value) FROM system.metrics WHERE metric = 'Query'",
|
||||
Integer.class);
|
||||
if (q != null) currentQueries = q;
|
||||
} catch (Exception ignored) {}
|
||||
|
||||
return new ClickHousePerformanceResponse(
|
||||
(String) row.get("disk_size"),
|
||||
(String) row.get("uncompressed_size"),
|
||||
((Number) row.get("compression_ratio")).doubleValue(),
|
||||
((Number) row.get("total_rows")).longValue(),
|
||||
((Number) row.get("part_count")).intValue(),
|
||||
memory != null ? memory : "N/A",
|
||||
currentQueries);
|
||||
} catch (Exception e) {
|
||||
return new ClickHousePerformanceResponse("N/A", "N/A", 0, 0, 0, "N/A", 0);
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/queries")
|
||||
@Operation(summary = "Active ClickHouse queries")
|
||||
public List<ClickHouseQueryInfo> getQueries() {
|
||||
try {
|
||||
return clickHouseJdbc.query("""
|
||||
SELECT
|
||||
query_id,
|
||||
round(elapsed, 2) AS elapsed_seconds,
|
||||
formatReadableSize(memory_usage) AS memory,
|
||||
read_rows,
|
||||
substring(query, 1, 200) AS query
|
||||
FROM system.processes
|
||||
WHERE is_initial_query = 1
|
||||
AND query NOT LIKE '%system.processes%'
|
||||
ORDER BY elapsed DESC
|
||||
""",
|
||||
(rs, rowNum) -> new ClickHouseQueryInfo(
|
||||
rs.getString("query_id"),
|
||||
rs.getDouble("elapsed_seconds"),
|
||||
rs.getString("memory"),
|
||||
rs.getLong("read_rows"),
|
||||
rs.getString("query")));
|
||||
} catch (Exception e) {
|
||||
return List.of();
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/pipeline")
|
||||
@Operation(summary = "Search indexer pipeline statistics")
|
||||
public IndexerPipelineResponse getPipeline() {
|
||||
return new IndexerPipelineResponse(
|
||||
indexerStats.getQueueDepth(),
|
||||
indexerStats.getMaxQueueSize(),
|
||||
indexerStats.getFailedCount(),
|
||||
indexerStats.getIndexedCount(),
|
||||
indexerStats.getDebounceMs(),
|
||||
indexerStats.getIndexingRate(),
|
||||
indexerStats.getLastIndexedAt());
|
||||
}
|
||||
}
|
||||
@@ -7,7 +7,6 @@ import com.cameleer3.server.app.dto.TableSizeResponse;
|
||||
import com.cameleer3.server.core.admin.AuditCategory;
|
||||
import com.cameleer3.server.core.admin.AuditResult;
|
||||
import com.cameleer3.server.core.admin.AuditService;
|
||||
import com.cameleer3.server.core.ingestion.IngestionService;
|
||||
import com.zaxxer.hikari.HikariDataSource;
|
||||
import com.zaxxer.hikari.HikariPoolMXBean;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
@@ -25,9 +24,7 @@ import org.springframework.web.bind.annotation.RestController;
|
||||
import org.springframework.web.server.ResponseStatusException;
|
||||
|
||||
import javax.sql.DataSource;
|
||||
import java.time.Instant;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/admin/database")
|
||||
@@ -38,14 +35,12 @@ public class DatabaseAdminController {
|
||||
private final JdbcTemplate jdbc;
|
||||
private final DataSource dataSource;
|
||||
private final AuditService auditService;
|
||||
private final IngestionService ingestionService;
|
||||
|
||||
public DatabaseAdminController(JdbcTemplate jdbc, DataSource dataSource,
|
||||
AuditService auditService, IngestionService ingestionService) {
|
||||
AuditService auditService) {
|
||||
this.jdbc = jdbc;
|
||||
this.dataSource = dataSource;
|
||||
this.auditService = auditService;
|
||||
this.ingestionService = ingestionService;
|
||||
}
|
||||
|
||||
@GetMapping("/status")
|
||||
@@ -53,14 +48,12 @@ public class DatabaseAdminController {
|
||||
public ResponseEntity<DatabaseStatusResponse> getStatus() {
|
||||
try {
|
||||
String version = jdbc.queryForObject("SELECT version()", String.class);
|
||||
boolean timescaleDb = Boolean.TRUE.equals(
|
||||
jdbc.queryForObject("SELECT EXISTS(SELECT 1 FROM pg_extension WHERE extname = 'timescaledb')", Boolean.class));
|
||||
String schema = jdbc.queryForObject("SELECT current_schema()", String.class);
|
||||
String host = extractHost(dataSource);
|
||||
return ResponseEntity.ok(new DatabaseStatusResponse(true, version, host, schema, timescaleDb));
|
||||
return ResponseEntity.ok(new DatabaseStatusResponse(true, version, host, schema));
|
||||
} catch (Exception e) {
|
||||
return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE)
|
||||
.body(new DatabaseStatusResponse(false, null, null, null, false));
|
||||
.body(new DatabaseStatusResponse(false, null, null, null));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -124,29 +117,6 @@ public class DatabaseAdminController {
|
||||
return ResponseEntity.ok().build();
|
||||
}
|
||||
|
||||
@GetMapping("/metrics-pipeline")
|
||||
@Operation(summary = "Get metrics ingestion pipeline diagnostics")
|
||||
public ResponseEntity<Map<String, Object>> getMetricsPipeline() {
|
||||
int bufferDepth = ingestionService.getMetricsBufferDepth();
|
||||
|
||||
Long totalRows = jdbc.queryForObject(
|
||||
"SELECT count(*) FROM agent_metrics", Long.class);
|
||||
List<String> agentIds = jdbc.queryForList(
|
||||
"SELECT DISTINCT agent_id FROM agent_metrics ORDER BY agent_id", String.class);
|
||||
Instant latestCollected = jdbc.queryForObject(
|
||||
"SELECT max(collected_at) FROM agent_metrics", Instant.class);
|
||||
List<String> metricNames = jdbc.queryForList(
|
||||
"SELECT DISTINCT metric_name FROM agent_metrics ORDER BY metric_name", String.class);
|
||||
|
||||
return ResponseEntity.ok(Map.of(
|
||||
"bufferDepth", bufferDepth,
|
||||
"totalRows", totalRows != null ? totalRows : 0,
|
||||
"distinctAgents", agentIds,
|
||||
"distinctMetrics", metricNames,
|
||||
"latestCollectedAt", latestCollected != null ? latestCollected.toString() : "none"
|
||||
));
|
||||
}
|
||||
|
||||
private String extractHost(DataSource ds) {
|
||||
try {
|
||||
if (ds instanceof HikariDataSource hds) {
|
||||
|
||||
@@ -0,0 +1,135 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.server.app.runtime.DeploymentExecutor;
|
||||
import com.cameleer3.server.core.runtime.*;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.PathVariable;
|
||||
import org.springframework.web.bind.annotation.PostMapping;
|
||||
import org.springframework.web.bind.annotation.RequestBody;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.UUID;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Deployment management: deploy, stop, promote, and view logs.
|
||||
* All app-scoped endpoints accept the app slug (not UUID) as path variable.
|
||||
* Protected by {@code ROLE_OPERATOR} or {@code ROLE_ADMIN}.
|
||||
*/
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/apps/{appSlug}/deployments")
|
||||
@Tag(name = "Deployment Management", description = "Deploy, stop, restart, promote, and view logs")
|
||||
@PreAuthorize("hasAnyRole('OPERATOR', 'ADMIN')")
|
||||
public class DeploymentController {
|
||||
|
||||
private final DeploymentService deploymentService;
|
||||
private final DeploymentExecutor deploymentExecutor;
|
||||
private final RuntimeOrchestrator orchestrator;
|
||||
private final AppService appService;
|
||||
|
||||
public DeploymentController(DeploymentService deploymentService,
|
||||
DeploymentExecutor deploymentExecutor,
|
||||
RuntimeOrchestrator orchestrator,
|
||||
AppService appService) {
|
||||
this.deploymentService = deploymentService;
|
||||
this.deploymentExecutor = deploymentExecutor;
|
||||
this.orchestrator = orchestrator;
|
||||
this.appService = appService;
|
||||
}
|
||||
|
||||
@GetMapping
|
||||
@Operation(summary = "List deployments for an app")
|
||||
@ApiResponse(responseCode = "200", description = "Deployment list returned")
|
||||
public ResponseEntity<List<Deployment>> listDeployments(@PathVariable String appSlug) {
|
||||
try {
|
||||
App app = appService.getBySlug(appSlug);
|
||||
return ResponseEntity.ok(deploymentService.listByApp(app.id()));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/{deploymentId}")
|
||||
@Operation(summary = "Get deployment by ID")
|
||||
@ApiResponse(responseCode = "200", description = "Deployment found")
|
||||
@ApiResponse(responseCode = "404", description = "Deployment not found")
|
||||
public ResponseEntity<Deployment> getDeployment(@PathVariable String appSlug, @PathVariable UUID deploymentId) {
|
||||
try {
|
||||
return ResponseEntity.ok(deploymentService.getById(deploymentId));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping
|
||||
@Operation(summary = "Create and start a new deployment")
|
||||
@ApiResponse(responseCode = "202", description = "Deployment accepted and starting")
|
||||
public ResponseEntity<Deployment> deploy(@PathVariable String appSlug, @RequestBody DeployRequest request) {
|
||||
try {
|
||||
App app = appService.getBySlug(appSlug);
|
||||
Deployment deployment = deploymentService.createDeployment(app.id(), request.appVersionId(), request.environmentId());
|
||||
deploymentExecutor.executeAsync(deployment);
|
||||
return ResponseEntity.accepted().body(deployment);
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/{deploymentId}/stop")
|
||||
@Operation(summary = "Stop a running deployment")
|
||||
@ApiResponse(responseCode = "200", description = "Deployment stopped")
|
||||
@ApiResponse(responseCode = "404", description = "Deployment not found")
|
||||
public ResponseEntity<Deployment> stop(@PathVariable String appSlug, @PathVariable UUID deploymentId) {
|
||||
try {
|
||||
Deployment deployment = deploymentService.getById(deploymentId);
|
||||
deploymentExecutor.stopDeployment(deployment);
|
||||
return ResponseEntity.ok(deploymentService.getById(deploymentId));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping("/{deploymentId}/promote")
|
||||
@Operation(summary = "Promote deployment to a different environment")
|
||||
@ApiResponse(responseCode = "202", description = "Promotion accepted and starting")
|
||||
@ApiResponse(responseCode = "404", description = "Deployment not found")
|
||||
public ResponseEntity<Deployment> promote(@PathVariable String appSlug, @PathVariable UUID deploymentId,
|
||||
@RequestBody PromoteRequest request) {
|
||||
try {
|
||||
App app = appService.getBySlug(appSlug);
|
||||
Deployment source = deploymentService.getById(deploymentId);
|
||||
Deployment promoted = deploymentService.promote(app.id(), source.appVersionId(), request.targetEnvironmentId());
|
||||
deploymentExecutor.executeAsync(promoted);
|
||||
return ResponseEntity.accepted().body(promoted);
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/{deploymentId}/logs")
|
||||
@Operation(summary = "Get container logs for a deployment")
|
||||
@ApiResponse(responseCode = "200", description = "Logs returned")
|
||||
@ApiResponse(responseCode = "404", description = "Deployment not found or no container")
|
||||
public ResponseEntity<List<String>> getLogs(@PathVariable String appSlug, @PathVariable UUID deploymentId) {
|
||||
try {
|
||||
Deployment deployment = deploymentService.getById(deploymentId);
|
||||
if (deployment.containerId() == null) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
List<String> logs = orchestrator.getLogs(deployment.containerId(), 200).collect(Collectors.toList());
|
||||
return ResponseEntity.ok(logs);
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
public record DeployRequest(UUID appVersionId, UUID environmentId) {}
|
||||
public record PromoteRequest(UUID targetEnvironmentId) {}
|
||||
}
|
||||
@@ -81,4 +81,16 @@ public class DetailController {
|
||||
.map(ResponseEntity::ok)
|
||||
.orElse(ResponseEntity.notFound().build());
|
||||
}
|
||||
|
||||
@GetMapping("/{executionId}/processors/by-seq/{seq}/snapshot")
|
||||
@Operation(summary = "Get exchange snapshot for a processor by seq number")
|
||||
@ApiResponse(responseCode = "200", description = "Snapshot data")
|
||||
@ApiResponse(responseCode = "404", description = "Snapshot not found")
|
||||
public ResponseEntity<Map<String, String>> processorSnapshotBySeq(
|
||||
@PathVariable String executionId,
|
||||
@PathVariable int seq) {
|
||||
return detailService.getProcessorSnapshotBySeq(executionId, seq)
|
||||
.map(ResponseEntity::ok)
|
||||
.orElse(ResponseEntity.notFound().build());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,8 +11,6 @@ import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.core.Authentication;
|
||||
import org.springframework.security.core.context.SecurityContextHolder;
|
||||
@@ -34,8 +32,6 @@ import java.util.List;
|
||||
@Tag(name = "Ingestion", description = "Data ingestion endpoints")
|
||||
public class DiagramController {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(DiagramController.class);
|
||||
|
||||
private final IngestionService ingestionService;
|
||||
private final AgentRegistryService registryService;
|
||||
private final ObjectMapper objectMapper;
|
||||
@@ -53,12 +49,12 @@ public class DiagramController {
|
||||
description = "Accepts a single RouteGraph or an array of RouteGraphs")
|
||||
@ApiResponse(responseCode = "202", description = "Data accepted for processing")
|
||||
public ResponseEntity<Void> ingestDiagrams(@RequestBody String body) throws JsonProcessingException {
|
||||
String agentId = extractAgentId();
|
||||
String applicationName = resolveApplicationName(agentId);
|
||||
String instanceId = extractAgentId();
|
||||
String applicationId = resolveApplicationId(instanceId);
|
||||
List<RouteGraph> graphs = parsePayload(body);
|
||||
|
||||
for (RouteGraph graph : graphs) {
|
||||
ingestionService.ingestDiagram(new TaggedDiagram(agentId, applicationName, graph));
|
||||
ingestionService.ingestDiagram(new TaggedDiagram(instanceId, applicationId, graph));
|
||||
}
|
||||
|
||||
return ResponseEntity.accepted().build();
|
||||
@@ -69,9 +65,9 @@ public class DiagramController {
|
||||
return auth != null ? auth.getName() : "";
|
||||
}
|
||||
|
||||
private String resolveApplicationName(String agentId) {
|
||||
AgentInfo agent = registryService.findById(agentId);
|
||||
return agent != null ? agent.application() : "";
|
||||
private String resolveApplicationId(String instanceId) {
|
||||
AgentInfo agent = registryService.findById(instanceId);
|
||||
return agent != null ? agent.applicationId() : "";
|
||||
}
|
||||
|
||||
private List<RouteGraph> parsePayload(String body) throws JsonProcessingException {
|
||||
|
||||
@@ -100,7 +100,7 @@ public class DiagramRenderController {
|
||||
@RequestParam String routeId,
|
||||
@RequestParam(defaultValue = "LR") String direction) {
|
||||
List<String> agentIds = registryService.findByApplication(application).stream()
|
||||
.map(AgentInfo::id)
|
||||
.map(AgentInfo::instanceId)
|
||||
.toList();
|
||||
|
||||
if (agentIds.isEmpty()) {
|
||||
|
||||
@@ -0,0 +1,127 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.server.core.runtime.Environment;
|
||||
import com.cameleer3.server.core.runtime.EnvironmentService;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.web.bind.annotation.*;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/admin/environments")
|
||||
@Tag(name = "Environment Admin", description = "Environment management (ADMIN only)")
|
||||
@PreAuthorize("hasRole('ADMIN')")
|
||||
public class EnvironmentAdminController {
|
||||
|
||||
private final EnvironmentService environmentService;
|
||||
|
||||
public EnvironmentAdminController(EnvironmentService environmentService) {
|
||||
this.environmentService = environmentService;
|
||||
}
|
||||
|
||||
@GetMapping
|
||||
@Operation(summary = "List all environments")
|
||||
@PreAuthorize("isAuthenticated()")
|
||||
public ResponseEntity<List<Environment>> listEnvironments() {
|
||||
return ResponseEntity.ok(environmentService.listAll());
|
||||
}
|
||||
|
||||
@GetMapping("/{id}")
|
||||
@Operation(summary = "Get environment by ID")
|
||||
@ApiResponse(responseCode = "200", description = "Environment found")
|
||||
@ApiResponse(responseCode = "404", description = "Environment not found")
|
||||
public ResponseEntity<Environment> getEnvironment(@PathVariable UUID id) {
|
||||
try {
|
||||
return ResponseEntity.ok(environmentService.getById(id));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PostMapping
|
||||
@Operation(summary = "Create a new environment")
|
||||
@ApiResponse(responseCode = "201", description = "Environment created")
|
||||
@ApiResponse(responseCode = "400", description = "Slug already exists")
|
||||
public ResponseEntity<?> createEnvironment(@RequestBody CreateEnvironmentRequest request) {
|
||||
try {
|
||||
UUID id = environmentService.create(request.slug(), request.displayName(), request.production());
|
||||
return ResponseEntity.status(201).body(environmentService.getById(id));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.badRequest().body(Map.of("error", e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
@PutMapping("/{id}")
|
||||
@Operation(summary = "Update an environment")
|
||||
@ApiResponse(responseCode = "200", description = "Environment updated")
|
||||
@ApiResponse(responseCode = "404", description = "Environment not found")
|
||||
public ResponseEntity<?> updateEnvironment(@PathVariable UUID id, @RequestBody UpdateEnvironmentRequest request) {
|
||||
try {
|
||||
environmentService.update(id, request.displayName(), request.production(), request.enabled());
|
||||
return ResponseEntity.ok(environmentService.getById(id));
|
||||
} catch (IllegalArgumentException e) {
|
||||
if (e.getMessage().contains("not found")) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
return ResponseEntity.badRequest().body(Map.of("error", e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
@DeleteMapping("/{id}")
|
||||
@Operation(summary = "Delete an environment")
|
||||
@ApiResponse(responseCode = "204", description = "Environment deleted")
|
||||
@ApiResponse(responseCode = "400", description = "Cannot delete default environment")
|
||||
@ApiResponse(responseCode = "404", description = "Environment not found")
|
||||
public ResponseEntity<?> deleteEnvironment(@PathVariable UUID id) {
|
||||
try {
|
||||
environmentService.delete(id);
|
||||
return ResponseEntity.noContent().build();
|
||||
} catch (IllegalArgumentException e) {
|
||||
if (e.getMessage().contains("not found")) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
return ResponseEntity.badRequest().body(Map.of("error", e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
@PutMapping("/{id}/default-container-config")
|
||||
@Operation(summary = "Update default container config for an environment")
|
||||
@ApiResponse(responseCode = "200", description = "Default container config updated")
|
||||
@ApiResponse(responseCode = "404", description = "Environment not found")
|
||||
public ResponseEntity<?> updateDefaultContainerConfig(@PathVariable UUID id,
|
||||
@RequestBody Map<String, Object> defaultContainerConfig) {
|
||||
try {
|
||||
environmentService.updateDefaultContainerConfig(id, defaultContainerConfig);
|
||||
return ResponseEntity.ok(environmentService.getById(id));
|
||||
} catch (IllegalArgumentException e) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
}
|
||||
|
||||
@PutMapping("/{id}/jar-retention")
|
||||
@Operation(summary = "Update JAR retention policy for an environment")
|
||||
@ApiResponse(responseCode = "200", description = "Retention policy updated")
|
||||
@ApiResponse(responseCode = "404", description = "Environment not found")
|
||||
public ResponseEntity<?> updateJarRetention(@PathVariable UUID id,
|
||||
@RequestBody JarRetentionRequest request) {
|
||||
try {
|
||||
environmentService.updateJarRetentionCount(id, request.jarRetentionCount());
|
||||
return ResponseEntity.ok(environmentService.getById(id));
|
||||
} catch (IllegalArgumentException e) {
|
||||
if (e.getMessage().contains("not found")) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
return ResponseEntity.badRequest().body(Map.of("error", e.getMessage()));
|
||||
}
|
||||
}
|
||||
|
||||
public record CreateEnvironmentRequest(String slug, String displayName, boolean production) {}
|
||||
public record UpdateEnvironmentRequest(String displayName, boolean production, boolean enabled) {}
|
||||
public record JarRetentionRequest(Integer jarRetentionCount) {}
|
||||
}
|
||||
@@ -0,0 +1,119 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.common.model.AgentEvent;
|
||||
import com.cameleer3.server.core.agent.AgentEventService;
|
||||
import com.cameleer3.server.core.agent.AgentInfo;
|
||||
import com.cameleer3.server.core.agent.AgentRegistryService;
|
||||
import com.cameleer3.server.core.agent.RouteStateRegistry;
|
||||
import com.fasterxml.jackson.core.type.TypeReference;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.core.Authentication;
|
||||
import org.springframework.security.core.context.SecurityContextHolder;
|
||||
import org.springframework.web.bind.annotation.PostMapping;
|
||||
import org.springframework.web.bind.annotation.RequestBody;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Ingestion endpoint for agent lifecycle events.
|
||||
* <p>
|
||||
* Agents emit events (AGENT_STARTED, AGENT_STOPPED, etc.) which are
|
||||
* stored in the event log. AGENT_STOPPED triggers a graceful shutdown
|
||||
* transition in the registry.
|
||||
*/
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/data")
|
||||
@Tag(name = "Ingestion", description = "Data ingestion endpoints")
|
||||
public class EventIngestionController {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(EventIngestionController.class);
|
||||
|
||||
private final AgentEventService agentEventService;
|
||||
private final AgentRegistryService registryService;
|
||||
private final ObjectMapper objectMapper;
|
||||
private final RouteStateRegistry routeStateRegistry;
|
||||
|
||||
public EventIngestionController(AgentEventService agentEventService,
|
||||
AgentRegistryService registryService,
|
||||
ObjectMapper objectMapper,
|
||||
RouteStateRegistry routeStateRegistry) {
|
||||
this.agentEventService = agentEventService;
|
||||
this.registryService = registryService;
|
||||
this.objectMapper = objectMapper;
|
||||
this.routeStateRegistry = routeStateRegistry;
|
||||
}
|
||||
|
||||
@PostMapping("/events")
|
||||
@Operation(summary = "Ingest agent events")
|
||||
public ResponseEntity<Void> ingestEvents(@RequestBody String body) {
|
||||
String instanceId = extractInstanceId();
|
||||
|
||||
List<AgentEvent> events;
|
||||
try {
|
||||
String trimmed = body.strip();
|
||||
if (trimmed.startsWith("[")) {
|
||||
events = objectMapper.readValue(trimmed, new TypeReference<List<AgentEvent>>() {});
|
||||
} else {
|
||||
events = List.of(objectMapper.readValue(trimmed, AgentEvent.class));
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to parse event payload: {}", e.getMessage());
|
||||
return ResponseEntity.badRequest().build();
|
||||
}
|
||||
|
||||
AgentInfo agent = registryService.findById(instanceId);
|
||||
String applicationId = agent != null ? agent.applicationId() : "";
|
||||
|
||||
for (AgentEvent event : events) {
|
||||
agentEventService.recordEvent(instanceId, applicationId,
|
||||
event.getEventType(),
|
||||
event.getDetails() != null ? event.getDetails().toString() : null);
|
||||
|
||||
if ("AGENT_STOPPED".equals(event.getEventType())) {
|
||||
log.info("Agent {} reported graceful shutdown", instanceId);
|
||||
registryService.shutdown(instanceId);
|
||||
}
|
||||
|
||||
if ("ROUTE_STATE_CHANGED".equals(event.getEventType())) {
|
||||
Map<String, String> details = event.getDetails();
|
||||
if (details != null) {
|
||||
String routeId = details.get("routeId");
|
||||
String newState = details.get("newState");
|
||||
if (routeId != null && newState != null) {
|
||||
RouteStateRegistry.RouteState state = parseRouteState(newState);
|
||||
if (state != null) {
|
||||
routeStateRegistry.setState(applicationId, routeId, state);
|
||||
log.debug("Route state changed: {}/{} -> {} (reason: {})",
|
||||
applicationId, routeId, newState, details.get("reason"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ResponseEntity.accepted().build();
|
||||
}
|
||||
|
||||
private RouteStateRegistry.RouteState parseRouteState(String state) {
|
||||
if (state == null) return null;
|
||||
return switch (state) {
|
||||
case "Started" -> RouteStateRegistry.RouteState.STARTED;
|
||||
case "Stopped" -> RouteStateRegistry.RouteState.STOPPED;
|
||||
case "Suspended" -> RouteStateRegistry.RouteState.SUSPENDED;
|
||||
default -> null;
|
||||
};
|
||||
}
|
||||
|
||||
private String extractInstanceId() {
|
||||
Authentication auth = SecurityContextHolder.getContext().getAuthentication();
|
||||
return auth != null ? auth.getName() : "";
|
||||
}
|
||||
}
|
||||
@@ -3,6 +3,7 @@ package com.cameleer3.server.app.controller;
|
||||
import com.cameleer3.common.model.RouteExecution;
|
||||
import com.cameleer3.server.core.agent.AgentInfo;
|
||||
import com.cameleer3.server.core.agent.AgentRegistryService;
|
||||
import com.cameleer3.server.core.ingestion.ChunkAccumulator;
|
||||
import com.cameleer3.server.core.ingestion.IngestionService;
|
||||
import com.fasterxml.jackson.core.JsonProcessingException;
|
||||
import com.fasterxml.jackson.core.type.TypeReference;
|
||||
@@ -10,8 +11,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.core.Authentication;
|
||||
import org.springframework.security.core.context.SecurityContextHolder;
|
||||
@@ -23,18 +23,20 @@ import org.springframework.web.bind.annotation.RestController;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Ingestion endpoint for route execution data.
|
||||
* Legacy ingestion endpoint for route execution data (PostgreSQL path).
|
||||
* <p>
|
||||
* Accepts both single {@link RouteExecution} and arrays. Data is written
|
||||
* synchronously to PostgreSQL via {@link IngestionService}.
|
||||
* <p>
|
||||
* Only active when ClickHouse is disabled — when ClickHouse is enabled,
|
||||
* {@link ChunkIngestionController} takes over the {@code /executions} mapping.
|
||||
*/
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/data")
|
||||
@ConditionalOnMissingBean(ChunkAccumulator.class)
|
||||
@Tag(name = "Ingestion", description = "Data ingestion endpoints")
|
||||
public class ExecutionController {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(ExecutionController.class);
|
||||
|
||||
private final IngestionService ingestionService;
|
||||
private final AgentRegistryService registryService;
|
||||
private final ObjectMapper objectMapper;
|
||||
@@ -52,12 +54,12 @@ public class ExecutionController {
|
||||
description = "Accepts a single RouteExecution or an array of RouteExecutions")
|
||||
@ApiResponse(responseCode = "202", description = "Data accepted for processing")
|
||||
public ResponseEntity<Void> ingestExecutions(@RequestBody String body) throws JsonProcessingException {
|
||||
String agentId = extractAgentId();
|
||||
String applicationName = resolveApplicationName(agentId);
|
||||
String instanceId = extractAgentId();
|
||||
String applicationId = resolveApplicationId(instanceId);
|
||||
List<RouteExecution> executions = parsePayload(body);
|
||||
|
||||
for (RouteExecution execution : executions) {
|
||||
ingestionService.ingestExecution(agentId, applicationName, execution);
|
||||
ingestionService.ingestExecution(instanceId, applicationId, execution);
|
||||
}
|
||||
|
||||
return ResponseEntity.accepted().build();
|
||||
@@ -68,9 +70,9 @@ public class ExecutionController {
|
||||
return auth != null ? auth.getName() : "";
|
||||
}
|
||||
|
||||
private String resolveApplicationName(String agentId) {
|
||||
AgentInfo agent = registryService.findById(agentId);
|
||||
return agent != null ? agent.application() : "";
|
||||
private String resolveApplicationId(String instanceId) {
|
||||
AgentInfo agent = registryService.findById(instanceId);
|
||||
return agent != null ? agent.applicationId() : "";
|
||||
}
|
||||
|
||||
private List<RouteExecution> parsePayload(String body) throws JsonProcessingException {
|
||||
|
||||
@@ -7,10 +7,12 @@ import com.cameleer3.server.core.rbac.GroupDetail;
|
||||
import com.cameleer3.server.core.rbac.GroupRepository;
|
||||
import com.cameleer3.server.core.rbac.GroupSummary;
|
||||
import com.cameleer3.server.core.rbac.RbacService;
|
||||
import com.cameleer3.server.core.rbac.SystemRole;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import org.springframework.http.HttpStatus;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.web.bind.annotation.DeleteMapping;
|
||||
@@ -21,6 +23,7 @@ import org.springframework.web.bind.annotation.PutMapping;
|
||||
import org.springframework.web.bind.annotation.RequestBody;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
import org.springframework.web.server.ResponseStatusException;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
@@ -39,14 +42,14 @@ import java.util.UUID;
|
||||
public class GroupAdminController {
|
||||
|
||||
private final GroupRepository groupRepository;
|
||||
private final RbacService rbacService;
|
||||
private final AuditService auditService;
|
||||
private final RbacService rbacService;
|
||||
|
||||
public GroupAdminController(GroupRepository groupRepository, RbacService rbacService,
|
||||
AuditService auditService) {
|
||||
public GroupAdminController(GroupRepository groupRepository, AuditService auditService,
|
||||
RbacService rbacService) {
|
||||
this.groupRepository = groupRepository;
|
||||
this.rbacService = rbacService;
|
||||
this.auditService = auditService;
|
||||
this.rbacService = rbacService;
|
||||
}
|
||||
|
||||
@GetMapping
|
||||
@@ -156,6 +159,10 @@ public class GroupAdminController {
|
||||
if (groupRepository.findById(id).isEmpty()) {
|
||||
return ResponseEntity.notFound().build();
|
||||
}
|
||||
if (SystemRole.ADMIN_ID.equals(roleId) && rbacService.getEffectivePrincipalsForRole(SystemRole.ADMIN_ID).size() <= 1) {
|
||||
throw new ResponseStatusException(HttpStatus.CONFLICT,
|
||||
"Cannot remove the ADMIN role: at least one admin user must exist");
|
||||
}
|
||||
groupRepository.removeRole(id, roleId);
|
||||
auditService.log("remove_role_from_group", AuditCategory.RBAC, id.toString(),
|
||||
Map.of("roleId", roleId), AuditResult.SUCCESS, httpRequest);
|
||||
|
||||
@@ -0,0 +1,53 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.server.core.license.LicenseGate;
|
||||
import com.cameleer3.server.core.license.LicenseInfo;
|
||||
import com.cameleer3.server.core.license.LicenseValidator;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.web.bind.annotation.*;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/admin/license")
|
||||
@PreAuthorize("hasRole('ADMIN')")
|
||||
@Tag(name = "License Admin", description = "License management")
|
||||
public class LicenseAdminController {
|
||||
|
||||
private final LicenseGate licenseGate;
|
||||
private final String licensePublicKey;
|
||||
|
||||
public LicenseAdminController(LicenseGate licenseGate,
|
||||
@Value("${license.public-key:}") String licensePublicKey) {
|
||||
this.licenseGate = licenseGate;
|
||||
this.licensePublicKey = licensePublicKey;
|
||||
}
|
||||
|
||||
@GetMapping
|
||||
@Operation(summary = "Get current license info")
|
||||
public ResponseEntity<LicenseInfo> getCurrent() {
|
||||
return ResponseEntity.ok(licenseGate.getCurrent());
|
||||
}
|
||||
|
||||
record UpdateLicenseRequest(String token) {}
|
||||
|
||||
@PostMapping
|
||||
@Operation(summary = "Update license token at runtime")
|
||||
public ResponseEntity<?> update(@RequestBody UpdateLicenseRequest request) {
|
||||
if (licensePublicKey == null || licensePublicKey.isBlank()) {
|
||||
return ResponseEntity.badRequest().body(Map.of("error", "No license public key configured"));
|
||||
}
|
||||
try {
|
||||
LicenseValidator validator = new LicenseValidator(licensePublicKey);
|
||||
LicenseInfo info = validator.validate(request.token());
|
||||
licenseGate.load(info);
|
||||
return ResponseEntity.ok(info);
|
||||
} catch (Exception e) {
|
||||
return ResponseEntity.badRequest().body(Map.of("error", e.getMessage()));
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,12 +1,14 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.common.model.LogBatch;
|
||||
import com.cameleer3.server.app.search.OpenSearchLogIndex;
|
||||
import com.cameleer3.server.core.ingestion.BufferedLogEntry;
|
||||
import com.cameleer3.server.core.ingestion.WriteBuffer;
|
||||
import com.cameleer3.server.core.agent.AgentInfo;
|
||||
import com.cameleer3.server.core.agent.AgentRegistryService;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import com.cameleer3.server.app.config.TenantProperties;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
@@ -24,26 +26,33 @@ public class LogIngestionController {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(LogIngestionController.class);
|
||||
|
||||
private final OpenSearchLogIndex logIndex;
|
||||
private final WriteBuffer<BufferedLogEntry> logBuffer;
|
||||
private final AgentRegistryService registryService;
|
||||
private final TenantProperties tenantProperties;
|
||||
|
||||
public LogIngestionController(OpenSearchLogIndex logIndex,
|
||||
AgentRegistryService registryService) {
|
||||
this.logIndex = logIndex;
|
||||
public LogIngestionController(WriteBuffer<BufferedLogEntry> logBuffer,
|
||||
AgentRegistryService registryService,
|
||||
TenantProperties tenantProperties) {
|
||||
this.logBuffer = logBuffer;
|
||||
this.registryService = registryService;
|
||||
this.tenantProperties = tenantProperties;
|
||||
}
|
||||
|
||||
@PostMapping("/logs")
|
||||
@Operation(summary = "Ingest application log entries",
|
||||
description = "Accepts a batch of log entries from an agent. Entries are indexed in OpenSearch.")
|
||||
description = "Accepts a batch of log entries from an agent. Entries are buffered and flushed periodically.")
|
||||
@ApiResponse(responseCode = "202", description = "Logs accepted for indexing")
|
||||
public ResponseEntity<Void> ingestLogs(@RequestBody LogBatch batch) {
|
||||
String agentId = extractAgentId();
|
||||
String application = resolveApplicationName(agentId);
|
||||
String instanceId = extractAgentId();
|
||||
String applicationId = resolveApplicationId(instanceId);
|
||||
|
||||
if (batch.getEntries() != null && !batch.getEntries().isEmpty()) {
|
||||
log.debug("Received {} log entries from agent={}, app={}", batch.getEntries().size(), agentId, application);
|
||||
logIndex.indexBatch(agentId, application, batch.getEntries());
|
||||
log.debug("Received {} log entries from instance={}, app={}", batch.getEntries().size(), instanceId, applicationId);
|
||||
String environment = resolveEnvironment(instanceId);
|
||||
for (var entry : batch.getEntries()) {
|
||||
logBuffer.offerOrWarn(new BufferedLogEntry(
|
||||
tenantProperties.getId(), environment, instanceId, applicationId, entry));
|
||||
}
|
||||
}
|
||||
|
||||
return ResponseEntity.accepted().build();
|
||||
@@ -54,8 +63,13 @@ public class LogIngestionController {
|
||||
return auth != null ? auth.getName() : "";
|
||||
}
|
||||
|
||||
private String resolveApplicationName(String agentId) {
|
||||
AgentInfo agent = registryService.findById(agentId);
|
||||
return agent != null ? agent.application() : "";
|
||||
private String resolveApplicationId(String instanceId) {
|
||||
AgentInfo agent = registryService.findById(instanceId);
|
||||
return agent != null ? agent.applicationId() : "";
|
||||
}
|
||||
|
||||
private String resolveEnvironment(String instanceId) {
|
||||
AgentInfo agent = registryService.findById(instanceId);
|
||||
return agent != null && agent.environmentId() != null ? agent.environmentId() : "default";
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.server.app.dto.LogEntryResponse;
|
||||
import com.cameleer3.server.app.search.OpenSearchLogIndex;
|
||||
import com.cameleer3.server.app.dto.LogSearchPageResponse;
|
||||
import com.cameleer3.server.core.search.LogSearchRequest;
|
||||
import com.cameleer3.server.core.search.LogSearchResponse;
|
||||
import com.cameleer3.server.core.storage.LogIndex;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
@@ -11,40 +14,69 @@ import org.springframework.web.bind.annotation.RequestParam;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
|
||||
import java.time.Instant;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/logs")
|
||||
@Tag(name = "Application Logs", description = "Query application logs stored in OpenSearch")
|
||||
@Tag(name = "Application Logs", description = "Query application logs")
|
||||
public class LogQueryController {
|
||||
|
||||
private final OpenSearchLogIndex logIndex;
|
||||
private final LogIndex logIndex;
|
||||
|
||||
public LogQueryController(OpenSearchLogIndex logIndex) {
|
||||
public LogQueryController(LogIndex logIndex) {
|
||||
this.logIndex = logIndex;
|
||||
}
|
||||
|
||||
@GetMapping
|
||||
@Operation(summary = "Search application log entries",
|
||||
description = "Returns log entries for a given application, optionally filtered by agent, level, time range, and text query")
|
||||
public ResponseEntity<List<LogEntryResponse>> searchLogs(
|
||||
@RequestParam String application,
|
||||
@RequestParam(required = false) String agentId,
|
||||
@RequestParam(required = false) String level,
|
||||
description = "Returns log entries with cursor-based pagination and level count aggregation. " +
|
||||
"Supports free-text search, multi-level filtering, and optional application scoping.")
|
||||
public ResponseEntity<LogSearchPageResponse> searchLogs(
|
||||
@RequestParam(required = false) String q,
|
||||
@RequestParam(required = false) String query,
|
||||
@RequestParam(required = false) String level,
|
||||
@RequestParam(required = false) String application,
|
||||
@RequestParam(name = "agentId", required = false) String instanceId,
|
||||
@RequestParam(required = false) String exchangeId,
|
||||
@RequestParam(required = false) String logger,
|
||||
@RequestParam(required = false) String environment,
|
||||
@RequestParam(required = false) String from,
|
||||
@RequestParam(required = false) String to,
|
||||
@RequestParam(defaultValue = "200") int limit) {
|
||||
@RequestParam(required = false) String cursor,
|
||||
@RequestParam(defaultValue = "100") int limit,
|
||||
@RequestParam(defaultValue = "desc") String sort) {
|
||||
|
||||
limit = Math.min(limit, 1000);
|
||||
// q takes precedence over deprecated query param
|
||||
String searchText = q != null ? q : query;
|
||||
|
||||
// Parse CSV levels
|
||||
List<String> levels = List.of();
|
||||
if (level != null && !level.isEmpty()) {
|
||||
levels = Arrays.stream(level.split(","))
|
||||
.map(String::trim)
|
||||
.filter(s -> !s.isEmpty())
|
||||
.toList();
|
||||
}
|
||||
|
||||
Instant fromInstant = from != null ? Instant.parse(from) : null;
|
||||
Instant toInstant = to != null ? Instant.parse(to) : null;
|
||||
|
||||
List<LogEntryResponse> entries = logIndex.search(
|
||||
application, agentId, level, query, exchangeId, fromInstant, toInstant, limit);
|
||||
LogSearchRequest request = new LogSearchRequest(
|
||||
searchText, levels, application, instanceId, exchangeId,
|
||||
logger, environment, fromInstant, toInstant, cursor, limit, sort);
|
||||
|
||||
return ResponseEntity.ok(entries);
|
||||
LogSearchResponse result = logIndex.search(request);
|
||||
|
||||
List<LogEntryResponse> entries = result.data().stream()
|
||||
.map(r -> new LogEntryResponse(
|
||||
r.timestamp(), r.level(), r.loggerName(),
|
||||
r.message(), r.threadName(), r.stackTrace(),
|
||||
r.exchangeId(), r.instanceId(), r.application(),
|
||||
r.mdc()))
|
||||
.toList();
|
||||
|
||||
return ResponseEntity.ok(new LogSearchPageResponse(
|
||||
entries, result.nextCursor(), result.hasMore(), result.levelCounts()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -98,10 +98,13 @@ public class OidcConfigAdminController {
|
||||
request.issuerUri() != null ? request.issuerUri() : "",
|
||||
request.clientId() != null ? request.clientId() : "",
|
||||
clientSecret,
|
||||
request.rolesClaim() != null ? request.rolesClaim() : "realm_access.roles",
|
||||
request.rolesClaim() != null ? request.rolesClaim() : "roles",
|
||||
request.defaultRoles() != null ? request.defaultRoles() : List.of("VIEWER"),
|
||||
request.autoSignup(),
|
||||
request.displayNameClaim() != null ? request.displayNameClaim() : "name"
|
||||
request.displayNameClaim() != null ? request.displayNameClaim() : "name",
|
||||
request.userIdClaim() != null ? request.userIdClaim() : "sub",
|
||||
request.audience() != null ? request.audience() : "",
|
||||
request.additionalScopes() != null ? request.additionalScopes() : List.of()
|
||||
);
|
||||
|
||||
configRepository.save(config);
|
||||
|
||||
@@ -1,266 +0,0 @@
|
||||
package com.cameleer3.server.app.controller;
|
||||
|
||||
import com.cameleer3.server.app.dto.IndexInfoResponse;
|
||||
import com.cameleer3.server.app.dto.IndicesPageResponse;
|
||||
import com.cameleer3.server.app.dto.OpenSearchStatusResponse;
|
||||
import com.cameleer3.server.app.dto.PerformanceResponse;
|
||||
import com.cameleer3.server.app.dto.PipelineStatsResponse;
|
||||
import com.cameleer3.server.core.admin.AuditCategory;
|
||||
import com.cameleer3.server.core.admin.AuditResult;
|
||||
import com.cameleer3.server.core.admin.AuditService;
|
||||
import com.cameleer3.server.core.indexing.SearchIndexerStats;
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import org.opensearch.client.Request;
|
||||
import org.opensearch.client.Response;
|
||||
import org.opensearch.client.RestClient;
|
||||
import org.opensearch.client.opensearch.OpenSearchClient;
|
||||
import org.opensearch.client.opensearch.cluster.HealthResponse;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.http.HttpStatus;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.web.bind.annotation.DeleteMapping;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.PathVariable;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RequestParam;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
import org.springframework.web.server.ResponseStatusException;
|
||||
|
||||
import java.io.InputStream;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
|
||||
@RestController
|
||||
@RequestMapping("/api/v1/admin/opensearch")
|
||||
@PreAuthorize("hasRole('ADMIN')")
|
||||
@Tag(name = "OpenSearch Admin", description = "OpenSearch monitoring and management (ADMIN only)")
|
||||
public class OpenSearchAdminController {
|
||||
|
||||
private final OpenSearchClient client;
|
||||
private final RestClient restClient;
|
||||
private final SearchIndexerStats indexerStats;
|
||||
private final AuditService auditService;
|
||||
private final ObjectMapper objectMapper;
|
||||
private final String opensearchUrl;
|
||||
private final String indexPrefix;
|
||||
private final String logIndexPrefix;
|
||||
|
||||
public OpenSearchAdminController(OpenSearchClient client, RestClient restClient,
|
||||
SearchIndexerStats indexerStats, AuditService auditService,
|
||||
ObjectMapper objectMapper,
|
||||
@Value("${opensearch.url:http://localhost:9200}") String opensearchUrl,
|
||||
@Value("${opensearch.index-prefix:executions-}") String indexPrefix,
|
||||
@Value("${opensearch.log-index-prefix:logs-}") String logIndexPrefix) {
|
||||
this.client = client;
|
||||
this.restClient = restClient;
|
||||
this.indexerStats = indexerStats;
|
||||
this.auditService = auditService;
|
||||
this.objectMapper = objectMapper;
|
||||
this.opensearchUrl = opensearchUrl;
|
||||
this.indexPrefix = indexPrefix;
|
||||
this.logIndexPrefix = logIndexPrefix;
|
||||
}
|
||||
|
||||
@GetMapping("/status")
|
||||
@Operation(summary = "Get OpenSearch cluster status and version")
|
||||
public ResponseEntity<OpenSearchStatusResponse> getStatus() {
|
||||
try {
|
||||
HealthResponse health = client.cluster().health();
|
||||
String version = client.info().version().number();
|
||||
return ResponseEntity.ok(new OpenSearchStatusResponse(
|
||||
true,
|
||||
health.status().name(),
|
||||
version,
|
||||
health.numberOfNodes(),
|
||||
opensearchUrl));
|
||||
} catch (Exception e) {
|
||||
return ResponseEntity.status(HttpStatus.SERVICE_UNAVAILABLE)
|
||||
.body(new OpenSearchStatusResponse(
|
||||
false, "UNREACHABLE", null, 0, opensearchUrl));
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/pipeline")
|
||||
@Operation(summary = "Get indexing pipeline statistics")
|
||||
public ResponseEntity<PipelineStatsResponse> getPipeline() {
|
||||
return ResponseEntity.ok(new PipelineStatsResponse(
|
||||
indexerStats.getQueueDepth(),
|
||||
indexerStats.getMaxQueueSize(),
|
||||
indexerStats.getFailedCount(),
|
||||
indexerStats.getIndexedCount(),
|
||||
indexerStats.getDebounceMs(),
|
||||
indexerStats.getIndexingRate(),
|
||||
indexerStats.getLastIndexedAt()));
|
||||
}
|
||||
|
||||
@GetMapping("/indices")
|
||||
@Operation(summary = "Get OpenSearch indices with pagination")
|
||||
public ResponseEntity<IndicesPageResponse> getIndices(
|
||||
@RequestParam(defaultValue = "0") int page,
|
||||
@RequestParam(defaultValue = "20") int size,
|
||||
@RequestParam(defaultValue = "") String search,
|
||||
@RequestParam(defaultValue = "executions") String prefix) {
|
||||
try {
|
||||
Response response = restClient.performRequest(
|
||||
new Request("GET", "/_cat/indices?format=json&h=index,health,docs.count,store.size,pri,rep&bytes=b"));
|
||||
JsonNode indices;
|
||||
try (InputStream is = response.getEntity().getContent()) {
|
||||
indices = objectMapper.readTree(is);
|
||||
}
|
||||
|
||||
String filterPrefix = "logs".equals(prefix) ? logIndexPrefix : indexPrefix;
|
||||
|
||||
List<IndexInfoResponse> allIndices = new ArrayList<>();
|
||||
for (JsonNode idx : indices) {
|
||||
String name = idx.path("index").asText("");
|
||||
if (!name.startsWith(filterPrefix)) {
|
||||
continue;
|
||||
}
|
||||
if (!search.isEmpty() && !name.contains(search)) {
|
||||
continue;
|
||||
}
|
||||
allIndices.add(new IndexInfoResponse(
|
||||
name,
|
||||
parseLong(idx.path("docs.count").asText("0")),
|
||||
humanSize(parseLong(idx.path("store.size").asText("0"))),
|
||||
parseLong(idx.path("store.size").asText("0")),
|
||||
idx.path("health").asText("unknown"),
|
||||
parseInt(idx.path("pri").asText("0")),
|
||||
parseInt(idx.path("rep").asText("0"))));
|
||||
}
|
||||
|
||||
allIndices.sort(Comparator.comparing(IndexInfoResponse::name));
|
||||
|
||||
long totalDocs = allIndices.stream().mapToLong(IndexInfoResponse::docCount).sum();
|
||||
long totalBytes = allIndices.stream().mapToLong(IndexInfoResponse::sizeBytes).sum();
|
||||
int totalIndices = allIndices.size();
|
||||
int totalPages = Math.max(1, (int) Math.ceil((double) totalIndices / size));
|
||||
|
||||
int fromIndex = Math.min(page * size, totalIndices);
|
||||
int toIndex = Math.min(fromIndex + size, totalIndices);
|
||||
List<IndexInfoResponse> pageItems = allIndices.subList(fromIndex, toIndex);
|
||||
|
||||
return ResponseEntity.ok(new IndicesPageResponse(
|
||||
pageItems, totalIndices, totalDocs,
|
||||
humanSize(totalBytes), page, size, totalPages));
|
||||
} catch (Exception e) {
|
||||
return ResponseEntity.status(HttpStatus.BAD_GATEWAY)
|
||||
.body(new IndicesPageResponse(
|
||||
List.of(), 0, 0, "0 B", page, size, 0));
|
||||
}
|
||||
}
|
||||
|
||||
@DeleteMapping("/indices/{name}")
|
||||
@Operation(summary = "Delete an OpenSearch index")
|
||||
public ResponseEntity<Void> deleteIndex(@PathVariable String name, HttpServletRequest request) {
|
||||
try {
|
||||
if (!name.startsWith(indexPrefix) && !name.startsWith(logIndexPrefix)) {
|
||||
throw new ResponseStatusException(HttpStatus.FORBIDDEN, "Cannot delete index outside application scope");
|
||||
}
|
||||
boolean exists = client.indices().exists(r -> r.index(name)).value();
|
||||
if (!exists) {
|
||||
throw new ResponseStatusException(HttpStatus.NOT_FOUND, "Index not found: " + name);
|
||||
}
|
||||
client.indices().delete(r -> r.index(name));
|
||||
auditService.log("delete_index", AuditCategory.INFRA, name, null, AuditResult.SUCCESS, request);
|
||||
return ResponseEntity.ok().build();
|
||||
} catch (ResponseStatusException e) {
|
||||
throw e;
|
||||
} catch (Exception e) {
|
||||
throw new ResponseStatusException(HttpStatus.INTERNAL_SERVER_ERROR, "Failed to delete index: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
@GetMapping("/performance")
|
||||
@Operation(summary = "Get OpenSearch performance metrics")
|
||||
public ResponseEntity<PerformanceResponse> getPerformance() {
|
||||
try {
|
||||
Response response = restClient.performRequest(
|
||||
new Request("GET", "/_nodes/stats/jvm,indices"));
|
||||
JsonNode root;
|
||||
try (InputStream is = response.getEntity().getContent()) {
|
||||
root = objectMapper.readTree(is);
|
||||
}
|
||||
|
||||
JsonNode nodes = root.path("nodes");
|
||||
long heapUsed = 0, heapMax = 0;
|
||||
long queryCacheHits = 0, queryCacheMisses = 0;
|
||||
long requestCacheHits = 0, requestCacheMisses = 0;
|
||||
long searchQueryTotal = 0, searchQueryTimeMs = 0;
|
||||
long indexTotal = 0, indexTimeMs = 0;
|
||||
|
||||
var it = nodes.fields();
|
||||
while (it.hasNext()) {
|
||||
var entry = it.next();
|
||||
JsonNode node = entry.getValue();
|
||||
|
||||
JsonNode jvm = node.path("jvm").path("mem");
|
||||
heapUsed += jvm.path("heap_used_in_bytes").asLong(0);
|
||||
heapMax += jvm.path("heap_max_in_bytes").asLong(0);
|
||||
|
||||
JsonNode indicesNode = node.path("indices");
|
||||
JsonNode queryCache = indicesNode.path("query_cache");
|
||||
queryCacheHits += queryCache.path("hit_count").asLong(0);
|
||||
queryCacheMisses += queryCache.path("miss_count").asLong(0);
|
||||
|
||||
JsonNode requestCache = indicesNode.path("request_cache");
|
||||
requestCacheHits += requestCache.path("hit_count").asLong(0);
|
||||
requestCacheMisses += requestCache.path("miss_count").asLong(0);
|
||||
|
||||
JsonNode searchNode = indicesNode.path("search");
|
||||
searchQueryTotal += searchNode.path("query_total").asLong(0);
|
||||
searchQueryTimeMs += searchNode.path("query_time_in_millis").asLong(0);
|
||||
|
||||
JsonNode indexing = indicesNode.path("indexing");
|
||||
indexTotal += indexing.path("index_total").asLong(0);
|
||||
indexTimeMs += indexing.path("index_time_in_millis").asLong(0);
|
||||
}
|
||||
|
||||
double queryCacheHitRate = (queryCacheHits + queryCacheMisses) > 0
|
||||
? (double) queryCacheHits / (queryCacheHits + queryCacheMisses) : 0.0;
|
||||
double requestCacheHitRate = (requestCacheHits + requestCacheMisses) > 0
|
||||
? (double) requestCacheHits / (requestCacheHits + requestCacheMisses) : 0.0;
|
||||
double searchLatency = searchQueryTotal > 0
|
||||
? (double) searchQueryTimeMs / searchQueryTotal : 0.0;
|
||||
double indexingLatency = indexTotal > 0
|
||||
? (double) indexTimeMs / indexTotal : 0.0;
|
||||
|
||||
return ResponseEntity.ok(new PerformanceResponse(
|
||||
queryCacheHitRate, requestCacheHitRate,
|
||||
searchLatency, indexingLatency,
|
||||
heapUsed, heapMax));
|
||||
} catch (Exception e) {
|
||||
return ResponseEntity.status(HttpStatus.BAD_GATEWAY)
|
||||
.body(new PerformanceResponse(0, 0, 0, 0, 0, 0));
|
||||
}
|
||||
}
|
||||
|
||||
private static long parseLong(String s) {
|
||||
try {
|
||||
return Long.parseLong(s);
|
||||
} catch (NumberFormatException e) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
private static int parseInt(String s) {
|
||||
try {
|
||||
return Integer.parseInt(s);
|
||||
} catch (NumberFormatException e) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
private static String humanSize(long bytes) {
|
||||
if (bytes < 1024) return bytes + " B";
|
||||
if (bytes < 1024 * 1024) return String.format("%.1f KB", bytes / 1024.0);
|
||||
if (bytes < 1024 * 1024 * 1024) return String.format("%.1f MB", bytes / (1024.0 * 1024));
|
||||
return String.format("%.1f GB", bytes / (1024.0 * 1024 * 1024));
|
||||
}
|
||||
}
|
||||
@@ -3,7 +3,6 @@ package com.cameleer3.server.app.controller;
import com.cameleer3.server.core.admin.AuditCategory;
import com.cameleer3.server.core.admin.AuditResult;
import com.cameleer3.server.core.admin.AuditService;
import com.cameleer3.server.core.rbac.RbacService;
import com.cameleer3.server.core.rbac.RoleDetail;
import com.cameleer3.server.core.rbac.RoleRepository;
import com.cameleer3.server.core.rbac.SystemRole;
@@ -37,13 +36,10 @@ import java.util.UUID;
public class RoleAdminController {

    private final RoleRepository roleRepository;
    private final RbacService rbacService;
    private final AuditService auditService;

    public RoleAdminController(RoleRepository roleRepository, RbacService rbacService,
                               AuditService auditService) {
    public RoleAdminController(RoleRepository roleRepository, AuditService auditService) {
        this.roleRepository = roleRepository;
        this.rbacService = rbacService;
        this.auditService = auditService;
    }

@@ -7,6 +7,7 @@ import com.cameleer3.common.graph.RouteGraph;
|
||||
import com.cameleer3.server.core.agent.AgentInfo;
|
||||
import com.cameleer3.server.core.agent.AgentRegistryService;
|
||||
import com.cameleer3.server.core.agent.AgentState;
|
||||
import com.cameleer3.server.core.agent.RouteStateRegistry;
|
||||
import com.cameleer3.server.core.storage.DiagramStore;
|
||||
import com.cameleer3.server.core.storage.StatsStore;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
@@ -35,16 +36,21 @@ import java.util.stream.Collectors;
|
||||
@Tag(name = "Route Catalog", description = "Route catalog and discovery")
|
||||
public class RouteCatalogController {
|
||||
|
||||
private static final org.slf4j.Logger log = org.slf4j.LoggerFactory.getLogger(RouteCatalogController.class);
|
||||
|
||||
private final AgentRegistryService registryService;
|
||||
private final DiagramStore diagramStore;
|
||||
private final JdbcTemplate jdbc;
|
||||
private final RouteStateRegistry routeStateRegistry;
|
||||
|
||||
public RouteCatalogController(AgentRegistryService registryService,
|
||||
DiagramStore diagramStore,
|
||||
JdbcTemplate jdbc) {
|
||||
@org.springframework.beans.factory.annotation.Qualifier("clickHouseJdbcTemplate") JdbcTemplate jdbc,
|
||||
RouteStateRegistry routeStateRegistry) {
|
||||
this.registryService = registryService;
|
||||
this.diagramStore = diagramStore;
|
||||
this.jdbc = jdbc;
|
||||
this.routeStateRegistry = routeStateRegistry;
|
||||
}
|
||||
|
||||
@GetMapping("/catalog")
|
||||
@@ -53,12 +59,20 @@ public class RouteCatalogController {
|
||||
@ApiResponse(responseCode = "200", description = "Catalog returned")
|
||||
public ResponseEntity<List<AppCatalogEntry>> getCatalog(
|
||||
@RequestParam(required = false) String from,
|
||||
@RequestParam(required = false) String to) {
|
||||
@RequestParam(required = false) String to,
|
||||
@RequestParam(required = false) String environment) {
|
||||
List<AgentInfo> allAgents = registryService.findAll();
|
||||
|
||||
// Filter agents by environment if specified
|
||||
if (environment != null && !environment.isBlank()) {
|
||||
allAgents = allAgents.stream()
|
||||
.filter(a -> environment.equals(a.environmentId()))
|
||||
.toList();
|
||||
}
|
||||
|
||||
// Group agents by application name
|
||||
Map<String, List<AgentInfo>> agentsByApp = allAgents.stream()
|
||||
.collect(Collectors.groupingBy(AgentInfo::application, LinkedHashMap::new, Collectors.toList()));
|
||||
.collect(Collectors.groupingBy(AgentInfo::applicationId, LinkedHashMap::new, Collectors.toList()));
|
||||
|
||||
// Collect all distinct routes per app
|
||||
Map<String, Set<String>> routesByApp = new LinkedHashMap<>();
|
||||
@@ -76,64 +90,65 @@ public class RouteCatalogController {
|
||||
Instant now = Instant.now();
|
||||
Instant rangeFrom = from != null ? Instant.parse(from) : now.minus(24, ChronoUnit.HOURS);
|
||||
Instant rangeTo = to != null ? Instant.parse(to) : now;
|
||||
Instant from1m = now.minus(1, ChronoUnit.MINUTES);
|
||||
|
||||
// Route exchange counts from continuous aggregate
|
||||
// Route exchange counts from AggregatingMergeTree (literal SQL — ClickHouse JDBC driver
|
||||
// wraps prepared statements in sub-queries that strip AggregateFunction column types)
|
||||
Map<String, Long> routeExchangeCounts = new LinkedHashMap<>();
|
||||
Map<String, Instant> routeLastSeen = new LinkedHashMap<>();
|
||||
try {
|
||||
String envFilter = (environment != null && !environment.isBlank())
|
||||
? " AND environment = " + lit(environment) : "";
|
||||
jdbc.query(
|
||||
"SELECT application_name, route_id, SUM(total_count) AS cnt, MAX(bucket) AS last_seen " +
|
||||
"FROM stats_1m_route WHERE bucket >= ? AND bucket < ? " +
|
||||
"GROUP BY application_name, route_id",
|
||||
"SELECT application_id, route_id, countMerge(total_count) AS cnt, MAX(bucket) AS last_seen " +
|
||||
"FROM stats_1m_route WHERE bucket >= " + lit(rangeFrom) + " AND bucket < " + lit(rangeTo) +
|
||||
envFilter +
|
||||
" GROUP BY application_id, route_id",
|
||||
rs -> {
|
||||
String key = rs.getString("application_name") + "/" + rs.getString("route_id");
|
||||
String key = rs.getString("application_id") + "/" + rs.getString("route_id");
|
||||
routeExchangeCounts.put(key, rs.getLong("cnt"));
|
||||
Timestamp ts = rs.getTimestamp("last_seen");
|
||||
if (ts != null) routeLastSeen.put(key, ts.toInstant());
|
||||
},
|
||||
Timestamp.from(rangeFrom), Timestamp.from(rangeTo));
|
||||
});
|
||||
} catch (Exception e) {
|
||||
// Continuous aggregate may not exist yet
|
||||
log.warn("Failed to query route exchange counts: {}", e.getMessage());
|
||||
}
|
||||
|
||||
// Per-agent TPS from the last minute
|
||||
Map<String, Double> agentTps = new LinkedHashMap<>();
|
||||
try {
|
||||
jdbc.query(
|
||||
"SELECT application_name, SUM(total_count) AS cnt " +
|
||||
"FROM stats_1m_route WHERE bucket >= ? AND bucket < ? " +
|
||||
"GROUP BY application_name",
|
||||
rs -> {
|
||||
// This gives per-app TPS; we'll distribute among agents below
|
||||
},
|
||||
Timestamp.from(from1m), Timestamp.from(now));
|
||||
} catch (Exception e) {
|
||||
// Continuous aggregate may not exist yet
|
||||
// Merge route IDs from ClickHouse stats into routesByApp.
|
||||
// After server restart, auto-healed agents have empty routeIds, but
|
||||
// ClickHouse still has execution data with the correct route IDs.
|
||||
for (var countEntry : routeExchangeCounts.entrySet()) {
|
||||
String[] parts = countEntry.getKey().split("/", 2);
|
||||
if (parts.length == 2) {
|
||||
routesByApp.computeIfAbsent(parts[0], k -> new LinkedHashSet<>()).add(parts[1]);
|
||||
}
|
||||
}
|
||||
|
||||
// Build catalog entries
|
||||
// Build catalog entries — merge apps from agent registry + ClickHouse data
|
||||
Set<String> allAppIds = new LinkedHashSet<>(agentsByApp.keySet());
|
||||
allAppIds.addAll(routesByApp.keySet());
|
||||
|
||||
List<AppCatalogEntry> catalog = new ArrayList<>();
|
||||
for (var entry : agentsByApp.entrySet()) {
|
||||
String appId = entry.getKey();
|
||||
List<AgentInfo> agents = entry.getValue();
|
||||
for (String appId : allAppIds) {
|
||||
List<AgentInfo> agents = agentsByApp.getOrDefault(appId, List.of());
|
||||
|
||||
// Routes
|
||||
Set<String> routeIds = routesByApp.getOrDefault(appId, Set.of());
|
||||
List<String> agentIds = agents.stream().map(AgentInfo::id).toList();
|
||||
List<String> agentIds = agents.stream().map(AgentInfo::instanceId).toList();
|
||||
List<RouteSummary> routeSummaries = routeIds.stream()
|
||||
.map(routeId -> {
|
||||
String key = appId + "/" + routeId;
|
||||
long count = routeExchangeCounts.getOrDefault(key, 0L);
|
||||
Instant lastSeen = routeLastSeen.get(key);
|
||||
String fromUri = resolveFromEndpointUri(routeId, agentIds);
|
||||
return new RouteSummary(routeId, count, lastSeen, fromUri);
|
||||
String state = routeStateRegistry.getState(appId, routeId).name().toLowerCase();
|
||||
// Only include non-default states (stopped/suspended); null means started
|
||||
String routeState = "started".equals(state) ? null : state;
|
||||
return new RouteSummary(routeId, count, lastSeen, fromUri, routeState);
|
||||
})
|
||||
.toList();
|
||||
|
||||
// Agent summaries
|
||||
List<AgentSummary> agentSummaries = agents.stream()
|
||||
.map(a -> new AgentSummary(a.id(), a.name(), a.state().name().toLowerCase(), 0.0))
|
||||
.map(a -> new AgentSummary(a.instanceId(), a.displayName(), a.state().name().toLowerCase(), 0.0))
|
||||
.toList();
|
||||
|
||||
// Health = worst state among agents
|
||||
@@ -158,6 +173,18 @@ public class RouteCatalogController {
|
||||
.orElse(null);
|
||||
}
|
||||
|
||||
/** Format an Instant as a ClickHouse DateTime literal in UTC. */
|
||||
private static String lit(Instant instant) {
|
||||
return "'" + java.time.format.DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")
|
||||
.withZone(java.time.ZoneOffset.UTC)
|
||||
.format(instant.truncatedTo(ChronoUnit.SECONDS)) + "'";
|
||||
}
|
||||
|
||||
/** Format a string as a ClickHouse SQL literal with backslash + quote escaping. */
|
||||
private static String lit(String value) {
|
||||
return "'" + value.replace("\\", "\\\\").replace("'", "\\'") + "'";
|
||||
}
|
||||
|
||||
private String computeWorstHealth(List<AgentInfo> agents) {
|
||||
boolean hasDead = false;
|
||||
boolean hasStale = false;
|
||||
|
||||
@@ -32,7 +32,7 @@ public class RouteMetricsController {
|
||||
private final StatsStore statsStore;
|
||||
private final AppSettingsRepository appSettingsRepository;
|
||||
|
||||
public RouteMetricsController(JdbcTemplate jdbc, StatsStore statsStore,
|
||||
public RouteMetricsController(@org.springframework.beans.factory.annotation.Qualifier("clickHouseJdbcTemplate") JdbcTemplate jdbc, StatsStore statsStore,
|
||||
AppSettingsRepository appSettingsRepository) {
|
||||
this.jdbc = jdbc;
|
||||
this.statsStore = statsStore;
|
||||
@@ -46,35 +46,33 @@ public class RouteMetricsController {
|
||||
public ResponseEntity<List<RouteMetrics>> getMetrics(
|
||||
@RequestParam(required = false) String from,
|
||||
@RequestParam(required = false) String to,
|
||||
@RequestParam(required = false) String appId) {
|
||||
@RequestParam(required = false) String appId,
|
||||
@RequestParam(required = false) String environment) {
|
||||
|
||||
Instant toInstant = to != null ? Instant.parse(to) : Instant.now();
|
||||
Instant fromInstant = from != null ? Instant.parse(from) : toInstant.minus(24, ChronoUnit.HOURS);
|
||||
long windowSeconds = Duration.between(fromInstant, toInstant).toSeconds();
|
||||
|
||||
// Literal SQL — ClickHouse JDBC driver wraps prepared statements in sub-queries
|
||||
// that strip AggregateFunction column types, breaking -Merge combinators
|
||||
var sql = new StringBuilder(
|
||||
"SELECT application_name, route_id, " +
|
||||
"SUM(total_count) AS total, " +
|
||||
"SUM(failed_count) AS failed, " +
|
||||
"CASE WHEN SUM(total_count) > 0 THEN SUM(duration_sum) / SUM(total_count) ELSE 0 END AS avg_dur, " +
|
||||
"COALESCE(MAX(p99_duration), 0) AS p99_dur " +
|
||||
"FROM stats_1m_route WHERE bucket >= ? AND bucket < ?");
|
||||
var params = new ArrayList<Object>();
|
||||
params.add(Timestamp.from(fromInstant));
|
||||
params.add(Timestamp.from(toInstant));
|
||||
"SELECT application_id, route_id, " +
|
||||
"countMerge(total_count) AS total, " +
|
||||
"countIfMerge(failed_count) AS failed, " +
|
||||
"CASE WHEN countMerge(total_count) > 0 THEN toFloat64(sumMerge(duration_sum)) / countMerge(total_count) ELSE 0 END AS avg_dur, " +
|
||||
"COALESCE(quantileMerge(0.99)(p99_duration), 0) AS p99_dur " +
|
||||
"FROM stats_1m_route WHERE bucket >= " + lit(fromInstant) + " AND bucket < " + lit(toInstant));
|
||||
|
||||
if (appId != null) {
|
||||
sql.append(" AND application_name = ?");
|
||||
params.add(appId);
|
||||
sql.append(" AND application_id = " + lit(appId));
|
||||
}
|
||||
sql.append(" GROUP BY application_name, route_id ORDER BY application_name, route_id");
|
||||
|
||||
// Key struct for sparkline lookup
|
||||
record RouteKey(String appId, String routeId) {}
|
||||
List<RouteKey> routeKeys = new ArrayList<>();
|
||||
if (environment != null) {
|
||||
sql.append(" AND environment = " + lit(environment));
|
||||
}
|
||||
sql.append(" GROUP BY application_id, route_id ORDER BY application_id, route_id");
|
||||
|
||||
List<RouteMetrics> metrics = jdbc.query(sql.toString(), (rs, rowNum) -> {
|
||||
String applicationName = rs.getString("application_name");
|
||||
String applicationId = rs.getString("application_id");
|
||||
String routeId = rs.getString("route_id");
|
||||
long total = rs.getLong("total");
|
||||
long failed = rs.getLong("failed");
|
||||
@@ -85,10 +83,9 @@ public class RouteMetricsController {
|
||||
double errorRate = total > 0 ? (double) failed / total : 0.0;
|
||||
double tps = windowSeconds > 0 ? (double) total / windowSeconds : 0.0;
|
||||
|
||||
routeKeys.add(new RouteKey(applicationName, routeId));
|
||||
return new RouteMetrics(routeId, applicationName, total, successRate,
|
||||
return new RouteMetrics(routeId, applicationId, total, successRate,
|
||||
avgDur, p99Dur, errorRate, tps, List.of(), -1.0);
|
||||
}, params.toArray());
|
||||
});
|
||||
|
||||
// Fetch sparklines (12 buckets over the time window)
|
||||
if (!metrics.isEmpty()) {
|
||||
@@ -98,15 +95,17 @@ public class RouteMetricsController {
|
||||
for (int i = 0; i < metrics.size(); i++) {
|
||||
RouteMetrics m = metrics.get(i);
|
||||
try {
|
||||
List<Double> sparkline = jdbc.query(
|
||||
"SELECT time_bucket(? * INTERVAL '1 second', bucket) AS period, " +
|
||||
"COALESCE(SUM(total_count), 0) AS cnt " +
|
||||
"FROM stats_1m_route WHERE bucket >= ? AND bucket < ? " +
|
||||
"AND application_name = ? AND route_id = ? " +
|
||||
"GROUP BY period ORDER BY period",
|
||||
(rs, rowNum) -> rs.getDouble("cnt"),
|
||||
bucketSeconds, Timestamp.from(fromInstant), Timestamp.from(toInstant),
|
||||
m.appId(), m.routeId());
|
||||
var sparkWhere = new StringBuilder(
|
||||
"FROM stats_1m_route WHERE bucket >= " + lit(fromInstant) + " AND bucket < " + lit(toInstant) +
|
||||
" AND application_id = " + lit(m.appId()) + " AND route_id = " + lit(m.routeId()));
|
||||
if (environment != null) {
|
||||
sparkWhere.append(" AND environment = " + lit(environment));
|
||||
}
|
||||
String sparkSql = "SELECT toStartOfInterval(bucket, toIntervalSecond(" + bucketSeconds + ")) AS period, " +
|
||||
"COALESCE(countMerge(total_count), 0) AS cnt " +
|
||||
sparkWhere + " GROUP BY period ORDER BY period";
|
||||
List<Double> sparkline = jdbc.query(sparkSql,
|
||||
(rs, rowNum) -> rs.getDouble("cnt"));
|
||||
metrics.set(i, new RouteMetrics(m.routeId(), m.appId(), m.exchangeCount(),
|
||||
m.successRate(), m.avgDurationMs(), m.p99DurationMs(),
|
||||
m.errorRate(), m.throughputPerSec(), sparkline, m.slaCompliance()));
|
||||
@@ -120,11 +119,11 @@ public class RouteMetricsController {
|
||||
if (!metrics.isEmpty()) {
|
||||
// Determine SLA threshold (per-app or default)
|
||||
String effectiveAppId = appId != null ? appId : (metrics.isEmpty() ? null : metrics.get(0).appId());
|
||||
int threshold = appSettingsRepository.findByAppId(effectiveAppId != null ? effectiveAppId : "")
|
||||
int threshold = appSettingsRepository.findByApplicationId(effectiveAppId != null ? effectiveAppId : "")
|
||||
.map(AppSettings::slaThresholdMs).orElse(300);
|
||||
|
||||
Map<String, long[]> slaCounts = statsStore.slaCountsByRoute(fromInstant, toInstant,
|
||||
effectiveAppId, threshold);
|
||||
effectiveAppId, threshold, environment);
|
||||
|
||||
for (int i = 0; i < metrics.size(); i++) {
|
||||
RouteMetrics m = metrics.get(i);
|
||||
@@ -148,47 +147,63 @@ public class RouteMetricsController {
|
||||
@RequestParam String routeId,
|
||||
@RequestParam(required = false) String appId,
|
||||
@RequestParam(required = false) Instant from,
|
||||
@RequestParam(required = false) Instant to) {
|
||||
@RequestParam(required = false) Instant to,
|
||||
@RequestParam(required = false) String environment) {
|
||||
|
||||
Instant toInstant = to != null ? to : Instant.now();
|
||||
Instant fromInstant = from != null ? from : toInstant.minus(24, ChronoUnit.HOURS);
|
||||
|
||||
// Literal SQL for AggregatingMergeTree -Merge combinators.
|
||||
// Aliases (tc, fc) must NOT shadow column names (total_count, failed_count) —
|
||||
// ClickHouse 24.12 new analyzer resolves subsequent countMerge(total_count)
|
||||
// to the alias (UInt64) instead of the AggregateFunction column.
|
||||
var sql = new StringBuilder(
|
||||
"SELECT processor_id, processor_type, route_id, application_name, " +
|
||||
"SUM(total_count) AS total_count, " +
|
||||
"SUM(failed_count) AS failed_count, " +
|
||||
"CASE WHEN SUM(total_count) > 0 THEN SUM(duration_sum)::double precision / SUM(total_count) ELSE 0 END AS avg_duration_ms, " +
|
||||
"MAX(p99_duration) AS p99_duration_ms " +
|
||||
"SELECT processor_id, processor_type, route_id, application_id, " +
|
||||
"countMerge(total_count) AS tc, " +
|
||||
"countIfMerge(failed_count) AS fc, " +
|
||||
"CASE WHEN countMerge(total_count) > 0 THEN toFloat64(sumMerge(duration_sum)) / countMerge(total_count) ELSE 0 END AS avg_duration_ms, " +
|
||||
"quantileMerge(0.99)(p99_duration) AS p99_duration_ms " +
|
||||
"FROM stats_1m_processor_detail " +
|
||||
"WHERE bucket >= ? AND bucket < ? AND route_id = ?");
|
||||
var params = new ArrayList<Object>();
|
||||
params.add(Timestamp.from(fromInstant));
|
||||
params.add(Timestamp.from(toInstant));
|
||||
params.add(routeId);
|
||||
"WHERE bucket >= " + lit(fromInstant) + " AND bucket < " + lit(toInstant) +
|
||||
" AND route_id = " + lit(routeId));
|
||||
|
||||
if (appId != null) {
|
||||
sql.append(" AND application_name = ?");
|
||||
params.add(appId);
|
||||
sql.append(" AND application_id = " + lit(appId));
|
||||
}
|
||||
sql.append(" GROUP BY processor_id, processor_type, route_id, application_name");
|
||||
sql.append(" ORDER BY SUM(total_count) DESC");
|
||||
if (environment != null) {
|
||||
sql.append(" AND environment = " + lit(environment));
|
||||
}
|
||||
sql.append(" GROUP BY processor_id, processor_type, route_id, application_id");
|
||||
sql.append(" ORDER BY tc DESC");
|
||||
|
||||
List<ProcessorMetrics> metrics = jdbc.query(sql.toString(), (rs, rowNum) -> {
|
||||
long totalCount = rs.getLong("total_count");
|
||||
long failedCount = rs.getLong("failed_count");
|
||||
long totalCount = rs.getLong("tc");
|
||||
long failedCount = rs.getLong("fc");
|
||||
double errorRate = failedCount > 0 ? (double) failedCount / totalCount : 0.0;
|
||||
return new ProcessorMetrics(
|
||||
rs.getString("processor_id"),
|
||||
rs.getString("processor_type"),
|
||||
rs.getString("route_id"),
|
||||
rs.getString("application_name"),
|
||||
rs.getString("application_id"),
|
||||
totalCount,
|
||||
failedCount,
|
||||
rs.getDouble("avg_duration_ms"),
|
||||
rs.getDouble("p99_duration_ms"),
|
||||
errorRate);
|
||||
}, params.toArray());
|
||||
});
|
||||
|
||||
return ResponseEntity.ok(metrics);
|
||||
}
|
||||
|
||||
/** Format an Instant as a ClickHouse DateTime literal. */
|
||||
private static String lit(Instant instant) {
|
||||
return "'" + java.time.format.DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")
|
||||
.withZone(java.time.ZoneOffset.UTC)
|
||||
.format(instant.truncatedTo(ChronoUnit.SECONDS)) + "'";
|
||||
}
|
||||
|
||||
/** Format a string as a ClickHouse SQL literal with backslash + quote escaping. */
|
||||
private static String lit(String value) {
|
||||
return "'" + value.replace("\\", "\\\\").replace("'", "\\'") + "'";
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,9 +57,10 @@ public class SearchController {
|
||||
@RequestParam(required = false) String correlationId,
|
||||
@RequestParam(required = false) String text,
|
||||
@RequestParam(required = false) String routeId,
|
||||
@RequestParam(required = false) String agentId,
|
||||
@RequestParam(name = "agentId", required = false) String instanceId,
|
||||
@RequestParam(required = false) String processorType,
|
||||
@RequestParam(required = false) String application,
|
||||
@RequestParam(required = false) String environment,
|
||||
@RequestParam(defaultValue = "0") int offset,
|
||||
@RequestParam(defaultValue = "50") int limit,
|
||||
@RequestParam(required = false) String sortField,
|
||||
@@ -72,10 +73,11 @@ public class SearchController {
|
||||
null, null,
|
||||
correlationId,
|
||||
text, null, null, null,
|
||||
routeId, agentId, processorType,
|
||||
routeId, instanceId, processorType,
|
||||
application, agentIds,
|
||||
offset, limit,
|
||||
sortField, sortDir
|
||||
sortField, sortDir,
|
||||
environment
|
||||
);
|
||||
|
||||
return ResponseEntity.ok(searchService.search(request));
|
||||
@@ -87,9 +89,9 @@ public class SearchController {
|
||||
@RequestBody SearchRequest request) {
|
||||
// Resolve application to agentIds if application is specified but agentIds is not
|
||||
SearchRequest resolved = request;
|
||||
if (request.application() != null && !request.application().isBlank()
|
||||
&& (request.agentIds() == null || request.agentIds().isEmpty())) {
|
||||
resolved = request.withAgentIds(resolveApplicationToAgentIds(request.application()));
|
||||
if (request.applicationId() != null && !request.applicationId().isBlank()
|
||||
&& (request.instanceIds() == null || request.instanceIds().isEmpty())) {
|
||||
resolved = request.withInstanceIds(resolveApplicationToAgentIds(request.applicationId()));
|
||||
}
|
||||
return ResponseEntity.ok(searchService.search(resolved));
|
||||
}
|
||||
@@ -100,23 +102,24 @@ public class SearchController {
|
||||
@RequestParam Instant from,
|
||||
@RequestParam(required = false) Instant to,
|
||||
@RequestParam(required = false) String routeId,
|
||||
@RequestParam(required = false) String application) {
|
||||
@RequestParam(required = false) String application,
|
||||
@RequestParam(required = false) String environment) {
|
||||
Instant end = to != null ? to : Instant.now();
|
||||
ExecutionStats stats;
|
||||
if (routeId == null && application == null) {
|
||||
stats = searchService.stats(from, end);
|
||||
stats = searchService.stats(from, end, environment);
|
||||
} else if (routeId == null) {
|
||||
stats = searchService.statsForApp(from, end, application);
|
||||
stats = searchService.statsForApp(from, end, application, environment);
|
||||
} else {
|
||||
List<String> agentIds = resolveApplicationToAgentIds(application);
|
||||
stats = searchService.stats(from, end, routeId, agentIds);
|
||||
stats = searchService.stats(from, end, routeId, agentIds, environment);
|
||||
}
|
||||
|
||||
// Enrich with SLA compliance
|
||||
int threshold = appSettingsRepository
|
||||
.findByAppId(application != null ? application : "")
|
||||
.findByApplicationId(application != null ? application : "")
|
||||
.map(AppSettings::slaThresholdMs).orElse(300);
|
||||
double sla = searchService.slaCompliance(from, end, threshold, application, routeId);
|
||||
double sla = searchService.slaCompliance(from, end, threshold, application, routeId, environment);
|
||||
return ResponseEntity.ok(stats.withSlaCompliance(sla));
|
||||
}
|
||||
|
||||
@@ -127,19 +130,20 @@ public class SearchController {
|
||||
@RequestParam(required = false) Instant to,
|
||||
@RequestParam(defaultValue = "24") int buckets,
|
||||
@RequestParam(required = false) String routeId,
|
||||
@RequestParam(required = false) String application) {
|
||||
@RequestParam(required = false) String application,
|
||||
@RequestParam(required = false) String environment) {
|
||||
Instant end = to != null ? to : Instant.now();
|
||||
if (routeId == null && application == null) {
|
||||
return ResponseEntity.ok(searchService.timeseries(from, end, buckets));
|
||||
return ResponseEntity.ok(searchService.timeseries(from, end, buckets, environment));
|
||||
}
|
||||
if (routeId == null) {
|
||||
return ResponseEntity.ok(searchService.timeseriesForApp(from, end, buckets, application));
|
||||
return ResponseEntity.ok(searchService.timeseriesForApp(from, end, buckets, application, environment));
|
||||
}
|
||||
List<String> agentIds = resolveApplicationToAgentIds(application);
|
||||
if (routeId == null && agentIds == null) {
|
||||
return ResponseEntity.ok(searchService.timeseries(from, end, buckets));
|
||||
if (routeId == null && agentIds.isEmpty()) {
|
||||
return ResponseEntity.ok(searchService.timeseries(from, end, buckets, environment));
|
||||
}
|
||||
return ResponseEntity.ok(searchService.timeseries(from, end, buckets, routeId, agentIds));
|
||||
return ResponseEntity.ok(searchService.timeseries(from, end, buckets, routeId, agentIds, environment));
|
||||
}
|
||||
|
||||
@GetMapping("/stats/timeseries/by-app")
|
||||
@@ -147,9 +151,10 @@ public class SearchController {
|
||||
public ResponseEntity<Map<String, StatsTimeseries>> timeseriesByApp(
|
||||
@RequestParam Instant from,
|
||||
@RequestParam(required = false) Instant to,
|
||||
@RequestParam(defaultValue = "24") int buckets) {
|
||||
@RequestParam(defaultValue = "24") int buckets,
|
||||
@RequestParam(required = false) String environment) {
|
||||
Instant end = to != null ? to : Instant.now();
|
||||
return ResponseEntity.ok(searchService.timeseriesGroupedByApp(from, end, buckets));
|
||||
return ResponseEntity.ok(searchService.timeseriesGroupedByApp(from, end, buckets, environment));
|
||||
}
|
||||
|
||||
@GetMapping("/stats/timeseries/by-route")
|
||||
@@ -158,18 +163,26 @@ public class SearchController {
|
||||
@RequestParam Instant from,
|
||||
@RequestParam(required = false) Instant to,
|
||||
@RequestParam(defaultValue = "24") int buckets,
|
||||
@RequestParam String application) {
|
||||
@RequestParam String application,
|
||||
@RequestParam(required = false) String environment) {
|
||||
Instant end = to != null ? to : Instant.now();
|
||||
return ResponseEntity.ok(searchService.timeseriesGroupedByRoute(from, end, buckets, application));
|
||||
return ResponseEntity.ok(searchService.timeseriesGroupedByRoute(from, end, buckets, application, environment));
|
||||
}
|
||||
|
||||
@GetMapping("/stats/punchcard")
|
||||
@Operation(summary = "Transaction punchcard: weekday x hour grid (rolling 7 days)")
|
||||
public ResponseEntity<List<StatsStore.PunchcardCell>> punchcard(
|
||||
@RequestParam(required = false) String application) {
|
||||
@RequestParam(required = false) String application,
|
||||
@RequestParam(required = false) String environment) {
|
||||
Instant to = Instant.now();
|
||||
Instant from = to.minus(java.time.Duration.ofDays(7));
|
||||
return ResponseEntity.ok(searchService.punchcard(from, to, application));
|
||||
return ResponseEntity.ok(searchService.punchcard(from, to, application, environment));
|
||||
}
|
||||
|
||||
@GetMapping("/attributes/keys")
|
||||
@Operation(summary = "Distinct attribute key names across all executions")
|
||||
public ResponseEntity<List<String>> attributeKeys() {
|
||||
return ResponseEntity.ok(searchService.distinctAttributeKeys());
|
||||
}
|
||||
|
||||
@GetMapping("/errors/top")
|
||||
@@ -179,21 +192,22 @@ public class SearchController {
|
||||
@RequestParam(required = false) Instant to,
|
||||
@RequestParam(required = false) String application,
|
||||
@RequestParam(required = false) String routeId,
|
||||
@RequestParam(required = false) String environment,
|
||||
@RequestParam(defaultValue = "5") int limit) {
|
||||
Instant end = to != null ? to : Instant.now();
|
||||
return ResponseEntity.ok(searchService.topErrors(from, end, application, routeId, limit));
|
||||
return ResponseEntity.ok(searchService.topErrors(from, end, application, routeId, limit, environment));
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve an application name to agent IDs.
|
||||
* Returns null if application is null/blank (no filtering).
|
||||
* Returns empty list if application is null/blank (no filtering).
|
||||
*/
|
||||
private List<String> resolveApplicationToAgentIds(String application) {
|
||||
if (application == null || application.isBlank()) {
|
||||
return null;
|
||||
return List.of();
|
||||
}
|
||||
return registryService.findByApplication(application).stream()
|
||||
.map(AgentInfo::id)
|
||||
.map(AgentInfo::instanceId)
|
||||
.toList();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,50 @@
package com.cameleer3.server.app.controller;

import com.cameleer3.server.app.storage.ClickHouseUsageTracker;
import com.cameleer3.server.core.analytics.UsageStats;
import io.swagger.v3.oas.annotations.Operation;
import io.swagger.v3.oas.annotations.tags.Tag;
import org.springframework.boot.autoconfigure.condition.ConditionalOnBean;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;

import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.List;

@RestController
@RequestMapping("/api/v1/admin/usage")
@ConditionalOnBean(ClickHouseUsageTracker.class)
@Tag(name = "Usage Analytics", description = "UI usage pattern analytics")
public class UsageAnalyticsController {

    private final ClickHouseUsageTracker tracker;

    public UsageAnalyticsController(ClickHouseUsageTracker tracker) {
        this.tracker = tracker;
    }

    @GetMapping
    @Operation(summary = "Query usage statistics",
            description = "Returns aggregated API usage stats grouped by endpoint, user, or hour")
    public ResponseEntity<List<UsageStats>> getUsage(
            @RequestParam(required = false) String from,
            @RequestParam(required = false) String to,
            @RequestParam(required = false) String username,
            @RequestParam(defaultValue = "endpoint") String groupBy) {

        Instant fromInstant = from != null ? Instant.parse(from) : Instant.now().minus(7, ChronoUnit.DAYS);
        Instant toInstant = to != null ? Instant.parse(to) : Instant.now();

        List<UsageStats> stats = switch (groupBy) {
            case "user" -> tracker.queryByUser(fromInstant, toInstant);
            case "hour" -> tracker.queryByHour(fromInstant, toInstant, username);
            default -> tracker.queryByEndpoint(fromInstant, toInstant, username);
        };

        return ResponseEntity.ok(stats);
    }
}
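// Illustrative sketch (editor's addition, not part of this changeset): calling the endpoint
// above with java.net.http. Host, port and the bearer token are assumptions; only the
// /api/v1/admin/usage path and its groupBy/from parameters come from the controller.
static String fetchHourlyUsage(java.net.http.HttpClient http, String adminToken)
        throws java.io.IOException, InterruptedException {
    var request = java.net.http.HttpRequest.newBuilder(java.net.URI.create(
                    "http://localhost:8080/api/v1/admin/usage?groupBy=hour&from=2024-01-01T00:00:00Z"))
            .header("Authorization", "Bearer " + adminToken)
            .GET()
            .build();
    return http.send(request, java.net.http.HttpResponse.BodyHandlers.ofString()).body();
}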
@@ -7,6 +7,7 @@ import com.cameleer3.server.core.admin.AuditService;
|
||||
import com.cameleer3.server.core.rbac.RbacService;
|
||||
import com.cameleer3.server.core.rbac.SystemRole;
|
||||
import com.cameleer3.server.core.rbac.UserDetail;
|
||||
import com.cameleer3.server.core.security.PasswordPolicyValidator;
|
||||
import com.cameleer3.server.core.security.UserInfo;
|
||||
import com.cameleer3.server.core.security.UserRepository;
|
||||
import io.swagger.v3.oas.annotations.Operation;
|
||||
@@ -14,6 +15,7 @@ import io.swagger.v3.oas.annotations.responses.ApiResponse;
|
||||
import io.swagger.v3.oas.annotations.tags.Tag;
|
||||
import jakarta.servlet.http.HttpServletRequest;
|
||||
import jakarta.validation.Valid;
|
||||
import org.springframework.http.HttpStatus;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.security.access.prepost.PreAuthorize;
|
||||
import org.springframework.web.bind.annotation.DeleteMapping;
|
||||
@@ -24,7 +26,9 @@ import org.springframework.web.bind.annotation.PutMapping;
|
||||
import org.springframework.web.bind.annotation.RequestBody;
|
||||
import org.springframework.web.bind.annotation.RequestMapping;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
import org.springframework.web.server.ResponseStatusException;
|
||||
|
||||
import com.cameleer3.server.app.security.SecurityProperties;
|
||||
import org.springframework.security.crypto.bcrypt.BCryptPasswordEncoder;
|
||||
|
||||
import java.time.Instant;
|
||||
@@ -32,6 +36,7 @@ import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
|
||||
/**
|
||||
* Admin endpoints for user management.
|
||||
* Protected by {@code ROLE_ADMIN}.
|
||||
@@ -47,12 +52,15 @@ public class UserAdminController {
|
||||
private final RbacService rbacService;
|
||||
private final UserRepository userRepository;
|
||||
private final AuditService auditService;
|
||||
private final boolean oidcEnabled;
|
||||
|
||||
public UserAdminController(RbacService rbacService, UserRepository userRepository,
|
||||
AuditService auditService) {
|
||||
AuditService auditService, SecurityProperties securityProperties) {
|
||||
this.rbacService = rbacService;
|
||||
this.userRepository = userRepository;
|
||||
this.auditService = auditService;
|
||||
String issuer = securityProperties.getOidcIssuerUri();
|
||||
this.oidcEnabled = issuer != null && !issuer.isBlank();
|
||||
}
|
||||
|
||||
@GetMapping
|
||||
@@ -78,8 +86,12 @@ public class UserAdminController {
|
||||
@PostMapping
|
||||
@Operation(summary = "Create a local user")
|
||||
@ApiResponse(responseCode = "200", description = "User created")
|
||||
@ApiResponse(responseCode = "400", description = "Disabled in OIDC mode")
|
||||
public ResponseEntity<UserDetail> createUser(@RequestBody CreateUserRequest request,
|
||||
HttpServletRequest httpRequest) {
|
||||
if (oidcEnabled) {
|
||||
return ResponseEntity.badRequest().build();
|
||||
}
|
||||
String userId = "user:" + request.username();
|
||||
UserInfo user = new UserInfo(userId, "local",
|
||||
request.email() != null ? request.email() : "",
|
||||
@@ -87,6 +99,11 @@ public class UserAdminController {
|
||||
Instant.now());
|
||||
userRepository.upsert(user);
|
||||
if (request.password() != null && !request.password().isBlank()) {
|
||||
List<String> violations = PasswordPolicyValidator.validate(request.password(), request.username());
|
||||
if (!violations.isEmpty()) {
|
||||
throw new ResponseStatusException(HttpStatus.BAD_REQUEST,
|
||||
"Password policy violation: " + String.join("; ", violations));
|
||||
}
|
||||
userRepository.setPassword(userId, passwordEncoder.encode(request.password()));
|
||||
}
|
||||
rbacService.assignRoleToUser(userId, SystemRole.VIEWER_ID);
|
||||
@@ -167,8 +184,14 @@ public class UserAdminController {
|
||||
@DeleteMapping("/{userId}")
|
||||
@Operation(summary = "Delete user")
|
||||
@ApiResponse(responseCode = "204", description = "User deleted")
|
||||
@ApiResponse(responseCode = "409", description = "Cannot delete the last admin user")
|
||||
public ResponseEntity<Void> deleteUser(@PathVariable String userId,
|
||||
HttpServletRequest httpRequest) {
|
||||
boolean isAdmin = rbacService.getEffectiveRolesForUser(userId).stream()
|
||||
.anyMatch(r -> r.id().equals(SystemRole.ADMIN_ID));
|
||||
if (isAdmin && rbacService.getEffectivePrincipalsForRole(SystemRole.ADMIN_ID).size() <= 1) {
|
||||
throw new ResponseStatusException(HttpStatus.CONFLICT, "Cannot delete the last admin user");
|
||||
}
|
||||
userRepository.delete(userId);
|
||||
auditService.log("delete_user", AuditCategory.USER_MGMT, userId,
|
||||
null, AuditResult.SUCCESS, httpRequest);
|
||||
@@ -178,11 +201,24 @@ public class UserAdminController {
|
||||
@PostMapping("/{userId}/password")
|
||||
@Operation(summary = "Reset user password")
|
||||
@ApiResponse(responseCode = "204", description = "Password reset")
|
||||
@ApiResponse(responseCode = "400", description = "Disabled in OIDC mode or policy violation")
|
||||
public ResponseEntity<Void> resetPassword(
|
||||
@PathVariable String userId,
|
||||
@Valid @RequestBody SetPasswordRequest request,
|
||||
HttpServletRequest httpRequest) {
|
||||
if (oidcEnabled) {
|
||||
return ResponseEntity.badRequest().build();
|
||||
}
|
||||
// Extract bare username from "user:username" format for policy check
|
||||
String username = userId.startsWith("user:") ? userId.substring(5) : userId;
|
||||
List<String> violations = PasswordPolicyValidator.validate(request.password(), username);
|
||||
if (!violations.isEmpty()) {
|
||||
throw new ResponseStatusException(HttpStatus.BAD_REQUEST,
|
||||
"Password policy violation: " + String.join("; ", violations));
|
||||
}
|
||||
userRepository.setPassword(userId, passwordEncoder.encode(request.password()));
|
||||
// Revoke all existing tokens so the user must re-authenticate with the new password
|
||||
userRepository.revokeTokensBefore(userId, Instant.now());
|
||||
auditService.log("reset_password", AuditCategory.USER_MGMT, userId, null, AuditResult.SUCCESS, httpRequest);
|
||||
return ResponseEntity.noContent().build();
|
||||
}
|
||||
|
||||
@@ -884,6 +884,7 @@ public class ElkDiagramRenderer implements DiagramRenderer {
    }

    private ElkNode getElkRoot(ElkNode node) {
        if (node == null) return null;
        ElkNode current = node;
        while (current.getParent() != null) {
            current = current.getParent();

@@ -9,16 +9,16 @@ import java.time.Instant;
@Schema(description = "Agent lifecycle event")
public record AgentEventResponse(
        @NotNull long id,
        @NotNull String agentId,
        @NotNull String appId,
        @NotNull String instanceId,
        @NotNull String applicationId,
        @NotNull String eventType,
        String detail,
        @NotNull Instant timestamp
) {
    public static AgentEventResponse from(AgentEventRecord record) {
    public static AgentEventResponse from(AgentEventRecord event) {
        return new AgentEventResponse(
                record.id(), record.agentId(), record.appId(),
                record.eventType(), record.detail(), record.timestamp()
                event.id(), event.instanceId(), event.applicationId(),
                event.eventType(), event.detail(), event.timestamp()
        );
    }
}

@@ -11,9 +11,10 @@ import java.util.Map;

@Schema(description = "Agent instance summary with runtime metrics")
public record AgentInstanceResponse(
        @NotNull String id,
        @NotNull String name,
        @NotNull String application,
        @NotNull String instanceId,
        @NotNull String displayName,
        @NotNull String applicationId,
        String environmentId,
        @NotNull String status,
        @NotNull List<String> routeIds,
        @NotNull Instant registeredAt,
@@ -29,7 +30,8 @@ public record AgentInstanceResponse(
    public static AgentInstanceResponse from(AgentInfo info) {
        long uptime = Duration.between(info.registeredAt(), Instant.now()).toSeconds();
        return new AgentInstanceResponse(
                info.id(), info.name(), info.application(),
                info.instanceId(), info.displayName(), info.applicationId(),
                info.environmentId(),
                info.state().name(), info.routeIds(),
                info.registeredAt(), info.lastHeartbeat(),
                info.version(), info.capabilities(),
@@ -41,7 +43,8 @@ public record AgentInstanceResponse(

    public AgentInstanceResponse withMetrics(double tps, double errorRate, int activeRoutes) {
        return new AgentInstanceResponse(
                id, name, application, status, routeIds, registeredAt, lastHeartbeat,
                instanceId, displayName, applicationId, environmentId,
                status, routeIds, registeredAt, lastHeartbeat,
                version, capabilities,
                tps, errorRate, activeRoutes, totalRoutes, uptimeSeconds
        );

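// Illustrative usage sketch (editor's addition, not part of this changeset): the record is
// built from a registry entry and then enriched with live metrics via the copy-style
// withMetrics(...) shown above. The metric values are made up and "info" is assumed in scope.
AgentInstanceResponse enriched = AgentInstanceResponse.from(info).withMetrics(12.5, 0.02, 3);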
@@ -8,9 +8,10 @@ import java.util.Map;

@Schema(description = "Agent registration payload")
public record AgentRegistrationRequest(
        @NotNull String agentId,
        @NotNull String name,
        @Schema(defaultValue = "default") String application,
        @NotNull String instanceId,
        @NotNull String displayName,
        @Schema(defaultValue = "default") String applicationId,
        @Schema(defaultValue = "default") String environmentId,
        String version,
        List<String> routeIds,
        Map<String, Object> capabilities

@@ -5,7 +5,7 @@ import jakarta.validation.constraints.NotNull;

@Schema(description = "Agent registration result with JWT tokens and SSE endpoint")
public record AgentRegistrationResponse(
        @NotNull String agentId,
        @NotNull String instanceId,
        @NotNull String sseEndpoint,
        long heartbeatIntervalMs,
        @NotNull String serverPublicKey,

@@ -0,0 +1,26 @@
package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

import java.util.List;

@Schema(description = "Unified catalog entry combining app records with live agent data")
public record CatalogApp(
        @Schema(description = "Application slug (universal identifier)") String slug,
        @Schema(description = "Display name") String displayName,
        @Schema(description = "True if a managed App record exists in the database") boolean managed,
        @Schema(description = "Environment slug") String environmentSlug,
        @Schema(description = "Composite health: deployment status + agent health") String health,
        @Schema(description = "Human-readable tooltip explaining the health state") String healthTooltip,
        @Schema(description = "Number of connected agents") int agentCount,
        @Schema(description = "Live routes from agents") List<RouteSummary> routes,
        @Schema(description = "Connected agent summaries") List<AgentSummary> agents,
        @Schema(description = "Total exchange count from ClickHouse") long exchangeCount,
        @Schema(description = "Active deployment info, null if no deployment") DeploymentSummary deployment
) {
    public record DeploymentSummary(
            String status,
            String replicas,
            int version
    ) {}
}
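// Illustrative sketch (editor's addition, not part of this changeset): constructing a
// CatalogApp for a healthy, managed application. All field values are made up; the
// component order follows the record declaration above.
CatalogApp example = new CatalogApp(
        "orders-service",                      // slug
        "Orders Service",                      // displayName
        true,                                  // managed: App record exists
        "prod",                                // environmentSlug
        "healthy",                             // health
        "Deployment running, all agents up",   // healthTooltip
        2,                                     // agentCount
        List.of(),                             // routes
        List.of(),                             // agents
        1_250_000L,                            // exchangeCount
        new CatalogApp.DeploymentSummary("running", "2/2", 7));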
@@ -0,0 +1,14 @@
package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

@Schema(description = "ClickHouse storage and performance metrics")
public record ClickHousePerformanceResponse(
        String diskSize,
        String uncompressedSize,
        double compressionRatio,
        long totalRows,
        int partCount,
        String memoryUsage,
        int currentQueries
) {}
@@ -0,0 +1,12 @@
package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

@Schema(description = "Active ClickHouse query information")
public record ClickHouseQueryInfo(
        String queryId,
        double elapsedSeconds,
        String memory,
        long readRows,
        String query
) {}
@@ -0,0 +1,11 @@
package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

@Schema(description = "ClickHouse cluster status")
public record ClickHouseStatusResponse(
        boolean reachable,
        String version,
        String uptime,
        String host
) {}
@@ -0,0 +1,13 @@
package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

@Schema(description = "ClickHouse table information")
public record ClickHouseTableInfo(
        String name,
        String engine,
        long rowCount,
        String dataSize,
        long dataSizeBytes,
        int partitionCount
) {}
@@ -0,0 +1,13 @@
package com.cameleer3.server.app.dto;

import java.util.List;

public record CommandGroupResponse(
        boolean success,
        int total,
        int responded,
        List<AgentResponse> responses,
        List<String> timedOut
) {
    public record AgentResponse(String agentId, String status, String message) {}
}
@@ -0,0 +1,8 @@
package com.cameleer3.server.app.dto;

import com.cameleer3.common.model.ApplicationConfig;

public record ConfigUpdateResponse(
        ApplicationConfig config,
        CommandGroupResponse pushResult
) {}
@@ -7,6 +7,5 @@ public record DatabaseStatusResponse(
@Schema(description = "Whether the database is reachable") boolean connected,
@Schema(description = "PostgreSQL version string") String version,
@Schema(description = "Database host") String host,
@Schema(description = "Current schema search path") String schema,
@Schema(description = "Whether TimescaleDB extension is available") boolean timescaleDb
@Schema(description = "Current schema") String schema
) {}

@@ -1,14 +0,0 @@
package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

@Schema(description = "OpenSearch index information")
public record IndexInfoResponse(
@Schema(description = "Index name") String name,
@Schema(description = "Document count") long docCount,
@Schema(description = "Human-readable index size") String size,
@Schema(description = "Index size in bytes") long sizeBytes,
@Schema(description = "Index health status") String health,
@Schema(description = "Number of primary shards") int primaryShards,
@Schema(description = "Number of replica shards") int replicaShards
) {}
@@ -0,0 +1,16 @@
package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

import java.time.Instant;

@Schema(description = "Search indexer pipeline statistics")
public record IndexerPipelineResponse(
int queueDepth,
int maxQueueSize,
long failedCount,
long indexedCount,
long debounceMs,
double indexingRate,
Instant lastIndexedAt
) {}
@@ -1,16 +0,0 @@
package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

import java.util.List;

@Schema(description = "Paginated list of OpenSearch indices")
public record IndicesPageResponse(
@Schema(description = "Index list for current page") List<IndexInfoResponse> indices,
@Schema(description = "Total number of indices") long totalIndices,
@Schema(description = "Total document count across all indices") long totalDocs,
@Schema(description = "Human-readable total size") String totalSize,
@Schema(description = "Current page number (0-based)") int page,
@Schema(description = "Page size") int pageSize,
@Schema(description = "Total number of pages") int totalPages
) {}
@@ -2,12 +2,18 @@ package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

@Schema(description = "Application log entry from OpenSearch")
import java.util.Map;

@Schema(description = "Application log entry")
public record LogEntryResponse(
@Schema(description = "Log timestamp (ISO-8601)") String timestamp,
@Schema(description = "Log level (INFO, WARN, ERROR, DEBUG)") String level,
@Schema(description = "Log level (INFO, WARN, ERROR, DEBUG, TRACE)") String level,
@Schema(description = "Logger name") String loggerName,
@Schema(description = "Log message") String message,
@Schema(description = "Thread name") String threadName,
@Schema(description = "Stack trace (if present)") String stackTrace
@Schema(description = "Stack trace (if present)") String stackTrace,
@Schema(description = "Camel exchange ID (if present)") String exchangeId,
@Schema(description = "Agent instance ID") String instanceId,
@Schema(description = "Application ID") String application,
@Schema(description = "MDC context map") Map<String, String> mdc
) {}

@@ -0,0 +1,14 @@
package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

import java.util.List;
import java.util.Map;

@Schema(description = "Log search response with cursor pagination and level counts")
public record LogSearchPageResponse(
@Schema(description = "Log entries for the current page") List<LogEntryResponse> data,
@Schema(description = "Cursor for next page (null if no more results)") String nextCursor,
@Schema(description = "Whether more results exist beyond this page") boolean hasMore,
@Schema(description = "Count of logs per level (unaffected by level filter)") Map<String, Long> levelCounts
) {}
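LogSearchPageResponse is consumed by following nextCursor until it comes back null. A minimal client-side sketch of that loop, assuming some fetchPage call (not part of this change) that returns one page per (query, cursor) pair, and that the sketch lives alongside these DTOs:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.BiFunction;

// Hypothetical pagination loop; 'fetchPage' stands in for whatever endpoint or client method
// actually returns a LogSearchPageResponse, which is not defined in this change.
final class LogPagingSketch {
    static List<LogEntryResponse> fetchAll(String query,
                                           BiFunction<String, String, LogSearchPageResponse> fetchPage) {
        List<LogEntryResponse> all = new ArrayList<>();
        String cursor = null;
        do {
            LogSearchPageResponse page = fetchPage.apply(query, cursor);
            all.addAll(page.data());
            cursor = page.nextCursor();   // null on the last page
        } while (cursor != null);
        return all;
    }
}
```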
@@ -13,5 +13,8 @@ public record OidcAdminConfigRequest(
String rolesClaim,
List<String> defaultRoles,
boolean autoSignup,
String displayNameClaim
String displayNameClaim,
String userIdClaim,
String audience,
List<String> additionalScopes
) {}

@@ -16,17 +16,21 @@ public record OidcAdminConfigResponse(
String rolesClaim,
List<String> defaultRoles,
boolean autoSignup,
String displayNameClaim
String displayNameClaim,
String userIdClaim,
String audience,
List<String> additionalScopes
) {
public static OidcAdminConfigResponse unconfigured() {
return new OidcAdminConfigResponse(false, false, null, null, false, null, null, false, null);
return new OidcAdminConfigResponse(false, false, null, null, false, null, null, false, null, null, null, null);
}

public static OidcAdminConfigResponse from(OidcConfig config) {
return new OidcAdminConfigResponse(
true, config.enabled(), config.issuerUri(), config.clientId(),
!config.clientSecret().isBlank(), config.rolesClaim(),
config.defaultRoles(), config.autoSignup(), config.displayNameClaim()
config.defaultRoles(), config.autoSignup(), config.displayNameClaim(),
config.userIdClaim(), config.audience(), config.additionalScopes()
);
}
}

@@ -9,5 +9,9 @@ public record OidcPublicConfigResponse(
@NotNull String clientId,
@NotNull String authorizationEndpoint,
@Schema(description = "Present if the provider supports RP-initiated logout")
String endSessionEndpoint
String endSessionEndpoint,
@Schema(description = "RFC 8707 resource indicator for the authorization request")
String resource,
@Schema(description = "Additional scopes to request beyond openid email profile")
java.util.List<String> additionalScopes
) {}

@@ -1,12 +0,0 @@
package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

@Schema(description = "OpenSearch cluster status")
public record OpenSearchStatusResponse(
@Schema(description = "Whether the cluster is reachable") boolean reachable,
@Schema(description = "Cluster health status (GREEN, YELLOW, RED)") String clusterHealth,
@Schema(description = "OpenSearch version") String version,
@Schema(description = "Number of nodes in the cluster") int nodeCount,
@Schema(description = "OpenSearch host") String host
) {}
@@ -1,13 +0,0 @@
package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

@Schema(description = "OpenSearch performance metrics")
public record PerformanceResponse(
@Schema(description = "Query cache hit rate (0.0-1.0)") double queryCacheHitRate,
@Schema(description = "Request cache hit rate (0.0-1.0)") double requestCacheHitRate,
@Schema(description = "Average search latency in milliseconds") double searchLatencyMs,
@Schema(description = "Average indexing latency in milliseconds") double indexingLatencyMs,
@Schema(description = "JVM heap used in bytes") long jvmHeapUsedBytes,
@Schema(description = "JVM heap max in bytes") long jvmHeapMaxBytes
) {}
@@ -1,16 +0,0 @@
package com.cameleer3.server.app.dto;

import io.swagger.v3.oas.annotations.media.Schema;

import java.time.Instant;

@Schema(description = "Search indexing pipeline statistics")
public record PipelineStatsResponse(
@Schema(description = "Current queue depth") int queueDepth,
@Schema(description = "Maximum queue size") int maxQueueSize,
@Schema(description = "Number of failed indexing operations") long failedCount,
@Schema(description = "Number of successfully indexed documents") long indexedCount,
@Schema(description = "Debounce interval in milliseconds") long debounceMs,
@Schema(description = "Current indexing rate (docs/sec)") double indexingRate,
@Schema(description = "Timestamp of last indexed document") Instant lastIndexedAt
) {}
@@ -11,5 +11,8 @@ public record RouteSummary(
@NotNull long exchangeCount,
Instant lastSeen,
@Schema(description = "The from() endpoint URI, e.g. 'direct:processOrder'")
String fromEndpointUri
String fromEndpointUri,
@Schema(description = "Operational state of the route: stopped, suspended, or null (started/default)")
String routeState
) {}

@@ -5,18 +5,15 @@ import io.swagger.v3.oas.annotations.media.Schema;
|
||||
import jakarta.validation.Valid;
|
||||
import jakarta.validation.constraints.Max;
|
||||
import jakarta.validation.constraints.Min;
|
||||
import jakarta.validation.constraints.NotBlank;
|
||||
import jakarta.validation.constraints.NotNull;
|
||||
import jakarta.validation.constraints.Positive;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
@Schema(description = "Threshold configuration for admin monitoring")
|
||||
public record ThresholdConfigRequest(
|
||||
@Valid @NotNull DatabaseThresholdsRequest database,
|
||||
@Valid @NotNull OpenSearchThresholdsRequest opensearch
|
||||
@Valid @NotNull DatabaseThresholdsRequest database
|
||||
) {
|
||||
|
||||
@Schema(description = "Database monitoring thresholds")
|
||||
@@ -38,41 +35,6 @@ public record ThresholdConfigRequest(
|
||||
double queryDurationCritical
|
||||
) {}
|
||||
|
||||
@Schema(description = "OpenSearch monitoring thresholds")
|
||||
public record OpenSearchThresholdsRequest(
|
||||
@NotBlank
|
||||
@Schema(description = "Cluster health warning threshold (GREEN, YELLOW, RED)")
|
||||
String clusterHealthWarning,
|
||||
|
||||
@NotBlank
|
||||
@Schema(description = "Cluster health critical threshold (GREEN, YELLOW, RED)")
|
||||
String clusterHealthCritical,
|
||||
|
||||
@Min(0)
|
||||
@Schema(description = "Queue depth warning threshold")
|
||||
int queueDepthWarning,
|
||||
|
||||
@Min(0)
|
||||
@Schema(description = "Queue depth critical threshold")
|
||||
int queueDepthCritical,
|
||||
|
||||
@Min(0) @Max(100)
|
||||
@Schema(description = "JVM heap usage warning threshold (percentage)")
|
||||
int jvmHeapWarning,
|
||||
|
||||
@Min(0) @Max(100)
|
||||
@Schema(description = "JVM heap usage critical threshold (percentage)")
|
||||
int jvmHeapCritical,
|
||||
|
||||
@Min(0)
|
||||
@Schema(description = "Failed document count warning threshold")
|
||||
int failedDocsWarning,
|
||||
|
||||
@Min(0)
|
||||
@Schema(description = "Failed document count critical threshold")
|
||||
int failedDocsCritical
|
||||
) {}
|
||||
|
||||
/** Convert to core domain model */
|
||||
public ThresholdConfig toConfig() {
|
||||
return new ThresholdConfig(
|
||||
@@ -81,16 +43,6 @@ public record ThresholdConfigRequest(
|
||||
database.connectionPoolCritical(),
|
||||
database.queryDurationWarning(),
|
||||
database.queryDurationCritical()
|
||||
),
|
||||
new ThresholdConfig.OpenSearchThresholds(
|
||||
opensearch.clusterHealthWarning(),
|
||||
opensearch.clusterHealthCritical(),
|
||||
opensearch.queueDepthWarning(),
|
||||
opensearch.queueDepthCritical(),
|
||||
opensearch.jvmHeapWarning(),
|
||||
opensearch.jvmHeapCritical(),
|
||||
opensearch.failedDocsWarning(),
|
||||
opensearch.failedDocsCritical()
|
||||
)
|
||||
);
|
||||
}
|
||||
@@ -108,37 +60,6 @@ public record ThresholdConfigRequest(
|
||||
}
|
||||
}
|
||||
|
||||
if (opensearch != null) {
|
||||
if (opensearch.queueDepthWarning() > opensearch.queueDepthCritical()) {
|
||||
errors.add("opensearch.queueDepthWarning must be <= queueDepthCritical");
|
||||
}
|
||||
if (opensearch.jvmHeapWarning() > opensearch.jvmHeapCritical()) {
|
||||
errors.add("opensearch.jvmHeapWarning must be <= jvmHeapCritical");
|
||||
}
|
||||
if (opensearch.failedDocsWarning() > opensearch.failedDocsCritical()) {
|
||||
errors.add("opensearch.failedDocsWarning must be <= failedDocsCritical");
|
||||
}
|
||||
// Validate health severity ordering: GREEN < YELLOW < RED
|
||||
int warningSeverity = healthSeverity(opensearch.clusterHealthWarning());
|
||||
int criticalSeverity = healthSeverity(opensearch.clusterHealthCritical());
|
||||
if (warningSeverity < 0) {
|
||||
errors.add("opensearch.clusterHealthWarning must be GREEN, YELLOW, or RED");
|
||||
}
|
||||
if (criticalSeverity < 0) {
|
||||
errors.add("opensearch.clusterHealthCritical must be GREEN, YELLOW, or RED");
|
||||
}
|
||||
if (warningSeverity >= 0 && criticalSeverity >= 0 && warningSeverity > criticalSeverity) {
|
||||
errors.add("opensearch.clusterHealthWarning severity must be <= clusterHealthCritical (GREEN < YELLOW < RED)");
|
||||
}
|
||||
}
|
||||
|
||||
return errors;
|
||||
}
|
||||
|
||||
private static final Map<String, Integer> HEALTH_SEVERITY =
|
||||
Map.of("GREEN", 0, "YELLOW", 1, "RED", 2);
|
||||
|
||||
private static int healthSeverity(String health) {
|
||||
return HEALTH_SEVERITY.getOrDefault(health != null ? health.toUpperCase() : "", -1);
|
||||
}
|
||||
}
|
||||
|
||||
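The clusterHealth ordering check removed above maps GREEN < YELLOW < RED to 0/1/2, treats unknown or null values as invalid, and rejects a warning level that is more severe than the critical level (for example warning=RED with critical=YELLOW). A compact restatement of just that comparison, kept here for reference since the rest of the OpenSearch validation is deleted in this change:

```java
import java.util.Map;

// Restates the removed clusterHealth ordering check in isolation (GREEN < YELLOW < RED).
final class HealthOrderingSketch {
    static final Map<String, Integer> HEALTH_SEVERITY = Map.of("GREEN", 0, "YELLOW", 1, "RED", 2);

    static boolean orderingValid(String warning, String critical) {
        int w = HEALTH_SEVERITY.getOrDefault(warning != null ? warning.toUpperCase() : "", -1);
        int c = HEALTH_SEVERITY.getOrDefault(critical != null ? critical.toUpperCase() : "", -1);
        return w >= 0 && c >= 0 && w <= c;
    }
}
```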
@@ -0,0 +1,149 @@
|
||||
package com.cameleer3.server.app.ingestion;
|
||||
|
||||
import com.cameleer3.server.app.config.IngestionConfig;
|
||||
import com.cameleer3.server.app.search.ClickHouseLogStore;
|
||||
import com.cameleer3.server.app.storage.ClickHouseExecutionStore;
|
||||
import com.cameleer3.server.core.ingestion.BufferedLogEntry;
|
||||
import com.cameleer3.server.core.ingestion.ChunkAccumulator;
|
||||
import com.cameleer3.server.core.ingestion.MergedExecution;
|
||||
import com.cameleer3.server.core.ingestion.WriteBuffer;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.context.SmartLifecycle;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Scheduled flush task for ClickHouse execution and processor write buffers.
|
||||
* <p>
|
||||
* Drains both buffers on a fixed interval and delegates batch inserts to
|
||||
* {@link ClickHouseExecutionStore}. Also periodically sweeps stale exchanges
|
||||
* from the {@link ChunkAccumulator}.
|
||||
* <p>
|
||||
* Not a {@code @Component} — instantiated as a {@code @Bean} in StorageBeanConfig.
|
||||
*/
|
||||
public class ExecutionFlushScheduler implements SmartLifecycle {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(ExecutionFlushScheduler.class);
|
||||
|
||||
private final WriteBuffer<MergedExecution> executionBuffer;
|
||||
private final WriteBuffer<ChunkAccumulator.ProcessorBatch> processorBuffer;
|
||||
private final WriteBuffer<BufferedLogEntry> logBuffer;
|
||||
private final ClickHouseExecutionStore executionStore;
|
||||
private final ClickHouseLogStore logStore;
|
||||
private final ChunkAccumulator accumulator;
|
||||
private final int batchSize;
|
||||
private volatile boolean running = false;
|
||||
|
||||
public ExecutionFlushScheduler(WriteBuffer<MergedExecution> executionBuffer,
|
||||
WriteBuffer<ChunkAccumulator.ProcessorBatch> processorBuffer,
|
||||
WriteBuffer<BufferedLogEntry> logBuffer,
|
||||
ClickHouseExecutionStore executionStore,
|
||||
ClickHouseLogStore logStore,
|
||||
ChunkAccumulator accumulator,
|
||||
IngestionConfig config) {
|
||||
this.executionBuffer = executionBuffer;
|
||||
this.processorBuffer = processorBuffer;
|
||||
this.logBuffer = logBuffer;
|
||||
this.executionStore = executionStore;
|
||||
this.logStore = logStore;
|
||||
this.accumulator = accumulator;
|
||||
this.batchSize = config.getBatchSize();
|
||||
}
|
||||
|
||||
@Scheduled(fixedDelayString = "${ingestion.flush-interval-ms:1000}")
|
||||
public void flush() {
|
||||
try {
|
||||
List<MergedExecution> executions = executionBuffer.drain(batchSize);
|
||||
if (!executions.isEmpty()) {
|
||||
executionStore.insertExecutionBatch(executions);
|
||||
log.debug("Flushed {} executions to ClickHouse", executions.size());
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to flush executions", e);
|
||||
}
|
||||
|
||||
try {
|
||||
List<ChunkAccumulator.ProcessorBatch> batches = processorBuffer.drain(batchSize);
|
||||
if (!batches.isEmpty()) {
|
||||
executionStore.insertProcessorBatches(batches);
|
||||
log.debug("Flushed {} processor batches to ClickHouse", batches.size());
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to flush processor batches", e);
|
||||
}
|
||||
|
||||
try {
|
||||
List<BufferedLogEntry> logEntries = logBuffer.drain(batchSize);
|
||||
if (!logEntries.isEmpty()) {
|
||||
logStore.insertBufferedBatch(logEntries);
|
||||
log.debug("Flushed {} log entries to ClickHouse", logEntries.size());
|
||||
}
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to flush log entries", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Scheduled(fixedDelay = 60_000)
|
||||
public void sweepStale() {
|
||||
try {
|
||||
accumulator.sweepStale();
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to sweep stale exchanges", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void start() {
|
||||
running = true;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void stop() {
|
||||
// Drain remaining executions on shutdown
|
||||
while (executionBuffer.size() > 0) {
|
||||
List<MergedExecution> batch = executionBuffer.drain(batchSize);
|
||||
if (batch.isEmpty()) break;
|
||||
try {
|
||||
executionStore.insertExecutionBatch(batch);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to flush executions during shutdown", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Drain remaining processor batches on shutdown
|
||||
while (processorBuffer.size() > 0) {
|
||||
List<ChunkAccumulator.ProcessorBatch> batches = processorBuffer.drain(batchSize);
|
||||
if (batches.isEmpty()) break;
|
||||
try {
|
||||
executionStore.insertProcessorBatches(batches);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to flush processor batches during shutdown", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
// Drain remaining log entries on shutdown
|
||||
while (logBuffer.size() > 0) {
|
||||
List<BufferedLogEntry> entries = logBuffer.drain(batchSize);
|
||||
if (entries.isEmpty()) break;
|
||||
try {
|
||||
logStore.insertBufferedBatch(entries);
|
||||
} catch (Exception e) {
|
||||
log.error("Failed to flush log entries during shutdown", e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
running = false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isRunning() {
|
||||
return running;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getPhase() {
|
||||
return Integer.MAX_VALUE - 1;
|
||||
}
|
||||
}
|
||||
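As the Javadoc notes, this class is not component-scanned. A rough sketch of the kind of @Bean declaration that comment refers to, assuming StorageBeanConfig (whose real content is not shown in this diff) simply passes through buffer and store beans defined elsewhere in it:

```java
import com.cameleer3.server.app.config.IngestionConfig;
import com.cameleer3.server.app.ingestion.ExecutionFlushScheduler;
import com.cameleer3.server.app.search.ClickHouseLogStore;
import com.cameleer3.server.app.storage.ClickHouseExecutionStore;
import com.cameleer3.server.core.ingestion.BufferedLogEntry;
import com.cameleer3.server.core.ingestion.ChunkAccumulator;
import com.cameleer3.server.core.ingestion.MergedExecution;
import com.cameleer3.server.core.ingestion.WriteBuffer;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableScheduling;

// Sketch only: the real StorageBeanConfig is not part of this excerpt; the buffer and store
// beans are assumed to be declared alongside this one.
@Configuration
@EnableScheduling
class StorageBeanConfigSketch {

    @Bean
    ExecutionFlushScheduler executionFlushScheduler(WriteBuffer<MergedExecution> executionBuffer,
                                                    WriteBuffer<ChunkAccumulator.ProcessorBatch> processorBuffer,
                                                    WriteBuffer<BufferedLogEntry> logBuffer,
                                                    ClickHouseExecutionStore executionStore,
                                                    ClickHouseLogStore logStore,
                                                    ChunkAccumulator accumulator,
                                                    IngestionConfig config) {
        return new ExecutionFlushScheduler(executionBuffer, processorBuffer, logBuffer,
                executionStore, logStore, accumulator, config);
    }
}
```

The getPhase() value of Integer.MAX_VALUE - 1 makes this one of the first lifecycle beans to stop, so the drain loops in stop() run while the ClickHouse stores are still available.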
@@ -1,10 +1,19 @@
|
||||
package com.cameleer3.server.app.rbac;
|
||||
|
||||
import com.cameleer3.server.core.rbac.*;
|
||||
import com.cameleer3.server.core.rbac.GroupRepository;
|
||||
import com.cameleer3.server.core.rbac.GroupSummary;
|
||||
import com.cameleer3.server.core.rbac.RbacService;
|
||||
import com.cameleer3.server.core.rbac.RbacStats;
|
||||
import com.cameleer3.server.core.rbac.RoleSummary;
|
||||
import com.cameleer3.server.core.rbac.SystemRole;
|
||||
import com.cameleer3.server.core.rbac.UserDetail;
|
||||
import com.cameleer3.server.core.rbac.UserSummary;
|
||||
import com.cameleer3.server.core.security.UserInfo;
|
||||
import com.cameleer3.server.core.security.UserRepository;
|
||||
import org.springframework.http.HttpStatus;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.web.server.ResponseStatusException;
|
||||
|
||||
import java.util.*;
|
||||
|
||||
@@ -14,14 +23,12 @@ public class RbacServiceImpl implements RbacService {
|
||||
private final JdbcTemplate jdbc;
|
||||
private final UserRepository userRepository;
|
||||
private final GroupRepository groupRepository;
|
||||
private final RoleRepository roleRepository;
|
||||
|
||||
public RbacServiceImpl(JdbcTemplate jdbc, UserRepository userRepository,
|
||||
GroupRepository groupRepository, RoleRepository roleRepository) {
|
||||
GroupRepository groupRepository) {
|
||||
this.jdbc = jdbc;
|
||||
this.userRepository = userRepository;
|
||||
this.groupRepository = groupRepository;
|
||||
this.roleRepository = roleRepository;
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -50,19 +57,29 @@ public class RbacServiceImpl implements RbacService {
|
||||
|
||||
@Override
|
||||
public void assignRoleToUser(String userId, UUID roleId) {
|
||||
jdbc.update("INSERT INTO user_roles (user_id, role_id) VALUES (?, ?) ON CONFLICT DO NOTHING",
|
||||
userId, roleId);
|
||||
jdbc.update("""
|
||||
INSERT INTO user_roles (user_id, role_id, origin)
|
||||
VALUES (?, ?, 'direct')
|
||||
ON CONFLICT (user_id, role_id, origin) DO NOTHING
|
||||
""", userId, roleId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeRoleFromUser(String userId, UUID roleId) {
|
||||
if (SystemRole.ADMIN_ID.equals(roleId) && getEffectivePrincipalsForRole(SystemRole.ADMIN_ID).size() <= 1) {
|
||||
throw new ResponseStatusException(HttpStatus.CONFLICT,
|
||||
"Cannot remove the ADMIN role: at least one admin user must exist");
|
||||
}
|
||||
jdbc.update("DELETE FROM user_roles WHERE user_id = ? AND role_id = ?", userId, roleId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addUserToGroup(String userId, UUID groupId) {
|
||||
jdbc.update("INSERT INTO user_groups (user_id, group_id) VALUES (?, ?) ON CONFLICT DO NOTHING",
|
||||
userId, groupId);
|
||||
jdbc.update("""
|
||||
INSERT INTO user_groups (user_id, group_id, origin)
|
||||
VALUES (?, ?, 'direct')
|
||||
ON CONFLICT (user_id, group_id, origin) DO NOTHING
|
||||
""", userId, groupId);
|
||||
}
|
||||
|
||||
@Override
|
||||
@@ -235,12 +252,14 @@ public class RbacServiceImpl implements RbacService {
|
||||
return max;
|
||||
}
|
||||
|
||||
private List<RoleSummary> getDirectRolesForUser(String userId) {
|
||||
@Override
|
||||
public List<RoleSummary> getDirectRolesForUser(String userId) {
|
||||
return jdbc.query("""
|
||||
SELECT r.id, r.name, r.system FROM user_roles ur
|
||||
JOIN roles r ON r.id = ur.role_id WHERE ur.user_id = ?
|
||||
SELECT r.id, r.name, r.system, ur.origin FROM user_roles ur
|
||||
JOIN roles r ON r.id = ur.role_id
|
||||
WHERE ur.user_id = ?
|
||||
""", (rs, rowNum) -> new RoleSummary(rs.getObject("id", UUID.class),
|
||||
rs.getString("name"), rs.getBoolean("system"), "direct"), userId);
|
||||
rs.getString("name"), rs.getBoolean("system"), rs.getString("origin")), userId);
|
||||
}
|
||||
|
||||
private List<GroupSummary> getDirectGroupsForUser(String userId) {
|
||||
@@ -250,4 +269,28 @@ public class RbacServiceImpl implements RbacService {
|
||||
""", (rs, rowNum) -> new GroupSummary(rs.getObject("id", UUID.class),
|
||||
rs.getString("name")), userId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void clearManagedAssignments(String userId) {
|
||||
jdbc.update("DELETE FROM user_roles WHERE user_id = ? AND origin = 'managed'", userId);
|
||||
jdbc.update("DELETE FROM user_groups WHERE user_id = ? AND origin = 'managed'", userId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void assignManagedRole(String userId, UUID roleId, UUID mappingId) {
|
||||
jdbc.update("""
|
||||
INSERT INTO user_roles (user_id, role_id, origin, mapping_id)
|
||||
VALUES (?, ?, 'managed', ?)
|
||||
ON CONFLICT (user_id, role_id, origin) DO UPDATE SET mapping_id = EXCLUDED.mapping_id
|
||||
""", userId, roleId, mappingId);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addUserToManagedGroup(String userId, UUID groupId, UUID mappingId) {
|
||||
jdbc.update("""
|
||||
INSERT INTO user_groups (user_id, group_id, origin, mapping_id)
|
||||
VALUES (?, ?, 'managed', ?)
|
||||
ON CONFLICT (user_id, group_id, origin) DO UPDATE SET mapping_id = EXCLUDED.mapping_id
|
||||
""", userId, groupId, mappingId);
|
||||
}
|
||||
}
|
||||
|
||||
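The 'managed' origin and mapping_id written above are driven by clearManagedAssignments and assignManagedRole, which the @Override annotations indicate belong to the RbacService contract. A hedged sketch of how a mapping-sync caller (not shown in this excerpt) could use them; RoleMapping is an assumed placeholder type for "this OIDC mapping grants that role":

```java
import com.cameleer3.server.core.rbac.RbacService;
import java.util.List;
import java.util.UUID;

// Hypothetical caller of the managed-assignment API shown above.
final class ManagedRoleSyncSketch {
    record RoleMapping(UUID mappingId, UUID roleId) {}   // assumed shape, not defined in this change

    static void syncManagedRoles(RbacService rbac, String userId, List<RoleMapping> matched) {
        rbac.clearManagedAssignments(userId);            // drop stale mapping-derived roles and groups
        for (RoleMapping m : matched) {
            rbac.assignManagedRole(userId, m.roleId(), m.mappingId());
        }
    }
}
```

Group membership derived from mappings would follow the same pattern via addUserToManagedGroup. Because the conflict target includes origin, these managed rows never collide with a user's 'direct' assignments.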
@@ -0,0 +1,112 @@
|
||||
package com.cameleer3.server.app.retention;
|
||||
|
||||
import com.cameleer3.server.core.runtime.*;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Set;
|
||||
import java.util.UUID;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
* Nightly job that enforces JAR retention policies per environment.
|
||||
* For each app, keeps the N most recent versions (configured per environment)
|
||||
* and deletes older ones — unless they are currently deployed.
|
||||
*/
|
||||
@Component
|
||||
public class JarRetentionJob {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(JarRetentionJob.class);
|
||||
|
||||
private final EnvironmentService environmentService;
|
||||
private final AppService appService;
|
||||
private final AppVersionRepository versionRepo;
|
||||
private final DeploymentRepository deploymentRepo;
|
||||
|
||||
public JarRetentionJob(EnvironmentService environmentService,
|
||||
AppService appService,
|
||||
AppVersionRepository versionRepo,
|
||||
DeploymentRepository deploymentRepo) {
|
||||
this.environmentService = environmentService;
|
||||
this.appService = appService;
|
||||
this.versionRepo = versionRepo;
|
||||
this.deploymentRepo = deploymentRepo;
|
||||
}
|
||||
|
||||
@Scheduled(cron = "0 0 3 * * *") // 03:00 every day
|
||||
public void cleanupOldVersions() {
|
||||
log.info("JAR retention job started");
|
||||
int totalDeleted = 0;
|
||||
|
||||
for (Environment env : environmentService.listAll()) {
|
||||
Integer retentionCount = env.jarRetentionCount();
|
||||
if (retentionCount == null) {
|
||||
log.debug("Environment {} has unlimited retention, skipping", env.slug());
|
||||
continue;
|
||||
}
|
||||
|
||||
for (App app : appService.listByEnvironment(env.id())) {
|
||||
totalDeleted += cleanupApp(app, retentionCount);
|
||||
}
|
||||
}
|
||||
|
||||
log.info("JAR retention job completed — deleted {} versions", totalDeleted);
|
||||
}
|
||||
|
||||
private int cleanupApp(App app, int retentionCount) {
|
||||
List<AppVersion> versions = versionRepo.findByAppId(app.id()); // ordered DESC by version
|
||||
if (versions.size() <= retentionCount) return 0;
|
||||
|
||||
// Find version IDs that are currently deployed (any status)
|
||||
Set<UUID> deployedVersionIds = deploymentRepo.findByAppId(app.id()).stream()
|
||||
.map(Deployment::appVersionId)
|
||||
.collect(Collectors.toSet());
|
||||
|
||||
int deleted = 0;
|
||||
// versions is sorted DESC — skip the first retentionCount, delete the rest
|
||||
for (int i = retentionCount; i < versions.size(); i++) {
|
||||
AppVersion version = versions.get(i);
|
||||
if (deployedVersionIds.contains(version.id())) {
|
||||
log.debug("Skipping deployed version v{} of app {} ({})", version.version(), app.slug(), version.id());
|
||||
continue;
|
||||
}
|
||||
|
||||
// Delete JAR from disk
|
||||
deleteJarFile(version);
|
||||
|
||||
// Delete DB record
|
||||
versionRepo.delete(version.id());
|
||||
deleted++;
|
||||
log.info("Deleted version v{} of app {} ({}) — JAR: {}", version.version(), app.slug(), version.id(), version.jarPath());
|
||||
}
|
||||
|
||||
return deleted;
|
||||
}
|
||||
|
||||
private void deleteJarFile(AppVersion version) {
|
||||
try {
|
||||
Path jarPath = Path.of(version.jarPath());
|
||||
if (Files.exists(jarPath)) {
|
||||
Files.delete(jarPath);
|
||||
// Try to remove the empty version directory
|
||||
Path versionDir = jarPath.getParent();
|
||||
if (versionDir != null && Files.isDirectory(versionDir)) {
|
||||
try (var entries = Files.list(versionDir)) {
|
||||
if (entries.findFirst().isEmpty()) {
|
||||
Files.delete(versionDir);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (IOException e) {
|
||||
log.warn("Failed to delete JAR file for version {}: {}", version.id(), e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
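A worked reading of cleanupApp(): with retentionCount = 2 and versions ordered newest first as v5..v1, v5 and v4 are kept, v3 survives only if it is still deployed, and the rest are deleted. The same rule as a standalone sketch (Version is a stand-in for AppVersion):

```java
import java.util.List;
import java.util.Set;
import java.util.UUID;

// The selection rule from cleanupApp(), extracted: keep the newest retentionCount versions,
// never delete a version that is still referenced by a deployment.
final class RetentionRuleSketch {
    record Version(UUID id, int number) {}   // stand-in for AppVersion

    static List<Version> selectDeletable(List<Version> newestFirst, int retentionCount, Set<UUID> deployedIds) {
        return newestFirst.stream()
                .skip(retentionCount)
                .filter(v -> !deployedIds.contains(v.id()))
                .toList();
    }
}
```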
@@ -1,48 +0,0 @@
|
||||
package com.cameleer3.server.app.retention;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.jdbc.core.JdbcTemplate;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
import org.springframework.stereotype.Component;
|
||||
|
||||
@Component
|
||||
public class RetentionScheduler {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(RetentionScheduler.class);
|
||||
|
||||
private final JdbcTemplate jdbc;
|
||||
private final int retentionDays;
|
||||
|
||||
public RetentionScheduler(JdbcTemplate jdbc,
|
||||
@Value("${cameleer.retention-days:30}") int retentionDays) {
|
||||
this.jdbc = jdbc;
|
||||
this.retentionDays = retentionDays;
|
||||
}
|
||||
|
||||
@Scheduled(cron = "0 0 2 * * *") // Daily at 2 AM UTC
|
||||
public void dropExpiredChunks() {
|
||||
String interval = retentionDays + " days";
|
||||
try {
|
||||
// Raw data
|
||||
jdbc.execute("SELECT drop_chunks('executions', INTERVAL '" + interval + "')");
|
||||
jdbc.execute("SELECT drop_chunks('processor_executions', INTERVAL '" + interval + "')");
|
||||
jdbc.execute("SELECT drop_chunks('agent_metrics', INTERVAL '" + interval + "')");
|
||||
|
||||
// Continuous aggregates (keep 3x longer)
|
||||
String caggInterval = (retentionDays * 3) + " days";
|
||||
jdbc.execute("SELECT drop_chunks('stats_1m_all', INTERVAL '" + caggInterval + "')");
|
||||
jdbc.execute("SELECT drop_chunks('stats_1m_app', INTERVAL '" + caggInterval + "')");
|
||||
jdbc.execute("SELECT drop_chunks('stats_1m_route', INTERVAL '" + caggInterval + "')");
|
||||
jdbc.execute("SELECT drop_chunks('stats_1m_processor', INTERVAL '" + caggInterval + "')");
|
||||
|
||||
log.info("Retention: dropped chunks older than {} days (aggregates: {} days)",
|
||||
retentionDays, retentionDays * 3);
|
||||
} catch (Exception e) {
|
||||
log.error("Retention job failed", e);
|
||||
}
|
||||
}
|
||||
// Note: OpenSearch daily index deletion should be handled via ILM policy
|
||||
// configured at deployment time, not in application code.
|
||||
}
|
||||
@@ -0,0 +1,335 @@
|
||||
package com.cameleer3.server.app.runtime;
|
||||
|
||||
import com.cameleer3.server.app.storage.PostgresDeploymentRepository;
|
||||
import com.cameleer3.server.core.runtime.*;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.beans.factory.annotation.Autowired;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.scheduling.annotation.Async;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.*;
|
||||
|
||||
@Service
|
||||
public class DeploymentExecutor {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(DeploymentExecutor.class);
|
||||
|
||||
private final RuntimeOrchestrator orchestrator;
|
||||
private final DeploymentService deploymentService;
|
||||
private final AppService appService;
|
||||
private final EnvironmentService envService;
|
||||
private final DeploymentRepository deploymentRepository;
|
||||
private final PostgresDeploymentRepository pgDeployRepo;
|
||||
|
||||
@Autowired(required = false)
|
||||
private DockerNetworkManager networkManager;
|
||||
|
||||
@Value("${cameleer.runtime.base-image:cameleer-runtime-base:latest}")
|
||||
private String baseImage;
|
||||
|
||||
@Value("${cameleer.runtime.docker-network:cameleer}")
|
||||
private String dockerNetwork;
|
||||
|
||||
@Value("${cameleer.runtime.container-memory-limit:512m}")
|
||||
private String globalMemoryLimit;
|
||||
|
||||
@Value("${cameleer.runtime.container-cpu-request:500}")
|
||||
private int globalCpuRequest;
|
||||
|
||||
@Value("${cameleer.runtime.health-check-timeout:60}")
|
||||
private int healthCheckTimeout;
|
||||
|
||||
@Value("${cameleer.runtime.agent-health-port:9464}")
|
||||
private int agentHealthPort;
|
||||
|
||||
@Value("${security.bootstrap-token:}")
|
||||
private String bootstrapToken;
|
||||
|
||||
@Value("${cameleer.runtime.routing-mode:path}")
|
||||
private String globalRoutingMode;
|
||||
|
||||
@Value("${cameleer.runtime.routing-domain:localhost}")
|
||||
private String globalRoutingDomain;
|
||||
|
||||
@Value("${cameleer.runtime.server-url:}")
|
||||
private String globalServerUrl;
|
||||
|
||||
@Value("${cameleer.runtime.jar-docker-volume:}")
|
||||
private String jarDockerVolume;
|
||||
|
||||
@Value("${cameleer.runtime.jar-storage-path:/data/jars}")
|
||||
private String jarStoragePath;
|
||||
|
||||
public DeploymentExecutor(RuntimeOrchestrator orchestrator,
|
||||
DeploymentService deploymentService,
|
||||
AppService appService,
|
||||
EnvironmentService envService,
|
||||
DeploymentRepository deploymentRepository) {
|
||||
this.orchestrator = orchestrator;
|
||||
this.deploymentService = deploymentService;
|
||||
this.appService = appService;
|
||||
this.envService = envService;
|
||||
this.deploymentRepository = deploymentRepository;
|
||||
this.pgDeployRepo = (PostgresDeploymentRepository) deploymentRepository;
|
||||
}
|
||||
|
||||
@Async("deploymentTaskExecutor")
|
||||
public void executeAsync(Deployment deployment) {
|
||||
try {
|
||||
App app = appService.getById(deployment.appId());
|
||||
Environment env = envService.getById(deployment.environmentId());
|
||||
String jarPath = appService.resolveJarPath(deployment.appVersionId());
|
||||
|
||||
var globalDefaults = new ConfigMerger.GlobalRuntimeDefaults(
|
||||
parseMemoryLimitMb(globalMemoryLimit),
|
||||
globalCpuRequest,
|
||||
globalRoutingMode,
|
||||
globalRoutingDomain,
|
||||
globalServerUrl.isBlank() ? "http://cameleer3-server:8081" : globalServerUrl
|
||||
);
|
||||
ResolvedContainerConfig config = ConfigMerger.resolve(
|
||||
globalDefaults, env.defaultContainerConfig(), app.containerConfig());
|
||||
|
||||
pgDeployRepo.updateDeploymentStrategy(deployment.id(), config.deploymentStrategy());
|
||||
pgDeployRepo.updateResolvedConfig(deployment.id(), resolvedConfigToMap(config));
|
||||
|
||||
// === PRE-FLIGHT ===
|
||||
updateStage(deployment.id(), DeployStage.PRE_FLIGHT);
|
||||
preFlightChecks(jarPath, config);
|
||||
|
||||
// === PULL IMAGE ===
|
||||
updateStage(deployment.id(), DeployStage.PULL_IMAGE);
|
||||
// Docker pulls on create if not present locally
|
||||
|
||||
// === CREATE NETWORKS ===
|
||||
updateStage(deployment.id(), DeployStage.CREATE_NETWORK);
|
||||
String primaryNetwork = dockerNetwork;
|
||||
String envNet = null;
|
||||
if (networkManager != null) {
|
||||
primaryNetwork = DockerNetworkManager.TRAEFIK_NETWORK;
|
||||
networkManager.ensureNetwork(primaryNetwork);
|
||||
envNet = DockerNetworkManager.envNetworkName(env.slug());
|
||||
networkManager.ensureNetwork(envNet);
|
||||
}
|
||||
|
||||
// === START REPLICAS ===
|
||||
updateStage(deployment.id(), DeployStage.START_REPLICAS);
|
||||
|
||||
Map<String, String> baseEnvVars = buildEnvVars(app, env, config);
|
||||
Map<String, String> labels = TraefikLabelBuilder.build(app.slug(), env.slug(), config);
|
||||
|
||||
List<Map<String, Object>> replicaStates = new ArrayList<>();
|
||||
List<String> newContainerIds = new ArrayList<>();
|
||||
|
||||
for (int i = 0; i < config.replicas(); i++) {
|
||||
String containerName = env.slug() + "-" + app.slug() + "-" + i;
|
||||
|
||||
String volumeName = jarDockerVolume != null && !jarDockerVolume.isBlank() ? jarDockerVolume : null;
|
||||
ContainerRequest request = new ContainerRequest(
|
||||
containerName, baseImage, jarPath,
|
||||
volumeName, jarStoragePath,
|
||||
primaryNetwork,
|
||||
envNet != null ? List.of(envNet) : List.of(),
|
||||
baseEnvVars, labels,
|
||||
config.memoryLimitBytes(), config.memoryReserveBytes(),
|
||||
config.dockerCpuShares(), config.dockerCpuQuota(),
|
||||
config.exposedPorts(), agentHealthPort,
|
||||
"on-failure", 3
|
||||
);
|
||||
|
||||
String containerId = orchestrator.startContainer(request);
|
||||
newContainerIds.add(containerId);
|
||||
|
||||
// Connect to environment network after container is started
|
||||
if (networkManager != null && envNet != null) {
|
||||
networkManager.connectContainer(containerId, envNet);
|
||||
}
|
||||
|
||||
replicaStates.add(Map.of(
|
||||
"index", i,
|
||||
"containerId", containerId,
|
||||
"containerName", containerName,
|
||||
"status", "STARTING"
|
||||
));
|
||||
}
|
||||
|
||||
pgDeployRepo.updateReplicaStates(deployment.id(), replicaStates);
|
||||
|
||||
// === HEALTH CHECK ===
|
||||
updateStage(deployment.id(), DeployStage.HEALTH_CHECK);
|
||||
int healthyCount = waitForAnyHealthy(newContainerIds, healthCheckTimeout);
|
||||
|
||||
if (healthyCount == 0) {
|
||||
for (String cid : newContainerIds) {
|
||||
try { orchestrator.stopContainer(cid); orchestrator.removeContainer(cid); }
|
||||
catch (Exception e) { log.warn("Cleanup failed for {}: {}", cid, e.getMessage()); }
|
||||
}
|
||||
pgDeployRepo.updateDeployStage(deployment.id(), null);
|
||||
deploymentService.markFailed(deployment.id(), "No replicas passed health check within " + healthCheckTimeout + "s");
|
||||
return;
|
||||
}
|
||||
|
||||
replicaStates = updateReplicaHealth(replicaStates, newContainerIds);
|
||||
pgDeployRepo.updateReplicaStates(deployment.id(), replicaStates);
|
||||
|
||||
// === SWAP TRAFFIC ===
|
||||
updateStage(deployment.id(), DeployStage.SWAP_TRAFFIC);
|
||||
|
||||
Optional<Deployment> existing = deploymentRepository.findActiveByAppIdAndEnvironmentId(
|
||||
deployment.appId(), deployment.environmentId());
|
||||
if (existing.isPresent() && !existing.get().id().equals(deployment.id())) {
|
||||
stopDeploymentContainers(existing.get());
|
||||
deploymentService.markStopped(existing.get().id());
|
||||
log.info("Stopped previous deployment {} for replacement", existing.get().id());
|
||||
}
|
||||
|
||||
// === COMPLETE ===
|
||||
updateStage(deployment.id(), DeployStage.COMPLETE);
|
||||
|
||||
String primaryContainerId = newContainerIds.get(0);
|
||||
DeploymentStatus finalStatus = healthyCount == config.replicas()
|
||||
? DeploymentStatus.RUNNING : DeploymentStatus.DEGRADED;
|
||||
deploymentService.markRunning(deployment.id(), primaryContainerId);
|
||||
if (finalStatus == DeploymentStatus.DEGRADED) {
|
||||
deploymentRepository.updateStatus(deployment.id(), DeploymentStatus.DEGRADED,
|
||||
primaryContainerId, null);
|
||||
}
|
||||
|
||||
pgDeployRepo.updateDeployStage(deployment.id(), null);
|
||||
log.info("Deployment {} is {} ({}/{} replicas healthy)",
|
||||
deployment.id(), finalStatus, healthyCount, config.replicas());
|
||||
|
||||
} catch (Exception e) {
|
||||
log.error("Deployment {} FAILED: {}", deployment.id(), e.getMessage(), e);
|
||||
pgDeployRepo.updateDeployStage(deployment.id(), null);
|
||||
deploymentService.markFailed(deployment.id(), e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
public void stopDeployment(Deployment deployment) {
|
||||
pgDeployRepo.updateTargetState(deployment.id(), "STOPPED");
|
||||
deploymentRepository.updateStatus(deployment.id(), DeploymentStatus.STOPPING,
|
||||
deployment.containerId(), null);
|
||||
|
||||
stopDeploymentContainers(deployment);
|
||||
deploymentService.markStopped(deployment.id());
|
||||
}
|
||||
|
||||
private void stopDeploymentContainers(Deployment deployment) {
|
||||
List<Map<String, Object>> replicas = deployment.replicaStates() != null
|
||||
? deployment.replicaStates() : List.of();
|
||||
for (Map<String, Object> replica : replicas) {
|
||||
String cid = (String) replica.get("containerId");
|
||||
if (cid != null) {
|
||||
try {
|
||||
orchestrator.stopContainer(cid);
|
||||
orchestrator.removeContainer(cid);
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to stop replica container {}: {}", cid, e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
if (deployment.containerId() != null && replicas.isEmpty()) {
|
||||
try {
|
||||
orchestrator.stopContainer(deployment.containerId());
|
||||
orchestrator.removeContainer(deployment.containerId());
|
||||
} catch (Exception e) {
|
||||
log.warn("Failed to stop container {}: {}", deployment.containerId(), e.getMessage());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void preFlightChecks(String jarPath, ResolvedContainerConfig config) {
|
||||
if (!Files.exists(Path.of(jarPath))) {
|
||||
throw new IllegalStateException("JAR file not found: " + jarPath);
|
||||
}
|
||||
if (config.memoryLimitMb() <= 0) {
|
||||
throw new IllegalStateException("Memory limit must be positive, got: " + config.memoryLimitMb());
|
||||
}
|
||||
if (config.appPort() <= 0 || config.appPort() > 65535) {
|
||||
throw new IllegalStateException("Invalid app port: " + config.appPort());
|
||||
}
|
||||
if (config.replicas() < 1) {
|
||||
throw new IllegalStateException("Replicas must be >= 1, got: " + config.replicas());
|
||||
}
|
||||
}
|
||||
|
||||
private Map<String, String> buildEnvVars(App app, Environment env, ResolvedContainerConfig config) {
|
||||
Map<String, String> envVars = new LinkedHashMap<>();
|
||||
envVars.put("CAMELEER_EXPORT_TYPE", "HTTP");
|
||||
envVars.put("CAMELEER_APPLICATION_ID", app.slug());
|
||||
envVars.put("CAMELEER_ENVIRONMENT_ID", env.slug());
|
||||
envVars.put("CAMELEER_SERVER_URL", config.serverUrl());
|
||||
if (bootstrapToken != null && !bootstrapToken.isBlank()) {
|
||||
envVars.put("CAMELEER_AUTH_TOKEN", bootstrapToken);
|
||||
}
|
||||
envVars.putAll(config.customEnvVars());
|
||||
return envVars;
|
||||
}
|
||||
|
||||
private int waitForAnyHealthy(List<String> containerIds, int timeoutSeconds) {
|
||||
long deadline = System.currentTimeMillis() + (timeoutSeconds * 1000L);
|
||||
int lastHealthy = 0;
|
||||
while (System.currentTimeMillis() < deadline) {
|
||||
int healthy = 0;
|
||||
for (String cid : containerIds) {
|
||||
ContainerStatus status = orchestrator.getContainerStatus(cid);
|
||||
if ("healthy".equals(status.state())) healthy++;
|
||||
}
|
||||
lastHealthy = healthy;
|
||||
if (healthy == containerIds.size()) return healthy;
|
||||
try { Thread.sleep(2000); } catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
return lastHealthy;
|
||||
}
|
||||
}
|
||||
return lastHealthy;
|
||||
}
|
||||
|
||||
private List<Map<String, Object>> updateReplicaHealth(List<Map<String, Object>> replicas,
|
||||
List<String> containerIds) {
|
||||
List<Map<String, Object>> updated = new ArrayList<>();
|
||||
for (Map<String, Object> replica : replicas) {
|
||||
String cid = (String) replica.get("containerId");
|
||||
ContainerStatus status = orchestrator.getContainerStatus(cid);
|
||||
Map<String, Object> copy = new HashMap<>(replica);
|
||||
copy.put("status", status.running() ? "RUNNING" : "DEAD");
|
||||
updated.add(copy);
|
||||
}
|
||||
return updated;
|
||||
}
|
||||
|
||||
private void updateStage(UUID deploymentId, DeployStage stage) {
|
||||
pgDeployRepo.updateDeployStage(deploymentId, stage.name());
|
||||
}
|
||||
|
||||
private int parseMemoryLimitMb(String limit) {
|
||||
limit = limit.trim().toLowerCase();
|
||||
if (limit.endsWith("g")) return (int) (Double.parseDouble(limit.replace("g", "")) * 1024);
|
||||
if (limit.endsWith("m")) return (int) Double.parseDouble(limit.replace("m", ""));
|
||||
return Integer.parseInt(limit);
|
||||
}
|
||||
|
||||
private Map<String, Object> resolvedConfigToMap(ResolvedContainerConfig config) {
|
||||
Map<String, Object> map = new LinkedHashMap<>();
|
||||
map.put("memoryLimitMb", config.memoryLimitMb());
|
||||
if (config.memoryReserveMb() != null) map.put("memoryReserveMb", config.memoryReserveMb());
|
||||
map.put("cpuRequest", config.cpuRequest());
|
||||
if (config.cpuLimit() != null) map.put("cpuLimit", config.cpuLimit());
|
||||
map.put("appPort", config.appPort());
|
||||
map.put("exposedPorts", config.exposedPorts());
|
||||
map.put("customEnvVars", config.customEnvVars());
|
||||
map.put("stripPathPrefix", config.stripPathPrefix());
|
||||
map.put("sslOffloading", config.sslOffloading());
|
||||
map.put("routingMode", config.routingMode());
|
||||
map.put("routingDomain", config.routingDomain());
|
||||
map.put("serverUrl", config.serverUrl());
|
||||
map.put("replicas", config.replicas());
|
||||
map.put("deploymentStrategy", config.deploymentStrategy());
|
||||
return map;
|
||||
}
|
||||
}
|
||||
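The end of executeAsync() reduces to a simple rule: zero healthy replicas aborts the deployment via markFailed, all replicas healthy means RUNNING, anything in between means DEGRADED. Restated on its own:

```java
import com.cameleer3.server.core.runtime.DeploymentStatus;

// Restates the end-of-deployment decision in executeAsync(); the FAILED branch corresponds
// to the markFailed(...) early return, which never reaches the RUNNING/DEGRADED choice.
final class FinalStatusSketch {
    static DeploymentStatus finalStatusFor(int healthyCount, int requestedReplicas) {
        if (healthyCount == 0) return DeploymentStatus.FAILED;
        return healthyCount == requestedReplicas ? DeploymentStatus.RUNNING : DeploymentStatus.DEGRADED;
    }
}
```

For the memory parsing above, "512m" resolves to 512 MB, "1g" to 1024 MB, and a bare "768" is already treated as megabytes.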
@@ -0,0 +1,16 @@
package com.cameleer3.server.app.runtime;

import com.cameleer3.server.core.runtime.ContainerRequest;
import com.cameleer3.server.core.runtime.ContainerStatus;
import com.cameleer3.server.core.runtime.RuntimeOrchestrator;

import java.util.stream.Stream;

public class DisabledRuntimeOrchestrator implements RuntimeOrchestrator {
@Override public boolean isEnabled() { return false; }
@Override public String startContainer(ContainerRequest r) { throw new UnsupportedOperationException("Runtime management disabled"); }
@Override public void stopContainer(String id) { throw new UnsupportedOperationException("Runtime management disabled"); }
@Override public void removeContainer(String id) { throw new UnsupportedOperationException("Runtime management disabled"); }
@Override public ContainerStatus getContainerStatus(String id) { return ContainerStatus.notFound(); }
@Override public Stream<String> getLogs(String id, int tail) { return Stream.empty(); }
}
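DisabledRuntimeOrchestrator is a null-object fallback so callers can always inject a RuntimeOrchestrator. How the Docker and disabled implementations are actually selected is not part of this excerpt; one plausible wiring, assuming a cameleer.runtime.enabled property, might look like this sketch:

```java
import com.cameleer3.server.core.runtime.RuntimeOrchestrator;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

// Illustrative only: the property name and the existence of such a config class are assumptions,
// not something this diff defines. Assumed to live in com.cameleer3.server.app.runtime.
@Configuration
class RuntimeOrchestratorConfigSketch {

    @Bean
    @ConditionalOnProperty(name = "cameleer.runtime.enabled", havingValue = "true")
    RuntimeOrchestrator dockerRuntimeOrchestrator() {
        return new DockerRuntimeOrchestrator();
    }

    @Bean
    @ConditionalOnProperty(name = "cameleer.runtime.enabled", havingValue = "false", matchIfMissing = true)
    RuntimeOrchestrator disabledRuntimeOrchestrator() {
        return new DisabledRuntimeOrchestrator();
    }
}
```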
@@ -0,0 +1,193 @@
|
||||
package com.cameleer3.server.app.runtime;
|
||||
|
||||
import com.cameleer3.server.app.storage.PostgresDeploymentRepository;
|
||||
import com.cameleer3.server.core.runtime.ContainerStatus;
|
||||
import com.cameleer3.server.core.runtime.Deployment;
|
||||
import com.cameleer3.server.core.runtime.DeploymentStatus;
|
||||
import com.cameleer3.server.core.runtime.RuntimeOrchestrator;
|
||||
import com.github.dockerjava.api.DockerClient;
|
||||
import com.github.dockerjava.api.async.ResultCallback;
|
||||
import com.github.dockerjava.api.model.Event;
|
||||
import com.github.dockerjava.api.model.EventType;
|
||||
import jakarta.annotation.PostConstruct;
|
||||
import jakarta.annotation.PreDestroy;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.springframework.scheduling.annotation.Scheduled;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
|
||||
public class DockerEventMonitor {
|
||||
|
||||
private static final Logger log = LoggerFactory.getLogger(DockerEventMonitor.class);
|
||||
|
||||
private final DockerClient dockerClient;
|
||||
private final RuntimeOrchestrator runtimeOrchestrator;
|
||||
private final PostgresDeploymentRepository deploymentRepository;
|
||||
private Closeable eventStream;
|
||||
|
||||
public DockerEventMonitor(DockerRuntimeOrchestrator orchestrator,
|
||||
PostgresDeploymentRepository deploymentRepository) {
|
||||
this.dockerClient = orchestrator.getDockerClient();
|
||||
this.runtimeOrchestrator = orchestrator;
|
||||
this.deploymentRepository = deploymentRepository;
|
||||
}
|
||||
|
||||
@PostConstruct
|
||||
public void startListening() {
|
||||
eventStream = dockerClient.eventsCmd()
|
||||
.withEventTypeFilter(EventType.CONTAINER)
|
||||
.withEventFilter("die", "oom", "start", "stop")
|
||||
.exec(new ResultCallback.Adapter<Event>() {
|
||||
@Override
|
||||
public void onNext(Event event) {
|
||||
handleEvent(event);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onError(Throwable throwable) {
|
||||
log.warn("Docker event stream error, reconnecting: {}", throwable.getMessage());
|
||||
reconnect();
|
||||
}
|
||||
});
|
||||
|
||||
log.info("Docker event monitor started");
|
||||
}
|
||||
|
||||
@PreDestroy
|
||||
public void stop() {
|
||||
if (eventStream != null) {
|
||||
try { eventStream.close(); } catch (IOException e) { /* ignore */ }
|
||||
}
|
||||
}
|
||||
|
||||
private void handleEvent(Event event) {
|
||||
String containerId = event.getId();
|
||||
if (containerId == null) return;
|
||||
|
||||
Map<String, String> labels = event.getActor() != null ? event.getActor().getAttributes() : null;
|
||||
if (labels == null || !"cameleer3-server".equals(labels.get("managed-by"))) return;
|
||||
|
||||
String action = event.getAction();
|
||||
log.debug("Docker event: {} for container {} ({})", action, containerId.substring(0, 12),
|
||||
labels.get("cameleer.app"));
|
||||
|
||||
Optional<Deployment> deploymentOpt = deploymentRepository.findByContainerId(containerId);
|
||||
if (deploymentOpt.isEmpty()) return;
|
||||
|
||||
Deployment deployment = deploymentOpt.get();
|
||||
List<Map<String, Object>> replicas = new ArrayList<>(deployment.replicaStates());
|
||||
|
||||
boolean changed = false;
|
||||
for (int i = 0; i < replicas.size(); i++) {
|
||||
Map<String, Object> replica = replicas.get(i);
|
||||
if (containerId.equals(replica.get("containerId"))) {
|
||||
Map<String, Object> updated = new HashMap<>(replica);
|
||||
switch (action) {
|
||||
case "die", "oom", "stop" -> {
|
||||
updated.put("status", "DEAD");
|
||||
if ("oom".equals(action)) {
|
||||
updated.put("oomKilled", true);
|
||||
log.warn("Container {} OOM-killed (app={}, env={})", containerId.substring(0, 12),
|
||||
labels.get("cameleer.app"), labels.get("cameleer.environment"));
|
||||
}
|
||||
}
|
||||
case "start" -> updated.put("status", "RUNNING");
|
||||
}
|
||||
replicas.set(i, updated);
|
||||
changed = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!changed) return;
|
||||
|
||||
deploymentRepository.updateReplicaStates(deployment.id(), replicas);
|
||||
|
||||
long running = replicas.stream().filter(r -> "RUNNING".equals(r.get("status"))).count();
|
||||
DeploymentStatus newStatus;
|
||||
if (running == replicas.size()) {
|
||||
newStatus = DeploymentStatus.RUNNING;
|
||||
} else if (running > 0) {
|
||||
newStatus = DeploymentStatus.DEGRADED;
|
||||
} else {
|
||||
newStatus = DeploymentStatus.FAILED;
|
||||
}
|
||||
|
||||
if (deployment.status() != newStatus) {
|
||||
deploymentRepository.updateStatus(deployment.id(), newStatus, deployment.containerId(), deployment.errorMessage());
|
||||
log.info("Deployment {} status: {} -> {} ({}/{} replicas running)",
|
||||
deployment.id(), deployment.status(), newStatus, running, replicas.size());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Periodic reconciliation: inspects actual container state for active deployments
|
||||
* and corrects status mismatches caused by missed Docker events.
|
||||
*/
|
||||
@Scheduled(fixedDelay = 30_000, initialDelay = 60_000)
|
||||
public void reconcile() {
|
||||
List<Deployment> active = deploymentRepository.findByStatus(
|
||||
List.of(DeploymentStatus.RUNNING, DeploymentStatus.DEGRADED, DeploymentStatus.STARTING));
|
||||
|
||||
for (Deployment deployment : active) {
|
||||
if (deployment.replicaStates() == null || deployment.replicaStates().isEmpty()) continue;
|
||||
|
||||
List<Map<String, Object>> replicas = new ArrayList<>(deployment.replicaStates());
|
||||
boolean changed = false;
|
||||
|
||||
for (int i = 0; i < replicas.size(); i++) {
|
||||
Map<String, Object> replica = replicas.get(i);
|
||||
String containerId = (String) replica.get("containerId");
|
||||
if (containerId == null) continue;
|
||||
|
||||
ContainerStatus actual = runtimeOrchestrator.getContainerStatus(containerId);
|
||||
String currentStatus = (String) replica.get("status");
|
||||
String actualStatus = actual.running() ? "RUNNING" : "DEAD";
|
||||
|
||||
if (!actualStatus.equals(currentStatus)) {
|
||||
Map<String, Object> updated = new HashMap<>(replica);
|
||||
updated.put("status", actualStatus);
|
||||
replicas.set(i, updated);
|
||||
changed = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!changed) {
|
||||
// Even if replica states haven't changed, check if deployment status is correct
|
||||
long running = replicas.stream().filter(r -> "RUNNING".equals(r.get("status"))).count();
|
||||
DeploymentStatus expected = running == replicas.size() ? DeploymentStatus.RUNNING
|
||||
: running > 0 ? DeploymentStatus.DEGRADED : DeploymentStatus.FAILED;
|
||||
if (deployment.status() != expected) {
|
||||
deploymentRepository.updateStatus(deployment.id(), expected, deployment.containerId(), deployment.errorMessage());
|
||||
log.info("Reconcile: deployment {} status corrected {} -> {} ({}/{} running)",
|
||||
deployment.id(), deployment.status(), expected, running, replicas.size());
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
deploymentRepository.updateReplicaStates(deployment.id(), replicas);
|
||||
|
||||
long running = replicas.stream().filter(r -> "RUNNING".equals(r.get("status"))).count();
|
||||
DeploymentStatus newStatus = running == replicas.size() ? DeploymentStatus.RUNNING
|
||||
: running > 0 ? DeploymentStatus.DEGRADED : DeploymentStatus.FAILED;
|
||||
|
||||
if (deployment.status() != newStatus) {
|
||||
deploymentRepository.updateStatus(deployment.id(), newStatus, deployment.containerId(), deployment.errorMessage());
|
||||
log.info("Reconcile: deployment {} status {} -> {} ({}/{} replicas running)",
|
||||
deployment.id(), deployment.status(), newStatus, running, replicas.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private void reconnect() {
|
||||
try {
|
||||
Thread.sleep(5000);
|
||||
startListening();
|
||||
} catch (InterruptedException e) {
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
}
|
||||
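handleEvent() and reconcile() both collapse the replica states into a deployment status with the same three-way rule; extracted here for readability (this helper does not exist in the class):

```java
import com.cameleer3.server.core.runtime.DeploymentStatus;
import java.util.List;
import java.util.Map;

// All replicas RUNNING => RUNNING; some => DEGRADED; none => FAILED.
final class ReplicaStatusSketch {
    static DeploymentStatus statusFromReplicas(List<Map<String, Object>> replicas) {
        long running = replicas.stream().filter(r -> "RUNNING".equals(r.get("status"))).count();
        if (running == replicas.size()) return DeploymentStatus.RUNNING;
        return running > 0 ? DeploymentStatus.DEGRADED : DeploymentStatus.FAILED;
    }
}
```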
@@ -0,0 +1,62 @@
package com.cameleer3.server.app.runtime;

import com.github.dockerjava.api.DockerClient;
import com.github.dockerjava.api.model.Network;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.List;

public class DockerNetworkManager {

private static final Logger log = LoggerFactory.getLogger(DockerNetworkManager.class);
public static final String TRAEFIK_NETWORK = "cameleer-traefik";
public static final String ENV_NETWORK_PREFIX = "cameleer-env-";

private final DockerClient dockerClient;

public DockerNetworkManager(DockerClient dockerClient) {
this.dockerClient = dockerClient;
}

public String ensureNetwork(String networkName) {
List<Network> existing = dockerClient.listNetworksCmd()
.withNameFilter(networkName)
.exec();

for (Network net : existing) {
if (net.getName().equals(networkName)) {
return net.getId();
}
}

String id = dockerClient.createNetworkCmd()
.withName(networkName)
.withDriver("bridge")
.withCheckDuplicate(true)
.exec()
.getId();

log.info("Created Docker network: {} ({})", networkName, id);
return id;
}

public void connectContainer(String containerId, String networkName) {
String networkId = ensureNetwork(networkName);
try {
dockerClient.connectToNetworkCmd()
.withContainerId(containerId)
.withNetworkId(networkId)
.exec();
log.debug("Connected container {} to network {}", containerId, networkName);
} catch (Exception e) {
if (!e.getMessage().contains("already exists")) {
throw e;
}
}
}

public static String envNetworkName(String envSlug) {
return ENV_NETWORK_PREFIX + envSlug;
}
}
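Taken together with DeploymentExecutor earlier in this change, the calling pattern is: ensure the shared Traefik network and the per-environment network exist, start the container on the primary network, then attach it to the environment network. A condensed sketch of that sequence (container startup elided, containerId comes from the orchestrator's startContainer call):

```java
import com.github.dockerjava.api.DockerClient;

// Condensed calling pattern; both ensureNetwork and connectContainer are idempotent in practice.
final class NetworkAttachSketch {
    static void attachToEnvironmentNetwork(DockerClient dockerClient, String containerId, String envSlug) {
        DockerNetworkManager networks = new DockerNetworkManager(dockerClient);
        networks.ensureNetwork(DockerNetworkManager.TRAEFIK_NETWORK);   // "cameleer-traefik"
        String envNet = DockerNetworkManager.envNetworkName(envSlug);   // e.g. "cameleer-env-staging"
        networks.ensureNetwork(envNet);
        networks.connectContainer(containerId, envNet);                 // tolerates "already exists"
    }
}
```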
@@ -0,0 +1,199 @@
package com.cameleer3.server.app.runtime;

import com.cameleer3.server.core.runtime.ContainerRequest;
import com.cameleer3.server.core.runtime.ContainerStatus;
import com.cameleer3.server.core.runtime.RuntimeOrchestrator;
import com.github.dockerjava.api.DockerClient;
import com.github.dockerjava.api.async.ResultCallback;
import com.github.dockerjava.api.model.AccessMode;
import com.github.dockerjava.api.model.Bind;
import com.github.dockerjava.api.model.Frame;
import com.github.dockerjava.api.model.HealthCheck;
import com.github.dockerjava.api.model.HostConfig;
import com.github.dockerjava.api.model.RestartPolicy;
import com.github.dockerjava.api.model.Volume;
import com.github.dockerjava.core.DefaultDockerClientConfig;
import com.github.dockerjava.core.DockerClientImpl;
import com.github.dockerjava.zerodep.ZerodepDockerHttpClient;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Stream;

public class DockerRuntimeOrchestrator implements RuntimeOrchestrator {

    private static final Logger log = LoggerFactory.getLogger(DockerRuntimeOrchestrator.class);
    private DockerClient dockerClient;

    @PostConstruct
    public void init() {
        var config = DefaultDockerClientConfig.createDefaultConfigBuilder()
                .withDockerHost("unix:///var/run/docker.sock")
                .build();
        var httpClient = new ZerodepDockerHttpClient.Builder()
                .dockerHost(config.getDockerHost())
                .build();
        dockerClient = DockerClientImpl.getInstance(config, httpClient);
        log.info("Docker client initialized, host: {}", config.getDockerHost());
    }

    @PreDestroy
    public void close() throws IOException {
        if (dockerClient != null) {
            dockerClient.close();
        }
    }

    @Override
    public boolean isEnabled() {
        return true;
    }

    @Override
    public String startContainer(ContainerRequest request) {
        List<String> envList = request.envVars().entrySet().stream()
                .map(e -> e.getKey() + "=" + e.getValue()).toList();

        HostConfig hostConfig = HostConfig.newHostConfig()
                .withMemory(request.memoryLimitBytes())
                .withMemorySwap(request.memoryLimitBytes())
                .withCpuShares(request.cpuShares())
                .withNetworkMode(request.network())
                .withRestartPolicy(RestartPolicy.onFailureRestart(request.restartPolicyMaxRetries()));

        // JAR mounting: volume mount (Docker-in-Docker) or bind mount (host path)
        if (request.jarVolumeName() != null && !request.jarVolumeName().isBlank()) {
            // Mount the named volume at the jar storage base path
            Bind volumeBind = new Bind(request.jarVolumeName(), new Volume(request.jarVolumeMountPath()), AccessMode.ro);
            hostConfig.withBinds(volumeBind);
        } else {
            Bind jarBind = new Bind(request.jarPath(), new Volume("/app/app.jar"), AccessMode.ro);
            hostConfig.withBinds(jarBind);
        }

        if (request.memoryReserveBytes() != null) {
            hostConfig.withMemoryReservation(request.memoryReserveBytes());
        }
        if (request.cpuQuota() != null) {
            hostConfig.withCpuQuota(request.cpuQuota());
        }

        // When using volume mount, the JAR is at the original path, not /app/app.jar
        if (request.jarVolumeName() != null && !request.jarVolumeName().isBlank()) {
            envList = new ArrayList<>(envList);
            envList.add("CAMELEER_APP_JAR=" + request.jarPath());
        }

        var createCmd = dockerClient.createContainerCmd(request.baseImage())
                .withName(request.containerName())
                .withEnv(envList)
                .withLabels(request.labels() != null ? request.labels() : Map.of())
                .withHostConfig(hostConfig)
                .withHealthcheck(new HealthCheck()
                        .withTest(List.of("CMD-SHELL",
                                "wget -qO- http://localhost:" + request.healthCheckPort() + "/cameleer/health || exit 1"))
                        .withInterval(10_000_000_000L)
                        .withTimeout(5_000_000_000L)
                        .withRetries(3)
                        .withStartPeriod(30_000_000_000L));

        // Override entrypoint to use the volume-mounted JAR path
        if (request.jarVolumeName() != null && !request.jarVolumeName().isBlank()) {
            createCmd.withEntrypoint("sh", "-c",
                    "exec java -javaagent:/app/agent.jar " +
                    "-Dcameleer.export.type=${CAMELEER_EXPORT_TYPE:-HTTP} " +
                    "-Dcameleer.export.endpoint=${CAMELEER_SERVER_URL} " +
                    "-Dcameleer.agent.name=${HOSTNAME} " +
                    "-Dcameleer.agent.application=${CAMELEER_APPLICATION_ID:-default} " +
                    "-Dcameleer.agent.environment=${CAMELEER_ENVIRONMENT_ID:-default} " +
                    "-Dcameleer.routeControl.enabled=${CAMELEER_ROUTE_CONTROL_ENABLED:-false} " +
                    "-Dcameleer.replay.enabled=${CAMELEER_REPLAY_ENABLED:-false} " +
                    "-Dcameleer.health.enabled=true " +
                    "-Dcameleer.health.port=9464 " +
                    "-jar ${CAMELEER_APP_JAR}");
        }

        if (request.exposedPorts() != null && !request.exposedPorts().isEmpty()) {
            var ports = request.exposedPorts().stream()
                    .map(p -> com.github.dockerjava.api.model.ExposedPort.tcp(p))
                    .toArray(com.github.dockerjava.api.model.ExposedPort[]::new);
            createCmd.withExposedPorts(ports);
        }

        var container = createCmd.exec();
        dockerClient.startContainerCmd(container.getId()).exec();

        log.info("Started container {} ({})", request.containerName(), container.getId());
        return container.getId();
    }

    public DockerClient getDockerClient() {
        return dockerClient;
    }

    @Override
    public void stopContainer(String containerId) {
        try {
            dockerClient.stopContainerCmd(containerId).withTimeout(30).exec();
            log.info("Stopped container {}", containerId);
        } catch (Exception e) {
            log.warn("Failed to stop container {}: {}", containerId, e.getMessage());
        }
    }

    @Override
    public void removeContainer(String containerId) {
        try {
            dockerClient.removeContainerCmd(containerId).withForce(true).exec();
            log.info("Removed container {}", containerId);
        } catch (Exception e) {
            log.warn("Failed to remove container {}: {}", containerId, e.getMessage());
        }
    }

    @Override
    public ContainerStatus getContainerStatus(String containerId) {
        try {
            var inspection = dockerClient.inspectContainerCmd(containerId).exec();
            var state = inspection.getState();
            var health = state.getHealth();
            var healthStatus = health != null ? health.getStatus() : null;
            // Use health status if available, otherwise fall back to container state
            var effectiveState = healthStatus != null ? healthStatus : state.getStatus();
            return new ContainerStatus(
                    effectiveState,
                    Boolean.TRUE.equals(state.getRunning()),
                    state.getExitCodeLong() != null ? state.getExitCodeLong().intValue() : 0,
                    state.getError());
        } catch (Exception e) {
            return new ContainerStatus("not_found", false, -1, e.getMessage());
        }
    }

    @Override
    public Stream<String> getLogs(String containerId, int tailLines) {
        List<String> logLines = new ArrayList<>();
        try {
            dockerClient.logContainerCmd(containerId)
                    .withStdOut(true)
                    .withStdErr(true)
                    .withTail(tailLines)
                    .withTimestamps(true)
                    .exec(new ResultCallback.Adapter<Frame>() {
                        @Override
                        public void onNext(Frame frame) {
                            logLines.add(new String(frame.getPayload()).trim());
                        }
                    }).awaitCompletion();
        } catch (Exception e) {
            log.warn("Failed to get logs for container {}: {}", containerId, e.getMessage());
        }
        return logLines.stream();
    }
}
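The docker-java HealthCheck setters used in startContainer() take durations in nanoseconds, which is why the literals read 10_000_000_000L and similar. A minimal sketch, assuming only java.time.Duration on top of the same docker-java API, of expressing the identical values more readably:

import com.github.dockerjava.api.model.HealthCheck;
import java.time.Duration;
import java.util.List;

class HealthCheckSketch {
    // Equivalent to the inline literals above: 10s interval, 5s timeout, 30s start period, 3 retries.
    static HealthCheck build(int port) {
        return new HealthCheck()
                .withTest(List.of("CMD-SHELL",
                        "wget -qO- http://localhost:" + port + "/cameleer/health || exit 1"))
                .withInterval(Duration.ofSeconds(10).toNanos())
                .withTimeout(Duration.ofSeconds(5).toNanos())
                .withRetries(3)
                .withStartPeriod(Duration.ofSeconds(30).toNanos());
    }
}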
@@ -0,0 +1,47 @@
package com.cameleer3.server.app.runtime;

import com.cameleer3.server.app.storage.PostgresDeploymentRepository;
import com.cameleer3.server.core.runtime.DeploymentRepository;
import com.cameleer3.server.core.runtime.RuntimeOrchestrator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.nio.file.Files;
import java.nio.file.Path;

@Configuration
public class RuntimeOrchestratorAutoConfig {

    private static final Logger log = LoggerFactory.getLogger(RuntimeOrchestratorAutoConfig.class);

    @Bean
    public RuntimeOrchestrator runtimeOrchestrator() {
        // Auto-detect: Docker socket available?
        if (Files.exists(Path.of("/var/run/docker.sock"))) {
            log.info("Docker socket detected - enabling Docker runtime orchestrator");
            return new DockerRuntimeOrchestrator();
        }
        // TODO: K8s detection (check for service account token)
        log.info("No Docker socket or K8s detected - runtime management disabled (observability-only mode)");
        return new DisabledRuntimeOrchestrator();
    }

    @Bean
    public DockerNetworkManager dockerNetworkManager(RuntimeOrchestrator orchestrator) {
        if (orchestrator instanceof DockerRuntimeOrchestrator docker) {
            return new DockerNetworkManager(docker.getDockerClient());
        }
        return null;
    }

    @Bean
    public DockerEventMonitor dockerEventMonitor(RuntimeOrchestrator orchestrator,
                                                 DeploymentRepository deploymentRepository) {
        if (orchestrator instanceof DockerRuntimeOrchestrator docker) {
            return new DockerEventMonitor(docker, (PostgresDeploymentRepository) deploymentRepository);
        }
        return null;
    }
}
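The TODO above mentions Kubernetes detection via the service-account token. A minimal sketch of that check, assuming the standard in-cluster token path (this class and its name are illustrative, not part of this diff):

import java.nio.file.Files;
import java.nio.file.Path;

class KubernetesDetectionSketch {
    // Standard path where Kubernetes mounts the pod's service-account token;
    // its presence is a reasonable signal that the server runs inside a pod.
    private static final Path SA_TOKEN =
            Path.of("/var/run/secrets/kubernetes.io/serviceaccount/token");

    static boolean runningInKubernetes() {
        return Files.exists(SA_TOKEN);
    }
}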
@@ -0,0 +1,49 @@
package com.cameleer3.server.app.runtime;

import com.cameleer3.server.core.runtime.ResolvedContainerConfig;

import java.util.LinkedHashMap;
import java.util.Map;

public final class TraefikLabelBuilder {

    private TraefikLabelBuilder() {}

    public static Map<String, String> build(String appSlug, String envSlug, ResolvedContainerConfig config) {
        String svc = envSlug + "-" + appSlug;
        Map<String, String> labels = new LinkedHashMap<>();

        labels.put("traefik.enable", "true");
        labels.put("managed-by", "cameleer3-server");
        labels.put("cameleer.app", appSlug);
        labels.put("cameleer.environment", envSlug);

        labels.put("traefik.http.services." + svc + ".loadbalancer.server.port",
                String.valueOf(config.appPort()));

        if ("subdomain".equals(config.routingMode())) {
            labels.put("traefik.http.routers." + svc + ".rule",
                    "Host(`" + appSlug + "-" + envSlug + "." + config.routingDomain() + "`)");
        } else {
            labels.put("traefik.http.routers." + svc + ".rule",
                    "PathPrefix(`/" + envSlug + "/" + appSlug + "/`)");

            if (config.stripPathPrefix()) {
                labels.put("traefik.http.middlewares." + svc + "-strip.stripprefix.prefixes",
                        "/" + envSlug + "/" + appSlug);
                labels.put("traefik.http.routers." + svc + ".middlewares",
                        svc + "-strip");
            }
        }

        labels.put("traefik.http.routers." + svc + ".entrypoints",
                config.sslOffloading() ? "websecure" : "web");

        if (config.sslOffloading()) {
            labels.put("traefik.http.routers." + svc + ".tls", "true");
            labels.put("traefik.http.routers." + svc + ".tls.certresolver", "default");
        }

        return labels;
    }
}
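To make the label layout concrete, here is roughly what build() would return for a hypothetical app slug "orders" in environment "dev" with path-prefix routing on port 8080, prefix stripping enabled and SSL offloading disabled (the config values are illustrative, not taken from this diff):

import java.util.Map;

class TraefikLabelExample {
    // Expected output of TraefikLabelBuilder.build("orders", "dev", config)
    // for the hypothetical config described above.
    static final Map<String, String> EXPECTED = Map.of(
            "traefik.enable", "true",
            "managed-by", "cameleer3-server",
            "cameleer.app", "orders",
            "cameleer.environment", "dev",
            "traefik.http.services.dev-orders.loadbalancer.server.port", "8080",
            "traefik.http.routers.dev-orders.rule", "PathPrefix(`/dev/orders/`)",
            "traefik.http.middlewares.dev-orders-strip.stripprefix.prefixes", "/dev/orders",
            "traefik.http.routers.dev-orders.middlewares", "dev-orders-strip",
            "traefik.http.routers.dev-orders.entrypoints", "web"
    );
}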
@@ -0,0 +1,246 @@
package com.cameleer3.server.app.search;

import com.cameleer3.common.model.LogEntry;
import com.cameleer3.server.core.ingestion.BufferedLogEntry;
import com.cameleer3.server.core.search.LogSearchRequest;
import com.cameleer3.server.core.search.LogSearchResponse;
import com.cameleer3.server.core.storage.LogEntryResult;
import com.cameleer3.server.core.storage.LogIndex;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.core.JdbcTemplate;

import java.sql.Timestamp;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/**
 * ClickHouse-backed implementation of {@link LogIndex}.
 * Stores application logs in the {@code logs} MergeTree table with
 * ngram bloom-filter indexes for efficient substring search.
 */
public class ClickHouseLogStore implements LogIndex {

    private static final Logger log = LoggerFactory.getLogger(ClickHouseLogStore.class);
    private static final DateTimeFormatter ISO_FMT = DateTimeFormatter.ISO_INSTANT;

    private final String tenantId;
    private final JdbcTemplate jdbc;

    public ClickHouseLogStore(String tenantId, JdbcTemplate jdbc) {
        this.tenantId = tenantId;
        this.jdbc = jdbc;
    }

    @Override
    public void indexBatch(String instanceId, String applicationId, List<LogEntry> entries) {
        if (entries == null || entries.isEmpty()) {
            return;
        }

        String sql = "INSERT INTO logs (tenant_id, timestamp, application, instance_id, level, " +
                "logger_name, message, thread_name, stack_trace, exchange_id, mdc) " +
                "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";

        jdbc.batchUpdate(sql, entries, entries.size(), (ps, entry) -> {
            Instant ts = entry.getTimestamp() != null ? entry.getTimestamp() : Instant.now();
            ps.setString(1, tenantId);
            ps.setTimestamp(2, Timestamp.from(ts));
            ps.setString(3, applicationId);
            ps.setString(4, instanceId);
            ps.setString(5, entry.getLevel() != null ? entry.getLevel() : "");
            ps.setString(6, entry.getLoggerName() != null ? entry.getLoggerName() : "");
            ps.setString(7, entry.getMessage() != null ? entry.getMessage() : "");
            ps.setString(8, entry.getThreadName() != null ? entry.getThreadName() : "");
            ps.setString(9, entry.getStackTrace() != null ? entry.getStackTrace() : "");

            Map<String, String> mdc = entry.getMdc() != null ? entry.getMdc() : Collections.emptyMap();
            String exchangeId = mdc.getOrDefault("camel.exchangeId", "");
            ps.setString(10, exchangeId);
            ps.setObject(11, mdc);
        });

        log.debug("Indexed {} log entries for instance={}, app={}", entries.size(), instanceId, applicationId);
    }

    public void insertBufferedBatch(List<BufferedLogEntry> entries) {
        if (entries.isEmpty()) return;

        String sql = "INSERT INTO logs (tenant_id, environment, timestamp, application, instance_id, level, " +
                "logger_name, message, thread_name, stack_trace, exchange_id, mdc) " +
                "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";

        jdbc.batchUpdate(sql, entries, entries.size(), (ps, ble) -> {
            LogEntry entry = ble.entry();
            Instant ts = entry.getTimestamp() != null ? entry.getTimestamp() : Instant.now();
            ps.setString(1, ble.tenantId() != null ? ble.tenantId() : tenantId);
            ps.setString(2, ble.environment() != null ? ble.environment() : "default");
            ps.setTimestamp(3, Timestamp.from(ts));
            ps.setString(4, ble.applicationId());
            ps.setString(5, ble.instanceId());
            ps.setString(6, entry.getLevel() != null ? entry.getLevel() : "");
            ps.setString(7, entry.getLoggerName() != null ? entry.getLoggerName() : "");
            ps.setString(8, entry.getMessage() != null ? entry.getMessage() : "");
            ps.setString(9, entry.getThreadName() != null ? entry.getThreadName() : "");
            ps.setString(10, entry.getStackTrace() != null ? entry.getStackTrace() : "");

            Map<String, String> mdc = entry.getMdc() != null ? entry.getMdc() : Collections.emptyMap();
            String exchangeId = mdc.getOrDefault("camel.exchangeId", "");
            ps.setString(11, exchangeId);
            ps.setObject(12, mdc);
        });

        log.debug("Flushed {} buffered log entries to ClickHouse", entries.size());
    }

    @Override
    public LogSearchResponse search(LogSearchRequest request) {
        // Build shared WHERE conditions (used by both data and count queries)
        List<String> baseConditions = new ArrayList<>();
        List<Object> baseParams = new ArrayList<>();
        baseConditions.add("tenant_id = ?");
        baseParams.add(tenantId);

        if (request.environment() != null && !request.environment().isEmpty()) {
            baseConditions.add("environment = ?");
            baseParams.add(request.environment());
        }

        if (request.application() != null && !request.application().isEmpty()) {
            baseConditions.add("application = ?");
            baseParams.add(request.application());
        }

        if (request.instanceId() != null && !request.instanceId().isEmpty()) {
            baseConditions.add("instance_id = ?");
            baseParams.add(request.instanceId());
        }

        if (request.exchangeId() != null && !request.exchangeId().isEmpty()) {
            baseConditions.add("(exchange_id = ? OR (mapContains(mdc, 'camel.exchangeId') AND mdc['camel.exchangeId'] = ?))");
            baseParams.add(request.exchangeId());
            baseParams.add(request.exchangeId());
        }

        if (request.q() != null && !request.q().isEmpty()) {
            String term = "%" + escapeLike(request.q()) + "%";
            baseConditions.add("(message ILIKE ? OR stack_trace ILIKE ?)");
            baseParams.add(term);
            baseParams.add(term);
        }

        if (request.logger() != null && !request.logger().isEmpty()) {
            baseConditions.add("logger_name ILIKE ?");
            baseParams.add("%" + escapeLike(request.logger()) + "%");
        }

        if (request.from() != null) {
            baseConditions.add("timestamp >= ?");
            baseParams.add(Timestamp.from(request.from()));
        }

        if (request.to() != null) {
            baseConditions.add("timestamp <= ?");
            baseParams.add(Timestamp.from(request.to()));
        }

        // Level counts query: uses base conditions WITHOUT level filter and cursor
        String baseWhere = String.join(" AND ", baseConditions);
        Map<String, Long> levelCounts = queryLevelCounts(baseWhere, baseParams);

        // Data query conditions: add level filter and cursor on top of base
        List<String> dataConditions = new ArrayList<>(baseConditions);
        List<Object> dataParams = new ArrayList<>(baseParams);

        if (request.levels() != null && !request.levels().isEmpty()) {
            String placeholders = String.join(", ", Collections.nCopies(request.levels().size(), "?"));
            dataConditions.add("level IN (" + placeholders + ")");
            for (String lvl : request.levels()) {
                dataParams.add(lvl.toUpperCase());
            }
        }

        if (request.cursor() != null && !request.cursor().isEmpty()) {
            Instant cursorTs = Instant.parse(request.cursor());
            if ("asc".equalsIgnoreCase(request.sort())) {
                dataConditions.add("timestamp > ?");
            } else {
                dataConditions.add("timestamp < ?");
            }
            dataParams.add(Timestamp.from(cursorTs));
        }

        String dataWhere = String.join(" AND ", dataConditions);
        String orderDir = "asc".equalsIgnoreCase(request.sort()) ? "ASC" : "DESC";
        int fetchLimit = request.limit() + 1; // fetch N+1 to detect hasMore

        String dataSql = "SELECT timestamp, level, logger_name, message, thread_name, stack_trace, " +
                "exchange_id, instance_id, application, mdc " +
                "FROM logs WHERE " + dataWhere +
                " ORDER BY timestamp " + orderDir + " LIMIT ?";
        dataParams.add(fetchLimit);

        List<LogEntryResult> results = jdbc.query(dataSql, dataParams.toArray(), (rs, rowNum) -> {
            Timestamp ts = rs.getTimestamp("timestamp");
            String timestampStr = ts != null
                    ? ts.toInstant().atOffset(ZoneOffset.UTC).format(ISO_FMT)
                    : null;

            @SuppressWarnings("unchecked")
            Map<String, String> mdc = (Map<String, String>) rs.getObject("mdc");
            if (mdc == null) mdc = Collections.emptyMap();

            return new LogEntryResult(
                    timestampStr,
                    rs.getString("level"),
                    rs.getString("logger_name"),
                    rs.getString("message"),
                    rs.getString("thread_name"),
                    rs.getString("stack_trace"),
                    rs.getString("exchange_id"),
                    rs.getString("instance_id"),
                    rs.getString("application"),
                    mdc
            );
        });

        boolean hasMore = results.size() > request.limit();
        if (hasMore) {
            results = new ArrayList<>(results.subList(0, request.limit()));
        }

        String nextCursor = null;
        if (hasMore && !results.isEmpty()) {
            nextCursor = results.get(results.size() - 1).timestamp();
        }

        return new LogSearchResponse(results, nextCursor, hasMore, levelCounts);
    }

    private Map<String, Long> queryLevelCounts(String baseWhere, List<Object> baseParams) {
        String sql = "SELECT level, count() AS cnt FROM logs WHERE " + baseWhere + " GROUP BY level";
        Map<String, Long> counts = new LinkedHashMap<>();
        try {
            jdbc.query(sql, baseParams.toArray(), (rs, rowNum) -> {
                counts.put(rs.getString("level"), rs.getLong("cnt"));
                return null;
            });
        } catch (Exception e) {
            log.warn("Failed to query level counts", e);
        }
        return counts;
    }

    private static String escapeLike(String term) {
        return term.replace("\\", "\\\\")
                .replace("%", "\\%")
                .replace("_", "\\_");
    }
}
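ClickHouseLogStore only needs a tenant id and a JdbcTemplate; a minimal wiring sketch follows. The JDBC URL, credentials, tenant id, and the com.clickhouse JDBC driver coordinates are assumptions for illustration - this diff does not show how the DataSource is actually configured.

import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.datasource.DriverManagerDataSource;

class ClickHouseLogStoreWiringSketch {
    static ClickHouseLogStore create() {
        // Assumed connection details; replace with the project's real configuration.
        DriverManagerDataSource ds = new DriverManagerDataSource();
        ds.setDriverClassName("com.clickhouse.jdbc.ClickHouseDriver");
        ds.setUrl("jdbc:clickhouse://localhost:8123/default");
        ds.setUsername("default");
        ds.setPassword("");

        JdbcTemplate jdbc = new JdbcTemplate(ds);
        return new ClickHouseLogStore("tenant-1", jdbc); // tenant id is illustrative
    }
}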
Some files were not shown because too many files have changed in this diff.