refactor: CPU config to millicores, fix replica health, reorder tabs
Some checks failed
CI / cleanup-branch (push) Has been skipped
CI / build (push) Successful in 1m18s
CI / docker (push) Successful in 1m5s
CI / deploy-feature (push) Has been skipped
CI / deploy (push) Has been cancelled

- Rename cpuShares to cpuRequest (millicores), cpuLimit from cores to
  millicores. ResolvedContainerConfig translates to Docker-native units
  via dockerCpuShares() and dockerCpuQuota() helpers. Future K8s
  orchestrator can pass millicores through directly.
- Fix waitForAnyHealthy to wait for ALL replicas instead of returning
  on first healthy one. Prevents false DEGRADED status with 2+ replicas.
- Default app detail to Configuration tab (was Overview)
- Reorder config sub-tabs: Monitoring, Resources, Variables, Traces &
  Taps, Route Recording

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
hsiegeln
2026-04-09 07:38:23 +02:00
parent eb7cd9ba62
commit e88db56f79
5 changed files with 57 additions and 46 deletions

View File

@@ -37,8 +37,8 @@ public class DeploymentExecutor {
 @Value("${cameleer.runtime.container-memory-limit:512m}")
 private String globalMemoryLimit;
-@Value("${cameleer.runtime.container-cpu-shares:512}")
-private int globalCpuShares;
+@Value("${cameleer.runtime.container-cpu-request:500}")
+private int globalCpuRequest;
 @Value("${cameleer.runtime.health-check-timeout:60}")
 private int healthCheckTimeout;
@@ -86,7 +86,7 @@ public class DeploymentExecutor {
 var globalDefaults = new ConfigMerger.GlobalRuntimeDefaults(
 parseMemoryLimitMb(globalMemoryLimit),
-globalCpuShares,
+globalCpuRequest,
 globalRoutingMode,
 globalRoutingDomain,
 globalServerUrl.isBlank() ? "http://cameleer3-server:8081" : globalServerUrl
@@ -126,7 +126,6 @@ public class DeploymentExecutor {
 for (int i = 0; i < config.replicas(); i++) {
 String containerName = env.slug() + "-" + app.slug() + "-" + i;
-Long cpuQuota = config.cpuLimit() != null ? (long) (config.cpuLimit() * 100_000) : null;
 String volumeName = jarDockerVolume != null && !jarDockerVolume.isBlank() ? jarDockerVolume : null;
 ContainerRequest request = new ContainerRequest(
@@ -136,7 +135,7 @@ public class DeploymentExecutor {
 envNet != null ? List.of(envNet) : List.of(),
 baseEnvVars, labels,
 config.memoryLimitBytes(), config.memoryReserveBytes(),
-config.cpuShares(), cpuQuota,
+config.dockerCpuShares(), config.dockerCpuQuota(),
 config.exposedPorts(), agentHealthPort,
 "on-failure", 3
 );
@@ -273,19 +272,21 @@ public class DeploymentExecutor {
 private int waitForAnyHealthy(List<String> containerIds, int timeoutSeconds) {
 long deadline = System.currentTimeMillis() + (timeoutSeconds * 1000L);
+int lastHealthy = 0;
 while (System.currentTimeMillis() < deadline) {
 int healthy = 0;
 for (String cid : containerIds) {
 ContainerStatus status = orchestrator.getContainerStatus(cid);
 if ("healthy".equals(status.state())) healthy++;
 }
-if (healthy > 0) return healthy;
+lastHealthy = healthy;
+if (healthy == containerIds.size()) return healthy;
 try { Thread.sleep(2000); } catch (InterruptedException e) {
 Thread.currentThread().interrupt();
-return 0;
+return lastHealthy;
 }
 }
-return 0;
+return lastHealthy;
 }
 private List<Map<String, Object>> updateReplicaHealth(List<Map<String, Object>> replicas,