chore: rename cameleer3 to cameleer
Some checks failed
CI / cleanup-branch (push) Has been skipped
CI / build (push) Failing after 18s
CI / docker (push) Has been skipped
CI / deploy (push) Has been skipped
CI / deploy-feature (push) Has been skipped

Rename Java packages from com.cameleer3 to com.cameleer, module
directories from cameleer3-* to cameleer-*, and all references
throughout workflows, Dockerfiles, docs, migrations, and pom.xml.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
hsiegeln
2026-04-15 15:28:42 +02:00
parent 1077293343
commit cb3ebfea7c
569 changed files with 4356 additions and 3245 deletions

View File

@@ -0,0 +1,61 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>com.cameleer</groupId>
<artifactId>cameleer-server-parent</artifactId>
<version>1.0-SNAPSHOT</version>
</parent>
<artifactId>cameleer-server-core</artifactId>
<name>Cameleer Server Core</name>
<description>Domain logic, storage, and agent registry</description>
<dependencies>
<dependency>
<groupId>com.cameleer</groupId>
<artifactId>cameleer-common</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>jakarta.servlet</groupId>
<artifactId>jakarta.servlet-api</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.springframework.security</groupId>
<artifactId>spring-security-core</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.datatype</groupId>
<artifactId>jackson-datatype-jsr310</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.junit.jupiter</groupId>
<artifactId>junit-jupiter</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
</project>

View File

@@ -0,0 +1,19 @@
package com.cameleer.server.core.admin;
import java.time.Instant;
/**
 * Per-application settings snapshot.
 * <p>
 * Thresholds drive SLA and health evaluation; the health* values are
 * presumably percentages — TODO confirm against the health evaluator.
 *
 * @param applicationId application these settings belong to
 * @param slaThresholdMs execution duration (ms) above which an execution breaches SLA
 * @param createdAt when the settings row was first created
 * @param updatedAt when the settings row was last modified
 */
public record AppSettings(
        String applicationId,
        int slaThresholdMs,
        double healthErrorWarn,
        double healthErrorCrit,
        double healthSlaWarn,
        double healthSlaCrit,
        Instant createdAt,
        Instant updatedAt) {

    /**
     * Factory for the built-in defaults: 300ms SLA threshold,
     * error warn/crit at 1.0/5.0, SLA warn/crit at 99.0/95.0.
     * Both timestamps are set to the same "now" instant.
     */
    public static AppSettings defaults(String applicationId) {
        Instant creationTime = Instant.now();
        return new AppSettings(applicationId, 300, 1.0, 5.0, 99.0, 95.0, creationTime, creationTime);
    }
}

View File

@@ -0,0 +1,11 @@
package com.cameleer.server.core.admin;
import java.util.List;
import java.util.Optional;
/** Persistence abstraction for per-application settings. */
public interface AppSettingsRepository {
    /** @return the settings stored for the given application, if any */
    Optional<AppSettings> findByApplicationId(String applicationId);
    /** @return all stored settings rows */
    List<AppSettings> findAll();
    /** Insert or update the given settings; returns the persisted state. */
    AppSettings save(AppSettings settings);
    /** Remove the settings for the given application. */
    void delete(String applicationId);
}

View File

@@ -0,0 +1,5 @@
package com.cameleer.server.core.admin;
/** Coarse classification of an audited action (used for filtering audit queries). */
public enum AuditCategory {
    INFRA, AUTH, USER_MGMT, CONFIG, RBAC, AGENT
}

View File

@@ -0,0 +1,24 @@
package com.cameleer.server.core.admin;
import java.time.Instant;
import java.util.Map;
/**
 * A single immutable audit log entry.
 *
 * @param id        database-assigned identifier (0 for not-yet-persisted records)
 * @param timestamp database-assigned creation time (null for not-yet-persisted records)
 * @param username  acting user
 * @param action    short action name (e.g. verb describing what happened)
 * @param category  coarse classification used for filtering
 * @param target    the entity the action was applied to
 * @param detail    free-form structured details
 * @param result    whether the action succeeded or failed
 * @param ipAddress remote address of the caller, if known
 * @param userAgent caller's User-Agent header, if known
 */
public record AuditRecord(
        long id,
        Instant timestamp,
        String username,
        String action,
        AuditCategory category,
        String target,
        Map<String, Object> detail,
        AuditResult result,
        String ipAddress,
        String userAgent
) {
    /** Factory for creating new records (id and timestamp assigned by DB) */
    // id=0 and timestamp=null act as "unassigned" placeholders until insertion.
    public static AuditRecord create(String username, String action, AuditCategory category,
                                     String target, Map<String, Object> detail, AuditResult result,
                                     String ipAddress, String userAgent) {
        return new AuditRecord(0, null, username, action, category, target, detail, result, ipAddress, userAgent);
    }
}

View File

@@ -0,0 +1,25 @@
package com.cameleer.server.core.admin;
import java.time.Instant;
import java.util.List;
/** Persistence and query abstraction for audit records. */
public interface AuditRepository {
    /** Persist a new audit record (id and timestamp are assigned by the store). */
    void insert(AuditRecord auditRecord);

    /**
     * Filter and paging parameters for audit queries.
     * Null fields presumably mean "no filter" — TODO confirm against implementations.
     *
     * @param sort  column/field to sort by
     * @param order sort direction
     * @param page  zero-or-one-based page index (implementation-defined — verify)
     * @param size  page size
     */
    record AuditQuery(
            String username,
            AuditCategory category,
            String search,
            Instant from,
            Instant to,
            String sort,
            String order,
            int page,
            int size
    ) {}

    /** One page of results plus the total match count across all pages. */
    record AuditPage(List<AuditRecord> items, long totalCount) {}

    /** Run a filtered, paged audit query. */
    AuditPage find(AuditQuery query);
}

View File

@@ -0,0 +1,5 @@
package com.cameleer.server.core.admin;
/** Outcome of an audited action. */
public enum AuditResult {
    SUCCESS, FAILURE
}

View File

@@ -0,0 +1,53 @@
package com.cameleer.server.core.admin;
import jakarta.servlet.http.HttpServletRequest;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContextHolder;
import java.util.Map;
/**
 * Writes audit entries to the {@link AuditRepository} and mirrors each one
 * to the application log. Username resolution falls back to "unknown" when
 * no authentication is present in the security context.
 */
public class AuditService {

    private static final Logger log = LoggerFactory.getLogger(AuditService.class);

    private final AuditRepository repository;

    public AuditService(AuditRepository repository) {
        this.repository = repository;
    }

    /** Log an action, resolving the username from the current SecurityContext. */
    public void log(String action, AuditCategory category, String target,
                    Map<String, Object> detail, AuditResult result,
                    HttpServletRequest request) {
        log(extractUsername(), action, category, target, detail, result, request);
    }

    /** Log an action with an explicit username (for pre-auth contexts like login). */
    public void log(String username, String action, AuditCategory category, String target,
                    Map<String, Object> detail, AuditResult result,
                    HttpServletRequest request) {
        String remoteIp = null;
        String callerAgent = null;
        if (request != null) {
            remoteIp = request.getRemoteAddr();
            callerAgent = request.getHeader("User-Agent");
        }
        AuditRecord entry = AuditRecord.create(username, action, category, target, detail, result, remoteIp, callerAgent);
        repository.insert(entry);
        if (request != null) {
            // Marker so downstream filters can tell this request was already audited.
            request.setAttribute("audit.logged", true);
        }
        log.info("AUDIT: user={} action={} category={} target={} result={}",
                username, action, category, target, result);
    }

    /**
     * Resolve the current username from the SecurityContext, stripping an
     * optional "user:" prefix; "unknown" when unauthenticated.
     */
    private String extractUsername() {
        Authentication auth = SecurityContextHolder.getContext().getAuthentication();
        if (auth == null || auth.getName() == null) {
            return "unknown";
        }
        String principal = auth.getName();
        return principal.startsWith("user:") ? principal.substring(5) : principal;
    }
}

View File

@@ -0,0 +1,10 @@
package com.cameleer.server.core.admin;
import java.util.List;
/**
 * Immutable holder for the configured list of sensitive key names.
 * A null input list is normalized to an empty list; non-null input is
 * snapshotted defensively so callers cannot mutate it afterwards.
 */
public record SensitiveKeysConfig(List<String> keys) {
    public SensitiveKeysConfig {
        keys = (keys == null) ? List.of() : List.copyOf(keys);
    }
}

View File

@@ -0,0 +1,45 @@
package com.cameleer.server.core.admin;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeSet;
/**
 * Merges global (enforced) sensitive keys with per-app additions.
 * <p>
 * Union-only: per-app entries can add keys but never remove global ones.
 * Deduplication is case-insensitive and the first-seen casing wins; the
 * merged list preserves encounter order (global keys first, then per-app).
 */
public final class SensitiveKeysMerger {

    private SensitiveKeysMerger() {}

    /**
     * @param global enforced global keys (null = not configured)
     * @param perApp per-app additional keys (null = none)
     * @return merged list, or null when both inputs are null
     */
    public static List<String> merge(List<String> global, List<String> perApp) {
        if (global == null && perApp == null) {
            return null;
        }
        // CASE_INSENSITIVE_ORDER makes the set ignore casing when deduplicating.
        TreeSet<String> dedupe = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
        List<String> merged = new ArrayList<>();
        appendUnique(global, dedupe, merged);
        appendUnique(perApp, dedupe, merged);
        return merged;
    }

    /** Append each key of {@code source} to {@code sink} unless already seen (case-insensitively). */
    private static void appendUnique(List<String> source, TreeSet<String> dedupe, List<String> sink) {
        if (source == null) {
            return;
        }
        for (String key : source) {
            if (dedupe.add(key)) {
                sink.add(key);
            }
        }
    }
}

View File

@@ -0,0 +1,8 @@
package com.cameleer.server.core.admin;
import java.util.Optional;
/** Persistence abstraction for the global sensitive-keys configuration. */
public interface SensitiveKeysRepository {
    /** @return the stored configuration, if one has been saved */
    Optional<SensitiveKeysConfig> find();
    /** Persist the configuration, recording who changed it. */
    void save(SensitiveKeysConfig config, String updatedBy);
}

View File

@@ -0,0 +1,20 @@
package com.cameleer.server.core.admin;
/**
 * Health-threshold configuration. Currently only database thresholds exist;
 * additional subsystems can be added as further components.
 */
public record ThresholdConfig(
        DatabaseThresholds database
) {
    /**
     * Database health thresholds: connection-pool usage (warn/crit) and
     * query duration (warn/crit). Pool values are presumably percentages
     * and durations presumably seconds — TODO confirm against the health checks.
     */
    public record DatabaseThresholds(
            int connectionPoolWarning,
            int connectionPoolCritical,
            double queryDurationWarning,
            double queryDurationCritical
    ) {
        /** Built-in defaults: pool warn 80 / crit 95, duration warn 1.0 / crit 10.0. */
        public static DatabaseThresholds defaults() {
            return new DatabaseThresholds(80, 95, 1.0, 10.0);
        }
    }

    /** Full default configuration using {@link DatabaseThresholds#defaults()}. */
    public static ThresholdConfig defaults() {
        DatabaseThresholds databaseDefaults = DatabaseThresholds.defaults();
        return new ThresholdConfig(databaseDefaults);
    }
}

View File

@@ -0,0 +1,8 @@
package com.cameleer.server.core.admin;
import java.util.Optional;
/** Persistence abstraction for the threshold configuration. */
public interface ThresholdRepository {
    /** @return the stored configuration, if one has been saved */
    Optional<ThresholdConfig> find();
    /** Persist the configuration, recording who changed it. */
    void save(ThresholdConfig config, String updatedBy);
}

View File

@@ -0,0 +1,27 @@
package com.cameleer.server.core.agent;
import java.time.Instant;
/**
 * Immutable record representing a command to be pushed to an agent.
 *
 * @param id unique command identifier (UUID)
 * @param type command type
 * @param payload raw JSON payload
 * @param targetInstanceId target agent instance identifier
 * @param createdAt when the command was created
 * @param status current delivery status
 */
public record AgentCommand(
        String id,
        CommandType type,
        String payload,
        String targetInstanceId,
        Instant createdAt,
        CommandStatus status
) {
    /** Copy of this command with only the delivery status replaced. */
    public AgentCommand withStatus(CommandStatus newStatus) {
        return new AgentCommand(id, type, payload, targetInstanceId, createdAt, newStatus);
    }
}

View File

@@ -0,0 +1,19 @@
package com.cameleer.server.core.agent;
/**
 * Listener interface for agent registry events.
 * <p>
 * Defined in the core module so the registry service can notify listeners
 * without depending on Spring or SSE classes. The app module provides the
 * implementation that bridges to the SSE connection manager.
 */
public interface AgentEventListener {
    /**
     * Called when a new command is ready to be delivered to an agent.
     * Invoked synchronously from the thread that enqueued the command
     * (see AgentRegistryService.addCommand), so implementations should be fast.
     *
     * @param instanceId the target agent instance identifier
     * @param command the command to deliver
     */
    void onCommandReady(String instanceId, AgentCommand command);
}

View File

@@ -0,0 +1,12 @@
package com.cameleer.server.core.agent;
import java.time.Instant;
/**
 * A persisted agent lifecycle/telemetry event.
 *
 * @param id            database-assigned identifier
 * @param instanceId    agent instance that produced the event
 * @param applicationId application the agent belongs to
 * @param eventType     free-form event type string
 * @param detail        free-form detail payload
 * @param timestamp     when the event occurred
 */
public record AgentEventRecord(
        long id,
        String instanceId,
        String applicationId,
        String eventType,
        String detail,
        Instant timestamp
) {}

View File

@@ -0,0 +1,13 @@
package com.cameleer.server.core.agent;
import java.time.Instant;
import java.util.List;
/** Persistence abstraction for agent events. */
public interface AgentEventRepository {
    /** Insert a new event (id and timestamp assigned by the store). */
    void insert(String instanceId, String applicationId, String eventType, String detail);
    /** Query events filtered by application/instance and time window, newest-first presumably — verify impl. */
    List<AgentEventRecord> query(String applicationId, String instanceId, Instant from, Instant to, int limit);
    /** Same as above with an additional environment filter. */
    List<AgentEventRecord> query(String applicationId, String instanceId, String environment, Instant from, Instant to, int limit);
}

View File

@@ -0,0 +1,31 @@
package com.cameleer.server.core.agent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Instant;
import java.util.List;
/**
 * Thin service layer over {@link AgentEventRepository}: records agent
 * events and exposes the repository's query overloads unchanged.
 */
public class AgentEventService {

    private static final Logger log = LoggerFactory.getLogger(AgentEventService.class);

    private final AgentEventRepository events;

    public AgentEventService(AgentEventRepository repository) {
        this.events = repository;
    }

    /** Persist a single agent event (debug-logged for traceability). */
    public void recordEvent(String instanceId, String applicationId, String eventType, String detail) {
        log.debug("Recording agent event: instance={}, app={}, type={}", instanceId, applicationId, eventType);
        events.insert(instanceId, applicationId, eventType, detail);
    }

    /** Query events for an application/instance within a time window. */
    public List<AgentEventRecord> queryEvents(String applicationId, String instanceId, Instant from, Instant to, int limit) {
        return events.query(applicationId, instanceId, from, to, limit);
    }

    /** Query events additionally filtered by environment. */
    public List<AgentEventRecord> queryEvents(String applicationId, String instanceId, String environment, Instant from, Instant to, int limit) {
        return events.query(applicationId, instanceId, environment, from, to, limit);
    }
}

View File

@@ -0,0 +1,70 @@
package com.cameleer.server.core.agent;
import java.time.Instant;
import java.util.List;
import java.util.Map;
/**
 * Immutable record representing a registered agent's current state.
 * <p>
 * Stored in a {@link java.util.concurrent.ConcurrentHashMap} and atomically swapped
 * via {@code computeIfPresent} for thread-safe state transitions. Wither-style methods
 * return new instances with the specified field changed.
 *
 * @param instanceId agent-provided persistent identifier
 * @param displayName human-readable agent name
 * @param applicationId application identifier (e.g., "order-service-prod")
 * @param environmentId logical environment (e.g., "dev", "staging", "prod")
 * @param version agent software version
 * @param routeIds list of Camel route IDs managed by this agent
 * @param capabilities agent-declared capabilities (free-form)
 * @param state current lifecycle state
 * @param registeredAt when the agent first registered (or re-registered)
 * @param lastHeartbeat last heartbeat timestamp
 * @param staleTransitionTime when the agent transitioned to STALE (null if not STALE/DEAD)
 */
public record AgentInfo(
        String instanceId,
        String displayName,
        String applicationId,
        String environmentId,
        String version,
        List<String> routeIds,
        Map<String, Object> capabilities,
        AgentState state,
        Instant registeredAt,
        Instant lastHeartbeat,
        Instant staleTransitionTime
) {
    // NOTE(review): routeIds/capabilities are not defensively copied here; callers
    // (e.g. AgentRegistryService.register/heartbeat) pass List.copyOf/Map.copyOf —
    // confirm every construction site does the same before relying on immutability.

    /** Copy with only {@code state} replaced. */
    public AgentInfo withState(AgentState newState) {
        return new AgentInfo(instanceId, displayName, applicationId, environmentId, version, routeIds, capabilities,
                newState, registeredAt, lastHeartbeat, staleTransitionTime);
    }

    /** Copy with only {@code lastHeartbeat} replaced. */
    public AgentInfo withLastHeartbeat(Instant newLastHeartbeat) {
        return new AgentInfo(instanceId, displayName, applicationId, environmentId, version, routeIds, capabilities,
                state, registeredAt, newLastHeartbeat, staleTransitionTime);
    }

    /** Copy with only {@code registeredAt} replaced. */
    public AgentInfo withRegisteredAt(Instant newRegisteredAt) {
        return new AgentInfo(instanceId, displayName, applicationId, environmentId, version, routeIds, capabilities,
                state, newRegisteredAt, lastHeartbeat, staleTransitionTime);
    }

    /** Copy with only {@code staleTransitionTime} replaced (null clears it). */
    public AgentInfo withStaleTransitionTime(Instant newStaleTransitionTime) {
        return new AgentInfo(instanceId, displayName, applicationId, environmentId, version, routeIds, capabilities,
                state, registeredAt, lastHeartbeat, newStaleTransitionTime);
    }

    /** Copy with only {@code capabilities} replaced. */
    public AgentInfo withCapabilities(Map<String, Object> newCapabilities) {
        return new AgentInfo(instanceId, displayName, applicationId, environmentId, version, routeIds, newCapabilities,
                state, registeredAt, lastHeartbeat, staleTransitionTime);
    }

    /** Copy replacing all registration metadata at once (used on re-registration). */
    public AgentInfo withMetadata(String displayName, String applicationId, String environmentId,
                                  String version, List<String> routeIds, Map<String, Object> capabilities) {
        return new AgentInfo(instanceId, displayName, applicationId, environmentId, version, routeIds, capabilities,
                state, registeredAt, lastHeartbeat, staleTransitionTime);
    }
}

View File

@@ -0,0 +1,397 @@
package com.cameleer.server.core.agent;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
/**
 * In-memory agent registry managing agent lifecycle, heartbeats, and commands.
 * <p>
 * Plain class (no Spring annotations) -- wired as a bean by the app module.
 * Uses {@link ConcurrentHashMap} for thread-safe agent storage with atomic
 * record-swapping via {@code compute} operations.
 */
public class AgentRegistryService {
    private static final Logger log = LoggerFactory.getLogger(AgentRegistryService.class);
    // Lifecycle/expiry tuning, all milliseconds (see constructor javadoc).
    private final long staleThresholdMs;
    private final long deadThresholdMs;
    private final long commandExpiryMs;
    // instanceId -> current agent snapshot; entries are swapped atomically via compute*.
    private final ConcurrentHashMap<String, AgentInfo> agents = new ConcurrentHashMap<>();
    // instanceId -> queue of pending/delivered commands for that agent.
    private final ConcurrentHashMap<String, ConcurrentLinkedQueue<AgentCommand>> commands = new ConcurrentHashMap<>();
    // commandId -> future completed when the agent replies; auto-removed on completion.
    private final ConcurrentHashMap<String, CompletableFuture<CommandReply>> pendingReplies = new ConcurrentHashMap<>();
    // volatile: written once during wiring, read by every command-producing thread.
    private volatile AgentEventListener eventListener;

    /**
     * @param staleThresholdMs ms without a heartbeat before LIVE -> STALE
     * @param deadThresholdMs  ms spent in STALE before STALE -> DEAD
     * @param commandExpiryMs  age after which PENDING commands are dropped
     */
    public AgentRegistryService(long staleThresholdMs, long deadThresholdMs, long commandExpiryMs) {
        this.staleThresholdMs = staleThresholdMs;
        this.deadThresholdMs = deadThresholdMs;
        this.commandExpiryMs = commandExpiryMs;
    }

    /**
     * Register a new agent or re-register an existing one.
     * Re-registration updates metadata, transitions state to LIVE, and resets timestamps.
     */
    public AgentInfo register(String id, String name, String application, String environmentId,
                              String version, List<String> routeIds, Map<String, Object> capabilities) {
        Instant now = Instant.now();
        // Candidate for the first-registration case; discarded on re-registration.
        AgentInfo newAgent = new AgentInfo(id, name, application, environmentId, version,
                List.copyOf(routeIds), Map.copyOf(capabilities),
                AgentState.LIVE, now, now, null);
        return agents.compute(id, (key, existing) -> {
            if (existing != null) {
                // Re-registration: update metadata, reset to LIVE
                log.info("Agent {} re-registering (was {})", id, existing.state());
                return existing
                        .withMetadata(name, application, environmentId, version, List.copyOf(routeIds), Map.copyOf(capabilities))
                        .withState(AgentState.LIVE)
                        .withLastHeartbeat(now)
                        .withRegisteredAt(now)
                        .withStaleTransitionTime(null);
            }
            log.info("Agent {} registered (name={}, application={}, env={})", id, name, application, environmentId);
            return newAgent;
        });
    }

    /**
     * Process a heartbeat from an agent.
     * Updates lastHeartbeat, capabilities (if provided), and transitions STALE agents back to LIVE.
     *
     * @return true if the agent is known, false otherwise
     */
    public boolean heartbeat(String id, Map<String, Object> capabilities) {
        AgentInfo updated = agents.computeIfPresent(id, (key, existing) -> {
            Instant now = Instant.now();
            AgentInfo result = existing.withLastHeartbeat(now);
            if (capabilities != null && !capabilities.isEmpty()) {
                result = result.withCapabilities(Map.copyOf(capabilities));
            }
            // A heartbeat revives STALE agents; DEAD/SHUTDOWN agents are NOT revived here.
            if (existing.state() == AgentState.STALE) {
                result = result.withState(AgentState.LIVE).withStaleTransitionTime(null);
                log.info("Agent {} revived from STALE to LIVE via heartbeat", id);
            }
            return result;
        });
        return updated != null;
    }

    /** Overload for callers without capabilities (backward compatibility). */
    public boolean heartbeat(String id) {
        return heartbeat(id, null);
    }

    /**
     * Manually transition an agent to a new state.
     * Sets staleTransitionTime when transitioning to STALE.
     */
    public void transitionState(String id, AgentState newState) {
        agents.computeIfPresent(id, (key, existing) -> {
            AgentInfo result = existing.withState(newState);
            if (newState == AgentState.STALE) {
                result = result.withStaleTransitionTime(Instant.now());
            } else if (newState == AgentState.LIVE) {
                result = result.withStaleTransitionTime(null);
            }
            log.debug("Agent {} transitioned {} -> {}", id, existing.state(), newState);
            return result;
        });
    }

    /**
     * Gracefully shut down an agent. Transitions to SHUTDOWN state,
     * which is excluded from the LIVE -> STALE -> DEAD lifecycle.
     *
     * @return true if the agent was found and transitioned
     */
    public boolean shutdown(String id) {
        AgentInfo updated = agents.computeIfPresent(id, (key, existing) -> {
            log.info("Agent {} graceful shutdown ({} -> SHUTDOWN)", id, existing.state());
            return existing.withState(AgentState.SHUTDOWN);
        });
        return updated != null;
    }

    /**
     * Remove an agent from the registry entirely.
     *
     * @return true if the agent was found and removed
     */
    public boolean deregister(String id) {
        AgentInfo removed = agents.remove(id);
        if (removed != null) {
            // Drop any queued commands along with the agent itself.
            commands.remove(id);
            log.info("Agent {} deregistered (was {})", id, removed.state());
        }
        return removed != null;
    }

    /**
     * Check all agents and apply lifecycle transitions:
     * LIVE -> STALE when lastHeartbeat exceeds staleThresholdMs,
     * STALE -> DEAD when staleTransitionTime exceeds deadThresholdMs.
     * SHUTDOWN and DEAD agents are skipped.
     */
    public void checkLifecycle() {
        Instant now = Instant.now();
        for (Map.Entry<String, AgentInfo> entry : agents.entrySet()) {
            AgentInfo agent = entry.getValue();
            if (agent.state() == AgentState.LIVE) {
                Duration sinceHeartbeat = Duration.between(agent.lastHeartbeat(), now);
                if (sinceHeartbeat.toMillis() > staleThresholdMs) {
                    // Re-check state inside computeIfPresent: a heartbeat may have
                    // raced in between the snapshot read above and this update.
                    agents.computeIfPresent(entry.getKey(), (k, current) -> {
                        if (current.state() == AgentState.LIVE) {
                            log.info("Agent {} LIVE -> STALE (no heartbeat for {}ms)", k, sinceHeartbeat.toMillis());
                            return current.withState(AgentState.STALE)
                                    .withStaleTransitionTime(now);
                        }
                        return current;
                    });
                }
            } else if (agent.state() == AgentState.STALE && agent.staleTransitionTime() != null) {
                Duration sinceStale = Duration.between(agent.staleTransitionTime(), now);
                if (sinceStale.toMillis() > deadThresholdMs) {
                    agents.computeIfPresent(entry.getKey(), (k, current) -> {
                        if (current.state() == AgentState.STALE) {
                            log.info("Agent {} STALE -> DEAD (stale for {}ms)", k, sinceStale.toMillis());
                            return current.withState(AgentState.DEAD);
                        }
                        return current;
                    });
                }
            }
            // DEAD agents remain DEAD (no auto-purge)
        }
    }

    /**
     * Find an agent by its ID.
     *
     * @return the agent info, or null if not found
     */
    public AgentInfo findById(String id) {
        return agents.get(id);
    }

    /**
     * Return all registered agents regardless of state.
     */
    public List<AgentInfo> findAll() {
        return new ArrayList<>(agents.values());
    }

    /**
     * Return all agents in the specified state.
     */
    public List<AgentInfo> findByState(AgentState state) {
        return agents.values().stream()
                .filter(a -> a.state() == state)
                .toList();
    }

    /**
     * Return all agents belonging to the given application.
     */
    public List<AgentInfo> findByApplication(String application) {
        return agents.values().stream()
                .filter(a -> application.equals(a.applicationId()))
                .toList();
    }

    /**
     * Return all agents belonging to the given application and environment.
     */
    public List<AgentInfo> findByApplicationAndEnvironment(String application, String environment) {
        return agents.values().stream()
                .filter(a -> application.equals(a.applicationId()))
                .filter(a -> environment.equals(a.environmentId()))
                .toList();
    }

    /**
     * Add a command to an agent's pending queue.
     * Notifies the event listener if one is set.
     *
     * @return the created command
     */
    public AgentCommand addCommand(String agentId, CommandType type, String payload) {
        AgentCommand command = new AgentCommand(
                UUID.randomUUID().toString(),
                type,
                payload,
                agentId,
                Instant.now(),
                CommandStatus.PENDING
        );
        commands.computeIfAbsent(agentId, k -> new ConcurrentLinkedQueue<>()).add(command);
        log.debug("Command {} ({}) added for agent {}", command.id(), type, agentId);
        // Snapshot the volatile field once so the null-check and call see the same value.
        AgentEventListener listener = this.eventListener;
        if (listener != null) {
            listener.onCommandReady(agentId, command);
        }
        return command;
    }

    /**
     * Acknowledge a command, transitioning it from PENDING/DELIVERED to ACKNOWLEDGED.
     *
     * @return true if the command was found and acknowledged
     */
    public boolean acknowledgeCommand(String agentId, String commandId) {
        ConcurrentLinkedQueue<AgentCommand> queue = commands.get(agentId);
        if (queue == null) {
            return false;
        }
        // Replace the command in the queue with an acknowledged version
        // NOTE(review): rebuilding the queue and put()-ing it back is not atomic with
        // a concurrent addCommand(); a command enqueued between the iteration below
        // and commands.put() would be lost. Consider commands.compute() instead.
        boolean[] found = {false};
        ConcurrentLinkedQueue<AgentCommand> newQueue = new ConcurrentLinkedQueue<>();
        for (AgentCommand cmd : queue) {
            if (cmd.id().equals(commandId) && !found[0]) {
                newQueue.add(cmd.withStatus(CommandStatus.ACKNOWLEDGED));
                found[0] = true;
            } else {
                newQueue.add(cmd);
            }
        }
        if (found[0]) {
            commands.put(agentId, newQueue);
            log.debug("Command {} acknowledged by agent {}", commandId, agentId);
        }
        return found[0];
    }

    /**
     * Return all PENDING commands for the specified agent.
     */
    public List<AgentCommand> findPendingCommands(String agentId) {
        ConcurrentLinkedQueue<AgentCommand> queue = commands.get(agentId);
        if (queue == null) {
            return List.of();
        }
        return queue.stream()
                .filter(cmd -> cmd.status() == CommandStatus.PENDING)
                .toList();
    }

    /**
     * Mark a command as DELIVERED (sent via SSE but not yet acknowledged).
     */
    public void markDelivered(String agentId, String commandId) {
        ConcurrentLinkedQueue<AgentCommand> queue = commands.get(agentId);
        if (queue == null) {
            return;
        }
        // NOTE(review): same non-atomic rebuild-and-put pattern as acknowledgeCommand;
        // commands added concurrently to the old queue can be lost.
        ConcurrentLinkedQueue<AgentCommand> newQueue = new ConcurrentLinkedQueue<>();
        for (AgentCommand cmd : queue) {
            if (cmd.id().equals(commandId) && cmd.status() == CommandStatus.PENDING) {
                newQueue.add(cmd.withStatus(CommandStatus.DELIVERED));
            } else {
                newQueue.add(cmd);
            }
        }
        commands.put(agentId, newQueue);
    }

    /**
     * Expire PENDING commands older than commandExpiryMs.
     */
    public void expireOldCommands() {
        Instant cutoff = Instant.now().minusMillis(commandExpiryMs);
        for (Map.Entry<String, ConcurrentLinkedQueue<AgentCommand>> entry : commands.entrySet()) {
            ConcurrentLinkedQueue<AgentCommand> queue = entry.getValue();
            // removeIf mutates the live queue in place (safe on ConcurrentLinkedQueue).
            queue.removeIf(cmd ->
                    cmd.status() == CommandStatus.PENDING && cmd.createdAt().isBefore(cutoff));
        }
    }

    /**
     * Register a command that expects a synchronous reply from the agent.
     * Returns a CompletableFuture that will be completed when the agent ACKs the command.
     * Auto-cleans up from the pending map on completion or timeout.
     */
    public CompletableFuture<CommandReply> addCommandWithReply(String agentId, CommandType type, String payload) {
        AgentCommand command = addCommand(agentId, type, payload);
        CompletableFuture<CommandReply> future = new CompletableFuture<>();
        pendingReplies.put(command.id(), future);
        // whenComplete fires on normal completion, exception, and cancellation alike.
        future.whenComplete((result, ex) -> pendingReplies.remove(command.id()));
        return future;
    }

    /**
     * Send a command to all LIVE agents in a group and return futures for collecting replies.
     * Returns a map of agentId -> CompletableFuture&lt;CommandReply&gt;.
     */
    public Map<String, CompletableFuture<CommandReply>> addGroupCommandWithReplies(
            String group, CommandType type, String payload) {
        return addGroupCommandWithReplies(group, null, type, payload);
    }

    /**
     * Send a command to all LIVE agents in a group, optionally filtered by environment.
     * When environment is null, targets all agents for the application.
     * Returns a map of agentId -> CompletableFuture&lt;CommandReply&gt;.
     */
    public Map<String, CompletableFuture<CommandReply>> addGroupCommandWithReplies(
            String group, String environment, CommandType type, String payload) {
        // LinkedHashMap preserves the candidate iteration order for callers.
        Map<String, CompletableFuture<CommandReply>> results = new LinkedHashMap<>();
        List<AgentInfo> candidates = environment != null
                ? findByApplicationAndEnvironment(group, environment)
                : findByApplication(group);
        List<AgentInfo> liveAgents = candidates.stream()
                .filter(a -> a.state() == AgentState.LIVE)
                .toList();
        for (AgentInfo agent : liveAgents) {
            AgentCommand cmd = addCommand(agent.instanceId(), type, payload);
            CompletableFuture<CommandReply> future = new CompletableFuture<>();
            pendingReplies.put(cmd.id(), future);
            future.whenComplete((r, ex) -> pendingReplies.remove(cmd.id()));
            results.put(agent.instanceId(), future);
        }
        return results;
    }

    /**
     * Complete a pending reply future for a command.
     * Called when an agent ACKs a command that was registered via {@link #addCommandWithReply}.
     * No-op if no pending future exists for the given command ID.
     */
    public void completeReply(String commandId, String status, String message, String data) {
        // remove() makes completion race-free: only one caller can win the future.
        CompletableFuture<CommandReply> future = pendingReplies.remove(commandId);
        if (future != null) {
            future.complete(new CommandReply(status, message, data));
        }
    }

    /**
     * Set the event listener for command notifications.
     * The SSE layer in the app module implements this interface.
     */
    public void setEventListener(AgentEventListener listener) {
        this.eventListener = listener;
    }
}

View File

@@ -0,0 +1,11 @@
package com.cameleer.server.core.agent;
/**
 * Lifecycle states for a connected agent.
 * Transitions (see AgentRegistryService): LIVE -> STALE on missed heartbeats,
 * STALE -> DEAD after a further timeout, STALE -> LIVE on heartbeat,
 * any -> SHUTDOWN on graceful shutdown (excluded from the stale/dead checks).
 */
public enum AgentState {
    LIVE,
    STALE,
    DEAD,
    SHUTDOWN
}

View File

@@ -0,0 +1,11 @@
package com.cameleer.server.core.agent;
/**
 * Represents the reply data from an agent command acknowledgment.
 * Used for synchronous request-reply command patterns (e.g. TEST_EXPRESSION).
 *
 * @param status "SUCCESS" or "FAILURE"
 * @param message human-readable description of the result
 * @param data optional structured JSON data returned by the agent
 */
public record CommandReply(String status, String message, String data) {}

View File

@@ -0,0 +1,11 @@
package com.cameleer.server.core.agent;
/**
 * Delivery status of a command pushed to an agent.
 * PENDING -> DELIVERED (sent, not yet acked) -> ACKNOWLEDGED;
 * PENDING commands that age out become EXPIRED.
 */
public enum CommandStatus {
    PENDING,
    DELIVERED,
    ACKNOWLEDGED,
    EXPIRED
}

View File

@@ -0,0 +1,13 @@
package com.cameleer.server.core.agent;
/**
 * Types of commands that can be pushed to agents.
 */
public enum CommandType {
    CONFIG_UPDATE,
    DEEP_TRACE,
    REPLAY,
    SET_TRACED_PROCESSORS,
    TEST_EXPRESSION,
    ROUTE_CONTROL
}

View File

@@ -0,0 +1,37 @@
package com.cameleer.server.core.agent;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
/**
 * In-memory registry tracking the operational state of routes.
 * State is updated from agent heartbeats (routeStates map) and
 * ROUTE_STATE_CHANGED lifecycle events.
 * On server restart, all states reset to STARTED (default Camel behavior).
 */
public class RouteStateRegistry {

    public enum RouteState { STARTED, STOPPED, SUSPENDED }

    // Keyed by "applicationId:routeId" (see keyOf).
    private final ConcurrentHashMap<String, RouteState> states = new ConcurrentHashMap<>();

    /** Composite map key; note an applicationId containing ':' could collide — assumed not to occur. */
    private static String keyOf(String applicationId, String routeId) {
        return applicationId + ":" + routeId;
    }

    /** Record the latest known state of a route. */
    public void setState(String applicationId, String routeId, RouteState state) {
        states.put(keyOf(applicationId, routeId), state);
    }

    /** Current state of a route; unknown routes default to STARTED. */
    public RouteState getState(String applicationId, String routeId) {
        return states.getOrDefault(keyOf(applicationId, routeId), RouteState.STARTED);
    }

    /** All known route states for one application, keyed by bare routeId. */
    public Map<String, RouteState> getStatesForApplication(String applicationId) {
        String prefix = applicationId + ":";
        Map<String, RouteState> byRoute = new LinkedHashMap<>();
        for (Map.Entry<String, RouteState> entry : states.entrySet()) {
            if (entry.getKey().startsWith(prefix)) {
                byRoute.put(entry.getKey().substring(prefix.length()), entry.getValue());
            }
        }
        return byRoute;
    }
}

View File

@@ -0,0 +1,14 @@
package com.cameleer.server.core.analytics;
import java.time.Instant;
/**
 * One tracked API usage event.
 *
 * @param timestamp   when the request completed
 * @param username    acting user
 * @param method      HTTP method
 * @param path        raw request path
 * @param normalized  path with variable segments normalized (presumably for grouping — verify tracker impl)
 * @param statusCode  HTTP response status
 * @param durationMs  request handling duration in milliseconds
 * @param queryParams raw query string, if any
 */
public record UsageEvent(
        Instant timestamp,
        String username,
        String method,
        String path,
        String normalized,
        int statusCode,
        long durationMs,
        String queryParams
) {}

View File

@@ -0,0 +1,7 @@
package com.cameleer.server.core.analytics;
/**
 * Aggregated usage statistics for one grouping key.
 *
 * @param key           grouping key (e.g. normalized path or user — depends on the query)
 * @param count         number of events in the group
 * @param avgDurationMs average request duration in milliseconds
 */
public record UsageStats(
        String key,
        long count,
        long avgDurationMs
) {}

View File

@@ -0,0 +1,6 @@
package com.cameleer.server.core.analytics;
/** Sink for API usage events; implementations decide how events are stored/aggregated. */
public interface UsageTracker {
    /** Record a single usage event. */
    void track(UsageEvent event);
}

View File

@@ -0,0 +1,231 @@
package com.cameleer.server.core.detail;
import com.cameleer.common.model.ProcessorExecution;
import com.cameleer.server.core.storage.ExecutionStore;
import com.cameleer.server.core.storage.ExecutionStore.ProcessorRecord;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.*;
/**
 * Read-side service that assembles the full {@link ExecutionDetail} view of a
 * single route execution, including its nested processor tree and per-processor
 * input/output snapshots.
 * <p>
 * The processor tree is taken from the raw {@code processorsJson} column when
 * present; otherwise it is reconstructed from flat processor records (seq-based
 * for ClickHouse, processorId-based for PostgreSQL).
 */
public class DetailService {

    // Shared, thread-safe mapper. FAIL_ON_UNKNOWN_PROPERTIES is disabled so the
    // server can read JSON written by newer agents with extra fields.
    private static final ObjectMapper JSON = new ObjectMapper()
            .findAndRegisterModules()
            .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    private static final TypeReference<Map<String, String>> STR_MAP = new TypeReference<>() {};
    private static final TypeReference<List<ProcessorExecution>> PROCESSOR_EXEC_LIST = new TypeReference<>() {};

    private final ExecutionStore executionStore;

    public DetailService(ExecutionStore executionStore) {
        this.executionStore = executionStore;
    }

    /**
     * Load the full detail view for one execution.
     *
     * @param executionId unique execution identifier
     * @return the detail view, or empty if no execution with that id exists
     */
    public Optional<ExecutionDetail> getDetail(String executionId) {
        return executionStore.findById(executionId)
                .map(exec -> {
                    // Prefer the raw processor tree (faithful to agent data) over
                    // flat-record reconstruction (which loses iteration context).
                    List<ProcessorNode> processors = parseProcessorsJson(exec.processorsJson());
                    if (processors.isEmpty()) {
                        // Fallback for executions ingested before processors_json was added
                        List<ProcessorRecord> records = executionStore.findProcessors(executionId);
                        processors = buildTree(records);
                    }
                    return new ExecutionDetail(
                            exec.executionId(), exec.routeId(), exec.instanceId(),
                            exec.applicationId(),
                            exec.status(), exec.startTime(), exec.endTime(),
                            exec.durationMs() != null ? exec.durationMs() : 0L,
                            exec.correlationId(), exec.exchangeId(),
                            exec.errorMessage(), exec.errorStacktrace(),
                            exec.diagramContentHash(), processors,
                            exec.inputBody(), exec.outputBody(),
                            exec.inputHeaders(), exec.outputHeaders(),
                            exec.inputProperties(), exec.outputProperties(),
                            parseAttributes(exec.attributes()),
                            exec.errorType(), exec.errorCategory(),
                            exec.rootCauseType(), exec.rootCauseMessage(),
                            exec.traceId(), exec.spanId()
                    );
                });
    }

    /**
     * Fetch the input/output snapshot of one processor, keyed by processorId.
     * Only non-null snapshot fields are included in the returned map.
     */
    public Optional<Map<String, String>> getProcessorSnapshot(String executionId, String processorId) {
        return executionStore.findProcessorById(executionId, processorId)
                .map(DetailService::snapshotFromRecord);
    }

    /**
     * Fetch the input/output snapshot of one processor, keyed by its sequence
     * number (used by the seq-based ClickHouse model where processorIds may repeat).
     */
    public Optional<Map<String, String>> getProcessorSnapshotBySeq(String executionId, int seq) {
        return executionStore.findProcessorBySeq(executionId, seq)
                .map(DetailService::snapshotFromRecord);
    }

    /** Build a snapshot map containing only the trace fields present on the record. */
    private static Map<String, String> snapshotFromRecord(ProcessorRecord p) {
        Map<String, String> snapshot = new LinkedHashMap<>();
        if (p.inputBody() != null) snapshot.put("inputBody", p.inputBody());
        if (p.outputBody() != null) snapshot.put("outputBody", p.outputBody());
        if (p.inputHeaders() != null) snapshot.put("inputHeaders", p.inputHeaders());
        if (p.outputHeaders() != null) snapshot.put("outputHeaders", p.outputHeaders());
        if (p.inputProperties() != null) snapshot.put("inputProperties", p.inputProperties());
        if (p.outputProperties() != null) snapshot.put("outputProperties", p.outputProperties());
        return snapshot;
    }

    /**
     * Parse the raw processor tree JSON stored alongside the execution.
     * Returns an empty list on null/blank input or any parse failure so the
     * caller falls back to flat-record reconstruction (best-effort by design).
     */
    private List<ProcessorNode> parseProcessorsJson(String json) {
        if (json == null || json.isBlank()) return List.of();
        try {
            List<ProcessorExecution> executions = JSON.readValue(json, PROCESSOR_EXEC_LIST);
            return convertProcessors(executions);
        } catch (Exception e) {
            return List.of();
        }
    }

    /**
     * Convert agent ProcessorExecution list to detail ProcessorNode list.
     * Note: the seven snapshot/iteration slots passed as {@code null} here are
     * only populated by the flat-record reconstruction paths below.
     */
    private List<ProcessorNode> convertProcessors(List<ProcessorExecution> executions) {
        if (executions == null) return List.of();
        List<ProcessorNode> result = new ArrayList<>();
        for (ProcessorExecution p : executions) {
            // hasTrace signals the UI that a per-processor snapshot can be fetched
            boolean hasTrace = p.getInputBody() != null || p.getOutputBody() != null
                    || p.getInputHeaders() != null || p.getOutputHeaders() != null
                    || p.getInputProperties() != null || p.getOutputProperties() != null;
            ProcessorNode node = new ProcessorNode(
                    p.getProcessorId(), p.getProcessorType(),
                    p.getStatus() != null ? p.getStatus().name() : null,
                    p.getStartTime(), p.getEndTime(),
                    p.getDurationMs(),
                    p.getErrorMessage(), p.getErrorStackTrace(),
                    p.getAttributes() != null ? new LinkedHashMap<>(p.getAttributes()) : null,
                    null, null, null, null, null, null, null,
                    p.getResolvedEndpointUri(),
                    p.getErrorType(), p.getErrorCategory(),
                    p.getRootCauseType(), p.getRootCauseMessage(),
                    p.getErrorHandlerType(), p.getCircuitBreakerState(),
                    p.getFallbackTriggered(),
                    p.getFilterMatched(), p.getDuplicateMessage(),
                    hasTrace
            );
            result.add(node);
        }
        return result;
    }

    /**
     * Reconstruct processor tree from flat records.
     * Detects whether records use the seq-based model (ClickHouse) or
     * processorId-based model (PostgreSQL) and delegates accordingly.
     */
    List<ProcessorNode> buildTree(List<ProcessorRecord> processors) {
        if (processors.isEmpty()) return List.of();
        boolean hasSeq = processors.stream().anyMatch(p -> p.seq() != null);
        return hasSeq ? buildTreeBySeq(processors) : buildTreeByProcessorId(processors);
    }

    /**
     * Seq-based tree reconstruction for ClickHouse flat processor model.
     * Uses seq/parentSeq linkage, correctly handling duplicate processorIds
     * across iterations (e.g., the same processor inside a split running N times).
     */
    private List<ProcessorNode> buildTreeBySeq(List<ProcessorRecord> processors) {
        // Pass 1: build a node for every record, keyed by seq.
        Map<Integer, ProcessorNode> nodeBySeq = new LinkedHashMap<>();
        for (ProcessorRecord p : processors) {
            boolean hasTrace = p.inputBody() != null || p.outputBody() != null
                    || p.inputHeaders() != null || p.outputHeaders() != null
                    || p.inputProperties() != null || p.outputProperties() != null;
            ProcessorNode node = new ProcessorNode(
                    p.processorId(), p.processorType(), p.status(),
                    p.startTime(), p.endTime(),
                    p.durationMs() != null ? p.durationMs() : 0L,
                    p.errorMessage(), p.errorStacktrace(),
                    parseAttributes(p.attributes()),
                    p.iteration(), p.iterationSize(),
                    null, null, null, null, null,
                    p.resolvedEndpointUri(),
                    p.errorType(), p.errorCategory(),
                    p.rootCauseType(), p.rootCauseMessage(),
                    null, p.circuitBreakerState(),
                    p.fallbackTriggered(),
                    p.filterMatched(), p.duplicateMessage(),
                    hasTrace
            );
            nodeBySeq.put(p.seq(), node);
        }
        // Pass 2: link children to parents via parentSeq.
        List<ProcessorNode> roots = new ArrayList<>();
        for (ProcessorRecord p : processors) {
            ProcessorNode node = nodeBySeq.get(p.seq());
            if (p.parentSeq() == null) {
                roots.add(node);
            } else {
                ProcessorNode parent = nodeBySeq.get(p.parentSeq());
                if (parent != null) {
                    parent.addChild(node);
                } else {
                    roots.add(node); // orphan safety
                }
            }
        }
        return roots;
    }

    /**
     * ProcessorId-based tree reconstruction for PostgreSQL flat records.
     * Note: this loses iteration context for processors with the same ID across
     * iterations — later records overwrite earlier ones in {@code nodeMap}.
     */
    private List<ProcessorNode> buildTreeByProcessorId(List<ProcessorRecord> processors) {
        // Pass 1: build a node for every record, keyed by processorId.
        Map<String, ProcessorNode> nodeMap = new LinkedHashMap<>();
        for (ProcessorRecord p : processors) {
            boolean hasTrace = p.inputBody() != null || p.outputBody() != null
                    || p.inputHeaders() != null || p.outputHeaders() != null
                    || p.inputProperties() != null || p.outputProperties() != null;
            nodeMap.put(p.processorId(), new ProcessorNode(
                    p.processorId(), p.processorType(), p.status(),
                    p.startTime(), p.endTime(),
                    p.durationMs() != null ? p.durationMs() : 0L,
                    p.errorMessage(), p.errorStacktrace(),
                    parseAttributes(p.attributes()),
                    null, null,
                    p.loopIndex(), p.loopSize(),
                    p.splitIndex(), p.splitSize(),
                    p.multicastIndex(),
                    p.resolvedEndpointUri(),
                    p.errorType(), p.errorCategory(),
                    p.rootCauseType(), p.rootCauseMessage(),
                    p.errorHandlerType(), p.circuitBreakerState(),
                    p.fallbackTriggered(),
                    null, null, // filterMatched, duplicateMessage (not in flat DB records)
                    hasTrace
            ));
        }
        // Pass 2: link children to parents via parentProcessorId.
        List<ProcessorNode> roots = new ArrayList<>();
        for (ProcessorRecord p : processors) {
            ProcessorNode node = nodeMap.get(p.processorId());
            if (p.parentProcessorId() == null) {
                roots.add(node);
            } else {
                ProcessorNode parent = nodeMap.get(p.parentProcessorId());
                if (parent != null) {
                    parent.addChild(node);
                } else {
                    roots.add(node); // orphan safety
                }
            }
        }
        return roots;
    }

    /**
     * Parse a JSON object into a string map; returns null on null/blank input
     * or parse failure (attributes are optional, so best-effort by design).
     */
    private static Map<String, String> parseAttributes(String json) {
        if (json == null || json.isBlank()) return null;
        try {
            return JSON.readValue(json, STR_MAP);
        } catch (Exception e) {
            return null;
        }
    }
}

View File

@@ -0,0 +1,60 @@
package com.cameleer.server.core.detail;
import java.time.Instant;
import java.util.List;
import java.util.Map;
/**
 * Full detail of a route execution, including the nested processor tree.
 * <p>
 * This is the rich detail model returned by the detail endpoint. The processor
 * tree is taken from the execution's raw processor JSON when available, and
 * otherwise reconstructed from flat processor records (see DetailService).
 *
 * @param executionId unique execution identifier
 * @param routeId Camel route ID
 * @param instanceId agent instance that reported the execution
 * @param applicationId logical application the reporting instance belongs to
 * @param status execution status (COMPLETED, FAILED, RUNNING)
 * @param startTime execution start time
 * @param endTime execution end time (may be null for RUNNING)
 * @param durationMs execution duration in milliseconds (0 if not yet known)
 * @param correlationId correlation ID for cross-instance tracing
 * @param exchangeId Camel exchange ID
 * @param errorMessage error message (empty string if no error)
 * @param errorStackTrace error stack trace (empty string if no error)
 * @param diagramContentHash content hash linking to the active route diagram version
 * @param processors nested processor execution tree (root nodes)
 * @param inputBody exchange input body at route entry (null if not captured)
 * @param outputBody exchange output body at route exit (null if not captured)
 * @param inputHeaders exchange input headers at route entry (null if not captured)
 * @param outputHeaders exchange output headers at route exit (null if not captured)
 * @param inputProperties exchange input properties at route entry (null if not captured)
 * @param outputProperties exchange output properties at route exit (null if not captured)
 * @param attributes custom execution attributes (null if none)
 * @param errorType exception class name of the failure (null if no error)
 * @param errorCategory coarse error classification (null if no error)
 * @param rootCauseType exception class name of the root cause (null if no error)
 * @param rootCauseMessage message of the root cause (null if no error)
 * @param traceId distributed-tracing trace ID (null if not propagated)
 * @param spanId distributed-tracing span ID (null if not propagated)
 */
public record ExecutionDetail(
    String executionId,
    String routeId,
    String instanceId,
    String applicationId,
    String status,
    Instant startTime,
    Instant endTime,
    long durationMs,
    String correlationId,
    String exchangeId,
    String errorMessage,
    String errorStackTrace,
    String diagramContentHash,
    List<ProcessorNode> processors,
    String inputBody,
    String outputBody,
    String inputHeaders,
    String outputHeaders,
    String inputProperties,
    String outputProperties,
    Map<String, String> attributes,
    String errorType,
    String errorCategory,
    String rootCauseType,
    String rootCauseMessage,
    String traceId,
    String spanId
) {
}

View File

@@ -0,0 +1,122 @@
package com.cameleer.server.core.detail;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
 * Nested tree node representing one processor execution within a route run.
 * <p>
 * Instances start as leaves; parents are linked afterwards via
 * {@link #addChild(ProcessorNode)} while the tree is being reconstructed from
 * flat processor records. {@link #getChildren()} hands out an immutable
 * snapshot, so callers cannot mutate the tree.
 * <p>
 * Not thread-safe: build the tree on a single thread before publishing it.
 */
public final class ProcessorNode {

    // --- identity and outcome ---
    private final String processorId;
    private final String processorType;
    private final String status;

    // --- timing ---
    private final Instant startTime;
    private final Instant endTime;
    private final long durationMs;

    // --- failure details (null when the processor succeeded) ---
    private final String errorMessage;
    private final String errorStackTrace;

    // --- optional metadata; null means "not recorded" ---
    private final Map<String, String> attributes;
    private final Integer iteration;
    private final Integer iterationSize;
    private final Integer loopIndex;
    private final Integer loopSize;
    private final Integer splitIndex;
    private final Integer splitSize;
    private final Integer multicastIndex;
    private final String resolvedEndpointUri;

    // --- error classification and resilience signals ---
    private final String errorType;
    private final String errorCategory;
    private final String rootCauseType;
    private final String rootCauseMessage;
    private final String errorHandlerType;
    private final String circuitBreakerState;
    private final Boolean fallbackTriggered;
    private final Boolean filterMatched;
    private final Boolean duplicateMessage;

    /** True when a per-processor input/output snapshot can be fetched. */
    private final boolean hasTraceData;

    /** Child nodes, linked after construction; never null. */
    private final List<ProcessorNode> children = new ArrayList<>();

    public ProcessorNode(String processorId, String processorType, String status,
                         Instant startTime, Instant endTime, long durationMs,
                         String errorMessage, String errorStackTrace,
                         Map<String, String> attributes,
                         Integer iteration, Integer iterationSize,
                         Integer loopIndex, Integer loopSize,
                         Integer splitIndex, Integer splitSize,
                         Integer multicastIndex,
                         String resolvedEndpointUri,
                         String errorType, String errorCategory,
                         String rootCauseType, String rootCauseMessage,
                         String errorHandlerType, String circuitBreakerState,
                         Boolean fallbackTriggered,
                         Boolean filterMatched, Boolean duplicateMessage,
                         boolean hasTraceData) {
        this.processorId = processorId;
        this.processorType = processorType;
        this.status = status;
        this.startTime = startTime;
        this.endTime = endTime;
        this.durationMs = durationMs;
        this.errorMessage = errorMessage;
        this.errorStackTrace = errorStackTrace;
        this.attributes = attributes;
        this.iteration = iteration;
        this.iterationSize = iterationSize;
        this.loopIndex = loopIndex;
        this.loopSize = loopSize;
        this.splitIndex = splitIndex;
        this.splitSize = splitSize;
        this.multicastIndex = multicastIndex;
        this.resolvedEndpointUri = resolvedEndpointUri;
        this.errorType = errorType;
        this.errorCategory = errorCategory;
        this.rootCauseType = rootCauseType;
        this.rootCauseMessage = rootCauseMessage;
        this.errorHandlerType = errorHandlerType;
        this.circuitBreakerState = circuitBreakerState;
        this.fallbackTriggered = fallbackTriggered;
        this.filterMatched = filterMatched;
        this.duplicateMessage = duplicateMessage;
        this.hasTraceData = hasTraceData;
    }

    /** Append a child node (insertion order is preserved). */
    public void addChild(ProcessorNode child) {
        children.add(child);
    }

    public String getProcessorId() {
        return processorId;
    }

    public String getProcessorType() {
        return processorType;
    }

    public String getStatus() {
        return status;
    }

    public Instant getStartTime() {
        return startTime;
    }

    public Instant getEndTime() {
        return endTime;
    }

    public long getDurationMs() {
        return durationMs;
    }

    public String getErrorMessage() {
        return errorMessage;
    }

    public String getErrorStackTrace() {
        return errorStackTrace;
    }

    public Map<String, String> getAttributes() {
        return attributes;
    }

    public Integer getIteration() {
        return iteration;
    }

    public Integer getIterationSize() {
        return iterationSize;
    }

    public Integer getLoopIndex() {
        return loopIndex;
    }

    public Integer getLoopSize() {
        return loopSize;
    }

    public Integer getSplitIndex() {
        return splitIndex;
    }

    public Integer getSplitSize() {
        return splitSize;
    }

    public Integer getMulticastIndex() {
        return multicastIndex;
    }

    public String getResolvedEndpointUri() {
        return resolvedEndpointUri;
    }

    public String getErrorType() {
        return errorType;
    }

    public String getErrorCategory() {
        return errorCategory;
    }

    public String getRootCauseType() {
        return rootCauseType;
    }

    public String getRootCauseMessage() {
        return rootCauseMessage;
    }

    public String getErrorHandlerType() {
        return errorHandlerType;
    }

    public String getCircuitBreakerState() {
        return circuitBreakerState;
    }

    public Boolean getFallbackTriggered() {
        return fallbackTriggered;
    }

    public Boolean getFilterMatched() {
        return filterMatched;
    }

    public Boolean getDuplicateMessage() {
        return duplicateMessage;
    }

    public boolean isHasTraceData() {
        return hasTraceData;
    }

    /** Immutable snapshot of the children at the time of the call. */
    public List<ProcessorNode> getChildren() {
        return List.copyOf(children);
    }
}

View File

@@ -0,0 +1,21 @@
package com.cameleer.server.core.diagram;
import java.util.List;
/**
 * Complete diagram layout with positioned nodes and edges.
 * <p>
 * This is the JSON response format for the layout endpoint, produced by a
 * {@link DiagramRenderer}. Coordinates in {@code nodes} and {@code edges}
 * are relative to the {@code width} x {@code height} canvas.
 *
 * @param width total diagram width
 * @param height total diagram height
 * @param nodes positioned nodes with coordinates
 * @param edges positioned edges with waypoints
 */
public record DiagramLayout(
    double width,
    double height,
    List<PositionedNode> nodes,
    List<PositionedEdge> edges
) {
}

View File

@@ -0,0 +1,32 @@
package com.cameleer.server.core.diagram;
import com.cameleer.common.graph.RouteGraph;
/**
 * Renders a route graph as SVG or as a positioned JSON layout.
 * <p>
 * Implementations handle layout computation and visual rendering.
 * Stub interface -- full implementation in a later plan.
 */
public interface DiagramRenderer {
    /**
     * Render the route graph as an SVG document string.
     *
     * @param graph the route graph to render
     * @return a complete SVG document
     */
    String renderSvg(RouteGraph graph);
    /**
     * Compute a positioned JSON layout for the route graph.
     *
     * @param graph the route graph to lay out
     * @return the positioned layout
     */
    DiagramLayout layoutJson(RouteGraph graph);
    /**
     * Compute a positioned JSON layout with a specific flow direction.
     * <p>
     * The default implementation ignores {@code direction} and delegates to
     * {@link #layoutJson(RouteGraph)}; implementations that support multiple
     * directions should override this.
     *
     * @param graph the route graph
     * @param direction "LR" for left-to-right, "TB" for top-to-bottom
     */
    default DiagramLayout layoutJson(RouteGraph graph, String direction) {
        return layoutJson(graph);
    }
}

View File

@@ -0,0 +1,19 @@
package com.cameleer.server.core.diagram;
import java.util.List;
/**
 * An edge with computed waypoints for rendering.
 * <p>
 * Waypoints are ordered from source to target; each waypoint is a
 * two-element {@code double[]} holding the x and y coordinate.
 *
 * @param sourceId source node identifier
 * @param targetId target node identifier
 * @param label optional edge label (null when the edge is unlabeled)
 * @param points list of [x, y] waypoints from source to target
 */
public record PositionedEdge(
    String sourceId,
    String targetId,
    String label,
    List<double[]> points
) {
}

View File

@@ -0,0 +1,32 @@
package com.cameleer.server.core.diagram;
import java.util.List;
/**
 * A node with computed layout position and dimensions.
 * <p>
 * For compound nodes (CHOICE, SPLIT, TRY_CATCH, etc.), {@code children}
 * contains the nested child nodes rendered inside the parent bounds.
 * Coordinates are in the same space as the enclosing {@link DiagramLayout}.
 *
 * @param id node identifier (matches RouteNode.id)
 * @param label display label
 * @param type NodeType name (e.g., "ENDPOINT", "PROCESSOR")
 * @param x horizontal position
 * @param y vertical position
 * @param width node width
 * @param height node height
 * @param children nested child nodes for compound/swimlane groups
 * @param endpointUri the Camel endpoint URI (e.g., "direct:processOrder"), null for non-endpoint nodes
 */
public record PositionedNode(
    String id,
    String label,
    String type,
    double x,
    double y,
    double width,
    double height,
    List<PositionedNode> children,
    String endpointUri
) {
}

View File

@@ -0,0 +1,5 @@
package com.cameleer.server.core.indexing;
import java.time.Instant;
/**
 * Event published after an execution has been inserted or updated, prompting
 * the search indexer to (re-)index that execution.
 *
 * @param executionId unique execution identifier to index
 * @param startTime execution start time as reported at publish time
 */
public record ExecutionUpdatedEvent(String executionId, Instant startTime) {}

View File

@@ -0,0 +1,143 @@
package com.cameleer.server.core.indexing;
import com.cameleer.server.core.storage.ExecutionStore;
import com.cameleer.server.core.storage.ExecutionStore.ExecutionRecord;
import com.cameleer.server.core.storage.ExecutionStore.ProcessorRecord;
import com.cameleer.server.core.storage.SearchIndex;
import com.cameleer.server.core.storage.model.ExecutionDocument;
import com.cameleer.server.core.storage.model.ExecutionDocument.ProcessorDoc;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;
/**
 * Debounced, asynchronous indexer that copies executions from the
 * {@link ExecutionStore} into the {@link SearchIndex}.
 * <p>
 * Each {@link ExecutionUpdatedEvent} schedules an index pass after
 * {@code debounceMs}; a newer event for the same executionId replaces (and
 * cancels) the previously scheduled pass, so rapid updates are coalesced into
 * one index write. All indexing runs on a single daemon scheduler thread.
 */
public class SearchIndexer implements SearchIndexerStats {
    private static final Logger log = LoggerFactory.getLogger(SearchIndexer.class);

    private final ExecutionStore executionStore;
    private final SearchIndex searchIndex;
    private final long debounceMs;
    // Soft cap on concurrently pending (debounced) executions; events beyond it are dropped.
    private final int queueCapacity;
    // executionId -> currently scheduled (not yet run) index task.
    private final Map<String, ScheduledFuture<?>> pending = new ConcurrentHashMap<>();
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor(
            r -> { Thread t = new Thread(r, "search-indexer"); t.setDaemon(true); return t; });

    // Stats counters (exposed via SearchIndexerStats).
    private final AtomicLong failedCount = new AtomicLong();
    private final AtomicLong indexedCount = new AtomicLong();
    private volatile Instant lastIndexedAt;
    private final AtomicLong rateWindowStartMs = new AtomicLong(System.currentTimeMillis());
    private final AtomicLong rateWindowCount = new AtomicLong();
    private volatile double lastRate;

    /**
     * @param executionStore source of execution and processor records
     * @param searchIndex destination index
     * @param debounceMs delay between an update event and the index pass
     * @param queueCapacity maximum number of pending debounced executions
     */
    public SearchIndexer(ExecutionStore executionStore, SearchIndex searchIndex,
                         long debounceMs, int queueCapacity) {
        this.executionStore = executionStore;
        this.searchIndex = searchIndex;
        this.debounceMs = debounceMs;
        this.queueCapacity = queueCapacity;
    }

    /**
     * Schedule (or re-schedule) indexing for the event's execution.
     * Drops the event with a warning when the pending map is at capacity.
     */
    public void onExecutionUpdated(ExecutionUpdatedEvent event) {
        if (pending.size() >= queueCapacity) {
            log.warn("Search indexer queue full, dropping event for {}", event.executionId());
            return;
        }
        // NOTE(review): the new task is put before the old one is cancelled, and the
        // capacity check above is not atomic with the put — under concurrent calls the
        // map can briefly exceed queueCapacity. Appears intentional as a soft cap; confirm.
        ScheduledFuture<?> existing = pending.put(event.executionId(),
                scheduler.schedule(() -> indexExecution(event.executionId()),
                        debounceMs, TimeUnit.MILLISECONDS));
        if (existing != null) {
            existing.cancel(false);
        }
    }

    /**
     * Load the execution and its processors and write a full document to the
     * search index. Failures are counted and logged, never rethrown.
     */
    private void indexExecution(String executionId) {
        pending.remove(executionId);
        try {
            ExecutionRecord exec = executionStore.findById(executionId).orElse(null);
            if (exec == null) return; // deleted or not yet visible; nothing to index
            List<ProcessorRecord> processors = executionStore.findProcessors(executionId);
            List<ProcessorDoc> processorDocs = processors.stream()
                    .map(p -> new ProcessorDoc(
                            p.processorId(), p.processorType(), p.status(),
                            p.errorMessage(), p.errorStacktrace(),
                            p.inputBody(), p.outputBody(),
                            p.inputHeaders(), p.outputHeaders(),
                            p.attributes()))
                    .toList();
            searchIndex.index(new ExecutionDocument(
                    exec.executionId(), exec.routeId(), exec.instanceId(), exec.applicationId(),
                    exec.status(), exec.correlationId(), exec.exchangeId(),
                    exec.startTime(), exec.endTime(), exec.durationMs(),
                    exec.errorMessage(), exec.errorStacktrace(), processorDocs,
                    exec.attributes(), exec.hasTraceData(), exec.isReplay()));
            indexedCount.incrementAndGet();
            lastIndexedAt = Instant.now();
            updateRate();
        } catch (Exception e) {
            failedCount.incrementAndGet();
            log.error("Failed to index execution {}", executionId, e);
        }
    }

    /**
     * Recompute the docs/sec rate once per 15-second window.
     * NOTE(review): the read/reset of the window counters is not atomic; counts can
     * be lost at the window boundary. Only runs on the single scheduler thread
     * today, so this looks benign — confirm before adding more worker threads.
     */
    private void updateRate() {
        long now = System.currentTimeMillis();
        long windowStart = rateWindowStartMs.get();
        long count = rateWindowCount.incrementAndGet();
        long elapsed = now - windowStart;
        if (elapsed >= 15_000) { // 15-second window
            lastRate = count / (elapsed / 1000.0);
            rateWindowStartMs.set(now);
            rateWindowCount.set(0);
        }
    }

    @Override
    public int getQueueDepth() {
        return pending.size();
    }

    @Override
    public int getMaxQueueSize() {
        return queueCapacity;
    }

    @Override
    public long getFailedCount() {
        return failedCount.get();
    }

    @Override
    public long getIndexedCount() {
        return indexedCount.get();
    }

    @Override
    public Instant getLastIndexedAt() {
        return lastIndexedAt;
    }

    @Override
    public long getDebounceMs() {
        return debounceMs;
    }

    @Override
    public double getIndexingRate() {
        return lastRate;
    }

    /** Stop the scheduler; already-submitted tasks may still run, new ones are rejected. */
    public void shutdown() {
        scheduler.shutdown();
    }
}

View File

@@ -0,0 +1,14 @@
package com.cameleer.server.core.indexing;
import java.time.Instant;
/**
 * Read-only statistics exposed by the search indexer for monitoring endpoints.
 */
public interface SearchIndexerStats {
    /** Number of executions currently waiting for their debounced index pass. */
    int getQueueDepth();
    /** Configured capacity of the pending queue; events beyond it are dropped. */
    int getMaxQueueSize();
    /** Total number of index attempts that ended in an exception. */
    long getFailedCount();
    /** Total number of documents successfully indexed. */
    long getIndexedCount();
    /** Timestamp of the most recent successful index write, or null if none yet. */
    Instant getLastIndexedAt();
    /** Configured debounce delay in milliseconds. */
    long getDebounceMs();
    /** Approximate indexing rate in docs/sec over last measurement window */
    double getIndexingRate();
}

View File

@@ -0,0 +1,14 @@
package com.cameleer.server.core.ingestion;
import com.cameleer.common.model.LogEntry;
/**
 * A log entry paired with its agent metadata, ready for buffered ClickHouse insertion.
 *
 * @param tenantId tenant the entry belongs to
 * @param environment environment of the reporting instance
 * @param instanceId agent instance that produced the entry
 * @param applicationId logical application the instance belongs to
 * @param entry the raw log entry as reported by the agent
 */
public record BufferedLogEntry(
    String tenantId,
    String environment,
    String instanceId,
    String applicationId,
    LogEntry entry
) {}

View File

@@ -0,0 +1,272 @@
package com.cameleer.server.core.ingestion;
import com.cameleer.common.model.ExchangeSnapshot;
import com.cameleer.common.model.ExecutionChunk;
import com.cameleer.common.model.FlatProcessorRecord;
import com.cameleer.server.core.storage.DiagramStore;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Duration;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Consumer;
import java.util.function.Function;
/**
 * Accumulates {@link ExecutionChunk} documents and produces:
 * <ul>
 *   <li>{@link ProcessorBatch} — pushed immediately for each chunk (append-only)</li>
 *   <li>{@link MergedExecution} — pushed when the final chunk arrives or on stale sweep</li>
 * </ul>
 * <p>
 * Thread-safety relies on {@link ConcurrentHashMap}; merge/remove operations on
 * {@code pending} are per-exchange atomic.
 */
public class ChunkAccumulator {
    private static final Logger log = LoggerFactory.getLogger(ChunkAccumulator.class);
    // Shared, thread-safe mapper used only for serializing headers/properties/attributes.
    private static final ObjectMapper MAPPER = new ObjectMapper();

    private final String tenantId;
    private final Consumer<MergedExecution> executionSink;
    private final Consumer<ProcessorBatch> processorSink;
    private final DiagramStore diagramStore;
    // Exchanges pending longer than this are force-flushed by sweepStale().
    private final Duration staleThreshold;
    // Maps an instanceId to its environment name.
    private final Function<String, String> environmentResolver;
    // exchangeId -> envelope buffered while waiting for the final chunk.
    private final ConcurrentHashMap<String, PendingExchange> pending = new ConcurrentHashMap<>();

    public ChunkAccumulator(String tenantId,
                            Consumer<MergedExecution> executionSink,
                            Consumer<ProcessorBatch> processorSink,
                            DiagramStore diagramStore,
                            Duration staleThreshold,
                            Function<String, String> environmentResolver) {
        this.tenantId = tenantId;
        this.executionSink = executionSink;
        this.processorSink = processorSink;
        this.diagramStore = diagramStore;
        this.staleThreshold = staleThreshold;
        this.environmentResolver = environmentResolver;
    }

    /**
     * Process an incoming chunk: push processors immediately,
     * buffer/merge the envelope, and emit when final.
     */
    public void onChunk(ExecutionChunk chunk) {
        // 1. Push processor records immediately (append-only)
        String environment = environmentResolver.apply(
                chunk.getInstanceId() != null ? chunk.getInstanceId() : "");
        boolean chunkHasTrace = false;
        if (chunk.getProcessors() != null && !chunk.getProcessors().isEmpty()) {
            processorSink.accept(new ProcessorBatch(
                    this.tenantId,
                    chunk.getExchangeId(),
                    chunk.getRouteId(),
                    chunk.getApplicationId(),
                    environment,
                    chunk.getStartTime(),
                    chunk.getProcessors()));
            // Trace data = at least one processor carried an input or output body.
            chunkHasTrace = chunk.getProcessors().stream()
                    .anyMatch(p -> isNonEmpty(p.getInputBody()) || isNonEmpty(p.getOutputBody()));
        }
        // 2. Buffer/merge the exchange envelope
        if (chunk.isFinal()) {
            // Merge with any pending envelope, then emit
            PendingExchange existing = pending.remove(chunk.getExchangeId());
            ExecutionChunk merged = existing != null
                    ? mergeEnvelopes(existing.envelope(), chunk)
                    : chunk;
            boolean hasTrace = chunkHasTrace || (existing != null && existing.hasTraceData());
            executionSink.accept(toMergedExecution(merged, hasTrace));
        } else {
            // Buffer the envelope for later merging; keep the earliest receivedAt
            // so the stale sweep measures total pending time.
            boolean trace = chunkHasTrace;
            pending.merge(chunk.getExchangeId(),
                    new PendingExchange(chunk, Instant.now(), trace),
                    (old, incoming) -> new PendingExchange(
                            mergeEnvelopes(old.envelope(), incoming.envelope()),
                            old.receivedAt(),
                            old.hasTraceData() || incoming.hasTraceData()));
        }
    }

    /** Null-safe check for a non-empty string. */
    private static boolean isNonEmpty(String s) {
        return s != null && !s.isEmpty();
    }

    /**
     * Flush exchanges that have been pending longer than the stale threshold.
     * Called periodically by a scheduled task. The second remove() guards the
     * race against a concurrent final chunk removing the same entry.
     */
    public void sweepStale() {
        Instant cutoff = Instant.now().minus(staleThreshold);
        pending.forEach((exchangeId, pe) -> {
            if (pe.receivedAt().isBefore(cutoff)) {
                PendingExchange removed = pending.remove(exchangeId);
                if (removed != null) {
                    log.info("Flushing stale exchange {} (pending since {})",
                            exchangeId, removed.receivedAt());
                    executionSink.accept(toMergedExecution(removed.envelope(), removed.hasTraceData()));
                }
            }
        });
    }

    /** Number of exchanges awaiting a final chunk. */
    public int getPendingCount() {
        return pending.size();
    }

    // ---- Merge logic ----

    /**
     * COALESCE merge: for each field, prefer the newer value if non-null, else keep older.
     * The newer chunk (higher chunkSeq) takes precedence for status, endTime, durationMs.
     * NOTE(review): for startTime the coalesce order is reversed to keep the older
     * chunk's value — this assumes the earlier-buffered chunk carries the earliest
     * startTime; no actual time comparison is done. Confirm chunks cannot arrive
     * out of order with differing startTimes.
     */
    private static ExecutionChunk mergeEnvelopes(ExecutionChunk older, ExecutionChunk newer) {
        ExecutionChunk merged = new ExecutionChunk();
        merged.setExchangeId(coalesce(newer.getExchangeId(), older.getExchangeId()));
        merged.setApplicationId(coalesce(newer.getApplicationId(), older.getApplicationId()));
        merged.setInstanceId(coalesce(newer.getInstanceId(), older.getInstanceId()));
        merged.setRouteId(coalesce(newer.getRouteId(), older.getRouteId()));
        merged.setCorrelationId(coalesce(newer.getCorrelationId(), older.getCorrelationId()));
        merged.setStatus(coalesce(newer.getStatus(), older.getStatus()));
        merged.setStartTime(coalesce(older.getStartTime(), newer.getStartTime())); // prefer earliest startTime
        merged.setEndTime(coalesce(newer.getEndTime(), older.getEndTime()));
        merged.setDurationMs(coalesce(newer.getDurationMs(), older.getDurationMs()));
        merged.setEngineLevel(coalesce(newer.getEngineLevel(), older.getEngineLevel()));
        merged.setErrorMessage(coalesce(newer.getErrorMessage(), older.getErrorMessage()));
        merged.setErrorStackTrace(coalesce(newer.getErrorStackTrace(), older.getErrorStackTrace()));
        merged.setErrorType(coalesce(newer.getErrorType(), older.getErrorType()));
        merged.setErrorCategory(coalesce(newer.getErrorCategory(), older.getErrorCategory()));
        merged.setRootCauseType(coalesce(newer.getRootCauseType(), older.getRootCauseType()));
        merged.setRootCauseMessage(coalesce(newer.getRootCauseMessage(), older.getRootCauseMessage()));
        merged.setAttributes(coalesce(newer.getAttributes(), older.getAttributes()));
        merged.setTraceId(coalesce(newer.getTraceId(), older.getTraceId()));
        merged.setSpanId(coalesce(newer.getSpanId(), older.getSpanId()));
        merged.setOriginalExchangeId(coalesce(newer.getOriginalExchangeId(), older.getOriginalExchangeId()));
        merged.setReplayExchangeId(coalesce(newer.getReplayExchangeId(), older.getReplayExchangeId()));
        merged.setInputSnapshot(coalesce(newer.getInputSnapshot(), older.getInputSnapshot()));
        merged.setOutputSnapshot(coalesce(newer.getOutputSnapshot(), older.getOutputSnapshot()));
        merged.setChunkSeq(Math.max(newer.getChunkSeq(), older.getChunkSeq()));
        merged.setFinal(newer.isFinal() || older.isFinal());
        merged.setProcessors(List.of()); // processors are handled separately
        return merged;
    }

    /** First non-null of two values (null when both are null). */
    private static <T> T coalesce(T a, T b) {
        return a != null ? a : b;
    }

    // ---- Conversion to MergedExecution ----

    /** Convert a merged envelope into the storage-facing MergedExecution row. */
    private MergedExecution toMergedExecution(ExecutionChunk envelope, boolean hasTraceData) {
        // Best-effort diagram link: an empty hash just means "no diagram yet".
        String diagramHash = "";
        try {
            diagramHash = diagramStore
                    .findContentHashForRoute(envelope.getRouteId(), envelope.getInstanceId())
                    .orElse("");
        } catch (Exception e) {
            log.debug("Could not resolve diagram hash for route={}", envelope.getRouteId());
        }
        String env = environmentResolver.apply(
                envelope.getInstanceId() != null ? envelope.getInstanceId() : "");
        return new MergedExecution(
                this.tenantId,
                1L,
                envelope.getExchangeId(),
                envelope.getRouteId(),
                envelope.getInstanceId(),
                envelope.getApplicationId(),
                env,
                envelope.getStatus() != null ? envelope.getStatus().name() : "RUNNING",
                envelope.getCorrelationId(),
                envelope.getExchangeId(),
                envelope.getStartTime(),
                envelope.getEndTime(),
                envelope.getDurationMs(),
                envelope.getErrorMessage(),
                envelope.getErrorStackTrace(),
                envelope.getErrorType(),
                envelope.getErrorCategory(),
                envelope.getRootCauseType(),
                envelope.getRootCauseMessage(),
                diagramHash,
                envelope.getEngineLevel(),
                extractBody(envelope.getInputSnapshot()),
                extractBody(envelope.getOutputSnapshot()),
                extractHeaders(envelope.getInputSnapshot()),
                extractHeaders(envelope.getOutputSnapshot()),
                extractProperties(envelope.getInputSnapshot()),
                extractProperties(envelope.getOutputSnapshot()),
                serializeAttributes(envelope.getAttributes()),
                envelope.getTraceId(),
                envelope.getSpanId(),
                hasTraceData,
                envelope.getReplayExchangeId() != null, // isReplay
                envelope.getOriginalExchangeId(),
                envelope.getReplayExchangeId()
        );
    }

    /** Snapshot body, or empty string when absent (columns are non-null). */
    private static String extractBody(ExchangeSnapshot snapshot) {
        if (snapshot == null || snapshot.getBody() == null) return "";
        return snapshot.getBody();
    }

    /** Snapshot headers as a JSON object string; empty string when absent or unserializable. */
    private static String extractHeaders(ExchangeSnapshot snapshot) {
        if (snapshot == null || snapshot.getHeaders() == null) return "";
        try {
            return MAPPER.writeValueAsString(snapshot.getHeaders());
        } catch (JsonProcessingException e) {
            log.warn("Failed to serialize snapshot headers", e);
            return "";
        }
    }

    /** Snapshot properties as a JSON object string; empty string when absent or unserializable. */
    private static String extractProperties(ExchangeSnapshot snapshot) {
        if (snapshot == null || snapshot.getProperties() == null) return "";
        try {
            return MAPPER.writeValueAsString(snapshot.getProperties());
        } catch (JsonProcessingException e) {
            log.warn("Failed to serialize snapshot properties", e);
            return "";
        }
    }

    /** Attributes as a JSON object string; "{}" when absent or unserializable. */
    private static String serializeAttributes(Map<String, String> attributes) {
        if (attributes == null || attributes.isEmpty()) {
            return "{}";
        }
        try {
            return MAPPER.writeValueAsString(attributes);
        } catch (JsonProcessingException e) {
            log.warn("Failed to serialize attributes, falling back to empty object", e);
            return "{}";
        }
    }

    // ---- Inner types ----

    /**
     * A batch of processor records from a single chunk, ready for ClickHouse insertion.
     *
     * @param tenantId tenant the records belong to
     * @param executionId exchangeId of the owning execution
     * @param routeId Camel route ID
     * @param applicationId logical application of the reporting instance
     * @param environment resolved environment of the reporting instance
     * @param execStartTime start time of the owning execution
     * @param processors flat processor records carried by the chunk
     */
    public record ProcessorBatch(
            String tenantId,
            String executionId,
            String routeId,
            String applicationId,
            String environment,
            Instant execStartTime,
            List<FlatProcessorRecord> processors
    ) {}

    /**
     * Envelope buffered while waiting for the final chunk.
     * receivedAt is the first arrival time (preserved across merges) so the
     * stale sweep measures total pending time.
     */
    private record PendingExchange(ExecutionChunk envelope, Instant receivedAt, boolean hasTraceData) {}
}

View File

@@ -0,0 +1,198 @@
package com.cameleer.server.core.ingestion;
import com.cameleer.common.model.ExchangeSnapshot;
import com.cameleer.common.model.ProcessorExecution;
import com.cameleer.common.model.RouteExecution;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.cameleer.server.core.indexing.ExecutionUpdatedEvent;
import com.cameleer.server.core.storage.DiagramStore;
import com.cameleer.server.core.storage.ExecutionStore;
import com.cameleer.server.core.storage.ExecutionStore.ExecutionRecord;
import com.cameleer.server.core.storage.ExecutionStore.ProcessorRecord;
import com.cameleer.server.core.storage.model.MetricsSnapshot;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
/**
 * Receives agent-reported data and routes it to the right store: route executions
 * (and their processor steps) go to {@link ExecutionStore}, route diagrams to
 * {@link DiagramStore}, and metrics into a bounded {@link WriteBuffer} so the HTTP
 * layer can signal backpressure when the buffer is full.
 */
public class IngestionService {

    /** Shared, reusable mapper; modules auto-registered, dates written as ISO-8601 strings. */
    private static final ObjectMapper JSON = new ObjectMapper()
            .findAndRegisterModules()
            .disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS);

    private final ExecutionStore executionStore;
    private final DiagramStore diagramStore;
    private final WriteBuffer<MetricsSnapshot> metricsBuffer;
    private final Consumer<ExecutionUpdatedEvent> eventPublisher;  // notified after every execution upsert
    private final int bodySizeLimit;                               // max characters retained for any body payload

    public IngestionService(ExecutionStore executionStore,
                            DiagramStore diagramStore,
                            WriteBuffer<MetricsSnapshot> metricsBuffer,
                            Consumer<ExecutionUpdatedEvent> eventPublisher,
                            int bodySizeLimit) {
        this.executionStore = executionStore;
        this.diagramStore = diagramStore;
        this.metricsBuffer = metricsBuffer;
        this.eventPublisher = eventPublisher;
        this.bodySizeLimit = bodySizeLimit;
    }

    /**
     * Upserts one execution, upserts its flattened processor steps when present,
     * then publishes an {@link ExecutionUpdatedEvent} for downstream indexing.
     */
    public void ingestExecution(String instanceId, String applicationId, RouteExecution execution) {
        ExecutionRecord record = toExecutionRecord(instanceId, applicationId, execution);
        executionStore.upsert(record);
        if (execution.getProcessors() != null && !execution.getProcessors().isEmpty()) {
            List<ProcessorRecord> processors = flattenProcessors(
                    execution.getProcessors(), record.executionId(),
                    record.startTime(), applicationId, execution.getRouteId(),
                    null, 0);
            executionStore.upsertProcessors(
                    record.executionId(), record.startTime(),
                    applicationId, execution.getRouteId(), processors);
        }
        eventPublisher.accept(new ExecutionUpdatedEvent(
                record.executionId(), record.startTime()));
    }

    /** Persists a route diagram together with the reporting agent's identity. */
    public void ingestDiagram(TaggedDiagram diagram) {
        diagramStore.store(diagram);
    }

    /**
     * Buffers a batch of metrics for asynchronous flushing.
     *
     * @return false when the buffer lacks capacity for the whole batch — the caller
     *         should apply backpressure (e.g. HTTP 503)
     */
    public boolean acceptMetrics(List<MetricsSnapshot> metrics) {
        return metricsBuffer.offerBatch(metrics);
    }

    /** @return current number of buffered metrics, for health/monitoring endpoints */
    public int getMetricsBufferDepth() {
        return metricsBuffer.size();
    }

    public WriteBuffer<MetricsSnapshot> getMetricsBuffer() {
        return metricsBuffer;
    }

    /** Maps an agent-reported execution onto the store's flat record shape. */
    private ExecutionRecord toExecutionRecord(String instanceId, String applicationId,
                                              RouteExecution exec) {
        // Link to the diagram the agent last reported for this route ("" when unknown).
        String diagramHash = diagramStore
                .findContentHashForRoute(exec.getRouteId(), instanceId)
                .orElse("");
        // Extract route-level snapshots (critical for REGULAR mode where no processors are recorded)
        String inputBody = null;
        String outputBody = null;
        String inputHeaders = null;
        String outputHeaders = null;
        String inputProperties = null;
        String outputProperties = null;
        ExchangeSnapshot inputSnapshot = exec.getInputSnapshot();
        if (inputSnapshot != null) {
            inputBody = truncateBody(inputSnapshot.getBody());
            inputHeaders = toJson(inputSnapshot.getHeaders());
            inputProperties = toJson(inputSnapshot.getProperties());
        }
        ExchangeSnapshot outputSnapshot = exec.getOutputSnapshot();
        if (outputSnapshot != null) {
            outputBody = truncateBody(outputSnapshot.getBody());
            outputHeaders = toJson(outputSnapshot.getHeaders());
            outputProperties = toJson(outputSnapshot.getProperties());
        }
        boolean hasTraceData = hasAnyTraceData(exec.getProcessors());
        // A replay is flagged either by an explicit replay exchange ID or by a marker header.
        boolean isReplay = exec.getReplayExchangeId() != null;
        if (!isReplay && inputSnapshot != null && inputSnapshot.getHeaders() != null) {
            isReplay = "true".equalsIgnoreCase(
                    String.valueOf(inputSnapshot.getHeaders().get("X-Cameleer-Replay")));
        }
        // NOTE(review): exec.getExchangeId() is passed twice — presumably once as the
        // execution ID and once as the exchange ID; confirm against ExecutionRecord's
        // component order before changing either position.
        return new ExecutionRecord(
                exec.getExchangeId(), exec.getRouteId(), instanceId, applicationId,
                exec.getStatus() != null ? exec.getStatus().name() : "RUNNING",
                exec.getCorrelationId(), exec.getExchangeId(),
                exec.getStartTime(), exec.getEndTime(),
                exec.getDurationMs(),
                exec.getErrorMessage(), exec.getErrorStackTrace(),
                diagramHash,
                exec.getEngineLevel(),
                inputBody, outputBody, inputHeaders, outputHeaders,
                inputProperties, outputProperties,
                toJson(exec.getAttributes()),
                exec.getErrorType(), exec.getErrorCategory(),
                exec.getRootCauseType(), exec.getRootCauseMessage(),
                exec.getTraceId(), exec.getSpanId(),
                toJsonObject(exec.getProcessors()),
                hasTraceData,
                isReplay
        );
    }

    /** @return true when any processor carries a captured input or output body */
    private static boolean hasAnyTraceData(List<ProcessorExecution> processors) {
        if (processors == null) return false;
        for (ProcessorExecution p : processors) {
            if (p.getInputBody() != null || p.getOutputBody() != null) return true;
        }
        return false;
    }

    /**
     * Maps processor executions to flat store records.
     * <p>
     * NOTE(review): despite the {@code parentProcessorId}/{@code depth} parameters
     * this method does not recurse — the only caller passes {@code null, 0} and the
     * list is iterated flat; confirm ProcessorExecution has no nested children
     * before relying on depth being meaningful.
     */
    private List<ProcessorRecord> flattenProcessors(
            List<ProcessorExecution> processors, String executionId,
            java.time.Instant execStartTime, String applicationId, String routeId,
            String parentProcessorId, int depth) {
        List<ProcessorRecord> flat = new ArrayList<>();
        for (ProcessorExecution p : processors) {
            flat.add(new ProcessorRecord(
                    executionId, p.getProcessorId(), p.getProcessorType(),
                    applicationId, routeId,
                    depth, parentProcessorId,
                    p.getStatus() != null ? p.getStatus().name() : "RUNNING",
                    // Fall back to the execution's start time when the step has none.
                    p.getStartTime() != null ? p.getStartTime() : execStartTime,
                    p.getEndTime(),
                    p.getDurationMs(),
                    p.getErrorMessage(), p.getErrorStackTrace(),
                    truncateBody(p.getInputBody()), truncateBody(p.getOutputBody()),
                    toJson(p.getInputHeaders()), toJson(p.getOutputHeaders()),
                    null, null, // inputProperties, outputProperties (not on ProcessorExecution)
                    toJson(p.getAttributes()),
                    // the following nulls fill record positions not sourced here — confirm against ProcessorRecord
                    null, null, null, null, null,
                    p.getResolvedEndpointUri(),
                    p.getErrorType(), p.getErrorCategory(),
                    p.getRootCauseType(), p.getRootCauseMessage(),
                    p.getErrorHandlerType(), p.getCircuitBreakerState(),
                    p.getFallbackTriggered(),
                    null, null, null, null, null, null
            ));
        }
        return flat;
    }

    /** Truncates a body to {@code bodySizeLimit} characters; null passes through unchanged. */
    private String truncateBody(String body) {
        if (body == null) return null;
        if (body.length() > bodySizeLimit) return body.substring(0, bodySizeLimit);
        return body;
    }

    /** Serializes headers to JSON; returns "{}" on failure so the stored column stays valid JSON. */
    private static String toJson(Map<String, String> headers) {
        if (headers == null) return null;
        try {
            return JSON.writeValueAsString(headers);
        } catch (JsonProcessingException e) {
            return "{}";
        }
    }

    /**
     * Serializes an arbitrary object to JSON.
     * NOTE(review): unlike {@link #toJson} this returns null on failure, silently
     * dropping the data — consider logging if that is not intentional.
     */
    private static String toJsonObject(Object obj) {
        if (obj == null) return null;
        try {
            return JSON.writeValueAsString(obj);
        } catch (JsonProcessingException e) {
            return null;
        }
    }
}

View File

@@ -0,0 +1,44 @@
package com.cameleer.server.core.ingestion;
import java.time.Instant;
/**
 * A merged execution envelope ready for ClickHouse insertion.
 * Produced by ChunkAccumulator after receiving the final chunk.
 */
public record MergedExecution(
        // identity & tenancy
        String tenantId,
        long version,              // merge version — presumably drives last-write-wins in ClickHouse; confirm schema
        String executionId,
        String routeId,
        String instanceId,
        String applicationId,
        String environment,
        // lifecycle
        String status,
        String correlationId,
        String exchangeId,
        Instant startTime,
        Instant endTime,
        Long durationMs,           // nullable while still running
        // error details
        String errorMessage,
        String errorStacktrace,
        String errorType,
        String errorCategory,
        String rootCauseType,
        String rootCauseMessage,
        // topology & engine
        String diagramContentHash,
        String engineLevel,
        // captured payloads (headers/properties/attributes are serialized JSON strings)
        String inputBody,
        String outputBody,
        String inputHeaders,
        String outputHeaders,
        String inputProperties,
        String outputProperties,
        String attributes,
        // distributed tracing
        String traceId,
        String spanId,
        // trace/replay flags and replay linkage
        boolean hasTraceData,
        boolean isReplay,
        String originalExchangeId,
        String replayExchangeId
) {}

View File

@@ -0,0 +1,11 @@
package com.cameleer.server.core.ingestion;
import com.cameleer.common.graph.RouteGraph;
/**
 * Pairs a {@link RouteGraph} with the authenticated agent identity.
 * <p>
 * The instance and application IDs are extracted from the SecurityContext in the
 * controller layer and carried through the write buffer so the flush scheduler
 * can persist them alongside the graph.
 */
public record TaggedDiagram(String instanceId, String applicationId, RouteGraph graph) {}

View File

@@ -0,0 +1,11 @@
package com.cameleer.server.core.ingestion;
import com.cameleer.common.model.RouteExecution;
/**
 * Pairs a {@link RouteExecution} with the authenticated agent identity.
 * <p>
 * The instance ID is extracted from the SecurityContext in the controller layer
 * and carried through the write buffer so the flush scheduler can persist it.
 */
public record TaggedExecution(String instanceId, RouteExecution execution) {}

View File

@@ -0,0 +1,98 @@
package com.cameleer.server.core.ingestion;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
/**
 * Bounded write buffer that decouples HTTP ingestion from database batch inserts.
 * <p>
 * Items are offered to the buffer by controllers and drained in batches by a
 * scheduled flush task. When the buffer is full, {@link #offer} returns false,
 * signaling the caller to apply backpressure (HTTP 503).
 * <p>
 * All offer paths are synchronized so that {@link #offerBatch} is genuinely
 * all-or-nothing: the capacity check and the inserts happen atomically with
 * respect to other producers. {@link #drain} stays lock-free — removing items
 * can only increase remaining capacity, so a concurrent drain can never
 * invalidate a capacity check made under the lock.
 *
 * @param <T> the type of items buffered
 */
public class WriteBuffer<T> {

    private static final Logger log = LoggerFactory.getLogger(WriteBuffer.class);

    private final BlockingQueue<T> queue;
    private final int capacity;

    /** @param capacity maximum number of buffered items; must be positive */
    public WriteBuffer(int capacity) {
        this.capacity = capacity;
        this.queue = new ArrayBlockingQueue<>(capacity);
    }

    /**
     * Offer a single item to the buffer.
     *
     * @return true if the item was added, false if the buffer is full
     */
    public synchronized boolean offer(T item) {
        return queue.offer(item);
    }

    /**
     * Offer an item, logging a warning if the buffer is full.
     * Use this as a {@code Consumer<T>} when the caller cannot handle backpressure.
     */
    public synchronized void offerOrWarn(T item) {
        if (!queue.offer(item)) {
            log.warn("WriteBuffer full (capacity={}), item dropped", capacity);
        }
    }

    /**
     * Offer a batch of items with all-or-nothing semantics.
     * If the buffer does not have enough remaining capacity for the entire batch,
     * no items are added and false is returned.
     *
     * @return true if all items were added, false if insufficient capacity
     */
    public synchronized boolean offerBatch(List<T> items) {
        // Holding the lock excludes all other producers, and concurrent drains only
        // free space — so this capacity check stays valid for the entire loop below,
        // making the batch insert atomic (fixes the previous check-then-act race
        // that could leave a partial batch behind).
        if (queue.remainingCapacity() < items.size()) {
            return false;
        }
        for (T item : items) {
            queue.add(item); // cannot fail: capacity was verified under the lock
        }
        return true;
    }

    /**
     * Drain up to {@code maxBatch} items from the buffer.
     * Called by the scheduled flush task; safe without the offer lock (see class doc).
     *
     * @return list of drained items (may be empty)
     */
    public List<T> drain(int maxBatch) {
        List<T> batch = new ArrayList<>(maxBatch);
        queue.drainTo(batch, maxBatch);
        return batch;
    }

    /** @return current number of buffered items */
    public int size() {
        return queue.size();
    }

    /** @return the fixed capacity this buffer was created with */
    public int capacity() {
        return capacity;
    }

    /** @return true when no further item can be accepted right now */
    public boolean isFull() {
        return queue.remainingCapacity() == 0;
    }

    /** @return number of additional items the buffer can currently accept */
    public int remainingCapacity() {
        return queue.remainingCapacity();
    }
}

View File

@@ -0,0 +1,9 @@
package com.cameleer.server.core.license;
/**
 * Feature flags gated by the license tier.
 * <p>
 * Constant names are intentionally lower-case: LicenseValidator resolves them via
 * {@code Feature.valueOf(...)} directly from the license payload's {@code features}
 * array, so renaming a constant is a wire-format change.
 */
public enum Feature {
    topology,
    lineage,
    correlation,
    debugger,
    replay
}

View File

@@ -0,0 +1,35 @@
package com.cameleer.server.core.license;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.concurrent.atomic.AtomicReference;
/**
 * Runtime holder for the currently active license.
 * <p>
 * Starts out with the open license (all features enabled) until {@link #load} is
 * called. All reads go through an {@link AtomicReference}, so lookups are
 * thread-safe and a reload swaps the license atomically.
 * <p>
 * NOTE(review): expiry is not re-checked on read — a license that expires while
 * the server is running keeps its features until the next {@link #load}.
 */
public class LicenseGate {

    private static final Logger log = LoggerFactory.getLogger(LicenseGate.class);

    /** Active license; never null — initialized to the open license. */
    private final AtomicReference<LicenseInfo> active = new AtomicReference<>(LicenseInfo.open());

    /** Installs a freshly validated license and logs its key facts. */
    public void load(LicenseInfo license) {
        active.set(license);
        log.info("License loaded: tier={}, features={}, expires={}",
                license.tier(), license.features(), license.expiresAt());
    }

    /** @return whether the given feature is enabled under the active license */
    public boolean isEnabled(Feature feature) {
        return active.get().hasFeature(feature);
    }

    /** @return the active license tier name */
    public String getTier() {
        return active.get().tier();
    }

    /** @return the numeric limit for {@code key}, or {@code defaultValue} when unset */
    public int getLimit(String key, int defaultValue) {
        return active.get().getLimit(key, defaultValue);
    }

    /** @return the full active license record */
    public LicenseInfo getCurrent() {
        return active.get();
    }
}

View File

@@ -0,0 +1,30 @@
package com.cameleer.server.core.license;
import java.time.Instant;
import java.util.Map;
import java.util.Set;
/**
 * Immutable license facts decoded from a signed license token.
 *
 * @param tier      tier name, e.g. "open" or a paid tier
 * @param features  enabled feature flags
 * @param limits    numeric limits keyed by limit name
 * @param issuedAt  issue instant (payload field {@code iat})
 * @param expiresAt expiry instant (payload field {@code exp}), or null for a
 *                  non-expiring license
 */
public record LicenseInfo(
        String tier,
        Set<Feature> features,
        Map<String, Integer> limits,
        Instant issuedAt,
        Instant expiresAt
) {
    public LicenseInfo {
        // Defensive copies keep the record deeply immutable even when callers pass
        // mutable collections (LicenseValidator builds HashSet/HashMap). copyOf is a
        // no-op for collections that are already immutable.
        features = Set.copyOf(features);
        limits = Map.copyOf(limits);
    }

    /** @return true when an expiry is set and lies in the past */
    public boolean isExpired() {
        return expiresAt != null && Instant.now().isAfter(expiresAt);
    }

    /** @return true when {@code feature} is enabled by this license */
    public boolean hasFeature(Feature feature) {
        return features.contains(feature);
    }

    /** @return the limit for {@code key}, or {@code defaultValue} when absent */
    public int getLimit(String key, int defaultValue) {
        return limits.getOrDefault(key, defaultValue);
    }

    /** Open license — all features enabled, no limits. Used when no license is configured. */
    public static LicenseInfo open() {
        return new LicenseInfo("open", Set.of(Feature.values()), Map.of(), Instant.now(), null);
    }
}

View File

@@ -0,0 +1,92 @@
package com.cameleer.server.core.license;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.security.*;
import java.security.spec.X509EncodedKeySpec;
import java.time.Instant;
import java.util.*;
/**
 * Validates offline license tokens of the form {@code base64(payload).base64(signature)},
 * where the signature is Ed25519 over the raw payload bytes and the payload is a JSON
 * document with fields {@code tier}, {@code features}, {@code limits}, {@code iat},
 * {@code exp}.
 * <p>
 * Thread-safe: holds only the immutable public key and a shared mapper.
 */
public class LicenseValidator {

    private static final Logger log = LoggerFactory.getLogger(LicenseValidator.class);
    private static final ObjectMapper objectMapper = new ObjectMapper();

    /** Ed25519 verification key, decoded once at construction. */
    private final PublicKey publicKey;

    /**
     * @param publicKeyBase64 X.509/SPKI-encoded Ed25519 public key, standard base64
     * @throws IllegalStateException when the key cannot be decoded — fail fast at startup
     */
    public LicenseValidator(String publicKeyBase64) {
        try {
            byte[] keyBytes = Base64.getDecoder().decode(publicKeyBase64);
            KeyFactory kf = KeyFactory.getInstance("Ed25519"); // requires Java 15+
            this.publicKey = kf.generatePublic(new X509EncodedKeySpec(keyBytes));
        } catch (Exception e) {
            throw new IllegalStateException("Failed to load license public key", e);
        }
    }

    /**
     * Verifies the token's signature, parses the payload, and rejects expired licenses.
     *
     * @return the decoded license facts
     * @throws IllegalArgumentException on malformed token, unparsable payload, or expiry
     * @throws SecurityException        when the signature does not verify
     */
    public LicenseInfo validate(String token) {
        String[] parts = token.split("\\.", 2);
        if (parts.length != 2) {
            throw new IllegalArgumentException("Invalid license token format: expected payload.signature");
        }
        byte[] payloadBytes = Base64.getDecoder().decode(parts[0]);
        byte[] signatureBytes = Base64.getDecoder().decode(parts[1]);
        // Verify signature
        try {
            Signature verifier = Signature.getInstance("Ed25519");
            verifier.initVerify(publicKey);
            verifier.update(payloadBytes);
            if (!verifier.verify(signatureBytes)) {
                throw new SecurityException("License signature verification failed");
            }
        } catch (SecurityException e) {
            // Re-throw our own failure unchanged rather than wrapping it below.
            throw e;
        } catch (Exception e) {
            throw new SecurityException("License signature verification failed", e);
        }
        // Parse payload (only after the signature has been verified)
        try {
            JsonNode root = objectMapper.readTree(payloadBytes);
            String tier = root.get("tier").asText(); // NPE when "tier" missing — wrapped by the catch below
            Set<Feature> features = new HashSet<>();
            if (root.has("features")) {
                for (JsonNode f : root.get("features")) {
                    try {
                        features.add(Feature.valueOf(f.asText()));
                    } catch (IllegalArgumentException e) {
                        // Forward compatibility: skip features this server version doesn't know.
                        log.warn("Unknown feature in license: {}", f.asText());
                    }
                }
            }
            Map<String, Integer> limits = new HashMap<>();
            if (root.has("limits")) {
                root.get("limits").fields().forEachRemaining(entry ->
                        limits.put(entry.getKey(), entry.getValue().asInt()));
            }
            Instant issuedAt = root.has("iat") ? Instant.ofEpochSecond(root.get("iat").asLong()) : Instant.now();
            Instant expiresAt = root.has("exp") ? Instant.ofEpochSecond(root.get("exp").asLong()) : null;
            LicenseInfo info = new LicenseInfo(tier, features, limits, issuedAt, expiresAt);
            if (info.isExpired()) {
                throw new IllegalArgumentException("License expired at " + expiresAt);
            }
            return info;
        } catch (IllegalArgumentException e) {
            // Preserve the specific message (e.g. expiry) instead of re-wrapping it.
            throw e;
        } catch (Exception e) {
            throw new IllegalArgumentException("Failed to parse license payload", e);
        }
    }
}

View File

@@ -0,0 +1,5 @@
package com.cameleer.server.core.rbac;
/**
 * How a role/group assignment came to exist: {@code direct} = assigned explicitly
 * by an administrator; {@code managed} = derived from a claim-mapping rule
 * (see RbacService's managed-assignment methods). Lower-case names are presumably
 * persisted verbatim — verify before renaming.
 */
public enum AssignmentOrigin {
    direct, managed
}

View File

@@ -0,0 +1,13 @@
package com.cameleer.server.core.rbac;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
/**
 * Persistence for {@link ClaimMappingRule}s, which translate identity-provider
 * claims into role/group assignments (evaluated by ClaimMappingService).
 */
public interface ClaimMappingRepository {

    List<ClaimMappingRule> findAll();

    Optional<ClaimMappingRule> findById(UUID id);

    /** @return the generated ID of the new rule */
    UUID create(String claim, String matchType, String matchValue, String action, String target, int priority);

    void update(UUID id, String claim, String matchType, String matchValue, String action, String target, int priority);

    void delete(UUID id);
}

View File

@@ -0,0 +1,18 @@
package com.cameleer.server.core.rbac;
import java.time.Instant;
import java.util.UUID;
/**
 * A rule mapping an identity-provider claim to an RBAC action.
 * <p>
 * {@code matchType} and {@code action} are stored as raw strings; the nested
 * {@link MatchType} and {@link Action} enums document the valid values
 * (ClaimMappingService switches on the strings, not the enums).
 *
 * @param claim      name of the claim to inspect (e.g. "groups", "scope")
 * @param matchType  one of {@link MatchType}, stored lower-case
 * @param matchValue value (or regex pattern) compared against the claim
 * @param action     one of {@link Action}
 * @param target     the role or group the action applies to — presumably an ID or name; confirm against schema
 * @param priority   evaluation order; lower values are evaluated first (see ClaimMappingService)
 */
public record ClaimMappingRule(
        UUID id,
        String claim,
        String matchType,
        String matchValue,
        String action,
        String target,
        int priority,
        Instant createdAt
) {
    public enum MatchType { equals, contains, regex }
    public enum Action { assignRole, addToGroup }
}

View File

@@ -0,0 +1,66 @@
package com.cameleer.server.core.rbac;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.regex.Pattern;
/**
 * Evaluates claim-mapping rules against a set of identity-provider claims.
 * Stateless and thread-safe.
 */
public class ClaimMappingService {

    private static final Logger log = LoggerFactory.getLogger(ClaimMappingService.class);

    /** Wraps a rule that matched the presented claims. */
    public record MappingResult(ClaimMappingRule rule) {}

    /**
     * Returns the matching rules in ascending priority order (stable for ties).
     */
    public List<MappingResult> evaluate(List<ClaimMappingRule> rules, Map<String, Object> claims) {
        List<ClaimMappingRule> ordered = new ArrayList<>(rules);
        ordered.sort(Comparator.comparingInt(ClaimMappingRule::priority));
        List<MappingResult> matched = new ArrayList<>();
        for (ClaimMappingRule rule : ordered) {
            if (matches(rule, claims)) {
                matched.add(new MappingResult(rule));
            }
        }
        return List.copyOf(matched);
    }

    /** Dispatches on the rule's match type; unknown types are logged and treated as non-matching. */
    private boolean matches(ClaimMappingRule rule, Map<String, Object> claims) {
        Object claimValue = claims.get(rule.claim());
        if (claimValue == null) return false;
        return switch (rule.matchType()) {
            case "equals" -> equalsMatch(claimValue, rule.matchValue());
            case "contains" -> containsMatch(claimValue, rule.matchValue());
            case "regex" -> regexMatch(claimValue, rule.matchValue());
            default -> {
                log.warn("Unknown match type: {}", rule.matchType());
                yield false;
            }
        };
    }

    /** Case-insensitive equality; non-string claims are compared via their string form. */
    private boolean equalsMatch(Object claimValue, String matchValue) {
        String text = claimValue instanceof String s ? s : String.valueOf(claimValue);
        return text.equalsIgnoreCase(matchValue);
    }

    /** Membership test for list claims, or token test for space-separated string claims. */
    private boolean containsMatch(Object claimValue, String matchValue) {
        if (claimValue instanceof List<?> values) {
            for (Object item : values) {
                if (String.valueOf(item).equalsIgnoreCase(matchValue)) {
                    return true;
                }
            }
            return false;
        }
        if (claimValue instanceof String s) {
            // Space-separated string (e.g., OAuth2 scope claim)
            for (String part : s.split("\\s+")) {
                if (part.equalsIgnoreCase(matchValue)) {
                    return true;
                }
            }
        }
        return false;
    }

    /** Full-string regex match; an invalid pattern is logged and treated as non-matching. */
    private boolean regexMatch(Object claimValue, String matchValue) {
        try {
            return Pattern.matches(matchValue, String.valueOf(claimValue));
        } catch (Exception e) {
            log.warn("Invalid regex in claim mapping rule: {}", matchValue, e);
            return false;
        }
    }
}

View File

@@ -0,0 +1,9 @@
package com.cameleer.server.core.rbac;
import java.time.Instant;
import java.util.List;
import java.util.UUID;
/**
 * Full group view for the admin UI: identity, direct vs. effective roles
 * (effective presumably includes roles inherited through the group hierarchy —
 * confirm with the RbacService implementation), members, and immediate children.
 */
public record GroupDetail(UUID id, String name, UUID parentGroupId, Instant createdAt,
        List<RoleSummary> directRoles, List<RoleSummary> effectiveRoles,
        List<UserSummary> members, List<GroupSummary> childGroups) {}

View File

@@ -0,0 +1,17 @@
package com.cameleer.server.core.rbac;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
/**
 * Persistence for groups, which form a hierarchy via {@code parentGroupId}.
 */
public interface GroupRepository {

    List<GroupSummary> findAll();

    Optional<GroupDetail> findById(UUID id);

    /** @param parentGroupId parent in the hierarchy, or null for a root group */
    UUID create(String name, UUID parentGroupId);

    void update(UUID id, String name, UUID parentGroupId);

    void delete(UUID id);

    // --- role assignment at group level ---

    void addRole(UUID groupId, UUID roleId);

    void removeRole(UUID groupId, UUID roleId);

    List<GroupSummary> findChildGroups(UUID parentId);

    /** Chain from the given group toward the root — element order not specified here; confirm with implementation. */
    List<GroupSummary> findAncestorChain(UUID groupId);
}

View File

@@ -0,0 +1,5 @@
package com.cameleer.server.core.rbac;
import java.util.UUID;
/** Lightweight group reference for list views and membership summaries. */
public record GroupSummary(UUID id, String name) {}

View File

@@ -0,0 +1,23 @@
package com.cameleer.server.core.rbac;
import java.util.List;
import java.util.UUID;
/**
 * Facade over the RBAC model: users, groups, roles, and the distinction between
 * direct assignments and "managed" assignments derived from claim-mapping rules.
 */
public interface RbacService {

    List<UserDetail> listUsers();

    UserDetail getUser(String userId);

    // --- direct assignments (administrator-driven) ---

    void assignRoleToUser(String userId, UUID roleId);

    void removeRoleFromUser(String userId, UUID roleId);

    void addUserToGroup(String userId, UUID groupId);

    void removeUserFromGroup(String userId, UUID groupId);

    // --- effective resolution (direct plus — presumably — inheritance through the group hierarchy; confirm) ---

    List<RoleSummary> getDirectRolesForUser(String userId);

    List<RoleSummary> getEffectiveRolesForUser(String userId);

    List<GroupSummary> getEffectiveGroupsForUser(String userId);

    List<RoleSummary> getEffectiveRolesForGroup(UUID groupId);

    List<UserSummary> getEffectivePrincipalsForRole(UUID roleId);

    /** Names of the built-in system roles the user effectively holds (see SystemRole). */
    List<String> getSystemRoleNames(String userId);

    RbacStats getStats();

    // --- managed assignments (synced from claim-mapping rules; mappingId records which rule produced them) ---

    void clearManagedAssignments(String userId);

    void assignManagedRole(String userId, UUID roleId, UUID mappingId);

    void addUserToManagedGroup(String userId, UUID groupId, UUID mappingId);
}

View File

@@ -0,0 +1,3 @@
package com.cameleer.server.core.rbac;
/** Aggregate counts for the RBAC admin dashboard. */
public record RbacStats(int userCount, int activeUserCount, int groupCount, int maxGroupDepth, int roleCount) {}

View File

@@ -0,0 +1,9 @@
package com.cameleer.server.core.rbac;
import java.time.Instant;
import java.util.List;
import java.util.UUID;
/**
 * Full role view. {@code system} marks the seeded built-in roles (see SystemRole) —
 * presumably protected from edit/delete in the service layer; confirm.
 */
public record RoleDetail(UUID id, String name, String description, String scope, boolean system,
        Instant createdAt, List<GroupSummary> assignedGroups, List<UserSummary> directUsers,
        List<UserSummary> effectivePrincipals) {}

View File

@@ -0,0 +1,13 @@
package com.cameleer.server.core.rbac;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
/**
 * Persistence for roles.
 */
public interface RoleRepository {

    List<RoleDetail> findAll();

    Optional<RoleDetail> findById(UUID id);

    /** @return the generated ID of the new role */
    UUID create(String name, String description, String scope);

    void update(UUID id, String name, String description, String scope);

    void delete(UUID id);
}

View File

@@ -0,0 +1,5 @@
package com.cameleer.server.core.rbac;
import java.util.UUID;
/** Lightweight role reference; {@code source} records how the role was obtained — presumably direct vs. via group; confirm values. */
public record RoleSummary(UUID id, String name, boolean system, String source) {}

View File

@@ -0,0 +1,33 @@
package com.cameleer.server.core.rbac;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
/**
 * Well-known RBAC role and group IDs. These fixed UUIDs presumably match seeded
 * database rows — verify against the migrations before changing any of them.
 */
public final class SystemRole {

    private SystemRole() {} // static holder — not instantiable

    public static final UUID AGENT_ID = UUID.fromString("00000000-0000-0000-0000-000000000001");
    public static final UUID VIEWER_ID = UUID.fromString("00000000-0000-0000-0000-000000000002");
    public static final UUID OPERATOR_ID = UUID.fromString("00000000-0000-0000-0000-000000000003");
    public static final UUID ADMIN_ID = UUID.fromString("00000000-0000-0000-0000-000000000004");
    public static final UUID ADMINS_GROUP_ID = UUID.fromString("00000000-0000-0000-0000-000000000010");

    /** All four system role IDs (the admins group is not a role and is excluded). */
    public static final Set<UUID> IDS = Set.of(AGENT_ID, VIEWER_ID, OPERATOR_ID, ADMIN_ID);

    /** Lookup of system roles by canonical upper-case name. */
    public static final Map<String, UUID> BY_NAME = Map.of(
            "AGENT", AGENT_ID, "VIEWER", VIEWER_ID, "OPERATOR", OPERATOR_ID, "ADMIN", ADMIN_ID);

    /** @return true when {@code id} is one of the four system roles */
    public static boolean isSystem(UUID id) { return IDS.contains(id); }

    /**
     * Normalizes an OIDC scope name to a system role name.
     * Strips optional {@code server:} prefix, case-insensitive.
     * E.g. {@code "server:admin"} → {@code "ADMIN"}, {@code "viewer"} → {@code "VIEWER"}.
     */
    public static String normalizeScope(String scope) {
        // Locale.ROOT avoids locale-sensitive casing: under a Turkish default locale,
        // plain toUpperCase() turns "viewer" into "VİEWER" (dotted capital I), which
        // would break BY_NAME lookups and prefix matching.
        String upper = scope.toUpperCase(java.util.Locale.ROOT);
        return upper.startsWith("SERVER:") ? upper.substring("SERVER:".length()) : upper;
    }
}

View File

@@ -0,0 +1,8 @@
package com.cameleer.server.core.rbac;
import java.time.Instant;
import java.util.List;
/**
 * Full user view: identity-provider provenance plus direct and effective
 * role/group assignments.
 */
public record UserDetail(String userId, String provider, String email, String displayName,
        Instant createdAt, List<RoleSummary> directRoles, List<GroupSummary> directGroups,
        List<RoleSummary> effectiveRoles, List<GroupSummary> effectiveGroups) {}

View File

@@ -0,0 +1,3 @@
package com.cameleer.server.core.rbac;
/** Lightweight user reference for membership and principal lists. */
public record UserSummary(String userId, String displayName, String provider) {}

View File

@@ -0,0 +1,8 @@
package com.cameleer.server.core.runtime;
import java.time.Instant;
import java.util.Map;
import java.util.UUID;
/**
 * A deployable application within an environment. {@code slug} is the URL-safe
 * identifier, unique per environment (see AppRepository.findByEnvironmentIdAndSlug);
 * {@code containerConfig} holds per-app overrides merged by ConfigMerger.
 */
public record App(UUID id, UUID environmentId, String slug, String displayName,
        Map<String, Object> containerConfig, Instant createdAt, Instant updatedAt) {}

View File

@@ -0,0 +1,17 @@
package com.cameleer.server.core.runtime;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
/**
 * Persistence for apps. Slug uniqueness is scoped per environment; the unscoped
 * {@link #findBySlug} search across environments presumably expects globally unique
 * slugs in practice — confirm before relying on it.
 */
public interface AppRepository {

    List<App> findByEnvironmentId(UUID environmentId);

    List<App> findAll();

    Optional<App> findById(UUID id);

    Optional<App> findByEnvironmentIdAndSlug(UUID environmentId, String slug);

    Optional<App> findBySlug(String slug);

    /** @return the generated ID of the new app */
    UUID create(UUID environmentId, String slug, String displayName);

    void updateContainerConfig(UUID id, Map<String, Object> containerConfig);

    void delete(UUID id);
}

View File

@@ -0,0 +1,102 @@
package com.cameleer.server.core.runtime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.util.HexFormat;
import java.util.List;
import java.util.Map;
import java.util.UUID;
/**
 * Application lifecycle service: app CRUD plus versioned JAR uploads.
 * <p>
 * JARs are stored on the local filesystem under
 * {@code {jarStoragePath}/{appId}/v{version}/app.jar}, with a SHA-256 checksum
 * computed while the upload streams to disk.
 */
public class AppService {

    private static final Logger log = LoggerFactory.getLogger(AppService.class);

    private final AppRepository appRepo;
    private final AppVersionRepository versionRepo;
    private final String jarStoragePath; // root directory for uploaded JARs

    public AppService(AppRepository appRepo, AppVersionRepository versionRepo, String jarStoragePath) {
        this.appRepo = appRepo;
        this.versionRepo = versionRepo;
        this.jarStoragePath = jarStoragePath;
    }

    public List<App> listAll() { return appRepo.findAll(); }

    public List<App> listByEnvironment(UUID environmentId) { return appRepo.findByEnvironmentId(environmentId); }

    /** @throws IllegalArgumentException when no app exists with {@code id} */
    public App getById(UUID id) { return appRepo.findById(id).orElseThrow(() -> new IllegalArgumentException("App not found: " + id)); }

    /** @throws IllegalArgumentException when no app exists with {@code slug} */
    public App getBySlug(String slug) { return appRepo.findBySlug(slug).orElseThrow(() -> new IllegalArgumentException("App not found: " + slug)); }

    public List<AppVersion> listVersions(UUID appId) { return versionRepo.findByAppId(appId); }

    /** @throws IllegalArgumentException when no version exists with {@code versionId} */
    public AppVersion getVersion(UUID versionId) {
        return versionRepo.findById(versionId)
                .orElseThrow(() -> new IllegalArgumentException("AppVersion not found: " + versionId));
    }

    public void updateContainerConfig(UUID id, Map<String, Object> containerConfig) {
        getById(id); // verify exists
        appRepo.updateContainerConfig(id, containerConfig);
    }

    /**
     * Creates an app after checking slug uniqueness within the environment.
     * NOTE(review): check-then-create is not atomic — a concurrent create with the
     * same slug can race past this check; a DB unique constraint should back it up.
     */
    public UUID createApp(UUID environmentId, String slug, String displayName) {
        if (appRepo.findByEnvironmentIdAndSlug(environmentId, slug).isPresent()) {
            throw new IllegalArgumentException("App with slug '" + slug + "' already exists in this environment");
        }
        return appRepo.create(environmentId, slug, displayName);
    }

    /**
     * Streams an uploaded JAR to disk as the app's next version, computing SHA-256
     * on the fly, then runs runtime-type detection on the stored file.
     *
     * @param jarData the upload stream; closed here via try-with-resources
     * @param size    declared upload size in bytes (stored as metadata, not enforced here)
     * @throws IOException on storage failure — NOTE(review): a partially written
     *                     file may remain on disk in that case; confirm cleanup policy
     */
    public AppVersion uploadJar(UUID appId, String filename, InputStream jarData, long size) throws IOException {
        getById(appId); // verify app exists
        int nextVersion = versionRepo.findMaxVersion(appId) + 1;
        // Store JAR: {jarStoragePath}/{appId}/v{version}/app.jar
        Path versionDir = Path.of(jarStoragePath, appId.toString(), "v" + nextVersion);
        Files.createDirectories(versionDir);
        Path jarFile = versionDir.resolve("app.jar");
        MessageDigest digest;
        try { digest = MessageDigest.getInstance("SHA-256"); }
        catch (Exception e) { throw new RuntimeException(e); } // SHA-256 is mandated by the JDK spec; effectively unreachable
        try (InputStream in = jarData) {
            byte[] buffer = new byte[8192];
            int bytesRead;
            try (var out = Files.newOutputStream(jarFile)) {
                // Single pass: write to disk and feed the digest from the same buffer.
                while ((bytesRead = in.read(buffer)) != -1) {
                    out.write(buffer, 0, bytesRead);
                    digest.update(buffer, 0, bytesRead);
                }
            }
        }
        String checksum = HexFormat.of().formatHex(digest.digest());
        UUID versionId = versionRepo.create(appId, nextVersion, jarFile.toString(), checksum, filename, size);
        // Detect runtime type from the saved JAR
        RuntimeDetector.DetectionResult detection = RuntimeDetector.detect(jarFile);
        if (detection.runtimeType() != null) {
            versionRepo.updateDetectedRuntime(versionId, detection.runtimeType().toConfigValue(), detection.mainClass());
            log.info("Uploaded JAR for app {}: version={}, size={}, sha256={}, detected={}",
                    appId, nextVersion, size, checksum, detection.runtimeType().toConfigValue());
        } else {
            log.info("Uploaded JAR for app {}: version={}, size={}, sha256={}, detected=unknown",
                    appId, nextVersion, size, checksum);
        }
        return versionRepo.findById(versionId).orElseThrow();
    }

    /** @return filesystem path of the stored JAR for the given version */
    public String resolveJarPath(UUID appVersionId) {
        AppVersion version = versionRepo.findById(appVersionId)
                .orElseThrow(() -> new IllegalArgumentException("AppVersion not found: " + appVersionId));
        return version.jarPath();
    }

    /**
     * Deletes the app row.
     * NOTE(review): stored JAR files and version rows are not removed here —
     * presumably handled by cascade or a cleanup job elsewhere; verify.
     */
    public void deleteApp(UUID id) {
        appRepo.delete(id);
    }
}

View File

@@ -0,0 +1,8 @@
package com.cameleer.server.core.runtime;
import java.time.Instant;
import java.util.UUID;
/**
 * An immutable uploaded build of an app. {@code jarChecksum} is the SHA-256 hex
 * digest computed at upload time; {@code detectedRuntimeType}/{@code detectedMainClass}
 * are filled in by RuntimeDetector after upload and may be null (see AppService.uploadJar).
 */
public record AppVersion(UUID id, UUID appId, int version, String jarPath, String jarChecksum,
        String jarFilename, Long jarSizeBytes, String detectedRuntimeType,
        String detectedMainClass, Instant uploadedAt) {}

View File

@@ -0,0 +1,14 @@
package com.cameleer.server.core.runtime;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
/**
 * Persistence for app versions.
 */
public interface AppVersionRepository {

    List<AppVersion> findByAppId(UUID appId);

    Optional<AppVersion> findById(UUID id);

    /** @return the highest existing version number for the app — presumably 0 when none exist (AppService adds 1); confirm */
    int findMaxVersion(UUID appId);

    /** @return the generated ID of the new version row */
    UUID create(UUID appId, int version, String jarPath, String jarChecksum, String jarFilename, Long jarSizeBytes);

    void delete(UUID id);

    /** Records the runtime type / main class detected from the uploaded JAR. */
    void updateDetectedRuntime(UUID id, String detectedRuntimeType, String detectedMainClass);
}

View File

@@ -0,0 +1,112 @@
package com.cameleer.server.core.runtime;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Merges container configuration from three layers into one resolved config.
 * Per key, precedence is: app-level override > environment-level override >
 * global/built-in default.
 * <p>
 * NOTE(review): the scalar helpers fall through to the next layer when a value has
 * an unexpected type, but the list/map helpers do not — an app key that is present
 * with the wrong type shadows the env value there. Confirm whether that asymmetry
 * is intentional before changing it.
 */
public final class ConfigMerger {

    private ConfigMerger() {} // static utility — not instantiable

    /**
     * Resolves the effective container config for one app.
     *
     * @param global    server-wide defaults from application.yml
     * @param envConfig environment-level overrides (non-null, may be empty)
     * @param appConfig app-level overrides (non-null, may be empty)
     */
    public static ResolvedContainerConfig resolve(
            GlobalRuntimeDefaults global,
            Map<String, Object> envConfig,
            Map<String, Object> appConfig) {
        return new ResolvedContainerConfig(
                intVal(appConfig, envConfig, "memoryLimitMb", global.memoryLimitMb()),
                intOrNull(appConfig, envConfig, "memoryReserveMb"),
                intVal(appConfig, envConfig, "cpuRequest", global.cpuRequest()),
                intOrNull(appConfig, envConfig, "cpuLimit"),
                intVal(appConfig, envConfig, "appPort", 8080),
                intList(appConfig, envConfig, "exposedPorts"),
                stringMap(appConfig, envConfig, "customEnvVars"),
                boolVal(appConfig, envConfig, "stripPathPrefix", true),
                boolVal(appConfig, envConfig, "sslOffloading", true),
                stringVal(appConfig, envConfig, "routingMode", global.routingMode()),
                stringVal(appConfig, envConfig, "routingDomain", global.routingDomain()),
                stringVal(appConfig, envConfig, "serverUrl", global.serverUrl()),
                intVal(appConfig, envConfig, "replicas", 1),
                stringVal(appConfig, envConfig, "deploymentStrategy", "blue-green"),
                boolVal(appConfig, envConfig, "routeControlEnabled", true),
                boolVal(appConfig, envConfig, "replayEnabled", true),
                stringVal(appConfig, envConfig, "runtimeType", "auto"),
                stringVal(appConfig, envConfig, "customArgs", ""),
                stringList(appConfig, envConfig, "extraNetworks")
        );
    }

    // --- scalar lookups: app wins over env; wrong-typed values fall through ---

    private static int intVal(Map<String, Object> app, Map<String, Object> env, String key, int fallback) {
        if (app.containsKey(key) && app.get(key) instanceof Number n) return n.intValue();
        if (env.containsKey(key) && env.get(key) instanceof Number n) return n.intValue();
        return fallback;
    }

    private static Integer intOrNull(Map<String, Object> app, Map<String, Object> env, String key) {
        if (app.containsKey(key) && app.get(key) instanceof Number n) return n.intValue();
        if (env.containsKey(key) && env.get(key) instanceof Number n) return n.intValue();
        return null;
    }

    private static boolean boolVal(Map<String, Object> app, Map<String, Object> env, String key, boolean fallback) {
        if (app.containsKey(key) && app.get(key) instanceof Boolean b) return b;
        if (env.containsKey(key) && env.get(key) instanceof Boolean b) return b;
        return fallback;
    }

    private static String stringVal(Map<String, Object> app, Map<String, Object> env, String key, String fallback) {
        if (app.containsKey(key) && app.get(key) instanceof String s) return s;
        if (env.containsKey(key) && env.get(key) instanceof String s) return s;
        return fallback;
    }

    // --- collection lookups: app presence (even wrong-typed) shadows env; see class note ---

    private static List<Integer> intList(Map<String, Object> app, Map<String, Object> env, String key) {
        Object val = app.containsKey(key) ? app.get(key) : env.get(key);
        if (val instanceof List<?> list) {
            // Silently drops non-numeric elements.
            return list.stream()
                    .filter(Number.class::isInstance)
                    .map(n -> ((Number) n).intValue())
                    .toList();
        }
        return List.of();
    }

    private static List<String> stringList(Map<String, Object> app, Map<String, Object> env, String key) {
        Object val = app.containsKey(key) ? app.get(key) : env.get(key);
        if (val instanceof List<?> list) {
            // Silently drops non-string elements.
            return list.stream()
                    .filter(String.class::isInstance)
                    .map(String.class::cast)
                    .toList();
        }
        return List.of();
    }

    private static Map<String, String> stringMap(Map<String, Object> app, Map<String, Object> env, String key) {
        Object val = app.containsKey(key) ? app.get(key) : env.get(key);
        if (val instanceof Map<?, ?> map) {
            // Keys and values are coerced via String.valueOf.
            Map<String, String> result = new HashMap<>();
            map.forEach((k, v) -> result.put(String.valueOf(k), String.valueOf(v)));
            return Collections.unmodifiableMap(result);
        }
        return Map.of();
    }

    /** Global defaults extracted from application.yml @Value fields */
    public record GlobalRuntimeDefaults(
            int memoryLimitMb,
            int cpuRequest,
            String routingMode,
            String routingDomain,
            String serverUrl
    ) {}
}

View File

@@ -0,0 +1,27 @@
package com.cameleer.server.core.runtime;
import java.util.List;
import java.util.Map;
/**
 * Everything a container-runtime adapter needs to create and start one app container.
 * Built from the resolved container config.
 */
public record ContainerRequest(
        String containerName,
        String baseImage,
        String jarPath,                  // host path of the JAR to run
        String jarVolumeName,
        String jarVolumeMountPath,
        String network,                  // primary network to attach
        List<String> additionalNetworks,
        Map<String, String> envVars,
        Map<String, String> labels,
        long memoryLimitBytes,
        Long memoryReserveBytes,         // nullable: no reservation
        int cpuShares,
        Long cpuQuota,                   // nullable: no hard CPU limit
        List<Integer> exposedPorts,
        int healthCheckPort,
        String restartPolicyName,
        int restartPolicyMaxRetries,
        String runtimeType,              // e.g. "auto" or a detected runtime (see ConfigMerger)
        String customArgs,
        String mainClass                 // nullable — presumably only set when detection found one; confirm
) {}

View File

@@ -0,0 +1,7 @@
package com.cameleer.server.core.runtime;
/**
 * Snapshot of a container's lifecycle state as reported by the runtime.
 *
 * @param state    runtime-reported state string; "not_found" when the container is unknown
 * @param running  whether the container is currently running
 * @param exitCode last exit code; -1 when not applicable
 * @param error    error description (null when healthy — presumed; confirm with callers)
 */
public record ContainerStatus(String state, boolean running, int exitCode, String error) {
    /** Sentinel status for a container the runtime does not know about. */
    public static ContainerStatus notFound() {
        return new ContainerStatus("not_found", false, -1, "Container not found");
    }
}

View File

@@ -0,0 +1,5 @@
package com.cameleer.server.core.runtime;
/** Ordered stages of a deployment rollout, from pre-flight checks to completion. */
public enum DeployStage {
    PRE_FLIGHT, PULL_IMAGE, CREATE_NETWORK, START_REPLICAS, HEALTH_CHECK, SWAP_TRAFFIC, COMPLETE
}

View File

@@ -0,0 +1,32 @@
package com.cameleer.server.core.runtime;
import java.time.Instant;
import java.util.List;
import java.util.Map;
import java.util.UUID;
/**
 * A deployment of an app version into an environment, including runtime state.
 *
 * @param id                 deployment identifier
 * @param appId              app being deployed
 * @param appVersionId       specific version of the app
 * @param environmentId      target environment
 * @param status             current lifecycle status
 * @param targetState        desired state (semantics owned by the executor — TODO confirm values)
 * @param deploymentStrategy rollout strategy name
 * @param replicaStates      per-replica state maps (shape owned by the executor — TODO confirm)
 * @param deployStage        current DeployStage name while a rollout is in progress
 * @param containerId        runtime container id once started
 * @param containerName      container name (built as "&lt;env-slug&gt;-&lt;app-slug&gt;" by DeploymentService)
 * @param errorMessage       failure detail when status is FAILED
 * @param resolvedConfig     merged container configuration used for this deployment
 * @param deployedAt         when the deployment went live
 * @param stoppedAt          when the deployment was stopped
 * @param createdAt          when the record was created
 */
public record Deployment(
    UUID id,
    UUID appId,
    UUID appVersionId,
    UUID environmentId,
    DeploymentStatus status,
    String targetState,
    String deploymentStrategy,
    List<Map<String, Object>> replicaStates,
    String deployStage,
    String containerId,
    String containerName,
    String errorMessage,
    Map<String, Object> resolvedConfig,
    Instant deployedAt,
    Instant stoppedAt,
    Instant createdAt
) {
    /** Copy with a new status; every other field is carried over unchanged. */
    public Deployment withStatus(DeploymentStatus newStatus) {
        return new Deployment(id, appId, appVersionId, environmentId, newStatus,
            targetState, deploymentStrategy, replicaStates, deployStage,
            containerId, containerName, errorMessage, resolvedConfig,
            deployedAt, stoppedAt, createdAt);
    }
}

View File

@@ -0,0 +1,17 @@
package com.cameleer.server.core.runtime;
import java.util.List;
import java.util.Optional;
import java.util.UUID;
/**
 * Persistence for {@code Deployment} rows.
 * Status transitions go through {@link #updateStatus}; timestamps through the mark* methods.
 */
public interface DeploymentRepository {
    /** All deployments (any status) for the given app. */
    List<Deployment> findByAppId(UUID appId);
    /** All deployments (any status) in the given environment. */
    List<Deployment> findByEnvironmentId(UUID environmentId);
    Optional<Deployment> findById(UUID id);
    /** The active (non-terminal) deployment for an app/environment pair, if any. */
    Optional<Deployment> findActiveByAppIdAndEnvironmentId(UUID appId, UUID environmentId);
    /** Insert a new deployment row; returns its generated id. */
    UUID create(UUID appId, UUID appVersionId, UUID environmentId, String containerName);
    /** Update status plus container id / error message (null semantics implementation-defined — TODO confirm). */
    void updateStatus(UUID id, DeploymentStatus status, String containerId, String errorMessage);
    /** Record the deployed-at timestamp. */
    void markDeployed(UUID id);
    /** Record the stopped-at timestamp. */
    void markStopped(UUID id);
    /** Delete terminal deployments for an app/environment pair (presumably STOPPED/FAILED — confirm in impl). */
    void deleteTerminalByAppAndEnvironment(UUID appId, UUID environmentId);
}

View File

@@ -0,0 +1,54 @@
package com.cameleer.server.core.runtime;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.UUID;
/**
 * Application service for deployment lifecycle records.
 * <p>
 * Creates deployment rows and transitions their status. The actual container
 * start/stop is performed asynchronously by a DeploymentExecutor (per the
 * original inline comment); this service only manages the persisted state.
 */
public class DeploymentService {
    private static final Logger log = LoggerFactory.getLogger(DeploymentService.class);
    private final DeploymentRepository deployRepo;
    private final AppService appService;
    private final EnvironmentService envService;

    public DeploymentService(DeploymentRepository deployRepo, AppService appService, EnvironmentService envService) {
        this.deployRepo = deployRepo;
        this.appService = appService;
        this.envService = envService;
    }

    /** All deployments for the given app. */
    public List<Deployment> listByApp(UUID appId) {
        return deployRepo.findByAppId(appId);
    }

    /** @throws IllegalArgumentException if no deployment with the given id exists */
    public Deployment getById(UUID id) {
        return deployRepo.findById(id)
                .orElseThrow(() -> new IllegalArgumentException("Deployment not found: " + id));
    }

    /** Create a deployment record. Actual container start is handled by DeploymentExecutor (async). */
    public Deployment createDeployment(UUID appId, UUID appVersionId, UUID environmentId) {
        App app = appService.getById(appId);                 // also validates the app exists
        Environment env = envService.getById(environmentId); // also validates the environment exists
        String containerName = env.slug() + "-" + app.slug();
        // Clear stale terminal records so this app/env pair keeps only the fresh deployment row.
        deployRepo.deleteTerminalByAppAndEnvironment(appId, environmentId);
        UUID deploymentId = deployRepo.create(appId, appVersionId, environmentId, containerName);
        // Previously the class declared a logger but never used it; log the key lifecycle events.
        log.info("Created deployment {} for app {} in environment {}", deploymentId, appId, environmentId);
        return deployRepo.findById(deploymentId).orElseThrow();
    }

    /** Promote: deploy the same app version to a different environment. */
    public Deployment promote(UUID appId, UUID appVersionId, UUID targetEnvironmentId) {
        return createDeployment(appId, appVersionId, targetEnvironmentId);
    }

    /** Mark the deployment RUNNING, record its container id and deployed-at timestamp. */
    public void markRunning(UUID deploymentId, String containerId) {
        deployRepo.updateStatus(deploymentId, DeploymentStatus.RUNNING, containerId, null);
        deployRepo.markDeployed(deploymentId);
    }

    /** Mark the deployment FAILED with the given error message. */
    public void markFailed(UUID deploymentId, String errorMessage) {
        log.warn("Deployment {} failed: {}", deploymentId, errorMessage);
        deployRepo.updateStatus(deploymentId, DeploymentStatus.FAILED, null, errorMessage);
    }

    /** Mark the deployment STOPPED and record its stopped-at timestamp. */
    public void markStopped(UUID deploymentId) {
        deployRepo.updateStatus(deploymentId, DeploymentStatus.STOPPED, null, null);
        deployRepo.markStopped(deploymentId);
    }
}

View File

@@ -0,0 +1,5 @@
package com.cameleer.server.core.runtime;
/** Lifecycle states of a deployment (STOPPED/FAILED are presumably the terminal set — TODO confirm). */
public enum DeploymentStatus {
    STOPPED, STARTING, RUNNING, DEGRADED, STOPPING, FAILED
}

View File

@@ -0,0 +1,16 @@
package com.cameleer.server.core.runtime;
import java.time.Instant;
import java.util.Map;
import java.util.UUID;
/**
 * A deployment target environment.
 *
 * @param id                     environment identifier
 * @param slug                   unique URL-safe identifier; "default" is protected from deletion
 *                               (enforced in EnvironmentService#delete)
 * @param displayName            human-readable name
 * @param production             whether this environment is flagged as production
 * @param enabled                presumably gates whether deployments are allowed — confirm with callers
 * @param defaultContainerConfig environment-level container config defaults
 * @param jarRetentionCount      how many uploaded JARs to keep; null = unlimited
 * @param createdAt              creation timestamp
 */
public record Environment(
    UUID id,
    String slug,
    String displayName,
    boolean production,
    boolean enabled,
    Map<String, Object> defaultContainerConfig,
    Integer jarRetentionCount,
    Instant createdAt
) {}

View File

@@ -0,0 +1,17 @@
package com.cameleer.server.core.runtime;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.UUID;
/** Persistence for {@code Environment} rows. */
public interface EnvironmentRepository {
    List<Environment> findAll();
    Optional<Environment> findById(UUID id);
    Optional<Environment> findBySlug(String slug);
    /** Insert a new environment; returns its generated id. */
    UUID create(String slug, String displayName, boolean production);
    void update(UUID id, String displayName, boolean production, boolean enabled);
    /** Replace the environment-level container config defaults. */
    void updateDefaultContainerConfig(UUID id, Map<String, Object> defaultContainerConfig);
    /** Set how many uploaded JARs to retain (null = unlimited). */
    void updateJarRetentionCount(UUID id, Integer jarRetentionCount);
    void delete(UUID id);
}

View File

@@ -0,0 +1,56 @@
package com.cameleer.server.core.runtime;
import java.util.List;
import java.util.Map;
import java.util.UUID;
/**
 * Application service for environment CRUD with basic invariant checks
 * (unique slug on create, retention bounds, protected "default" environment).
 */
public class EnvironmentService {
    private final EnvironmentRepository repo;

    public EnvironmentService(EnvironmentRepository repo) {
        this.repo = repo;
    }

    /** All configured environments. */
    public List<Environment> listAll() {
        return repo.findAll();
    }

    /** @throws IllegalArgumentException when the id is unknown */
    public Environment getById(UUID id) {
        return repo.findById(id)
                .orElseThrow(() -> new IllegalArgumentException("Environment not found: " + id));
    }

    /** @throws IllegalArgumentException when the slug is unknown */
    public Environment getBySlug(String slug) {
        return repo.findBySlug(slug)
                .orElseThrow(() -> new IllegalArgumentException("Environment not found: " + slug));
    }

    /** Creates a new environment; slugs must be unique. */
    public UUID create(String slug, String displayName, boolean production) {
        if (repo.findBySlug(slug).isPresent()) {
            throw new IllegalArgumentException("Environment with slug '" + slug + "' already exists");
        }
        return repo.create(slug, displayName, production);
    }

    public void update(UUID id, String displayName, boolean production, boolean enabled) {
        getById(id); // verify exists
        repo.update(id, displayName, production, enabled);
    }

    public void updateDefaultContainerConfig(UUID id, Map<String, Object> defaultContainerConfig) {
        getById(id); // verify exists
        repo.updateDefaultContainerConfig(id, defaultContainerConfig);
    }

    /** Retention must be &gt;= 1, or null meaning unlimited. */
    public void updateJarRetentionCount(UUID id, Integer jarRetentionCount) {
        getById(id); // verify exists
        boolean invalid = jarRetentionCount != null && jarRetentionCount < 1;
        if (invalid) {
            throw new IllegalArgumentException("Retention count must be at least 1 or null for unlimited");
        }
        repo.updateJarRetentionCount(id, jarRetentionCount);
    }

    /** The "default" environment is protected and cannot be removed. */
    public void delete(UUID id) {
        Environment existing = getById(id);
        if ("default".equals(existing.slug())) {
            throw new IllegalArgumentException("Cannot delete the default environment");
        }
        repo.delete(id);
    }
}

View File

@@ -0,0 +1,44 @@
package com.cameleer.server.core.runtime;
import java.util.List;
import java.util.Map;
/**
 * Fully-resolved container configuration (app, environment, and global defaults
 * already merged). Provides the unit conversions the Docker API expects.
 */
public record ResolvedContainerConfig(
        int memoryLimitMb,
        Integer memoryReserveMb,
        int cpuRequest,
        Integer cpuLimit,
        int appPort,
        List<Integer> exposedPorts,
        Map<String, String> customEnvVars,
        boolean stripPathPrefix,
        boolean sslOffloading,
        String routingMode,
        String routingDomain,
        String serverUrl,
        int replicas,
        String deploymentStrategy,
        boolean routeControlEnabled,
        boolean replayEnabled,
        String runtimeType,
        String customArgs,
        List<String> extraNetworks
) {
    private static final long BYTES_PER_MIB = 1024L * 1024L;

    /** Hard memory limit converted to bytes. */
    public long memoryLimitBytes() {
        return memoryLimitMb * BYTES_PER_MIB;
    }

    /** Soft memory reservation in bytes, or null when no reserve is configured. */
    public Long memoryReserveBytes() {
        if (memoryReserveMb == null) {
            return null;
        }
        return memoryReserveMb * BYTES_PER_MIB;
    }

    /** Convert cpuRequest (millicores) to Docker CPU shares (1024 shares = 1 core). */
    public int dockerCpuShares() {
        return (cpuRequest * 1024) / 1000;
    }

    /** Convert cpuLimit (millicores) to a Docker CPU quota in microseconds per 100 ms period. */
    public Long dockerCpuQuota() {
        if (cpuLimit == null) {
            return null;
        }
        return cpuLimit * 100L;
    }
}

View File

@@ -0,0 +1,3 @@
package com.cameleer.server.core.runtime;
/**
 * How app traffic is routed: by URL path prefix or by dedicated subdomain.
 * NOTE(review): constants are lowercase, presumably to match config/JSON string
 * values verbatim — confirm before renaming to conventional UPPER_CASE.
 */
public enum RoutingMode { path, subdomain }

View File

@@ -0,0 +1,77 @@
package com.cameleer.server.core.runtime;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
/**
 * Detects how an uploaded artifact should be run: Spring Boot JAR, Quarkus JAR,
 * plain-Java JAR (with a Main-Class), or a native binary.
 */
public final class RuntimeDetector {
    private RuntimeDetector() {}

    /**
     * @param runtimeType detected type, or null when detection fails
     * @param mainClass   Main-Class for plain-Java JARs; null otherwise
     */
    public record DetectionResult(RuntimeType runtimeType, String mainClass) {}

    /**
     * Detect the runtime type of a file (JAR or native binary).
     * Returns a result with null runtimeType if detection fails.
     */
    public static DetectionResult detect(Path file) {
        if (!Files.exists(file) || Files.isDirectory(file)) {
            return new DetectionResult(null, null);
        }
        // Check if it's a ZIP/JAR (starts with PK magic bytes)
        if (!isZipFile(file)) {
            // Non-ZIP file with content = native binary
            try {
                if (Files.size(file) > 0) {
                    return new DetectionResult(RuntimeType.NATIVE, null);
                }
            } catch (IOException e) {
                // fall through — unreadable file is treated as undetectable
            }
            return new DetectionResult(null, null);
        }
        // It's a JAR — read the manifest
        try (JarFile jar = new JarFile(file.toFile())) {
            Manifest manifest = jar.getManifest();
            if (manifest == null) {
                return new DetectionResult(null, null);
            }
            String mainClass = manifest.getMainAttributes().getValue("Main-Class");
            if (mainClass == null || mainClass.isBlank()) {
                return new DetectionResult(null, null);
            }
            // Spring Boot: any launcher in org.springframework.boot.loader
            if (mainClass.startsWith("org.springframework.boot.loader")) {
                return new DetectionResult(RuntimeType.SPRING_BOOT, null);
            }
            // Quarkus
            if (mainClass.equals("io.quarkus.bootstrap.runner.QuarkusEntryPoint")) {
                return new DetectionResult(RuntimeType.QUARKUS, null);
            }
            // Plain Java: has a Main-Class, not a known framework
            return new DetectionResult(RuntimeType.PLAIN_JAVA, mainClass);
        } catch (IOException e) {
            return new DetectionResult(null, null);
        }
    }

    /**
     * True when the file begins with the ZIP local-file-header magic "PK".
     * Uses readNBytes rather than a single read(): InputStream.read may legally
     * return fewer than the requested bytes, which could misclassify a JAR as a
     * native binary.
     */
    private static boolean isZipFile(Path file) {
        try (InputStream is = Files.newInputStream(file)) {
            byte[] magic = is.readNBytes(2);
            return magic.length == 2 && magic[0] == 'P' && magic[1] == 'K';
        } catch (IOException e) {
            return false;
        }
    }
}

View File

@@ -0,0 +1,20 @@
package com.cameleer.server.core.runtime;
import java.util.stream.Stream;
/**
 * Abstraction over the container runtime (Docker, per the CPU-shares/quota
 * conventions elsewhere in this package) used to run app instances.
 * Log-capture hooks are optional no-ops by default.
 */
public interface RuntimeOrchestrator {
    /** Whether a container runtime is available/configured on this server. */
    boolean isEnabled();
    /** Pull the latest version of a container image from the registry. */
    default void pullImage(String image) {}
    /** Create and start a container; returns the runtime's container id. */
    String startContainer(ContainerRequest request);
    void stopContainer(String containerId);
    void removeContainer(String containerId);
    /** Current state of the container (presumably ContainerStatus.notFound() for unknown ids — confirm in impls). */
    ContainerStatus getContainerStatus(String containerId);
    /** Container log lines; tailLines presumably limits to the most recent lines — confirm in impls. */
    Stream<String> getLogs(String containerId, int tailLines);
    /** Start streaming container logs to ClickHouse. */
    default void startLogCapture(String containerId, String instanceId, String appSlug, String envSlug, String tenantId) {}
    /** Stop log capture for a specific container (e.g., on die/oom). */
    default void stopLogCapture(String containerId) {}
}

View File

@@ -0,0 +1,27 @@
package com.cameleer.server.core.runtime;
/**
 * Supported application runtime flavors, detected from the artifact
 * (see RuntimeDetector) or set explicitly in container config.
 */
public enum RuntimeType {
    AUTO,
    SPRING_BOOT,
    QUARKUS,
    PLAIN_JAVA,
    NATIVE;

    /** Parse from containerConfig string value, case-insensitive. Returns null if unrecognized. */
    public static RuntimeType fromString(String value) {
        if (value == null || value.isBlank()) return AUTO;
        // Locale.ROOT makes lowering locale-independent — the default-locale overload
        // would turn "NATIVE" into an unrecognized "natıve" under tr-TR (dotless i).
        return switch (value.toLowerCase(java.util.Locale.ROOT).replace("-", "_")) {
            case "auto" -> AUTO;
            case "spring_boot" -> SPRING_BOOT;
            case "quarkus" -> QUARKUS;
            case "plain_java" -> PLAIN_JAVA;
            case "native" -> NATIVE;
            default -> null;
        };
    }

    /** Lowercase kebab-case for JSON serialization. */
    public String toConfigValue() {
        return name().toLowerCase(java.util.Locale.ROOT).replace("_", "-");
    }
}

View File

@@ -0,0 +1,36 @@
package com.cameleer.server.core.search;
/**
 * Aggregate execution statistics within a time window, compared against the
 * equivalent previous period (shifted back 24 h), plus a "today" total.
 */
public record ExecutionStats(
        long totalCount,
        long failedCount,
        long avgDurationMs,
        long p99LatencyMs,
        long activeCount,
        long totalToday,
        long prevTotalCount,
        long prevFailedCount,
        long prevAvgDurationMs,
        long prevP99LatencyMs,
        double slaCompliance) {

    /** Sentinel meaning "SLA compliance not computed". */
    private static final double SLA_UNSET = -1.0;

    /** Constructor without SLA compliance (backward-compatible, sets to -1). */
    public ExecutionStats(long total, long failed, long avgMs,
                          long p99Ms, long active, long today,
                          long prevTotal, long prevFailed,
                          long prevAvgMs, long prevP99Ms) {
        this(total, failed, avgMs, p99Ms, active, today,
                prevTotal, prevFailed, prevAvgMs, prevP99Ms, SLA_UNSET);
    }

    /** Return a copy with the given SLA compliance value. */
    public ExecutionStats withSlaCompliance(double sla) {
        return new ExecutionStats(totalCount, failedCount, avgDurationMs, p99LatencyMs,
                activeCount, totalToday, prevTotalCount, prevFailedCount,
                prevAvgDurationMs, prevP99LatencyMs, sla);
    }
}

View File

@@ -0,0 +1,40 @@
package com.cameleer.server.core.search;
import java.time.Instant;
import java.util.Map;
/**
 * Lightweight summary of a route execution for search result listings.
 * <p>
 * Contains only the fields needed for the list view -- not the full processor
 * arrays or exchange snapshot data.
 *
 * @param executionId unique execution identifier
 * @param routeId Camel route ID
 * @param instanceId agent instance that reported the execution
 * @param applicationId owning application's ID
 * @param status execution status (COMPLETED, FAILED, RUNNING)
 * @param startTime execution start time
 * @param endTime execution end time (may be null for RUNNING)
 * @param durationMs execution duration in milliseconds
 * @param correlationId correlation ID for cross-instance tracing
 * @param errorMessage error message (empty string if no error)
 * @param diagramContentHash content hash linking to the active route diagram version
 * @param highlight search-match highlight snippet (presumably produced by the search index -- confirm)
 * @param attributes custom key/value attributes attached to the execution
 * @param hasTraceData whether detailed trace data exists for this execution
 * @param isReplay whether this execution was a replay of an earlier one
 */
public record ExecutionSummary(
    String executionId,
    String routeId,
    String instanceId,
    String applicationId,
    String status,
    Instant startTime,
    Instant endTime,
    long durationMs,
    String correlationId,
    String errorMessage,
    String diagramContentHash,
    String highlight,
    Map<String, String> attributes,
    boolean hasTraceData,
    boolean isReplay
) {
}

View File

@@ -0,0 +1,48 @@
package com.cameleer.server.core.search;
import java.time.Instant;
import java.util.List;
/**
 * Immutable search criteria for querying application logs.
 * <p>
 * The compact constructor normalizes pagination and sort inputs: limit is clamped
 * to [1, 500] (defaulting to 100), sort is canonicalized to exactly "asc" or
 * "desc", and levels is defensively copied (never null).
 *
 * @param q free-text search across message and stack trace
 * @param levels log level filter (e.g. ["WARN","ERROR"])
 * @param application application ID filter (nullable = all apps)
 * @param instanceId agent instance ID filter
 * @param exchangeId Camel exchange ID filter
 * @param logger logger name substring filter
 * @param environment optional environment filter (e.g. "dev", "staging", "prod")
 * @param source optional source filter: "app" or "agent"
 * @param from inclusive start of time range (required)
 * @param to inclusive end of time range (required)
 * @param cursor ISO timestamp cursor for keyset pagination
 * @param limit page size (1-500, default 100)
 * @param sort sort direction: "asc" or "desc" (default "desc")
 */
public record LogSearchRequest(
    String q,
    List<String> levels,
    String application,
    String instanceId,
    String exchangeId,
    String logger,
    String environment,
    String source,
    Instant from,
    Instant to,
    String cursor,
    int limit,
    String sort
) {
    private static final int DEFAULT_LIMIT = 100;
    private static final int MAX_LIMIT = 500;

    public LogSearchRequest {
        if (limit <= 0) limit = DEFAULT_LIMIT;
        if (limit > MAX_LIMIT) limit = MAX_LIMIT;
        // Canonicalize so consumers always see exactly "asc" or "desc"
        // (previously "ASC" slipped through unnormalized).
        sort = "asc".equalsIgnoreCase(sort) ? "asc" : "desc";
        // Defensive copy keeps the record immutable if the caller mutates its list.
        levels = levels == null ? List.of() : List.copyOf(levels);
    }
}

View File

@@ -0,0 +1,21 @@
package com.cameleer.server.core.search;
import com.cameleer.server.core.storage.LogEntryResult;
import java.util.List;
import java.util.Map;
/**
 * Log search result with cursor-based pagination and level aggregation.
 *
 * @param data matching log entries for the current page
 * @param nextCursor ISO timestamp cursor for the next page (null if no more);
 *                   feed back into {@code LogSearchRequest#cursor}
 * @param hasMore whether more results exist beyond this page
 * @param levelCounts count of matching logs per level (unaffected by level filter)
 */
public record LogSearchResponse(
    List<LogEntryResult> data,
    String nextCursor,
    boolean hasMore,
    Map<String, Long> levelCounts
) {}

View File

@@ -0,0 +1,108 @@
package com.cameleer.server.core.search;
import java.time.Instant;
import java.util.List;
/**
 * Immutable search criteria for querying route executions.
 * <p>
 * All filter fields are nullable/optional. When null, the filter is not applied.
 * The compact constructor validates and normalizes pagination and sorting: limit
 * is clamped to [1, 500] (default 50), offset floored at 0, sortField restricted
 * to a whitelist, and sortDir canonicalized to exactly "asc" or "desc".
 *
 * @param status execution status filter (COMPLETED, FAILED, RUNNING)
 * @param timeFrom inclusive start of time range
 * @param timeTo exclusive end of time range
 * @param durationMin minimum duration in milliseconds (inclusive)
 * @param durationMax maximum duration in milliseconds (inclusive)
 * @param correlationId exact correlation ID match
 * @param text global full-text search across all text fields
 * @param textInBody full-text search scoped to exchange bodies
 * @param textInHeaders full-text search scoped to exchange headers
 * @param textInErrors full-text search scoped to error messages and stack traces
 * @param routeId exact match on route_id
 * @param instanceId exact match on instance_id
 * @param processorType matches processor_types array via has()
 * @param applicationId application ID filter (resolved to instanceIds server-side)
 * @param instanceIds list of instance IDs (resolved from application, used for IN clause)
 * @param offset pagination offset (0-based)
 * @param limit page size (default 50, max 500)
 * @param sortField column to sort by (default: startTime)
 * @param sortDir sort direction: asc or desc (default: desc)
 * @param environment optional environment filter (e.g. "dev", "staging", "prod")
 */
public record SearchRequest(
    String status,
    Instant timeFrom,
    Instant timeTo,
    Long durationMin,
    Long durationMax,
    String correlationId,
    String text,
    String textInBody,
    String textInHeaders,
    String textInErrors,
    String routeId,
    String instanceId,
    String processorType,
    String applicationId,
    List<String> instanceIds,
    int offset,
    int limit,
    String sortField,
    String sortDir,
    String environment
) {
    private static final int DEFAULT_LIMIT = 50;
    private static final int MAX_LIMIT = 500;
    private static final java.util.Set<String> ALLOWED_SORT_FIELDS = java.util.Set.of(
        "startTime", "status", "instanceId", "routeId", "correlationId",
        "durationMs", "executionId", "applicationId"
    );
    /** Maps camelCase API sort field names to storage column names. */
    private static final java.util.Map<String, String> SORT_FIELD_TO_COLUMN = java.util.Map.ofEntries(
        java.util.Map.entry("startTime", "start_time"),
        java.util.Map.entry("durationMs", "duration_ms"),
        java.util.Map.entry("status", "status.keyword"),
        java.util.Map.entry("instanceId", "instance_id.keyword"),
        java.util.Map.entry("routeId", "route_id.keyword"),
        java.util.Map.entry("correlationId", "correlation_id.keyword"),
        java.util.Map.entry("executionId", "execution_id.keyword"),
        java.util.Map.entry("applicationId", "application_id.keyword")
    );

    public SearchRequest {
        if (limit <= 0) limit = DEFAULT_LIMIT;
        if (limit > MAX_LIMIT) limit = MAX_LIMIT;
        if (offset < 0) offset = 0;
        if (sortField == null || !ALLOWED_SORT_FIELDS.contains(sortField)) sortField = "startTime";
        // Canonicalize so consumers always see exactly "asc" or "desc"
        // (previously "ASC" slipped through unnormalized).
        sortDir = "asc".equalsIgnoreCase(sortDir) ? "asc" : "desc";
    }

    /** Returns the snake_case column name for ORDER BY. */
    public String sortColumn() {
        return SORT_FIELD_TO_COLUMN.getOrDefault(sortField, "start_time");
    }

    /** Create a copy with resolved instanceIds (from application ID lookup). */
    public SearchRequest withInstanceIds(List<String> resolvedInstanceIds) {
        return new SearchRequest(
            status, timeFrom, timeTo, durationMin, durationMax, correlationId,
            text, textInBody, textInHeaders, textInErrors,
            routeId, instanceId, processorType, applicationId, resolvedInstanceIds,
            offset, limit, sortField, sortDir, environment
        );
    }

    /** Create a copy with the given environment filter. */
    public SearchRequest withEnvironment(String env) {
        return new SearchRequest(
            status, timeFrom, timeTo, durationMin, durationMax, correlationId,
            text, textInBody, textInHeaders, textInErrors,
            routeId, instanceId, processorType, applicationId, instanceIds,
            offset, limit, sortField, sortDir, env
        );
    }
}

View File

@@ -0,0 +1,27 @@
package com.cameleer.server.core.search;
import java.util.List;
/**
 * Paginated result envelope for search queries.
 *
 * @param data the result items for the current page
 * @param total total number of matching items across all pages
 * @param offset the offset used for this page
 * @param limit the limit used for this page
 * @param <T> the type of result items
 */
public record SearchResult<T>(
    List<T> data,
    long total,
    int offset,
    int limit
) {
    /**
     * Create an empty result with the given pagination parameters.
     * Used when a filter resolves to nothing without running the query.
     */
    public static <T> SearchResult<T> empty(int offset, int limit) {
        return new SearchResult<>(List.of(), 0, offset, limit);
    }
}

View File

@@ -0,0 +1,155 @@
package com.cameleer.server.core.search;
import com.cameleer.server.core.storage.SearchIndex;
import com.cameleer.server.core.storage.StatsStore;
import java.time.Instant;
import java.util.List;
import java.util.Map;
/**
 * Facade over the search index and stats store for execution queries,
 * aggregate statistics, time series, SLA metrics, and error analytics.
 * <p>
 * Pure delegation: every method forwards to {@link SearchIndex} or
 * {@link StatsStore}. Methods come in pairs — a variant without an
 * {@code environment} parameter (passes null = all environments) and one
 * with an explicit environment filter.
 */
public class SearchService {
    private final SearchIndex searchIndex;
    private final StatsStore statsStore;

    public SearchService(SearchIndex searchIndex, StatsStore statsStore) {
        this.searchIndex = searchIndex;
        this.statsStore = statsStore;
    }

    // ── Execution search ─────────────────────────────────────────────────

    /** Paginated execution search (see SearchRequest for filters). */
    public SearchResult<ExecutionSummary> search(SearchRequest request) {
        return searchIndex.search(request);
    }

    /** Count of executions matching the request, without fetching rows. */
    public long count(SearchRequest request) {
        return searchIndex.count(request);
    }

    /** All distinct custom attribute keys seen across indexed executions. */
    public List<String> distinctAttributeKeys() {
        return searchIndex.distinctAttributeKeys();
    }

    // ── Aggregate stats (global / per-app / per-route) ───────────────────

    public ExecutionStats stats(Instant from, Instant to) {
        return statsStore.stats(from, to, null);
    }

    public ExecutionStats stats(Instant from, Instant to, String environment) {
        return statsStore.stats(from, to, environment);
    }

    public ExecutionStats statsForApp(Instant from, Instant to, String applicationId) {
        return statsStore.statsForApp(from, to, applicationId, null);
    }

    public ExecutionStats statsForApp(Instant from, Instant to, String applicationId, String environment) {
        return statsStore.statsForApp(from, to, applicationId, environment);
    }

    public ExecutionStats stats(Instant from, Instant to, String routeId, List<String> agentIds) {
        return statsStore.statsForRoute(from, to, routeId, agentIds, null);
    }

    public ExecutionStats stats(Instant from, Instant to, String routeId, List<String> agentIds, String environment) {
        return statsStore.statsForRoute(from, to, routeId, agentIds, environment);
    }

    // ── Time series (global / per-app / per-route) ───────────────────────

    public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount) {
        return statsStore.timeseries(from, to, bucketCount, null);
    }

    public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount, String environment) {
        return statsStore.timeseries(from, to, bucketCount, environment);
    }

    public StatsTimeseries timeseriesForApp(Instant from, Instant to, int bucketCount, String applicationId) {
        return statsStore.timeseriesForApp(from, to, bucketCount, applicationId, null);
    }

    public StatsTimeseries timeseriesForApp(Instant from, Instant to, int bucketCount, String applicationId, String environment) {
        return statsStore.timeseriesForApp(from, to, bucketCount, applicationId, environment);
    }

    public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount,
                                      String routeId, List<String> agentIds) {
        return statsStore.timeseriesForRoute(from, to, bucketCount, routeId, agentIds, null);
    }

    public StatsTimeseries timeseries(Instant from, Instant to, int bucketCount,
                                      String routeId, List<String> agentIds, String environment) {
        return statsStore.timeseriesForRoute(from, to, bucketCount, routeId, agentIds, environment);
    }

    // ── Dashboard-specific queries ────────────────────────────────────────

    public Map<String, StatsTimeseries> timeseriesGroupedByApp(Instant from, Instant to, int bucketCount) {
        return statsStore.timeseriesGroupedByApp(from, to, bucketCount, null);
    }

    public Map<String, StatsTimeseries> timeseriesGroupedByApp(Instant from, Instant to, int bucketCount, String environment) {
        return statsStore.timeseriesGroupedByApp(from, to, bucketCount, environment);
    }

    public Map<String, StatsTimeseries> timeseriesGroupedByRoute(Instant from, Instant to,
                                                                 int bucketCount, String applicationId) {
        return statsStore.timeseriesGroupedByRoute(from, to, bucketCount, applicationId, null);
    }

    public Map<String, StatsTimeseries> timeseriesGroupedByRoute(Instant from, Instant to,
                                                                 int bucketCount, String applicationId, String environment) {
        return statsStore.timeseriesGroupedByRoute(from, to, bucketCount, applicationId, environment);
    }

    public double slaCompliance(Instant from, Instant to, int thresholdMs,
                                String applicationId, String routeId) {
        return statsStore.slaCompliance(from, to, thresholdMs, applicationId, routeId, null);
    }

    public double slaCompliance(Instant from, Instant to, int thresholdMs,
                                String applicationId, String routeId, String environment) {
        return statsStore.slaCompliance(from, to, thresholdMs, applicationId, routeId, environment);
    }

    public Map<String, long[]> slaCountsByApp(Instant from, Instant to, int defaultThresholdMs) {
        return statsStore.slaCountsByApp(from, to, defaultThresholdMs, null);
    }

    public Map<String, long[]> slaCountsByApp(Instant from, Instant to, int defaultThresholdMs, String environment) {
        return statsStore.slaCountsByApp(from, to, defaultThresholdMs, environment);
    }

    public Map<String, long[]> slaCountsByRoute(Instant from, Instant to,
                                                String applicationId, int thresholdMs) {
        return statsStore.slaCountsByRoute(from, to, applicationId, thresholdMs, null);
    }

    public Map<String, long[]> slaCountsByRoute(Instant from, Instant to,
                                                String applicationId, int thresholdMs, String environment) {
        return statsStore.slaCountsByRoute(from, to, applicationId, thresholdMs, environment);
    }

    public List<TopError> topErrors(Instant from, Instant to, String applicationId,
                                    String routeId, int limit) {
        return statsStore.topErrors(from, to, applicationId, routeId, limit, null);
    }

    public List<TopError> topErrors(Instant from, Instant to, String applicationId,
                                    String routeId, int limit, String environment) {
        return statsStore.topErrors(from, to, applicationId, routeId, limit, environment);
    }

    public int activeErrorTypes(Instant from, Instant to, String applicationId) {
        return statsStore.activeErrorTypes(from, to, applicationId, null);
    }

    public int activeErrorTypes(Instant from, Instant to, String applicationId, String environment) {
        return statsStore.activeErrorTypes(from, to, applicationId, environment);
    }

    public List<StatsStore.PunchcardCell> punchcard(Instant from, Instant to, String applicationId) {
        return statsStore.punchcard(from, to, applicationId, null);
    }

    public List<StatsStore.PunchcardCell> punchcard(Instant from, Instant to, String applicationId, String environment) {
        return statsStore.punchcard(from, to, applicationId, environment);
    }
}

View File

@@ -0,0 +1,17 @@
package com.cameleer.server.core.search;
import java.time.Instant;
import java.util.List;
/** Bucketed execution metrics over a time range. */
public record StatsTimeseries(
    List<TimeseriesBucket> buckets
) {
    /**
     * One time bucket.
     *
     * @param time          bucket start time
     * @param totalCount    executions counted in the bucket
     * @param failedCount   failed executions in the bucket
     * @param avgDurationMs average duration in milliseconds
     * @param p99DurationMs 99th-percentile duration in milliseconds
     * @param activeCount   presumably executions still running in the bucket — confirm against StatsStore
     */
    public record TimeseriesBucket(
        Instant time,
        long totalCount,
        long failedCount,
        long avgDurationMs,
        long p99DurationMs,
        long activeCount
    ) {}
}

View File

@@ -0,0 +1,12 @@
package com.cameleer.server.core.search;
import java.time.Instant;
/**
 * One entry in a "top errors" ranking (see StatsStore#topErrors).
 *
 * @param errorType   exception/error type identifier
 * @param routeId     route the error occurred in
 * @param processorId processor the error occurred at
 * @param count       occurrences in the queried window
 * @param velocity    presumably occurrences per unit time — confirm units in StatsStore
 * @param trend       trend indicator (values defined by StatsStore — TODO confirm)
 * @param lastSeen    timestamp of the most recent occurrence
 */
public record TopError(
    String errorType,
    String routeId,
    String processorId,
    long count,
    double velocity,
    String trend,
    Instant lastSeen) {}

View File

@@ -0,0 +1,28 @@
package com.cameleer.server.core.security;
/**
 * Service for Ed25519 digital signatures.
 * <p>
 * Used to sign configuration and command payloads pushed to agents via SSE,
 * allowing agents to verify the authenticity of received data.
 * The keypair is ephemeral (generated at startup); agents receive the public
 * key during registration.
 *
 * @implSpec implementations should be safe for concurrent use, since payloads
 *           may be signed from multiple request threads — confirm per impl.
 */
public interface Ed25519SigningService {
    /**
     * Signs the given payload using the server's Ed25519 private key.
     *
     * @param payload the string payload to sign
     * @return Base64-encoded signature bytes
     */
    String sign(String payload);
    /**
     * Returns the server's Ed25519 public key as a Base64-encoded string
     * (X.509 SubjectPublicKeyInfo DER format).
     *
     * @return Base64-encoded public key
     */
    String getPublicKeyBase64();
}

View File

@@ -0,0 +1,15 @@
package com.cameleer.server.core.security;
/**
 * Thrown when a JWT token is invalid, expired, or of the wrong type
 * (see JwtService#validateAccessToken / #validateRefreshToken).
 */
public class InvalidTokenException extends RuntimeException {
    /** @param message human-readable reason the token was rejected */
    public InvalidTokenException(String message) {
        super(message);
    }
    /**
     * @param message human-readable reason the token was rejected
     * @param cause   underlying parsing/verification failure
     */
    public InvalidTokenException(String message, Throwable cause) {
        super(message, cause);
    }
}

View File

@@ -0,0 +1,76 @@
package com.cameleer.server.core.security;
import java.time.Instant;
import java.util.List;
/**
 * Service for creating and validating JSON Web Tokens (JWT).
 * <p>
 * Access tokens are short-lived (default 1 hour) and used for API authentication.
 * Refresh tokens are longer-lived (default 7 days) and used to obtain new access tokens.
 * Tokens carry a {@code roles} claim for role-based access control.
 */
public interface JwtService {
    /**
     * Validated JWT payload.
     *
     * @param subject the {@code sub} claim (agent ID or {@code user:<username>})
     * @param application the {@code group} claim (application name)
     * @param environment the {@code env} claim
     * @param roles the {@code roles} claim (e.g. {@code ["AGENT"]}, {@code ["ADMIN"]})
     * @param issuedAt the {@code iat} claim (token issue time)
     */
    record JwtValidationResult(String subject, String application, String environment, List<String> roles, Instant issuedAt) {
        /** Backwards-compatible constructor (issuedAt defaults to null). */
        public JwtValidationResult(String subject, String application, String environment, List<String> roles) {
            this(subject, application, environment, roles, null);
        }
    }
    /**
     * Creates a signed access JWT with the given subject, application, and roles.
     */
    String createAccessToken(String subject, String application, String environment, List<String> roles);
    /**
     * Creates a signed refresh JWT with the given subject, application, and roles.
     */
    String createRefreshToken(String subject, String application, String environment, List<String> roles);
    /**
     * Validates an access token and returns the full validation result.
     *
     * @throws InvalidTokenException if the token is invalid, expired, or not an access token
     */
    JwtValidationResult validateAccessToken(String token);
    /**
     * Validates a refresh token and returns the full validation result.
     *
     * @throws InvalidTokenException if the token is invalid, expired, or not a refresh token
     */
    JwtValidationResult validateRefreshToken(String token);
    // --- Backward-compatible defaults (delegate to role-aware methods) ---
    /** Access token in the "default" environment with the given roles. */
    default String createAccessToken(String subject, String application, List<String> roles) {
        return createAccessToken(subject, application, "default", roles);
    }
    /** Access token in the "default" environment with no roles. */
    default String createAccessToken(String subject, String application) {
        return createAccessToken(subject, application, "default", List.of());
    }
    /** Refresh token in the "default" environment with the given roles. */
    default String createRefreshToken(String subject, String application, List<String> roles) {
        return createRefreshToken(subject, application, "default", roles);
    }
    /** Refresh token in the "default" environment with no roles. */
    default String createRefreshToken(String subject, String application) {
        return createRefreshToken(subject, application, "default", List.of());
    }
    /**
     * Validates an access token and returns only its subject (the agent id).
     *
     * @throws InvalidTokenException if the token is invalid, expired, or not an access token
     */
    default String validateAndExtractAgentId(String token) {
        return validateAccessToken(token).subject();
    }
}

View File

@@ -0,0 +1,36 @@
package com.cameleer.server.core.security;
import java.util.List;
/**
 * Persisted OIDC provider configuration.
 *
 * @param enabled whether OIDC login is active
 * @param issuerUri OIDC discovery issuer URL
 * @param clientId OAuth2 client ID
 * @param clientSecret OAuth2 client secret (stored server-side only)
 * @param rolesClaim dot-separated path to roles in the id_token (e.g. {@code realm_access.roles})
 * @param defaultRoles fallback roles for new users with no OIDC role claim
 * @param autoSignup whether new OIDC users are automatically created on first login
 * @param displayNameClaim dot-separated path to display name in the id_token (e.g. {@code name}, {@code preferred_username})
 * @param userIdClaim dot-separated path to user identifier in the id_token (default {@code sub}); e.g. {@code email}, {@code preferred_username}
 * @param audience RFC 8707 resource indicator — sent to SPA as {@code resource} param and used for access_token {@code aud} validation
 * @param additionalScopes extra scopes the SPA should request beyond {@code openid email profile}
 */
public record OidcConfig(
    boolean enabled,
    String issuerUri,
    String clientId,
    String clientSecret,
    String rolesClaim,
    List<String> defaultRoles,
    boolean autoSignup,
    String displayNameClaim,
    String userIdClaim,
    String audience,
    List<String> additionalScopes
) {
    /**
     * Configuration used when no OIDC provider has been set up:
     * login disabled, VIEWER fallback role, auto-signup on, default claim paths.
     */
    public static OidcConfig disabled() {
        return new OidcConfig(false, "", "", "", "roles", List.of("VIEWER"), true, "name", "sub", "", List.of());
    }
}

View File

@@ -0,0 +1,16 @@
package com.cameleer.server.core.security;
import java.util.Optional;
/**
 * Persistence interface for OIDC provider configuration.
 * Only one configuration is active at a time (singleton row).
 */
public interface OidcConfigRepository {
    /** The stored configuration, or empty when none has been saved. */
    Optional<OidcConfig> find();
    /** Create or replace the singleton configuration row. */
    void save(OidcConfig config);
    /** Remove the stored configuration (OIDC falls back to disabled). */
    void delete();
}

Some files were not shown because too many files have changed in this diff Show More