diff --git a/internal/api/admin_handlers.go b/internal/api/admin_handlers.go index b4eb460..a4f4f67 100644 --- a/internal/api/admin_handlers.go +++ b/internal/api/admin_handlers.go @@ -8,6 +8,8 @@ import ( "strconv" "strings" "time" + + "github.com/RandomCodeSpace/otelcontext/internal/httpconst" ) // handleGetStats handles GET /api/stats @@ -18,7 +20,7 @@ func (s *Server) handleGetStats(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(stats) } @@ -50,7 +52,7 @@ func (s *Server) handlePurge(w http.ResponseWriter, r *http.Request) { slog.Info("Admin purge completed", "days", days, "logs_purged", logsDeleted, "traces_purged", tracesDeleted) - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(map[string]any{ "logs_purged": logsDeleted, "traces_purged": tracesDeleted, @@ -65,7 +67,7 @@ func (s *Server) handleVacuum(w http.ResponseWriter, _ *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(map[string]string{"status": "vacuumed"}) } @@ -110,7 +112,7 @@ func (s *Server) handleDropFTS(w http.ResponseWriter, r *http.Request) { } slog.Info("drop_fts completed", "elapsed_ms", elapsed.Milliseconds(), "reclaimed_bytes", reclaimed) - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(map[string]any{ "reclaimed_bytes": reclaimed, "elapsed_ms": elapsed.Milliseconds(), diff --git a/internal/api/log_handlers.go b/internal/api/log_handlers.go index 55ba4c9..2cbc9d5 100644 --- 
a/internal/api/log_handlers.go +++ b/internal/api/log_handlers.go @@ -8,6 +8,7 @@ import ( "time" "github.com/RandomCodeSpace/otelcontext/internal/api/views" + "github.com/RandomCodeSpace/otelcontext/internal/httpconst" "github.com/RandomCodeSpace/otelcontext/internal/realtime" "github.com/RandomCodeSpace/otelcontext/internal/storage" ) @@ -67,7 +68,7 @@ func (s *Server) handleGetLogs(w http.ResponseWriter, r *http.Request) { return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(map[string]any{ "data": views.LogsFromModels(logs), "total": total, @@ -96,7 +97,7 @@ func (s *Server) handleGetLogContext(w http.ResponseWriter, r *http.Request) { return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(views.LogsFromModels(logs)) } @@ -120,7 +121,7 @@ func (s *Server) handleGetLogInsight(w http.ResponseWriter, r *http.Request) { return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(map[string]string{"insight": string(l.AIInsight)}) } diff --git a/internal/api/metrics_handlers.go b/internal/api/metrics_handlers.go index 371f76e..6b38ab6 100644 --- a/internal/api/metrics_handlers.go +++ b/internal/api/metrics_handlers.go @@ -7,6 +7,7 @@ import ( "time" "github.com/RandomCodeSpace/otelcontext/internal/api/views" + "github.com/RandomCodeSpace/otelcontext/internal/httpconst" ) // handleGetTrafficMetrics handles GET /api/metrics/traffic @@ -35,7 +36,7 @@ func (s *Server) handleGetTrafficMetrics(w http.ResponseWriter, r *http.Request) return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(points) } @@ -64,7 +65,7 @@ func (s *Server) 
handleGetLatencyHeatmap(w http.ResponseWriter, r *http.Request) return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(points) } @@ -94,7 +95,7 @@ func (s *Server) handleGetDashboardStats(w http.ResponseWriter, r *http.Request) return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(views.DashboardStatsFromModel(stats)) } @@ -121,7 +122,7 @@ func (s *Server) handleGetServiceMapMetrics(w http.ResponseWriter, r *http.Reque return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(views.ServiceMapMetricsFromModel(metrics)) } @@ -149,7 +150,7 @@ func (s *Server) handleGetMetricBuckets(w http.ResponseWriter, r *http.Request) return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(views.MetricBucketsFromModels(buckets)) } @@ -164,7 +165,7 @@ func (s *Server) handleGetMetricNames(w http.ResponseWriter, r *http.Request) { return } - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(names) } @@ -186,6 +187,6 @@ func (s *Server) handleGetServices(w http.ResponseWriter, r *http.Request) { if services == nil { services = []string{} } - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) _ = json.NewEncoder(w).Encode(services) } diff --git a/internal/graphrag/drain_test.go b/internal/graphrag/drain_test.go index 4482406..bd30a35 100644 --- a/internal/graphrag/drain_test.go +++ b/internal/graphrag/drain_test.go @@ -147,7 +147,7 @@ func TestConcurrentMatchRaceFree(t *testing.T) { 
defer wg.Done() for i := 0; i < per; i++ { line := fmt.Sprintf("worker %d processed request id=%d from 10.0.0.%d", wid, i, i%250) - if tpl := d.Match(line, ts); tpl == nil { + if d.Match(line, ts) == nil { t.Errorf("nil template") return } diff --git a/internal/httpconst/httpconst.go b/internal/httpconst/httpconst.go new file mode 100644 index 0000000..9d28c1f --- /dev/null +++ b/internal/httpconst/httpconst.go @@ -0,0 +1,13 @@ +// Package httpconst centralises HTTP header names and content-type strings +// shared by the API and MCP handlers so the same literal isn't duplicated +// across packages (the OTLP-HTTP receiver keeps its own local const). +package httpconst + +const ( + // HeaderContentType is the canonical HTTP Content-Type header name. + HeaderContentType = "Content-Type" + + // ContentTypeJSON is the application/json content type used by every JSON + // response on the API and MCP surface. + ContentTypeJSON = "application/json" +) diff --git a/internal/ingest/otlp_http.go b/internal/ingest/otlp_http.go index 7d7008f..d9c6fce 100644 --- a/internal/ingest/otlp_http.go +++ b/internal/ingest/otlp_http.go @@ -28,6 +28,11 @@ import ( // downstream a chance to drain. const defaultRetryAfterSeconds = 1 +// headerContentType is the canonical HTTP Content-Type header name. Local +// const so the OTLP-HTTP receiver compiles without pulling in a shared +// helper for a one-line literal. +const headerContentType = "Content-Type" //nolint:goconst // single literal; Sonar S1192 satisfied via const + // withTenantFromHTTP attaches a tenant ID from the X-Tenant-ID header (if any) // to the request context before delegating to the gRPC Export methods. // Uses the shared storage.WithTenantContext helper so ingest and read paths @@ -269,7 +274,7 @@ func (h *HTTPHandler) readBody(r *http.Request) ([]byte, error) { // unmarshal decodes the body based on Content-Type header. 
func (h *HTTPHandler) unmarshal(r *http.Request, body []byte, msg proto.Message) error { - ct := r.Header.Get("Content-Type") + ct := r.Header.Get(headerContentType) switch ct { case contentTypeProtobuf, "": if err := proto.Unmarshal(body, msg); err != nil { @@ -287,9 +292,9 @@ func (h *HTTPHandler) unmarshal(r *http.Request, body []byte, msg proto.Message) // writeResponse marshals and writes the OTLP response. func (h *HTTPHandler) writeResponse(w http.ResponseWriter, r *http.Request, msg proto.Message) { - ct := r.Header.Get("Content-Type") + ct := r.Header.Get(headerContentType) if ct == contentTypeJSON { - w.Header().Set("Content-Type", contentTypeJSON) + w.Header().Set(headerContentType, contentTypeJSON) data, err := protojson.Marshal(msg) if err != nil { writeOTLPError(w, http.StatusInternalServerError, "failed to marshal response") @@ -298,7 +303,7 @@ func (h *HTTPHandler) writeResponse(w http.ResponseWriter, r *http.Request, msg w.WriteHeader(http.StatusOK) _, _ = w.Write(data) } else { - w.Header().Set("Content-Type", contentTypeProtobuf) + w.Header().Set(headerContentType, contentTypeProtobuf) data, err := proto.Marshal(msg) if err != nil { writeOTLPError(w, http.StatusInternalServerError, "failed to marshal response") @@ -321,7 +326,7 @@ func writeOTLPError(w http.ResponseWriter, statusCode int, msg string) { http.Error(w, msg, statusCode) return } - w.Header().Set("Content-Type", contentTypeProtobuf) + w.Header().Set(headerContentType, contentTypeProtobuf) w.WriteHeader(statusCode) _, _ = w.Write(data) } diff --git a/internal/ingest/otlp_http_backpressure_test.go b/internal/ingest/otlp_http_backpressure_test.go index 3f509b0..30dddcb 100644 --- a/internal/ingest/otlp_http_backpressure_test.go +++ b/internal/ingest/otlp_http_backpressure_test.go @@ -130,7 +130,7 @@ func TestHTTPBackpressure_TracesReturns429WithRetryAfter(t *testing.T) { if rec.Code != http.StatusTooManyRequests { t.Fatalf("want 429, got %d (body=%q)", rec.Code, rec.Body.String()) } - if 
got := rec.Header().Get("Retry-After"); got == "" { + if rec.Header().Get("Retry-After") == "" { t.Fatal("Retry-After header missing on 429 response") } if ct := rec.Header().Get("Content-Type"); ct != contentTypeProtobuf { diff --git a/internal/mcp/server.go b/internal/mcp/server.go index 098271c..b1feb68 100644 --- a/internal/mcp/server.go +++ b/internal/mcp/server.go @@ -14,6 +14,7 @@ import ( "github.com/RandomCodeSpace/central-ops/pkg/httputil" "github.com/RandomCodeSpace/otelcontext/internal/graph" "github.com/RandomCodeSpace/otelcontext/internal/graphrag" + "github.com/RandomCodeSpace/otelcontext/internal/httpconst" "github.com/RandomCodeSpace/otelcontext/internal/storage" "github.com/RandomCodeSpace/otelcontext/internal/telemetry" "github.com/RandomCodeSpace/otelcontext/internal/vectordb" @@ -209,7 +210,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { // handleRPC processes JSON-RPC 2.0 requests. func (s *Server) handleRPC(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) // 1 MB cap if err != nil { @@ -321,7 +322,7 @@ func (s *Server) handleRPC(w http.ResponseWriter, r *http.Request) { case "resources/list": result = map[string]any{ "resources": []map[string]any{ - {"uri": "OtelContext://system/graph", "name": "System Graph", "mimeType": "application/json"}, + {"uri": "OtelContext://system/graph", "name": "System Graph", "mimeType": httpconst.ContentTypeJSON}, {"uri": "OtelContext://metrics/prometheus", "name": "Prometheus Metrics", "mimeType": "text/plain"}, }, } @@ -406,7 +407,7 @@ func writeSSE(w http.ResponseWriter, f http.Flusher, event, data string) { // writeError writes a JSON-RPC error response. 
func writeError(w http.ResponseWriter, id any, code int, msg string) { - w.Header().Set("Content-Type", "application/json") + w.Header().Set(httpconst.HeaderContentType, httpconst.ContentTypeJSON) resp := JSONRPCResponse{ JSONRPC: "2.0", ID: id, diff --git a/internal/mcp/tools.go b/internal/mcp/tools.go index 6385566..630b1a8 100644 --- a/internal/mcp/tools.go +++ b/internal/mcp/tools.go @@ -7,9 +7,17 @@ import ( "time" "github.com/RandomCodeSpace/otelcontext/internal/graphrag" + "github.com/RandomCodeSpace/otelcontext/internal/httpconst" "github.com/RandomCodeSpace/otelcontext/internal/storage" ) +const ( + errSvcGraphNotInit = "service graph not yet initialized" + errGraphRAGNotInit = "GraphRAG not initialized" + errServiceRequired = "service is required" + resourceURIPrefix = "OtelContext://" +) + // toolDefs is the canonical list of all tools exposed by the OtelContext MCP server. var toolDefs = []Tool{ { @@ -357,7 +365,7 @@ func (s *Server) toolGetSystemGraph(ctx context.Context, _ map[string]any) ToolC return textResult(string(data)) } if s.svcGraph == nil { - return errorResult("service graph not yet initialized") + return errorResult(errSvcGraphNotInit) } snap := s.svcGraph.Snapshot() data, err := json.MarshalIndent(snap, "", " ") @@ -388,7 +396,7 @@ func (s *Server) toolGetServiceHealth(ctx context.Context, args map[string]any) return textResult(fmt.Sprintf("service %q not found in the current tenant window", svcName)) } if s.svcGraph == nil { - return errorResult("service graph not yet initialized") + return errorResult(errSvcGraphNotInit) } snap := s.svcGraph.Snapshot() node, ok := snap.Nodes[svcName] @@ -485,7 +493,7 @@ func (s *Server) toolSearchLogs(ctx context.Context, args map[string]any) ToolCa if err != nil { return errorResult(fmt.Sprintf("failed to marshal search results: %v", err)) } - return resourceResult("OtelContext://logs/search", "application/json", string(data)) + return resourceResult(resourceURIPrefix+"logs/search", 
httpconst.ContentTypeJSON, string(data)) } func (s *Server) toolTailLogs(ctx context.Context, args map[string]any) ToolCallResult { @@ -513,7 +521,7 @@ func (s *Server) toolTailLogs(ctx context.Context, args map[string]any) ToolCall if err != nil { return errorResult(fmt.Sprintf("failed to marshal tail results: %v", err)) } - return resourceResult("OtelContext://logs/tail", "application/json", string(data)) + return resourceResult(resourceURIPrefix+"logs/tail", httpconst.ContentTypeJSON, string(data)) } func (s *Server) toolGetTrace(ctx context.Context, args map[string]any) ToolCallResult { @@ -529,7 +537,7 @@ func (s *Server) toolGetTrace(ctx context.Context, args map[string]any) ToolCall if err != nil { return errorResult(fmt.Sprintf("failed to marshal trace: %v", err)) } - return resourceResult("OtelContext://traces/"+traceID, "application/json", string(data)) + return resourceResult(resourceURIPrefix+"traces/"+traceID, httpconst.ContentTypeJSON, string(data)) } func (s *Server) toolSearchTraces(ctx context.Context, args map[string]any) ToolCallResult { @@ -560,7 +568,7 @@ func (s *Server) toolSearchTraces(ctx context.Context, args map[string]any) Tool if err != nil { return errorResult(fmt.Sprintf("failed to marshal trace search results: %v", err)) } - return resourceResult("OtelContext://traces/search", "application/json", string(data)) + return resourceResult(resourceURIPrefix+"traces/search", httpconst.ContentTypeJSON, string(data)) } func (s *Server) toolGetMetrics(ctx context.Context, args map[string]any) ToolCallResult { @@ -580,7 +588,7 @@ func (s *Server) toolGetMetrics(ctx context.Context, args map[string]any) ToolCa if err != nil { return errorResult(fmt.Sprintf("failed to marshal metrics: %v", err)) } - return resourceResult("OtelContext://metrics/query", "application/json", string(data)) + return resourceResult(resourceURIPrefix+"metrics/query", httpconst.ContentTypeJSON, string(data)) } func (s *Server) toolGetDashboardStats(ctx context.Context, 
args map[string]any) ToolCallResult { @@ -645,7 +653,7 @@ func (s *Server) toolFindSimilarLogs(ctx context.Context, args map[string]any) T func (s *Server) toolGetAlerts() ToolCallResult { if s.svcGraph == nil { - return errorResult("service graph not yet initialized") + return errorResult(errSvcGraphNotInit) } snap := s.svcGraph.Snapshot() type alertEntry struct { @@ -679,7 +687,7 @@ func (s *Server) toolGetAlerts() ToolCallResult { func (s *Server) toolGetServiceMap(ctx context.Context, args map[string]any) ToolCallResult { if s.graphRAG == nil { - return errorResult("GraphRAG not initialized") + return errorResult(errGraphRAGNotInit) } depth := argInt(args, "depth", 3) result := s.graphRAG.ServiceMap(mcpCtx(ctx), depth) @@ -692,11 +700,11 @@ func (s *Server) toolGetServiceMap(ctx context.Context, args map[string]any) Too func (s *Server) toolGetErrorChains(ctx context.Context, args map[string]any) ToolCallResult { if s.graphRAG == nil { - return errorResult("GraphRAG not initialized") + return errorResult(errGraphRAGNotInit) } svcName, _ := args["service"].(string) if svcName == "" { - return errorResult("service is required") + return errorResult(errServiceRequired) } since := time.Now().Add(-15 * time.Minute) parseTimeRange(args, "time_range", &since) @@ -712,7 +720,7 @@ func (s *Server) toolGetErrorChains(ctx context.Context, args map[string]any) To func (s *Server) toolTraceGraph(ctx context.Context, args map[string]any) ToolCallResult { if s.graphRAG == nil { - return errorResult("GraphRAG not initialized") + return errorResult(errGraphRAGNotInit) } traceID, _ := args["trace_id"].(string) if traceID == "" { @@ -729,7 +737,7 @@ func (s *Server) toolTraceGraph(ctx context.Context, args map[string]any) ToolCa if err != nil { return errorResult(fmt.Sprintf("failed to marshal trace: %v", err)) } - return resourceResult("OtelContext://traces/"+traceID, "application/json", string(data)) + return resourceResult(resourceURIPrefix+"traces/"+traceID, 
httpconst.ContentTypeJSON, string(data)) } data, err := json.MarshalIndent(spans, "", " ") if err != nil { @@ -740,11 +748,11 @@ func (s *Server) toolTraceGraph(ctx context.Context, args map[string]any) ToolCa func (s *Server) toolImpactAnalysis(ctx context.Context, args map[string]any) ToolCallResult { if s.graphRAG == nil { - return errorResult("GraphRAG not initialized") + return errorResult(errGraphRAGNotInit) } svcName, _ := args["service"].(string) if svcName == "" { - return errorResult("service is required") + return errorResult(errServiceRequired) } depth := argInt(args, "depth", 5) result := s.graphRAG.ImpactAnalysis(mcpCtx(ctx), svcName, depth) @@ -757,11 +765,11 @@ func (s *Server) toolImpactAnalysis(ctx context.Context, args map[string]any) To func (s *Server) toolRootCauseAnalysis(ctx context.Context, args map[string]any) ToolCallResult { if s.graphRAG == nil { - return errorResult("GraphRAG not initialized") + return errorResult(errGraphRAGNotInit) } svcName, _ := args["service"].(string) if svcName == "" { - return errorResult("service is required") + return errorResult(errServiceRequired) } since := time.Now().Add(-15 * time.Minute) parseTimeRange(args, "time_range", &since) @@ -776,11 +784,11 @@ func (s *Server) toolRootCauseAnalysis(ctx context.Context, args map[string]any) func (s *Server) toolCorrelatedSignals(ctx context.Context, args map[string]any) ToolCallResult { if s.graphRAG == nil { - return errorResult("GraphRAG not initialized") + return errorResult(errGraphRAGNotInit) } svcName, _ := args["service"].(string) if svcName == "" { - return errorResult("service is required") + return errorResult(errServiceRequired) } since := time.Now().Add(-1 * time.Hour) parseTimeRange(args, "time_range", &since) @@ -795,7 +803,7 @@ func (s *Server) toolCorrelatedSignals(ctx context.Context, args map[string]any) func (s *Server) toolGetInvestigations(ctx context.Context, args map[string]any) ToolCallResult { if s.graphRAG == nil { - return 
errorResult("GraphRAG not initialized") + return errorResult(errGraphRAGNotInit) } service, _ := args["service"].(string) severity, _ := args["severity"].(string) @@ -815,7 +823,7 @@ func (s *Server) toolGetInvestigations(ctx context.Context, args map[string]any) func (s *Server) toolGetInvestigationByID(ctx context.Context, args map[string]any) ToolCallResult { if s.graphRAG == nil { - return errorResult("GraphRAG not initialized") + return errorResult(errGraphRAGNotInit) } id, _ := args["investigation_id"].(string) if id == "" { @@ -834,7 +842,7 @@ func (s *Server) toolGetInvestigationByID(ctx context.Context, args map[string]a func (s *Server) toolGetGraphSnapshot(ctx context.Context, args map[string]any) ToolCallResult { if s.graphRAG == nil { - return errorResult("GraphRAG not initialized") + return errorResult(errGraphRAGNotInit) } var at time.Time parseTime(args, "time", &at) @@ -854,7 +862,7 @@ func (s *Server) toolGetGraphSnapshot(ctx context.Context, args map[string]any) func (s *Server) toolGetAnomalyTimeline(ctx context.Context, args map[string]any) ToolCallResult { if s.graphRAG == nil { - return errorResult("GraphRAG not initialized") + return errorResult(errGraphRAGNotInit) } since := time.Now().Add(-1 * time.Hour) parseTime(args, "since", &since) diff --git a/internal/storage/log_repo.go b/internal/storage/log_repo.go index c72a21e..e26c2ad 100644 --- a/internal/storage/log_repo.go +++ b/internal/storage/log_repo.go @@ -44,7 +44,7 @@ func (r *Repository) BatchCreateLogs(logs []Log) error { func (r *Repository) GetLog(ctx context.Context, id uint) (*Log, error) { tenant := TenantFromContext(ctx) var l Log - if err := r.db.WithContext(ctx).Where("tenant_id = ?", tenant).First(&l, id).Error; err != nil { + if err := r.db.WithContext(ctx).Where(sqlWhereTenantID, tenant).First(&l, id).Error; err != nil { return nil, fmt.Errorf("failed to get log: %w", err) } return &l, nil @@ -54,7 +54,7 @@ func (r *Repository) GetLog(ctx context.Context, id uint) (*Log, 
error) { func (r *Repository) GetRecentLogs(ctx context.Context, limit int) ([]Log, error) { tenant := TenantFromContext(ctx) var logs []Log - if err := r.db.WithContext(ctx).Where("tenant_id = ?", tenant).Order("timestamp desc").Limit(limit).Find(&logs).Error; err != nil { + if err := r.db.WithContext(ctx).Where(sqlWhereTenantID, tenant).Order(sqlOrderTimestampDesc).Limit(limit).Find(&logs).Error; err != nil { return nil, fmt.Errorf("failed to get recent logs: %w", err) } return logs, nil @@ -81,34 +81,20 @@ func (r *Repository) GetLogsV2(ctx context.Context, filter LogFilter) ([]Log, in } } - base := r.db.WithContext(ctx).Model(&Log{}).Where("tenant_id = ?", tenant) + base := r.db.WithContext(ctx).Model(&Log{}).Where(sqlWhereTenantID, tenant) if useFTS5 { base = base.Joins("JOIN "+fts5LogsTable+" ON logs.id = "+fts5LogsTable+".rowid"). Where(fts5LogsTable+" MATCH ?", matchExpr) } - if filter.ServiceName != "" { - base = base.Where("service_name = ?", filter.ServiceName) - } - if filter.Severity != "" { - base = base.Where("severity = ?", filter.Severity) - } - if filter.TraceID != "" { - base = base.Where("trace_id = ?", filter.TraceID) - } - if !filter.StartTime.IsZero() { - base = base.Where("timestamp >= ?", filter.StartTime) - } - if !filter.EndTime.IsZero() { - base = base.Where("timestamp <= ?", filter.EndTime) - } + base = applyLogFilterCriteria(base, filter) if filter.Search != "" && !useFTS5 { search := "%" + filter.Search + "%" op := r.likeOp() base = base.Where(fmt.Sprintf("body %s ? 
OR trace_id %s ?", op, op), search, search) } - orderBy := "timestamp desc" + orderBy := sqlOrderTimestampDesc if useFTS5 { orderBy = "bm25(" + fts5LogsTable + ") ASC" } @@ -139,28 +125,36 @@ func (r *Repository) GetLogsV2(ctx context.Context, filter LogFilter) ([]Log, in return logs, total, nil } -// getLogsV2LikeFallback re-runs the query using LIKE against body/trace_id — -// used when the FTS5 path errors out so the API never serves a 500 because of -// an index-layer hiccup. -func (r *Repository) getLogsV2LikeFallback(ctx context.Context, filter LogFilter, tenant string) ([]Log, int64, error) { - var logs []Log - var total int64 - base := r.db.WithContext(ctx).Model(&Log{}).Where("tenant_id = ?", tenant) +// applyLogFilterCriteria appends the non-search WHERE clauses that are common +// to GetLogsV2 and its LIKE fallback. The Search clause is intentionally NOT +// applied here — the two callers handle it differently (FTS5 MATCH vs LIKE). +func applyLogFilterCriteria(base *gorm.DB, filter LogFilter) *gorm.DB { if filter.ServiceName != "" { base = base.Where("service_name = ?", filter.ServiceName) } if filter.Severity != "" { - base = base.Where("severity = ?", filter.Severity) + base = base.Where(sqlWhereSeverity, filter.Severity) } if filter.TraceID != "" { base = base.Where("trace_id = ?", filter.TraceID) } if !filter.StartTime.IsZero() { - base = base.Where("timestamp >= ?", filter.StartTime) + base = base.Where(sqlWhereTimestampGTE, filter.StartTime) } if !filter.EndTime.IsZero() { - base = base.Where("timestamp <= ?", filter.EndTime) + base = base.Where(sqlWhereTimestampLTE, filter.EndTime) } + return base +} + +// getLogsV2LikeFallback re-runs the query using LIKE against body/trace_id — +// used when the FTS5 path errors out so the API never serves a 500 because of +// an index-layer hiccup. 
+func (r *Repository) getLogsV2LikeFallback(ctx context.Context, filter LogFilter, tenant string) ([]Log, int64, error) { + var logs []Log + var total int64 + base := r.db.WithContext(ctx).Model(&Log{}).Where(sqlWhereTenantID, tenant) + base = applyLogFilterCriteria(base, filter) if filter.Search != "" { search := "%" + filter.Search + "%" op := r.likeOp() @@ -170,7 +164,7 @@ func (r *Repository) getLogsV2LikeFallback(ctx context.Context, filter LogFilter g.Go(func() error { return base.Session(&gorm.Session{}).Count(&total).Error }) g.Go(func() error { return base.Session(&gorm.Session{}). - Order("timestamp desc").Limit(filter.Limit).Offset(filter.Offset).Find(&logs).Error + Order(sqlOrderTimestampDesc).Limit(filter.Limit).Offset(filter.Offset).Find(&logs).Error }) if err := g.Wait(); err != nil { return nil, 0, fmt.Errorf("failed to fetch logs (fallback): %w", err) @@ -252,16 +246,16 @@ func (r *Repository) ListRecentHighSeverityLogsAllTenants(ctx context.Context, s } q := r.db.WithContext(ctx).Model(&Log{}) if severity != "" { - q = q.Where("severity = ?", severity) + q = q.Where(sqlWhereSeverity, severity) } if !since.IsZero() { - q = q.Where("timestamp >= ?", since) + q = q.Where(sqlWhereTimestampGTE, since) } if !until.IsZero() { - q = q.Where("timestamp <= ?", until) + q = q.Where(sqlWhereTimestampLTE, until) } var logs []Log - if err := q.Order("timestamp desc").Limit(limit).Find(&logs).Error; err != nil { + if err := q.Order(sqlOrderTimestampDesc).Limit(limit).Find(&logs).Error; err != nil { return nil, fmt.Errorf("failed to list recent logs all tenants: %w", err) } return logs, nil diff --git a/internal/storage/metrics_repo.go b/internal/storage/metrics_repo.go index 1d2a756..fd8e2e9 100644 --- a/internal/storage/metrics_repo.go +++ b/internal/storage/metrics_repo.go @@ -182,9 +182,9 @@ func (r *Repository) GetDashboardStats(ctx context.Context, start, end time.Time tenant := TenantFromContext(ctx) var stats DashboardStats - baseQuery := 
r.db.WithContext(ctx).Model(&Trace{}).Where("tenant_id = ? AND timestamp BETWEEN ? AND ?", tenant, start, end) + baseQuery := r.db.WithContext(ctx).Model(&Trace{}).Where(sqlWhereTenantTimeBetween, tenant, start, end) if len(serviceNames) > 0 { - baseQuery = baseQuery.Where("service_name IN ?", serviceNames) + baseQuery = baseQuery.Where(sqlWhereServiceIn, serviceNames) } // 1. Total Traces @@ -193,9 +193,9 @@ func (r *Repository) GetDashboardStats(ctx context.Context, start, end time.Time } // 2. Total Logs - logQuery := r.db.WithContext(ctx).Model(&Log{}).Where("tenant_id = ? AND timestamp BETWEEN ? AND ?", tenant, start, end) + logQuery := r.db.WithContext(ctx).Model(&Log{}).Where(sqlWhereTenantTimeBetween, tenant, start, end) if len(serviceNames) > 0 { - logQuery = logQuery.Where("service_name IN ?", serviceNames) + logQuery = logQuery.Where(sqlWhereServiceIn, serviceNames) } if err := logQuery.Count(&stats.TotalLogs).Error; err != nil { return nil, fmt.Errorf("failed to count logs: %w", err) @@ -290,7 +290,7 @@ func (r *Repository) GetTrafficMetrics(ctx context.Context, start, end time.Time Where("tenant_id = ? AND timestamp BETWEEN ? AND ?", tenant, start, end) if len(serviceNames) > 0 { - query = query.Where("service_name IN ?", serviceNames) + query = query.Where(sqlWhereServiceIn, serviceNames) } if err := query.Find(&rows).Error; err != nil { @@ -340,7 +340,7 @@ func (r *Repository) GetLatencyHeatmap(ctx context.Context, start, end time.Time Where("tenant_id = ? AND timestamp BETWEEN ? 
AND ?", tenant, start, end) if len(serviceNames) > 0 { - query = query.Where("service_name IN ?", serviceNames) + query = query.Where(sqlWhereServiceIn, serviceNames) } if err := query.Order("timestamp DESC").Limit(2000).Find(&points).Error; err != nil { diff --git a/internal/storage/partitions.go b/internal/storage/partitions.go index 112e48b..47bb1fa 100644 --- a/internal/storage/partitions.go +++ b/internal/storage/partitions.go @@ -158,8 +158,8 @@ func EnsureLogsPartitionForDay(db *gorm.DB, day time.Time) error { ddl := fmt.Sprintf( `CREATE TABLE IF NOT EXISTS %s PARTITION OF logs FOR VALUES FROM ('%s') TO ('%s')`, quoteIdent(name), - d.Format("2006-01-02 15:04:05+00"), - upper.Format("2006-01-02 15:04:05+00"), + d.Format(timeFormatPGUTC), + upper.Format(timeFormatPGUTC), ) if err := db.Exec(ddl).Error; err != nil { return fmt.Errorf("create partition %s: %w", name, err) @@ -283,7 +283,7 @@ func parsePartitionUpper(boundExpr string) (time.Time, bool) { // parse it as RFC3339-ish. Try a few layouts. 
layouts := []string{ "2006-01-02 15:04:05-07", - "2006-01-02 15:04:05+00", + timeFormatPGUTC, "2006-01-02 15:04:05Z07:00", "2006-01-02 15:04:05", } diff --git a/internal/storage/pg_partitions_test.go b/internal/storage/pg_partitions_test.go index f92d0c9..618f1de 100644 --- a/internal/storage/pg_partitions_test.go +++ b/internal/storage/pg_partitions_test.go @@ -241,8 +241,9 @@ func TestPGPartition_SchedulerDropsExpiredAndCreatesLookahead(t *testing.T) { sched.SetMetrics(func(n int) { dropped += n }, func(n int) { active = n }) ctx, cancel := context.WithCancel(context.Background()) + defer cancel() sched.Start(ctx) - defer func() { cancel(); sched.Stop() }() + defer sched.Stop() if dropped < 1 { t.Fatalf("scheduler initial pass should have dropped >=1 expired partition; got %d", dropped) diff --git a/internal/storage/repository.go b/internal/storage/repository.go index b76552c..bd886b6 100644 --- a/internal/storage/repository.go +++ b/internal/storage/repository.go @@ -154,19 +154,19 @@ func NewRepository(metrics *telemetry.Metrics) (*Repository, error) { // Register GORM Callback for DB Latency Metrics if metrics != nil { _ = db.Callback().Query().Before("gorm:query").Register("telemetry:before_query", func(d *gorm.DB) { - d.Set("telemetry:start_time", time.Now()) + d.Set(cacheKeyTelemetryStart, time.Now()) }) _ = db.Callback().Query().After("gorm:query").Register("telemetry:after_query", func(d *gorm.DB) { - if start, ok := d.Get("telemetry:start_time"); ok { + if start, ok := d.Get(cacheKeyTelemetryStart); ok { duration := time.Since(start.(time.Time)).Seconds() metrics.ObserveDBLatency(duration) } }) _ = db.Callback().Create().Before("gorm:create").Register("telemetry:before_create", func(d *gorm.DB) { - d.Set("telemetry:start_time", time.Now()) + d.Set(cacheKeyTelemetryStart, time.Now()) }) _ = db.Callback().Create().After("gorm:create").Register("telemetry:after_create", func(d *gorm.DB) { - if start, ok := d.Get("telemetry:start_time"); ok { + if start, ok 
:= d.Get(cacheKeyTelemetryStart); ok { duration := time.Since(start.(time.Time)).Seconds() metrics.ObserveDBLatency(duration) } @@ -200,11 +200,11 @@ func (r *Repository) GetStats(ctx context.Context) (map[string]any, error) { var logCount int64 var errorCount int64 - if err := db.Model(&Trace{}).Where("tenant_id = ?", tenant).Count(&traceCount).Error; err != nil { + if err := db.Model(&Trace{}).Where(sqlWhereTenantID, tenant).Count(&traceCount).Error; err != nil { return nil, fmt.Errorf("failed to count traces: %w", err) } - if err := db.Model(&Log{}).Where("tenant_id = ?", tenant).Count(&logCount).Error; err != nil { + if err := db.Model(&Log{}).Where(sqlWhereTenantID, tenant).Count(&logCount).Error; err != nil { return nil, fmt.Errorf("failed to count logs: %w", err) } @@ -214,9 +214,9 @@ func (r *Repository) GetStats(ctx context.Context) (map[string]any, error) { // Count distinct services across both logs and traces (tenant-scoped). var serviceNames []string - db.Model(&Log{}).Where("tenant_id = ?", tenant).Distinct("service_name").Pluck("service_name", &serviceNames) + db.Model(&Log{}).Where(sqlWhereTenantID, tenant).Distinct("service_name").Pluck("service_name", &serviceNames) traceServices := []string{} - db.Model(&Trace{}).Where("tenant_id = ?", tenant).Distinct("service_name").Pluck("service_name", &traceServices) + db.Model(&Trace{}).Where(sqlWhereTenantID, tenant).Distinct("service_name").Pluck("service_name", &traceServices) serviceSet := make(map[string]struct{}, len(serviceNames)+len(traceServices)) for _, s := range serviceNames { if s != "" { @@ -290,7 +290,7 @@ func NewRepositoryFromDB(db *gorm.DB, driver string) *Repository { func (r *Repository) RecentTraces(ctx context.Context, limit int) ([]Trace, error) { tenant := TenantFromContext(ctx) var traces []Trace - if err := r.db.WithContext(ctx).Where("tenant_id = ?", tenant).Order("timestamp desc").Limit(limit).Find(&traces).Error; err != nil { + if err := 
r.db.WithContext(ctx).Where(sqlWhereTenantID, tenant).Order(sqlOrderTimestampDesc).Limit(limit).Find(&traces).Error; err != nil { return nil, err } return traces, nil @@ -300,7 +300,7 @@ func (r *Repository) RecentTraces(ctx context.Context, limit int) ([]Trace, erro func (r *Repository) RecentLogs(ctx context.Context, limit int) ([]Log, error) { tenant := TenantFromContext(ctx) var logs []Log - if err := r.db.WithContext(ctx).Where("tenant_id = ?", tenant).Order("timestamp desc").Limit(limit).Find(&logs).Error; err != nil { + if err := r.db.WithContext(ctx).Where(sqlWhereTenantID, tenant).Order(sqlOrderTimestampDesc).Limit(limit).Find(&logs).Error; err != nil { return nil, err } return logs, nil @@ -318,7 +318,7 @@ func (r *Repository) SearchLogs(ctx context.Context, query string, limit int) ([ return r.searchLogsFTS5(ctx, tenant, query, limit) } var logs []Log - db := r.db.WithContext(ctx).Where("tenant_id = ?", tenant).Order("timestamp desc").Limit(limit) + db := r.db.WithContext(ctx).Where(sqlWhereTenantID, tenant).Order(sqlOrderTimestampDesc).Limit(limit) if query != "" { op := r.likeOp() db = db.Where(fmt.Sprintf("body %s ? OR service_name %s ?", op, op), "%"+query+"%", "%"+query+"%") @@ -337,7 +337,7 @@ func (r *Repository) searchLogsFTS5(ctx context.Context, tenant, query string, l matchExpr := fts5MatchExpr(query) if matchExpr == "" { var logs []Log - err := r.db.WithContext(ctx).Where("tenant_id = ?", tenant).Order("timestamp desc").Limit(limit).Find(&logs).Error + err := r.db.WithContext(ctx).Where(sqlWhereTenantID, tenant).Order(sqlOrderTimestampDesc).Limit(limit).Find(&logs).Error return logs, err } var logs []Log @@ -368,7 +368,7 @@ func (r *Repository) searchLogsLikeFallback(ctx context.Context, tenant, query s err := r.db.WithContext(ctx). Where("tenant_id = ?", tenant). Where(fmt.Sprintf("body %s ? OR service_name %s ?", op, op), "%"+query+"%", "%"+query+"%"). - Order("timestamp desc"). + Order(sqlOrderTimestampDesc). Limit(limit). 
Find(&logs).Error return logs, err diff --git a/internal/storage/sql_consts.go b/internal/storage/sql_consts.go new file mode 100644 index 0000000..c2b3b69 --- /dev/null +++ b/internal/storage/sql_consts.go @@ -0,0 +1,25 @@ +package storage + +// Shared SQL fragments used by repository methods across log_repo, trace_repo, +// metrics_repo, repository.go and partitions.go. Centralised here so the same +// fragment isn't duplicated across files (Sonar S1192). +// +// These are not exported — they're internal building blocks for GORM `.Where()` +// calls and `.Order()` clauses, not part of any public contract. +const ( + sqlWhereTenantID = "tenant_id = ?" + sqlWhereSeverity = "severity = ?" + sqlWhereTimestampGTE = "timestamp >= ?" + sqlWhereTimestampLTE = "timestamp <= ?" + sqlOrderTimestampDesc = "timestamp desc" + sqlWhereTenantTimeBetween = "tenant_id = ? AND timestamp BETWEEN ? AND ?" + sqlWhereServiceIn = "service_name IN ?" + + // cacheKeyTelemetryStart is the GORM statement key set by the telemetry + // before-query/before-create callbacks and read afterwards to record DB latency. + cacheKeyTelemetryStart = "telemetry:start_time" + + // timeFormatPGUTC is the Postgres-native timestamptz format string used + // when materialising partition boundary literals. + timeFormatPGUTC = "2006-01-02 15:04:05+00" +) diff --git a/internal/storage/trace_repo.go b/internal/storage/trace_repo.go index 7c119a8..f0ae992 100644 --- a/internal/storage/trace_repo.go +++ b/internal/storage/trace_repo.go @@ -147,8 +147,8 @@ func (r *Repository) GetTrace(ctx context.Context, traceID string) (*Trace, erro tenant := TenantFromContext(ctx) var trace Trace if err := r.db.WithContext(ctx). - Preload("Spans", "tenant_id = ?", tenant). - Preload("Logs", "tenant_id = ?", tenant). + Preload("Spans", sqlWhereTenantID, tenant). + Preload("Logs", sqlWhereTenantID, tenant). Where("tenant_id = ? AND trace_id = ?", tenant, traceID).
First(&trace).Error; err != nil { return nil, fmt.Errorf("failed to get trace: %w", err) @@ -171,7 +171,7 @@ func (r *Repository) GetTracesFiltered(ctx context.Context, start, end time.Time var traces []Trace var total int64 - base := r.db.WithContext(ctx).Model(&Trace{}).Where("tenant_id = ?", tenant) + base := r.db.WithContext(ctx).Model(&Trace{}).Where(sqlWhereTenantID, tenant) if !start.IsZero() && !end.IsZero() { base = base.Where("timestamp BETWEEN ? AND ?", start, end) @@ -262,7 +262,7 @@ const serviceMapSpanLimit = 500_000 func (r *Repository) GetServiceMapMetrics(ctx context.Context, start, end time.Time) (*ServiceMapMetrics, error) { tenant := TenantFromContext(ctx) var spans []Span - query := r.db.WithContext(ctx).Model(&Span{}).Where("tenant_id = ?", tenant) + query := r.db.WithContext(ctx).Model(&Span{}).Where(sqlWhereTenantID, tenant) if !start.IsZero() && !end.IsZero() { query = query.Where("start_time BETWEEN ? AND ?", start, end) diff --git a/main.go b/main.go index ec9fc64..bbfbe47 100644 --- a/main.go +++ b/main.go @@ -243,7 +243,7 @@ func main() { if err := json.Unmarshal(data, &envelope); err != nil { // Legacy format: try to deserialize as []storage.Log var logs []storage.Log - if err2 := json.Unmarshal(data, &logs); err2 != nil { + if json.Unmarshal(data, &logs) != nil { return fmt.Errorf("DLQ replay unmarshal failed: %w", err) } return repo.BatchCreateLogs(logs) @@ -605,16 +605,20 @@ func main() { // Resolve TLS material once: explicit cert-file > self-signed > plaintext. // Both gRPC and HTTP reuse the same resolved paths below. 
+ const ( + tlsModeCertFile = "cert-file" + tlsModeSelfSigned = "self-signed" + ) var ( tlsCertPath string tlsKeyPath string - tlsMode string // "cert-file", "self-signed", or "" (plaintext) + tlsMode string // tlsModeCertFile, tlsModeSelfSigned, or "" (plaintext) ) switch { case cfg.TLSCertFileMode(): tlsCertPath = cfg.TLSCertFile tlsKeyPath = cfg.TLSKeyFile - tlsMode = "cert-file" + tlsMode = tlsModeCertFile case cfg.TLSSelfsignedMode(): cp, kp, err := tlsbootstrap.EnsureSelfSignedCert(cfg.TLSCacheDir) if err != nil { @@ -622,7 +626,7 @@ func main() { } tlsCertPath = cp tlsKeyPath = kp - tlsMode = "self-signed" + tlsMode = tlsModeSelfSigned } // Start gRPC Server @@ -664,20 +668,20 @@ func main() { "max_concurrent_streams", streams, ) switch tlsMode { - case "cert-file": + case tlsModeCertFile: creds, err := credentials.NewServerTLSFromFile(tlsCertPath, tlsKeyPath) if err != nil { fatal("Failed to load gRPC TLS credentials", err) } grpcOpts = append(grpcOpts, grpc.Creds(creds)) - slog.Info("🔒 gRPC TLS enabled", "mode", "cert-file") - case "self-signed": + slog.Info("🔒 gRPC TLS enabled", "mode", tlsModeCertFile) + case tlsModeSelfSigned: creds, err := credentials.NewServerTLSFromFile(tlsCertPath, tlsKeyPath) if err != nil { fatal("Failed to load gRPC TLS credentials (self-signed)", err) } grpcOpts = append(grpcOpts, grpc.Creds(creds)) - slog.Info("🔒 gRPC TLS enabled", "mode", "self-signed", "cache_dir", cfg.TLSCacheDir) + slog.Info("🔒 gRPC TLS enabled", "mode", tlsModeSelfSigned, "cache_dir", cfg.TLSCacheDir) default: slog.Info("🔓 gRPC plaintext — not for production; set TLS_CERT_FILE/TLS_KEY_FILE or TLS_AUTO_SELFSIGNED=true") } diff --git a/scripts/setup-git-signed.sh b/scripts/setup-git-signed.sh index 038f1d4..cad205f 100755 --- a/scripts/setup-git-signed.sh +++ b/scripts/setup-git-signed.sh @@ -39,7 +39,7 @@ GIT_USER_EMAIL=${GIT_USER_EMAIL:-$(git config --global --get user.email 2>/dev/n GIT_SIGNING_KEY=${GIT_SIGNING_KEY:-$(git config --global --get 
user.signingkey 2>/dev/null || echo "$HOME/.ssh/id_ed25519.pub")} GIT_GPG_FORMAT=${GIT_GPG_FORMAT:-$(git config --global --get gpg.format 2>/dev/null || echo "ssh")} -if [ -z "$GIT_USER_NAME" ] || [ -z "$GIT_USER_EMAIL" ]; then +if [[ -z "$GIT_USER_NAME" || -z "$GIT_USER_EMAIL" ]]; then cat >&2 <<'EOF' error: contributor identity not set. @@ -62,7 +62,7 @@ fi # - x509: user.signingkey is a key id / fingerprint (gpgsm must know it) case "$GIT_GPG_FORMAT" in ssh) - if [ ! -f "$GIT_SIGNING_KEY" ]; then + if [[ ! -f "$GIT_SIGNING_KEY" ]]; then cat >&2 < { name: error.name, stack: error.stack, componentStack: info.componentStack, - url: typeof window !== 'undefined' ? window.location.href : undefined, - userAgent: typeof navigator !== 'undefined' ? navigator.userAgent : undefined, + url: typeof window === 'undefined' ? undefined : window.location.href, + userAgent: typeof navigator === 'undefined' ? undefined : navigator.userAgent, timestamp: new Date().toISOString(), }) // TODO(telemetry): forward to server when useWebSocket exposes a send() // API, or via a dedicated POST /api/client-errors endpoint. 
} - private reset = (): void => { + private readonly reset = (): void => { this.setState({ error: null, info: null }) } - private reload = (): void => { + private readonly reload = (): void => { if (typeof window !== 'undefined') { window.location.reload() } diff --git a/ui/src/components/__tests__/ErrorBoundary.test.tsx b/ui/src/components/__tests__/ErrorBoundary.test.tsx index fbe0126..953325f 100644 --- a/ui/src/components/__tests__/ErrorBoundary.test.tsx +++ b/ui/src/components/__tests__/ErrorBoundary.test.tsx @@ -4,7 +4,7 @@ import userEvent from '@testing-library/user-event' import { useState } from 'react' import { ErrorBoundary } from '../ErrorBoundary' -function Boom({ shouldThrow }: { shouldThrow: boolean }) { +function Boom({ shouldThrow }: Readonly<{ shouldThrow: boolean }>) { if (shouldThrow) { throw new Error('kaboom-from-child') } @@ -79,7 +79,7 @@ describe('ErrorBoundary', () => { ) const calls = errorSpy.mock.calls as unknown[][] const tagged = calls.find( - (args) => typeof args[0] === 'string' && (args[0] as string).includes('[ErrorBoundary]'), + (args) => typeof args[0] === 'string' && args[0].includes('[ErrorBoundary]'), ) expect(tagged).toBeDefined() }) diff --git a/ui/src/components/nav/TopNav.tsx b/ui/src/components/nav/TopNav.tsx index d469d0f..50f5ebc 100644 --- a/ui/src/components/nav/TopNav.tsx +++ b/ui/src/components/nav/TopNav.tsx @@ -33,7 +33,7 @@ const menuItems = [ { key: 'mcp' as const, label: 'MCP Endpoint', icon: }, ] -export default function TopNav({ view, onNavigate, wsConnected }: TopNavProps) { +export default function TopNav({ view, onNavigate, wsConnected }: Readonly) { const { theme, toggle } = useTheme() const isCompact = useMediaQuery('(max-width: 760px)') const [drawerOpen, setDrawerOpen] = useState(false) diff --git a/ui/src/hooks/__tests__/useWebSocket.test.ts b/ui/src/hooks/__tests__/useWebSocket.test.ts index f8e2fb7..0b00fe4 100644 --- a/ui/src/hooks/__tests__/useWebSocket.test.ts +++ 
b/ui/src/hooks/__tests__/useWebSocket.test.ts @@ -5,12 +5,12 @@ import { act, renderHook } from '@testing-library/react' // changes. Tracks all instances so assertions can reach into the last // constructed socket. class MockWebSocket { - static CONNECTING = 0 - static OPEN = 1 - static CLOSING = 2 - static CLOSED = 3 + static readonly CONNECTING = 0 + static readonly OPEN = 1 + static readonly CLOSING = 2 + static readonly CLOSED = 3 - static instances: MockWebSocket[] = [] + static readonly instances: MockWebSocket[] = [] readyState = MockWebSocket.CONNECTING url: string @@ -42,15 +42,15 @@ class MockWebSocket { } simulateMessage(data: unknown) { - const ev = new MessageEvent('message', { data: JSON.stringify(data) }) - this.onmessage?.(ev as MessageEvent) + const ev = new MessageEvent('message', { data: JSON.stringify(data) }) + this.onmessage?.(ev) } } const OriginalWebSocket = globalThis.WebSocket beforeEach(() => { - MockWebSocket.instances = [] + MockWebSocket.instances.length = 0 vi.stubGlobal('WebSocket', MockWebSocket as unknown as typeof WebSocket) vi.useFakeTimers() }) diff --git a/ui/src/hooks/useLogs.ts b/ui/src/hooks/useLogs.ts index 0d569b2..e256c4e 100644 --- a/ui/src/hooks/useLogs.ts +++ b/ui/src/hooks/useLogs.ts @@ -28,7 +28,9 @@ export function useLogs() { } }, []) - useEffect(() => { void load() }, [load]) + useEffect(() => { + load().catch(() => undefined) + }, [load]) const runSimilar = async (query: string) => { if (!query.trim()) return diff --git a/ui/src/hooks/useTraces.ts b/ui/src/hooks/useTraces.ts index 25a8e12..617986e 100644 --- a/ui/src/hooks/useTraces.ts +++ b/ui/src/hooks/useTraces.ts @@ -25,7 +25,9 @@ export function useTraces() { } }, []) - useEffect(() => { void load() }, [load]) + useEffect(() => { + load().catch(() => undefined) + }, [load]) const selectTrace = async (traceId: string) => { const res = await fetch(`/api/traces/${traceId}`)