fix: clear all addressable technical debt (DEBT-005 through DEBT-025)
Security:
- DEBT-008: remove query-string token auth; header-only Bearer now enforced
- DEBT-013: add regex constraint ^[a-z0-9\-]{1,64}$ on decky_name path param
- DEBT-015: stop leaking raw exception detail to API clients; log server-side
- DEBT-016: validate search (max_length=512) and datetime params with regex; see the sketch after this section
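A rough sketch of the validation pattern behind DEBT-008/013/016; the route path, parameter names, and the FastAPI pattern= keyword (pydantic-v2-era FastAPI; older releases spell it regex=) are assumptions, not the exact code:

    from typing import Optional
    from fastapi import APIRouter, Depends, Path, Query
    from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer

    router = APIRouter()
    bearer = HTTPBearer()  # DEBT-008: Authorization header only; query-string tokens rejected

    @router.get("/deckies/{decky_name}/logs")
    async def decky_logs(
        decky_name: str = Path(..., pattern=r"^[a-z0-9\-]{1,64}$"),   # DEBT-013
        search: Optional[str] = Query(None, max_length=512),          # DEBT-016
        start_time: Optional[str] = Query(                            # DEBT-016 (assumed ISO-8601 shape)
            None, pattern=r"^\d{4}-\d{2}-\d{2}[T ]\d{2}:\d{2}(:\d{2})?$"),
        credentials: HTTPAuthorizationCredentials = Depends(bearer),
    ):
        ...  # query repo, return logs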
Reliability:
- DEBT-014: wrap SSE event_generator in try/except; yield error frame on failure
- DEBT-017: emit log.warning/error on DB init retries; silent failures now visible (sketch below)
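The shape of the DEBT-017 retry logging, roughly; the function name, attempt count, and delay are illustrative, not the actual initializer:

    import asyncio
    import logging
    from typing import Awaitable, Callable

    log = logging.getLogger(__name__)

    async def init_db_with_retry(connect: Callable[[], Awaitable], attempts: int = 5, delay: float = 2.0):
        for attempt in range(1, attempts + 1):
            try:
                return await connect()
            except Exception:
                if attempt == attempts:
                    # Last attempt: log at error level and let the failure propagate
                    log.error("DB init failed after %d attempts; giving up", attempts)
                    raise
                log.warning("DB init attempt %d/%d failed; retrying in %.0fs",
                            attempt, attempts, delay)
                await asyncio.sleep(delay)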
Observability / Docs:
- DEBT-020: add 401/422 response declarations to all route decorators
Infrastructure:
- DEBT-018: add HEALTHCHECK to all 24 template Dockerfiles
- DEBT-019: add USER decnet + setcap cap_net_bind_service to all 24 Dockerfiles
- DEBT-024: bump Redis template version 7.0.12 → 7.2.7
Config:
- DEBT-012: validate DECNET_API_PORT and DECNET_WEB_PORT range (1-65535); sketch below
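A minimal sketch of the range check; the env-var names come from the commit, but reading them via os.environ and the helper name are assumptions about the config module:

    import os

    def _require_port(name: str) -> int:
        # Fail fast at startup on a missing or out-of-range port
        value = int(os.environ[name])
        if not 1 <= value <= 65535:
            raise ValueError(f"{name} must be in 1-65535, got {value}")
        return value

    DECNET_API_PORT = _require_port("DECNET_API_PORT")
    DECNET_WEB_PORT = _require_port("DECNET_WEB_PORT")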
Code quality:
- DEBT-010: delete 22 duplicate decnet_logging.py copies; the deployer now injects the canonical copy
- DEBT-022: closed as false positive (print only in module docstring)
- DEBT-009: closed as false positive (templates already use structured syslog_line)
Build:
- DEBT-025: generate requirements.lock via pip freeze
Testing:
- DEBT-005/006/007: comprehensive test suite added across tests/api/
- conftest: in-memory SQLite + StaticPool + monkeypatched session_factory (sketch after this section)
- fuzz mark added; default runs exclude fuzz tests; parallel runs use pytest-xdist -n logical
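The conftest wiring, sketched assuming SQLAlchemy 2.x async plus pytest-asyncio; decnet.web.dependencies.session_factory as the patch target is inferred from the imports in the diff below:

    # tests/api/conftest.py (sketch)
    import pytest
    from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine
    from sqlalchemy.pool import StaticPool

    @pytest.fixture
    async def session_factory(monkeypatch):
        # One shared in-memory connection so every session sees the same DB
        engine = create_async_engine(
            "sqlite+aiosqlite:///:memory:",
            connect_args={"check_same_thread": False},
            poolclass=StaticPool,
        )
        factory = async_sessionmaker(engine, expire_on_commit=False)
        monkeypatch.setattr("decnet.web.dependencies.session_factory", factory, raising=False)
        yield factory
        await engine.dispose()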
DEBT.md updated: 23/25 items closed; DEBT-011 (Alembic) and DEBT-023 (digest pinning) remain
@@ -1,5 +1,6 @@
 import json
 import asyncio
+import logging
 from typing import AsyncGenerator, Optional
 
 from fastapi import APIRouter, Depends, Query, Request
@@ -7,10 +8,13 @@ from fastapi.responses import StreamingResponse
 from decnet.web.dependencies import get_current_user, repo
 
+log = logging.getLogger(__name__)
+
 router = APIRouter()
 
 
-@router.get("/stream", tags=["Observability"])
+@router.get("/stream", tags=["Observability"],
+            responses={401: {"description": "Not authenticated"}, 422: {"description": "Validation error"}})
 async def stream_events(
     request: Request,
     last_event_id: int = Query(0, alias="lastEventId"),
@@ -21,43 +25,42 @@ async def stream_events(
 ) -> StreamingResponse:
 
     async def event_generator() -> AsyncGenerator[str, None]:
         # Start tracking from the provided ID, or current max if 0
         last_id = last_event_id
-        if last_id == 0:
-            last_id = await repo.get_max_log_id()
-
         stats_interval_sec = 10
         loops_since_stats = 0
 
-        while True:
-            if await request.is_disconnected():
-                break
-
-            # Poll for new logs
-            new_logs = await repo.get_logs_after_id(last_id, limit=50, search=search, start_time=start_time, end_time=end_time)
-            if new_logs:
-                # Update last_id to the max id in the fetched batch
-                last_id = max(log["id"] for log in new_logs)
-                payload = json.dumps({"type": "logs", "data": new_logs})
-                yield f"event: message\ndata: {payload}\n\n"
-
-                # If we have new logs, stats probably changed, so force a stats update
-                loops_since_stats = stats_interval_sec
-
-            # Periodically poll for stats
-            if loops_since_stats >= stats_interval_sec:
-                stats = await repo.get_stats_summary()
-                payload = json.dumps({"type": "stats", "data": stats})
-                yield f"event: message\ndata: {payload}\n\n"
-
-                # Also yield histogram
-                histogram = await repo.get_log_histogram(search=search, start_time=start_time, end_time=end_time, interval_minutes=15)
-                hist_payload = json.dumps({"type": "histogram", "data": histogram})
-                yield f"event: message\ndata: {hist_payload}\n\n"
-
-                loops_since_stats = 0
-
-            loops_since_stats += 1
-            await asyncio.sleep(1)
+        # DEBT-014: guard the polling loop so failures surface as an SSE error frame
+        try:
+            if last_id == 0:
+                last_id = await repo.get_max_log_id()
+
+            while True:
+                if await request.is_disconnected():
+                    break
+
+                new_logs = await repo.get_logs_after_id(
+                    last_id, limit=50, search=search,
+                    start_time=start_time, end_time=end_time,
+                )
+                if new_logs:
+                    last_id = max(entry["id"] for entry in new_logs)
+                    yield f"event: message\ndata: {json.dumps({'type': 'logs', 'data': new_logs})}\n\n"
+                    # New logs usually mean changed stats; force a stats refresh
+                    loops_since_stats = stats_interval_sec
+
+                if loops_since_stats >= stats_interval_sec:
+                    stats = await repo.get_stats_summary()
+                    yield f"event: message\ndata: {json.dumps({'type': 'stats', 'data': stats})}\n\n"
+                    histogram = await repo.get_log_histogram(
+                        search=search, start_time=start_time,
+                        end_time=end_time, interval_minutes=15,
+                    )
+                    yield f"event: message\ndata: {json.dumps({'type': 'histogram', 'data': histogram})}\n\n"
+                    loops_since_stats = 0
+
+                loops_since_stats += 1
+                await asyncio.sleep(1)
+        except asyncio.CancelledError:
+            # Client went away or the server is shutting down; end the stream quietly
+            pass
+        except Exception:
+            log.exception("SSE stream error (last_event_id=%s)", last_event_id)
+            yield f"event: error\ndata: {json.dumps({'type': 'error', 'message': 'Stream interrupted'})}\n\n"
 
     return StreamingResponse(event_generator(), media_type="text/event-stream")