fix: remove event-loop-blocking cold start; unify profiler to cursor-based incremental

Cold start fetched all logs in one bulk query then processed them in a tight
synchronous loop with no yields, blocking the asyncio event loop for seconds
on datasets of 30K+ rows. This stalled every concurrent await — including the
SSE stream generator's initial DB calls — causing the dashboard to show
INITIALIZING SENSORS indefinitely.

Changes:
- Drop _cold_start() and get_all_logs_raw(); uninitialized state now runs the
  same cursor loop as incremental, starting from last_log_id=0
- Yield to the event loop after every _BATCH_SIZE rows (asyncio.sleep(0))
- Add SSE keepalive comment as first yield so the connection flushes before
  any DB work begins
- Add Cache-Control/X-Accel-Buffering headers to StreamingResponse
This commit is contained in:
2026-04-15 13:46:42 -04:00
parent 12aa98a83c
commit 314e6c6388
7 changed files with 32 additions and 74 deletions

View File

@@ -59,10 +59,7 @@ async def attacker_profile_worker(repo: BaseRepository, *, interval: int = 30) -> None:
async def _incremental_update(repo: BaseRepository, state: _WorkerState) -> None:
if not state.initialized:
await _cold_start(repo, state)
return
was_cold = not state.initialized
affected_ips: set[str] = set()
while True:
@@ -76,9 +73,13 @@ async def _incremental_update(repo: BaseRepository, state: _WorkerState) -> None
affected_ips.add(event.attacker_ip)
state.last_log_id = row["id"]
await asyncio.sleep(0) # yield to event loop after each batch
if len(batch) < _BATCH_SIZE:
break
state.initialized = True
if not affected_ips:
await repo.set_state(_STATE_KEY, {"last_log_id": state.last_log_id})
return
@@ -86,27 +87,10 @@ async def _incremental_update(repo: BaseRepository, state: _WorkerState) -> None
await _update_profiles(repo, state, affected_ips)
await repo.set_state(_STATE_KEY, {"last_log_id": state.last_log_id})
logger.info("attacker worker: updated %d profiles (incremental)", len(affected_ips))
async def _cold_start(repo: BaseRepository, state: _WorkerState) -> None:
    """Rebuild every attacker profile from the full log history in one pass.

    NOTE(review): this is the function the commit deletes — the bulk fetch
    plus the synchronous ingest loop below contain no await/yield points,
    which is the event-loop-blocking behavior described in the commit
    message.
    """
    # Single bulk query for the entire log table; on large datasets this is
    # the first stall point (everything loads into memory at once).
    all_logs = await repo.get_all_logs_raw()
    if not all_logs:
        # Empty table: just record the current max log id as the cursor so
        # later incremental runs start from the right position.
        state.last_log_id = await repo.get_max_log_id()
        state.initialized = True
        await repo.set_state(_STATE_KEY, {"last_log_id": state.last_log_id})
        return
    for row in all_logs:
        # Tight synchronous loop — no await inside, so no other coroutine
        # can run until every row has been ingested.
        state.engine.ingest(row["raw_line"])
        # Track the highest id seen; rows are not assumed to arrive sorted.
        state.last_log_id = max(state.last_log_id, row["id"])
    # NOTE(review): reaches into the engine's private _events mapping;
    # presumably its keys are attacker IPs — confirm against the engine API.
    all_ips = set(state.engine._events.keys())
    await _update_profiles(repo, state, all_ips)
    # Persist the cursor only after profiles are written, then mark the
    # worker initialized so subsequent ticks take the incremental path.
    await repo.set_state(_STATE_KEY, {"last_log_id": state.last_log_id})
    state.initialized = True
    logger.info("attacker worker: cold start rebuilt %d profiles", len(all_ips))
if was_cold:
logger.info("attacker worker: cold start rebuilt %d profiles", len(affected_ips))
else:
logger.info("attacker worker: updated %d profiles (incremental)", len(affected_ips))
async def _update_profiles(