fix: remove event-loop-blocking cold start; unify profiler to cursor-based incremental
Cold start fetched all logs in one bulk query then processed them in a tight synchronous loop with no yields, blocking the asyncio event loop for seconds on datasets of 30K+ rows. This stalled every concurrent await — including the SSE stream generator's initial DB calls — causing the dashboard to show INITIALIZING SENSORS indefinitely.

Changes:
- Drop `_cold_start()` and `get_all_logs_raw()`; uninitialized state now runs the same cursor loop as incremental, starting from `last_log_id=0`
- Yield to the event loop after every `_BATCH_SIZE` rows (`asyncio.sleep(0)`)
- Add an SSE keepalive comment as the first yield so the connection flushes before any DB work begins
- Add `Cache-Control` / `X-Accel-Buffering` headers to the `StreamingResponse`
This commit is contained in:
@@ -59,10 +59,7 @@ async def attacker_profile_worker(repo: BaseRepository, *, interval: int = 30) -
|
||||
|
||||
|
||||
async def _incremental_update(repo: BaseRepository, state: _WorkerState) -> None:
|
||||
if not state.initialized:
|
||||
await _cold_start(repo, state)
|
||||
return
|
||||
|
||||
was_cold = not state.initialized
|
||||
affected_ips: set[str] = set()
|
||||
|
||||
while True:
|
||||
@@ -76,9 +73,13 @@ async def _incremental_update(repo: BaseRepository, state: _WorkerState) -> None
|
||||
affected_ips.add(event.attacker_ip)
|
||||
state.last_log_id = row["id"]
|
||||
|
||||
await asyncio.sleep(0) # yield to event loop after each batch
|
||||
|
||||
if len(batch) < _BATCH_SIZE:
|
||||
break
|
||||
|
||||
state.initialized = True
|
||||
|
||||
if not affected_ips:
|
||||
await repo.set_state(_STATE_KEY, {"last_log_id": state.last_log_id})
|
||||
return
|
||||
@@ -86,27 +87,10 @@ async def _incremental_update(repo: BaseRepository, state: _WorkerState) -> None
|
||||
await _update_profiles(repo, state, affected_ips)
|
||||
await repo.set_state(_STATE_KEY, {"last_log_id": state.last_log_id})
|
||||
|
||||
logger.info("attacker worker: updated %d profiles (incremental)", len(affected_ips))
|
||||
|
||||
|
||||
async def _cold_start(repo: BaseRepository, state: _WorkerState) -> None:
|
||||
all_logs = await repo.get_all_logs_raw()
|
||||
if not all_logs:
|
||||
state.last_log_id = await repo.get_max_log_id()
|
||||
state.initialized = True
|
||||
await repo.set_state(_STATE_KEY, {"last_log_id": state.last_log_id})
|
||||
return
|
||||
|
||||
for row in all_logs:
|
||||
state.engine.ingest(row["raw_line"])
|
||||
state.last_log_id = max(state.last_log_id, row["id"])
|
||||
|
||||
all_ips = set(state.engine._events.keys())
|
||||
await _update_profiles(repo, state, all_ips)
|
||||
await repo.set_state(_STATE_KEY, {"last_log_id": state.last_log_id})
|
||||
|
||||
state.initialized = True
|
||||
logger.info("attacker worker: cold start rebuilt %d profiles", len(all_ips))
|
||||
if was_cold:
|
||||
logger.info("attacker worker: cold start rebuilt %d profiles", len(affected_ips))
|
||||
else:
|
||||
logger.info("attacker worker: updated %d profiles (incremental)", len(affected_ips))
|
||||
|
||||
|
||||
async def _update_profiles(
|
||||
|
||||
@@ -111,11 +111,6 @@ class BaseRepository(ABC):
|
||||
"""Store a specific state entry by key."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_all_logs_raw(self) -> list[dict[str, Any]]:
|
||||
"""Retrieve all log rows with fields needed by the attacker profile worker."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_max_log_id(self) -> int:
|
||||
"""Return the highest log ID, or 0 if the table is empty."""
|
||||
|
||||
@@ -413,34 +413,6 @@ class SQLModelRepository(BaseRepository):
|
||||
|
||||
# ----------------------------------------------------------- attackers
|
||||
|
||||
async def get_all_logs_raw(self) -> List[dict[str, Any]]:
|
||||
async with self.session_factory() as session:
|
||||
result = await session.execute(
|
||||
select(
|
||||
Log.id,
|
||||
Log.raw_line,
|
||||
Log.attacker_ip,
|
||||
Log.service,
|
||||
Log.event_type,
|
||||
Log.decky,
|
||||
Log.timestamp,
|
||||
Log.fields,
|
||||
)
|
||||
)
|
||||
return [
|
||||
{
|
||||
"id": r.id,
|
||||
"raw_line": r.raw_line,
|
||||
"attacker_ip": r.attacker_ip,
|
||||
"service": r.service,
|
||||
"event_type": r.event_type,
|
||||
"decky": r.decky,
|
||||
"timestamp": r.timestamp,
|
||||
"fields": r.fields,
|
||||
}
|
||||
for r in result.all()
|
||||
]
|
||||
|
||||
async def get_all_bounties_by_ip(self) -> dict[str, List[dict[str, Any]]]:
|
||||
from collections import defaultdict
|
||||
async with self.session_factory() as session:
|
||||
|
||||
@@ -40,6 +40,8 @@ async def stream_events(
|
||||
loops_since_stats = 0
|
||||
emitted_chunks = 0
|
||||
try:
|
||||
yield ": keepalive\n\n" # flush headers immediately; helps diagnose pre-yield hangs
|
||||
|
||||
if last_id == 0:
|
||||
last_id = await repo.get_max_log_id()
|
||||
|
||||
@@ -90,4 +92,11 @@ async def stream_events(
|
||||
log.exception("SSE stream error for user %s", last_event_id)
|
||||
yield f"event: error\ndata: {json.dumps({'type': 'error', 'message': 'Stream interrupted'})}\n\n"
|
||||
|
||||
return StreamingResponse(event_generator(), media_type="text/event-stream")
|
||||
return StreamingResponse(
|
||||
event_generator(),
|
||||
media_type="text/event-stream",
|
||||
headers={
|
||||
"Cache-Control": "no-cache",
|
||||
"X-Accel-Buffering": "no",
|
||||
},
|
||||
)
|
||||
|
||||
Reference in New Issue
Block a user