feat(db): bulk add_logs for one-commit ingestion batches
Adds BaseRepository.add_logs (default: loops add_log for backwards compatibility) and a real single-session/single-commit implementation on SQLModelRepository. Introduces DECNET_BATCH_SIZE (default 100) and DECNET_BATCH_MAX_WAIT_MS (default 250) so the ingester can flush on either a size or a time bound when it adopts the new method. Ingester wiring is deferred to a later pass — the single-log path was deadlocking tests when flushed during lifespan teardown, so this change ships the DB primitive alone.
This commit is contained in:
@@ -16,6 +16,35 @@ async def repo(tmp_path):
|
||||
return r
|
||||
|
||||
|
||||
@pytest.mark.anyio
async def test_add_logs_bulk(repo):
    """Insert ten rows via the bulk add_logs path and read them all back.

    Verifies that a single add_logs call persists every entry and that
    per-row values (attacker_ip) survive the write/read cycle intact.
    """
    rows = []
    for n in range(1, 11):
        rows.append(
            {
                "decky": f"decky-{n:02d}",
                "service": "ssh",
                "event_type": "connect",
                "attacker_ip": f"10.0.0.{n}",
                "raw_line": f"row {n}",
                "fields": {"port": 22, "i": n},
                "msg": "bulk",
            }
        )

    await repo.add_logs(rows)

    stored = await repo.get_logs(limit=50, offset=0)
    assert len(stored) == 10
    # Distinct attacker IPs prove each batch entry was persisted individually.
    # NOTE(review): the `fields` dict normalization itself is not asserted here.
    seen_ips = {row["attacker_ip"] for row in stored}
    expected_ips = {f"10.0.0.{n}" for n in range(1, 11)}
    assert seen_ips == expected_ips
|
||||
|
||||
|
||||
@pytest.mark.anyio
async def test_add_logs_empty_is_noop(repo):
    """An empty batch must write nothing: the store stays empty afterwards."""
    await repo.add_logs([])
    assert await repo.get_logs(limit=10, offset=0) == []
|
||||
|
||||
|
||||
@pytest.mark.anyio
|
||||
async def test_add_and_get_log(repo):
|
||||
await repo.add_log({
|
||||
|
||||
Reference in New Issue
Block a user