feat(db): bulk add_logs for one-commit ingestion batches

Adds BaseRepository.add_logs (default: loops add_log for backwards
compatibility) and a real single-session/single-commit implementation
on SQLModelRepository. Introduces DECNET_BATCH_SIZE (default 100) and
DECNET_BATCH_MAX_WAIT_MS (default 250) so the ingester can flush on
either a size or a time bound when it adopts the new method.

Ingester wiring is deferred to a later pass — the single-log path was
deadlocking tests when flushed during lifespan teardown, so this change
ships the DB primitive alone.
2026-04-17 16:23:09 -04:00
parent 45039bd621
commit 11b9e85874
4 changed files with 59 additions and 1 deletion
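
Not part of the diff below, but for context: a rough sketch of the size-or-time flush loop the ingester could run once it adopts add_logs. Only the two environment variables come from this change; the asyncio.Queue feed, the ingest_loop name, and the repo handle are placeholders to illustrate how the two bounds interact.

    import asyncio
    import os

    # Only these two knobs come from this change; defaults mirror the
    # commit message above.
    BATCH_SIZE = int(os.getenv("DECNET_BATCH_SIZE", "100"))
    MAX_WAIT_S = int(os.getenv("DECNET_BATCH_MAX_WAIT_MS", "250")) / 1000

    async def ingest_loop(queue: asyncio.Queue, repo) -> None:
        loop = asyncio.get_running_loop()
        while True:
            # Block until the first entry arrives, then open the wait window.
            batch = [await queue.get()]
            deadline = loop.time() + MAX_WAIT_S
            while len(batch) < BATCH_SIZE:
                remaining = deadline - loop.time()
                if remaining <= 0:
                    break  # time bound hit
                try:
                    batch.append(await asyncio.wait_for(queue.get(), remaining))
                except asyncio.TimeoutError:
                    break  # queue went quiet inside the window
            await repo.add_logs(batch)  # size bound hit, or window closed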


@@ -15,6 +15,15 @@ class BaseRepository(ABC):
         """Add a new log entry to the database."""
         pass
 
+    async def add_logs(self, log_entries: list[dict[str, Any]]) -> None:
+        """Bulk-insert log entries in a single transaction.
+
+        Default implementation falls back to per-row add_log; concrete
+        repositories should override for a real single-commit insert.
+        """
+        for _entry in log_entries:
+            await self.add_log(_entry)
+
     @abstractmethod
     async def get_logs(
         self,

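A hedged illustration, not in the commit: any repository that implements only the abstract add_log inherits the loop-based add_logs above, so callers can move to the bulk API before every backend grows a native override. The trade-off is that the default commits once per row (inside each add_log call), so it is not atomic across the batch the way an override can be. The class below is hypothetical, with its remaining abstract methods elided.

    from typing import Any

    # Hypothetical subclass (not in this commit); the other abstract
    # methods on BaseRepository are elided for brevity.
    class InMemoryRepository(BaseRepository):
        """Stores rows in a list; inherits the per-row add_logs fallback."""

        def __init__(self) -> None:
            self._rows: list[dict[str, Any]] = []

        async def add_log(self, log_data: dict[str, Any]) -> None:
            self._rows.append(log_data)

    # Callers can invoke repo.add_logs([...]) unchanged: the inherited
    # default issues one add_log call per entry.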

@@ -145,7 +145,8 @@ class SQLModelRepository(BaseRepository):
 
     # ---------------------------------------------------------------- logs
-    async def add_log(self, log_data: dict[str, Any]) -> None:
+    @staticmethod
+    def _normalize_log_row(log_data: dict[str, Any]) -> dict[str, Any]:
         data = log_data.copy()
         if "fields" in data and isinstance(data["fields"], dict):
             data["fields"] = orjson.dumps(data["fields"]).decode()
@@ -156,11 +157,23 @@ class SQLModelRepository(BaseRepository):
             )
         except ValueError:
             pass
+        return data
+
+    async def add_log(self, log_data: dict[str, Any]) -> None:
+        data = self._normalize_log_row(log_data)
         async with self._session() as session:
             session.add(Log(**data))
             await session.commit()
 
+    async def add_logs(self, log_entries: list[dict[str, Any]]) -> None:
+        """Bulk insert — one session, one commit for the whole batch."""
+        if not log_entries:
+            return
+        _rows = [Log(**self._normalize_log_row(e)) for e in log_entries]
+        async with self._session() as session:
+            session.add_all(_rows)
+            await session.commit()
+
     def _apply_filters(
         self,
         statement: SelectOfScalar,
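
Finally, a minimal call-site sketch. The repo argument is assumed to be an already constructed SQLModelRepository, and every dict key besides "fields" is illustrative; it shows what the override buys over the base-class default: one commit for the whole batch, so a failure rolls back every row instead of leaving a partial prefix committed.

    from typing import Any

    async def record_batch(repo: "SQLModelRepository") -> None:
        entries: list[dict[str, Any]] = [
            {"message": "ingester online", "fields": {"node": "a1"}},
            {"message": "first batch flushed", "fields": {"node": "a1"}},
        ]
        # One session, one commit: unlike the per-row base-class fallback,
        # an error on any row rolls the whole batch back.
        await repo.add_logs(entries)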