db: switch MySQL driver to asyncmy, env-tune pool, serialize DDL

- aiomysql → asyncmy in both the connection-URL scheme and the driver import (faster, maintained).
- Pool sizing now reads DECNET_DB_POOL_SIZE / MAX_OVERFLOW / RECYCLE /
  PRE_PING for both SQLite and MySQL engines so stress runs can bump
  without code edits.
- MySQL initialize() now wraps schema DDL in a GET_LOCK advisory lock so
  concurrent uvicorn workers racing create_all() don't hit 'Table was
  skipped since its definition is being modified by concurrent DDL'.
- The SQLite & MySQL repositories' get_log_histogram now uses the shared _session() helper
  instead of session_factory(), for consistency with the rest of each repository.
- SSE stream_events docstring updated to asyncmy.
This commit is contained in:
2026-04-17 15:01:49 -04:00
parent 3945e72e11
commit 467511e997
5 changed files with 41 additions and 17 deletions

View File

@@ -1,3 +1,5 @@
import os
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, async_sessionmaker, create_async_engine
from sqlalchemy import create_engine, Engine, event
from sqlmodel import SQLModel
@@ -11,9 +13,20 @@ def get_async_engine(db_path: str) -> AsyncEngine:
prefix = "sqlite+aiosqlite:///"
if db_path.startswith(":memory:"):
prefix = "sqlite+aiosqlite://"
pool_size = int(os.environ.get("DECNET_DB_POOL_SIZE", "20"))
max_overflow = int(os.environ.get("DECNET_DB_MAX_OVERFLOW", "40"))
pool_recycle = int(os.environ.get("DECNET_DB_POOL_RECYCLE", "3600"))
pool_pre_ping = os.environ.get("DECNET_DB_POOL_PRE_PING", "true").lower() == "true"
engine = create_async_engine(
f"{prefix}{db_path}",
echo=False,
pool_size=pool_size,
max_overflow=max_overflow,
pool_recycle=pool_recycle,
pool_pre_ping=pool_pre_ping,
connect_args={"uri": True, "timeout": 30},
)

View File

@@ -54,6 +54,6 @@ class SQLiteRepository(SQLModelRepository):
literal_column("bucket_time")
)
async with self.session_factory() as session:
async with self._session() as session:
results = await session.execute(statement)
return [{"time": r[0], "count": r[1]} for r in results.all()]