feat(swarm): self-destruct agent on decommission

Decommissioning a worker from the dashboard (or swarm controller) now
asks the agent to wipe its own install before the master forgets it.
The agent stops decky containers + every decnet-* systemd unit, then
deletes /opt/decnet*, /etc/systemd/system/decnet-*, /var/lib/decnet/*,
and /usr/local/bin/decnet*. Logs under /var/log are preserved.

The reaper runs as a detached /tmp script (start_new_session=True) so
it survives the agent process being killed. Self-destruct dispatch is
best-effort — a dead worker doesn't block master-side cleanup.
This commit is contained in:
2026-04-19 20:47:09 -04:00
parent 9d68bb45c7
commit 14250cacad
7 changed files with 231 additions and 2 deletions

View File

@@ -87,6 +87,20 @@ async def teardown(req: TeardownRequest) -> dict:
return {"status": "torn_down", "decky_id": req.decky_id}
@app.post("/self-destruct")
async def self_destruct() -> dict:
"""Stop all DECNET services on this worker and delete the install
footprint. Called by the master during decommission. Logs under
/var/log/decnet* are preserved. Fire-and-forget — returns 202 before
the reaper starts deleting files."""
try:
await _exec.self_destruct()
except Exception as exc:
log.exception("agent.self_destruct failed")
raise HTTPException(status_code=500, detail=str(exc)) from exc
return {"status": "self_destruct_scheduled"}
@app.post("/mutate")
async def mutate(req: MutateRequest) -> dict:
# Service rotation is routed through the deployer's existing mutate path

View File

@@ -115,6 +115,76 @@ def _decky_runtime_states(config: DecnetConfig) -> dict[str, dict[str, Any]]:
return out
_REAPER_SCRIPT = r"""#!/bin/bash
# DECNET agent self-destruct reaper.
# Runs detached from the agent process so it survives the agent's death.
# Waits briefly for the HTTP response to drain, then stops services,
# wipes install paths, and preserves logs.
set +e
sleep 3
# Stop decky containers started by the local deployer (best-effort).
if command -v docker >/dev/null 2>&1; then
docker ps -q --filter "label=com.docker.compose.project=decnet" | xargs -r docker stop
docker ps -aq --filter "label=com.docker.compose.project=decnet" | xargs -r docker rm -f
docker network rm decnet_lan 2>/dev/null
fi
# Stop+disable every systemd unit the installer may have dropped.
for unit in decnet-agent decnet-engine decnet-collector decnet-forwarder decnet-prober decnet-sniffer decnet-updater; do
systemctl stop "$unit" 2>/dev/null
systemctl disable "$unit" 2>/dev/null
done
# Nuke install paths. Logs under /var/log/decnet* are intentionally
# preserved — the operator typically wants them for forensic review.
rm -rf /opt/decnet* /var/lib/decnet/* /usr/local/bin/decnet*
rm -f /etc/systemd/system/decnet-*.service /etc/systemd/system/decnet-*.timer
systemctl daemon-reload 2>/dev/null
rm -f "$0"
"""
async def self_destruct() -> None:
    """Tear down deckies, then spawn a detached reaper that wipes the
    install footprint.

    Returns immediately so the HTTP response can drain before the reaper
    starts deleting files out from under the agent. Pre-reap teardown
    failures are logged and swallowed on purpose (the reaper force-stops
    containers anyway); a failure to schedule the reaper itself is raised
    to the caller.
    """
    import os
    import subprocess  # nosec B404
    import tempfile

    # Best-effort teardown first — the reaper also runs docker stop, but
    # going through the deployer gives the host-macvlan/ipvlan helper a
    # chance to clean up routes cleanly.
    try:
        await asyncio.to_thread(_deployer.teardown, None)
        await asyncio.to_thread(clear_state)
    except Exception:
        log.exception("self_destruct: pre-reap teardown failed — reaper will force-stop containers")

    # Reaper lives under /tmp so it survives rm -rf /opt/decnet*.
    fd, path = tempfile.mkstemp(prefix="decnet-reaper-", suffix=".sh", dir="/tmp")  # nosec B108 — reaper must outlive /opt/decnet removal
    try:
        os.write(fd, _REAPER_SCRIPT.encode())
    finally:
        os.close(fd)
    try:
        os.chmod(path, 0o700)  # nosec B103 — root-owned reaper, needs exec
        # start_new_session detaches from the agent process group so the
        # reaper isn't killed when systemctl stop decnet-agent fires.
        subprocess.Popen(  # nosec B603
            ["/bin/bash", path],
            stdin=subprocess.DEVNULL,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            start_new_session=True,
            close_fds=True,
        )
    except Exception:
        # Don't leave an orphaned reaper script behind if the spawn
        # failed — the endpoint surfaces the error as a 500.
        try:
            os.unlink(path)
        except OSError:
            pass
        raise
    log.warning("self_destruct: reaper spawned path=%s — agent will die in ~3s", path)
async def status() -> dict[str, Any]:
state = await asyncio.to_thread(load_state)
if state is None:

View File

@@ -185,6 +185,12 @@ class AgentClient:
resp.raise_for_status()
return resp.json()
async def self_destruct(self) -> dict[str, Any]:
    """Ask the worker agent to stop its services and wipe its install."""
    client = self._require_client()
    response = await client.post("/self-destruct")
    response.raise_for_status()
    return response.json()
# -------------------------------------------------------------- diagnostics
def __repr__(self) -> str:

View File

@@ -3,6 +3,9 @@
Removes the DeckyShard rows bound to the host (portable cascade — MySQL
and SQLite both honor it via the repo layer), deletes the SwarmHost row,
and best-effort-cleans the per-worker bundle directory on the master.
Also asks the worker agent to wipe its own install (keeping logs). A
dead/unreachable worker does not block master-side cleanup.
"""
from __future__ import annotations
@@ -10,9 +13,12 @@ import pathlib
from fastapi import APIRouter, Depends, HTTPException, status
from decnet.logging import get_logger
from decnet.swarm.client import AgentClient
from decnet.web.db.repository import BaseRepository
from decnet.web.dependencies import get_repo
log = get_logger("swarm.decommission")
router = APIRouter()
@@ -29,6 +35,16 @@ async def api_decommission_host(
if row is None:
raise HTTPException(status_code=404, detail="host not found")
try:
async with AgentClient(host=row) as agent:
await agent.self_destruct()
except Exception:
log.exception(
"decommission: self-destruct dispatch failed host=%s"
"proceeding with master-side cleanup anyway",
row.get("name"),
)
await repo.delete_decky_shards_for_host(uuid)
await repo.delete_swarm_host(uuid)

View File

@@ -1,13 +1,22 @@
"""DELETE /swarm/hosts/{uuid} — decommission a worker from the dashboard."""
"""DELETE /swarm/hosts/{uuid} — decommission a worker from the dashboard.
Also instructs the worker agent to stop all DECNET services and delete
its install footprint (keeping logs). Agent self-destruct failure does
not block decommission — the master-side cleanup always runs so a dead
worker can still be removed from the dashboard.
"""
from __future__ import annotations
import pathlib
from fastapi import APIRouter, Depends, HTTPException, status
from decnet.logging import get_logger
from decnet.swarm.client import AgentClient
from decnet.web.db.repository import BaseRepository
from decnet.web.dependencies import get_repo, require_admin
log = get_logger("swarm.decommission")
router = APIRouter()
@@ -25,6 +34,21 @@ async def decommission_host(
if row is None:
raise HTTPException(status_code=404, detail="host not found")
# Ask the worker to wipe its own install (keeps logs). The agent
# schedules the reaper as a detached process and returns immediately,
# so this call is fast when the worker is reachable. A dead worker
# shouldn't block the operator from cleaning up the dashboard entry,
# hence best-effort with a log and continue.
try:
async with AgentClient(host=row) as agent:
await agent.self_destruct()
except Exception:
log.exception(
"decommission: self-destruct dispatch failed host=%s"
"proceeding with master-side cleanup anyway",
row.get("name"),
)
await repo.delete_decky_shards_for_host(uuid)
await repo.delete_swarm_host(uuid)