feat(ssh-stealth): hide capture artifacts via XOR+gzip entrypoint blob
The /opt/emit_capture.py, /opt/syslog_bridge.py, and /usr/libexec/udev/journal-relay files were plaintext and world-readable to any attacker root-shelled into the SSH honeypot — revealing the full capture logic on a single cat. Pack all three into /entrypoint.sh as XOR+gzip+base64 blobs at build time (_build_stealth.py), then decode in-memory at container start and exec the capture loop from a bash -c string. No .py files under /opt, no journal-relay file under /usr/libexec/udev, no argv_zap name anywhere. The LD_PRELOAD shim is installed as /usr/lib/x86_64-linux-gnu/libudev-shared.so.1 — sits next to the real libudev.so.1 and blends into the multiarch layout. A 1-byte random XOR key is chosen at image build so a bare 'base64 -d | gunzip' probe on the visible entrypoint returns binary noise instead of readable Python. Docker-dependent tests live under tests/docker/ behind a new 'docker' pytest marker (excluded from the default run, same pattern as fuzz / live / bench).
This commit is contained in:
@@ -20,6 +20,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
nmap \
|
||||
jq \
|
||||
python3 \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN mkdir -p /var/run/sshd /root/.ssh /var/log/journal /var/lib/systemd/coredump \
|
||||
@@ -45,10 +46,15 @@ RUN printf '%s\n' \
|
||||
'user.* /proc/1/fd/1;RFC5424fmt' \
|
||||
> /etc/rsyslog.d/50-journal-forward.conf
|
||||
|
||||
# Silence default catch-all rules so we own auth/user routing exclusively
|
||||
# Silence default catch-all rules so we own auth/user routing exclusively.
|
||||
# Also disable rsyslog's privilege drop: PID 1's stdout (/proc/1/fd/1) is
|
||||
# owned by root, so a syslog-user rsyslogd gets EACCES and silently drops
|
||||
# every auth/user line (bash CMD events + file_captured emissions).
|
||||
RUN sed -i \
|
||||
-e 's|^\(\*\.\*;auth,authpriv\.none\)|#\1|' \
|
||||
-e 's|^auth,authpriv\.\*|#auth,authpriv.*|' \
|
||||
-e 's|^\$PrivDropToUser|#$PrivDropToUser|' \
|
||||
-e 's|^\$PrivDropToGroup|#$PrivDropToGroup|' \
|
||||
/etc/rsyslog.conf
|
||||
|
||||
# Sudo: log to syslog (auth facility) AND a local file with full I/O capture
|
||||
@@ -77,27 +83,30 @@ RUN mkdir -p /root/projects /root/backups /var/www/html && \
|
||||
printf 'DB_HOST=10.0.0.5\nDB_USER=admin\nDB_PASS=changeme123\nDB_NAME=prod_db\n' > /root/projects/.env && \
|
||||
printf '[Unit]\nDescription=App Server\n[Service]\nExecStart=/usr/bin/python3 /opt/app/server.py\n' > /root/projects/app.service
|
||||
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
# Capture machinery is installed under plausible systemd/udev paths so casual
|
||||
# `ps aux` inspection doesn't scream "honeypot". The script runs as
|
||||
# `journal-relay` and inotifywait is invoked through a symlink named
|
||||
# `kmsg-watch` — both names blend in with normal udev/journal daemons.
|
||||
COPY capture.sh /usr/libexec/udev/journal-relay
|
||||
# Stage all capture sources in a scratch dir. Nothing here survives the layer:
|
||||
# _build_stealth.py packs syslog_bridge.py + emit_capture.py + capture.sh into
|
||||
# XOR+gzip+base64 blobs embedded directly in /entrypoint.sh, and the whole
|
||||
# /tmp/build tree is wiped at the end of the RUN — so the final image has no
|
||||
# `.py` file under /opt and no `journal-relay` script under /usr/libexec/udev.
|
||||
COPY entrypoint.sh capture.sh syslog_bridge.py emit_capture.py \
|
||||
argv_zap.c _build_stealth.py /tmp/build/
|
||||
|
||||
# argv_zap.so: LD_PRELOAD shim that blanks argv[1..] after the target parses
|
||||
# its args, so /proc/PID/cmdline shows only argv[0] (no watch paths / flags
|
||||
# leaking from inotifywait's command line). gcc is installed only for the
|
||||
# build and purged in the same layer to keep the image slim.
|
||||
COPY argv_zap.c /tmp/argv_zap.c
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends gcc libc6-dev \
|
||||
&& gcc -O2 -fPIC -shared -o /usr/lib/argv_zap.so /tmp/argv_zap.c -ldl \
|
||||
# argv_zap is compiled into a shared object disguised as a multiarch
|
||||
# udev-companion library (sits next to real libudev.so.1). gcc is installed
|
||||
# only for this build step and purged in the same layer.
|
||||
RUN set -eu \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends gcc libc6-dev \
|
||||
&& mkdir -p /usr/lib/x86_64-linux-gnu /usr/libexec/udev \
|
||||
&& gcc -O2 -fPIC -shared \
|
||||
-o /usr/lib/x86_64-linux-gnu/libudev-shared.so.1 \
|
||||
/tmp/build/argv_zap.c -ldl \
|
||||
&& apt-get purge -y gcc libc6-dev \
|
||||
&& apt-get autoremove -y \
|
||||
&& rm -rf /var/lib/apt/lists/* /tmp/argv_zap.c
|
||||
|
||||
RUN mkdir -p /usr/libexec/udev \
|
||||
&& chmod +x /entrypoint.sh /usr/libexec/udev/journal-relay \
|
||||
&& ln -sf /usr/bin/inotifywait /usr/libexec/udev/kmsg-watch
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& ln -sf /usr/bin/inotifywait /usr/libexec/udev/kmsg-watch \
|
||||
&& python3 /tmp/build/_build_stealth.py \
|
||||
&& rm -rf /tmp/build
|
||||
|
||||
EXPOSE 22
|
||||
|
||||
|
||||
89
templates/ssh/_build_stealth.py
Normal file
89
templates/ssh/_build_stealth.py
Normal file
@@ -0,0 +1,89 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Build-time helper: merge capture Python sources, XOR+gzip+base64 pack them
|
||||
and the capture.sh loop, and render the final /entrypoint.sh from its
|
||||
templated form.
|
||||
|
||||
Runs inside the Docker build. Reads from /tmp/build/, writes /entrypoint.sh.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import gzip
|
||||
import random
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
BUILD = Path("/tmp/build")
|
||||
|
||||
|
||||
def _merge_python() -> str:
    """Concatenate syslog_bridge.py and emit_capture.py into one module text.

    Cross-file glue (the sys.path hack and the `from syslog_bridge` import)
    is dropped, and `from __future__` imports are deduplicated and hoisted
    to the very top so the merged source remains one valid module.
    """

    def _split(source: str) -> tuple[list[str], list[str]]:
        # Partition into (future-import lines, everything else), discarding
        # lines that only make sense while the two files live apart.
        future_lines: list[str] = []
        body_lines: list[str] = []
        for raw in source.splitlines():
            head = raw.lstrip()
            if head.startswith("from __future__"):
                future_lines.append(raw)
                continue
            if head.startswith(("sys.path.insert", "from syslog_bridge")):
                continue
            body_lines.append(raw)
        return future_lines, body_lines

    bridge_fut, bridge_body = _split((BUILD / "syslog_bridge.py").read_text())
    emit_fut, emit_body = _split((BUILD / "emit_capture.py").read_text())

    # First occurrence wins; comparison ignores surrounding whitespace.
    hoisted: list[str] = []
    already: set[str] = set()
    for candidate in bridge_fut + emit_fut:
        key = candidate.strip()
        if key in already:
            continue
        already.add(key)
        hoisted.append(candidate)

    prologue = "\n".join(hoisted)
    merged_body = "\n".join(bridge_body) + "\n\n" + "\n".join(emit_body)
    return (prologue + "\n" if prologue else "") + merged_body
|
||||
|
||||
|
||||
def _pack(text: str, key: int) -> str:
|
||||
gz = gzip.compress(text.encode("utf-8"))
|
||||
xored = bytes(b ^ key for b in gz)
|
||||
return base64.b64encode(xored).decode("ascii")
|
||||
|
||||
|
||||
def main() -> int:
    """Render /entrypoint.sh with freshly packed capture blobs.

    Returns 0 on success, 1 if any template placeholder survives rendering
    (shipping an unrendered entrypoint would break the container).
    """
    # Fresh 1-byte key per image build: a canned decode recipe learned from
    # one image does not transfer to another.
    key = random.SystemRandom().randint(1, 255)

    substitutions = {
        "__STEALTH_KEY__": str(key),
        "__EMIT_CAPTURE_B64__": _pack(_merge_python(), key),
        "__JOURNAL_RELAY_B64__": _pack((BUILD / "capture.sh").read_text(), key),
    }

    rendered = (BUILD / "entrypoint.sh").read_text()
    for placeholder, value in substitutions.items():
        rendered = rendered.replace(placeholder, value)

    # Sanity check: every placeholder must be gone from the final script.
    for marker in ("__STEALTH_KEY__", "__EMIT_CAPTURE_B64__", "__JOURNAL_RELAY_B64__"):
        if marker in rendered:
            print(f"build: placeholder {marker} still present after render", file=sys.stderr)
            return 1

    target = Path("/entrypoint.sh")
    target.write_text(rendered)
    target.chmod(0o755)
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@@ -192,16 +192,27 @@ _capture_one() {
|
||||
local mtime
|
||||
mtime="$(stat -c '%y' "$src" 2>/dev/null)"
|
||||
|
||||
local decky="${HOSTNAME:-unknown}"
|
||||
# Prefer NODE_NAME (the deployer-supplied decky identifier) over
|
||||
# $HOSTNAME, which is a cosmetic fake like "SRV-DEV-36" set by
|
||||
# entrypoint.sh. The UI and the artifact bind mount both key on the
|
||||
# decky name, so using $HOSTNAME here makes /artifacts/{decky}/... URLs
|
||||
# unresolvable.
|
||||
local decky="${NODE_NAME:-${HOSTNAME:-unknown}}"
|
||||
|
||||
# One syslog line, no sidecar. Flat summary fields ride as top-level SD
|
||||
# params (searchable pills in the UI); bulky nested structures (writer
|
||||
# cmdline, concurrent_sessions, ss_snapshot) are base64-packed into a
|
||||
# single meta_json_b64 SD param by emit_capture.py.
|
||||
jq -n \
|
||||
--arg _hostname "$decky" \
|
||||
--arg _service "ssh" \
|
||||
--arg _event_type "file_captured" \
|
||||
--arg captured_at "$ts" \
|
||||
--arg orig_path "$src" \
|
||||
--arg stored_as "$stored_as" \
|
||||
--arg sha "$sha" \
|
||||
--arg sha256 "$sha" \
|
||||
--argjson size "$size" \
|
||||
--arg mtime "$mtime" \
|
||||
--arg decky "$decky" \
|
||||
--arg attribution "$attribution" \
|
||||
--arg writer_pid "${writer_pid:-}" \
|
||||
--arg writer_comm "${writer_comm:-}" \
|
||||
@@ -215,41 +226,37 @@ _capture_one() {
|
||||
--argjson concurrent "$who_json" \
|
||||
--argjson ss_snapshot "$ss_json" \
|
||||
'{
|
||||
_hostname: $_hostname,
|
||||
_service: $_service,
|
||||
_event_type: $_event_type,
|
||||
captured_at: $captured_at,
|
||||
orig_path: $orig_path,
|
||||
stored_as: $stored_as,
|
||||
sha256: $sha,
|
||||
sha256: $sha256,
|
||||
size: $size,
|
||||
mtime: $mtime,
|
||||
decky: $decky,
|
||||
attribution: $attribution,
|
||||
writer: {
|
||||
pid: ($writer_pid | if . == "" then null else tonumber? end),
|
||||
comm: $writer_comm,
|
||||
cmdline: $writer_cmdline,
|
||||
uid: ($writer_uid | if . == "" then null else tonumber? end),
|
||||
loginuid: ($writer_loginuid | if . == "" then null else tonumber? end)
|
||||
},
|
||||
ssh_session: {
|
||||
pid: ($ssh_pid | if . == "" then null else tonumber? end),
|
||||
user: (if $ssh_user == "" then null else $ssh_user end),
|
||||
src_ip: (if $src_ip == "" then null else $src_ip end),
|
||||
src_port: ($src_port | if . == "null" or . == "" then null else tonumber? end)
|
||||
},
|
||||
writer_pid: $writer_pid,
|
||||
writer_comm: $writer_comm,
|
||||
writer_uid: $writer_uid,
|
||||
ssh_pid: $ssh_pid,
|
||||
ssh_user: $ssh_user,
|
||||
src_ip: $src_ip,
|
||||
src_port: (if $src_port == "null" or $src_port == "" then "" else $src_port end),
|
||||
writer_cmdline: $writer_cmdline,
|
||||
writer_loginuid: $writer_loginuid,
|
||||
concurrent_sessions: $concurrent,
|
||||
ss_snapshot: $ss_snapshot
|
||||
}' > "$CAPTURE_DIR/$stored_as.meta.json"
|
||||
|
||||
logger -p user.info -t systemd-journal \
|
||||
"file_captured orig_path=$src sha256=$sha size=$size stored_as=$stored_as src_ip=${src_ip:-unknown} ssh_user=${ssh_user:-unknown} attribution=$attribution"
|
||||
}' \
|
||||
| python3 <(printf '%s' "$EMIT_CAPTURE_PY")
|
||||
}
|
||||
|
||||
# Main loop.
|
||||
# LD_PRELOAD argv_zap.so blanks argv[1..] after inotifywait parses its args,
|
||||
# LD_PRELOAD libudev-shared.so.1 blanks argv[1..] after inotifywait parses its args,
|
||||
# so /proc/PID/cmdline shows only "kmsg-watch" — the watch paths and flags
|
||||
# never make it to `ps aux`.
|
||||
# shellcheck disable=SC2086
|
||||
ARGV_ZAP_COMM=kmsg-watch LD_PRELOAD=/usr/lib/argv_zap.so "$INOTIFY_BIN" -m -r -q \
|
||||
ARGV_ZAP_COMM=kmsg-watch LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libudev-shared.so.1 "$INOTIFY_BIN" -m -r -q \
|
||||
--event close_write --event moved_to \
|
||||
--format '%w%f' \
|
||||
$CAPTURE_WATCH_PATHS 2>/dev/null \
|
||||
|
||||
84
templates/ssh/emit_capture.py
Normal file
84
templates/ssh/emit_capture.py
Normal file
@@ -0,0 +1,84 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Emit an RFC 5424 `file_captured` line to stdout.
|
||||
|
||||
Called by capture.sh after a file drop has been mirrored into the quarantine
|
||||
directory. Reads a single JSON object from stdin describing the event; emits
|
||||
one syslog line that the collector parses into `logs.fields`.
|
||||
|
||||
The input JSON may contain arbitrary nested structures (writer cmdline,
|
||||
concurrent_sessions, ss_snapshot). Bulky fields are base64-encoded into a
|
||||
single `meta_json_b64` SD param — this avoids pathological characters
|
||||
(`]`, `"`, `\\`) that the collector's SD-block regex cannot losslessly
|
||||
round-trip when embedded directly.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
from syslog_bridge import syslog_line, write_syslog_file # noqa: E402
|
||||
|
||||
# Flat fields ride as individual SD params (searchable, rendered as pills).
|
||||
# Everything else is rolled into the base64 meta blob.
|
||||
_FLAT_FIELDS: tuple[str, ...] = (
|
||||
"stored_as",
|
||||
"sha256",
|
||||
"size",
|
||||
"orig_path",
|
||||
"src_ip",
|
||||
"src_port",
|
||||
"ssh_user",
|
||||
"ssh_pid",
|
||||
"attribution",
|
||||
"writer_pid",
|
||||
"writer_comm",
|
||||
"writer_uid",
|
||||
"mtime",
|
||||
)
|
||||
|
||||
|
||||
def main() -> int:
    """Read one JSON event from stdin and emit it as an RFC 5424 line.

    Whitelisted flat fields become individual SD params; everything left
    over is packed into a single base64 `meta_json_b64` param. Returns 0 on
    success, 1 on empty or malformed input.
    """
    stdin_text = sys.stdin.read()
    if not stdin_text.strip():
        print("emit_capture: empty stdin", file=sys.stderr)
        return 1

    try:
        event: dict = json.loads(stdin_text)
    except json.JSONDecodeError as exc:
        print(f"emit_capture: bad JSON: {exc}", file=sys.stderr)
        return 1

    # Routing metadata travels in-band under underscore keys; pop it out so
    # it never leaks into the SD params.
    hostname = str(event.pop("_hostname", None) or os.environ.get("HOSTNAME") or "-")
    service = str(event.pop("_service", "ssh"))
    event_type = str(event.pop("_event_type", "file_captured"))

    sd_params: dict[str, str] = {}
    for name in _FLAT_FIELDS:
        if name not in event:
            continue
        value = event.pop(name)
        # Absent/empty values are dropped rather than rendered as "".
        if value is None or value == "":
            continue
        sd_params[name] = str(value)

    if event:
        # Whatever is not whitelisted rides as one opaque base64 blob — keeps
        # `]`, `"` and `\` out of the SD-param values entirely.
        blob = json.dumps(event, separators=(",", ":"), ensure_ascii=False, sort_keys=True)
        sd_params["meta_json_b64"] = base64.b64encode(blob.encode("utf-8")).decode("ascii")

    write_syslog_file(
        syslog_line(
            service=service,
            hostname=hostname,
            event_type=event_type,
            **sd_params,
        )
    )
    return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@@ -35,18 +35,45 @@ fi
|
||||
# No intermediate pipe/relay — a named FIFO would be readable AND writable
|
||||
# by any root-in-container process, letting an attacker either eavesdrop on
|
||||
# the SIEM feed or inject forged log lines.
|
||||
if [ -n "${NODE_NAME:-}" ]; then
|
||||
printf '$LocalHostName %s\n' "$NODE_NAME" > /etc/rsyslog.d/00-localhostname.conf
|
||||
fi
|
||||
rsyslogd
|
||||
|
||||
# File-catcher: mirror attacker drops into host-mounted quarantine with attribution.
|
||||
# Script lives at /usr/libexec/udev/journal-relay so `ps aux` shows a
|
||||
# plausible udev helper. See Dockerfile for the rename rationale.
|
||||
# LD_PRELOAD + ARGV_ZAP_COMM blank bash's argv[1..] so /proc/PID/cmdline
|
||||
# shows only "journal-relay" (no script path leak) and /proc/PID/comm
|
||||
# matches.
|
||||
CAPTURE_DIR=/var/lib/systemd/coredump \
|
||||
LD_PRELOAD=/usr/lib/argv_zap.so \
|
||||
ARGV_ZAP_COMM=journal-relay \
|
||||
bash -c 'exec -a "journal-relay" bash /usr/libexec/udev/journal-relay' &
|
||||
# ─── systemd-journal companion bootstrap ─────────────────────────────────────
|
||||
# The next block primes cached udev/journal helpers from in-memory tables.
|
||||
# Tables are XOR+gzip+base64 packed at image build. Keeping the data inline
|
||||
# avoids a separate /usr/libexec script (which would be visible and readable)
|
||||
# and means the helpers only ever live in this process's memory + an
|
||||
# anonymous pipe the kernel hands to python via /dev/fd.
|
||||
_STEALTH_KEY=__STEALTH_KEY__
|
||||
_EMIT_CAPTURE_B64='__EMIT_CAPTURE_B64__'
|
||||
_JOURNAL_RELAY_B64='__JOURNAL_RELAY_B64__'
|
||||
|
||||
# sshd logs via syslog — no -e flag, so auth events flow through rsyslog → pipe → stdout
|
||||
_decode() {
|
||||
printf '%s' "$1" | base64 -d | python3 -c '
|
||||
import sys
|
||||
k = '"$_STEALTH_KEY"'
|
||||
d = sys.stdin.buffer.read()
|
||||
sys.stdout.buffer.write(bytes(b ^ k for b in d))
|
||||
' | gunzip
|
||||
}
|
||||
|
||||
EMIT_CAPTURE_PY="$(_decode "$_EMIT_CAPTURE_B64")"
|
||||
_JOURNAL_RELAY_SRC="$(_decode "$_JOURNAL_RELAY_B64")"
|
||||
export EMIT_CAPTURE_PY
|
||||
unset _EMIT_CAPTURE_B64 _JOURNAL_RELAY_B64 _STEALTH_KEY
|
||||
|
||||
# Launch the file-capture loop from memory. LD_PRELOAD + ARGV_ZAP_COMM blank
|
||||
# argv[1..] so /proc/PID/cmdline shows only "journal-relay".
|
||||
(
|
||||
export CAPTURE_DIR=/var/lib/systemd/coredump
|
||||
export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libudev-shared.so.1
|
||||
export ARGV_ZAP_COMM=journal-relay
|
||||
exec -a journal-relay bash -c "$_JOURNAL_RELAY_SRC"
|
||||
) &
|
||||
|
||||
unset _JOURNAL_RELAY_SRC
|
||||
|
||||
# sshd logs via syslog — no -e flag, so auth events flow through rsyslog → /proc/1/fd/1 → stdout
|
||||
exec /usr/sbin/sshd -D
|
||||
|
||||
89
templates/ssh/syslog_bridge.py
Normal file
89
templates/ssh/syslog_bridge.py
Normal file
@@ -0,0 +1,89 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Shared RFC 5424 syslog helper used by service containers.
|
||||
|
||||
Services call syslog_line() to format an RFC 5424 message, then
|
||||
write_syslog_file() to emit it to stdout — the container runtime
|
||||
captures it, and the host-side collector streams it into the log file.
|
||||
|
||||
RFC 5424 structure:
|
||||
<PRI>1 TIMESTAMP HOSTNAME APP-NAME PROCID MSGID [SD-ELEMENT] MSG
|
||||
|
||||
Facility: local0 (16). SD element ID uses PEN 55555.
|
||||
"""
|
||||
|
||||
from datetime import datetime, timezone
|
||||
from typing import Any
|
||||
|
||||
# ─── Constants ────────────────────────────────────────────────────────────────
|
||||
|
||||
_FACILITY_LOCAL0 = 16
|
||||
_SD_ID = "relay@55555"
|
||||
_NILVALUE = "-"
|
||||
|
||||
SEVERITY_EMERG = 0
|
||||
SEVERITY_ALERT = 1
|
||||
SEVERITY_CRIT = 2
|
||||
SEVERITY_ERROR = 3
|
||||
SEVERITY_WARNING = 4
|
||||
SEVERITY_NOTICE = 5
|
||||
SEVERITY_INFO = 6
|
||||
SEVERITY_DEBUG = 7
|
||||
|
||||
_MAX_HOSTNAME = 255
|
||||
_MAX_APPNAME = 48
|
||||
_MAX_MSGID = 32
|
||||
|
||||
# ─── Formatter ────────────────────────────────────────────────────────────────
|
||||
|
||||
def _sd_escape(value: str) -> str:
|
||||
"""Escape SD-PARAM-VALUE per RFC 5424 §6.3.3."""
|
||||
return value.replace("\\", "\\\\").replace('"', '\\"').replace("]", "\\]")
|
||||
|
||||
|
||||
def _sd_element(fields: dict[str, Any]) -> str:
    """Render *fields* as one RFC 5424 SD-ELEMENT, or NILVALUE when empty."""
    if not fields:
        return _NILVALUE
    parts = [_SD_ID]
    for name, value in fields.items():
        parts.append(f'{name}="{_sd_escape(str(value))}"')
    return "[" + " ".join(parts) + "]"
|
||||
|
||||
|
||||
def syslog_line(
    service: str,
    hostname: str,
    event_type: str,
    severity: int = SEVERITY_INFO,
    timestamp: datetime | None = None,
    msg: str | None = None,
    **fields: Any,
) -> str:
    """
    Format one RFC 5424-compliant syslog line (no trailing newline).

    Layout: <PRI>1 TIMESTAMP HOSTNAME APP-NAME PROCID MSGID SD MSG

    Args:
        service: APP-NAME (e.g. "http", "mysql")
        hostname: HOSTNAME (node name)
        event_type: MSGID (e.g. "request", "login_attempt")
        severity: Syslog severity integer (default: INFO=6)
        timestamp: UTC datetime; defaults to now
        msg: Optional free-text MSG
        **fields: Encoded as structured data params
    """
    # PRI = facility * 8 + severity (RFC 5424 §6.2.1).
    priority = _FACILITY_LOCAL0 * 8 + severity
    when = timestamp if timestamp is not None else datetime.now(timezone.utc)

    # Empty identity fields collapse to NILVALUE; each is clamped to the
    # RFC's maximum length for its slot.
    host_part = (hostname or _NILVALUE)[:_MAX_HOSTNAME]
    app_part = (service or _NILVALUE)[:_MAX_APPNAME]
    msgid_part = (event_type or _NILVALUE)[:_MAX_MSGID]

    tail = f" {msg}" if msg else ""
    return (
        f"<{priority}>1 {when.isoformat()} {host_part} {app_part} "
        f"{_NILVALUE} {msgid_part} {_sd_element(fields)}{tail}"
    )
|
||||
|
||||
|
||||
def write_syslog_file(line: str) -> None:
    """Emit one syslog line on stdout, flushed so the runtime sees it immediately."""
    print(line, flush=True)
|
||||
|
||||
|
||||
def forward_syslog(line: str, log_target: str) -> None:
    """Intentional no-op: TCP forwarding is rsyslog's job, not this module's."""
    return None
|
||||
Reference in New Issue
Block a user