Compare commits: v0.1.0...49f3002c94 (237 commits)
| SHA1 | Author | Date | |
|---|---|---|---|
| 49f3002c94 | |||
| 9b59f8672e | |||
| 296979003d | |||
| 89099b903d | |||
| 29578d9d99 | |||
| 70d8ffc607 | |||
| 04db13afae | |||
| d1a88e75bd | |||
| 65ddb0b359 | |||
| b437bc8eec | |||
| a1ca5d699b | |||
| e9d151734d | |||
| 0ab97d0ade | |||
| 60de16be84 | |||
| 82ec7f3117 | |||
| 11d749f13d | |||
| a4798946c1 | |||
| d869eb3d23 | |||
| 89887ec6fd | |||
| 02e73a19d5 | |||
| b3efd646f6 | |||
| 2ec64ef2ef | |||
| e67624452e | |||
| e05b632e56 | |||
| c8f05df4d9 | |||
| 935a9a58d2 | |||
| 63efe6c7ba | |||
| 314e6c6388 | |||
| 12aa98a83c | |||
| 7dbc71d664 | |||
| dae3687089 | |||
| 187194786f | |||
| 9de320421e | |||
| dd4e2aad91 | |||
| 7d10b78d50 | |||
| ddfb232590 | |||
| d7da3a7fc7 | |||
| 947efe7bd1 | |||
| c603531fd2 | |||
| a78126b1ba | |||
| 0ee23b8700 | |||
| 0952a0b71e | |||
| 4683274021 | |||
| ab187f70a1 | |||
| 172a002d41 | |||
| f6cb90ee66 | |||
| 2d65d74069 | |||
| d5eb60cb41 | |||
| 47f2da1d50 | |||
| 53fdeee208 | |||
| a2ba7a7f3c | |||
| 3eab6e8773 | |||
| 5a7ff285cd | |||
| 1d73957832 | |||
| c2eceb147d | |||
| 09d9c0ec74 | |||
| 2dcf47985e | |||
| 5585e4ec58 | |||
| ce2699455b | |||
| df3f04c10e | |||
| 7ff5703250 | |||
| a6c7cfdf66 | |||
| 7ecb126c8e | |||
| f3bb0b31ae | |||
| 8c249f6987 | |||
| 24e0d98425 | |||
| 7756747787 | |||
| e312e072e4 | |||
| 5631d09aa8 | |||
| c2f7622fbb | |||
| 8335c5dc4c | |||
| b71db65149 | |||
| fd62413935 | |||
| ea340065c6 | |||
| a022b4fed6 | |||
| 3dc5b509f6 | |||
| c9be447a38 | |||
| 62db686b42 | |||
| 57d395d6d7 | |||
| ac094965b5 | |||
| 435c004760 | |||
| 89a2132c61 | |||
| 3d01ca2c2a | |||
| 8124424e96 | |||
| a4da9b8f32 | |||
| 448cb9cee0 | |||
| 035499f255 | |||
| 0706919469 | |||
| f2cc585d72 | |||
| 89abb6ecc6 | |||
| 03f5a7826f | |||
| a5eaa3291e | |||
| b2e4706a14 | |||
| 6095d0d2ed | |||
| 04685ba1c4 | |||
| 2ce3f7ee90 | |||
| cb4bac4b42 | |||
| 8d5944f775 | |||
| ea9f7e734b | |||
| fe18575a9c | |||
| 0f63820ee6 | |||
| fdc404760f | |||
| 95190946e0 | |||
| 1692df7360 | |||
| aac39e818e | |||
| ff38d58508 | |||
| f78104e1c8 | |||
| 99be4e64ad | |||
| c3c1cd2fa6 | |||
| 68b13b8a59 | |||
| f8bb134d70 | |||
| 20fba18711 | |||
| b325fc8c5f | |||
| 1484d2f625 | |||
| f8ae9ce2a6 | |||
| 662a5e43e8 | |||
| d63e396410 | |||
| 65d585569b | |||
| c384a3103a | |||
| c79f96f321 | |||
| d77def64c4 | |||
| ce182652ad | |||
| a6063efbb9 | |||
| d4ac53c0c9 | |||
| 9ca3b4691d | |||
| babad5ce65 | |||
| 7abae5571a | |||
| 377ba0410c | |||
| 5ef48d60be | |||
| fe46b8fc0b | |||
| c7713c6228 | |||
| 1196363d0b | |||
| 62a67f3d1d | |||
| 6df2c9ccbf | |||
| b1f6c3b84a | |||
| 5fdfe67f2f | |||
| 4fac9570ec | |||
| 5e83c9e48d | |||
| d8457c57f3 | |||
| 38d37f862b | |||
| fa8b0f3cb5 | |||
| db425df6f2 | |||
| 73e68388c0 | |||
| 682322d564 | |||
| 33885a2eec | |||
| f583b3d699 | |||
| 5cb6666d7b | |||
| 25b6425496 | |||
| 08242a4d84 | |||
| 63fb477e1f | |||
| 94f82c9089 | |||
| 40cd582253 | |||
| 24f02c3466 | |||
| 25ba3fb56a | |||
| 8d023147cc | |||
| 14f7a535db | |||
| cea6279a08 | |||
| 6b8392102e | |||
| d2a569496d | |||
| f20e86826d | |||
| 29da2a75b3 | |||
| 3362325479 | |||
| 34a57d6f09 | |||
| 016115a523 | |||
| 0166d0d559 | |||
| dbf6d13b95 | |||
| d15c106b44 | |||
| 6fc1a2a3ea | |||
| de84cc664f | |||
| 1541b4b7e0 | |||
| 2b7d872ab7 | |||
| 4ae6f4f23d | |||
| 310c2a1fbe | |||
| 44de453bb2 | |||
| ec66e01f55 | |||
| a22f996027 | |||
| b6b046c90b | |||
| 29a2cf2738 | |||
| 551664bc43 | |||
| a2d07bd67c | |||
| a3b92d4dd6 | |||
| 30edf9a55d | |||
| 69626d705d | |||
| 0f86f883fe | |||
| 13f3d15a36 | |||
| 8c7ec2953e | |||
| 0123e1c69e | |||
| 9dc6ff3887 | |||
| fe25798425 | |||
| 6c2478ede3 | |||
| 532a4e2dc5 | |||
| ec503b9ec6 | |||
| fe6b349e5e | |||
| 65b220fdbe | |||
| 6f10e7556f | |||
| fc99375c62 | |||
| 6bdb5922fa | |||
| 32b06afef6 | |||
| 31e0c5151b | |||
| cc3d434c02 | |||
| 1b5d366b38 | |||
| 168ecf14ab | |||
| db9a2699b9 | |||
| d139729fa2 | |||
| dd363629ab | |||
| c544964f57 | |||
| 6e19848723 | |||
| e24da92e0f | |||
| 47f0e6da8f | |||
| 18de381a43 | |||
| 1f5c6604d6 | |||
| a9c7ddec2b | |||
| eb4be44c9a | |||
| 1a2ad27eca | |||
| b1f09b9c6a | |||
| 3656a89d60 | |||
| ba2faba5d5 | |||
| 950280a97b | |||
| 7bc8d75242 | |||
| 5f637b5272 | |||
| 6ed92d080f | |||
| 1b593920cd | |||
| bad90dfb75 | |||
| 05e71f6d2e | |||
| 52c26a2891 | |||
| 81135cb861 | |||
| 50e53120df | |||
| 697929a127 | |||
| b46934db46 | |||
| 5b990743db | |||
| fbb16a960c | |||
| c32ad82d0a | |||
| 850a6f2ad7 | |||
| d344e4c8bb | |||
| f8a9f8fc64 | |||
| a428410c8e | |||
| e5a6c2d9a7 |
.env.example: 12 changes (new file)
@@ -0,0 +1,12 @@
# API Options
DECNET_API_HOST=0.0.0.0
DECNET_API_PORT=8000
DECNET_JWT_SECRET=supersecretkey12345678901234567
DECNET_INGEST_LOG_FILE=/var/log/decnet/decnet.log

# Web Dashboard Options
DECNET_WEB_HOST=0.0.0.0
DECNET_WEB_PORT=8080
DECNET_ADMIN_USER=admin
DECNET_ADMIN_PASSWORD=admin
DECNET_DEVELOPER=False
@@ -2,7 +2,10 @@ name: CI

on:
  push:
    branches: [dev, testing]
    branches: [dev, testing, "temp/merge-*"]
    paths-ignore:
      - "**/*.md"
      - "docs/**"

jobs:
  lint:

@@ -16,20 +19,6 @@ jobs:
      - run: pip install ruff
      - run: ruff check .

  test:
    name: Test (pytest)
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.11", "3.12"]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - run: pip install -e .
      - run: pytest tests/ -v --tb=short

  bandit:
    name: SAST (bandit)
    runs-on: ubuntu-latest

@@ -50,37 +39,116 @@ jobs:
        with:
          python-version: "3.11"
      - run: pip install pip-audit
      - run: pip install -e .
      - run: pip install -e .[dev]
      - run: pip-audit --skip-editable

  open-pr:
    name: Open PR to main
  test-standard:
    name: Test (Standard)
    runs-on: ubuntu-latest
    needs: [lint, test, bandit, pip-audit]
    needs: [lint, bandit, pip-audit]
    strategy:
      matrix:
        python-version: ["3.11", "3.12"]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - run: pip install -e .[dev]
      - run: pytest

  test-live:
    name: Test (Live)
    runs-on: ubuntu-latest
    needs: [test-standard]
    strategy:
      matrix:
        python-version: ["3.11"]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - run: pip install -e .[dev]
      - run: pytest -m live

  test-fuzz:
    name: Test (Fuzz)
    runs-on: ubuntu-latest
    needs: [test-live]
    strategy:
      matrix:
        python-version: ["3.11"]
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - run: pip install -e .[dev]
      - run: pytest -m fuzz

  merge-to-testing:
    name: Merge dev → testing
    runs-on: ubuntu-latest
    needs: [test-standard, test-live, test-fuzz]
    if: github.ref == 'refs/heads/dev'
    steps:
      - name: Open PR via Gitea API
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          token: ${{ secrets.DECNET_PR_TOKEN }}
      - name: Configure git
        run: |
          echo "--- Checking for existing open PRs ---"
          LIST_RESPONSE=$(curl -s \
            -H "Authorization: token ${{ secrets.DECNET_PR_TOKEN }}" \
            "https://git.resacachile.cl/api/v1/repos/anti/DECNET/pulls?state=open&head=anti:dev&base=main&limit=5")
          echo "$LIST_RESPONSE"
          EXISTING=$(echo "$LIST_RESPONSE" | python3 -c "import sys, json; print(len(json.load(sys.stdin)))")
          echo "Open PRs found: $EXISTING"
          if [ "$EXISTING" -gt "0" ]; then
            echo "PR already open, skipping."
            exit 0
          fi
          echo "--- Creating PR ---"
          CREATE_RESPONSE=$(curl -s -X POST \
            -H "Authorization: token ${{ secrets.DECNET_PR_TOKEN }}" \
            -H "Content-Type: application/json" \
            -d '{
              "title": "Auto PR: dev → main",
              "head": "dev",
              "base": "main",
              "body": "All CI and security checks passed. Review and merge when ready."
            }' \
            "https://git.resacachile.cl/api/v1/repos/anti/DECNET/pulls")
          echo "$CREATE_RESPONSE"
          git config user.name "DECNET CI"
          git config user.email "ci@decnet.local"
      - name: Merge dev into testing
        run: |
          git fetch origin testing
          git checkout testing
          git merge origin/dev --no-ff -m "ci: auto-merge dev → testing [skip ci]"
          git push origin testing

  prepare-merge-to-main:
    name: Prepare Merge to Main
    runs-on: ubuntu-latest
    needs: [test-standard, test-live, test-fuzz]
    if: github.ref == 'refs/heads/testing'
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          token: ${{ secrets.DECNET_PR_TOKEN }}
      - name: Configure git
        run: |
          git config user.name "DECNET CI"
          git config user.email "ci@decnet.local"
      - name: Create temp branch and sync with main
        run: |
          git fetch origin main
          git checkout -b temp/merge-testing-to-main
          echo "--- Switched to temp branch, merging main into it ---"
          git merge origin/main --no-edit || { echo "CONFLICT: Manual resolution required"; exit 1; }
          git push origin temp/merge-testing-to-main --force

  finalize-merge-to-main:
    name: Finalize Merge to Main
    runs-on: ubuntu-latest
    needs: [test-standard, test-live, test-fuzz]
    if: startsWith(github.ref, 'refs/heads/temp/merge-')
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          token: ${{ secrets.DECNET_PR_TOKEN }}
      - name: Configure git
        run: |
          git config user.name "DECNET CI"
          git config user.email "ci@decnet.local"
      - name: Merge RC into main
        run: |
          git fetch origin main
          git checkout main
          git merge ${{ github.ref }} --no-ff -m "ci: auto-merge testing → main"
          git push origin main
          echo "--- Cleaning up temp branch ---"
          git push origin --delete ${{ github.ref_name }}

@@ -3,6 +3,9 @@ name: PR Gate
on:
  pull_request:
    branches: [main]
    paths-ignore:
      - "**/*.md"
      - "docs/**"

jobs:
  lint:

@@ -27,5 +30,28 @@ jobs:
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - run: pip install -e .
      - run: pip install -e .[dev]
      - run: pytest tests/ -v --tb=short

  bandit:
    name: SAST (bandit)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - run: pip install bandit
      - run: bandit -r decnet/ -ll -x decnet/services/registry.py

  pip-audit:
    name: Dependency audit (pip-audit)
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.11"
      - run: pip install pip-audit
      - run: pip install -e .[dev]
      - run: pip-audit --skip-editable

@@ -3,6 +3,9 @@ name: Release
on:
  push:
    branches: [main]
    paths-ignore:
      - "**/*.md"
      - "docs/**"

env:
  REGISTRY: git.resacachile.cl

@@ -19,27 +22,42 @@ jobs:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          token: ${{ secrets.DECNET_PR_TOKEN }}

      - name: Extract version from pyproject.toml
      - name: Configure git
        run: |
          git config user.name "DECNET CI"
          git config user.email "ci@decnet.local"

      - name: Bump version and Tag
        id: version
        run: |
          VERSION=$(python3 -c "import tomllib; f=open('pyproject.toml','rb'); d=tomllib.load(f); print(d['project']['version'])")
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          # Calculate next version (v0.x)
          LATEST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
          NEXT_VER=$(python3 -c "
          tag = '$LATEST_TAG'.lstrip('v')
          parts = tag.split('.')
          major = int(parts[0]) if parts[0] else 0
          minor = int(parts[1]) if len(parts) > 1 else 0
          print(f'{major}.{minor + 1}.0')
          ")

      - name: Create tag if not exists
        id: tag
        run: |
          VERSION=${{ steps.version.outputs.version }}
          if git rev-parse "v$VERSION" >/dev/null 2>&1; then
            echo "Tag v$VERSION already exists, skipping."
            echo "created=false" >> $GITHUB_OUTPUT
          else
            git config user.name "gitea-actions"
            git config user.email "actions@git.resacachile.cl"
            git tag -a "v$VERSION" -m "Release v$VERSION"
            git push origin "v$VERSION"
          echo "Next version: $NEXT_VER (calculated from $LATEST_TAG)"

          # Update pyproject.toml
          sed -i "s/^version = \".*\"/version = \"$NEXT_VER\"/" pyproject.toml

          git add pyproject.toml
          git commit -m "chore: auto-release v$NEXT_VER [skip ci]" || echo "No changes to commit"
          CHANGELOG=$(git log ${LATEST_TAG}..HEAD --oneline --no-decorate --no-merges)
          git tag -a "v$NEXT_VER" -m "Auto-release v$NEXT_VER

          Changes since $LATEST_TAG:
          $CHANGELOG"
          git push origin main --follow-tags

          echo "version=$NEXT_VER" >> $GITHUB_OUTPUT
          echo "created=true" >> $GITHUB_OUTPUT
          fi

  docker:
    name: Build, scan & push ${{ matrix.service }}

@@ -49,7 +67,7 @@ jobs:
      fail-fast: false
      matrix:
        service:
          - cowrie
          - conpot
          - docker_api
          - elasticsearch
          - ftp

@@ -66,11 +84,12 @@ jobs:
          - postgres
          - rdp
          - redis
          - real_ssh
          - sip
          - smb
          - smtp
          - snmp
          - ssh
          - telnet
          - tftp
          - vnc
    steps:

@@ -96,13 +115,13 @@ jobs:
          cache-from: type=gha
          cache-to: type=gha,mode=max

      - name: Install Trivy
        run: |
          curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin

      - name: Scan with Trivy
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: decnet-${{ matrix.service }}:scan
          exit-code: "1"
          severity: CRITICAL
          ignore-unfixed: true
        run: |
          trivy image --exit-code 1 --severity CRITICAL --ignore-unfixed decnet-${{ matrix.service }}:scan

      - name: Push image
        if: success()
.gitignore: 10 changes (vendored)
@@ -1,4 +1,6 @@
.venv/
logs/
.claude/
__pycache__/
*.pyc
*.pyo

@@ -8,11 +10,15 @@ build/
decnet-compose.yml
decnet-state.json
*.ini
.env
decnet.log*
*.loggy
*.nmap
linterfails.log
test-scan
webmail
windows1
*.db
decnet.json
.env*
.env.local
.coverage
.hypothesis/
@@ -46,6 +46,7 @@ DECNET is a honeypot/deception network framework. It deploys fake machines (call
- The logging/aggregation network must be isolated from the decoy network.
- A publicly accessible real server acts as the bridge between the two networks.
- Deckies should differ in exposed services and OS fingerprints to appear as a heterogeneous network.
- **IMPORTANT**: The system now strictly enforces dependency injection for storage. Do not import `SQLiteRepository` directly in new features; instead, use `get_repository()` from the factory or the FastAPI `get_repo` dependency.

## Development and testing

@@ -1 +0,0 @@
CI/CD TEST 2
GEMINI.md: 104 changes (new file)
@@ -0,0 +1,104 @@
# DECNET (Deception Network) Project Context

DECNET is a high-fidelity honeypot framework designed to deploy heterogeneous fleets of fake machines (called **deckies**) that appear as real hosts on a local network.

## Project Overview

- **Core Purpose:** To lure, profile, and log attacker interactions within a controlled, deceptive environment.
- **Key Technology:** Linux-native container networking (MACVLAN/IPvlan) combined with Docker to give each decoy its own MAC address, IP, and realistic TCP/IP stack behavior.
- **Main Components:**
  - **Deckies:** Group of containers sharing a network namespace (one base container + multiple service containers).
  - **Archetypes:** Pre-defined machine profiles (e.g., `windows-workstation`, `linux-server`) that bundle services and OS fingerprints.
  - **Services:** Modular honeypot plugins (SSH, SMB, RDP, etc.) built as `BaseService` subclasses.
  - **OS Fingerprinting:** Sysctl-based TCP/IP stack tuning to spoof OS detection (nmap).
  - **Logging Pipeline:** RFC 5424 syslog forwarding to an isolated SIEM/ELK stack.

## Technical Stack

- **Language:** Python 3.11+
- **CLI Framework:** [Typer](https://typer.tiangolo.com/)
- **Data Validation:** [Pydantic v2](https://docs.pydantic.dev/)
- **Orchestration:** Docker Engine 24+ (via Docker SDK for Python)
- **Networking:** MACVLAN (default) or IPvlan L2 (for WiFi/restricted environments).
- **Testing:** Pytest (100% pass requirement).
- **Formatting/Linting:** Ruff, Bandit (SAST), pip-audit.

## Architecture

```text
Host NIC (eth0)
└── MACVLAN Bridge
    ├── Decky-01 (192.168.1.10) -> [Base] + [SSH] + [HTTP]
    ├── Decky-02 (192.168.1.11) -> [Base] + [SMB] + [RDP]
    └── ...
```

- **Base Container:** Owns the IP/MAC, sets `sysctls` for OS spoofing, and runs `sleep infinity`.
- **Service Containers:** Use `network_mode: service:<base>` to share the identity and networking of the base container (sketched below).
- **Isolation:** Decoy traffic is strictly separated from the logging network.

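To make the base/service pairing concrete, here is a minimal sketch of how a compose generator could emit one decky under this model. It is illustrative only: the function name and field choices are assumptions, not DECNET's actual generator API.

```python
from typing import Any


def build_decky_services(name: str, ip: str, mac: str, services: list[str]) -> dict[str, Any]:
    """Sketch: emit compose service entries for one decky (hypothetical helper)."""
    _base: str = f"{name}-base"
    entries: dict[str, Any] = {
        _base: {
            "image": "debian:bookworm-slim",
            "command": "sleep infinity",  # the base only holds the network identity
            "mac_address": mac,
            "networks": {"decoy_net": {"ipv4_address": ip}},
            "sysctls": {"net.ipv4.ip_default_ttl": 128},  # example OS-spoof knob
        }
    }
    for svc in services:
        entries[f"{name}-{svc}"] = {
            "image": f"decnet-{svc}",
            # Join the base container's network namespace: same IP, MAC, and ports.
            "network_mode": f"service:{_base}",
        }
    return entries
```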
## Key Commands

### Development & Maintenance
- **Install (Dev):**
  - `rm .venv -rf`
  - `python3 -m venv .venv`
  - `source .venv/bin/activate`
  - `pip install -e .`
- **Run Tests:** `pytest` (run before any commit)
- **Linting:** `ruff check .`
- **Security Scan:** `bandit -r decnet/`
- **Web Git:** git.resacachile.cl (Gitea)

### CLI Usage
- **List Services:** `decnet services`
- **List Archetypes:** `decnet archetypes`
- **Dry Run (Compose Gen):** `decnet deploy --deckies 3 --randomize-services --dry-run`
- **Deploy (Full):** `sudo .venv/bin/decnet deploy --interface eth0 --deckies 5 --randomize-services`
- **Status:** `decnet status`
- **Teardown:** `sudo .venv/bin/decnet teardown --all`

## Development Conventions

- **Code Style:**
  - Strict adherence to Ruff/PEP8.
  - **Always use typed variables.** Any untyped variables that are found must be corrected.
  - The correct way is `x: int = 1`, never `x : int = 1`.
  - If an assignment is present, always put a space between the type and the equals sign: `x: int = 1`.
  - **Never** use lowercase L (l), uppercase O (O), or uppercase I (I) in single-character names.
  - **Internal vars are to be declared with an underscore** (_internal_variable_name).
  - **Internal-to-internal vars are to be declared with a double underscore** (__internal_variable_name).
  - Always use snake_case for code.
  - Always use PascalCase for classes and generics.
- **Testing:** New features MUST include a `pytest` case. A 100% test pass rate is mandatory before merging.
- **Plugin System** (see the sketch after this list):
  - New services go in `decnet/services/<name>.py`.
  - Subclass `decnet.services.base.BaseService`.
  - The registry uses auto-discovery; no manual registration required.
- **Configuration:**
  - Use Pydantic models in `decnet/config.py` for any new settings.
  - INI file parsing is handled in `decnet/ini_loader.py`.
- **State Management:**
  - Runtime state is persisted in `decnet-state.json`.
  - Do not modify this file manually.
- **General Development Guidelines**:
  - **Never** commit broken code, and never commit before running `pytest` and `bandit` at the project level.
  - **No matter how small** the changes, they must be committed.
  - **If new features are added**, new tests must be added, too.
  - **Never present broken code to the user.** Test, validate, then present.
  - **Extensive tests** must be written for every function.
  - **Always develop in the `dev` branch, never in `main`.**
  - **Test in the `testing` branch.**
  - **IMPORTANT**: The system now strictly enforces dependency injection for storage. Do not import `SQLiteRepository` directly in new features; instead, use `get_repository()` from the factory or the FastAPI `get_repo` dependency.
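As a concrete illustration of the conventions above (typed variables, underscored internals, auto-discovered plugins), a new service might look roughly like the sketch below. The hook names are assumptions for illustration; the real interface is whatever `decnet.services.base.BaseService` defines.

```python
# decnet/services/gopher.py (hypothetical example plugin)
from decnet.services.base import BaseService


class GopherService(BaseService):
    """Sketch of a minimal honeypot plugin; hook names are assumed, not real."""

    name: str = "gopher"  # slug referenced from `services=` lists

    def compose_fragment(self) -> dict:  # assumed hook, illustration only
        _port: int = 70  # internal variable, single underscore per convention
        return {"image": "decnet-gopher", "expose": [str(_port)]}
```

Dropping the file into `decnet/services/` should be enough for the registry's auto-discovery to pick it up; no manual registration step is needed.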

## Directory Structure

- `decnet/`: Main source code.
  - `services/`: Honeypot service implementations.
  - `logging/`: Syslog formatting and forwarding logic.
  - `correlation/`: (In Progress) Logic for grouping attacker events.
- `templates/`: Dockerfiles and entrypoint scripts for services.
- `tests/`: Pytest suite.
- `pyproject.toml`: Dependency and entry point definitions.
- `CLAUDE.md`: Claude-specific environment guidance.
- `DEVELOPMENT.md`: Roadmap and TODOs.
NOTES.md: 113 changes (deleted)
@@ -1,113 +0,0 @@
# Initial steps

# Architecture

## DECNET-UNIHOST model

The unihost model is a mode in which DECNET deploys _n_ machines from a single one. This execution model lives in a decoy network which is accessible to an attacker from the outside.

Each decky (the son of the DECNET unihost) should have different services (RDP, SMB, SSH, FTP, etc.) and all of them should communicate with an external, isolated network, which aggregates data and allows visualizations to be made. Think of the ELK stack. That data is then passed back via Logstash or other methods to a SIEM device or something else that may benefit from this collected data.

## DECNET-MULTIHOST (SWARM) model

The SWARM model is similar to the UNIHOST model, but instead of one real machine, we have n>1 machines. Same thought process really, but deployment may be different. A low-cost and fairly automatable option is to use Ansible, sshpass, or other tools.

# Modus operandi

## Docker-Compose

I will use Docker Compose extensively for this project. The reasons are:
- Easily managed.
- Easily extensible.
- Less overhead.

To be completely transparent: I asked Deepseek to write the initial `docker-compose.yml` file. It was mostly boilerplate, and most of it was later modified or deleted. It doesn't exist anymore.

## Distro to use

I will be using the `debian:bookworm-slim` image for all the containers. I might think about mixing in some Ubuntu or CentOS, but for now, Debian will do just fine.

The distro I'm running is WSL Kali Linux. Let's hope this doesn't cause any problems down the road.

## Networking

It was a hassle, but I think MACVLAN or IPVLAN (thanks @Deepseek!) might work. The reasoning behind picking this networking driver is that for the project to work, the entire container must be accessible from the network. This is to masquerade the containers as real, live machines.

Now, we will need a publicly accessible, real server that has access to this "internal" network. I'll try MACVLAN first.

### MACVLAN Tests

I will first use the default network to see what happens.

```
docker network create -d macvlan \
  --subnet=192.168.1.0/24 \
  --gateway=192.168.1.1 \
  -o parent=eth0 localnet
```

#### Issues

This initial test doesn't seem to be working. It might be because I'm using WSL, so I downloaded an Ubuntu 22.04 Server ISO. I'll try the MACVLAN network on it. Now, if that doesn't work, I don't see how 802.1q would work, at least on _my network_. Perhaps if I had a switch I could make it work, but currently I don't have one :c

---

# TODO

## Core / Hardening

- [ ] **Attacker fingerprinting** — Beyond IP logging: capture TLS JA3/JA4 hashes, TCP window sizes, User-Agent strings, SSH client banners, and tool signatures (nmap, masscan, Metasploit, Cobalt Strike). Build attacker profiles across sessions.
- [ ] **Canary tokens** — Embed canary URLs, fake AWS keys, fake API tokens, and honeydocs (PDF/DOCX with phone-home URLs) into decky filesystems. Fire an alert the moment one is used.
- [ ] **Tarpit mode** — Slow down attackers by making services respond extremely slowly (e.g., SSH that takes 60s to reject, HTTP that drip-feeds bytes). Wastes attacker time and resources.
- [ ] **Dynamic decky mutation** — Deckies that change their exposed services or OS fingerprint over time to confuse port-scan caching and appear more "alive."
- [ ] **Credential harvesting DB** — Every username/password attempt across all services lands in a queryable database. Expose via CLI (`decnet creds`) and flag reuse across deckies.
- [ ] **Session recording** — Full session capture for SSH/Telnet (keystroke logs, commands run, files downloaded). Cowrie already does this — surface it better in the CLI and correlation engine.
- [ ] **Payload capture** — Store every file uploaded or command executed by an attacker. Hash and auto-submit to VirusTotal or a local sandbox.

## Detection & Intelligence

- [ ] **Real-time alerting** — Webhook/Slack/Telegram notifications when an attacker hits a decky for the first time, crosses N deckies (lateral movement), or uses a known bad IP.
- [ ] **Threat intel enrichment** — Auto-lookup attacker IPs against AbuseIPDB, Shodan, GreyNoise, and AlienVault OTX. Tag known scanners vs. targeted attackers.
- [ ] **Attack campaign clustering** — Group attacker sessions by tooling signatures, timing patterns, and credential sets. Identify coordinated campaigns hitting multiple deckies.
- [ ] **GeoIP mapping** — Attacker origin on a world map. Correlate with ASN data to identify cloud exit nodes, VPNs, and Tor exits.
- [ ] **TTPs tagging** — Map observed attacker behaviors to MITRE ATT&CK techniques automatically. Tag events in the correlation engine.
- [ ] **Honeypot interaction scoring** — Score attackers on a scale: casual scanner vs. persistent targeted attacker, based on depth of interaction and commands run.

## Dashboard & Visibility

- [ ] **Web dashboard** — Real-time web UI showing live decky status, attacker activity, traversal graphs, and credential stats. Could be a simple FastAPI + HTMX or a full React app.
- [ ] **Pre-built Kibana/Grafana dashboards** — Ship dashboard JSON exports out of the box so ELK/Grafana deployments are plug-and-play.
- [ ] **CLI live feed** — `decnet watch` command: tail all decky logs in a unified, colored terminal stream (like `docker-compose logs -f` but prettier).
- [ ] **Traversal graph export** — Export attacker traversal graphs as DOT/Graphviz or JSON for visualization in external tools.
- [ ] **Daily digest** — Automated daily summary email/report: new attackers, top credentials tried, most-hit services.

## Deployment & Infrastructure

- [ ] **SWARM / multihost mode** — Full Ansible-based orchestration for deploying deckies across N real hosts.
- [ ] **Terraform/Pulumi provider** — Spin up cloud-hosted deckies on AWS/GCP/Azure with one command. Useful for internet-facing honeynets.
- [ ] **Auto-scaling** — When attack traffic increases, automatically spawn more deckies to absorb and log more activity.
- [ ] **Kubernetes deployment mode** — Run deckies as Kubernetes pods for environments already running k8s.
- [ ] **Proxmox/libvirt backend** — Full VM-based deckies instead of containers, for even more realistic OS fingerprints and behavior. Docker for speed; VMs for realism.
- [ ] **Raspberry Pi / ARM support** — Low-cost physical honeynets using RPis. Validate ARM image builds.
- [ ] **Decky health monitoring** — Watchdog that auto-restarts crashed deckies and alerts if a service goes dark.

## Services & Realism

- [ ] **HTTPS/TLS support** — HTTP honeypot with a self-signed or Let's Encrypt cert. Many real-world services use HTTPS; plain HTTP stands out.
- [ ] **Fake Active Directory** — A convincing fake AD/LDAP with fake users, groups, and GPOs. Attacker tools like BloodHound should get juicy (fake) data.
- [ ] **Fake file shares** — SMB/NFS shares pre-populated with enticing but fake files: "passwords.xlsx", "vpn_config.ovpn", "backup_keys.tar.gz". All instrumented to detect access.
- [ ] **Realistic web apps** — HTTP honeypot serving convincing fake apps: a fake WordPress, a fake phpMyAdmin, a fake Grafana login — all logging every interaction.
- [ ] **OT/ICS profiles** — Expand Conpot support: Modbus, DNP3, BACnet, EtherNet/IP. Convincing industrial control system decoys.
- [ ] **Printer/IoT archetypes** — Expand existing printer/camera archetypes with actual service emulation (IPP, ONVIF, WS-Discovery).
- [ ] **Service interaction depth** — Some services currently just log the connection. Deepen interaction: fake MySQL that accepts queries and returns realistic fake data, fake Redis that stores and retrieves dummy keys.

## Developer Experience

- [ ] **Plugin SDK docs** — Full documentation and an example plugin for adding custom services. Lower the barrier for community contributions.
- [ ] **Integration tests** — Full deploy/teardown cycle tests against a real Docker daemon (not just unit tests).
- [ ] **Per-service tests** — Each of the 29 service implementations deserves its own test coverage.
- [ ] **CI/CD pipeline** — GitHub/Gitea Actions: run tests on push, lint, build Docker images, publish releases.
- [ ] **Config validation CLI** — `decnet validate my.ini` to dry-check an INI config before deploying.
- [ ] **Config generator wizard** — `decnet wizard` interactive prompt to generate an INI config without writing one by hand.
README.md: 54 changes
@@ -69,7 +69,7 @@ From the outside a decky looks identical to a real machine: it has its own MAC a
## Installation

```bash
git clone <repo-url> DECNET
git clone https://git.resacachile.cl/anti/DECNET
cd DECNET
pip install -e .
```

@@ -207,6 +207,26 @@ sudo decnet deploy --deckies 4 --archetype windows-workstation
[corp-workstations]
archetype = windows-workstation
amount = 4

[win-fileserver]
services = ftp
nmap_os = windows
os_version = Windows Server 2019

[dbsrv01]
ip = 192.168.1.112
services = mysql, http
nmap_os = linux

[dbsrv01.http]
server_header = Apache/2.4.54 (Debian)
response_code = 200
fake_app = wordpress

[dbsrv01.mysql]
mysql_version = 5.7.38-log
mysql_banner = MySQL Community Server

```

---

@@ -454,7 +474,7 @@ Key/value pairs are passed directly to the service plugin as persona config. Com
| `mongodb` | `mongo_version` |
| `elasticsearch` | `es_version`, `cluster_name` |
| `ldap` | `base_dn`, `domain` |
| `snmp` | `snmp_community`, `sys_descr` |
| `snmp` | `snmp_community`, `sys_descr`, `snmp_archetype` (picks predefined sysDescr for `water_plant`, `hospital`, etc.) |
| `mqtt` | `mqtt_version` |
| `sip` | `sip_server`, `sip_domain` |
| `k8s` | `k8s_version` |

@@ -470,6 +490,30 @@ See [`test-full.ini`](test-full.ini) — covers all 25 services across 10 role-t

---

## Environment Configuration (.env)

DECNET supports loading configuration from `.env.local` and `.env` files located in the project root. This is useful for securing secrets like the JWT key and configuring default ports without passing flags every time.

An example `.env.example` is provided:

```ini
# API Options
DECNET_API_HOST=0.0.0.0
DECNET_API_PORT=8000
DECNET_JWT_SECRET=supersecretkey12345
DECNET_INGEST_LOG_FILE=/var/log/decnet/decnet.log

# Web Dashboard Options
DECNET_WEB_HOST=0.0.0.0
DECNET_WEB_PORT=8080
DECNET_ADMIN_USER=admin
DECNET_ADMIN_PASSWORD=admin
```

Copy `.env.example` to `.env.local` and modify it to suit your environment.

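A minimal sketch of the loading order this implies, assuming `.env.local` overrides `.env` and a real environment variable overrides both; the helper names here are illustrative, not DECNET's actual env module:

```python
import os
from pathlib import Path


def _load_env_file(path: Path) -> dict[str, str]:
    """Parse simple KEY=VALUE lines, skipping blanks and # comments."""
    values: dict[str, str] = {}
    if not path.exists():
        return values
    for raw in path.read_text().splitlines():
        line = raw.strip()
        if not line or line.startswith("#") or "=" not in line:
            continue
        key, _, value = line.partition("=")
        values[key.strip()] = value.strip()
    return values


def get_setting(key: str, default: str) -> str:
    # .env.local entries override .env; a process environment variable wins over both
    # (assumed precedence, for illustration).
    merged: dict[str, str] = {**_load_env_file(Path(".env")), **_load_env_file(Path(".env.local"))}
    return os.environ.get(key, merged.get(key, default))
```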
---

## Logging

All attacker interactions are forwarded off the decoy network to an isolated logging sink. The log pipeline lives on a separate internal Docker bridge (`decnet_logs`) that is not reachable from the fake LAN.

@@ -631,3 +675,9 @@ The test suite covers:
| `test_cli_service_pool.py` | CLI service resolution |

Every new feature requires passing tests before merging.

# AI Disclosure

This project has been made with lots, and I mean lots, of help from AIs. While most of the design was made by me, most of the coding was done by AI models.

Nevertheless, this project will be kept under high scrutiny by humans.

@@ -148,7 +148,7 @@ ARCHETYPES: dict[str, Archetype] = {
        slug="deaddeck",
        display_name="Deaddeck (Entry Point)",
        description="Internet-facing entry point with real interactive SSH — no honeypot emulation",
        services=["real_ssh"],
        services=["ssh"],
        preferred_distros=["debian", "ubuntu22"],
        nmap_os="linux",
    ),

@@ -167,4 +167,4 @@ def all_archetypes() -> dict[str, Archetype]:


def random_archetype() -> Archetype:
    return random.choice(list(ARCHETYPES.values()))
    return random.choice(list(ARCHETYPES.values()))  # nosec B311
decnet/cli.py: 869 changes
@@ -8,24 +8,49 @@ Usage:
|
||||
decnet services
|
||||
"""
|
||||
|
||||
import random
|
||||
import signal
|
||||
from typing import Optional
|
||||
|
||||
import typer
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
|
||||
from decnet.logging import get_logger
|
||||
from decnet.env import (
|
||||
DECNET_API_HOST,
|
||||
DECNET_API_PORT,
|
||||
DECNET_INGEST_LOG_FILE,
|
||||
DECNET_WEB_HOST,
|
||||
DECNET_WEB_PORT,
|
||||
)
|
||||
from decnet.archetypes import Archetype, all_archetypes, get_archetype
|
||||
from decnet.config import (
|
||||
DeckyConfig,
|
||||
DecnetConfig,
|
||||
random_hostname,
|
||||
)
|
||||
from decnet.distros import all_distros, get_distro, random_distro
|
||||
from decnet.ini_loader import IniConfig, load_ini
|
||||
from decnet.distros import all_distros, get_distro
|
||||
from decnet.fleet import all_service_names, build_deckies, build_deckies_from_ini
|
||||
from decnet.ini_loader import load_ini
|
||||
from decnet.network import detect_interface, detect_subnet, allocate_ips, get_host_ip
|
||||
from decnet.services.registry import all_services
|
||||
|
||||
log = get_logger("cli")
|
||||
|
||||
|
||||
def _daemonize() -> None:
|
||||
"""Fork the current process into a background daemon (Unix double-fork)."""
|
||||
import os
|
||||
import sys
|
||||
|
||||
if os.fork() > 0:
|
||||
raise SystemExit(0)
|
||||
os.setsid()
|
||||
if os.fork() > 0:
|
||||
raise SystemExit(0)
|
||||
sys.stdout = open(os.devnull, "w") # noqa: SIM115
|
||||
sys.stderr = open(os.devnull, "w") # noqa: SIM115
|
||||
sys.stdin = open(os.devnull, "r") # noqa: SIM115
|
||||
|
||||
|
||||
app = typer.Typer(
|
||||
name="decnet",
|
||||
help="Deploy a deception network of honeypot deckies on your LAN.",
|
||||
@@ -33,167 +58,55 @@ app = typer.Typer(
|
||||
)
|
||||
console = Console()
|
||||
|
||||
def _all_service_names() -> list[str]:
|
||||
"""Return all registered service names from the live plugin registry."""
|
||||
return sorted(all_services().keys())
|
||||
|
||||
def _kill_all_services() -> None:
|
||||
"""Find and kill all running DECNET microservice processes."""
|
||||
import os
|
||||
|
||||
def _resolve_distros(
|
||||
distros_explicit: list[str] | None,
|
||||
randomize_distros: bool,
|
||||
n: int,
|
||||
archetype: Archetype | None = None,
|
||||
) -> list[str]:
|
||||
"""Return a list of n distro slugs based on CLI flags or archetype preference."""
|
||||
if distros_explicit:
|
||||
return [distros_explicit[i % len(distros_explicit)] for i in range(n)]
|
||||
if randomize_distros:
|
||||
return [random_distro().slug for _ in range(n)]
|
||||
if archetype:
|
||||
pool = archetype.preferred_distros
|
||||
return [pool[i % len(pool)] for i in range(n)]
|
||||
# Default: cycle through all distros to maximize heterogeneity
|
||||
slugs = list(all_distros().keys())
|
||||
return [slugs[i % len(slugs)] for i in range(n)]
|
||||
registry = _service_registry(str(DECNET_INGEST_LOG_FILE))
|
||||
killed = 0
|
||||
for name, match_fn, _launch_args in registry:
|
||||
pid = _is_running(match_fn)
|
||||
if pid is not None:
|
||||
console.print(f"[yellow]Stopping {name} (PID {pid})...[/]")
|
||||
os.kill(pid, signal.SIGTERM)
|
||||
killed += 1
|
||||
|
||||
|
||||
def _build_deckies(
|
||||
n: int,
|
||||
ips: list[str],
|
||||
services_explicit: list[str] | None,
|
||||
randomize_services: bool,
|
||||
distros_explicit: list[str] | None = None,
|
||||
randomize_distros: bool = False,
|
||||
archetype: Archetype | None = None,
|
||||
) -> list[DeckyConfig]:
|
||||
deckies = []
|
||||
used_combos: set[frozenset] = set()
|
||||
distro_slugs = _resolve_distros(distros_explicit, randomize_distros, n, archetype)
|
||||
|
||||
for i, ip in enumerate(ips):
|
||||
name = f"decky-{i + 1:02d}"
|
||||
distro = get_distro(distro_slugs[i])
|
||||
hostname = random_hostname(distro.slug)
|
||||
|
||||
if services_explicit:
|
||||
svc_list = services_explicit
|
||||
elif archetype:
|
||||
svc_list = list(archetype.services)
|
||||
elif randomize_services:
|
||||
svc_pool = _all_service_names()
|
||||
attempts = 0
|
||||
while True:
|
||||
count = random.randint(1, min(3, len(svc_pool)))
|
||||
chosen = frozenset(random.sample(svc_pool, count))
|
||||
attempts += 1
|
||||
if chosen not in used_combos or attempts > 20:
|
||||
break
|
||||
svc_list = list(chosen)
|
||||
used_combos.add(chosen)
|
||||
if killed:
|
||||
console.print(f"[green]{killed} background process(es) stopped.[/]")
|
||||
else:
|
||||
typer.echo("Error: provide --services, --archetype, or --randomize-services.", err=True)
|
||||
raise typer.Exit(1)
|
||||
|
||||
deckies.append(
|
||||
DeckyConfig(
|
||||
name=name,
|
||||
ip=ip,
|
||||
services=svc_list,
|
||||
distro=distro.slug,
|
||||
base_image=distro.image,
|
||||
build_base=distro.build_base,
|
||||
hostname=hostname,
|
||||
archetype=archetype.slug if archetype else None,
|
||||
nmap_os=archetype.nmap_os if archetype else "linux",
|
||||
)
|
||||
)
|
||||
return deckies
|
||||
console.print("[dim]No DECNET services were running.[/]")
|
||||
|
||||
|
||||
def _build_deckies_from_ini(
|
||||
ini: IniConfig,
|
||||
subnet_cidr: str,
|
||||
gateway: str,
|
||||
host_ip: str,
|
||||
randomize: bool,
|
||||
) -> list[DeckyConfig]:
|
||||
"""Build DeckyConfig list from an IniConfig, auto-allocating missing IPs."""
|
||||
from ipaddress import IPv4Address, IPv4Network
|
||||
@app.command()
|
||||
def api(
|
||||
port: int = typer.Option(DECNET_API_PORT, "--port", help="Port for the backend API"),
|
||||
host: str = typer.Option(DECNET_API_HOST, "--host", help="Host IP for the backend API"),
|
||||
log_file: str = typer.Option(DECNET_INGEST_LOG_FILE, "--log-file", help="Path to the DECNET log file to monitor"),
|
||||
daemon: bool = typer.Option(False, "--daemon", "-d", help="Detach to background as a daemon process"),
|
||||
) -> None:
|
||||
"""Run the DECNET API and Web Dashboard in standalone mode."""
|
||||
import subprocess # nosec B404
|
||||
import sys
|
||||
import os
|
||||
|
||||
explicit_ips: set[IPv4Address] = {
|
||||
IPv4Address(s.ip) for s in ini.deckies if s.ip
|
||||
}
|
||||
if daemon:
|
||||
log.info("API daemonizing host=%s port=%d", host, port)
|
||||
_daemonize()
|
||||
|
||||
net = IPv4Network(subnet_cidr, strict=False)
|
||||
reserved = {
|
||||
net.network_address,
|
||||
net.broadcast_address,
|
||||
IPv4Address(gateway),
|
||||
IPv4Address(host_ip),
|
||||
} | explicit_ips
|
||||
|
||||
auto_pool = (str(addr) for addr in net.hosts() if addr not in reserved)
|
||||
|
||||
deckies: list[DeckyConfig] = []
|
||||
for spec in ini.deckies:
|
||||
# Resolve archetype (if any) — explicit services/distro override it
|
||||
arch: Archetype | None = None
|
||||
if spec.archetype:
|
||||
log.info("API command invoked host=%s port=%d", host, port)
|
||||
console.print(f"[green]Starting DECNET API on {host}:{port}...[/]")
|
||||
_env: dict[str, str] = os.environ.copy()
|
||||
_env["DECNET_INGEST_LOG_FILE"] = str(log_file)
|
||||
try:
|
||||
arch = get_archetype(spec.archetype)
|
||||
except ValueError as e:
|
||||
console.print(f"[red]{e}[/]")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Distro: archetype preferred list → random → global cycle
|
||||
distro_pool = arch.preferred_distros if arch else list(all_distros().keys())
|
||||
distro = get_distro(distro_pool[len(deckies) % len(distro_pool)])
|
||||
hostname = random_hostname(distro.slug)
|
||||
|
||||
ip = spec.ip or next(auto_pool, None)
|
||||
if ip is None:
|
||||
raise RuntimeError(
|
||||
f"Not enough free IPs in {subnet_cidr} while assigning IP for '{spec.name}'."
|
||||
subprocess.run( # nosec B603 B404
|
||||
[sys.executable, "-m", "uvicorn", "decnet.web.api:app", "--host", host, "--port", str(port)],
|
||||
env=_env
|
||||
)
|
||||
|
||||
if spec.services:
|
||||
known = set(_all_service_names())
|
||||
unknown = [s for s in spec.services if s not in known]
|
||||
if unknown:
|
||||
console.print(
|
||||
f"[red]Unknown service(s) in [{spec.name}]: {unknown}. "
|
||||
f"Available: {_all_service_names()}[/]"
|
||||
)
|
||||
raise typer.Exit(1)
|
||||
svc_list = spec.services
|
||||
elif arch:
|
||||
svc_list = list(arch.services)
|
||||
elif randomize:
|
||||
svc_pool = _all_service_names()
|
||||
count = random.randint(1, min(3, len(svc_pool)))
|
||||
svc_list = random.sample(svc_pool, count)
|
||||
else:
|
||||
console.print(
|
||||
f"[red]Decky '[{spec.name}]' has no services= in config. "
|
||||
"Add services=, archetype=, or use --randomize-services.[/]"
|
||||
)
|
||||
raise typer.Exit(1)
|
||||
|
||||
# nmap_os priority: explicit INI key > archetype default > "linux"
|
||||
resolved_nmap_os = spec.nmap_os or (arch.nmap_os if arch else "linux")
|
||||
deckies.append(DeckyConfig(
|
||||
name=spec.name,
|
||||
ip=ip,
|
||||
services=svc_list,
|
||||
distro=distro.slug,
|
||||
base_image=distro.image,
|
||||
build_base=distro.build_base,
|
||||
hostname=hostname,
|
||||
archetype=arch.slug if arch else None,
|
||||
service_config=spec.service_config,
|
||||
nmap_os=resolved_nmap_os,
|
||||
))
|
||||
return deckies
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
except (FileNotFoundError, subprocess.SubprocessError):
|
||||
console.print("[red]Failed to start API. Ensure 'uvicorn' is installed in the current environment.[/]")
|
||||
|
||||
|
||||
@app.command()
|
||||
@@ -207,15 +120,26 @@ def deploy(
|
||||
randomize_services: bool = typer.Option(False, "--randomize-services", help="Assign random services to each decky"),
|
||||
distro: Optional[str] = typer.Option(None, "--distro", help="Comma-separated distro slugs, e.g. debian,ubuntu22,rocky9"),
|
||||
randomize_distros: bool = typer.Option(False, "--randomize-distros", help="Assign a random distro to each decky"),
|
||||
log_target: Optional[str] = typer.Option(None, "--log-target", help="Forward logs to ip:port (e.g. 192.168.1.5:5140)"),
|
||||
log_file: Optional[str] = typer.Option(None, "--log-file", help="Write RFC 5424 syslog to this path inside containers (e.g. /var/log/decnet/decnet.log)"),
|
||||
log_file: Optional[str] = typer.Option(DECNET_INGEST_LOG_FILE, "--log-file", help="Host path for the collector to write RFC 5424 logs (e.g. /var/log/decnet/decnet.log)"),
|
||||
archetype_name: Optional[str] = typer.Option(None, "--archetype", "-a", help="Machine archetype slug (e.g. linux-server, windows-workstation)"),
|
||||
mutate_interval: Optional[int] = typer.Option(30, "--mutate-interval", help="Automatically rotate services every N minutes"),
|
||||
dry_run: bool = typer.Option(False, "--dry-run", help="Generate compose file without starting containers"),
|
||||
no_cache: bool = typer.Option(False, "--no-cache", help="Force rebuild all images, ignoring Docker layer cache"),
|
||||
parallel: bool = typer.Option(False, "--parallel", help="Build all images concurrently (enables BuildKit, separates build from up)"),
|
||||
ipvlan: bool = typer.Option(False, "--ipvlan", help="Use IPvlan L2 instead of MACVLAN (required on WiFi interfaces)"),
|
||||
config_file: Optional[str] = typer.Option(None, "--config", "-c", help="Path to INI config file"),
|
||||
api: bool = typer.Option(False, "--api", help="Start the FastAPI backend to ingest and serve logs"),
|
||||
api_port: int = typer.Option(8000, "--api-port", help="Port for the backend API"),
|
||||
daemon: bool = typer.Option(False, "--daemon", help="Detach to background as a daemon process"),
|
||||
) -> None:
|
||||
"""Deploy deckies to the LAN."""
|
||||
import os
|
||||
|
||||
if daemon:
|
||||
log.info("deploy daemonizing mode=%s deckies=%s", mode, deckies)
|
||||
_daemonize()
|
||||
|
||||
log.info("deploy command invoked mode=%s deckies=%s dry_run=%s", mode, deckies, dry_run)
|
||||
if mode not in ("unihost", "swarm"):
|
||||
console.print("[red]--mode must be 'unihost' or 'swarm'[/]")
|
||||
raise typer.Exit(1)
|
||||
@@ -230,7 +154,6 @@ def deploy(
|
||||
console.print(f"[red]{e}[/]")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# CLI flags override INI values when explicitly provided
|
||||
iface = interface or ini.interface or detect_interface()
|
||||
subnet_cidr = subnet or ini.subnet
|
||||
effective_gateway = ini.gateway
|
||||
@@ -244,7 +167,6 @@ def deploy(
|
||||
f"[dim]Subnet:[/] {subnet_cidr} [dim]Gateway:[/] {effective_gateway} "
|
||||
f"[dim]Host IP:[/] {host_ip}")
|
||||
|
||||
# Register bring-your-own services from INI before validation
|
||||
if ini.custom_services:
|
||||
from decnet.custom_service import CustomService
|
||||
from decnet.services.registry import register_custom_service
|
||||
@@ -258,11 +180,14 @@ def deploy(
|
||||
)
|
||||
)
|
||||
|
||||
effective_log_target = log_target or ini.log_target
|
||||
effective_log_file = log_file
|
||||
decky_configs = _build_deckies_from_ini(
|
||||
ini, subnet_cidr, effective_gateway, host_ip, randomize_services
|
||||
try:
|
||||
decky_configs = build_deckies_from_ini(
|
||||
ini, subnet_cidr, effective_gateway, host_ip, randomize_services, cli_mutate_interval=mutate_interval
|
||||
)
|
||||
except ValueError as e:
|
||||
console.print(f"[red]{e}[/]")
|
||||
raise typer.Exit(1)
|
||||
# ------------------------------------------------------------------ #
|
||||
# Classic CLI path #
|
||||
# ------------------------------------------------------------------ #
|
||||
@@ -273,13 +198,12 @@ def deploy(
|
||||
|
||||
services_list = [s.strip() for s in services.split(",")] if services else None
|
||||
if services_list:
|
||||
known = set(_all_service_names())
|
||||
known = set(all_service_names())
|
||||
unknown = [s for s in services_list if s not in known]
|
||||
if unknown:
|
||||
console.print(f"[red]Unknown service(s): {unknown}. Available: {_all_service_names()}[/]")
|
||||
console.print(f"[red]Unknown service(s): {unknown}. Available: {all_service_names()}[/]")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Resolve archetype if provided
|
||||
arch: Archetype | None = None
|
||||
if archetype_name:
|
||||
try:
|
||||
@@ -313,41 +237,329 @@ def deploy(
|
||||
raise typer.Exit(1)
|
||||
|
||||
ips = allocate_ips(subnet_cidr, effective_gateway, host_ip, deckies, ip_start)
|
||||
decky_configs = _build_deckies(
|
||||
decky_configs = build_deckies(
|
||||
deckies, ips, services_list, randomize_services,
|
||||
distros_explicit=distros_list, randomize_distros=randomize_distros,
|
||||
archetype=arch,
|
||||
archetype=arch, mutate_interval=mutate_interval,
|
||||
)
|
||||
effective_log_target = log_target
|
||||
effective_log_file = log_file
|
||||
|
||||
if api and not effective_log_file:
|
||||
effective_log_file = os.path.join(os.getcwd(), "decnet.log")
|
||||
console.print(f"[cyan]API mode enabled: defaulting log-file to {effective_log_file}[/]")
|
||||
|
||||
config = DecnetConfig(
|
||||
mode=mode,
|
||||
interface=iface,
|
||||
subnet=subnet_cidr,
|
||||
gateway=effective_gateway,
|
||||
deckies=decky_configs,
|
||||
log_target=effective_log_target,
|
||||
log_file=effective_log_file,
|
||||
ipvlan=ipvlan,
|
||||
mutate_interval=mutate_interval,
|
||||
)
|
||||
|
||||
if effective_log_target and not dry_run:
|
||||
from decnet.logging.forwarder import probe_log_target
|
||||
if not probe_log_target(effective_log_target):
|
||||
console.print(f"[yellow]Warning: log target {effective_log_target} is unreachable. "
|
||||
"Logs will be lost if it stays down.[/]")
|
||||
log.debug("deploy: config built deckies=%d interface=%s subnet=%s", len(config.deckies), config.interface, config.subnet)
|
||||
from decnet.engine import deploy as _deploy
|
||||
_deploy(config, dry_run=dry_run, no_cache=no_cache, parallel=parallel)
|
||||
if dry_run:
|
||||
log.info("deploy: dry-run complete, no containers started")
|
||||
else:
|
||||
log.info("deploy: deployment complete deckies=%d", len(config.deckies))
|
||||
|
||||
from decnet.deployer import deploy as _deploy
|
||||
_deploy(config, dry_run=dry_run, no_cache=no_cache)
|
||||
if mutate_interval is not None and not dry_run:
|
||||
import subprocess # nosec B404
|
||||
import sys
|
||||
console.print(f"[green]Starting DECNET Mutator watcher in the background (interval: {mutate_interval}m)...[/]")
|
||||
try:
|
||||
subprocess.Popen( # nosec B603
|
||||
[sys.executable, "-m", "decnet.cli", "mutate", "--watch"],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.STDOUT,
|
||||
start_new_session=True,
|
||||
)
|
||||
except (FileNotFoundError, subprocess.SubprocessError):
|
||||
console.print("[red]Failed to start mutator watcher.[/]")
|
||||
|
||||
if effective_log_file and not dry_run and not api:
|
||||
import subprocess # nosec B404
|
||||
import sys
|
||||
from pathlib import Path as _Path
|
||||
_collector_err = _Path(effective_log_file).with_suffix(".collector.log")
|
||||
console.print(f"[bold cyan]Starting log collector[/] → {effective_log_file}")
|
||||
subprocess.Popen( # nosec B603
|
||||
[sys.executable, "-m", "decnet.cli", "collect", "--log-file", str(effective_log_file)],
|
||||
stdin=subprocess.DEVNULL,
|
||||
stdout=open(_collector_err, "a"),
|
||||
stderr=subprocess.STDOUT,
|
||||
start_new_session=True,
|
||||
)
|
||||
|
||||
if api and not dry_run:
|
||||
import subprocess # nosec B404
|
||||
import sys
|
||||
console.print(f"[green]Starting DECNET API on port {api_port}...[/]")
|
||||
_env: dict[str, str] = os.environ.copy()
|
||||
_env["DECNET_INGEST_LOG_FILE"] = str(effective_log_file or "")
|
||||
try:
|
||||
subprocess.Popen( # nosec B603
|
||||
[sys.executable, "-m", "uvicorn", "decnet.web.api:app", "--host", DECNET_API_HOST, "--port", str(api_port)],
|
||||
env=_env,
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.STDOUT
|
||||
)
|
||||
console.print(f"[dim]API running at http://{DECNET_API_HOST}:{api_port}[/]")
|
||||
except (FileNotFoundError, subprocess.SubprocessError):
|
||||
console.print("[red]Failed to start API. Ensure 'uvicorn' is installed in the current environment.[/]")
|
||||
|
||||
if effective_log_file and not dry_run:
|
||||
import subprocess # nosec B404
|
||||
import sys
|
||||
console.print("[bold cyan]Starting DECNET-PROBER[/] (auto-discovers attackers from log stream)")
|
||||
try:
|
||||
_prober_args = [
|
||||
sys.executable, "-m", "decnet.cli", "probe",
|
||||
"--daemon",
|
||||
"--log-file", str(effective_log_file),
|
||||
]
|
||||
subprocess.Popen( # nosec B603
|
||||
_prober_args,
|
||||
stdin=subprocess.DEVNULL,
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.STDOUT,
|
||||
start_new_session=True,
|
||||
)
|
||||
except (FileNotFoundError, subprocess.SubprocessError):
|
||||
console.print("[red]Failed to start DECNET-PROBER.[/]")
|
||||
|
||||
if effective_log_file and not dry_run:
|
||||
import subprocess # nosec B404
|
||||
import sys
|
||||
console.print("[bold cyan]Starting DECNET-PROFILER[/] (builds attacker profiles from log stream)")
|
||||
try:
|
||||
subprocess.Popen( # nosec B603
|
||||
[sys.executable, "-m", "decnet.cli", "profiler", "--daemon"],
|
||||
stdin=subprocess.DEVNULL,
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.STDOUT,
|
||||
start_new_session=True,
|
||||
)
|
||||
except (FileNotFoundError, subprocess.SubprocessError):
|
||||
console.print("[red]Failed to start DECNET-PROFILER.[/]")
|
||||
|
||||
if effective_log_file and not dry_run:
|
||||
import subprocess # nosec B404
|
||||
import sys
|
||||
console.print("[bold cyan]Starting DECNET-SNIFFER[/] (passive network capture)")
|
||||
try:
|
||||
subprocess.Popen( # nosec B603
|
||||
[sys.executable, "-m", "decnet.cli", "sniffer",
|
||||
"--daemon",
|
||||
"--log-file", str(effective_log_file)],
|
||||
stdin=subprocess.DEVNULL,
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.STDOUT,
|
||||
start_new_session=True,
|
||||
)
|
||||
except (FileNotFoundError, subprocess.SubprocessError):
|
||||
console.print("[red]Failed to start DECNET-SNIFFER.[/]")
|
||||
|
||||
|
||||
def _is_running(match_fn) -> int | None:
|
||||
"""Return PID of a running DECNET process matching ``match_fn(cmdline)``, or None."""
|
||||
import psutil
|
||||
|
||||
for proc in psutil.process_iter(["pid", "cmdline"]):
|
||||
try:
|
||||
cmd = proc.info["cmdline"]
|
||||
if cmd and match_fn(cmd):
|
||||
return proc.info["pid"]
|
||||
except (psutil.NoSuchProcess, psutil.AccessDenied):
|
||||
continue
|
||||
return None
|
||||
|
||||
|
||||
# Each entry: (display_name, detection_fn, launch_args).
# launch_args is the ready-to-run Popen argv list, built here from log_file.
def _service_registry(log_file: str) -> list[tuple[str, callable, list[str]]]:
    """Return the microservice registry for health-check and relaunch."""
    import sys

    _py = sys.executable
    return [
        (
            "Collector",
            lambda cmd: "decnet.cli" in cmd and "collect" in cmd,
            [_py, "-m", "decnet.cli", "collect", "--daemon", "--log-file", log_file],
        ),
        (
            "Mutator",
            lambda cmd: "decnet.cli" in cmd and "mutate" in cmd and "--watch" in cmd,
            [_py, "-m", "decnet.cli", "mutate", "--daemon", "--watch"],
        ),
        (
            "Prober",
            lambda cmd: "decnet.cli" in cmd and "probe" in cmd,
            [_py, "-m", "decnet.cli", "probe", "--daemon", "--log-file", log_file],
        ),
        (
            "Profiler",
            lambda cmd: "decnet.cli" in cmd and "profiler" in cmd,
            [_py, "-m", "decnet.cli", "profiler", "--daemon"],
        ),
        (
            "Sniffer",
            lambda cmd: "decnet.cli" in cmd and "sniffer" in cmd,
            [_py, "-m", "decnet.cli", "sniffer", "--daemon", "--log-file", log_file],
        ),
        (
            "API",
            lambda cmd: "uvicorn" in cmd and "decnet.web.api:app" in cmd,
            [_py, "-m", "uvicorn", "decnet.web.api:app",
             "--host", DECNET_API_HOST, "--port", str(DECNET_API_PORT)],
        ),
    ]

@app.command()
def redeploy(
    log_file: str = typer.Option(DECNET_INGEST_LOG_FILE, "--log-file", "-f", help="Path to the DECNET log file"),
) -> None:
    """Check running DECNET services and relaunch any that are down."""
    import subprocess  # nosec B404

    log.info("redeploy: checking services")
    registry = _service_registry(str(log_file))

    table = Table(title="DECNET Services", show_lines=True)
    table.add_column("Service", style="bold cyan")
    table.add_column("Status")
    table.add_column("PID", style="dim")
    table.add_column("Action")

    relaunched = 0
    for name, match_fn, launch_args in registry:
        pid = _is_running(match_fn)
        if pid is not None:
            table.add_row(name, "[green]UP[/]", str(pid), "—")
        else:
            try:
                subprocess.Popen(  # nosec B603
                    launch_args,
                    stdin=subprocess.DEVNULL,
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.STDOUT,
                    start_new_session=True,
                )
                table.add_row(name, "[red]DOWN[/]", "—", "[green]relaunched[/]")
                relaunched += 1
            except (FileNotFoundError, subprocess.SubprocessError) as exc:
                table.add_row(name, "[red]DOWN[/]", "—", f"[red]failed: {exc}[/]")

    console.print(table)
    if relaunched:
        console.print(f"[green]{relaunched} service(s) relaunched.[/]")
    else:
        console.print("[green]All services running.[/]")


@app.command()
def probe(
    log_file: str = typer.Option(DECNET_INGEST_LOG_FILE, "--log-file", "-f", help="Path for RFC 5424 syslog + .json output (reads attackers from .json, writes results to both)"),
    interval: int = typer.Option(300, "--interval", "-i", help="Seconds between probe cycles (default: 300)"),
    timeout: float = typer.Option(5.0, "--timeout", help="Per-probe TCP timeout in seconds"),
    daemon: bool = typer.Option(False, "--daemon", "-d", help="Detach to background (used by deploy, no console output)"),
) -> None:
    """Fingerprint attackers (JARM + HASSH + TCP/IP stack) discovered in the log stream."""
    import asyncio
    from decnet.prober import prober_worker

    if daemon:
        log.info("probe daemonizing log_file=%s interval=%d", log_file, interval)
        _daemonize()
        asyncio.run(prober_worker(log_file, interval=interval, timeout=timeout))
        return

    else:
        log.info("probe command invoked log_file=%s interval=%d", log_file, interval)
        console.print(f"[bold cyan]DECNET-PROBER[/] watching {log_file} for attackers (interval: {interval}s)")
        console.print("[dim]Press Ctrl+C to stop[/]")
        try:
            asyncio.run(prober_worker(log_file, interval=interval, timeout=timeout))
        except KeyboardInterrupt:
            console.print("\n[yellow]DECNET-PROBER stopped.[/]")


@app.command()
def collect(
    log_file: str = typer.Option(DECNET_INGEST_LOG_FILE, "--log-file", "-f", help="Path to write RFC 5424 syslog lines and .json records"),
    daemon: bool = typer.Option(False, "--daemon", "-d", help="Detach to background as a daemon process"),
) -> None:
    """Stream Docker logs from all running decky service containers to a log file."""
    import asyncio
    from decnet.collector import log_collector_worker

    if daemon:
        log.info("collect daemonizing log_file=%s", log_file)
        _daemonize()

    log.info("collect command invoked log_file=%s", log_file)
    console.print(f"[bold cyan]Collector starting[/] → {log_file}")
    asyncio.run(log_collector_worker(log_file))


@app.command()
def mutate(
    watch: bool = typer.Option(False, "--watch", "-w", help="Run continuously and mutate deckies according to their interval"),
    decky_name: Optional[str] = typer.Option(None, "--decky", help="Force mutate a specific decky immediately"),
    force_all: bool = typer.Option(False, "--all", help="Force mutate all deckies immediately"),
    daemon: bool = typer.Option(False, "--daemon", "-d", help="Detach to background as a daemon process"),
) -> None:
    """Manually trigger or continuously watch for decky mutation."""
    import asyncio
    from decnet.mutator import mutate_decky, mutate_all, run_watch_loop
    from decnet.web.dependencies import repo

    if daemon:
        log.info("mutate daemonizing watch=%s", watch)
        _daemonize()

    async def _run() -> None:
        await repo.initialize()
        if watch:
            await run_watch_loop(repo)
        elif decky_name:
            await mutate_decky(decky_name, repo)
        elif force_all:
            await mutate_all(force=True, repo=repo)
        else:
            await mutate_all(force=False, repo=repo)

    asyncio.run(_run())

@app.command()
def status() -> None:
    """Show running deckies and their status."""
    log.info("status command invoked")
    from decnet.engine import status as _status
    _status()

    registry = _service_registry(str(DECNET_INGEST_LOG_FILE))
    svc_table = Table(title="DECNET Services", show_lines=True)
    svc_table.add_column("Service", style="bold cyan")
    svc_table.add_column("Status")
    svc_table.add_column("PID", style="dim")

    for name, match_fn, _launch_args in registry:
        pid = _is_running(match_fn)
        if pid is not None:
            svc_table.add_row(name, "[green]UP[/]", str(pid))
        else:
            svc_table.add_row(name, "[red]DOWN[/]", "—")

    console.print(svc_table)

@app.command()
def teardown(
@@ -359,8 +571,13 @@ def teardown(
        console.print("[red]Specify --all or --id <name>.[/]")
        raise typer.Exit(1)

    log.info("teardown command invoked all=%s id=%s", all_, id_)
    from decnet.engine import teardown as _teardown
    _teardown(decky_id=id_)
    log.info("teardown complete all=%s id=%s", all_, id_)

    if all_:
        _kill_all_services()

@app.command(name="services")
@@ -394,6 +611,7 @@ def correlate(
    min_deckies: int = typer.Option(2, "--min-deckies", "-m", help="Minimum number of distinct deckies an IP must touch to be reported"),
    output: str = typer.Option("table", "--output", "-o", help="Output format: table | json | syslog"),
    emit_syslog: bool = typer.Option(False, "--emit-syslog", help="Also print traversal events as RFC 5424 lines (for SIEM piping)"),
    daemon: bool = typer.Option(False, "--daemon", "-d", help="Detach to background as a daemon process"),
) -> None:
    """Analyse logs for cross-decky traversals and print the attacker movement graph."""
    import sys
@@ -401,6 +619,10 @@ def correlate(
    from pathlib import Path
    from decnet.correlation.engine import CorrelationEngine

    if daemon:
        log.info("correlate daemonizing log_file=%s", log_file)
        _daemonize()

    engine = CorrelationEngine()

    if log_file:
@@ -459,3 +681,292 @@ def list_archetypes() -> None:
            arch.description,
        )
    console.print(table)

@app.command(name="web")
def serve_web(
    web_port: int = typer.Option(DECNET_WEB_PORT, "--web-port", help="Port to serve the DECNET Web Dashboard"),
    host: str = typer.Option(DECNET_WEB_HOST, "--host", help="Host IP to serve the Web Dashboard"),
    api_port: int = typer.Option(DECNET_API_PORT, "--api-port", help="Port the DECNET API is listening on"),
    daemon: bool = typer.Option(False, "--daemon", "-d", help="Detach to background as a daemon process"),
) -> None:
    """Serve the DECNET Web Dashboard frontend.

    Proxies /api/* requests to the API server so the frontend can use
    relative URLs (/api/v1/...) with no CORS configuration required.
    """
    import http.client
    import http.server
    import socketserver
    from pathlib import Path

    dist_dir = Path(__file__).parent.parent / "decnet_web" / "dist"

    if not dist_dir.exists():
        console.print(f"[red]Frontend build not found at {dist_dir}. Make sure you run 'npm run build' inside 'decnet_web'.[/]")
        raise typer.Exit(1)

    if daemon:
        log.info("web daemonizing host=%s port=%d api_port=%d", host, web_port, api_port)
        _daemonize()

    _api_port = api_port

    class SPAHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
        def do_GET(self):
            if self.path.startswith("/api/"):
                self._proxy("GET")
                return
            path = self.translate_path(self.path)
            if not Path(path).exists() or Path(path).is_dir():
                self.path = "/index.html"
            return super().do_GET()

        def do_POST(self):
            if self.path.startswith("/api/"):
                self._proxy("POST")
                return
            self.send_error(405)

        def do_PUT(self):
            if self.path.startswith("/api/"):
                self._proxy("PUT")
                return
            self.send_error(405)

        def do_DELETE(self):
            if self.path.startswith("/api/"):
                self._proxy("DELETE")
                return
            self.send_error(405)

        def _proxy(self, method: str) -> None:
            content_length = int(self.headers.get("Content-Length", 0))
            body = self.rfile.read(content_length) if content_length else None

            forward = {k: v for k, v in self.headers.items()
                       if k.lower() not in ("host", "connection")}

            try:
                conn = http.client.HTTPConnection("127.0.0.1", _api_port, timeout=120)
                conn.request(method, self.path, body=body, headers=forward)
                resp = conn.getresponse()

                self.send_response(resp.status)
                for key, val in resp.getheaders():
                    if key.lower() not in ("connection", "transfer-encoding"):
                        self.send_header(key, val)
                self.end_headers()

                # Disable socket timeout for SSE streams — they are
                # long-lived by design and the 120s timeout would kill them.
                content_type = resp.getheader("Content-Type", "")
                if "text/event-stream" in content_type:
                    conn.sock.settimeout(None)

                # read1() returns bytes immediately available in the buffer
                # without blocking for more. Plain read(4096) waits until
                # 4096 bytes accumulate — fatal for SSE where each event
                # is only ~100-500 bytes.
                _read = getattr(resp, "read1", resp.read)
                while True:
                    chunk = _read(4096)
                    if not chunk:
                        break
                    self.wfile.write(chunk)
                    self.wfile.flush()
            except Exception as exc:
                log.warning("web proxy error %s %s: %s", method, self.path, exc)
                self.send_error(502, f"API proxy error: {exc}")
            finally:
                try:
                    conn.close()
                except Exception:  # nosec B110 — best-effort conn cleanup
                    pass

        def log_message(self, fmt: str, *args: object) -> None:
            log.debug("web %s", fmt % args)

    import os
    os.chdir(dist_dir)

    socketserver.TCPServer.allow_reuse_address = True
    with socketserver.ThreadingTCPServer((host, web_port), SPAHTTPRequestHandler) as httpd:
        console.print(f"[green]Serving DECNET Web Dashboard on http://{host}:{web_port}[/]")
        console.print(f"[dim]Proxying /api/* → http://127.0.0.1:{_api_port}[/]")
        try:
            httpd.serve_forever()
        except KeyboardInterrupt:
            console.print("\n[dim]Shutting down dashboard server.[/]")


@app.command(name="profiler")
def profiler_cmd(
    interval: int = typer.Option(30, "--interval", "-i", help="Seconds between profile rebuild cycles"),
    daemon: bool = typer.Option(False, "--daemon", "-d", help="Detach to background as a daemon process"),
) -> None:
    """Run the attacker profiler as a standalone microservice."""
    import asyncio
    from decnet.profiler import attacker_profile_worker
    from decnet.web.dependencies import repo

    if daemon:
        log.info("profiler daemonizing interval=%d", interval)
        _daemonize()

    log.info("profiler starting interval=%d", interval)
    console.print(f"[bold cyan]Profiler starting[/] (interval: {interval}s)")

    async def _run() -> None:
        await repo.initialize()
        await attacker_profile_worker(repo, interval=interval)

    try:
        asyncio.run(_run())
    except KeyboardInterrupt:
        console.print("\n[yellow]Profiler stopped.[/]")


@app.command(name="sniffer")
def sniffer_cmd(
    log_file: str = typer.Option(DECNET_INGEST_LOG_FILE, "--log-file", "-f", help="Path to write captured syslog + JSON records"),
    daemon: bool = typer.Option(False, "--daemon", "-d", help="Detach to background as a daemon process"),
) -> None:
    """Run the network sniffer as a standalone microservice."""
    import asyncio
    from decnet.sniffer import sniffer_worker

    if daemon:
        log.info("sniffer daemonizing log_file=%s", log_file)
        _daemonize()

    log.info("sniffer starting log_file=%s", log_file)
    console.print(f"[bold cyan]Sniffer starting[/] → {log_file}")

    try:
        asyncio.run(sniffer_worker(log_file))
    except KeyboardInterrupt:
        console.print("\n[yellow]Sniffer stopped.[/]")


_DB_RESET_TABLES: tuple[str, ...] = (
    # Order matters for DROP TABLE: attacker_behavior FK-references attackers.
    "attacker_behavior",
    "attackers",
    "logs",
    "bounty",
    "state",
    "users",
)


async def _db_reset_mysql_async(dsn: str, mode: str, confirm: bool) -> None:
    """Inspect + (optionally) wipe a MySQL database. Pulled out of the CLI
    wrapper so tests can drive it without spawning a Typer runner."""
    from urllib.parse import urlparse
    from sqlalchemy import text
    from sqlalchemy.ext.asyncio import create_async_engine

    db_name = urlparse(dsn).path.lstrip("/") or "(default)"
    engine = create_async_engine(dsn)
    try:
        # Collect current row counts per table. Missing tables yield -1.
        rows: dict[str, int] = {}
        async with engine.connect() as conn:
            for tbl in _DB_RESET_TABLES:
                try:
                    result = await conn.execute(text(f"SELECT COUNT(*) FROM `{tbl}`"))  # nosec B608
                    rows[tbl] = result.scalar() or 0
                except Exception:  # noqa: BLE001 — ProgrammingError for missing table varies by driver
                    rows[tbl] = -1

        summary = Table(title=f"DECNET MySQL reset — database `{db_name}` (mode={mode})")
        summary.add_column("Table", style="cyan")
        summary.add_column("Rows", justify="right")
        for tbl, count in rows.items():
            summary.add_row(tbl, "[dim]missing[/]" if count < 0 else f"{count:,}")
        console.print(summary)

        if not confirm:
            console.print(
                "[yellow]Dry-run only. Re-run with [bold]--i-know-what-im-doing[/] "
                "to actually execute.[/]"
            )
            return

        # Destructive phase. FK checks off so TRUNCATE/DROP works in any order.
        async with engine.begin() as conn:
            await conn.execute(text("SET FOREIGN_KEY_CHECKS = 0"))
            for tbl in _DB_RESET_TABLES:
                if rows.get(tbl, -1) < 0:
                    continue  # skip absent tables silently
                if mode == "truncate":
                    await conn.execute(text(f"TRUNCATE TABLE `{tbl}`"))
                    console.print(f"[green]✓ TRUNCATE {tbl}[/]")
                else:  # drop-tables
                    await conn.execute(text(f"DROP TABLE `{tbl}`"))
                    console.print(f"[green]✓ DROP TABLE {tbl}[/]")
            await conn.execute(text("SET FOREIGN_KEY_CHECKS = 1"))

        console.print(f"[bold green]Done. Database `{db_name}` reset ({mode}).[/]")
    finally:
        await engine.dispose()

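# Editor's note: a minimal sketch (not part of the diff) of driving the helper
# directly from a test, as the docstring above suggests; the DSN is hypothetical.
#
#     import asyncio
#     asyncio.run(_db_reset_mysql_async(
#         "mysql+aiomysql://decnet:secret@localhost/decnet_test",
#         mode="truncate",
#         confirm=False,  # dry-run: prints the row-count table, touches nothing
#     ))
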
@app.command(name="db-reset")
def db_reset(
    i_know: bool = typer.Option(
        False,
        "--i-know-what-im-doing",
        help="Required to actually execute. Without it, the command runs in dry-run mode.",
    ),
    mode: str = typer.Option(
        "truncate",
        "--mode",
        help="truncate (wipe rows, keep schema) | drop-tables (DROP TABLE for each DECNET table)",
    ),
    url: Optional[str] = typer.Option(
        None,
        "--url",
        help="Override DECNET_DB_URL for this invocation (e.g. when cleanup needs admin creds).",
    ),
) -> None:
    """Wipe the MySQL database used by the DECNET dashboard.

    Destructive. Runs dry by default — pass --i-know-what-im-doing to commit.
    Only supported against MySQL; refuses to operate on SQLite.
    """
    import asyncio
    import os

    if mode not in ("truncate", "drop-tables"):
        console.print(f"[red]Invalid --mode '{mode}'. Expected: truncate | drop-tables.[/]")
        raise typer.Exit(2)

    db_type = os.environ.get("DECNET_DB_TYPE", "sqlite").lower()
    if db_type != "mysql":
        console.print(
            f"[red]db-reset is MySQL-only (DECNET_DB_TYPE='{db_type}'). "
            f"For SQLite, just delete the decnet.db file.[/]"
        )
        raise typer.Exit(2)

    dsn = url or os.environ.get("DECNET_DB_URL")
    if not dsn:
        # Fall back to component env vars (DECNET_DB_HOST/PORT/NAME/USER/PASSWORD).
        from decnet.web.db.mysql.database import build_mysql_url
        try:
            dsn = build_mysql_url()
        except ValueError as e:
            console.print(f"[red]{e}[/]")
            raise typer.Exit(2) from e

    log.info("db-reset invoked mode=%s confirm=%s", mode, i_know)
    try:
        asyncio.run(_db_reset_mysql_async(dsn, mode=mode, confirm=i_know))
    except Exception as e:  # noqa: BLE001
        console.print(f"[red]db-reset failed: {e}[/]")
        raise typer.Exit(1) from e


if __name__ == '__main__':  # pragma: no cover
    app()
13
decnet/collector/__init__.py
Normal file
@@ -0,0 +1,13 @@
from decnet.collector.worker import (
    is_service_container,
    is_service_event,
    log_collector_worker,
    parse_rfc5424,
)

__all__ = [
    "is_service_container",
    "is_service_event",
    "log_collector_worker",
    "parse_rfc5424",
]
346
decnet/collector/worker.py
Normal file
@@ -0,0 +1,346 @@
"""
Host-side Docker log collector.

Streams stdout from all running decky service containers via the Docker SDK,
writes RFC 5424 lines to <log_file> and parsed JSON records to <log_file>.json.
The ingester tails the .json file; rsyslog can consume the .log file independently.
"""

import asyncio
import json
import os
import re
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
from pathlib import Path
from typing import Any, Optional

from decnet.logging import get_logger
from decnet.telemetry import traced as _traced, get_tracer as _get_tracer, inject_context as _inject_ctx

logger = get_logger("collector")

# ─── Ingestion rate limiter ───────────────────────────────────────────────────
#
# Rationale: connection-lifecycle events (connect/disconnect/accept/close) are
# emitted once per TCP connection. During a portscan or credential-stuffing
# run, a single attacker can generate hundreds of these per second from the
# honeypot services themselves — each becoming a tiny WAL-write transaction
# through the ingester, starving reads until the queue drains.
#
# The collector still writes every line to the raw .log file (forensic record
# for rsyslog/SIEM). Only the .json path — which feeds SQLite — is deduped.
#
# Dedup key:  (attacker_ip, decky, service, event_type)
# Window:     DECNET_COLLECTOR_RL_WINDOW_SEC seconds (default 1.0)
# Scope:      DECNET_COLLECTOR_RL_EVENT_TYPES comma list
#             (default: connect,disconnect,connection,accept,close)
# Events outside that set bypass the limiter untouched.


def _parse_float_env(name: str, default: float) -> float:
    raw = os.environ.get(name)
    if raw is None:
        return default
    try:
        value = float(raw)
    except ValueError:
        logger.warning("collector: invalid %s=%r, using default %s", name, raw, default)
        return default
    return max(0.0, value)


_RL_WINDOW_SEC: float = _parse_float_env("DECNET_COLLECTOR_RL_WINDOW_SEC", 1.0)
_RL_EVENT_TYPES: frozenset[str] = frozenset(
    t.strip()
    for t in os.environ.get(
        "DECNET_COLLECTOR_RL_EVENT_TYPES",
        "connect,disconnect,connection,accept,close",
    ).split(",")
    if t.strip()
)
_RL_MAX_ENTRIES: int = 10_000

_rl_lock: threading.Lock = threading.Lock()
_rl_last: dict[tuple[str, str, str, str], float] = {}


def _should_ingest(parsed: dict[str, Any]) -> bool:
    """
    Return True if this parsed event should be written to the JSON ingestion
    stream. Rate-limited connection-lifecycle events return False when another
    event with the same (attacker_ip, decky, service, event_type) was emitted
    inside the dedup window.
    """
    event_type = parsed.get("event_type", "")
    if _RL_WINDOW_SEC <= 0.0 or event_type not in _RL_EVENT_TYPES:
        return True
    key = (
        parsed.get("attacker_ip", "Unknown"),
        parsed.get("decky", ""),
        parsed.get("service", ""),
        event_type,
    )
    now = time.monotonic()
    with _rl_lock:
        last = _rl_last.get(key, 0.0)
        if now - last < _RL_WINDOW_SEC:
            return False
        _rl_last[key] = now
        # Opportunistic GC: when the map grows past the cap, drop entries older
        # than 60 windows (well outside any realistic in-flight dedup range).
        if len(_rl_last) > _RL_MAX_ENTRIES:
            cutoff = now - (_RL_WINDOW_SEC * 60.0)
            stale = [k for k, t in _rl_last.items() if t < cutoff]
            for k in stale:
                del _rl_last[k]
    return True


def _reset_rate_limiter() -> None:
    """Test-only helper — clear dedup state between test cases."""
    with _rl_lock:
        _rl_last.clear()

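# Editor's illustrative sketch (not part of the diff) of the dedup window in
# action; the event values are made up:
#
#     evt = {"attacker_ip": "203.0.113.7", "decky": "omega-decky",
#            "service": "ssh", "event_type": "connect"}
#     _should_ingest(evt)   # True:  first sighting, key recorded
#     _should_ingest(evt)   # False: same key inside the 1.0 s window
#     time.sleep(1.1)
#     _should_ingest(evt)   # True:  window expired, key refreshed
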
# ─── RFC 5424 parser ──────────────────────────────────────────────────────────

_RFC5424_RE = re.compile(
    r"^<\d+>1 "
    r"(\S+) "   # 1: TIMESTAMP
    r"(\S+) "   # 2: HOSTNAME (decky name)
    r"(\S+) "   # 3: APP-NAME (service)
    r"- "       # PROCID always NILVALUE
    r"(\S+) "   # 4: MSGID (event_type)
    r"(.+)$",   # 5: SD element + optional MSG
)
_SD_BLOCK_RE = re.compile(r'\[decnet@55555\s+(.*?)\]', re.DOTALL)
_PARAM_RE = re.compile(r'(\w+)="((?:[^"\\]|\\.)*)"')
_IP_FIELDS = ("src_ip", "src", "client_ip", "remote_ip", "remote_addr", "target_ip", "ip")


def parse_rfc5424(line: str) -> Optional[dict[str, Any]]:
    """
    Parse an RFC 5424 DECNET log line into a structured dict.
    Returns None if the line does not match the expected format.
    """
    m = _RFC5424_RE.match(line)
    if not m:
        return None
    ts_raw, decky, service, event_type, sd_rest = m.groups()

    fields: dict[str, str] = {}
    msg: str = ""

    if sd_rest.startswith("-"):
        msg = sd_rest[1:].lstrip()
    elif sd_rest.startswith("["):
        block = _SD_BLOCK_RE.search(sd_rest)
        if block:
            for k, v in _PARAM_RE.findall(block.group(1)):
                fields[k] = v.replace('\\"', '"').replace("\\\\", "\\").replace("\\]", "]")
        msg_match = re.search(r'\]\s+(.+)$', sd_rest)
        if msg_match:
            msg = msg_match.group(1).strip()
    else:
        msg = sd_rest

    attacker_ip = "Unknown"
    for fname in _IP_FIELDS:
        if fname in fields:
            attacker_ip = fields[fname]
            break

    try:
        ts_formatted = datetime.fromisoformat(ts_raw).strftime("%Y-%m-%d %H:%M:%S")
    except ValueError:
        ts_formatted = ts_raw

    return {
        "timestamp": ts_formatted,
        "decky": decky,
        "service": service,
        "event_type": event_type,
        "attacker_ip": attacker_ip,
        "fields": fields,
        "msg": msg,
        "raw_line": line,
    }

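# Editor's illustrative example (not part of the diff); the line is invented
# but follows the shape the regex above expects:
#
#     line = ('<134>1 2026-04-12T21:48:03+00:00 omega-decky ssh - login '
#             '[decnet@55555 src_ip="203.0.113.7" user="root"] Login attempt')
#     parse_rfc5424(line)
#     # → {"timestamp": "2026-04-12 21:48:03", "decky": "omega-decky",
#     #    "service": "ssh", "event_type": "login",
#     #    "attacker_ip": "203.0.113.7",
#     #    "fields": {"src_ip": "203.0.113.7", "user": "root"},
#     #    "msg": "Login attempt", "raw_line": line}
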
# ─── Container helpers ────────────────────────────────────────────────────────

def _load_service_container_names() -> set[str]:
    """
    Return the exact set of service container names from decnet-state.json.
    Format: {decky_name}-{service_name}, e.g. 'omega-decky-smtp'.
    Returns an empty set if no state file exists.
    """
    from decnet.config import load_state
    state = load_state()
    if state is None:
        return set()
    config, _ = state
    names: set[str] = set()
    for decky in config.deckies:
        for svc in decky.services:
            names.add(f"{decky.name}-{svc.replace('_', '-')}")
    return names


def is_service_container(container) -> bool:
    """Return True if this Docker container is a known DECNET service container."""
    name = (container if isinstance(container, str) else container.name).lstrip("/")
    return name in _load_service_container_names()


def is_service_event(attrs: dict) -> bool:
    """Return True if a Docker start event is for a known DECNET service container."""
    name = attrs.get("name", "").lstrip("/")
    return name in _load_service_container_names()


# ─── Blocking stream worker (runs in a thread) ────────────────────────────────

def _reopen_if_needed(path: Path, fh: Optional[Any]) -> Any:
    """Return fh if it still points to the same inode as path; otherwise close
    fh and open a fresh handle. Handles the file being deleted (manual rm) or
    rotated (logrotate rename + create)."""
    try:
        if fh is not None and os.fstat(fh.fileno()).st_ino == os.stat(path).st_ino:
            return fh
    except OSError:
        pass
    # File gone or inode changed — close stale handle and open a new one.
    if fh is not None:
        try:
            fh.close()
        except Exception:  # nosec B110 — best-effort file handle cleanup
            pass
    path.parent.mkdir(parents=True, exist_ok=True)
    return open(path, "a", encoding="utf-8")

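# Editor's note: a minimal sketch (not part of the diff) of the rotation case
# this helper covers; the path is hypothetical.
#
#     fh = _reopen_if_needed(Path("/var/log/decnet/decnet.log"), None)  # opens
#     # logrotate renames decnet.log to decnet.log.1 and creates a fresh file;
#     # the inode comparison now fails, so the next call reopens transparently:
#     fh = _reopen_if_needed(Path("/var/log/decnet/decnet.log"), fh)
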
@_traced("collector.stream_container")
def _stream_container(container_id: str, log_path: Path, json_path: Path) -> None:
    """Stream logs from one container and append to the host log files."""
    import docker  # type: ignore[import]

    lf: Optional[Any] = None
    jf: Optional[Any] = None
    try:
        client = docker.from_env()
        container = client.containers.get(container_id)
        log_stream = container.logs(stream=True, follow=True, stdout=True, stderr=False)
        buf = ""
        for chunk in log_stream:
            buf += chunk.decode("utf-8", errors="replace")
            while "\n" in buf:
                line, buf = buf.split("\n", 1)
                line = line.rstrip()
                if not line:
                    continue
                lf = _reopen_if_needed(log_path, lf)
                lf.write(line + "\n")
                lf.flush()
                parsed = parse_rfc5424(line)
                if parsed:
                    if _should_ingest(parsed):
                        _tracer = _get_tracer("collector")
                        with _tracer.start_as_current_span("collector.event") as _span:
                            _span.set_attribute("decky", parsed.get("decky", ""))
                            _span.set_attribute("service", parsed.get("service", ""))
                            _span.set_attribute("event_type", parsed.get("event_type", ""))
                            _span.set_attribute("attacker_ip", parsed.get("attacker_ip", ""))
                            _inject_ctx(parsed)
                            logger.debug("collector: event written decky=%s type=%s", parsed.get("decky"), parsed.get("event_type"))
                            jf = _reopen_if_needed(json_path, jf)
                            jf.write(json.dumps(parsed) + "\n")
                            jf.flush()
                    else:
                        logger.debug(
                            "collector: rate-limited decky=%s service=%s type=%s attacker=%s",
                            parsed.get("decky"), parsed.get("service"),
                            parsed.get("event_type"), parsed.get("attacker_ip"),
                        )
                else:
                    logger.debug("collector: malformed RFC5424 line snippet=%r", line[:80])
    except Exception as exc:
        logger.debug("collector: log stream ended container_id=%s reason=%s", container_id, exc)
    finally:
        for fh in (lf, jf):
            if fh is not None:
                try:
                    fh.close()
                except Exception:  # nosec B110 — best-effort file handle cleanup
                    pass


# ─── Async collector ──────────────────────────────────────────────────────────

async def log_collector_worker(log_file: str) -> None:
    """
    Background task: streams Docker logs from all running decky service
    containers, writing RFC 5424 lines to log_file and parsed JSON records
    to log_file.json for the ingester to consume.

    Watches Docker events to pick up containers started after initial scan.
    """
    import docker  # type: ignore[import]

    log_path = Path(log_file)
    json_path = log_path.with_suffix(".json")
    log_path.parent.mkdir(parents=True, exist_ok=True)

    active: dict[str, asyncio.Task[None]] = {}
    loop = asyncio.get_running_loop()

    # Dedicated thread pool so long-running container log streams don't
    # saturate the default asyncio executor and starve short-lived
    # to_thread() calls elsewhere (e.g. load_state in the web API).
    collector_pool = ThreadPoolExecutor(
        max_workers=64, thread_name_prefix="decnet-collector",
    )

    def _spawn(container_id: str, container_name: str) -> None:
        if container_id not in active or active[container_id].done():
            active[container_id] = asyncio.ensure_future(
                loop.run_in_executor(
                    collector_pool, _stream_container,
                    container_id, log_path, json_path,
                ),
                loop=loop,
            )
            logger.info("collector: streaming container=%s", container_name)

    try:
        logger.info("collector started log_path=%s", log_path)
        client = docker.from_env()

        for container in client.containers.list():
            if is_service_container(container):
                _spawn(container.id, container.name.lstrip("/"))

        def _watch_events() -> None:
            for event in client.events(
                decode=True,
                filters={"type": "container", "event": "start"},
            ):
                attrs = event.get("Actor", {}).get("Attributes", {})
                cid = event.get("id", "")
                name = attrs.get("name", "")
                if cid and is_service_event(attrs):
                    loop.call_soon_threadsafe(_spawn, cid, name)

        await loop.run_in_executor(collector_pool, _watch_events)

    except asyncio.CancelledError:
        logger.info("collector shutdown requested cancelling %d tasks", len(active))
        for task in active.values():
            task.cancel()
        collector_pool.shutdown(wait=False)
        raise
    except Exception as exc:
        logger.error("collector error: %s", exc)
    finally:
        collector_pool.shutdown(wait=False)
@@ -6,6 +6,12 @@ Network model:
    All service containers for that decky share the base's network namespace
    via `network_mode: "service:<base>"`. From the outside, every service on
    a given decky appears to come from the same IP — exactly like a real host.

Logging model:
    Service containers write RFC 5424 lines to stdout. Docker captures them
    via the json-file driver. The host-side collector (decnet.web.collector)
    streams those logs and writes them to the host log file for the ingester
    and rsyslog to consume. No bind mounts or shared volumes are needed.
"""

from pathlib import Path
@@ -17,35 +23,19 @@ from decnet.network import MACVLAN_NETWORK_NAME
from decnet.os_fingerprint import get_os_sysctls
from decnet.services.registry import get_service

_CONTAINER_LOG_DIR = "/var/log/decnet"


def _resolve_log_file(log_file: str) -> tuple[str, str]:
    """
    Return (host_dir, container_log_path) for a user-supplied log file path.

    The host path is resolved to absolute so Docker can bind-mount it.
    All containers share the same host directory, mounted at _CONTAINER_LOG_DIR.
    """
    host_path = Path(log_file).resolve()
    host_dir = str(host_path.parent)
    container_path = f"{_CONTAINER_LOG_DIR}/{host_path.name}"
    return host_dir, container_path
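

# Editor's illustrative example (not part of the diff); the working directory
# is hypothetical:
#
#     _resolve_log_file("logs/decnet.log")
#     # → ("/home/operator/decnet/logs", "/var/log/decnet/decnet.log")
#
# The host directory is bind-mounted at _CONTAINER_LOG_DIR, so every container
# writes to the same file name inside that shared mount.

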
_DOCKER_LOGGING = {
    "driver": "json-file",
    "options": {
        "max-size": "10m",
        "max-file": "5",
    },
}


def generate_compose(config: DecnetConfig) -> dict:
    """Build and return the full docker-compose data structure."""
    services: dict = {}

    log_host_dir: str | None = None
    log_container_path: str | None = None
    if config.log_file:
        log_host_dir, log_container_path = _resolve_log_file(config.log_file)
        # Ensure the host log directory exists so Docker doesn't create it as root-owned
        Path(log_host_dir).mkdir(parents=True, exist_ok=True)

    for decky in config.deckies:
        base_key = decky.name  # e.g. "decky-01"

@@ -62,8 +52,6 @@ def generate_compose(config: DecnetConfig) -> dict:
                }
            },
        }

        # Inject TCP/IP stack sysctls to spoof the claimed OS fingerprint.
        # Only the base container needs this — service containers inherit the
@@ -76,24 +64,21 @@ def generate_compose(config: DecnetConfig) -> dict:
        # --- Service containers: share base network namespace ---
        for svc_name in decky.services:
            svc = get_service(svc_name)
            if svc.fleet_singleton:
                continue
            svc_cfg = decky.service_config.get(svc_name, {})
            fragment = svc.compose_fragment(decky.name, service_cfg=svc_cfg)

            # Inject the per-decky base image into build services so containers
            # vary by distro and don't all fingerprint as debian:bookworm-slim.
            # Services that need a fixed upstream image (e.g. conpot) can pre-set
            # build.args.BASE_IMAGE in their compose_fragment() to opt out.
            if "build" in fragment:
                args = fragment["build"].setdefault("args", {})
                args.setdefault("BASE_IMAGE", decky.build_base)

            fragment.setdefault("environment", {})
            fragment["environment"]["HOSTNAME"] = decky.hostname
            if log_host_dir and log_container_path:
                fragment["environment"]["DECNET_LOG_FILE"] = log_container_path
                fragment.setdefault("volumes", [])
                mount = f"{log_host_dir}:{_CONTAINER_LOG_DIR}"
                if mount not in fragment["volumes"]:
                    fragment["volumes"].append(mount)

            # Share the base container's network — no own IP needed
            fragment["network_mode"] = f"service:{base_key}"
@@ -103,6 +88,9 @@ def generate_compose(config: DecnetConfig) -> dict:
            fragment.pop("hostname", None)
            fragment.pop("networks", None)

            # Rotate Docker logs so disk usage is bounded
            fragment["logging"] = _DOCKER_LOGGING

            services[f"{decky.name}-{svc_name}"] = fragment

    # Network definitions
@@ -111,8 +99,6 @@ def generate_compose(config: DecnetConfig) -> dict:
            "external": True,  # created by network.py before compose up
        }
    }

    return {
        "version": "3.8",
140
decnet/config.py
@@ -4,61 +4,113 @@ State is persisted to decnet-state.json in the working directory.
"""

import json
import logging
import os
import socket as _socket
from datetime import datetime, timezone
from pathlib import Path
from typing import Literal

from pydantic import BaseModel, field_validator
from decnet.models import DeckyConfig, DecnetConfig  # noqa: F401

from decnet.distros import random_hostname as _random_hostname

# ---------------------------------------------------------------------------
# RFC 5424 syslog formatter
# ---------------------------------------------------------------------------
# Severity mapping: Python level → syslog severity (RFC 5424 §6.2.1)
_SYSLOG_SEVERITY: dict[int, int] = {
    logging.CRITICAL: 2,  # Critical
    logging.ERROR: 3,     # Error
    logging.WARNING: 4,   # Warning
    logging.INFO: 6,      # Informational
    logging.DEBUG: 7,     # Debug
}
_FACILITY_LOCAL0 = 16  # local0 (RFC 5424 §6.2.1 / POSIX)


class Rfc5424Formatter(logging.Formatter):
    """Formats log records as RFC 5424 syslog messages.

    Output:
        <PRIVAL>1 TIMESTAMP HOSTNAME APP-NAME PROCID MSGID STRUCTURED-DATA MSG

    Example:
        <134>1 2026-04-12T21:48:03.123456+00:00 host decnet 1234 decnet.config - Dev mode active
    """

    _hostname: str = _socket.gethostname()
    _app: str = "decnet"

    def format(self, record: logging.LogRecord) -> str:
        severity = _SYSLOG_SEVERITY.get(record.levelno, 6)
        prival = (_FACILITY_LOCAL0 * 8) + severity
        ts = datetime.fromtimestamp(record.created, tz=timezone.utc).isoformat(timespec="microseconds")
        msg = record.getMessage()
        if record.exc_info:
            msg += "\n" + self.formatException(record.exc_info)
        app = getattr(record, "decnet_component", self._app)
        return (
            f"<{prival}>1 {ts} {self._hostname} {app}"
            f" {os.getpid()} {record.name} - {msg}"
        )

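# Editor's worked example (not part of the diff): PRIVAL = facility * 8 + severity
# (RFC 5424 §6.2.1). For the docstring's sample line above:
#
#     local0 (16) * 8 + Informational (6) = 134,  hence the "<134>1 ..." prefix
#
# An ERROR record on the same facility would carry <131> (16 * 8 + 3).
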
def _configure_logging(dev: bool) -> None:
    """Install RFC 5424 handlers on the root logger (idempotent).

    Always adds a StreamHandler (stderr). Also adds a RotatingFileHandler
    writing to DECNET_SYSTEM_LOGS (default: decnet.system.log in $PWD) so
    all microservice daemons — which redirect stderr to /dev/null — still
    produce readable logs. File handler is skipped under pytest.
    """
    import logging.handlers as _lh

    root = logging.getLogger()
    # Guard: if our StreamHandler is already installed, all handlers are set.
    if any(isinstance(h, logging.StreamHandler) and isinstance(h.formatter, Rfc5424Formatter)
           for h in root.handlers):
        return

    fmt = Rfc5424Formatter()
    root.setLevel(logging.DEBUG if dev else logging.INFO)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(fmt)
    root.addHandler(stream_handler)

    # Skip the file handler during pytest runs to avoid polluting the test cwd.
    _in_pytest = any(k.startswith("PYTEST") for k in os.environ)
    if not _in_pytest:
        _log_path = os.environ.get("DECNET_SYSTEM_LOGS", "decnet.system.log")
        file_handler = _lh.RotatingFileHandler(
            _log_path,
            mode="a",
            maxBytes=10 * 1024 * 1024,  # 10 MB
            backupCount=5,
            encoding="utf-8",
        )
        file_handler.setFormatter(fmt)
        root.addHandler(file_handler)


_dev = os.environ.get("DECNET_DEVELOPER", "").lower() == "true"
_configure_logging(_dev)

log = logging.getLogger(__name__)

if _dev:
    log.debug("Developer mode: debug logging active")

# Calculate absolute path to the project root (where the config file resides)
_ROOT: Path = Path(__file__).parent.parent.absolute()
STATE_FILE: Path = _ROOT / "decnet-state.json"
DEFAULT_MUTATE_INTERVAL: int = 30  # default rotation interval in minutes


def random_hostname(distro_slug: str = "debian") -> str:
    return _random_hostname(distro_slug)


class DeckyConfig(BaseModel):
    name: str
    ip: str
    services: list[str]
    distro: str  # slug from distros.DISTROS, e.g. "debian", "ubuntu22"
    base_image: str  # Docker image for the base/IP-holder container
    build_base: str = "debian:bookworm-slim"  # apt-compatible image for service Dockerfiles
    hostname: str
    archetype: str | None = None  # archetype slug if spawned from an archetype profile
    service_config: dict[str, dict] = {}  # optional per-service persona config
    nmap_os: str = "linux"  # OS family for TCP/IP stack spoofing (see os_fingerprint.py)

    @field_validator("services")
    @classmethod
    def services_not_empty(cls, v: list[str]) -> list[str]:
        if not v:
            raise ValueError("A decky must have at least one service.")
        return v


class DecnetConfig(BaseModel):
    mode: Literal["unihost", "swarm"]
    interface: str
    subnet: str
    gateway: str
    deckies: list[DeckyConfig]
    log_target: str | None = None  # "ip:port" or None
    log_file: str | None = None  # path for RFC 5424 syslog file output
    ipvlan: bool = False  # use IPvlan L2 instead of MACVLAN (WiFi-friendly)

    @field_validator("log_target")
    @classmethod
    def validate_log_target(cls, v: str | None) -> str | None:
        if v is None:
            return v
        parts = v.rsplit(":", 1)
        if len(parts) != 2 or not parts[1].isdigit():
            raise ValueError("log_target must be in ip:port format, e.g. 192.168.1.5:5140")
        return v

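# Editor's illustrative values for the validator above (not part of the diff):
#
#     "192.168.1.5:5140"   accepted: two parts after rsplit, numeric port
#     "192.168.1.5"        rejected: no port to split off
#     "siem.local:syslog"  rejected: port must be numeric
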

def save_state(config: DecnetConfig, compose_path: Path) -> None:
    payload = {
        "config": config.model_dump(),

@@ -5,9 +5,9 @@ from decnet.correlation.graph import AttackerTraversal, TraversalHop
from decnet.correlation.parser import LogEvent, parse_line

__all__ = [
    "AttackerTraversal",
    "CorrelationEngine",
    "LogEvent",
    "TraversalHop",
    "parse_line",
]

@@ -33,6 +33,7 @@ from decnet.logging.syslog_formatter import (
    SEVERITY_WARNING,
    format_rfc5424,
)
from decnet.telemetry import traced as _traced, get_tracer as _get_tracer


class CorrelationEngine:
@@ -64,6 +65,7 @@ class CorrelationEngine:
        self.events_indexed += 1
        return event

    @_traced("correlation.ingest_file")
    def ingest_file(self, path: Path) -> int:
        """
        Parse every line of *path* and index it.

@@ -73,12 +75,18 @@ class CorrelationEngine:
        with open(path) as fh:
            for line in fh:
                self.ingest(line)
        _tracer = _get_tracer("correlation")
        with _tracer.start_as_current_span("correlation.ingest_file.summary") as _span:
            _span.set_attribute("lines_parsed", self.lines_parsed)
            _span.set_attribute("events_indexed", self.events_indexed)
            _span.set_attribute("unique_ips", len(self._events))
        return self.events_indexed

    # ------------------------------------------------------------------ #
    # Query                                                               #
    # ------------------------------------------------------------------ #

    @_traced("correlation.traversals")
    def traversals(self, min_deckies: int = 2) -> list[AttackerTraversal]:
        """
        Return all attackers that touched at least *min_deckies* distinct
@@ -135,6 +143,7 @@ class CorrelationEngine:
        )
        return table

    @_traced("correlation.report_json")
    def report_json(self, min_deckies: int = 2) -> dict:
        """Serialisable dict representation of all traversals."""
        return {
@@ -147,6 +156,7 @@ class CorrelationEngine:
            "traversals": [t.to_dict() for t in self.traversals(min_deckies)],
        }

    @_traced("correlation.traversal_syslog_lines")
    def traversal_syslog_lines(self, min_deckies: int = 2) -> list[str]:
        """
        Emit one RFC 5424 syslog line per detected traversal.

@@ -38,7 +38,7 @@ _SD_BLOCK_RE = re.compile(r'\[decnet@55555\s+(.*?)\]', re.DOTALL)
_PARAM_RE = re.compile(r'(\w+)="((?:[^"\\]|\\.)*)"')

# Field names to probe for attacker IP, in priority order
_IP_FIELDS = ("src_ip", "src", "client_ip", "remote_ip", "remote_addr", "target_ip", "ip")


@dataclass

@@ -97,8 +97,8 @@ def random_hostname(distro_slug: str = "debian") -> str:
    """Generate a plausible hostname for the given distro style."""
    profile = DISTROS.get(distro_slug)
    style = profile.hostname_style if profile else "generic"
    word = random.choice(_NAME_WORDS)  # nosec B311
    num = random.randint(10, 99)  # nosec B311

    if style == "rhel":
        # RHEL/CentOS/Fedora convention: word+num.localdomain
@@ -107,7 +107,7 @@ def random_hostname(distro_slug: str = "debian") -> str:
        return f"{word}-{num}"
    elif style == "rolling":
        # Kali/Arch: just a word, no suffix
        return f"{word}-{random.choice(_NAME_WORDS)}"  # nosec B311
    else:
        # Debian/Ubuntu: SRV-WORD-nn
        return f"SRV-{word.upper()}-{num}"
@@ -122,7 +122,7 @@ def get_distro(slug: str) -> DistroProfile:


def random_distro() -> DistroProfile:
    return random.choice(list(DISTROS.values()))  # nosec B311


def all_distros() -> dict[str, DistroProfile]:

15
decnet/engine/__init__.py
Normal file
@@ -0,0 +1,15 @@
from decnet.engine.deployer import (
    COMPOSE_FILE,
    _compose_with_retry,
    deploy,
    status,
    teardown,
)

__all__ = [
    "COMPOSE_FILE",
    "_compose_with_retry",
    "deploy",
    "status",
    "teardown",
]
@@ -2,7 +2,8 @@
Deploy, teardown, and status via Docker SDK + subprocess docker compose.
"""

import shutil
import subprocess  # nosec B404
import time
from pathlib import Path

@@ -10,6 +11,8 @@ import docker
from rich.console import Console
from rich.table import Table

from decnet.logging import get_logger
from decnet.telemetry import traced as _traced
from decnet.config import DecnetConfig, clear_state, load_state, save_state
from decnet.composer import write_compose
from decnet.network import (
@@ -25,13 +28,35 @@ from decnet.network import (
    teardown_host_macvlan,
)

log = get_logger("engine")
console = Console()
COMPOSE_FILE = Path("decnet-compose.yml")
_CANONICAL_LOGGING = Path(__file__).parent.parent.parent / "templates" / "decnet_logging.py"


def _sync_logging_helper(config: DecnetConfig) -> None:
    """Copy the canonical decnet_logging.py into every active template build context."""
    from decnet.services.registry import get_service
    seen: set[Path] = set()
    for decky in config.deckies:
        for svc_name in decky.services:
            svc = get_service(svc_name)
            if svc is None:
                continue
            ctx = svc.dockerfile_context()
            if ctx is None or ctx in seen:
                continue
            seen.add(ctx)
            dest = ctx / "decnet_logging.py"
            if not dest.exists() or dest.read_bytes() != _CANONICAL_LOGGING.read_bytes():
                shutil.copy2(_CANONICAL_LOGGING, dest)


def _compose(*args: str, compose_file: Path = COMPOSE_FILE, env: dict | None = None) -> None:
    import os
    cmd = ["docker", "compose", "-f", str(compose_file), *args]
    merged = {**os.environ, **(env or {})}
    subprocess.run(cmd, check=True, env=merged)  # nosec B603


_PERMANENT_ERRORS = (
@@ -43,17 +68,21 @@ _PERMANENT_ERRORS = (
)


@_traced("engine.compose_with_retry")
def _compose_with_retry(
    *args: str,
    compose_file: Path = COMPOSE_FILE,
    retries: int = 3,
    delay: float = 5.0,
    env: dict | None = None,
) -> None:
    """Run a docker compose command, retrying on transient failures."""
    import os
    last_exc: subprocess.CalledProcessError | None = None
    cmd = ["docker", "compose", "-f", str(compose_file), *args]
    merged = {**os.environ, **(env or {})}
    for attempt in range(1, retries + 1):
        result = subprocess.run(cmd, capture_output=True, text=True, env=merged)  # nosec B603
        if result.returncode == 0:
            if result.stdout:
                print(result.stdout, end="")
@@ -80,13 +109,16 @@ def _compose_with_retry(
    raise last_exc


@_traced("engine.deploy")
def deploy(config: DecnetConfig, dry_run: bool = False, no_cache: bool = False, parallel: bool = False) -> None:
    log.info("deployment started n_deckies=%d interface=%s subnet=%s dry_run=%s", len(config.deckies), config.interface, config.subnet, dry_run)
    log.debug("deploy: deckies=%s", [d.name for d in config.deckies])
    client = docker.from_env()

    # --- Network setup ---
    ip_list = [d.ip for d in config.deckies]
    decky_range = ips_to_range(ip_list)
    host_ip = get_host_ip(config.interface)
    log.debug("deploy: ip_range=%s host_ip=%s", decky_range, host_ip)

    net_driver = "IPvlan L2" if config.ipvlan else "MACVLAN"
    console.print(f"[bold cyan]Creating {net_driver} network[/] ({MACVLAN_NETWORK_NAME}) on {config.interface}")
@@ -110,30 +142,44 @@ def deploy(config: DecnetConfig, dry_run: bool = False, no_cache: bool = False)
    )
    setup_host_macvlan(config.interface, host_ip, decky_range)

    # --- Compose generation ---
    _sync_logging_helper(config)

    compose_path = write_compose(config, COMPOSE_FILE)
    console.print(f"[bold cyan]Compose file written[/] → {compose_path}")

    if dry_run:
        log.info("deployment dry-run complete compose_path=%s", compose_path)
        console.print("[yellow]Dry run — no containers started.[/]")
        return

    # --- Save state before bring-up ---
    save_state(config, compose_path)

    # --- Bring up ---
    build_env = {"DOCKER_BUILDKIT": "1"} if parallel else {}

    console.print("[bold cyan]Building images and starting deckies...[/]")
    build_args = ["build"]
    if no_cache:
        build_args.append("--no-cache")

    if parallel:
        console.print("[bold cyan]Parallel build enabled — building all images concurrently...[/]")
        _compose_with_retry(*build_args, compose_file=compose_path, env=build_env)
        _compose_with_retry("up", "-d", compose_file=compose_path, env=build_env)
    else:
        if no_cache:
            _compose_with_retry("build", "--no-cache", compose_file=compose_path)
        _compose_with_retry("up", "--build", "-d", compose_file=compose_path)

    # --- Status summary ---
    log.info("deployment complete n_deckies=%d", len(config.deckies))
    _print_status(config)


@_traced("engine.teardown")
def teardown(decky_id: str | None = None) -> None:
    log.info("teardown requested decky_id=%s", decky_id or "all")
    state = load_state()
    if state is None:
        log.warning("teardown: no active deployment found")
        console.print("[red]No active deployment found (no decnet-state.json).[/]")
        return

@@ -141,7 +187,6 @@ def teardown(decky_id: str | None = None) -> None:
    client = docker.from_env()

    if decky_id:
        # Bring down only the services matching this decky
        svc_names = [f"{decky_id}-{svc}" for d in config.deckies if d.name == decky_id for svc in d.services]
        if not svc_names:
            console.print(f"[red]Decky '{decky_id}' not found in current deployment.[/]")
@@ -159,7 +204,9 @@ def teardown(decky_id: str | None = None) -> None:
    teardown_host_macvlan(decky_range)
    remove_macvlan_network(client)
    clear_state()

    net_driver = "IPvlan" if config.ipvlan else "MACVLAN"
    log.info("teardown complete all deckies removed network_driver=%s", net_driver)
    console.print(f"[green]All deckies torn down. {net_driver} network removed.[/]")


@@ -179,7 +226,7 @@ def status() -> None:
    table.add_column("Hostname")
    table.add_column("Status")

    running = {c.name: c.status for c in client.containers.list(all=True, ignore_removed=True)}

    for decky in config.deckies:
        statuses = []
97
decnet/env.py
Normal file
@@ -0,0 +1,97 @@
import os
from pathlib import Path
from typing import Optional
from dotenv import load_dotenv

# Calculate absolute path to the project root
_ROOT: Path = Path(__file__).parent.parent.absolute()

# Load .env.local first, then fall back to .env
load_dotenv(_ROOT / ".env.local")
load_dotenv(_ROOT / ".env")


def _port(name: str, default: int) -> int:
    raw = os.environ.get(name, str(default))
    try:
        value = int(raw)
    except ValueError:
        raise ValueError(f"Environment variable '{name}' must be an integer, got '{raw}'.")
    if not (1 <= value <= 65535):
        raise ValueError(f"Environment variable '{name}' must be 1–65535, got {value}.")
    return value


def _require_env(name: str) -> str:
    """Return the env var value or raise at startup if it is unset or a known-bad default."""
    _KNOWN_BAD = {"fallback-secret-key-change-me", "admin", "secret", "password", "changeme"}
    value = os.environ.get(name)
    if not value:
        raise ValueError(
            f"Required environment variable '{name}' is not set. "
            f"Set it in .env.local or export it before starting DECNET."
        )

    if any(k.startswith("PYTEST") for k in os.environ):
        return value

    if value.lower() in _KNOWN_BAD:
        raise ValueError(
            f"Environment variable '{name}' is set to an insecure default ('{value}'). "
            f"Choose a strong, unique value before starting DECNET."
        )
    if name == "DECNET_JWT_SECRET" and len(value) < 32:
        _developer = os.environ.get("DECNET_DEVELOPER", "False").lower() == "true"
        if not _developer:
            raise ValueError(
                f"DECNET_JWT_SECRET is too short ({len(value)} bytes). "
                f"Use at least 32 characters to satisfy HS256 requirements (RFC 7518 §3.2)."
            )
    return value

|
||||
# System logging — all microservice daemons append here.
|
||||
DECNET_SYSTEM_LOGS: str = os.environ.get("DECNET_SYSTEM_LOGS", "decnet.system.log")
|
||||
|
||||
# Set to "true" to embed the profiler inside the API process.
|
||||
# Leave unset (default) when the standalone `decnet profiler --daemon` is
|
||||
# running — embedding both produces two workers sharing the same DB cursor,
|
||||
# which causes events to be skipped or processed twice.
|
||||
DECNET_EMBED_PROFILER: bool = os.environ.get("DECNET_EMBED_PROFILER", "").lower() == "true"
|
||||
|
||||
# API Options
|
||||
DECNET_API_HOST: str = os.environ.get("DECNET_API_HOST", "127.0.0.1")
|
||||
DECNET_API_PORT: int = _port("DECNET_API_PORT", 8000)
|
||||
DECNET_JWT_SECRET: str = _require_env("DECNET_JWT_SECRET")
|
||||
DECNET_INGEST_LOG_FILE: str | None = os.environ.get("DECNET_INGEST_LOG_FILE", "/var/log/decnet/decnet.log")
|
||||
|
||||
# Web Dashboard Options
|
||||
DECNET_WEB_HOST: str = os.environ.get("DECNET_WEB_HOST", "127.0.0.1")
|
||||
DECNET_WEB_PORT: int = _port("DECNET_WEB_PORT", 8080)
|
||||
DECNET_ADMIN_USER: str = os.environ.get("DECNET_ADMIN_USER", "admin")
|
||||
DECNET_ADMIN_PASSWORD: str = os.environ.get("DECNET_ADMIN_PASSWORD", "admin")
|
||||
DECNET_DEVELOPER: bool = os.environ.get("DECNET_DEVELOPER", "False").lower() == "true"
|
||||
|
||||
# Tracing — set to "true" to enable OpenTelemetry distributed tracing.
|
||||
# Separate from DECNET_DEVELOPER so tracing can be toggled independently.
|
||||
DECNET_DEVELOPER_TRACING: bool = os.environ.get("DECNET_DEVELOPER_TRACING", "").lower() == "true"
|
||||
DECNET_OTEL_ENDPOINT: str = os.environ.get("DECNET_OTEL_ENDPOINT", "http://localhost:4317")
|
||||
|
||||
# Database Options
|
||||
DECNET_DB_TYPE: str = os.environ.get("DECNET_DB_TYPE", "sqlite").lower()
|
||||
DECNET_DB_URL: Optional[str] = os.environ.get("DECNET_DB_URL")
|
||||
# MySQL component vars (used only when DECNET_DB_URL is not set)
|
||||
DECNET_DB_HOST: str = os.environ.get("DECNET_DB_HOST", "localhost")
|
||||
DECNET_DB_PORT: int = _port("DECNET_DB_PORT", 3306) if os.environ.get("DECNET_DB_PORT") else 3306
|
||||
DECNET_DB_NAME: str = os.environ.get("DECNET_DB_NAME", "decnet")
|
||||
DECNET_DB_USER: str = os.environ.get("DECNET_DB_USER", "decnet")
|
||||
DECNET_DB_PASSWORD: Optional[str] = os.environ.get("DECNET_DB_PASSWORD")
|
||||
|
||||
# CORS — comma-separated list of allowed origins for the web dashboard API.
|
||||
# Defaults to the configured web host/port. Override with DECNET_CORS_ORIGINS if needed.
|
||||
# Example: DECNET_CORS_ORIGINS=http://192.168.1.50:9090,https://dashboard.example.com
|
||||
_WILDCARD_ADDRS = {"0.0.0.0", "127.0.0.1", "::"} # nosec B104 — comparison only, not a bind
|
||||
_web_hostname: str = "localhost" if DECNET_WEB_HOST in _WILDCARD_ADDRS else DECNET_WEB_HOST
|
||||
_cors_default: str = f"http://{_web_hostname}:{DECNET_WEB_PORT}"
|
||||
_cors_raw: str = os.environ.get("DECNET_CORS_ORIGINS", _cors_default)
|
||||
DECNET_CORS_ORIGINS: list[str] = [o.strip() for o in _cors_raw.split(",") if o.strip()]
|
||||
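The CORS default above quietly rewrites wildcard bind addresses, which is easy to miss when debugging dashboard fetch failures. A minimal sketch of that derivation, using hypothetical host and port values (the set literal mirrors _WILDCARD_ADDRS):

host, port = "0.0.0.0", 8080                       # hypothetical bind config
wildcard = {"0.0.0.0", "127.0.0.1", "::"}          # mirrors _WILDCARD_ADDRS above
web_hostname = "localhost" if host in wildcard else host
print(f"http://{web_hostname}:{port}")             # -> http://localhost:8080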
decnet/fleet.py (Normal file, 177 lines)
@@ -0,0 +1,177 @@
"""
|
||||
Fleet builder — shared logic for constructing DeckyConfig lists.
|
||||
|
||||
Used by both the CLI and the web API router to build deckies from
|
||||
flags or INI config. Lives here (not in cli.py) so that the web layer
|
||||
and the mutation engine can import it without depending on the CLI.
|
||||
"""
|
||||
|
||||
import random
|
||||
from typing import Optional
|
||||
|
||||
from decnet.archetypes import Archetype, get_archetype
|
||||
from decnet.config import DeckyConfig, random_hostname
|
||||
from decnet.distros import all_distros, get_distro, random_distro
|
||||
from decnet.models import IniConfig
|
||||
from decnet.services.registry import all_services
|
||||
|
||||
|
||||
def all_service_names() -> list[str]:
|
||||
"""Return all registered per-decky service names (excludes fleet singletons)."""
|
||||
return sorted(
|
||||
name for name, svc in all_services().items()
|
||||
if not svc.fleet_singleton
|
||||
)
|
||||
|
||||
|
||||
def resolve_distros(
|
||||
distros_explicit: list[str] | None,
|
||||
randomize_distros: bool,
|
||||
n: int,
|
||||
archetype: Archetype | None = None,
|
||||
) -> list[str]:
|
||||
"""Return a list of n distro slugs based on flags or archetype preference."""
|
||||
if distros_explicit:
|
||||
return [distros_explicit[i % len(distros_explicit)] for i in range(n)]
|
||||
if randomize_distros:
|
||||
return [random_distro().slug for _ in range(n)]
|
||||
if archetype:
|
||||
pool = archetype.preferred_distros
|
||||
return [pool[i % len(pool)] for i in range(n)]
|
||||
slugs = list(all_distros().keys())
|
||||
return [slugs[i % len(slugs)] for i in range(n)]
|
||||
|
||||
|
||||
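resolve_distros cycles whatever pool it settles on with modular indexing, so a short pool repeats deterministically rather than randomly. A quick illustrative sketch of that cycling, with made-up slugs:

pool = ["debian", "alpine"]        # hypothetical two-slug pool, fleet of five
print([pool[i % len(pool)] for i in range(5)])
# -> ['debian', 'alpine', 'debian', 'alpine', 'debian']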
def build_deckies(
    n: int,
    ips: list[str],
    services_explicit: list[str] | None,
    randomize_services: bool,
    distros_explicit: list[str] | None = None,
    randomize_distros: bool = False,
    archetype: Archetype | None = None,
    mutate_interval: Optional[int] = None,
) -> list[DeckyConfig]:
    """Build a list of DeckyConfigs from CLI-style flags."""
    deckies = []
    used_combos: set[frozenset] = set()
    distro_slugs = resolve_distros(distros_explicit, randomize_distros, n, archetype)

    for i, ip in enumerate(ips):
        name = f"decky-{i + 1:02d}"
        distro = get_distro(distro_slugs[i])
        hostname = random_hostname(distro.slug)

        if services_explicit:
            svc_list = services_explicit
        elif archetype:
            svc_list = list(archetype.services)
        elif randomize_services:
            svc_pool = all_service_names()
            attempts = 0
            while True:
                count = random.randint(1, min(3, len(svc_pool)))  # nosec B311
                chosen = frozenset(random.sample(svc_pool, count))  # nosec B311
                attempts += 1
                if chosen not in used_combos or attempts > 20:
                    break
            svc_list = list(chosen)
            used_combos.add(chosen)
        else:
            raise ValueError("Provide services_explicit, archetype, or randomize_services=True.")

        deckies.append(
            DeckyConfig(
                name=name,
                ip=ip,
                services=svc_list,
                distro=distro.slug,
                base_image=distro.image,
                build_base=distro.build_base,
                hostname=hostname,
                archetype=archetype.slug if archetype else None,
                nmap_os=archetype.nmap_os if archetype else "linux",
                mutate_interval=mutate_interval,
            )
        )
    return deckies
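The randomize branch above is bounded rejection sampling: retry until the service combo is unseen, but give up after 20 attempts so a small pool cannot loop forever. A standalone sketch of the same pattern with a toy pool:

import random

pool = ["ssh", "http", "ftp"]          # toy pool for the demo
used: set[frozenset] = set()
for _ in range(4):                     # draw four combos
    attempts = 0
    while True:
        combo = frozenset(random.sample(pool, random.randint(1, 3)))
        attempts += 1
        if combo not in used or attempts > 20:
            break
    used.add(combo)
print(len(used))                       # usually 4; fewer if the attempt cap was hit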
def build_deckies_from_ini(
    ini: IniConfig,
    subnet_cidr: str,
    gateway: str,
    host_ip: str,
    randomize: bool,
    cli_mutate_interval: int | None = None,
) -> list[DeckyConfig]:
    """Build DeckyConfig list from an IniConfig, auto-allocating missing IPs."""
    from ipaddress import IPv4Address, IPv4Network
    import time
    now = time.time()

    explicit_ips: set[IPv4Address] = {
        IPv4Address(s.ip) for s in ini.deckies if s.ip
    }

    net = IPv4Network(subnet_cidr, strict=False)
    reserved = {
        net.network_address,
        net.broadcast_address,
        IPv4Address(gateway),
        IPv4Address(host_ip),
    } | explicit_ips

    auto_pool = (str(addr) for addr in net.hosts() if addr not in reserved)

    deckies: list[DeckyConfig] = []
    for spec in ini.deckies:
        arch: Archetype | None = None
        if spec.archetype:
            arch = get_archetype(spec.archetype)

        distro_pool = arch.preferred_distros if arch else list(all_distros().keys())
        distro = get_distro(distro_pool[len(deckies) % len(distro_pool)])
        hostname = random_hostname(distro.slug)

        ip = spec.ip or next(auto_pool, None)
        if ip is None:
            raise ValueError(f"Not enough free IPs in {subnet_cidr} while assigning IP for '{spec.name}'.")

        if spec.services:
            known = set(all_service_names())
            unknown = [s for s in spec.services if s not in known]
            if unknown:
                raise ValueError(
                    f"Unknown service(s) in [{spec.name}]: {unknown}. "
                    f"Available: {all_service_names()}"
                )
            svc_list = spec.services
        elif arch:
            svc_list = list(arch.services)
        elif randomize or (not spec.services and not arch):
            svc_pool = all_service_names()
            count = random.randint(1, min(3, len(svc_pool)))  # nosec B311
            svc_list = random.sample(svc_pool, count)  # nosec B311

        resolved_nmap_os = spec.nmap_os or (arch.nmap_os if arch else "linux")

        decky_mutate_interval = cli_mutate_interval
        if decky_mutate_interval is None:
            decky_mutate_interval = spec.mutate_interval if spec.mutate_interval is not None else ini.mutate_interval

        deckies.append(DeckyConfig(
            name=spec.name,
            ip=ip,
            services=svc_list,
            distro=distro.slug,
            base_image=distro.image,
            build_base=distro.build_base,
            hostname=hostname,
            archetype=arch.slug if arch else None,
            service_config=spec.service_config,
            nmap_os=resolved_nmap_os,
            mutate_interval=decky_mutate_interval,
            last_mutated=now,
        ))
    return deckies
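The auto_pool generator hands out the next free host address lazily; reserved and explicitly assigned addresses are skipped once up front. A self-contained sketch with hypothetical addresses:

from ipaddress import IPv4Address, IPv4Network

net = IPv4Network("192.168.1.0/29")                     # tiny subnet for the demo
reserved = {net.network_address, net.broadcast_address,
            IPv4Address("192.168.1.1"),                 # gateway
            IPv4Address("192.168.1.2")}                 # host
pool = (str(a) for a in net.hosts() if a not in reserved)
print(next(pool, None), next(pool, None))               # -> 192.168.1.3 192.168.1.4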
@@ -6,7 +6,6 @@ Format:
    net=192.168.1.0/24
    gw=192.168.1.1
    interface=wlp6s0
    log_target=192.168.1.5:5140   # optional

    [hostname-1]
    ip=192.168.1.82               # optional
@@ -42,37 +41,8 @@ Format:
"""

import configparser
from dataclasses import dataclass, field
from pathlib import Path


-@dataclass
-class DeckySpec:
-    name: str
-    ip: str | None = None
-    services: list[str] | None = None
-    archetype: str | None = None
-    service_config: dict[str, dict] = field(default_factory=dict)
-    nmap_os: str | None = None  # explicit OS family override (linux/windows/bsd/embedded/cisco)
-
-
-@dataclass
-class CustomServiceSpec:
-    """Spec for a user-defined (bring-your-own) service."""
-    name: str      # service slug, e.g. "myservice" (section is "custom-myservice")
-    image: str     # Docker image to use
-    exec_cmd: str  # command to run inside the container
-    ports: list[int] = field(default_factory=list)
-
-
-@dataclass
-class IniConfig:
-    subnet: str | None = None
-    gateway: str | None = None
-    interface: str | None = None
-    log_target: str | None = None
-    deckies: list[DeckySpec] = field(default_factory=list)
-    custom_services: list[CustomServiceSpec] = field(default_factory=list)
+from decnet.models import IniConfig, DeckySpec, CustomServiceSpec, validate_ini_string  # noqa: F401

def load_ini(path: str | Path) -> IniConfig:
@@ -81,7 +51,21 @@ def load_ini(path: str | Path) -> IniConfig:
    read = cp.read(str(path))
    if not read:
        raise FileNotFoundError(f"Config file not found: {path}")
    return _parse_configparser(cp)


def load_ini_from_string(content: str) -> IniConfig:
    """Parse a DECNET INI string and return an IniConfig."""
    # Normalize line endings (CRLF → LF, bare CR → LF) so the validator
    # and configparser both see the same line boundaries.
    content = content.replace('\r\n', '\n').replace('\r', '\n')
    validate_ini_string(content)
    cp = configparser.ConfigParser(strict=False)
    cp.read_string(content)
    return _parse_configparser(cp)

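load_ini_from_string normalizes line endings before validation so a Windows-sourced upload and the configparser pass see identical boundaries. A tiny sketch of just the normalization, with a contrived mixed-ending string:

content = "[general]\r\nnet=192.168.1.0/24\r[decky-01]\n"
content = content.replace('\r\n', '\n').replace('\r', '\n')
print(content.splitlines())   # -> ['[general]', 'net=192.168.1.0/24', '[decky-01]']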
def _parse_configparser(cp: configparser.ConfigParser) -> IniConfig:
    cfg = IniConfig()

    if cp.has_section("general"):
@@ -89,14 +73,24 @@ def load_ini(path: str | Path) -> IniConfig:
        cfg.subnet = g.get("net")
        cfg.gateway = g.get("gw")
        cfg.interface = g.get("interface")
        cfg.log_target = g.get("log_target") or g.get("log-target")

    from decnet.services.registry import all_services
    known_services = set(all_services().keys())

    # First pass: collect decky sections and custom service definitions
    for section in cp.sections():
        if section == "general":
            continue

        # A service sub-section is identified if the section name has at least one dot
        # AND the last segment is a known service name.
        #   e.g. "decky-01.ssh"  -> sub-section
        #   e.g. "decky.webmail" -> decky section (if "webmail" is not a service)
        if "." in section:
-           continue  # subsections handled in second pass
+           _, _, last_segment = section.rpartition(".")
+           if last_segment in known_services:
+               continue  # sub-section handled in second pass

        if section.startswith("custom-"):
            # Bring-your-own service definition
            s = cp[section]
@@ -115,17 +109,30 @@ def load_ini(path: str | Path) -> IniConfig:
            services = [sv.strip() for sv in svc_raw.split(",")] if svc_raw else None
            archetype = s.get("archetype")
            nmap_os = s.get("nmap_os") or s.get("nmap-os") or None

+           mi_raw = s.get("mutate_interval") or s.get("mutate-interval")
+           mutate_interval = None
+           if mi_raw:
+               try:
+                   mutate_interval = int(mi_raw)
+               except ValueError:
+                   raise ValueError(f"[{section}] mutate_interval= must be an integer, got '{mi_raw}'")
+
            amount_raw = s.get("amount", "1")
            try:
                amount = int(amount_raw)
                if amount < 1:
                    raise ValueError
-           except ValueError:
+               if amount > 100:
+                   raise ValueError(f"[{section}] amount={amount} exceeds maximum allowed (100).")
+           except ValueError as e:
+               if "exceeds maximum" in str(e):
+                   raise e
                raise ValueError(f"[{section}] amount= must be a positive integer, got '{amount_raw}'")

            if amount == 1:
                cfg.deckies.append(DeckySpec(
-                   name=section, ip=ip, services=services, archetype=archetype, nmap_os=nmap_os,
+                   name=section, ip=ip, services=services, archetype=archetype, nmap_os=nmap_os, mutate_interval=mutate_interval,
                ))
            else:
                # Expand into N deckies; explicit ip is ignored (can't share one IP)
@@ -141,6 +148,7 @@ def load_ini(path: str | Path) -> IniConfig:
                    services=services,
                    archetype=archetype,
                    nmap_os=nmap_os,
+                   mutate_interval=mutate_interval,
                ))
    # Second pass: collect per-service subsections [decky-name.service]
@@ -149,7 +157,11 @@ def load_ini(path: str | Path) -> IniConfig:
    for section in cp.sections():
        if "." not in section:
            continue
-       decky_name, _, svc_name = section.partition(".")
+       decky_name, dot, svc_name = section.rpartition(".")
+       if svc_name not in known_services:
+           continue  # not a service sub-section

        svc_cfg = {k: v for k, v in cp[section].items()}
        if decky_name in decky_map:
            # Direct match — single decky
@@ -0,0 +1,92 @@
"""
DECNET application logging helpers.

Usage:
    from decnet.logging import get_logger
    log = get_logger("engine")   # APP-NAME in RFC 5424 output becomes "engine"

The returned logger propagates to the root logger (configured in config.py with
Rfc5424Formatter), so level control via DECNET_DEVELOPER still applies globally.

When ``DECNET_DEVELOPER_TRACING`` is active, every LogRecord is enriched with
``otel_trace_id`` and ``otel_span_id`` from the current OTEL span context.
This lets you correlate log lines with Jaeger traces — click a log entry and
jump straight to the span that produced it.
"""

from __future__ import annotations

import logging


class _ComponentFilter(logging.Filter):
    """Injects *decnet_component* onto every LogRecord so Rfc5424Formatter can
    use it as the RFC 5424 APP-NAME field instead of the hardcoded "decnet"."""

    def __init__(self, component: str) -> None:
        super().__init__()
        self.component = component

    def filter(self, record: logging.LogRecord) -> bool:
        record.decnet_component = self.component  # type: ignore[attr-defined]
        return True


class _TraceContextFilter(logging.Filter):
    """Injects ``otel_trace_id`` and ``otel_span_id`` onto every LogRecord
    from the active OTEL span context.

    Installed once by ``enable_trace_context()`` on the root ``decnet`` logger
    so all child loggers inherit the enrichment via propagation.

    When no span is active, both fields are set to ``"0"`` (cheap string
    comparison downstream, no None-checks needed).
    """

    def filter(self, record: logging.LogRecord) -> bool:
        try:
            from opentelemetry import trace
            span = trace.get_current_span()
            ctx = span.get_span_context()
            if ctx and ctx.trace_id:
                record.otel_trace_id = format(ctx.trace_id, "032x")  # type: ignore[attr-defined]
                record.otel_span_id = format(ctx.span_id, "016x")  # type: ignore[attr-defined]
            else:
                record.otel_trace_id = "0"  # type: ignore[attr-defined]
                record.otel_span_id = "0"  # type: ignore[attr-defined]
        except Exception:
            record.otel_trace_id = "0"  # type: ignore[attr-defined]
            record.otel_span_id = "0"  # type: ignore[attr-defined]
        return True


_trace_filter_installed: bool = False


def enable_trace_context() -> None:
    """Install the OTEL trace-context filter on the root ``decnet`` logger.

    Called once from ``decnet.telemetry.setup_tracing()`` after the
    TracerProvider is initialised. Safe to call multiple times (idempotent).
    """
    global _trace_filter_installed
    if _trace_filter_installed:
        return
    root = logging.getLogger("decnet")
    root.addFilter(_TraceContextFilter())
    _trace_filter_installed = True


def get_logger(component: str) -> logging.Logger:
    """Return a named logger that self-identifies as *component* in RFC 5424.

    Valid components: cli, engine, api, mutator, collector.

    The logger is named ``decnet.<component>`` and propagates normally, so the
    root handler (Rfc5424Formatter + level gate from DECNET_DEVELOPER) handles
    output. Calling this function multiple times for the same component is safe.
    """
    logger = logging.getLogger(f"decnet.{component}")
    if not any(isinstance(f, _ComponentFilter) for f in logger.filters):
        logger.addFilter(_ComponentFilter(component))
    return logger
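A hedged sketch of what the component filter buys you: the same record enrichment can be observed with a plain stdlib handler and formatter (the class and names below mirror _ComponentFilter but are otherwise hypothetical):

import logging

class ComponentFilter(logging.Filter):          # mirrors _ComponentFilter above
    def __init__(self, component: str) -> None:
        super().__init__()
        self.component = component
    def filter(self, record: logging.LogRecord) -> bool:
        record.decnet_component = self.component
        return True

log = logging.getLogger("decnet.engine")
log.addFilter(ComponentFilter("engine"))
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(decnet_component)s: %(message)s"))
log.addHandler(handler)
log.warning("macvlan parent missing")           # -> engine: macvlan parent missing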
@@ -13,6 +13,8 @@ import logging.handlers
import os
from pathlib import Path

+from decnet.telemetry import traced as _traced
+
_LOG_FILE_ENV = "DECNET_LOG_FILE"
_DEFAULT_LOG_FILE = "/var/log/decnet/decnet.log"
_MAX_BYTES = 10 * 1024 * 1024  # 10 MB
@@ -22,10 +24,10 @@ _handler: logging.handlers.RotatingFileHandler | None = None
_logger: logging.Logger | None = None


-def _get_logger() -> logging.Logger:
+@_traced("logging.init_file_handler")
+def _init_file_handler() -> logging.Logger:
    """One-time initialisation of the rotating file handler."""
    global _handler, _logger
    if _logger is not None:
        return _logger

    log_path = Path(os.environ.get(_LOG_FILE_ENV, _DEFAULT_LOG_FILE))
    log_path.parent.mkdir(parents=True, exist_ok=True)
@@ -46,14 +48,19 @@ def _get_logger() -> logging.Logger:
    return _logger


+def _get_logger() -> logging.Logger:
+    if _logger is not None:
+        return _logger
+    return _init_file_handler()
+
+
def write_syslog(line: str) -> None:
    """Write a single RFC 5424 syslog line to the rotating log file."""
    try:
        _get_logger().info(line)
-   except Exception:
+   except Exception:  # nosec B110
        pass


def get_log_path() -> Path:
    """Return the configured log file path (for tests/inspection)."""
    return Path(os.environ.get(_LOG_FILE_ENV, _DEFAULT_LOG_FILE))
@@ -11,6 +11,8 @@ shared utilities for validating and parsing the log_target string.

import socket

+from decnet.telemetry import traced as _traced
+

def parse_log_target(log_target: str) -> tuple[str, int]:
    """
@@ -23,6 +25,7 @@ def parse_log_target(log_target: str) -> tuple[str, int]:
    return parts[0], int(parts[1])


+@_traced("logging.probe_log_target")
def probe_log_target(log_target: str, timeout: float = 2.0) -> bool:
    """
    Return True if the log target is reachable (TCP connect succeeds).
decnet/models.py (Normal file, 120 lines)
@@ -0,0 +1,120 @@
"""
|
||||
DECNET Domain Models.
|
||||
Centralized repository for all Pydantic specifications used throughout the project.
|
||||
This file ensures that core domain logic has no dependencies on the web or database layers.
|
||||
"""
|
||||
from typing import Optional, List, Dict, Literal, Annotated, Any
|
||||
from pydantic import BaseModel, ConfigDict, Field as PydanticField, field_validator, BeforeValidator
|
||||
import configparser
|
||||
|
||||
|
||||
# --- INI Specification Models ---
|
||||
|
||||
def validate_ini_string(v: Any) -> str:
|
||||
"""Structural validator for DECNET INI strings using configparser."""
|
||||
if not isinstance(v, str):
|
||||
# This remains an internal type mismatch (caught by Pydantic usually)
|
||||
raise ValueError("INI content must be a string")
|
||||
|
||||
# 512KB limit to prevent DoS/OOM
|
||||
if len(v) > 512 * 1024:
|
||||
raise ValueError("INI content is too large (max 512KB)")
|
||||
|
||||
if not v.strip():
|
||||
# Using exact phrasing expected by tests
|
||||
raise ValueError("INI content is empty")
|
||||
|
||||
parser = configparser.ConfigParser(interpolation=None, allow_no_value=True, strict=False)
|
||||
try:
|
||||
parser.read_string(v)
|
||||
if not parser.sections():
|
||||
raise ValueError("The provided INI content must contain at least one section (no sections found)")
|
||||
except configparser.Error as e:
|
||||
# If it's a generic parsing error, we check if it's effectively a "missing sections" error
|
||||
if "no section headers" in str(e).lower():
|
||||
raise ValueError("Invalid INI format: no sections found")
|
||||
raise ValueError(f"Invalid INI format: {str(e)}")
|
||||
|
||||
return v
|
||||
|
||||
# Reusable type that enforces INI structure during initialization.
|
||||
# Removed min_length=1 to make empty strings schema-compliant yet semantically invalid (mapped to 409).
|
||||
IniContent = Annotated[str, BeforeValidator(validate_ini_string)]
|
||||
|
||||
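A quick sketch of the Annotated type in action: Pydantic runs validate_ini_string before any other string handling, so malformed INI fails at model construction. The request model below is hypothetical; it only assumes validate_ini_string as defined above:

from typing import Annotated
from pydantic import BaseModel, BeforeValidator, ValidationError

IniContent = Annotated[str, BeforeValidator(validate_ini_string)]

class DeployRequest(BaseModel):        # hypothetical request model
    ini: IniContent

try:
    DeployRequest(ini="no sections here")
except ValidationError as e:
    print(e.errors()[0]["msg"])        # -> Value error, Invalid INI format: ...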
class DeckySpec(BaseModel):
    """Configuration spec for a single decky as defined in the INI file."""
    model_config = ConfigDict(strict=True, extra="forbid")
    name: str = PydanticField(..., max_length=128, pattern=r"^[A-Za-z0-9\-_.]+$")
    ip: Optional[str] = None
    services: Optional[List[str]] = None
    archetype: Optional[str] = None
    service_config: Dict[str, Dict] = PydanticField(default_factory=dict)
    nmap_os: Optional[str] = None
    mutate_interval: Optional[int] = PydanticField(None, ge=1)


class CustomServiceSpec(BaseModel):
    """Spec for a user-defined (bring-your-own) service."""
    model_config = ConfigDict(strict=True, extra="forbid")
    name: str
    image: str
    exec_cmd: str
    ports: List[int] = PydanticField(default_factory=list)


class IniConfig(BaseModel):
    """The complete structured representation of a DECNET INI file."""
    model_config = ConfigDict(strict=True, extra="forbid")
    subnet: Optional[str] = None
    gateway: Optional[str] = None
    interface: Optional[str] = None
    mutate_interval: Optional[int] = PydanticField(None, ge=1)
    deckies: List[DeckySpec] = PydanticField(default_factory=list, min_length=1)
    custom_services: List[CustomServiceSpec] = PydanticField(default_factory=list)

    @field_validator("deckies")
    @classmethod
    def at_least_one_decky(cls, v: List[DeckySpec]) -> List[DeckySpec]:
        """Ensure that an INI deployment always contains at least one machine."""
        if not v:
            raise ValueError("INI must contain at least one decky section")
        return v


# --- Runtime Configuration Models ---

class DeckyConfig(BaseModel):
    """Full operational configuration for a deployed decky container."""
    model_config = ConfigDict(strict=True, extra="forbid")
    name: str
    ip: str
    services: list[str] = PydanticField(..., min_length=1)
    distro: str                   # slug from distros.DISTROS, e.g. "debian", "ubuntu22"
    base_image: str               # Docker image for the base/IP-holder container
    build_base: str = "debian:bookworm-slim"  # apt-compatible image for service Dockerfiles
    hostname: str
    archetype: str | None = None  # archetype slug if spawned from an archetype profile
    service_config: dict[str, dict] = PydanticField(default_factory=dict)
    nmap_os: str = "linux"        # OS family for TCP/IP stack spoofing (see os_fingerprint.py)
    mutate_interval: int | None = None  # automatic rotation interval in minutes
    last_mutated: float = 0.0     # timestamp of last mutation
    last_login_attempt: float = 0.0  # timestamp of most recent interaction

    @field_validator("services")
    @classmethod
    def services_not_empty(cls, v: list[str]) -> list[str]:
        if not v:
            raise ValueError("A decky must have at least one service.")
        return v


class DecnetConfig(BaseModel):
    """Root configuration for the entire DECNET fleet deployment."""
    mode: Literal["unihost", "swarm"]
    interface: str
    subnet: str
    gateway: str
    deckies: list[DeckyConfig] = PydanticField(..., min_length=1)
    log_file: str | None = None  # host path where the collector writes the log file
    ipvlan: bool = False         # use IPvlan L2 instead of MACVLAN (WiFi-friendly)
    mutate_interval: int | None = 30  # global automatic rotation interval in minutes
decnet/mutator/__init__.py (Normal file, 3 lines)
@@ -0,0 +1,3 @@
from decnet.mutator.engine import mutate_all, mutate_decky, run_watch_loop

__all__ = ["mutate_all", "mutate_decky", "run_watch_loop"]

decnet/mutator/engine.py (Normal file, 147 lines)
@@ -0,0 +1,147 @@
"""
|
||||
Mutation Engine for DECNET.
|
||||
Handles dynamic rotation of exposed honeypot services over time.
|
||||
"""
|
||||
|
||||
import random
|
||||
import time
|
||||
from typing import Optional
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
from decnet.archetypes import get_archetype
|
||||
from decnet.fleet import all_service_names
|
||||
from decnet.composer import write_compose
|
||||
from decnet.config import DeckyConfig, DecnetConfig
|
||||
from decnet.engine import _compose_with_retry
|
||||
from decnet.logging import get_logger
|
||||
from decnet.telemetry import traced as _traced
|
||||
|
||||
from pathlib import Path
|
||||
import anyio
|
||||
import asyncio
|
||||
from decnet.web.db.repository import BaseRepository
|
||||
|
||||
log = get_logger("mutator")
|
||||
console = Console()
|
||||
|
||||
|
||||
@_traced("mutator.mutate_decky")
|
||||
async def mutate_decky(decky_name: str, repo: BaseRepository) -> bool:
|
||||
"""
|
||||
Perform an Intra-Archetype Shuffle for a specific decky.
|
||||
Returns True if mutation succeeded, False otherwise.
|
||||
"""
|
||||
log.debug("mutate_decky: start decky=%s", decky_name)
|
||||
state_dict = await repo.get_state("deployment")
|
||||
if state_dict is None:
|
||||
log.error("mutate_decky: no active deployment found in database")
|
||||
console.print("[red]No active deployment found in database.[/]")
|
||||
return False
|
||||
|
||||
config = DecnetConfig(**state_dict["config"])
|
||||
compose_path = Path(state_dict["compose_path"])
|
||||
decky: Optional[DeckyConfig] = next((d for d in config.deckies if d.name == decky_name), None)
|
||||
|
||||
if not decky:
|
||||
console.print(f"[red]Decky '{decky_name}' not found in state.[/]")
|
||||
return False
|
||||
|
||||
if decky.archetype:
|
||||
try:
|
||||
arch = get_archetype(decky.archetype)
|
||||
svc_pool = list(arch.services)
|
||||
except ValueError:
|
||||
svc_pool = all_service_names()
|
||||
else:
|
||||
svc_pool = all_service_names()
|
||||
|
||||
if not svc_pool:
|
||||
console.print(f"[yellow]No services available for mutating '{decky_name}'.[/]")
|
||||
return False
|
||||
|
||||
current_services = set(decky.services)
|
||||
|
||||
attempts = 0
|
||||
while True:
|
||||
count = random.randint(1, min(3, len(svc_pool))) # nosec B311
|
||||
chosen = set(random.sample(svc_pool, count)) # nosec B311
|
||||
attempts += 1
|
||||
if chosen != current_services or attempts > 20:
|
||||
break
|
||||
|
||||
decky.services = list(chosen)
|
||||
decky.last_mutated = time.time()
|
||||
|
||||
# Save to DB
|
||||
await repo.set_state("deployment", {"config": config.model_dump(), "compose_path": str(compose_path)})
|
||||
|
||||
# Still writes files for Docker to use
|
||||
write_compose(config, compose_path)
|
||||
|
||||
log.info("mutation applied decky=%s services=%s", decky_name, ",".join(decky.services))
|
||||
console.print(f"[cyan]Mutating '{decky_name}' to services: {', '.join(decky.services)}[/]")
|
||||
|
||||
try:
|
||||
# Wrap blocking call in thread
|
||||
await anyio.to_thread.run_sync(_compose_with_retry, "up", "-d", "--remove-orphans", compose_path)
|
||||
except Exception as e:
|
||||
log.error("mutation failed decky=%s error=%s", decky_name, e)
|
||||
console.print(f"[red]Failed to mutate '{decky_name}': {e}[/]")
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
@_traced("mutator.mutate_all")
|
||||
async def mutate_all(repo: BaseRepository, force: bool = False) -> None:
|
||||
"""
|
||||
Check all deckies and mutate those that are due.
|
||||
If force=True, mutates all deckies regardless of schedule.
|
||||
"""
|
||||
log.debug("mutate_all: start force=%s", force)
|
||||
state_dict = await repo.get_state("deployment")
|
||||
if state_dict is None:
|
||||
log.error("mutate_all: no active deployment found")
|
||||
console.print("[red]No active deployment found.[/]")
|
||||
return
|
||||
|
||||
config = DecnetConfig(**state_dict["config"])
|
||||
now = time.time()
|
||||
|
||||
mutated_count = 0
|
||||
for decky in config.deckies:
|
||||
interval_mins = decky.mutate_interval or config.mutate_interval
|
||||
if interval_mins is None and not force:
|
||||
continue
|
||||
|
||||
if force:
|
||||
due = True
|
||||
else:
|
||||
elapsed_secs = now - decky.last_mutated
|
||||
due = elapsed_secs >= (interval_mins * 60)
|
||||
|
||||
if due:
|
||||
success = await mutate_decky(decky.name, repo=repo)
|
||||
if success:
|
||||
mutated_count += 1
|
||||
|
||||
if mutated_count == 0 and not force:
|
||||
log.debug("mutate_all: no deckies due for mutation")
|
||||
console.print("[dim]No deckies are due for mutation.[/]")
|
||||
else:
|
||||
log.info("mutate_all: complete mutated_count=%d", mutated_count)
|
||||
|
||||
|
||||
@_traced("mutator.watch_loop")
|
||||
async def run_watch_loop(repo: BaseRepository, poll_interval_secs: int = 10) -> None:
|
||||
"""Run an infinite loop checking for deckies that need mutation."""
|
||||
log.info("mutator watch loop started poll_interval_secs=%d", poll_interval_secs)
|
||||
console.print(f"[green]DECNET Mutator Watcher started (polling every {poll_interval_secs}s).[/]")
|
||||
try:
|
||||
while True:
|
||||
await mutate_all(force=False, repo=repo)
|
||||
await asyncio.sleep(poll_interval_secs)
|
||||
except KeyboardInterrupt:
|
||||
log.info("mutator watch loop stopped")
|
||||
console.print("\n[dim]Mutator watcher stopped.[/]")
|
||||
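The due check is plain wall-clock arithmetic on last_mutated, with the per-decky interval overriding the fleet-wide one via `or`. A minimal sketch with hypothetical timestamps:

import time

mutate_interval = None                  # per-decky interval (unset)
fleet_interval = 30                     # fleet-wide default, minutes
last_mutated = time.time() - 45 * 60    # mutated 45 minutes ago

interval_mins = mutate_interval or fleet_interval
due = (time.time() - last_mutated) >= interval_mins * 60
print(due)                              # -> True (45 min elapsed >= 30 min interval)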
@@ -9,7 +9,7 @@ Handles:
"""

import os
-import subprocess
+import subprocess  # nosec B404
from ipaddress import IPv4Address, IPv4Interface, IPv4Network

import docker
@@ -24,7 +24,7 @@ HOST_IPVLAN_IFACE = "decnet_ipvlan0"
# ---------------------------------------------------------------------------

def _run(cmd: list[str], check: bool = True) -> subprocess.CompletedProcess:
-    return subprocess.run(cmd, capture_output=True, text=True, check=check)
+    return subprocess.run(cmd, capture_output=True, text=True, check=check)  # nosec B603 B404


def detect_interface() -> str:
@@ -5,17 +5,31 @@ Maps an nmap OS family slug to a dict of Linux kernel sysctls that, when applied
to a container's network namespace, make its TCP/IP stack behaviour resemble the
claimed OS as closely as possible within the Linux kernel's constraints.

All sysctls listed here are network-namespace-scoped and safe to set per-container
without --privileged (beyond the NET_ADMIN capability already granted).

Primary discriminator leveraged by nmap: net.ipv4.ip_default_ttl (TTL)
    Linux               → 64
    Windows             → 128
    BSD (FreeBSD/macOS) → 64  (different TCP options, but same TTL as Linux)
    Embedded / network  → 255

-Secondary tuning (TCP behaviour):
+Secondary discriminators (nmap OPS / WIN / ECN / T2–T6 probe groups):
    net.ipv4.tcp_syn_retries    – SYN retransmits before giving up
    net.ipv4.tcp_timestamps     – TCP timestamp option (OPS probes); Windows = off
    net.ipv4.tcp_window_scaling – Window scale option; embedded/Cisco typically off
    net.ipv4.tcp_sack           – Selective ACK option; absent on most embedded stacks
    net.ipv4.tcp_ecn            – ECN negotiation; Linux offers (2), Windows off (0)
    net.ipv4.ip_no_pmtu_disc    – DF bit in ICMP replies (IE probes); embedded on
    net.ipv4.tcp_fin_timeout    – FIN_WAIT_2 seconds (T2–T6 timing); Windows shorter

ICMP tuning (nmap IE / U1 probe groups):
    net.ipv4.icmp_ratelimit     – Min ms between ICMP error replies; Windows = 0 (none)
    net.ipv4.icmp_ratemask      – Bitmask of ICMP types subject to rate limiting

Note: net.core.rmem_default is a global (non-namespaced) sysctl and cannot be
-set per-container without --privileged; it is intentionally excluded.
+set per-container without --privileged; TCP window size is already correct for
+Windows (64240) from the kernel's default tcp_rmem settings.
"""

from __future__ import annotations
@@ -24,27 +38,69 @@ OS_SYSCTLS: dict[str, dict[str, str]] = {
    "linux": {
        "net.ipv4.ip_default_ttl": "64",
        "net.ipv4.tcp_syn_retries": "6",
        "net.ipv4.tcp_timestamps": "1",
        "net.ipv4.tcp_window_scaling": "1",
        "net.ipv4.tcp_sack": "1",
        "net.ipv4.tcp_ecn": "2",
        "net.ipv4.ip_no_pmtu_disc": "0",
        "net.ipv4.tcp_fin_timeout": "60",
        "net.ipv4.icmp_ratelimit": "1000",
        "net.ipv4.icmp_ratemask": "6168",
    },
    "windows": {
        "net.ipv4.ip_default_ttl": "128",
        "net.ipv4.tcp_syn_retries": "2",
        "net.ipv4.tcp_timestamps": "0",
        "net.ipv4.tcp_window_scaling": "1",
        "net.ipv4.tcp_sack": "1",
        "net.ipv4.tcp_ecn": "0",
        "net.ipv4.ip_no_pmtu_disc": "0",
        "net.ipv4.tcp_fin_timeout": "30",
        "net.ipv4.icmp_ratelimit": "0",
        "net.ipv4.icmp_ratemask": "0",
    },
    "bsd": {
        "net.ipv4.ip_default_ttl": "64",
        "net.ipv4.tcp_syn_retries": "6",
        "net.ipv4.tcp_timestamps": "1",
        "net.ipv4.tcp_window_scaling": "1",
        "net.ipv4.tcp_sack": "1",
        "net.ipv4.tcp_ecn": "0",
        "net.ipv4.ip_no_pmtu_disc": "0",
        "net.ipv4.tcp_fin_timeout": "60",
        "net.ipv4.icmp_ratelimit": "250",
        "net.ipv4.icmp_ratemask": "6168",
    },
    "embedded": {
        "net.ipv4.ip_default_ttl": "255",
        "net.ipv4.tcp_syn_retries": "3",
        "net.ipv4.tcp_timestamps": "0",
        "net.ipv4.tcp_window_scaling": "0",
        "net.ipv4.tcp_sack": "0",
        "net.ipv4.tcp_ecn": "0",
        "net.ipv4.ip_no_pmtu_disc": "1",
        "net.ipv4.tcp_fin_timeout": "15",
        "net.ipv4.icmp_ratelimit": "0",
        "net.ipv4.icmp_ratemask": "0",
    },
    "cisco": {
        "net.ipv4.ip_default_ttl": "255",
        "net.ipv4.tcp_syn_retries": "2",
        "net.ipv4.tcp_timestamps": "0",
        "net.ipv4.tcp_window_scaling": "0",
        "net.ipv4.tcp_sack": "0",
        "net.ipv4.tcp_ecn": "0",
        "net.ipv4.ip_no_pmtu_disc": "1",
        "net.ipv4.tcp_fin_timeout": "15",
        "net.ipv4.icmp_ratelimit": "0",
        "net.ipv4.icmp_ratemask": "0",
    },
}

_DEFAULT_OS = "linux"

_REQUIRED_SYSCTLS: frozenset[str] = frozenset(OS_SYSCTLS["linux"].keys())


def get_os_sysctls(nmap_os: str) -> dict[str, str]:
    """Return the sysctl dict for *nmap_os*. Falls back to Linux on unknown slugs."""
@@ -54,3 +110,4 @@ def get_os_sysctls(nmap_os: str) -> dict[str, str]:
def all_os_families() -> list[str]:
    """Return all registered nmap OS family slugs."""
    return list(OS_SYSCTLS.keys())
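At deploy time the whole table collapses into a dict lookup, and Docker accepts the result as per-container sysctls. A hedged usage sketch; it assumes get_os_sysctls as defined above, and the container name and image in the commented call are hypothetical:

sysctls = get_os_sysctls("windows")
print(sysctls["net.ipv4.ip_default_ttl"])   # -> 128

# A decky claiming to be Windows would then be started roughly like:
# client.containers.run("some-base-image", name="decky-01-base",
#                       sysctls=sysctls, cap_add=["NET_ADMIN"], ...)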
decnet/prober/__init__.py (Normal file, 13 lines)
@@ -0,0 +1,13 @@
"""
DECNET-PROBER — standalone active network probing service.

Runs as a detached host-level process (no container). Sends crafted TLS
probes to discover C2 frameworks and other attacker infrastructure via
JARM fingerprinting. Results are written as RFC 5424 syslog + JSON to the
same log file the collector uses, so the existing ingestion pipeline picks
them up automatically.
"""

from decnet.prober.worker import prober_worker

__all__ = ["prober_worker"]

decnet/prober/hassh.py (Normal file, 252 lines)
@@ -0,0 +1,252 @@
"""
|
||||
HASSHServer — SSH server fingerprinting via KEX_INIT algorithm ordering.
|
||||
|
||||
Connects to an SSH server, completes the version exchange, captures the
|
||||
server's SSH_MSG_KEXINIT message, and hashes the server-to-client algorithm
|
||||
fields (kex, encryption, MAC, compression) into a 32-character MD5 digest.
|
||||
|
||||
This is the *server* variant of HASSH (HASSHServer). It fingerprints what
|
||||
the server *offers*, which identifies the SSH implementation (OpenSSH,
|
||||
Paramiko, libssh, Cobalt Strike SSH, etc.).
|
||||
|
||||
Stdlib only (socket, struct, hashlib) plus decnet.telemetry for tracing (zero-cost when disabled).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import socket
|
||||
import struct
|
||||
from typing import Any
|
||||
|
||||
from decnet.telemetry import traced as _traced
|
||||
|
||||
# SSH protocol constants
|
||||
_SSH_MSG_KEXINIT = 20
|
||||
_KEX_INIT_COOKIE_LEN = 16
|
||||
_KEX_INIT_NAME_LISTS = 10 # 10 name-list fields in KEX_INIT
|
||||
|
||||
# Blend in as a normal OpenSSH client
|
||||
_CLIENT_BANNER = b"SSH-2.0-OpenSSH_9.6\r\n"
|
||||
|
||||
# Max bytes to read for server banner
|
||||
_MAX_BANNER_LEN = 256
|
||||
|
||||
# Max bytes for a single SSH packet (KEX_INIT is typically < 2KB)
|
||||
_MAX_PACKET_LEN = 35000
|
||||
|
||||
|
||||
# ─── SSH connection + KEX_INIT capture ──────────────────────────────────────
|
||||
|
||||
@_traced("prober.hassh_ssh_connect")
|
||||
def _ssh_connect(
|
||||
host: str,
|
||||
port: int,
|
||||
timeout: float,
|
||||
) -> tuple[str, bytes] | None:
|
||||
"""
|
||||
TCP connect, exchange version strings, read server's KEX_INIT.
|
||||
|
||||
Returns (server_banner, kex_init_payload) or None on failure.
|
||||
The kex_init_payload starts at the SSH_MSG_KEXINIT type byte.
|
||||
"""
|
||||
sock = None
|
||||
try:
|
||||
sock = socket.create_connection((host, port), timeout=timeout)
|
||||
sock.settimeout(timeout)
|
||||
|
||||
# 1. Read server banner (line ending \r\n or \n)
|
||||
banner = _read_banner(sock)
|
||||
if banner is None or not banner.startswith("SSH-"):
|
||||
return None
|
||||
|
||||
# 2. Send our client version string
|
||||
sock.sendall(_CLIENT_BANNER)
|
||||
|
||||
# 3. Read the server's first binary packet (should be KEX_INIT)
|
||||
payload = _read_ssh_packet(sock)
|
||||
if payload is None or len(payload) < 1:
|
||||
return None
|
||||
|
||||
if payload[0] != _SSH_MSG_KEXINIT:
|
||||
return None
|
||||
|
||||
return (banner, payload)
|
||||
|
||||
except (OSError, socket.timeout, TimeoutError, ConnectionError):
|
||||
return None
|
||||
finally:
|
||||
if sock is not None:
|
||||
try:
|
||||
sock.close()
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
|
||||
def _read_banner(sock: socket.socket) -> str | None:
|
||||
"""Read the SSH version banner line from the socket."""
|
||||
buf = b""
|
||||
while len(buf) < _MAX_BANNER_LEN:
|
||||
try:
|
||||
byte = sock.recv(1)
|
||||
except (OSError, socket.timeout, TimeoutError):
|
||||
return None
|
||||
if not byte:
|
||||
return None
|
||||
buf += byte
|
||||
if buf.endswith(b"\n"):
|
||||
break
|
||||
|
||||
try:
|
||||
return buf.decode("utf-8", errors="replace").rstrip("\r\n")
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def _read_ssh_packet(sock: socket.socket) -> bytes | None:
|
||||
"""
|
||||
Read a single SSH binary packet and return its payload.
|
||||
|
||||
SSH binary packet format:
|
||||
uint32 packet_length (not including itself or MAC)
|
||||
byte padding_length
|
||||
byte[] payload (packet_length - padding_length - 1)
|
||||
byte[] padding
|
||||
"""
|
||||
header = _recv_exact(sock, 4)
|
||||
if header is None:
|
||||
return None
|
||||
|
||||
packet_length = struct.unpack("!I", header)[0]
|
||||
if packet_length < 2 or packet_length > _MAX_PACKET_LEN:
|
||||
return None
|
||||
|
||||
rest = _recv_exact(sock, packet_length)
|
||||
if rest is None:
|
||||
return None
|
||||
|
||||
padding_length = rest[0]
|
||||
payload_length = packet_length - padding_length - 1
|
||||
if payload_length < 1 or payload_length > len(rest) - 1:
|
||||
return None
|
||||
|
||||
return rest[1 : 1 + payload_length]
|
||||
|
||||
|
||||
def _recv_exact(sock: socket.socket, n: int) -> bytes | None:
|
||||
"""Read exactly n bytes from socket, or None on failure."""
|
||||
buf = b""
|
||||
while len(buf) < n:
|
||||
try:
|
||||
chunk = sock.recv(n - len(buf))
|
||||
except (OSError, socket.timeout, TimeoutError):
|
||||
return None
|
||||
if not chunk:
|
||||
return None
|
||||
buf += chunk
|
||||
return buf
|
||||
|
||||
|
||||
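The framing logic in _read_ssh_packet is easiest to see on a hand-built packet. A sketch that frames a one-byte payload the same way and recovers it (the padding size is chosen arbitrarily):

import struct

payload = bytes([20])                        # SSH_MSG_KEXINIT type byte only
padding = b"\x00" * 4
packet_length = len(payload) + len(padding) + 1
wire = struct.pack("!I", packet_length) + bytes([len(padding)]) + payload + padding

# Decode exactly as _read_ssh_packet does:
plen = struct.unpack("!I", wire[:4])[0]
rest = wire[4:4 + plen]
pad_len = rest[0]
print(rest[1:plen - pad_len])                # -> b'\x14' (the payload back)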
# ─── KEX_INIT parsing ──────────────────────────────────────────────────────

def _parse_kex_init(payload: bytes) -> dict[str, str] | None:
    """
    Parse SSH_MSG_KEXINIT payload and extract the 10 name-list fields.

    Payload layout:
        byte      SSH_MSG_KEXINIT (20)
        byte[16]  cookie
        10 × name-list:
            uint32  length
            byte[]  utf-8 string (comma-separated algorithm names)
        bool      first_kex_packet_follows
        uint32    reserved

    Returns dict with keys: kex_algorithms, server_host_key_algorithms,
    encryption_client_to_server, encryption_server_to_client,
    mac_client_to_server, mac_server_to_client,
    compression_client_to_server, compression_server_to_client,
    languages_client_to_server, languages_server_to_client.
    """
    if len(payload) < 1 + _KEX_INIT_COOKIE_LEN + 4:
        return None

    offset = 1 + _KEX_INIT_COOKIE_LEN  # skip type byte + cookie

    field_names = [
        "kex_algorithms",
        "server_host_key_algorithms",
        "encryption_client_to_server",
        "encryption_server_to_client",
        "mac_client_to_server",
        "mac_server_to_client",
        "compression_client_to_server",
        "compression_server_to_client",
        "languages_client_to_server",
        "languages_server_to_client",
    ]

    fields: dict[str, str] = {}
    for name in field_names:
        if offset + 4 > len(payload):
            return None
        length = struct.unpack("!I", payload[offset : offset + 4])[0]
        offset += 4
        if offset + length > len(payload):
            return None
        fields[name] = payload[offset : offset + length].decode(
            "utf-8", errors="replace"
        )
        offset += length

    return fields


# ─── HASSH computation ──────────────────────────────────────────────────────

def _compute_hassh(kex: str, enc: str, mac: str, comp: str) -> str:
    """
    Compute HASSHServer hash: MD5 of "kex;enc_s2c;mac_s2c;comp_s2c".

    Returns 32-character lowercase hex digest.
    """
    raw = f"{kex};{enc};{mac};{comp}"
    return hashlib.md5(raw.encode("utf-8"), usedforsecurity=False).hexdigest()

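The digest is just MD5 over the four server-to-client name-lists joined with semicolons, so a fingerprint can be recomputed offline from captured fields. A sketch with made-up, truncated algorithm lists:

import hashlib

kex  = "curve25519-sha256,ecdh-sha2-nistp256"   # made-up, truncated lists
enc  = "aes128-ctr,aes256-ctr"
mac  = "hmac-sha2-256"
comp = "none,zlib@openssh.com"

raw = f"{kex};{enc};{mac};{comp}"
print(hashlib.md5(raw.encode(), usedforsecurity=False).hexdigest())
# -> a stable 32-char hex fingerprint for this exact ordering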
# ─── Public API ─────────────────────────────────────────────────────────────

@_traced("prober.hassh_server")
def hassh_server(
    host: str,
    port: int,
    timeout: float = 5.0,
) -> dict[str, Any] | None:
    """
    Connect to an SSH server and compute its HASSHServer fingerprint.

    Returns a dict with the hash, banner, and raw algorithm fields,
    or None if the host is not running an SSH server on the given port.
    """
    result = _ssh_connect(host, port, timeout)
    if result is None:
        return None

    banner, payload = result
    fields = _parse_kex_init(payload)
    if fields is None:
        return None

    kex = fields["kex_algorithms"]
    enc = fields["encryption_server_to_client"]
    mac = fields["mac_server_to_client"]
    comp = fields["compression_server_to_client"]

    return {
        "hassh_server": _compute_hassh(kex, enc, mac, comp),
        "banner": banner,
        "kex_algorithms": kex,
        "encryption_s2c": enc,
        "mac_s2c": mac,
        "compression_s2c": comp,
    }
decnet/prober/jarm.py (Normal file, 506 lines)
@@ -0,0 +1,506 @@
"""
|
||||
JARM TLS fingerprinting — pure stdlib implementation.
|
||||
|
||||
JARM sends 10 crafted TLS ClientHello packets to a target, each varying
|
||||
TLS version, cipher suite order, extensions, and ALPN values. The
|
||||
ServerHello responses are parsed and hashed to produce a 62-character
|
||||
fingerprint that identifies the TLS server implementation.
|
||||
|
||||
Reference: https://github.com/salesforce/jarm
|
||||
|
||||
Only DECNET import is decnet.telemetry for tracing (zero-cost when disabled).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import socket
|
||||
import struct
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
from decnet.telemetry import traced as _traced
|
||||
|
||||
# ─── Constants ────────────────────────────────────────────────────────────────
|
||||
|
||||
JARM_EMPTY_HASH = "0" * 62
|
||||
|
||||
_INTER_PROBE_DELAY = 0.1 # seconds between probes to avoid IDS triggers
|
||||
|
||||
# TLS version bytes
|
||||
_TLS_1_0 = b"\x03\x01"
|
||||
_TLS_1_1 = b"\x03\x02"
|
||||
_TLS_1_2 = b"\x03\x03"
|
||||
_TLS_1_3 = b"\x03\x03" # TLS 1.3 uses 0x0303 in record layer
|
||||
|
||||
# TLS record types
|
||||
_CONTENT_HANDSHAKE = 0x16
|
||||
_HANDSHAKE_CLIENT_HELLO = 0x01
|
||||
_HANDSHAKE_SERVER_HELLO = 0x02
|
||||
|
||||
# Extension types
|
||||
_EXT_SERVER_NAME = 0x0000
|
||||
_EXT_EC_POINT_FORMATS = 0x000B
|
||||
_EXT_SUPPORTED_GROUPS = 0x000A
|
||||
_EXT_SESSION_TICKET = 0x0023
|
||||
_EXT_ENCRYPT_THEN_MAC = 0x0016
|
||||
_EXT_EXTENDED_MASTER_SECRET = 0x0017
|
||||
_EXT_SIGNATURE_ALGORITHMS = 0x000D
|
||||
_EXT_SUPPORTED_VERSIONS = 0x002B
|
||||
_EXT_PSK_KEY_EXCHANGE_MODES = 0x002D
|
||||
_EXT_KEY_SHARE = 0x0033
|
||||
_EXT_ALPN = 0x0010
|
||||
_EXT_PADDING = 0x0015
|
||||
|
||||
# ─── Cipher suite lists per JARM spec ────────────────────────────────────────
|
||||
|
||||
# Forward cipher order (standard)
|
||||
_CIPHERS_FORWARD = [
|
||||
0x0016, 0x0033, 0x0067, 0xC09E, 0xC0A2, 0x009E, 0x0039, 0x006B,
|
||||
0xC09F, 0xC0A3, 0x009F, 0x0045, 0x00BE, 0x0088, 0x00C4, 0x009A,
|
||||
0xC008, 0xC009, 0xC023, 0xC0AC, 0xC0AE, 0xC02B, 0xC00A, 0xC024,
|
||||
0xC0AD, 0xC0AF, 0xC02C, 0xC072, 0xC073, 0xCCA8, 0x1301, 0x1302,
|
||||
0x1303, 0xC013, 0xC014, 0xC02F, 0x009C, 0xC02E, 0x002F, 0x0035,
|
||||
0x000A, 0x0005, 0x0004,
|
||||
]
|
||||
|
||||
# Reverse cipher order
|
||||
_CIPHERS_REVERSE = list(reversed(_CIPHERS_FORWARD))
|
||||
|
||||
# TLS 1.3-only ciphers
|
||||
_CIPHERS_TLS13 = [0x1301, 0x1302, 0x1303]
|
||||
|
||||
# Middle-out cipher order (interleaved from center)
def _middle_out(lst: list[int]) -> list[int]:
    result: list[int] = []
    mid = len(lst) // 2
    for i in range(mid + 1):
        if mid + i < len(lst):
            result.append(lst[mid + i])
        if mid - i >= 0 and mid - i != mid + i:
            result.append(lst[mid - i])
    return result

_CIPHERS_MIDDLE_OUT = _middle_out(_CIPHERS_FORWARD)

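The middle-out ordering walks outward from the center element, alternating right then left. On a small list it is easy to verify by hand (this assumes _middle_out as defined above):

print(_middle_out([1, 2, 3, 4, 5]))   # -> [3, 4, 2, 5, 1]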
# Rare/uncommon extensions cipher list
_CIPHERS_RARE = [
    0x0016, 0x0033, 0xC011, 0xC012, 0x0067, 0xC09E, 0xC0A2, 0x009E,
    0x0039, 0x006B, 0xC09F, 0xC0A3, 0x009F, 0x0045, 0x00BE, 0x0088,
    0x00C4, 0x009A, 0xC008, 0xC009, 0xC023, 0xC0AC, 0xC0AE, 0xC02B,
    0xC00A, 0xC024, 0xC0AD, 0xC0AF, 0xC02C, 0xC072, 0xC073, 0xCCA8,
    0x1301, 0x1302, 0x1303, 0xC013, 0xC014, 0xC02F, 0x009C, 0xC02E,
    0x002F, 0x0035, 0x000A, 0x0005, 0x0004,
]


# ─── Probe definitions ────────────────────────────────────────────────────────

# Each probe: (tls_version, cipher_list, tls13_support, alpn, extensions_style)
#   tls_version:      record-layer version bytes
#   cipher_list:      which cipher suite ordering to use
#   tls13_support:    whether to include TLS 1.3 extensions (supported_versions, key_share, psk)
#   alpn:             ALPN protocol string or None
#   extensions_style: "standard", "rare", or "no_extensions"

_PROBE_CONFIGS: list[dict[str, Any]] = [
    # 0: TLS 1.2 forward
    {"version": _TLS_1_2, "ciphers": _CIPHERS_FORWARD, "tls13": False, "alpn": None, "style": "standard"},
    # 1: TLS 1.2 reverse
    {"version": _TLS_1_2, "ciphers": _CIPHERS_REVERSE, "tls13": False, "alpn": None, "style": "standard"},
    # 2: TLS 1.1 forward
    {"version": _TLS_1_1, "ciphers": _CIPHERS_FORWARD, "tls13": False, "alpn": None, "style": "standard"},
    # 3: TLS 1.3 forward
    {"version": _TLS_1_2, "ciphers": _CIPHERS_FORWARD, "tls13": True, "alpn": "h2", "style": "standard"},
    # 4: TLS 1.3 reverse
    {"version": _TLS_1_2, "ciphers": _CIPHERS_REVERSE, "tls13": True, "alpn": "h2", "style": "standard"},
    # 5: TLS 1.3 invalid (advertise 1.3 support but no key_share)
    {"version": _TLS_1_2, "ciphers": _CIPHERS_FORWARD, "tls13": "no_key_share", "alpn": None, "style": "standard"},
    # 6: TLS 1.3 middle-out
    {"version": _TLS_1_2, "ciphers": _CIPHERS_MIDDLE_OUT, "tls13": True, "alpn": None, "style": "standard"},
    # 7: TLS 1.0 forward
    {"version": _TLS_1_0, "ciphers": _CIPHERS_FORWARD, "tls13": False, "alpn": None, "style": "standard"},
    # 8: TLS 1.2 middle-out
    {"version": _TLS_1_2, "ciphers": _CIPHERS_MIDDLE_OUT, "tls13": False, "alpn": None, "style": "standard"},
    # 9: TLS 1.2 with rare extensions
    {"version": _TLS_1_2, "ciphers": _CIPHERS_RARE, "tls13": False, "alpn": "http/1.1", "style": "rare"},
]

# ─── Extension builders ──────────────────────────────────────────────────────

def _ext(ext_type: int, data: bytes) -> bytes:
    return struct.pack("!HH", ext_type, len(data)) + data


def _ext_sni(host: str) -> bytes:
    host_bytes = host.encode("ascii")
    # ServerNameList: length(2) + ServerName: type(1) + length(2) + name
    sni_data = struct.pack("!HBH", len(host_bytes) + 3, 0, len(host_bytes)) + host_bytes
    return _ext(_EXT_SERVER_NAME, sni_data)


def _ext_supported_groups() -> bytes:
    # secp256r1, secp384r1, secp521r1, x25519, ffdhe2048, ffdhe3072
    groups = [0x0017, 0x0018, 0x0019, 0x001D, 0x0100, 0x0101]
    data = struct.pack("!H", len(groups) * 2) + b"".join(struct.pack("!H", g) for g in groups)
    return _ext(_EXT_SUPPORTED_GROUPS, data)


def _ext_ec_point_formats() -> bytes:
    formats = b"\x00"  # uncompressed only
    return _ext(_EXT_EC_POINT_FORMATS, struct.pack("B", len(formats)) + formats)


def _ext_signature_algorithms() -> bytes:
    algos = [
        0x0401, 0x0501, 0x0601,  # RSA PKCS1 SHA256/384/512
        0x0201,                  # RSA PKCS1 SHA1
        0x0403, 0x0503, 0x0603,  # ECDSA SHA256/384/512
        0x0203,                  # ECDSA SHA1
        0x0804, 0x0805, 0x0806,  # RSA-PSS SHA256/384/512
    ]
    data = struct.pack("!H", len(algos) * 2) + b"".join(struct.pack("!H", a) for a in algos)
    return _ext(_EXT_SIGNATURE_ALGORITHMS, data)


def _ext_supported_versions_13() -> bytes:
    versions = [0x0304, 0x0303]  # TLS 1.3, 1.2
    data = struct.pack("B", len(versions) * 2) + b"".join(struct.pack("!H", v) for v in versions)
    return _ext(_EXT_SUPPORTED_VERSIONS, data)


def _ext_psk_key_exchange_modes() -> bytes:
    return _ext(_EXT_PSK_KEY_EXCHANGE_MODES, b"\x01\x01")  # psk_dhe_ke


def _ext_key_share() -> bytes:
    # x25519 key share with 32 random-looking bytes
    key_data = b"\x00" * 32
    entry = struct.pack("!HH", 0x001D, 32) + key_data  # x25519 group
    data = struct.pack("!H", len(entry)) + entry
    return _ext(_EXT_KEY_SHARE, data)


def _ext_alpn(protocol: str) -> bytes:
    proto_bytes = protocol.encode("ascii")
    proto_entry = struct.pack("B", len(proto_bytes)) + proto_bytes
    data = struct.pack("!H", len(proto_entry)) + proto_entry
    return _ext(_EXT_ALPN, data)


def _ext_session_ticket() -> bytes:
    return _ext(_EXT_SESSION_TICKET, b"")


def _ext_encrypt_then_mac() -> bytes:
    return _ext(_EXT_ENCRYPT_THEN_MAC, b"")


def _ext_extended_master_secret() -> bytes:
    return _ext(_EXT_EXTENDED_MASTER_SECRET, b"")


def _ext_padding(target_length: int, current_length: int) -> bytes:
    pad_needed = target_length - current_length - 4  # 4 bytes for ext type + length
    if pad_needed < 0:
        return b""
    return _ext(_EXT_PADDING, b"\x00" * pad_needed)

# ─── ClientHello builder ─────────────────────────────────────────────────────

def _build_client_hello(probe_index: int, host: str = "localhost") -> bytes:
    """
    Construct one of the 10 JARM-specified ClientHello packets.

    Args:
        probe_index: 0-9, selects the probe configuration
        host: target hostname for the SNI extension

    Returns:
        Complete TLS record bytes ready to send on the wire.
    """
    cfg = _PROBE_CONFIGS[probe_index]
    version: bytes = cfg["version"]
    ciphers: list[int] = cfg["ciphers"]
    tls13 = cfg["tls13"]
    alpn: str | None = cfg["alpn"]

    # Random (32 bytes)
    random_bytes = b"\x00" * 32

    # Session ID (32 bytes, all zeros)
    session_id = b"\x00" * 32

    # Cipher suites
    cipher_bytes = b"".join(struct.pack("!H", c) for c in ciphers)
    cipher_data = struct.pack("!H", len(cipher_bytes)) + cipher_bytes

    # Compression methods (null only)
    compression = b"\x01\x00"

    # Extensions
    extensions = b""
    extensions += _ext_sni(host)
    extensions += _ext_supported_groups()
    extensions += _ext_ec_point_formats()
    extensions += _ext_session_ticket()
    extensions += _ext_encrypt_then_mac()
    extensions += _ext_extended_master_secret()
    extensions += _ext_signature_algorithms()

    # tls13 is either a bool or the sentinel string "no_key_share",
    # so an identity check is the exact test here.
    if tls13 is True:
        extensions += _ext_supported_versions_13()
        extensions += _ext_psk_key_exchange_modes()
        extensions += _ext_key_share()
    elif tls13 == "no_key_share":
        extensions += _ext_supported_versions_13()
        extensions += _ext_psk_key_exchange_modes()
        # Intentionally omit key_share

    if alpn:
        extensions += _ext_alpn(alpn)

    ext_data = struct.pack("!H", len(extensions)) + extensions

    # ClientHello body
    body = (
        version                                           # client_version (2)
        + random_bytes                                    # random (32)
        + struct.pack("B", len(session_id)) + session_id  # session_id
        + cipher_data                                     # cipher_suites
        + compression                                     # compression_methods
        + ext_data                                        # extensions
    )

    # Handshake header: type(1) + length(3)
    handshake = struct.pack("B", _HANDSHAKE_CLIENT_HELLO) + struct.pack("!I", len(body))[1:] + body

    # TLS record header: type(1) + version(2) + length(2)
    record = struct.pack("B", _CONTENT_HANDSHAKE) + _TLS_1_0 + struct.pack("!H", len(handshake)) + handshake

    return record


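# Illustrative sketch, not part of the module: unpack the outer framing of a
# probe to show the record/handshake layering built above. The record length
# field excludes the 5-byte record header itself.
#
# >>> hello = _build_client_hello(0, host="example.com")
# >>> hello[0] == _CONTENT_HANDSHAKE
# True
# >>> struct.unpack_from("!H", hello, 3)[0] == len(hello) - 5
# True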
# ─── ServerHello parser ──────────────────────────────────────────────────────

def _parse_server_hello(data: bytes) -> str:
    """
    Extract cipher suite and TLS version from a ServerHello response.

    Returns a pipe-delimited string "cipher|version|extensions" that forms
    one component of the JARM hash, or "|||" on parse failure.
    """
    try:
        if len(data) < 6:
            return "|||"

        # TLS record header
        if data[0] != _CONTENT_HANDSHAKE:
            return "|||"

        # Bytes 1-2 hold the record version (unused here).
        record_len = struct.unpack_from("!H", data, 3)[0]
        hs = data[5: 5 + record_len]

        if len(hs) < 4:
            return "|||"

        # Handshake header
        if hs[0] != _HANDSHAKE_SERVER_HELLO:
            return "|||"

        hs_len = struct.unpack_from("!I", b"\x00" + hs[1:4])[0]
        body = hs[4: 4 + hs_len]

        if len(body) < 34:
            return "|||"

        pos = 0
        # Server version
        server_version = struct.unpack_from("!H", body, pos)[0]
        pos += 2

        # Random (32 bytes)
        pos += 32

        # Session ID
        if pos >= len(body):
            return "|||"
        sid_len = body[pos]
        pos += 1 + sid_len

        # Cipher suite
        if pos + 2 > len(body):
            return "|||"
        cipher = struct.unpack_from("!H", body, pos)[0]
        pos += 2

        # Compression method
        if pos >= len(body):
            return "|||"
        pos += 1

        # Parse extensions for supported_versions (to detect actual TLS 1.3)
        actual_version = server_version
        extensions_str = ""
        if pos + 2 <= len(body):
            ext_total = struct.unpack_from("!H", body, pos)[0]
            pos += 2
            ext_end = pos + ext_total
            ext_types: list[str] = []
            while pos + 4 <= ext_end and pos + 4 <= len(body):
                ext_type = struct.unpack_from("!H", body, pos)[0]
                ext_len = struct.unpack_from("!H", body, pos + 2)[0]
                ext_types.append(f"{ext_type:04x}")

                if ext_type == _EXT_SUPPORTED_VERSIONS and ext_len >= 2:
                    actual_version = struct.unpack_from("!H", body, pos + 4)[0]

                pos += 4 + ext_len
            extensions_str = "-".join(ext_types)

        version_str = _version_to_str(actual_version)
        cipher_str = f"{cipher:04x}"

        return f"{cipher_str}|{version_str}|{extensions_str}"

    except Exception:
        return "|||"


def _version_to_str(version: int) -> str:
    return {
        0x0304: "tls13",
        0x0303: "tls12",
        0x0302: "tls11",
        0x0301: "tls10",
        0x0300: "ssl30",
    }.get(version, f"{version:04x}")


# ─── Probe sender ────────────────────────────────────────────────────────────

@_traced("prober.jarm_send_probe")
def _send_probe(host: str, port: int, hello: bytes, timeout: float = 5.0) -> bytes | None:
    """
    Open a TCP connection, send the ClientHello, and read the ServerHello.

    Returns raw response bytes or None on any failure.
    """
    try:
        sock = socket.create_connection((host, port), timeout=timeout)
        try:
            sock.sendall(hello)
            sock.settimeout(timeout)
            response = b""
            while True:
                chunk = sock.recv(1484)
                if not chunk:
                    break
                response += chunk
                # We only need the first TLS record (ServerHello)
                if len(response) >= 5:
                    record_len = struct.unpack_from("!H", response, 3)[0]
                    if len(response) >= 5 + record_len:
                        break
            return response if response else None
        finally:
            sock.close()
    except OSError:
        # socket.error is an alias of OSError and socket.timeout is a
        # subclass on Python 3, so one clause covers all three.
        return None


# ─── JARM hash computation ───────────────────────────────────────────────────

def _compute_jarm(responses: list[str]) -> str:
    """
    Compute the final 62-character JARM hash from 10 probe response strings.

    The first 30 characters encode cipher and version per probe; the
    remaining 32 characters are a truncated SHA256 of the extensions.
    """
    if all(r == "|||" for r in responses):
        return JARM_EMPTY_HASH

    # Build the fuzzy hash. Each response contributes 3 characters (the
    # first 2 hex digits of the cipher plus one version char) to the first
    # 30; all extension strings are then hashed together for the final 32.
    fuzzy_raw = ""
    ext_parts: list[str] = []
    for r in responses:
        parts = r.split("|")
        if len(parts) >= 3 and parts[0] != "":
            cipher = parts[0]  # 4-char hex
            version = parts[1]
            ver_char = {
                "tls13": "d", "tls12": "c", "tls11": "b",
                "tls10": "a", "ssl30": "0",
            }.get(version, "0")
            fuzzy_raw += f"{cipher[0:2]}{ver_char}"
            ext_parts.append(parts[2])
        else:
            fuzzy_raw += "000"
            ext_parts.append("")

    # fuzzy_raw is 30 chars (3 * 10)
    ext_str = ",".join(ext_parts)
    ext_hash = hashlib.sha256(ext_str.encode()).hexdigest()[:32]

    return fuzzy_raw + ext_hash


# ─── Public API ──────────────────────────────────────────────────────────────

@_traced("prober.jarm_hash")
def jarm_hash(host: str, port: int, timeout: float = 5.0) -> str:
    """
    Compute the JARM fingerprint for a TLS server.

    Sends 10 crafted ClientHello packets and hashes the responses.

    Args:
        host: target IP or hostname
        port: target port
        timeout: per-probe TCP timeout in seconds

    Returns:
        62-character JARM hash string, or all-zeros on total failure.
    """
    responses: list[str] = []

    for i in range(10):
        hello = _build_client_hello(i, host=host)
        raw = _send_probe(host, port, hello, timeout=timeout)
        if raw is not None:
            responses.append(_parse_server_hello(raw))
        else:
            responses.append("|||")

        if i < 9:
            time.sleep(_INTER_PROBE_DELAY)

    return _compute_jarm(responses)
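# Illustrative sketch, not part of the module: a JARM hash splits into its
# two documented parts. The target address is hypothetical.
#
# >>> jarm = jarm_hash("203.0.113.10", 443)
# >>> len(jarm)
# 62
# >>> cipher_part, ext_part = jarm[:30], jarm[30:]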
227  decnet/prober/tcpfp.py  Normal file
@@ -0,0 +1,227 @@
"""
TCP/IP stack fingerprinting via SYN-ACK analysis.

Sends a crafted TCP SYN packet to a target host:port, captures the
SYN-ACK response, and extracts OS/tool-identifying characteristics:
TTL, window size, DF bit, MSS, window scale, SACK support, timestamps,
and TCP options ordering.

Uses scapy for packet crafting and parsing. Requires root/CAP_NET_RAW.
"""

from __future__ import annotations

import hashlib
import random
from typing import Any

from decnet.telemetry import traced as _traced

# Lazy-import scapy to avoid breaking non-root usage of HASSH/JARM.
# The actual import happens inside functions that need it.

# ─── TCP option short codes ─────────────────────────────────────────────────

_OPT_CODES: dict[str, str] = {
    "MSS": "M",
    "WScale": "W",
    "SAckOK": "S",
    "SAck": "S",
    "Timestamp": "T",
    "NOP": "N",
    "EOL": "E",
    "AltChkSum": "A",
    "AltChkSumOpt": "A",
    "UTO": "U",
}


# ─── Packet construction ───────────────────────────────────────────────────

@_traced("prober.tcpfp_send_syn")
|
||||
def _send_syn(
|
||||
host: str,
|
||||
port: int,
|
||||
timeout: float,
|
||||
) -> Any | None:
|
||||
"""
|
||||
Craft a TCP SYN with common options and send it. Returns the
|
||||
SYN-ACK response packet or None on timeout/failure.
|
||||
"""
|
||||
from scapy.all import IP, TCP, conf, sr1
|
||||
|
||||
# Suppress scapy's noisy output
|
||||
conf.verb = 0
|
||||
|
||||
src_port = random.randint(49152, 65535) # nosec B311 — ephemeral port, not crypto
|
||||
|
||||
pkt = (
|
||||
IP(dst=host)
|
||||
/ TCP(
|
||||
sport=src_port,
|
||||
dport=port,
|
||||
flags="S",
|
||||
options=[
|
||||
("MSS", 1460),
|
||||
("NOP", None),
|
||||
("WScale", 7),
|
||||
("NOP", None),
|
||||
("NOP", None),
|
||||
("Timestamp", (0, 0)),
|
||||
("SAckOK", b""),
|
||||
("EOL", None),
|
||||
],
|
||||
)
|
||||
)
|
||||
|
||||
try:
|
||||
resp = sr1(pkt, timeout=timeout, verbose=0)
|
||||
except (OSError, PermissionError):
|
||||
return None
|
||||
|
||||
if resp is None:
|
||||
return None
|
||||
|
||||
# Verify it's a SYN-ACK (flags == 0x12)
|
||||
from scapy.all import TCP as TCPLayer
|
||||
if not resp.haslayer(TCPLayer):
|
||||
return None
|
||||
if resp[TCPLayer].flags != 0x12: # SYN-ACK
|
||||
return None
|
||||
|
||||
# Send RST to clean up half-open connection
|
||||
_send_rst(host, port, src_port, resp)
|
||||
|
||||
return resp
|
||||
|
||||
|
||||
def _send_rst(
    host: str,
    dport: int,
    sport: int,
    resp: Any,
) -> None:
    """Send RST to clean up the half-open connection."""
    try:
        from scapy.all import IP, TCP, send
        rst = (
            IP(dst=host)
            / TCP(
                sport=sport,
                dport=dport,
                flags="R",
                seq=resp.ack,
            )
        )
        send(rst, verbose=0)
    except Exception:  # nosec B110 — best-effort RST cleanup
        pass


# ─── Response parsing ───────────────────────────────────────────────────────

def _parse_synack(resp: Any) -> dict[str, Any]:
    """
    Extract fingerprint fields from a scapy SYN-ACK response packet.
    """
    from scapy.all import IP, TCP

    ip_layer = resp[IP]
    tcp_layer = resp[TCP]

    # IP fields
    ttl = ip_layer.ttl
    df_bit = 1 if (ip_layer.flags & 0x2) else 0  # DF = bit 1
    ip_id = ip_layer.id

    # TCP fields
    window_size = tcp_layer.window

    # Parse TCP options
    mss = 0
    window_scale = -1
    sack_ok = 0
    timestamp = 0
    options_order = _extract_options_order(tcp_layer.options)

    for opt_name, opt_value in tcp_layer.options:
        if opt_name == "MSS":
            mss = opt_value
        elif opt_name == "WScale":
            window_scale = opt_value
        elif opt_name in ("SAckOK", "SAck"):
            sack_ok = 1
        elif opt_name == "Timestamp":
            timestamp = 1

    return {
        "ttl": ttl,
        "window_size": window_size,
        "df_bit": df_bit,
        "ip_id": ip_id,
        "mss": mss,
        "window_scale": window_scale,
        "sack_ok": sack_ok,
        "timestamp": timestamp,
        "options_order": options_order,
    }


def _extract_options_order(options: list[tuple[str, Any]]) -> str:
    """
    Map scapy TCP option tuples to a short-code string.

    E.g. [("MSS", 1460), ("NOP", None), ("WScale", 7)] → "M,N,W"
    """
    codes = []
    for opt_name, _ in options:
        codes.append(_OPT_CODES.get(opt_name, "?"))
    return ",".join(codes)


# ─── Fingerprint computation ───────────────────────────────────────────────

def _compute_fingerprint(fields: dict[str, Any]) -> tuple[str, str]:
    """
    Compute the fingerprint raw string and SHA256 hash from parsed fields.

    Returns (raw_string, hash_hex_32); the raw string has the shape
    "64:65535:1:1460:7:1:1:M,N,W,N,N,T,S" (values illustrative).
    """
    raw = (
        f"{fields['ttl']}:{fields['window_size']}:{fields['df_bit']}:"
        f"{fields['mss']}:{fields['window_scale']}:{fields['sack_ok']}:"
        f"{fields['timestamp']}:{fields['options_order']}"
    )
    h = hashlib.sha256(raw.encode("utf-8")).hexdigest()[:32]
    return raw, h


# ─── Public API ─────────────────────────────────────────────────────────────

@_traced("prober.tcp_fingerprint")
|
||||
def tcp_fingerprint(
|
||||
host: str,
|
||||
port: int,
|
||||
timeout: float = 5.0,
|
||||
) -> dict[str, Any] | None:
|
||||
"""
|
||||
Send a TCP SYN to host:port and fingerprint the SYN-ACK response.
|
||||
|
||||
Returns a dict with the hash, raw fingerprint string, and individual
|
||||
fields, or None if no SYN-ACK was received.
|
||||
|
||||
Requires root/CAP_NET_RAW.
|
||||
"""
|
||||
resp = _send_syn(host, port, timeout)
|
||||
if resp is None:
|
||||
return None
|
||||
|
||||
fields = _parse_synack(resp)
|
||||
raw, h = _compute_fingerprint(fields)
|
||||
|
||||
return {
|
||||
"tcpfp_hash": h,
|
||||
"tcpfp_raw": raw,
|
||||
**fields,
|
||||
}
|
||||
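# Illustrative usage sketch, not part of the module (target address is
# hypothetical; the process needs root/CAP_NET_RAW for raw sockets):
#
#     fp = tcp_fingerprint("192.0.2.7", 443)
#     if fp is not None:
#         print(fp["tcpfp_hash"], fp["tcpfp_raw"], fp["options_order"])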
478  decnet/prober/worker.py  Normal file
@@ -0,0 +1,478 @@
"""
DECNET-PROBER standalone worker.

Runs as a detached host-level process. Discovers attacker IPs by tailing the
collector's JSON log file, then fingerprints them via multiple active probes:
- JARM (TLS server fingerprinting)
- HASSHServer (SSH server fingerprinting)
- TCP/IP stack fingerprinting (OS/tool identification)

Results are written as RFC 5424 syslog + JSON to the same log files.

Target discovery is fully automatic — every unique attacker IP seen in the
log stream gets probed. No manual target list is required.

Tech debt: writing directly to the collector's log files couples the
prober to the collector's file format. A future refactor should introduce
a shared log-sink abstraction.
"""

from __future__ import annotations

import asyncio
import json
import re
from datetime import datetime, timezone
from pathlib import Path
from typing import Any

from decnet.logging import get_logger
from decnet.prober.hassh import hassh_server
from decnet.prober.jarm import JARM_EMPTY_HASH, jarm_hash
from decnet.prober.tcpfp import tcp_fingerprint
from decnet.telemetry import traced as _traced

logger = get_logger("prober")

# ─── Default ports per probe type ───────────────────────────────────────────

# JARM: common C2 callback / TLS server ports
DEFAULT_PROBE_PORTS: list[int] = [
    443, 8443, 8080, 4443, 50050, 2222, 993, 995, 8888, 9001,
]

# HASSHServer: common SSH server ports
DEFAULT_SSH_PORTS: list[int] = [22, 2222, 22222, 2022]

# TCP/IP stack: probe on ports commonly open on attacker machines.
# A wide spread gives the best chance of a SYN-ACK for TTL/fingerprint extraction.
DEFAULT_TCPFP_PORTS: list[int] = [22, 80, 443, 8080, 8443, 445, 3389]

# ─── RFC 5424 formatting (inline, mirrors templates/*/decnet_logging.py) ─────

_FACILITY_LOCAL0 = 16
_SD_ID = "decnet@55555"
_SEVERITY_INFO = 6
_SEVERITY_WARNING = 4

_MAX_HOSTNAME = 255
_MAX_APPNAME = 48
_MAX_MSGID = 32


def _sd_escape(value: str) -> str:
    return value.replace("\\", "\\\\").replace('"', '\\"').replace("]", "\\]")


def _sd_element(fields: dict[str, Any]) -> str:
    if not fields:
        return "-"
    params = " ".join(f'{k}="{_sd_escape(str(v))}"' for k, v in fields.items())
    return f"[{_SD_ID} {params}]"


def _syslog_line(
    event_type: str,
    severity: int = _SEVERITY_INFO,
    msg: str | None = None,
    **fields: Any,
) -> str:
    pri = f"<{_FACILITY_LOCAL0 * 8 + severity}>"
    ts = datetime.now(timezone.utc).isoformat()
    hostname = "decnet-prober"
    appname = "prober"
    msgid = (event_type or "-")[:_MAX_MSGID]
    sd = _sd_element(fields)
    message = f" {msg}" if msg else ""
    return f"{pri}1 {ts} {hostname} {appname} - {msgid} {sd}{message}"


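# Illustrative sketch, not part of the module: with INFO severity the PRI is
# 16*8 + 6 = 134, so an emitted line looks like this (timestamp and values
# illustrative, wrapped here for readability; it is one line on disk):
#
#     <134>1 2024-01-01T00:00:00+00:00 decnet-prober prober - jarm_fingerprint
#         [decnet@55555 target_ip="198.51.100.5"] JARM 198.51.100.5:443 = ...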
# ─── RFC 5424 parser (subset of the collector's, for JSON generation) ────────

_RFC5424_RE = re.compile(
    r"^<\d+>1 "
    r"(\S+) "   # 1: TIMESTAMP
    r"(\S+) "   # 2: HOSTNAME
    r"(\S+) "   # 3: APP-NAME
    r"- "       # PROCID
    r"(\S+) "   # 4: MSGID (event_type)
    r"(.+)$",   # 5: SD + MSG
)
_SD_BLOCK_RE = re.compile(r'\[decnet@55555\s+(.*?)\]', re.DOTALL)
_PARAM_RE = re.compile(r'(\w+)="((?:[^"\\]|\\.)*)"')
_IP_FIELDS = ("src_ip", "src", "client_ip", "remote_ip", "ip", "target_ip")


def _parse_to_json(line: str) -> dict[str, Any] | None:
    m = _RFC5424_RE.match(line)
    if not m:
        return None
    ts_raw, decky, service, event_type, sd_rest = m.groups()

    fields: dict[str, str] = {}
    msg = ""

    if sd_rest.startswith("["):
        block = _SD_BLOCK_RE.search(sd_rest)
        if block:
            for k, v in _PARAM_RE.findall(block.group(1)):
                fields[k] = v.replace('\\"', '"').replace("\\\\", "\\").replace("\\]", "]")
        msg_match = re.search(r'\]\s+(.+)$', sd_rest)
        if msg_match:
            msg = msg_match.group(1).strip()

    attacker_ip = "Unknown"
    for fname in _IP_FIELDS:
        if fname in fields:
            attacker_ip = fields[fname]
            break

    try:
        ts_formatted = datetime.fromisoformat(ts_raw).strftime("%Y-%m-%d %H:%M:%S")
    except ValueError:
        ts_formatted = ts_raw

    return {
        "timestamp": ts_formatted,
        "decky": decky,
        "service": service,
        "event_type": event_type,
        "attacker_ip": attacker_ip,
        "fields": fields,
        "msg": msg,
        "raw_line": line,
    }


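# Round-trip sketch, not part of the module: lines emitted by _syslog_line
# parse back into JSON records.
#
# >>> rec = _parse_to_json(_syslog_line("prober_startup", interval="300"))
# >>> rec["event_type"]
# 'prober_startup'
# >>> rec["fields"]["interval"]
# '300'
# >>> rec["attacker_ip"]          # no IP-carrying field was present
# 'Unknown'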
# ─── Log writer ──────────────────────────────────────────────────────────────

def _write_event(
    log_path: Path,
    json_path: Path,
    event_type: str,
    severity: int = _SEVERITY_INFO,
    msg: str | None = None,
    **fields: Any,
) -> None:
    line = _syslog_line(event_type, severity=severity, msg=msg, **fields)

    with open(log_path, "a", encoding="utf-8") as f:
        f.write(line + "\n")
        f.flush()

    parsed = _parse_to_json(line)
    if parsed:
        with open(json_path, "a", encoding="utf-8") as f:
            f.write(json.dumps(parsed) + "\n")
            f.flush()


# ─── Target discovery from log stream ────────────────────────────────────────

@_traced("prober.discover_attackers")
def _discover_attackers(json_path: Path, position: int) -> tuple[set[str], int]:
    """
    Read new JSON log lines from the given position and extract unique
    attacker IPs. Returns (new_ips, new_position).

    Only considers IPs that are not "Unknown" and come from events that
    indicate real attacker interaction (not the prober's own events).
    """
    new_ips: set[str] = set()

    if not json_path.exists():
        return new_ips, position

    size = json_path.stat().st_size
    if size < position:
        position = 0  # file rotated

    if size == position:
        return new_ips, position

    with open(json_path, "r", encoding="utf-8", errors="replace") as f:
        f.seek(position)
        while True:
            line = f.readline()
            if not line:
                break
            if not line.endswith("\n"):
                break  # partial line

            try:
                record = json.loads(line.strip())
            except json.JSONDecodeError:
                position = f.tell()
                continue

            # Skip our own events
            if record.get("service") == "prober":
                position = f.tell()
                continue

            ip = record.get("attacker_ip", "Unknown")
            if ip != "Unknown" and ip:
                new_ips.add(ip)

            position = f.tell()

    return new_ips, position


# ─── Probe cycle ─────────────────────────────────────────────────────────────

@_traced("prober.probe_cycle")
def _probe_cycle(
    targets: set[str],
    probed: dict[str, dict[str, set[int]]],
    jarm_ports: list[int],
    ssh_ports: list[int],
    tcpfp_ports: list[int],
    log_path: Path,
    json_path: Path,
    timeout: float = 5.0,
) -> None:
    """
    Probe all known attacker IPs with JARM, HASSH, and TCP/IP fingerprinting.

    Args:
        targets: set of attacker IPs to probe
        probed: dict mapping IP -> {probe_type -> set of ports already probed}
        jarm_ports: TLS ports for JARM fingerprinting
        ssh_ports: SSH ports for HASSHServer fingerprinting
        tcpfp_ports: ports for TCP/IP stack fingerprinting
        log_path: RFC 5424 log file
        json_path: JSON log file
        timeout: per-probe TCP timeout
    """
    for ip in sorted(targets):
        ip_probed = probed.setdefault(ip, {})

        # Phase 1: JARM (TLS fingerprinting)
        _jarm_phase(ip, ip_probed, jarm_ports, log_path, json_path, timeout)

        # Phase 2: HASSHServer (SSH fingerprinting)
        _hassh_phase(ip, ip_probed, ssh_ports, log_path, json_path, timeout)

        # Phase 3: TCP/IP stack fingerprinting
        _tcpfp_phase(ip, ip_probed, tcpfp_ports, log_path, json_path, timeout)


@_traced("prober.jarm_phase")
|
||||
def _jarm_phase(
|
||||
ip: str,
|
||||
ip_probed: dict[str, set[int]],
|
||||
ports: list[int],
|
||||
log_path: Path,
|
||||
json_path: Path,
|
||||
timeout: float,
|
||||
) -> None:
|
||||
"""JARM-fingerprint an IP on the given TLS ports."""
|
||||
done = ip_probed.setdefault("jarm", set())
|
||||
for port in ports:
|
||||
if port in done:
|
||||
continue
|
||||
try:
|
||||
h = jarm_hash(ip, port, timeout=timeout)
|
||||
done.add(port)
|
||||
if h == JARM_EMPTY_HASH:
|
||||
continue
|
||||
_write_event(
|
||||
log_path, json_path,
|
||||
"jarm_fingerprint",
|
||||
target_ip=ip,
|
||||
target_port=str(port),
|
||||
jarm_hash=h,
|
||||
msg=f"JARM {ip}:{port} = {h}",
|
||||
)
|
||||
logger.info("prober: JARM %s:%d = %s", ip, port, h)
|
||||
except Exception as exc:
|
||||
done.add(port)
|
||||
_write_event(
|
||||
log_path, json_path,
|
||||
"prober_error",
|
||||
severity=_SEVERITY_WARNING,
|
||||
target_ip=ip,
|
||||
target_port=str(port),
|
||||
error=str(exc),
|
||||
msg=f"JARM probe failed for {ip}:{port}: {exc}",
|
||||
)
|
||||
logger.warning("prober: JARM probe failed %s:%d: %s", ip, port, exc)
|
||||
|
||||
|
||||
@_traced("prober.hassh_phase")
|
||||
def _hassh_phase(
|
||||
ip: str,
|
||||
ip_probed: dict[str, set[int]],
|
||||
ports: list[int],
|
||||
log_path: Path,
|
||||
json_path: Path,
|
||||
timeout: float,
|
||||
) -> None:
|
||||
"""HASSHServer-fingerprint an IP on the given SSH ports."""
|
||||
done = ip_probed.setdefault("hassh", set())
|
||||
for port in ports:
|
||||
if port in done:
|
||||
continue
|
||||
try:
|
||||
result = hassh_server(ip, port, timeout=timeout)
|
||||
done.add(port)
|
||||
if result is None:
|
||||
continue
|
||||
_write_event(
|
||||
log_path, json_path,
|
||||
"hassh_fingerprint",
|
||||
target_ip=ip,
|
||||
target_port=str(port),
|
||||
hassh_server_hash=result["hassh_server"],
|
||||
ssh_banner=result["banner"],
|
||||
kex_algorithms=result["kex_algorithms"],
|
||||
encryption_s2c=result["encryption_s2c"],
|
||||
mac_s2c=result["mac_s2c"],
|
||||
compression_s2c=result["compression_s2c"],
|
||||
msg=f"HASSH {ip}:{port} = {result['hassh_server']}",
|
||||
)
|
||||
logger.info("prober: HASSH %s:%d = %s", ip, port, result["hassh_server"])
|
||||
except Exception as exc:
|
||||
done.add(port)
|
||||
_write_event(
|
||||
log_path, json_path,
|
||||
"prober_error",
|
||||
severity=_SEVERITY_WARNING,
|
||||
target_ip=ip,
|
||||
target_port=str(port),
|
||||
error=str(exc),
|
||||
msg=f"HASSH probe failed for {ip}:{port}: {exc}",
|
||||
)
|
||||
logger.warning("prober: HASSH probe failed %s:%d: %s", ip, port, exc)
|
||||
|
||||
|
||||
@_traced("prober.tcpfp_phase")
|
||||
def _tcpfp_phase(
|
||||
ip: str,
|
||||
ip_probed: dict[str, set[int]],
|
||||
ports: list[int],
|
||||
log_path: Path,
|
||||
json_path: Path,
|
||||
timeout: float,
|
||||
) -> None:
|
||||
"""TCP/IP stack fingerprint an IP on the given ports."""
|
||||
done = ip_probed.setdefault("tcpfp", set())
|
||||
for port in ports:
|
||||
if port in done:
|
||||
continue
|
||||
try:
|
||||
result = tcp_fingerprint(ip, port, timeout=timeout)
|
||||
done.add(port)
|
||||
if result is None:
|
||||
continue
|
||||
_write_event(
|
||||
log_path, json_path,
|
||||
"tcpfp_fingerprint",
|
||||
target_ip=ip,
|
||||
target_port=str(port),
|
||||
tcpfp_hash=result["tcpfp_hash"],
|
||||
tcpfp_raw=result["tcpfp_raw"],
|
||||
ttl=str(result["ttl"]),
|
||||
window_size=str(result["window_size"]),
|
||||
df_bit=str(result["df_bit"]),
|
||||
mss=str(result["mss"]),
|
||||
window_scale=str(result["window_scale"]),
|
||||
sack_ok=str(result["sack_ok"]),
|
||||
timestamp=str(result["timestamp"]),
|
||||
options_order=result["options_order"],
|
||||
msg=f"TCPFP {ip}:{port} = {result['tcpfp_hash']}",
|
||||
)
|
||||
logger.info("prober: TCPFP %s:%d = %s", ip, port, result["tcpfp_hash"])
|
||||
except Exception as exc:
|
||||
done.add(port)
|
||||
_write_event(
|
||||
log_path, json_path,
|
||||
"prober_error",
|
||||
severity=_SEVERITY_WARNING,
|
||||
target_ip=ip,
|
||||
target_port=str(port),
|
||||
error=str(exc),
|
||||
msg=f"TCPFP probe failed for {ip}:{port}: {exc}",
|
||||
)
|
||||
logger.warning("prober: TCPFP probe failed %s:%d: %s", ip, port, exc)
|
||||
|
||||
|
||||
# ─── Main worker ─────────────────────────────────────────────────────────────

@_traced("prober.worker")
async def prober_worker(
    log_file: str,
    interval: int = 300,
    timeout: float = 5.0,
    ports: list[int] | None = None,
    ssh_ports: list[int] | None = None,
    tcpfp_ports: list[int] | None = None,
) -> None:
    """
    Main entry point for the standalone prober process.

    Discovers attacker IPs automatically by tailing the JSON log file,
    then fingerprints each IP via JARM, HASSH, and TCP/IP stack probes.

    Args:
        log_file: base path for log files (RFC 5424 to .log, JSON to .json)
        interval: seconds between probe cycles
        timeout: per-probe TCP timeout
        ports: JARM TLS ports (defaults to DEFAULT_PROBE_PORTS)
        ssh_ports: HASSH SSH ports (defaults to DEFAULT_SSH_PORTS)
        tcpfp_ports: TCP fingerprint ports (defaults to DEFAULT_TCPFP_PORTS)
    """
    jarm_ports = ports or DEFAULT_PROBE_PORTS
    hassh_ports = ssh_ports or DEFAULT_SSH_PORTS
    tcp_ports = tcpfp_ports or DEFAULT_TCPFP_PORTS

    all_ports_str = (
        f"jarm={','.join(str(p) for p in jarm_ports)} "
        f"ssh={','.join(str(p) for p in hassh_ports)} "
        f"tcpfp={','.join(str(p) for p in tcp_ports)}"
    )

    log_path = Path(log_file)
    json_path = log_path.with_suffix(".json")
    log_path.parent.mkdir(parents=True, exist_ok=True)

    logger.info(
        "prober started interval=%ds %s log=%s",
        interval, all_ports_str, log_path,
    )

    _write_event(
        log_path, json_path,
        "prober_startup",
        interval=str(interval),
        probe_ports=all_ports_str,
        msg=f"DECNET-PROBER started, interval {interval}s, {all_ports_str}",
    )

    known_attackers: set[str] = set()
    probed: dict[str, dict[str, set[int]]] = {}  # IP -> {type -> ports}
    log_position: int = 0

    while True:
        # Discover new attacker IPs from the log stream
        new_ips, log_position = await asyncio.to_thread(
            _discover_attackers, json_path, log_position,
        )

        fresh = new_ips - known_attackers
        if fresh:
            known_attackers.update(fresh)
            logger.info(
                "prober: discovered %d new attacker(s), total=%d",
                len(fresh), len(known_attackers),
            )

        if known_attackers:
            await asyncio.to_thread(
                _probe_cycle, known_attackers, probed,
                jarm_ports, hassh_ports, tcp_ports,
                log_path, json_path, timeout,
            )

        await asyncio.sleep(interval)
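# Hypothetical launcher sketch, not part of the module (log path assumed):
#
#     import asyncio
#     from decnet.prober.worker import prober_worker
#
#     asyncio.run(prober_worker("/var/log/decnet/prober.log", interval=300))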
5  decnet/profiler/__init__.py  Normal file
@@ -0,0 +1,5 @@
"""DECNET profiler — standalone attacker profile builder worker."""

from decnet.profiler.worker import attacker_profile_worker

__all__ = ["attacker_profile_worker"]
602  decnet/profiler/behavioral.py  Normal file
@@ -0,0 +1,602 @@
"""
Behavioral and timing analysis for DECNET attacker profiles.

Consumes the chronological `LogEvent` stream already built by
`decnet.correlation.engine.CorrelationEngine` and derives per-IP metrics:

- Inter-event timing statistics (mean / median / stdev / min / max)
- Coefficient of variation (jitter metric)
- Beaconing vs. interactive vs. scanning vs. brute_force vs. slow_scan
  classification
- Tool attribution against known C2 frameworks (Cobalt Strike, Sliver,
  Havoc, Mythic) using default beacon/jitter profiles — returns a list,
  since multiple tools can be in use simultaneously
- Header-based tool detection (Nmap NSE, Gophish, Nikto, sqlmap, etc.)
  from HTTP request events
- Recon → exfil phase sequencing (latency between the last recon event
  and the first exfil-like event)
- OS / TCP fingerprint + retransmit rollup from sniffer-emitted events,
  with TTL-based fallback when p0f returns no match

Pure Python; no external dependencies. All functions are safe to call from
both sync and async contexts.
"""

from __future__ import annotations

import json
import re
import statistics
from collections import Counter
from typing import Any

from decnet.correlation.parser import LogEvent
from decnet.telemetry import traced as _traced, get_tracer as _get_tracer

# ─── Event-type taxonomy ────────────────────────────────────────────────────

# Sniffer-emitted packet events that feed into the fingerprint rollup.
_SNIFFER_SYN_EVENT: str = "tcp_syn_fingerprint"
_SNIFFER_FLOW_EVENT: str = "tcp_flow_timing"
# Prober-emitted active-probe result (SYN-ACK fingerprint of the attacker machine).
_PROBER_TCPFP_EVENT: str = "tcpfp_fingerprint"

# Canonical initial TTL for each coarse OS bucket. Used to derive hop
# distance when only the observed TTL is available (prober path).
_INITIAL_TTL: dict[str, int] = {
    "linux": 64,
    "windows": 128,
    "embedded": 255,
}

# Events that signal the "recon" phase (scans, probes, auth attempts).
_RECON_EVENT_TYPES: frozenset[str] = frozenset({
    "scan", "connection", "banner", "probe",
    "login_attempt", "auth", "auth_failure",
})

# Events that signal the "exfil" / action-on-objective phase.
_EXFIL_EVENT_TYPES: frozenset[str] = frozenset({
    "download", "upload", "file_transfer", "data_exfil",
    "command", "exec", "query", "shell_input",
})

# Fields carrying payload byte counts (for "large payload" detection).
_PAYLOAD_SIZE_FIELDS: tuple[str, ...] = ("bytes", "size", "content_length")

# ─── C2 tool attribution signatures (beacon timing) ─────────────────────────
#
# Each entry lists the default beacon cadence profile of a popular C2.
# A profile *matches* an attacker when:
#   - mean inter-event time is within ±`interval_tolerance` seconds, AND
#   - jitter (cv = stdev / mean) is within ±`jitter_tolerance`
#
# Multiple matches are all returned (an attacker may run multiple implants).

_TOOL_SIGNATURES: tuple[dict[str, Any], ...] = (
    {
        "name": "cobalt_strike",
        "interval_s": 60.0,
        "interval_tolerance_s": 8.0,
        "jitter_cv": 0.20,
        "jitter_tolerance": 0.05,
    },
    {
        "name": "sliver",
        "interval_s": 60.0,
        "interval_tolerance_s": 10.0,
        "jitter_cv": 0.30,
        "jitter_tolerance": 0.08,
    },
    {
        "name": "havoc",
        "interval_s": 45.0,
        "interval_tolerance_s": 8.0,
        "jitter_cv": 0.10,
        "jitter_tolerance": 0.03,
    },
    {
        "name": "mythic",
        "interval_s": 30.0,
        "interval_tolerance_s": 6.0,
        "jitter_cv": 0.15,
        "jitter_tolerance": 0.03,
    },
)

# ─── Header-based tool signatures ───────────────────────────────────────────
#
# Scanned against HTTP `request` events. `pattern` is a case-insensitive
# substring (or a regex anchored with ^ if it starts with that character).
# `header` is matched case-insensitively against the event's headers dict.

_HEADER_TOOL_SIGNATURES: tuple[dict[str, str], ...] = (
    {"name": "nmap", "header": "user-agent", "pattern": "Nmap Scripting Engine"},
    {"name": "gophish", "header": "x-mailer", "pattern": "gophish"},
    {"name": "nikto", "header": "user-agent", "pattern": "Nikto"},
    {"name": "sqlmap", "header": "user-agent", "pattern": "sqlmap"},
    {"name": "nuclei", "header": "user-agent", "pattern": "Nuclei"},
    {"name": "masscan", "header": "user-agent", "pattern": "masscan"},
    {"name": "zgrab", "header": "user-agent", "pattern": "zgrab"},
    {"name": "metasploit", "header": "user-agent", "pattern": "Metasploit"},
    {"name": "curl", "header": "user-agent", "pattern": "^curl/"},
    {"name": "python_requests", "header": "user-agent", "pattern": "python-requests"},
    {"name": "gobuster", "header": "user-agent", "pattern": "gobuster"},
    {"name": "dirbuster", "header": "user-agent", "pattern": "DirBuster"},
    {"name": "hydra", "header": "user-agent", "pattern": "hydra"},
    {"name": "wfuzz", "header": "user-agent", "pattern": "Wfuzz"},
)

# ─── TTL → coarse OS bucket (fallback when p0f returns nothing) ─────────────

def _os_from_ttl(ttl_str: str | None) -> str | None:
    """Derive a coarse OS guess from the observed TTL when p0f has no match."""
    if not ttl_str:
        return None
    try:
        ttl = int(ttl_str)
    except (TypeError, ValueError):
        return None
    if 55 <= ttl <= 70:
        return "linux"
    if 115 <= ttl <= 135:
        return "windows"
    if 235 <= ttl <= 255:
        return "embedded"
    return None
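# Illustrative sketch, not part of the module: TTL 57 falls in the Linux
# window (canonical initial TTL 64 minus a few hops); values outside all
# three windows yield None.
#
# >>> _os_from_ttl("57")
# 'linux'
# >>> _os_from_ttl("200") is None
# True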
# ─── Timing stats ───────────────────────────────────────────────────────────

def _no_iat_stats(event_count: int, duration_s: float) -> dict[str, Any]:
    """Stats dict for sessions with too few usable inter-arrival intervals."""
    return {
        "event_count": event_count,
        "duration_s": round(duration_s, 3),
        "mean_iat_s": None,
        "median_iat_s": None,
        "stdev_iat_s": None,
        "min_iat_s": None,
        "max_iat_s": None,
        "cv": None,
    }


@_traced("profiler.timing_stats")
def timing_stats(events: list[LogEvent]) -> dict[str, Any]:
    """
    Compute inter-arrival-time statistics across *events* (sorted by ts).

    Returns a dict with:
        mean_iat_s, median_iat_s, stdev_iat_s, min_iat_s, max_iat_s, cv,
        event_count, duration_s

    For n < 2 events the interval-based fields are None/0.
    """
    if not events:
        return _no_iat_stats(0, 0.0)

    sorted_events = sorted(events, key=lambda e: e.timestamp)
    duration_s = (sorted_events[-1].timestamp - sorted_events[0].timestamp).total_seconds()

    if len(sorted_events) < 2:
        return _no_iat_stats(len(sorted_events), duration_s)

    iats = [
        (sorted_events[i].timestamp - sorted_events[i - 1].timestamp).total_seconds()
        for i in range(1, len(sorted_events))
    ]
    # Exclude spuriously negative (clock-skew) intervals.
    iats = [v for v in iats if v >= 0]
    if not iats:
        return _no_iat_stats(len(sorted_events), duration_s)

    mean = statistics.fmean(iats)
    median = statistics.median(iats)
    stdev = statistics.pstdev(iats) if len(iats) > 1 else 0.0
    cv = (stdev / mean) if mean > 0 else None

    return {
        "event_count": len(sorted_events),
        "duration_s": round(duration_s, 3),
        "mean_iat_s": round(mean, 3),
        "median_iat_s": round(median, 3),
        "stdev_iat_s": round(stdev, 3),
        "min_iat_s": round(min(iats), 3),
        "max_iat_s": round(max(iats), 3),
        "cv": round(cv, 4) if cv is not None else None,
    }


# ─── Behavior classification ────────────────────────────────────────────────

@_traced("profiler.classify_behavior")
def classify_behavior(stats: dict[str, Any], services_count: int) -> str:
    """
    Coarse behavior bucket:
        beaconing | interactive | scanning | brute_force | slow_scan | mixed | unknown

    Heuristics (evaluated in priority order):
      * `slow_scan`   — ≥ 2 services, mean IAT ≥ 10 s, ≥ 4 events
      * `scanning`    — ≥ 3 services with mean IAT < 10 s, or ≥ 2 services
                        with mean IAT < 2 s; ≥ 3 events
      * `brute_force` — 1 service, n ≥ 8, mean IAT < 5 s, CV < 0.6
      * `beaconing`   — CV < 0.35, mean IAT ≥ 5 s, ≥ 4 events
      * `interactive` — mean IAT < 5 s AND CV ≥ 0.5, ≥ 6 events
      * `mixed`       — catch-all for sessions with enough data
      * `unknown`     — too few data points
    """
    n = stats.get("event_count") or 0
    mean = stats.get("mean_iat_s")
    cv = stats.get("cv")

    if n < 3 or mean is None:
        return "unknown"

    # Slow scan / low-and-slow: multiple services with long gaps.
    # Must be checked before generic scanning so slow multi-service sessions
    # don't get mis-bucketed as a fast sweep.
    if services_count >= 2 and mean >= 10.0 and n >= 4:
        return "slow_scan"

    # Scanning: broad service sweep, or a narrower multi-service sweep with
    # very rapid bursts.
    if n >= 3 and (
        (services_count >= 3 and mean < 10.0)
        or (services_count >= 2 and mean < 2.0)
    ):
        return "scanning"

    # Brute force: hammering one service rapidly and repeatedly.
    if services_count == 1 and n >= 8 and mean < 5.0 and cv is not None and cv < 0.6:
        return "brute_force"

    # Beaconing: regular cadence over multiple events.
    if cv is not None and cv < 0.35 and mean >= 5.0 and n >= 4:
        return "beaconing"

    # Interactive: short but irregular bursts (human or tool with think time).
    if cv is not None and cv >= 0.5 and mean < 5.0 and n >= 6:
        return "interactive"

    return "mixed"
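# Illustrative sketch, not part of the module: a steady 60 s cadence with
# low jitter against a single service over 10 events classifies as beaconing.
#
# >>> classify_behavior({"event_count": 10, "mean_iat_s": 60.0, "cv": 0.2}, 1)
# 'beaconing'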
# ─── C2 tool attribution (beacon timing) ────────────────────────────────────

def guess_tools(mean_iat_s: float | None, cv: float | None) -> list[str]:
    """
    Match (mean_iat, cv) against known C2 default beacon profiles.

    Returns a list of all matching tool names (may be empty). Multiple
    matches are all returned because an attacker can run several implants.
    """
    if mean_iat_s is None or cv is None:
        return []

    hits: list[str] = []
    for sig in _TOOL_SIGNATURES:
        if abs(mean_iat_s - sig["interval_s"]) > sig["interval_tolerance_s"]:
            continue
        if abs(cv - sig["jitter_cv"]) > sig["jitter_tolerance"]:
            continue
        hits.append(sig["name"])

    return hits


# Keep the old name as an alias so callers that expected a single string still
# compile, but mark it deprecated. Returns the sole match, or None when zero
# or multiple tools match.
def guess_tool(mean_iat_s: float | None, cv: float | None) -> str | None:
    """Deprecated: use guess_tools() instead."""
    hits = guess_tools(mean_iat_s, cv)
    if len(hits) == 1:
        return hits[0]
    return None
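# Illustrative sketch, not part of the module: a 61 s mean with cv 0.22 sits
# inside both cobalt_strike's ±8 s / ±0.05 window and sliver's ±10 s / ±0.08
# window, so both are returned (in signature order); the legacy alias
# returns None because the match is ambiguous.
#
# >>> guess_tools(61.0, 0.22)
# ['cobalt_strike', 'sliver']
# >>> guess_tool(61.0, 0.22) is None
# True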
# ─── Header-based tool detection ────────────────────────────────────────────

@_traced("profiler.detect_tools_from_headers")
def detect_tools_from_headers(events: list[LogEvent]) -> list[str]:
    """
    Scan HTTP `request` events for tool-identifying headers.

    Checks User-Agent, X-Mailer, and other headers case-insensitively
    against `_HEADER_TOOL_SIGNATURES`. Returns a deduplicated list of
    matched tool names in detection order.
    """
    found: list[str] = []
    seen: set[str] = set()

    for e in events:
        if e.event_type != "request":
            continue

        raw_headers = e.fields.get("headers")
        if not raw_headers:
            continue

        # Headers may arrive as a JSON string, a Python-repr string (legacy),
        # or already as a dict (in-memory / test paths).
        if isinstance(raw_headers, str):
            try:
                headers: dict[str, str] = json.loads(raw_headers)
            except (json.JSONDecodeError, ValueError):
                # Backward-compat: events written before the JSON-encode fix
                # were serialized as Python repr via str(dict). ast.literal_eval
                # handles that safely (no arbitrary code execution).
                try:
                    import ast as _ast
                    _parsed = _ast.literal_eval(raw_headers)
                    if isinstance(_parsed, dict):
                        headers = _parsed
                    else:
                        continue
                except Exception:  # nosec B112 — skip unparseable header values
                    continue
        elif isinstance(raw_headers, dict):
            headers = raw_headers
        else:
            continue

        # Normalise header keys to lowercase for matching.
        lc_headers: dict[str, str] = {k.lower(): str(v) for k, v in headers.items()}

        for sig in _HEADER_TOOL_SIGNATURES:
            name = sig["name"]
            if name in seen:
                continue
            value = lc_headers.get(sig["header"])
            if value is None:
                continue
            pattern = sig["pattern"]
            if pattern.startswith("^"):
                if re.match(pattern, value, re.IGNORECASE):
                    found.append(name)
                    seen.add(name)
            elif pattern.lower() in value.lower():
                found.append(name)
                seen.add(name)

    return found
# ─── Phase sequencing ───────────────────────────────────────────────────────

@_traced("profiler.phase_sequence")
def phase_sequence(events: list[LogEvent]) -> dict[str, Any]:
    """
    Derive recon→exfil phase transition info.

    Returns:
        recon_end_ts       : ISO timestamp of last recon-class event (or None)
        exfil_start_ts     : ISO timestamp of first exfil-class event (or None)
        exfil_latency_s    : seconds between them (None if not both present)
        large_payload_count: count of events whose *fields* report a payload
                             ≥ 1 MiB (heuristic for bulk data transfer)
    """
    recon_end = None
    exfil_start = None
    large_payload_count = 0

    for e in sorted(events, key=lambda x: x.timestamp):
        if e.event_type in _RECON_EVENT_TYPES:
            recon_end = e.timestamp
        elif e.event_type in _EXFIL_EVENT_TYPES and exfil_start is None:
            exfil_start = e.timestamp

        for fname in _PAYLOAD_SIZE_FIELDS:
            raw = e.fields.get(fname)
            if raw is None:
                continue
            try:
                if int(raw) >= 1_048_576:
                    large_payload_count += 1
                    break
            except (TypeError, ValueError):
                continue

    latency: float | None = None
    if recon_end is not None and exfil_start is not None and exfil_start >= recon_end:
        latency = round((exfil_start - recon_end).total_seconds(), 3)

    return {
        "recon_end_ts": recon_end.isoformat() if recon_end else None,
        "exfil_start_ts": exfil_start.isoformat() if exfil_start else None,
        "exfil_latency_s": latency,
        "large_payload_count": large_payload_count,
    }


# ─── Sniffer rollup (OS fingerprint + retransmits) ──────────────────────────

@_traced("profiler.sniffer_rollup")
def sniffer_rollup(events: list[LogEvent]) -> dict[str, Any]:
    """
    Roll up sniffer-emitted `tcp_syn_fingerprint` and `tcp_flow_timing`
    events into a per-attacker summary.

    OS guess priority:
      1. Modal p0f label from the os_guess field (if not "unknown"/empty).
      2. TTL-based coarse bucket (linux / windows / embedded) as fallback.
    Hop distance: median of non-zero reported values only.
    """
    os_guesses: list[str] = []
    ttl_values: list[str] = []
    hops: list[int] = []
    tcp_fp: dict[str, Any] | None = None
    retransmits = 0

    for e in events:
        if e.event_type == _SNIFFER_SYN_EVENT:
            og = e.fields.get("os_guess")
            if og and og != "unknown":
                os_guesses.append(og)

            # Collect raw TTL for fallback OS derivation.
            ttl_raw = e.fields.get("ttl") or e.fields.get("initial_ttl")
            if ttl_raw:
                ttl_values.append(ttl_raw)

            # Only include hop distances that are valid and non-zero.
            hop_raw = e.fields.get("hop_distance")
            if hop_raw:
                try:
                    hop_val = int(hop_raw)
                    if hop_val > 0:
                        hops.append(hop_val)
                except (TypeError, ValueError):
                    pass

            # Keep the latest fingerprint snapshot.
            tcp_fp = {
                "window": _int_or_none(e.fields.get("window")),
                "wscale": _int_or_none(e.fields.get("wscale")),
                "mss": _int_or_none(e.fields.get("mss")),
                "options_sig": e.fields.get("options_sig", ""),
                "has_sack": e.fields.get("has_sack") == "true",
                "has_timestamps": e.fields.get("has_timestamps") == "true",
            }

        elif e.event_type == _SNIFFER_FLOW_EVENT:
            try:
                retransmits += int(e.fields.get("retransmits", "0"))
            except (TypeError, ValueError):
                pass

        elif e.event_type == _PROBER_TCPFP_EVENT:
            # Active-probe result: the prober sent a SYN to the attacker and
            # got a SYN-ACK back. Field names differ from the passive sniffer
            # (different emitter).
            ttl_raw = e.fields.get("ttl")
            if ttl_raw:
                ttl_values.append(ttl_raw)

            # Derive hop distance from observed TTL vs canonical initial TTL.
            os_hint = _os_from_ttl(ttl_raw)
            if os_hint:
                initial = _INITIAL_TTL.get(os_hint)
                if initial:
                    try:
                        hop_val = initial - int(ttl_raw)
                        if hop_val > 0:
                            hops.append(hop_val)
                    except (TypeError, ValueError):
                        pass

            # The prober uses window_size/window_scale/options_order instead
            # of the sniffer's window/wscale/options_sig.
            tcp_fp = {
                "window": _int_or_none(e.fields.get("window_size")),
                "wscale": _int_or_none(e.fields.get("window_scale")),
                "mss": _int_or_none(e.fields.get("mss")),
                "options_sig": e.fields.get("options_order", ""),
                "has_sack": e.fields.get("sack_ok") == "1",
                "has_timestamps": e.fields.get("timestamp") == "1",
            }

    # Mode for the OS bucket — most frequently observed label.
    os_guess: str | None = None
    if os_guesses:
        os_guess = Counter(os_guesses).most_common(1)[0][0]
    elif ttl_values:
        # TTL-based fallback: use the most common observed TTL value.
        modal_ttl = Counter(ttl_values).most_common(1)[0][0]
        os_guess = _os_from_ttl(modal_ttl)

    # Median hop distance (robust to the occasional weird TTL).
    hop_distance: int | None = None
    if hops:
        hop_distance = int(statistics.median(hops))

    return {
        "os_guess": os_guess,
        "hop_distance": hop_distance,
        "tcp_fingerprint": tcp_fp or {},
        "retransmit_count": retransmits,
    }


def _int_or_none(v: Any) -> int | None:
    if v is None or v == "":
        return None
    try:
        return int(v)
    except (TypeError, ValueError):
        return None


# ─── Composite: build the full AttackerBehavior record ──────────────────────

@_traced("profiler.build_behavior_record")
def build_behavior_record(events: list[LogEvent]) -> dict[str, Any]:
    """
    Build the dict to persist in the `attacker_behavior` table.

    Callers (the profiler worker) pre-serialize JSON-typed fields; we do the
    JSON encoding here to keep the repo layer schema-agnostic.
    """
    # Timing stats are computed across *all* events (not filtered), because
    # a C2 beacon often reuses the same "connection" event_type on each
    # check-in. Filtering would throw that signal away.
    stats = timing_stats(events)
    services = {e.service for e in events}
    behavior = classify_behavior(stats, len(services))
    rollup = sniffer_rollup(events)
    phase = phase_sequence(events)

    # Combine beacon-timing tool matches with header-based detections.
    beacon_tools = guess_tools(stats.get("mean_iat_s"), stats.get("cv"))
    header_tools = detect_tools_from_headers(events)
    all_tools: list[str] = list(dict.fromkeys(beacon_tools + header_tools))  # dedup, preserve order

    # Promote TCP-level scanner identification to tool_guesses.
    # p0f fingerprints nmap from the TCP handshake alone — this fires even
    # when no HTTP service is present, making it far more reliable than the
    # header-based path for raw port scans.
    if rollup["os_guess"] == "nmap" and "nmap" not in all_tools:
        all_tools.insert(0, "nmap")

    # Beacon-specific projection: only surface interval/jitter when we've
    # classified the flow as beaconing (otherwise these numbers are noise).
    beacon_interval_s: float | None = None
    beacon_jitter_pct: float | None = None
    if behavior == "beaconing":
        beacon_interval_s = stats.get("mean_iat_s")
        cv = stats.get("cv")
        beacon_jitter_pct = round(cv * 100, 2) if cv is not None else None

    _tracer = _get_tracer("profiler")
    with _tracer.start_as_current_span("profiler.behavior_summary") as _span:
        _span.set_attribute("behavior_class", behavior)
        _span.set_attribute("os_guess", rollup["os_guess"] or "unknown")
        _span.set_attribute("tool_count", len(all_tools))
        _span.set_attribute("event_count", stats.get("event_count", 0))
        if all_tools:
            _span.set_attribute("tools", ",".join(all_tools))

    return {
        "os_guess": rollup["os_guess"],
        "hop_distance": rollup["hop_distance"],
        "tcp_fingerprint": json.dumps(rollup["tcp_fingerprint"]),
        "retransmit_count": rollup["retransmit_count"],
        "behavior_class": behavior,
        "beacon_interval_s": beacon_interval_s,
        "beacon_jitter_pct": beacon_jitter_pct,
        "tool_guesses": json.dumps(all_tools),
        "timing_stats": json.dumps(stats),
        "phase_sequence": json.dumps(phase),
    }
215  decnet/profiler/worker.py  Normal file
@@ -0,0 +1,215 @@
"""
Attacker profile builder — incremental background worker.

Maintains a persistent CorrelationEngine and a log-ID cursor across cycles.
On cold start (first cycle or process restart), performs one full build from
all stored logs. Subsequent cycles fetch only new logs via the cursor,
ingest them into the existing engine, and rebuild profiles for affected IPs
only.

Complexity per cycle: O(new_logs + affected_ips) instead of O(total_logs²).
"""

from __future__ import annotations

import asyncio
import json
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any

from decnet.correlation.engine import CorrelationEngine
from decnet.correlation.parser import LogEvent
from decnet.logging import get_logger
from decnet.profiler.behavioral import build_behavior_record
from decnet.telemetry import traced as _traced, get_tracer as _get_tracer
from decnet.web.db.repository import BaseRepository

logger = get_logger("attacker_worker")

_BATCH_SIZE = 500
_STATE_KEY = "attacker_worker_cursor"

# Event types that indicate active command/query execution (not just connection/scan)
_COMMAND_EVENT_TYPES = frozenset({
    "command", "exec", "query", "input", "shell_input",
    "execute", "run", "sql_query", "redis_command",
})

# Fields that carry the executed command/query text
_COMMAND_FIELDS = ("command", "query", "input", "line", "sql", "cmd")


@dataclass
class _WorkerState:
    engine: CorrelationEngine = field(default_factory=CorrelationEngine)
    last_log_id: int = 0
    initialized: bool = False


async def attacker_profile_worker(repo: BaseRepository, *, interval: int = 30) -> None:
    """Periodically update the Attacker table incrementally. Designed to run as an asyncio Task."""
    logger.info("attacker profile worker started interval=%ds", interval)
    state = _WorkerState()
    _saved_cursor = await repo.get_state(_STATE_KEY)
    if _saved_cursor:
        state.last_log_id = _saved_cursor.get("last_log_id", 0)
        state.initialized = True
        logger.info("attacker worker: resumed from cursor last_log_id=%d", state.last_log_id)
    while True:
        await asyncio.sleep(interval)
        try:
            await _incremental_update(repo, state)
        except Exception as exc:
            logger.error("attacker worker: update failed: %s", exc)


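# Hypothetical wiring sketch, not part of the module, started as a
# background task during application startup:
#
#     task = asyncio.create_task(attacker_profile_worker(repo, interval=30))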
@_traced("profiler.incremental_update")
|
||||
async def _incremental_update(repo: BaseRepository, state: _WorkerState) -> None:
|
||||
was_cold = not state.initialized
|
||||
affected_ips: set[str] = set()
|
||||
|
||||
while True:
|
||||
batch = await repo.get_logs_after_id(state.last_log_id, limit=_BATCH_SIZE)
|
||||
if not batch:
|
||||
break
|
||||
|
||||
for row in batch:
|
||||
event = state.engine.ingest(row["raw_line"])
|
||||
if event and event.attacker_ip:
|
||||
affected_ips.add(event.attacker_ip)
|
||||
state.last_log_id = row["id"]
|
||||
|
||||
await asyncio.sleep(0) # yield to event loop after each batch
|
||||
|
||||
if len(batch) < _BATCH_SIZE:
|
||||
break
|
||||
|
||||
state.initialized = True
|
||||
|
||||
if not affected_ips:
|
||||
await repo.set_state(_STATE_KEY, {"last_log_id": state.last_log_id})
|
||||
return
|
||||
|
||||
await _update_profiles(repo, state, affected_ips)
|
||||
await repo.set_state(_STATE_KEY, {"last_log_id": state.last_log_id})
|
||||
|
||||
if was_cold:
|
||||
logger.info("attacker worker: cold start rebuilt %d profiles", len(affected_ips))
|
||||
else:
|
||||
logger.info("attacker worker: updated %d profiles (incremental)", len(affected_ips))
|
||||
|
||||
|
||||
@_traced("profiler.update_profiles")
|
||||
async def _update_profiles(
|
||||
repo: BaseRepository,
|
||||
state: _WorkerState,
|
||||
ips: set[str],
|
||||
) -> None:
|
||||
traversal_map = {t.attacker_ip: t for t in state.engine.traversals(min_deckies=2)}
|
||||
bounties_map = await repo.get_bounties_for_ips(ips)
|
||||
|
||||
_tracer = _get_tracer("profiler")
|
||||
for ip in ips:
|
||||
events = state.engine._events.get(ip, [])
|
||||
if not events:
|
||||
continue
|
||||
|
||||
with _tracer.start_as_current_span("profiler.process_ip") as _span:
|
||||
_span.set_attribute("attacker_ip", ip)
|
||||
_span.set_attribute("event_count", len(events))
|
||||
|
||||
traversal = traversal_map.get(ip)
|
||||
bounties = bounties_map.get(ip, [])
|
||||
commands = _extract_commands_from_events(events)
|
||||
|
||||
record = _build_record(ip, events, traversal, bounties, commands)
|
||||
attacker_uuid = await repo.upsert_attacker(record)
|
||||
|
||||
_span.set_attribute("is_traversal", traversal is not None)
|
||||
_span.set_attribute("bounty_count", len(bounties))
|
||||
_span.set_attribute("command_count", len(commands))
|
||||
|
||||
# Behavioral / fingerprint rollup lives in a sibling table so failures
|
||||
# here never block the core attacker profile upsert.
|
||||
try:
|
||||
behavior = build_behavior_record(events)
|
||||
await repo.upsert_attacker_behavior(attacker_uuid, behavior)
|
||||
except Exception as exc:
|
||||
_span.record_exception(exc)
|
||||
logger.error("attacker worker: behavior upsert failed for %s: %s", ip, exc)
|
||||
|
||||
|
||||
def _build_record(
|
||||
ip: str,
|
||||
events: list[LogEvent],
|
||||
traversal: Any,
|
||||
bounties: list[dict[str, Any]],
|
||||
commands: list[dict[str, Any]],
|
||||
) -> dict[str, Any]:
|
||||
services = sorted({e.service for e in events})
|
||||
deckies = (
|
||||
traversal.deckies
|
||||
if traversal
|
||||
else _first_contact_deckies(events)
|
||||
)
|
||||
fingerprints = [b for b in bounties if b.get("bounty_type") == "fingerprint"]
|
||||
credential_count = sum(1 for b in bounties if b.get("bounty_type") == "credential")
|
||||
|
||||
return {
|
||||
"ip": ip,
|
||||
"first_seen": min(e.timestamp for e in events),
|
||||
"last_seen": max(e.timestamp for e in events),
|
||||
"event_count": len(events),
|
||||
"service_count": len(services),
|
||||
"decky_count": len({e.decky for e in events}),
|
||||
"services": json.dumps(services),
|
||||
"deckies": json.dumps(deckies),
|
||||
"traversal_path": traversal.path if traversal else None,
|
||||
"is_traversal": traversal is not None,
|
||||
"bounty_count": len(bounties),
|
||||
"credential_count": credential_count,
|
||||
"fingerprints": json.dumps(fingerprints),
|
||||
"commands": json.dumps(commands),
|
||||
"updated_at": datetime.now(timezone.utc),
|
||||
}
|
||||
|
||||
|
||||
def _first_contact_deckies(events: list[LogEvent]) -> list[str]:
|
||||
"""Return unique deckies in first-contact order (for non-traversal attackers)."""
|
||||
seen: list[str] = []
|
||||
for e in sorted(events, key=lambda x: x.timestamp):
|
||||
if e.decky not in seen:
|
||||
seen.append(e.decky)
|
||||
return seen
|
||||
|
||||
|
||||
def _extract_commands_from_events(events: list[LogEvent]) -> list[dict[str, Any]]:
|
||||
"""
|
||||
Extract executed commands from LogEvent objects.
|
||||
|
||||
Works directly on LogEvent.fields (already a dict), so no JSON parsing needed.
|
||||
"""
|
||||
commands: list[dict[str, Any]] = []
|
||||
for event in events:
|
||||
if event.event_type not in _COMMAND_EVENT_TYPES:
|
||||
continue
|
||||
|
||||
cmd_text: str | None = None
|
||||
for key in _COMMAND_FIELDS:
|
||||
val = event.fields.get(key)
|
||||
if val:
|
||||
cmd_text = str(val)
|
||||
break
|
||||
|
||||
if not cmd_text:
|
||||
continue
|
||||
|
||||
commands.append({
|
||||
"service": event.service,
|
||||
"decky": event.decky,
|
||||
"command": cmd_text,
|
||||
"timestamp": event.timestamp.isoformat(),
|
||||
})
|
||||
|
||||
return commands
|
||||
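The cursor contract above is easiest to see end to end. A minimal launch sketch, assuming only the repository surface the worker itself uses (initialize plus the get_state/set_state cursor row); the 120-second runtime is purely illustrative:

import asyncio
import contextlib

from decnet.profiler.worker import attacker_profile_worker
from decnet.web.db.factory import get_repository


async def main() -> None:
    repo = get_repository()  # backend selected via DECNET_DB_TYPE
    await repo.initialize()
    # On a fresh DB there is no "attacker_worker_cursor" row, so the first
    # cycle is a cold full build; later restarts resume from the saved cursor.
    task = asyncio.create_task(attacker_profile_worker(repo, interval=30))
    await asyncio.sleep(120)  # let a few cycles run (illustrative)
    task.cancel()
    with contextlib.suppress(asyncio.CancelledError):
        await task


asyncio.run(main())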
@@ -13,6 +13,7 @@ class BaseService(ABC):
     name: str           # unique slug, e.g. "ssh", "smb"
     ports: list[int]    # ports this service listens on inside the container
     default_image: str  # Docker image tag, or "build" if a Dockerfile is needed
+    fleet_singleton: bool = False  # True = runs once fleet-wide, not per-decky

     @abstractmethod
     def compose_fragment(
@@ -1,26 +1,35 @@
+from pathlib import Path
 from decnet.services.base import BaseService


 class ConpotService(BaseService):
     """ICS/SCADA honeypot covering Modbus (502), SNMP (161 UDP), and HTTP (80).

-    Uses the official honeynet/conpot image which ships a default ICS profile
-    that emulates a Siemens S7-200 PLC.
+    Uses a custom build context wrapping the official honeynet/conpot image
+    to fix Modbus binding to port 502.
     """

     name = "conpot"
     ports = [502, 161, 80]
-    default_image = "honeynet/conpot"
+    default_image = "build"

     def compose_fragment(self, decky_name: str, log_target: str | None = None, service_cfg: dict | None = None) -> dict:
+        env = {
+            "CONPOT_TEMPLATE": "default",
+            "NODE_NAME": decky_name,
+        }
+        if log_target:
+            env["LOG_TARGET"] = log_target
+
         return {
-            "image": "honeynet/conpot",
+            "build": {
+                "context": str(self.dockerfile_context()),
+                "args": {"BASE_IMAGE": "honeynet/conpot:latest"},
+            },
             "container_name": f"{decky_name}-conpot",
             "restart": "unless-stopped",
-            "environment": {
-                "CONPOT_TEMPLATE": "default",
-            },
+            "environment": env,
         }

     def dockerfile_context(self):
-        return None
+        return Path(__file__).parent.parent.parent / "templates" / "conpot"
59 decnet/services/https.py Normal file
@@ -0,0 +1,59 @@
import json
from pathlib import Path
from decnet.services.base import BaseService

TEMPLATES_DIR = Path(__file__).parent.parent.parent / "templates" / "https"


class HTTPSService(BaseService):
    name = "https"
    ports = [443]
    default_image = "build"

    def compose_fragment(
        self,
        decky_name: str,
        log_target: str | None = None,
        service_cfg: dict | None = None,
    ) -> dict:
        cfg = service_cfg or {}
        fragment: dict = {
            "build": {"context": str(TEMPLATES_DIR)},
            "container_name": f"{decky_name}-https",
            "restart": "unless-stopped",
            "environment": {
                "NODE_NAME": decky_name,
            },
        }
        if log_target:
            fragment["environment"]["LOG_TARGET"] = log_target

        # Optional persona overrides — only injected when explicitly set
        if "server_header" in cfg:
            fragment["environment"]["SERVER_HEADER"] = cfg["server_header"]
        if "response_code" in cfg:
            fragment["environment"]["RESPONSE_CODE"] = str(cfg["response_code"])
        if "fake_app" in cfg:
            fragment["environment"]["FAKE_APP"] = cfg["fake_app"]
        if "extra_headers" in cfg:
            val = cfg["extra_headers"]
            fragment["environment"]["EXTRA_HEADERS"] = (
                json.dumps(val) if isinstance(val, dict) else val
            )
        if "custom_body" in cfg:
            fragment["environment"]["CUSTOM_BODY"] = cfg["custom_body"]
        if "files" in cfg:
            files_path = str(Path(cfg["files"]).resolve())
            fragment["environment"]["FILES_DIR"] = "/opt/html_files"
            fragment.setdefault("volumes", []).append(f"{files_path}:/opt/html_files:ro")
        if "tls_cert" in cfg:
            fragment["environment"]["TLS_CERT"] = cfg["tls_cert"]
        if "tls_key" in cfg:
            fragment["environment"]["TLS_KEY"] = cfg["tls_key"]
        if "tls_cn" in cfg:
            fragment["environment"]["TLS_CN"] = cfg["tls_cn"]

        return fragment

    def dockerfile_context(self) -> Path | None:
        return TEMPLATES_DIR
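To make the persona plumbing above concrete, a sketch with invented values (the cfg keys are exactly the ones compose_fragment checks; the header strings, decky name, and log target are made up):

cfg = {
    "server_header": "Apache/2.4.41 (Ubuntu)",       # illustrative value
    "response_code": 403,                             # int, coerced to str below
    "extra_headers": {"X-Powered-By": "PHP/7.4.3"},   # dict, so JSON-encoded
}
fragment = HTTPSService().compose_fragment("web1", log_target="10.0.0.2:5514", service_cfg=cfg)
assert fragment["environment"]["RESPONSE_CODE"] == "403"
assert fragment["environment"]["EXTRA_HEADERS"] == '{"X-Powered-By": "PHP/7.4.3"}'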
@@ -1,46 +0,0 @@
from pathlib import Path

from decnet.services.base import BaseService

TEMPLATES_DIR = Path(__file__).parent.parent.parent / "templates" / "real_ssh"


class RealSSHService(BaseService):
    """
    Fully interactive OpenSSH server — no honeypot emulation.

    Used for the deaddeck (entry-point machine). Attackers get a real shell.
    Credentials are intentionally weak to invite exploitation.

    service_cfg keys:
        password    Root password (default: "admin")
        hostname    Override container hostname
    """

    name = "real_ssh"
    ports = [22]
    default_image = "build"

    def compose_fragment(
        self,
        decky_name: str,
        log_target: str | None = None,
        service_cfg: dict | None = None,
    ) -> dict:
        cfg = service_cfg or {}
        env: dict = {
            "SSH_ROOT_PASSWORD": cfg.get("password", "admin"),
        }
        if "hostname" in cfg:
            env["SSH_HOSTNAME"] = cfg["hostname"]

        return {
            "build": {"context": str(TEMPLATES_DIR)},
            "container_name": f"{decky_name}-real-ssh",
            "restart": "unless-stopped",
            "cap_add": ["NET_BIND_SERVICE"],
            "environment": env,
        }

    def dockerfile_context(self) -> Path:
        return TEMPLATES_DIR
43 decnet/services/smtp_relay.py Normal file
@@ -0,0 +1,43 @@
from pathlib import Path

from decnet.services.base import BaseService

# Reuses the same template as the smtp service — only difference is
# SMTP_OPEN_RELAY=1 in the environment, which enables the open relay persona.
_TEMPLATES_DIR = Path(__file__).parent.parent.parent / "templates" / "smtp"


class SMTPRelayService(BaseService):
    """SMTP open relay bait — accepts any RCPT TO and delivers messages."""

    name = "smtp_relay"
    ports = [25, 587]
    default_image = "build"

    def compose_fragment(
        self,
        decky_name: str,
        log_target: str | None = None,
        service_cfg: dict | None = None,
    ) -> dict:
        cfg = service_cfg or {}
        fragment: dict = {
            "build": {"context": str(_TEMPLATES_DIR)},
            "container_name": f"{decky_name}-smtp_relay",
            "restart": "unless-stopped",
            "cap_add": ["NET_BIND_SERVICE"],
            "environment": {
                "NODE_NAME": decky_name,
                "SMTP_OPEN_RELAY": "1",
            },
        }
        if log_target:
            fragment["environment"]["LOG_TARGET"] = log_target
        if "banner" in cfg:
            fragment["environment"]["SMTP_BANNER"] = cfg["banner"]
        if "mta" in cfg:
            fragment["environment"]["SMTP_MTA"] = cfg["mta"]
        return fragment

    def dockerfile_context(self) -> Path:
        return _TEMPLATES_DIR
41 decnet/services/sniffer.py Normal file
@@ -0,0 +1,41 @@
from pathlib import Path
from decnet.services.base import BaseService

TEMPLATES_DIR = Path(__file__).parent.parent.parent / "templates" / "sniffer"


class SnifferService(BaseService):
    """
    Passive network sniffer deployed alongside deckies on the MACVLAN.

    Captures TLS handshakes in promiscuous mode and extracts JA3/JA3S hashes
    plus connection metadata. Requires NET_RAW + NET_ADMIN capabilities.
    No inbound ports — purely passive.
    """

    name = "sniffer"
    ports: list[int] = []
    default_image = "build"
    fleet_singleton = True

    def compose_fragment(
        self,
        decky_name: str,
        log_target: str | None = None,
        service_cfg: dict | None = None,
    ) -> dict:
        fragment: dict = {
            "build": {"context": str(TEMPLATES_DIR)},
            "container_name": f"{decky_name}-sniffer",
            "restart": "unless-stopped",
            "cap_add": ["NET_RAW", "NET_ADMIN"],
            "environment": {
                "NODE_NAME": decky_name,
            },
        }
        if log_target:
            fragment["environment"]["LOG_TARGET"] = log_target
        return fragment

    def dockerfile_context(self) -> Path | None:
        return TEMPLATES_DIR
@@ -1,12 +1,26 @@
 from pathlib import Path

 from decnet.services.base import BaseService

-TEMPLATES_DIR = Path(__file__).parent.parent.parent / "templates" / "cowrie"
+TEMPLATES_DIR = Path(__file__).parent.parent.parent / "templates" / "ssh"


 class SSHService(BaseService):
+    """
+    Interactive OpenSSH server for general-purpose deckies.
+
+    Replaced Cowrie emulation with a real sshd so fingerprinting tools and
+    experienced attackers cannot trivially identify the honeypot. Auth events,
+    sudo activity, and interactive commands are all forwarded to stdout as
+    RFC 5424 via the rsyslog bridge baked into the image.
+
+    service_cfg keys:
+        password    Root password (default: "admin")
+        hostname    Override container hostname
+    """
+
     name = "ssh"
-    ports = [22, 2222]
+    ports = [22]
     default_image = "build"

     def compose_fragment(
@@ -17,28 +31,10 @@ class SSHService(BaseService):
     ) -> dict:
         cfg = service_cfg or {}
         env: dict = {
             "NODE_NAME": decky_name,
-            "COWRIE_HOSTNAME": decky_name,
-            "COWRIE_HONEYPOT_LISTEN_ENDPOINTS": "tcp:22:interface=0.0.0.0 tcp:2222:interface=0.0.0.0",
-            "COWRIE_SSH_LISTEN_ENDPOINTS": "tcp:22:interface=0.0.0.0 tcp:2222:interface=0.0.0.0",
+            "SSH_ROOT_PASSWORD": cfg.get("password", "admin"),
         }
-        if log_target:
-            host, port = log_target.rsplit(":", 1)
-            env["COWRIE_OUTPUT_TCP_ENABLED"] = "true"
-            env["COWRIE_OUTPUT_TCP_HOST"] = host
-            env["COWRIE_OUTPUT_TCP_PORT"] = port
-
-        # Optional persona overrides
-        if "kernel_version" in cfg:
-            env["COWRIE_HONEYPOT_KERNEL_VERSION"] = cfg["kernel_version"]
-        if "kernel_build_string" in cfg:
-            env["COWRIE_HONEYPOT_KERNEL_BUILD_STRING"] = cfg["kernel_build_string"]
-        if "hardware_platform" in cfg:
-            env["COWRIE_HONEYPOT_HARDWARE_PLATFORM"] = cfg["hardware_platform"]
-        if "ssh_banner" in cfg:
-            env["COWRIE_SSH_VERSION"] = cfg["ssh_banner"]
-        if "users" in cfg:
-            env["COWRIE_USERDB_ENTRIES"] = cfg["users"]
+        if "hostname" in cfg:
+            env["SSH_HOSTNAME"] = cfg["hostname"]

         return {
             "build": {"context": str(TEMPLATES_DIR)},
@@ -1,31 +1,47 @@
 from pathlib import Path

 from decnet.services.base import BaseService

+TEMPLATES_DIR = Path(__file__).parent.parent.parent / "templates" / "telnet"
+

 class TelnetService(BaseService):
+    """
+    Real telnetd using busybox telnetd + rsyslog logging pipeline.
+
+    Replaced Cowrie emulation (which also started an SSH daemon on port 22)
+    with a real busybox telnetd so only port 23 is exposed and auth events
+    are logged as RFC 5424 via the same rsyslog bridge used by the SSH service.
+
+    service_cfg keys:
+        password    Root password (default: "admin")
+        hostname    Override container hostname
+    """
+
     name = "telnet"
     ports = [23]
-    default_image = "cowrie/cowrie"
+    default_image = "build"

-    def compose_fragment(self, decky_name: str, log_target: str | None = None, service_cfg: dict | None = None) -> dict:
+    def compose_fragment(
+        self,
+        decky_name: str,
+        log_target: str | None = None,
+        service_cfg: dict | None = None,
+    ) -> dict:
+        cfg = service_cfg or {}
         env: dict = {
-            "COWRIE_HONEYPOT_HOSTNAME": decky_name,
-            "COWRIE_TELNET_ENABLED": "true",
-            "COWRIE_TELNET_LISTEN_ENDPOINTS": "tcp:23:interface=0.0.0.0",
-            # Disable SSH so this container is telnet-only
-            "COWRIE_SSH_ENABLED": "false",
+            "TELNET_ROOT_PASSWORD": cfg.get("password", "admin"),
         }
-        if log_target:
-            host, port = log_target.rsplit(":", 1)
-            env["COWRIE_OUTPUT_TCP_ENABLED"] = "true"
-            env["COWRIE_OUTPUT_TCP_HOST"] = host
-            env["COWRIE_OUTPUT_TCP_PORT"] = port
+        if "hostname" in cfg:
+            env["TELNET_HOSTNAME"] = cfg["hostname"]

         return {
-            "image": "cowrie/cowrie",
+            "build": {"context": str(TEMPLATES_DIR)},
             "container_name": f"{decky_name}-telnet",
             "restart": "unless-stopped",
+            "cap_add": ["NET_BIND_SERVICE"],
             "environment": env,
         }

-    def dockerfile_context(self):
-        return None
+    def dockerfile_context(self) -> Path:
+        return TEMPLATES_DIR
11 decnet/sniffer/__init__.py Normal file
@@ -0,0 +1,11 @@
"""
Fleet-wide MACVLAN sniffer microservice.

Runs as a single host-side background task (not per-decky) that sniffs
all TLS traffic on the MACVLAN interface, extracts fingerprints, and
feeds events into the existing log pipeline.
"""

from decnet.sniffer.worker import sniffer_worker

__all__ = ["sniffer_worker"]
1166 decnet/sniffer/fingerprint.py Normal file
File diff suppressed because it is too large
238 decnet/sniffer/p0f.py Normal file
@@ -0,0 +1,238 @@
"""
Passive OS fingerprinting (p0f-lite) for the DECNET sniffer.

Pure-Python lookup module. Given the values of an incoming TCP SYN packet
(TTL, window, MSS, window-scale, and TCP option ordering), returns a coarse
OS bucket (linux / windows / macos_ios / freebsd / openbsd / nmap / unknown)
plus derived hop distance and inferred initial TTL.

Rationale
---------
Full p0f v3 distinguishes several dozen OS/tool profiles by combining dozens
of low-level quirks (OLEN, WSIZE, EOL padding, PCLASS, quirks, payload class).
For DECNET we only need a coarse bucket — enough to tag an attacker as
"linux beacon" vs "windows interactive" vs "active scan". The curated
table below covers default stacks that dominate real-world attacker traffic.

References (public p0f v3 DB, nmap-os-db, and Mozilla OS Fingerprint table):
    https://github.com/p0f/p0f/blob/master/p0f.fp

No external dependencies.
"""

from __future__ import annotations

from decnet.telemetry import traced as _traced

# ─── TTL → initial TTL bucket ───────────────────────────────────────────────

# Common "hop 0" TTLs. Packets decrement TTL once per hop, so we round up
# the observed TTL to the nearest known starting value.
_TTL_BUCKETS: tuple[int, ...] = (32, 64, 128, 255)


def initial_ttl(ttl: int) -> int:
    """
    Round *ttl* up to the nearest known initial-TTL bucket.

    A SYN with TTL=59 was almost certainly emitted by a Linux/BSD host
    (initial 64) five hops away; TTL=120 by a Windows host (initial 128)
    eight hops away.
    """
    for bucket in _TTL_BUCKETS:
        if ttl <= bucket:
            return bucket
    return 255


def hop_distance(ttl: int) -> int:
    """
    Estimate hops between the attacker and the sniffer based on TTL.

    Upper-bounded at 64 (anything further has most likely been mangled
    by a misconfigured firewall or a TTL-spoofing NAT).
    """
    dist = initial_ttl(ttl) - ttl
    if dist < 0:
        return 0
    if dist > 64:
        return 64
    return dist


# ─── OS signature table (TTL bucket, window, MSS, wscale, option-order) ─────

# Each entry is a set of loose predicates. If all predicates match, the
# OS label is returned. First-match wins. `None` means "don't care".
#
# The option signatures use the short-code alphabet from
# decnet/prober/tcpfp.py :: _OPT_CODES (M=MSS, N=NOP, W=WScale,
# T=Timestamp, S=SAckOK, E=EOL).

_SIGNATURES: tuple[tuple[dict, str], ...] = (
    # ── nmap -sS / -sT default probe ───────────────────────────────────────
    # nmap crafts very distinctive SYNs: tiny window (1024/4096/etc.), full
    # option set including WScale=10 and SAckOK. Match these first so they
    # don't get misclassified as Linux.
    (
        {
            "ttl_bucket": 64,
            "window_in": {1024, 2048, 3072, 4096, 31337, 32768, 65535},
            "mss": 1460,
            "wscale": 10,
            "options": "M,W,T,S,S",
        },
        "nmap",
    ),
    (
        {
            "ttl_bucket": 64,
            "window_in": {1024, 2048, 3072, 4096, 31337, 32768, 65535},
            "options_starts_with": "M,W,T,S",
        },
        "nmap",
    ),
    # ── macOS / iOS default SYN (match before Linux — shares TTL 64) ──────
    # TTL 64, window 65535, MSS 1460, WScale 6, specific option order
    # M,N,W,N,N,T,S,E (Darwin signature with EOL padding).
    (
        {
            "ttl_bucket": 64,
            "window": 65535,
            "wscale": 6,
            "options": "M,N,W,N,N,T,S,E",
        },
        "macos_ios",
    ),
    (
        {
            "ttl_bucket": 64,
            "window_in": {65535},
            "wscale_in": {5, 6},
            "has_timestamps": True,
            "options_ends_with": "E",
        },
        "macos_ios",
    ),
    # ── FreeBSD default SYN (TTL 64, no EOL) ───────────────────────────────
    (
        {
            "ttl_bucket": 64,
            "window": 65535,
            "wscale": 6,
            "has_sack": True,
            "has_timestamps": True,
            "options_no_eol": True,
        },
        "freebsd",
    ),
    # ── Linux (kernel 3.x – 6.x) default SYN ───────────────────────────────
    # TTL 64, window 29200 / 64240 / 65535, MSS 1460, WScale 7, full options.
    (
        {
            "ttl_bucket": 64,
            "window_min": 5000,
            "wscale_in": {6, 7, 8, 9, 10, 11, 12, 13, 14},
            "has_sack": True,
            "has_timestamps": True,
        },
        "linux",
    ),
    # ── OpenBSD default SYN ─────────────────────────────────────────────────
    # TTL 64, window 16384, WScale 3-6, MSS 1460
    (
        {
            "ttl_bucket": 64,
            "window_in": {16384, 16960},
            "wscale_in": {3, 4, 5, 6},
        },
        "openbsd",
    ),
    # ── Windows 10/11/Server default SYN ────────────────────────────────────
    # TTL 128, window 64240/65535, MSS 1460, WScale 8, SACK+TS
    (
        {
            "ttl_bucket": 128,
            "window_min": 8192,
            "wscale_in": {2, 6, 7, 8},
            "has_sack": True,
        },
        "windows",
    ),
    # ── Windows 7/XP (legacy) ───────────────────────────────────────────────
    (
        {
            "ttl_bucket": 128,
            "window_in": {8192, 16384, 65535},
        },
        "windows",
    ),
    # ── Embedded / Cisco / network gear ─────────────────────────────────────
    (
        {
            "ttl_bucket": 255,
        },
        "embedded",
    ),
)


def _match_signature(
    sig: dict,
    ttl: int,
    window: int,
    mss: int,
    wscale: int | None,
    options_sig: str,
) -> bool:
    """Evaluate every predicate in *sig* against the observed values."""
    tb = initial_ttl(ttl)
    if "ttl_bucket" in sig and sig["ttl_bucket"] != tb:
        return False
    if "window" in sig and sig["window"] != window:
        return False
    if "window_in" in sig and window not in sig["window_in"]:
        return False
    if "window_min" in sig and window < sig["window_min"]:
        return False
    if "mss" in sig and sig["mss"] != mss:
        return False
    if "wscale" in sig and sig["wscale"] != wscale:
        return False
    if "wscale_in" in sig and wscale not in sig["wscale_in"]:
        return False
    if "has_sack" in sig:
        if sig["has_sack"] != ("S" in options_sig):
            return False
    if "has_timestamps" in sig:
        if sig["has_timestamps"] != ("T" in options_sig):
            return False
    if "options" in sig and sig["options"] != options_sig:
        return False
    if "options_starts_with" in sig and not options_sig.startswith(sig["options_starts_with"]):
        return False
    if "options_ends_with" in sig and not options_sig.endswith(sig["options_ends_with"]):
        return False
    if "options_no_eol" in sig and sig["options_no_eol"] and "E" in options_sig:
        return False
    return True


@_traced("sniffer.p0f_guess_os")
def guess_os(
    ttl: int,
    window: int,
    mss: int = 0,
    wscale: int | None = None,
    options_sig: str = "",
) -> str:
    """
    Return a coarse OS bucket for the given SYN characteristics.

    One of: "linux", "windows", "macos_ios", "freebsd", "openbsd",
    "embedded", "nmap", "unknown".
    """
    for sig, label in _SIGNATURES:
        if _match_signature(sig, ttl, window, mss, wscale, options_sig):
            return label
    return "unknown"
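Two worked lookups against the table above, with values taken from the signature comments (the option strings are plausible captures, not recorded traffic):

# Default Linux SYN seen five hops out: TTL 59 rounds up to the 64 bucket,
# window 64240 clears window_min=5000, wscale 7 is in range, and the option
# string carries both S (SAckOK) and T (Timestamp).
assert guess_os(ttl=59, window=64240, mss=1460, wscale=7, options_sig="M,S,T,N,W") == "linux"
assert hop_distance(59) == 5

# nmap -sS probe: tiny window 1024 plus wscale 10 and the exact "M,W,T,S,S"
# option order hits the first nmap entry before Linux is ever consulted.
assert guess_os(ttl=61, window=1024, mss=1460, wscale=10, options_sig="M,W,T,S,S") == "nmap"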
71 decnet/sniffer/syslog.py Normal file
@@ -0,0 +1,71 @@
"""
RFC 5424 syslog formatting and log-file writing for the fleet sniffer.

Reuses the same wire format as templates/sniffer/decnet_logging.py so the
existing collector parser and ingester can consume events without changes.
"""

import json
from datetime import datetime, timezone
from pathlib import Path
from typing import Any

from decnet.collector.worker import parse_rfc5424
from decnet.telemetry import traced as _traced

# ─── Constants (must match templates/sniffer/decnet_logging.py) ──────────────

_FACILITY_LOCAL0 = 16
_SD_ID = "decnet@55555"
_NILVALUE = "-"

SEVERITY_INFO = 6
SEVERITY_WARNING = 4

_MAX_HOSTNAME = 255
_MAX_APPNAME = 48
_MAX_MSGID = 32


# ─── Formatter ───────────────────────────────────────────────────────────────

def _sd_escape(value: str) -> str:
    return value.replace("\\", "\\\\").replace('"', '\\"').replace("]", "\\]")


def _sd_element(fields: dict[str, Any]) -> str:
    if not fields:
        return _NILVALUE
    params = " ".join(f'{k}="{_sd_escape(str(v))}"' for k, v in fields.items())
    return f"[{_SD_ID} {params}]"


def syslog_line(
    service: str,
    hostname: str,
    event_type: str,
    severity: int = SEVERITY_INFO,
    msg: str | None = None,
    **fields: Any,
) -> str:
    pri = f"<{_FACILITY_LOCAL0 * 8 + severity}>"
    ts = datetime.now(timezone.utc).isoformat()
    host = (hostname or _NILVALUE)[:_MAX_HOSTNAME]
    appname = (service or _NILVALUE)[:_MAX_APPNAME]
    msgid = (event_type or _NILVALUE)[:_MAX_MSGID]
    sd = _sd_element(fields)
    message = f" {msg}" if msg else ""
    return f"{pri}1 {ts} {host} {appname} {_NILVALUE} {msgid} {sd}{message}"


@_traced("sniffer.write_event")
def write_event(line: str, log_path: Path, json_path: Path) -> None:
    """Append a syslog line to the raw log and its parsed JSON to the json log."""
    with open(log_path, "a", encoding="utf-8") as lf:
        lf.write(line + "\n")
        lf.flush()
    parsed = parse_rfc5424(line)
    if parsed:
        with open(json_path, "a", encoding="utf-8") as jf:
            jf.write(json.dumps(parsed) + "\n")
            jf.flush()
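For reference, a call like the following (hostname and JA3 value invented) produces the line shown in the comment; the PRI is facility local0 (16) * 8 + severity 6 = 134:

line = syslog_line("sniffer", "decky-web1", "tls_fingerprint", ja3="771,4865-4866")
# '<134>1 2025-01-01T00:00:00+00:00 decky-web1 sniffer - tls_fingerprint [decnet@55555 ja3="771,4865-4866"]'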
160 decnet/sniffer/worker.py Normal file
@@ -0,0 +1,160 @@
"""
Fleet-wide MACVLAN sniffer worker.

Runs as a single host-side async background task that sniffs all TLS
traffic on the MACVLAN host interface. Maps packets to deckies by IP
and feeds fingerprint events into the existing log pipeline.

Modeled on decnet.collector.worker — same lifecycle pattern.
Fault-isolated: any exception is logged and the worker exits cleanly.
The API never depends on this worker being alive.
"""

import asyncio
import os
import subprocess  # nosec B404 — needed for interface checks
import threading
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

from decnet.logging import get_logger
from decnet.network import HOST_MACVLAN_IFACE
from decnet.sniffer.fingerprint import SnifferEngine
from decnet.sniffer.syslog import write_event
from decnet.telemetry import traced as _traced

logger = get_logger("sniffer")

_IP_MAP_REFRESH_INTERVAL: float = 60.0


def _load_ip_to_decky() -> dict[str, str]:
    """Build IP → decky-name mapping from decnet-state.json."""
    from decnet.config import load_state
    state = load_state()
    if state is None:
        return {}
    config, _ = state
    mapping: dict[str, str] = {}
    for decky in config.deckies:
        mapping[decky.ip] = decky.name
    return mapping


def _interface_exists(iface: str) -> bool:
    """Check if a network interface exists on this host."""
    try:
        result = subprocess.run(  # nosec B603 B607 — hardcoded args
            ["ip", "link", "show", iface],
            capture_output=True, text=True, check=False,
        )
        return result.returncode == 0
    except Exception:
        return False


@_traced("sniffer.sniff_loop")
def _sniff_loop(
    interface: str,
    log_path: Path,
    json_path: Path,
    stop_event: threading.Event,
) -> None:
    """Blocking sniff loop. Runs in a dedicated thread via loop.run_in_executor."""
    try:
        from scapy.sendrecv import sniff
    except ImportError:
        logger.error("scapy not installed — sniffer cannot start")
        return

    ip_map = _load_ip_to_decky()
    if not ip_map:
        logger.warning("sniffer: no deckies in state — nothing to sniff")
        return

    def _write_fn(line: str) -> None:
        write_event(line, log_path, json_path)

    engine = SnifferEngine(ip_to_decky=ip_map, write_fn=_write_fn)

    # Periodically refresh IP map in a background daemon thread
    def _refresh_loop() -> None:
        while not stop_event.is_set():
            stop_event.wait(_IP_MAP_REFRESH_INTERVAL)
            if stop_event.is_set():
                break
            try:
                new_map = _load_ip_to_decky()
                if new_map:
                    engine.update_ip_map(new_map)
            except Exception as exc:
                logger.debug("sniffer: ip map refresh failed: %s", exc)

    refresh_thread = threading.Thread(target=_refresh_loop, daemon=True)
    refresh_thread.start()

    logger.info("sniffer: sniffing on interface=%s deckies=%d", interface, len(ip_map))

    try:
        sniff(
            iface=interface,
            filter="tcp",
            prn=engine.on_packet,
            store=False,
            stop_filter=lambda pkt: stop_event.is_set(),
        )
    except Exception as exc:
        logger.error("sniffer: scapy sniff exited: %s", exc)
    finally:
        stop_event.set()
        logger.info("sniffer: sniff loop ended")


@_traced("sniffer.worker")
async def sniffer_worker(log_file: str) -> None:
    """
    Async entry point — started as asyncio.create_task in the API lifespan.

    Fully fault-isolated: catches all exceptions, logs them, and returns
    cleanly. The API continues running regardless of sniffer state.
    """
    try:
        interface = os.environ.get("DECNET_SNIFFER_IFACE", HOST_MACVLAN_IFACE)

        if not _interface_exists(interface):
            logger.warning(
                "sniffer: interface %s not found — sniffer disabled "
                "(fleet may not be deployed yet)", interface,
            )
            return

        log_path = Path(log_file)
        json_path = log_path.with_suffix(".json")
        log_path.parent.mkdir(parents=True, exist_ok=True)

        stop_event = threading.Event()

        # Dedicated thread pool so the long-running sniff loop doesn't
        # occupy a slot in the default asyncio executor.
        sniffer_pool = ThreadPoolExecutor(
            max_workers=2, thread_name_prefix="decnet-sniffer",
        )

        try:
            loop = asyncio.get_running_loop()
            await loop.run_in_executor(
                sniffer_pool, _sniff_loop,
                interface, log_path, json_path, stop_event,
            )
        except asyncio.CancelledError:
            logger.info("sniffer: shutdown requested")
            stop_event.set()
            sniffer_pool.shutdown(wait=False)
            raise
        finally:
            sniffer_pool.shutdown(wait=False)

    except asyncio.CancelledError:
        raise
    except Exception as exc:
        logger.error("sniffer: worker failed — API continues without sniffing: %s", exc)
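A standalone run of the worker, as a rough sketch (requires root for packet capture; the interface name and log path are invented, and DECNET_SNIFFER_IFACE is the override the code above reads):

import asyncio
import os

os.environ["DECNET_SNIFFER_IFACE"] = "macvlan0"  # hypothetical interface name

from decnet.sniffer import sniffer_worker

# Exits cleanly if the interface is missing or scapy is not installed.
asyncio.run(sniffer_worker("/var/log/decnet/ingest.log"))  # path illustrative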
308 decnet/telemetry.py Normal file
@@ -0,0 +1,308 @@
"""
DECNET OpenTelemetry tracing integration.

Controlled entirely by ``DECNET_DEVELOPER_TRACING``. When disabled (the
default), every public export is a zero-cost no-op: no OTEL SDK imports, no
monkey-patching, no middleware, and ``@traced`` returns the original function
object unwrapped.
"""

from __future__ import annotations

import asyncio
import functools
import inspect
from typing import Any, Callable, TypeVar, overload

from decnet.env import DECNET_DEVELOPER_TRACING, DECNET_OTEL_ENDPOINT
from decnet.logging import get_logger

log = get_logger("api")

F = TypeVar("F", bound=Callable[..., Any])

_ENABLED: bool = DECNET_DEVELOPER_TRACING

# ---------------------------------------------------------------------------
# Lazy OTEL imports — only when tracing is enabled
# ---------------------------------------------------------------------------

_tracer_provider: Any = None  # TracerProvider | None


def _init_provider() -> None:
    """Initialise the global TracerProvider (called once from setup_tracing)."""
    global _tracer_provider

    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor
    from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
    from opentelemetry.sdk.resources import Resource

    resource = Resource.create({
        "service.name": "decnet",
        "service.version": "0.2.0",
    })
    _tracer_provider = TracerProvider(resource=resource)
    exporter = OTLPSpanExporter(endpoint=DECNET_OTEL_ENDPOINT, insecure=True)
    _tracer_provider.add_span_processor(BatchSpanProcessor(exporter))
    trace.set_tracer_provider(_tracer_provider)
    log.info("OTEL tracing enabled endpoint=%s", DECNET_OTEL_ENDPOINT)


def setup_tracing(app: Any) -> None:
    """Configure the OTEL TracerProvider and instrument FastAPI.

    Call once from the FastAPI lifespan, after DB init. No-op when
    ``DECNET_DEVELOPER_TRACING`` is not ``"true"``.
    """
    if not _ENABLED:
        return

    try:
        _init_provider()
        from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
        FastAPIInstrumentor.instrument_app(app)
        from decnet.logging import enable_trace_context
        enable_trace_context()
        log.info("FastAPI auto-instrumentation active, log-trace correlation enabled")
    except Exception as exc:
        log.warning("OTEL setup failed — continuing without tracing: %s", exc)


def shutdown_tracing() -> None:
    """Flush and shut down the tracer provider. Safe to call when disabled."""
    if _tracer_provider is not None:
        try:
            _tracer_provider.shutdown()
        except Exception:  # nosec B110 — best-effort tracer shutdown
            pass


# ---------------------------------------------------------------------------
# get_tracer — mirrors get_logger(component) pattern
# ---------------------------------------------------------------------------

class _NoOpSpan:
    """Minimal stand-in so ``with get_tracer(...).start_as_current_span(...)``
    works when tracing is disabled."""

    def set_attribute(self, key: str, value: Any) -> None:
        pass

    def set_status(self, *args: Any, **kwargs: Any) -> None:
        pass

    def record_exception(self, exc: BaseException) -> None:
        pass

    def __enter__(self) -> "_NoOpSpan":
        return self

    def __exit__(self, *args: Any) -> None:
        pass


class _NoOpTracer:
    """Returned by ``get_tracer()`` when tracing is disabled."""

    def start_as_current_span(self, name: str, **kwargs: Any) -> _NoOpSpan:
        return _NoOpSpan()

    def start_span(self, name: str, **kwargs: Any) -> _NoOpSpan:
        return _NoOpSpan()


_tracers: dict[str, Any] = {}


def get_tracer(component: str) -> Any:
    """Return an OTEL Tracer (or a no-op stand-in) for *component*."""
    if not _ENABLED:
        return _NoOpTracer()

    if component not in _tracers:
        from opentelemetry import trace
        _tracers[component] = trace.get_tracer(f"decnet.{component}")
    return _tracers[component]


# ---------------------------------------------------------------------------
# @traced decorator — async + sync, zero overhead when disabled
# ---------------------------------------------------------------------------

@overload
def traced(fn: F) -> F: ...
@overload
def traced(name: str) -> Callable[[F], F]: ...


def traced(fn: Any = None, *, name: str | None = None) -> Any:
    """Decorator that wraps a function in an OTEL span.

    Usage::

        @traced                      # span name = "module.func"
        async def my_worker(): ...

        @traced("custom.span.name")  # explicit span name
        def my_sync_func(): ...

    When ``DECNET_DEVELOPER_TRACING`` is disabled the original function is
    returned **unwrapped** — zero overhead on every call.
    """
    # Handle @traced("name") vs @traced vs @traced(name="name")
    if fn is None and name is not None:
        # Called as @traced(name="name")
        def decorator(f: F) -> F:
            return _wrap(f, name)
        return decorator
    if fn is not None and isinstance(fn, str):
        # Called as @traced("name") — fn is actually the name string
        span_name = fn

        def decorator(f: F) -> F:
            return _wrap(f, span_name)
        return decorator
    if fn is not None and callable(fn):
        # Called as @traced (no arguments)
        return _wrap(fn, None)

    # Fallback: @traced() with no args
    def decorator(f: F) -> F:
        return _wrap(f, name)
    return decorator


def _wrap(fn: F, span_name: str | None) -> F:
    """Wrap *fn* in a span. Returns *fn* unchanged when tracing is off."""
    if not _ENABLED:
        return fn

    resolved_name = span_name or f"{fn.__module__.rsplit('.', 1)[-1]}.{fn.__qualname__}"

    if inspect.iscoroutinefunction(fn):
        @functools.wraps(fn)
        async def async_wrapper(*args: Any, **kwargs: Any) -> Any:
            tracer = get_tracer(fn.__module__.split(".")[-1])
            with tracer.start_as_current_span(resolved_name) as span:
                try:
                    result = await fn(*args, **kwargs)
                    return result
                except Exception as exc:
                    span.record_exception(exc)
                    raise
        return async_wrapper  # type: ignore[return-value]
    else:
        @functools.wraps(fn)
        def sync_wrapper(*args: Any, **kwargs: Any) -> Any:
            tracer = get_tracer(fn.__module__.split(".")[-1])
            with tracer.start_as_current_span(resolved_name) as span:
                try:
                    result = fn(*args, **kwargs)
                    return result
                except Exception as exc:
                    span.record_exception(exc)
                    raise
        return sync_wrapper  # type: ignore[return-value]


# ---------------------------------------------------------------------------
# TracedRepository — proxy wrapper for BaseRepository
# ---------------------------------------------------------------------------

def wrap_repository(repo: Any) -> Any:
    """Wrap *repo* in a dynamic tracing proxy. Returns *repo* unchanged when disabled.

    Instead of mirroring every method signature (which drifts when concrete
    repos add extra kwargs beyond the ABC), this proxy looks attributes up on
    the inner repo lazily and wraps every async method in a span via
    ``__getattr__``. Sync attributes are forwarded directly.
    """
    if not _ENABLED:
        return repo

    tracer = get_tracer("db")

    class TracedRepository:
        """Dynamic proxy — wraps every async method call in a DB span."""

        def __init__(self, inner: Any) -> None:
            self._inner = inner

        def __getattr__(self, name: str) -> Any:
            attr = getattr(self._inner, name)

            if asyncio.iscoroutinefunction(attr):
                @functools.wraps(attr)
                async def _traced_method(*args: Any, **kwargs: Any) -> Any:
                    with tracer.start_as_current_span(f"db.{name}") as span:
                        try:
                            return await attr(*args, **kwargs)
                        except Exception as exc:
                            span.record_exception(exc)
                            raise
                return _traced_method

            return attr

    return TracedRepository(repo)


# ---------------------------------------------------------------------------
# Cross-stage trace context propagation
# ---------------------------------------------------------------------------
# The DECNET pipeline is decoupled via JSON files:
#     collector -> .json file -> ingester -> DB -> profiler
#
# To show the full journey of an event in Jaeger, we embed W3C trace context
# into the JSON records. The collector injects it; the ingester extracts it
# and continues the trace as a child span.

def inject_context(record: dict[str, Any]) -> None:
    """Inject current OTEL trace context into *record* under ``_trace``.

    No-op when tracing is disabled. The ``_trace`` key is stripped by the
    ingester after extraction — it never reaches the DB.
    """
    if not _ENABLED:
        return
    try:
        from opentelemetry.propagate import inject
        carrier: dict[str, str] = {}
        inject(carrier)
        if carrier:
            record["_trace"] = carrier
    except Exception:  # nosec B110 — trace injection is optional
        pass


def extract_context(record: dict[str, Any]) -> Any:
    """Extract OTEL trace context from *record* and return it.

    Returns ``None`` when tracing is disabled or no context is present.
    Removes the ``_trace`` key from the record so it doesn't leak into the DB.
    """
    if not _ENABLED:
        record.pop("_trace", None)
        return None
    try:
        carrier = record.pop("_trace", None)
        if not carrier:
            return None
        from opentelemetry.propagate import extract
        return extract(carrier)
    except Exception:
        return None


def start_span_with_context(tracer: Any, name: str, context: Any = None) -> Any:
    """Start a span, optionally as a child of an extracted context.

    Returns a context manager span. When *context* is ``None``, creates a
    root span (normal behavior).
    """
    if not _ENABLED:
        return _NoOpSpan()
    if context is not None:
        return tracer.start_as_current_span(name, context=context)
    return tracer.start_as_current_span(name)
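A short usage sketch of the decorator and tracer API defined above (span names invented):

from decnet.telemetry import get_tracer, traced


@traced("demo.expensive_call")  # explicit span name
def expensive_call(n: int) -> int:
    return sum(i * i for i in range(n))


# With DECNET_DEVELOPER_TRACING off, expensive_call is the original function
# object (no wrapper) and the span below is a _NoOpSpan.
with get_tracer("demo").start_as_current_span("demo.block") as span:
    span.set_attribute("n", 10_000)
    expensive_call(10_000)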
214 decnet/web/api.py Normal file
@@ -0,0 +1,214 @@
import asyncio
import os
from contextlib import asynccontextmanager
from typing import Any, AsyncGenerator, Optional

from fastapi import FastAPI, Request, status
from fastapi.exceptions import RequestValidationError
from fastapi.responses import JSONResponse
from pydantic import ValidationError
from fastapi.middleware.cors import CORSMiddleware

from decnet.env import DECNET_CORS_ORIGINS, DECNET_DEVELOPER, DECNET_EMBED_PROFILER, DECNET_INGEST_LOG_FILE
from decnet.logging import get_logger
from decnet.web.dependencies import repo
from decnet.collector import log_collector_worker
from decnet.web.ingester import log_ingestion_worker
from decnet.profiler import attacker_profile_worker
from decnet.web.router import api_router

log = get_logger("api")
ingestion_task: Optional[asyncio.Task[Any]] = None
collector_task: Optional[asyncio.Task[Any]] = None
attacker_task: Optional[asyncio.Task[Any]] = None
sniffer_task: Optional[asyncio.Task[Any]] = None


def get_background_tasks() -> dict[str, Optional[asyncio.Task[Any]]]:
    """Expose background task handles for the health endpoint."""
    return {
        "ingestion_worker": ingestion_task,
        "collector_worker": collector_task,
        "attacker_worker": attacker_task,
        "sniffer_worker": sniffer_task,
    }


@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
    global ingestion_task, collector_task, attacker_task, sniffer_task

    log.info("API startup initialising database")
    for attempt in range(1, 6):
        try:
            await repo.initialize()
            log.debug("API startup DB initialised attempt=%d", attempt)
            break
        except Exception as exc:
            log.warning("DB init attempt %d/5 failed: %s", attempt, exc)
            if attempt == 5:
                log.error("DB failed to initialize after 5 attempts — startup may be degraded")
            await asyncio.sleep(0.5)

    # Conditionally enable OpenTelemetry tracing
    from decnet.telemetry import setup_tracing
    setup_tracing(app)

    # Start background tasks only if not in contract test mode
    if os.environ.get("DECNET_CONTRACT_TEST") != "true":
        # Start background ingestion task
        if ingestion_task is None or ingestion_task.done():
            ingestion_task = asyncio.create_task(log_ingestion_worker(repo))
            log.debug("API startup ingest worker started")

        # Start Docker log collector (writes to log file; ingester reads from it)
        _log_file = os.environ.get("DECNET_INGEST_LOG_FILE", DECNET_INGEST_LOG_FILE)
        if _log_file and (collector_task is None or collector_task.done()):
            collector_task = asyncio.create_task(log_collector_worker(_log_file))
            log.debug("API startup collector worker started log_file=%s", _log_file)
        elif not _log_file:
            log.warning("DECNET_INGEST_LOG_FILE not set — Docker log collection disabled.")

        # Start attacker profile rebuild worker only when explicitly requested.
        # Default is OFF because `decnet deploy` always starts a standalone
        # `decnet profiler --daemon` process. Running both against the same
        # DB cursor causes events to be skipped or double-processed.
        if DECNET_EMBED_PROFILER:
            if attacker_task is None or attacker_task.done():
                attacker_task = asyncio.create_task(attacker_profile_worker(repo))
                log.info("API startup: embedded profiler started (DECNET_EMBED_PROFILER=true)")
        else:
            log.debug("API startup: profiler not embedded — expecting standalone daemon")

        # Start fleet-wide MACVLAN sniffer (fault-isolated — never crashes the API)
        try:
            from decnet.sniffer import sniffer_worker
            if sniffer_task is None or sniffer_task.done():
                sniffer_task = asyncio.create_task(sniffer_worker(_log_file))
                log.debug("API startup sniffer worker started")
        except Exception as exc:
            log.warning("Sniffer worker failed to start — API continues without sniffing: %s", exc)
    else:
        log.info("Contract Test Mode: skipping background worker startup")

    yield

    log.info("API shutdown cancelling background tasks")
    for task in (ingestion_task, collector_task, attacker_task, sniffer_task):
        if task and not task.done():
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass
            except Exception as exc:
                log.warning("Task shutdown error: %s", exc)
    from decnet.telemetry import shutdown_tracing
    shutdown_tracing()
    log.info("API shutdown complete")


app: FastAPI = FastAPI(
    title="DECNET Web Dashboard API",
    version="1.0.0",
    lifespan=lifespan,
    docs_url="/docs" if DECNET_DEVELOPER else None,
    redoc_url="/redoc" if DECNET_DEVELOPER else None,
    openapi_url="/openapi.json" if DECNET_DEVELOPER else None,
)

app.add_middleware(
    CORSMiddleware,
    allow_origins=DECNET_CORS_ORIGINS,
    allow_credentials=False,
    allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
    allow_headers=["Authorization", "Content-Type", "Last-Event-ID"],
)

# Include the modular API router
app.include_router(api_router, prefix="/api/v1")


@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request: Request, exc: RequestValidationError) -> JSONResponse:
    """
    Handle validation errors with targeted status codes to satisfy contract tests.

    Tiered prioritization:
    1. 400 Bad Request: for structural schema violations (extra fields, wrong types,
       missing fields). This satisfies Schemathesis 'Negative Data' checks.
    2. 409 Conflict: for semantic/structural INI content violations in valid strings.
       This satisfies Schemathesis 'Positive Data' checks.
    3. 422 Unprocessable: default for other validation edge cases.
    """
    errors = exc.errors()

    # 1. Prioritize structural format violations (Negative Data).
    # This catches: sending an object instead of a string, extra unknown
    # properties, or empty-string length violations.
    is_structural_violation = any(
        err.get("type") in ("type_error", "extra_forbidden", "missing", "string_too_short", "string_type") or
        "must be a string" in err.get("msg", "")  # Catch our validator's type check
        for err in errors
    )
    if is_structural_violation:
        return JSONResponse(
            status_code=status.HTTP_400_BAD_REQUEST,
            content={"detail": "Bad Request: Schema structural violation (wrong type, extra fields, or invalid length)."},
        )

    # 2. Targeted INI error rejections.
    # We distinguish between different failure modes for precise contract compliance.

    # Empty INI content (valid string but semantically empty)
    is_ini_empty = any("INI content is empty" in err.get("msg", "") for err in errors)
    if is_ini_empty:
        return JSONResponse(
            status_code=status.HTTP_409_CONFLICT,
            content={"detail": "Configuration conflict: INI content is empty."},
        )

    # Invalid characters/syntax (valid-length string but invalid INI syntax).
    # Mapping to 409 for Positive Data compliance.
    is_invalid_characters = any("Invalid INI format" in err.get("msg", "") for err in errors)
    if is_invalid_characters:
        return JSONResponse(
            status_code=status.HTTP_409_CONFLICT,
            content={"detail": "Configuration conflict: INI syntax or characters are invalid."},
        )

    # Logical invalidity (valid string, valid syntax, but missing required DECNET logic like sections)
    is_ini_invalid_logic = any("at least one section" in err.get("msg", "") for err in errors)
    if is_ini_invalid_logic:
        return JSONResponse(
            status_code=status.HTTP_409_CONFLICT,
            content={"detail": "Invalid INI config structure: No decky sections found."},
        )

    # Developer Mode fallback
    if DECNET_DEVELOPER:
        from fastapi.exception_handlers import request_validation_exception_handler
        return await request_validation_exception_handler(request, exc)

    # Production/strict mode fallback: sanitize remaining 422s
    message = "Invalid request parameters"
    if "/deckies/deploy" in request.url.path:
        message = "Invalid INI config"

    return JSONResponse(
        status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
        content={"detail": message},
    )


@app.exception_handler(ValidationError)
async def pydantic_validation_exception_handler(request: Request, exc: ValidationError) -> JSONResponse:
    """
    Handle Pydantic errors that occur during manual model instantiation (e.g. state hydration).
    Prevents 500 errors when the database contains inconsistent or outdated schema data.
    """
    log.error("Internal Pydantic validation error: %s", exc)
    return JSONResponse(
        status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
        content={
            "detail": "Internal data consistency error",
            "type": "internal_validation_error"
        },
    )
38 decnet/web/auth.py Normal file
@@ -0,0 +1,38 @@
from datetime import datetime, timedelta, timezone
from typing import Optional, Any
import jwt
import bcrypt

from decnet.env import DECNET_JWT_SECRET

SECRET_KEY: str = DECNET_JWT_SECRET
ALGORITHM: str = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES: int = 1440


def verify_password(plain_password: str, hashed_password: str) -> bool:
    return bcrypt.checkpw(
        plain_password.encode("utf-8")[:72],
        hashed_password.encode("utf-8")
    )


def get_password_hash(password: str) -> str:
    # Use a cost factor of 12 (default for passlib/bcrypt)
    _salt: bytes = bcrypt.gensalt(rounds=12)
    _hashed: bytes = bcrypt.hashpw(password.encode("utf-8")[:72], _salt)
    return _hashed.decode("utf-8")


def create_access_token(data: dict[str, Any], expires_delta: Optional[timedelta] = None) -> str:
    _to_encode: dict[str, Any] = data.copy()
    _expire: datetime
    if expires_delta:
        _expire = datetime.now(timezone.utc) + expires_delta
    else:
        _expire = datetime.now(timezone.utc) + timedelta(minutes=15)

    _to_encode.update({"exp": _expire})
    _to_encode.update({"iat": datetime.now(timezone.utc)})
    _encoded_jwt: str = jwt.encode(_to_encode, SECRET_KEY, algorithm=ALGORITHM)
    return _encoded_jwt
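A hedged round trip of the helpers above (the "sub" claim and 30-minute lifetime are illustrative; PyJWT's decode rejects expired tokens automatically because "exp" is set):

from datetime import timedelta

import jwt  # PyJWT, the same library the module imports

hashed = get_password_hash("hunter2")  # bcrypt, cost factor 12
assert verify_password("hunter2", hashed)

token = create_access_token({"sub": "admin"}, expires_delta=timedelta(minutes=30))
claims = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
assert claims["sub"] == "admin"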
33 decnet/web/db/factory.py Normal file
@@ -0,0 +1,33 @@
"""
Repository factory — selects a :class:`BaseRepository` implementation based on
``DECNET_DB_TYPE`` (``sqlite`` or ``mysql``).
"""
from __future__ import annotations

import os
from typing import Any

from decnet.web.db.repository import BaseRepository


def get_repository(**kwargs: Any) -> BaseRepository:
    """Instantiate the repository implementation selected by ``DECNET_DB_TYPE``.

    Keyword arguments are forwarded to the concrete implementation:

    * SQLite accepts ``db_path``.
    * MySQL accepts ``url`` and engine tuning knobs (``pool_size``, …).
    """
    db_type = os.environ.get("DECNET_DB_TYPE", "sqlite").lower()

    if db_type == "sqlite":
        from decnet.web.db.sqlite.repository import SQLiteRepository
        repo = SQLiteRepository(**kwargs)
    elif db_type == "mysql":
        from decnet.web.db.mysql.repository import MySQLRepository
        repo = MySQLRepository(**kwargs)
    else:
        raise ValueError(f"Unsupported database type: {db_type}")

    from decnet.telemetry import wrap_repository
    return wrap_repository(repo)
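A usage sketch of the env-var switch (paths and credentials are illustrative; note SQLAlchemy engines connect lazily, so no server is contacted until the first query):

import os

from decnet.web.db.factory import get_repository

# Default (no DECNET_DB_TYPE set) resolves to the SQLite backend;
# db_path is forwarded to SQLiteRepository.
os.environ.pop("DECNET_DB_TYPE", None)
repo = get_repository(db_path="/tmp/decnet-dev.db")

# Switch to MySQL via the env var; engine knobs pass through to get_async_engine.
os.environ["DECNET_DB_TYPE"] = "mysql"
repo = get_repository(url="mysql+aiomysql://decnet:pw@localhost:3306/decnet", pool_size=5)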
233  decnet/web/db/models.py  Normal file
@@ -0,0 +1,233 @@
from datetime import datetime, timezone
from typing import Literal, Optional, Any, List, Annotated

from sqlalchemy import Column, Text
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlmodel import SQLModel, Field
from pydantic import BaseModel, ConfigDict, Field as PydanticField, BeforeValidator

from decnet.models import IniContent

# Use on columns that accumulate over an attacker's lifetime (commands,
# fingerprints, state blobs). TEXT on MySQL caps at 64 KiB; MEDIUMTEXT
# stretches to 16 MiB. SQLite has no fixed-width text types, so Text()
# stays unchanged there.
_BIG_TEXT = Text().with_variant(MEDIUMTEXT(), "mysql")


def _normalize_null(v: Any) -> Any:
    if isinstance(v, str) and v.lower() in ("null", "undefined", ""):
        return None
    return v


NullableDatetime = Annotated[Optional[datetime], BeforeValidator(_normalize_null)]
NullableString = Annotated[Optional[str], BeforeValidator(_normalize_null)]


# --- Database Tables (SQLModel) ---

class User(SQLModel, table=True):
    __tablename__ = "users"
    uuid: str = Field(primary_key=True)
    username: str = Field(index=True, unique=True)
    password_hash: str
    role: str = Field(default="viewer")
    must_change_password: bool = Field(default=False)


class Log(SQLModel, table=True):
    __tablename__ = "logs"
    id: Optional[int] = Field(default=None, primary_key=True)
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc), index=True)
    decky: str = Field(index=True)
    service: str = Field(index=True)
    event_type: str = Field(index=True)
    attacker_ip: str = Field(index=True)
    # Long-text columns — declared TEXT explicitly so MySQL DDL doesn't
    # default to VARCHAR(255). On SQLite, TEXT is the native unbounded type.
    raw_line: str = Field(sa_column=Column("raw_line", Text, nullable=False))
    fields: str = Field(sa_column=Column("fields", Text, nullable=False))
    msg: Optional[str] = Field(default=None, sa_column=Column("msg", Text, nullable=True))
    # OTEL trace context — bridges the collector→ingester trace to the SSE
    # read path. Nullable so pre-existing rows and non-traced deployments
    # are unaffected.
    trace_id: Optional[str] = Field(default=None)
    span_id: Optional[str] = Field(default=None)


class Bounty(SQLModel, table=True):
    __tablename__ = "bounty"
    id: Optional[int] = Field(default=None, primary_key=True)
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc), index=True)
    decky: str = Field(index=True)
    service: str = Field(index=True)
    attacker_ip: str = Field(index=True)
    bounty_type: str = Field(index=True)
    payload: str = Field(sa_column=Column("payload", Text, nullable=False))


class State(SQLModel, table=True):
    __tablename__ = "state"
    key: str = Field(primary_key=True)
    # JSON-serialized DecnetConfig or other state blobs — can be large as
    # deckies/services accumulate. MEDIUMTEXT on MySQL (16 MiB ceiling).
    value: str = Field(sa_column=Column("value", _BIG_TEXT, nullable=False))


class Attacker(SQLModel, table=True):
    __tablename__ = "attackers"
    uuid: str = Field(primary_key=True)
    ip: str = Field(index=True)
    first_seen: datetime = Field(index=True)
    last_seen: datetime = Field(index=True)
    event_count: int = Field(default=0)
    service_count: int = Field(default=0)
    decky_count: int = Field(default=0)
    # JSON blobs — these grow over the attacker's lifetime. Use MEDIUMTEXT on
    # MySQL (16 MiB) for the fields that accumulate (fingerprints, commands,
    # and the deckies/services lists that are unbounded in principle).
    services: str = Field(
        default="[]", sa_column=Column("services", _BIG_TEXT, nullable=False, default="[]")
    )  # JSON list[str]
    deckies: str = Field(
        default="[]", sa_column=Column("deckies", _BIG_TEXT, nullable=False, default="[]")
    )  # JSON list[str], first-contact ordered
    traversal_path: Optional[str] = Field(
        default=None, sa_column=Column("traversal_path", Text, nullable=True)
    )  # "decky-01 → decky-03 → decky-05"
    is_traversal: bool = Field(default=False)
    bounty_count: int = Field(default=0)
    credential_count: int = Field(default=0)
    fingerprints: str = Field(
        default="[]", sa_column=Column("fingerprints", _BIG_TEXT, nullable=False, default="[]")
    )  # JSON list[dict] — bounty fingerprints
    commands: str = Field(
        default="[]", sa_column=Column("commands", _BIG_TEXT, nullable=False, default="[]")
    )  # JSON list[dict] — commands per service/decky
    updated_at: datetime = Field(
        default_factory=lambda: datetime.now(timezone.utc), index=True
    )


class AttackerBehavior(SQLModel, table=True):
    """
    Timing & behavioral profile for an attacker, joined to Attacker by uuid.

    Kept in a separate table so the core Attacker row stays narrow and
    behavior data can be updated independently (e.g. as the sniffer observes
    more packets) without touching the event-count aggregates.
    """
    __tablename__ = "attacker_behavior"
    attacker_uuid: str = Field(primary_key=True, foreign_key="attackers.uuid")
    # OS / TCP stack fingerprint (rolled up from sniffer events)
    os_guess: Optional[str] = None
    hop_distance: Optional[int] = None
    tcp_fingerprint: str = Field(
        default="{}",
        sa_column=Column("tcp_fingerprint", Text, nullable=False, default="{}"),
    )  # JSON: window, wscale, mss, options_sig
    retransmit_count: int = Field(default=0)
    # Behavioral (derived by the profiler from log-event timing)
    behavior_class: Optional[str] = None  # beaconing | interactive | scanning | brute_force | slow_scan | mixed | unknown
    beacon_interval_s: Optional[float] = None
    beacon_jitter_pct: Optional[float] = None
    tool_guesses: Optional[str] = None  # JSON list[str] — all matched tools
    timing_stats: str = Field(
        default="{}",
        sa_column=Column("timing_stats", Text, nullable=False, default="{}"),
    )  # JSON: mean/median/stdev/min/max IAT
    phase_sequence: str = Field(
        default="{}",
        sa_column=Column("phase_sequence", Text, nullable=False, default="{}"),
    )  # JSON: recon_end/exfil_start/latency
    updated_at: datetime = Field(
        default_factory=lambda: datetime.now(timezone.utc), index=True
    )


# --- API Request/Response Models (Pydantic) ---

class Token(BaseModel):
    access_token: str
    token_type: str
    must_change_password: bool = False


class LoginRequest(BaseModel):
    username: str
    password: str = PydanticField(..., max_length=72)


class ChangePasswordRequest(BaseModel):
    old_password: str = PydanticField(..., max_length=72)
    new_password: str = PydanticField(..., max_length=72)


class LogsResponse(BaseModel):
    total: int
    limit: int
    offset: int
    data: List[dict[str, Any]]


class BountyResponse(BaseModel):
    total: int
    limit: int
    offset: int
    data: List[dict[str, Any]]


class AttackersResponse(BaseModel):
    total: int
    limit: int
    offset: int
    data: List[dict[str, Any]]


class StatsResponse(BaseModel):
    total_logs: int
    unique_attackers: int
    active_deckies: int
    deployed_deckies: int


class MutateIntervalRequest(BaseModel):
    # Human-readable duration: <number><unit> where unit is m(inutes), d(ays),
    # M(onths), y/Y(ears). Minimum granularity is 1 minute; seconds are not accepted.
    mutate_interval: Optional[str] = PydanticField(None, pattern=r"^[1-9]\d*[mdMyY]$")


class DeployIniRequest(BaseModel):
    model_config = ConfigDict(extra="forbid")
    # This field enforces strict INI structure during Pydantic initialization.
    # The OpenAPI schema correctly shows it as a required string.
    ini_content: IniContent = PydanticField(..., description="A valid INI formatted string")


# --- Configuration Models ---

class CreateUserRequest(BaseModel):
    username: str = PydanticField(..., min_length=1, max_length=64)
    password: str = PydanticField(..., min_length=8, max_length=72)
    role: Literal["admin", "viewer"] = "viewer"


class UpdateUserRoleRequest(BaseModel):
    role: Literal["admin", "viewer"]


class ResetUserPasswordRequest(BaseModel):
    new_password: str = PydanticField(..., min_length=8, max_length=72)


class DeploymentLimitRequest(BaseModel):
    deployment_limit: int = PydanticField(..., ge=1, le=500)


class GlobalMutationIntervalRequest(BaseModel):
    global_mutation_interval: str = PydanticField(..., pattern=r"^[1-9]\d*[mdMyY]$")


class UserResponse(BaseModel):
    uuid: str
    username: str
    role: str
    must_change_password: bool


class ConfigResponse(BaseModel):
    role: str
    deployment_limit: int
    global_mutation_interval: str


class AdminConfigResponse(ConfigResponse):
    users: List[UserResponse]


class ComponentHealth(BaseModel):
    status: Literal["ok", "failing"]
    detail: Optional[str] = None


class HealthResponse(BaseModel):
    status: Literal["healthy", "degraded", "unhealthy"]
    components: dict[str, ComponentHealth]
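The _normalize_null BeforeValidator exists so JS-ish junk strings from the frontend coerce to None instead of failing validation. A self-contained sketch of that behavior (the Probe model is hypothetical, purely to exercise the annotated type):

from datetime import datetime
from typing import Optional, Annotated

from pydantic import BaseModel, BeforeValidator


def _normalize_null(v):
    if isinstance(v, str) and v.lower() in ("null", "undefined", ""):
        return None
    return v


NullableDatetime = Annotated[Optional[datetime], BeforeValidator(_normalize_null)]


class Probe(BaseModel):  # hypothetical model for illustration
    seen: NullableDatetime = None


# Junk strings coerce to None rather than raising a validation error.
assert Probe(seen="null").seen is None
assert Probe(seen="undefined").seen is None
assert Probe(seen="2024-01-01T00:00:00+00:00").seen is not None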
0  decnet/web/db/mysql/__init__.py  Normal file
98  decnet/web/db/mysql/database.py  Normal file
@@ -0,0 +1,98 @@
"""
MySQL async engine factory.

Builds a SQLAlchemy AsyncEngine against MySQL using the ``aiomysql`` driver.

Connection info is resolved (in order of precedence):

1. An explicit ``url`` argument passed to :func:`get_async_engine`
2. ``DECNET_DB_URL`` — full SQLAlchemy URL
3. Component env vars:
   ``DECNET_DB_HOST`` (default ``localhost``)
   ``DECNET_DB_PORT`` (default ``3306``)
   ``DECNET_DB_NAME`` (default ``decnet``)
   ``DECNET_DB_USER`` (default ``decnet``)
   ``DECNET_DB_PASSWORD`` (default empty — raises unless pytest is running)
"""
from __future__ import annotations

import os
from typing import Optional
from urllib.parse import quote_plus

from sqlalchemy.ext.asyncio import AsyncEngine, create_async_engine


DEFAULT_POOL_SIZE = 10
DEFAULT_MAX_OVERFLOW = 20
DEFAULT_POOL_RECYCLE = 3600  # seconds — avoid MySQL ``wait_timeout`` disconnects
DEFAULT_POOL_PRE_PING = True


def build_mysql_url(
    host: Optional[str] = None,
    port: Optional[int] = None,
    database: Optional[str] = None,
    user: Optional[str] = None,
    password: Optional[str] = None,
) -> str:
    """Compose an async SQLAlchemy URL for MySQL using the aiomysql driver.

    Component args override env vars. User and password are percent-encoded
    so special characters (``@``, ``:``, ``/``…) don't break URL parsing.
    """
    host = host or os.environ.get("DECNET_DB_HOST", "localhost")
    port = port or int(os.environ.get("DECNET_DB_PORT", "3306"))
    database = database or os.environ.get("DECNET_DB_NAME", "decnet")
    user = user or os.environ.get("DECNET_DB_USER", "decnet")

    if password is None:
        password = os.environ.get("DECNET_DB_PASSWORD", "")

    # Allow empty passwords during tests (pytest sets PYTEST_* env vars).
    # Outside tests, an empty MySQL password is almost never intentional.
    if not password and not any(k.startswith("PYTEST") for k in os.environ):
        raise ValueError(
            "DECNET_DB_PASSWORD is not set. Either export it, set DECNET_DB_URL, "
            "or run under pytest for an empty-password default."
        )

    pw_enc = quote_plus(password)
    user_enc = quote_plus(user)
    return f"mysql+aiomysql://{user_enc}:{pw_enc}@{host}:{port}/{database}"


def resolve_url(url: Optional[str] = None) -> str:
    """Pick a connection URL: explicit arg → DECNET_DB_URL env → built from components."""
    if url:
        return url
    env_url = os.environ.get("DECNET_DB_URL")
    if env_url:
        return env_url
    return build_mysql_url()


def get_async_engine(
    url: Optional[str] = None,
    *,
    pool_size: int = DEFAULT_POOL_SIZE,
    max_overflow: int = DEFAULT_MAX_OVERFLOW,
    pool_recycle: int = DEFAULT_POOL_RECYCLE,
    pool_pre_ping: bool = DEFAULT_POOL_PRE_PING,
    echo: bool = False,
) -> AsyncEngine:
    """Create an AsyncEngine for MySQL.

    Defaults tuned for a dashboard workload: a modest pool, hourly recycle
    to sidestep MySQL's idle-connection reaper, and pre-ping to fail fast
    if a pooled connection has been killed server-side.
    """
    dsn = resolve_url(url)
    return create_async_engine(
        dsn,
        echo=echo,
        pool_size=pool_size,
        max_overflow=max_overflow,
        pool_recycle=pool_recycle,
        pool_pre_ping=pool_pre_ping,
    )
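A quick check of the URL composition and precedence rules, assuming the DECNET_DB_* component env vars are unset (host and password values are illustrative):

import os

from decnet.web.db.mysql.database import build_mysql_url, resolve_url

# Passwords with URL metacharacters survive intact thanks to quote_plus.
url = build_mysql_url(host="db.internal", password="p@ss:w/rd")
assert url == "mysql+aiomysql://decnet:p%40ss%3Aw%2Frd@db.internal:3306/decnet"

# resolve_url prefers an explicit argument, then DECNET_DB_URL, then components.
os.environ["DECNET_DB_URL"] = "mysql+aiomysql://decnet:pw@db:3306/decnet"
assert resolve_url() == os.environ["DECNET_DB_URL"]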
130  decnet/web/db/mysql/repository.py  Normal file
@@ -0,0 +1,130 @@
"""
MySQL implementation of :class:`BaseRepository`.

Inherits the portable SQLModel query code from :class:`SQLModelRepository`
and only overrides the places where MySQL's SQL dialect differs from
SQLite's:

* :meth:`_migrate_attackers_table` and :meth:`_migrate_column_types` — use
  ``information_schema`` (MySQL has no ``PRAGMA``).
* :meth:`get_log_histogram` — uses ``FROM_UNIXTIME`` /
  ``UNIX_TIMESTAMP`` + integer division for bucketing.
"""
from __future__ import annotations

from typing import List, Optional

from sqlalchemy import func, select, text, literal_column
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
from sqlmodel.sql.expression import SelectOfScalar

from decnet.web.db.models import Log
from decnet.web.db.mysql.database import get_async_engine
from decnet.web.db.sqlmodel_repo import SQLModelRepository


class MySQLRepository(SQLModelRepository):
    """MySQL backend — uses ``aiomysql``."""

    def __init__(self, url: Optional[str] = None, **engine_kwargs) -> None:
        self.engine = get_async_engine(url=url, **engine_kwargs)
        self.session_factory = async_sessionmaker(
            self.engine, class_=AsyncSession, expire_on_commit=False
        )

    async def _migrate_attackers_table(self) -> None:
        """Drop the legacy (pre-UUID) ``attackers`` table if it exists without a ``uuid`` column.

        MySQL exposes column metadata via ``information_schema.COLUMNS``.
        ``DATABASE()`` scopes the lookup to the currently connected schema.
        """
        async with self.engine.begin() as conn:
            rows = (await conn.execute(text(
                "SELECT COLUMN_NAME FROM information_schema.COLUMNS "
                "WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'attackers'"
            ))).fetchall()
            if rows and not any(r[0] == "uuid" for r in rows):
                await conn.execute(text("DROP TABLE attackers"))

    async def _migrate_column_types(self) -> None:
        """Upgrade TEXT → MEDIUMTEXT for columns that accumulate large JSON blobs.

        ``create_all()`` never alters existing columns, so tables created before
        ``_BIG_TEXT`` was introduced keep their 64 KiB ``TEXT`` cap. This method
        inspects ``information_schema`` and issues ``ALTER TABLE … MODIFY COLUMN``
        for each offending column found.
        """
        targets: dict[str, dict[str, str]] = {
            "attackers": {
                "commands": "MEDIUMTEXT NOT NULL DEFAULT '[]'",
                "fingerprints": "MEDIUMTEXT NOT NULL DEFAULT '[]'",
                "services": "MEDIUMTEXT NOT NULL DEFAULT '[]'",
                "deckies": "MEDIUMTEXT NOT NULL DEFAULT '[]'",
            },
            "state": {
                "value": "MEDIUMTEXT NOT NULL",
            },
        }
        async with self.engine.begin() as conn:
            rows = (await conn.execute(text(
                "SELECT TABLE_NAME, COLUMN_NAME FROM information_schema.COLUMNS "
                "WHERE TABLE_SCHEMA = DATABASE() "
                "  AND TABLE_NAME IN ('attackers', 'state') "
                "  AND COLUMN_NAME IN ('commands','fingerprints','services','deckies','value') "
                "  AND DATA_TYPE = 'text'"
            ))).fetchall()
            for table_name, col_name in rows:
                spec = targets.get(table_name, {}).get(col_name)
                if spec:
                    await conn.execute(text(
                        f"ALTER TABLE `{table_name}` MODIFY COLUMN `{col_name}` {spec}"
                    ))

    async def initialize(self) -> None:
        """Create tables and run all MySQL-specific migrations."""
        from sqlmodel import SQLModel
        await self._migrate_attackers_table()
        await self._migrate_column_types()
        async with self.engine.begin() as conn:
            await conn.run_sync(SQLModel.metadata.create_all)
        await self._ensure_admin_user()

    def _json_field_equals(self, key: str):
        # MySQL 5.7+ exposes JSON_EXTRACT, but it returns a *quoted* JSON
        # string for TEXT-stored JSON; JSON_UNQUOTE strips the quotes so the
        # comparison matches SQLite's json_extract, which returns the bare value.
        return text(f"JSON_UNQUOTE(JSON_EXTRACT(fields, '$.{key}')) = :val")

    async def get_log_histogram(
        self,
        search: Optional[str] = None,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
        interval_minutes: int = 15,
    ) -> List[dict]:
        bucket_seconds = max(interval_minutes, 1) * 60
        # Truncate each timestamp to the start of its bucket:
        #   FROM_UNIXTIME( (UNIX_TIMESTAMP(timestamp) DIV N) * N )
        # DIV is MySQL's integer division operator.
        bucket_expr = literal_column(
            f"FROM_UNIXTIME((UNIX_TIMESTAMP(timestamp) DIV {bucket_seconds}) * {bucket_seconds})"
        ).label("bucket_time")

        statement: SelectOfScalar = select(bucket_expr, func.count().label("count")).select_from(Log)
        statement = self._apply_filters(statement, search, start_time, end_time)
        statement = statement.group_by(literal_column("bucket_time")).order_by(
            literal_column("bucket_time")
        )

        async with self.session_factory() as session:
            results = await session.execute(statement)
            # Normalize to ISO string for API parity with the SQLite backend
            # (SQLite's datetime() returns a string already; FROM_UNIXTIME
            # returns a datetime).
            out: List[dict] = []
            for r in results.all():
                ts = r[0]
                out.append({
                    "time": ts.isoformat(sep=" ") if hasattr(ts, "isoformat") else ts,
                    "count": r[1],
                })
            return out
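The bucketing arithmetic is easiest to sanity-check off-database. A minimal Python equivalent of the SQL bucket expression (the timestamp is illustrative):

from datetime import datetime, timezone

def bucket_start(ts: datetime, interval_minutes: int = 15) -> datetime:
    """Python equivalent of FROM_UNIXTIME((UNIX_TIMESTAMP(ts) DIV N) * N)."""
    n = max(interval_minutes, 1) * 60
    epoch = int(ts.timestamp())
    return datetime.fromtimestamp((epoch // n) * n, tz=timezone.utc)

ts = datetime(2024, 5, 1, 10, 37, 42, tzinfo=timezone.utc)
assert bucket_start(ts) == datetime(2024, 5, 1, 10, 30, tzinfo=timezone.utc)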
185  decnet/web/db/repository.py  Normal file
@@ -0,0 +1,185 @@
from abc import ABC, abstractmethod
from typing import Any, Optional


class BaseRepository(ABC):
    """Abstract base class for DECNET web dashboard data storage."""

    @abstractmethod
    async def initialize(self) -> None:
        """Initialize the database schema."""
        pass

    @abstractmethod
    async def add_log(self, log_data: dict[str, Any]) -> None:
        """Add a new log entry to the database."""
        pass

    @abstractmethod
    async def get_logs(
        self,
        limit: int = 50,
        offset: int = 0,
        search: Optional[str] = None
    ) -> list[dict[str, Any]]:
        """Retrieve paginated log entries."""
        pass

    @abstractmethod
    async def get_total_logs(self, search: Optional[str] = None) -> int:
        """Retrieve the total count of logs, optionally filtered by search."""
        pass

    @abstractmethod
    async def get_stats_summary(self) -> dict[str, Any]:
        """Retrieve high-level dashboard metrics."""
        pass

    @abstractmethod
    async def get_deckies(self) -> list[dict[str, Any]]:
        """Retrieve the list of currently deployed deckies."""
        pass

    @abstractmethod
    async def get_user_by_username(self, username: str) -> Optional[dict[str, Any]]:
        """Retrieve a user by their username."""
        pass

    @abstractmethod
    async def get_user_by_uuid(self, uuid: str) -> Optional[dict[str, Any]]:
        """Retrieve a user by their UUID."""
        pass

    @abstractmethod
    async def create_user(self, user_data: dict[str, Any]) -> None:
        """Create a new dashboard user."""
        pass

    @abstractmethod
    async def update_user_password(self, uuid: str, password_hash: str, must_change_password: bool = False) -> None:
        """Update a user's password and set the must_change_password flag."""
        pass

    @abstractmethod
    async def list_users(self) -> list[dict[str, Any]]:
        """Retrieve all users (caller must strip password_hash before returning to clients)."""
        pass

    @abstractmethod
    async def delete_user(self, uuid: str) -> bool:
        """Delete a user by UUID. Returns True if the user was found and deleted."""
        pass

    @abstractmethod
    async def update_user_role(self, uuid: str, role: str) -> None:
        """Update a user's role."""
        pass

    @abstractmethod
    async def purge_logs_and_bounties(self) -> dict[str, int]:
        """Delete all logs, bounties, and attacker profiles. Returns counts of deleted rows."""
        pass

    @abstractmethod
    async def add_bounty(self, bounty_data: dict[str, Any]) -> None:
        """Add a new harvested artifact (bounty) to the database."""
        pass

    @abstractmethod
    async def get_bounties(
        self,
        limit: int = 50,
        offset: int = 0,
        bounty_type: Optional[str] = None,
        search: Optional[str] = None
    ) -> list[dict[str, Any]]:
        """Retrieve paginated bounty entries."""
        pass

    @abstractmethod
    async def get_total_bounties(self, bounty_type: Optional[str] = None, search: Optional[str] = None) -> int:
        """Retrieve the total count of bounties, optionally filtered."""
        pass

    @abstractmethod
    async def get_state(self, key: str) -> Optional[dict[str, Any]]:
        """Retrieve a specific state entry by key."""
        pass

    @abstractmethod
    async def set_state(self, key: str, value: Any) -> None:
        """Store a specific state entry by key."""
        pass

    @abstractmethod
    async def get_max_log_id(self) -> int:
        """Return the highest log ID, or 0 if the table is empty."""
        pass

    @abstractmethod
    async def get_logs_after_id(self, last_id: int, limit: int = 500) -> list[dict[str, Any]]:
        """Return logs with id > last_id, ordered by id ASC, up to limit."""
        pass

    @abstractmethod
    async def get_all_bounties_by_ip(self) -> dict[str, list[dict[str, Any]]]:
        """Retrieve all bounty rows grouped by attacker_ip."""
        pass

    @abstractmethod
    async def get_bounties_for_ips(self, ips: set[str]) -> dict[str, list[dict[str, Any]]]:
        """Retrieve bounty rows grouped by attacker_ip, filtered to only the given IPs."""
        pass

    @abstractmethod
    async def upsert_attacker(self, data: dict[str, Any]) -> str:
        """Insert or replace an attacker profile record. Returns the row's UUID."""
        pass

    @abstractmethod
    async def upsert_attacker_behavior(self, attacker_uuid: str, data: dict[str, Any]) -> None:
        """Insert or replace the behavioral/fingerprint row for an attacker."""
        pass

    @abstractmethod
    async def get_attacker_behavior(self, attacker_uuid: str) -> Optional[dict[str, Any]]:
        """Retrieve the behavioral/fingerprint row for an attacker UUID."""
        pass

    @abstractmethod
    async def get_behaviors_for_ips(self, ips: set[str]) -> dict[str, dict[str, Any]]:
        """Bulk-fetch behavior rows keyed by attacker IP (JOIN to attackers)."""
        pass

    @abstractmethod
    async def get_attacker_by_uuid(self, uuid: str) -> Optional[dict[str, Any]]:
        """Retrieve a single attacker profile by UUID."""
        pass

    @abstractmethod
    async def get_attackers(
        self,
        limit: int = 50,
        offset: int = 0,
        search: Optional[str] = None,
        sort_by: str = "recent",
        service: Optional[str] = None,
    ) -> list[dict[str, Any]]:
        """Retrieve paginated attacker profile records."""
        pass

    @abstractmethod
    async def get_total_attackers(self, search: Optional[str] = None, service: Optional[str] = None) -> int:
        """Retrieve the total count of attacker profile records, optionally filtered."""
        pass

    @abstractmethod
    async def get_attacker_commands(
        self,
        uuid: str,
        limit: int = 50,
        offset: int = 0,
        service: Optional[str] = None,
    ) -> dict[str, Any]:
        """Retrieve paginated commands for an attacker, optionally filtered by service."""
        pass
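Because every data-access method here is abstract, a backend that misses one cannot even be constructed; the failure happens at instantiation rather than on first use. A quick sketch (the stub class is hypothetical):

from decnet.web.db.repository import BaseRepository

class HalfBakedRepository(BaseRepository):  # hypothetical: implements nothing
    pass

try:
    HalfBakedRepository()  # type: ignore[abstract]
except TypeError as exc:
    # "Can't instantiate abstract class HalfBakedRepository without an
    # implementation for abstract methods 'add_bounty', 'add_log', ..."
    print(exc)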
50  decnet/web/db/sqlite/database.py  Normal file
@@ -0,0 +1,50 @@
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, async_sessionmaker, create_async_engine
from sqlalchemy import create_engine, Engine, event
from sqlmodel import SQLModel
from typing import AsyncGenerator

# We need both sync and async engines for SQLite:
# sync for initialization (DDL) and async for standard queries.


def get_async_engine(db_path: str) -> AsyncEngine:
    # For an in-memory URI, don't add the extra slash that would turn it
    # into a relative file path.
    prefix = "sqlite+aiosqlite:///"
    if db_path.startswith(":memory:"):
        prefix = "sqlite+aiosqlite://"
    engine = create_async_engine(
        f"{prefix}{db_path}",
        echo=False,
        connect_args={"uri": True, "timeout": 30},
    )

    @event.listens_for(engine.sync_engine, "connect")
    def _set_sqlite_pragmas(dbapi_conn, _conn_record):
        # WAL allows concurrent readers during writes; NORMAL sync is safe
        # under WAL; busy_timeout avoids immediate "database is locked" errors.
        cursor = dbapi_conn.cursor()
        cursor.execute("PRAGMA journal_mode=WAL")
        cursor.execute("PRAGMA synchronous=NORMAL")
        cursor.execute("PRAGMA busy_timeout=30000")
        cursor.close()

    return engine


def get_sync_engine(db_path: str) -> Engine:
    prefix = "sqlite:///"
    if db_path.startswith(":memory:"):
        prefix = "sqlite://"
    return create_engine(f"{prefix}{db_path}", echo=False, connect_args={"uri": True})


def init_db(db_path: str) -> None:
    """Synchronously create all tables."""
    engine = get_sync_engine(db_path)
    # Ensure WAL mode is set before any DDL runs.
    with engine.connect() as conn:
        conn.exec_driver_sql("PRAGMA journal_mode=WAL")
        conn.exec_driver_sql("PRAGMA synchronous=NORMAL")
    SQLModel.metadata.create_all(engine)


async def get_session(engine: AsyncEngine) -> AsyncGenerator[AsyncSession, None]:
    async_session = async_sessionmaker(
        engine, class_=AsyncSession, expire_on_commit=False
    )
    async with async_session() as session:
        yield session
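A minimal consumption sketch for get_session (the database path is illustrative; get_session is an async generator, the shape FastAPI dependencies expect):

import asyncio

from sqlalchemy import text

from decnet.web.db.sqlite.database import get_async_engine, get_session, init_db

async def main() -> None:
    init_db("/tmp/decnet-example.db")  # synchronous DDL pass first
    engine = get_async_engine("/tmp/decnet-example.db")
    async for session in get_session(engine):
        mode = (await session.execute(text("PRAGMA journal_mode"))).scalar()
        print(mode)  # "wal", set by the connect-event listener
        break

asyncio.run(main())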
59  decnet/web/db/sqlite/repository.py  Normal file
@@ -0,0 +1,59 @@
from typing import List, Optional

from sqlalchemy import func, select, text, literal_column
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
from sqlmodel.sql.expression import SelectOfScalar

from decnet.config import _ROOT
from decnet.web.db.models import Log
from decnet.web.db.sqlite.database import get_async_engine
from decnet.web.db.sqlmodel_repo import SQLModelRepository


class SQLiteRepository(SQLModelRepository):
    """SQLite backend — uses ``aiosqlite``.

    Overrides the places where SQLite's SQL dialect differs from
    MySQL/PostgreSQL: legacy-schema migration (via ``PRAGMA table_info``)
    and the log-histogram bucket expression (via ``strftime`` + ``unixepoch``).
    """

    def __init__(self, db_path: str = str(_ROOT / "decnet.db")) -> None:
        self.db_path = db_path
        self.engine = get_async_engine(db_path)
        self.session_factory = async_sessionmaker(
            self.engine, class_=AsyncSession, expire_on_commit=False
        )

    async def _migrate_attackers_table(self) -> None:
        """Drop the old attackers table if it lacks the uuid column (pre-UUID schema)."""
        async with self.engine.begin() as conn:
            rows = (await conn.execute(text("PRAGMA table_info(attackers)"))).fetchall()
            # PRAGMA table_info rows are (cid, name, type, notnull, dflt_value, pk).
            if rows and not any(r[1] == "uuid" for r in rows):
                await conn.execute(text("DROP TABLE attackers"))

    def _json_field_equals(self, key: str):
        # SQLite stores JSON as text; json_extract is the canonical accessor.
        return text(f"json_extract(fields, '$.{key}') = :val")

    async def get_log_histogram(
        self,
        search: Optional[str] = None,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
        interval_minutes: int = 15,
    ) -> List[dict]:
        bucket_seconds = max(interval_minutes, 1) * 60
        # Same truncate-to-bucket arithmetic as the MySQL backend, spelled
        # with strftime('%s', …) and datetime(…, 'unixepoch').
        bucket_expr = literal_column(
            f"datetime((strftime('%s', timestamp) / {bucket_seconds}) * {bucket_seconds}, 'unixepoch')"
        ).label("bucket_time")

        statement: SelectOfScalar = select(bucket_expr, func.count().label("count")).select_from(Log)
        statement = self._apply_filters(statement, search, start_time, end_time)
        statement = statement.group_by(literal_column("bucket_time")).order_by(
            literal_column("bucket_time")
        )

        async with self.session_factory() as session:
            results = await session.execute(statement)
            return [{"time": r[0], "count": r[1]} for r in results.all()]
628  decnet/web/db/sqlmodel_repo.py  Normal file
@@ -0,0 +1,628 @@
"""
Shared SQLModel-based repository implementation.

Contains all dialect-portable query code used by the SQLite and MySQL
backends. Dialect-specific behavior lives in subclasses:

* engine/session construction (``__init__``)
* ``_migrate_attackers_table`` (legacy schema check; DDL introspection
  is not portable)
* ``get_log_histogram`` (date-bucket expression differs per dialect)
"""
from __future__ import annotations

import asyncio
import json
import uuid
from datetime import datetime, timezone
from typing import Any, Optional, List

from sqlalchemy import func, select, desc, asc, text, or_, update
from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, async_sessionmaker
from sqlmodel.sql.expression import SelectOfScalar

from decnet.config import load_state
from decnet.env import DECNET_ADMIN_USER, DECNET_ADMIN_PASSWORD
from decnet.web.auth import get_password_hash
from decnet.web.db.repository import BaseRepository
from decnet.web.db.models import User, Log, Bounty, State, Attacker, AttackerBehavior


class SQLModelRepository(BaseRepository):
    """Concrete SQLModel/SQLAlchemy-async repository.

    Subclasses provide ``self.engine`` (AsyncEngine) and ``self.session_factory``
    in ``__init__``, and override the few dialect-specific helpers.
    """

    engine: AsyncEngine
    session_factory: async_sessionmaker[AsyncSession]

    # ------------------------------------------------------------ lifecycle

    async def initialize(self) -> None:
        """Create tables if absent and seed the admin user."""
        from sqlmodel import SQLModel
        await self._migrate_attackers_table()
        async with self.engine.begin() as conn:
            await conn.run_sync(SQLModel.metadata.create_all)
        await self._ensure_admin_user()

    async def reinitialize(self) -> None:
        """Re-create schema (for tests / reset flows). Does NOT drop existing tables."""
        from sqlmodel import SQLModel
        async with self.engine.begin() as conn:
            await conn.run_sync(SQLModel.metadata.create_all)
        await self._ensure_admin_user()

    async def _ensure_admin_user(self) -> None:
        async with self.session_factory() as session:
            result = await session.execute(
                select(User).where(User.username == DECNET_ADMIN_USER)
            )
            if not result.scalar_one_or_none():
                session.add(User(
                    uuid=str(uuid.uuid4()),
                    username=DECNET_ADMIN_USER,
                    password_hash=get_password_hash(DECNET_ADMIN_PASSWORD),
                    role="admin",
                    must_change_password=True,
                ))
                await session.commit()

    async def _migrate_attackers_table(self) -> None:
        """Legacy-schema cleanup. Override per dialect (DDL introspection is non-portable)."""
        return None

    # ---------------------------------------------------------------- logs

    async def add_log(self, log_data: dict[str, Any]) -> None:
        data = log_data.copy()
        if "fields" in data and isinstance(data["fields"], dict):
            data["fields"] = json.dumps(data["fields"])
        if "timestamp" in data and isinstance(data["timestamp"], str):
            try:
                data["timestamp"] = datetime.fromisoformat(
                    data["timestamp"].replace("Z", "+00:00")
                )
            except ValueError:
                pass

        async with self.session_factory() as session:
            session.add(Log(**data))
            await session.commit()

    def _apply_filters(
        self,
        statement: SelectOfScalar,
        search: Optional[str],
        start_time: Optional[str],
        end_time: Optional[str],
    ) -> SelectOfScalar:
        import re
        import shlex

        if start_time:
            statement = statement.where(Log.timestamp >= start_time)
        if end_time:
            statement = statement.where(Log.timestamp <= end_time)

        if search:
            try:
                tokens = shlex.split(search)
            except ValueError:
                tokens = search.split()

            core_fields = {
                "decky": Log.decky,
                "service": Log.service,
                "event": Log.event_type,
                "attacker": Log.attacker_ip,
                "attacker-ip": Log.attacker_ip,
                "attacker_ip": Log.attacker_ip,
            }

            for token in tokens:
                if ":" in token:
                    key, val = token.split(":", 1)
                    if key in core_fields:
                        statement = statement.where(core_fields[key] == val)
                    else:
                        # Unknown keys fall through to the JSON fields blob;
                        # strip anything outside [a-zA-Z0-9_] before the key
                        # is interpolated into SQL.
                        key_safe = re.sub(r"[^a-zA-Z0-9_]", "", key)
                        if key_safe:
                            statement = statement.where(
                                self._json_field_equals(key_safe)
                            ).params(val=val)
                else:
                    lk = f"%{token}%"
                    statement = statement.where(
                        or_(
                            Log.raw_line.like(lk),
                            Log.decky.like(lk),
                            Log.service.like(lk),
                            Log.attacker_ip.like(lk),
                        )
                    )
        return statement

    def _json_field_equals(self, key: str):
        """Return a text() predicate that matches rows where fields->key == :val.

        Both SQLite and MySQL expose a ``JSON_EXTRACT`` function (SQL function
        names are case-insensitive, so SQLite's ``json_extract`` is the same
        call). The ``:val`` parameter is bound separately and must be supplied
        with ``.params(val=...)`` by the caller, which keeps the value out of
        the SQL string; the key is interpolated, so callers sanitize it first
        (see ``_apply_filters``).
        """
        return text(f"JSON_EXTRACT(fields, '$.{key}') = :val")

    async def get_logs(
        self,
        limit: int = 50,
        offset: int = 0,
        search: Optional[str] = None,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
    ) -> List[dict]:
        statement = (
            select(Log)
            .order_by(desc(Log.timestamp))
            .offset(offset)
            .limit(limit)
        )
        statement = self._apply_filters(statement, search, start_time, end_time)

        async with self.session_factory() as session:
            results = await session.execute(statement)
            return [log.model_dump(mode="json") for log in results.scalars().all()]

    async def get_max_log_id(self) -> int:
        async with self.session_factory() as session:
            result = await session.execute(select(func.max(Log.id)))
            val = result.scalar()
            return val if val is not None else 0

    async def get_logs_after_id(
        self,
        last_id: int,
        limit: int = 50,
        search: Optional[str] = None,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
    ) -> List[dict]:
        statement = (
            select(Log).where(Log.id > last_id).order_by(asc(Log.id)).limit(limit)
        )
        statement = self._apply_filters(statement, search, start_time, end_time)

        async with self.session_factory() as session:
            results = await session.execute(statement)
            return [log.model_dump(mode="json") for log in results.scalars().all()]

    async def get_total_logs(
        self,
        search: Optional[str] = None,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
    ) -> int:
        statement = select(func.count()).select_from(Log)
        statement = self._apply_filters(statement, search, start_time, end_time)

        async with self.session_factory() as session:
            result = await session.execute(statement)
            return result.scalar() or 0

    async def get_log_histogram(
        self,
        search: Optional[str] = None,
        start_time: Optional[str] = None,
        end_time: Optional[str] = None,
        interval_minutes: int = 15,
    ) -> List[dict]:
        """Dialect-specific — override per backend."""
        raise NotImplementedError

    async def get_stats_summary(self) -> dict[str, Any]:
        async with self.session_factory() as session:
            total_logs = (
                await session.execute(select(func.count()).select_from(Log))
            ).scalar() or 0
            unique_attackers = (
                await session.execute(
                    select(func.count(func.distinct(Log.attacker_ip)))
                )
            ).scalar() or 0

        _state = await asyncio.to_thread(load_state)
        deployed_deckies = len(_state[0].deckies) if _state else 0

        return {
            "total_logs": total_logs,
            "unique_attackers": unique_attackers,
            "active_deckies": deployed_deckies,
            "deployed_deckies": deployed_deckies,
        }

    async def get_deckies(self) -> List[dict]:
        _state = await asyncio.to_thread(load_state)
        return [_d.model_dump() for _d in _state[0].deckies] if _state else []

    # --------------------------------------------------------------- users

    async def get_user_by_username(self, username: str) -> Optional[dict]:
        async with self.session_factory() as session:
            result = await session.execute(
                select(User).where(User.username == username)
            )
            user = result.scalar_one_or_none()
            return user.model_dump() if user else None

    async def get_user_by_uuid(self, uuid: str) -> Optional[dict]:
        async with self.session_factory() as session:
            result = await session.execute(
                select(User).where(User.uuid == uuid)
            )
            user = result.scalar_one_or_none()
            return user.model_dump() if user else None

    async def create_user(self, user_data: dict[str, Any]) -> None:
        async with self.session_factory() as session:
            session.add(User(**user_data))
            await session.commit()

    async def update_user_password(
        self, uuid: str, password_hash: str, must_change_password: bool = False
    ) -> None:
        async with self.session_factory() as session:
            await session.execute(
                update(User)
                .where(User.uuid == uuid)
                .values(
                    password_hash=password_hash,
                    must_change_password=must_change_password,
                )
            )
            await session.commit()

    async def list_users(self) -> list[dict]:
        async with self.session_factory() as session:
            result = await session.execute(select(User))
            return [u.model_dump() for u in result.scalars().all()]

    async def delete_user(self, uuid: str) -> bool:
        async with self.session_factory() as session:
            result = await session.execute(select(User).where(User.uuid == uuid))
            user = result.scalar_one_or_none()
            if not user:
                return False
            await session.delete(user)
            await session.commit()
            return True

    async def update_user_role(self, uuid: str, role: str) -> None:
        async with self.session_factory() as session:
            await session.execute(
                update(User).where(User.uuid == uuid).values(role=role)
            )
            await session.commit()

    async def purge_logs_and_bounties(self) -> dict[str, int]:
        async with self.session_factory() as session:
            logs_deleted = (await session.execute(text("DELETE FROM logs"))).rowcount
            bounties_deleted = (await session.execute(text("DELETE FROM bounty"))).rowcount
            # attacker_behavior has FK → attackers.uuid; delete children first.
            await session.execute(text("DELETE FROM attacker_behavior"))
            attackers_deleted = (await session.execute(text("DELETE FROM attackers"))).rowcount
            await session.commit()
            return {
                "logs": logs_deleted,
                "bounties": bounties_deleted,
                "attackers": attackers_deleted,
            }

    # ------------------------------------------------------------ bounties

    async def add_bounty(self, bounty_data: dict[str, Any]) -> None:
        data = bounty_data.copy()
        if "payload" in data and isinstance(data["payload"], dict):
            data["payload"] = json.dumps(data["payload"])

        async with self.session_factory() as session:
            # Skip exact duplicates (same type, source IP, and payload).
            dup = await session.execute(
                select(Bounty.id).where(
                    Bounty.bounty_type == data.get("bounty_type"),
                    Bounty.attacker_ip == data.get("attacker_ip"),
                    Bounty.payload == data.get("payload"),
                ).limit(1)
            )
            if dup.first() is not None:
                return
            session.add(Bounty(**data))
            await session.commit()

    def _apply_bounty_filters(
        self,
        statement: SelectOfScalar,
        bounty_type: Optional[str],
        search: Optional[str],
    ) -> SelectOfScalar:
        if bounty_type:
            statement = statement.where(Bounty.bounty_type == bounty_type)
        if search:
            lk = f"%{search}%"
            statement = statement.where(
                or_(
                    Bounty.decky.like(lk),
                    Bounty.service.like(lk),
                    Bounty.attacker_ip.like(lk),
                    Bounty.payload.like(lk),
                )
            )
        return statement

    async def get_bounties(
        self,
        limit: int = 50,
        offset: int = 0,
        bounty_type: Optional[str] = None,
        search: Optional[str] = None,
    ) -> List[dict]:
        statement = (
            select(Bounty)
            .order_by(desc(Bounty.timestamp))
            .offset(offset)
            .limit(limit)
        )
        statement = self._apply_bounty_filters(statement, bounty_type, search)

        async with self.session_factory() as session:
            results = await session.execute(statement)
            final = []
            for item in results.scalars().all():
                d = item.model_dump(mode="json")
                try:
                    d["payload"] = json.loads(d["payload"])
                except (json.JSONDecodeError, TypeError):
                    pass
                final.append(d)
            return final

    async def get_total_bounties(
        self, bounty_type: Optional[str] = None, search: Optional[str] = None
    ) -> int:
        statement = select(func.count()).select_from(Bounty)
        statement = self._apply_bounty_filters(statement, bounty_type, search)

        async with self.session_factory() as session:
            result = await session.execute(statement)
            return result.scalar() or 0

    async def get_state(self, key: str) -> Optional[dict[str, Any]]:
        async with self.session_factory() as session:
            statement = select(State).where(State.key == key)
            result = await session.execute(statement)
            state = result.scalar_one_or_none()
            if state:
                return json.loads(state.value)
            return None

    async def set_state(self, key: str, value: Any) -> None:  # noqa: ANN401
        async with self.session_factory() as session:
            statement = select(State).where(State.key == key)
            result = await session.execute(statement)
            state = result.scalar_one_or_none()

            value_json = json.dumps(value)
            if state:
                state.value = value_json
                session.add(state)
            else:
                session.add(State(key=key, value=value_json))

            await session.commit()

    # ----------------------------------------------------------- attackers

    async def get_all_bounties_by_ip(self) -> dict[str, List[dict[str, Any]]]:
        from collections import defaultdict
        async with self.session_factory() as session:
            result = await session.execute(
                select(Bounty).order_by(asc(Bounty.timestamp))
            )
            grouped: dict[str, List[dict[str, Any]]] = defaultdict(list)
            for item in result.scalars().all():
                d = item.model_dump(mode="json")
                try:
                    d["payload"] = json.loads(d["payload"])
                except (json.JSONDecodeError, TypeError):
                    pass
                grouped[item.attacker_ip].append(d)
            return dict(grouped)

    async def get_bounties_for_ips(self, ips: set[str]) -> dict[str, List[dict[str, Any]]]:
        from collections import defaultdict
        async with self.session_factory() as session:
            result = await session.execute(
                select(Bounty).where(Bounty.attacker_ip.in_(ips)).order_by(asc(Bounty.timestamp))
            )
            grouped: dict[str, List[dict[str, Any]]] = defaultdict(list)
            for item in result.scalars().all():
                d = item.model_dump(mode="json")
                try:
                    d["payload"] = json.loads(d["payload"])
                except (json.JSONDecodeError, TypeError):
                    pass
                grouped[item.attacker_ip].append(d)
            return dict(grouped)

    async def upsert_attacker(self, data: dict[str, Any]) -> str:
        async with self.session_factory() as session:
            result = await session.execute(
                select(Attacker).where(Attacker.ip == data["ip"])
            )
            existing = result.scalar_one_or_none()
            if existing:
                for k, v in data.items():
                    setattr(existing, k, v)
                session.add(existing)
                row_uuid = existing.uuid
            else:
                row_uuid = str(uuid.uuid4())
                data = {**data, "uuid": row_uuid}
                session.add(Attacker(**data))
            await session.commit()
            return row_uuid

    async def upsert_attacker_behavior(
        self,
        attacker_uuid: str,
        data: dict[str, Any],
    ) -> None:
        async with self.session_factory() as session:
            result = await session.execute(
                select(AttackerBehavior).where(
                    AttackerBehavior.attacker_uuid == attacker_uuid
                )
            )
            existing = result.scalar_one_or_none()
            payload = {**data, "updated_at": datetime.now(timezone.utc)}
            if existing:
                for k, v in payload.items():
                    setattr(existing, k, v)
                session.add(existing)
            else:
                session.add(AttackerBehavior(attacker_uuid=attacker_uuid, **payload))
            await session.commit()

    async def get_attacker_behavior(
        self,
        attacker_uuid: str,
    ) -> Optional[dict[str, Any]]:
        async with self.session_factory() as session:
            result = await session.execute(
                select(AttackerBehavior).where(
                    AttackerBehavior.attacker_uuid == attacker_uuid
                )
            )
            row = result.scalar_one_or_none()
            if not row:
                return None
            return self._deserialize_behavior(row.model_dump(mode="json"))

    async def get_behaviors_for_ips(
        self,
        ips: set[str],
    ) -> dict[str, dict[str, Any]]:
        if not ips:
            return {}
        async with self.session_factory() as session:
            result = await session.execute(
                select(Attacker.ip, AttackerBehavior)
                .join(AttackerBehavior, Attacker.uuid == AttackerBehavior.attacker_uuid)
                .where(Attacker.ip.in_(ips))
            )
            out: dict[str, dict[str, Any]] = {}
            for ip, row in result.all():
                out[ip] = self._deserialize_behavior(row.model_dump(mode="json"))
            return out

    @staticmethod
    def _deserialize_behavior(d: dict[str, Any]) -> dict[str, Any]:
        for key in ("tcp_fingerprint", "timing_stats", "phase_sequence"):
            if isinstance(d.get(key), str):
                try:
                    d[key] = json.loads(d[key])
                except (json.JSONDecodeError, TypeError):
                    pass
        # Deserialize the tool_guesses JSON array; normalise None → [].
        raw = d.get("tool_guesses")
        if isinstance(raw, str):
            try:
                parsed = json.loads(raw)
                d["tool_guesses"] = parsed if isinstance(parsed, list) else [parsed]
            except (json.JSONDecodeError, TypeError):
                d["tool_guesses"] = []
        elif raw is None:
            d["tool_guesses"] = []
        return d

    @staticmethod
    def _deserialize_attacker(d: dict[str, Any]) -> dict[str, Any]:
        for key in ("services", "deckies", "fingerprints", "commands"):
            if isinstance(d.get(key), str):
                try:
                    d[key] = json.loads(d[key])
                except (json.JSONDecodeError, TypeError):
                    pass
        return d

    async def get_attacker_by_uuid(self, uuid: str) -> Optional[dict[str, Any]]:
        async with self.session_factory() as session:
            result = await session.execute(
                select(Attacker).where(Attacker.uuid == uuid)
            )
            attacker = result.scalar_one_or_none()
            if not attacker:
                return None
            return self._deserialize_attacker(attacker.model_dump(mode="json"))

    async def get_attackers(
        self,
        limit: int = 50,
        offset: int = 0,
        search: Optional[str] = None,
        sort_by: str = "recent",
        service: Optional[str] = None,
    ) -> List[dict[str, Any]]:
        order = {
            "active": desc(Attacker.event_count),
            "traversals": desc(Attacker.is_traversal),
        }.get(sort_by, desc(Attacker.last_seen))

        statement = select(Attacker).order_by(order).offset(offset).limit(limit)
        if search:
            statement = statement.where(Attacker.ip.like(f"%{search}%"))
        if service:
            statement = statement.where(Attacker.services.like(f'%"{service}"%'))

        async with self.session_factory() as session:
            result = await session.execute(statement)
            return [
                self._deserialize_attacker(a.model_dump(mode="json"))
                for a in result.scalars().all()
            ]

    async def get_total_attackers(
        self, search: Optional[str] = None, service: Optional[str] = None
    ) -> int:
        statement = select(func.count()).select_from(Attacker)
        if search:
            statement = statement.where(Attacker.ip.like(f"%{search}%"))
        if service:
            statement = statement.where(Attacker.services.like(f'%"{service}"%'))

        async with self.session_factory() as session:
            result = await session.execute(statement)
            return result.scalar() or 0

    async def get_attacker_commands(
        self,
        uuid: str,
        limit: int = 50,
        offset: int = 0,
        service: Optional[str] = None,
    ) -> dict[str, Any]:
        async with self.session_factory() as session:
            result = await session.execute(
                select(Attacker.commands).where(Attacker.uuid == uuid)
            )
            raw = result.scalar_one_or_none()
            if raw is None:
                return {"total": 0, "data": []}

            commands: list = json.loads(raw) if isinstance(raw, str) else raw
            if service:
                commands = [c for c in commands if c.get("service") == service]

            total = len(commands)
            page = commands[offset: offset + limit]
            return {"total": total, "data": page}
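The search grammar in _apply_filters accepts key:value filters and free-text terms in one string. A quick tokenization sketch (the query values are illustrative):

import shlex

# One search box, two kinds of tokens: key:value filters and free text.
query = 'decky:decky-03 service:ssh "failed password"'
tokens = shlex.split(query)
print(tokens)  # ['decky:decky-03', 'service:ssh', 'failed password']

# Known keys hit indexed columns; unknown keys fall through to
# JSON_EXTRACT on the fields blob; bare tokens become LIKE '%…%' matches.
for token in tokens:
    kind = "column/json filter" if ":" in token else "LIKE match"
    print(f"{token!r} -> {kind}")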
139  decnet/web/dependencies.py  Normal file
@@ -0,0 +1,139 @@
from typing import Any, Optional

import jwt
from fastapi import Depends, HTTPException, status, Request
from fastapi.security import OAuth2PasswordBearer

from decnet.web.auth import ALGORITHM, SECRET_KEY
from decnet.web.db.repository import BaseRepository
from decnet.web.db.factory import get_repository

# Shared repository singleton
_repo: Optional[BaseRepository] = None


def get_repo() -> BaseRepository:
    """FastAPI dependency to inject the configured repository."""
    global _repo
    if _repo is None:
        _repo = get_repository()
    return _repo


repo = get_repo()

oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/v1/auth/login")


async def get_stream_user(request: Request, token: Optional[str] = None) -> str:
    """Auth dependency for SSE endpoints - accepts a Bearer header OR a ?token= query param.

    EventSource does not support custom headers, so the query-string fallback is
    intentional here only.
    """
    _credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )

    auth_header = request.headers.get("Authorization")
    resolved: str | None = (
        auth_header.split(" ", 1)[1]
        if auth_header and auth_header.startswith("Bearer ")
        else token
    )
    if not resolved:
        raise _credentials_exception

    try:
        _payload: dict[str, Any] = jwt.decode(resolved, SECRET_KEY, algorithms=[ALGORITHM])
        _user_uuid: Optional[str] = _payload.get("uuid")
        if _user_uuid is None:
            raise _credentials_exception
        return _user_uuid
    except jwt.PyJWTError:
        raise _credentials_exception


async def _decode_token(request: Request) -> str:
    """Decode and validate a Bearer JWT, returning the user UUID."""
    _credentials_exception = HTTPException(
        status_code=status.HTTP_401_UNAUTHORIZED,
        detail="Could not validate credentials",
        headers={"WWW-Authenticate": "Bearer"},
    )

    auth_header = request.headers.get("Authorization")
    token: str | None = (
        auth_header.split(" ", 1)[1]
        if auth_header and auth_header.startswith("Bearer ")
        else None
    )
    if not token:
        raise _credentials_exception

    try:
        _payload: dict[str, Any] = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
        _user_uuid: Optional[str] = _payload.get("uuid")
        if _user_uuid is None:
            raise _credentials_exception
        return _user_uuid
    except jwt.PyJWTError:
        raise _credentials_exception


async def get_current_user(request: Request) -> str:
    """Auth dependency - enforces must_change_password."""
    _user_uuid = await _decode_token(request)
    _user = await repo.get_user_by_uuid(_user_uuid)
    if _user and _user.get("must_change_password"):
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Password change required before accessing this resource",
        )
    return _user_uuid


async def get_current_user_unchecked(request: Request) -> str:
    """Auth dependency - skips must_change_password enforcement.

    Use only for endpoints that must remain reachable with the flag set
    (e.g. change-password).
    """
    return await _decode_token(request)


# ---------------------------------------------------------------------------
# Role-based access control
# ---------------------------------------------------------------------------

def require_role(*allowed_roles: str):
    """Factory that returns a FastAPI dependency enforcing role membership.

    The returned dependency chains from ``get_current_user`` (JWT + must_change_password)
    then verifies the user's role is in *allowed_roles*. Returns the full user dict so
    endpoints can inspect ``user["uuid"]``, ``user["role"]``, etc. without a second lookup.
    """
    async def _check(current_user: str = Depends(get_current_user)) -> dict:
        user = await repo.get_user_by_uuid(current_user)
        if not user or user["role"] not in allowed_roles:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Insufficient permissions",
            )
        return user
    return _check


def require_stream_role(*allowed_roles: str):
    """Like ``require_role`` but for SSE endpoints that accept a query-param token."""
    async def _check(request: Request, token: Optional[str] = None) -> dict:
        user_uuid = await get_stream_user(request, token)
        user = await repo.get_user_by_uuid(user_uuid)
        if not user or user["role"] not in allowed_roles:
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail="Insufficient permissions",
            )
        return user
    return _check


require_admin = require_role("admin")
require_viewer = require_role("viewer", "admin")
require_stream_viewer = require_stream_role("viewer", "admin")
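For context, a minimal sketch of how these role dependencies are meant to be consumed in a router module. The endpoint path and handler below are hypothetical illustrations; `require_admin` and the returned user dict come from the module above:

```python
from fastapi import APIRouter, Depends

from decnet.web.dependencies import require_admin

router = APIRouter()


@router.delete("/example/{item_id}")  # hypothetical endpoint, for illustration only
async def delete_item(item_id: str, admin: dict = Depends(require_admin)) -> dict[str, str]:
    # `admin` is the full user dict returned by require_role's inner check,
    # so the handler can audit or branch on role without a second lookup.
    return {"message": f"{item_id} deleted by {admin['uuid']}"}
```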
309  decnet/web/ingester.py  Normal file
@@ -0,0 +1,309 @@
import asyncio
import os
import json
from typing import Any
from pathlib import Path

from decnet.logging import get_logger
from decnet.telemetry import (
    traced as _traced,
    get_tracer as _get_tracer,
    extract_context as _extract_ctx,
    start_span_with_context as _start_span,
)
from decnet.web.db.repository import BaseRepository

logger = get_logger("api")

_INGEST_STATE_KEY = "ingest_worker_position"


async def log_ingestion_worker(repo: BaseRepository) -> None:
    """
    Background task that tails the JSON log file derived from
    DECNET_INGEST_LOG_FILE (same path with a .json suffix) and inserts
    structured JSON records into the repository.
    """
    _base_log_file: str | None = os.environ.get("DECNET_INGEST_LOG_FILE")
    if not _base_log_file:
        logger.warning("DECNET_INGEST_LOG_FILE not set. Log ingestion disabled.")
        return

    _json_log_path: Path = Path(_base_log_file).with_suffix(".json")

    _saved = await repo.get_state(_INGEST_STATE_KEY)
    _position: int = _saved.get("position", 0) if _saved else 0

    logger.info("ingest worker started path=%s position=%d", _json_log_path, _position)

    while True:
        try:
            if not _json_log_path.exists():
                await asyncio.sleep(2)
                continue

            _stat: os.stat_result = _json_log_path.stat()
            if _stat.st_size < _position:
                # File rotated or truncated
                _position = 0
                await repo.set_state(_INGEST_STATE_KEY, {"position": 0})

            if _stat.st_size == _position:
                # No new data
                await asyncio.sleep(1)
                continue

            with open(_json_log_path, "r", encoding="utf-8", errors="replace") as _f:
                _f.seek(_position)
                while True:
                    _line: str = _f.readline()
                    if not _line:
                        break  # EOF reached

                    if not _line.endswith('\n'):
                        # Partial line read; don't process yet, don't advance position
                        break

                    try:
                        _log_data: dict[str, Any] = json.loads(_line.strip())
                        # Extract trace context injected by the collector.
                        # This makes the ingester span a child of the collector span,
                        # showing the full event journey in Jaeger.
                        _parent_ctx = _extract_ctx(_log_data)
                        _tracer = _get_tracer("ingester")
                        with _start_span(_tracer, "ingester.process_record", context=_parent_ctx) as _span:
                            _span.set_attribute("decky", _log_data.get("decky", ""))
                            _span.set_attribute("service", _log_data.get("service", ""))
                            _span.set_attribute("event_type", _log_data.get("event_type", ""))
                            _span.set_attribute("attacker_ip", _log_data.get("attacker_ip", ""))
                            # Persist trace context in the DB row so the SSE
                            # read path can link back to this ingestion trace.
                            _sctx = getattr(_span, "get_span_context", None)
                            if _sctx:
                                _ctx = _sctx()
                                if _ctx and getattr(_ctx, "trace_id", 0):
                                    _log_data["trace_id"] = format(_ctx.trace_id, "032x")
                                    _log_data["span_id"] = format(_ctx.span_id, "016x")
                            logger.debug("ingest: record decky=%s event_type=%s", _log_data.get("decky"), _log_data.get("event_type"))
                            await repo.add_log(_log_data)
                            await _extract_bounty(repo, _log_data)
                    except json.JSONDecodeError:
                        logger.error("ingest: failed to decode JSON log line: %s", _line.strip())
                        continue

                    # Update position after each successfully read line
                    _position = _f.tell()

            await repo.set_state(_INGEST_STATE_KEY, {"position": _position})

        except Exception as _e:
            _err_str = str(_e).lower()
            if "no such table" in _err_str or "no active connection" in _err_str or "connection closed" in _err_str:
                logger.error("ingest: post-shutdown or fatal DB error: %s", _e)
                break  # Exit worker - DB is gone or uninitialized

            logger.error("ingest: error in worker: %s", _e)
            await asyncio.sleep(5)

        await asyncio.sleep(1)


@_traced("ingester.extract_bounty")
async def _extract_bounty(repo: BaseRepository, log_data: dict[str, Any]) -> None:
    """Detect and extract valuable artifacts (bounties) from log entries."""
    _fields = log_data.get("fields")
    if not isinstance(_fields, dict):
        return

    # 1. Credentials (User/Pass)
    _user = _fields.get("username")
    _pass = _fields.get("password")

    if _user and _pass:
        await repo.add_bounty({
            "decky": log_data.get("decky"),
            "service": log_data.get("service"),
            "attacker_ip": log_data.get("attacker_ip"),
            "bounty_type": "credential",
            "payload": {
                "username": _user,
                "password": _pass
            }
        })

    # 2. HTTP User-Agent fingerprint
    _h_raw = _fields.get("headers")
    if isinstance(_h_raw, dict):
        _headers = _h_raw
    elif isinstance(_h_raw, str):
        try:
            _parsed = json.loads(_h_raw)
            _headers = _parsed if isinstance(_parsed, dict) else {}
        except (json.JSONDecodeError, ValueError):
            _headers = {}
    else:
        _headers = {}
    _ua = _headers.get("User-Agent") or _headers.get("user-agent")
    if _ua:
        await repo.add_bounty({
            "decky": log_data.get("decky"),
            "service": log_data.get("service"),
            "attacker_ip": log_data.get("attacker_ip"),
            "bounty_type": "fingerprint",
            "payload": {
                "fingerprint_type": "http_useragent",
                "value": _ua,
                "method": _fields.get("method"),
                "path": _fields.get("path"),
            }
        })

    # 3. VNC client version fingerprint
    _vnc_ver = _fields.get("client_version")
    if _vnc_ver and log_data.get("event_type") == "version":
        await repo.add_bounty({
            "decky": log_data.get("decky"),
            "service": log_data.get("service"),
            "attacker_ip": log_data.get("attacker_ip"),
            "bounty_type": "fingerprint",
            "payload": {
                "fingerprint_type": "vnc_client_version",
                "value": _vnc_ver,
            }
        })

    # 4. SSH client banner fingerprint (deferred: requires asyncssh server)
    # Fires on: service=ssh, event_type=client_banner, fields.client_banner

    # 5. JA3/JA3S TLS fingerprint from the sniffer container
    _ja3 = _fields.get("ja3")
    if _ja3 and log_data.get("service") == "sniffer":
        await repo.add_bounty({
            "decky": log_data.get("decky"),
            "service": "sniffer",
            "attacker_ip": log_data.get("attacker_ip"),
            "bounty_type": "fingerprint",
            "payload": {
                "fingerprint_type": "ja3",
                "ja3": _ja3,
                "ja3s": _fields.get("ja3s"),
                "ja4": _fields.get("ja4"),
                "ja4s": _fields.get("ja4s"),
                "tls_version": _fields.get("tls_version"),
                "sni": _fields.get("sni") or None,
                "alpn": _fields.get("alpn") or None,
                "dst_port": _fields.get("dst_port"),
                "raw_ciphers": _fields.get("raw_ciphers"),
                "raw_extensions": _fields.get("raw_extensions"),
            },
        })

    # 6. JA4L latency fingerprint from the sniffer
    _ja4l_rtt = _fields.get("ja4l_rtt_ms")
    if _ja4l_rtt and log_data.get("service") == "sniffer":
        await repo.add_bounty({
            "decky": log_data.get("decky"),
            "service": "sniffer",
            "attacker_ip": log_data.get("attacker_ip"),
            "bounty_type": "fingerprint",
            "payload": {
                "fingerprint_type": "ja4l",
                "rtt_ms": _ja4l_rtt,
                "client_ttl": _fields.get("ja4l_client_ttl"),
            },
        })

    # 7. TLS session resumption behavior
    _resumption = _fields.get("resumption")
    if _resumption and log_data.get("service") == "sniffer":
        await repo.add_bounty({
            "decky": log_data.get("decky"),
            "service": "sniffer",
            "attacker_ip": log_data.get("attacker_ip"),
            "bounty_type": "fingerprint",
            "payload": {
                "fingerprint_type": "tls_resumption",
                "mechanisms": _resumption,
            },
        })

    # 8. TLS certificate details (TLS 1.2 only: passive extraction)
    _subject_cn = _fields.get("subject_cn")
    if _subject_cn and log_data.get("service") == "sniffer":
        await repo.add_bounty({
            "decky": log_data.get("decky"),
            "service": "sniffer",
            "attacker_ip": log_data.get("attacker_ip"),
            "bounty_type": "fingerprint",
            "payload": {
                "fingerprint_type": "tls_certificate",
                "subject_cn": _subject_cn,
                "issuer": _fields.get("issuer"),
                "self_signed": _fields.get("self_signed"),
                "not_before": _fields.get("not_before"),
                "not_after": _fields.get("not_after"),
                "sans": _fields.get("sans"),
                "sni": _fields.get("sni") or None,
            },
        })

    # 9. JARM fingerprint from the active prober
    _jarm = _fields.get("jarm_hash")
    if _jarm and log_data.get("service") == "prober":
        await repo.add_bounty({
            "decky": log_data.get("decky"),
            "service": "prober",
            "attacker_ip": _fields.get("target_ip", "Unknown"),
            "bounty_type": "fingerprint",
            "payload": {
                "fingerprint_type": "jarm",
                "hash": _jarm,
                "target_ip": _fields.get("target_ip"),
                "target_port": _fields.get("target_port"),
            },
        })

    # 10. HASSHServer fingerprint from the active prober
    _hassh = _fields.get("hassh_server_hash")
    if _hassh and log_data.get("service") == "prober":
        await repo.add_bounty({
            "decky": log_data.get("decky"),
            "service": "prober",
            "attacker_ip": _fields.get("target_ip", "Unknown"),
            "bounty_type": "fingerprint",
            "payload": {
                "fingerprint_type": "hassh_server",
                "hash": _hassh,
                "target_ip": _fields.get("target_ip"),
                "target_port": _fields.get("target_port"),
                "ssh_banner": _fields.get("ssh_banner"),
                "kex_algorithms": _fields.get("kex_algorithms"),
                "encryption_s2c": _fields.get("encryption_s2c"),
                "mac_s2c": _fields.get("mac_s2c"),
                "compression_s2c": _fields.get("compression_s2c"),
            },
        })

    # 11. TCP/IP stack fingerprint from the active prober
    _tcpfp = _fields.get("tcpfp_hash")
    if _tcpfp and log_data.get("service") == "prober":
        await repo.add_bounty({
            "decky": log_data.get("decky"),
            "service": "prober",
            "attacker_ip": _fields.get("target_ip", "Unknown"),
            "bounty_type": "fingerprint",
            "payload": {
                "fingerprint_type": "tcpfp",
                "hash": _tcpfp,
                "raw": _fields.get("tcpfp_raw"),
                "target_ip": _fields.get("target_ip"),
                "target_port": _fields.get("target_port"),
                "ttl": _fields.get("ttl"),
                "window_size": _fields.get("window_size"),
                "df_bit": _fields.get("df_bit"),
                "mss": _fields.get("mss"),
                "window_scale": _fields.get("window_scale"),
                "sack_ok": _fields.get("sack_ok"),
                "timestamp": _fields.get("timestamp"),
                "options_order": _fields.get("options_order"),
            },
        })
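As a rough illustration of the record shape the worker consumes, the snippet below appends one line to the `.json` log file. The field names are taken from the lookups the worker and `_extract_bounty` actually perform; the exact schema emitted by the collector may carry more keys, and the path is a placeholder. Because `fields.username` and `fields.password` are both present, this record would produce one log row and one `credential` bounty:

```python
import json

# Hypothetical record; real collector output may include trace-context keys etc.
record = {
    "decky": "decky-01",
    "service": "ssh",
    "event_type": "auth_attempt",
    "attacker_ip": "203.0.113.7",
    "fields": {"username": "root", "password": "hunter2"},
}

with open("/var/log/decnet/ingest.json", "a", encoding="utf-8") as f:  # illustrative path
    # The worker only processes complete, newline-terminated lines.
    f.write(json.dumps(record) + "\n")
```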
56  decnet/web/router/__init__.py  Normal file
@@ -0,0 +1,56 @@
from fastapi import APIRouter

from .auth.api_login import router as login_router
from .auth.api_change_pass import router as change_pass_router
from .logs.api_get_logs import router as logs_router
from .logs.api_get_histogram import router as histogram_router
from .bounty.api_get_bounties import router as bounty_router
from .stats.api_get_stats import router as stats_router
from .fleet.api_get_deckies import router as get_deckies_router
from .fleet.api_mutate_decky import router as mutate_decky_router
from .fleet.api_mutate_interval import router as mutate_interval_router
from .fleet.api_deploy_deckies import router as deploy_deckies_router
from .stream.api_stream_events import router as stream_router
from .attackers.api_get_attackers import router as attackers_router
from .attackers.api_get_attacker_detail import router as attacker_detail_router
from .attackers.api_get_attacker_commands import router as attacker_commands_router
from .config.api_get_config import router as config_get_router
from .config.api_update_config import router as config_update_router
from .config.api_manage_users import router as config_users_router
from .config.api_reinit import router as config_reinit_router
from .health.api_get_health import router as health_router

api_router = APIRouter()

# Authentication
api_router.include_router(login_router)
api_router.include_router(change_pass_router)

# Logs & Analytics
api_router.include_router(logs_router)
api_router.include_router(histogram_router)

# Bounty Vault
api_router.include_router(bounty_router)

# Fleet Management
api_router.include_router(get_deckies_router)
api_router.include_router(mutate_decky_router)
api_router.include_router(mutate_interval_router)
api_router.include_router(deploy_deckies_router)

# Attacker Profiles
api_router.include_router(attackers_router)
api_router.include_router(attacker_detail_router)
api_router.include_router(attacker_commands_router)

# Observability
api_router.include_router(stats_router)
api_router.include_router(stream_router)
api_router.include_router(health_router)

# Configuration
api_router.include_router(config_get_router)
api_router.include_router(config_update_router)
api_router.include_router(config_users_router)
api_router.include_router(config_reinit_router)
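A sketch of how this aggregate router would typically be mounted. The `/api/v1` prefix is an assumption inferred from `tokenUrl="/api/v1/auth/login"` in `decnet/web/dependencies.py`; the actual app wiring lives elsewhere in the codebase:

```python
from fastapi import FastAPI

from decnet.web.router import api_router

app = FastAPI()
# Prefix assumed from the OAuth2 tokenUrl; adjust if the app mounts differently.
app.include_router(api_router, prefix="/api/v1")
```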
0  decnet/web/router/attackers/__init__.py  Normal file

41  decnet/web/router/attackers/api_get_attacker_commands.py  Normal file
@@ -0,0 +1,41 @@
from typing import Any, Optional

from fastapi import APIRouter, Depends, HTTPException, Query

from decnet.telemetry import traced as _traced
from decnet.web.dependencies import require_viewer, repo

router = APIRouter()


@router.get(
    "/attackers/{uuid}/commands",
    tags=["Attacker Profiles"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
        404: {"description": "Attacker not found"},
    },
)
@_traced("api.get_attacker_commands")
async def get_attacker_commands(
    uuid: str,
    limit: int = Query(50, ge=1, le=200),
    offset: int = Query(0, ge=0, le=2147483647),
    service: Optional[str] = None,
    user: dict = Depends(require_viewer),
) -> dict[str, Any]:
    """Retrieve paginated commands for an attacker profile."""
    attacker = await repo.get_attacker_by_uuid(uuid)
    if not attacker:
        raise HTTPException(status_code=404, detail="Attacker not found")

    def _norm(v: Optional[str]) -> Optional[str]:
        if v in (None, "null", "NULL", "undefined", ""):
            return None
        return v

    result = await repo.get_attacker_commands(
        uuid=uuid, limit=limit, offset=offset, service=_norm(service),
    )
    return {"total": result["total"], "limit": limit, "offset": offset, "data": result["data"]}
30  decnet/web/router/attackers/api_get_attacker_detail.py  Normal file
@@ -0,0 +1,30 @@
from typing import Any

from fastapi import APIRouter, Depends, HTTPException

from decnet.telemetry import traced as _traced
from decnet.web.dependencies import require_viewer, repo

router = APIRouter()


@router.get(
    "/attackers/{uuid}",
    tags=["Attacker Profiles"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
        404: {"description": "Attacker not found"},
    },
)
@_traced("api.get_attacker_detail")
async def get_attacker_detail(
    uuid: str,
    user: dict = Depends(require_viewer),
) -> dict[str, Any]:
    """Retrieve a single attacker profile by UUID (with its behavior block)."""
    attacker = await repo.get_attacker_by_uuid(uuid)
    if not attacker:
        raise HTTPException(status_code=404, detail="Attacker not found")
    attacker["behavior"] = await repo.get_attacker_behavior(uuid)
    return attacker
48  decnet/web/router/attackers/api_get_attackers.py  Normal file
@@ -0,0 +1,48 @@
from typing import Any, Optional

from fastapi import APIRouter, Depends, Query

from decnet.telemetry import traced as _traced
from decnet.web.dependencies import require_viewer, repo
from decnet.web.db.models import AttackersResponse

router = APIRouter()


@router.get(
    "/attackers",
    response_model=AttackersResponse,
    tags=["Attacker Profiles"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
        422: {"description": "Validation error"},
    },
)
@_traced("api.get_attackers")
async def get_attackers(
    limit: int = Query(50, ge=1, le=1000),
    offset: int = Query(0, ge=0, le=2147483647),
    search: Optional[str] = None,
    sort_by: str = Query("recent", pattern="^(recent|active|traversals)$"),
    service: Optional[str] = None,
    user: dict = Depends(require_viewer),
) -> dict[str, Any]:
    """Retrieve paginated attacker profiles."""
    def _norm(v: Optional[str]) -> Optional[str]:
        if v in (None, "null", "NULL", "undefined", ""):
            return None
        return v

    s = _norm(search)
    svc = _norm(service)
    _data = await repo.get_attackers(limit=limit, offset=offset, search=s, sort_by=sort_by, service=svc)
    _total = await repo.get_total_attackers(search=s, service=svc)

    # Bulk-join behavior rows for the IPs in this page to avoid N+1 queries.
    _ips = {row["ip"] for row in _data if row.get("ip")}
    _behaviors = await repo.get_behaviors_for_ips(_ips) if _ips else {}
    for row in _data:
        row["behavior"] = _behaviors.get(row.get("ip"))

    return {"total": _total, "limit": limit, "offset": offset, "data": _data}
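For reference, a client-side sketch of the pagination/filter contract this endpoint exposes. The base URL and token are placeholders, and `httpx` is just one convenient client; only the parameter names come from the handler signature above:

```python
import httpx

params = {"limit": 25, "offset": 0, "sort_by": "active", "service": "ssh"}
headers = {"Authorization": "Bearer <token>"}  # placeholder token

resp = httpx.get("http://localhost:8000/api/v1/attackers",  # placeholder base URL
                 params=params, headers=headers)
body = resp.json()
# Envelope mirrors the handler's return value:
# {"total": ..., "limit": 25, "offset": 0, "data": [{..., "behavior": {...}}, ...]}
```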
33  decnet/web/router/auth/api_change_pass.py  Normal file
@@ -0,0 +1,33 @@
from typing import Any, Optional

from fastapi import APIRouter, Depends, HTTPException, status

from decnet.telemetry import traced as _traced
from decnet.web.auth import get_password_hash, verify_password
from decnet.web.dependencies import get_current_user_unchecked, repo
from decnet.web.db.models import ChangePasswordRequest

router = APIRouter()


@router.post(
    "/auth/change-password",
    tags=["Authentication"],
    responses={
        400: {"description": "Bad Request (e.g. malformed JSON)"},
        401: {"description": "Could not validate credentials"},
        422: {"description": "Validation error"}
    },
)
@_traced("api.change_password")
async def change_password(
    request: ChangePasswordRequest,
    current_user: str = Depends(get_current_user_unchecked),
) -> dict[str, str]:
    _user: Optional[dict[str, Any]] = await repo.get_user_by_uuid(current_user)
    if not _user or not verify_password(request.old_password, _user["password_hash"]):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect old password",
        )

    _new_hash: str = get_password_hash(request.new_password)
    await repo.update_user_password(current_user, _new_hash, must_change_password=False)
    return {"message": "Password updated successfully"}
47  decnet/web/router/auth/api_login.py  Normal file
@@ -0,0 +1,47 @@
from datetime import timedelta
from typing import Any, Optional

from fastapi import APIRouter, HTTPException, status

from decnet.telemetry import traced as _traced
from decnet.web.auth import (
    ACCESS_TOKEN_EXPIRE_MINUTES,
    create_access_token,
    verify_password,
)
from decnet.web.dependencies import repo
from decnet.web.db.models import LoginRequest, Token

router = APIRouter()


@router.post(
    "/auth/login",
    response_model=Token,
    tags=["Authentication"],
    responses={
        400: {"description": "Bad Request (e.g. malformed JSON)"},
        401: {"description": "Incorrect username or password"},
        422: {"description": "Validation error"}
    },
)
@_traced("api.login")
async def login(request: LoginRequest) -> dict[str, Any]:
    _user: Optional[dict[str, Any]] = await repo.get_user_by_username(request.username)
    if not _user or not verify_password(request.password, _user["password_hash"]):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
            headers={"WWW-Authenticate": "Bearer"},
        )

    _access_token_expires: timedelta = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
    # The token payload uses "uuid" instead of the conventional "sub" claim
    _access_token: str = create_access_token(
        data={"uuid": _user["uuid"]}, expires_delta=_access_token_expires
    )
    return {
        "access_token": _access_token,
        "token_type": "bearer",  # nosec B105 - OAuth2 token type, not a password
        "must_change_password": bool(_user.get("must_change_password", False))
    }
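Taken together with the change-password route above, the forced-rotation flow looks roughly like this on the client side. The field names (`username`, `password`, `old_password`, `new_password`) come from how the two handlers read their request models; the base URL, credentials, and the use of `httpx` are placeholders:

```python
import httpx

BASE = "http://localhost:8000/api/v1"  # placeholder base URL

tok = httpx.post(f"{BASE}/auth/login",
                 json={"username": "admin", "password": "initial-pass"}).json()

if tok["must_change_password"]:
    # This route stays reachable with the flag set because it depends on
    # get_current_user_unchecked rather than get_current_user.
    httpx.post(f"{BASE}/auth/change-password",
               headers={"Authorization": f"Bearer {tok['access_token']}"},
               json={"old_password": "initial-pass", "new_password": "a-better-one"})
```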
38  decnet/web/router/bounty/api_get_bounties.py  Normal file
@@ -0,0 +1,38 @@
from typing import Any, Optional

from fastapi import APIRouter, Depends, Query

from decnet.telemetry import traced as _traced
from decnet.web.dependencies import require_viewer, repo
from decnet.web.db.models import BountyResponse

router = APIRouter()


@router.get(
    "/bounty",
    response_model=BountyResponse,
    tags=["Bounty Vault"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
        422: {"description": "Validation error"},
    },
)
@_traced("api.get_bounties")
async def get_bounties(
    limit: int = Query(50, ge=1, le=1000),
    offset: int = Query(0, ge=0, le=2147483647),
    bounty_type: Optional[str] = None,
    search: Optional[str] = None,
    user: dict = Depends(require_viewer)
) -> dict[str, Any]:
    """Retrieve collected bounties (harvested credentials, payloads, etc.)."""
    def _norm(v: Optional[str]) -> Optional[str]:
        if v in (None, "null", "NULL", "undefined", ""):
            return None
        return v

    bt = _norm(bounty_type)
    s = _norm(search)

    _data = await repo.get_bounties(limit=limit, offset=offset, bounty_type=bt, search=s)
    _total = await repo.get_total_bounties(bounty_type=bt, search=s)
    return {
        "total": _total,
        "limit": limit,
        "offset": offset,
        "data": _data
    }
0  decnet/web/router/config/__init__.py  Normal file

58  decnet/web/router/config/api_get_config.py  Normal file
@@ -0,0 +1,58 @@
from fastapi import APIRouter, Depends

from decnet.env import DECNET_DEVELOPER
from decnet.telemetry import traced as _traced
from decnet.web.dependencies import require_viewer, repo
from decnet.web.db.models import UserResponse

router = APIRouter()

_DEFAULT_DEPLOYMENT_LIMIT = 10
_DEFAULT_MUTATION_INTERVAL = "30m"


@router.get(
    "/config",
    tags=["Configuration"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
    },
)
@_traced("api.get_config")
async def api_get_config(user: dict = Depends(require_viewer)) -> dict:
    limits_state = await repo.get_state("config_limits")
    globals_state = await repo.get_state("config_globals")

    deployment_limit = (
        limits_state.get("deployment_limit", _DEFAULT_DEPLOYMENT_LIMIT)
        if limits_state
        else _DEFAULT_DEPLOYMENT_LIMIT
    )
    global_mutation_interval = (
        globals_state.get("global_mutation_interval", _DEFAULT_MUTATION_INTERVAL)
        if globals_state
        else _DEFAULT_MUTATION_INTERVAL
    )

    base = {
        "role": user["role"],
        "deployment_limit": deployment_limit,
        "global_mutation_interval": global_mutation_interval,
    }

    if user["role"] == "admin":
        all_users = await repo.list_users()
        base["users"] = [
            UserResponse(
                uuid=u["uuid"],
                username=u["username"],
                role=u["role"],
                must_change_password=u["must_change_password"],
            ).model_dump()
            for u in all_users
        ]
    if DECNET_DEVELOPER:
        base["developer_mode"] = True

    return base
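Putting the branches together, a viewer sees only the base keys, while an admin response additionally carries `users`, and `developer_mode` appears whenever the env flag is set. The shapes below are illustrative only; values depend on persisted state:

```python
# Illustrative response shapes, not fixtures from the codebase.
viewer_response = {
    "role": "viewer",
    "deployment_limit": 10,
    "global_mutation_interval": "30m",
}
admin_response = {
    **viewer_response,
    "role": "admin",
    "users": [
        {"uuid": "...", "username": "...", "role": "viewer", "must_change_password": False},
    ],
    # "developer_mode": True  # present only when DECNET_DEVELOPER is enabled
}
```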
131  decnet/web/router/config/api_manage_users.py  Normal file
@@ -0,0 +1,131 @@
import uuid as _uuid

from fastapi import APIRouter, Depends, HTTPException

from decnet.telemetry import traced as _traced
from decnet.web.auth import get_password_hash
from decnet.web.dependencies import require_admin, repo
from decnet.web.db.models import (
    CreateUserRequest,
    UpdateUserRoleRequest,
    ResetUserPasswordRequest,
    UserResponse,
)

router = APIRouter()


@router.post(
    "/config/users",
    tags=["Configuration"],
    responses={
        400: {"description": "Bad Request (e.g. malformed JSON)"},
        401: {"description": "Could not validate credentials"},
        403: {"description": "Admin access required"},
        409: {"description": "Username already exists"},
        422: {"description": "Validation error"},
    },
)
@_traced("api.create_user")
async def api_create_user(
    req: CreateUserRequest,
    admin: dict = Depends(require_admin),
) -> UserResponse:
    existing = await repo.get_user_by_username(req.username)
    if existing:
        raise HTTPException(status_code=409, detail="Username already exists")

    user_uuid = str(_uuid.uuid4())
    await repo.create_user({
        "uuid": user_uuid,
        "username": req.username,
        "password_hash": get_password_hash(req.password),
        "role": req.role,
        "must_change_password": True,  # nosec B105 - not a password
    })
    return UserResponse(
        uuid=user_uuid,
        username=req.username,
        role=req.role,
        must_change_password=True,
    )


@router.delete(
    "/config/users/{user_uuid}",
    tags=["Configuration"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Admin access required / cannot delete self"},
        404: {"description": "User not found"},
    },
)
@_traced("api.delete_user")
async def api_delete_user(
    user_uuid: str,
    admin: dict = Depends(require_admin),
) -> dict[str, str]:
    if user_uuid == admin["uuid"]:
        raise HTTPException(status_code=403, detail="Cannot delete your own account")

    deleted = await repo.delete_user(user_uuid)
    if not deleted:
        raise HTTPException(status_code=404, detail="User not found")
    return {"message": "User deleted"}


@router.put(
    "/config/users/{user_uuid}/role",
    tags=["Configuration"],
    responses={
        400: {"description": "Bad Request (e.g. malformed JSON)"},
        401: {"description": "Could not validate credentials"},
        403: {"description": "Admin access required / cannot change own role"},
        404: {"description": "User not found"},
        422: {"description": "Validation error"},
    },
)
@_traced("api.update_user_role")
async def api_update_user_role(
    user_uuid: str,
    req: UpdateUserRoleRequest,
    admin: dict = Depends(require_admin),
) -> dict[str, str]:
    if user_uuid == admin["uuid"]:
        raise HTTPException(status_code=403, detail="Cannot change your own role")

    target = await repo.get_user_by_uuid(user_uuid)
    if not target:
        raise HTTPException(status_code=404, detail="User not found")

    await repo.update_user_role(user_uuid, req.role)
    return {"message": "User role updated"}


@router.put(
    "/config/users/{user_uuid}/reset-password",
    tags=["Configuration"],
    responses={
        400: {"description": "Bad Request (e.g. malformed JSON)"},
        401: {"description": "Could not validate credentials"},
        403: {"description": "Admin access required"},
        404: {"description": "User not found"},
        422: {"description": "Validation error"},
    },
)
@_traced("api.reset_user_password")
async def api_reset_user_password(
    user_uuid: str,
    req: ResetUserPasswordRequest,
    admin: dict = Depends(require_admin),
) -> dict[str, str]:
    target = await repo.get_user_by_uuid(user_uuid)
    if not target:
        raise HTTPException(status_code=404, detail="User not found")

    await repo.update_user_password(
        user_uuid,
        get_password_hash(req.new_password),
        must_change_password=True,
    )
    return {"message": "Password reset successfully"}
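A brief admin-side sketch of the lifecycle these four routes implement. Request field names follow how each handler reads its model; the base URL, token, and `httpx` client are placeholders:

```python
import httpx

BASE = "http://localhost:8000/api/v1"              # placeholder base URL
H = {"Authorization": "Bearer <admin-token>"}      # placeholder token

new = httpx.post(f"{BASE}/config/users", headers=H,
                 json={"username": "analyst", "password": "temp-pass",
                       "role": "viewer"}).json()
# New accounts always start with must_change_password=True.

httpx.put(f"{BASE}/config/users/{new['uuid']}/role", headers=H,
          json={"role": "admin"})
httpx.put(f"{BASE}/config/users/{new['uuid']}/reset-password", headers=H,
          json={"new_password": "another-temp"})   # re-sets must_change_password
httpx.delete(f"{BASE}/config/users/{new['uuid']}", headers=H)
```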
27  decnet/web/router/config/api_reinit.py  Normal file
@@ -0,0 +1,27 @@
from fastapi import APIRouter, Depends, HTTPException

from decnet.env import DECNET_DEVELOPER
from decnet.telemetry import traced as _traced
from decnet.web.dependencies import require_admin, repo

router = APIRouter()


@router.delete(
    "/config/reinit",
    tags=["Configuration"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Admin access required or developer mode not enabled"},
    },
)
@_traced("api.reinit")
async def api_reinit(admin: dict = Depends(require_admin)) -> dict:
    if not DECNET_DEVELOPER:
        raise HTTPException(status_code=403, detail="Developer mode is not enabled")

    counts = await repo.purge_logs_and_bounties()
    return {
        "message": "Data purged",
        "deleted": counts,
    }
48  decnet/web/router/config/api_update_config.py  Normal file
@@ -0,0 +1,48 @@
from fastapi import APIRouter, Depends

from decnet.telemetry import traced as _traced
from decnet.web.dependencies import require_admin, repo
from decnet.web.db.models import DeploymentLimitRequest, GlobalMutationIntervalRequest

router = APIRouter()


@router.put(
    "/config/deployment-limit",
    tags=["Configuration"],
    responses={
        400: {"description": "Bad Request (e.g. malformed JSON)"},
        401: {"description": "Could not validate credentials"},
        403: {"description": "Admin access required"},
        422: {"description": "Validation error"},
    },
)
@_traced("api.update_deployment_limit")
async def api_update_deployment_limit(
    req: DeploymentLimitRequest,
    admin: dict = Depends(require_admin),
) -> dict[str, str]:
    await repo.set_state("config_limits", {"deployment_limit": req.deployment_limit})
    return {"message": "Deployment limit updated"}


@router.put(
    "/config/global-mutation-interval",
    tags=["Configuration"],
    responses={
        400: {"description": "Bad Request (e.g. malformed JSON)"},
        401: {"description": "Could not validate credentials"},
        403: {"description": "Admin access required"},
        422: {"description": "Validation error"},
    },
)
@_traced("api.update_global_mutation_interval")
async def api_update_global_mutation_interval(
    req: GlobalMutationIntervalRequest,
    admin: dict = Depends(require_admin),
) -> dict[str, str]:
    await repo.set_state(
        "config_globals",
        {"global_mutation_interval": req.global_mutation_interval},
    )
    return {"message": "Global mutation interval updated"}
121  decnet/web/router/fleet/api_deploy_deckies.py  Normal file
@@ -0,0 +1,121 @@
import os

from fastapi import APIRouter, Depends, HTTPException

from decnet.logging import get_logger
from decnet.telemetry import traced as _traced
from decnet.config import DEFAULT_MUTATE_INTERVAL, DecnetConfig, _ROOT
from decnet.engine import deploy as _deploy
from decnet.ini_loader import load_ini_from_string
from decnet.network import detect_interface, detect_subnet, get_host_ip
from decnet.web.dependencies import require_admin, repo
from decnet.web.db.models import DeployIniRequest

log = get_logger("api")

router = APIRouter()


@router.post(
    "/deckies/deploy",
    tags=["Fleet Management"],
    responses={
        400: {"description": "Bad Request (e.g. malformed JSON)"},
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
        409: {"description": "Configuration conflict (e.g. invalid IP allocation or network mismatch)"},
        422: {"description": "Invalid INI config or schema validation error"},
        500: {"description": "Deployment failed"}
    }
)
@_traced("api.deploy_deckies")
async def api_deploy_deckies(req: DeployIniRequest, admin: dict = Depends(require_admin)) -> dict[str, str]:
    from decnet.fleet import build_deckies_from_ini

    try:
        ini = load_ini_from_string(req.ini_content)
    except ValueError as e:
        log.debug("deploy: invalid INI structure: %s", e)
        raise HTTPException(status_code=409, detail=str(e))

    log.debug("deploy: processing configuration for %d deckies", len(ini.deckies))

    state_dict = await repo.get_state("deployment")
    ingest_log_file = os.environ.get("DECNET_INGEST_LOG_FILE")

    if state_dict:
        config = DecnetConfig(**state_dict["config"])
        subnet_cidr = ini.subnet or config.subnet
        gateway = ini.gateway or config.gateway
        host_ip = get_host_ip(config.interface)
        # Always sync the config log_file with the current API ingestion target
        if ingest_log_file:
            config.log_file = ingest_log_file
    else:
        # If no state exists, infer network details from the INI or the host.
        try:
            iface = ini.interface or detect_interface()
            subnet_cidr, gateway = ini.subnet, ini.gateway
            if not subnet_cidr or not gateway:
                detected_subnet, detected_gateway = detect_subnet(iface)
                subnet_cidr = subnet_cidr or detected_subnet
                gateway = gateway or detected_gateway
            host_ip = get_host_ip(iface)
        except RuntimeError as e:
            raise HTTPException(
                status_code=409,
                detail=f"Network configuration conflict: {e}. "
                       "Add a [general] section with interface=, net=, and gw= to the INI."
            )
        config = DecnetConfig(
            mode="unihost",
            interface=iface,
            subnet=subnet_cidr,
            gateway=gateway,
            deckies=[],
            log_file=ingest_log_file,
            ipvlan=False,
            mutate_interval=ini.mutate_interval or DEFAULT_MUTATE_INTERVAL
        )

    try:
        new_decky_configs = build_deckies_from_ini(
            ini, subnet_cidr, gateway, host_ip, False, cli_mutate_interval=None
        )
    except ValueError as e:
        log.debug("deploy: build_deckies_from_ini rejected input: %s", e)
        raise HTTPException(status_code=409, detail=str(e))

    # Merge deckies
    existing_deckies_map = {d.name: d for d in config.deckies}
    for new_decky in new_decky_configs:
        existing_deckies_map[new_decky.name] = new_decky

    # Enforce the deployment limit
    limits_state = await repo.get_state("config_limits")
    deployment_limit = limits_state.get("deployment_limit", 10) if limits_state else 10
    if len(existing_deckies_map) > deployment_limit:
        raise HTTPException(
            status_code=409,
            detail=f"Deployment would result in {len(existing_deckies_map)} deckies, "
                   f"exceeding the configured limit of {deployment_limit}",
        )

    config.deckies = list(existing_deckies_map.values())

    # deploy(config) regenerates docker-compose and runs `up -d --remove-orphans`.
    try:
        if os.environ.get("DECNET_CONTRACT_TEST") != "true":
            _deploy(config)

        # Persist the new state to the DB
        new_state_payload = {
            "config": config.model_dump(),
            "compose_path": str(_ROOT / "docker-compose.yml") if not state_dict else state_dict["compose_path"]
        }
        await repo.set_state("deployment", new_state_payload)
    except Exception as e:
        log.exception("Deployment failed: %s", e)
        raise HTTPException(status_code=500, detail="Deployment failed. Check server logs for details.")

    return {"message": "Deckies deployed successfully"}
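A sketch of the request this endpoint accepts. Only the `[general]` keys named in the handler's own error message (`interface=`, `net=`, `gw=`) are shown; the per-decky section layout is defined by `load_ini_from_string` / `build_deckies_from_ini` and is left as an assumption here, as are the base URL and token:

```python
import httpx

ini_content = """
[general]
interface = eth0
net = 192.168.1.0/24
gw = 192.168.1.1

; Per-decky sections follow; their exact schema is defined by the INI loader.
[decky-01]
; ...
"""

httpx.post("http://localhost:8000/api/v1/deckies/deploy",      # placeholder base URL
           headers={"Authorization": "Bearer <admin-token>"},  # placeholder token
           json={"ini_content": ini_content})
```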
15  decnet/web/router/fleet/api_get_deckies.py  Normal file
@@ -0,0 +1,15 @@
from typing import Any

from fastapi import APIRouter, Depends

from decnet.telemetry import traced as _traced
from decnet.web.dependencies import require_viewer, repo

router = APIRouter()


@router.get(
    "/deckies",
    tags=["Fleet Management"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
        422: {"description": "Validation error"},
    },
)
@_traced("api.get_deckies")
async def get_deckies(user: dict = Depends(require_viewer)) -> list[dict[str, Any]]:
    return await repo.get_deckies()
27  decnet/web/router/fleet/api_mutate_decky.py  Normal file
@@ -0,0 +1,27 @@
import os

from fastapi import APIRouter, Depends, HTTPException, Path

from decnet.telemetry import traced as _traced
from decnet.mutator import mutate_decky
from decnet.web.dependencies import require_admin, repo

router = APIRouter()


@router.post(
    "/deckies/{decky_name}/mutate",
    tags=["Fleet Management"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
        404: {"description": "Decky not found"},
    },
)
@_traced("api.mutate_decky")
async def api_mutate_decky(
    decky_name: str = Path(..., pattern=r"^[a-z0-9\-]{1,64}$"),
    admin: dict = Depends(require_admin),
) -> dict[str, str]:
    if os.environ.get("DECNET_CONTRACT_TEST") == "true":
        return {"message": f"Successfully mutated {decky_name} (Contract Test Mock)"}

    success = await mutate_decky(decky_name, repo=repo)
    if success:
        return {"message": f"Successfully mutated {decky_name}"}
    raise HTTPException(status_code=404, detail=f"Decky {decky_name} not found or failed to mutate")
44  decnet/web/router/fleet/api_mutate_interval.py  Normal file
@@ -0,0 +1,44 @@
from fastapi import APIRouter, Depends, HTTPException

from decnet.telemetry import traced as _traced
from decnet.config import DecnetConfig
from decnet.web.dependencies import require_admin, repo
from decnet.web.db.models import MutateIntervalRequest

router = APIRouter()

# m = minutes, d = days (1440 min), M = months (30 days), y/Y = years (365 days)
_UNIT_TO_MINUTES = {"m": 1, "d": 1440, "M": 43200, "y": 525600, "Y": 525600}


def _parse_duration(s: str) -> int:
    """Convert a duration string (e.g. '5d') to minutes."""
    value, unit = int(s[:-1]), s[-1]
    return value * _UNIT_TO_MINUTES[unit]


@router.put(
    "/deckies/{decky_name}/mutate-interval",
    tags=["Fleet Management"],
    responses={
        400: {"description": "Bad Request (e.g. malformed JSON)"},
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
        404: {"description": "No active deployment or decky not found"},
        422: {"description": "Validation error"}
    },
)
@_traced("api.update_mutate_interval")
async def api_update_mutate_interval(decky_name: str, req: MutateIntervalRequest, admin: dict = Depends(require_admin)) -> dict[str, str]:
    state_dict = await repo.get_state("deployment")
    if not state_dict:
        raise HTTPException(status_code=404, detail="No active deployment")

    config = DecnetConfig(**state_dict["config"])
    compose_path = state_dict["compose_path"]

    decky = next((d for d in config.deckies if d.name == decky_name), None)
    if not decky:
        raise HTTPException(status_code=404, detail="Decky not found")

    decky.mutate_interval = _parse_duration(req.mutate_interval) if req.mutate_interval else None

    await repo.set_state("deployment", {"config": config.model_dump(), "compose_path": compose_path})
    return {"message": "Mutation interval updated"}
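To make the unit table concrete, these checks follow directly from `_UNIT_TO_MINUTES` (shown as standalone assertions, not part of the module):

```python
from decnet.web.router.fleet.api_mutate_interval import _parse_duration

assert _parse_duration("30m") == 30          # minutes pass through unchanged
assert _parse_duration("5d") == 5 * 1440     # days -> minutes
assert _parse_duration("2M") == 2 * 43200    # "M" is a 30-day month
assert _parse_duration("1y") == 525600       # "y"/"Y" are 365-day years
```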
0  decnet/web/router/health/__init__.py  Normal file

83  decnet/web/router/health/api_get_health.py  Normal file
@@ -0,0 +1,83 @@
from typing import Any

from fastapi import APIRouter, Depends
from fastapi.responses import JSONResponse

from decnet.telemetry import traced as _traced
from decnet.web.dependencies import require_viewer, repo
from decnet.web.db.models import HealthResponse, ComponentHealth

router = APIRouter()

_OPTIONAL_SERVICES = {"sniffer_worker"}


@router.get(
    "/health",
    response_model=HealthResponse,
    tags=["Observability"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
        503: {"model": HealthResponse, "description": "System unhealthy"},
    },
)
@_traced("api.get_health")
async def get_health(user: dict = Depends(require_viewer)) -> Any:
    components: dict[str, ComponentHealth] = {}

    # 1. Database
    try:
        await repo.get_total_logs()
        components["database"] = ComponentHealth(status="ok")
    except Exception as exc:
        components["database"] = ComponentHealth(status="failing", detail=str(exc))

    # 2. Background workers
    # Imported here rather than at module load, presumably to avoid a circular import.
    from decnet.web.api import get_background_tasks
    for name, task in get_background_tasks().items():
        if task is None:
            components[name] = ComponentHealth(status="failing", detail="not started")
        elif task.done():
            if task.cancelled():
                detail = "cancelled"
            else:
                exc = task.exception()
                detail = f"exited: {exc}" if exc else "exited unexpectedly"
            components[name] = ComponentHealth(status="failing", detail=detail)
        else:
            components[name] = ComponentHealth(status="ok")

    # 3. Docker daemon
    try:
        import docker

        client = docker.from_env()
        client.ping()
        client.close()
        components["docker"] = ComponentHealth(status="ok")
    except Exception as exc:
        components["docker"] = ComponentHealth(status="failing", detail=str(exc))

    # Compute the overall status
    required_failing = any(
        c.status == "failing"
        for name, c in components.items()
        if name not in _OPTIONAL_SERVICES
    )
    optional_failing = any(
        c.status == "failing"
        for name, c in components.items()
        if name in _OPTIONAL_SERVICES
    )

    if required_failing:
        overall = "unhealthy"
    elif optional_failing:
        overall = "degraded"
    else:
        overall = "healthy"

    result = HealthResponse(status=overall, components=components)
    status_code = 503 if overall == "unhealthy" else 200
    return JSONResponse(content=result.model_dump(), status_code=status_code)
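The status arithmetic reduces to: any required component failing makes the system `unhealthy` (HTTP 503), while failures confined to `_OPTIONAL_SERVICES` only degrade it (HTTP 200). A hypothetical standalone restatement of that rule, handy for testing it in isolation (not part of the module):

```python
def overall_status(statuses: dict[str, str],
                   optional: frozenset = frozenset({"sniffer_worker"})) -> str:
    """Mirrors get_health's aggregation: required failures trump optional ones."""
    if any(s == "failing" for name, s in statuses.items() if name not in optional):
        return "unhealthy"   # a required component is down -> HTTP 503
    if any(s == "failing" for name, s in statuses.items() if name in optional):
        return "degraded"    # only optional services are down -> HTTP 200
    return "healthy"

assert overall_status({"database": "ok", "docker": "ok", "sniffer_worker": "failing"}) == "degraded"
assert overall_status({"database": "failing", "docker": "ok"}) == "unhealthy"
```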
30  decnet/web/router/logs/api_get_histogram.py  Normal file
@@ -0,0 +1,30 @@
from typing import Any, Optional

from fastapi import APIRouter, Depends, Query

from decnet.telemetry import traced as _traced
from decnet.web.dependencies import require_viewer, repo

router = APIRouter()


@router.get(
    "/logs/histogram",
    tags=["Logs"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
        422: {"description": "Validation error"},
    },
)
@_traced("api.get_logs_histogram")
async def get_logs_histogram(
    search: Optional[str] = None,
    start_time: Optional[str] = Query(None),
    end_time: Optional[str] = Query(None),
    interval_minutes: int = Query(15, ge=1),
    user: dict = Depends(require_viewer)
) -> list[dict[str, Any]]:
    def _norm(v: Optional[str]) -> Optional[str]:
        if v in (None, "null", "NULL", "undefined", ""):
            return None
        return v

    s = _norm(search)
    st = _norm(start_time)
    et = _norm(end_time)

    return await repo.get_log_histogram(search=s, start_time=st, end_time=et, interval_minutes=interval_minutes)
39  decnet/web/router/logs/api_get_logs.py  Normal file
@@ -0,0 +1,39 @@
from typing import Any, Optional

from fastapi import APIRouter, Depends, Query

from decnet.telemetry import traced as _traced
from decnet.web.dependencies import require_viewer, repo
from decnet.web.db.models import LogsResponse

router = APIRouter()


@router.get(
    "/logs",
    response_model=LogsResponse,
    tags=["Logs"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
        422: {"description": "Validation error"},
    },
)
@_traced("api.get_logs")
async def get_logs(
    limit: int = Query(50, ge=1, le=1000),
    offset: int = Query(0, ge=0, le=2147483647),
    search: Optional[str] = Query(None, max_length=512),
    start_time: Optional[str] = Query(None),
    end_time: Optional[str] = Query(None),
    user: dict = Depends(require_viewer)
) -> dict[str, Any]:
    def _norm(v: Optional[str]) -> Optional[str]:
        if v in (None, "null", "NULL", "undefined", ""):
            return None
        return v

    s = _norm(search)
    st = _norm(start_time)
    et = _norm(end_time)

    _logs: list[dict[str, Any]] = await repo.get_logs(limit=limit, offset=offset, search=s, start_time=st, end_time=et)
    _total: int = await repo.get_total_logs(search=s, start_time=st, end_time=et)
    return {
        "total": _total,
        "limit": limit,
        "offset": offset,
        "data": _logs
    }
16  decnet/web/router/stats/api_get_stats.py  Normal file
@@ -0,0 +1,16 @@
from typing import Any

from fastapi import APIRouter, Depends

from decnet.telemetry import traced as _traced
from decnet.web.dependencies import require_viewer, repo
from decnet.web.db.models import StatsResponse

router = APIRouter()


@router.get(
    "/stats",
    response_model=StatsResponse,
    tags=["Observability"],
    responses={
        401: {"description": "Could not validate credentials"},
        403: {"description": "Insufficient permissions"},
        422: {"description": "Validation error"},
    },
)
@_traced("api.get_stats")
async def get_stats(user: dict = Depends(require_viewer)) -> dict[str, Any]:
    return await repo.get_stats_summary()
146  decnet/web/router/stream/api_stream_events.py  Normal file
@@ -0,0 +1,146 @@
|
||||
import json
|
||||
import asyncio
|
||||
from typing import AsyncGenerator, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, Query, Request
|
||||
from fastapi.responses import StreamingResponse
|
||||
|
||||
from decnet.env import DECNET_DEVELOPER
|
||||
from decnet.logging import get_logger
|
||||
from decnet.telemetry import traced as _traced, get_tracer as _get_tracer
|
||||
from decnet.web.dependencies import require_stream_viewer, repo
|
||||
|
||||
log = get_logger("api")
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
def _build_trace_links(logs: list[dict]) -> list:
|
||||
"""Build OTEL span links from persisted trace_id/span_id in log rows.
|
||||
|
||||
Returns an empty list when tracing is disabled (no OTEL imports).
|
||||
"""
|
||||
try:
|
||||
from opentelemetry.trace import Link, SpanContext, TraceFlags
|
||||
except ImportError:
|
||||
return []
|
||||
links: list[Link] = []
|
||||
for entry in logs:
|
||||
tid = entry.get("trace_id")
|
||||
sid = entry.get("span_id")
|
||||
if not tid or not sid or tid == "0":
|
||||
continue
|
||||
try:
|
||||
ctx = SpanContext(
|
||||
trace_id=int(tid, 16),
|
||||
span_id=int(sid, 16),
|
||||
is_remote=True,
|
||||
trace_flags=TraceFlags(TraceFlags.SAMPLED),
|
||||
)
|
||||
links.append(Link(ctx))
|
||||
except (ValueError, TypeError):
|
||||
continue
|
||||
return links
|
||||
|
||||
|
||||
@router.get("/stream", tags=["Observability"],
|
||||
responses={
|
||||
200: {
|
||||
"content": {"text/event-stream": {}},
|
||||
"description": "Real-time Server-Sent Events (SSE) stream"
|
||||
},
|
||||
401: {"description": "Could not validate credentials"},
|
||||
403: {"description": "Insufficient permissions"},
|
||||
422: {"description": "Validation error"}
|
||||
},
|
||||
)
@_traced("api.stream_events")
async def stream_events(
    request: Request,
    last_event_id: int = Query(0, alias="lastEventId"),
    search: Optional[str] = None,
    start_time: Optional[str] = None,
    end_time: Optional[str] = None,
    max_output: Optional[int] = Query(None, alias="maxOutput"),
    user: dict = Depends(require_stream_viewer)
) -> StreamingResponse:

    # Prefetch the initial snapshot before entering the streaming generator.
    # With aiomysql (pure async TCP I/O), the first DB await inside the generator
    # fires immediately after the ASGI layer sends the keepalive chunk; the HTTP
    # write and the MySQL read then compete for asyncio I/O callbacks, and the
    # MySQL callback can stall. Running the queries here, in a normal async
    # context with no streaming in flight, avoids that race entirely. aiosqlite
    # is immune because it runs SQLite in a thread, decoupled from the event
    # loop's I/O scheduler.
    _start_id = last_event_id if last_event_id != 0 else await repo.get_max_log_id()
    _initial_stats = await repo.get_stats_summary()
    _initial_histogram = await repo.get_log_histogram(
        search=search, start_time=start_time, end_time=end_time, interval_minutes=15,
    )

    async def event_generator() -> AsyncGenerator[str, None]:
        last_id = _start_id
        stats_interval_sec = 10
        loops_since_stats = 0
        emitted_chunks = 0
        try:
            yield ": keepalive\n\n"  # flush headers immediately

            # Emit the pre-fetched initial snapshot; no DB calls happen in the
            # generator until the polling loop below.
            yield f"event: message\ndata: {json.dumps({'type': 'stats', 'data': _initial_stats})}\n\n"
            yield f"event: message\ndata: {json.dumps({'type': 'histogram', 'data': _initial_histogram})}\n\n"

            while True:
                # Developer-mode escape hatch: cap the number of loop
                # iterations so test clients can consume a bounded stream.
                if DECNET_DEVELOPER and max_output is not None:
                    emitted_chunks += 1
                    if emitted_chunks > max_output:
                        log.debug("Developer mode: max_output reached (%d), closing stream", max_output)
                        break

                if await request.is_disconnected():
                    break

                new_logs = await repo.get_logs_after_id(
                    last_id, limit=50, search=search,
                    start_time=start_time, end_time=end_time,
                )
                if new_logs:
                    last_id = max(entry["id"] for entry in new_logs)
                    # Create a span linking back to the ingestion traces
                    # stored in each log row, closing the pipeline gap.
                    _links = _build_trace_links(new_logs)
                    _tracer = _get_tracer("sse")
                    with _tracer.start_as_current_span(
                        "sse.emit_logs", links=_links,
                        attributes={"log_count": len(new_logs)},
                    ):
                        yield f"event: message\ndata: {json.dumps({'type': 'logs', 'data': new_logs})}\n\n"
                    # Force a stats/histogram refresh on the next check
                    # whenever new logs arrived.
                    loops_since_stats = stats_interval_sec

                if loops_since_stats >= stats_interval_sec:
                    stats = await repo.get_stats_summary()
                    yield f"event: message\ndata: {json.dumps({'type': 'stats', 'data': stats})}\n\n"
                    histogram = await repo.get_log_histogram(
                        search=search, start_time=start_time,
                        end_time=end_time, interval_minutes=15,
                    )
                    yield f"event: message\ndata: {json.dumps({'type': 'histogram', 'data': histogram})}\n\n"
                    loops_since_stats = 0

                loops_since_stats += 1

                await asyncio.sleep(1)
        except asyncio.CancelledError:
            pass
        except Exception:
            log.exception("SSE stream error (last_event_id=%s)", last_event_id)
            yield f"event: error\ndata: {json.dumps({'type': 'error', 'message': 'Stream interrupted'})}\n\n"

    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "X-Accel-Buffering": "no",
        },
    )
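As a usage sketch, the stream can be consumed by any SSE-capable client. The snippet below is illustrative only: the base URL, route path, and bearer token are assumptions, and `maxOutput` is honored only when `DECNET_DEVELOPER` is set. The emission order matches the prefetch discussion above: a keepalive comment first, then the stats and histogram snapshots, then incremental `logs` events.

```python
# Illustrative consumer; base URL, path, and token are assumptions.
import asyncio
import httpx

async def tail_stream() -> None:
    params = {"lastEventId": 0, "maxOutput": 5}  # 0 = start at the current max log id
    headers = {"Authorization": "Bearer <viewer-token>"}  # placeholder token
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream(
            "GET", "http://localhost:8000/stream", params=params, headers=headers
        ) as resp:
            async for line in resp.aiter_lines():
                if line.startswith("data: "):
                    print(line[6:])  # JSON: {'type': 'stats'|'histogram'|'logs', ...}

asyncio.run(tail_stream())
```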
26  decnet_web/.gitignore  vendored  Normal file
@@ -0,0 +1,26 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*

node_modules
dist
dist-ssr
*.local

# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
.env
.env.local
73  decnet_web/README.md  Normal file
@@ -0,0 +1,73 @@
# React + TypeScript + Vite

This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.

Currently, two official plugins are available:

- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Oxc](https://oxc.rs)
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/)

## React Compiler

The React Compiler is not enabled on this template because of its impact on dev & build performance. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).

## Expanding the ESLint configuration

If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:

```js
export default defineConfig([
  globalIgnores(['dist']),
  {
    files: ['**/*.{ts,tsx}'],
    extends: [
      // Other configs...

      // Remove tseslint.configs.recommended and replace with this
      tseslint.configs.recommendedTypeChecked,
      // Alternatively, use this for stricter rules
      tseslint.configs.strictTypeChecked,
      // Optionally, add this for stylistic rules
      tseslint.configs.stylisticTypeChecked,

      // Other configs...
    ],
    languageOptions: {
      parserOptions: {
        project: ['./tsconfig.node.json', './tsconfig.app.json'],
        tsconfigRootDir: import.meta.dirname,
      },
      // other options...
    },
  },
])
```

You can also install [eslint-plugin-react-x](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-x) and [eslint-plugin-react-dom](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-dom) for React-specific lint rules:

```js
// eslint.config.js
import reactX from 'eslint-plugin-react-x'
import reactDom from 'eslint-plugin-react-dom'

export default defineConfig([
  globalIgnores(['dist']),
  {
    files: ['**/*.{ts,tsx}'],
    extends: [
      // Other configs...
      // Enable lint rules for React
      reactX.configs['recommended-typescript'],
      // Enable lint rules for React DOM
      reactDom.configs.recommended,
    ],
    languageOptions: {
      parserOptions: {
        project: ['./tsconfig.node.json', './tsconfig.app.json'],
        tsconfigRootDir: import.meta.dirname,
      },
      // other options...
    },
  },
])
```
23  decnet_web/eslint.config.js  Normal file
@@ -0,0 +1,23 @@
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import tseslint from 'typescript-eslint'
import { defineConfig, globalIgnores } from 'eslint/config'

export default defineConfig([
  globalIgnores(['dist']),
  {
    files: ['**/*.{ts,tsx}'],
    extends: [
      js.configs.recommended,
      tseslint.configs.recommended,
      reactHooks.configs.flat.recommended,
      reactRefresh.configs.vite,
    ],
    languageOptions: {
      ecmaVersion: 2020,
      globals: globals.browser,
    },
  },
])
13  decnet_web/index.html  Normal file
@@ -0,0 +1,13 @@
<!doctype html>
<html lang="en">
  <head>
    <meta charset="UTF-8" />
    <link rel="icon" type="image/svg+xml" href="/favicon.svg" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>decnet_web</title>
  </head>
  <body>
    <div id="root"></div>
    <script type="module" src="/src/main.tsx"></script>
  </body>
</html>
3727  decnet_web/package-lock.json  generated  Normal file
File diff suppressed because it is too large
34  decnet_web/package.json  Normal file
@@ -0,0 +1,34 @@
{
  "name": "decnet_web",
  "private": true,
  "version": "0.0.0",
  "type": "module",
  "scripts": {
    "dev": "vite",
    "build": "tsc -b && vite build",
    "lint": "eslint .",
    "preview": "vite preview"
  },
  "dependencies": {
    "axios": "^1.14.0",
    "lucide-react": "^1.7.0",
    "react": "^19.2.4",
    "react-dom": "^19.2.4",
    "react-router-dom": "^7.14.0",
    "recharts": "^3.8.1"
  },
  "devDependencies": {
    "@eslint/js": "^9.39.4",
    "@types/node": "^24.12.2",
    "@types/react": "^19.2.14",
    "@types/react-dom": "^19.2.3",
    "@vitejs/plugin-react": "^6.0.1",
    "eslint": "^9.39.4",
    "eslint-plugin-react-hooks": "^7.0.1",
    "eslint-plugin-react-refresh": "^0.5.2",
    "globals": "^17.4.0",
    "typescript": "~6.0.2",
    "typescript-eslint": "^8.58.0",
    "vite": "^8.0.4"
  }
}
1  decnet_web/public/favicon.svg  Normal file
File diff suppressed because one or more lines are too long (SVG image, 9.3 KiB)
24  decnet_web/public/icons.svg  Normal file
@@ -0,0 +1,24 @@
<svg xmlns="http://www.w3.org/2000/svg">
|
||||
<symbol id="bluesky-icon" viewBox="0 0 16 17">
|
||||
<g clip-path="url(#bluesky-clip)"><path fill="#08060d" d="M7.75 7.735c-.693-1.348-2.58-3.86-4.334-5.097-1.68-1.187-2.32-.981-2.74-.79C.188 2.065.1 2.812.1 3.251s.241 3.602.398 4.13c.52 1.744 2.367 2.333 4.07 2.145-2.495.37-4.71 1.278-1.805 4.512 3.196 3.309 4.38-.71 4.987-2.746.608 2.036 1.307 5.91 4.93 2.746 2.72-2.746.747-4.143-1.747-4.512 1.702.189 3.55-.4 4.07-2.145.156-.528.397-3.691.397-4.13s-.088-1.186-.575-1.406c-.42-.19-1.06-.395-2.741.79-1.755 1.24-3.64 3.752-4.334 5.099"/></g>
|
||||
<defs><clipPath id="bluesky-clip"><path fill="#fff" d="M.1.85h15.3v15.3H.1z"/></clipPath></defs>
|
||||
</symbol>
|
||||
<symbol id="discord-icon" viewBox="0 0 20 19">
|
||||
<path fill="#08060d" d="M16.224 3.768a14.5 14.5 0 0 0-3.67-1.153c-.158.286-.343.67-.47.976a13.5 13.5 0 0 0-4.067 0c-.128-.306-.317-.69-.476-.976A14.4 14.4 0 0 0 3.868 3.77C1.546 7.28.916 10.703 1.231 14.077a14.7 14.7 0 0 0 4.5 2.306q.545-.748.965-1.587a9.5 9.5 0 0 1-1.518-.74q.191-.14.372-.293c2.927 1.369 6.107 1.369 8.999 0q.183.152.372.294-.723.437-1.52.74.418.838.963 1.588a14.6 14.6 0 0 0 4.504-2.308c.37-3.911-.63-7.302-2.644-10.309m-9.13 8.234c-.878 0-1.599-.82-1.599-1.82 0-.998.705-1.82 1.6-1.82.894 0 1.614.82 1.599 1.82.001 1-.705 1.82-1.6 1.82m5.91 0c-.878 0-1.599-.82-1.599-1.82 0-.998.705-1.82 1.6-1.82.893 0 1.614.82 1.599 1.82 0 1-.706 1.82-1.6 1.82"/>
|
||||
</symbol>
|
||||
<symbol id="documentation-icon" viewBox="0 0 21 20">
|
||||
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="m15.5 13.333 1.533 1.322c.645.555.967.833.967 1.178s-.322.623-.967 1.179L15.5 18.333m-3.333-5-1.534 1.322c-.644.555-.966.833-.966 1.178s.322.623.966 1.179l1.534 1.321"/>
|
||||
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="M17.167 10.836v-4.32c0-1.41 0-2.117-.224-2.68-.359-.906-1.118-1.621-2.08-1.96-.599-.21-1.349-.21-2.848-.21-2.623 0-3.935 0-4.983.369-1.684.591-3.013 1.842-3.641 3.428C3 6.449 3 7.684 3 10.154v2.122c0 2.558 0 3.838.706 4.726q.306.383.713.671c.76.536 1.79.64 3.581.66"/>
|
||||
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="M3 10a2.78 2.78 0 0 1 2.778-2.778c.555 0 1.209.097 1.748-.047.48-.129.854-.503.982-.982.145-.54.048-1.194.048-1.749a2.78 2.78 0 0 1 2.777-2.777"/>
|
||||
</symbol>
|
||||
<symbol id="github-icon" viewBox="0 0 19 19">
|
||||
<path fill="#08060d" fill-rule="evenodd" d="M9.356 1.85C5.05 1.85 1.57 5.356 1.57 9.694a7.84 7.84 0 0 0 5.324 7.44c.387.079.528-.168.528-.376 0-.182-.013-.805-.013-1.454-2.165.467-2.616-.935-2.616-.935-.349-.91-.864-1.143-.864-1.143-.71-.48.051-.48.051-.48.787.051 1.2.805 1.2.805.695 1.194 1.817.857 2.268.649.064-.507.27-.857.49-1.052-1.728-.182-3.545-.857-3.545-3.87 0-.857.31-1.558.8-2.104-.078-.195-.349-1 .077-2.078 0 0 .657-.208 2.14.805a7.5 7.5 0 0 1 1.946-.26c.657 0 1.328.092 1.946.26 1.483-1.013 2.14-.805 2.14-.805.426 1.078.155 1.883.078 2.078.502.546.799 1.247.799 2.104 0 3.013-1.818 3.675-3.558 3.87.284.247.528.714.528 1.454 0 1.052-.012 1.896-.012 2.156 0 .208.142.455.528.377a7.84 7.84 0 0 0 5.324-7.441c.013-4.338-3.48-7.844-7.773-7.844" clip-rule="evenodd"/>
|
||||
</symbol>
|
||||
<symbol id="social-icon" viewBox="0 0 20 20">
|
||||
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="M12.5 6.667a4.167 4.167 0 1 0-8.334 0 4.167 4.167 0 0 0 8.334 0"/>
|
||||
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="M2.5 16.667a5.833 5.833 0 0 1 8.75-5.053m3.837.474.513 1.035c.07.144.257.282.414.309l.93.155c.596.1.736.536.307.965l-.723.73a.64.64 0 0 0-.152.531l.207.903c.164.715-.213.991-.84.618l-.872-.52a.63.63 0 0 0-.577 0l-.872.52c-.624.373-1.003.094-.84-.618l.207-.903a.64.64 0 0 0-.152-.532l-.723-.729c-.426-.43-.289-.864.306-.964l.93-.156a.64.64 0 0 0 .412-.31l.513-1.034c.28-.562.735-.562 1.012 0"/>
|
||||
</symbol>
|
||||
<symbol id="x-icon" viewBox="0 0 19 19">
|
||||
<path fill="#08060d" fill-rule="evenodd" d="M1.893 1.98c.052.072 1.245 1.769 2.653 3.77l2.892 4.114c.183.261.333.48.333.486s-.068.089-.152.183l-.522.593-.765.867-3.597 4.087c-.375.426-.734.834-.798.905a1 1 0 0 0-.118.148c0 .01.236.017.664.017h.663l.729-.83c.4-.457.796-.906.879-.999a692 692 0 0 0 1.794-2.038c.034-.037.301-.34.594-.675l.551-.624.345-.392a7 7 0 0 1 .34-.374c.006 0 .93 1.306 2.052 2.903l2.084 2.965.045.063h2.275c1.87 0 2.273-.003 2.266-.021-.008-.02-1.098-1.572-3.894-5.547-2.013-2.862-2.28-3.246-2.273-3.266.008-.019.282-.332 2.085-2.38l2-2.274 1.567-1.782c.022-.028-.016-.03-.65-.03h-.674l-.3.342a871 871 0 0 1-1.782 2.025c-.067.075-.405.458-.75.852a100 100 0 0 1-.803.91c-.148.172-.299.344-.99 1.127-.304.343-.32.358-.345.327-.015-.019-.904-1.282-1.976-2.808L6.365 1.85H1.8zm1.782.91 8.078 11.294c.772 1.08 1.413 1.973 1.425 1.984.016.017.241.02 1.05.017l1.03-.004-2.694-3.766L7.796 5.75 5.722 2.852l-1.039-.004-1.039-.004z" clip-rule="evenodd"/>
|
||||
</symbol>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 4.9 KiB |
184  decnet_web/src/App.css  Normal file
@@ -0,0 +1,184 @@
.counter {
  font-size: 16px;
  padding: 5px 10px;
  border-radius: 5px;
  color: var(--accent);
  background: var(--accent-bg);
  border: 2px solid transparent;
  transition: border-color 0.3s;
  margin-bottom: 24px;

  &:hover {
    border-color: var(--accent-border);
  }
  &:focus-visible {
    outline: 2px solid var(--accent);
    outline-offset: 2px;
  }
}

.hero {
  position: relative;

  .base,
  .framework,
  .vite {
    inset-inline: 0;
    margin: 0 auto;
  }

  .base {
    width: 170px;
    position: relative;
    z-index: 0;
  }

  .framework,
  .vite {
    position: absolute;
  }

  .framework {
    z-index: 1;
    top: 34px;
    height: 28px;
    transform: perspective(2000px) rotateZ(300deg) rotateX(44deg) rotateY(39deg)
      scale(1.4);
  }

  .vite {
    z-index: 0;
    top: 107px;
    height: 26px;
    width: auto;
    transform: perspective(2000px) rotateZ(300deg) rotateX(40deg) rotateY(39deg)
      scale(0.8);
  }
}

#center {
  display: flex;
  flex-direction: column;
  gap: 25px;
  place-content: center;
  place-items: center;
  flex-grow: 1;

  @media (max-width: 1024px) {
    padding: 32px 20px 24px;
    gap: 18px;
  }
}

#next-steps {
  display: flex;
  border-top: 1px solid var(--border);
  text-align: left;

  & > div {
    flex: 1 1 0;
    padding: 32px;
    @media (max-width: 1024px) {
      padding: 24px 20px;
    }
  }

  .icon {
    margin-bottom: 16px;
    width: 22px;
    height: 22px;
  }

  @media (max-width: 1024px) {
    flex-direction: column;
    text-align: center;
  }
}

#docs {
  border-right: 1px solid var(--border);

  @media (max-width: 1024px) {
    border-right: none;
    border-bottom: 1px solid var(--border);
  }
}

#next-steps ul {
  list-style: none;
  padding: 0;
  display: flex;
  gap: 8px;
  margin: 32px 0 0;

  .logo {
    height: 18px;
  }

  a {
    color: var(--text-h);
    font-size: 16px;
    border-radius: 6px;
    background: var(--social-bg);
    display: flex;
    padding: 6px 12px;
    align-items: center;
    gap: 8px;
    text-decoration: none;
    transition: box-shadow 0.3s;

    &:hover {
      box-shadow: var(--shadow);
    }
    .button-icon {
      height: 18px;
      width: 18px;
    }
  }

  @media (max-width: 1024px) {
    margin-top: 20px;
    flex-wrap: wrap;
    justify-content: center;

    li {
      flex: 1 1 calc(50% - 8px);
    }

    a {
      width: 100%;
      justify-content: center;
      box-sizing: border-box;
    }
  }
}

#spacer {
  height: 88px;
  border-top: 1px solid var(--border);
  @media (max-width: 1024px) {
    height: 48px;
  }
}

.ticks {
  position: relative;
  width: 100%;

  &::before,
  &::after {
    content: '';
    position: absolute;
    top: -4.5px;
    border: 5px solid transparent;
  }

  &::before {
    left: 0;
    border-left-color: var(--border);
  }
  &::after {
    right: 0;
    border-right-color: var(--border);
  }
}
Some files were not shown because too many files have changed in this diff