Compare commits
139 Commits
0927d9e1e8
...
23ec470988
| Author | SHA1 | Date | |
|---|---|---|---|
| 23ec470988 | |||
| 4064e19af1 | |||
|
|
ac4e5e1570 | ||
| fe18575a9c | |||
| 0f63820ee6 | |||
| fdc404760f | |||
| 95190946e0 | |||
| 1692df7360 | |||
| aac39e818e | |||
| ff38d58508 | |||
| f78104e1c8 | |||
| 99be4e64ad | |||
| c3c1cd2fa6 | |||
| 68b13b8a59 | |||
| f8bb134d70 | |||
| 20fba18711 | |||
| b325fc8c5f | |||
| 1484d2f625 | |||
| f8ae9ce2a6 | |||
| 662a5e43e8 | |||
| d63e396410 | |||
| 65d585569b | |||
| c384a3103a | |||
| c79f96f321 | |||
| d77def64c4 | |||
| ce182652ad | |||
| a6063efbb9 | |||
| d4ac53c0c9 | |||
| 9ca3b4691d | |||
| babad5ce65 | |||
| 7abae5571a | |||
| 377ba0410c | |||
| 5ef48d60be | |||
| fe46b8fc0b | |||
| c7713c6228 | |||
| 1196363d0b | |||
| 62a67f3d1d | |||
| 6df2c9ccbf | |||
| b1f6c3b84a | |||
| 5fdfe67f2f | |||
| 4fac9570ec | |||
| 5e83c9e48d | |||
| d8457c57f3 | |||
| 38d37f862b | |||
| fa8b0f3cb5 | |||
| db425df6f2 | |||
| 73e68388c0 | |||
| 682322d564 | |||
| 33885a2eec | |||
| f583b3d699 | |||
| 5cb6666d7b | |||
| 25b6425496 | |||
| 08242a4d84 | |||
| 63fb477e1f | |||
| 94f82c9089 | |||
| 40cd582253 | |||
| 24f02c3466 | |||
| 25ba3fb56a | |||
| 8d023147cc | |||
| 14f7a535db | |||
| cea6279a08 | |||
| 6b8392102e | |||
| d2a569496d | |||
| f20e86826d | |||
| 29da2a75b3 | |||
| 3362325479 | |||
| 34a57d6f09 | |||
| 016115a523 | |||
| 0166d0d559 | |||
| dbf6d13b95 | |||
| d15c106b44 | |||
| 6fc1a2a3ea | |||
| de84cc664f | |||
| 1541b4b7e0 | |||
| 2b7d872ab7 | |||
| 4ae6f4f23d | |||
| 310c2a1fbe | |||
| 44de453bb2 | |||
| ec66e01f55 | |||
| a22f996027 | |||
| b6b046c90b | |||
| 29a2cf2738 | |||
| 551664bc43 | |||
| a2d07bd67c | |||
| a3b92d4dd6 | |||
| 30edf9a55d | |||
| 69626d705d | |||
| 0f86f883fe | |||
| 13f3d15a36 | |||
| 8c7ec2953e | |||
| 0123e1c69e | |||
| 9dc6ff3887 | |||
| fe25798425 | |||
| 6c2478ede3 | |||
| 532a4e2dc5 | |||
| ec503b9ec6 | |||
| fe6b349e5e | |||
| 65b220fdbe | |||
| 6f10e7556f | |||
| fc99375c62 | |||
| 6bdb5922fa | |||
| 32b06afef6 | |||
| 31e0c5151b | |||
| cc3d434c02 | |||
| 1b5d366b38 | |||
| 168ecf14ab | |||
| db9a2699b9 | |||
| d139729fa2 | |||
| dd363629ab | |||
| c544964f57 | |||
| 6e19848723 | |||
| e24da92e0f | |||
| 47f0e6da8f | |||
| 18de381a43 | |||
| eb40be2161 | |||
| 1f5c6604d6 | |||
| a9c7ddec2b | |||
| eb4be44c9a | |||
| 1a2ad27eca | |||
| b1f09b9c6a | |||
| 3656a89d60 | |||
| ba2faba5d5 | |||
| 950280a97b | |||
| 7bc8d75242 | |||
| 5f637b5272 | |||
| 6ed92d080f | |||
| 1b593920cd | |||
| bad90dfb75 | |||
| 05e71f6d2e | |||
| 52c26a2891 | |||
| 81135cb861 | |||
| 50e53120df | |||
| 697929a127 | |||
| b46934db46 | |||
| 5b990743db | |||
| fbb16a960c | |||
| c32ad82d0a | |||
| 850a6f2ad7 | |||
| d344e4c8bb |
12
.env.example
Normal file
12
.env.example
Normal file
@@ -0,0 +1,12 @@
|
||||
# API Options
|
||||
DECNET_API_HOST=0.0.0.0
|
||||
DECNET_API_PORT=8000
|
||||
DECNET_JWT_SECRET=supersecretkey12345678901234567
|
||||
DECNET_INGEST_LOG_FILE=/var/log/decnet/decnet.log
|
||||
|
||||
# Web Dashboard Options
|
||||
DECNET_WEB_HOST=0.0.0.0
|
||||
DECNET_WEB_PORT=8080
|
||||
DECNET_ADMIN_USER=admin
|
||||
DECNET_ADMIN_PASSWORD=admin
|
||||
DECNET_DEVELOPER=False
|
||||
@@ -30,7 +30,7 @@ jobs:
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- run: pip install -e .
|
||||
- run: pip install -e .[dev]
|
||||
- run: pytest tests/ -v --tb=short
|
||||
|
||||
bandit:
|
||||
@@ -53,21 +53,42 @@ jobs:
|
||||
with:
|
||||
python-version: "3.11"
|
||||
- run: pip install pip-audit
|
||||
- run: pip install -e .
|
||||
- run: pip install -e .[dev]
|
||||
- run: pip-audit --skip-editable
|
||||
|
||||
merge-to-testing:
|
||||
name: Merge dev → testing
|
||||
runs-on: ubuntu-latest
|
||||
needs: [lint, test, bandit, pip-audit]
|
||||
if: github.ref == 'refs/heads/dev'
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.DECNET_PR_TOKEN }}
|
||||
- name: Configure git
|
||||
run: |
|
||||
git config user.name "DECNET CI"
|
||||
git config user.email "ci@decnet.local"
|
||||
- name: Merge dev into testing
|
||||
run: |
|
||||
git fetch origin testing
|
||||
git checkout testing
|
||||
git merge origin/dev --no-ff -m "ci: auto-merge dev → testing"
|
||||
git push origin testing
|
||||
|
||||
open-pr:
|
||||
name: Open PR to main
|
||||
runs-on: ubuntu-latest
|
||||
needs: [lint, test, bandit, pip-audit]
|
||||
if: github.ref == 'refs/heads/dev'
|
||||
if: github.ref == 'refs/heads/testing'
|
||||
steps:
|
||||
- name: Open PR via Gitea API
|
||||
run: |
|
||||
echo "--- Checking for existing open PRs ---"
|
||||
LIST_RESPONSE=$(curl -s \
|
||||
-H "Authorization: token ${{ secrets.DECNET_PR_TOKEN }}" \
|
||||
"https://git.resacachile.cl/api/v1/repos/anti/DECNET/pulls?state=open&head=anti:dev&base=main&limit=5")
|
||||
"https://git.resacachile.cl/api/v1/repos/anti/DECNET/pulls?state=open&head=anti:testing&base=main&limit=5")
|
||||
echo "$LIST_RESPONSE"
|
||||
EXISTING=$(echo "$LIST_RESPONSE" | python3 -c "import sys, json; print(len(json.load(sys.stdin)))")
|
||||
echo "Open PRs found: $EXISTING"
|
||||
@@ -80,10 +101,10 @@ jobs:
|
||||
-H "Authorization: token ${{ secrets.DECNET_PR_TOKEN }}" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"title": "Auto PR: dev → main",
|
||||
"head": "dev",
|
||||
"title": "Auto PR: testing → main",
|
||||
"head": "testing",
|
||||
"base": "main",
|
||||
"body": "All CI and security checks passed. Review and merge when ready."
|
||||
"body": "All CI and security checks passed on both dev and testing. Review and merge when ready."
|
||||
}' \
|
||||
"https://git.resacachile.cl/api/v1/repos/anti/DECNET/pulls")
|
||||
echo "$CREATE_RESPONSE"
|
||||
|
||||
@@ -30,5 +30,28 @@ jobs:
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- run: pip install -e .
|
||||
- run: pip install -e .[dev]
|
||||
- run: pytest tests/ -v --tb=short
|
||||
|
||||
bandit:
|
||||
name: SAST (bandit)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
- run: pip install bandit
|
||||
- run: bandit -r decnet/ -ll -x decnet/services/registry.py
|
||||
|
||||
pip-audit:
|
||||
name: Dependency audit (pip-audit)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.11"
|
||||
- run: pip install pip-audit
|
||||
- run: pip install -e .[dev]
|
||||
- run: pip-audit --skip-editable
|
||||
|
||||
8
.gitignore
vendored
8
.gitignore
vendored
@@ -1,4 +1,5 @@
|
||||
.venv/
|
||||
.claude/
|
||||
__pycache__/
|
||||
*.pyc
|
||||
*.pyo
|
||||
@@ -13,6 +14,11 @@ decnet.log*
|
||||
*.loggy
|
||||
*.nmap
|
||||
linterfails.log
|
||||
test-scan
|
||||
webmail
|
||||
windows1
|
||||
*.db
|
||||
decnet.json
|
||||
.env
|
||||
.env.local
|
||||
.coverage
|
||||
.hypothesis/
|
||||
|
||||
@@ -1,75 +0,0 @@
|
||||
# TODO
|
||||
|
||||
This is a list of DEVELOPMENT TODOs. Features, development experience, usage, documentation, etcetera.
|
||||
|
||||
## Core / Hardening
|
||||
|
||||
- [ ] **Attacker fingerprinting** — Beyond IP logging: capture TLS JA3/JA4 hashes, TCP window sizes, User-Agent strings, SSH client banners, and tool signatures (nmap, masscan, Metasploit, Cobalt Strike). Build attacker profiles across sessions.
|
||||
- [ ] **Canary tokens** — Embed canary URLs, fake AWS keys, fake API tokens, and honeydocs (PDF/DOCX with phone-home URLs) into decky filesystems. Fire an alert the moment one is used.
|
||||
- [ ] **Tarpit mode** — Slow down attackers by making services respond extremely slowly (e.g., SSH that takes 60s to reject, HTTP that drip-feeds bytes). Wastes attacker time and resources.
|
||||
- [ ] **Dynamic decky mutation** — Deckies that change their exposed services or OS fingerprint over time to confuse port-scan caching and appear more "alive."
|
||||
- [ ] **Credential harvesting DB** — Every username/password attempt across all services lands in a queryable database. Expose via CLI (`decnet creds`) and flag reuse across deckies.
|
||||
- [ ] **Session recording** — Full session capture for SSH/Telnet (keystroke logs, commands run, files downloaded). Cowrie already does this — surface it better in the CLI and correlation engine.
|
||||
- [ ] **Payload capture** — Store every file uploaded or command executed by an attacker. Hash and auto-submit to VirusTotal or a local sandbox.
|
||||
|
||||
## Detection & Intelligence
|
||||
|
||||
- [ ] **Real-time alerting** — Webhook/Slack/Telegram notifications when an attacker hits a decky for the first time, crosses N deckies (lateral movement), or uses a known bad IP.
|
||||
- [ ] **Threat intel enrichment** — Auto-lookup attacker IPs against AbuseIPDB, Shodan, GreyNoise, and AlienVault OTX. Tag known scanners vs. targeted attackers.
|
||||
- [ ] **Attack campaign clustering** — Group attacker sessions by tooling signatures, timing patterns, and credential sets. Identify coordinated campaigns hitting multiple deckies.
|
||||
- [ ] **GeoIP mapping** — Attacker origin on a world map. Correlate with ASN data to identify cloud exit nodes, VPNs, and Tor exits.
|
||||
- [ ] **TTPs tagging** — Map observed attacker behaviors to MITRE ATT&CK techniques automatically. Tag events in the correlation engine.
|
||||
- [ ] **Honeypot interaction scoring** — Score attackers on a scale: casual scanner vs. persistent targeted attacker, based on depth of interaction and commands run.
|
||||
|
||||
## Dashboard & Visibility
|
||||
|
||||
- [ ] **Web dashboard** — Real-time web UI showing live decky status, attacker activity, traversal graphs, and credential stats. Could be a simple FastAPI + HTMX or a full React app.
|
||||
- [ ] **Pre-built Kibana/Grafana dashboards** — Ship dashboard JSON exports out of the box so ELK/Grafana deployments are plug-and-play.
|
||||
- [ ] **CLI live feed** — `decnet watch` command: tail all decky logs in a unified, colored terminal stream (like `docker-compose logs -f` but prettier).
|
||||
- [ ] **Traversal graph export** — Export attacker traversal graphs as DOT/Graphviz or JSON for visualization in external tools.
|
||||
- [ ] **Daily digest** — Automated daily summary email/report: new attackers, top credentials tried, most-hit services.
|
||||
|
||||
## Deployment & Infrastructure
|
||||
|
||||
- [ ] **SWARM / multihost mode** — Full Ansible-based orchestration for deploying deckies across N real hosts.
|
||||
- [ ] **Terraform/Pulumi provider** — Spin up cloud-hosted deckies on AWS/GCP/Azure with one command. Useful for internet-facing honeynets.
|
||||
- [ ] **Auto-scaling** — When attack traffic increases, automatically spawn more deckies to absorb and log more activity.
|
||||
- [ ] **Kubernetes deployment mode** — Run deckies as Kubernetes pods for environments already running k8s.
|
||||
- [ ] **Proxmox/libvirt backend** — Full VM-based deckies instead of containers, for even more realistic OS fingerprints and behavior. Docker for speed; VMs for realism.
|
||||
- [ ] **Raspberry Pi / ARM support** — Low-cost physical honeynets using RPis. Validate ARM image builds.
|
||||
- [ ] **Decky health monitoring** — Watchdog that auto-restarts crashed deckies and alerts if a service goes dark.
|
||||
|
||||
## Services & Realism
|
||||
|
||||
- [ ] **HTTPS/TLS support** — HTTP honeypot with a self-signed or Let's Encrypt cert. Many real-world services use HTTPS; plain HTTP stands out.
|
||||
- [ ] **Fake Active Directory** — A convincing fake AD/LDAP with fake users, groups, and GPOs. Attacker tools like BloodHound should get juicy (fake) data.
|
||||
- [ ] **Fake file shares** — SMB/NFS shares pre-populated with enticing but fake files: "passwords.xlsx", "vpn_config.ovpn", "backup_keys.tar.gz". All instrumented to detect access.
|
||||
- [ ] **Realistic web apps** — HTTP honeypot serving convincing fake apps: a fake WordPress, a fake phpMyAdmin, a fake Grafana login — all logging every interaction.
|
||||
- [ ] **OT/ICS profiles** — Expand Conpot support: Modbus, DNP3, BACnet, EtherNet/IP. Convincing industrial control system decoys.
|
||||
- [ ] **Printer/IoT archetypes** — Expand existing printer/camera archetypes with actual service emulation (IPP, ONVIF, WS-Discovery).
|
||||
- [ ] **Service interaction depth** — Some services currently just log the connection. Deepen interaction: fake MySQL that accepts queries and returns realistic fake data, fake Redis that stores and retrieves dummy keys.
|
||||
|
||||
## Developer Experience
|
||||
|
||||
- [ ] **Plugin SDK docs** — Full documentation and an example plugin for adding custom services. Lower the barrier for community contributions.
|
||||
- [ ] **Integration tests** — Full deploy/teardown cycle tests against a real Docker daemon (not just unit tests).
|
||||
- [ ] **Per-service tests** — Each of the 29 service implementations deserves its own test coverage.
|
||||
- [x] **CI/CD pipeline** — GitHub/Gitea Actions: run tests on push, lint, build Docker images, publish releases.
|
||||
- ci.yaml contains several steps for the CI/CD pipeline. Mainly:
|
||||
- Trivy checks for Docker containers.
|
||||
- Ruff linting.
|
||||
- Pytests.
|
||||
- Bandit SAST.
|
||||
- pip-audit.
|
||||
- [ ] **Config validation CLI** — `decnet validate my.ini` to dry-check an INI config before deploying.
|
||||
- [ ] **Config generator wizard** — `decnet wizard` interactive prompt to generate an INI config without writing one by hand.
|
||||
- [ ] **Gitea Wiki** — Set up the repository wiki with structured docs across the following pages:
|
||||
- **Home** — Project overview, goals, and navigation index.
|
||||
- **Architecture** — UNIHOST vs SWARM models, the two-network design (decoy-facing vs isolated logging), MACVLAN/IPVLAN, log pipeline (Cowrie → Logstash → ELK → SIEM), WSL limitations.
|
||||
- **General Usage** — What DECNET can do and how: deploying deckies, choosing services, using `--randomize-services`, reading status, tearing down. Archetypes explained (what they are, how they group services into realistic machine personas — e.g. a Windows workstation archetype exposes RDP+SMB+LDAP, a Linux server exposes SSH+FTP+MySQL). List of built-in archetypes. How to pick an archetype vs. manually specifying services.
|
||||
- **Custom Services** — How the plugin registry works, anatomy of a service plugin, step-by-step guide to writing and registering a custom service, how to package it for reuse.
|
||||
- **Configuration Reference** — Full INI config option breakdown, all CLI flags (`--mode`, `--deckies`, `--interface`, `--log-target`, `--randomize-services`, etc.), environment variables.
|
||||
- **Deployment Guides** — UNIHOST quickstart (bare metal/VM), SWARM/multihost with Ansible (once implemented), cloud deployment via Terraform (once implemented), Raspberry Pi / ARM builds.
|
||||
- **Service Reference** — Full table of all 29 services: port, protocol, base image, interaction depth, and any known fingerprint quirks.
|
||||
- **Attacker Intelligence** — Credential harvesting (`decnet creds`), session recording playback, threat intel enrichment (AbuseIPDB, GreyNoise, Shodan, OTX), MITRE ATT&CK tagging, campaign clustering.
|
||||
- **Operations** — Health monitoring, watchdog behavior, teardown procedures, log rotation, troubleshooting common issues.
|
||||
103
GEMINI.md
Normal file
103
GEMINI.md
Normal file
@@ -0,0 +1,103 @@
|
||||
# DECNET (Deception Network) Project Context
|
||||
|
||||
DECNET is a high-fidelity honeypot framework designed to deploy heterogeneous fleets of fake machines (called **deckies**) that appear as real hosts on a local network.
|
||||
|
||||
## Project Overview
|
||||
|
||||
- **Core Purpose:** To lure, profile, and log attacker interactions within a controlled, deceptive environment.
|
||||
- **Key Technology:** Linux-native container networking (MACVLAN/IPvlan) combined with Docker to give each decoy its own MAC address, IP, and realistic TCP/IP stack behavior.
|
||||
- **Main Components:**
|
||||
- **Deckies:** Group of containers sharing a network namespace (one base container + multiple service containers).
|
||||
- **Archetypes:** Pre-defined machine profiles (e.g., `windows-workstation`, `linux-server`) that bundle services and OS fingerprints.
|
||||
- **Services:** Modular honeypot plugins (SSH, SMB, RDP, etc.) built as `BaseService` subclasses.
|
||||
- **OS Fingerprinting:** Sysctl-based TCP/IP stack tuning to spoof OS detection (nmap).
|
||||
- **Logging Pipeline:** RFC 5424 syslog forwarding to an isolated SIEM/ELK stack.
|
||||
|
||||
## Technical Stack
|
||||
|
||||
- **Language:** Python 3.11+
|
||||
- **CLI Framework:** [Typer](https://typer.tiangolo.com/)
|
||||
- **Data Validation:** [Pydantic v2](https://docs.pydantic.dev/)
|
||||
- **Orchestration:** Docker Engine 24+ (via Docker SDK for Python)
|
||||
- **Networking:** MACVLAN (default) or IPvlan L2 (for WiFi/restricted environments).
|
||||
- **Testing:** Pytest (100% pass requirement).
|
||||
- **Formatting/Linting:** Ruff, Bandit (SAST), pip-audit.
|
||||
|
||||
## Architecture
|
||||
|
||||
```text
|
||||
Host NIC (eth0)
|
||||
└── MACVLAN Bridge
|
||||
├── Decky-01 (192.168.1.10) -> [Base] + [SSH] + [HTTP]
|
||||
├── Decky-02 (192.168.1.11) -> [Base] + [SMB] + [RDP]
|
||||
└── ...
|
||||
```
|
||||
|
||||
- **Base Container:** Owns the IP/MAC, sets `sysctls` for OS spoofing, and runs `sleep infinity`.
|
||||
- **Service Containers:** Use `network_mode: service:<base>` to share the identity and networking of the base container.
|
||||
- **Isolation:** Decoy traffic is strictly separated from the logging network.
|
||||
|
||||
## Key Commands
|
||||
|
||||
### Development & Maintenance
|
||||
- **Install (Dev):**
|
||||
- `rm .venv -rf`
|
||||
- `python3 -m venv .venv`
|
||||
- `source .venv/bin/activate`
|
||||
- `pip install -e .`
|
||||
- **Run Tests:** `pytest` (Run before any commit)
|
||||
- **Linting:** `ruff check .`
|
||||
- **Security Scan:** `bandit -r decnet/`
|
||||
- **Web Git:** git.resacachile.cl (Gitea)
|
||||
|
||||
### CLI Usage
|
||||
- **List Services:** `decnet services`
|
||||
- **List Archetypes:** `decnet archetypes`
|
||||
- **Dry Run (Compose Gen):** `decnet deploy --deckies 3 --randomize-services --dry-run`
|
||||
- **Deploy (Full):** `sudo .venv/bin/decnet deploy --interface eth0 --deckies 5 --randomize-services`
|
||||
- **Status:** `decnet status`
|
||||
- **Teardown:** `sudo .venv/bin/decnet teardown --all`
|
||||
|
||||
## Development Conventions
|
||||
|
||||
- **Code Style:**
|
||||
- Strict adherence to Ruff/PEP8.
|
||||
- **Always use typed variables**. If any non-types variables are found, they must be corrected.
|
||||
- The correct way is `x: int = 1`, never `x : int = 1`.
|
||||
- If assignment is present, always use a space between the type and the equal sign `x: int = 1`.
|
||||
- **Never** use lowercase L (l), uppercase o (O) or uppercase i (i) in single-character names.
|
||||
- **Internal vars are to be declared with an underscore** (_internal_variable_name).
|
||||
- **Internal to internal vars are to be declared with double underscore** (__internal_variable_name).
|
||||
- Always use snake_case for code.
|
||||
- Always use PascalCase for classes and generics.
|
||||
- **Testing:** New features MUST include a `pytest` case. 100% test pass rate is mandatory before merging.
|
||||
- **Plugin System:**
|
||||
- New services go in `decnet/services/<name>.py`.
|
||||
- Subclass `decnet.services.base.BaseService`.
|
||||
- The registry uses auto-discovery; no manual registration required.
|
||||
- **Configuration:**
|
||||
- Use Pydantic models in `decnet/config.py` for any new settings.
|
||||
- INI file parsing is handled in `decnet/ini_loader.py`.
|
||||
- **State Management:**
|
||||
- Runtime state is persisted in `decnet-state.json`.
|
||||
- Do not modify this file manually.
|
||||
- **General Development Guidelines**:
|
||||
- **Never** commit broken code, or before running `pytest`s or `bandit` at the project level.
|
||||
- **No matter how small** the changes, they must be committed.
|
||||
- **If new features are addedd** new tests must be added, too.
|
||||
- **Never present broken code to the user**. Test, validate, then present.
|
||||
- **Extensive testing** for every function must be created.
|
||||
- **Always develop in the `dev` branch, never in `main`.**
|
||||
- **Test in the `testing` branch.**
|
||||
|
||||
## Directory Structure
|
||||
|
||||
- `decnet/`: Main source code.
|
||||
- `services/`: Honeypot service implementations.
|
||||
- `logging/`: Syslog formatting and forwarding logic.
|
||||
- `correlation/`: (In Progress) Logic for grouping attacker events.
|
||||
- `templates/`: Dockerfiles and entrypoint scripts for services.
|
||||
- `tests/`: Pytest suite.
|
||||
- `pyproject.toml`: Dependency and entry point definitions.
|
||||
- `CLAUDE.md`: Claude-specific environment guidance.
|
||||
- `DEVELOPMENT.md`: Roadmap and TODOs.
|
||||
48
README.md
48
README.md
@@ -69,7 +69,7 @@ From the outside a decky looks identical to a real machine: it has its own MAC a
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
git clone <repo-url> DECNET
|
||||
git clone https://git.resacachile.cl/anti/DECNET
|
||||
cd DECNET
|
||||
pip install -e .
|
||||
```
|
||||
@@ -208,6 +208,26 @@ sudo decnet deploy --deckies 4 --archetype windows-workstation
|
||||
[corp-workstations]
|
||||
archetype = windows-workstation
|
||||
amount = 4
|
||||
|
||||
[win-fileserver]
|
||||
services = ftp
|
||||
nmap_os = windows
|
||||
os_version = Windows Server 2019
|
||||
|
||||
[dbsrv01]
|
||||
ip = 192.168.1.112
|
||||
services = mysql, http
|
||||
nmap_os = linux
|
||||
|
||||
[dbsrv01.http]
|
||||
server_header = Apache/2.4.54 (Debian)
|
||||
response_code = 200
|
||||
fake_app = wordpress
|
||||
|
||||
[dbsrv01.mysql]
|
||||
mysql_version = 5.7.38-log
|
||||
mysql_banner = MySQL Community Server
|
||||
|
||||
```
|
||||
|
||||
---
|
||||
@@ -460,7 +480,7 @@ Key/value pairs are passed directly to the service plugin as persona config. Com
|
||||
| `mongodb` | `mongo_version` |
|
||||
| `elasticsearch` | `es_version`, `cluster_name` |
|
||||
| `ldap` | `base_dn`, `domain` |
|
||||
| `snmp` | `snmp_community`, `sys_descr` |
|
||||
| `snmp` | `snmp_community`, `sys_descr`, `snmp_archetype` (picks predefined sysDescr for `water_plant`, `hospital`, etc.) |
|
||||
| `mqtt` | `mqtt_version` |
|
||||
| `sip` | `sip_server`, `sip_domain` |
|
||||
| `k8s` | `k8s_version` |
|
||||
@@ -476,6 +496,30 @@ See [`test-full.ini`](test-full.ini) — covers all 25 services across 10 role-t
|
||||
|
||||
---
|
||||
|
||||
## Environment Configuration (.env)
|
||||
|
||||
DECNET supports loading configuration from `.env.local` and `.env` files located in the project root. This is useful for securing secrets like the JWT key and configuring default ports without passing flags every time.
|
||||
|
||||
An example `.env.example` is provided:
|
||||
|
||||
```ini
|
||||
# API Options
|
||||
DECNET_API_HOST=0.0.0.0
|
||||
DECNET_API_PORT=8000
|
||||
DECNET_JWT_SECRET=supersecretkey12345
|
||||
DECNET_INGEST_LOG_FILE=/var/log/decnet/decnet.log
|
||||
|
||||
# Web Dashboard Options
|
||||
DECNET_WEB_HOST=0.0.0.0
|
||||
DECNET_WEB_PORT=8080
|
||||
DECNET_ADMIN_USER=admin
|
||||
DECNET_ADMIN_PASSWORD=admin
|
||||
```
|
||||
|
||||
Copy `.env.example` to `.env.local` and modify it to suit your environment.
|
||||
|
||||
---
|
||||
|
||||
## Logging
|
||||
|
||||
All attacker interactions are forwarded off the decoy network to an isolated logging sink. The log pipeline lives on a separate internal Docker bridge (`decnet_logs`) that is not reachable from the fake LAN.
|
||||
|
||||
@@ -148,7 +148,7 @@ ARCHETYPES: dict[str, Archetype] = {
|
||||
slug="deaddeck",
|
||||
display_name="Deaddeck (Entry Point)",
|
||||
description="Internet-facing entry point with real interactive SSH — no honeypot emulation",
|
||||
services=["real_ssh"],
|
||||
services=["ssh"],
|
||||
preferred_distros=["debian", "ubuntu22"],
|
||||
nmap_os="linux",
|
||||
),
|
||||
@@ -167,4 +167,4 @@ def all_archetypes() -> dict[str, Archetype]:
|
||||
|
||||
|
||||
def random_archetype() -> Archetype:
|
||||
return random.choice(list(ARCHETYPES.values()))
|
||||
return random.choice(list(ARCHETYPES.values())) # nosec B311
|
||||
|
||||
379
decnet/cli.py
379
decnet/cli.py
@@ -8,21 +8,27 @@ Usage:
|
||||
decnet services
|
||||
"""
|
||||
|
||||
import random
|
||||
import signal
|
||||
from typing import Optional
|
||||
|
||||
import typer
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
|
||||
from decnet.env import (
|
||||
DECNET_API_HOST,
|
||||
DECNET_API_PORT,
|
||||
DECNET_INGEST_LOG_FILE,
|
||||
DECNET_WEB_HOST,
|
||||
DECNET_WEB_PORT,
|
||||
)
|
||||
from decnet.archetypes import Archetype, all_archetypes, get_archetype
|
||||
from decnet.config import (
|
||||
DeckyConfig,
|
||||
DecnetConfig,
|
||||
random_hostname,
|
||||
)
|
||||
from decnet.distros import all_distros, get_distro, random_distro
|
||||
from decnet.ini_loader import IniConfig, load_ini
|
||||
from decnet.distros import all_distros, get_distro
|
||||
from decnet.fleet import all_service_names, build_deckies, build_deckies_from_ini
|
||||
from decnet.ini_loader import load_ini
|
||||
from decnet.network import detect_interface, detect_subnet, allocate_ips, get_host_ip
|
||||
from decnet.services.registry import all_services
|
||||
|
||||
@@ -33,167 +39,56 @@ app = typer.Typer(
|
||||
)
|
||||
console = Console()
|
||||
|
||||
def _all_service_names() -> list[str]:
|
||||
"""Return all registered service names from the live plugin registry."""
|
||||
return sorted(all_services().keys())
|
||||
|
||||
def _kill_api() -> None:
|
||||
"""Find and kill any running DECNET API (uvicorn) or mutator processes."""
|
||||
import psutil
|
||||
import os
|
||||
|
||||
def _resolve_distros(
|
||||
distros_explicit: list[str] | None,
|
||||
randomize_distros: bool,
|
||||
n: int,
|
||||
archetype: Archetype | None = None,
|
||||
) -> list[str]:
|
||||
"""Return a list of n distro slugs based on CLI flags or archetype preference."""
|
||||
if distros_explicit:
|
||||
return [distros_explicit[i % len(distros_explicit)] for i in range(n)]
|
||||
if randomize_distros:
|
||||
return [random_distro().slug for _ in range(n)]
|
||||
if archetype:
|
||||
pool = archetype.preferred_distros
|
||||
return [pool[i % len(pool)] for i in range(n)]
|
||||
# Default: cycle through all distros to maximize heterogeneity
|
||||
slugs = list(all_distros().keys())
|
||||
return [slugs[i % len(slugs)] for i in range(n)]
|
||||
|
||||
|
||||
def _build_deckies(
|
||||
n: int,
|
||||
ips: list[str],
|
||||
services_explicit: list[str] | None,
|
||||
randomize_services: bool,
|
||||
distros_explicit: list[str] | None = None,
|
||||
randomize_distros: bool = False,
|
||||
archetype: Archetype | None = None,
|
||||
) -> list[DeckyConfig]:
|
||||
deckies = []
|
||||
used_combos: set[frozenset] = set()
|
||||
distro_slugs = _resolve_distros(distros_explicit, randomize_distros, n, archetype)
|
||||
|
||||
for i, ip in enumerate(ips):
|
||||
name = f"decky-{i + 1:02d}"
|
||||
distro = get_distro(distro_slugs[i])
|
||||
hostname = random_hostname(distro.slug)
|
||||
|
||||
if services_explicit:
|
||||
svc_list = services_explicit
|
||||
elif archetype:
|
||||
svc_list = list(archetype.services)
|
||||
elif randomize_services:
|
||||
svc_pool = _all_service_names()
|
||||
attempts = 0
|
||||
while True:
|
||||
count = random.randint(1, min(3, len(svc_pool)))
|
||||
chosen = frozenset(random.sample(svc_pool, count))
|
||||
attempts += 1
|
||||
if chosen not in used_combos or attempts > 20:
|
||||
break
|
||||
svc_list = list(chosen)
|
||||
used_combos.add(chosen)
|
||||
else:
|
||||
typer.echo("Error: provide --services, --archetype, or --randomize-services.", err=True)
|
||||
raise typer.Exit(1)
|
||||
|
||||
deckies.append(
|
||||
DeckyConfig(
|
||||
name=name,
|
||||
ip=ip,
|
||||
services=svc_list,
|
||||
distro=distro.slug,
|
||||
base_image=distro.image,
|
||||
build_base=distro.build_base,
|
||||
hostname=hostname,
|
||||
archetype=archetype.slug if archetype else None,
|
||||
nmap_os=archetype.nmap_os if archetype else "linux",
|
||||
)
|
||||
)
|
||||
return deckies
|
||||
|
||||
|
||||
def _build_deckies_from_ini(
|
||||
ini: IniConfig,
|
||||
subnet_cidr: str,
|
||||
gateway: str,
|
||||
host_ip: str,
|
||||
randomize: bool,
|
||||
) -> list[DeckyConfig]:
|
||||
"""Build DeckyConfig list from an IniConfig, auto-allocating missing IPs."""
|
||||
from ipaddress import IPv4Address, IPv4Network
|
||||
|
||||
explicit_ips: set[IPv4Address] = {
|
||||
IPv4Address(s.ip) for s in ini.deckies if s.ip
|
||||
}
|
||||
|
||||
net = IPv4Network(subnet_cidr, strict=False)
|
||||
reserved = {
|
||||
net.network_address,
|
||||
net.broadcast_address,
|
||||
IPv4Address(gateway),
|
||||
IPv4Address(host_ip),
|
||||
} | explicit_ips
|
||||
|
||||
auto_pool = (str(addr) for addr in net.hosts() if addr not in reserved)
|
||||
|
||||
deckies: list[DeckyConfig] = []
|
||||
for spec in ini.deckies:
|
||||
# Resolve archetype (if any) — explicit services/distro override it
|
||||
arch: Archetype | None = None
|
||||
if spec.archetype:
|
||||
_killed: bool = False
|
||||
for _proc in psutil.process_iter(['pid', 'name', 'cmdline']):
|
||||
try:
|
||||
arch = get_archetype(spec.archetype)
|
||||
except ValueError as e:
|
||||
console.print(f"[red]{e}[/]")
|
||||
raise typer.Exit(1)
|
||||
_cmd = _proc.info['cmdline']
|
||||
if not _cmd:
|
||||
continue
|
||||
if "uvicorn" in _cmd and "decnet.web.api:app" in _cmd:
|
||||
console.print(f"[yellow]Stopping DECNET API (PID {_proc.info['pid']})...[/]")
|
||||
os.kill(_proc.info['pid'], signal.SIGTERM)
|
||||
_killed = True
|
||||
elif "decnet.cli" in _cmd and "mutate" in _cmd and "--watch" in _cmd:
|
||||
console.print(f"[yellow]Stopping DECNET Mutator Watcher (PID {_proc.info['pid']})...[/]")
|
||||
os.kill(_proc.info['pid'], signal.SIGTERM)
|
||||
_killed = True
|
||||
except (psutil.NoSuchProcess, psutil.AccessDenied):
|
||||
continue
|
||||
|
||||
# Distro: archetype preferred list → random → global cycle
|
||||
distro_pool = arch.preferred_distros if arch else list(all_distros().keys())
|
||||
distro = get_distro(distro_pool[len(deckies) % len(distro_pool)])
|
||||
hostname = random_hostname(distro.slug)
|
||||
if _killed:
|
||||
console.print("[green]Background processes stopped.[/]")
|
||||
|
||||
ip = spec.ip or next(auto_pool, None)
|
||||
if ip is None:
|
||||
raise RuntimeError(
|
||||
f"Not enough free IPs in {subnet_cidr} while assigning IP for '{spec.name}'."
|
||||
|
||||
@app.command()
|
||||
def api(
|
||||
port: int = typer.Option(DECNET_API_PORT, "--port", help="Port for the backend API"),
|
||||
host: str = typer.Option(DECNET_API_HOST, "--host", help="Host IP for the backend API"),
|
||||
log_file: str = typer.Option(DECNET_INGEST_LOG_FILE, "--log-file", help="Path to the DECNET log file to monitor"),
|
||||
) -> None:
|
||||
"""Run the DECNET API and Web Dashboard in standalone mode."""
|
||||
import subprocess # nosec B404
|
||||
import sys
|
||||
import os
|
||||
|
||||
console.print(f"[green]Starting DECNET API on {host}:{port}...[/]")
|
||||
_env: dict[str, str] = os.environ.copy()
|
||||
_env["DECNET_INGEST_LOG_FILE"] = str(log_file)
|
||||
try:
|
||||
subprocess.run( # nosec B603 B404
|
||||
[sys.executable, "-m", "uvicorn", "decnet.web.api:app", "--host", host, "--port", str(port)],
|
||||
env=_env
|
||||
)
|
||||
|
||||
if spec.services:
|
||||
known = set(_all_service_names())
|
||||
unknown = [s for s in spec.services if s not in known]
|
||||
if unknown:
|
||||
console.print(
|
||||
f"[red]Unknown service(s) in [{spec.name}]: {unknown}. "
|
||||
f"Available: {_all_service_names()}[/]"
|
||||
)
|
||||
raise typer.Exit(1)
|
||||
svc_list = spec.services
|
||||
elif arch:
|
||||
svc_list = list(arch.services)
|
||||
elif randomize:
|
||||
svc_pool = _all_service_names()
|
||||
count = random.randint(1, min(3, len(svc_pool)))
|
||||
svc_list = random.sample(svc_pool, count)
|
||||
else:
|
||||
console.print(
|
||||
f"[red]Decky '[{spec.name}]' has no services= in config. "
|
||||
"Add services=, archetype=, or use --randomize-services.[/]"
|
||||
)
|
||||
raise typer.Exit(1)
|
||||
|
||||
# nmap_os priority: explicit INI key > archetype default > "linux"
|
||||
resolved_nmap_os = spec.nmap_os or (arch.nmap_os if arch else "linux")
|
||||
deckies.append(DeckyConfig(
|
||||
name=spec.name,
|
||||
ip=ip,
|
||||
services=svc_list,
|
||||
distro=distro.slug,
|
||||
base_image=distro.image,
|
||||
build_base=distro.build_base,
|
||||
hostname=hostname,
|
||||
archetype=arch.slug if arch else None,
|
||||
service_config=spec.service_config,
|
||||
nmap_os=resolved_nmap_os,
|
||||
))
|
||||
return deckies
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
except (FileNotFoundError, subprocess.SubprocessError):
|
||||
console.print("[red]Failed to start API. Ensure 'uvicorn' is installed in the current environment.[/]")
|
||||
|
||||
|
||||
@app.command()
|
||||
@@ -207,15 +102,19 @@ def deploy(
|
||||
randomize_services: bool = typer.Option(False, "--randomize-services", help="Assign random services to each decky"),
|
||||
distro: Optional[str] = typer.Option(None, "--distro", help="Comma-separated distro slugs, e.g. debian,ubuntu22,rocky9"),
|
||||
randomize_distros: bool = typer.Option(False, "--randomize-distros", help="Assign a random distro to each decky"),
|
||||
log_target: Optional[str] = typer.Option(None, "--log-target", help="Forward logs to ip:port (e.g. 192.168.1.5:5140)"),
|
||||
log_file: Optional[str] = typer.Option(None, "--log-file", help="Write RFC 5424 syslog to this path inside containers (e.g. /var/log/decnet/decnet.log)"),
|
||||
log_file: Optional[str] = typer.Option(DECNET_INGEST_LOG_FILE, "--log-file", help="Host path for the collector to write RFC 5424 logs (e.g. /var/log/decnet/decnet.log)"),
|
||||
archetype_name: Optional[str] = typer.Option(None, "--archetype", "-a", help="Machine archetype slug (e.g. linux-server, windows-workstation)"),
|
||||
mutate_interval: Optional[int] = typer.Option(30, "--mutate-interval", help="Automatically rotate services every N minutes"),
|
||||
dry_run: bool = typer.Option(False, "--dry-run", help="Generate compose file without starting containers"),
|
||||
no_cache: bool = typer.Option(False, "--no-cache", help="Force rebuild all images, ignoring Docker layer cache"),
|
||||
parallel: bool = typer.Option(False, "--parallel", help="Build all images concurrently (enables BuildKit, separates build from up)"),
|
||||
ipvlan: bool = typer.Option(False, "--ipvlan", help="Use IPvlan L2 instead of MACVLAN (required on WiFi interfaces)"),
|
||||
config_file: Optional[str] = typer.Option(None, "--config", "-c", help="Path to INI config file"),
|
||||
api: bool = typer.Option(False, "--api", help="Start the FastAPI backend to ingest and serve logs"),
|
||||
api_port: int = typer.Option(8000, "--api-port", help="Port for the backend API"),
|
||||
) -> None:
|
||||
"""Deploy deckies to the LAN."""
|
||||
import os
|
||||
if mode not in ("unihost", "swarm"):
|
||||
console.print("[red]--mode must be 'unihost' or 'swarm'[/]")
|
||||
raise typer.Exit(1)
|
||||
@@ -230,7 +129,6 @@ def deploy(
|
||||
console.print(f"[red]{e}[/]")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# CLI flags override INI values when explicitly provided
|
||||
iface = interface or ini.interface or detect_interface()
|
||||
subnet_cidr = subnet or ini.subnet
|
||||
effective_gateway = ini.gateway
|
||||
@@ -244,7 +142,6 @@ def deploy(
|
||||
f"[dim]Subnet:[/] {subnet_cidr} [dim]Gateway:[/] {effective_gateway} "
|
||||
f"[dim]Host IP:[/] {host_ip}")
|
||||
|
||||
# Register bring-your-own services from INI before validation
|
||||
if ini.custom_services:
|
||||
from decnet.custom_service import CustomService
|
||||
from decnet.services.registry import register_custom_service
|
||||
@@ -258,11 +155,14 @@ def deploy(
|
||||
)
|
||||
)
|
||||
|
||||
effective_log_target = log_target or ini.log_target
|
||||
effective_log_file = log_file
|
||||
decky_configs = _build_deckies_from_ini(
|
||||
ini, subnet_cidr, effective_gateway, host_ip, randomize_services
|
||||
try:
|
||||
decky_configs = build_deckies_from_ini(
|
||||
ini, subnet_cidr, effective_gateway, host_ip, randomize_services, cli_mutate_interval=mutate_interval
|
||||
)
|
||||
except ValueError as e:
|
||||
console.print(f"[red]{e}[/]")
|
||||
raise typer.Exit(1)
|
||||
# ------------------------------------------------------------------ #
|
||||
# Classic CLI path #
|
||||
# ------------------------------------------------------------------ #
|
||||
@@ -273,13 +173,12 @@ def deploy(
|
||||
|
||||
services_list = [s.strip() for s in services.split(",")] if services else None
|
||||
if services_list:
|
||||
known = set(_all_service_names())
|
||||
known = set(all_service_names())
|
||||
unknown = [s for s in services_list if s not in known]
|
||||
if unknown:
|
||||
console.print(f"[red]Unknown service(s): {unknown}. Available: {_all_service_names()}[/]")
|
||||
console.print(f"[red]Unknown service(s): {unknown}. Available: {all_service_names()}[/]")
|
||||
raise typer.Exit(1)
|
||||
|
||||
# Resolve archetype if provided
|
||||
arch: Archetype | None = None
|
||||
if archetype_name:
|
||||
try:
|
||||
@@ -313,39 +212,113 @@ def deploy(
|
||||
raise typer.Exit(1)
|
||||
|
||||
ips = allocate_ips(subnet_cidr, effective_gateway, host_ip, deckies, ip_start)
|
||||
decky_configs = _build_deckies(
|
||||
decky_configs = build_deckies(
|
||||
deckies, ips, services_list, randomize_services,
|
||||
distros_explicit=distros_list, randomize_distros=randomize_distros,
|
||||
archetype=arch,
|
||||
archetype=arch, mutate_interval=mutate_interval,
|
||||
)
|
||||
effective_log_target = log_target
|
||||
effective_log_file = log_file
|
||||
|
||||
if api and not effective_log_file:
|
||||
effective_log_file = os.path.join(os.getcwd(), "decnet.log")
|
||||
console.print(f"[cyan]API mode enabled: defaulting log-file to {effective_log_file}[/]")
|
||||
|
||||
config = DecnetConfig(
|
||||
mode=mode,
|
||||
interface=iface,
|
||||
subnet=subnet_cidr,
|
||||
gateway=effective_gateway,
|
||||
deckies=decky_configs,
|
||||
log_target=effective_log_target,
|
||||
log_file=effective_log_file,
|
||||
ipvlan=ipvlan,
|
||||
mutate_interval=mutate_interval,
|
||||
)
|
||||
|
||||
if effective_log_target and not dry_run:
|
||||
from decnet.logging.forwarder import probe_log_target
|
||||
if not probe_log_target(effective_log_target):
|
||||
console.print(f"[yellow]Warning: log target {effective_log_target} is unreachable. "
|
||||
"Logs will be lost if it stays down.[/]")
|
||||
from decnet.engine import deploy as _deploy
|
||||
_deploy(config, dry_run=dry_run, no_cache=no_cache, parallel=parallel)
|
||||
|
||||
from decnet.deployer import deploy as _deploy
|
||||
_deploy(config, dry_run=dry_run, no_cache=no_cache)
|
||||
if mutate_interval is not None and not dry_run:
|
||||
import subprocess # nosec B404
|
||||
import sys
|
||||
console.print(f"[green]Starting DECNET Mutator watcher in the background (interval: {mutate_interval}m)...[/]")
|
||||
try:
|
||||
subprocess.Popen( # nosec B603
|
||||
[sys.executable, "-m", "decnet.cli", "mutate", "--watch"],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.STDOUT,
|
||||
start_new_session=True,
|
||||
)
|
||||
except (FileNotFoundError, subprocess.SubprocessError):
|
||||
console.print("[red]Failed to start mutator watcher.[/]")
|
||||
|
||||
if effective_log_file and not dry_run and not api:
|
||||
import subprocess # noqa: F811 # nosec B404
|
||||
import sys
|
||||
from pathlib import Path as _Path
|
||||
_collector_err = _Path(effective_log_file).with_suffix(".collector.log")
|
||||
console.print(f"[bold cyan]Starting log collector[/] → {effective_log_file}")
|
||||
subprocess.Popen( # nosec B603
|
||||
[sys.executable, "-m", "decnet.cli", "collect", "--log-file", str(effective_log_file)],
|
||||
stdin=subprocess.DEVNULL,
|
||||
stdout=open(_collector_err, "a"), # nosec B603
|
||||
stderr=subprocess.STDOUT,
|
||||
start_new_session=True,
|
||||
)
|
||||
|
||||
if api and not dry_run:
|
||||
import subprocess # nosec B404
|
||||
import sys
|
||||
console.print(f"[green]Starting DECNET API on port {api_port}...[/]")
|
||||
_env: dict[str, str] = os.environ.copy()
|
||||
_env["DECNET_INGEST_LOG_FILE"] = str(effective_log_file or "")
|
||||
try:
|
||||
subprocess.Popen( # nosec B603
|
||||
[sys.executable, "-m", "uvicorn", "decnet.web.api:app", "--host", DECNET_API_HOST, "--port", str(api_port)],
|
||||
env=_env,
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.STDOUT
|
||||
)
|
||||
console.print(f"[dim]API running at http://{DECNET_API_HOST}:{api_port}[/]")
|
||||
except (FileNotFoundError, subprocess.SubprocessError):
|
||||
console.print("[red]Failed to start API. Ensure 'uvicorn' is installed in the current environment.[/]")
|
||||
|
||||
|
||||
@app.command()
|
||||
def collect(
|
||||
log_file: str = typer.Option(DECNET_INGEST_LOG_FILE, "--log-file", "-f", help="Path to write RFC 5424 syslog lines and .json records"),
|
||||
) -> None:
|
||||
"""Stream Docker logs from all running decky service containers to a log file."""
|
||||
import asyncio
|
||||
from decnet.collector import log_collector_worker
|
||||
console.print(f"[bold cyan]Collector starting[/] → {log_file}")
|
||||
asyncio.run(log_collector_worker(log_file))
|
||||
|
||||
|
||||
@app.command()
|
||||
def mutate(
|
||||
watch: bool = typer.Option(False, "--watch", "-w", help="Run continuously and mutate deckies according to their interval"),
|
||||
decky_name: Optional[str] = typer.Option(None, "--decky", "-d", help="Force mutate a specific decky immediately"),
|
||||
force_all: bool = typer.Option(False, "--all", help="Force mutate all deckies immediately"),
|
||||
) -> None:
|
||||
"""Manually trigger or continuously watch for decky mutation."""
|
||||
from decnet.mutator import mutate_decky, mutate_all, run_watch_loop
|
||||
|
||||
if watch:
|
||||
run_watch_loop()
|
||||
return
|
||||
|
||||
if decky_name:
|
||||
mutate_decky(decky_name)
|
||||
elif force_all:
|
||||
mutate_all(force=True)
|
||||
else:
|
||||
mutate_all(force=False)
|
||||
|
||||
|
||||
@app.command()
|
||||
def status() -> None:
|
||||
"""Show running deckies and their status."""
|
||||
from decnet.deployer import status as _status
|
||||
from decnet.engine import status as _status
|
||||
_status()
|
||||
|
||||
|
||||
@@ -359,9 +332,12 @@ def teardown(
|
||||
console.print("[red]Specify --all or --id <name>.[/]")
|
||||
raise typer.Exit(1)
|
||||
|
||||
from decnet.deployer import teardown as _teardown
|
||||
from decnet.engine import teardown as _teardown
|
||||
_teardown(decky_id=id_)
|
||||
|
||||
if all_:
|
||||
_kill_api()
|
||||
|
||||
|
||||
@app.command(name="services")
|
||||
def list_services() -> None:
|
||||
@@ -459,3 +435,40 @@ def list_archetypes() -> None:
|
||||
arch.description,
|
||||
)
|
||||
console.print(table)
|
||||
|
||||
|
||||
@app.command(name="web")
|
||||
def serve_web(
|
||||
web_port: int = typer.Option(DECNET_WEB_PORT, "--web-port", help="Port to serve the DECNET Web Dashboard"),
|
||||
host: str = typer.Option(DECNET_WEB_HOST, "--host", help="Host IP to serve the Web Dashboard"),
|
||||
) -> None:
|
||||
"""Serve the DECNET Web Dashboard frontend."""
|
||||
import http.server
|
||||
import socketserver
|
||||
from pathlib import Path
|
||||
|
||||
dist_dir = Path(__file__).parent.parent / "decnet_web" / "dist"
|
||||
|
||||
if not dist_dir.exists():
|
||||
console.print(f"[red]Frontend build not found at {dist_dir}. Make sure you run 'npm run build' inside 'decnet_web'.[/]")
|
||||
raise typer.Exit(1)
|
||||
|
||||
class SPAHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
|
||||
def do_GET(self):
|
||||
path = self.translate_path(self.path)
|
||||
if not Path(path).exists() or Path(path).is_dir():
|
||||
self.path = "/index.html"
|
||||
return super().do_GET()
|
||||
|
||||
import os
|
||||
os.chdir(dist_dir)
|
||||
|
||||
with socketserver.TCPServer((host, web_port), SPAHTTPRequestHandler) as httpd:
|
||||
console.print(f"[green]Serving DECNET Web Dashboard on http://{host}:{web_port}[/]")
|
||||
try:
|
||||
httpd.serve_forever()
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n[dim]Shutting down dashboard server.[/]")
|
||||
|
||||
if __name__ == '__main__': # pragma: no cover
|
||||
app()
|
||||
|
||||
13
decnet/collector/__init__.py
Normal file
13
decnet/collector/__init__.py
Normal file
@@ -0,0 +1,13 @@
|
||||
from decnet.collector.worker import (
|
||||
is_service_container,
|
||||
is_service_event,
|
||||
log_collector_worker,
|
||||
parse_rfc5424,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"is_service_container",
|
||||
"is_service_event",
|
||||
"log_collector_worker",
|
||||
"parse_rfc5424",
|
||||
]
|
||||
200
decnet/collector/worker.py
Normal file
200
decnet/collector/worker.py
Normal file
@@ -0,0 +1,200 @@
|
||||
"""
|
||||
Host-side Docker log collector.
|
||||
|
||||
Streams stdout from all running decky service containers via the Docker SDK,
|
||||
writes RFC 5424 lines to <log_file> and parsed JSON records to <log_file>.json.
|
||||
The ingester tails the .json file; rsyslog can consume the .log file independently.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
logger = logging.getLogger("decnet.collector")
|
||||
|
||||
# ─── RFC 5424 parser ──────────────────────────────────────────────────────────
|
||||
|
||||
_RFC5424_RE = re.compile(
|
||||
r"^<\d+>1 "
|
||||
r"(\S+) " # 1: TIMESTAMP
|
||||
r"(\S+) " # 2: HOSTNAME (decky name)
|
||||
r"(\S+) " # 3: APP-NAME (service)
|
||||
r"- " # PROCID always NILVALUE
|
||||
r"(\S+) " # 4: MSGID (event_type)
|
||||
r"(.+)$", # 5: SD element + optional MSG
|
||||
)
|
||||
_SD_BLOCK_RE = re.compile(r'\[decnet@55555\s+(.*?)\]', re.DOTALL)
|
||||
_PARAM_RE = re.compile(r'(\w+)="((?:[^"\\]|\\.)*)"')
|
||||
_IP_FIELDS = ("src_ip", "src", "client_ip", "remote_ip", "ip")
|
||||
|
||||
|
||||
def parse_rfc5424(line: str) -> Optional[dict[str, Any]]:
|
||||
"""
|
||||
Parse an RFC 5424 DECNET log line into a structured dict.
|
||||
Returns None if the line does not match the expected format.
|
||||
"""
|
||||
m = _RFC5424_RE.match(line)
|
||||
if not m:
|
||||
return None
|
||||
ts_raw, decky, service, event_type, sd_rest = m.groups()
|
||||
|
||||
fields: dict[str, str] = {}
|
||||
msg: str = ""
|
||||
|
||||
if sd_rest.startswith("-"):
|
||||
msg = sd_rest[1:].lstrip()
|
||||
elif sd_rest.startswith("["):
|
||||
block = _SD_BLOCK_RE.search(sd_rest)
|
||||
if block:
|
||||
for k, v in _PARAM_RE.findall(block.group(1)):
|
||||
fields[k] = v.replace('\\"', '"').replace("\\\\", "\\").replace("\\]", "]")
|
||||
msg_match = re.search(r'\]\s+(.+)$', sd_rest)
|
||||
if msg_match:
|
||||
msg = msg_match.group(1).strip()
|
||||
else:
|
||||
msg = sd_rest
|
||||
|
||||
attacker_ip = "Unknown"
|
||||
for fname in _IP_FIELDS:
|
||||
if fname in fields:
|
||||
attacker_ip = fields[fname]
|
||||
break
|
||||
|
||||
try:
|
||||
ts_formatted = datetime.fromisoformat(ts_raw).strftime("%Y-%m-%d %H:%M:%S")
|
||||
except ValueError:
|
||||
ts_formatted = ts_raw
|
||||
|
||||
return {
|
||||
"timestamp": ts_formatted,
|
||||
"decky": decky,
|
||||
"service": service,
|
||||
"event_type": event_type,
|
||||
"attacker_ip": attacker_ip,
|
||||
"fields": fields,
|
||||
"msg": msg,
|
||||
"raw_line": line,
|
||||
}
|
||||
|
||||
|
||||
# ─── Container helpers ────────────────────────────────────────────────────────
|
||||
|
||||
def _load_service_container_names() -> set[str]:
|
||||
"""
|
||||
Return the exact set of service container names from decnet-state.json.
|
||||
Format: {decky_name}-{service_name}, e.g. 'omega-decky-smtp'.
|
||||
Returns an empty set if no state file exists.
|
||||
"""
|
||||
from decnet.config import load_state
|
||||
state = load_state()
|
||||
if state is None:
|
||||
return set()
|
||||
config, _ = state
|
||||
names: set[str] = set()
|
||||
for decky in config.deckies:
|
||||
for svc in decky.services:
|
||||
names.add(f"{decky.name}-{svc.replace('_', '-')}")
|
||||
return names
|
||||
|
||||
|
||||
def is_service_container(container) -> bool:
|
||||
"""Return True if this Docker container is a known DECNET service container."""
|
||||
name = (container if isinstance(container, str) else container.name).lstrip("/")
|
||||
return name in _load_service_container_names()
|
||||
|
||||
|
||||
def is_service_event(attrs: dict) -> bool:
|
||||
"""Return True if a Docker start event is for a known DECNET service container."""
|
||||
name = attrs.get("name", "").lstrip("/")
|
||||
return name in _load_service_container_names()
|
||||
|
||||
|
||||
# ─── Blocking stream worker (runs in a thread) ────────────────────────────────
|
||||
|
||||
def _stream_container(container_id: str, log_path: Path, json_path: Path) -> None:
|
||||
"""Stream logs from one container and append to the host log files."""
|
||||
import docker # type: ignore[import]
|
||||
|
||||
try:
|
||||
client = docker.from_env()
|
||||
container = client.containers.get(container_id)
|
||||
log_stream = container.logs(stream=True, follow=True, stdout=True, stderr=False)
|
||||
buf = ""
|
||||
with (
|
||||
open(log_path, "a", encoding="utf-8") as lf,
|
||||
open(json_path, "a", encoding="utf-8") as jf,
|
||||
):
|
||||
for chunk in log_stream:
|
||||
buf += chunk.decode("utf-8", errors="replace")
|
||||
while "\n" in buf:
|
||||
line, buf = buf.split("\n", 1)
|
||||
line = line.rstrip()
|
||||
if not line:
|
||||
continue
|
||||
lf.write(line + "\n")
|
||||
lf.flush()
|
||||
parsed = parse_rfc5424(line)
|
||||
if parsed:
|
||||
jf.write(json.dumps(parsed) + "\n")
|
||||
jf.flush()
|
||||
except Exception as exc:
|
||||
logger.debug("Log stream ended for container %s: %s", container_id, exc)
|
||||
|
||||
|
||||
# ─── Async collector ──────────────────────────────────────────────────────────
|
||||
|
||||
async def log_collector_worker(log_file: str) -> None:
|
||||
"""
|
||||
Background task: streams Docker logs from all running decky service
|
||||
containers, writing RFC 5424 lines to log_file and parsed JSON records
|
||||
to log_file.json for the ingester to consume.
|
||||
|
||||
Watches Docker events to pick up containers started after initial scan.
|
||||
"""
|
||||
import docker # type: ignore[import]
|
||||
|
||||
log_path = Path(log_file)
|
||||
json_path = log_path.with_suffix(".json")
|
||||
log_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
active: dict[str, asyncio.Task[None]] = {}
|
||||
loop = asyncio.get_running_loop()
|
||||
|
||||
def _spawn(container_id: str, container_name: str) -> None:
|
||||
if container_id not in active or active[container_id].done():
|
||||
active[container_id] = asyncio.ensure_future(
|
||||
asyncio.to_thread(_stream_container, container_id, log_path, json_path),
|
||||
loop=loop,
|
||||
)
|
||||
logger.info("Collecting logs from container: %s", container_name)
|
||||
|
||||
try:
|
||||
client = docker.from_env()
|
||||
|
||||
for container in client.containers.list():
|
||||
if is_service_container(container):
|
||||
_spawn(container.id, container.name.lstrip("/"))
|
||||
|
||||
def _watch_events() -> None:
|
||||
for event in client.events(
|
||||
decode=True,
|
||||
filters={"type": "container", "event": "start"},
|
||||
):
|
||||
attrs = event.get("Actor", {}).get("Attributes", {})
|
||||
cid = event.get("id", "")
|
||||
name = attrs.get("name", "")
|
||||
if cid and is_service_event(attrs):
|
||||
loop.call_soon_threadsafe(_spawn, cid, name)
|
||||
|
||||
await asyncio.to_thread(_watch_events)
|
||||
|
||||
except asyncio.CancelledError:
|
||||
for task in active.values():
|
||||
task.cancel()
|
||||
raise
|
||||
except Exception as exc:
|
||||
logger.error("Collector error: %s", exc)
|
||||
@@ -6,6 +6,12 @@ Network model:
|
||||
All service containers for that decky share the base's network namespace
|
||||
via `network_mode: "service:<base>"`. From the outside, every service on
|
||||
a given decky appears to come from the same IP — exactly like a real host.
|
||||
|
||||
Logging model:
|
||||
Service containers write RFC 5424 lines to stdout. Docker captures them
|
||||
via the json-file driver. The host-side collector (decnet.web.collector)
|
||||
streams those logs and writes them to the host log file for the ingester
|
||||
and rsyslog to consume. No bind mounts or shared volumes are needed.
|
||||
"""
|
||||
|
||||
from pathlib import Path
|
||||
@@ -17,35 +23,19 @@ from decnet.network import MACVLAN_NETWORK_NAME
|
||||
from decnet.os_fingerprint import get_os_sysctls
|
||||
from decnet.services.registry import get_service
|
||||
|
||||
_CONTAINER_LOG_DIR = "/var/log/decnet"
|
||||
|
||||
_LOG_NETWORK = "decnet_logs"
|
||||
|
||||
|
||||
def _resolve_log_file(log_file: str) -> tuple[str, str]:
|
||||
"""
|
||||
Return (host_dir, container_log_path) for a user-supplied log file path.
|
||||
|
||||
The host path is resolved to absolute so Docker can bind-mount it.
|
||||
All containers share the same host directory, mounted at _CONTAINER_LOG_DIR.
|
||||
"""
|
||||
host_path = Path(log_file).resolve()
|
||||
host_dir = str(host_path.parent)
|
||||
container_path = f"{_CONTAINER_LOG_DIR}/{host_path.name}"
|
||||
return host_dir, container_path
|
||||
_DOCKER_LOGGING = {
|
||||
"driver": "json-file",
|
||||
"options": {
|
||||
"max-size": "10m",
|
||||
"max-file": "5",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def generate_compose(config: DecnetConfig) -> dict:
|
||||
"""Build and return the full docker-compose data structure."""
|
||||
services: dict = {}
|
||||
|
||||
log_host_dir: str | None = None
|
||||
log_container_path: str | None = None
|
||||
if config.log_file:
|
||||
log_host_dir, log_container_path = _resolve_log_file(config.log_file)
|
||||
# Ensure the host log directory exists so Docker doesn't create it as root-owned
|
||||
Path(log_host_dir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
for decky in config.deckies:
|
||||
base_key = decky.name # e.g. "decky-01"
|
||||
|
||||
@@ -62,8 +52,6 @@ def generate_compose(config: DecnetConfig) -> dict:
|
||||
}
|
||||
},
|
||||
}
|
||||
if config.log_target:
|
||||
base["networks"][_LOG_NETWORK] = {}
|
||||
|
||||
# Inject TCP/IP stack sysctls to spoof the claimed OS fingerprint.
|
||||
# Only the base container needs this — service containers inherit the
|
||||
@@ -77,23 +65,18 @@ def generate_compose(config: DecnetConfig) -> dict:
|
||||
for svc_name in decky.services:
|
||||
svc = get_service(svc_name)
|
||||
svc_cfg = decky.service_config.get(svc_name, {})
|
||||
fragment = svc.compose_fragment(
|
||||
decky.name, log_target=config.log_target, service_cfg=svc_cfg
|
||||
)
|
||||
fragment = svc.compose_fragment(decky.name, service_cfg=svc_cfg)
|
||||
|
||||
# Inject the per-decky base image into build services so containers
|
||||
# vary by distro and don't all fingerprint as debian:bookworm-slim.
|
||||
# Services that need a fixed upstream image (e.g. conpot) can pre-set
|
||||
# build.args.BASE_IMAGE in their compose_fragment() to opt out.
|
||||
if "build" in fragment:
|
||||
fragment["build"].setdefault("args", {})["BASE_IMAGE"] = decky.build_base
|
||||
args = fragment["build"].setdefault("args", {})
|
||||
args.setdefault("BASE_IMAGE", decky.build_base)
|
||||
|
||||
fragment.setdefault("environment", {})
|
||||
fragment["environment"]["HOSTNAME"] = decky.hostname
|
||||
if log_host_dir and log_container_path:
|
||||
fragment["environment"]["DECNET_LOG_FILE"] = log_container_path
|
||||
fragment.setdefault("volumes", [])
|
||||
mount = f"{log_host_dir}:{_CONTAINER_LOG_DIR}"
|
||||
if mount not in fragment["volumes"]:
|
||||
fragment["volumes"].append(mount)
|
||||
|
||||
# Share the base container's network — no own IP needed
|
||||
fragment["network_mode"] = f"service:{base_key}"
|
||||
@@ -103,6 +86,9 @@ def generate_compose(config: DecnetConfig) -> dict:
|
||||
fragment.pop("hostname", None)
|
||||
fragment.pop("networks", None)
|
||||
|
||||
# Rotate Docker logs so disk usage is bounded
|
||||
fragment["logging"] = _DOCKER_LOGGING
|
||||
|
||||
services[f"{decky.name}-{svc_name}"] = fragment
|
||||
|
||||
# Network definitions
|
||||
@@ -111,8 +97,6 @@ def generate_compose(config: DecnetConfig) -> dict:
|
||||
"external": True, # created by network.py before compose up
|
||||
}
|
||||
}
|
||||
if config.log_target:
|
||||
networks[_LOG_NETWORK] = {"driver": "bridge", "internal": True}
|
||||
|
||||
return {
|
||||
"version": "3.8",
|
||||
|
||||
@@ -7,11 +7,14 @@ import json
|
||||
from pathlib import Path
|
||||
from typing import Literal
|
||||
|
||||
from pydantic import BaseModel, field_validator
|
||||
from pydantic import BaseModel, field_validator # field_validator used by DeckyConfig
|
||||
|
||||
from decnet.distros import random_hostname as _random_hostname
|
||||
|
||||
STATE_FILE = Path("decnet-state.json")
|
||||
# Calculate absolute path to the project root (where the config file resides)
|
||||
_ROOT: Path = Path(__file__).parent.parent.absolute()
|
||||
STATE_FILE: Path = _ROOT / "decnet-state.json"
|
||||
DEFAULT_MUTATE_INTERVAL: int = 30 # default rotation interval in minutes
|
||||
|
||||
|
||||
def random_hostname(distro_slug: str = "debian") -> str:
|
||||
@@ -29,6 +32,8 @@ class DeckyConfig(BaseModel):
|
||||
archetype: str | None = None # archetype slug if spawned from an archetype profile
|
||||
service_config: dict[str, dict] = {} # optional per-service persona config
|
||||
nmap_os: str = "linux" # OS family for TCP/IP stack spoofing (see os_fingerprint.py)
|
||||
mutate_interval: int | None = None # automatic rotation interval in minutes
|
||||
last_mutated: float = 0.0 # timestamp of last mutation
|
||||
|
||||
@field_validator("services")
|
||||
@classmethod
|
||||
@@ -44,19 +49,9 @@ class DecnetConfig(BaseModel):
|
||||
subnet: str
|
||||
gateway: str
|
||||
deckies: list[DeckyConfig]
|
||||
log_target: str | None = None # "ip:port" or None
|
||||
log_file: str | None = None # path for RFC 5424 syslog file output
|
||||
log_file: str | None = None # host path where the collector writes the log file
|
||||
ipvlan: bool = False # use IPvlan L2 instead of MACVLAN (WiFi-friendly)
|
||||
|
||||
@field_validator("log_target")
|
||||
@classmethod
|
||||
def validate_log_target(cls, v: str | None) -> str | None:
|
||||
if v is None:
|
||||
return v
|
||||
parts = v.rsplit(":", 1)
|
||||
if len(parts) != 2 or not parts[1].isdigit():
|
||||
raise ValueError("log_target must be in ip:port format, e.g. 192.168.1.5:5140")
|
||||
return v
|
||||
mutate_interval: int | None = DEFAULT_MUTATE_INTERVAL # global automatic rotation interval in minutes
|
||||
|
||||
|
||||
def save_state(config: DecnetConfig, compose_path: Path) -> None:
|
||||
|
||||
@@ -97,8 +97,8 @@ def random_hostname(distro_slug: str = "debian") -> str:
|
||||
"""Generate a plausible hostname for the given distro style."""
|
||||
profile = DISTROS.get(distro_slug)
|
||||
style = profile.hostname_style if profile else "generic"
|
||||
word = random.choice(_NAME_WORDS)
|
||||
num = random.randint(10, 99)
|
||||
word = random.choice(_NAME_WORDS) # nosec B311
|
||||
num = random.randint(10, 99) # nosec B311
|
||||
|
||||
if style == "rhel":
|
||||
# RHEL/CentOS/Fedora convention: word+num.localdomain
|
||||
@@ -107,7 +107,7 @@ def random_hostname(distro_slug: str = "debian") -> str:
|
||||
return f"{word}-{num}"
|
||||
elif style == "rolling":
|
||||
# Kali/Arch: just a word, no suffix
|
||||
return f"{word}-{random.choice(_NAME_WORDS)}"
|
||||
return f"{word}-{random.choice(_NAME_WORDS)}" # nosec B311
|
||||
else:
|
||||
# Debian/Ubuntu: SRV-WORD-nn
|
||||
return f"SRV-{word.upper()}-{num}"
|
||||
@@ -122,7 +122,7 @@ def get_distro(slug: str) -> DistroProfile:
|
||||
|
||||
|
||||
def random_distro() -> DistroProfile:
|
||||
return random.choice(list(DISTROS.values()))
|
||||
return random.choice(list(DISTROS.values())) # nosec B311
|
||||
|
||||
|
||||
def all_distros() -> dict[str, DistroProfile]:
|
||||
|
||||
15
decnet/engine/__init__.py
Normal file
15
decnet/engine/__init__.py
Normal file
@@ -0,0 +1,15 @@
|
||||
from decnet.engine.deployer import (
|
||||
COMPOSE_FILE,
|
||||
_compose_with_retry,
|
||||
deploy,
|
||||
status,
|
||||
teardown,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"COMPOSE_FILE",
|
||||
"_compose_with_retry",
|
||||
"deploy",
|
||||
"status",
|
||||
"teardown",
|
||||
]
|
||||
@@ -2,7 +2,8 @@
|
||||
Deploy, teardown, and status via Docker SDK + subprocess docker compose.
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import shutil
|
||||
import subprocess # nosec B404
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
@@ -27,11 +28,32 @@ from decnet.network import (
|
||||
|
||||
console = Console()
|
||||
COMPOSE_FILE = Path("decnet-compose.yml")
|
||||
_CANONICAL_LOGGING = Path(__file__).parent.parent.parent / "templates" / "decnet_logging.py"
|
||||
|
||||
|
||||
def _compose(*args: str, compose_file: Path = COMPOSE_FILE) -> None:
|
||||
def _sync_logging_helper(config: DecnetConfig) -> None:
|
||||
"""Copy the canonical decnet_logging.py into every active template build context."""
|
||||
from decnet.services.registry import get_service
|
||||
seen: set[Path] = set()
|
||||
for decky in config.deckies:
|
||||
for svc_name in decky.services:
|
||||
svc = get_service(svc_name)
|
||||
if svc is None:
|
||||
continue
|
||||
ctx = svc.dockerfile_context()
|
||||
if ctx is None or ctx in seen:
|
||||
continue
|
||||
seen.add(ctx)
|
||||
dest = ctx / "decnet_logging.py"
|
||||
if not dest.exists() or dest.read_bytes() != _CANONICAL_LOGGING.read_bytes():
|
||||
shutil.copy2(_CANONICAL_LOGGING, dest)
|
||||
|
||||
|
||||
def _compose(*args: str, compose_file: Path = COMPOSE_FILE, env: dict | None = None) -> None:
|
||||
import os
|
||||
cmd = ["docker", "compose", "-f", str(compose_file), *args]
|
||||
subprocess.run(cmd, check=True)
|
||||
merged = {**os.environ, **(env or {})}
|
||||
subprocess.run(cmd, check=True, env=merged) # nosec B603
|
||||
|
||||
|
||||
_PERMANENT_ERRORS = (
|
||||
@@ -48,12 +70,15 @@ def _compose_with_retry(
|
||||
compose_file: Path = COMPOSE_FILE,
|
||||
retries: int = 3,
|
||||
delay: float = 5.0,
|
||||
env: dict | None = None,
|
||||
) -> None:
|
||||
"""Run a docker compose command, retrying on transient failures."""
|
||||
import os
|
||||
last_exc: subprocess.CalledProcessError | None = None
|
||||
cmd = ["docker", "compose", "-f", str(compose_file), *args]
|
||||
merged = {**os.environ, **(env or {})}
|
||||
for attempt in range(1, retries + 1):
|
||||
result = subprocess.run(cmd, capture_output=True, text=True)
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, env=merged) # nosec B603
|
||||
if result.returncode == 0:
|
||||
if result.stdout:
|
||||
print(result.stdout, end="")
|
||||
@@ -80,10 +105,9 @@ def _compose_with_retry(
|
||||
raise last_exc
|
||||
|
||||
|
||||
def deploy(config: DecnetConfig, dry_run: bool = False, no_cache: bool = False) -> None:
|
||||
def deploy(config: DecnetConfig, dry_run: bool = False, no_cache: bool = False, parallel: bool = False) -> None:
|
||||
client = docker.from_env()
|
||||
|
||||
# --- Network setup ---
|
||||
ip_list = [d.ip for d in config.deckies]
|
||||
decky_range = ips_to_range(ip_list)
|
||||
host_ip = get_host_ip(config.interface)
|
||||
@@ -110,7 +134,8 @@ def deploy(config: DecnetConfig, dry_run: bool = False, no_cache: bool = False)
|
||||
)
|
||||
setup_host_macvlan(config.interface, host_ip, decky_range)
|
||||
|
||||
# --- Compose generation ---
|
||||
_sync_logging_helper(config)
|
||||
|
||||
compose_path = write_compose(config, COMPOSE_FILE)
|
||||
console.print(f"[bold cyan]Compose file written[/] → {compose_path}")
|
||||
|
||||
@@ -118,16 +143,24 @@ def deploy(config: DecnetConfig, dry_run: bool = False, no_cache: bool = False)
|
||||
console.print("[yellow]Dry run — no containers started.[/]")
|
||||
return
|
||||
|
||||
# --- Save state before bring-up ---
|
||||
save_state(config, compose_path)
|
||||
|
||||
# --- Bring up ---
|
||||
build_env = {"DOCKER_BUILDKIT": "1"} if parallel else {}
|
||||
|
||||
console.print("[bold cyan]Building images and starting deckies...[/]")
|
||||
build_args = ["build"]
|
||||
if no_cache:
|
||||
build_args.append("--no-cache")
|
||||
|
||||
if parallel:
|
||||
console.print("[bold cyan]Parallel build enabled — building all images concurrently...[/]")
|
||||
_compose_with_retry(*build_args, compose_file=compose_path, env=build_env)
|
||||
_compose_with_retry("up", "-d", compose_file=compose_path, env=build_env)
|
||||
else:
|
||||
if no_cache:
|
||||
_compose_with_retry("build", "--no-cache", compose_file=compose_path)
|
||||
_compose_with_retry("up", "--build", "-d", compose_file=compose_path)
|
||||
|
||||
# --- Status summary ---
|
||||
_print_status(config)
|
||||
|
||||
|
||||
@@ -141,7 +174,6 @@ def teardown(decky_id: str | None = None) -> None:
|
||||
client = docker.from_env()
|
||||
|
||||
if decky_id:
|
||||
# Bring down only the services matching this decky
|
||||
svc_names = [f"{decky_id}-{svc}" for svc in [d.services for d in config.deckies if d.name == decky_id]]
|
||||
if not svc_names:
|
||||
console.print(f"[red]Decky '{decky_id}' not found in current deployment.[/]")
|
||||
@@ -159,6 +191,7 @@ def teardown(decky_id: str | None = None) -> None:
|
||||
teardown_host_macvlan(decky_range)
|
||||
remove_macvlan_network(client)
|
||||
clear_state()
|
||||
|
||||
net_driver = "IPvlan" if config.ipvlan else "MACVLAN"
|
||||
console.print(f"[green]All deckies torn down. {net_driver} network removed.[/]")
|
||||
|
||||
@@ -179,7 +212,7 @@ def status() -> None:
|
||||
table.add_column("Hostname")
|
||||
table.add_column("Status")
|
||||
|
||||
running = {c.name: c.status for c in client.containers.list(all=True)}
|
||||
running = {c.name: c.status for c in client.containers.list(all=True, ignore_removed=True)}
|
||||
|
||||
for decky in config.deckies:
|
||||
statuses = []
|
||||
64
decnet/env.py
Normal file
64
decnet/env.py
Normal file
@@ -0,0 +1,64 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Calculate absolute path to the project root
|
||||
_ROOT: Path = Path(__file__).parent.parent.absolute()
|
||||
|
||||
# Load .env.local first, then fallback to .env
|
||||
load_dotenv(_ROOT / ".env.local")
|
||||
load_dotenv(_ROOT / ".env")
|
||||
|
||||
|
||||
def _port(name: str, default: int) -> int:
|
||||
raw = os.environ.get(name, str(default))
|
||||
try:
|
||||
value = int(raw)
|
||||
except ValueError:
|
||||
raise ValueError(f"Environment variable '{name}' must be an integer, got '{raw}'.")
|
||||
if not (1 <= value <= 65535):
|
||||
raise ValueError(f"Environment variable '{name}' must be 1–65535, got {value}.")
|
||||
return value
|
||||
|
||||
|
||||
def _require_env(name: str) -> str:
|
||||
"""Return the env var value or raise at startup if it is unset or a known-bad default."""
|
||||
_KNOWN_BAD = {"fallback-secret-key-change-me", "admin", "secret", "password", "changeme"}
|
||||
value = os.environ.get(name)
|
||||
if not value:
|
||||
raise ValueError(
|
||||
f"Required environment variable '{name}' is not set. "
|
||||
f"Set it in .env.local or export it before starting DECNET."
|
||||
)
|
||||
|
||||
if any(k.startswith("PYTEST") for k in os.environ):
|
||||
return value
|
||||
|
||||
if value.lower() in _KNOWN_BAD:
|
||||
raise ValueError(
|
||||
f"Environment variable '{name}' is set to an insecure default ('{value}'). "
|
||||
f"Choose a strong, unique value before starting DECNET."
|
||||
)
|
||||
return value
|
||||
|
||||
|
||||
# API Options
|
||||
DECNET_API_HOST: str = os.environ.get("DECNET_API_HOST", "0.0.0.0") # nosec B104
|
||||
DECNET_API_PORT: int = _port("DECNET_API_PORT", 8000)
|
||||
DECNET_JWT_SECRET: str = _require_env("DECNET_JWT_SECRET")
|
||||
DECNET_INGEST_LOG_FILE: str | None = os.environ.get("DECNET_INGEST_LOG_FILE", "/var/log/decnet/decnet.log")
|
||||
|
||||
# Web Dashboard Options
|
||||
DECNET_WEB_HOST: str = os.environ.get("DECNET_WEB_HOST", "0.0.0.0") # nosec B104
|
||||
DECNET_WEB_PORT: int = _port("DECNET_WEB_PORT", 8080)
|
||||
DECNET_ADMIN_USER: str = os.environ.get("DECNET_ADMIN_USER", "admin")
|
||||
DECNET_ADMIN_PASSWORD: str = os.environ.get("DECNET_ADMIN_PASSWORD", "admin")
|
||||
DECNET_DEVELOPER: bool = os.environ.get("DECNET_DEVELOPER", "False").lower() == "true"
|
||||
|
||||
# CORS — comma-separated list of allowed origins for the web dashboard API.
|
||||
# Defaults to the configured web host/port. Override with DECNET_CORS_ORIGINS if needed.
|
||||
# Example: DECNET_CORS_ORIGINS=http://192.168.1.50:9090,https://dashboard.example.com
|
||||
_web_hostname: str = "localhost" if DECNET_WEB_HOST in ("0.0.0.0", "127.0.0.1", "::") else DECNET_WEB_HOST # nosec B104
|
||||
_cors_default: str = f"http://{_web_hostname}:{DECNET_WEB_PORT}"
|
||||
_cors_raw: str = os.environ.get("DECNET_CORS_ORIGINS", _cors_default)
|
||||
DECNET_CORS_ORIGINS: list[str] = [o.strip() for o in _cors_raw.split(",") if o.strip()]
|
||||
179
decnet/fleet.py
Normal file
179
decnet/fleet.py
Normal file
@@ -0,0 +1,179 @@
|
||||
"""
|
||||
Fleet builder — shared logic for constructing DeckyConfig lists.
|
||||
|
||||
Used by both the CLI and the web API router to build deckies from
|
||||
flags or INI config. Lives here (not in cli.py) so that the web layer
|
||||
and the mutation engine can import it without depending on the CLI.
|
||||
"""
|
||||
|
||||
import random
|
||||
from typing import Optional
|
||||
|
||||
from decnet.archetypes import Archetype, get_archetype
|
||||
from decnet.config import DeckyConfig, random_hostname
|
||||
from decnet.distros import all_distros, get_distro, random_distro
|
||||
from decnet.ini_loader import IniConfig
|
||||
from decnet.services.registry import all_services
|
||||
|
||||
|
||||
def all_service_names() -> list[str]:
|
||||
"""Return all registered service names from the live plugin registry."""
|
||||
return sorted(all_services().keys())
|
||||
|
||||
|
||||
def resolve_distros(
|
||||
distros_explicit: list[str] | None,
|
||||
randomize_distros: bool,
|
||||
n: int,
|
||||
archetype: Archetype | None = None,
|
||||
) -> list[str]:
|
||||
"""Return a list of n distro slugs based on flags or archetype preference."""
|
||||
if distros_explicit:
|
||||
return [distros_explicit[i % len(distros_explicit)] for i in range(n)]
|
||||
if randomize_distros:
|
||||
return [random_distro().slug for _ in range(n)]
|
||||
if archetype:
|
||||
pool = archetype.preferred_distros
|
||||
return [pool[i % len(pool)] for i in range(n)]
|
||||
slugs = list(all_distros().keys())
|
||||
return [slugs[i % len(slugs)] for i in range(n)]
|
||||
|
||||
|
||||
def build_deckies(
|
||||
n: int,
|
||||
ips: list[str],
|
||||
services_explicit: list[str] | None,
|
||||
randomize_services: bool,
|
||||
distros_explicit: list[str] | None = None,
|
||||
randomize_distros: bool = False,
|
||||
archetype: Archetype | None = None,
|
||||
mutate_interval: Optional[int] = None,
|
||||
) -> list[DeckyConfig]:
|
||||
"""Build a list of DeckyConfigs from CLI-style flags."""
|
||||
deckies = []
|
||||
used_combos: set[frozenset] = set()
|
||||
distro_slugs = resolve_distros(distros_explicit, randomize_distros, n, archetype)
|
||||
|
||||
for i, ip in enumerate(ips):
|
||||
name = f"decky-{i + 1:02d}"
|
||||
distro = get_distro(distro_slugs[i])
|
||||
hostname = random_hostname(distro.slug)
|
||||
|
||||
if services_explicit:
|
||||
svc_list = services_explicit
|
||||
elif archetype:
|
||||
svc_list = list(archetype.services)
|
||||
elif randomize_services:
|
||||
svc_pool = all_service_names()
|
||||
attempts = 0
|
||||
while True:
|
||||
count = random.randint(1, min(3, len(svc_pool))) # nosec B311
|
||||
chosen = frozenset(random.sample(svc_pool, count)) # nosec B311
|
||||
attempts += 1
|
||||
if chosen not in used_combos or attempts > 20:
|
||||
break
|
||||
svc_list = list(chosen)
|
||||
used_combos.add(chosen)
|
||||
else:
|
||||
raise ValueError("Provide services_explicit, archetype, or randomize_services=True.")
|
||||
|
||||
deckies.append(
|
||||
DeckyConfig(
|
||||
name=name,
|
||||
ip=ip,
|
||||
services=svc_list,
|
||||
distro=distro.slug,
|
||||
base_image=distro.image,
|
||||
build_base=distro.build_base,
|
||||
hostname=hostname,
|
||||
archetype=archetype.slug if archetype else None,
|
||||
nmap_os=archetype.nmap_os if archetype else "linux",
|
||||
mutate_interval=mutate_interval,
|
||||
)
|
||||
)
|
||||
return deckies
|
||||
|
||||
|
||||
def build_deckies_from_ini(
|
||||
ini: IniConfig,
|
||||
subnet_cidr: str,
|
||||
gateway: str,
|
||||
host_ip: str,
|
||||
randomize: bool,
|
||||
cli_mutate_interval: int | None = None,
|
||||
) -> list[DeckyConfig]:
|
||||
"""Build DeckyConfig list from an IniConfig, auto-allocating missing IPs."""
|
||||
from ipaddress import IPv4Address, IPv4Network
|
||||
import time
|
||||
now = time.time()
|
||||
|
||||
explicit_ips: set[IPv4Address] = {
|
||||
IPv4Address(s.ip) for s in ini.deckies if s.ip
|
||||
}
|
||||
|
||||
net = IPv4Network(subnet_cidr, strict=False)
|
||||
reserved = {
|
||||
net.network_address,
|
||||
net.broadcast_address,
|
||||
IPv4Address(gateway),
|
||||
IPv4Address(host_ip),
|
||||
} | explicit_ips
|
||||
|
||||
auto_pool = (str(addr) for addr in net.hosts() if addr not in reserved)
|
||||
|
||||
deckies: list[DeckyConfig] = []
|
||||
for spec in ini.deckies:
|
||||
arch: Archetype | None = None
|
||||
if spec.archetype:
|
||||
arch = get_archetype(spec.archetype)
|
||||
|
||||
distro_pool = arch.preferred_distros if arch else list(all_distros().keys())
|
||||
distro = get_distro(distro_pool[len(deckies) % len(distro_pool)])
|
||||
hostname = random_hostname(distro.slug)
|
||||
|
||||
ip = spec.ip or next(auto_pool, None)
|
||||
if ip is None:
|
||||
raise ValueError(f"Not enough free IPs in {subnet_cidr} while assigning IP for '{spec.name}'.")
|
||||
|
||||
if spec.services:
|
||||
known = set(all_service_names())
|
||||
unknown = [s for s in spec.services if s not in known]
|
||||
if unknown:
|
||||
raise ValueError(
|
||||
f"Unknown service(s) in [{spec.name}]: {unknown}. "
|
||||
f"Available: {all_service_names()}"
|
||||
)
|
||||
svc_list = spec.services
|
||||
elif arch:
|
||||
svc_list = list(arch.services)
|
||||
elif randomize:
|
||||
svc_pool = all_service_names()
|
||||
count = random.randint(1, min(3, len(svc_pool))) # nosec B311
|
||||
svc_list = random.sample(svc_pool, count) # nosec B311
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Decky '[{spec.name}]' has no services= in config. "
|
||||
"Add services=, archetype=, or use --randomize-services."
|
||||
)
|
||||
|
||||
resolved_nmap_os = spec.nmap_os or (arch.nmap_os if arch else "linux")
|
||||
|
||||
decky_mutate_interval = cli_mutate_interval
|
||||
if decky_mutate_interval is None:
|
||||
decky_mutate_interval = spec.mutate_interval if spec.mutate_interval is not None else ini.mutate_interval
|
||||
|
||||
deckies.append(DeckyConfig(
|
||||
name=spec.name,
|
||||
ip=ip,
|
||||
services=svc_list,
|
||||
distro=distro.slug,
|
||||
base_image=distro.image,
|
||||
build_base=distro.build_base,
|
||||
hostname=hostname,
|
||||
archetype=arch.slug if arch else None,
|
||||
service_config=spec.service_config,
|
||||
nmap_os=resolved_nmap_os,
|
||||
mutate_interval=decky_mutate_interval,
|
||||
last_mutated=now,
|
||||
))
|
||||
return deckies
|
||||
@@ -6,7 +6,6 @@ Format:
|
||||
net=192.168.1.0/24
|
||||
gw=192.168.1.1
|
||||
interface=wlp6s0
|
||||
log_target=192.168.1.5:5140 # optional
|
||||
|
||||
[hostname-1]
|
||||
ip=192.168.1.82 # optional
|
||||
@@ -54,6 +53,7 @@ class DeckySpec:
|
||||
archetype: str | None = None
|
||||
service_config: dict[str, dict] = field(default_factory=dict)
|
||||
nmap_os: str | None = None # explicit OS family override (linux/windows/bsd/embedded/cisco)
|
||||
mutate_interval: int | None = None
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -70,7 +70,7 @@ class IniConfig:
|
||||
subnet: str | None = None
|
||||
gateway: str | None = None
|
||||
interface: str | None = None
|
||||
log_target: str | None = None
|
||||
mutate_interval: int | None = None
|
||||
deckies: list[DeckySpec] = field(default_factory=list)
|
||||
custom_services: list[CustomServiceSpec] = field(default_factory=list)
|
||||
|
||||
@@ -81,7 +81,33 @@ def load_ini(path: str | Path) -> IniConfig:
|
||||
read = cp.read(str(path))
|
||||
if not read:
|
||||
raise FileNotFoundError(f"Config file not found: {path}")
|
||||
return _parse_configparser(cp)
|
||||
|
||||
|
||||
def load_ini_from_string(content: str) -> IniConfig:
|
||||
"""Parse a DECNET INI string and return an IniConfig."""
|
||||
validate_ini_string(content)
|
||||
cp = configparser.ConfigParser()
|
||||
cp.read_string(content)
|
||||
return _parse_configparser(cp)
|
||||
|
||||
|
||||
def validate_ini_string(content: str) -> None:
|
||||
"""Perform safety and sanity checks on raw INI content string."""
|
||||
# 1. Size limit (e.g. 512KB)
|
||||
if len(content) > 512 * 1024:
|
||||
raise ValueError("INI content too large (max 512KB).")
|
||||
|
||||
# 2. Ensure it's not empty
|
||||
if not content.strip():
|
||||
raise ValueError("INI content is empty.")
|
||||
|
||||
# 3. Basic structure check (must contain at least one section header)
|
||||
if "[" not in content or "]" not in content:
|
||||
raise ValueError("Invalid INI format: no sections found.")
|
||||
|
||||
|
||||
def _parse_configparser(cp: configparser.ConfigParser) -> IniConfig:
|
||||
cfg = IniConfig()
|
||||
|
||||
if cp.has_section("general"):
|
||||
@@ -89,14 +115,24 @@ def load_ini(path: str | Path) -> IniConfig:
|
||||
cfg.subnet = g.get("net")
|
||||
cfg.gateway = g.get("gw")
|
||||
cfg.interface = g.get("interface")
|
||||
cfg.log_target = g.get("log_target") or g.get("log-target")
|
||||
|
||||
from decnet.services.registry import all_services
|
||||
known_services = set(all_services().keys())
|
||||
|
||||
# First pass: collect decky sections and custom service definitions
|
||||
for section in cp.sections():
|
||||
if section == "general":
|
||||
continue
|
||||
|
||||
# A service sub-section is identified if the section name has at least one dot
|
||||
# AND the last segment is a known service name.
|
||||
# e.g. "decky-01.ssh" -> sub-section
|
||||
# e.g. "decky.webmail" -> decky section (if "webmail" is not a service)
|
||||
if "." in section:
|
||||
continue # subsections handled in second pass
|
||||
_, _, last_segment = section.rpartition(".")
|
||||
if last_segment in known_services:
|
||||
continue # sub-section handled in second pass
|
||||
|
||||
if section.startswith("custom-"):
|
||||
# Bring-your-own service definition
|
||||
s = cp[section]
|
||||
@@ -115,17 +151,30 @@ def load_ini(path: str | Path) -> IniConfig:
|
||||
services = [sv.strip() for sv in svc_raw.split(",")] if svc_raw else None
|
||||
archetype = s.get("archetype")
|
||||
nmap_os = s.get("nmap_os") or s.get("nmap-os") or None
|
||||
|
||||
mi_raw = s.get("mutate_interval") or s.get("mutate-interval")
|
||||
mutate_interval = None
|
||||
if mi_raw:
|
||||
try:
|
||||
mutate_interval = int(mi_raw)
|
||||
except ValueError:
|
||||
raise ValueError(f"[{section}] mutate_interval= must be an integer, got '{mi_raw}'")
|
||||
|
||||
amount_raw = s.get("amount", "1")
|
||||
try:
|
||||
amount = int(amount_raw)
|
||||
if amount < 1:
|
||||
raise ValueError
|
||||
except ValueError:
|
||||
if amount > 100:
|
||||
raise ValueError(f"[{section}] amount={amount} exceeds maximum allowed (100).")
|
||||
except ValueError as e:
|
||||
if "exceeds maximum" in str(e):
|
||||
raise e
|
||||
raise ValueError(f"[{section}] amount= must be a positive integer, got '{amount_raw}'")
|
||||
|
||||
if amount == 1:
|
||||
cfg.deckies.append(DeckySpec(
|
||||
name=section, ip=ip, services=services, archetype=archetype, nmap_os=nmap_os,
|
||||
name=section, ip=ip, services=services, archetype=archetype, nmap_os=nmap_os, mutate_interval=mutate_interval,
|
||||
))
|
||||
else:
|
||||
# Expand into N deckies; explicit ip is ignored (can't share one IP)
|
||||
@@ -141,6 +190,7 @@ def load_ini(path: str | Path) -> IniConfig:
|
||||
services=services,
|
||||
archetype=archetype,
|
||||
nmap_os=nmap_os,
|
||||
mutate_interval=mutate_interval,
|
||||
))
|
||||
|
||||
# Second pass: collect per-service subsections [decky-name.service]
|
||||
@@ -149,7 +199,11 @@ def load_ini(path: str | Path) -> IniConfig:
|
||||
for section in cp.sections():
|
||||
if "." not in section:
|
||||
continue
|
||||
decky_name, _, svc_name = section.partition(".")
|
||||
|
||||
decky_name, dot, svc_name = section.rpartition(".")
|
||||
if svc_name not in known_services:
|
||||
continue # not a service sub-section
|
||||
|
||||
svc_cfg = {k: v for k, v in cp[section].items()}
|
||||
if decky_name in decky_map:
|
||||
# Direct match — single decky
|
||||
|
||||
@@ -50,10 +50,9 @@ def write_syslog(line: str) -> None:
|
||||
"""Write a single RFC 5424 syslog line to the rotating log file."""
|
||||
try:
|
||||
_get_logger().info(line)
|
||||
except Exception:
|
||||
except Exception: # nosec B110
|
||||
pass
|
||||
|
||||
|
||||
def get_log_path() -> Path:
|
||||
"""Return the configured log file path (for tests/inspection)."""
|
||||
return Path(os.environ.get(_LOG_FILE_ENV, _DEFAULT_LOG_FILE))
|
||||
|
||||
3
decnet/mutator/__init__.py
Normal file
3
decnet/mutator/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from decnet.mutator.engine import mutate_all, mutate_decky, run_watch_loop
|
||||
|
||||
__all__ = ["mutate_all", "mutate_decky", "run_watch_loop"]
|
||||
122
decnet/mutator/engine.py
Normal file
122
decnet/mutator/engine.py
Normal file
@@ -0,0 +1,122 @@
|
||||
"""
|
||||
Mutation Engine for DECNET.
|
||||
Handles dynamic rotation of exposed honeypot services over time.
|
||||
"""
|
||||
|
||||
import random
|
||||
import time
|
||||
from typing import Optional
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
from decnet.archetypes import get_archetype
|
||||
from decnet.fleet import all_service_names
|
||||
from decnet.composer import write_compose
|
||||
from decnet.config import DeckyConfig, load_state, save_state
|
||||
from decnet.engine import _compose_with_retry
|
||||
|
||||
import subprocess # nosec B404
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
def mutate_decky(decky_name: str) -> bool:
|
||||
"""
|
||||
Perform an Intra-Archetype Shuffle for a specific decky.
|
||||
Returns True if mutation succeeded, False otherwise.
|
||||
"""
|
||||
state = load_state()
|
||||
if state is None:
|
||||
console.print("[red]No active deployment found (no decnet-state.json).[/]")
|
||||
return False
|
||||
|
||||
config, compose_path = state
|
||||
decky: Optional[DeckyConfig] = next((d for d in config.deckies if d.name == decky_name), None)
|
||||
|
||||
if not decky:
|
||||
console.print(f"[red]Decky '{decky_name}' not found in state.[/]")
|
||||
return False
|
||||
|
||||
if decky.archetype:
|
||||
try:
|
||||
arch = get_archetype(decky.archetype)
|
||||
svc_pool = list(arch.services)
|
||||
except ValueError:
|
||||
svc_pool = all_service_names()
|
||||
else:
|
||||
svc_pool = all_service_names()
|
||||
|
||||
if not svc_pool:
|
||||
console.print(f"[yellow]No services available for mutating '{decky_name}'.[/]")
|
||||
return False
|
||||
|
||||
current_services = set(decky.services)
|
||||
|
||||
attempts = 0
|
||||
while True:
|
||||
count = random.randint(1, min(3, len(svc_pool))) # nosec B311
|
||||
chosen = set(random.sample(svc_pool, count)) # nosec B311
|
||||
attempts += 1
|
||||
if chosen != current_services or attempts > 20:
|
||||
break
|
||||
|
||||
decky.services = list(chosen)
|
||||
decky.last_mutated = time.time()
|
||||
|
||||
save_state(config, compose_path)
|
||||
write_compose(config, compose_path)
|
||||
|
||||
console.print(f"[cyan]Mutating '{decky_name}' to services: {', '.join(decky.services)}[/]")
|
||||
|
||||
try:
|
||||
_compose_with_retry("up", "-d", "--remove-orphans", compose_file=compose_path)
|
||||
except subprocess.CalledProcessError as e:
|
||||
console.print(f"[red]Failed to mutate '{decky_name}': {e.stderr}[/]")
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def mutate_all(force: bool = False) -> None:
|
||||
"""
|
||||
Check all deckies and mutate those that are due.
|
||||
If force=True, mutates all deckies regardless of schedule.
|
||||
"""
|
||||
state = load_state()
|
||||
if state is None:
|
||||
console.print("[red]No active deployment found.[/]")
|
||||
return
|
||||
|
||||
config, _ = state
|
||||
now = time.time()
|
||||
|
||||
mutated_count = 0
|
||||
for decky in config.deckies:
|
||||
interval_mins = decky.mutate_interval or config.mutate_interval
|
||||
if interval_mins is None and not force:
|
||||
continue
|
||||
|
||||
if force:
|
||||
due = True
|
||||
else:
|
||||
elapsed_secs = now - decky.last_mutated
|
||||
due = elapsed_secs >= (interval_mins * 60)
|
||||
|
||||
if due:
|
||||
success = mutate_decky(decky.name)
|
||||
if success:
|
||||
mutated_count += 1
|
||||
|
||||
if mutated_count == 0 and not force:
|
||||
console.print("[dim]No deckies are due for mutation.[/]")
|
||||
|
||||
|
||||
def run_watch_loop(poll_interval_secs: int = 10) -> None:
|
||||
"""Run an infinite loop checking for deckies that need mutation."""
|
||||
console.print(f"[green]DECNET Mutator Watcher started (polling every {poll_interval_secs}s).[/]")
|
||||
try:
|
||||
while True:
|
||||
mutate_all(force=False)
|
||||
time.sleep(poll_interval_secs)
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n[dim]Mutator watcher stopped.[/]")
|
||||
@@ -9,7 +9,7 @@ Handles:
|
||||
"""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import subprocess # nosec B404
|
||||
from ipaddress import IPv4Address, IPv4Interface, IPv4Network
|
||||
|
||||
import docker
|
||||
@@ -24,7 +24,7 @@ HOST_IPVLAN_IFACE = "decnet_ipvlan0"
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _run(cmd: list[str], check: bool = True) -> subprocess.CompletedProcess:
|
||||
return subprocess.run(cmd, capture_output=True, text=True, check=check)
|
||||
return subprocess.run(cmd, capture_output=True, text=True, check=check) # nosec B603 B404
|
||||
|
||||
|
||||
def detect_interface() -> str:
|
||||
|
||||
@@ -5,17 +5,31 @@ Maps an nmap OS family slug to a dict of Linux kernel sysctls that, when applied
|
||||
to a container's network namespace, make its TCP/IP stack behaviour resemble the
|
||||
claimed OS as closely as possible within the Linux kernel's constraints.
|
||||
|
||||
All sysctls listed here are network-namespace-scoped and safe to set per-container
|
||||
without --privileged (beyond the NET_ADMIN capability already granted).
|
||||
|
||||
Primary discriminator leveraged by nmap: net.ipv4.ip_default_ttl (TTL)
|
||||
Linux → 64
|
||||
Windows → 128
|
||||
BSD (FreeBSD/macOS)→ 64 (different TCP options, but same TTL as Linux)
|
||||
Embedded / network → 255
|
||||
|
||||
Secondary tuning (TCP behaviour):
|
||||
Secondary discriminators (nmap OPS / WIN / ECN / T2–T6 probe groups):
|
||||
net.ipv4.tcp_syn_retries – SYN retransmits before giving up
|
||||
net.ipv4.tcp_timestamps – TCP timestamp option (OPS probes); Windows = off
|
||||
net.ipv4.tcp_window_scaling – Window scale option; embedded/Cisco typically off
|
||||
net.ipv4.tcp_sack – Selective ACK option; absent on most embedded stacks
|
||||
net.ipv4.tcp_ecn – ECN negotiation; Linux offers (2), Windows off (0)
|
||||
net.ipv4.ip_no_pmtu_disc – DF bit in ICMP replies (IE probes); embedded on
|
||||
net.ipv4.tcp_fin_timeout – FIN_WAIT_2 seconds (T2–T6 timing); Windows shorter
|
||||
|
||||
ICMP tuning (nmap IE / U1 probe groups):
|
||||
net.ipv4.icmp_ratelimit – Min ms between ICMP error replies; Windows = 0 (none)
|
||||
net.ipv4.icmp_ratemask – Bitmask of ICMP types subject to rate limiting
|
||||
|
||||
Note: net.core.rmem_default is a global (non-namespaced) sysctl and cannot be
|
||||
set per-container without --privileged; it is intentionally excluded.
|
||||
set per-container without --privileged; TCP window size is already correct for
|
||||
Windows (64240) from the kernel's default tcp_rmem settings.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
@@ -24,27 +38,69 @@ OS_SYSCTLS: dict[str, dict[str, str]] = {
|
||||
"linux": {
|
||||
"net.ipv4.ip_default_ttl": "64",
|
||||
"net.ipv4.tcp_syn_retries": "6",
|
||||
"net.ipv4.tcp_timestamps": "1",
|
||||
"net.ipv4.tcp_window_scaling": "1",
|
||||
"net.ipv4.tcp_sack": "1",
|
||||
"net.ipv4.tcp_ecn": "2",
|
||||
"net.ipv4.ip_no_pmtu_disc": "0",
|
||||
"net.ipv4.tcp_fin_timeout": "60",
|
||||
"net.ipv4.icmp_ratelimit": "1000",
|
||||
"net.ipv4.icmp_ratemask": "6168",
|
||||
},
|
||||
"windows": {
|
||||
"net.ipv4.ip_default_ttl": "128",
|
||||
"net.ipv4.tcp_syn_retries": "2",
|
||||
"net.ipv4.tcp_timestamps": "0",
|
||||
"net.ipv4.tcp_window_scaling": "1",
|
||||
"net.ipv4.tcp_sack": "1",
|
||||
"net.ipv4.tcp_ecn": "0",
|
||||
"net.ipv4.ip_no_pmtu_disc": "0",
|
||||
"net.ipv4.tcp_fin_timeout": "30",
|
||||
"net.ipv4.icmp_ratelimit": "0",
|
||||
"net.ipv4.icmp_ratemask": "0",
|
||||
},
|
||||
"bsd": {
|
||||
"net.ipv4.ip_default_ttl": "64",
|
||||
"net.ipv4.tcp_syn_retries": "6",
|
||||
"net.ipv4.tcp_timestamps": "1",
|
||||
"net.ipv4.tcp_window_scaling": "1",
|
||||
"net.ipv4.tcp_sack": "1",
|
||||
"net.ipv4.tcp_ecn": "0",
|
||||
"net.ipv4.ip_no_pmtu_disc": "0",
|
||||
"net.ipv4.tcp_fin_timeout": "60",
|
||||
"net.ipv4.icmp_ratelimit": "250",
|
||||
"net.ipv4.icmp_ratemask": "6168",
|
||||
},
|
||||
"embedded": {
|
||||
"net.ipv4.ip_default_ttl": "255",
|
||||
"net.ipv4.tcp_syn_retries": "3",
|
||||
"net.ipv4.tcp_timestamps": "0",
|
||||
"net.ipv4.tcp_window_scaling": "0",
|
||||
"net.ipv4.tcp_sack": "0",
|
||||
"net.ipv4.tcp_ecn": "0",
|
||||
"net.ipv4.ip_no_pmtu_disc": "1",
|
||||
"net.ipv4.tcp_fin_timeout": "15",
|
||||
"net.ipv4.icmp_ratelimit": "0",
|
||||
"net.ipv4.icmp_ratemask": "0",
|
||||
},
|
||||
"cisco": {
|
||||
"net.ipv4.ip_default_ttl": "255",
|
||||
"net.ipv4.tcp_syn_retries": "2",
|
||||
"net.ipv4.tcp_timestamps": "0",
|
||||
"net.ipv4.tcp_window_scaling": "0",
|
||||
"net.ipv4.tcp_sack": "0",
|
||||
"net.ipv4.tcp_ecn": "0",
|
||||
"net.ipv4.ip_no_pmtu_disc": "1",
|
||||
"net.ipv4.tcp_fin_timeout": "15",
|
||||
"net.ipv4.icmp_ratelimit": "0",
|
||||
"net.ipv4.icmp_ratemask": "0",
|
||||
},
|
||||
}
|
||||
|
||||
_DEFAULT_OS = "linux"
|
||||
|
||||
_REQUIRED_SYSCTLS: frozenset[str] = frozenset(OS_SYSCTLS["linux"].keys())
|
||||
|
||||
|
||||
def get_os_sysctls(nmap_os: str) -> dict[str, str]:
|
||||
"""Return the sysctl dict for *nmap_os*. Falls back to Linux on unknown slugs."""
|
||||
@@ -54,3 +110,4 @@ def get_os_sysctls(nmap_os: str) -> dict[str, str]:
|
||||
def all_os_families() -> list[str]:
|
||||
"""Return all registered nmap OS family slugs."""
|
||||
return list(OS_SYSCTLS.keys())
|
||||
|
||||
|
||||
@@ -1,26 +1,35 @@
|
||||
from pathlib import Path
|
||||
from decnet.services.base import BaseService
|
||||
|
||||
|
||||
class ConpotService(BaseService):
|
||||
"""ICS/SCADA honeypot covering Modbus (502), SNMP (161 UDP), and HTTP (80).
|
||||
|
||||
Uses the official honeynet/conpot image which ships a default ICS profile
|
||||
that emulates a Siemens S7-200 PLC.
|
||||
Uses a custom build context wrapping the official honeynet/conpot image
|
||||
to fix Modbus binding to port 502.
|
||||
"""
|
||||
|
||||
name = "conpot"
|
||||
ports = [502, 161, 80]
|
||||
default_image = "honeynet/conpot"
|
||||
default_image = "build"
|
||||
|
||||
def compose_fragment(self, decky_name: str, log_target: str | None = None, service_cfg: dict | None = None) -> dict:
|
||||
env = {
|
||||
"CONPOT_TEMPLATE": "default",
|
||||
"NODE_NAME": decky_name,
|
||||
}
|
||||
if log_target:
|
||||
env["LOG_TARGET"] = log_target
|
||||
|
||||
return {
|
||||
"image": "honeynet/conpot",
|
||||
"build": {
|
||||
"context": str(self.dockerfile_context()),
|
||||
"args": {"BASE_IMAGE": "honeynet/conpot:latest"},
|
||||
},
|
||||
"container_name": f"{decky_name}-conpot",
|
||||
"restart": "unless-stopped",
|
||||
"environment": {
|
||||
"CONPOT_TEMPLATE": "default",
|
||||
},
|
||||
"environment": env,
|
||||
}
|
||||
|
||||
def dockerfile_context(self):
|
||||
return None
|
||||
return Path(__file__).parent.parent.parent / "templates" / "conpot"
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
from pathlib import Path
|
||||
|
||||
from decnet.services.base import BaseService
|
||||
|
||||
TEMPLATES_DIR = Path(__file__).parent.parent.parent / "templates" / "real_ssh"
|
||||
|
||||
|
||||
class RealSSHService(BaseService):
|
||||
"""
|
||||
Fully interactive OpenSSH server — no honeypot emulation.
|
||||
|
||||
Used for the deaddeck (entry-point machine). Attackers get a real shell.
|
||||
Credentials are intentionally weak to invite exploitation.
|
||||
|
||||
service_cfg keys:
|
||||
password Root password (default: "admin")
|
||||
hostname Override container hostname
|
||||
"""
|
||||
|
||||
name = "real_ssh"
|
||||
ports = [22]
|
||||
default_image = "build"
|
||||
|
||||
def compose_fragment(
|
||||
self,
|
||||
decky_name: str,
|
||||
log_target: str | None = None,
|
||||
service_cfg: dict | None = None,
|
||||
) -> dict:
|
||||
cfg = service_cfg or {}
|
||||
env: dict = {
|
||||
"SSH_ROOT_PASSWORD": cfg.get("password", "admin"),
|
||||
}
|
||||
if "hostname" in cfg:
|
||||
env["SSH_HOSTNAME"] = cfg["hostname"]
|
||||
|
||||
return {
|
||||
"build": {"context": str(TEMPLATES_DIR)},
|
||||
"container_name": f"{decky_name}-real-ssh",
|
||||
"restart": "unless-stopped",
|
||||
"cap_add": ["NET_BIND_SERVICE"],
|
||||
"environment": env,
|
||||
}
|
||||
|
||||
def dockerfile_context(self) -> Path:
|
||||
return TEMPLATES_DIR
|
||||
43
decnet/services/smtp_relay.py
Normal file
43
decnet/services/smtp_relay.py
Normal file
@@ -0,0 +1,43 @@
|
||||
from pathlib import Path
|
||||
|
||||
from decnet.services.base import BaseService
|
||||
|
||||
# Reuses the same template as the smtp service — only difference is
|
||||
# SMTP_OPEN_RELAY=1 in the environment, which enables the open relay persona.
|
||||
_TEMPLATES_DIR = Path(__file__).parent.parent.parent / "templates" / "smtp"
|
||||
|
||||
|
||||
class SMTPRelayService(BaseService):
|
||||
"""SMTP open relay bait — accepts any RCPT TO and delivers messages."""
|
||||
|
||||
name = "smtp_relay"
|
||||
ports = [25, 587]
|
||||
default_image = "build"
|
||||
|
||||
def compose_fragment(
|
||||
self,
|
||||
decky_name: str,
|
||||
log_target: str | None = None,
|
||||
service_cfg: dict | None = None,
|
||||
) -> dict:
|
||||
cfg = service_cfg or {}
|
||||
fragment: dict = {
|
||||
"build": {"context": str(_TEMPLATES_DIR)},
|
||||
"container_name": f"{decky_name}-smtp_relay",
|
||||
"restart": "unless-stopped",
|
||||
"cap_add": ["NET_BIND_SERVICE"],
|
||||
"environment": {
|
||||
"NODE_NAME": decky_name,
|
||||
"SMTP_OPEN_RELAY": "1",
|
||||
},
|
||||
}
|
||||
if log_target:
|
||||
fragment["environment"]["LOG_TARGET"] = log_target
|
||||
if "banner" in cfg:
|
||||
fragment["environment"]["SMTP_BANNER"] = cfg["banner"]
|
||||
if "mta" in cfg:
|
||||
fragment["environment"]["SMTP_MTA"] = cfg["mta"]
|
||||
return fragment
|
||||
|
||||
def dockerfile_context(self) -> Path:
|
||||
return _TEMPLATES_DIR
|
||||
@@ -1,12 +1,26 @@
|
||||
from pathlib import Path
|
||||
|
||||
from decnet.services.base import BaseService
|
||||
|
||||
TEMPLATES_DIR = Path(__file__).parent.parent.parent / "templates" / "cowrie"
|
||||
TEMPLATES_DIR = Path(__file__).parent.parent.parent / "templates" / "ssh"
|
||||
|
||||
|
||||
class SSHService(BaseService):
|
||||
"""
|
||||
Interactive OpenSSH server for general-purpose deckies.
|
||||
|
||||
Replaced Cowrie emulation with a real sshd so fingerprinting tools and
|
||||
experienced attackers cannot trivially identify the honeypot. Auth events,
|
||||
sudo activity, and interactive commands are all forwarded to stdout as
|
||||
RFC 5424 via the rsyslog bridge baked into the image.
|
||||
|
||||
service_cfg keys:
|
||||
password Root password (default: "admin")
|
||||
hostname Override container hostname
|
||||
"""
|
||||
|
||||
name = "ssh"
|
||||
ports = [22, 2222]
|
||||
ports = [22]
|
||||
default_image = "build"
|
||||
|
||||
def compose_fragment(
|
||||
@@ -17,28 +31,10 @@ class SSHService(BaseService):
|
||||
) -> dict:
|
||||
cfg = service_cfg or {}
|
||||
env: dict = {
|
||||
"NODE_NAME": decky_name,
|
||||
"COWRIE_HOSTNAME": decky_name,
|
||||
"COWRIE_HONEYPOT_LISTEN_ENDPOINTS": "tcp:22:interface=0.0.0.0 tcp:2222:interface=0.0.0.0",
|
||||
"COWRIE_SSH_LISTEN_ENDPOINTS": "tcp:22:interface=0.0.0.0 tcp:2222:interface=0.0.0.0",
|
||||
"SSH_ROOT_PASSWORD": cfg.get("password", "admin"),
|
||||
}
|
||||
if log_target:
|
||||
host, port = log_target.rsplit(":", 1)
|
||||
env["COWRIE_OUTPUT_TCP_ENABLED"] = "true"
|
||||
env["COWRIE_OUTPUT_TCP_HOST"] = host
|
||||
env["COWRIE_OUTPUT_TCP_PORT"] = port
|
||||
|
||||
# Optional persona overrides
|
||||
if "kernel_version" in cfg:
|
||||
env["COWRIE_HONEYPOT_KERNEL_VERSION"] = cfg["kernel_version"]
|
||||
if "kernel_build_string" in cfg:
|
||||
env["COWRIE_HONEYPOT_KERNEL_BUILD_STRING"] = cfg["kernel_build_string"]
|
||||
if "hardware_platform" in cfg:
|
||||
env["COWRIE_HONEYPOT_HARDWARE_PLATFORM"] = cfg["hardware_platform"]
|
||||
if "ssh_banner" in cfg:
|
||||
env["COWRIE_SSH_VERSION"] = cfg["ssh_banner"]
|
||||
if "users" in cfg:
|
||||
env["COWRIE_USERDB_ENTRIES"] = cfg["users"]
|
||||
if "hostname" in cfg:
|
||||
env["SSH_HOSTNAME"] = cfg["hostname"]
|
||||
|
||||
return {
|
||||
"build": {"context": str(TEMPLATES_DIR)},
|
||||
|
||||
@@ -1,31 +1,47 @@
|
||||
from pathlib import Path
|
||||
|
||||
from decnet.services.base import BaseService
|
||||
|
||||
TEMPLATES_DIR = Path(__file__).parent.parent.parent / "templates" / "telnet"
|
||||
|
||||
|
||||
class TelnetService(BaseService):
|
||||
"""
|
||||
Real telnetd using busybox telnetd + rsyslog logging pipeline.
|
||||
|
||||
Replaced Cowrie emulation (which also started an SSH daemon on port 22)
|
||||
with a real busybox telnetd so only port 23 is exposed and auth events
|
||||
are logged as RFC 5424 via the same rsyslog bridge used by the SSH service.
|
||||
|
||||
service_cfg keys:
|
||||
password Root password (default: "admin")
|
||||
hostname Override container hostname
|
||||
"""
|
||||
|
||||
name = "telnet"
|
||||
ports = [23]
|
||||
default_image = "cowrie/cowrie"
|
||||
default_image = "build"
|
||||
|
||||
def compose_fragment(self, decky_name: str, log_target: str | None = None, service_cfg: dict | None = None) -> dict:
|
||||
def compose_fragment(
|
||||
self,
|
||||
decky_name: str,
|
||||
log_target: str | None = None,
|
||||
service_cfg: dict | None = None,
|
||||
) -> dict:
|
||||
cfg = service_cfg or {}
|
||||
env: dict = {
|
||||
"COWRIE_HONEYPOT_HOSTNAME": decky_name,
|
||||
"COWRIE_TELNET_ENABLED": "true",
|
||||
"COWRIE_TELNET_LISTEN_ENDPOINTS": "tcp:23:interface=0.0.0.0",
|
||||
# Disable SSH so this container is telnet-only
|
||||
"COWRIE_SSH_ENABLED": "false",
|
||||
"TELNET_ROOT_PASSWORD": cfg.get("password", "admin"),
|
||||
}
|
||||
if log_target:
|
||||
host, port = log_target.rsplit(":", 1)
|
||||
env["COWRIE_OUTPUT_TCP_ENABLED"] = "true"
|
||||
env["COWRIE_OUTPUT_TCP_HOST"] = host
|
||||
env["COWRIE_OUTPUT_TCP_PORT"] = port
|
||||
if "hostname" in cfg:
|
||||
env["TELNET_HOSTNAME"] = cfg["hostname"]
|
||||
|
||||
return {
|
||||
"image": "cowrie/cowrie",
|
||||
"build": {"context": str(TEMPLATES_DIR)},
|
||||
"container_name": f"{decky_name}-telnet",
|
||||
"restart": "unless-stopped",
|
||||
"cap_add": ["NET_BIND_SERVICE"],
|
||||
"environment": env,
|
||||
}
|
||||
|
||||
def dockerfile_context(self):
|
||||
return None
|
||||
def dockerfile_context(self) -> Path:
|
||||
return TEMPLATES_DIR
|
||||
|
||||
72
decnet/web/api.py
Normal file
72
decnet/web/api.py
Normal file
@@ -0,0 +1,72 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
from contextlib import asynccontextmanager
|
||||
from typing import Any, AsyncGenerator, Optional
|
||||
|
||||
from fastapi import FastAPI
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
|
||||
from decnet.env import DECNET_CORS_ORIGINS, DECNET_DEVELOPER, DECNET_INGEST_LOG_FILE
|
||||
from decnet.web.dependencies import repo
|
||||
from decnet.collector import log_collector_worker
|
||||
from decnet.web.ingester import log_ingestion_worker
|
||||
from decnet.web.router import api_router
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
ingestion_task: Optional[asyncio.Task[Any]] = None
|
||||
collector_task: Optional[asyncio.Task[Any]] = None
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
|
||||
global ingestion_task, collector_task
|
||||
|
||||
for attempt in range(1, 6):
|
||||
try:
|
||||
await repo.initialize()
|
||||
break
|
||||
except Exception as exc:
|
||||
log.warning("DB init attempt %d/5 failed: %s", attempt, exc)
|
||||
if attempt == 5:
|
||||
log.error("DB failed to initialize after 5 attempts — startup may be degraded")
|
||||
await asyncio.sleep(0.5)
|
||||
|
||||
# Start background ingestion task
|
||||
if ingestion_task is None or ingestion_task.done():
|
||||
ingestion_task = asyncio.create_task(log_ingestion_worker(repo))
|
||||
|
||||
# Start Docker log collector (writes to log file; ingester reads from it)
|
||||
_log_file = os.environ.get("DECNET_INGEST_LOG_FILE", DECNET_INGEST_LOG_FILE)
|
||||
if _log_file and (collector_task is None or collector_task.done()):
|
||||
collector_task = asyncio.create_task(log_collector_worker(_log_file))
|
||||
else:
|
||||
log.warning("DECNET_INGEST_LOG_FILE not set — Docker log collection disabled.")
|
||||
|
||||
yield
|
||||
|
||||
# Shutdown background tasks
|
||||
for task in (ingestion_task, collector_task):
|
||||
if task:
|
||||
task.cancel()
|
||||
|
||||
|
||||
app: FastAPI = FastAPI(
|
||||
title="DECNET Web Dashboard API",
|
||||
version="1.0.0",
|
||||
lifespan=lifespan,
|
||||
docs_url="/docs" if DECNET_DEVELOPER else None,
|
||||
redoc_url="/redoc" if DECNET_DEVELOPER else None,
|
||||
openapi_url="/openapi.json" if DECNET_DEVELOPER else None
|
||||
)
|
||||
|
||||
app.add_middleware(
|
||||
CORSMiddleware,
|
||||
allow_origins=DECNET_CORS_ORIGINS,
|
||||
allow_credentials=False,
|
||||
allow_methods=["GET", "POST", "PUT", "DELETE", "OPTIONS"],
|
||||
allow_headers=["Authorization", "Content-Type", "Last-Event-ID"],
|
||||
)
|
||||
|
||||
# Include the modular API router
|
||||
app.include_router(api_router, prefix="/api/v1")
|
||||
38
decnet/web/auth.py
Normal file
38
decnet/web/auth.py
Normal file
@@ -0,0 +1,38 @@
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from typing import Optional, Any
|
||||
import jwt
|
||||
import bcrypt
|
||||
|
||||
from decnet.env import DECNET_JWT_SECRET
|
||||
|
||||
SECRET_KEY: str = DECNET_JWT_SECRET
|
||||
ALGORITHM: str = "HS256"
|
||||
ACCESS_TOKEN_EXPIRE_MINUTES: int = 1440
|
||||
|
||||
|
||||
def verify_password(plain_password: str, hashed_password: str) -> bool:
|
||||
return bcrypt.checkpw(
|
||||
plain_password.encode("utf-8")[:72],
|
||||
hashed_password.encode("utf-8")
|
||||
)
|
||||
|
||||
|
||||
def get_password_hash(password: str) -> str:
|
||||
# Use a cost factor of 12 (default for passlib/bcrypt)
|
||||
_salt: bytes = bcrypt.gensalt(rounds=12)
|
||||
_hashed: bytes = bcrypt.hashpw(password.encode("utf-8")[:72], _salt)
|
||||
return _hashed.decode("utf-8")
|
||||
|
||||
|
||||
def create_access_token(data: dict[str, Any], expires_delta: Optional[timedelta] = None) -> str:
|
||||
_to_encode: dict[str, Any] = data.copy()
|
||||
_expire: datetime
|
||||
if expires_delta:
|
||||
_expire = datetime.now(timezone.utc) + expires_delta
|
||||
else:
|
||||
_expire = datetime.now(timezone.utc) + timedelta(minutes=15)
|
||||
|
||||
_to_encode.update({"exp": _expire})
|
||||
_to_encode.update({"iat": datetime.now(timezone.utc)})
|
||||
_encoded_jwt: str = jwt.encode(_to_encode, SECRET_KEY, algorithm=ALGORITHM)
|
||||
return _encoded_jwt
|
||||
75
decnet/web/db/models.py
Normal file
75
decnet/web/db/models.py
Normal file
@@ -0,0 +1,75 @@
|
||||
from datetime import datetime, timezone
|
||||
from typing import Optional, Any, List
|
||||
from sqlmodel import SQLModel, Field
|
||||
from pydantic import BaseModel, Field as PydanticField
|
||||
|
||||
# --- Database Tables (SQLModel) ---
|
||||
|
||||
class User(SQLModel, table=True):
|
||||
__tablename__ = "users"
|
||||
uuid: str = Field(primary_key=True)
|
||||
username: str = Field(index=True, unique=True)
|
||||
password_hash: str
|
||||
role: str = Field(default="viewer")
|
||||
must_change_password: bool = Field(default=False)
|
||||
|
||||
class Log(SQLModel, table=True):
|
||||
__tablename__ = "logs"
|
||||
id: Optional[int] = Field(default=None, primary_key=True)
|
||||
timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc), index=True)
|
||||
decky: str = Field(index=True)
|
||||
service: str = Field(index=True)
|
||||
event_type: str = Field(index=True)
|
||||
attacker_ip: str = Field(index=True)
|
||||
raw_line: str
|
||||
fields: str
|
||||
msg: Optional[str] = None
|
||||
|
||||
class Bounty(SQLModel, table=True):
|
||||
__tablename__ = "bounty"
|
||||
id: Optional[int] = Field(default=None, primary_key=True)
|
||||
timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc), index=True)
|
||||
decky: str = Field(index=True)
|
||||
service: str = Field(index=True)
|
||||
attacker_ip: str = Field(index=True)
|
||||
bounty_type: str = Field(index=True)
|
||||
payload: str
|
||||
|
||||
# --- API Request/Response Models (Pydantic) ---
|
||||
|
||||
class Token(BaseModel):
|
||||
access_token: str
|
||||
token_type: str
|
||||
must_change_password: bool = False
|
||||
|
||||
class LoginRequest(BaseModel):
|
||||
username: str
|
||||
password: str = PydanticField(..., max_length=72)
|
||||
|
||||
class ChangePasswordRequest(BaseModel):
|
||||
old_password: str = PydanticField(..., max_length=72)
|
||||
new_password: str = PydanticField(..., max_length=72)
|
||||
|
||||
class LogsResponse(BaseModel):
|
||||
total: int
|
||||
limit: int
|
||||
offset: int
|
||||
data: List[dict[str, Any]]
|
||||
|
||||
class BountyResponse(BaseModel):
|
||||
total: int
|
||||
limit: int
|
||||
offset: int
|
||||
data: List[dict[str, Any]]
|
||||
|
||||
class StatsResponse(BaseModel):
|
||||
total_logs: int
|
||||
unique_attackers: int
|
||||
active_deckies: int
|
||||
deployed_deckies: int
|
||||
|
||||
class MutateIntervalRequest(BaseModel):
|
||||
mutate_interval: Optional[int] = None
|
||||
|
||||
class DeployIniRequest(BaseModel):
|
||||
ini_content: str = PydanticField(..., min_length=5, max_length=512 * 1024)
|
||||
82
decnet/web/db/repository.py
Normal file
82
decnet/web/db/repository.py
Normal file
@@ -0,0 +1,82 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any, Optional
|
||||
|
||||
|
||||
class BaseRepository(ABC):
|
||||
"""Abstract base class for DECNET web dashboard data storage."""
|
||||
|
||||
@abstractmethod
|
||||
async def initialize(self) -> None:
|
||||
"""Initialize the database schema."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def add_log(self, log_data: dict[str, Any]) -> None:
|
||||
"""Add a new log entry to the database."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_logs(
|
||||
self,
|
||||
limit: int = 50,
|
||||
offset: int = 0,
|
||||
search: Optional[str] = None
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Retrieve paginated log entries."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_total_logs(self, search: Optional[str] = None) -> int:
|
||||
"""Retrieve the total count of logs, optionally filtered by search."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_stats_summary(self) -> dict[str, Any]:
|
||||
"""Retrieve high-level dashboard metrics."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_deckies(self) -> list[dict[str, Any]]:
|
||||
"""Retrieve the list of currently deployed deckies."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_user_by_username(self, username: str) -> Optional[dict[str, Any]]:
|
||||
"""Retrieve a user by their username."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_user_by_uuid(self, uuid: str) -> Optional[dict[str, Any]]:
|
||||
"""Retrieve a user by their UUID."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def create_user(self, user_data: dict[str, Any]) -> None:
|
||||
"""Create a new dashboard user."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def update_user_password(self, uuid: str, password_hash: str, must_change_password: bool = False) -> None:
|
||||
"""Update a user's password and change the must_change_password flag."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def add_bounty(self, bounty_data: dict[str, Any]) -> None:
|
||||
"""Add a new harvested artifact (bounty) to the database."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_bounties(
|
||||
self,
|
||||
limit: int = 50,
|
||||
offset: int = 0,
|
||||
bounty_type: Optional[str] = None,
|
||||
search: Optional[str] = None
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Retrieve paginated bounty entries."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
async def get_total_bounties(self, bounty_type: Optional[str] = None, search: Optional[str] = None) -> int:
|
||||
"""Retrieve the total count of bounties, optionally filtered."""
|
||||
pass
|
||||
33
decnet/web/db/sqlite/database.py
Normal file
33
decnet/web/db/sqlite/database.py
Normal file
@@ -0,0 +1,33 @@
|
||||
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
|
||||
from sqlalchemy import create_engine
|
||||
from sqlmodel import SQLModel
|
||||
|
||||
# We need both sync and async engines for SQLite
|
||||
# Sync for initialization (DDL) and async for standard queries
|
||||
|
||||
def get_async_engine(db_path: str):
|
||||
# If it's a memory URI, don't add the extra slash that turns it into a relative file
|
||||
prefix = "sqlite+aiosqlite:///"
|
||||
if db_path.startswith("file:"):
|
||||
prefix = "sqlite+aiosqlite:///"
|
||||
return create_async_engine(f"{prefix}{db_path}", echo=False, connect_args={"uri": True})
|
||||
|
||||
def get_sync_engine(db_path: str):
|
||||
prefix = "sqlite:///"
|
||||
return create_engine(f"{prefix}{db_path}", echo=False, connect_args={"uri": True})
|
||||
|
||||
def init_db(db_path: str):
|
||||
"""Synchronously create all tables."""
|
||||
engine = get_sync_engine(db_path)
|
||||
# Ensure WAL mode is set
|
||||
with engine.connect() as conn:
|
||||
conn.exec_driver_sql("PRAGMA journal_mode=WAL")
|
||||
conn.exec_driver_sql("PRAGMA synchronous=NORMAL")
|
||||
SQLModel.metadata.create_all(engine)
|
||||
|
||||
async def get_session(engine) -> AsyncSession:
|
||||
async_session = async_sessionmaker(
|
||||
engine, class_=AsyncSession, expire_on_commit=False
|
||||
)
|
||||
async with async_session() as session:
|
||||
yield session
|
||||
352
decnet/web/db/sqlite/repository.py
Normal file
352
decnet/web/db/sqlite/repository.py
Normal file
@@ -0,0 +1,352 @@
|
||||
import asyncio
|
||||
import json
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from typing import Any, Optional, List
|
||||
|
||||
from sqlalchemy import func, select, desc, asc, text, or_, update, literal_column
|
||||
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
|
||||
|
||||
from decnet.config import load_state, _ROOT
|
||||
from decnet.env import DECNET_ADMIN_USER, DECNET_ADMIN_PASSWORD
|
||||
from decnet.web.auth import get_password_hash
|
||||
from decnet.web.db.repository import BaseRepository
|
||||
from decnet.web.db.models import User, Log, Bounty
|
||||
from decnet.web.db.sqlite.database import get_async_engine, init_db
|
||||
|
||||
|
||||
class SQLiteRepository(BaseRepository):
|
||||
"""SQLite implementation using SQLModel and SQLAlchemy Async."""
|
||||
|
||||
def __init__(self, db_path: str = str(_ROOT / "decnet.db")) -> None:
|
||||
self.db_path = db_path
|
||||
self.engine = get_async_engine(db_path)
|
||||
self.session_factory = async_sessionmaker(
|
||||
self.engine, class_=AsyncSession, expire_on_commit=False
|
||||
)
|
||||
self._initialize_sync()
|
||||
|
||||
def _initialize_sync(self) -> None:
|
||||
"""Initialize the database schema synchronously."""
|
||||
init_db(self.db_path)
|
||||
|
||||
from decnet.web.db.sqlite.database import get_sync_engine
|
||||
engine = get_sync_engine(self.db_path)
|
||||
with engine.connect() as conn:
|
||||
conn.execute(
|
||||
text(
|
||||
"INSERT OR IGNORE INTO users (uuid, username, password_hash, role, must_change_password) "
|
||||
"VALUES (:uuid, :u, :p, :r, :m)"
|
||||
),
|
||||
{
|
||||
"uuid": str(uuid.uuid4()),
|
||||
"u": DECNET_ADMIN_USER,
|
||||
"p": get_password_hash(DECNET_ADMIN_PASSWORD),
|
||||
"r": "admin",
|
||||
"m": 1,
|
||||
},
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
async def initialize(self) -> None:
|
||||
"""Async warm-up / verification."""
|
||||
async with self.session_factory() as session:
|
||||
await session.execute(text("SELECT 1"))
|
||||
|
||||
async def reinitialize(self) -> None:
|
||||
"""Initialize the database schema asynchronously (useful for tests)."""
|
||||
from sqlmodel import SQLModel
|
||||
async with self.engine.begin() as conn:
|
||||
await conn.run_sync(SQLModel.metadata.create_all)
|
||||
|
||||
async with self.session_factory() as session:
|
||||
result = await session.execute(
|
||||
select(User).where(User.username == DECNET_ADMIN_USER)
|
||||
)
|
||||
if not result.scalar_one_or_none():
|
||||
session.add(User(
|
||||
uuid=str(uuid.uuid4()),
|
||||
username=DECNET_ADMIN_USER,
|
||||
password_hash=get_password_hash(DECNET_ADMIN_PASSWORD),
|
||||
role="admin",
|
||||
must_change_password=True,
|
||||
))
|
||||
await session.commit()
|
||||
|
||||
# ------------------------------------------------------------------ logs
|
||||
|
||||
async def add_log(self, log_data: dict[str, Any]) -> None:
|
||||
data = log_data.copy()
|
||||
if "fields" in data and isinstance(data["fields"], dict):
|
||||
data["fields"] = json.dumps(data["fields"])
|
||||
if "timestamp" in data and isinstance(data["timestamp"], str):
|
||||
try:
|
||||
data["timestamp"] = datetime.fromisoformat(
|
||||
data["timestamp"].replace("Z", "+00:00")
|
||||
)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
async with self.session_factory() as session:
|
||||
session.add(Log(**data))
|
||||
await session.commit()
|
||||
|
||||
def _apply_filters(
|
||||
self,
|
||||
statement,
|
||||
search: Optional[str],
|
||||
start_time: Optional[str],
|
||||
end_time: Optional[str],
|
||||
):
|
||||
import re
|
||||
import shlex
|
||||
|
||||
if start_time:
|
||||
statement = statement.where(Log.timestamp >= start_time)
|
||||
if end_time:
|
||||
statement = statement.where(Log.timestamp <= end_time)
|
||||
|
||||
if search:
|
||||
try:
|
||||
tokens = shlex.split(search)
|
||||
except ValueError:
|
||||
tokens = search.split()
|
||||
|
||||
core_fields = {
|
||||
"decky": Log.decky,
|
||||
"service": Log.service,
|
||||
"event": Log.event_type,
|
||||
"attacker": Log.attacker_ip,
|
||||
"attacker-ip": Log.attacker_ip,
|
||||
"attacker_ip": Log.attacker_ip,
|
||||
}
|
||||
|
||||
for token in tokens:
|
||||
if ":" in token:
|
||||
key, val = token.split(":", 1)
|
||||
if key in core_fields:
|
||||
statement = statement.where(core_fields[key] == val)
|
||||
else:
|
||||
key_safe = re.sub(r"[^a-zA-Z0-9_]", "", key)
|
||||
statement = statement.where(
|
||||
text(f"json_extract(fields, '$.{key_safe}') = :val")
|
||||
).params(val=val)
|
||||
else:
|
||||
lk = f"%{token}%"
|
||||
statement = statement.where(
|
||||
or_(
|
||||
Log.raw_line.like(lk),
|
||||
Log.decky.like(lk),
|
||||
Log.service.like(lk),
|
||||
Log.attacker_ip.like(lk),
|
||||
)
|
||||
)
|
||||
return statement
|
||||
|
||||
async def get_logs(
|
||||
self,
|
||||
limit: int = 50,
|
||||
offset: int = 0,
|
||||
search: Optional[str] = None,
|
||||
start_time: Optional[str] = None,
|
||||
end_time: Optional[str] = None,
|
||||
) -> List[dict]:
|
||||
statement = (
|
||||
select(Log)
|
||||
.order_by(desc(Log.timestamp))
|
||||
.offset(offset)
|
||||
.limit(limit)
|
||||
)
|
||||
statement = self._apply_filters(statement, search, start_time, end_time)
|
||||
|
||||
async with self.session_factory() as session:
|
||||
results = await session.execute(statement)
|
||||
return [log.model_dump(mode='json') for log in results.scalars().all()]
|
||||
|
||||
async def get_max_log_id(self) -> int:
|
||||
async with self.session_factory() as session:
|
||||
result = await session.execute(select(func.max(Log.id)))
|
||||
val = result.scalar()
|
||||
return val if val is not None else 0
|
||||
|
||||
async def get_logs_after_id(
|
||||
self,
|
||||
last_id: int,
|
||||
limit: int = 50,
|
||||
search: Optional[str] = None,
|
||||
start_time: Optional[str] = None,
|
||||
end_time: Optional[str] = None,
|
||||
) -> List[dict]:
|
||||
statement = (
|
||||
select(Log).where(Log.id > last_id).order_by(asc(Log.id)).limit(limit)
|
||||
)
|
||||
statement = self._apply_filters(statement, search, start_time, end_time)
|
||||
|
||||
async with self.session_factory() as session:
|
||||
results = await session.execute(statement)
|
||||
return [log.model_dump(mode='json') for log in results.scalars().all()]
|
||||
|
||||
async def get_total_logs(
|
||||
self,
|
||||
search: Optional[str] = None,
|
||||
start_time: Optional[str] = None,
|
||||
end_time: Optional[str] = None,
|
||||
) -> int:
|
||||
statement = select(func.count()).select_from(Log)
|
||||
statement = self._apply_filters(statement, search, start_time, end_time)
|
||||
|
||||
async with self.session_factory() as session:
|
||||
result = await session.execute(statement)
|
||||
return result.scalar() or 0
|
||||
|
||||
async def get_log_histogram(
|
||||
self,
|
||||
search: Optional[str] = None,
|
||||
start_time: Optional[str] = None,
|
||||
end_time: Optional[str] = None,
|
||||
interval_minutes: int = 15,
|
||||
) -> List[dict]:
|
||||
bucket_seconds = interval_minutes * 60
|
||||
bucket_expr = literal_column(
|
||||
f"datetime((strftime('%s', timestamp) / {bucket_seconds}) * {bucket_seconds}, 'unixepoch')"
|
||||
).label("bucket_time")
|
||||
|
||||
statement = select(bucket_expr, func.count().label("count")).select_from(Log)
|
||||
statement = self._apply_filters(statement, search, start_time, end_time)
|
||||
statement = statement.group_by(literal_column("bucket_time")).order_by(
|
||||
literal_column("bucket_time")
|
||||
)
|
||||
|
||||
async with self.session_factory() as session:
|
||||
results = await session.execute(statement)
|
||||
return [{"time": r[0], "count": r[1]} for r in results.all()]
|
||||
|
||||
async def get_stats_summary(self) -> dict[str, Any]:
|
||||
async with self.session_factory() as session:
|
||||
total_logs = (
|
||||
await session.execute(select(func.count()).select_from(Log))
|
||||
).scalar() or 0
|
||||
unique_attackers = (
|
||||
await session.execute(
|
||||
select(func.count(func.distinct(Log.attacker_ip)))
|
||||
)
|
||||
).scalar() or 0
|
||||
active_deckies = (
|
||||
await session.execute(
|
||||
select(func.count(func.distinct(Log.decky)))
|
||||
)
|
||||
).scalar() or 0
|
||||
|
||||
_state = await asyncio.to_thread(load_state)
|
||||
deployed_deckies = len(_state[0].deckies) if _state else 0
|
||||
|
||||
return {
|
||||
"total_logs": total_logs,
|
||||
"unique_attackers": unique_attackers,
|
||||
"active_deckies": active_deckies,
|
||||
"deployed_deckies": deployed_deckies,
|
||||
}
|
||||
|
||||
async def get_deckies(self) -> List[dict]:
|
||||
_state = await asyncio.to_thread(load_state)
|
||||
return [_d.model_dump() for _d in _state[0].deckies] if _state else []
|
||||
|
||||
# ------------------------------------------------------------------ users
|
||||
|
||||
async def get_user_by_username(self, username: str) -> Optional[dict]:
|
||||
async with self.session_factory() as session:
|
||||
result = await session.execute(
|
||||
select(User).where(User.username == username)
|
||||
)
|
||||
user = result.scalar_one_or_none()
|
||||
return user.model_dump() if user else None
|
||||
|
||||
async def get_user_by_uuid(self, uuid: str) -> Optional[dict]:
|
||||
async with self.session_factory() as session:
|
||||
result = await session.execute(
|
||||
select(User).where(User.uuid == uuid)
|
||||
)
|
||||
user = result.scalar_one_or_none()
|
||||
return user.model_dump() if user else None
|
||||
|
||||
async def create_user(self, user_data: dict[str, Any]) -> None:
|
||||
async with self.session_factory() as session:
|
||||
session.add(User(**user_data))
|
||||
await session.commit()
|
||||
|
||||
async def update_user_password(
|
||||
self, uuid: str, password_hash: str, must_change_password: bool = False
|
||||
) -> None:
|
||||
async with self.session_factory() as session:
|
||||
await session.execute(
|
||||
update(User)
|
||||
.where(User.uuid == uuid)
|
||||
.values(
|
||||
password_hash=password_hash,
|
||||
must_change_password=must_change_password,
|
||||
)
|
||||
)
|
||||
await session.commit()
|
||||
|
||||
# ---------------------------------------------------------------- bounties
|
||||
|
||||
async def add_bounty(self, bounty_data: dict[str, Any]) -> None:
|
||||
data = bounty_data.copy()
|
||||
if "payload" in data and isinstance(data["payload"], dict):
|
||||
data["payload"] = json.dumps(data["payload"])
|
||||
|
||||
async with self.session_factory() as session:
|
||||
session.add(Bounty(**data))
|
||||
await session.commit()
|
||||
|
||||
def _apply_bounty_filters(self, statement, bounty_type: Optional[str], search: Optional[str]):
|
||||
if bounty_type:
|
||||
statement = statement.where(Bounty.bounty_type == bounty_type)
|
||||
if search:
|
||||
lk = f"%{search}%"
|
||||
statement = statement.where(
|
||||
or_(
|
||||
Bounty.decky.like(lk),
|
||||
Bounty.service.like(lk),
|
||||
Bounty.attacker_ip.like(lk),
|
||||
Bounty.payload.like(lk),
|
||||
)
|
||||
)
|
||||
return statement
|
||||
|
||||
async def get_bounties(
|
||||
self,
|
||||
limit: int = 50,
|
||||
offset: int = 0,
|
||||
bounty_type: Optional[str] = None,
|
||||
search: Optional[str] = None,
|
||||
) -> List[dict]:
|
||||
statement = (
|
||||
select(Bounty)
|
||||
.order_by(desc(Bounty.timestamp))
|
||||
.offset(offset)
|
||||
.limit(limit)
|
||||
)
|
||||
statement = self._apply_bounty_filters(statement, bounty_type, search)
|
||||
|
||||
async with self.session_factory() as session:
|
||||
results = await session.execute(statement)
|
||||
final = []
|
||||
for item in results.scalars().all():
|
||||
d = item.model_dump(mode='json')
|
||||
try:
|
||||
d["payload"] = json.loads(d["payload"])
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
pass
|
||||
final.append(d)
|
||||
return final
|
||||
|
||||
async def get_total_bounties(
|
||||
self, bounty_type: Optional[str] = None, search: Optional[str] = None
|
||||
) -> int:
|
||||
statement = select(func.count()).select_from(Bounty)
|
||||
statement = self._apply_bounty_filters(statement, bounty_type, search)
|
||||
|
||||
async with self.session_factory() as session:
|
||||
result = await session.execute(statement)
|
||||
return result.scalar() or 0
|
||||
73
decnet/web/dependencies.py
Normal file
73
decnet/web/dependencies.py
Normal file
@@ -0,0 +1,73 @@
|
||||
from typing import Any, Optional
|
||||
from pathlib import Path
|
||||
|
||||
import jwt
|
||||
from fastapi import HTTPException, status, Request
|
||||
from fastapi.security import OAuth2PasswordBearer
|
||||
|
||||
from decnet.web.auth import ALGORITHM, SECRET_KEY
|
||||
from decnet.web.db.sqlite.repository import SQLiteRepository
|
||||
|
||||
# Root directory for database
|
||||
_ROOT_DIR = Path(__file__).parent.parent.parent.absolute()
|
||||
DB_PATH = _ROOT_DIR / "decnet.db"
|
||||
|
||||
# Shared repository instance
|
||||
repo = SQLiteRepository(db_path=str(DB_PATH))
|
||||
|
||||
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/v1/auth/login")
|
||||
|
||||
|
||||
async def get_stream_user(request: Request, token: Optional[str] = None) -> str:
|
||||
"""Auth dependency for SSE endpoints — accepts Bearer header OR ?token= query param.
|
||||
EventSource does not support custom headers, so the query-string fallback is intentional here only.
|
||||
"""
|
||||
_credentials_exception = HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Could not validate credentials",
|
||||
headers={"WWW-Authenticate": "Bearer"},
|
||||
)
|
||||
|
||||
auth_header = request.headers.get("Authorization")
|
||||
resolved: str | None = (
|
||||
auth_header.split(" ", 1)[1]
|
||||
if auth_header and auth_header.startswith("Bearer ")
|
||||
else token
|
||||
)
|
||||
if not resolved:
|
||||
raise _credentials_exception
|
||||
|
||||
try:
|
||||
_payload: dict[str, Any] = jwt.decode(resolved, SECRET_KEY, algorithms=[ALGORITHM])
|
||||
_user_uuid: Optional[str] = _payload.get("uuid")
|
||||
if _user_uuid is None:
|
||||
raise _credentials_exception
|
||||
return _user_uuid
|
||||
except jwt.PyJWTError:
|
||||
raise _credentials_exception
|
||||
|
||||
|
||||
async def get_current_user(request: Request) -> str:
|
||||
_credentials_exception = HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Could not validate credentials",
|
||||
headers={"WWW-Authenticate": "Bearer"},
|
||||
)
|
||||
|
||||
auth_header = request.headers.get("Authorization")
|
||||
token: str | None = (
|
||||
auth_header.split(" ", 1)[1]
|
||||
if auth_header and auth_header.startswith("Bearer ")
|
||||
else None
|
||||
)
|
||||
if not token:
|
||||
raise _credentials_exception
|
||||
|
||||
try:
|
||||
_payload: dict[str, Any] = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
|
||||
_user_uuid: Optional[str] = _payload.get("uuid")
|
||||
if _user_uuid is None:
|
||||
raise _credentials_exception
|
||||
return _user_uuid
|
||||
except jwt.PyJWTError:
|
||||
raise _credentials_exception
|
||||
94
decnet/web/ingester.py
Normal file
94
decnet/web/ingester.py
Normal file
@@ -0,0 +1,94 @@
|
||||
import asyncio
|
||||
import os
|
||||
import logging
|
||||
import json
|
||||
from typing import Any
|
||||
from pathlib import Path
|
||||
|
||||
from decnet.web.db.repository import BaseRepository
|
||||
|
||||
logger: logging.Logger = logging.getLogger("decnet.web.ingester")
|
||||
|
||||
async def log_ingestion_worker(repo: BaseRepository) -> None:
|
||||
"""
|
||||
Background task that tails the DECNET_INGEST_LOG_FILE.json and
|
||||
inserts structured JSON logs into the SQLite repository.
|
||||
"""
|
||||
_base_log_file: str | None = os.environ.get("DECNET_INGEST_LOG_FILE")
|
||||
if not _base_log_file:
|
||||
logger.warning("DECNET_INGEST_LOG_FILE not set. Log ingestion disabled.")
|
||||
return
|
||||
|
||||
_json_log_path: Path = Path(_base_log_file).with_suffix(".json")
|
||||
_position: int = 0
|
||||
|
||||
logger.info(f"Starting JSON log ingestion from {_json_log_path}")
|
||||
|
||||
while True:
|
||||
try:
|
||||
if not _json_log_path.exists():
|
||||
await asyncio.sleep(2)
|
||||
continue
|
||||
|
||||
_stat: os.stat_result = _json_log_path.stat()
|
||||
if _stat.st_size < _position:
|
||||
# File rotated or truncated
|
||||
_position = 0
|
||||
|
||||
if _stat.st_size == _position:
|
||||
# No new data
|
||||
await asyncio.sleep(1)
|
||||
continue
|
||||
|
||||
with open(_json_log_path, "r", encoding="utf-8", errors="replace") as _f:
|
||||
_f.seek(_position)
|
||||
while True:
|
||||
_line: str = _f.readline()
|
||||
if not _line:
|
||||
break # EOF reached
|
||||
|
||||
if not _line.endswith('\n'):
|
||||
# Partial line read, don't process yet, don't advance position
|
||||
break
|
||||
|
||||
try:
|
||||
_log_data: dict[str, Any] = json.loads(_line.strip())
|
||||
await repo.add_log(_log_data)
|
||||
await _extract_bounty(repo, _log_data)
|
||||
except json.JSONDecodeError:
|
||||
logger.error(f"Failed to decode JSON log line: {_line}")
|
||||
continue
|
||||
|
||||
# Update position after successful line read
|
||||
_position = _f.tell()
|
||||
|
||||
except Exception as _e:
|
||||
logger.error(f"Error in log ingestion worker: {_e}")
|
||||
await asyncio.sleep(5)
|
||||
|
||||
await asyncio.sleep(1)
|
||||
|
||||
|
||||
async def _extract_bounty(repo: BaseRepository, log_data: dict[str, Any]) -> None:
|
||||
"""Detect and extract valuable artifacts (bounties) from log entries."""
|
||||
_fields = log_data.get("fields")
|
||||
if not isinstance(_fields, dict):
|
||||
return
|
||||
|
||||
# 1. Credentials (User/Pass)
|
||||
_user = _fields.get("username")
|
||||
_pass = _fields.get("password")
|
||||
|
||||
if _user and _pass:
|
||||
await repo.add_bounty({
|
||||
"decky": log_data.get("decky"),
|
||||
"service": log_data.get("service"),
|
||||
"attacker_ip": log_data.get("attacker_ip"),
|
||||
"bounty_type": "credential",
|
||||
"payload": {
|
||||
"username": _user,
|
||||
"password": _pass
|
||||
}
|
||||
})
|
||||
|
||||
# 2. Add more extractors here later (e.g. file hashes, crypto keys)
|
||||
36
decnet/web/router/__init__.py
Normal file
36
decnet/web/router/__init__.py
Normal file
@@ -0,0 +1,36 @@
|
||||
from fastapi import APIRouter
|
||||
|
||||
from .auth.api_login import router as login_router
|
||||
from .auth.api_change_pass import router as change_pass_router
|
||||
from .logs.api_get_logs import router as logs_router
|
||||
from .logs.api_get_histogram import router as histogram_router
|
||||
from .bounty.api_get_bounties import router as bounty_router
|
||||
from .stats.api_get_stats import router as stats_router
|
||||
from .fleet.api_get_deckies import router as get_deckies_router
|
||||
from .fleet.api_mutate_decky import router as mutate_decky_router
|
||||
from .fleet.api_mutate_interval import router as mutate_interval_router
|
||||
from .fleet.api_deploy_deckies import router as deploy_deckies_router
|
||||
from .stream.api_stream_events import router as stream_router
|
||||
|
||||
api_router = APIRouter()
|
||||
|
||||
# Authentication
|
||||
api_router.include_router(login_router)
|
||||
api_router.include_router(change_pass_router)
|
||||
|
||||
# Logs & Analytics
|
||||
api_router.include_router(logs_router)
|
||||
api_router.include_router(histogram_router)
|
||||
|
||||
# Bounty Vault
|
||||
api_router.include_router(bounty_router)
|
||||
|
||||
# Fleet Management
|
||||
api_router.include_router(get_deckies_router)
|
||||
api_router.include_router(mutate_decky_router)
|
||||
api_router.include_router(mutate_interval_router)
|
||||
api_router.include_router(deploy_deckies_router)
|
||||
|
||||
# Observability
|
||||
api_router.include_router(stats_router)
|
||||
api_router.include_router(stream_router)
|
||||
27
decnet/web/router/auth/api_change_pass.py
Normal file
27
decnet/web/router/auth/api_change_pass.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from typing import Any, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, status
|
||||
|
||||
from decnet.web.auth import get_password_hash, verify_password
|
||||
from decnet.web.dependencies import get_current_user, repo
|
||||
from decnet.web.db.models import ChangePasswordRequest
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post(
|
||||
"/auth/change-password",
|
||||
tags=["Authentication"],
|
||||
responses={401: {"description": "Invalid or expired token / wrong old password"}, 422: {"description": "Validation error"}},
|
||||
)
|
||||
async def change_password(request: ChangePasswordRequest, current_user: str = Depends(get_current_user)) -> dict[str, str]:
|
||||
_user: Optional[dict[str, Any]] = await repo.get_user_by_uuid(current_user)
|
||||
if not _user or not verify_password(request.old_password, _user["password_hash"]):
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Incorrect old password",
|
||||
)
|
||||
|
||||
_new_hash: str = get_password_hash(request.new_password)
|
||||
await repo.update_user_password(current_user, _new_hash, must_change_password=False)
|
||||
return {"message": "Password updated successfully"}
|
||||
41
decnet/web/router/auth/api_login.py
Normal file
41
decnet/web/router/auth/api_login.py
Normal file
@@ -0,0 +1,41 @@
|
||||
from datetime import timedelta
|
||||
from typing import Any, Optional
|
||||
|
||||
from fastapi import APIRouter, HTTPException, status
|
||||
|
||||
from decnet.web.auth import (
|
||||
ACCESS_TOKEN_EXPIRE_MINUTES,
|
||||
create_access_token,
|
||||
verify_password,
|
||||
)
|
||||
from decnet.web.dependencies import repo
|
||||
from decnet.web.db.models import LoginRequest, Token
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post(
|
||||
"/auth/login",
|
||||
response_model=Token,
|
||||
tags=["Authentication"],
|
||||
responses={401: {"description": "Incorrect username or password"}, 422: {"description": "Validation error"}},
|
||||
)
|
||||
async def login(request: LoginRequest) -> dict[str, Any]:
|
||||
_user: Optional[dict[str, Any]] = await repo.get_user_by_username(request.username)
|
||||
if not _user or not verify_password(request.password, _user["password_hash"]):
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Incorrect username or password",
|
||||
headers={"WWW-Authenticate": "Bearer"},
|
||||
)
|
||||
|
||||
_access_token_expires: timedelta = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
|
||||
# Token uses uuid instead of sub
|
||||
_access_token: str = create_access_token(
|
||||
data={"uuid": _user["uuid"]}, expires_delta=_access_token_expires
|
||||
)
|
||||
return {
|
||||
"access_token": _access_token,
|
||||
"token_type": "bearer", # nosec B105
|
||||
"must_change_password": bool(_user.get("must_change_password", False))
|
||||
}
|
||||
28
decnet/web/router/bounty/api_get_bounties.py
Normal file
28
decnet/web/router/bounty/api_get_bounties.py
Normal file
@@ -0,0 +1,28 @@
|
||||
from typing import Any, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, Query
|
||||
|
||||
from decnet.web.dependencies import get_current_user, repo
|
||||
from decnet.web.db.models import BountyResponse
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/bounty", response_model=BountyResponse, tags=["Bounty Vault"],
|
||||
responses={401: {"description": "Not authenticated"}, 422: {"description": "Validation error"}},)
|
||||
async def get_bounties(
|
||||
limit: int = Query(50, ge=1, le=1000),
|
||||
offset: int = Query(0, ge=0),
|
||||
bounty_type: Optional[str] = None,
|
||||
search: Optional[str] = None,
|
||||
current_user: str = Depends(get_current_user)
|
||||
) -> dict[str, Any]:
|
||||
"""Retrieve collected bounties (harvested credentials, payloads, etc.)."""
|
||||
_data = await repo.get_bounties(limit=limit, offset=offset, bounty_type=bounty_type, search=search)
|
||||
_total = await repo.get_total_bounties(bounty_type=bounty_type, search=search)
|
||||
return {
|
||||
"total": _total,
|
||||
"limit": limit,
|
||||
"offset": offset,
|
||||
"data": _data
|
||||
}
|
||||
79
decnet/web/router/fleet/api_deploy_deckies.py
Normal file
79
decnet/web/router/fleet/api_deploy_deckies.py
Normal file
@@ -0,0 +1,79 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
|
||||
from decnet.config import DEFAULT_MUTATE_INTERVAL, DecnetConfig, load_state
|
||||
from decnet.engine import deploy as _deploy
|
||||
from decnet.ini_loader import load_ini_from_string
|
||||
from decnet.network import detect_interface, detect_subnet, get_host_ip
|
||||
from decnet.web.dependencies import get_current_user
|
||||
from decnet.web.db.models import DeployIniRequest
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post("/deckies/deploy", tags=["Fleet Management"])
|
||||
async def api_deploy_deckies(req: DeployIniRequest, current_user: str = Depends(get_current_user)) -> dict[str, str]:
|
||||
from decnet.fleet import build_deckies_from_ini
|
||||
|
||||
try:
|
||||
ini = load_ini_from_string(req.ini_content)
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=400, detail=f"Failed to parse INI: {e}")
|
||||
|
||||
state = load_state()
|
||||
ingest_log_file = os.environ.get("DECNET_INGEST_LOG_FILE")
|
||||
|
||||
if state:
|
||||
config, _ = state
|
||||
subnet_cidr = ini.subnet or config.subnet
|
||||
gateway = ini.gateway or config.gateway
|
||||
host_ip = get_host_ip(config.interface)
|
||||
randomize_services = False
|
||||
# Always sync config log_file with current API ingestion target
|
||||
if ingest_log_file:
|
||||
config.log_file = ingest_log_file
|
||||
else:
|
||||
# If no state exists, we need to infer network details
|
||||
iface = ini.interface or detect_interface()
|
||||
subnet_cidr, gateway = ini.subnet, ini.gateway
|
||||
if not subnet_cidr or not gateway:
|
||||
detected_subnet, detected_gateway = detect_subnet(iface)
|
||||
subnet_cidr = subnet_cidr or detected_subnet
|
||||
gateway = gateway or detected_gateway
|
||||
host_ip = get_host_ip(iface)
|
||||
randomize_services = False
|
||||
config = DecnetConfig(
|
||||
mode="unihost",
|
||||
interface=iface,
|
||||
subnet=subnet_cidr,
|
||||
gateway=gateway,
|
||||
deckies=[],
|
||||
log_file=ingest_log_file,
|
||||
ipvlan=False,
|
||||
mutate_interval=ini.mutate_interval or DEFAULT_MUTATE_INTERVAL
|
||||
)
|
||||
|
||||
try:
|
||||
new_decky_configs = build_deckies_from_ini(
|
||||
ini, subnet_cidr, gateway, host_ip, randomize_services, cli_mutate_interval=None
|
||||
)
|
||||
except ValueError as e:
|
||||
raise HTTPException(status_code=400, detail=str(e))
|
||||
|
||||
# Merge deckies
|
||||
existing_deckies_map = {d.name: d for d in config.deckies}
|
||||
for new_decky in new_decky_configs:
|
||||
existing_deckies_map[new_decky.name] = new_decky
|
||||
|
||||
config.deckies = list(existing_deckies_map.values())
|
||||
|
||||
# We call deploy(config) which regenerates docker-compose and runs `up -d --remove-orphans`.
|
||||
try:
|
||||
_deploy(config)
|
||||
except Exception as e:
|
||||
logging.getLogger("decnet.web.api").exception("Deployment failed: %s", e)
|
||||
raise HTTPException(status_code=500, detail="Deployment failed. Check server logs for details.")
|
||||
|
||||
return {"message": "Deckies deployed successfully"}
|
||||
13
decnet/web/router/fleet/api_get_deckies.py
Normal file
13
decnet/web/router/fleet/api_get_deckies.py
Normal file
@@ -0,0 +1,13 @@
|
||||
from typing import Any
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
|
||||
from decnet.web.dependencies import get_current_user, repo
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/deckies", tags=["Fleet Management"],
|
||||
responses={401: {"description": "Not authenticated"}, 422: {"description": "Validation error"}},)
|
||||
async def get_deckies(current_user: str = Depends(get_current_user)) -> list[dict[str, Any]]:
|
||||
return await repo.get_deckies()
|
||||
17
decnet/web/router/fleet/api_mutate_decky.py
Normal file
17
decnet/web/router/fleet/api_mutate_decky.py
Normal file
@@ -0,0 +1,17 @@
|
||||
from fastapi import APIRouter, Depends, HTTPException, Path
|
||||
|
||||
from decnet.mutator import mutate_decky
|
||||
from decnet.web.dependencies import get_current_user
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post("/deckies/{decky_name}/mutate", tags=["Fleet Management"])
|
||||
async def api_mutate_decky(
|
||||
decky_name: str = Path(..., pattern=r"^[a-z0-9\-]{1,64}$"),
|
||||
current_user: str = Depends(get_current_user),
|
||||
) -> dict[str, str]:
|
||||
success = mutate_decky(decky_name)
|
||||
if success:
|
||||
return {"message": f"Successfully mutated {decky_name}"}
|
||||
raise HTTPException(status_code=404, detail=f"Decky {decky_name} not found or failed to mutate")
|
||||
22
decnet/web/router/fleet/api_mutate_interval.py
Normal file
22
decnet/web/router/fleet/api_mutate_interval.py
Normal file
@@ -0,0 +1,22 @@
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
|
||||
from decnet.config import load_state, save_state
|
||||
from decnet.web.dependencies import get_current_user
|
||||
from decnet.web.db.models import MutateIntervalRequest
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.put("/deckies/{decky_name}/mutate-interval", tags=["Fleet Management"],
|
||||
responses={401: {"description": "Not authenticated"}, 422: {"description": "Validation error"}},)
|
||||
async def api_update_mutate_interval(decky_name: str, req: MutateIntervalRequest, current_user: str = Depends(get_current_user)) -> dict[str, str]:
|
||||
state = load_state()
|
||||
if not state:
|
||||
raise HTTPException(status_code=500, detail="No active deployment")
|
||||
config, compose_path = state
|
||||
decky = next((d for d in config.deckies if d.name == decky_name), None)
|
||||
if not decky:
|
||||
raise HTTPException(status_code=404, detail="Decky not found")
|
||||
decky.mutate_interval = req.mutate_interval
|
||||
save_state(config, compose_path)
|
||||
return {"message": "Mutation interval updated"}
|
||||
19
decnet/web/router/logs/api_get_histogram.py
Normal file
19
decnet/web/router/logs/api_get_histogram.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from typing import Any, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, Query
|
||||
|
||||
from decnet.web.dependencies import get_current_user, repo
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/logs/histogram", tags=["Logs"],
|
||||
responses={401: {"description": "Not authenticated"}, 422: {"description": "Validation error"}},)
|
||||
async def get_logs_histogram(
|
||||
search: Optional[str] = None,
|
||||
start_time: Optional[str] = None,
|
||||
end_time: Optional[str] = None,
|
||||
interval_minutes: int = Query(15, ge=1),
|
||||
current_user: str = Depends(get_current_user)
|
||||
) -> list[dict[str, Any]]:
|
||||
return await repo.get_log_histogram(search=search, start_time=start_time, end_time=end_time, interval_minutes=interval_minutes)
|
||||
29
decnet/web/router/logs/api_get_logs.py
Normal file
29
decnet/web/router/logs/api_get_logs.py
Normal file
@@ -0,0 +1,29 @@
|
||||
from typing import Any, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, Query
|
||||
|
||||
from decnet.web.dependencies import get_current_user, repo
|
||||
from decnet.web.db.models import LogsResponse
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
_DATETIME_RE = r"^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}$"
|
||||
|
||||
|
||||
@router.get("/logs", response_model=LogsResponse, tags=["Logs"])
|
||||
async def get_logs(
|
||||
limit: int = Query(50, ge=1, le=1000),
|
||||
offset: int = Query(0, ge=0),
|
||||
search: Optional[str] = Query(None, max_length=512),
|
||||
start_time: Optional[str] = Query(None, pattern=_DATETIME_RE),
|
||||
end_time: Optional[str] = Query(None, pattern=_DATETIME_RE),
|
||||
current_user: str = Depends(get_current_user)
|
||||
) -> dict[str, Any]:
|
||||
_logs: list[dict[str, Any]] = await repo.get_logs(limit=limit, offset=offset, search=search, start_time=start_time, end_time=end_time)
|
||||
_total: int = await repo.get_total_logs(search=search, start_time=start_time, end_time=end_time)
|
||||
return {
|
||||
"total": _total,
|
||||
"limit": limit,
|
||||
"offset": offset,
|
||||
"data": _logs
|
||||
}
|
||||
14
decnet/web/router/stats/api_get_stats.py
Normal file
14
decnet/web/router/stats/api_get_stats.py
Normal file
@@ -0,0 +1,14 @@
|
||||
from typing import Any
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
|
||||
from decnet.web.dependencies import get_current_user, repo
|
||||
from decnet.web.db.models import StatsResponse
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/stats", response_model=StatsResponse, tags=["Observability"],
|
||||
responses={401: {"description": "Not authenticated"}, 422: {"description": "Validation error"}},)
|
||||
async def get_stats(current_user: str = Depends(get_current_user)) -> dict[str, Any]:
|
||||
return await repo.get_stats_summary()
|
||||
75
decnet/web/router/stream/api_stream_events.py
Normal file
75
decnet/web/router/stream/api_stream_events.py
Normal file
@@ -0,0 +1,75 @@
|
||||
import json
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import AsyncGenerator, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, Query, Request
|
||||
from fastapi.responses import StreamingResponse
|
||||
|
||||
from decnet.web.dependencies import get_stream_user, repo
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/stream", tags=["Observability"],
|
||||
responses={401: {"description": "Not authenticated"}, 422: {"description": "Validation error"}},)
|
||||
async def stream_events(
|
||||
request: Request,
|
||||
last_event_id: int = Query(0, alias="lastEventId"),
|
||||
search: Optional[str] = None,
|
||||
start_time: Optional[str] = None,
|
||||
end_time: Optional[str] = None,
|
||||
current_user: str = Depends(get_stream_user)
|
||||
) -> StreamingResponse:
|
||||
|
||||
async def event_generator() -> AsyncGenerator[str, None]:
|
||||
last_id = last_event_id
|
||||
stats_interval_sec = 10
|
||||
loops_since_stats = 0
|
||||
try:
|
||||
if last_id == 0:
|
||||
last_id = await repo.get_max_log_id()
|
||||
|
||||
# Emit initial snapshot immediately so the client never needs to poll /stats
|
||||
stats = await repo.get_stats_summary()
|
||||
yield f"event: message\ndata: {json.dumps({'type': 'stats', 'data': stats})}\n\n"
|
||||
histogram = await repo.get_log_histogram(
|
||||
search=search, start_time=start_time,
|
||||
end_time=end_time, interval_minutes=15,
|
||||
)
|
||||
yield f"event: message\ndata: {json.dumps({'type': 'histogram', 'data': histogram})}\n\n"
|
||||
|
||||
while True:
|
||||
if await request.is_disconnected():
|
||||
break
|
||||
|
||||
new_logs = await repo.get_logs_after_id(
|
||||
last_id, limit=50, search=search,
|
||||
start_time=start_time, end_time=end_time,
|
||||
)
|
||||
if new_logs:
|
||||
last_id = max(entry["id"] for entry in new_logs)
|
||||
yield f"event: message\ndata: {json.dumps({'type': 'logs', 'data': new_logs})}\n\n"
|
||||
loops_since_stats = stats_interval_sec
|
||||
|
||||
if loops_since_stats >= stats_interval_sec:
|
||||
stats = await repo.get_stats_summary()
|
||||
yield f"event: message\ndata: {json.dumps({'type': 'stats', 'data': stats})}\n\n"
|
||||
histogram = await repo.get_log_histogram(
|
||||
search=search, start_time=start_time,
|
||||
end_time=end_time, interval_minutes=15,
|
||||
)
|
||||
yield f"event: message\ndata: {json.dumps({'type': 'histogram', 'data': histogram})}\n\n"
|
||||
loops_since_stats = 0
|
||||
|
||||
loops_since_stats += 1
|
||||
await asyncio.sleep(1)
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
except Exception:
|
||||
log.exception("SSE stream error for user %s", last_event_id)
|
||||
yield f"event: error\ndata: {json.dumps({'type': 'error', 'message': 'Stream interrupted'})}\n\n"
|
||||
|
||||
return StreamingResponse(event_generator(), media_type="text/event-stream")
|
||||
26
decnet_web/.gitignore
vendored
Normal file
26
decnet_web/.gitignore
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
# Logs
|
||||
logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
lerna-debug.log*
|
||||
|
||||
node_modules
|
||||
dist
|
||||
dist-ssr
|
||||
*.local
|
||||
|
||||
# Editor directories and files
|
||||
.vscode/*
|
||||
!.vscode/extensions.json
|
||||
.idea
|
||||
.DS_Store
|
||||
*.suo
|
||||
*.ntvs*
|
||||
*.njsproj
|
||||
*.sln
|
||||
*.sw?
|
||||
.env
|
||||
.env.local
|
||||
73
decnet_web/README.md
Normal file
73
decnet_web/README.md
Normal file
@@ -0,0 +1,73 @@
|
||||
# React + TypeScript + Vite
|
||||
|
||||
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
|
||||
|
||||
Currently, two official plugins are available:
|
||||
|
||||
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Oxc](https://oxc.rs)
|
||||
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/)
|
||||
|
||||
## React Compiler
|
||||
|
||||
The React Compiler is not enabled on this template because of its impact on dev & build performances. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).
|
||||
|
||||
## Expanding the ESLint configuration
|
||||
|
||||
If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
|
||||
|
||||
```js
|
||||
export default defineConfig([
|
||||
globalIgnores(['dist']),
|
||||
{
|
||||
files: ['**/*.{ts,tsx}'],
|
||||
extends: [
|
||||
// Other configs...
|
||||
|
||||
// Remove tseslint.configs.recommended and replace with this
|
||||
tseslint.configs.recommendedTypeChecked,
|
||||
// Alternatively, use this for stricter rules
|
||||
tseslint.configs.strictTypeChecked,
|
||||
// Optionally, add this for stylistic rules
|
||||
tseslint.configs.stylisticTypeChecked,
|
||||
|
||||
// Other configs...
|
||||
],
|
||||
languageOptions: {
|
||||
parserOptions: {
|
||||
project: ['./tsconfig.node.json', './tsconfig.app.json'],
|
||||
tsconfigRootDir: import.meta.dirname,
|
||||
},
|
||||
// other options...
|
||||
},
|
||||
},
|
||||
])
|
||||
```
|
||||
|
||||
You can also install [eslint-plugin-react-x](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-x) and [eslint-plugin-react-dom](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-dom) for React-specific lint rules:
|
||||
|
||||
```js
|
||||
// eslint.config.js
|
||||
import reactX from 'eslint-plugin-react-x'
|
||||
import reactDom from 'eslint-plugin-react-dom'
|
||||
|
||||
export default defineConfig([
|
||||
globalIgnores(['dist']),
|
||||
{
|
||||
files: ['**/*.{ts,tsx}'],
|
||||
extends: [
|
||||
// Other configs...
|
||||
// Enable lint rules for React
|
||||
reactX.configs['recommended-typescript'],
|
||||
// Enable lint rules for React DOM
|
||||
reactDom.configs.recommended,
|
||||
],
|
||||
languageOptions: {
|
||||
parserOptions: {
|
||||
project: ['./tsconfig.node.json', './tsconfig.app.json'],
|
||||
tsconfigRootDir: import.meta.dirname,
|
||||
},
|
||||
// other options...
|
||||
},
|
||||
},
|
||||
])
|
||||
```
|
||||
23
decnet_web/eslint.config.js
Normal file
23
decnet_web/eslint.config.js
Normal file
@@ -0,0 +1,23 @@
|
||||
import js from '@eslint/js'
|
||||
import globals from 'globals'
|
||||
import reactHooks from 'eslint-plugin-react-hooks'
|
||||
import reactRefresh from 'eslint-plugin-react-refresh'
|
||||
import tseslint from 'typescript-eslint'
|
||||
import { defineConfig, globalIgnores } from 'eslint/config'
|
||||
|
||||
export default defineConfig([
|
||||
globalIgnores(['dist']),
|
||||
{
|
||||
files: ['**/*.{ts,tsx}'],
|
||||
extends: [
|
||||
js.configs.recommended,
|
||||
tseslint.configs.recommended,
|
||||
reactHooks.configs.flat.recommended,
|
||||
reactRefresh.configs.vite,
|
||||
],
|
||||
languageOptions: {
|
||||
ecmaVersion: 2020,
|
||||
globals: globals.browser,
|
||||
},
|
||||
},
|
||||
])
|
||||
13
decnet_web/index.html
Normal file
13
decnet_web/index.html
Normal file
@@ -0,0 +1,13 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<link rel="icon" type="image/svg+xml" href="/favicon.svg" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>decnet_web</title>
|
||||
</head>
|
||||
<body>
|
||||
<div id="root"></div>
|
||||
<script type="module" src="/src/main.tsx"></script>
|
||||
</body>
|
||||
</html>
|
||||
3727
decnet_web/package-lock.json
generated
Normal file
3727
decnet_web/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
34
decnet_web/package.json
Normal file
34
decnet_web/package.json
Normal file
@@ -0,0 +1,34 @@
|
||||
{
|
||||
"name": "decnet_web",
|
||||
"private": true,
|
||||
"version": "0.0.0",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"dev": "vite",
|
||||
"build": "tsc -b && vite build",
|
||||
"lint": "eslint .",
|
||||
"preview": "vite preview"
|
||||
},
|
||||
"dependencies": {
|
||||
"axios": "^1.14.0",
|
||||
"lucide-react": "^1.7.0",
|
||||
"react": "^19.2.4",
|
||||
"react-dom": "^19.2.4",
|
||||
"react-router-dom": "^7.14.0",
|
||||
"recharts": "^3.8.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@eslint/js": "^9.39.4",
|
||||
"@types/node": "^24.12.2",
|
||||
"@types/react": "^19.2.14",
|
||||
"@types/react-dom": "^19.2.3",
|
||||
"@vitejs/plugin-react": "^6.0.1",
|
||||
"eslint": "^9.39.4",
|
||||
"eslint-plugin-react-hooks": "^7.0.1",
|
||||
"eslint-plugin-react-refresh": "^0.5.2",
|
||||
"globals": "^17.4.0",
|
||||
"typescript": "~6.0.2",
|
||||
"typescript-eslint": "^8.58.0",
|
||||
"vite": "^8.0.4"
|
||||
}
|
||||
}
|
||||
1
decnet_web/public/favicon.svg
Normal file
1
decnet_web/public/favicon.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 9.3 KiB |
24
decnet_web/public/icons.svg
Normal file
24
decnet_web/public/icons.svg
Normal file
@@ -0,0 +1,24 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg">
|
||||
<symbol id="bluesky-icon" viewBox="0 0 16 17">
|
||||
<g clip-path="url(#bluesky-clip)"><path fill="#08060d" d="M7.75 7.735c-.693-1.348-2.58-3.86-4.334-5.097-1.68-1.187-2.32-.981-2.74-.79C.188 2.065.1 2.812.1 3.251s.241 3.602.398 4.13c.52 1.744 2.367 2.333 4.07 2.145-2.495.37-4.71 1.278-1.805 4.512 3.196 3.309 4.38-.71 4.987-2.746.608 2.036 1.307 5.91 4.93 2.746 2.72-2.746.747-4.143-1.747-4.512 1.702.189 3.55-.4 4.07-2.145.156-.528.397-3.691.397-4.13s-.088-1.186-.575-1.406c-.42-.19-1.06-.395-2.741.79-1.755 1.24-3.64 3.752-4.334 5.099"/></g>
|
||||
<defs><clipPath id="bluesky-clip"><path fill="#fff" d="M.1.85h15.3v15.3H.1z"/></clipPath></defs>
|
||||
</symbol>
|
||||
<symbol id="discord-icon" viewBox="0 0 20 19">
|
||||
<path fill="#08060d" d="M16.224 3.768a14.5 14.5 0 0 0-3.67-1.153c-.158.286-.343.67-.47.976a13.5 13.5 0 0 0-4.067 0c-.128-.306-.317-.69-.476-.976A14.4 14.4 0 0 0 3.868 3.77C1.546 7.28.916 10.703 1.231 14.077a14.7 14.7 0 0 0 4.5 2.306q.545-.748.965-1.587a9.5 9.5 0 0 1-1.518-.74q.191-.14.372-.293c2.927 1.369 6.107 1.369 8.999 0q.183.152.372.294-.723.437-1.52.74.418.838.963 1.588a14.6 14.6 0 0 0 4.504-2.308c.37-3.911-.63-7.302-2.644-10.309m-9.13 8.234c-.878 0-1.599-.82-1.599-1.82 0-.998.705-1.82 1.6-1.82.894 0 1.614.82 1.599 1.82.001 1-.705 1.82-1.6 1.82m5.91 0c-.878 0-1.599-.82-1.599-1.82 0-.998.705-1.82 1.6-1.82.893 0 1.614.82 1.599 1.82 0 1-.706 1.82-1.6 1.82"/>
|
||||
</symbol>
|
||||
<symbol id="documentation-icon" viewBox="0 0 21 20">
|
||||
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="m15.5 13.333 1.533 1.322c.645.555.967.833.967 1.178s-.322.623-.967 1.179L15.5 18.333m-3.333-5-1.534 1.322c-.644.555-.966.833-.966 1.178s.322.623.966 1.179l1.534 1.321"/>
|
||||
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="M17.167 10.836v-4.32c0-1.41 0-2.117-.224-2.68-.359-.906-1.118-1.621-2.08-1.96-.599-.21-1.349-.21-2.848-.21-2.623 0-3.935 0-4.983.369-1.684.591-3.013 1.842-3.641 3.428C3 6.449 3 7.684 3 10.154v2.122c0 2.558 0 3.838.706 4.726q.306.383.713.671c.76.536 1.79.64 3.581.66"/>
|
||||
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="M3 10a2.78 2.78 0 0 1 2.778-2.778c.555 0 1.209.097 1.748-.047.48-.129.854-.503.982-.982.145-.54.048-1.194.048-1.749a2.78 2.78 0 0 1 2.777-2.777"/>
|
||||
</symbol>
|
||||
<symbol id="github-icon" viewBox="0 0 19 19">
|
||||
<path fill="#08060d" fill-rule="evenodd" d="M9.356 1.85C5.05 1.85 1.57 5.356 1.57 9.694a7.84 7.84 0 0 0 5.324 7.44c.387.079.528-.168.528-.376 0-.182-.013-.805-.013-1.454-2.165.467-2.616-.935-2.616-.935-.349-.91-.864-1.143-.864-1.143-.71-.48.051-.48.051-.48.787.051 1.2.805 1.2.805.695 1.194 1.817.857 2.268.649.064-.507.27-.857.49-1.052-1.728-.182-3.545-.857-3.545-3.87 0-.857.31-1.558.8-2.104-.078-.195-.349-1 .077-2.078 0 0 .657-.208 2.14.805a7.5 7.5 0 0 1 1.946-.26c.657 0 1.328.092 1.946.26 1.483-1.013 2.14-.805 2.14-.805.426 1.078.155 1.883.078 2.078.502.546.799 1.247.799 2.104 0 3.013-1.818 3.675-3.558 3.87.284.247.528.714.528 1.454 0 1.052-.012 1.896-.012 2.156 0 .208.142.455.528.377a7.84 7.84 0 0 0 5.324-7.441c.013-4.338-3.48-7.844-7.773-7.844" clip-rule="evenodd"/>
|
||||
</symbol>
|
||||
<symbol id="social-icon" viewBox="0 0 20 20">
|
||||
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="M12.5 6.667a4.167 4.167 0 1 0-8.334 0 4.167 4.167 0 0 0 8.334 0"/>
|
||||
<path fill="none" stroke="#aa3bff" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.35" d="M2.5 16.667a5.833 5.833 0 0 1 8.75-5.053m3.837.474.513 1.035c.07.144.257.282.414.309l.93.155c.596.1.736.536.307.965l-.723.73a.64.64 0 0 0-.152.531l.207.903c.164.715-.213.991-.84.618l-.872-.52a.63.63 0 0 0-.577 0l-.872.52c-.624.373-1.003.094-.84-.618l.207-.903a.64.64 0 0 0-.152-.532l-.723-.729c-.426-.43-.289-.864.306-.964l.93-.156a.64.64 0 0 0 .412-.31l.513-1.034c.28-.562.735-.562 1.012 0"/>
|
||||
</symbol>
|
||||
<symbol id="x-icon" viewBox="0 0 19 19">
|
||||
<path fill="#08060d" fill-rule="evenodd" d="M1.893 1.98c.052.072 1.245 1.769 2.653 3.77l2.892 4.114c.183.261.333.48.333.486s-.068.089-.152.183l-.522.593-.765.867-3.597 4.087c-.375.426-.734.834-.798.905a1 1 0 0 0-.118.148c0 .01.236.017.664.017h.663l.729-.83c.4-.457.796-.906.879-.999a692 692 0 0 0 1.794-2.038c.034-.037.301-.34.594-.675l.551-.624.345-.392a7 7 0 0 1 .34-.374c.006 0 .93 1.306 2.052 2.903l2.084 2.965.045.063h2.275c1.87 0 2.273-.003 2.266-.021-.008-.02-1.098-1.572-3.894-5.547-2.013-2.862-2.28-3.246-2.273-3.266.008-.019.282-.332 2.085-2.38l2-2.274 1.567-1.782c.022-.028-.016-.03-.65-.03h-.674l-.3.342a871 871 0 0 1-1.782 2.025c-.067.075-.405.458-.75.852a100 100 0 0 1-.803.91c-.148.172-.299.344-.99 1.127-.304.343-.32.358-.345.327-.015-.019-.904-1.282-1.976-2.808L6.365 1.85H1.8zm1.782.91 8.078 11.294c.772 1.08 1.413 1.973 1.425 1.984.016.017.241.02 1.05.017l1.03-.004-2.694-3.766L7.796 5.75 5.722 2.852l-1.039-.004-1.039-.004z" clip-rule="evenodd"/>
|
||||
</symbol>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 4.9 KiB |
184
decnet_web/src/App.css
Normal file
184
decnet_web/src/App.css
Normal file
@@ -0,0 +1,184 @@
|
||||
.counter {
|
||||
font-size: 16px;
|
||||
padding: 5px 10px;
|
||||
border-radius: 5px;
|
||||
color: var(--accent);
|
||||
background: var(--accent-bg);
|
||||
border: 2px solid transparent;
|
||||
transition: border-color 0.3s;
|
||||
margin-bottom: 24px;
|
||||
|
||||
&:hover {
|
||||
border-color: var(--accent-border);
|
||||
}
|
||||
&:focus-visible {
|
||||
outline: 2px solid var(--accent);
|
||||
outline-offset: 2px;
|
||||
}
|
||||
}
|
||||
|
||||
.hero {
|
||||
position: relative;
|
||||
|
||||
.base,
|
||||
.framework,
|
||||
.vite {
|
||||
inset-inline: 0;
|
||||
margin: 0 auto;
|
||||
}
|
||||
|
||||
.base {
|
||||
width: 170px;
|
||||
position: relative;
|
||||
z-index: 0;
|
||||
}
|
||||
|
||||
.framework,
|
||||
.vite {
|
||||
position: absolute;
|
||||
}
|
||||
|
||||
.framework {
|
||||
z-index: 1;
|
||||
top: 34px;
|
||||
height: 28px;
|
||||
transform: perspective(2000px) rotateZ(300deg) rotateX(44deg) rotateY(39deg)
|
||||
scale(1.4);
|
||||
}
|
||||
|
||||
.vite {
|
||||
z-index: 0;
|
||||
top: 107px;
|
||||
height: 26px;
|
||||
width: auto;
|
||||
transform: perspective(2000px) rotateZ(300deg) rotateX(40deg) rotateY(39deg)
|
||||
scale(0.8);
|
||||
}
|
||||
}
|
||||
|
||||
#center {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 25px;
|
||||
place-content: center;
|
||||
place-items: center;
|
||||
flex-grow: 1;
|
||||
|
||||
@media (max-width: 1024px) {
|
||||
padding: 32px 20px 24px;
|
||||
gap: 18px;
|
||||
}
|
||||
}
|
||||
|
||||
#next-steps {
|
||||
display: flex;
|
||||
border-top: 1px solid var(--border);
|
||||
text-align: left;
|
||||
|
||||
& > div {
|
||||
flex: 1 1 0;
|
||||
padding: 32px;
|
||||
@media (max-width: 1024px) {
|
||||
padding: 24px 20px;
|
||||
}
|
||||
}
|
||||
|
||||
.icon {
|
||||
margin-bottom: 16px;
|
||||
width: 22px;
|
||||
height: 22px;
|
||||
}
|
||||
|
||||
@media (max-width: 1024px) {
|
||||
flex-direction: column;
|
||||
text-align: center;
|
||||
}
|
||||
}
|
||||
|
||||
#docs {
|
||||
border-right: 1px solid var(--border);
|
||||
|
||||
@media (max-width: 1024px) {
|
||||
border-right: none;
|
||||
border-bottom: 1px solid var(--border);
|
||||
}
|
||||
}
|
||||
|
||||
#next-steps ul {
|
||||
list-style: none;
|
||||
padding: 0;
|
||||
display: flex;
|
||||
gap: 8px;
|
||||
margin: 32px 0 0;
|
||||
|
||||
.logo {
|
||||
height: 18px;
|
||||
}
|
||||
|
||||
a {
|
||||
color: var(--text-h);
|
||||
font-size: 16px;
|
||||
border-radius: 6px;
|
||||
background: var(--social-bg);
|
||||
display: flex;
|
||||
padding: 6px 12px;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
text-decoration: none;
|
||||
transition: box-shadow 0.3s;
|
||||
|
||||
&:hover {
|
||||
box-shadow: var(--shadow);
|
||||
}
|
||||
.button-icon {
|
||||
height: 18px;
|
||||
width: 18px;
|
||||
}
|
||||
}
|
||||
|
||||
@media (max-width: 1024px) {
|
||||
margin-top: 20px;
|
||||
flex-wrap: wrap;
|
||||
justify-content: center;
|
||||
|
||||
li {
|
||||
flex: 1 1 calc(50% - 8px);
|
||||
}
|
||||
|
||||
a {
|
||||
width: 100%;
|
||||
justify-content: center;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#spacer {
|
||||
height: 88px;
|
||||
border-top: 1px solid var(--border);
|
||||
@media (max-width: 1024px) {
|
||||
height: 48px;
|
||||
}
|
||||
}
|
||||
|
||||
.ticks {
|
||||
position: relative;
|
||||
width: 100%;
|
||||
|
||||
&::before,
|
||||
&::after {
|
||||
content: '';
|
||||
position: absolute;
|
||||
top: -4.5px;
|
||||
border: 5px solid transparent;
|
||||
}
|
||||
|
||||
&::before {
|
||||
left: 0;
|
||||
border-left-color: var(--border);
|
||||
}
|
||||
&::after {
|
||||
right: 0;
|
||||
border-right-color: var(--border);
|
||||
}
|
||||
}
|
||||
57
decnet_web/src/App.tsx
Normal file
57
decnet_web/src/App.tsx
Normal file
@@ -0,0 +1,57 @@
|
||||
import { useState, useEffect } from 'react';
|
||||
import { BrowserRouter as Router, Routes, Route, Navigate } from 'react-router-dom';
|
||||
import Login from './components/Login';
|
||||
import Layout from './components/Layout';
|
||||
import Dashboard from './components/Dashboard';
|
||||
import DeckyFleet from './components/DeckyFleet';
|
||||
import LiveLogs from './components/LiveLogs';
|
||||
import Attackers from './components/Attackers';
|
||||
import Config from './components/Config';
|
||||
import Bounty from './components/Bounty';
|
||||
|
||||
function App() {
|
||||
const [token, setToken] = useState<string | null>(localStorage.getItem('token'));
|
||||
const [searchQuery, setSearchQuery] = useState('');
|
||||
|
||||
useEffect(() => {
|
||||
const savedToken = localStorage.getItem('token');
|
||||
if (savedToken) {
|
||||
setToken(savedToken);
|
||||
}
|
||||
}, []);
|
||||
|
||||
const handleLogin = (newToken: string) => {
|
||||
setToken(newToken);
|
||||
};
|
||||
|
||||
const handleLogout = () => {
|
||||
localStorage.removeItem('token');
|
||||
setToken(null);
|
||||
};
|
||||
|
||||
const handleSearch = (query: string) => {
|
||||
setSearchQuery(query);
|
||||
};
|
||||
|
||||
if (!token) {
|
||||
return <Login onLogin={handleLogin} />;
|
||||
}
|
||||
|
||||
return (
|
||||
<Router>
|
||||
<Layout onLogout={handleLogout} onSearch={handleSearch}>
|
||||
<Routes>
|
||||
<Route path="/" element={<Dashboard searchQuery={searchQuery} />} />
|
||||
<Route path="/fleet" element={<DeckyFleet />} />
|
||||
<Route path="/live-logs" element={<LiveLogs />} />
|
||||
<Route path="/bounty" element={<Bounty />} />
|
||||
<Route path="/attackers" element={<Attackers />} />
|
||||
<Route path="/config" element={<Config />} />
|
||||
<Route path="*" element={<Navigate to="/" replace />} />
|
||||
</Routes>
|
||||
</Layout>
|
||||
</Router>
|
||||
);
|
||||
}
|
||||
|
||||
export default App;
|
||||
BIN
decnet_web/src/assets/hero.png
Normal file
BIN
decnet_web/src/assets/hero.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 44 KiB |
1
decnet_web/src/assets/react.svg
Normal file
1
decnet_web/src/assets/react.svg
Normal file
@@ -0,0 +1 @@
|
||||
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="35.93" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 228"><path fill="#00D8FF" d="M210.483 73.824a171.49 171.49 0 0 0-8.24-2.597c.465-1.9.893-3.777 1.273-5.621c6.238-30.281 2.16-54.676-11.769-62.708c-13.355-7.7-35.196.329-57.254 19.526a171.23 171.23 0 0 0-6.375 5.848a155.866 155.866 0 0 0-4.241-3.917C100.759 3.829 77.587-4.822 63.673 3.233C50.33 10.957 46.379 33.89 51.995 62.588a170.974 170.974 0 0 0 1.892 8.48c-3.28.932-6.445 1.924-9.474 2.98C17.309 83.498 0 98.307 0 113.668c0 15.865 18.582 31.778 46.812 41.427a145.52 145.52 0 0 0 6.921 2.165a167.467 167.467 0 0 0-2.01 9.138c-5.354 28.2-1.173 50.591 12.134 58.266c13.744 7.926 36.812-.22 59.273-19.855a145.567 145.567 0 0 0 5.342-4.923a168.064 168.064 0 0 0 6.92 6.314c21.758 18.722 43.246 26.282 56.54 18.586c13.731-7.949 18.194-32.003 12.4-61.268a145.016 145.016 0 0 0-1.535-6.842c1.62-.48 3.21-.974 4.76-1.488c29.348-9.723 48.443-25.443 48.443-41.52c0-15.417-17.868-30.326-45.517-39.844Zm-6.365 70.984c-1.4.463-2.836.91-4.3 1.345c-3.24-10.257-7.612-21.163-12.963-32.432c5.106-11 9.31-21.767 12.459-31.957c2.619.758 5.16 1.557 7.61 2.4c23.69 8.156 38.14 20.213 38.14 29.504c0 9.896-15.606 22.743-40.946 31.14Zm-10.514 20.834c2.562 12.94 2.927 24.64 1.23 33.787c-1.524 8.219-4.59 13.698-8.382 15.893c-8.067 4.67-25.32-1.4-43.927-17.412a156.726 156.726 0 0 1-6.437-5.87c7.214-7.889 14.423-17.06 21.459-27.246c12.376-1.098 24.068-2.894 34.671-5.345a134.17 134.17 0 0 1 1.386 6.193ZM87.276 214.515c-7.882 2.783-14.16 2.863-17.955.675c-8.075-4.657-11.432-22.636-6.853-46.752a156.923 156.923 0 0 1 1.869-8.499c10.486 2.32 22.093 3.988 34.498 4.994c7.084 9.967 14.501 19.128 21.976 27.15a134.668 134.668 0 0 1-4.877 4.492c-9.933 8.682-19.886 14.842-28.658 17.94ZM50.35 144.747c-12.483-4.267-22.792-9.812-29.858-15.863c-6.35-5.437-9.555-10.836-9.555-15.216c0-9.322 
13.897-21.212 37.076-29.293c2.813-.98 5.757-1.905 8.812-2.773c3.204 10.42 7.406 21.315 12.477 32.332c-5.137 11.18-9.399 22.249-12.634 32.792a134.718 134.718 0 0 1-6.318-1.979Zm12.378-84.26c-4.811-24.587-1.616-43.134 6.425-47.789c8.564-4.958 27.502 2.111 47.463 19.835a144.318 144.318 0 0 1 3.841 3.545c-7.438 7.987-14.787 17.08-21.808 26.988c-12.04 1.116-23.565 2.908-34.161 5.309a160.342 160.342 0 0 1-1.76-7.887Zm110.427 27.268a347.8 347.8 0 0 0-7.785-12.803c8.168 1.033 15.994 2.404 23.343 4.08c-2.206 7.072-4.956 14.465-8.193 22.045a381.151 381.151 0 0 0-7.365-13.322Zm-45.032-43.861c5.044 5.465 10.096 11.566 15.065 18.186a322.04 322.04 0 0 0-30.257-.006c4.974-6.559 10.069-12.652 15.192-18.18ZM82.802 87.83a323.167 323.167 0 0 0-7.227 13.238c-3.184-7.553-5.909-14.98-8.134-22.152c7.304-1.634 15.093-2.97 23.209-3.984a321.524 321.524 0 0 0-7.848 12.897Zm8.081 65.352c-8.385-.936-16.291-2.203-23.593-3.793c2.26-7.3 5.045-14.885 8.298-22.6a321.187 321.187 0 0 0 7.257 13.246c2.594 4.48 5.28 8.868 8.038 13.147Zm37.542 31.03c-5.184-5.592-10.354-11.779-15.403-18.433c4.902.192 9.899.29 14.978.29c5.218 0 10.376-.117 15.453-.343c-4.985 6.774-10.018 12.97-15.028 18.486Zm52.198-57.817c3.422 7.8 6.306 15.345 8.596 22.52c-7.422 1.694-15.436 3.058-23.88 4.071a382.417 382.417 0 0 0 7.859-13.026a347.403 347.403 0 0 0 7.425-13.565Zm-16.898 8.101a358.557 358.557 0 0 1-12.281 19.815a329.4 329.4 0 0 1-23.444.823c-7.967 0-15.716-.248-23.178-.732a310.202 310.202 0 0 1-12.513-19.846h.001a307.41 307.41 0 0 1-10.923-20.627a310.278 310.278 0 0 1 10.89-20.637l-.001.001a307.318 307.318 0 0 1 12.413-19.761c7.613-.576 15.42-.876 23.31-.876H128c7.926 0 15.743.303 23.354.883a329.357 329.357 0 0 1 12.335 19.695a358.489 358.489 0 0 1 11.036 20.54a329.472 329.472 0 0 1-11 20.722Zm22.56-122.124c8.572 4.944 11.906 24.881 6.52 51.026c-.344 1.668-.73 3.367-1.15 5.09c-10.622-2.452-22.155-4.275-34.23-5.408c-7.034-10.017-14.323-19.124-21.64-27.008a160.789 160.789 0 0 1 5.888-5.4c18.9-16.447 36.564-22.941 
44.612-18.3ZM128 90.808c12.625 0 22.86 10.235 22.86 22.86s-10.235 22.86-22.86 22.86s-22.86-10.235-22.86-22.86s10.235-22.86 22.86-22.86Z"></path></svg>
|
||||
|
After Width: | Height: | Size: 4.0 KiB |
1
decnet_web/src/assets/vite.svg
Normal file
1
decnet_web/src/assets/vite.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 8.5 KiB |
20
decnet_web/src/components/Attackers.tsx
Normal file
20
decnet_web/src/components/Attackers.tsx
Normal file
@@ -0,0 +1,20 @@
|
||||
import React from 'react';
|
||||
import { Activity } from 'lucide-react';
|
||||
import './Dashboard.css';
|
||||
|
||||
const Attackers: React.FC = () => {
|
||||
return (
|
||||
<div className="logs-section">
|
||||
<div className="section-header">
|
||||
<Activity size={20} />
|
||||
<h2>ATTACKER PROFILES</h2>
|
||||
</div>
|
||||
<div style={{ padding: '40px', textAlign: 'center', opacity: 0.5 }}>
|
||||
<p>NO ACTIVE THREATS PROFILED YET.</p>
|
||||
<p style={{ marginTop: '10px', fontSize: '0.8rem' }}>(Attackers view placeholder)</p>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default Attackers;
|
||||
191
decnet_web/src/components/Bounty.tsx
Normal file
191
decnet_web/src/components/Bounty.tsx
Normal file
@@ -0,0 +1,191 @@
|
||||
import React, { useEffect, useState } from 'react';
|
||||
import { useSearchParams } from 'react-router-dom';
|
||||
import { Archive, Search, ChevronLeft, ChevronRight, Filter } from 'lucide-react';
|
||||
import api from '../utils/api';
|
||||
import './Dashboard.css';
|
||||
|
||||
interface BountyEntry {
|
||||
id: number;
|
||||
timestamp: string;
|
||||
decky: string;
|
||||
service: string;
|
||||
attacker_ip: string;
|
||||
bounty_type: string;
|
||||
payload: any;
|
||||
}
|
||||
|
||||
const Bounty: React.FC = () => {
|
||||
const [searchParams, setSearchParams] = useSearchParams();
|
||||
const query = searchParams.get('q') || '';
|
||||
const typeFilter = searchParams.get('type') || '';
|
||||
const page = parseInt(searchParams.get('page') || '1');
|
||||
|
||||
const [bounties, setBounties] = useState<BountyEntry[]>([]);
|
||||
const [total, setTotal] = useState(0);
|
||||
const [loading, setLoading] = useState(true);
|
||||
const [searchInput, setSearchInput] = useState(query);
|
||||
|
||||
const limit = 50;
|
||||
|
||||
const fetchBounties = async () => {
|
||||
setLoading(true);
|
||||
try {
|
||||
const offset = (page - 1) * limit;
|
||||
let url = `/bounty?limit=${limit}&offset=${offset}`;
|
||||
if (query) url += `&search=${encodeURIComponent(query)}`;
|
||||
if (typeFilter) url += `&bounty_type=${typeFilter}`;
|
||||
|
||||
const res = await api.get(url);
|
||||
setBounties(res.data.data);
|
||||
setTotal(res.data.total);
|
||||
} catch (err) {
|
||||
console.error('Failed to fetch bounties', err);
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
fetchBounties();
|
||||
}, [query, typeFilter, page]);
|
||||
|
||||
const handleSearch = (e: React.FormEvent) => {
|
||||
e.preventDefault();
|
||||
setSearchParams({ q: searchInput, type: typeFilter, page: '1' });
|
||||
};
|
||||
|
||||
const setPage = (p: number) => {
|
||||
setSearchParams({ q: query, type: typeFilter, page: p.toString() });
|
||||
};
|
||||
|
||||
const setType = (t: string) => {
|
||||
setSearchParams({ q: query, type: t, page: '1' });
|
||||
};
|
||||
|
||||
const totalPages = Math.ceil(total / limit);
|
||||
|
||||
return (
|
||||
<div className="dashboard">
|
||||
{/* Page Header */}
|
||||
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center' }}>
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: '16px' }}>
|
||||
<Archive size={32} className="violet-accent" />
|
||||
<h1 style={{ fontSize: '1.5rem', letterSpacing: '4px' }}>BOUNTY VAULT</h1>
|
||||
</div>
|
||||
|
||||
<div style={{ display: 'flex', gap: '16px', alignItems: 'center' }}>
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: '8px', border: '1px solid var(--border-color)', padding: '4px 12px' }}>
|
||||
<Filter size={16} className="dim" />
|
||||
<select
|
||||
value={typeFilter}
|
||||
onChange={(e) => setType(e.target.value)}
|
||||
style={{ background: 'transparent', border: 'none', color: 'inherit', fontSize: '0.8rem', outline: 'none' }}
|
||||
>
|
||||
<option value="">ALL TYPES</option>
|
||||
<option value="credential">CREDENTIALS</option>
|
||||
<option value="payload">PAYLOADS</option>
|
||||
</select>
|
||||
</div>
|
||||
|
||||
<form onSubmit={handleSearch} style={{ display: 'flex', alignItems: 'center', border: '1px solid var(--border-color)', padding: '4px 12px' }}>
|
||||
<Search size={18} style={{ opacity: 0.5, marginRight: '8px' }} />
|
||||
<input
|
||||
type="text"
|
||||
placeholder="Search bounty..."
|
||||
value={searchInput}
|
||||
onChange={(e) => setSearchInput(e.target.value)}
|
||||
style={{ background: 'transparent', border: 'none', padding: '4px', fontSize: '0.8rem', width: '200px' }}
|
||||
/>
|
||||
</form>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="logs-section">
|
||||
<div className="section-header" style={{ justifyContent: 'space-between' }}>
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: '12px' }}>
|
||||
<span className="matrix-text" style={{ fontSize: '0.8rem' }}>{total} ARTIFACTS CAPTURED</span>
|
||||
</div>
|
||||
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: '16px' }}>
|
||||
<span className="dim" style={{ fontSize: '0.8rem' }}>
|
||||
Page {page} of {totalPages || 1}
|
||||
</span>
|
||||
<div style={{ display: 'flex', gap: '8px' }}>
|
||||
<button
|
||||
disabled={page <= 1}
|
||||
onClick={() => setPage(page - 1)}
|
||||
style={{ padding: '4px', border: '1px solid var(--border-color)', opacity: page <= 1 ? 0.3 : 1 }}
|
||||
>
|
||||
<ChevronLeft size={16} />
|
||||
</button>
|
||||
<button
|
||||
disabled={page >= totalPages}
|
||||
onClick={() => setPage(page + 1)}
|
||||
style={{ padding: '4px', border: '1px solid var(--border-color)', opacity: page >= totalPages ? 0.3 : 1 }}
|
||||
>
|
||||
<ChevronRight size={16} />
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div className="logs-table-container">
|
||||
<table className="logs-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>TIMESTAMP</th>
|
||||
<th>DECKY</th>
|
||||
<th>SERVICE</th>
|
||||
<th>ATTACKER</th>
|
||||
<th>TYPE</th>
|
||||
<th>DATA</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{bounties.length > 0 ? bounties.map((b) => (
|
||||
<tr key={b.id}>
|
||||
<td className="dim" style={{ fontSize: '0.75rem', whiteSpace: 'nowrap' }}>{new Date(b.timestamp).toLocaleString()}</td>
|
||||
<td className="violet-accent">{b.decky}</td>
|
||||
<td>{b.service}</td>
|
||||
<td className="matrix-text">{b.attacker_ip}</td>
|
||||
<td>
|
||||
<span style={{
|
||||
fontSize: '0.7rem',
|
||||
padding: '2px 8px',
|
||||
borderRadius: '4px',
|
||||
border: `1px solid ${b.bounty_type === 'credential' ? 'var(--text-color)' : 'var(--accent-color)'}`,
|
||||
backgroundColor: b.bounty_type === 'credential' ? 'rgba(0, 255, 65, 0.1)' : 'rgba(238, 130, 238, 0.1)',
|
||||
color: b.bounty_type === 'credential' ? 'var(--text-color)' : 'var(--accent-color)'
|
||||
}}>
|
||||
{b.bounty_type.toUpperCase()}
|
||||
</span>
|
||||
</td>
|
||||
<td>
|
||||
<div style={{ fontSize: '0.9rem' }}>
|
||||
{b.bounty_type === 'credential' ? (
|
||||
<div style={{ display: 'flex', gap: '12px' }}>
|
||||
<span><span className="dim" style={{ marginRight: '4px' }}>user:</span>{b.payload.username}</span>
|
||||
<span><span className="dim" style={{ marginRight: '4px' }}>pass:</span>{b.payload.password}</span>
|
||||
</div>
|
||||
) : (
|
||||
<span className="dim" style={{ fontSize: '0.8rem' }}>{JSON.stringify(b.payload)}</span>
|
||||
)}
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
)) : (
|
||||
<tr>
|
||||
<td colSpan={6} style={{ textAlign: 'center', padding: '60px', opacity: 0.5, letterSpacing: '4px' }}>
|
||||
{loading ? 'RETRIEVING ARTIFACTS...' : 'THE VAULT IS EMPTY'}
|
||||
</td>
|
||||
</tr>
|
||||
)}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default Bounty;
|
||||
20
decnet_web/src/components/Config.tsx
Normal file
20
decnet_web/src/components/Config.tsx
Normal file
@@ -0,0 +1,20 @@
|
||||
import React from 'react';
|
||||
import { Settings } from 'lucide-react';
|
||||
import './Dashboard.css';
|
||||
|
||||
const Config: React.FC = () => {
|
||||
return (
|
||||
<div className="logs-section">
|
||||
<div className="section-header">
|
||||
<Settings size={20} />
|
||||
<h2>SYSTEM CONFIGURATION</h2>
|
||||
</div>
|
||||
<div style={{ padding: '40px', textAlign: 'center', opacity: 0.5 }}>
|
||||
<p>CONFIGURATION READ-ONLY MODE ACTIVE.</p>
|
||||
<p style={{ marginTop: '10px', fontSize: '0.8rem' }}>(Config view placeholder)</p>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default Config;
|
||||
129
decnet_web/src/components/Dashboard.css
Normal file
129
decnet_web/src/components/Dashboard.css
Normal file
@@ -0,0 +1,129 @@
|
||||
.dashboard {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 32px;
|
||||
}
|
||||
|
||||
.stats-grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(3, 1fr);
|
||||
gap: 24px;
|
||||
}
|
||||
|
||||
.stat-card {
|
||||
background-color: var(--secondary-color);
|
||||
border: 1px solid var(--border-color);
|
||||
padding: 24px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 20px;
|
||||
transition: all 0.3s ease;
|
||||
}
|
||||
|
||||
.stat-card:hover {
|
||||
border-color: var(--text-color);
|
||||
box-shadow: var(--matrix-green-glow);
|
||||
transform: translateY(-2px);
|
||||
}
|
||||
|
||||
.stat-icon {
|
||||
color: var(--accent-color);
|
||||
filter: drop-shadow(var(--violet-glow));
|
||||
}
|
||||
|
||||
.stat-content {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.stat-label {
|
||||
font-size: 0.7rem;
|
||||
opacity: 0.6;
|
||||
letter-spacing: 1px;
|
||||
}
|
||||
|
||||
.stat-value {
|
||||
font-size: 1.8rem;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.logs-section {
|
||||
background-color: var(--secondary-color);
|
||||
border: 1px solid var(--border-color);
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.section-header {
|
||||
padding: 16px 24px;
|
||||
border-bottom: 1px solid var(--border-color);
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 12px;
|
||||
}
|
||||
|
||||
.section-header h2 {
|
||||
font-size: 0.9rem;
|
||||
letter-spacing: 2px;
|
||||
}
|
||||
|
||||
.logs-table-container {
|
||||
overflow-x: auto;
|
||||
}
|
||||
|
||||
.logs-table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
font-size: 0.8rem;
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.logs-table th {
|
||||
padding: 12px 24px;
|
||||
border-bottom: 1px solid var(--border-color);
|
||||
opacity: 0.5;
|
||||
font-weight: normal;
|
||||
}
|
||||
|
||||
.logs-table td {
|
||||
padding: 12px 24px;
|
||||
border-bottom: 1px solid rgba(48, 54, 61, 0.5);
|
||||
}
|
||||
|
||||
.logs-table tr:hover {
|
||||
background-color: rgba(0, 255, 65, 0.03);
|
||||
}
|
||||
|
||||
.raw-line {
|
||||
max-width: 400px;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
.dim {
|
||||
opacity: 0.5;
|
||||
}
|
||||
|
||||
.loader {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
height: 200px;
|
||||
letter-spacing: 4px;
|
||||
animation: pulse 1s infinite alternate;
|
||||
}
|
||||
|
||||
@keyframes pulse {
|
||||
from { opacity: 0.5; }
|
||||
to { opacity: 1; }
|
||||
}
|
||||
|
||||
.spin {
|
||||
animation: spin 1.5s linear infinite;
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
from { transform: rotate(0deg); }
|
||||
to { transform: rotate(360deg); }
|
||||
}
|
||||
175
decnet_web/src/components/Dashboard.tsx
Normal file
175
decnet_web/src/components/Dashboard.tsx
Normal file
@@ -0,0 +1,175 @@
|
||||
import React, { useEffect, useState } from 'react';
|
||||
import './Dashboard.css';
|
||||
import { Shield, Users, Activity, Clock } from 'lucide-react';
|
||||
|
||||
interface Stats {
|
||||
total_logs: number;
|
||||
unique_attackers: number;
|
||||
active_deckies: number;
|
||||
deployed_deckies: number;
|
||||
}
|
||||
|
||||
interface LogEntry {
|
||||
id: number;
|
||||
timestamp: string;
|
||||
decky: string;
|
||||
service: string;
|
||||
event_type: string | null;
|
||||
attacker_ip: string;
|
||||
raw_line: string;
|
||||
fields: string | null;
|
||||
msg: string | null;
|
||||
}
|
||||
|
||||
interface DashboardProps {
|
||||
searchQuery: string;
|
||||
}
|
||||
|
||||
const Dashboard: React.FC<DashboardProps> = ({ searchQuery }) => {
|
||||
const [stats, setStats] = useState<Stats | null>(null);
|
||||
const [logs, setLogs] = useState<LogEntry[]>([]);
|
||||
const [loading, setLoading] = useState(true);
|
||||
|
||||
useEffect(() => {
|
||||
const token = localStorage.getItem('token');
|
||||
const baseUrl = import.meta.env.VITE_API_URL || 'http://localhost:8000/api/v1';
|
||||
let url = `${baseUrl}/stream?token=${token}`;
|
||||
if (searchQuery) {
|
||||
url += `&search=${encodeURIComponent(searchQuery)}`;
|
||||
}
|
||||
|
||||
const eventSource = new EventSource(url);
|
||||
|
||||
eventSource.onmessage = (event) => {
|
||||
try {
|
||||
const payload = JSON.parse(event.data);
|
||||
if (payload.type === 'logs') {
|
||||
setLogs(prev => [...payload.data, ...prev].slice(0, 100));
|
||||
} else if (payload.type === 'stats') {
|
||||
setStats(payload.data);
|
||||
setLoading(false);
|
||||
}
|
||||
} catch (err) {
|
||||
console.error('Failed to parse SSE payload', err);
|
||||
}
|
||||
};
|
||||
|
||||
eventSource.onerror = (err) => {
|
||||
console.error('SSE connection error, attempting to reconnect...', err);
|
||||
};
|
||||
|
||||
return () => {
|
||||
eventSource.close();
|
||||
};
|
||||
}, [searchQuery]);
|
||||
|
||||
if (loading && !stats) return <div className="loader">INITIALIZING SENSORS...</div>;
|
||||
|
||||
return (
|
||||
<div className="dashboard">
|
||||
<div className="stats-grid">
|
||||
<StatCard
|
||||
icon={<Activity size={32} />}
|
||||
label="TOTAL INTERACTIONS"
|
||||
value={stats?.total_logs || 0}
|
||||
/>
|
||||
<StatCard
|
||||
icon={<Users size={32} />}
|
||||
label="UNIQUE ATTACKERS"
|
||||
value={stats?.unique_attackers || 0}
|
||||
/>
|
||||
<StatCard
|
||||
icon={<Shield size={32} />}
|
||||
label="ACTIVE DECKIES"
|
||||
value={`${stats?.active_deckies || 0} / ${stats?.deployed_deckies || 0}`}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="logs-section">
|
||||
<div className="section-header">
|
||||
<Clock size={20} />
|
||||
<h2>LIVE INTERACTION LOG</h2>
|
||||
</div>
|
||||
<div className="logs-table-container">
|
||||
<table className="logs-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>TIMESTAMP</th>
|
||||
<th>DECKY</th>
|
||||
<th>SERVICE</th>
|
||||
<th>ATTACKER IP</th>
|
||||
<th>EVENT</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{logs.length > 0 ? logs.map(log => {
|
||||
let parsedFields: Record<string, string> = {};
|
||||
if (log.fields) {
|
||||
try {
|
||||
parsedFields = JSON.parse(log.fields);
|
||||
} catch (e) {
|
||||
// Ignore parsing errors
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
<tr key={log.id}>
|
||||
<td className="dim">{new Date(log.timestamp).toLocaleString()}</td>
|
||||
<td className="violet-accent">{log.decky}</td>
|
||||
<td className="matrix-text">{log.service}</td>
|
||||
<td>{log.attacker_ip}</td>
|
||||
<td>
|
||||
<div style={{ display: 'flex', flexDirection: 'column', gap: '8px' }}>
|
||||
<div style={{ fontWeight: 'bold', color: 'var(--text-color)' }}>
|
||||
{log.event_type} {log.msg && log.msg !== '-' && <span style={{ fontWeight: 'normal', opacity: 0.8 }}>— {log.msg}</span>}
|
||||
</div>
|
||||
{Object.keys(parsedFields).length > 0 && (
|
||||
<div style={{ display: 'flex', gap: '8px', flexWrap: 'wrap' }}>
|
||||
{Object.entries(parsedFields).map(([k, v]) => (
|
||||
<span key={k} style={{
|
||||
fontSize: '0.7rem',
|
||||
backgroundColor: 'rgba(0, 255, 65, 0.1)',
|
||||
padding: '2px 8px',
|
||||
borderRadius: '4px',
|
||||
border: '1px solid rgba(0, 255, 65, 0.3)',
|
||||
wordBreak: 'break-all'
|
||||
}}>
|
||||
<span style={{ opacity: 0.6 }}>{k}:</span> {v}
|
||||
</span>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
);
|
||||
}) : (
|
||||
<tr>
|
||||
<td colSpan={5} style={{textAlign: 'center', padding: '40px'}}>NO INTERACTION DETECTED</td>
|
||||
</tr>
|
||||
)}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
interface StatCardProps {
|
||||
icon: React.ReactNode;
|
||||
label: string;
|
||||
value: string | number;
|
||||
}
|
||||
|
||||
const StatCard: React.FC<StatCardProps> = ({ icon, label, value }) => (
|
||||
<div className="stat-card">
|
||||
<div className="stat-icon">{icon}</div>
|
||||
<div className="stat-content">
|
||||
<span className="stat-label">{label}</span>
|
||||
<span className="stat-value">{value.toLocaleString()}</span>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
|
||||
export default Dashboard;
|
||||
278
decnet_web/src/components/DeckyFleet.tsx
Normal file
278
decnet_web/src/components/DeckyFleet.tsx
Normal file
@@ -0,0 +1,278 @@
|
||||
import React, { useEffect, useState } from 'react';
|
||||
import api from '../utils/api';
|
||||
import './Dashboard.css'; // Re-use common dashboard styles
|
||||
import { Server, Cpu, Globe, Database, Clock, RefreshCw, Upload } from 'lucide-react';
|
||||
|
||||
interface Decky {
|
||||
name: string;
|
||||
ip: string;
|
||||
services: string[];
|
||||
distro: string;
|
||||
hostname: string;
|
||||
archetype: string | null;
|
||||
service_config: Record<string, Record<string, any>>;
|
||||
mutate_interval: number | null;
|
||||
last_mutated: number;
|
||||
}
|
||||
|
||||
const DeckyFleet: React.FC = () => {
|
||||
const [deckies, setDeckies] = useState<Decky[]>([]);
|
||||
const [loading, setLoading] = useState(true);
|
||||
const [mutating, setMutating] = useState<string | null>(null);
|
||||
const [showDeploy, setShowDeploy] = useState(false);
|
||||
const [iniContent, setIniContent] = useState('');
|
||||
const [deploying, setDeploying] = useState(false);
|
||||
|
||||
const fetchDeckies = async () => {
|
||||
try {
|
||||
const _res = await api.get('/deckies');
|
||||
setDeckies(_res.data);
|
||||
} catch (err) {
|
||||
console.error('Failed to fetch decky fleet', err);
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleMutate = async (name: string) => {
|
||||
setMutating(name);
|
||||
try {
|
||||
await api.post(`/deckies/${name}/mutate`, {}, { timeout: 120000 });
|
||||
await fetchDeckies();
|
||||
} catch (err: any) {
|
||||
console.error('Failed to mutate', err);
|
||||
if (err.code === 'ECONNABORTED') {
|
||||
alert('Mutation is still running in the background but the UI timed out.');
|
||||
} else {
|
||||
alert('Mutation failed');
|
||||
}
|
||||
} finally {
|
||||
setMutating(null);
|
||||
}
|
||||
};
|
||||
|
||||
const handleIntervalChange = async (name: string, current: number | null) => {
|
||||
const _val = prompt(`Enter new mutation interval in minutes for ${name} (leave empty to disable):`, current?.toString() || '');
|
||||
if (_val === null) return;
|
||||
const mutate_interval = _val.trim() === '' ? null : parseInt(_val);
|
||||
try {
|
||||
await api.put(`/deckies/${name}/mutate-interval`, { mutate_interval });
|
||||
fetchDeckies();
|
||||
} catch (err) {
|
||||
console.error('Failed to update interval', err);
|
||||
alert('Update failed');
|
||||
}
|
||||
};
|
||||
|
||||
const handleDeploy = async () => {
|
||||
if (!iniContent.trim()) return;
|
||||
setDeploying(true);
|
||||
try {
|
||||
await api.post('/deckies/deploy', { ini_content: iniContent }, { timeout: 120000 });
|
||||
setIniContent('');
|
||||
setShowDeploy(false);
|
||||
fetchDeckies();
|
||||
} catch (err: any) {
|
||||
console.error('Deploy failed', err);
|
||||
alert(`Deploy failed: ${err.response?.data?.detail || err.message}`);
|
||||
} finally {
|
||||
setDeploying(false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleFileUpload = (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const file = e.target.files?.[0];
|
||||
if (!file) return;
|
||||
|
||||
const reader = new FileReader();
|
||||
reader.onload = (event) => {
|
||||
const content = event.target?.result as string;
|
||||
setIniContent(content);
|
||||
};
|
||||
reader.readAsText(file);
|
||||
};
|
||||
|
||||
// Initial fleet load plus a 10s polling loop while this view is mounted.
useEffect(() => {
  fetchDeckies();
  const _interval = setInterval(fetchDeckies, 10000); // Fleet state updates less frequently than logs
  return () => clearInterval(_interval); // stop polling on unmount
}, []);
|
||||
|
||||
if (loading) return <div className="loader">SCANNING NETWORK FOR DECOYS...</div>;
|
||||
|
||||
return (
|
||||
<div className="dashboard">
|
||||
<div className="section-header" style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', border: '1px solid var(--border-color)', backgroundColor: 'var(--secondary-color)', marginBottom: '24px' }}>
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: '12px' }}>
|
||||
<Server size={20} />
|
||||
<h2 style={{ margin: 0 }}>DECOY FLEET ASSET INVENTORY</h2>
|
||||
</div>
|
||||
<button
|
||||
onClick={() => setShowDeploy(!showDeploy)}
|
||||
style={{ display: 'flex', alignItems: 'center', gap: '8px', border: '1px solid var(--accent-color)', color: 'var(--accent-color)' }}
|
||||
>
|
||||
+ DEPLOY DECKIES
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{showDeploy && (
|
||||
<div style={{ marginBottom: '24px', padding: '24px', backgroundColor: 'var(--secondary-color)', border: '1px solid var(--accent-color)', display: 'flex', flexDirection: 'column', gap: '16px' }}>
|
||||
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center' }}>
|
||||
<h3 style={{ fontSize: '1rem', color: 'var(--text-color)' }}>Deploy via INI Configuration</h3>
|
||||
<div>
|
||||
<input
|
||||
type="file"
|
||||
id="ini-upload"
|
||||
accept=".ini"
|
||||
onChange={handleFileUpload}
|
||||
style={{ display: 'none' }}
|
||||
/>
|
||||
<label
|
||||
htmlFor="ini-upload"
|
||||
style={{
|
||||
cursor: 'pointer',
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
gap: '8px',
|
||||
fontSize: '0.8rem',
|
||||
color: 'var(--accent-color)',
|
||||
border: '1px solid var(--accent-color)',
|
||||
padding: '4px 12px'
|
||||
}}
|
||||
>
|
||||
<Upload size={14} /> UPLOAD FILE
|
||||
</label>
|
||||
</div>
|
||||
</div>
|
||||
<textarea
|
||||
value={iniContent}
|
||||
onChange={(e) => setIniContent(e.target.value)}
|
||||
placeholder="[decky-01] archetype=linux-server services=ssh,http"
|
||||
style={{ width: '100%', height: '200px', backgroundColor: '#000', color: 'var(--text-color)', border: '1px solid var(--border-color)', padding: '12px', fontFamily: 'monospace' }}
|
||||
/>
|
||||
<div style={{ display: 'flex', justifyContent: 'flex-end', gap: '12px' }}>
|
||||
<button onClick={() => setShowDeploy(false)} style={{ border: '1px solid var(--border-color)', color: 'var(--dim-color)' }}>CANCEL</button>
|
||||
<button onClick={handleDeploy} disabled={deploying} style={{ background: 'var(--accent-color)', color: '#000', border: 'none', display: 'flex', alignItems: 'center', gap: '8px' }}>
|
||||
{deploying && <RefreshCw size={14} className="spin" />}
|
||||
{deploying ? 'DEPLOYING...' : 'DEPLOY'}
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
<div className="deckies-grid" style={{ display: 'grid', gridTemplateColumns: 'repeat(auto-fill, minmax(350px, 1fr))', gap: '24px' }}>
|
||||
{deckies.length > 0 ? deckies.map(decky => (
|
||||
<div key={decky.name} className="stat-card" style={{ flexDirection: 'column', alignItems: 'flex-start', gap: '16px', padding: '24px' }}>
|
||||
<div style={{ width: '100%', display: 'flex', justifyContent: 'space-between', alignItems: 'center', borderBottom: '1px solid var(--border-color)', paddingBottom: '12px' }}>
|
||||
<span className="matrix-text" style={{ fontSize: '1.2rem', fontWeight: 'bold' }}>{decky.name}</span>
|
||||
<span className="dim" style={{ fontSize: '0.8rem', backgroundColor: 'rgba(0, 255, 65, 0.1)', padding: '2px 8px', borderRadius: '4px' }}>{decky.ip}</span>
|
||||
</div>
|
||||
|
||||
<div style={{ display: 'flex', flexDirection: 'column', gap: '8px', width: '100%' }}>
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: '8px', fontSize: '0.85rem' }}>
|
||||
<Cpu size={14} className="dim" />
|
||||
<span className="dim">HOSTNAME:</span> {decky.hostname}
|
||||
</div>
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: '8px', fontSize: '0.85rem' }}>
|
||||
<Globe size={14} className="dim" />
|
||||
<span className="dim">DISTRO:</span> {decky.distro}
|
||||
</div>
|
||||
{decky.archetype && (
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: '8px', fontSize: '0.85rem' }}>
|
||||
<Database size={14} className="dim" />
|
||||
<span className="dim">ARCHETYPE:</span> <span style={{ color: 'var(--highlight-color)' }}>{decky.archetype}</span>
|
||||
</div>
|
||||
)}
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: '8px', fontSize: '0.85rem', marginTop: '8px' }}>
|
||||
<Clock size={14} className="dim" />
|
||||
<span className="dim">MUTATION:</span>
|
||||
<span
|
||||
style={{ color: 'var(--accent-color)', cursor: 'pointer', textDecoration: 'underline' }}
|
||||
onClick={() => handleIntervalChange(decky.name, decky.mutate_interval)}
|
||||
>
|
||||
{decky.mutate_interval ? `EVERY ${decky.mutate_interval}m` : 'DISABLED'}
|
||||
</span>
|
||||
<button
|
||||
onClick={() => handleMutate(decky.name)}
|
||||
disabled={!!mutating}
|
||||
style={{
|
||||
background: 'transparent', border: '1px solid var(--accent-color)',
|
||||
color: 'var(--accent-color)', padding: '2px 8px', fontSize: '0.7rem',
|
||||
cursor: mutating ? 'not-allowed' : 'pointer', display: 'flex', alignItems: 'center', gap: '4px', marginLeft: 'auto',
|
||||
opacity: mutating ? 0.5 : 1
|
||||
}}
|
||||
>
|
||||
<RefreshCw size={10} className={mutating === decky.name ? "spin" : ""} /> {mutating === decky.name ? 'MUTATING...' : 'FORCE'}
|
||||
</button>
|
||||
</div>
|
||||
{decky.last_mutated > 0 && (
|
||||
<div style={{ fontSize: '0.7rem', color: 'var(--dim-color)', fontStyle: 'italic', marginTop: '4px' }}>
|
||||
Last mutated: {new Date(decky.last_mutated * 1000).toLocaleString()}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<div style={{ width: '100%' }}>
|
||||
<div className="dim" style={{ fontSize: '0.7rem', marginBottom: '8px', letterSpacing: '1px' }}>EXPOSED SERVICES:</div>
|
||||
<div style={{ display: 'flex', flexWrap: 'wrap', gap: '8px' }}>
|
||||
{decky.services.map(svc => {
|
||||
const _config = decky.service_config[svc];
|
||||
return (
|
||||
<div key={svc} className="service-tag-container" style={{ position: 'relative' }}>
|
||||
<span className="service-tag" style={{
|
||||
display: 'inline-block',
|
||||
padding: '4px 10px',
|
||||
fontSize: '0.75rem',
|
||||
backgroundColor: 'var(--bg-color)',
|
||||
border: '1px solid var(--accent-color)',
|
||||
color: 'var(--accent-color)',
|
||||
borderRadius: '2px',
|
||||
cursor: 'help'
|
||||
}}>
|
||||
{svc}
|
||||
</span>
|
||||
{_config && Object.keys(_config).length > 0 && (
|
||||
<div className="service-config-tooltip" style={{
|
||||
display: 'none',
|
||||
position: 'absolute',
|
||||
bottom: '100%',
|
||||
left: '0',
|
||||
backgroundColor: 'rgba(10, 10, 10, 0.95)',
|
||||
border: '1px solid var(--accent-color)',
|
||||
padding: '12px',
|
||||
zIndex: 100,
|
||||
minWidth: '200px',
|
||||
boxShadow: '0 0 15px rgba(0, 255, 65, 0.2)',
|
||||
marginBottom: '8px'
|
||||
}}>
|
||||
{Object.entries(_config).map(([k, v]) => (
|
||||
<div key={k} style={{ fontSize: '0.7rem', marginBottom: '4px' }}>
|
||||
<span style={{ color: 'var(--highlight-color)', fontWeight: 'bold' }}>{k}:</span>
|
||||
<span style={{ marginLeft: '6px', opacity: 0.9 }}>{String(v)}</span>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
);
|
||||
})}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)) : (
|
||||
<div className="stat-card" style={{ gridColumn: '1 / -1', justifyContent: 'center', padding: '60px' }}>
|
||||
<span className="dim">NO DECOYS CURRENTLY DEPLOYED IN THIS SECTOR</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<style dangerouslySetInnerHTML={{ __html: `
|
||||
.service-tag-container:hover .service-config-tooltip {
|
||||
display: block !important;
|
||||
}
|
||||
`}} />
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default DeckyFleet;
|
||||
179
decnet_web/src/components/Layout.css
Normal file
179
decnet_web/src/components/Layout.css
Normal file
@@ -0,0 +1,179 @@
|
||||
/* Full-viewport shell: sidebar on the left, main content filling the rest. */
.layout-container {
  display: flex;
  height: 100vh;
  width: 100vw;
  background-color: var(--background-color);
}

/* Sidebar Styling */
.sidebar {
  background-color: var(--secondary-color);
  border-right: 1px solid var(--border-color);
  height: 100%;
  display: flex;
  flex-direction: column;
  /* Animate the open/closed width change; overflow hidden clips labels
     while collapsing. */
  transition: width 0.3s cubic-bezier(0.4, 0, 0.2, 1);
  overflow: hidden;
  flex-shrink: 0;
}

/* Expanded sidebar width. */
.sidebar.open {
  width: 240px;
}

/* Collapsed (icons-only) sidebar width. */
.sidebar.closed {
  width: 70px;
}

.sidebar-header {
  padding: 20px;
  display: flex;
  align-items: center;
  justify-content: space-between;
  border-bottom: 1px solid var(--border-color);
}

.logo-text {
  font-weight: bold;
  font-size: 1.2rem;
  margin-left: 10px;
  letter-spacing: 2px;
}

/* Sidebar collapse/expand button — stripped of default button chrome. */
.toggle-btn {
  background: transparent;
  border: none;
  color: var(--text-color);
  padding: 0;
  display: flex;
  align-items: center;
  justify-content: center;
}

.toggle-btn:hover {
  box-shadow: none;
  color: var(--accent-color);
}

.sidebar-nav {
  flex-grow: 1;
  padding: 20px 0;
}

/* Navigation entries render dimmed until hovered or active. */
.nav-item {
  display: flex;
  align-items: center;
  padding: 12px 24px;
  cursor: pointer;
  transition: all 0.2s ease;
  color: var(--text-color);
  opacity: 0.7;
}

/* Hover and active routes share the same highlight treatment. */
.nav-item:hover, .nav-item.active {
  background-color: rgba(0, 255, 65, 0.1);
  opacity: 1;
  color: var(--text-color);
  border-left: 3px solid var(--text-color);
}

.nav-label {
  margin-left: 12px;
  font-size: 0.9rem;
  white-space: nowrap;
}

.sidebar-footer {
  padding: 20px;
  border-top: 1px solid var(--border-color);
}

.logout-btn {
  display: flex;
  align-items: center;
  gap: 12px;
  width: 100%;
  padding: 10px;
  /* Transparent border reserves space so the hover border doesn't shift layout. */
  border: 1px solid transparent;
}

.logout-btn:hover {
  border: 1px solid var(--accent-color);
  color: var(--accent-color);
  background: transparent;
}

/* Main Content Area */
.main-content {
  flex-grow: 1;
  display: flex;
  flex-direction: column;
  overflow: hidden;
  /* min-width: 0 lets flex children (charts/tables) shrink below content size. */
  min-width: 0;
}

/* Topbar Styling */
.topbar {
  height: 64px;
  border-bottom: 1px solid var(--border-color);
  display: flex;
  align-items: center;
  justify-content: space-between;
  padding: 0 32px;
  background-color: var(--background-color);
}

.search-container {
  display: flex;
  align-items: center;
  background-color: var(--secondary-color);
  border: 1px solid var(--border-color);
  padding: 4px 12px;
  max-width: 400px;
  width: 100%;
}

.search-icon {
  margin-right: 8px;
  opacity: 0.5;
}

/* The input inherits the container's chrome, so it is rendered bare. */
.search-container input {
  background: transparent;
  border: none;
  width: 100%;
  padding: 4px;
}

.search-container input:focus {
  box-shadow: none;
}

.topbar-status {
  font-size: 0.8rem;
}

/* Pulsing glow used for "live" indicators. */
.neon-blink {
  animation: blink 2s infinite;
}

@keyframes blink {
  0%, 100% { opacity: 1; text-shadow: var(--matrix-green-glow); }
  50% { opacity: 0.5; }
}

.violet-accent {
  color: var(--accent-color);
  filter: drop-shadow(var(--violet-glow));
}

.matrix-text {
  color: var(--text-color);
}

/* Viewport for dynamic content */
.content-viewport {
  flex-grow: 1;
  padding: 32px;
  overflow-y: auto;
}
|
||||
109
decnet_web/src/components/Layout.tsx
Normal file
109
decnet_web/src/components/Layout.tsx
Normal file
@@ -0,0 +1,109 @@
|
||||
import React, { useState, useEffect } from 'react';
|
||||
import { NavLink } from 'react-router-dom';
|
||||
import { Menu, X, Search, Activity, LayoutDashboard, Terminal, Settings, LogOut, Server, Archive } from 'lucide-react';
|
||||
import api from '../utils/api';
|
||||
import './Layout.css';
|
||||
|
||||
// Props for the application shell: routed page content plus the callbacks
// the shell needs from the app root.
interface LayoutProps {
  children: React.ReactNode;     // page content rendered inside the viewport
  onLogout: () => void;          // invoked when the sidebar logout button is clicked
  onSearch: (q: string) => void; // invoked with the topbar query on form submit
}
|
||||
|
||||
// Application shell: collapsible sidebar navigation, a topbar with global
// search and a system-status indicator, and a scrollable content viewport.
const Layout: React.FC<LayoutProps> = ({ children, onLogout, onSearch }) => {
  const [sidebarOpen, setSidebarOpen] = useState(true);
  const [search, setSearch] = useState('');
  const [systemActive, setSystemActive] = useState(false);

  // Forward the current query to the parent on submit.
  const handleSearchSubmit = (e: React.FormEvent) => {
    e.preventDefault();
    onSearch(search);
  };

  // Poll /stats every 10s; the system counts as "active" when at least one
  // decky is deployed.
  useEffect(() => {
    const fetchStatus = async () => {
      try {
        const res = await api.get('/stats');
        setSystemActive(res.data.deployed_deckies > 0);
      } catch (err) {
        console.error('Failed to fetch system status', err);
      }
    };
    fetchStatus();
    const interval = setInterval(fetchStatus, 10000);
    return () => clearInterval(interval);
  }, []);

  return (
    <div className="layout-container">
      {/* Sidebar */}
      <aside className={`sidebar ${sidebarOpen ? 'open' : 'closed'}`}>
        <div className="sidebar-header">
          <Activity size={24} className="violet-accent" />
          {/* Logo text and nav labels are hidden while collapsed. */}
          {sidebarOpen && <span className="logo-text">DECNET</span>}
          <button className="toggle-btn" onClick={() => setSidebarOpen(!sidebarOpen)}>
            {sidebarOpen ? <X size={20} /> : <Menu size={20} />}
          </button>
        </div>

        <nav className="sidebar-nav">
          <NavItem to="/" icon={<LayoutDashboard size={20} />} label="Dashboard" open={sidebarOpen} />
          <NavItem to="/fleet" icon={<Server size={20} />} label="Decoy Fleet" open={sidebarOpen} />
          <NavItem to="/live-logs" icon={<Terminal size={20} />} label="Live Logs" open={sidebarOpen} />
          <NavItem to="/bounty" icon={<Archive size={20} />} label="Bounty" open={sidebarOpen} />
          <NavItem to="/attackers" icon={<Activity size={20} />} label="Attackers" open={sidebarOpen} />
          <NavItem to="/config" icon={<Settings size={20} />} label="Config" open={sidebarOpen} />
        </nav>

        <div className="sidebar-footer">
          <button className="logout-btn" onClick={onLogout}>
            <LogOut size={20} />
            {sidebarOpen && <span>Logout</span>}
          </button>
        </div>
      </aside>

      {/* Main Content Area */}
      <main className="main-content">
        {/* Topbar */}
        <header className="topbar">
          <form onSubmit={handleSearchSubmit} className="search-container">
            <Search size={18} className="search-icon" />
            <input
              type="text"
              placeholder="Search logs, deckies, IPs..."
              value={search}
              onChange={(e) => setSearch(e.target.value)}
            />
          </form>
          <div className="topbar-status">
            {/* Accent color signals INACTIVE; normal text color signals ACTIVE. */}
            <span className="matrix-text" style={{ color: systemActive ? 'var(--text-color)' : 'var(--accent-color)' }}>
              SYSTEM: {systemActive ? 'ACTIVE' : 'INACTIVE'}
            </span>
          </div>
        </header>

        {/* Dynamic Content */}
        <div className="content-viewport">
          {children}
        </div>
      </main>
    </div>
  );
};
|
||||
|
||||
// Props for a single sidebar navigation entry.
interface NavItemProps {
  to: string;            // route path for the NavLink
  icon: React.ReactNode; // icon rendered before the label
  label: string;         // text shown only while the sidebar is open
  open: boolean;         // whether the sidebar is expanded
}
|
||||
|
||||
// Sidebar navigation entry: a router link that highlights itself when its
// route is active and hides its label while the sidebar is collapsed.
const NavItem: React.FC<NavItemProps> = ({ to, icon, label, open }) => {
  // Append 'active' when the current route matches; `end` makes "/" match
  // exactly instead of acting as a prefix for every route.
  const itemClass = ({ isActive }: { isActive: boolean }) =>
    `nav-item ${isActive ? 'active' : ''}`;

  return (
    <NavLink to={to} className={itemClass} end={to === '/'}>
      {icon}
      {open && <span className="nav-label">{label}</span>}
    </NavLink>
  );
};
|
||||
|
||||
export default Layout;
|
||||
344
decnet_web/src/components/LiveLogs.tsx
Normal file
344
decnet_web/src/components/LiveLogs.tsx
Normal file
@@ -0,0 +1,344 @@
|
||||
import React, { useEffect, useState, useRef } from 'react';
|
||||
import { useSearchParams } from 'react-router-dom';
|
||||
import {
|
||||
Terminal, Search, Activity,
|
||||
ChevronLeft, ChevronRight, Play, Pause
|
||||
} from 'lucide-react';
|
||||
import {
|
||||
BarChart, Bar, XAxis, YAxis, CartesianGrid, Tooltip, ResponsiveContainer, Cell
|
||||
} from 'recharts';
|
||||
import api from '../utils/api';
|
||||
import './Dashboard.css';
|
||||
|
||||
// One log row as returned by the /logs API and the SSE stream.
interface LogEntry {
  id: number;          // unique row id, used as the React key
  timestamp: string;   // parsed with `new Date(...)` for display
  decky: string;       // name of the decoy that produced the event
  service: string;     // service on the decoy (e.g. ssh, http)
  event_type: string;  // event category label
  attacker_ip: string; // source address of the attacker
  raw_line: string;    // original unparsed log line
  fields: string;      // JSON-encoded extra fields (parsed client-side, may be empty)
  msg: string;         // human-readable message; '-' means none
}
|
||||
|
||||
// One bucket of the attack-volume histogram.
interface HistogramData {
  time: string;  // bucket label (x-axis)
  count: number; // events in the bucket (bar height)
}
|
||||
|
||||
const LiveLogs: React.FC = () => {
|
||||
const [searchParams, setSearchParams] = useSearchParams();
|
||||
|
||||
// URL-synced state
|
||||
const query = searchParams.get('q') || '';
|
||||
const timeRange = searchParams.get('time') || '1h';
|
||||
const isLive = searchParams.get('live') !== 'false';
|
||||
const page = parseInt(searchParams.get('page') || '1');
|
||||
|
||||
// Local state
|
||||
const [logs, setLogs] = useState<LogEntry[]>([]);
|
||||
const [histogram, setHistogram] = useState<HistogramData[]>([]);
|
||||
const [totalLogs, setTotalLogs] = useState(0);
|
||||
const [loading, setLoading] = useState(true);
|
||||
const [streaming, setStreaming] = useState(isLive);
|
||||
const [searchInput, setSearchInput] = useState(query);
|
||||
|
||||
const eventSourceRef = useRef<EventSource | null>(null);
|
||||
const limit = 50;
|
||||
|
||||
// Sync search input if URL changes (e.g. back button)
|
||||
useEffect(() => {
|
||||
setSearchInput(query);
|
||||
}, [query]);
|
||||
|
||||
const fetchData = async () => {
|
||||
if (streaming) return; // Don't fetch historical if streaming
|
||||
|
||||
setLoading(true);
|
||||
try {
|
||||
const offset = (page - 1) * limit;
|
||||
let url = `/logs?limit=${limit}&offset=${offset}&search=${encodeURIComponent(query)}`;
|
||||
|
||||
// Calculate time bounds for historical fetch
|
||||
const now = new Date();
|
||||
let startTime: string | null = null;
|
||||
if (timeRange !== 'all') {
|
||||
const minutes = timeRange === '15m' ? 15 : timeRange === '1h' ? 60 : timeRange === '24h' ? 1440 : 0;
|
||||
if (minutes > 0) {
|
||||
startTime = new Date(now.getTime() - minutes * 60000).toISOString().replace('T', ' ').substring(0, 19);
|
||||
url += `&start_time=${startTime}`;
|
||||
}
|
||||
}
|
||||
|
||||
const res = await api.get(url);
|
||||
setLogs(res.data.data);
|
||||
setTotalLogs(res.data.total);
|
||||
|
||||
// Fetch histogram for historical view
|
||||
let histUrl = `/logs/histogram?search=${encodeURIComponent(query)}`;
|
||||
if (startTime) histUrl += `&start_time=${startTime}`;
|
||||
const histRes = await api.get(histUrl);
|
||||
setHistogram(histRes.data);
|
||||
|
||||
} catch (err) {
|
||||
console.error('Failed to fetch historical logs', err);
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
const setupSSE = () => {
|
||||
if (eventSourceRef.current) {
|
||||
eventSourceRef.current.close();
|
||||
}
|
||||
|
||||
const token = localStorage.getItem('token');
|
||||
const baseUrl = import.meta.env.VITE_API_URL || 'http://localhost:8000/api/v1';
|
||||
let url = `${baseUrl}/stream?token=${token}&search=${encodeURIComponent(query)}`;
|
||||
|
||||
if (timeRange !== 'all') {
|
||||
const minutes = timeRange === '15m' ? 15 : timeRange === '1h' ? 60 : timeRange === '24h' ? 1440 : 0;
|
||||
if (minutes > 0) {
|
||||
const startTime = new Date(Date.now() - minutes * 60000).toISOString().replace('T', ' ').substring(0, 19);
|
||||
url += `&start_time=${startTime}`;
|
||||
}
|
||||
}
|
||||
|
||||
const es = new EventSource(url);
|
||||
eventSourceRef.current = es;
|
||||
|
||||
es.onmessage = (event) => {
|
||||
try {
|
||||
const payload = JSON.parse(event.data);
|
||||
if (payload.type === 'logs') {
|
||||
setLogs(prev => [...payload.data, ...prev].slice(0, 500));
|
||||
} else if (payload.type === 'histogram') {
|
||||
setHistogram(payload.data);
|
||||
} else if (payload.type === 'stats') {
|
||||
setTotalLogs(payload.data.total_logs);
|
||||
}
|
||||
} catch (err) {
|
||||
console.error('Failed to parse SSE payload', err);
|
||||
}
|
||||
};
|
||||
|
||||
es.onerror = () => {
|
||||
console.error('SSE connection lost, reconnecting...');
|
||||
};
|
||||
};
|
||||
|
||||
useEffect(() => {
|
||||
if (streaming) {
|
||||
setupSSE();
|
||||
setLoading(false);
|
||||
} else {
|
||||
if (eventSourceRef.current) {
|
||||
eventSourceRef.current.close();
|
||||
eventSourceRef.current = null;
|
||||
}
|
||||
fetchData();
|
||||
}
|
||||
|
||||
return () => {
|
||||
if (eventSourceRef.current) {
|
||||
eventSourceRef.current.close();
|
||||
}
|
||||
};
|
||||
}, [query, timeRange, streaming, page]);
|
||||
|
||||
const handleSearch = (e: React.FormEvent) => {
|
||||
e.preventDefault();
|
||||
setSearchParams({ q: searchInput, time: timeRange, live: streaming.toString(), page: '1' });
|
||||
};
|
||||
|
||||
const handleToggleLive = () => {
|
||||
const newStreaming = !streaming;
|
||||
setStreaming(newStreaming);
|
||||
setSearchParams({ q: query, time: timeRange, live: newStreaming.toString(), page: '1' });
|
||||
};
|
||||
|
||||
const handleTimeChange = (newTime: string) => {
|
||||
setSearchParams({ q: query, time: newTime, live: streaming.toString(), page: '1' });
|
||||
};
|
||||
|
||||
const changePage = (newPage: number) => {
|
||||
setSearchParams({ q: query, time: timeRange, live: 'false', page: newPage.toString() });
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="dashboard">
|
||||
{/* Control Bar */}
|
||||
<div className="logs-section" style={{ border: 'none', background: 'transparent', padding: 0 }}>
|
||||
<form onSubmit={handleSearch} style={{ display: 'flex', gap: '16px', marginBottom: '24px' }}>
|
||||
<div className="search-container" style={{ flexGrow: 1, maxWidth: 'none' }}>
|
||||
<Search className="search-icon" size={18} />
|
||||
<input
|
||||
type="text"
|
||||
placeholder="Query logs (e.g. decky:decky-01 service:http attacker:192.168.1.5 status:failed)"
|
||||
value={searchInput}
|
||||
onChange={(e) => setSearchInput(e.target.value)}
|
||||
/>
|
||||
</div>
|
||||
<select
|
||||
value={timeRange}
|
||||
onChange={(e) => handleTimeChange(e.target.value)}
|
||||
className="search-container"
|
||||
style={{ width: 'auto', color: 'var(--text-color)', cursor: 'pointer' }}
|
||||
>
|
||||
<option value="15m">LAST 15 MIN</option>
|
||||
<option value="1h">LAST 1 HOUR</option>
|
||||
<option value="24h">LAST 24 HOURS</option>
|
||||
<option value="all">ALL TIME</option>
|
||||
</select>
|
||||
<button
|
||||
type="button"
|
||||
onClick={handleToggleLive}
|
||||
style={{
|
||||
display: 'flex', alignItems: 'center', gap: '8px',
|
||||
border: `1px solid ${streaming ? 'var(--text-color)' : 'var(--accent-color)'}`,
|
||||
color: streaming ? 'var(--text-color)' : 'var(--accent-color)',
|
||||
minWidth: '120px', justifyContent: 'center'
|
||||
}}
|
||||
>
|
||||
{streaming ? <><Play size={14} className="neon-blink" /> LIVE</> : <><Pause size={14} /> PAUSED</>}
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
|
||||
{/* Histogram Chart */}
|
||||
<div className="logs-section" style={{ height: '200px', padding: '20px', marginBottom: '24px', minWidth: 0 }}>
|
||||
<div style={{ display: 'flex', justifyContent: 'space-between', marginBottom: '10px' }}>
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: '8px', fontSize: '0.7rem', color: 'var(--dim-color)' }}>
|
||||
<Activity size={12} /> ATTACK VOLUME OVER TIME
|
||||
</div>
|
||||
<div style={{ fontSize: '0.7rem', color: 'var(--text-color)' }}>
|
||||
MATCHES: {totalLogs.toLocaleString()}
|
||||
</div>
|
||||
</div>
|
||||
<ResponsiveContainer width="100%" height="100%">
|
||||
<BarChart data={histogram}>
|
||||
<CartesianGrid strokeDasharray="3 3" stroke="#30363d" vertical={false} />
|
||||
<XAxis
|
||||
dataKey="time"
|
||||
hide
|
||||
/>
|
||||
<YAxis
|
||||
stroke="#30363d"
|
||||
fontSize={10}
|
||||
tickFormatter={(val) => Math.floor(val).toString()}
|
||||
/>
|
||||
<Tooltip
|
||||
contentStyle={{ backgroundColor: '#0d1117', border: '1px solid #30363d', fontSize: '0.8rem' }}
|
||||
itemStyle={{ color: 'var(--text-color)' }}
|
||||
labelStyle={{ color: 'var(--dim-color)', marginBottom: '4px' }}
|
||||
cursor={{ fill: 'rgba(0, 255, 65, 0.05)' }}
|
||||
/>
|
||||
<Bar dataKey="count" fill="var(--text-color)" radius={[2, 2, 0, 0]}>
|
||||
{histogram.map((entry, index) => (
|
||||
<Cell key={`cell-${index}`} fillOpacity={0.6 + (entry.count / (Math.max(...histogram.map(h => h.count)) || 1)) * 0.4} />
|
||||
))}
|
||||
</Bar>
|
||||
</BarChart>
|
||||
</ResponsiveContainer>
|
||||
</div>
|
||||
|
||||
{/* Logs Table */}
|
||||
<div className="logs-section">
|
||||
<div className="section-header" style={{ display: 'flex', justifyContent: 'space-between' }}>
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: '8px' }}>
|
||||
<Terminal size={20} />
|
||||
<h2>LOG EXPLORER</h2>
|
||||
</div>
|
||||
{!streaming && (
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: '16px' }}>
|
||||
<span className="dim" style={{ fontSize: '0.8rem' }}>
|
||||
Page {page} of {Math.ceil(totalLogs / limit)}
|
||||
</span>
|
||||
<div style={{ display: 'flex', gap: '8px' }}>
|
||||
<button
|
||||
disabled={page === 1}
|
||||
onClick={() => changePage(page - 1)}
|
||||
style={{ padding: '4px', border: '1px solid var(--border-color)', opacity: page === 1 ? 0.3 : 1 }}
|
||||
>
|
||||
<ChevronLeft size={16} />
|
||||
</button>
|
||||
<button
|
||||
disabled={page >= Math.ceil(totalLogs / limit)}
|
||||
onClick={() => changePage(page + 1)}
|
||||
style={{ padding: '4px', border: '1px solid var(--border-color)', opacity: page >= Math.ceil(totalLogs / limit) ? 0.3 : 1 }}
|
||||
>
|
||||
<ChevronRight size={16} />
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
<div className="logs-table-container">
|
||||
<table className="logs-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>TIMESTAMP</th>
|
||||
<th>DECKY</th>
|
||||
<th>SERVICE</th>
|
||||
<th>ATTACKER</th>
|
||||
<th>EVENT</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
{logs.length > 0 ? logs.map(log => {
|
||||
let parsedFields: Record<string, any> = {};
|
||||
if (log.fields) {
|
||||
try {
|
||||
parsedFields = JSON.parse(log.fields);
|
||||
} catch (e) {}
|
||||
}
|
||||
|
||||
return (
|
||||
<tr key={log.id}>
|
||||
<td className="dim" style={{ fontSize: '0.75rem', whiteSpace: 'nowrap' }}>{new Date(log.timestamp).toLocaleString()}</td>
|
||||
<td className="violet-accent">{log.decky}</td>
|
||||
<td className="matrix-text">{log.service}</td>
|
||||
<td>{log.attacker_ip}</td>
|
||||
<td>
|
||||
<div style={{ display: 'flex', flexDirection: 'column', gap: '8px' }}>
|
||||
<div style={{ fontWeight: 'bold', color: 'var(--text-color)', fontSize: '0.9rem' }}>
|
||||
{log.event_type} {log.msg && log.msg !== '-' && <span style={{ fontWeight: 'normal', opacity: 0.8 }}>— {log.msg}</span>}
|
||||
</div>
|
||||
{Object.keys(parsedFields).length > 0 && (
|
||||
<div style={{ display: 'flex', gap: '8px', flexWrap: 'wrap' }}>
|
||||
{Object.entries(parsedFields).map(([k, v]) => (
|
||||
<span key={k} style={{
|
||||
fontSize: '0.7rem',
|
||||
backgroundColor: 'rgba(0, 255, 65, 0.1)',
|
||||
padding: '2px 8px',
|
||||
borderRadius: '4px',
|
||||
border: '1px solid rgba(0, 255, 65, 0.3)',
|
||||
wordBreak: 'break-all'
|
||||
}}>
|
||||
<span style={{ opacity: 0.6 }}>{k}:</span> {typeof v === 'object' ? JSON.stringify(v) : v}
|
||||
</span>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</td>
|
||||
</tr>
|
||||
);
|
||||
}) : (
|
||||
<tr>
|
||||
<td colSpan={5} style={{ textAlign: 'center', padding: '40px', opacity: 0.5 }}>
|
||||
{loading ? 'RETRIEVING DATA...' : 'NO LOGS MATCHING CRITERIA'}
|
||||
</td>
|
||||
</tr>
|
||||
)}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default LiveLogs;
|
||||
90
decnet_web/src/components/Login.css
Normal file
90
decnet_web/src/components/Login.css
Normal file
@@ -0,0 +1,90 @@
|
||||
/* Full-viewport login screen with a faint 20px grid drawn via two
   perpendicular 1px gradient lines. */
.login-container {
  height: 100vh;
  width: 100vw;
  display: flex;
  align-items: center;
  justify-content: center;
  background-color: var(--background-color);
  background-image:
    linear-gradient(rgba(0, 255, 65, 0.05) 1px, transparent 1px),
    linear-gradient(90deg, rgba(0, 255, 65, 0.05) 1px, transparent 1px);
  background-size: 20px 20px;
}

/* Centered card holding the header, form, and footer. */
.login-box {
  width: 100%;
  max-width: 400px;
  background-color: var(--secondary-color);
  border: 1px solid var(--border-color);
  padding: 40px;
  box-shadow: 0 0 20px rgba(0, 0, 0, 0.5);
  display: flex;
  flex-direction: column;
  gap: 32px;
}

.login-header {
  text-align: center;
  display: flex;
  flex-direction: column;
  align-items: center;
  gap: 12px;
}

.login-header h1 {
  font-size: 2.5rem;
  letter-spacing: 10px;
  font-weight: bold;
}

.login-header p {
  font-size: 0.7rem;
  letter-spacing: 2px;
  opacity: 0.6;
}

.login-form {
  display: flex;
  flex-direction: column;
  gap: 24px;
}

/* Label stacked above its input. */
.form-group {
  display: flex;
  flex-direction: column;
  gap: 8px;
}

.form-group label {
  font-size: 0.7rem;
  opacity: 0.8;
  letter-spacing: 1px;
}

.login-form input {
  width: 100%;
  background-color: rgba(0, 0, 0, 0.5);
}

/* Inline error banner shown above the submit button. */
.error-msg {
  color: #ff4141;
  font-size: 0.8rem;
  text-align: center;
  padding: 8px;
  border: 1px solid #ff4141;
  background-color: rgba(255, 65, 65, 0.1);
}

.login-form button {
  padding: 12px;
  margin-top: 8px;
  font-weight: bold;
  letter-spacing: 2px;
}

.login-footer {
  text-align: center;
  font-size: 0.6rem;
  opacity: 0.4;
  letter-spacing: 1px;
}
|
||||
154
decnet_web/src/components/Login.tsx
Normal file
154
decnet_web/src/components/Login.tsx
Normal file
@@ -0,0 +1,154 @@
|
||||
import React, { useState } from 'react';
|
||||
import api from '../utils/api';
|
||||
import './Login.css';
|
||||
import { Activity } from 'lucide-react';
|
||||
|
||||
interface LoginProps {
|
||||
onLogin: (token: string) => void;
|
||||
}
|
||||
|
||||
const Login: React.FC<LoginProps> = ({ onLogin }) => {
|
||||
const [username, setUsername] = useState('');
|
||||
const [password, setPassword] = useState('');
|
||||
const [error, setError] = useState('');
|
||||
const [loading, setLoading] = useState(false);
|
||||
const [needsPasswordChange, setNeedsPasswordChange] = useState(false);
|
||||
const [newPassword, setNewPassword] = useState('');
|
||||
const [confirmPassword, setConfirmPassword] = useState('');
|
||||
const [tempToken, setTempToken] = useState('');
|
||||
|
||||
const handleLoginSubmit = async (e: React.FormEvent) => {
|
||||
e.preventDefault();
|
||||
setLoading(true);
|
||||
setError('');
|
||||
|
||||
try {
|
||||
const response = await api.post('/auth/login', { username, password });
|
||||
const { access_token, must_change_password } = response.data;
|
||||
|
||||
if (must_change_password) {
|
||||
setTempToken(access_token);
|
||||
setNeedsPasswordChange(true);
|
||||
} else {
|
||||
localStorage.setItem('token', access_token);
|
||||
onLogin(access_token);
|
||||
}
|
||||
} catch (err: any) {
|
||||
setError(err.response?.data?.detail || 'Authentication failed');
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleChangePasswordSubmit = async (e: React.FormEvent) => {
|
||||
e.preventDefault();
|
||||
if (newPassword !== confirmPassword) {
|
||||
setError('Passwords do not match');
|
||||
return;
|
||||
}
|
||||
|
||||
setLoading(true);
|
||||
setError('');
|
||||
|
||||
try {
|
||||
await api.post('/auth/change-password',
|
||||
{ old_password: password, new_password: newPassword },
|
||||
{ headers: { Authorization: `Bearer ${tempToken}` } }
|
||||
);
|
||||
|
||||
// Re-authenticate to get a fresh token with must_change_password=false
|
||||
const response = await api.post('/auth/login', { username, password: newPassword });
|
||||
const { access_token } = response.data;
|
||||
|
||||
localStorage.setItem('token', access_token);
|
||||
onLogin(access_token);
|
||||
} catch (err: any) {
|
||||
setError(err.response?.data?.detail || 'Password change failed');
|
||||
} finally {
|
||||
setLoading(false);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="login-container">
|
||||
<div className="login-box">
|
||||
<div className="login-header">
|
||||
<Activity size={48} className="violet-accent neon-blink" />
|
||||
<h1>DECNET</h1>
|
||||
<p>AUTHORIZED PERSONNEL ONLY</p>
|
||||
</div>
|
||||
|
||||
{!needsPasswordChange ? (
|
||||
<form onSubmit={handleLoginSubmit} className="login-form">
|
||||
<div className="form-group">
|
||||
<label>IDENTIFIER</label>
|
||||
<input
|
||||
type="text"
|
||||
value={username}
|
||||
onChange={(e) => setUsername(e.target.value)}
|
||||
required
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="form-group">
|
||||
<label>ACCESS KEY</label>
|
||||
<input
|
||||
type="password"
|
||||
value={password}
|
||||
onChange={(e) => setPassword(e.target.value)}
|
||||
required
|
||||
/>
|
||||
</div>
|
||||
|
||||
{error && <div className="error-msg">{error}</div>}
|
||||
|
||||
<button type="submit" disabled={loading}>
|
||||
{loading ? 'VERIFYING...' : 'ESTABLISH CONNECTION'}
|
||||
</button>
|
||||
</form>
|
||||
) : (
|
||||
<form onSubmit={handleChangePasswordSubmit} className="login-form">
|
||||
<div className="form-group" style={{ textAlign: 'center', marginBottom: '10px' }}>
|
||||
<p className="violet-accent">MANDATORY SECURITY UPDATE</p>
|
||||
<p style={{ fontSize: '0.8rem', opacity: 0.7 }}>Please establish a new access key</p>
|
||||
</div>
|
||||
|
||||
<div className="form-group">
|
||||
<label>NEW ACCESS KEY</label>
|
||||
<input
|
||||
type="password"
|
||||
value={newPassword}
|
||||
onChange={(e) => setNewPassword(e.target.value)}
|
||||
required
|
||||
minLength={8}
|
||||
/>
|
||||
</div>
|
||||
|
||||
<div className="form-group">
|
||||
<label>CONFIRM KEY</label>
|
||||
<input
|
||||
type="password"
|
||||
value={confirmPassword}
|
||||
onChange={(e) => setConfirmPassword(e.target.value)}
|
||||
required
|
||||
minLength={8}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{error && <div className="error-msg">{error}</div>}
|
||||
|
||||
<button type="submit" disabled={loading}>
|
||||
{loading ? 'UPDATING...' : 'UPDATE SECURE KEY'}
|
||||
</button>
|
||||
</form>
|
||||
)}
|
||||
|
||||
<div className="login-footer">
|
||||
<span>SECURE PROTOCOL v1.0</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default Login;
|
||||
75
decnet_web/src/index.css
Normal file
75
decnet_web/src/index.css
Normal file
@@ -0,0 +1,75 @@
|
||||
@import url('https://fonts.googleapis.com/css2?family=Ubuntu+Mono:ital,wght@0,400;0,700;1,400;1,700&display=swap');
|
||||
|
||||
:root {
|
||||
--background-color: #000000;
|
||||
--text-color: #00ff41;
|
||||
--accent-color: #ee82ee;
|
||||
--secondary-color: #0d1117;
|
||||
--border-color: #30363d;
|
||||
--matrix-green-glow: 0 0 10px rgba(0, 255, 65, 0.5);
|
||||
--violet-glow: 0 0 10px rgba(238, 130, 238, 0.5);
|
||||
}
|
||||
|
||||
* {
|
||||
box-sizing: border-box;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
body {
|
||||
font-family: 'Ubuntu Mono', monospace;
|
||||
background-color: var(--background-color);
|
||||
color: var(--text-color);
|
||||
line-height: 1.5;
|
||||
overflow-x: hidden;
|
||||
}
|
||||
|
||||
input, button, textarea, select {
|
||||
font-family: inherit;
|
||||
}
|
||||
|
||||
button {
|
||||
cursor: pointer;
|
||||
background: transparent;
|
||||
border: 1px solid var(--text-color);
|
||||
color: var(--text-color);
|
||||
padding: 8px 16px;
|
||||
transition: all 0.3s ease;
|
||||
}
|
||||
|
||||
button:hover {
|
||||
background: var(--text-color);
|
||||
color: var(--background-color);
|
||||
box-shadow: var(--matrix-green-glow);
|
||||
}
|
||||
|
||||
input {
|
||||
background: #0d1117;
|
||||
border: 1px solid var(--border-color);
|
||||
color: var(--text-color);
|
||||
padding: 8px 12px;
|
||||
}
|
||||
|
||||
input:focus {
|
||||
outline: none;
|
||||
border-color: var(--text-color);
|
||||
box-shadow: var(--matrix-green-glow);
|
||||
}
|
||||
|
||||
/* Custom scrollbar */
|
||||
::-webkit-scrollbar {
|
||||
width: 8px;
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-track {
|
||||
background: var(--background-color);
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb {
|
||||
background: var(--secondary-color);
|
||||
border: 1px solid var(--border-color);
|
||||
}
|
||||
|
||||
::-webkit-scrollbar-thumb:hover {
|
||||
background: var(--border-color);
|
||||
}
|
||||
10
decnet_web/src/main.tsx
Normal file
10
decnet_web/src/main.tsx
Normal file
@@ -0,0 +1,10 @@
|
||||
import { StrictMode } from 'react'
|
||||
import { createRoot } from 'react-dom/client'
|
||||
import './index.css'
|
||||
import App from './App.tsx'
|
||||
|
||||
createRoot(document.getElementById('root')!).render(
|
||||
<StrictMode>
|
||||
<App />
|
||||
</StrictMode>,
|
||||
)
|
||||
15
decnet_web/src/utils/api.ts
Normal file
15
decnet_web/src/utils/api.ts
Normal file
@@ -0,0 +1,15 @@
|
||||
import axios from 'axios';
|
||||
|
||||
const api = axios.create({
|
||||
baseURL: import.meta.env.VITE_API_URL || 'http://localhost:8000/api/v1',
|
||||
});
|
||||
|
||||
api.interceptors.request.use((config) => {
|
||||
const token = localStorage.getItem('token');
|
||||
if (token) {
|
||||
config.headers.Authorization = `Bearer ${token}`;
|
||||
}
|
||||
return config;
|
||||
});
|
||||
|
||||
export default api;
|
||||
25
decnet_web/tsconfig.app.json
Normal file
25
decnet_web/tsconfig.app.json
Normal file
@@ -0,0 +1,25 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.app.tsbuildinfo",
|
||||
"target": "es2023",
|
||||
"lib": ["ES2023", "DOM", "DOM.Iterable"],
|
||||
"module": "esnext",
|
||||
"types": ["vite/client"],
|
||||
"skipLibCheck": true,
|
||||
|
||||
/* Bundler mode */
|
||||
"moduleResolution": "bundler",
|
||||
"allowImportingTsExtensions": true,
|
||||
"verbatimModuleSyntax": true,
|
||||
"moduleDetection": "force",
|
||||
"noEmit": true,
|
||||
"jsx": "react-jsx",
|
||||
|
||||
/* Linting */
|
||||
"noUnusedLocals": true,
|
||||
"noUnusedParameters": true,
|
||||
"erasableSyntaxOnly": true,
|
||||
"noFallthroughCasesInSwitch": true
|
||||
},
|
||||
"include": ["src"]
|
||||
}
|
||||
7
decnet_web/tsconfig.json
Normal file
7
decnet_web/tsconfig.json
Normal file
@@ -0,0 +1,7 @@
|
||||
{
|
||||
"files": [],
|
||||
"references": [
|
||||
{ "path": "./tsconfig.app.json" },
|
||||
{ "path": "./tsconfig.node.json" }
|
||||
]
|
||||
}
|
||||
24
decnet_web/tsconfig.node.json
Normal file
24
decnet_web/tsconfig.node.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"tsBuildInfoFile": "./node_modules/.tmp/tsconfig.node.tsbuildinfo",
|
||||
"target": "es2023",
|
||||
"lib": ["ES2023"],
|
||||
"module": "esnext",
|
||||
"types": ["node"],
|
||||
"skipLibCheck": true,
|
||||
|
||||
/* Bundler mode */
|
||||
"moduleResolution": "bundler",
|
||||
"allowImportingTsExtensions": true,
|
||||
"verbatimModuleSyntax": true,
|
||||
"moduleDetection": "force",
|
||||
"noEmit": true,
|
||||
|
||||
/* Linting */
|
||||
"noUnusedLocals": true,
|
||||
"noUnusedParameters": true,
|
||||
"erasableSyntaxOnly": true,
|
||||
"noFallthroughCasesInSwitch": true
|
||||
},
|
||||
"include": ["vite.config.ts"]
|
||||
}
|
||||
7
decnet_web/vite.config.ts
Normal file
7
decnet_web/vite.config.ts
Normal file
@@ -0,0 +1,7 @@
|
||||
import { defineConfig } from 'vite'
|
||||
import react from '@vitejs/plugin-react'
|
||||
|
||||
// https://vite.dev/config/
|
||||
export default defineConfig({
|
||||
plugins: [react()],
|
||||
})
|
||||
29
deploy/decnet-api.service
Normal file
29
deploy/decnet-api.service
Normal file
@@ -0,0 +1,29 @@
|
||||
[Unit]
|
||||
Description=DECNET API Service
|
||||
After=network.target docker.service
|
||||
Requires=docker.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=decnet
|
||||
Group=decnet
|
||||
WorkingDirectory=/path/to/DECNET
|
||||
# Ensure environment is loaded from the .env file
|
||||
EnvironmentFile=/path/to/DECNET/.env
|
||||
# Use the virtualenv python to run the decnet api command
|
||||
ExecStart=/path/to/DECNET/.venv/bin/decnet api
|
||||
|
||||
# Capabilities required to manage MACVLAN interfaces and network links without root
|
||||
CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_RAW
|
||||
AmbientCapabilities=CAP_NET_ADMIN CAP_NET_RAW
|
||||
|
||||
# Security Hardening
|
||||
NoNewPrivileges=yes
|
||||
ProtectSystem=full
|
||||
ProtectHome=read-only
|
||||
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
30
deploy/decnet-web.service
Normal file
30
deploy/decnet-web.service
Normal file
@@ -0,0 +1,30 @@
|
||||
[Unit]
|
||||
Description=DECNET Web Dashboard Service
|
||||
After=network.target decnet-api.service
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=decnet
|
||||
Group=decnet
|
||||
WorkingDirectory=/path/to/DECNET
|
||||
# Ensure environment is loaded from the .env file
|
||||
EnvironmentFile=/path/to/DECNET/.env
|
||||
# Use the virtualenv python to run the decnet web command
|
||||
ExecStart=/path/to/DECNET/.venv/bin/decnet web
|
||||
|
||||
# The Web Dashboard service does not require network administration privileges.
|
||||
# Enable the following lines if you wish to bind the Dashboard to a privileged port (e.g., 80 or 443)
|
||||
# while still running as a non-root user.
|
||||
# CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
# AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
|
||||
# Security Hardening
|
||||
NoNewPrivileges=yes
|
||||
ProtectSystem=full
|
||||
ProtectHome=read-only
|
||||
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
37
development/BUGS.md
Normal file
37
development/BUGS.md
Normal file
@@ -0,0 +1,37 @@
|
||||
# BUGS
|
||||
|
||||
Active bugs detected during development. Do not fix until noted otherwise.
|
||||
|
||||
---
|
||||
|
||||
## BUG-001 — Split-brain model imports across router files (Gemini SQLModel migration)
|
||||
|
||||
**Detected:** 2026-04-09
|
||||
**Status:** Open — do not fix, migration in progress
|
||||
|
||||
**Symptom:** `from decnet.web.api import app` fails with `ModuleNotFoundError: No module named 'decnet.web.models'`
|
||||
|
||||
**Root cause:** Gemini's SQLModel migration is partially complete. Models were moved to `decnet/web/db/models.py`, but three router files were not updated and still import from the old `decnet.web.models` path:
|
||||
|
||||
| File | Stale import |
|
||||
|------|--------------|
|
||||
| `decnet/web/router/auth/api_login.py:12` | `from decnet.web.models import LoginRequest, Token` |
|
||||
| `decnet/web/router/auth/api_change_pass.py:7` | `from decnet.web.models import ChangePasswordRequest` |
|
||||
| `decnet/web/router/stats/api_get_stats.py:6` | `from decnet.web.models import StatsResponse` |
|
||||
|
||||
**Fix:** Update those three files to import from `decnet.web.db.models` (consistent with the other router files already migrated).
|
||||
|
||||
**Impact:** All `tests/api/` tests fail to collect. Web server cannot start.
|
||||
|
||||
---
|
||||
|
||||
## BUG-002 — `decnet/web/db/sqlite/repository.py` depends on `sqlalchemy` directly
|
||||
|
||||
**Detected:** 2026-04-09
|
||||
**Status:** Resolved (dependency installed via `pip install -e ".[dev]"`)
|
||||
|
||||
**Symptom:** `ModuleNotFoundError: No module named 'sqlalchemy'` before `sqlmodel` was installed.
|
||||
|
||||
**Root cause:** `sqlmodel>=0.0.16` was added to `pyproject.toml` but `pip install -e .` had not been re-run in the dev environment.
|
||||
|
||||
**Fix:** Run `pip install -e ".[dev]"`. Already applied.
|
||||
333
development/BUG_FIXES.md
Normal file
333
development/BUG_FIXES.md
Normal file
@@ -0,0 +1,333 @@
|
||||
# Bug Fixes — Non-Feature Realism Issues
|
||||
|
||||
> These are fingerprint leaks and broken protocol handlers that don't need new
|
||||
> interaction design — just targeted fixes. All severity High or above from REALISM_AUDIT.md.
|
||||
|
||||
---
|
||||
|
||||
## 1. HTTP — Werkzeug header leak (High)
|
||||
|
||||
### Problem
|
||||
|
||||
Every response has two `Server:` headers:
|
||||
```
|
||||
Server: Werkzeug/3.1.3 Python/3.11.2
|
||||
Server: Apache/2.4.54 (Debian)
|
||||
```
|
||||
|
||||
nmap correctly IDs Apache from the second header, but any attacker that does
|
||||
`curl -I` or runs Burp sees the Werkzeug leak immediately. Port 6443 (K8s) also
|
||||
leaks Werkzeug in every response.
|
||||
|
||||
### Fix — WSGI middleware to strip/replace the header
|
||||
|
||||
In `templates/http/server.py` (Flask app), add a `@app.after_request` hook:
|
||||
|
||||
```python
|
||||
@app.after_request
|
||||
def _fix_server_header(response):
|
||||
response.headers["Server"] = os.environ.get("HTTP_SERVER_HEADER", "Apache/2.4.54 (Debian)")
|
||||
return response
|
||||
```
|
||||
|
||||
Flask sets `Server: Werkzeug/...` by default. The `after_request` hook runs after
|
||||
Werkzeug's own header injection, so it overwrites it.
|
||||
|
||||
Same fix applies to the K8s server if it's also Flask-based.
|
||||
|
||||
### Fix — Apache 403 page body
|
||||
|
||||
Current response body: `<h1>403 Forbidden</h1>`
|
||||
|
||||
Replace with the actual Apache 2.4 default 403 page:
|
||||
|
||||
```html
|
||||
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
|
||||
<html><head>
|
||||
<title>403 Forbidden</title>
|
||||
</head><body>
|
||||
<h1>Forbidden</h1>
|
||||
<p>You don't have permission to access this resource.</p>
|
||||
<hr>
|
||||
<address>Apache/2.4.54 (Debian) Server at {hostname} Port 80</address>
|
||||
</body></html>
|
||||
```
|
||||
|
||||
Env var `HTTP_SERVER_HEADER` and `NODE_NAME` fill the address line.
|
||||
|
||||
### Env vars
|
||||
|
||||
| Var | Default |
|
||||
|-----|---------|
|
||||
| `HTTP_SERVER_HEADER` | `Apache/2.4.54 (Debian)` |
|
||||
|
||||
---
|
||||
|
||||
## 2. FTP — Twisted banner (High)
|
||||
|
||||
### Problem
|
||||
|
||||
```
|
||||
220 Twisted 25.5.0 FTP Server
|
||||
```
|
||||
|
||||
This is Twisted's built-in FTP server banner. Immediately identifies the framework.
|
||||
|
||||
### Fix
|
||||
|
||||
Override the banner. The Twisted FTP server class has a `factory.welcomeMessage` or
|
||||
the protocol's `sendLine()` for the greeting. Simplest fix: subclass the protocol
|
||||
and override `lineReceived` to intercept the `220` line before it goes out, OR
|
||||
use a `_FTPFactory` subclass that sets `welcomeMessage`:
|
||||
|
||||
```python
|
||||
from twisted.protocols.ftp import FTPFactory, FTPAnonymousShell
|
||||
from twisted.internet import reactor
|
||||
import os
|
||||
|
||||
NODE_NAME = os.environ.get("NODE_NAME", "ftpserver")
|
||||
BANNER = os.environ.get("FTP_BANNER", f"220 (vsFTPd 3.0.3)")
|
||||
|
||||
factory = FTPFactory(portal)
|
||||
factory.welcomeMessage = BANNER # overrides the Twisted default
|
||||
```
|
||||
|
||||
If `FTPFactory.welcomeMessage` is not directly settable, patch it at class level:
|
||||
|
||||
```python
|
||||
FTPFactory.welcomeMessage = BANNER
|
||||
```
|
||||
|
||||
### Anonymous login + fake directory
|
||||
|
||||
The current server rejects everything after login. Fix:
|
||||
|
||||
- Use `FTPAnonymousShell` pointed at a `MemoryFilesystem` with fake entries:
|
||||
```
|
||||
/
|
||||
├── backup.tar.gz (0 bytes, but listable)
|
||||
├── db_dump.sql (0 bytes)
|
||||
├── config.ini (0 bytes)
|
||||
└── credentials.txt (0 bytes)
|
||||
```
|
||||
- `RETR` any file → return 1–3 lines of plausible fake content, then close.
|
||||
- Log every `RETR` with filename and client IP.
|
||||
|
||||
### Env vars
|
||||
|
||||
| Var | Default |
|
||||
|-----|---------|
|
||||
| `FTP_BANNER` | `220 (vsFTPd 3.0.3)` |
|
||||
|
||||
---
|
||||
|
||||
## 3. MSSQL — Silent on TDS pre-login (High)
|
||||
|
||||
### Problem
|
||||
|
||||
No response to standard TDS pre-login packets. Connection is dropped silently.
|
||||
nmap barely recognizes port 1433 (`ms-sql-s?`).
|
||||
|
||||
### Diagnosis
|
||||
|
||||
The nmap fingerprint shows `\x04\x01\x00\x2b...` which is a valid TDS 7.x pre-login
|
||||
response fragment. So the server is sending _something_ — but it may be truncated or
|
||||
malformed enough that nmap can't complete its probe.
|
||||
|
||||
Check `templates/mssql/server.py`: look for the raw bytes being sent in response to
|
||||
`\x12\x01` (TDS pre-login type). Common bugs:
|
||||
- Wrong packet length field (bytes 2-3 of TDS header)
|
||||
- Missing `\xff` terminator on the pre-login option list
|
||||
- Status byte 0x01 instead of 0x00 in the TDS header (signaling last packet)
|
||||
|
||||
### Correct TDS 7.x pre-login response structure
|
||||
|
||||
```
|
||||
Byte 0: 0x04 (packet type: tabular result)
|
||||
Byte 1: 0x01 (status: last packet)
|
||||
Bytes 2-3: 0x00 0x2b (total length including header = 43)
|
||||
Bytes 4-5: 0x00 0x00 (SPID)
|
||||
Byte 6: 0x01 (packet ID)
|
||||
Byte 7: 0x00 (window)
|
||||
--- TDS pre-login payload ---
|
||||
[VERSION] option: type=0x00, offset=0x001a, length=0x0006
|
||||
[ENCRYPTION] option: type=0x01, offset=0x0020, length=0x0001
|
||||
[INSTOPT] option: type=0x02, offset=0x0021, length=0x0001
|
||||
[THREADID] option: type=0x03, offset=0x0022, length=0x0004
|
||||
[MARS] option: type=0x04, offset=0x0026, length=0x0001
|
||||
Terminator: 0xff
|
||||
VERSION: 0x0e 0x00 0x07 0xd0 0x00 0x00 (14.0.2000 = SQL Server 2017)
|
||||
ENCRYPTION: 0x02 (ENCRYPT_NOT_SUP)
|
||||
INSTOPT: 0x00
|
||||
THREADID: 0x00 0x00 0x00 0x00
|
||||
MARS: 0x00
|
||||
```
|
||||
|
||||
Verify the current implementation's bytes match this exactly. Fix the length field if off.
|
||||
|
||||
---
|
||||
|
||||
## 4. MongoDB — Silent on OP_MSG (High)
|
||||
|
||||
### Problem
|
||||
|
||||
No response to `OP_MSG isMaster` command. nmap shows `mongod?` (partial recognition).
|
||||
|
||||
### Diagnosis
|
||||
|
||||
MongoDB wire protocol since 3.6 uses `OP_MSG` (opcode 2013). Older clients use
|
||||
`OP_QUERY` (opcode 2004) against `admin.$cmd`. Check which one `templates/mongodb/server.py`
|
||||
handles, and whether the response's `responseTo` field matches the request's `requestID`.
|
||||
|
||||
Common bugs:
|
||||
- Handling `OP_QUERY` but not `OP_MSG`
|
||||
- Wrong `responseTo` in the response header (must echo the request's requestID)
|
||||
- Missing `flagBits` field in OP_MSG response (must be 0x00000000)
|
||||
|
||||
### Correct OP_MSG `hello` response
|
||||
|
||||
```python
|
||||
import struct, bson
|
||||
|
||||
def _op_msg_hello_response(request_id: int) -> bytes:
|
||||
doc = {
|
||||
"ismaster": True,
|
||||
"maxBsonObjectSize": 16777216,
|
||||
"maxMessageSizeBytes": 48000000,
|
||||
"maxWriteBatchSize": 100000,
|
||||
"localTime": {"$date": int(time.time() * 1000)},
|
||||
"logicalSessionTimeoutMinutes": 30,
|
||||
"connectionId": 1,
|
||||
"minWireVersion": 0,
|
||||
"maxWireVersion": 17,
|
||||
"readOnly": False,
|
||||
"ok": 1.0,
|
||||
}
|
||||
payload = b"\x00" + bson.encode(doc) # section type 0 = body
|
||||
flag_bits = struct.pack("<I", 0)
|
||||
msg_body = flag_bits + payload
|
||||
# MsgHeader: totalLength(4) + requestID(4) + responseTo(4) + opCode(4)
|
||||
header = struct.pack("<iiii",
|
||||
16 + len(msg_body), # total length
|
||||
1, # requestID (server-generated)
|
||||
request_id, # responseTo: echo the client's requestID
|
||||
2013, # OP_MSG
|
||||
)
|
||||
return header + msg_body
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5. Redis — Empty keyspace (Medium)
|
||||
|
||||
### Problem
|
||||
|
||||
`KEYS *` returns `*0\r\n` after a successful AUTH. A real exposed Redis always has data.
|
||||
Attacker does `AUTH anypassword` → `+OK` → `KEYS *` → empty → leaves.
|
||||
|
||||
### Fix — fake key-value store
|
||||
|
||||
Add a module-level dict with bait data. Handle `KEYS`, `GET`, `SCAN`, `TYPE`, `TTL`:
|
||||
|
||||
```python
|
||||
_FAKE_STORE = {
|
||||
b"sessions:user:1234": b'{"id":1234,"user":"admin","token":"eyJhbGciOiJIUzI1NiJ9..."}',
|
||||
b"sessions:user:5678": b'{"id":5678,"user":"alice","token":"eyJhbGciOiJIUzI1NiJ9..."}',
|
||||
b"cache:api_key": b"sk_live_9mK3xF2aP7qR1bN8cT4dW6vE0yU5hJ",
|
||||
b"jwt:secret": b"super_secret_jwt_signing_key_do_not_share_2024",
|
||||
b"user:admin": b'{"username":"admin","password":"$2b$12$LQv3c1yqBWVHxkd0LHAkC.","role":"superadmin"}',
|
||||
b"user:alice": b'{"username":"alice","password":"$2b$12$XKLDm3vT8nPqR4sY2hE6fO","role":"user"}',
|
||||
b"config:db_password": b"Pr0dDB!2024#Secure",
|
||||
b"config:aws_access_key": b"AKIAIOSFODNN7EXAMPLE",
|
||||
b"config:aws_secret_key": b"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
|
||||
b"rate_limit:192.168.1.1": b"42",
|
||||
}
|
||||
```
|
||||
|
||||
Commands to handle:
|
||||
- `KEYS *` → all keys as RESP array
|
||||
- `KEYS pattern` → filtered (basic glob: `*` matches all, `prefix:*` matches prefix)
|
||||
- `GET key` → value or `$-1` (nil)
|
||||
- `SCAN 0` → `*2\r\n$1\r\n0\r\n` + keys array (cursor always 0, return all)
|
||||
- `TYPE key` → `+string\r\n`
|
||||
- `TTL key` → `:-1\r\n` (no expiry)
|
||||
|
||||
---
|
||||
|
||||
## 6. SIP — Hardcoded nonce (Low)
|
||||
|
||||
### Problem
|
||||
|
||||
`nonce="decnet0000"` is hardcoded. A Shodan signature could detect this string.
|
||||
|
||||
### Fix
|
||||
|
||||
```python
|
||||
import secrets
|
||||
nonce = secrets.token_hex(16) # e.g. "a3f8c1b2e7d94051..."
|
||||
```
|
||||
|
||||
Generate once per connection in `connection_made`. The WWW-Authenticate header
|
||||
becomes: `Digest realm="{NODE_NAME}", nonce="{nonce}", algorithm=MD5`
|
||||
|
||||
---
|
||||
|
||||
## 7. VNC — Hardcoded DES challenge (Low)
|
||||
|
||||
### Problem
|
||||
|
||||
The 16-byte DES challenge sent during VNC auth negotiation is static.
|
||||
|
||||
### Fix
|
||||
|
||||
```python
|
||||
import os
|
||||
self._vnc_challenge = os.urandom(16)
|
||||
```
|
||||
|
||||
Generate in `connection_made`. Send `self._vnc_challenge` in the Security handshake.
|
||||
|
||||
---
|
||||
|
||||
## 8. PostgreSQL — Hardcoded salt (Low)
|
||||
|
||||
### Problem
|
||||
|
||||
`AuthenticationMD5Password` response contains `\xde\xad\xbe\xef` as the 4-byte salt.
|
||||
|
||||
### Fix
|
||||
|
||||
```python
|
||||
import os
|
||||
self._pg_salt = os.urandom(4)
|
||||
```
|
||||
|
||||
Use `self._pg_salt` in the `R\x00\x00\x00\x0c\x00\x00\x00\x05` response bytes.
|
||||
|
||||
---
|
||||
|
||||
## Files to change
|
||||
|
||||
| File | Change |
|
||||
|------|--------|
|
||||
| `templates/http/server.py` | `after_request` header fix, proper 403 body |
|
||||
| `templates/ftp/server.py` | Banner override, anonymous login, fake dir |
|
||||
| `templates/mssql/server.py` | Fix TDS pre-login response bytes |
|
||||
| `templates/mongodb/server.py` | Add OP_MSG handler, fix responseTo |
|
||||
| `templates/redis/server.py` | Add fake key-value store, KEYS/GET/SCAN |
|
||||
| `templates/sip/server.py` | Random nonce per connection |
|
||||
| `templates/vnc/server.py` | Random DES challenge per connection |
|
||||
| `templates/postgres/server.py` | Random MD5 salt per connection |
|
||||
| `tests/test_http_headers.py` | New: assert single Server header, correct 403 body |
|
||||
| `tests/test_redis.py` | Extend: KEYS *, GET, SCAN return bait data |
|
||||
|
||||
---
|
||||
|
||||
## Priority order
|
||||
|
||||
1. HTTP header leak — immediately visible to any attacker
|
||||
2. FTP banner — immediate framework disclosure
|
||||
3. MSSQL silent — service appears dead
|
||||
4. MongoDB silent — service appears dead
|
||||
5. Redis empty keyspace — breaks the bait value proposition
|
||||
6. SIP/VNC/PostgreSQL hardcoded values — low risk, quick wins
|
||||
107
development/COVERAGE.md
Normal file
107
development/COVERAGE.md
Normal file
@@ -0,0 +1,107 @@
|
||||
# DECNET Test Coverage Report
|
||||
|
||||
> **Last Updated:** 2026-04-12
|
||||
> **Total Coverage:** 93% ✅
|
||||
> **Total Tests:** 1074 Passed ✅
|
||||
|
||||
## 📊 Full Coverage Table
|
||||
|
||||
```text
|
||||
Name Stmts Miss Cover Missing
|
||||
------------------------------------------------------------------------------
|
||||
decnet/__init__.py 0 0 100%
|
||||
decnet/archetypes.py 21 0 100%
|
||||
decnet/cli.py 265 43 84% 62-63, 136, 138, 146-149, 163-165, 179-180, 198-199, 223-223, 251-252, 255-260, 282-283, 385-386, 390-393, 398, 400-401, 409-410, 418-419, 458-461
|
||||
decnet/collector/__init__.py 2 0 100%
|
||||
decnet/collector/worker.py 110 3 97% 196-198
|
||||
decnet/composer.py 36 3 92% 110-112
|
||||
decnet/config.py 38 0 100%
|
||||
decnet/correlation/__init__.py 4 0 100%
|
||||
decnet/correlation/engine.py 62 0 100%
|
||||
decnet/correlation/graph.py 37 0 100%
|
||||
decnet/correlation/parser.py 47 2 96% 98-99
|
||||
decnet/custom_service.py 17 0 100%
|
||||
decnet/distros.py 26 1 96% 110
|
||||
decnet/engine/__init__.py 2 0 100%
|
||||
decnet/engine/deployer.py 147 8 95% 42, 45, 177-182
|
||||
decnet/env.py 38 7 82% 17-18, 20, 29, 37-42
|
||||
decnet/fleet.py 83 1 99% 136
|
||||
decnet/ini_loader.py 111 5 95% 158-161, 205
|
||||
decnet/logging/__init__.py 0 0 100%
|
||||
decnet/logging/file_handler.py 30 0 100%
|
||||
decnet/logging/forwarder.py 13 0 100%
|
||||
decnet/logging/syslog_formatter.py 34 0 100%
|
||||
decnet/mutator/__init__.py 2 0 100%
|
||||
decnet/mutator/engine.py 80 10 88% 43, 50-51, 116-122
|
||||
decnet/network.py 106 0 100%
|
||||
decnet/os_fingerprint.py 8 0 100%
|
||||
decnet/services/__init__.py 0 0 100%
|
||||
decnet/services/base.py 7 1 86% 42
|
||||
decnet/services/conpot.py 13 0 100%
|
||||
decnet/services/docker_api.py 14 0 100%
|
||||
decnet/services/elasticsearch.py 14 0 100%
|
||||
decnet/services/ftp.py 14 0 100%
|
||||
decnet/services/http.py 31 3 90% 46-48
|
||||
decnet/services/imap.py 14 0 100%
|
||||
decnet/services/k8s.py 14 0 100%
|
||||
decnet/services/ldap.py 14 0 100%
|
||||
decnet/services/llmnr.py 14 0 100%
|
||||
decnet/services/mongodb.py 14 0 100%
|
||||
decnet/services/mqtt.py 14 0 100%
|
||||
decnet/services/mssql.py 14 0 100%
|
||||
decnet/services/mysql.py 17 0 100%
|
||||
decnet/services/pop3.py 14 0 100%
|
||||
decnet/services/postgres.py 14 0 100%
|
||||
decnet/services/rdp.py 14 0 100%
|
||||
decnet/services/redis.py 19 0 100%
|
||||
decnet/services/registry.py 31 3 90% 38-39, 45
|
||||
decnet/services/sip.py 14 0 100%
|
||||
decnet/services/smb.py 14 0 100%
|
||||
decnet/services/smtp.py 19 0 100%
|
||||
decnet/services/smtp_relay.py 19 0 100%
|
||||
decnet/services/snmp.py 14 0 100%
|
||||
decnet/services/ssh.py 15 0 100%
|
||||
decnet/services/telnet.py 15 1 93% 36
|
||||
decnet/services/tftp.py 14 0 100%
|
||||
decnet/services/vnc.py 14 0 100%
|
||||
decnet/web/api.py 39 2 95% 32, 44
|
||||
decnet/web/auth.py 23 0 100%
|
||||
decnet/web/db/models.py 41 0 100%
|
||||
decnet/web/db/repository.py 42 0 100%
|
||||
decnet/web/db/sqlite/database.py 21 4 81% 12, 29-33
|
||||
decnet/web/db/sqlite/repository.py 168 20 88% 53-54, 58-74, 81, 87-88, 112-113, 304, 306-307, 339-340
|
||||
decnet/web/dependencies.py 39 0 100%
|
||||
decnet/web/ingester.py 55 2 96% 66-67
|
||||
decnet/web/router/__init__.py 24 0 100%
|
||||
decnet/web/router/auth/api_change_pass.py 14 0 100%
|
||||
decnet/web/router/auth/api_login.py 15 0 100%
|
||||
decnet/web/router/bounty/api_get_bounties.py 10 0 100%
|
||||
decnet/web/router/fleet/api_deploy_deckies.py 50 38 24% 18-79
|
||||
decnet/web/router/fleet/api_get_deckies.py 7 0 100%
|
||||
decnet/web/router/fleet/api_mutate_decky.py 10 0 100%
|
||||
decnet/web/router/fleet/api_mutate_interval.py 17 0 100%
|
||||
decnet/web/router/logs/api_get_histogram.py 7 1 86% 19
|
||||
decnet/web/router/logs/api_get_logs.py 11 0 100%
|
||||
decnet/web/router/stats/api_get_stats.py 8 0 100%
|
||||
decnet/web/router/stream/api_stream_events.py 44 21 52% 36-68, 70
|
||||
------------------------------------------------------------------------------
|
||||
TOTAL 2402 179 93%
|
||||
```
|
||||
|
||||
## 📋 Future Coverage Plan (Missing Tests)
|
||||
|
||||
### 🔴 High Priority: `api_deploy_deckies.py` (24%)
|
||||
- **Problem:** Requires live Docker/MACVLAN orchestration.
|
||||
- **Plan:**
|
||||
- Implement a mock engine specifically for the API route test that validates the `config` object without calling Docker.
|
||||
- Integration testing using **Docker-in-Docker (DinD)** once CI infrastructure is ready.
|
||||
|
||||
### 🟡 Medium Priority: `api_stream_events.py` (52%)
|
||||
- **Problem:** Infinite event loop causes test hangs.
|
||||
- **Plan:**
|
||||
- Test frame headers/auth (Done).
|
||||
- Refactor generator to yield a fixed test set or use a loop-breaker for testing.
|
||||
|
||||
### 🟢 Low Priority: Misc. Service Logic
|
||||
- **Modules:** `services/http.py` (90%), `services/telnet.py` (93%), `distros.py` (96%).
|
||||
- **Plan:** Add edge-case unit tests for custom service configurations and invalid distro slugs.
|
||||
181
development/DEBT.md
Normal file
181
development/DEBT.md
Normal file
@@ -0,0 +1,181 @@
|
||||
# DECNET — Technical Debt Register
|
||||
|
||||
> Last updated: 2026-04-09 — All addressable debt cleared.
|
||||
> Severity: 🔴 Critical · 🟠 High · 🟡 Medium · 🟢 Low
|
||||
|
||||
---
|
||||
|
||||
## 🔴 Critical
|
||||
|
||||
### ~~DEBT-001 — Hardcoded JWT fallback secret~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/env.py:15`~~
|
||||
Fixed in commit `b6b046c`. `DECNET_JWT_SECRET` is now required; startup raises `ValueError` if unset or set to a known-bad value.
|
||||
|
||||
### ~~DEBT-002 — Default admin credentials in code~~ ✅ CLOSED (by design)
|
||||
`DECNET_ADMIN_PASSWORD` defaults to `"admin"` intentionally — the web dashboard enforces a password change on first login (`must_change_password=1`). Startup enforcement removed as it broke tooling without adding meaningful security.
|
||||
|
||||
### ~~DEBT-003 — Hardcoded LDAP password placeholder~~ ✅ CLOSED (false positive)
|
||||
`templates/ldap/server.py:73` — `"<sasl_or_unknown>"` is a log label for SASL auth attempts, not an operational credential. The LDAP template is a honeypot; it has no bind password of its own.
|
||||
|
||||
### ~~DEBT-004 — Wildcard CORS with no origin restriction~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/web/api.py:48-54`~~
|
||||
Fixed in commit `b6b046c`. `allow_origins` now uses `DECNET_CORS_ORIGINS` (env var, defaults to `http://localhost:8080`). `allow_methods` and `allow_headers` tightened to explicit allowlists.
|
||||
|
||||
---
|
||||
|
||||
## 🟠 High
|
||||
|
||||
### ~~DEBT-005 — Auth module has zero test coverage~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/web/auth.py`~~
|
||||
Comprehensive test suite added in `tests/api/` covering login, password change, token validation, and JWT edge cases.
|
||||
|
||||
### ~~DEBT-006 — Database layer has zero test coverage~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/web/sqlite_repository.py`~~
|
||||
`tests/api/test_repository.py` added — covers log insertion, bounty CRUD, histogram queries, stats summary, and fuzz testing of all query paths. In-memory SQLite with `StaticPool` ensures full isolation.
|
||||
|
||||
### ~~DEBT-007 — Web API routes mostly untested~~ ✅ RESOLVED
|
||||
~~**Files:** `decnet/web/router/` (all sub-modules)~~
|
||||
Full coverage added across `tests/api/` — fleet, logs, bounty, stream, auth all have dedicated test modules with both functional and fuzz test cases.
|
||||
|
||||
### ~~DEBT-008 — Auth token accepted via query string~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/web/dependencies.py:33-34`~~
|
||||
Query-string token fallback removed. `get_current_user` now accepts only `Authorization: Bearer <token>` header. Tokens no longer appear in access logs or browser history.
|
||||
|
||||
### ~~DEBT-009 — Inconsistent and unstructured logging across templates~~ ✅ CLOSED (false positive)
|
||||
All service templates already import from `decnet_logging` and use `syslog_line()` for structured output. The `print(line, flush=True)` present in some templates is the intentional Docker stdout channel for container log forwarding — not unstructured debug output.
|
||||
|
||||
### ~~DEBT-010 — `decnet_logging.py` duplicated across all 19 service templates~~ ✅ RESOLVED
|
||||
~~**Files:** `templates/*/decnet_logging.py`~~
|
||||
All 22 per-directory copies deleted. Canonical source lives at `templates/decnet_logging.py`. `deployer.py` now calls `_sync_logging_helper()` before `docker compose up` — it copies the canonical file into each active template build context automatically.
|
||||
|
||||
---
|
||||
|
||||
## 🟡 Medium
|
||||
|
||||
### DEBT-011 — No database migration system
|
||||
**File:** `decnet/web/db/sqlite/repository.py`
|
||||
Schema is created during startup via `SQLModel.metadata.create_all`. There is no Alembic or equivalent migration layer. Schema changes across deployments require manual intervention or silently break existing databases.
|
||||
**Status:** Architectural. Deferred — requires Alembic integration and migration history bootstrapping.
|
||||
|
||||
### ~~DEBT-012 — No environment variable validation schema~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/env.py`~~
|
||||
`DECNET_API_PORT` and `DECNET_WEB_PORT` now validated via `_port()` — enforces integer type and 1–65535 range, raises `ValueError` with a clear message on bad input.
|
||||
|
||||
### ~~DEBT-013 — Unvalidated input on `decky_name` route parameter~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/web/router/fleet/api_mutate_decky.py:10`~~
|
||||
`decky_name` now declared as `Path(..., pattern=r"^[a-z0-9\-]{1,64}$")` — FastAPI rejects non-matching values with 422 before any downstream processing.
|
||||
|
||||
### ~~DEBT-014 — Streaming endpoint has no error handling~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/web/router/stream/api_stream_events.py`~~
|
||||
`event_generator()` now wrapped in `try/except`. `asyncio.CancelledError` is handled silently (clean disconnect). All other exceptions log server-side via `log.exception()` and yield an `event: error` SSE frame to the client.
|
||||
|
||||
### ~~DEBT-015 — Broad exception detail leaked to API clients~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/web/router/fleet/api_deploy_deckies.py:78`~~
|
||||
Raw exception message no longer returned to client. Full exception now logged server-side via `log.exception()`. Client receives generic `"Deployment failed. Check server logs for details."`.
|
||||
|
||||
### ~~DEBT-016 — Unvalidated log query parameters~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/web/router/logs/api_get_logs.py:12-19`~~
|
||||
`search` capped at `max_length=512`. `start_time` and `end_time` validated against `^\d{4}-\d{2}-\d{2}[ T]\d{2}:\d{2}:\d{2}$` regex pattern. FastAPI rejects invalid input with 422.
|
||||
|
||||
### ~~DEBT-017 — Silent DB lock retry during startup~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/web/api.py:20-26`~~
|
||||
Each retry attempt now emits `log.warning("DB init attempt %d/5 failed: %s", attempt, exc)`. After all retries exhausted, `log.error()` is emitted so degraded startup is always visible in logs.
|
||||
|
||||
### ~~DEBT-018 — No Docker HEALTHCHECK in any template~~ ✅ RESOLVED
|
||||
~~**Files:** All 20 `templates/*/Dockerfile`~~
|
||||
All 24 Dockerfiles updated with:
|
||||
```dockerfile
|
||||
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
|
||||
CMD kill -0 1 || exit 1
|
||||
```
|
||||
|
||||
### ~~DEBT-019 — Most template containers run as root~~ ✅ RESOLVED
|
||||
~~**Files:** All `templates/*/Dockerfile` except Cowrie~~
|
||||
All 24 Dockerfiles now create a `decnet` system user, use `setcap cap_net_bind_service+eip` on the Python binary (allows binding ports < 1024 without root), and drop to `USER decnet` before `ENTRYPOINT`.
|
||||
|
||||
### ~~DEBT-020 — Swagger/OpenAPI disabled in production~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/web/api.py:43-45`~~
|
||||
All route decorators now declare `responses={401: {"description": "Not authenticated"}, 422: {"description": "Validation error"}}`. OpenAPI schema is complete for all endpoints.
|
||||
|
||||
### ~~DEBT-021 — `sqlite_repository.py` is a god module~~ ✅ RESOLVED
|
||||
~~**File:** `decnet/web/sqlite_repository.py` (~400 lines)~~
|
||||
Fully refactored to `decnet/web/db/` modular layout: `models.py` (SQLModel schema), `repository.py` (abstract base), `sqlite/repository.py` (SQLite implementation), `sqlite/database.py` (engine/session factory). Commit `de84cc6`.
|
||||
|
||||
### DEBT-026 — IMAP/POP3 bait emails not configurable via service config
|
||||
**Files:** `templates/imap/server.py`, `templates/pop3/server.py`, `decnet/services/imap.py`, `decnet/services/pop3.py`
|
||||
Bait emails are hardcoded. A stub env var `IMAP_EMAIL_SEED` is read but currently ignored. Full implementation requires:
|
||||
1. `IMAP_EMAIL_SEED` points to a JSON file with a list of `{from_, to, subject, date, body}` dicts.
|
||||
2. `templates/imap/server.py` loads and merges/replaces `_BAIT_EMAILS` from that file at startup.
|
||||
3. `decnet/services/imap.py` `compose_fragment()` reads `service_cfg["email_seed"]` and injects `IMAP_EMAIL_SEED` + a bind-mount for the seed file into the compose fragment.
|
||||
4. Same pattern for POP3 (`POP3_EMAIL_SEED`).
|
||||
**Status:** Stub in place — full wiring deferred to next session.
|
||||
|
||||
---
|
||||
|
||||
### DEBT-027 — Dynamic Bait Store
|
||||
**Files:** `templates/redis/server.py`, `templates/ftp/server.py`
|
||||
The bait store and honeypot files are hardcoded. A dynamic injection framework should be created to populate this payload across different honeypots.
|
||||
**Status:** Deferred — out of current scope.
|
||||
|
||||
### DEBT-028 — Test coverage for `api_deploy_deckies.py`
|
||||
**File:** `decnet/web/router/fleet/api_deploy_deckies.py` (24% coverage)
|
||||
The deploy endpoint exercises Docker Compose orchestration via `decnet.engine.deploy`, which creates MACVLAN/IPvlan networks and runs `docker compose up`. Meaningful tests require mocking the entire Docker SDK + subprocess layer, coupling tightly to implementation details.
|
||||
**Status:** Deferred — test after Docker-in-Docker CI is available.
|
||||
|
||||
---
|
||||
|
||||
## 🟢 Low
|
||||
|
||||
### ~~DEBT-022 — Debug `print()` in correlation engine~~ ✅ CLOSED (false positive)
|
||||
`decnet/correlation/engine.py:20` — The `print()` call is inside the module docstring as a usage example, not in executable code. No production code path affected.
|
||||
|
||||
### DEBT-023 — Unpinned base Docker images
|
||||
**Files:** All `templates/*/Dockerfile`
|
||||
`debian:bookworm-slim` and similar tags are used without digest pinning. Image contents can silently change on `docker pull`, breaking reproducibility and supply-chain integrity.
|
||||
**Status:** Deferred — requires `docker pull` access to resolve current digests for each base image.
|
||||
|
||||
### ~~DEBT-024 — Stale service version hardcoded in Redis template~~ ✅ RESOLVED
|
||||
~~**File:** `templates/redis/server.py:15`~~
|
||||
`REDIS_VERSION` updated from `"7.0.12"` to `"7.2.7"` (current stable).
|
||||
|
||||
### ~~DEBT-025 — No lock file for Python dependencies~~ ✅ RESOLVED
|
||||
~~**Files:** Project root~~
|
||||
`requirements.lock` generated via `pip freeze`. Reproducible installs now available via `pip install -r requirements.lock`.
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
| ID | Severity | Area | Status |
|
||||
|----|----------|------|--------|
|
||||
| ~~DEBT-001~~ | ✅ | Security / Auth | resolved `b6b046c` |
|
||||
| ~~DEBT-002~~ | ✅ | Security / Auth | closed (by design) |
|
||||
| ~~DEBT-003~~ | ✅ | Security / Infra | closed (false positive) |
|
||||
| ~~DEBT-004~~ | ✅ | Security / API | resolved `b6b046c` |
|
||||
| ~~DEBT-005~~ | ✅ | Testing | resolved |
|
||||
| ~~DEBT-006~~ | ✅ | Testing | resolved |
|
||||
| ~~DEBT-007~~ | ✅ | Testing | resolved |
|
||||
| ~~DEBT-008~~ | ✅ | Security / Auth | resolved |
|
||||
| ~~DEBT-009~~ | ✅ | Observability | closed (false positive) |
|
||||
| ~~DEBT-010~~ | ✅ | Code Duplication | resolved |
|
||||
| DEBT-011 | 🟡 Medium | DB / Migrations | deferred (Alembic scope) |
|
||||
| ~~DEBT-012~~ | ✅ | Config | resolved |
|
||||
| ~~DEBT-013~~ | ✅ | Security / Input | resolved |
|
||||
| ~~DEBT-014~~ | ✅ | Reliability | resolved |
|
||||
| ~~DEBT-015~~ | ✅ | Security / API | resolved |
|
||||
| ~~DEBT-016~~ | ✅ | Security / API | resolved |
|
||||
| ~~DEBT-017~~ | ✅ | Reliability | resolved |
|
||||
| ~~DEBT-018~~ | ✅ | Infra | resolved |
|
||||
| ~~DEBT-019~~ | ✅ | Security / Infra | resolved |
|
||||
| ~~DEBT-020~~ | ✅ | Docs | resolved |
|
||||
| ~~DEBT-021~~ | ✅ | Architecture | resolved `de84cc6` |
|
||||
| ~~DEBT-022~~ | ✅ | Code Quality | closed (false positive) |
|
||||
| DEBT-023 | 🟢 Low | Infra | deferred (needs docker pull) |
|
||||
| ~~DEBT-024~~ | ✅ | Infra | resolved |
|
||||
| ~~DEBT-025~~ | ✅ | Build | resolved |
|
||||
| DEBT-026 | 🟡 Medium | Features | deferred (out of scope) |
|
||||
| DEBT-027 | 🟡 Medium | Features | deferred (out of scope) |
|
||||
| DEBT-028 | 🟡 Medium | Testing | deferred (needs DinD CI) |
|
||||
|
||||
**Remaining open:** DEBT-011 (Alembic), DEBT-023 (image pinning), DEBT-026 (modular mailboxes), DEBT-027 (Dynamic bait store), DEBT-028 (deploy endpoint tests)
|
||||
**Estimated remaining effort:** ~12 hours
|
||||
93
development/DEVELOPMENT.md
Normal file
93
development/DEVELOPMENT.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# DECNET Development Roadmap
|
||||
|
||||
## 🛠️ Service Realism & Interaction (First Release Path)
|
||||
*Goal: Ensure every service is interactive enough to feel real during manual exploration.*
|
||||
|
||||
### Remote Access & Shells
|
||||
- [ ] **SSH (Cowrie)** — Custom filesystem, realistic user database, and command execution.
|
||||
- [ ] **Telnet (Cowrie)** — Realistic banner and command emulation.
|
||||
- [ ] **RDP** — Realistic NLA authentication and screen capture (where possible).
|
||||
- [ ] **VNC** — Realistic RFB protocol handshake and authentication.
|
||||
- [x] **Real SSH** — High-interaction sshd with shell logging.
|
||||
|
||||
### Databases
|
||||
- [ ] **MySQL** — Support for common SQL queries and realistic schema.
|
||||
- [ ] **Postgres** — Realistic version strings and basic query support.
|
||||
- [ ] **MSSQL** — Realistic TDS protocol handshake.
|
||||
- [ ] **MongoDB** — Support for common Mongo wire protocol commands.
|
||||
- [x] **Redis** — Support for basic GET/SET/INFO commands.
|
||||
- [ ] **Elasticsearch** — Realistic REST API responses for `/_cluster/health` etc.
|
||||
|
||||
### Web & APIs
|
||||
- [x] **HTTP** — Flexible templates (WordPress, phpMyAdmin, etc.) with logging.
|
||||
- [ ] **Docker API** — Realistic responses for `docker version` and `docker ps`.
|
||||
- [ ] **Kubernetes (K8s)** — Mocked kubectl responses and basic API exploration.
|
||||
- [x] **LLMNR** — Realistic local name resolution responses via responder-style emulation.
|
||||
|
||||
### File Transfer & Storage
|
||||
- [ ] **SMB** — Realistic share discovery and basic file browsing.
|
||||
- [x] **FTP** — Support for common FTP commands and directory listing.
|
||||
- [ ] **TFTP** — Basic block-based file transfer emulation.
|
||||
|
||||
### Directory & Mail
|
||||
- [ ] **LDAP** — Basic directory search and authentication responses.
|
||||
- [x] **SMTP** — Mail server banners and basic EHLO/MAIL FROM support.
|
||||
- [x] **IMAP** — Realistic mail folder structure and auth.
|
||||
- [x] **POP3** — Basic mail retrieval protocol emulation.
|
||||
|
||||
### Industrial & IoT (ICS)
|
||||
- [x] **MQTT** — Basic topic subscription and publishing support.
|
||||
- [x] **SNMP** — Realistic MIB responses for common OIDs.
|
||||
- [ ] **SIP** — Basic VoIP protocol handshake and registration.
|
||||
- [x] **Conpot** — SCADA/ICS protocol emulation (Modbus, etc.).
|
||||
|
||||
---
|
||||
|
||||
## Core / Hardening
|
||||
|
||||
- [ ] **Attacker fingerprinting** — Capture TLS JA3/JA4 hashes, TCP window sizes, User-Agent strings, and SSH client banners.
|
||||
- [ ] **Canary tokens** — Embed fake AWS keys and honeydocs into decky filesystems.
|
||||
- [ ] **Tarpit mode** — Slow down attackers by drip-feeding bytes or delaying responses.
|
||||
- [x] **Dynamic decky mutation** — Rotate exposed services or OS fingerprints over time.
|
||||
- [x] **Credential harvesting DB** — Centralized database for all username/password attempts.
|
||||
- [ ] **Session recording** — Full capture for SSH/Telnet sessions.
|
||||
- [ ] **Payload capture** — Store and hash files uploaded by attackers.
|
||||
|
||||
## Detection & Intelligence
|
||||
|
||||
- [ ] **Real-time alerting** — Webhook/Slack/Telegram notifications for first-hits.
|
||||
- [ ] **Threat intel enrichment** — Auto-lookup IPs against AbuseIPDB, Shodan, and GreyNoise.
|
||||
- [ ] **Attack campaign clustering** — Group sessions by signatures and timing patterns.
|
||||
- [ ] **GeoIP mapping** — Visualize attacker origin and ASN data on a map.
|
||||
- [ ] **TTPs tagging** — Map observed behaviors to MITRE ATT&CK techniques.
|
||||
|
||||
## Dashboard & Visibility
|
||||
|
||||
- [x] **Web dashboard** — Real-time React SPA + FastAPI backend for logs and fleet status.
|
||||
- [x] **Decky Inventory** — Dedicated "Decoy Fleet" page showing all deployed assets.
|
||||
- [ ] **Pre-built Kibana/Grafana dashboards** — Ship JSON exports for ELK/Grafana.
|
||||
- [ ] **CLI live feed** — `decnet watch` command for a unified, colored terminal stream.
|
||||
- [x] **Traversal graph export** — Export attacker movement as JSON (via CLI).
|
||||
|
||||
## Deployment & Infrastructure
|
||||
|
||||
- [ ] **SWARM / multihost mode** — Ansible-based orchestration for multi-node deployments.
|
||||
- [ ] **Terraform/Pulumi provider** — Cloud-hosted decky deployment.
|
||||
- [ ] **Kubernetes deployment mode** — Run deckies as K8s pods.
|
||||
- [x] **Lifecycle Management** — Automatic API process termination on `teardown`.
|
||||
- [x] **Health monitoring** — Active vs. Deployed decky tracking in the dashboard.
|
||||
|
||||
## Services & Realism
|
||||
|
||||
- [ ] **HTTPS/TLS support** — Honeypots with SSL certificates.
|
||||
- [ ] **Fake Active Directory** — Convincing AD/LDAP emulation.
|
||||
- [ ] **Realistic web apps** — Fake WordPress, Grafana, and phpMyAdmin templates.
|
||||
- [ ] **OT/ICS profiles** — Expanded Modbus, DNP3, and BACnet support.
|
||||
|
||||
## Developer Experience
|
||||
|
||||
- [x] **API Fuzzing** — Property-based testing for all web endpoints.
|
||||
- [x] **CI/CD pipeline** — Automated testing and linting via Gitea Actions.
|
||||
- [x] **Strict Typing** — Project-wide enforcement of PEP 484 type hints.
|
||||
- [ ] **Plugin SDK docs** — Documentation for adding custom services.
|
||||
- [ ] **Config generator wizard** — `decnet wizard` for interactive setup.
|
||||
190
development/EVENTS.md
Normal file
190
development/EVENTS.md
Normal file
@@ -0,0 +1,190 @@
|
||||
# DECNET Honeypot Events
|
||||
|
||||
This document details the events generated by each DECNET honeypot service, as found in their respective `server.py` files.
|
||||
|
||||
## Service: `docker_api`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `request` | `method`, `path`, `remote_addr`, `body` |
|
||||
| `startup` | *None* |
|
||||
|
||||
## Service: `elasticsearch`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `post_request` | `src`, `method`, `path`, `body_preview`, `user_agent` |
|
||||
| `put_request` | `src`, `method`, `path`, `body_preview` |
|
||||
| `delete_request` | `src`, `method`, `path` |
|
||||
| `head_request` | `src`, `method`, `path` |
|
||||
| `root_probe` | `src`, `method`, `path` |
|
||||
| `cat_api` | `src`, `method`, `path` |
|
||||
| `cluster_recon` | `src`, `method`, `path` |
|
||||
| `nodes_recon` | `src`, `method`, `path` |
|
||||
| `security_probe` | `src`, `method`, `path` |
|
||||
| `request` | `src`, `method`, `path` |
|
||||
|
||||
## Service: `ftp`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connection` | `src_ip`, `src_port` |
|
||||
| `user` | `username` |
|
||||
| `auth_attempt` | `username`, `password` |
|
||||
| `download_attempt` | `path` |
|
||||
| `disconnect` | `src_ip`, `src_port` |
|
||||
|
||||
## Service: `http`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `request` | `method`, `path`, `remote_addr`, `headers`, `body` |
|
||||
| `startup` | *None* |
|
||||
|
||||
## Service: `imap`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connect` | `src`, `src_port` |
|
||||
| `disconnect` | `src` |
|
||||
| `auth` | `src`, `username`, `password` |
|
||||
| `command` | `src`, `cmd` |
|
||||
|
||||
## Service: `k8s`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `request` | `method`, `path`, `remote_addr`, `auth`, `body` |
|
||||
| `startup` | *None* |
|
||||
|
||||
## Service: `ldap`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connect` | `src`, `src_port` |
|
||||
| `bind` | `src`, `dn`, `password` |
|
||||
| `disconnect` | `src` |
|
||||
|
||||
## Service: `llmnr`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `query` | `proto`, `src`, `src_port`, `name`, `qtype` |
|
||||
| `raw_packet` | `proto`, `src`, `data`, `error` |
|
||||
|
||||
## Service: `mongodb`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connect` | `src`, `src_port` |
|
||||
| `message` | `src`, `opcode`, `length` |
|
||||
| `disconnect` | `src` |
|
||||
|
||||
## Service: `mqtt`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connect` | `src`, `src_port` |
|
||||
| `disconnect` | `src` |
|
||||
| `auth` | `src` |
|
||||
| `packet` | `src`, `pkt_type` |
|
||||
|
||||
## Service: `mssql`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connect` | `src`, `src_port` |
|
||||
| `disconnect` | `src` |
|
||||
| `auth` | `src`, `username` |
|
||||
| `unknown_packet` | `src`, `pkt_type` |
|
||||
|
||||
## Service: `mysql`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connect` | `src`, `src_port` |
|
||||
| `disconnect` | `src` |
|
||||
| `auth` | `src`, `username` |
|
||||
|
||||
## Service: `pop3`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connect` | `src`, `src_port` |
|
||||
| `disconnect` | `src` |
|
||||
| `user` | `src`, `username` |
|
||||
| `auth` | `src`, `username`, `password` |
|
||||
| `command` | `src`, `cmd` |
|
||||
|
||||
## Service: `postgres`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connect` | `src`, `src_port` |
|
||||
| `startup` | `src`, `username`, `database` |
|
||||
| `auth` | `src`, `pw_hash` |
|
||||
| `disconnect` | `src` |
|
||||
|
||||
## Service: `rdp`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connection` | `src_ip`, `src_port` |
|
||||
| `data` | `src_ip`, `src_port`, `bytes`, `hex` |
|
||||
| `disconnect` | `src_ip`, `src_port` |
|
||||
|
||||
## Service: `redis`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connect` | `src`, `src_port` |
|
||||
| `command` | `src`, `cmd`, `args` |
|
||||
| `disconnect` | `src` |
|
||||
| `auth` | `src`, `password` |
|
||||
|
||||
## Service: `sip`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `request` | `src`, `src_port`, `method`, `from_`, `to`, `username`, `auth` |
|
||||
| `startup` | *None* |
|
||||
|
||||
## Service: `smb`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `shutdown` | *None* |
|
||||
|
||||
## Service: `smtp`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connect` | `src`, `src_port` |
|
||||
| `disconnect` | `src` |
|
||||
| `ehlo` | `src`, `domain` |
|
||||
| `auth_attempt` | `src`, `command` |
|
||||
| `mail_from` | `src`, `value` |
|
||||
| `rcpt_to` | `src`, `value` |
|
||||
| `vrfy` | `src`, `value` |
|
||||
| `unknown_command` | `src`, `command` |
|
||||
|
||||
## Service: `snmp`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `get_request` | `src`, `src_port`, `version`, `community`, `oids` |
|
||||
| `parse_error` | `src`, `error`, `data` |
|
||||
|
||||
## Service: `tftp`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `request` | `src`, `src_port`, `op`, `filename`, `mode` |
|
||||
| `unknown_opcode` | `src`, `opcode`, `data` |
|
||||
|
||||
## Service: `vnc`
|
||||
| Event Type | Included Fields |
|
||||
| --- | --- |
|
||||
| `startup` | *None* |
|
||||
| `connect` | `src`, `src_port` |
|
||||
| `disconnect` | `src` |
|
||||
| `version` | `src`, `client_version` |
|
||||
| `security_choice` | `src`, `type` |
|
||||
| `auth_response` | `src`, `response` |
|
||||
|
||||
63
development/FUTURE.md
Normal file
63
development/FUTURE.md
Normal file
@@ -0,0 +1,63 @@
|
||||
# DECNET — Future Concepts & Architecture
|
||||
|
||||
This document tracks long-term, visionary architectural concepts and ideas that are outside the scope of the 1.0 roadmap, but represent the ultimate end-state of the DECNET framework.
|
||||
|
||||
## The Honeymaze: Spider Network Topology
|
||||
|
||||
### Concept Overview
|
||||
As attackers breach the perimeter, instead of just lateral movement on a flat `/24` or massive VXLAN, DECNET can dynamically generate an infinite "daisy-chain" of isolated Docker networks. This forces the attacker to establish deep, nested C2 proxy chains (SOCKS, chisel, SSH tunnels) to pivot from machine to machine.
|
||||
|
||||
For example:
|
||||
- `decky-01` sits on the main LAN via `eth0` (MACVLAN). It also has `eth1`, which belongs to `docker-bridge-1`.
|
||||
- `decky-02` sits exclusively on `docker-bridge-1` as its `eth0`. It also has `eth1`, belonging to `docker-bridge-2`.
|
||||
- `decky-03` sits exclusively on `docker-bridge-2`.
|
||||
|
||||
### Strategic Value
|
||||
1. **High-Fidelity TTP Telemetry**: By forcing the attacker into a corner where they *must* deploy pivot infrastructure, we capture extremely high-value indicators of compromise regarding their proxy tooling and network tradecraft.
|
||||
2. **Infinite Time Sinks**: An attacker can spend weeks navigating simulated air-gaps and deep corporate enclaves while experiencing a false sense of progression.
|
||||
|
||||
### Execution & Realism Restrictions
|
||||
To prevent the topology from feeling artificial or obviously simulated:
|
||||
1. **Asymmetric Nesting**: A strict 1:1 nested daisy chain is a dead giveaway. Real corporate networks branch organically.
|
||||
- Some machines should be terminal endpoints (no nested subnets).
|
||||
   - Some machines act as jump hosts bridging two large local arrays.
|
||||
   - The depth and horizontal fan-out per subnet must be randomized to emulate realistic DMZ → Internal → OT enclave architectures.
|
||||
2. **Variable Sizing**: Subnets must contain a random number of containers. An internal enclave might have 50 flat machines, and only *one* of them acts as the bridge to the next isolated segment.
|
||||
|
||||
### The Logging Paradox Solved
|
||||
Deeply nested, air-gapped machines present a logging challenge: if `decky-50` has no route to the internet or the logging network, how can it forward telemetry stealthily?
|
||||
|
||||
**Solution**: DECNET completely bypasses the container networking stack by relying purely on Docker's native `stdout` and daemon-level logging drivers. Because the host daemon handles the extraction, the attacker can completely destroy the container's virtual interfaces or be 50 layers deep in an air-gap without ever noticing a magic route, and the telemetry will still perfectly reach the SIEM out-of-band.
|
||||
|
||||
### Simulated Topographical Latency
|
||||
If an attacker proxies 5 subnets deep into what is supposed to be a secure, physically segmented enclave, and `ping` returns a flat `0.05ms` response time, they will instantly realize it's a local simulation on a single host.
|
||||
|
||||
To maintain the illusion of depth, DECNET can utilize the **Linux Traffic Control (`tc`)** subsystem and its **Network Emulator (`netem`)** module on the virtual bridge interfaces (`veth` pairs).
|
||||
|
||||
By procedurally generating `tc` rules as the network scales, we can inject mathematical latency penalties per hop:
|
||||
```bash
|
||||
# Example: Add 45ms latency, +/- 10ms jitter on a normal curve, with 0.1% packet loss
|
||||
tc qdisc add dev eth1 root netem delay 45ms 10ms distribution normal loss 0.1%
|
||||
```
|
||||
As an attacker pivots deeper into the "Spider Network," this injected latency compounds automatically. A proxy chain going 4 levels deep would realistically suffer from 150ms+ of latency and erratic jitter, perfectly mimicking the experience of routing over slow, multi-site corporate VPNs.
|
||||
|
||||
---
|
||||
|
||||
## Distributed Scale: Swarm Overlay Architecture
|
||||
|
||||
To scale DECNET across multiple physical racks or sites, DECNET can leverage **Docker Swarm Overlay Networks** to create a unified L2/L3 backbone without surrendering control to Swarm's "Orchestration" scheduler.
|
||||
|
||||
### The `--attachable` Paradigm
|
||||
By default, Docker's `overlay` driver requires Swarm mode and is tightly coupled to `docker service` (which abstracts and randomizes container placement to balance loads). In honeypot deployments, absolute control over physical placement is critical (e.g., placing the `scada-archetype` explicitly on bare-metal node C in the DMZ).
|
||||
|
||||
To solve this, DECNET will initialize the swarm control plane simply to construct the backend VXLAN, but completely ignore the service scheduler in favor of `--attachable` networks:
|
||||
|
||||
1. **Initialize the Control Plane** (manager node + remote worker joins):
|
||||
```bash
|
||||
docker swarm init
|
||||
```
|
||||
2. **Create the Attachable Backbone**:
|
||||
```bash
|
||||
docker network create -d overlay --attachable decnet-backbone
|
||||
```
|
||||
3. **Deploy Standalone**: Keep relying entirely on local `decnet deploy` scripts on the individual physical nodes. Because the network is `attachable`, standalone container instances can seamlessly attach to it and communicate with containers running on completely different hardware across the globe as if they were on a local layer 2 switch!
|
||||
248
development/HARDENING.md
Normal file
248
development/HARDENING.md
Normal file
@@ -0,0 +1,248 @@
|
||||
# OS Fingerprint Spoofing — Hardening Roadmap
|
||||
|
||||
This document describes the current state of OS fingerprint spoofing in DECNET
|
||||
and the planned improvements to make `nmap -O`, `p0f`, and similar passive/active
|
||||
scanners see the intended OS rather than a generic Linux kernel.
|
||||
|
||||
---
|
||||
|
||||
## Current State (Post-Phase 1)
|
||||
|
||||
Phase 1 is **implemented and tested against live scans**. Each archetype declares
|
||||
an `nmap_os` slug (e.g. `"windows"`, `"linux"`, `"embedded"`). The **composer**
|
||||
resolves that slug via `os_fingerprint.get_os_sysctls()` and injects the resulting
|
||||
kernel parameters into the **base container** as Docker `sysctls`. Service
|
||||
containers inherit the same network namespace via `network_mode: "service:<base>"`
|
||||
and therefore appear identical to outside scanners.
|
||||
|
||||
### Implemented sysctls (8 per OS profile)
|
||||
|
||||
| Sysctl | Purpose | Win | Linux | Embedded |
|
||||
|---|---|---|---|---|
|
||||
| `net.ipv4.ip_default_ttl` | TTL discriminator | `128` | `64` | `255` |
|
||||
| `net.ipv4.tcp_syn_retries` | SYN retransmit count | `2` | `6` | `3` |
|
||||
| `net.ipv4.tcp_timestamps` | TCP timestamp option (OPS probes) | `0` | `1` | `0` |
|
||||
| `net.ipv4.tcp_window_scaling` | Window scale option | `1` | `1` | `0` |
|
||||
| `net.ipv4.tcp_sack` | Selective ACK option | `1` | `1` | `0` |
|
||||
| `net.ipv4.tcp_ecn` | ECN negotiation | `0` | `2` | `0` |
|
||||
| `net.ipv4.ip_no_pmtu_disc` | DF bit in ICMP replies | `0` | `0` | `1` |
|
||||
| `net.ipv4.tcp_fin_timeout` | FIN_WAIT_2 timeout (seconds) | `30` | `60` | `15` |
|
||||
|
||||
### Live scan results (Windows decky, 2026-04-10)
|
||||
|
||||
**What works:**
|
||||
|
||||
| nmap field | Expected | Got | Status |
|
||||
|---|---|---|---|
|
||||
| TTL (`T=`) | `80` (128 dec) | `T=80` | ✅ |
|
||||
| TCP timestamps (`TS=`) | `U` (unsupported) | `TS=U` | ✅ |
|
||||
| ECN (`CC=`) | `N` | `CC=N` | ✅ |
|
||||
| TCP window (`W1=`) | `FAF0` (64240) | `W1=FAF0` | ✅ |
|
||||
| Window options (`O1=`) | `M5B4NNSNWA` | `O1=M5B4NNSNWA` | ✅ |
|
||||
| SACK | present | present | ✅ |
|
||||
| DF bit | `DF=Y` | `DF=Y` | ✅ |
|
||||
|
||||
**What fails:**
|
||||
|
||||
| nmap field | Expected (Win) | Got | Impact |
|
||||
|---|---|---|---|
|
||||
| IP ID (`TI=`) | `I` (incremental) | `Z` (all zeros) | **Critical** — no Windows fingerprint in nmap's DB has `TI=Z`. This alone causes 91% confidence "Linux 2.4/2.6 embedded" |
|
||||
| ICMP rate limiting | unlimited | Linux default rate | Minor — affects `IE`/`U1` probe groups |
|
||||
|
||||
**Key finding:** `TI=Z` is the **single remaining blocker** for a convincing
|
||||
Windows fingerprint. Everything else (TTL, window, timestamps, ECN, SACK, DF)
|
||||
is already correct. The Phase 2 window mangling originally planned is
|
||||
**unnecessary** — the kernel already produces the correct 64240 value.
|
||||
|
||||
---
|
||||
|
||||
## Remaining Improvement Phases
|
||||
|
||||
### Phase 2 — ICMP Tuning via Sysctls (Low effort, Medium impact)
|
||||
|
||||
Two additional namespace-scoped sysctls control ICMP error rate limiting.
|
||||
nmap's `IE` and `U1` probe groups measure how quickly the target responds to
|
||||
ICMP and UDP-to-closed-port probes.
|
||||
|
||||
**Changes required:** add to `OS_SYSCTLS` in `decnet/os_fingerprint.py`.
|
||||
|
||||
| Sysctl | What it controls | Windows | Linux | Embedded |
|
||||
|---|---|---|---|---|
|
||||
| `net.ipv4.icmp_ratelimit` | Minimum ms between ICMP error messages | `0` (none) | `1000` (1/sec) | `1000` |
|
||||
| `net.ipv4.icmp_ratemask` | Bitmask of ICMP types subject to rate limiting | `0` | `6168` | `6168` |
|
||||
|
||||
**Why:** Windows does not rate-limit ICMP error responses. Linux defaults to
|
||||
1000ms between ICMP errors (effectively 1 per second per destination). When
|
||||
nmap sends rapid-fire UDP probes to closed ports, a Windows machine replies to
|
||||
all of them instantly while a Linux machine throttles responses. Setting
|
||||
`icmp_ratelimit=0` for Windows makes the `U1` probe response timing match.
|
||||
|
||||
**Estimated effort:** 15 min — same pattern as Phase 1, just two more entries.
|
||||
|
||||
---
|
||||
|
||||
### Phase 3 — NFQUEUE IP ID Rewriting (Medium effort, Very high impact)
|
||||
|
||||
This is the **highest-priority remaining item** and the only way to fix `TI=Z`.
|
||||
|
||||
#### Root cause of `TI=Z`
|
||||
|
||||
The Linux kernel's `ip_select_ident()` function sets the IP Identification
|
||||
field to `0` for all TCP packets where DF=1 (don't-fragment bit set). This is
|
||||
correct behavior per RFC 6864 ("IP ID is meaningless when DF=1") but no Windows
|
||||
fingerprint in nmap's database has `TI=Z`. **No namespace-scoped sysctl can
|
||||
change this** — it's hardcoded in the kernel's TCP stack.
|
||||
|
||||
Note: `ip_no_pmtu_disc` does NOT fix this. That sysctl controls Path MTU
|
||||
Discovery for UDP/ICMP paths only, not TCP IP ID generation. Setting it to 1
|
||||
for Windows was tested and confirmed to have no effect on `TI=Z`.
|
||||
|
||||
#### Solution: NFQUEUE userspace packet rewriting
|
||||
|
||||
Use `iptables -t mangle` to send outgoing TCP packets to an NFQUEUE, where a
|
||||
small Python daemon rewrites the IP ID field before release.
|
||||
|
||||
```
|
||||
┌──────────────────────────┐
|
||||
TCP SYN-ACK ───► │ iptables mangle/OUTPUT │
|
||||
│ -j NFQUEUE --queue-num 0 │
|
||||
└───────────┬──────────────┘
|
||||
▼
|
||||
┌──────────────────────────┐
|
||||
│ Python NFQUEUE daemon │
|
||||
│ 1. Read IP ID field │
|
||||
│ 2. Replace with target │
|
||||
│ pattern (sequential │
|
||||
│ for Windows, zero │
|
||||
│ for embedded, etc.) │
|
||||
│ 3. Recalculate checksum │
|
||||
│ 4. Accept packet │
|
||||
└───────────┬──────────────┘
|
||||
▼
|
||||
Packet goes out
|
||||
```
|
||||
|
||||
**Target IP ID patterns by OS:**
|
||||
|
||||
| OS | nmap label | Pattern | Implementation |
|
||||
|---|---|---|---|
|
||||
| Windows | `TI=I` | Sequential, incrementing by 1 per packet | Global atomic counter |
|
||||
| Linux 3.x+ | `TI=Z` | Zero (DF=1) or randomized | Leave untouched (already correct) |
|
||||
| Embedded/Cisco | `TI=I` or `TI=Z` | Varies by device | Sequential or zero |
|
||||
| BSD | `TI=RI` | Randomized incremental | Counter + small random delta |
|
||||
|
||||
**Two possible approaches:**
|
||||
|
||||
1. **TCPOPTSTRIP + NFQUEUE (comprehensive)**
|
||||
- `TCPOPTSTRIP` can strip/modify TCP options (window scale, SACK, etc.)
|
||||
via pure iptables rules, no userspace needed
|
||||
- `NFQUEUE` handles IP-layer rewriting (IP ID) in userspace
|
||||
- Combined: full control over the TCP/IP fingerprint
|
||||
|
||||
2. **NFQUEUE only (simpler)**
|
||||
- Single Python daemon handles everything: IP ID rewriting, and optionally
|
||||
TCP option/window manipulation if ever needed
|
||||
- Fewer moving parts, one daemon to monitor
|
||||
|
||||
**Required changes:**
|
||||
- `templates/base/Dockerfile` — new, installs `iptables` + `python3-netfilterqueue`
|
||||
- `templates/base/entrypoint.sh` — new, sets up iptables rules + launches daemon
|
||||
- `templates/base/nfq_spoofer.py` — new, the NFQUEUE packet rewriting daemon
|
||||
- `os_fingerprint.py` — add `ip_id_pattern` field to each OS profile
|
||||
- `composer.py` — pass `SPOOF_IP_ID` env var + use `templates/base/Dockerfile`
|
||||
instead of bare distro images for base containers
|
||||
|
||||
**Dependencies on the host kernel:**
|
||||
- `nfnetlink_queue` module (`modprobe nfnetlink_queue`)
|
||||
- `xt_NFQUEUE` module (standard in all distro kernels)
|
||||
- `NET_ADMIN` capability (already granted)
|
||||
|
||||
**Dependencies in the base container image:**
|
||||
- `iptables` package
|
||||
- `python3` + `python3-netfilterqueue` (or `scapy` with `NetfilterQueue`)
|
||||
|
||||
**Estimated effort:** 4–6 hours + tests
|
||||
|
||||
---
|
||||
|
||||
### Phase 4 — Full Fingerprint Database Matching (Hard, Low marginal impact)
|
||||
|
||||
After Phases 2–3, the remaining fingerprint differences are increasingly minor:
|
||||
|
||||
| Signal | Current | Notes |
|
||||
|---|---|---|
|
||||
| TCP initial sequence number (ISN) pattern (`SP=`, `ISR=`) | Linux kernel default | Kernel-level, not spoofable without userspace TCP |
|
||||
| TCP window variance across probes | Constant (`FAF0` × 6) | Real Windows sometimes varies slightly |
|
||||
| T2/T3 responses | `R=N` (no response) | Correct for some Windows, wrong for others |
|
||||
| ICMP data payload echo | Linux default | Difficult to control per-namespace |
|
||||
|
||||
These are diminishing returns. With Phases 1–3 complete, `nmap -O` should
|
||||
correctly identify the OS family in >90% of scans.
|
||||
|
||||
> Phase 4 is **not recommended** for the near term. Effort is measured in days
|
||||
> for single-digit percentage improvements.
|
||||
|
||||
---
|
||||
|
||||
## Implementation Priority (revised)
|
||||
|
||||
```
|
||||
Phase 1 ✅ DONE ─────────────────────────────
|
||||
└─ 8 sysctls per OS in os_fingerprint.py
|
||||
└─ Verified: TTL, window, timestamps, ECN, SACK all correct
|
||||
|
||||
Phase 2 ──────────────────────────────── (implement next)
|
||||
└─ 2 more sysctls: icmp_ratelimit + icmp_ratemask
|
||||
└─ Estimated effort: 15 min
|
||||
|
||||
Phase 3 ──────────────────────────────── (high priority)
|
||||
└─ NFQUEUE daemon in templates/base/
|
||||
└─ Fix TI=Z for Windows (THE remaining blocker)
|
||||
└─ Estimated effort: 4–6 hours + tests
|
||||
|
||||
Phase 4 ──────────────────────────────── (not recommended)
|
||||
└─ ISN pattern, T2/T3, ICMP payload echo
|
||||
└─ Estimated effort: days, diminishing returns
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Testing Strategy
|
||||
|
||||
After each phase, validate with:
|
||||
|
||||
```bash
|
||||
# Active OS fingerprint scan against a deployed decky
|
||||
sudo nmap -O --osscan-guess <decky_ip>
|
||||
|
||||
# Aggressive scan with version detection
|
||||
sudo nmap -sV -O -A --osscan-guess <decky_ip>
|
||||
|
||||
# Passive fingerprinting (run on host while generating traffic to decky)
|
||||
sudo p0f -i <macvlan_interface> -p
|
||||
|
||||
# Quick TTL + window check
|
||||
hping3 -S -p 445 <decky_ip> # inspect TTL and window in reply
|
||||
|
||||
# Test INI (all OS families, 10 deckies)
|
||||
sudo .venv/bin/decnet deploy --config arche-test.ini --interface eth0
|
||||
```
|
||||
|
||||
### Expected outcomes by phase
|
||||
|
||||
| Check | Pre-Phase 1 | Post-Phase 1 ✅ | Post-Phase 2 | Post-Phase 3 |
|
||||
|---|---|---|---|---|
|
||||
| TTL | ✅ | ✅ | ✅ | ✅ |
|
||||
| TCP timestamps | ❌ | ✅ | ✅ | ✅ |
|
||||
| TCP window size | ❌ | ✅ (kernel default OK) | ✅ | ✅ |
|
||||
| ECN | ❌ | ✅ | ✅ | ✅ |
|
||||
| ICMP rate limiting | ❌ | ❌ | ✅ | ✅ |
|
||||
| IP ID sequence (`TI=`) | ❌ | ❌ | ❌ | ✅ |
|
||||
| `nmap -O` family match | ⚠️ | ⚠️ (TI=Z blocks) | ⚠️ | ✅ |
|
||||
| `p0f` match | ⚠️ | ⚠️ | ✅ | ✅ |
|
||||
|
||||
### Note on `P=` field in nmap output
|
||||
|
||||
The `P=x86_64-redhat-linux-gnu` that appears in the `SCAN(...)` block is the
|
||||
**GNU build triple of the nmap binary itself**, not a fingerprint of the target.
|
||||
It cannot be changed and is not relevant to OS spoofing.
|
||||
232
development/ICS_SCADA.md
Normal file
232
development/ICS_SCADA.md
Normal file
@@ -0,0 +1,232 @@
|
||||
# ICS/SCADA Bait — Plan
|
||||
|
||||
> Scenario: attacker finds MQTT broker on a water treatment plant, subscribes to
|
||||
> sensor topics, publishes commands trying to "open the valve" or "disable chlorination".
|
||||
|
||||
---
|
||||
|
||||
## Services in scope
|
||||
|
||||
| Service | Port | Current state | Target state |
|
||||
|---------|------|--------------|-------------|
|
||||
| MQTT | 1883 | CONNACK 0x05 (reject all) | CONNACK 0x00, fake sensor topics |
|
||||
| SNMP | 161/UDP | Functional, generic sysDescr | sysDescr tuned per archetype |
|
||||
| Conpot | 502 | Not responding | Investigate + fix port mapping |
|
||||
|
||||
---
|
||||
|
||||
## MQTT — water plant persona
|
||||
|
||||
### Current behavior
|
||||
|
||||
Every CONNECT gets `CONNACK 0x05` (Not Authorized) and the connection is closed.
|
||||
An ICS attacker immediately moves on — there's nothing to interact with.
|
||||
|
||||
### Target behavior
|
||||
|
||||
Accept all connections (`CONNACK 0x00`). Publish retained sensor data on
|
||||
realistic SCADA topics. Log every PUBLISH command (attacker trying to control plant).
|
||||
|
||||
### Topic tree
|
||||
|
||||
```
|
||||
plant/water/tank1/level → "73.4" (percent full)
|
||||
plant/water/tank1/pressure → "2.81" (bar)
|
||||
plant/water/pump1/status → "RUNNING"
|
||||
plant/water/pump1/rpm → "1420"
|
||||
plant/water/pump2/status → "STANDBY"
|
||||
plant/water/chlorine/dosing → "1.2" (mg/L)
|
||||
plant/water/chlorine/residual → "0.8" (mg/L)
|
||||
plant/water/valve/inlet/state → "OPEN"
|
||||
plant/water/valve/drain/state → "CLOSED"
|
||||
plant/alarm/high_pressure → "0"
|
||||
plant/alarm/low_chlorine → "0"
|
||||
plant/alarm/pump_fault → "0"
|
||||
plant/$SYS/broker/version → "Mosquitto 2.0.15"
|
||||
plant/$SYS/broker/uptime → "2847392 seconds"
|
||||
```
|
||||
|
||||
All topics have `retain=True` so subscribers immediately receive the last value.
|
||||
|
||||
### Protocol changes needed
|
||||
|
||||
Add handling for:
|
||||
|
||||
- **SUBSCRIBE (pkt_type=8)**: Parse topic filter + QoS pairs. For each matching topic,
|
||||
send SUBACK then immediately send a PUBLISH with the retained value.
|
||||
- **PUBLISH (pkt_type=3)**: Log the topic + payload (this is the attacker "sending a command").
|
||||
Return PUBACK for QoS 1. Do NOT update the retained value (the plant ignores the command).
|
||||
- **PINGREQ (pkt_type=12)**: Already handled. Keep alive.
|
||||
- **DISCONNECT (pkt_type=14)**: Close cleanly.
|
||||
|
||||
Do NOT implement: UNSUBSCRIBE, QoS 2. Return SUBACK with QoS 1 for all subscriptions.
|
||||
|
||||
### CONNACK change
|
||||
|
||||
```python
|
||||
_CONNACK_ACCEPTED = b"\x20\x02\x00\x00" # session_present=0, return_code=0
|
||||
```
|
||||
|
||||
### Env vars
|
||||
|
||||
| Var | Default | Description |
|
||||
|-----|---------|-------------|
|
||||
| `MQTT_PERSONA` | `water_plant` | Topic tree preset |
|
||||
| `MQTT_ACCEPT_ALL` | `1` | Accept all connections |
|
||||
| `NODE_NAME` | `mqtt-broker` | Hostname in logs |
|
||||
|
||||
---
|
||||
|
||||
## SUBSCRIBE packet parsing
|
||||
|
||||
```python
|
||||
def _parse_subscribe(payload: bytes):
|
||||
"""Returns (packet_id, [(topic, qos), ...])"""
|
||||
pos = 0
|
||||
packet_id = struct.unpack(">H", payload[pos:pos+2])[0]
|
||||
pos += 2
|
||||
topics = []
|
||||
while pos < len(payload):
|
||||
topic, pos = _read_utf8(payload, pos)
|
||||
qos = payload[pos] & 0x03
|
||||
pos += 1
|
||||
topics.append((topic, qos))
|
||||
return packet_id, topics
|
||||
```
|
||||
|
||||
### SUBACK
|
||||
|
||||
```python
|
||||
def _suback(packet_id: int, granted_qos: list[int]) -> bytes:
|
||||
payload = struct.pack(">H", packet_id) + bytes(granted_qos)
|
||||
return bytes([0x90, len(payload)]) + payload
|
||||
```
|
||||
|
||||
### PUBLISH (server → client, retained)
|
||||
|
||||
```python
|
||||
def _publish(topic: str, value: str, retain: bool = True) -> bytes:
|
||||
topic_bytes = topic.encode()
|
||||
topic_len = struct.pack(">H", len(topic_bytes))
|
||||
payload = value.encode()
|
||||
# Fixed header: type=3, retain flag, no QoS (fire and forget for retained)
|
||||
fixed = 0x31 if retain else 0x30
|
||||
remaining = len(topic_len) + len(topic_bytes) + len(payload)
|
||||
return bytes([fixed, remaining]) + topic_len + topic_bytes + payload
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## SNMP — sysDescr per archetype
|
||||
|
||||
Current `sysDescr` is a generic Linux string. It should reflect the decky's persona.
|
||||
|
||||
### Archetype strings
|
||||
|
||||
| Archetype | sysDescr |
|
||||
|-----------|---------|
|
||||
| water_plant | `Linux scada-plc01 4.19.0-18-amd64 #1 SMP Debian 4.19.208-1 (2021-09-29) x86_64` |
|
||||
| factory | `VxWorks 6.9 (Rockwell Automation Allen-Bradley ControlLogix 5580)` |
|
||||
| substation | `SEL Real-Time Automation Controller RTAC SEL-3555 firmware 1.9.7.0` |
|
||||
| hospital | `Linux medlogic-srv01 5.10.0-21-amd64 #1 SMP Debian 5.10.162-1 x86_64` |
|
||||
| default | `Linux decky-host 5.15.0-91-generic #101-Ubuntu SMP Tue Nov 14 13:30:08 UTC 2023 x86_64` |
|
||||
|
||||
Env var `SNMP_ARCHETYPE` selects the string. The SNMP server should also tune:
|
||||
|
||||
- `sysContact.0` → `ICS Admin <ics-admin@plant.local>`
|
||||
- `sysLocation.0` → `Water Treatment Facility — Pump Room B`
|
||||
- `sysName.0` → `scada-plc01` (from `NODE_NAME`)
|
||||
|
||||
---
|
||||
|
||||
## Conpot — Modbus TCP (port 502)
|
||||
|
||||
### Current state
|
||||
|
||||
Port 502 shows `CLOSED` in nmap. Conpot is deployed as a service container but
|
||||
is either not binding to 502 or the port mapping is wrong.
|
||||
|
||||
### Diagnosis steps
|
||||
|
||||
1. Check the compose fragment: `decnet services conpot` — what port does it expose?
|
||||
2. `docker exec decky-01-conpot netstat -tlnp` or `ss -tlnp` — is Conpot listening on 502?
|
||||
3. Check Conpot's default config — it may listen on a non-standard port (e.g. 5020) and
|
||||
expect a host-level iptables REDIRECT rule to map 502 → 5020.
|
||||
|
||||
### Fix options
|
||||
|
||||
**Option A** (preferred): Configure Conpot to listen directly on 502 by editing its
|
||||
`default.xml` template and setting `<port>502</port>`.
|
||||
|
||||
**Option B**: Add `iptables -t nat -A PREROUTING -p tcp --dport 502 -j REDIRECT --to-port 5020`
|
||||
to the base container entrypoint. Fragile — prefer A.
|
||||
|
||||
### What Modbus should respond
|
||||
|
||||
Conpot's default Modbus template already implements a plausible PLC. The key registers
|
||||
to tune for water-plant persona:
|
||||
|
||||
| Register | Address | Value | Description |
|
||||
|----------|---------|-------|-------------|
|
||||
| Coil | 0 | 1 | Pump 1 running |
|
||||
| Coil | 1 | 0 | Pump 2 standby |
|
||||
| Holding | 0 | 734 | Tank level (73.4%) |
|
||||
| Holding | 1 | 281 | Pressure (2.81 bar × 100) |
|
||||
| Holding | 2 | 12 | Chlorine dosing (1.2 mg/L × 10) |
|
||||
|
||||
These values should be consistent with the MQTT topic tree so an attacker who
|
||||
probes both sees a coherent picture.
|
||||
|
||||
---
|
||||
|
||||
## Log events
|
||||
|
||||
### MQTT
|
||||
|
||||
| event_type | Fields | Trigger |
|
||||
|------------|--------|---------|
|
||||
| `connect` | src, src_port, client_id, username | CONNECT packet |
|
||||
| `subscribe` | src, topics | SUBSCRIBE packet |
|
||||
| `publish` | src, topic, payload | PUBLISH from client (attacker command!) |
|
||||
| `disconnect` | src | DISCONNECT or connection lost |
|
||||
|
||||
### SNMP
|
||||
|
||||
No changes to event structure — sysDescr is just a config string.
|
||||
|
||||
---
|
||||
|
||||
## Files to change
|
||||
|
||||
| File | Change |
|
||||
|------|--------|
|
||||
| `templates/mqtt/server.py` | Accept connections, SUBSCRIBE handler, retained PUBLISH, PUBLISH log |
|
||||
| `templates/snmp/server.py` | Add `SNMP_ARCHETYPE` env var, tune sysDescr/sysContact/sysLocation |
|
||||
| `templates/conpot/` | Investigate port config, fix 502 binding |
|
||||
| `tests/test_mqtt.py` | New: connect accepted, subscribe → retained publish, attacker publish logged |
|
||||
| `tests/test_snmp.py` | Extend: sysDescr per archetype |
|
||||
|
||||
---
|
||||
|
||||
## Verification against live decky
|
||||
|
||||
```bash
|
||||
# MQTT: connect and subscribe
|
||||
mosquitto_sub -h 192.168.1.200 -t "plant/#" -v
|
||||
|
||||
# Expected output:
|
||||
# plant/water/tank1/level 73.4
|
||||
# plant/water/pump1/status RUNNING
|
||||
# ...
|
||||
|
||||
# MQTT: attacker sends a command (should be logged)
|
||||
mosquitto_pub -h 192.168.1.200 -t "plant/water/valve/inlet/state" -m "CLOSED"
|
||||
|
||||
# Modbus: read coil 0 (pump status)
|
||||
# (requires mbpoll or similar)
|
||||
mbpoll -t 0 -a 1 -r 1 -c 2 192.168.1.200   # -t 0 addresses coils; mbpoll defaults to holding registers
|
||||
|
||||
# SNMP: sysDescr check
|
||||
snmpget -v2c -c public 192.168.1.200 1.3.6.1.2.1.1.1.0
|
||||
# Expected: STRING: "Linux scada-plc01 4.19.0..."
|
||||
```
|
||||
220
development/IMAP_BAIT.md
Normal file
220
development/IMAP_BAIT.md
Normal file
@@ -0,0 +1,220 @@
|
||||
# IMAP Bait Mailboxes — Plan
|
||||
|
||||
> Scenario: attacker credential-stuffs IMAP, logs in as `admin`/`admin`,
|
||||
> browses mail, finds juicy internal communications and credential leaks.
|
||||
|
||||
---
|
||||
|
||||
## Current state
|
||||
|
||||
Both IMAP and POP3 reject **all** credentials with a hard-coded failure.
|
||||
No mailbox commands are implemented. An attacker that successfully guesses
|
||||
credentials (which they can't, ever) would have nothing to read anyway.
|
||||
|
||||
This is the biggest missed opportunity in the whole stack.
|
||||
|
||||
---
|
||||
|
||||
## Design
|
||||
|
||||
### Credential policy
|
||||
|
||||
Accept a configurable set of username/password pairs. Defaults baked into
|
||||
the image — typical attacker wordlist winners:
|
||||
|
||||
```
|
||||
admin / admin
|
||||
admin / password
|
||||
admin / 123456
|
||||
root / root
|
||||
mail / mail
|
||||
user / user
|
||||
```
|
||||
|
||||
Env var override: `IMAP_USERS=admin:admin,root:toor,user:letmein`
|
||||
|
||||
Wrong credentials → `NO [AUTHENTICATIONFAILED] Invalid credentials` (log the attempt).
|
||||
Right credentials → `OK` + full session.
|
||||
|
||||
### Fake mailboxes
|
||||
|
||||
One static mailbox tree, same for all users (honeypot doesn't need per-user isolation):
|
||||
|
||||
```
|
||||
INBOX (12 messages)
|
||||
Sent (8 messages)
|
||||
Drafts (1 message)
|
||||
Archive (3 messages)
|
||||
```
|
||||
|
||||
### Bait email content
|
||||
|
||||
Bait emails are seeded at startup from a `MAIL_SEED` list embedded in the server.
|
||||
Content is designed to reward the attacker for staying in the session:
|
||||
|
||||
**INBOX messages (selected)**
|
||||
|
||||
| # | From | Subject | Bait payload |
|
||||
|---|------|---------|-------------|
|
||||
| 1 | devops@company.internal | AWS credentials rotation | `AKIAIOSFODNN7EXAMPLE` / `wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY` |
|
||||
| 2 | monitoring@company.internal | DB password changed | `mysql://admin:Sup3rS3cr3t!@10.0.1.5:3306/production` |
|
||||
| 3 | noreply@github.com | Your personal access token | `ghp_16C7e42F292c6912E7710c838347Ae178B4a` |
|
||||
| 4 | admin@company.internal | VPN config attached | `vpn.company.internal:1194 user=vpnadmin pass=VpnP@ss2024` |
|
||||
| 5 | sysadmin@company.internal | Root password | New root pw: `r00tM3T00!` — change after first login |
|
||||
| 6 | backup@company.internal | Backup job failed | Backup to `192.168.1.50:/mnt/nas` — credentials in /etc/backup.conf |
|
||||
| 7 | alerts@company.internal | SSH brute-force alert | 47 attempts from 185.220.101.x against root — all blocked |
|
||||
|
||||
**Sent messages**
|
||||
|
||||
| # | To | Subject | Bait payload |
|
||||
|---|-----|---------|-------------|
|
||||
| 1 | vendor@external.com | API credentials | API key: `sk_live_xK3mF2...9aP` |
|
||||
| 2 | helpdesk@company.internal | Need access reset | My password is `Winter2024!` — please reset MFA |
|
||||
|
||||
**Drafts**
|
||||
|
||||
| # | Subject | Bait payload |
|
||||
|---|---------|-------------|
|
||||
| 1 | DO NOT SEND - k8s secrets | `kubectl get secret admin-token -n kube-system -o yaml` output pasted in |
|
||||
|
||||
---
|
||||
|
||||
## Protocol implementation
|
||||
|
||||
### IMAP4rev1 commands to implement
|
||||
|
||||
```
|
||||
CAPABILITY → * CAPABILITY IMAP4rev1 LITERAL+ SASL-IR LOGIN-REFERRALS ID ENABLE IDLE AUTH=PLAIN AUTH=LOGIN
|
||||
LOGIN → authenticate or reject
|
||||
SELECT → select INBOX / Sent / Drafts / Archive
|
||||
LIST → return folder tree
|
||||
LSUB → same as LIST (subscribed)
|
||||
STATUS → return EXISTS / RECENT / UNSEEN for a mailbox
|
||||
FETCH → return message headers or full body
|
||||
UID FETCH → same with UID addressing
|
||||
SEARCH → stub: return all UIDs (we don't need real search)
|
||||
EXAMINE → read-only SELECT
|
||||
CLOSE → deselect current mailbox
|
||||
LOGOUT → BYE + OK
|
||||
NOOP → OK
|
||||
```
|
||||
|
||||
Commands NOT needed (return `BAD`): `STORE`, `COPY`, `APPEND`, `EXPUNGE`.
|
||||
Attackers rarely run these. Logging `BAD` is fine if they do.
|
||||
|
||||
### Banner
|
||||
|
||||
Change from:
|
||||
```
|
||||
* OK [omega-decky] IMAP4rev1 Service Ready
|
||||
```
|
||||
To:
|
||||
```
|
||||
* OK Dovecot ready.
|
||||
```
|
||||
|
||||
nmap currently says "(unrecognized)". Dovecot banner makes it ID correctly.
|
||||
|
||||
### CAPABILITY advertisement
|
||||
|
||||
```
|
||||
* CAPABILITY IMAP4rev1 LITERAL+ SASL-IR LOGIN-REFERRALS ID ENABLE IDLE AUTH=PLAIN AUTH=LOGIN
|
||||
```
|
||||
|
||||
### SELECT response
|
||||
|
||||
```
|
||||
* 12 EXISTS
|
||||
* 0 RECENT
|
||||
* OK [UNSEEN 7] Message 7 is first unseen
|
||||
* OK [UIDVALIDITY 1712345678] UIDs valid
|
||||
* OK [UIDNEXT 13] Predicted next UID
|
||||
* FLAGS (\Answered \Flagged \Deleted \Seen \Draft)
|
||||
* OK [PERMANENTFLAGS (\Deleted \Seen \*)] Limited
|
||||
A3 OK [READ-WRITE] SELECT completed
|
||||
```
|
||||
|
||||
### FETCH envelope/body
|
||||
|
||||
Message structs stored as Python dataclasses at startup. `FETCH 1:* (FLAGS ENVELOPE)` returns
|
||||
envelope tuples in RFC 3501 format. `FETCH N BODY[]` returns the raw RFC 2822 message.
|
||||
|
||||
---
|
||||
|
||||
## POP3 parity
|
||||
|
||||
POP3 is much simpler. Same credential list. After successful PASS:
|
||||
|
||||
```
|
||||
STAT → +OK 12 48000 (12 messages, total ~48 KB)
|
||||
LIST → +OK 12 messages\r\n1 3912\r\n2 2048\r\n...\r\n.
|
||||
RETR N → +OK <size>\r\n<raw message>\r\n.
|
||||
TOP N L → +OK\r\n<first L body lines>\r\n.
|
||||
UIDL → +OK\r\n1 <uid>\r\n...\r\n.
|
||||
DELE N → +OK Message deleted (just log it, don't actually remove)
|
||||
CAPA → +OK\r\nTOP\r\nUSER\r\nUIDL\r\nRESP-CODES\r\nAUTH-RESP-CODE\r\nSASL\r\n.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## State machine (IMAP)
|
||||
|
||||
```
|
||||
NOT_AUTHENTICATED
|
||||
→ LOGIN success → AUTHENTICATED
|
||||
→ LOGIN fail → NOT_AUTHENTICATED (log, stay open for retries)
|
||||
|
||||
AUTHENTICATED
|
||||
→ SELECT / EXAMINE → SELECTED
|
||||
→ LIST / LSUB / STATUS / LOGOUT / NOOP → stay AUTHENTICATED
|
||||
|
||||
SELECTED
|
||||
→ FETCH / UID FETCH / SEARCH / EXAMINE / SELECT → stay SELECTED
|
||||
→ CLOSE / LOGOUT → AUTHENTICATED or closed
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Files to change
|
||||
|
||||
| File | Change |
|
||||
|------|--------|
|
||||
| `templates/imap/server.py` | Full rewrite: state machine, credential check, mailbox commands, bait emails |
|
||||
| `templates/pop3/server.py` | Extend: credential check, STAT/LIST/RETR/UIDL/TOP/DELE/CAPA |
|
||||
| `tests/test_imap.py` | New: login flow, SELECT, FETCH, bad creds, all mailboxes |
|
||||
| `tests/test_pop3.py` | New: login flow, STAT, LIST, RETR, CAPA |
|
||||
|
||||
---
|
||||
|
||||
## Implementation notes
|
||||
|
||||
- All bait emails are hardcoded Python strings — no files to load, no I/O.
|
||||
- Use a module-level `MESSAGES: list[dict]` list with fields: `uid`, `flags`, `size`, `date`,
|
||||
`from_`, `to`, `subject`, `body` (full RFC 2822 string).
|
||||
- `_format_envelope()` builds the IMAP ENVELOPE tuple string from the message dict.
|
||||
- Thread safety: all state per-connection in the Protocol class. No shared mutable state.
|
||||
|
||||
---
|
||||
|
||||
## Env vars
|
||||
|
||||
| Var | Default | Description |
|
||||
|-----|---------|-------------|
|
||||
| `IMAP_USERS` | `admin:admin,root:root,mail:mail` | Accepted credentials (user:pass,...) |
|
||||
| `IMAP_BANNER` | `* OK Dovecot ready.` | Greeting line |
|
||||
| `NODE_NAME` | `mailserver` | Hostname in responses |
|
||||
|
||||
---
|
||||
|
||||
## Verification against live decky
|
||||
|
||||
```bash
|
||||
# Credential test (should accept)
|
||||
printf "A1 LOGIN admin admin\r\nA2 SELECT INBOX\r\nA3 FETCH 1:3 (FLAGS ENVELOPE)\r\nA4 FETCH 5 BODY[]\r\nA5 LOGOUT\r\n" | nc 192.168.1.200 143
|
||||
|
||||
# Credential test (should reject)
|
||||
printf "A1 LOGIN admin wrongpass\r\n" | nc 192.168.1.200 143
|
||||
|
||||
# nmap fingerprint check (expect "Dovecot imapd")
|
||||
nmap -p 143 -sV 192.168.1.200
|
||||
```
|
||||
@@ -54,3 +54,4 @@ This initial test doesn't seem to be working. Might be that I'm using WSL, so I
|
||||
|
||||
---
|
||||
|
||||
# End of Notes
|
||||
403
development/REALISM_AUDIT.md
Normal file
403
development/REALISM_AUDIT.md
Normal file
@@ -0,0 +1,403 @@
|
||||
# Service Realism Audit
|
||||
|
||||
> Live-tested against `192.168.1.200` (omega-decky, full-audit.ini).
|
||||
> Every result below is from an actual `nc` or `nmap` probe, not code reading.
|
||||
|
||||
---
|
||||
|
||||
## nmap -sV Summary
|
||||
|
||||
```
|
||||
21/tcp ftp vsftpd (before 2.0.8) or WU-FTPD ← WRONG: banner says "Twisted 25.5.0"
|
||||
23/tcp telnet (unrecognized — Cowrie)
|
||||
25/tcp smtp Postfix smtpd ✓
|
||||
80/tcp http Apache httpd 2.4.54 ((Debian)) ✓ BUT leaks Werkzeug
|
||||
110/tcp pop3 (unrecognized)
|
||||
143/tcp imap (unrecognized)
|
||||
389/tcp ldap Cisco LDAP server
|
||||
445/tcp microsoft-ds ✓
|
||||
1433/tcp ms-sql-s? (partially recognized)
|
||||
1883/tcp mqtt ✓
|
||||
2375/tcp docker Docker 24.0.5 ✓
|
||||
3306/tcp mysql MySQL 5.7.38-log ✓
|
||||
3389/tcp ms-wbt-server xrdp
|
||||
5060/tcp sip SIP endpoint; Status: 401 Unauthorized ✓
|
||||
5432/tcp postgresql? (partially recognized)
|
||||
5900/tcp vnc VNC protocol 3.8 ✓
|
||||
6379/tcp redis? (partially recognized)
|
||||
6443/tcp (unrecognized) — K8s not responding at all
|
||||
9200/tcp wap-wsp? (completely unrecognized — ES)
|
||||
27017/tcp mongod? (partially recognized)
|
||||
502/tcp CLOSED — Conpot Modbus not on this port
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Service-by-Service
|
||||
|
||||
---
|
||||
|
||||
### SMTP — port 25
|
||||
|
||||
**Probe:**
|
||||
```
|
||||
220 omega-decky ESMTP Postfix (Debian/GNU)
|
||||
250-PIPELINING / SIZE / VRFY / AUTH PLAIN LOGIN / ENHANCEDSTATUSCODES / 8BITMIME / DSN
|
||||
250 2.1.0 Ok ← MAIL FROM accepted
|
||||
250 2.1.5 Ok ← RCPT TO accepted for any domain ✓ (open relay bait)
|
||||
354 End data with... ← DATA opened
|
||||
502 5.5.2 Error: command not recognized ← BUG: each message line fails
|
||||
221 2.0.0 Bye
|
||||
```
|
||||
|
||||
**Verdict:** Banner and EHLO are perfect. DATA handler is broken — server reads the socket line-by-line but the asyncio handler dispatches each line as a new command instead of buffering until `.\r\n`. The result is every line of the email body gets a 502 and the message is silently dropped.
|
||||
|
||||
**Fixes needed:**
|
||||
- Buffer DATA state until `\r\n.\r\n` terminator
|
||||
- Return `250 2.0.0 Ok: queued as <12-hex-id>` after message accepted (matches the `_rand_msg_id()` helper in `SMTP_RELAY.md`)
|
||||
- Don't require AUTH for relay (open relay is the point)
|
||||
- Optionally: store message content so IMAP can serve it later
|
||||
|
||||
---
|
||||
|
||||
### IMAP — port 143
|
||||
|
||||
**Probe:**
|
||||
```
|
||||
* OK [omega-decky] IMAP4rev1 Service Ready
|
||||
A1 OK CAPABILITY completed
|
||||
A2 NO [AUTHENTICATIONFAILED] Invalid credentials ← always, for any user/pass
|
||||
A3 BAD Command not recognized ← LIST, SELECT, FETCH all unknown
|
||||
```
|
||||
|
||||
**Verdict:** Login always fails. No mailbox commands implemented. An attacker who tries credential stuffing or default passwords (admin/admin, root/root) gets nothing and moves on. This is the biggest missed opportunity in the whole stack.
|
||||
|
||||
**Fixes needed:**
|
||||
- Accept configurable credentials (default `admin`/`admin` or pulled from persona config)
|
||||
- Implement: SELECT, LIST, FETCH, UID FETCH, SEARCH, LOGOUT
|
||||
- Serve seeded fake mailboxes with bait content (see IMAP_BAIT.md)
|
||||
- CAPABILITY should advertise `LITERAL+`, `SASL-IR`, `LOGIN-REFERRALS`, `ID`, `ENABLE`, `IDLE`
|
||||
- Banner should hint at Dovecot: `* OK Dovecot ready.`
|
||||
|
||||
---
|
||||
|
||||
### POP3 — port 110
|
||||
|
||||
**Probe:**
|
||||
```
|
||||
+OK omega-decky POP3 server ready
|
||||
+OK ← USER accepted
|
||||
-ERR Authentication failed ← always
|
||||
-ERR Unknown command ← STAT, LIST, RETR all unknown
|
||||
```
|
||||
|
||||
**Verdict:** Same problem as IMAP. CAPA only returns `USER`. Should be paired with IMAP fix to serve the same fake mailbox.
|
||||
|
||||
**Fixes needed:**
|
||||
- Accept same credentials as IMAP
|
||||
- Implement: STAT, LIST, RETR, DELE, TOP, UIDL, CAPA
|
||||
- CAPA should return: `TOP UIDL RESP-CODES AUTH-RESP-CODE SASL USER`
|
||||
|
||||
---
|
||||
|
||||
### HTTP — port 80
|
||||
|
||||
**Probe:**
|
||||
```
|
||||
HTTP/1.1 403 FORBIDDEN
|
||||
Server: Werkzeug/3.1.8 Python/3.11.2 ← DEAD GIVEAWAY
|
||||
Server: Apache/2.4.54 (Debian) ← duplicate Server header
|
||||
```
|
||||
|
||||
**Verdict:** nmap gets the Apache fingerprint right, but any attacker who looks at response headers sees two `Server:` headers — one of which is clearly Werkzeug/Flask. The HTTP body is also a bare `<h1>403 Forbidden</h1>` with no Apache default page styling.
|
||||
|
||||
**Fixes needed:**
|
||||
- Strip Werkzeug from Server header (set `SERVER_NAME` on the Flask app or use middleware to overwrite)
|
||||
- Apache default 403 page should be the actual Apache HTML, not a bare `<h1>` tag
|
||||
- Per-path routing for fake apps: `/wp-login.php`, `/wp-admin/`, `/xmlrpc.php`, etc.
|
||||
- POST credential capture on login endpoints
|
||||
|
||||
---
|
||||
|
||||
### FTP — port 21
|
||||
|
||||
**Probe:**
|
||||
```
|
||||
220 Twisted 25.5.0 FTP Server ← terrible: exposes framework
|
||||
331 Guest login ok...
|
||||
550 Requested action not taken ← after login, nothing works
|
||||
503 Incorrect sequence of commands: must send PORT or PASV before RETR
|
||||
```
|
||||
|
||||
**Verdict:** Banner immediately identifies this as Twisted's built-in FTP server. No directory listing. PASV mode not implemented so clients hang. Real FTP honeypots should expose anonymous access with a fake directory tree containing interesting-sounding files.
|
||||
|
||||
**Fixes needed:**
|
||||
- Override banner to: `220 (vsFTPd 3.0.3)` or similar
|
||||
- Implement anonymous login (no password required)
|
||||
- Implement PASV and at minimum LIST — return a fake directory with files: `backup.tar.gz`, `db_dump.sql`, `config.ini`, `credentials.txt`
|
||||
- Log any RETR attempts (file name, client IP)
|
||||
|
||||
---
|
||||
|
||||
### MySQL — port 3306
|
||||
|
||||
**Probe:**
|
||||
```
|
||||
HANDSHAKE: ...5.7.38-log...
|
||||
Version: 5.7.38-log
|
||||
```
|
||||
|
||||
**Verdict:** Handshake is excellent. nmap fingerprints it perfectly. Always returns `Access denied` which is correct behavior. The only issue is the hardcoded auth plugin data bytes in the greeting — a sophisticated scanner could detect the static challenge.
|
||||
|
||||
**Fixes needed (low priority):**
|
||||
- Randomize the 20-byte auth plugin data per connection
|
||||
|
||||
---
|
||||
|
||||
### PostgreSQL — port 5432
|
||||
|
||||
**Probe:**
|
||||
```
|
||||
R\x00\x00\x00\x0c\x00\x00\x00\x05\xde\xad\xbe\xef
|
||||
```
|
||||
That's `AuthenticationMD5Password` (type=5) with salt `0xdeadbeef`.
|
||||
|
||||
**Verdict:** Correct protocol response. Salt is hardcoded and static — `deadbeef` is trivially identifiable as fake.
|
||||
|
||||
**Fixes needed (low priority):**
|
||||
- Randomize the 4-byte MD5 salt per connection
|
||||
|
||||
---
|
||||
|
||||
### MSSQL — port 1433
|
||||
|
||||
**Probe:** No response to standard TDS pre-login packets. Server drops connection immediately.
|
||||
|
||||
**Verdict:** Broken. TDS pre-login handler is likely mismatching the packet format we sent.
|
||||
|
||||
**Fixes needed:**
|
||||
- Debug TDS pre-login response — currently silent
|
||||
- Verify the hardcoded TDS response bytes are valid
|
||||
|
||||
---
|
||||
|
||||
### Redis — port 6379
|
||||
|
||||
**Probe:**
|
||||
```
|
||||
+OK ← AUTH accepted (any password!)
|
||||
$150
|
||||
redis_version:7.2.7 / os:Linux 5.15.0 / uptime_in_seconds:864000 ...
|
||||
*0 ← KEYS * returns empty
|
||||
```
|
||||
|
||||
**Verdict:** Accepts any AUTH password (intentional for bait). INFO looks real. But `KEYS *` returns nothing — a real Redis exposed to the internet always has data. An attacker who gets `+OK` on AUTH will immediately run `KEYS *` or `SCAN 0` and leave when they find nothing.
|
||||
|
||||
**Fixes needed:**
|
||||
- Add fake key-value store: session tokens, JWT secrets, cached user objects, API keys
|
||||
- `KEYS *` → `["sessions:user:1234", "cache:api_key", "jwt:secret", "user:admin"]`
|
||||
- `GET sessions:user:1234` → JSON user object with credentials
|
||||
- `GET jwt:secret` → a plausible JWT signing key
|
||||
|
||||
---
|
||||
|
||||
### MongoDB — port 27017
|
||||
|
||||
**Probe:** No response to OP_MSG `isMaster` command.
|
||||
|
||||
**Verdict:** Broken or rejecting the wire protocol format we sent.
|
||||
|
||||
**Fixes needed:**
|
||||
- Debug the OP_MSG/OP_QUERY handler
|
||||
|
||||
---
|
||||
|
||||
### Elasticsearch — port 9200
|
||||
|
||||
**Probe:**
|
||||
```json
|
||||
{"name":"omega-decky","cluster_uuid":"xC3Pr9abTq2mNkOeLvXwYA","version":{"number":"7.17.9",...}}
|
||||
/_cat/indices → [] ← empty: dead giveaway
|
||||
```
|
||||
|
||||
**Verdict:** Root response is convincing. But `/_cat/indices` returns an empty array — a real exposed ES instance has indices. nmap doesn't recognize port 9200 as Elasticsearch at all ("wap-wsp?").
|
||||
|
||||
**Fixes needed:**
|
||||
- Add fake indices: `logs-2024.01`, `users`, `products`, `audit_trail`
|
||||
- `/_cat/indices` → return rows with doc counts, sizes
|
||||
- `/_search` on those indices → return sample documents (bait data: user records, API tokens)
|
||||
|
||||
---
|
||||
|
||||
### Docker API — port 2375
|
||||
|
||||
**Probe:**
|
||||
```json
|
||||
/version → {Version: "24.0.5", ApiVersion: "1.43", GoVersion: "go1.20.6", ...} ✓
|
||||
/containers/json → [{"Id":"a1b2c3d4e5f6","Names":["/webapp"],"Image":"nginx:latest",...}]
|
||||
```
|
||||
|
||||
**Verdict:** Version response is perfect. Container list is minimal (one hardcoded container). No `/images/json` data, no exec endpoint. An attacker will immediately try `POST /containers/webapp/exec` to get RCE.
|
||||
|
||||
**Fixes needed:**
|
||||
- Add 3-5 containers with realistic names/images: `db` (postgres:14), `api` (node:18-alpine), `redis` (redis:7)
|
||||
- Add `/images/json` with corresponding images
|
||||
- Add exec endpoint that captures the command and returns `{"Id":"<random>"}` then a fake stream
|
||||
|
||||
---
|
||||
|
||||
### SMB — port 445
|
||||
|
||||
**Probe:** SMB1 negotiate response received (standard `\xff\x53\x4d\x42r` header).
|
||||
|
||||
**Verdict:** Impacket SimpleSMBServer responds. nmap IDs it as `microsoft-ds`. Functional enough for credential capture.
|
||||
|
||||
---
|
||||
|
||||
### VNC — port 5900
|
||||
|
||||
**Probe:**
|
||||
```
|
||||
RFB 003.008 ✓
|
||||
```
|
||||
|
||||
**Verdict:** Correct RFB 3.8 handshake. nmap fingerprints it as VNC protocol 3.8. The 16-byte DES challenge is hardcoded — same bytes every time.
|
||||
|
||||
**Fixes needed (trivial):**
|
||||
- Randomize the 16-byte challenge per connection (`os.urandom(16)`)
|
||||
|
||||
---
|
||||
|
||||
### RDP — port 3389
|
||||
|
||||
**Probe:**
|
||||
```
|
||||
0300000b06d00000000000 ← X.224 Connection Confirm
|
||||
(connection closed)
|
||||
```
|
||||
|
||||
**Verdict:** nmap identifies it as "xrdp" which is correct enough. The X.224 CC is fine. But the server closes immediately after — no NLA/CredSSP negotiation, no credential capture. This is the single biggest missed opportunity for credential harvesting after SSH.
|
||||
|
||||
**Fixes needed:**
|
||||
- Implement NTLM Type-1/Type-2/Type-3 exchange to capture NTLMv2 hashes
|
||||
- Alternatively: send a fake TLS certificate then disconnect (many scanners fingerprint by the cert)
|
||||
|
||||
---
|
||||
|
||||
### SIP — port 5060
|
||||
|
||||
**Probe:**
|
||||
```
|
||||
SIP/2.0 401 Unauthorized
|
||||
WWW-Authenticate: Digest realm="omega-decky", nonce="decnet0000", algorithm=MD5
|
||||
```
|
||||
|
||||
**Verdict:** Functional. Correctly challenges with 401. But `nonce="decnet0000"` is a hardcoded string — a Shodan signature would immediately pick this up.
|
||||
|
||||
**Fixes needed (low effort):**
|
||||
- Generate a random hex nonce per connection
|
||||
|
||||
---
|
||||
|
||||
### MQTT — port 1883
|
||||
|
||||
**Probe:** `CONNACK` with return code `0x05` (not authorized).
|
||||
|
||||
**Verdict:** Rejects all connections. For an ICS/water-plant persona, this should accept connections and expose fake sensor topics. See `ICS_SCADA.md`.
|
||||
|
||||
**Fixes needed:**
|
||||
- Return CONNACK 0x00 (accepted)
|
||||
- Implement SUBSCRIBE: return retained sensor readings for bait topics
|
||||
- Implement PUBLISH: log any published commands (attacker trying to control plant)
|
||||
|
||||
---
|
||||
|
||||
### SNMP — port 161/UDP
|
||||
|
||||
Not directly testable without sudo for raw UDP send, but code review shows BER encoding is correct.
|
||||
|
||||
**Verdict:** Functional. sysDescr is a generic Linux string — should be tuned per archetype.
|
||||
|
||||
---
|
||||
|
||||
### LDAP — port 389
|
||||
|
||||
**Probe:** BER response received (code 49 = invalidCredentials).
|
||||
|
||||
**Verdict:** Correct protocol. nmap IDs it as "Cisco LDAP server" which is fine. No rootDSE response for unauthenticated enumeration.
|
||||
|
||||
---
|
||||
|
||||
### Telnet — port 23 (Cowrie)
|
||||
|
||||
**Probe:**
|
||||
```
|
||||
login: <IAC WILL ECHO>
|
||||
Password:
|
||||
Login incorrect ← for all tried credentials
|
||||
```
|
||||
|
||||
**Verdict:** Cowrie is running but rejecting everything. Default Cowrie credentials (root/1234, admin/admin, etc.) should work. May be a config issue with the decky hostname or user database.
|
||||
|
||||
---
|
||||
|
||||
### Conpot — port 502
|
||||
|
||||
**Verdict:** Not responding on port 502 (Modbus TCP). Conpot may use a different internal port that gets NAT'd, or it's not configured for Modbus. Needs investigation.
|
||||
|
||||
---
|
||||
|
||||
## Bug Ledger
|
||||
|
||||
| # | Service | Bug | Severity |
|
||||
|---|------------|-------------------------------------------|----------|
|
||||
| 1 | SMTP | DATA handler returns 502 for every line | Critical |
|
||||
| 2 | HTTP | Werkzeug in Server header + bare 403 body | High |
|
||||
| 3 | FTP | "Twisted 25.5.0" in banner | High |
|
||||
| 4 | MSSQL | No response to TDS pre-login | High |
|
||||
| 5 | MongoDB | No response to OP_MSG isMaster | High |
|
||||
| 6 | K8s | Not responding (TLS setup?) | Medium |
|
||||
| 7 | IMAP/POP3 | Always rejects, no mailbox ops | Critical (feature gap) |
|
||||
| 8 | Redis | Empty keyspace after AUTH success | Medium |
|
||||
| 9 | SIP/VNC | Hardcoded nonce/challenge | Low |
|
||||
| 10| MQTT | Rejects all connections | High (ICS feature gap) |
|
||||
| 11| Conpot | No Modbus response | Medium |
|
||||
| 12| PostgreSQL | Hardcoded salt `deadbeef` | Low |
|
||||
|
||||
---
|
||||
|
||||
## Related Plans
|
||||
|
||||
- [`SMTP_RELAY.md`](SMTP_RELAY.md) — Fix DATA handler, implement open relay persona
|
||||
- [`IMAP_BAIT.md`](IMAP_BAIT.md) — Auth + seeded mailboxes + POP3 parity
|
||||
- [`ICS_SCADA.md`](ICS_SCADA.md) — MQTT water plant, SNMP tuning, Conpot
|
||||
- [`BUG_FIXES.md`](BUG_FIXES.md) — HTTP header leak, FTP banner, MSSQL, MongoDB, Redis keys
|
||||
|
||||
---
|
||||
|
||||
## Progress Updates
|
||||
|
||||
### [2026-04-10] ICS/SCADA & IMAP Bait Completion
|
||||
The following infrastructure gaps from the Bug Ledger have been successfully resolved:
|
||||
* **#7 (IMAP/POP3):** Both services now implement full protocol state machines (authentication, selection/transactions, fetching) and serve realistic hardcoded bait payloads (AWS keys, DB passwords).
|
||||
* **#10 (MQTT):** The service now issues successful `CONNACK` responses, presents interactive persona-driven topic trees, and logs attacker `PUBLISH` events.
|
||||
* **#11 (Conpot):** Wrapped in a custom build context that correctly binds Modbus to port `502` using a temporary template overwrite, resolving the missing Modbus response issue.
|
||||
|
||||
---
|
||||
|
||||
## Implementation Plan
|
||||
|
||||
### Phase 3: Critical SMTP Data Handling (P0)
|
||||
- **SMTP (`SMTP_RELAY.md`)**: Rewrite `templates/smtp/server.py` to buffer `DATA` blocks properly and respond to `DATA` termination with a legitimate `250 OK` queue ID. Accept all open relay behavior inherently without mandating `AUTH`.
|
||||
|
||||
### Phase 4: High-Severity Protocol Fingerprint Fixes (P1)
|
||||
- **HTTP**: Hijack Flask `after_request` to enforce the Apache `Server` header in `templates/http/server.py`. Rewrite the 403 response body with authentic Apache HTML.
|
||||
- **FTP**: Update `templates/ftp/server.py` to overwrite Twisted FTP greeting banner to `vsFTPd`. Implement `FTPAnonymousShell` to serve fake files (tarball, db dump, credentials).
|
||||
- **MSSQL**: Update `templates/mssql/server.py` to emit a valid length-fixed TDS 7.x pre-login payload to successfully pass the nmap probe.
|
||||
- **MongoDB**: Update `templates/mongodb/server.py` to respond to the `OP_MSG isMaster` requests generated by modern `nmap` and MongoDB clients.
|
||||
|
||||
### Phase 5: State & Realism Improvements (P2)
|
||||
- **Redis**: Instantiate `_FAKE_STORE` dict with bait authentication tokens and JWT salts in `templates/redis/server.py` to return plausible data for `KEYS *`, `GET`, `SCAN`, etc.
|
||||
- **Dynamic Nonces (SIP/VNC/Postgres)**: Use `os.urandom()` and `secrets` to dynamically generate salts/nonces per connection instead of hardcoded strings in `templates/postgres/server.py`, `templates/sip/server.py`, and `templates/vnc/server.py`.
|
||||
- **K8s (Kubernetes API)**: Investigate TLS setup block for K8s API port `6443` dropping traffic, pending an actual solution (requires deeper analysis and likely a separate plan).
|
||||
|
||||
195
development/SMTP_RELAY.md
Normal file
195
development/SMTP_RELAY.md
Normal file
@@ -0,0 +1,195 @@
|
||||
# SMTP Open Relay — Plan
|
||||
|
||||
> Priority: **P0** — DATA handler is broken (502 on every body line).
|
||||
> Scenario: attacker finds open relay, sends mail through it.
|
||||
|
||||
---
|
||||
|
||||
## What's broken today
|
||||
|
||||
`templates/smtp/server.py` sends `354 End data with <CR><LF>.<CR><LF>` on `DATA`, then
|
||||
falls back to `_handle_line()` for every subsequent line. Because those lines don't start
|
||||
with a recognized SMTP verb, every line gets:
|
||||
|
||||
```
|
||||
502 5.5.2 Error: command not recognized
|
||||
```
|
||||
|
||||
The session never completes. The message is silently dropped.
|
||||
|
||||
---
|
||||
|
||||
## Fix: DATA state machine
|
||||
|
||||
Add a `_in_data` flag. Once `DATA` is received, accumulate raw body lines until the
|
||||
terminator `\r\n.\r\n`. On terminator: log the message, return `250`, flip flag back.
|
||||
|
||||
### State variables added to `SMTPProtocol.__init__`
|
||||
|
||||
```python
|
||||
self._in_data = False
|
||||
self._data_buf = [] # accumulate body lines
|
||||
self._mail_from = ""
|
||||
self._rcpt_to = []
|
||||
```
|
||||
|
||||
### Modified `data_received`
|
||||
|
||||
No change — still splits on `\r\n`.
|
||||
|
||||
### Modified `_handle_line`
|
||||
|
||||
```python
|
||||
def _handle_line(self, line: str) -> None:
|
||||
# DATA body accumulation mode
|
||||
if self._in_data:
|
||||
if line == ".":
|
||||
# end of message
|
||||
body = "\r\n".join(self._data_buf)
|
||||
msg_id = _rand_msg_id()
|
||||
_log("message_accepted",
|
||||
src=self._peer[0],
|
||||
mail_from=self._mail_from,
|
||||
rcpt_to=",".join(self._rcpt_to),
|
||||
body_bytes=len(body),
|
||||
msg_id=msg_id)
|
||||
self._transport.write(f"250 2.0.0 Ok: queued as {msg_id}\r\n".encode())
|
||||
self._in_data = False
|
||||
self._data_buf = []
|
||||
else:
|
||||
            # RFC 5321 dot-stuffing: the sender doubles a leading dot, so strip the extra dot
|
||||
self._data_buf.append(line[1:] if line.startswith("..") else line)
|
||||
return
|
||||
|
||||
cmd = line.split()[0].upper() if line.split() else ""
|
||||
# ... existing handlers ...
|
||||
elif cmd == "MAIL":
|
||||
self._mail_from = line.split(":", 1)[1].strip() if ":" in line else line
|
||||
_log("mail_from", src=self._peer[0], value=self._mail_from)
|
||||
self._transport.write(b"250 2.0.0 Ok\r\n")
|
||||
elif cmd == "RCPT":
|
||||
rcpt = line.split(":", 1)[1].strip() if ":" in line else line
|
||||
self._rcpt_to.append(rcpt)
|
||||
_log("rcpt_to", src=self._peer[0], value=rcpt)
|
||||
self._transport.write(b"250 2.1.5 Ok\r\n")
|
||||
elif cmd == "DATA":
|
||||
if not self._mail_from or not self._rcpt_to:
|
||||
self._transport.write(b"503 5.5.1 Error: need MAIL command\r\n")
|
||||
else:
|
||||
self._in_data = True
|
||||
self._transport.write(b"354 End data with <CR><LF>.<CR><LF>\r\n")
|
||||
elif cmd == "RSET":
|
||||
self._mail_from = ""
|
||||
self._rcpt_to = []
|
||||
self._in_data = False
|
||||
self._data_buf = []
|
||||
self._transport.write(b"250 2.0.0 Ok\r\n")
|
||||
```
|
||||
|
||||
### Helper
|
||||
|
||||
```python
|
||||
import random, string
|
||||
|
||||
def _rand_msg_id() -> str:
|
||||
"""Return a Postfix-style 12-char hex queue ID."""
|
||||
return "".join(random.choices("0123456789ABCDEF", k=12))
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Open relay behavior
|
||||
|
||||
The current server already returns `250 2.1.5 Ok` for any `RCPT TO` regardless of domain.
|
||||
That's correct — do NOT gate on the domain. The attacker's goal is to relay spam. We let
|
||||
them "succeed" and log everything.
|
||||
|
||||
Remove the `AUTH` rejection + close. An open relay doesn't require authentication. Replace:
|
||||
|
||||
```python
|
||||
elif cmd == "AUTH":
|
||||
_log("auth_attempt", src=self._peer[0], command=line)
|
||||
self._transport.write(b"535 5.7.8 Error: authentication failed: ...\r\n")
|
||||
self._transport.close()
|
||||
```
|
||||
|
||||
With:
|
||||
|
||||
```python
|
||||
elif cmd == "AUTH":
|
||||
# Log the attempt but advertise that auth succeeds (open relay bait)
|
||||
_log("auth_attempt", src=self._peer[0], command=line)
|
||||
self._transport.write(b"235 2.7.0 Authentication successful\r\n")
|
||||
```
|
||||
|
||||
Some scanners probe AUTH before DATA. Accepting it keeps them engaged.
|
||||
|
||||
---
|
||||
|
||||
## Banner / persona
|
||||
|
||||
Current banner is already perfect: `220 omega-decky ESMTP Postfix (Debian/GNU)`.
|
||||
|
||||
The `SMTP_BANNER` env var lets per-decky customization happen at deploy time via the
|
||||
persona config — no code change needed.
|
||||
|
||||
---
|
||||
|
||||
## Log events emitted
|
||||
|
||||
| event_type | Fields |
|
||||
|------------------|---------------------------------------------------|
|
||||
| `connect` | src, src_port |
|
||||
| `ehlo` | src, domain |
|
||||
| `auth_attempt` | src, command |
|
||||
| `mail_from` | src, value |
|
||||
| `rcpt_to` | src, value (one event per recipient) |
|
||||
| `message_accepted` | src, mail_from, rcpt_to, body_bytes, msg_id |
|
||||
| `disconnect` | src |
|
||||
|
||||
---
|
||||
|
||||
## Files to change
|
||||
|
||||
| File | Change |
|
||||
|------|--------|
|
||||
| `templates/smtp/server.py` | DATA state machine, open relay AUTH accept, RSET fix |
|
||||
| `tests/test_smtp.py` | New: DATA → 250 flow, multi-recipient, dot-stuffing, RSET |
|
||||
|
||||
---
|
||||
|
||||
## Test cases (pytest)
|
||||
|
||||
```python
|
||||
# full send flow
|
||||
conn → EHLO → MAIL FROM → RCPT TO → DATA → body lines → "." → 250 2.0.0 Ok: queued as ...
|
||||
|
||||
# multi-recipient
|
||||
RCPT TO x3 → DATA → body → "." → 250
|
||||
|
||||
# dot-stuffing
|
||||
..real dot → body line stored as ".real dot"
|
||||
|
||||
# RSET mid-session
|
||||
MAIL FROM → RCPT TO → RSET → assert _mail_from == "" and _rcpt_to == []
|
||||
|
||||
# AUTH accept
|
||||
AUTH PLAIN base64 → 235
|
||||
|
||||
# 503 if DATA before MAIL
|
||||
DATA (no prior MAIL) → 503
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Verification against live decky
|
||||
|
||||
```bash
|
||||
# Full relay test
|
||||
printf "EHLO test.com\r\nMAIL FROM:<hacker@evil.com>\r\nRCPT TO:<admin@target.com>\r\nDATA\r\nSubject: hello\r\n\r\nBody line 1\r\nBody line 2\r\n.\r\nQUIT\r\n" | nc 192.168.1.200 25
|
||||
|
||||
# Expected final lines:
|
||||
# 354 End data with ...
|
||||
# 250 2.0.0 Ok: queued as <ID>
|
||||
# 221 2.0.0 Bye
|
||||
```
|
||||
419
development/ast_graph.md
Normal file
419
development/ast_graph.md
Normal file
@@ -0,0 +1,419 @@
|
||||
# DECNET Codebase AST Graph
|
||||
|
||||
This diagram shows the structural organization of the DECNET project, extracted directly from the Python Abstract Syntax Tree (AST). It includes modules (prefixed with `Module_`), their internal functions, and the classes and methods they contain.
|
||||
|
||||
```mermaid
|
||||
classDiagram
|
||||
class Module_distros {
|
||||
+random_hostname()
|
||||
+get_distro()
|
||||
+random_distro()
|
||||
+all_distros()
|
||||
}
|
||||
class distros_DistroProfile {
|
||||
}
|
||||
Module_distros ..> distros_DistroProfile : contains
|
||||
|
||||
class custom_service_CustomService {
|
||||
+__init__()
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_custom_service ..> custom_service_CustomService : contains
|
||||
class Module_os_fingerprint {
|
||||
+get_os_sysctls()
|
||||
+all_os_families()
|
||||
}
|
||||
|
||||
class Module_network {
|
||||
+_run()
|
||||
+detect_interface()
|
||||
+detect_subnet()
|
||||
+get_host_ip()
|
||||
+allocate_ips()
|
||||
+create_macvlan_network()
|
||||
+create_ipvlan_network()
|
||||
+remove_macvlan_network()
|
||||
+_require_root()
|
||||
+setup_host_macvlan()
|
||||
+teardown_host_macvlan()
|
||||
+setup_host_ipvlan()
|
||||
+teardown_host_ipvlan()
|
||||
+ips_to_range()
|
||||
}
|
||||
|
||||
class Module_env {
|
||||
+_port()
|
||||
+_require_env()
|
||||
}
|
||||
|
||||
class Module_config {
|
||||
+random_hostname()
|
||||
+save_state()
|
||||
+load_state()
|
||||
+clear_state()
|
||||
}
|
||||
class config_DeckyConfig {
|
||||
+services_not_empty()
|
||||
}
|
||||
Module_config ..> config_DeckyConfig : contains
|
||||
class config_DecnetConfig {
|
||||
}
|
||||
Module_config ..> config_DecnetConfig : contains
|
||||
class Module_ini_loader {
|
||||
+load_ini()
|
||||
+load_ini_from_string()
|
||||
+validate_ini_string()
|
||||
+_parse_configparser()
|
||||
}
|
||||
class ini_loader_DeckySpec {
|
||||
}
|
||||
Module_ini_loader ..> ini_loader_DeckySpec : contains
|
||||
class ini_loader_CustomServiceSpec {
|
||||
}
|
||||
Module_ini_loader ..> ini_loader_CustomServiceSpec : contains
|
||||
class ini_loader_IniConfig {
|
||||
}
|
||||
Module_ini_loader ..> ini_loader_IniConfig : contains
|
||||
class Module_composer {
|
||||
+generate_compose()
|
||||
+write_compose()
|
||||
}
|
||||
|
||||
class Module_archetypes {
|
||||
+get_archetype()
|
||||
+all_archetypes()
|
||||
+random_archetype()
|
||||
}
|
||||
class archetypes_Archetype {
|
||||
}
|
||||
Module_archetypes ..> archetypes_Archetype : contains
|
||||
class Module_fleet {
|
||||
+all_service_names()
|
||||
+resolve_distros()
|
||||
+build_deckies()
|
||||
+build_deckies_from_ini()
|
||||
}
|
||||
|
||||
class Module_cli {
|
||||
+_kill_api()
|
||||
+api()
|
||||
+deploy()
|
||||
+collect()
|
||||
+mutate()
|
||||
+status()
|
||||
+teardown()
|
||||
+list_services()
|
||||
+list_distros()
|
||||
+correlate()
|
||||
+list_archetypes()
|
||||
+serve_web()
|
||||
}
|
||||
|
||||
|
||||
class services_base_BaseService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_base ..> services_base_BaseService : contains
|
||||
|
||||
class services_http_HTTPService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_http ..> services_http_HTTPService : contains
|
||||
|
||||
class services_smtp_SMTPService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_smtp ..> services_smtp_SMTPService : contains
|
||||
|
||||
class services_mysql_MySQLService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_mysql ..> services_mysql_MySQLService : contains
|
||||
|
||||
class services_redis_RedisService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_redis ..> services_redis_RedisService : contains
|
||||
|
||||
class services_elasticsearch_ElasticsearchService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_elasticsearch ..> services_elasticsearch_ElasticsearchService : contains
|
||||
|
||||
class services_ftp_FTPService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_ftp ..> services_ftp_FTPService : contains
|
||||
|
||||
class services_imap_IMAPService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_imap ..> services_imap_IMAPService : contains
|
||||
|
||||
class services_k8s_KubernetesAPIService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_k8s ..> services_k8s_KubernetesAPIService : contains
|
||||
|
||||
class services_ldap_LDAPService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_ldap ..> services_ldap_LDAPService : contains
|
||||
|
||||
class services_llmnr_LLMNRService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_llmnr ..> services_llmnr_LLMNRService : contains
|
||||
|
||||
class services_mongodb_MongoDBService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_mongodb ..> services_mongodb_MongoDBService : contains
|
||||
|
||||
class services_mqtt_MQTTService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_mqtt ..> services_mqtt_MQTTService : contains
|
||||
|
||||
class services_mssql_MSSQLService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_mssql ..> services_mssql_MSSQLService : contains
|
||||
|
||||
class services_pop3_POP3Service {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_pop3 ..> services_pop3_POP3Service : contains
|
||||
|
||||
class services_postgres_PostgresService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_postgres ..> services_postgres_PostgresService : contains
|
||||
|
||||
class services_rdp_RDPService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_rdp ..> services_rdp_RDPService : contains
|
||||
|
||||
class services_sip_SIPService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_sip ..> services_sip_SIPService : contains
|
||||
|
||||
class services_smb_SMBService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_smb ..> services_smb_SMBService : contains
|
||||
|
||||
class services_snmp_SNMPService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_snmp ..> services_snmp_SNMPService : contains
|
||||
|
||||
class services_tftp_TFTPService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_tftp ..> services_tftp_TFTPService : contains
|
||||
|
||||
class services_vnc_VNCService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_vnc ..> services_vnc_VNCService : contains
|
||||
|
||||
class services_docker_api_DockerAPIService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_docker_api ..> services_docker_api_DockerAPIService : contains
|
||||
class Module_services_registry {
|
||||
+_load_plugins()
|
||||
+register_custom_service()
|
||||
+get_service()
|
||||
+all_services()
|
||||
}
|
||||
|
||||
|
||||
class services_smtp_relay_SMTPRelayService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_smtp_relay ..> services_smtp_relay_SMTPRelayService : contains
|
||||
|
||||
class services_conpot_ConpotService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_conpot ..> services_conpot_ConpotService : contains
|
||||
|
||||
class services_ssh_SSHService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_ssh ..> services_ssh_SSHService : contains
|
||||
|
||||
class services_telnet_TelnetService {
|
||||
+compose_fragment()
|
||||
+dockerfile_context()
|
||||
}
|
||||
Module_services_telnet ..> services_telnet_TelnetService : contains
|
||||
class Module_logging_forwarder {
|
||||
+parse_log_target()
|
||||
+probe_log_target()
|
||||
}
|
||||
|
||||
class Module_logging_file_handler {
|
||||
+_get_logger()
|
||||
+write_syslog()
|
||||
+get_log_path()
|
||||
}
|
||||
|
||||
class Module_logging_syslog_formatter {
|
||||
+_pri()
|
||||
+_truncate()
|
||||
+_sd_escape()
|
||||
+_sd_element()
|
||||
+format_rfc5424()
|
||||
}
|
||||
|
||||
|
||||
class correlation_graph_TraversalHop {
|
||||
}
|
||||
Module_correlation_graph ..> correlation_graph_TraversalHop : contains
|
||||
class correlation_graph_AttackerTraversal {
|
||||
+first_seen()
|
||||
+last_seen()
|
||||
+duration_seconds()
|
||||
+deckies()
|
||||
+decky_count()
|
||||
+path()
|
||||
+to_dict()
|
||||
}
|
||||
Module_correlation_graph ..> correlation_graph_AttackerTraversal : contains
|
||||
class Module_correlation_engine {
|
||||
+_fmt_duration()
|
||||
}
|
||||
class correlation_engine_CorrelationEngine {
|
||||
+__init__()
|
||||
+ingest()
|
||||
+ingest_file()
|
||||
+traversals()
|
||||
+all_attackers()
|
||||
+report_table()
|
||||
+report_json()
|
||||
+traversal_syslog_lines()
|
||||
}
|
||||
Module_correlation_engine ..> correlation_engine_CorrelationEngine : contains
|
||||
class Module_correlation_parser {
|
||||
+_parse_sd_params()
|
||||
+_extract_attacker_ip()
|
||||
+parse_line()
|
||||
}
|
||||
class correlation_parser_LogEvent {
|
||||
}
|
||||
Module_correlation_parser ..> correlation_parser_LogEvent : contains
|
||||
class Module_web_auth {
|
||||
+verify_password()
|
||||
+get_password_hash()
|
||||
+create_access_token()
|
||||
}
|
||||
|
||||
class Module_engine_deployer {
|
||||
+_sync_logging_helper()
|
||||
+_compose()
|
||||
+_compose_with_retry()
|
||||
+deploy()
|
||||
+teardown()
|
||||
+status()
|
||||
+_print_status()
|
||||
}
|
||||
|
||||
class Module_collector_worker {
|
||||
+parse_rfc5424()
|
||||
+_load_service_container_names()
|
||||
+is_service_container()
|
||||
+is_service_event()
|
||||
+_stream_container()
|
||||
}
|
||||
|
||||
class Module_mutator_engine {
|
||||
+mutate_decky()
|
||||
+mutate_all()
|
||||
+run_watch_loop()
|
||||
}
|
||||
|
||||
|
||||
class web_db_repository_BaseRepository {
|
||||
}
|
||||
Module_web_db_repository ..> web_db_repository_BaseRepository : contains
|
||||
|
||||
class web_db_models_User {
|
||||
}
|
||||
Module_web_db_models ..> web_db_models_User : contains
|
||||
class web_db_models_Log {
|
||||
}
|
||||
Module_web_db_models ..> web_db_models_Log : contains
|
||||
class web_db_models_Bounty {
|
||||
}
|
||||
Module_web_db_models ..> web_db_models_Bounty : contains
|
||||
class web_db_models_Token {
|
||||
}
|
||||
Module_web_db_models ..> web_db_models_Token : contains
|
||||
class web_db_models_LoginRequest {
|
||||
}
|
||||
Module_web_db_models ..> web_db_models_LoginRequest : contains
|
||||
class web_db_models_ChangePasswordRequest {
|
||||
}
|
||||
Module_web_db_models ..> web_db_models_ChangePasswordRequest : contains
|
||||
class web_db_models_LogsResponse {
|
||||
}
|
||||
Module_web_db_models ..> web_db_models_LogsResponse : contains
|
||||
class web_db_models_BountyResponse {
|
||||
}
|
||||
Module_web_db_models ..> web_db_models_BountyResponse : contains
|
||||
class web_db_models_StatsResponse {
|
||||
}
|
||||
Module_web_db_models ..> web_db_models_StatsResponse : contains
|
||||
class web_db_models_MutateIntervalRequest {
|
||||
}
|
||||
Module_web_db_models ..> web_db_models_MutateIntervalRequest : contains
|
||||
class web_db_models_DeployIniRequest {
|
||||
}
|
||||
Module_web_db_models ..> web_db_models_DeployIniRequest : contains
|
||||
class Module_web_db_sqlite_database {
|
||||
+get_async_engine()
|
||||
+get_sync_engine()
|
||||
+init_db()
|
||||
}
|
||||
|
||||
|
||||
class web_db_sqlite_repository_SQLiteRepository {
|
||||
+__init__()
|
||||
+_initialize_sync()
|
||||
+_apply_filters()
|
||||
+_apply_bounty_filters()
|
||||
}
|
||||
Module_web_db_sqlite_repository ..> web_db_sqlite_repository_SQLiteRepository : contains
|
||||
```
|
||||
192
development/complete_execution_graph.md
Normal file
192
development/complete_execution_graph.md
Normal file
@@ -0,0 +1,192 @@
|
||||
# DECNET: Complete Execution Graph
|
||||
|
||||
This diagram represents the absolute complete call graph of the DECNET project. It connects initial entry points (CLI and Web API) through the orchestration layers, down to the low-level network and service container logic.
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
subgraph CLI_Entry
|
||||
cli__kill_api([_kill_api])
|
||||
cli_api([api])
|
||||
cli_deploy([deploy])
|
||||
cli_collect([collect])
|
||||
cli_mutate([mutate])
|
||||
cli_status([status])
|
||||
cli_teardown([teardown])
|
||||
cli_list_services([list_services])
|
||||
cli_list_distros([list_distros])
|
||||
cli_correlate([correlate])
|
||||
cli_list_archetypes([list_archetypes])
|
||||
cli_serve_web([serve_web])
|
||||
cli_do_GET([do_GET])
|
||||
end
|
||||
subgraph Fleet_Management
|
||||
distros_random_hostname([distros_random_hostname])
|
||||
distros_get_distro([distros_get_distro])
|
||||
distros_random_distro([distros_random_distro])
|
||||
distros_all_distros([distros_all_distros])
|
||||
ini_loader_load_ini([ini_loader_load_ini])
|
||||
ini_loader_load_ini_from_string([ini_loader_load_ini_from_string])
|
||||
ini_loader_validate_ini_string([ini_loader_validate_ini_string])
|
||||
ini_loader__parse_configparser([ini_loader__parse_configparser])
|
||||
archetypes_get_archetype([archetypes_get_archetype])
|
||||
archetypes_all_archetypes([archetypes_all_archetypes])
|
||||
archetypes_random_archetype([archetypes_random_archetype])
|
||||
fleet_all_service_names([all_service_names])
|
||||
fleet_resolve_distros([resolve_distros])
|
||||
fleet_build_deckies([build_deckies])
|
||||
fleet_build_deckies_from_ini([build_deckies_from_ini])
|
||||
end
|
||||
subgraph Deployment_Engine
|
||||
network__run([network__run])
|
||||
network_detect_interface([network_detect_interface])
|
||||
network_detect_subnet([network_detect_subnet])
|
||||
network_get_host_ip([network_get_host_ip])
|
||||
network_allocate_ips([network_allocate_ips])
|
||||
network_create_macvlan_network([network_create_macvlan_network])
|
||||
network_create_ipvlan_network([network_create_ipvlan_network])
|
||||
network_remove_macvlan_network([network_remove_macvlan_network])
|
||||
network__require_root([network__require_root])
|
||||
network_setup_host_macvlan([network_setup_host_macvlan])
|
||||
network_teardown_host_macvlan([network_teardown_host_macvlan])
|
||||
network_setup_host_ipvlan([network_setup_host_ipvlan])
|
||||
network_teardown_host_ipvlan([network_teardown_host_ipvlan])
|
||||
network_ips_to_range([network_ips_to_range])
|
||||
config_random_hostname([config_random_hostname])
|
||||
config_save_state([config_save_state])
|
||||
config_load_state([config_load_state])
|
||||
config_clear_state([config_clear_state])
|
||||
composer_generate_compose([composer_generate_compose])
|
||||
composer_write_compose([composer_write_compose])
|
||||
engine_deployer__sync_logging_helper([_sync_logging_helper])
|
||||
engine_deployer__compose([_compose])
|
||||
engine_deployer__compose_with_retry([_compose_with_retry])
|
||||
engine_deployer_deploy([deploy])
|
||||
engine_deployer_teardown([teardown])
|
||||
engine_deployer_status([status])
|
||||
engine_deployer__print_status([_print_status])
|
||||
end
|
||||
subgraph Monitoring_Mutation
|
||||
collector_worker_parse_rfc5424([parse_rfc5424])
|
||||
collector_worker__load_service_container_names([_load_service_container_names])
|
||||
collector_worker_is_service_container([is_service_container])
|
||||
collector_worker_is_service_event([is_service_event])
|
||||
collector_worker__stream_container([_stream_container])
|
||||
collector_worker_log_collector_worker([log_collector_worker])
|
||||
collector_worker__spawn([_spawn])
|
||||
collector_worker__watch_events([_watch_events])
|
||||
mutator_engine_mutate_decky([mutate_decky])
|
||||
mutator_engine_mutate_all([mutate_all])
|
||||
mutator_engine_run_watch_loop([run_watch_loop])
|
||||
end
|
||||
subgraph Web_Service
|
||||
web_auth_verify_password([web_auth_verify_password])
|
||||
web_auth_get_password_hash([web_auth_get_password_hash])
|
||||
web_auth_create_access_token([web_auth_create_access_token])
|
||||
web_db_repository_initialize([web_db_repository_initialize])
|
||||
web_db_repository_add_log([web_db_repository_add_log])
|
||||
web_db_repository_get_logs([web_db_repository_get_logs])
|
||||
web_db_repository_get_total_logs([web_db_repository_get_total_logs])
|
||||
web_db_repository_get_stats_summary([web_db_repository_get_stats_summary])
|
||||
web_db_repository_get_deckies([web_db_repository_get_deckies])
|
||||
web_db_repository_get_user_by_uuid([web_db_repository_get_user_by_uuid])
|
||||
web_db_repository_update_user_password([web_db_repository_update_user_password])
|
||||
web_db_repository_add_bounty([web_db_repository_add_bounty])
|
||||
web_db_repository_get_bounties([web_db_repository_get_bounties])
|
||||
web_db_repository_get_total_bounties([web_db_repository_get_total_bounties])
|
||||
web_db_sqlite_database_get_async_engine([web_db_sqlite_database_get_async_engine])
|
||||
web_db_sqlite_database_get_sync_engine([web_db_sqlite_database_get_sync_engine])
|
||||
web_db_sqlite_database_init_db([web_db_sqlite_database_init_db])
|
||||
web_db_sqlite_repository_initialize([web_db_sqlite_repository_initialize])
|
||||
web_db_sqlite_repository_reinitialize([web_db_sqlite_repository_reinitialize])
|
||||
web_db_sqlite_repository_add_log([web_db_sqlite_repository_add_log])
|
||||
web_db_sqlite_repository__apply_filters([web_db_sqlite_repository__apply_filters])
|
||||
web_db_sqlite_repository_get_logs([web_db_sqlite_repository_get_logs])
|
||||
web_db_sqlite_repository_get_max_log_id([web_db_sqlite_repository_get_max_log_id])
|
||||
web_db_sqlite_repository_get_logs_after_id([web_db_sqlite_repository_get_logs_after_id])
|
||||
web_db_sqlite_repository_get_total_logs([web_db_sqlite_repository_get_total_logs])
|
||||
web_db_sqlite_repository_get_log_histogram([web_db_sqlite_repository_get_log_histogram])
|
||||
web_db_sqlite_repository_get_stats_summary([web_db_sqlite_repository_get_stats_summary])
|
||||
web_db_sqlite_repository_get_deckies([web_db_sqlite_repository_get_deckies])
|
||||
web_db_sqlite_repository_get_user_by_username([web_db_sqlite_repository_get_user_by_username])
|
||||
web_db_sqlite_repository_get_user_by_uuid([web_db_sqlite_repository_get_user_by_uuid])
|
||||
web_db_sqlite_repository_create_user([web_db_sqlite_repository_create_user])
|
||||
web_db_sqlite_repository_update_user_password([web_db_sqlite_repository_update_user_password])
|
||||
web_db_sqlite_repository_add_bounty([web_db_sqlite_repository_add_bounty])
|
||||
web_db_sqlite_repository__apply_bounty_filters([web_db_sqlite_repository__apply_bounty_filters])
|
||||
web_db_sqlite_repository_get_bounties([web_db_sqlite_repository_get_bounties])
|
||||
web_db_sqlite_repository_get_total_bounties([web_db_sqlite_repository_get_total_bounties])
|
||||
web_router_auth_api_change_pass_change_password([auth_api_change_pass_change_password])
|
||||
web_router_auth_api_login_login([auth_api_login_login])
|
||||
web_router_logs_api_get_logs_get_logs([logs_api_get_logs_get_logs])
|
||||
web_router_logs_api_get_histogram_get_logs_histogram([logs_api_get_histogram_get_logs_histogram])
|
||||
web_router_bounty_api_get_bounties_get_bounties([bounty_api_get_bounties_get_bounties])
|
||||
web_router_stats_api_get_stats_get_stats([stats_api_get_stats_get_stats])
|
||||
web_router_fleet_api_mutate_decky_api_mutate_decky([api_mutate_decky_api_mutate_decky])
|
||||
web_router_fleet_api_get_deckies_get_deckies([api_get_deckies_get_deckies])
|
||||
web_router_fleet_api_mutate_interval_api_update_mutate_interval([api_mutate_interval_api_update_mutate_interval])
|
||||
web_router_fleet_api_deploy_deckies_api_deploy_deckies([api_deploy_deckies_api_deploy_deckies])
|
||||
web_router_stream_api_stream_events_stream_events([stream_api_stream_events_stream_events])
|
||||
web_router_stream_api_stream_events_event_generator([stream_api_stream_events_event_generator])
|
||||
end
|
||||
|
||||
%% Key Connection Edges
|
||||
network_detect_interface --> network__run
|
||||
network_detect_subnet --> network__run
|
||||
network_get_host_ip --> network__run
|
||||
network_setup_host_macvlan --> network__run
|
||||
network_teardown_host_macvlan --> network__run
|
||||
network_setup_host_ipvlan --> network__run
|
||||
network_teardown_host_ipvlan --> network__run
|
||||
|
||||
ini_loader_load_ini --> ini_loader__parse_configparser
|
||||
ini_loader_load_ini_from_string --> ini_loader__parse_configparser
|
||||
|
||||
composer_generate_compose --> os_fingerprint_get_os_sysctls
|
||||
composer_write_compose --> composer_generate_compose
|
||||
|
||||
fleet_resolve_distros --> distros_random_distro
|
||||
fleet_build_deckies --> fleet_resolve_distros
|
||||
fleet_build_deckies --> config_random_hostname
|
||||
fleet_build_deckies_from_ini --> archetypes_get_archetype
|
||||
fleet_build_deckies_from_ini --> fleet_all_service_names
|
||||
|
||||
cli_deploy --> ini_loader_load_ini
|
||||
cli_deploy --> network_detect_interface
|
||||
cli_deploy --> fleet_build_deckies_from_ini
|
||||
cli_deploy --> engine_deployer_deploy
|
||||
|
||||
cli_collect --> collector_worker_log_collector_worker
|
||||
cli_mutate --> mutator_engine_run_watch_loop
|
||||
|
||||
cli_correlate --> correlation_engine_ingest_file
|
||||
cli_correlate --> correlation_engine_traversals
|
||||
|
||||
engine_deployer_deploy --> network_ips_to_range
|
||||
engine_deployer_deploy --> network_setup_host_macvlan
|
||||
engine_deployer_deploy --> composer_write_compose
|
||||
engine_deployer_deploy --> engine_deployer__compose_with_retry
|
||||
|
||||
engine_deployer_teardown --> network_teardown_host_macvlan
|
||||
engine_deployer_teardown --> config_clear_state
|
||||
|
||||
collector_worker_log_collector_worker --> collector_worker__stream_container
|
||||
collector_worker__stream_container --> collector_worker_parse_rfc5424
|
||||
|
||||
mutator_engine_mutate_decky --> composer_write_compose
|
||||
mutator_engine_mutate_decky --> engine_deployer__compose_with_retry
|
||||
mutator_engine_mutate_all --> mutator_engine_mutate_decky
|
||||
mutator_engine_run_watch_loop --> mutator_engine_mutate_all
|
||||
|
||||
web_db_sqlite_repository_initialize --> web_db_sqlite_database_init_db
|
||||
web_db_sqlite_repository_get_logs --> web_db_sqlite_repository__apply_filters
|
||||
|
||||
web_router_auth_api_login_login --> web_auth_verify_password
|
||||
web_router_auth_api_login_login --> web_auth_create_access_token
|
||||
|
||||
web_router_logs_api_get_logs_get_logs --> web_db_sqlite_repository_get_logs
|
||||
web_router_fleet_api_mutate_decky_api_mutate_decky --> mutator_engine_mutate_decky
|
||||
web_router_fleet_api_deploy_deckies_api_deploy_deckies --> fleet_build_deckies_from_ini
|
||||
|
||||
web_router_stream_api_stream_events_stream_events --> web_db_sqlite_repository_get_logs_after_id
|
||||
web_router_stream_api_stream_events_stream_events --> web_router_stream_api_stream_events_event_generator
|
||||
```
|
||||
66
development/execution_graphs.md
Normal file
66
development/execution_graphs.md
Normal file
@@ -0,0 +1,66 @@
|
||||
# DECNET Execution Graphs
|
||||
|
||||
These graphs illustrate the logical flow of execution within the DECNET framework, showing how high-level commands and API requests trigger secondary processes and subsystem interactions.
|
||||
|
||||
## 1. Deployment & Teardown Flow
|
||||
This flow shows the orchestration from a CLI `deploy` command down to network setup and container instantiation.
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
CLI_Deploy([cli.deploy]) --> INI[ini_loader.load_ini]
|
||||
CLI_Deploy --> NET_Detect[network.detect_interface]
|
||||
CLI_Deploy --> FleetBuild[fleet.build_deckies_from_ini]
|
||||
|
||||
FleetBuild --> Archetype[archetypes.get_archetype]
|
||||
FleetBuild --> Distro[distros.get_distro]
|
||||
|
||||
CLI_Deploy --> Engine_Deploy[engine.deployer.deploy]
|
||||
|
||||
Engine_Deploy --> IP_Alloc[network.allocate_ips]
|
||||
Engine_Deploy --> NET_Setup[network.setup_host_macvlan]
|
||||
Engine_Deploy --> Compose_Gen[composer.write_compose]
|
||||
Engine_Deploy --> Docker_Up[engine.deployer._compose_with_retry]
|
||||
|
||||
CLI_Teardown([cli.teardown]) --> Engine_Teardown[engine.deployer.teardown]
|
||||
Engine_Teardown --> NET_Cleanup[network.teardown_host_macvlan]
|
||||
Engine_Teardown --> Docker_Down[engine.deployer._compose]
|
||||
```
|
||||
|
||||
## 2. Mutation & Monitoring Flow
|
||||
How DECNET maintains deception by periodically changing decoy identities and monitoring activities.
|
||||
|
||||
```mermaid
|
||||
graph LR
|
||||
subgraph Periodic_Process
|
||||
CLI_Mutate([cli.mutate]) --> Mutate_Loop[mutator.engine.run_watch_loop]
|
||||
end
|
||||
|
||||
Mutate_Loop --> Mutate_All[mutator.engine.mutate_all]
|
||||
Mutate_All --> Mutate_Decky[mutator.engine.mutate_decky]
|
||||
|
||||
Mutate_Decky --> Get_New_Identity[archetypes.get_archetype]
|
||||
Mutate_Decky --> Rewrite_Compose[composer.write_compose]
|
||||
Mutate_Decky --> Restart_Container[engine.deployer._compose_with_retry]
|
||||
|
||||
subgraph Log_Collection
|
||||
CLI_Collect([cli.collect]) --> Worker[collector.worker.log_collector_worker]
|
||||
Worker --> Stream[collector.worker._stream_container]
|
||||
Stream --> Parse[collector.worker.parse_rfc5424]
|
||||
end
|
||||
```
|
||||
|
||||
## 3. Web API Flow (Fleet Management)
|
||||
How the Web UI interacts with the underlying systems via the FastAPI router.
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
Web_UI[Web Dashboard] --> API_Deploy[web.router.fleet.deploy_deckies]
|
||||
Web_UI --> API_Mutate[web.router.fleet.mutate_decky]
|
||||
Web_UI --> API_Stream[web.router.stream.stream_events]
|
||||
|
||||
API_Deploy --> FleetBuild[fleet.build_deckies_from_ini]
|
||||
API_Mutate --> Mutator[mutator.engine.mutate_decky]
|
||||
|
||||
API_Stream --> DB_Pull[web.db.sqlite.repository.get_logs_after_id]
|
||||
DB_Pull --> SQLite[(SQLite Database)]
|
||||
```
|
||||
102
development/mermaid.svg
Normal file
102
development/mermaid.svg
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 528 KiB |
476
development/nmap-output-post-fixes.txt
Normal file
476
development/nmap-output-post-fixes.txt
Normal file
@@ -0,0 +1,476 @@
|
||||
Nmap scan report for 192.168.1.200
|
||||
Host is up (0.0000020s latency).
|
||||
Not shown: 65515 closed tcp ports (reset)
|
||||
PORT STATE SERVICE VERSION
|
||||
21/tcp open ftp vsftpd (before 2.0.8) or WU-FTPD
|
||||
23/tcp open telnet?
|
||||
| fingerprint-strings:
|
||||
| DNSStatusRequestTCP, DNSVersionBindReqTCP, DistCCD, JavaRMI, LANDesk-RC, LDAPBindReq, NULL, NotesRPC, RPCCheck, Radmin, TerminalServer, WMSRequest, X11Probe, mydoom, tn3270:
|
||||
| login:
|
||||
| FourOhFourRequest, GenericLines, GetRequest, HTTPOptions, LDAPSearchReq, RTSPRequest:
|
||||
| login:
|
||||
| Password:
|
||||
| Login incorrect
|
||||
| login:
|
||||
| Hello, Help, Kerberos, LPDString, NessusTPv10, NessusTPv11, NessusTPv12, SSLSessionReq, SSLv23SessionReq, TerminalServerCookie, Verifier, VerifierAdvanced, WWWOFFLEctrlstat:
|
||||
| login:
|
||||
| Password:
|
||||
| SIPOptions:
|
||||
| login:
|
||||
| Password:
|
||||
| Login incorrect
|
||||
| login: Password:
|
||||
| Login incorrect
|
||||
| login: Password:
|
||||
| Login incorrect
|
||||
| login: Password:
|
||||
| Login incorrect
|
||||
| login: Password:
|
||||
| Login incorrect
|
||||
|_ login: Password:
|
||||
25/tcp open smtp Postfix smtpd
|
||||
|_smtp-commands: omega-decky, PIPELINING, SIZE 10240000, VRFY, ETRN, AUTH PLAIN LOGIN, ENHANCEDSTATUSCODES, 8BITMIME, DSN
|
||||
80/tcp open http Apache httpd 2.4.54
|
||||
|_http-title: 403 Forbidden
|
||||
|_http-server-header: Werkzeug/3.1.8 Python/3.11.2
|
||||
110/tcp open pop3 Dovecot pop3d ([omega-decky])
|
||||
|_pop3-capabilities: USER
|
||||
143/tcp open imap Dovecot imapd
|
||||
|_imap-capabilities: IMAP4rev1 AUTH=PLAIN OK completed AUTH=LOGINA0001 CAPABILITY
|
||||
389/tcp open ldap Cisco LDAP server
|
||||
445/tcp open microsoft-ds
|
||||
| fingerprint-strings:
|
||||
| SMBProgNeg:
|
||||
| SMBr
|
||||
|_ "3DUfw
|
||||
1433/tcp open ms-sql-s?
|
||||
1883/tcp open mqtt
|
||||
| mqtt-subscribe:
|
||||
| Topics and their most recent payloads:
|
||||
| plant/water/pump2/status: STANDBY
|
||||
| plant/alarm/high_pressure: 0
|
||||
| plant/water/chlorine/residual: 0.8
|
||||
| plant/water/chlorine/dosing: 1.2
|
||||
| plant/water/pump1/rpm: 1419
|
||||
| plant/water/tank1/level: 76.6
|
||||
| plant/$SYS/broker/uptime: 2847392
|
||||
| plant/$SYS/broker/version: Mosquitto 2.0.15
|
||||
| plant/water/valve/inlet/state: OPEN
|
||||
| plant/water/valve/drain/state: CLOSED
|
||||
| plant/water/tank1/pressure: 2.86
|
||||
| plant/water/pump1/status: RUNNING
|
||||
| plant/alarm/low_chlorine: 0
|
||||
|_ plant/alarm/pump_fault: 0
|
||||
2375/tcp open docker Docker 24.0.5
|
||||
| fingerprint-strings:
|
||||
| GetRequest:
|
||||
| HTTP/1.1 404 NOT FOUND
|
||||
| Server: Werkzeug/3.1.8 Python/3.11.2
|
||||
| Date: Fri, 10 Apr 2026 06:25:23 GMT
|
||||
| Content-Type: application/json
|
||||
| Content-Length: 46
|
||||
| Connection: close
|
||||
| {"message": "page not found", "response": 404}
|
||||
| HTTPOptions:
|
||||
| HTTP/1.1 200 OK
|
||||
| Server: Werkzeug/3.1.8 Python/3.11.2
|
||||
| Date: Fri, 10 Apr 2026 06:25:23 GMT
|
||||
| Content-Type: text/html; charset=utf-8
|
||||
| Allow: HEAD, OPTIONS, GET
|
||||
| Content-Length: 0
|
||||
| Connection: close
|
||||
| Hello:
|
||||
| <!DOCTYPE HTML>
|
||||
| <html lang="en">
|
||||
| <head>
|
||||
| <meta charset="utf-8">
|
||||
| <title>Error response</title>
|
||||
| </head>
|
||||
| <body>
|
||||
| <h1>Error response</h1>
|
||||
| <p>Error code: 400</p>
|
||||
| <p>Message: Bad request syntax ('EHLO').</p>
|
||||
| <p>Error code explanation: 400 - Bad request syntax or unsupported method.</p>
|
||||
| </body>
|
||||
| </html>
|
||||
| RTSPRequest:
|
||||
| <!DOCTYPE HTML>
|
||||
| <html lang="en">
|
||||
| <head>
|
||||
| <meta charset="utf-8">
|
||||
| <title>Error response</title>
|
||||
| </head>
|
||||
| <body>
|
||||
| <h1>Error response</h1>
|
||||
| <p>Error code: 400</p>
|
||||
| <p>Message: Bad request version ('RTSP/1.0').</p>
|
||||
| <p>Error code explanation: 400 - Bad request syntax or unsupported method.</p>
|
||||
| </body>
|
||||
| </html>
|
||||
| docker:
|
||||
| HTTP/1.1 200 OK
|
||||
| Server: Werkzeug/3.1.8 Python/3.11.2
|
||||
| Date: Fri, 10 Apr 2026 06:25:23 GMT
|
||||
| Content-Type: application/json
|
||||
| Content-Length: 187
|
||||
| Connection: close
|
||||
|_ {"Version": "24.0.5", "ApiVersion": "1.43", "MinAPIVersion": "1.12", "GitCommit": "ced0996", "GoVersion": "go1.20.6", "Os": "linux", "Arch": "amd64", "KernelVersion": "5.15.0-76-generic"}
|
||||
| docker-version:
|
||||
| KernelVersion: 5.15.0-76-generic
|
||||
| MinAPIVersion: 1.12
|
||||
| Arch: amd64
|
||||
| Os: linux
|
||||
| GoVersion: go1.20.6
|
||||
| Version: 24.0.5
|
||||
| GitCommit: ced0996
|
||||
|_ ApiVersion: 1.43
|
||||
3306/tcp open mysql MySQL 5.7.38-log
|
||||
| mysql-info:
|
||||
| Protocol: 10
|
||||
| Version: 5.7.38-log
|
||||
| Thread ID: 1
|
||||
| Capabilities flags: 63487
|
||||
| Some Capabilities: Support41Auth, DontAllowDatabaseTableColumn, Speaks41ProtocolOld, ConnectWithDatabase, SupportsTransactions, IgnoreSpaceBeforeParenthesis, SupportsCompression, LongColumnFlag, SupportsLoadDataLocal, ODBCClient, LongPassword, Speaks41ProtocolNew, InteractiveClient, FoundRows, IgnoreSigpipes, SupportsMultipleStatments, SupportsMultipleResults, SupportsAuthPlugins
|
||||
| Status: Autocommit
|
||||
| Salt: pv!magic!O}%>UM|gu^1
|
||||
|_ Auth Plugin Name: mysql_native_password
|
||||
3389/tcp open ms-wbt-server xrdp
|
||||
5060/tcp open sip (SIP end point; Status: 401 Unauthorized)
|
||||
| fingerprint-strings:
|
||||
| HTTPOptions:
|
||||
| SIP/2.0 401 Unauthorized
|
||||
| Via:
|
||||
| From:
|
||||
| Call-ID:
|
||||
| CSeq:
|
||||
| WWW-Authenticate: Digest realm="omega-decky", nonce="fa63b9f8e719d810", algorithm=MD5
|
||||
| Content-Length: 0
|
||||
| RTSPRequest:
|
||||
| SIP/2.0 401 Unauthorized
|
||||
| Via:
|
||||
| From:
|
||||
| Call-ID:
|
||||
| CSeq:
|
||||
| WWW-Authenticate: Digest realm="omega-decky", nonce="25b193b6f8c63e9d", algorithm=MD5
|
||||
| Content-Length: 0
|
||||
| SIPOptions:
|
||||
| SIP/2.0 401 Unauthorized
|
||||
| Via: SIP/2.0/TCP nm;branch=foo
|
||||
| From: <sip:nm@nm>;tag=root
|
||||
| <sip:nm2@nm2>
|
||||
| Call-ID: 50000
|
||||
| CSeq: 42 OPTIONS
|
||||
| WWW-Authenticate: Digest realm="omega-decky", nonce="7d2aa09cb9bfbac0", algorithm=MD5
|
||||
|_ Content-Length: 0
|
||||
5432/tcp open postgresql?
|
||||
5900/tcp open vnc VNC (protocol 3.8)
|
||||
| vnc-info:
|
||||
| Protocol version: 3.8
|
||||
| Security types:
|
||||
|_ VNC Authentication (2)
|
||||
6379/tcp open redis?
|
||||
| fingerprint-strings:
|
||||
| HELP4STOMP, HTTPOptions, Hello, Help, Kerberos, LPDString, Memcache, NessusTPv10, NessusTPv11, NessusTPv12, RTSPRequest, SSLSessionReq, SSLv23SessionReq, Socks5, SqueezeCenter_CLI, TLSSessionReq, TerminalServerCookie, Verifier, VerifierAdvanced, WWWOFFLEctrlstat, ajp, dominoconsole, firebird:
|
||||
| -ERR unknown command
|
||||
| LDAPSearchReq, hp-pjl, pervasive-btrieve:
|
||||
| -ERR unknown command
|
||||
| -ERR unknown command
|
||||
| SIPOptions:
|
||||
| -ERR unknown command
|
||||
| -ERR unknown command
|
||||
| -ERR unknown command
|
||||
| -ERR unknown command
|
||||
| -ERR unknown command
|
||||
| -ERR unknown command
|
||||
| -ERR unknown command
|
||||
| -ERR unknown command
|
||||
| -ERR unknown command
|
||||
| -ERR unknown command
|
||||
| redis-server:
|
||||
| $150
|
||||
| Server
|
||||
| redis_version:7.2.7
|
||||
| redis_mode:standalone
|
||||
| os:Linux 5.15.0
|
||||
| arch_bits:64
|
||||
| tcp_port:6379
|
||||
| uptime_in_seconds:864000
|
||||
| connected_clients:1
|
||||
|_ Keyspace
|
||||
6443/tcp open sun-sr-https?
|
||||
| fingerprint-strings:
|
||||
| GetRequest:
|
||||
| HTTP/1.1 404 NOT FOUND
|
||||
| Server: Werkzeug/3.1.8 Python/3.11.2
|
||||
| Date: Fri, 10 Apr 2026 06:25:23 GMT
|
||||
| Content-Type: application/json
|
||||
| Content-Length: 52
|
||||
| Connection: close
|
||||
| {"kind": "Status", "status": "Failure", "code": 404}
|
||||
| HTTPOptions:
|
||||
| HTTP/1.1 200 OK
|
||||
| Server: Werkzeug/3.1.8 Python/3.11.2
|
||||
| Date: Fri, 10 Apr 2026 06:25:23 GMT
|
||||
| Content-Type: text/html; charset=utf-8
|
||||
| Allow: GET, HEAD, OPTIONS
|
||||
| Content-Length: 0
|
||||
| Connection: close
|
||||
| RTSPRequest:
|
||||
| <!DOCTYPE HTML>
|
||||
| <html lang="en">
|
||||
| <head>
|
||||
| <meta charset="utf-8">
|
||||
| <title>Error response</title>
|
||||
| </head>
|
||||
| <body>
|
||||
| <h1>Error response</h1>
|
||||
| <p>Error code: 400</p>
|
||||
| <p>Message: Bad request version ('RTSP/1.0').</p>
|
||||
| <p>Error code explanation: 400 - Bad request syntax or unsupported method.</p>
|
||||
| </body>
|
||||
| </html>
|
||||
| SSLSessionReq:
|
||||
| <!DOCTYPE HTML>
|
||||
| <html lang="en">
|
||||
| <head>
|
||||
| <meta charset="utf-8">
|
||||
| <title>Error response</title>
|
||||
| </head>
|
||||
| <body>
|
||||
| <h1>Error response</h1>
|
||||
| <p>Error code: 400</p>
|
||||
| <p>Message: Bad request syntax ('
|
||||
| <=
|
||||
| ').</p>
|
||||
| <p>Error code explanation: 400 - Bad request syntax or unsupported method.</p>
|
||||
| </body>
|
||||
|_ </html>
|
||||
9200/tcp open wap-wsp?
|
||||
| fingerprint-strings:
|
||||
| GetRequest:
|
||||
| HTTP/1.0 200 OK
|
||||
| Server: elasticsearch
|
||||
| Date: Fri, 10 Apr 2026 06:25:23 GMT
|
||||
| Content-Type: application/json; charset=UTF-8
|
||||
| Content-Length: 477
|
||||
| X-elastic-product: Elasticsearch
|
||||
| {"name": "omega-decky", "cluster_name": "elasticsearch", "cluster_uuid": "xC3Pr9abTq2mNkOeLvXwYA", "version": {"number": "7.17.9", "build_flavor": "default", "build_type": "docker", "build_hash": "ef48222227ee6b9e70e502f0f0daa52435ee634d", "build_date": "2023-01-31T05:34:43.305517834Z", "build_snapshot": false, "lucene_version": "8.11.1", "minimum_wire_compatibility_version": "6.8.0", "minimum_index_compatibility_version": "6.0.0-beta1"}, "tagline": "You Know, for Search"}
|
||||
| HTTPOptions:
|
||||
| HTTP/1.0 501 Unsupported method ('OPTIONS')
|
||||
| Server: elasticsearch
|
||||
| Date: Fri, 10 Apr 2026 06:25:23 GMT
|
||||
| Connection: close
|
||||
| Content-Type: text/html;charset=utf-8
|
||||
| Content-Length: 360
|
||||
| <!DOCTYPE HTML>
|
||||
| <html lang="en">
|
||||
| <head>
|
||||
| <meta charset="utf-8">
|
||||
| <title>Error response</title>
|
||||
| </head>
|
||||
| <body>
|
||||
| <h1>Error response</h1>
|
||||
| <p>Error code: 501</p>
|
||||
| <p>Message: Unsupported method ('OPTIONS').</p>
|
||||
| <p>Error code explanation: 501 - Server does not support this operation.</p>
|
||||
| </body>
|
||||
| </html>
|
||||
| RTSPRequest:
|
||||
| <!DOCTYPE HTML>
|
||||
| <html lang="en">
|
||||
| <head>
|
||||
| <meta charset="utf-8">
|
||||
| <title>Error response</title>
|
||||
| </head>
|
||||
| <body>
|
||||
| <h1>Error response</h1>
|
||||
| <p>Error code: 400</p>
|
||||
| <p>Message: Bad request version ('RTSP/1.0').</p>
|
||||
| <p>Error code explanation: 400 - Bad request syntax or unsupported method.</p>
|
||||
| </body>
|
||||
|_ </html>
|
||||
27017/tcp open mongod?
|
||||
|_mongodb-databases: ERROR: Script execution failed (use -d to debug)
|
||||
|_mongodb-info: ERROR: Script execution failed (use -d to debug)
|
||||
8 services unrecognized despite returning data. If you know the service/version, please submit the following fingerprints at https://nmap.org/cgi-bin/submit.cgi?new-service :
|
||||
==============NEXT SERVICE FINGERPRINT (SUBMIT INDIVIDUALLY)==============
|
||||
SF-Port23-TCP:V=7.92%I=9%D=4/10%Time=69D897D3%P=x86_64-redhat-linux-gnu%r(
|
||||
SF:NULL,7,"login:\x20")%r(GenericLines,2C,"login:\x20\xff\xfb\x01Password:
|
||||
SF:\x20\nLogin\x20incorrect\nlogin:\x20")%r(tn3270,16,"login:\x20\xff\xfe\
|
||||
SF:x18\xff\xfe\x19\xff\xfc\x19\xff\xfe\0\xff\xfc\0")%r(GetRequest,2C,"logi
|
||||
SF:n:\x20\xff\xfb\x01Password:\x20\nLogin\x20incorrect\nlogin:\x20")%r(HTT
|
||||
SF:POptions,2C,"login:\x20\xff\xfb\x01Password:\x20\nLogin\x20incorrect\nl
|
||||
SF:ogin:\x20")%r(RTSPRequest,2C,"login:\x20\xff\xfb\x01Password:\x20\nLogi
|
||||
SF:n\x20incorrect\nlogin:\x20")%r(RPCCheck,7,"login:\x20")%r(DNSVersionBin
|
||||
SF:dReqTCP,7,"login:\x20")%r(DNSStatusRequestTCP,7,"login:\x20")%r(Hello,1
|
||||
SF:4,"login:\x20\xff\xfb\x01Password:\x20")%r(Help,14,"login:\x20\xff\xfb\
|
||||
SF:x01Password:\x20")%r(SSLSessionReq,14,"login:\x20\xff\xfb\x01Password:\
|
||||
SF:x20")%r(TerminalServerCookie,14,"login:\x20\xff\xfb\x01Password:\x20")%
|
||||
SF:r(SSLv23SessionReq,14,"login:\x20\xff\xfb\x01Password:\x20")%r(Kerberos
|
||||
SF:,14,"login:\x20\xff\xfb\x01Password:\x20")%r(X11Probe,7,"login:\x20")%r
|
||||
SF:(FourOhFourRequest,2C,"login:\x20\xff\xfb\x01Password:\x20\nLogin\x20in
|
||||
SF:correct\nlogin:\x20")%r(LPDString,14,"login:\x20\xff\xfb\x01Password:\x
|
||||
SF:20")%r(LDAPSearchReq,2C,"login:\x20\xff\xfb\x01Password:\x20\nLogin\x20
|
||||
SF:incorrect\nlogin:\x20")%r(LDAPBindReq,7,"login:\x20")%r(SIPOptions,BE,"
|
||||
SF:login:\x20\xff\xfb\x01Password:\x20\nLogin\x20incorrect\nlogin:\x20Pass
|
||||
SF:word:\x20\nLogin\x20incorrect\nlogin:\x20Password:\x20\nLogin\x20incorr
|
||||
SF:ect\nlogin:\x20Password:\x20\nLogin\x20incorrect\nlogin:\x20Password:\x
|
||||
SF:20\nLogin\x20incorrect\nlogin:\x20Password:\x20")%r(LANDesk-RC,7,"login
|
||||
SF::\x20")%r(TerminalServer,7,"login:\x20")%r(NotesRPC,7,"login:\x20")%r(D
|
||||
SF:istCCD,7,"login:\x20")%r(JavaRMI,7,"login:\x20")%r(Radmin,7,"login:\x20
|
||||
SF:")%r(NessusTPv12,14,"login:\x20\xff\xfb\x01Password:\x20")%r(NessusTPv1
|
||||
SF:1,14,"login:\x20\xff\xfb\x01Password:\x20")%r(NessusTPv10,14,"login:\x2
|
||||
SF:0\xff\xfb\x01Password:\x20")%r(WMSRequest,7,"login:\x20")%r(mydoom,7,"l
|
||||
SF:ogin:\x20")%r(WWWOFFLEctrlstat,14,"login:\x20\xff\xfb\x01Password:\x20"
|
||||
SF:)%r(Verifier,14,"login:\x20\xff\xfb\x01Password:\x20")%r(VerifierAdvanc
|
||||
SF:ed,14,"login:\x20\xff\xfb\x01Password:\x20");
|
||||
==============NEXT SERVICE FINGERPRINT (SUBMIT INDIVIDUALLY)==============
|
||||
SF-Port445-TCP:V=7.92%I=9%D=4/10%Time=69D897D8%P=x86_64-redhat-linux-gnu%r
|
||||
SF:(SMBProgNeg,51,"\0\0\0M\xffSMBr\0\0\0\0\x80\0\xc0\0\0\0\0\0\0\0\0\0\0\0
|
||||
SF:\0\0\0@\x06\0\0\x01\0\x11\x07\0\x03\x01\0\x01\0\0\xfa\0\0\0\0\x01\0\0\0
|
||||
SF:\0\0p\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\x08\0\x11\"3DUfw\x88");
|
||||
==============NEXT SERVICE FINGERPRINT (SUBMIT INDIVIDUALLY)==============
|
||||
SF-Port1433-TCP:V=7.92%I=9%D=4/10%Time=69D897D8%P=x86_64-redhat-linux-gnu%
|
||||
SF:r(ms-sql-s,2F,"\x04\x01\0/\0\0\x01\0\0\0\x1a\0\x06\x01\0\x20\0\x01\x02\
|
||||
SF:0!\0\x01\x03\0\"\0\x04\x04\0&\0\x01\xff\x0e\0\x07\xd0\0\0\x02\0\0\0\0\0
|
||||
SF:\0");
|
||||
==============NEXT SERVICE FINGERPRINT (SUBMIT INDIVIDUALLY)==============
|
||||
SF-Port5060-TCP:V=7.92%I=9%D=4/10%Time=69D897E0%P=x86_64-redhat-linux-gnu%
|
||||
SF:r(SIPOptions,F7,"SIP/2\.0\x20401\x20Unauthorized\r\nVia:\x20SIP/2\.0/TC
|
||||
SF:P\x20nm;branch=foo\r\nFrom:\x20<sip:nm@nm>;tag=root\r\nTo:\x20<sip:nm2@
|
||||
SF:nm2>\r\nCall-ID:\x2050000\r\nCSeq:\x2042\x20OPTIONS\r\nWWW-Authenticate
|
||||
SF::\x20Digest\x20realm=\"omega-decky\",\x20nonce=\"7d2aa09cb9bfbac0\",\x2
|
||||
SF:0algorithm=MD5\r\nContent-Length:\x200\r\n\r\n")%r(HTTPOptions,AE,"SIP/
|
||||
SF:2\.0\x20401\x20Unauthorized\r\nVia:\x20\r\nFrom:\x20\r\nTo:\x20\r\nCall
|
||||
SF:-ID:\x20\r\nCSeq:\x20\r\nWWW-Authenticate:\x20Digest\x20realm=\"omega-d
|
||||
SF:ecky\",\x20nonce=\"fa63b9f8e719d810\",\x20algorithm=MD5\r\nContent-Leng
|
||||
SF:th:\x200\r\n\r\n")%r(RTSPRequest,AE,"SIP/2\.0\x20401\x20Unauthorized\r\
|
||||
SF:nVia:\x20\r\nFrom:\x20\r\nTo:\x20\r\nCall-ID:\x20\r\nCSeq:\x20\r\nWWW-A
|
||||
SF:uthenticate:\x20Digest\x20realm=\"omega-decky\",\x20nonce=\"25b193b6f8c
|
||||
SF:63e9d\",\x20algorithm=MD5\r\nContent-Length:\x200\r\n\r\n");
|
||||
==============NEXT SERVICE FINGERPRINT (SUBMIT INDIVIDUALLY)==============
|
||||
SF-Port5432-TCP:V=7.92%I=9%D=4/10%Time=69D897E2%P=x86_64-redhat-linux-gnu%
|
||||
SF:r(SMBProgNeg,D,"R\0\0\0\x0c\0\0\0\x05\x96\xbci&")%r(Kerberos,D,"R\0\0\0
|
||||
SF:\x0c\0\0\0\x05\xa7\x87:~")%r(ZendJavaBridge,D,"R\0\0\0\x0c\0\0\0\x05\xe
|
||||
SF:d\x9f\xf8\0");
|
||||
==============NEXT SERVICE FINGERPRINT (SUBMIT INDIVIDUALLY)==============
|
||||
SF-Port6379-TCP:V=7.92%I=9%D=4/10%Time=69D897D8%P=x86_64-redhat-linux-gnu%
|
||||
SF:r(redis-server,9E,"\$150\r\n#\x20Server\nredis_version:7\.2\.7\nredis_m
|
||||
SF:ode:standalone\nos:Linux\x205\.15\.0\narch_bits:64\ntcp_port:6379\nupti
|
||||
SF:me_in_seconds:864000\nconnected_clients:1\n#\x20Keyspace\n\r\n")%r(GetR
|
||||
SF:equest,5,"\$-1\r\n")%r(HTTPOptions,16,"-ERR\x20unknown\x20command\r\n")
|
||||
SF:%r(RTSPRequest,16,"-ERR\x20unknown\x20command\r\n")%r(Hello,16,"-ERR\x2
|
||||
SF:0unknown\x20command\r\n")%r(Help,16,"-ERR\x20unknown\x20command\r\n")%r
|
||||
SF:(SSLSessionReq,16,"-ERR\x20unknown\x20command\r\n")%r(TerminalServerCoo
|
||||
SF:kie,16,"-ERR\x20unknown\x20command\r\n")%r(TLSSessionReq,16,"-ERR\x20un
|
||||
SF:known\x20command\r\n")%r(SSLv23SessionReq,16,"-ERR\x20unknown\x20comman
|
||||
SF:d\r\n")%r(Kerberos,16,"-ERR\x20unknown\x20command\r\n")%r(FourOhFourReq
|
||||
SF:uest,5,"\$-1\r\n")%r(LPDString,16,"-ERR\x20unknown\x20command\r\n")%r(L
|
||||
SF:DAPSearchReq,2C,"-ERR\x20unknown\x20command\r\n-ERR\x20unknown\x20comma
|
||||
SF:nd\r\n")%r(SIPOptions,DC,"-ERR\x20unknown\x20command\r\n-ERR\x20unknown
|
||||
SF:\x20command\r\n-ERR\x20unknown\x20command\r\n-ERR\x20unknown\x20command
|
||||
SF:\r\n-ERR\x20unknown\x20command\r\n-ERR\x20unknown\x20command\r\n-ERR\x2
|
||||
SF:0unknown\x20command\r\n-ERR\x20unknown\x20command\r\n-ERR\x20unknown\x2
|
||||
SF:0command\r\n-ERR\x20unknown\x20command\r\n")%r(NessusTPv12,16,"-ERR\x20
|
||||
SF:unknown\x20command\r\n")%r(NessusTPv11,16,"-ERR\x20unknown\x20command\r
|
||||
SF:\n")%r(NessusTPv10,16,"-ERR\x20unknown\x20command\r\n")%r(WWWOFFLEctrls
|
||||
SF:tat,16,"-ERR\x20unknown\x20command\r\n")%r(Verifier,16,"-ERR\x20unknown
|
||||
SF:\x20command\r\n")%r(VerifierAdvanced,16,"-ERR\x20unknown\x20command\r\n
|
||||
SF:")%r(Socks5,16,"-ERR\x20unknown\x20command\r\n")%r(OfficeScan,5,"\$-1\r
|
||||
SF:\n")%r(HELP4STOMP,16,"-ERR\x20unknown\x20command\r\n")%r(Memcache,16,"-
|
||||
SF:ERR\x20unknown\x20command\r\n")%r(firebird,16,"-ERR\x20unknown\x20comma
|
||||
SF:nd\r\n")%r(pervasive-btrieve,2C,"-ERR\x20unknown\x20command\r\n-ERR\x20
|
||||
SF:unknown\x20command\r\n")%r(ajp,16,"-ERR\x20unknown\x20command\r\n")%r(h
|
||||
SF:p-pjl,2C,"-ERR\x20unknown\x20command\r\n-ERR\x20unknown\x20command\r\n"
|
||||
SF:)%r(SqueezeCenter_CLI,16,"-ERR\x20unknown\x20command\r\n")%r(dominocons
|
||||
SF:ole,16,"-ERR\x20unknown\x20command\r\n");
|
||||
==============NEXT SERVICE FINGERPRINT (SUBMIT INDIVIDUALLY)==============
|
||||
SF-Port6443-TCP:V=7.92%I=9%D=4/10%Time=69D897D3%P=x86_64-redhat-linux-gnu%
|
||||
SF:r(SSLSessionReq,1E8,"<!DOCTYPE\x20HTML>\n<html\x20lang=\"en\">\n\x20\x2
|
||||
SF:0\x20\x20<head>\n\x20\x20\x20\x20\x20\x20\x20\x20<meta\x20charset=\"utf
|
||||
SF:-8\">\n\x20\x20\x20\x20\x20\x20\x20\x20<title>Error\x20response</title>
|
||||
SF:\n\x20\x20\x20\x20</head>\n\x20\x20\x20\x20<body>\n\x20\x20\x20\x20\x20
|
||||
SF:\x20\x20\x20<h1>Error\x20response</h1>\n\x20\x20\x20\x20\x20\x20\x20\x2
|
||||
SF:0<p>Error\x20code:\x20400</p>\n\x20\x20\x20\x20\x20\x20\x20\x20<p>Messa
|
||||
SF:ge:\x20Bad\x20request\x20syntax\x20\('\\x16\\x03\\x00\\x00S\\x01\\x00\\
|
||||
SF:x00O\\x03\\x00\?G\xc3\x97\xc3\xb7\xc2\xba,\xc3\xae\xc3\xaa\xc2\xb2`~\xc
|
||||
SF:3\xb3\\x00\xc3\xbd\\x82{\xc2\xb9\xc3\x95\\x96\xc3\x88w\\x9b\xc3\xa6\xc3
|
||||
SF:\x84\xc3\x9b<=\xc3\x9bo\xc3\xaf\\x10n\\x00\\x00\(\\x00\\x16\\x00\\x1
|
||||
SF:3\\x00'\)\.</p>\n\x20\x20\x20\x20\x20\x20\x20\x20<p>Error\x20code\x20ex
|
||||
SF:planation:\x20400\x20-\x20Bad\x20request\x20syntax\x20or\x20unsupported
|
||||
SF:\x20method\.</p>\n\x20\x20\x20\x20</body>\n</html>\n")%r(GetRequest,E0,
|
||||
SF:"HTTP/1\.1\x20404\x20NOT\x20FOUND\r\nServer:\x20Werkzeug/3\.1\.8\x20Pyt
|
||||
SF:hon/3\.11\.2\r\nDate:\x20Fri,\x2010\x20Apr\x202026\x2006:25:23\x20GMT\r
|
||||
SF:\nContent-Type:\x20application/json\r\nContent-Length:\x2052\r\nConnect
|
||||
SF:ion:\x20close\r\n\r\n{\"kind\":\x20\"Status\",\x20\"status\":\x20\"Fail
|
||||
SF:ure\",\x20\"code\":\x20404}")%r(HTTPOptions,C7,"HTTP/1\.1\x20200\x20OK\
|
||||
SF:r\nServer:\x20Werkzeug/3\.1\.8\x20Python/3\.11\.2\r\nDate:\x20Fri,\x201
|
||||
SF:0\x20Apr\x202026\x2006:25:23\x20GMT\r\nContent-Type:\x20text/html;\x20c
|
||||
SF:harset=utf-8\r\nAllow:\x20GET,\x20HEAD,\x20OPTIONS\r\nContent-Length:\x
|
||||
SF:200\r\nConnection:\x20close\r\n\r\n")%r(RTSPRequest,16C,"<!DOCTYPE\x20H
|
||||
SF:TML>\n<html\x20lang=\"en\">\n\x20\x20\x20\x20<head>\n\x20\x20\x20\x20\x
|
||||
SF:20\x20\x20\x20<meta\x20charset=\"utf-8\">\n\x20\x20\x20\x20\x20\x20\x20
|
||||
SF:\x20<title>Error\x20response</title>\n\x20\x20\x20\x20</head>\n\x20\x20
|
||||
SF:\x20\x20<body>\n\x20\x20\x20\x20\x20\x20\x20\x20<h1>Error\x20response</
|
||||
SF:h1>\n\x20\x20\x20\x20\x20\x20\x20\x20<p>Error\x20code:\x20400</p>\n\x20
|
||||
SF:\x20\x20\x20\x20\x20\x20\x20<p>Message:\x20Bad\x20request\x20version\x2
|
||||
SF:0\('RTSP/1\.0'\)\.</p>\n\x20\x20\x20\x20\x20\x20\x20\x20<p>Error\x20cod
|
||||
SF:e\x20explanation:\x20400\x20-\x20Bad\x20request\x20syntax\x20or\x20unsu
|
||||
SF:pported\x20method\.</p>\n\x20\x20\x20\x20</body>\n</html>\n");
|
||||
==============NEXT SERVICE FINGERPRINT (SUBMIT INDIVIDUALLY)==============
|
||||
SF-Port9200-TCP:V=7.92%I=9%D=4/10%Time=69D897D3%P=x86_64-redhat-linux-gnu%
|
||||
SF:r(GetRequest,293,"HTTP/1\.0\x20200\x20OK\r\nServer:\x20elasticsearch\x2
|
||||
SF:0\r\nDate:\x20Fri,\x2010\x20Apr\x202026\x2006:25:23\x20GMT\r\nContent-T
|
||||
SF:ype:\x20application/json;\x20charset=UTF-8\r\nContent-Length:\x20477\r\
|
||||
SF:nX-elastic-product:\x20Elasticsearch\r\n\r\n{\"name\":\x20\"omega-decky
|
||||
SF:\",\x20\"cluster_name\":\x20\"elasticsearch\",\x20\"cluster_uuid\":\x20
|
||||
SF:\"xC3Pr9abTq2mNkOeLvXwYA\",\x20\"version\":\x20{\"number\":\x20\"7\.17\
|
||||
SF:.9\",\x20\"build_flavor\":\x20\"default\",\x20\"build_type\":\x20\"dock
|
||||
SF:er\",\x20\"build_hash\":\x20\"ef48222227ee6b9e70e502f0f0daa52435ee634d\
|
||||
SF:",\x20\"build_date\":\x20\"2023-01-31T05:34:43\.305517834Z\",\x20\"buil
|
||||
SF:d_snapshot\":\x20false,\x20\"lucene_version\":\x20\"8\.11\.1\",\x20\"mi
|
||||
SF:nimum_wire_compatibility_version\":\x20\"6\.8\.0\",\x20\"minimum_index_
|
||||
SF:compatibility_version\":\x20\"6\.0\.0-beta1\"},\x20\"tagline\":\x20\"Yo
|
||||
SF:u\x20Know,\x20for\x20Search\"}")%r(HTTPOptions,223,"HTTP/1\.0\x20501\x2
|
||||
SF:0Unsupported\x20method\x20\('OPTIONS'\)\r\nServer:\x20elasticsearch\x20
|
||||
SF:\r\nDate:\x20Fri,\x2010\x20Apr\x202026\x2006:25:23\x20GMT\r\nConnection
|
||||
SF::\x20close\r\nContent-Type:\x20text/html;charset=utf-8\r\nContent-Lengt
|
||||
SF:h:\x20360\r\n\r\n<!DOCTYPE\x20HTML>\n<html\x20lang=\"en\">\n\x20\x20\x2
|
||||
SF:0\x20<head>\n\x20\x20\x20\x20\x20\x20\x20\x20<meta\x20charset=\"utf-8\"
|
||||
SF:>\n\x20\x20\x20\x20\x20\x20\x20\x20<title>Error\x20response</title>\n\x
|
||||
SF:20\x20\x20\x20</head>\n\x20\x20\x20\x20<body>\n\x20\x20\x20\x20\x20\x20
|
||||
SF:\x20\x20<h1>Error\x20response</h1>\n\x20\x20\x20\x20\x20\x20\x20\x20<p>
|
||||
SF:Error\x20code:\x20501</p>\n\x20\x20\x20\x20\x20\x20\x20\x20<p>Message:\
|
||||
SF:x20Unsupported\x20method\x20\('OPTIONS'\)\.</p>\n\x20\x20\x20\x20\x20\x
|
||||
SF:20\x20\x20<p>Error\x20code\x20explanation:\x20501\x20-\x20Server\x20doe
|
||||
SF:s\x20not\x20support\x20this\x20operation\.</p>\n\x20\x20\x20\x20</body>
|
||||
SF:\n</html>\n")%r(RTSPRequest,16C,"<!DOCTYPE\x20HTML>\n<html\x20lang=\"en
|
||||
SF:\">\n\x20\x20\x20\x20<head>\n\x20\x20\x20\x20\x20\x20\x20\x20<meta\x20c
|
||||
SF:harset=\"utf-8\">\n\x20\x20\x20\x20\x20\x20\x20\x20<title>Error\x20resp
|
||||
SF:onse</title>\n\x20\x20\x20\x20</head>\n\x20\x20\x20\x20<body>\n\x20\x20
|
||||
SF:\x20\x20\x20\x20\x20\x20<h1>Error\x20response</h1>\n\x20\x20\x20\x20\x2
|
||||
SF:0\x20\x20\x20<p>Error\x20code:\x20400</p>\n\x20\x20\x20\x20\x20\x20\x20
|
||||
SF:\x20<p>Message:\x20Bad\x20request\x20version\x20\('RTSP/1\.0'\)\.</p>\n
|
||||
SF:\x20\x20\x20\x20\x20\x20\x20\x20<p>Error\x20code\x20explanation:\x20400
|
||||
SF:\x20-\x20Bad\x20request\x20syntax\x20or\x20unsupported\x20method\.</p>\
|
||||
SF:n\x20\x20\x20\x20</body>\n</html>\n");
|
||||
MAC Address: F2:5F:2F:EE:5B:96 (Unknown)
|
||||
Service Info: Hosts: omega-decky, omega-decky
|
||||
|
||||
Host script results:
|
||||
|_ms-sql-info: ERROR: Script execution failed (use -d to debug)
|
||||
| smb2-time:
|
||||
| date: 2026-04-10T06:33:53
|
||||
|_ start_date: 2026-04-10T06:33:53
|
||||
| smb-security-mode:
|
||||
| account_used: guest
|
||||
| authentication_level: user
|
||||
| challenge_response: supported
|
||||
|_ message_signing: disabled (dangerous, but default)
|
||||
| smb2-security-mode:
|
||||
| 2.0.2:
|
||||
|_ Message signing enabled but not required
|
||||
|_clock-skew: mean: -77663d15h16m57s, deviation: 109832d23h14m31s, median: -155327d06h33m54s
|
||||
|
||||
Service detection performed. Please report any incorrect results at https://nmap.org/submit/ .
|
||||
Nmap done: 1 IP address (1 host up) scanned in 784.93 seconds
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user