Compare commits
122 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| b1796520b8 | |||
| 0ff44d99c4 | |||
| 8c74b3fed8 | |||
| c3fefc60c0 | |||
| 7107ce4fdd | |||
| fa47068d6d | |||
| 07c761fc72 | |||
| 6821eaaa38 | |||
| 31aa86a2a9 | |||
| 87cb687610 | |||
| eb4059a887 | |||
| 415706036b | |||
| e2dd47255e | |||
| 3497aa23f8 | |||
| 8491fb2af7 | |||
| f61864282e | |||
| b2f7d6dda2 | |||
| eeedcc4781 | |||
| 5cf8cab5bd | |||
| 3ae9e19524 | |||
| 0ec4b00879 | |||
| b6b4b1b4d9 | |||
| 950a9d009c | |||
| 693542ef19 | |||
| d12f356ebe | |||
| 7b55d6a91f | |||
| aa077f60e6 | |||
| 094bd6e4f1 | |||
| 5b0b5eeac6 | |||
| 96a1f90ac3 | |||
| bfa06d78a7 | |||
| d16dcd34cc | |||
| dc2f4eb6d2 | |||
| 0f9a029269 | |||
| 70d1500096 | |||
| d0cb7acd10 | |||
| 0b58feee1e | |||
| 8be34e7284 | |||
| b56cef6298 | |||
| 0d203af8fb | |||
| 0468d0e603 | |||
| 7cfc2ba058 | |||
| da795d14f5 | |||
| d60c7e9110 | |||
| 83c99a5e65 | |||
| e438bb11ff | |||
| 8b4f75bf91 | |||
| d7e7386954 | |||
| 2100c64b91 | |||
| 74ebf59c6f | |||
| 53b49eacad | |||
| 0f11d23c75 | |||
| 311030bdaa | |||
| 1e05c66baa | |||
| 4082a6bf2a | |||
| 3485642b3e | |||
| 1240ae3829 | |||
| 2dd4d38dce | |||
| 7f862ce1f4 | |||
| 528fe97b59 | |||
| 3483d1bfce | |||
| 158423c155 | |||
| 087e91dca1 | |||
| 2de4cbc00f | |||
| 03fc465057 | |||
| b696b47feb | |||
| 6aae565541 | |||
| 214bd218a0 | |||
| 2afeee29ee | |||
| c8dee4c416 | |||
| f49f3c3b08 | |||
| c4bbb06710 | |||
| 4411cc4fff | |||
| 24a91887ef | |||
| 4e62b2919f | |||
| fa774156fe | |||
| 3b19f05c5b | |||
| fc3ecaacca | |||
| 08857093b5 | |||
| 62018b3e51 | |||
| 89e3a195a3 | |||
| f023ba0ac5 | |||
| a0570ef8f7 | |||
| facde1fef7 | |||
| 38106a2096 | |||
| a476afb311 | |||
| db4c7b9b72 | |||
| 3bc490b485 | |||
| dd6d70c46e | |||
| b1eaf42fef | |||
| fb9e5dcd10 | |||
| f95e71463f | |||
| 1088bff43d | |||
| cad68db2a2 | |||
| 50b10c8ac0 | |||
| a8b586ec92 | |||
| 632e1e4fa1 | |||
| 7e12816ebd | |||
| 8f64f8fb30 | |||
| b3ff3991c4 | |||
| a4ea387c98 | |||
| 68fbf74a23 | |||
| b857f778e9 | |||
| 31aa82b68c | |||
| de8eeb69e2 | |||
| f5970ce700 | |||
| ef1a4436ca | |||
| 981779cd9e | |||
| 3dcd2ae0b4 | |||
| 2750b867a3 | |||
| f6424add6c | |||
| 2dfd21d1d0 | |||
| 9d9ddc730b | |||
| 77ccee8331 | |||
| 175dcdf225 | |||
| 1549e9cd4f | |||
| 910e74b497 | |||
| 160c5c34b6 | |||
| a6638c0108 | |||
| 43c21d3ddc | |||
| b73c6c346e | |||
| b91ddc5bdf |
+11
-8
@@ -9,15 +9,19 @@
|
||||
.env.*
|
||||
!.env.example
|
||||
!.env.*.example
|
||||
aria-data/config/*.env
|
||||
!aria-data/config/*.env.example
|
||||
!aria-data/config/openclaw.env
|
||||
|
||||
# ── ARIAs Gedächtnis (nur per tar gesichert) ────
|
||||
aria-data/brain/
|
||||
# Privater User-Profile-Snippet (Tool-Stack, interne URLs) —
|
||||
# liegt jetzt in brain-import/ (frueher aria-data/config/USER.md).
|
||||
# USER.md.example ist Repo-Inhalt, USER.md lokal selbst anlegen.
|
||||
aria-data/brain-import/USER.md
|
||||
|
||||
# ── Stimmen (große Binärdateien) ─────────────────
|
||||
aria-data/voices/
|
||||
# ── ARIAs Gedächtnis (Vector-DB, Skills, Models) ──
|
||||
# Backup via Diagnostic → Gehirn-Export (tar.gz), nicht via Git.
|
||||
aria-data/brain/data/
|
||||
aria-data/brain/qdrant/
|
||||
|
||||
# Diagnostic-State (aktive Session etc.)
|
||||
aria-data/config/diag-state/
|
||||
|
||||
# ── Node / npm ──────────────────────────────────
|
||||
node_modules/
|
||||
@@ -46,7 +50,6 @@ desktop/dist/
|
||||
__pycache__/
|
||||
*.pyc
|
||||
*.pyo
|
||||
bridge/__pycache__/
|
||||
|
||||
# ── macOS ────────────────────────────────────────
|
||||
.DS_Store
|
||||
|
||||
Binary file not shown.
@@ -57,38 +57,44 @@ ARIA hat zwei Rollen:
|
||||
│ ┌─────────────────────────────────────────────────┐ │
|
||||
│ │ [proxy] claude-max-api-proxy Container │ │
|
||||
│ │ Claude Max Sub → lokale API │ │
|
||||
│ │ Port 3456, mit sed-Patches fuer │ │
|
||||
│ │ Tool-Permissions + Host-Binding │ │
|
||||
│ │ │ │
|
||||
│ │ [aria] OpenClaw Container (aria-core) │ │
|
||||
│ │ Gateway, Sessions, Memory, Skills │ │
|
||||
│ │ Liest BOOTSTRAP.md + AGENT.md │ │
|
||||
│ │ [qdrant] Vector-DB fuer ARIAs Gedaechtnis │ │
|
||||
│ │ Bind-Mount: aria-data/brain/qdrant/ │ │
|
||||
│ │ │ │
|
||||
│ │ [brain] ARIA Agent + Memory Container │ │
|
||||
│ │ FastAPI auf Port 8080 │ │
|
||||
│ │ Eigener Agent-Loop, Skills, │ │
|
||||
│ │ Vector-Memory, SSH-Zugriff zur VM │ │
|
||||
│ │ Bind-Mount: aria-data/brain/data/ │ │
|
||||
│ │ │ │
|
||||
│ │ [bridge] ARIA Voice Bridge Container │ │
|
||||
│ │ Wake-Word (lokales Mikro auf VM) │ │
|
||||
│ │ STT primaer remote (Gamebox-Whisper) │ │
|
||||
│ │ Fallback: lokales faster-whisper (CPU) │ │
|
||||
│ │ TTS via F5-TTS auf Gamebox │ │
|
||||
│ │ Bruecke: App <> RVS <> Bridge <> ARIA │ │
|
||||
│ │ Wake-Word, STT, TTS-Forwarding │ │
|
||||
│ │ Spricht mit Brain via HTTP/8080 │ │
|
||||
│ │ │ │
|
||||
│ │ [diagnostic] Selbstcheck-UI + Einstellungen │ │
|
||||
│ │ Gateway + RVS + Proxy Status │ │
|
||||
│ │ Chat, Sessions, Login, Logs │ │
|
||||
│ │ Port 3001 (im Netzwerk der Bridge) │ │
|
||||
│ │ Chat, Gehirn, Dateien, Logs │ │
|
||||
│ └──────────────────┬──────────────────────────────┘ │
|
||||
│ │ Volume Mount │
|
||||
│ ▼ │
|
||||
│ ┌─────────────────────────────────────────────────┐ │
|
||||
│ │ ./aria-data/ — Ein tar = vollstaendiges Backup │ │
|
||||
│ │ ./aria-data/ — Konfiguration + SSH-Keys │ │
|
||||
│ │ ./aria-data/brain/ — Vector-DB + Skills (gitignored)│
|
||||
│ │ Backup via Diagnostic → "Gehirn-Export" (tar.gz) │ │
|
||||
│ └─────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
> OpenClaw (frueher `aria-core`) ist abgerissen — ARIA laeuft jetzt mit eigenem Agent-Framework im
|
||||
> `aria-brain` Container. Eigene Tools, Skills, Vector-Memory statt Sessions. Letzter OpenClaw-Stand
|
||||
> ist als Git-Tag `v0.1.2.0` archiviert.
|
||||
|
||||
**Vier separate Deployments:**
|
||||
|
||||
| Was | Wo | Wie |
|
||||
|-----|----|-----|
|
||||
| RVS | Rechenzentrum | `cd rvs && docker compose up -d` |
|
||||
| ARIA Core | Debian 13 VM | `docker compose up -d && ./aria-setup.sh` |
|
||||
| ARIA Brain/Bridge/Diagnostic | Debian 13 VM | `./init.sh && ./aria-setup.sh && docker compose up -d` |
|
||||
| Gamebox-Stack (F5-TTS + Whisper) | Gamebox (GPU) | `cd xtts && docker compose up -d` |
|
||||
| Android App | Stefans Handy | APK installieren (Auto-Update via RVS) |
|
||||
|
||||
@@ -114,12 +120,12 @@ apt install -y docker.io docker-compose-plugin git curl jq
|
||||
git clone git@gitea.hackersoft.de:aria/aria.git ~/ARIA-AGENT
|
||||
cd ~/ARIA-AGENT
|
||||
cp .env.example .env
|
||||
bash init.sh # legt USER.md aus Vorlage an (idempotent, schadet nicht)
|
||||
```
|
||||
|
||||
`.env` Datei editieren (Details siehe `.env.example`):
|
||||
```bash
|
||||
# Gateway-Auth: Alle Services die mit aria-core reden brauchen diesen Token
|
||||
# Diagnostic, Bridge, App nutzen ihn fuer den WebSocket-Handshake
|
||||
# Auth-Token: Alle ARIA-Services nutzen ihn fuer interne Auth
|
||||
ARIA_AUTH_TOKEN= # openssl rand -hex 32
|
||||
|
||||
# RVS-Verbindung: Hostname + Port deines Rendezvous-Servers
|
||||
@@ -128,17 +134,18 @@ RVS_PORT=443
|
||||
RVS_TLS=true
|
||||
RVS_TLS_FALLBACK=true
|
||||
|
||||
# Pairing-Token: Verbindet App, Bridge, Diagnostic und XTTS im gleichen RVS-Room
|
||||
# Pairing-Token: Verbindet App, Bridge, Diagnostic und Gamebox im gleichen RVS-Room
|
||||
# MUSS auf allen Geraeten identisch sein (ARIA-VM, Gaming-PC, App)
|
||||
# Wird von generate-token.sh automatisch generiert und eingetragen
|
||||
RVS_TOKEN= # ./generate-token.sh
|
||||
|
||||
# Optional: SSH-Host des RVS-Servers fuer Auto-Update (z.B. root@aria-rvs)
|
||||
RVS_UPDATE_HOST=
|
||||
```
|
||||
|
||||
Alle anderen Einstellungen (Stimmen, Modi, Wake-Word, F5-TTS-Tuning) leben in
|
||||
`/shared/config/runtime.json` und werden ueber die Diagnostic-UI gepflegt — nicht
|
||||
in der `.env`. Komplett-Reset jederzeit moeglich via "🗑 ALLES löschen" im
|
||||
Diagnostic-Einstellungen-Tab.
|
||||
|
||||
**Zwei Tokens, zwei Zwecke:**
|
||||
- **ARIA_AUTH_TOKEN**: Authentifizierung am OpenClaw Gateway (aria-core). Wer diesen Token hat, kann ARIA Befehle geben.
|
||||
- **ARIA_AUTH_TOKEN**: Interner Auth-Token zwischen ARIAs Containern.
|
||||
- **RVS_TOKEN**: Pairing-Token fuer den Rendezvous-Server. Alle Geraete mit dem gleichen Token landen im gleichen "Room" und koennen kommunizieren. Die App bekommt diesen Token per QR-Code.
|
||||
|
||||
### 2. Claude CLI einloggen (Proxy-Auth)
|
||||
@@ -156,48 +163,24 @@ claude login
|
||||
**Wichtig:** Der Ordner `~/.claude/` (nicht `~/.config/claude/`!) wird als Volume
|
||||
in den Proxy gemountet. Die Credentials ueberleben Container-Restarts.
|
||||
|
||||
### 3. Voice Bridge konfigurieren
|
||||
### 3. SSH-Key fuer aria-wohnung generieren + RVS-Token + Container
|
||||
|
||||
```bash
|
||||
cp aria-data/config/aria.env.example aria-data/config/aria.env
|
||||
# Bei Bedarf anpassen (Whisper-Modell als Fallback, Sprache, Wake-Word)
|
||||
```
|
||||
# SSH-Key fuer den Zugriff von ARIA auf die VM (aria-wohnung)
|
||||
./aria-setup.sh
|
||||
|
||||
STT laeuft primaer auf der Gamebox (faster-whisper auf GPU), TTS ausschliesslich
|
||||
ueber F5-TTS auf der Gamebox — siehe Abschnitt "Gamebox-Stack — F5-TTS + Whisper"
|
||||
weiter unten.
|
||||
|
||||
### 5. RVS-Token generieren & Container starten
|
||||
|
||||
```bash
|
||||
# Token generieren — schreibt RVS_TOKEN in .env, zeigt QR-Code
|
||||
# RVS-Token generieren — schreibt RVS_TOKEN in .env, zeigt QR-Code
|
||||
./generate-token.sh
|
||||
|
||||
# Alle Container starten
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
### 6. ARIA Setup ausfuehren (einmalig!)
|
||||
`aria-setup.sh` generiert den ed25519-Key in `aria-data/ssh/` und traegt den
|
||||
Public-Key in `/root/.ssh/authorized_keys` ein (Script laeuft als root auf der VM
|
||||
aria-wohnung). Brain + Proxy nutzen den gleichen Key.
|
||||
|
||||
```bash
|
||||
./aria-setup.sh
|
||||
```
|
||||
|
||||
Dieses Script ist **essentiell** — es macht:
|
||||
1. Wartet bis aria-core laeuft
|
||||
2. Fixt Volume-Permissions (Docker → node User)
|
||||
3. Schreibt `openclaw.json` (Proxy-Provider, Model-Config, Timeout 900s)
|
||||
4. Setzt exec-approvals Wildcard (Tool-Ausfuehrung im headless-Modus)
|
||||
5. Generiert SSH-Key fuer VM-Zugriff (`aria-data/ssh/`)
|
||||
6. Fixt SSH-Permissions im Container
|
||||
7. Startet aria-core neu
|
||||
|
||||
**SSH-Key auf der VM eintragen** (wird vom Script angezeigt):
|
||||
```bash
|
||||
cat ~/ARIA-AGENT/aria-data/ssh/id_ed25519.pub >> /root/.ssh/authorized_keys
|
||||
```
|
||||
|
||||
### 7. App verbinden
|
||||
### 4. App verbinden
|
||||
|
||||
App oeffnen → QR-Code scannen → "ARIA, hoerst du mich?"
|
||||
|
||||
@@ -205,20 +188,21 @@ Der QR-Code enthaelt: Host, Port, Token, TLS-Flag — einmal scannen, nie wieder
|
||||
|
||||
Bestehendes Token nochmal als QR anzeigen: `./generate-token.sh show`
|
||||
|
||||
### 8. Diagnostic pruefen
|
||||
### 5. Diagnostic pruefen
|
||||
|
||||
```bash
|
||||
# Im Browser:
|
||||
http://<VM-IP>:3001
|
||||
```
|
||||
|
||||
Die Diagnostic-UI zeigt:
|
||||
- Gateway-Verbindung (gruener Punkt = OK)
|
||||
- RVS-Verbindung
|
||||
- Proxy-Status + Claude Login
|
||||
- Chat-Test (direkt an ARIA schreiben)
|
||||
- Session-Verwaltung
|
||||
- Container-Logs
|
||||
Die Diagnostic-UI hat sechs Top-Tabs:
|
||||
|
||||
- **Main** — Live-Chat-Test, Status (Brain / RVS / Proxy), End-to-End-Trace
|
||||
- **Gehirn** — Memory-Verwaltung (Vector-DB), Token/Call-Metrics (Subscription-Quota), Bootstrap & Migration, Komplett-Gehirn Export/Import
|
||||
- **Skills** — Liste mit Logs, Run, Activate/Deactivate, Export/Import als tar.gz
|
||||
- **Trigger** — Timer + Watcher anlegen/anzeigen/loeschen, Live-Variablen-Anzeige (disk_free, current_lat, hour_of_day, …), near(lat, lon, m) als Condition-Funktion
|
||||
- **Dateien** — alle Dateien aus `/shared/uploads/` mit Multi-Select, Bulk-Download (ZIP) + Bulk-Delete
|
||||
- **Einstellungen** — Reparatur (Container-Restart), Wipe, Sprachausgabe, Whisper, Sprachmodell, Runtime-Config, App-Onboarding (QR), Komplett-Reset
|
||||
|
||||
---
|
||||
|
||||
@@ -226,7 +210,7 @@ Die Diagnostic-UI zeigt:
|
||||
|
||||
Der Proxy ist das Herzstueck: Er macht aus der Claude Max Subscription eine lokale API.
|
||||
|
||||
**Ablauf:** `OpenClaw (aria-core) → HTTP → claude-max-api-proxy → Claude Code CLI (--print) → Anthropic API`
|
||||
**Ablauf:** `aria-brain → HTTP → claude-max-api-proxy → Claude Code CLI (--print) → Anthropic API`
|
||||
|
||||
Der Proxy-Container (`node:22-alpine`) installiert bei jedem Start:
|
||||
- `@anthropic-ai/claude-code` — Claude Code CLI
|
||||
@@ -247,52 +231,34 @@ Danach werden per `sed` vier Patches angewendet:
|
||||
|
||||
## Konfigurationsdateien
|
||||
|
||||
### aria-data/config/
|
||||
### aria-data/
|
||||
|
||||
| Datei | Zweck | Gemountet als |
|
||||
|-------|-------|---------------|
|
||||
| `BOOTSTRAP.md` | ARIAs System-Prompt: Identitaet, Sicherheitsregeln, Tool-Freigaben, Infrastruktur | `BOOTSTRAP.md` + `CLAUDE.md` im Workspace |
|
||||
| `AGENT.md` | ARIAs Persoenlichkeit, Tool-Freigaben, Arbeitsprinzipien | `AGENT.md` im Workspace |
|
||||
| `USER.md` | Stefans Praeferenzen, Kommunikationsstil | `USER.md` im Workspace |
|
||||
| `openclaw.env` | OpenClaw Container-Environment | `.env` im Workspace |
|
||||
| `aria.env` | Voice Bridge Konfiguration (Whisper, Stimmen) | `/config/aria.env` in Bridge |
|
||||
| Pfad | Zweck |
|
||||
|------|-------|
|
||||
| `.env` | Tokens (ARIA_AUTH_TOKEN, RVS_TOKEN, RVS_HOST) — minimal, alles andere lebt in der DB |
|
||||
| `aria-data/ssh/` | SSH-Key fuer den Zugriff auf aria-wohnung (Brain + Proxy teilen den Key) |
|
||||
| `aria-data/brain/qdrant/` | Vector-DB-Storage (Bind-Mount, gitignored) |
|
||||
| `aria-data/brain/data/` | Skills, Embedding-Modell-Cache (Bind-Mount, gitignored) |
|
||||
| `aria-data/brain-import/` | `AGENT.md`, `USER.md.example`, `TOOLING.md.example` — Quelle fuer den initialen Memory-Import in die Vector-DB |
|
||||
| `aria-data/config/diag-state/` | Diagnostic State (z.B. zuletzt aktive Session) |
|
||||
|
||||
**BOOTSTRAP.md** ist die wichtigste Datei — sie definiert:
|
||||
- Wer ARIA ist (Name, Rolle, Persoenlichkeit)
|
||||
- Sicherheitsregeln (kein ClawHub, Prompt Injection abwehren)
|
||||
- Tool-Freigaben (alle Claude Code Tools: WebFetch, Bash, etc.)
|
||||
- SSH-Zugriff auf aria-wohnung (VM)
|
||||
- Gedaechtnis-System
|
||||
### /shared/config/ (im aria-shared Volume)
|
||||
|
||||
### openclaw.json (via aria-setup.sh)
|
||||
| Datei | Zweck |
|
||||
|-------|-------|
|
||||
| `voice_config.json` | TTS-Engine, geclonte Stimme, Whisper-Modell, F5-TTS-Tuning |
|
||||
| `runtime.json` | Token + RVS-Override + Whisper-Sprache (durch Diagnostic gepflegt) |
|
||||
| `highlight_triggers.json` | Highlight-Trigger-Woerter |
|
||||
| `chat_backup.jsonl` | Append-only Chat-Log (Quelle fuer die Chat-History in Diagnostic) |
|
||||
|
||||
Wird von `aria-setup.sh` in den Container geschrieben:
|
||||
```json
|
||||
{
|
||||
"agents": {
|
||||
"defaults": {
|
||||
"model": { "primary": "proxy/claude-sonnet-4" },
|
||||
"timeoutSeconds": 900,
|
||||
"maxConcurrent": 4
|
||||
}
|
||||
},
|
||||
"models": {
|
||||
"providers": {
|
||||
"proxy": {
|
||||
"api": "openai-completions",
|
||||
"baseUrl": "http://proxy:3456/v1",
|
||||
"apiKey": "not-needed"
|
||||
}
|
||||
}
|
||||
},
|
||||
"tools": { "profile": "full" },
|
||||
"messages": { "ackReactionScope": "all" }
|
||||
}
|
||||
```
|
||||
`voice_config.json` + `highlight_triggers.json` lassen sich via Diagnostic →
|
||||
"Sprachausgabe" als Bundle exportieren/importieren.
|
||||
|
||||
**timeoutSeconds: 900** (15 Min) — notwendig weil jede Anfrage einen neuen
|
||||
`claude --print` Prozess spawnt (Cold Start). Bei Tool-Nutzung (WebFetch, Bash)
|
||||
braucht ARIA mehrere API-Roundtrips.
|
||||
### Backup / Reset
|
||||
|
||||
- **Gehirn-Backup**: Diagnostic → Gehirn → "⬇ Export herunterladen" — komplettes Brain (Memories + Skills + Qdrant-DB) als `.tar.gz`
|
||||
- **Stimmen-Backup**: pro Stimme ein `.tar.gz` (Diagnostic → Sprachausgabe → ⬇ pro Stimme); Import via Upload-Button
|
||||
- **Komplett-Reset**: Diagnostic → Einstellungen → "🗑 ALLES löschen" — Memory + Stimmen + Settings weg; `.env` + SSH-Keys bleiben
|
||||
|
||||
---
|
||||
|
||||
@@ -303,14 +269,14 @@ auf der Gamebox.
|
||||
|
||||
**Nachrichtenfluss:**
|
||||
```
|
||||
Text: App → RVS → Bridge → chat.send → aria-core
|
||||
Text: App → RVS → Bridge → aria-brain (HTTP)
|
||||
Audio: App → RVS → Bridge → stt_request (RVS) → whisper-bridge (Gamebox)
|
||||
→ stt_response → Bridge → chat.send → aria-core
|
||||
→ stt_response → Bridge → aria-brain
|
||||
Fallback bei Timeout: lokales faster-whisper (CPU)
|
||||
Datei: App → RVS → Bridge → /shared/uploads/ → chat.send (mit Pfad) → aria-core
|
||||
Datei: App → RVS → Bridge → /shared/uploads/ → aria-brain (mit Pfad)
|
||||
|
||||
aria-core → Antwort → Gateway → Diagnostic → RVS → App
|
||||
→ Bridge → xtts_request (RVS) → f5tts-bridge
|
||||
aria-brain → Antwort → Bridge → RVS → App
|
||||
→ xtts_request (RVS) → f5tts-bridge
|
||||
→ audio_pcm Stream → RVS → App AudioTrack
|
||||
```
|
||||
|
||||
@@ -342,34 +308,26 @@ aria-core → Antwort → Gateway → Diagnostic → RVS → App
|
||||
|
||||
## Diagnostic — Selbstcheck-UI und Einstellungen
|
||||
|
||||
Erreichbar unter `http://<VM-IP>:3001`. Teilt das Netzwerk mit aria-core.
|
||||
Erreichbar unter `http://<VM-IP>:3001`. Teilt das Netzwerk mit der Bridge.
|
||||
|
||||
### Features
|
||||
### Tabs
|
||||
|
||||
- **Status-Karten**: Gateway (Handshake), RVS (TLS-Fallback), Proxy (Auth)
|
||||
- **Disk-Voll Banner**: Rotes Overlay wenn die VM-Disk knapp wird, mit copy-baren Cleanup-Befehlen (safe + aggressiv)
|
||||
- **Chat-Test**: Nachrichten direkt an ARIA senden (Gateway oder via RVS), Vollbild-Modus
|
||||
- **"ARIA denkt..." Indikator**: Zeigt live was ARIA gerade tut (Denken, Tool, Schreiben)
|
||||
- **Abbrechen-Button**: Stoppt laufende Anfragen + doctor --fix
|
||||
- **Session-Verwaltung**: Sessions auflisten, wechseln, erstellen, loeschen, als Markdown exportieren (⬇ Button)
|
||||
- **Chat-History**: Wird beim Laden und Session-Wechsel angezeigt (read-only aus JSONL)
|
||||
- **TTS-Diagnose Tab**: Stimmen testen, Status pruefen, Fehler anzeigen
|
||||
- **Einstellungen**: TTS aktiv-Toggle, F5-TTS-Voice (gecloned), Betriebsmodi, Whisper-Modell (tiny…large-v3, Hot-Reload auf der Gamebox)
|
||||
- **Voice-Status**: Beim Wechsel der globalen Stimme zeigt ein Status-Text "Lade…" → "bereit (X.Ys)" — getriggert ueber `voice_preload`/`voice_ready`
|
||||
- **Voice Cloning**: Audio-Samples hochladen, Referenz-Text wird automatisch via Whisper transkribiert
|
||||
- **Main**: Brain/RVS/Proxy-Status, Chat-Test, "ARIA denkt..."-Indikator, End-to-End-Trace, Container-Logs
|
||||
- **Gehirn**: Memory-Browser (Vector-DB), Suche + Filter, Edit/Add/Delete, Konversation-Status mit Destillat-Trigger, **Token/Call-Metrics mit Subscription-Quota-Tracking**, Bootstrap & Migration (3 Wiederherstellungs-Wege), Gehirn-Export/Import (tar.gz). Info-Buttons (ℹ) ueberall mit Modal-Erklaerung.
|
||||
- **Skills**: Liste aller Skills mit Logs pro Run, Activate/Deactivate, Export/Import als tar.gz, "von ARIA"-Badge fuer selbst gebaute
|
||||
- **Trigger**: passive Aufweck-Quellen. **Timer** (einmalig, ISO-Timestamp oder via `in_seconds` als Server-Berechnung) + **Watcher** (recurring, mit Condition + Throttle). Liste aktiver Trigger + Logs pro Feuer-Event. Modal mit Type-Dropdown, Live-Anzeige aller verfuegbaren Condition-Variablen (`disk_free_gb`, `hour_of_day`, `current_lat/lon`, `last_user_message_ago_sec`, …) und Condition-Funktionen (`near(lat, lon, m)` fuer GPS-Geofencing). Sicherer Condition-Parser via Python `ast` (Whitelist, kein `eval`). Der System-Prompt enthaelt zusaetzlich einen `## Aktuelle Zeit`-Block (UTC + Europa/Berlin) damit ARIA Timer-Zeitpunkte korrekt setzen kann.
|
||||
- **Dateien**: Browser fuer `/shared/uploads/` mit Multi-Select + "Alle markieren" + Bulk-Download (ZIP bei 2+) + Bulk-Delete. Live-Update der Chat-Bubbles beim Delete.
|
||||
- **Einstellungen**: Reparatur (Container-Restart fuer Brain/Bridge/Qdrant), Komplett-Reset, Betriebsmodi, Sprachausgabe + Voice-Cloning + F5-TTS-Tuning + Voice Export/Import, Whisper, Sprachmodell (brainModel), Onboarding-QR, App-Cleanup
|
||||
|
||||
### Was zusaetzlich noch drin steckt
|
||||
|
||||
- **Disk-Voll Banner** mit copy-baren Cleanup-Befehlen (safe + aggressiv)
|
||||
- **Token/Call-Metrics**: pro Claude-Call ein Eintrag in `/data/metrics.jsonl` mit ts + Token-Schaetzung. Gehirn-Tab zeigt 1h/5h/24h/30d-Aggregat plus Progress-Bar gegen Plan-Limit (Pro / Max 5x / Max 20x / Custom). Warn-Schwelle 80%, kritisch 90%.
|
||||
- **Voice Cloning**: Audio-Samples hochladen, Whisper transkribiert den Ref-Text automatisch
|
||||
- **Voice Export/Import**: einzelne Stimmen als `.tar.gz` zwischen Gameboxen mitnehmen
|
||||
- **Settings Export/Import**: `voice_config.json` + `highlight_triggers.json` als JSON-Bundle
|
||||
- **Claude Login**: Browser-Terminal zum Einloggen in den Proxy
|
||||
- **Core Terminal**: Shell in aria-core (openclaw CLI)
|
||||
- **Container-Logs**: Echtzeit-Logs aller Container (gefiltert nach Tab + Pipeline)
|
||||
- **SSH Terminal**: Direkter SSH-Zugang zu aria-wohnung
|
||||
- **Watchdog**: Erkennt stuck Runs (2min Warnung → 5min doctor --fix → 8min Container-Restart)
|
||||
|
||||
### Session-Verwaltung
|
||||
|
||||
Die in der Diagnostic gewaehlte Session gilt **global** — Bridge und App nutzen
|
||||
dieselbe Session. Die aktive Session wird unter `/data/active-session` persistiert
|
||||
und ueberlebt Container-Restarts.
|
||||
|
||||
API-Endpoint fuer andere Services: `GET http://localhost:3001/api/session`
|
||||
- **SSH Terminal**: direkter SSH-Zugang zu aria-wohnung
|
||||
|
||||
---
|
||||
|
||||
@@ -397,10 +355,15 @@ API-Endpoint fuer andere Services: `GET http://localhost:3001/api/session`
|
||||
- **Mehrere Anhaenge**: Bilder + Dateien sammeln, Text hinzufuegen, dann zusammen senden
|
||||
- **Paste-Support**: Bilder aus Zwischenablage einfuegen (Diagnostic)
|
||||
- **Anhaenge**: Bridge speichert in Shared Volume, ARIA kann darauf zugreifen, Re-Download ueber RVS
|
||||
- **Einstellungen**: TTS-aktiv, F5-TTS-Voice, Pre-Roll-Buffer, Stille-Toleranz, Speicherort, Auto-Download, GPS
|
||||
- **Einstellungen**: TTS-aktiv, F5-TTS-Voice, Pre-Roll-Buffer, Stille-Toleranz, Speicherort, Auto-Download, GPS, Verbose-Logging
|
||||
- **Auto-Update**: Prueft beim Start + per Button auf neue Version, Download + Installation ueber RVS (FileProvider)
|
||||
- GPS-Position (optional)
|
||||
- GPS-Position (optional, mit Runtime-Permission-Request) — wird in jeden Chat/Audio-Payload mitgegeben und ist in Diagnostic als Debug-Block einblendbar
|
||||
- **GPS-Tracking (kontinuierlich)**: Toggle in Settings → Standort. Wenn aktiv, pushed die App alle ~15s bzw. ab 30m Bewegung ein `location_update` an die Bridge — Voraussetzung damit Watcher mit `near(lat, lon, m)` (z.B. Blitzer-Warner, Ankunft-Erinnerungen) ueberhaupt feuern koennen. ARIA selbst kann das Tracking via `request_location_tracking`-Tool an-/ausschalten und tut das automatisch wenn sie einen GPS-Watcher anlegt
|
||||
- QR-Code Scanner fuer Token-Pairing
|
||||
- **ARIA-Dateien empfangen**: Wenn ARIA eine PDF/Bild/Markdown/ZIP fuer dich erstellt (Marker `[FILE: /shared/uploads/aria_*]` in der Antwort), erscheint sie als eigene Anhang-Bubble. Tippen → wird via RVS geladen + mit Android-Intent-Picker geoeffnet (PDF-Viewer, Bildbetrachter, Standard-App). Inline-Bilder aus Markdown-``-Syntax werden direkt unter dem Text gerendert (PNG/JPG via Image, SVG via react-native-svg)
|
||||
- **Vollbild mit Pinch-Zoom**: Bilder im Vollbild-Modal sind pinch-zoombar (1x..5x), 1-Finger-Pan wenn gezoomt, Doppel-Tap toggelt 1x↔2.5x — alles ohne externe Lib
|
||||
- **Container-Restart-Buttons** (Settings → Reparatur): aria-bridge / aria-brain / aria-qdrant gezielt neu starten (jeweils ~5s Downtime). Geht ueber RVS → Bridge → Diagnostic → Docker-Socket-API.
|
||||
- **Cache-Cleanup**: Beim App-Start werden orphane TTS-WAVs aus dem Cache geraeumt. Plus Settings-Buttons "TTS-Cache leeren", "Update-Cache leeren", "Anhang-Cache leeren"
|
||||
|
||||
### Wake-Word (openWakeWord, on-device)
|
||||
|
||||
@@ -508,9 +471,9 @@ Der Update-Flow:
|
||||
|
||||
```
|
||||
App (Mikrofon) → AAC/MP4 Aufnahme → Base64 → RVS → Bridge
|
||||
Bridge: FFmpeg (16kHz PCM) → Whisper STT → Text → aria-core
|
||||
Bridge: FFmpeg (16kHz PCM) → Whisper STT → Text → aria-brain
|
||||
Bridge: STT-Ergebnis → RVS → App (Placeholder wird durch transkribierten Text ersetzt)
|
||||
aria-core → Antwort → Bridge → F5-TTS (Gaming-PC) → PCM-Stream → RVS → App
|
||||
aria-brain → Antwort → Bridge → F5-TTS (Gaming-PC) → PCM-Stream → RVS → App
|
||||
App: AudioTrack MODE_STREAM (nahtlos), Cache als WAV pro Message
|
||||
```
|
||||
|
||||
@@ -544,8 +507,8 @@ bleibt bis ARIA wirklich verstummt ist.
|
||||
|
||||
```
|
||||
App (Kamera/Dateimanager) → Base64 → RVS → Bridge
|
||||
Bridge: Speichert in /shared/uploads/ (Shared Volume, fuer aria-core sichtbar)
|
||||
Bridge: chat.send → "Stefan hat ein Bild geschickt: foto.jpg — liegt unter /shared/uploads/..."
|
||||
Bridge: Speichert in /shared/uploads/ (Shared Volume, fuer aria-brain sichtbar)
|
||||
Bridge: aria-brain → "Stefan hat ein Bild geschickt: foto.jpg — liegt unter /shared/uploads/..."
|
||||
ARIA: Kann Datei per Bash/Read-Tool oeffnen und analysieren
|
||||
```
|
||||
|
||||
@@ -575,34 +538,34 @@ ist in den App-Einstellungen konfigurierbar.
|
||||
|
||||
## Datenverzeichnis — aria-data/
|
||||
|
||||
Alles was ARIA weiss, kann und ist — liegt hier. Ein `tar` = vollstaendiges Backup.
|
||||
|
||||
```
|
||||
aria-data/
|
||||
├── brain/ ← ARIAs Gedaechtnis (OpenClaw Memory)
|
||||
│ ├── MEMORY.md ← Langzeitgedaechtnis
|
||||
│ └── memory/ ← Tageslogbuecher
|
||||
├── brain/ ← ARIAs Gehirn — Bind-Mount, GITIGNORED
|
||||
│ ├── qdrant/ ← Vector-DB Storage (Memories, Skills-Embeddings)
|
||||
│ └── data/ ← Skills, Embedding-Modell-Cache
|
||||
│ └── skills/<name>/ ← Pro Skill ein Ordner mit Manifest, Code, venv
|
||||
│
|
||||
├── skills/ ← ARIAs Faehigkeiten (selbst geschrieben!)
|
||||
├── brain-import/ ← Quell-Dateien fuer den initialen Import in die DB
|
||||
│ ├── AGENT.md ← Persoenlichkeit (wird Memory-Punkte vom Typ identity/rule)
|
||||
│ ├── BOOTSTRAP.md
|
||||
│ ├── TOOLING.md.example
|
||||
│ └── USER.md.example
|
||||
│
|
||||
├── config/
|
||||
│ ├── BOOTSTRAP.md ← System-Prompt (Identitaet, Regeln, Tools)
|
||||
│ ├── AGENT.md ← Persoenlichkeit & Arbeitsprinzipien
|
||||
│ ├── USER.md ← Stefans Praeferenzen
|
||||
│ ├── openclaw.env ← OpenClaw Environment
|
||||
│ ├── aria.env ← Voice Bridge Config
|
||||
│ └── diag-state/ ← Diagnostic persistenter State
|
||||
│
|
||||
│ (im Shared Volume /shared/config/):
|
||||
│ ├── voice_config.json ← TTS-Einstellungen (Stimme, Speed, F5-TTS-Tuning)
|
||||
│ └── chat_backup.jsonl ← Nachrichten-Backup (on-the-fly)
|
||||
│
|
||||
└── ssh/ ← SSH Keys fuer VM-Zugriff
|
||||
├── id_ed25519 ← Private Key (generiert von aria-setup.sh)
|
||||
├── id_ed25519.pub ← Public Key (muss in VM authorized_keys!)
|
||||
└── config ← SSH Config (Host aria-wohnung)
|
||||
└── ssh/ ← SSH Keys (Brain + Proxy teilen sich)
|
||||
├── id_ed25519
|
||||
├── id_ed25519.pub
|
||||
└── config ← Host aria-wohnung
|
||||
```
|
||||
|
||||
`aria-data/brain/` (Vector-DB + Skills) ist gitignored — Backup laeuft ueber
|
||||
den Gehirn-Export-Button in der Diagnostic, nicht ueber Git.
|
||||
|
||||
Settings im Shared Volume (`/shared/config/`): `voice_config.json`,
|
||||
`highlight_triggers.json`, `runtime.json`, `chat_backup.jsonl`.
|
||||
|
||||
**Backup:**
|
||||
```bash
|
||||
tar -czf aria-backup-$(date +%Y%m%d).tar.gz aria-data/
|
||||
@@ -749,16 +712,15 @@ dem Cache wiederverwendet.
|
||||
|
||||
## Docker Volumes
|
||||
|
||||
| Volume | Pfad im Container | Zweck |
|
||||
|--------|-------------------|-------|
|
||||
| `openclaw-config` | `/home/node/.openclaw` | OpenClaw Config, Sessions, Auth |
|
||||
| `claude-config` | `/home/node/.claude` | Claude Code Settings, Permissions |
|
||||
| `~/.claude` (bind) | `/root/.claude` (Proxy) | Claude CLI Credentials |
|
||||
| `./aria-data/ssh` (bind) | `/root/.ssh`, `/home/node/.ssh` | SSH Keys |
|
||||
| `./aria-data/brain` (bind) | `/home/node/.openclaw/workspace/memory` | Gedaechtnis |
|
||||
| `./aria-data/skills` (bind) | `/home/node/.openclaw/workspace/skills` | Skills |
|
||||
| `aria-shared` | `/shared` (Core + Bridge + Proxy + Diag) | Datei-Austausch, Config, Uploads |
|
||||
| `./aria-data/config/diag-state` (bind) | `/data` (Diagnostic) | Persistenter State (aktive Session) |
|
||||
| Volume / Bind | Pfad im Container | Zweck |
|
||||
|---------------|-------------------|-------|
|
||||
| `~/.claude` (bind) | `/root/.claude` (proxy) | Claude CLI Credentials |
|
||||
| `./aria-data/ssh` (bind) | `/root/.ssh` (proxy, brain) | SSH-Keys fuer aria-wohnung |
|
||||
| `./aria-data/brain/qdrant` (bind) | `/qdrant/storage` (qdrant) | Vector-DB Storage |
|
||||
| `./aria-data/brain/data` (bind) | `/data` (brain) | Skills + Embedding-Modell-Cache |
|
||||
| `./aria-data/brain` (bind) | `/brain` (diagnostic) | Brain-Export/Import-Endpoints |
|
||||
| `aria-shared` | `/shared` (brain, bridge, proxy, diagnostic) | Datei-Austausch, Config, Uploads |
|
||||
| `./aria-data/config/diag-state` (bind) | `/data` (diagnostic) | Diagnostic persistenter State |
|
||||
|
||||
---
|
||||
|
||||
@@ -787,22 +749,21 @@ docker compose down
|
||||
|
||||
# Einzelnen Container neu bauen
|
||||
docker compose up -d --build diagnostic
|
||||
docker compose up -d --build bridge
|
||||
docker compose up -d --build bridge brain
|
||||
|
||||
# Logs
|
||||
docker compose logs -f # alle
|
||||
docker compose logs -f aria # nur aria-core
|
||||
docker compose logs -f proxy # nur proxy
|
||||
docker compose logs -f brain # nur Agent + Memory
|
||||
docker compose logs -f qdrant # nur Vector-DB
|
||||
docker compose logs -f bridge # nur Voice-Bridge
|
||||
docker compose logs -f proxy # nur Claude-Proxy
|
||||
|
||||
# Setup wiederholen (nach Config-Aenderungen)
|
||||
./aria-setup.sh
|
||||
# SSH-Test (Brain zu aria-wohnung)
|
||||
docker exec aria-brain ssh aria-wohnung hostname
|
||||
|
||||
# SSH-Test
|
||||
docker exec aria-core ssh aria-wohnung hostname
|
||||
|
||||
# Tool-Test
|
||||
# Neue Session in Diagnostic anlegen, dann:
|
||||
# "Wie wird das Wetter in Bremen?"
|
||||
# Brain-API direkt testen
|
||||
docker exec aria-brain curl localhost:8080/health
|
||||
docker exec aria-brain curl localhost:8080/memory/stats
|
||||
```
|
||||
|
||||
---
|
||||
@@ -884,14 +845,37 @@ docker exec aria-core ssh aria-wohnung hostname
|
||||
- [x] Disk-Voll Banner in Diagnostic mit copy-baren Cleanup-Befehlen
|
||||
- [x] Wake-Word on-device via openWakeWord (ONNX Runtime, kein API-Key) + State-Icon
|
||||
|
||||
### Phase A — Refactor: OpenClaw raus, eigenes Brain rein
|
||||
|
||||
- [x] aria-brain Container-Skeleton (FastAPI, Qdrant, sentence-transformers)
|
||||
- [x] aria-core (OpenClaw) komplett abgerissen — Tag `v0.1.2.0` als Archiv
|
||||
- [x] Diagnostic: Gehirn-Tab (Memory Search/Filter, Add/Edit/Delete)
|
||||
- [x] Diagnostic: Gehirn-Export/Import als tar.gz
|
||||
- [x] Diagnostic: Datei-Manager (Liste, Suche, Download, Delete, Multi-Select + ZIP + Bulk-Delete)
|
||||
- [x] Diagnostic: Komplett-Reset (Wipe All)
|
||||
- [x] Diagnostic: Info-Buttons mit Modal-Erklaerungen (Status, Konversation, Memories, Bootstrap)
|
||||
- [x] App: Datei-Manager als Modal in den Einstellungen (mit Multi-Select + ZIP-Download)
|
||||
- [x] Voice Export/Import (einzelne Stimmen + F5/Whisper-Settings als Bundle)
|
||||
|
||||
### Phase B — Brain mit Memory + Loop + Skills
|
||||
|
||||
- [x] **Phase B Punkt 2:** Migration aus `aria-data/brain-import/` → atomare Memory-Punkte (Identity / Rule / Preference / Tool / Skill, idempotent ueber migration_key) + Bootstrap-Snapshot Export/Import (nur pinned)
|
||||
- [x] **Phase B Punkt 3:** Brain Conversation-Loop (Single-Chat UI, Rolling Window 50 Turns, Schwelle 60 → automatisches Destillat, manueller Trigger)
|
||||
- [x] **Phase B Punkt 4:** Skills-System (Python-only via local-venv, skill_create als Tool, dynamische run_<skill> Tools, Diagnostic Skills-Tab mit Logs/Toggle/Export/Import, skill_created Live-Notification in App+Diagnostic, harte Schwelle "pip → Skill")
|
||||
- [x] **Phase B Punkt 5:** Triggers-System (passive Aufweck-Quellen — Timer + Watcher mit safe Condition-Parser, GPS-near(), Diagnostic Trigger-Tab, kontinuierliches GPS-Tracking in der App fuer Use-Cases wie Blitzer-Warner)
|
||||
- [x] Sprachmodell-Setting wieder funktional (brainModel in runtime.json statt aria-core)
|
||||
- [x] App-Chat-Sync: kompletter Server-Sync bei Reconnect (Server = Source of Truth) + chat_cleared Live-Update. Lokal-only Bubbles (Skill-Notifications, laufende Voice ohne STT) bleiben erhalten.
|
||||
- [x] App: Chat-Suche mit Next/Prev Navigation statt Filter
|
||||
- [x] Token/Call-Metrics + Subscription-Quota-Tracking (Pro / Max 5x / Max 20x / Custom)
|
||||
- [x] Datei-Manager Multi-Select: Bulk-Download als ZIP + Bulk-Delete (Diagnostic + App)
|
||||
|
||||
### Phase 2 — ARIA wird produktiv
|
||||
|
||||
- [ ] Skills bauen (Bildgenerierung, etc.)
|
||||
- [ ] Erste Skills bauen lassen (yt-dlp, pdf-extract, etc. — durch normale Anfragen)
|
||||
- [ ] Gitea-Integration
|
||||
- [ ] VM einrichten (Desktop, Browser, Tools)
|
||||
- [ ] Heartbeat (periodische Selbst-Checks)
|
||||
- [ ] Lokales LLM als Waechter (Triage vor Claude-Call)
|
||||
- [ ] Auto-Compacting / Memory-Verwaltung
|
||||
|
||||
### Phase 3 — Erweiterungen
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@ import { createBottomTabNavigator } from '@react-navigation/bottom-tabs';
|
||||
import ChatScreen from './src/screens/ChatScreen';
|
||||
import SettingsScreen from './src/screens/SettingsScreen';
|
||||
import rvs from './src/services/rvs';
|
||||
import { initLogger } from './src/services/logger';
|
||||
|
||||
// --- Navigation ---
|
||||
|
||||
@@ -44,6 +45,10 @@ const TAB_ICONS: Record<string, { active: string; inactive: string }> = {
|
||||
const App: React.FC = () => {
|
||||
// Beim Start: gespeicherte RVS-Konfiguration laden und verbinden
|
||||
useEffect(() => {
|
||||
// Verbose-Logging-Setting laden BEVOR andere Module loslegen.
|
||||
// initLogger ist async aber blockt nichts — solange er noch laueft,
|
||||
// loggen wir normal (Default an), danach respektiert console.log das Setting.
|
||||
initLogger().catch(() => {});
|
||||
const initConnection = async () => {
|
||||
const config = await rvs.loadConfig();
|
||||
if (config) {
|
||||
|
||||
@@ -79,8 +79,8 @@ android {
|
||||
applicationId "com.ariacockpit"
|
||||
minSdkVersion rootProject.ext.minSdkVersion
|
||||
targetSdkVersion rootProject.ext.targetSdkVersion
|
||||
versionCode 902
|
||||
versionName "0.0.9.2"
|
||||
versionCode 10207
|
||||
versionName "0.1.2.7"
|
||||
// Fallback fuer Libraries mit Product Flavors
|
||||
missingDimensionStrategy 'react-native-camera', 'general'
|
||||
}
|
||||
|
||||
@@ -6,6 +6,9 @@
|
||||
<uses-permission android:name="android.permission.REQUEST_INSTALL_PACKAGES" />
|
||||
<!-- Anruf-State lesen damit TTS bei klingelndem Telefon pausiert -->
|
||||
<uses-permission android:name="android.permission.READ_PHONE_STATE" />
|
||||
<!-- Optional: GPS-Position der Frage anhaengen (nur wenn User in Settings aktiviert) -->
|
||||
<uses-permission android:name="android.permission.ACCESS_COARSE_LOCATION" />
|
||||
<uses-permission android:name="android.permission.ACCESS_FINE_LOCATION" />
|
||||
<!-- Foreground-Service damit TTS auch bei minimierter App weiterlaeuft.
|
||||
FOREGROUND_SERVICE_MICROPHONE ist Pflicht ab Android 14 wenn der
|
||||
Service waehrend des Backgrounds aufs Mikro zugreift (Wake-Word,
|
||||
|
||||
@@ -7,7 +7,7 @@ import com.facebook.react.uimanager.ViewManager
|
||||
|
||||
class ApkInstallerPackage : ReactPackage {
|
||||
override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
|
||||
return listOf(ApkInstallerModule(reactContext))
|
||||
return listOf(ApkInstallerModule(reactContext), FileOpenerModule(reactContext))
|
||||
}
|
||||
|
||||
override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
|
||||
|
||||
@@ -131,6 +131,58 @@ class AudioFocusModule(reactContext: ReactApplicationContext) : ReactContextBase
|
||||
promise.resolve(true)
|
||||
}
|
||||
|
||||
/** Den USAGE_MEDIA-Focus-Stack im System aufmischen, damit Spotify/YouTube
|
||||
* resumen wenn ein anderer Player (z.B. react-native-sound) seinen Focus
|
||||
* nicht ordnungsgemaess released hat. Strategie: kurz selbst USAGE_MEDIA
|
||||
* GAIN beanspruchen — das System invalidiert dabei den haengenden Stack-
|
||||
* Eintrag des anderen Players — und sofort wieder abandonen. Spotify
|
||||
* bekommt den Focus-Gain und resumed.
|
||||
*
|
||||
* Workaround fuer das react-native-sound-Bug: Sound.stop()/release()
|
||||
* laesst den AudioFocusRequest haengen.
|
||||
*/
|
||||
@ReactMethod
|
||||
fun kickReleaseMedia(promise: Promise) {
|
||||
val am = audioManager()
|
||||
if (am == null) {
|
||||
promise.resolve(false)
|
||||
return
|
||||
}
|
||||
// Async laufen lassen — wir wollen einen request, Pause, dann abandon.
|
||||
// Ohne Pause merkt das System (und damit Spotify) die kurze Owner-
|
||||
// Wechsel oft gar nicht. 250ms reicht erfahrungsgemaess fuer den
|
||||
// Focus-Stack-Refresh.
|
||||
Thread {
|
||||
try {
|
||||
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
|
||||
val attrs = AudioAttributes.Builder()
|
||||
.setUsage(AudioAttributes.USAGE_MEDIA)
|
||||
.setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
|
||||
.build()
|
||||
val kickListener = AudioManager.OnAudioFocusChangeListener { /* ignorieren */ }
|
||||
val kickReq = AudioFocusRequest.Builder(AudioManager.AUDIOFOCUS_GAIN)
|
||||
.setAudioAttributes(attrs)
|
||||
.setOnAudioFocusChangeListener(kickListener)
|
||||
.build()
|
||||
am.requestAudioFocus(kickReq)
|
||||
Thread.sleep(250)
|
||||
am.abandonAudioFocusRequest(kickReq)
|
||||
} else {
|
||||
val kickListener = AudioManager.OnAudioFocusChangeListener { /* ignorieren */ }
|
||||
@Suppress("DEPRECATION")
|
||||
am.requestAudioFocus(kickListener, AudioManager.STREAM_MUSIC, AudioManager.AUDIOFOCUS_GAIN)
|
||||
Thread.sleep(250)
|
||||
@Suppress("DEPRECATION")
|
||||
am.abandonAudioFocus(kickListener)
|
||||
}
|
||||
Log.i(TAG, "kickReleaseMedia: USAGE_MEDIA-Stack aufgemischt (250ms Pause)")
|
||||
} catch (e: Exception) {
|
||||
Log.w(TAG, "kickReleaseMedia failed: ${e.message}")
|
||||
}
|
||||
}.start()
|
||||
promise.resolve(true)
|
||||
}
|
||||
|
||||
private fun release() {
|
||||
val am = audioManager() ?: return
|
||||
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
|
||||
|
||||
@@ -0,0 +1,55 @@
|
||||
package com.ariacockpit
|
||||
|
||||
import android.content.Intent
|
||||
import android.net.Uri
|
||||
import android.os.Build
|
||||
import androidx.core.content.FileProvider
|
||||
import com.facebook.react.bridge.Promise
|
||||
import com.facebook.react.bridge.ReactApplicationContext
|
||||
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||
import com.facebook.react.bridge.ReactMethod
|
||||
import java.io.File
|
||||
|
||||
/**
|
||||
* Oeffnet eine beliebige Datei (PDF, Bild, Office-Doc, ...) mit der vom User
|
||||
* gewaehlten App via Android-Intent-Picker. Nutzt FileProvider damit auch
|
||||
* Android 7+ (content:// statt file://) das URI lesen darf.
|
||||
*
|
||||
* MIME-Type wird vom Caller bestimmt — App-Auswahl ist davon abhaengig (PDF
|
||||
* geht an PDF-Viewer, image/jpeg an Galerie, etc.).
|
||||
*/
|
||||
class FileOpenerModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
|
||||
override fun getName() = "FileOpener"
|
||||
|
||||
@ReactMethod
|
||||
fun open(filePath: String, mimeType: String, promise: Promise) {
|
||||
try {
|
||||
val cleanPath = filePath.removePrefix("file://")
|
||||
val file = File(cleanPath)
|
||||
if (!file.exists()) {
|
||||
promise.reject("FILE_NOT_FOUND", "Datei nicht gefunden: $cleanPath")
|
||||
return
|
||||
}
|
||||
val context = reactApplicationContext
|
||||
val uri: Uri = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
|
||||
FileProvider.getUriForFile(context, "${context.packageName}.fileprovider", file)
|
||||
} else {
|
||||
Uri.fromFile(file)
|
||||
}
|
||||
val safeMime = if (mimeType.isBlank()) "application/octet-stream" else mimeType
|
||||
val intent = Intent(Intent.ACTION_VIEW).apply {
|
||||
setDataAndType(uri, safeMime)
|
||||
addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
|
||||
addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION)
|
||||
}
|
||||
// Chooser zeigt Android-Auswahl falls mehrere Apps das MIME oeffnen koennen.
|
||||
val chooser = Intent.createChooser(intent, "Oeffnen mit").apply {
|
||||
addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
|
||||
}
|
||||
context.startActivity(chooser)
|
||||
promise.resolve(true)
|
||||
} catch (e: Exception) {
|
||||
promise.reject("OPEN_ERROR", e.message, e)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import android.media.AudioAttributes
|
||||
import android.media.AudioFormat
|
||||
import android.media.AudioManager
|
||||
import android.media.AudioTrack
|
||||
import android.os.Build
|
||||
import android.util.Base64
|
||||
import android.util.Log
|
||||
import com.facebook.react.bridge.Arguments
|
||||
@@ -107,7 +108,20 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
|
||||
.setTransferMode(AudioTrack.MODE_STREAM)
|
||||
.build()
|
||||
|
||||
// AudioTrack erstellen — play() wird erst aufgerufen wenn Pre-Roll erreicht.
|
||||
// Start-Threshold runterdrehen: Default ist bufferSize/2 (= 2s bei 4s
|
||||
// Buffer). AudioTrack startet sonst nicht bevor 2s im Puffer sind —
|
||||
// bei kurzen TTS-Antworten (3 Worte ~ 1.4s) bleibt pos auf 0 stehen.
|
||||
// 0.1s reicht damit AudioTrack sofort mit dem ersten Chunk anlaeuft.
|
||||
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
|
||||
try {
|
||||
val startFrames = (sampleRate / 10).coerceAtLeast(1) // 100ms
|
||||
newTrack.setStartThresholdInFrames(startFrames)
|
||||
Log.i(TAG, "Start-Threshold gesetzt: ${startFrames} frames (~100ms)")
|
||||
} catch (e: Exception) {
|
||||
Log.w(TAG, "setStartThresholdInFrames failed: ${e.message}")
|
||||
}
|
||||
}
|
||||
|
||||
track = newTrack
|
||||
queue.clear()
|
||||
writerShouldStop = false
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<paths>
|
||||
<cache-path name="cache" path="." />
|
||||
<files-path name="files" path="." />
|
||||
<external-path name="external" path="." />
|
||||
<external-files-path name="external_files" path="." />
|
||||
<external-cache-path name="external_cache" path="." />
|
||||
</paths>
|
||||
|
||||
+18
-17
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "aria-cockpit",
|
||||
"version": "0.0.9.2",
|
||||
"version": "0.1.2.7",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"android": "react-native run-android",
|
||||
@@ -10,31 +10,32 @@
|
||||
"build:apk": "cd android && ./gradlew assembleRelease"
|
||||
},
|
||||
"dependencies": {
|
||||
"@react-native-async-storage/async-storage": "^1.21.0",
|
||||
"@react-native-community/geolocation": "^3.2.1",
|
||||
"@react-navigation/bottom-tabs": "^6.5.11",
|
||||
"@react-navigation/native": "^6.1.9",
|
||||
"react": "18.2.0",
|
||||
"react-native": "0.73.4",
|
||||
"@react-navigation/native": "^6.1.9",
|
||||
"@react-navigation/bottom-tabs": "^6.5.11",
|
||||
"react-native-screens": "3.27.0",
|
||||
"react-native-safe-area-context": "^4.8.2",
|
||||
"react-native-audio-recorder-player": "^3.6.7",
|
||||
"react-native-camera-kit": "^13.0.0",
|
||||
"react-native-document-picker": "^9.1.1",
|
||||
"react-native-sound": "^0.11.2",
|
||||
"@react-native-community/geolocation": "^3.2.1",
|
||||
"react-native-fs": "^2.20.0",
|
||||
"react-native-image-picker": "^7.1.0",
|
||||
"react-native-permissions": "^4.1.4",
|
||||
"react-native-camera-kit": "^13.0.0",
|
||||
"@react-native-async-storage/async-storage": "^1.21.0",
|
||||
"react-native-fs": "^2.20.0",
|
||||
"react-native-audio-recorder-player": "^3.6.7"
|
||||
"react-native-safe-area-context": "^4.8.2",
|
||||
"react-native-screens": "3.27.0",
|
||||
"react-native-sound": "^0.11.2",
|
||||
"react-native-svg": "^14.1.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"typescript": "^5.3.3",
|
||||
"@react-native/eslint-config": "^0.73.2",
|
||||
"@react-native/metro-config": "^0.73.5",
|
||||
"@react-native/typescript-config": "^0.73.1",
|
||||
"@types/jest": "^29.5.11",
|
||||
"@types/react": "^18.2.48",
|
||||
"@types/react-native": "^0.73.0",
|
||||
"@react-native/eslint-config": "^0.73.2",
|
||||
"@react-native/typescript-config": "^0.73.1",
|
||||
"@react-native/metro-config": "^0.73.5",
|
||||
"metro-react-native-babel-preset": "^0.77.0",
|
||||
"jest": "^29.7.0",
|
||||
"@types/jest": "^29.5.11"
|
||||
"metro-react-native-babel-preset": "^0.77.0",
|
||||
"typescript": "^5.3.3"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,26 +1,88 @@
|
||||
/**
|
||||
* MessageText — selektierbarer Chat-Text mit Android-Auto-Linkifizierung.
|
||||
* MessageText — selektierbarer Chat-Text mit Android-Auto-Linkifizierung,
|
||||
* plus Inline-Image-Rendering wenn der Text Bild-URLs enthaelt.
|
||||
*
|
||||
* Wir nutzen Androids dataDetectorType="all" (System macht Phone/URL/Email
|
||||
* - Markdown-Syntax `` und plain `https://...image.png` werden
|
||||
* erkannt — die URL bleibt im Text sichtbar (klickbar via Linkify),
|
||||
* zusaetzlich wird das Bild als <Image> oder <SvgUri> drunter gerendert.
|
||||
* - Wir nutzen Androids dataDetectorType="all" (System macht Phone/URL/Email
|
||||
* automatisch klickbar) und ein einzelnes <Text selectable> ohne nested
|
||||
* <Text> mit eigenem onPress. Nested Text mit onPress fingen die Long-Press-
|
||||
* <Text> mit eigenem onPress — Nested Text mit onPress fing die Long-Press-
|
||||
* Geste ab, damit war Markieren+Kopieren defekt.
|
||||
*/
|
||||
|
||||
import React from 'react';
|
||||
import { Text, TextStyle, StyleProp } from 'react-native';
|
||||
import React, { useEffect, useState } from 'react';
|
||||
import { View, Text, Image, TextStyle, StyleProp } from 'react-native';
|
||||
import { SvgUri } from 'react-native-svg';
|
||||
|
||||
interface Props {
|
||||
text: string;
|
||||
style?: StyleProp<TextStyle>;
|
||||
}
|
||||
|
||||
// Bild-URL-Pattern: http(s)://... endend auf gaengige Bild-Endungen.
|
||||
const IMG_URL_RE = /https?:\/\/[^\s)<"']+\.(?:jpe?g|png|gif|webp|bmp|ico|svg)(?:\?[^\s)<"']*)?/gi;
|
||||
|
||||
function extractImageUrls(text: string): string[] {
|
||||
const urls = new Set<string>();
|
||||
const matches = text.match(IMG_URL_RE);
|
||||
if (matches) matches.forEach(u => urls.add(u));
|
||||
return Array.from(urls);
|
||||
}
|
||||
|
||||
const SVG_RE = /\.svg(?:\?|$)/i;
|
||||
|
||||
/** Image mit dynamischer Aspect-Ratio aus echten Bilddimensionen.
|
||||
* SVGs werden ueber react-native-svg gerendert (kein Image.getSize). */
|
||||
const InlineImage: React.FC<{ uri: string }> = ({ uri }) => {
|
||||
const isSvg = SVG_RE.test(uri);
|
||||
const [aspectRatio, setAspectRatio] = useState<number>(1);
|
||||
const [failed, setFailed] = useState(false);
|
||||
useEffect(() => {
|
||||
if (isSvg) return; // Image.getSize geht fuer SVG nicht
|
||||
let cancelled = false;
|
||||
Image.getSize(
|
||||
uri,
|
||||
(w, h) => { if (!cancelled && w > 0 && h > 0) setAspectRatio(Math.max(0.5, Math.min(2.5, w / h))); },
|
||||
() => { if (!cancelled) setFailed(true); },
|
||||
);
|
||||
return () => { cancelled = true; };
|
||||
}, [uri, isSvg]);
|
||||
if (failed) return null;
|
||||
if (isSvg) {
|
||||
return (
|
||||
<View style={{ marginTop: 8, width: 260, height: 260, backgroundColor: '#0D0D1A', borderRadius: 8, alignItems: 'center', justifyContent: 'center' }}>
|
||||
<SvgUri uri={uri} width="100%" height="100%" onError={() => setFailed(true)} />
|
||||
</View>
|
||||
);
|
||||
}
|
||||
return (
|
||||
<Image
|
||||
source={{ uri }}
|
||||
style={{ width: 260, aspectRatio, borderRadius: 8, marginTop: 8, backgroundColor: '#0D0D1A' }}
|
||||
resizeMode="cover"
|
||||
onError={() => setFailed(true)}
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
const MessageText: React.FC<Props> = ({ text, style }) => {
|
||||
const imageUrls = extractImageUrls(text || '');
|
||||
if (imageUrls.length === 0) {
|
||||
return (
|
||||
<Text style={style} selectable dataDetectorType="all">
|
||||
{text}
|
||||
</Text>
|
||||
);
|
||||
}
|
||||
return (
|
||||
<View>
|
||||
<Text style={style} selectable dataDetectorType="all">
|
||||
{text}
|
||||
</Text>
|
||||
{imageUrls.map(u => <InlineImage key={u} uri={u} />)}
|
||||
</View>
|
||||
);
|
||||
};
|
||||
|
||||
export default MessageText;
|
||||
|
||||
@@ -0,0 +1,224 @@
|
||||
/**
|
||||
* ZoomableImage — Pinch-to-Zoom + Pan fuers Vollbild-Modal.
|
||||
*
|
||||
* Reine RN-Implementation, ohne react-native-gesture-handler.
|
||||
*
|
||||
* - 2 Finger: Pinch (Zoom 1x..5x) + simultaner Pan via Focal-Punkt
|
||||
* - 1 Finger: Pan wenn schon gezoomt (>1.02x)
|
||||
* - Doppel-Tap (<300ms zw. zwei Single-Taps): Toggle 1x ↔ 2.5x
|
||||
*
|
||||
* Implementierungs-Hinweise zur alten Version (warum's nicht ging):
|
||||
* - `gestureState.numberActiveTouches` ist nicht zuverlaessig direkt
|
||||
* nach onPanResponderGrant. Wir lesen Finger-Anzahl jetzt
|
||||
* ausschliesslich aus `e.nativeEvent.touches.length`.
|
||||
* - Beim Wechsel von 2 → 1 Fingern bleib die Pinch-Referenz haengen.
|
||||
* Jetzt: bei jedem Finger-Wechsel re-snapshotten wir die Geste.
|
||||
* - Animated.Image bekommt jetzt pointerEvents="none" damit der View
|
||||
* GARANTIERT die Touches abbekommt.
|
||||
* - useNativeDriver ist bewusst AUS — sonst koennen wir setValue()
|
||||
* nicht synchron mit dem Pan-Responder zusammen nutzen.
|
||||
*/
|
||||
|
||||
import React, { useMemo, useRef } from 'react';
|
||||
import {
|
||||
Animated,
|
||||
PanResponder,
|
||||
GestureResponderEvent,
|
||||
ImageStyle,
|
||||
StyleProp,
|
||||
StyleSheet,
|
||||
View,
|
||||
} from 'react-native';
|
||||
|
||||
interface Props {
|
||||
uri: string;
|
||||
containerWidth: number;
|
||||
containerHeight: number;
|
||||
style?: StyleProp<ImageStyle>;
|
||||
}
|
||||
|
||||
const MIN_SCALE = 1;
|
||||
const MAX_SCALE = 5;
|
||||
const DOUBLE_TAP_MS = 300;
|
||||
const DOUBLE_TAP_DIST = 30; // Bewegung max. damit ein Tap als Tap gilt
|
||||
const PAN_SLOP_AT_SCALE_1 = 4; // Mikro-Movement nicht als Pan werten
|
||||
|
||||
const ZoomableImage: React.FC<Props> = ({ uri, containerWidth, containerHeight, style }) => {
|
||||
// Animated-Werte fuer die Render-Transformation
|
||||
const scale = useRef(new Animated.Value(1)).current;
|
||||
const tx = useRef(new Animated.Value(0)).current;
|
||||
const ty = useRef(new Animated.Value(0)).current;
|
||||
|
||||
// Logische Zustaende — wir lesen Animated.Value nicht zurueck (waere async)
|
||||
const view = useRef({ scale: 1, x: 0, y: 0 }).current;
|
||||
|
||||
// Geste-Snapshot: was war zu Beginn dieser Geste-Phase
|
||||
const gesture = useRef({
|
||||
fingers: 0, // aktuelle Finger-Anzahl
|
||||
startScale: 1,
|
||||
startX: 0,
|
||||
startY: 0,
|
||||
startDist: 0, // Pinch-Referenz-Distanz
|
||||
startFocalX: 0,
|
||||
startFocalY: 0,
|
||||
movedSinceTouch: 0, // fuer Tap-Erkennung
|
||||
touchStartedAt: 0,
|
||||
touchStartX: 0,
|
||||
touchStartY: 0,
|
||||
}).current;
|
||||
|
||||
// Doppel-Tap
|
||||
const lastTap = useRef({ at: 0, x: 0, y: 0 });
|
||||
|
||||
const clamp = (v: number, lo: number, hi: number) => Math.max(lo, Math.min(hi, v));
|
||||
|
||||
const applyClamped = (s: number, x: number, y: number) => {
|
||||
const ns = clamp(s, MIN_SCALE, MAX_SCALE);
|
||||
// Translation auf das verfuegbare Volumen begrenzen
|
||||
const maxX = Math.max(0, (containerWidth * ns - containerWidth) / 2);
|
||||
const maxY = Math.max(0, (containerHeight * ns - containerHeight) / 2);
|
||||
const nx = clamp(x, -maxX, maxX);
|
||||
const ny = clamp(y, -maxY, maxY);
|
||||
view.scale = ns;
|
||||
view.x = nx;
|
||||
view.y = ny;
|
||||
scale.setValue(ns);
|
||||
tx.setValue(nx);
|
||||
ty.setValue(ny);
|
||||
};
|
||||
|
||||
const distance = (touches: any[]) =>
|
||||
Math.hypot(touches[0].pageX - touches[1].pageX, touches[0].pageY - touches[1].pageY);
|
||||
|
||||
const focal = (touches: any[]) => ({
|
||||
x: (touches[0].pageX + touches[1].pageX) / 2,
|
||||
y: (touches[0].pageY + touches[1].pageY) / 2,
|
||||
});
|
||||
|
||||
// Snapshot vor jedem Phasenwechsel (1↔2 Finger) — verhindert Spruenge
|
||||
const snapshot = (touches: any[]) => {
|
||||
gesture.startScale = view.scale;
|
||||
gesture.startX = view.x;
|
||||
gesture.startY = view.y;
|
||||
if (touches.length >= 2) {
|
||||
gesture.startDist = distance(touches);
|
||||
const f = focal(touches);
|
||||
gesture.startFocalX = f.x;
|
||||
gesture.startFocalY = f.y;
|
||||
} else if (touches.length === 1) {
|
||||
gesture.startDist = 0;
|
||||
gesture.startFocalX = touches[0].pageX;
|
||||
gesture.startFocalY = touches[0].pageY;
|
||||
}
|
||||
};
|
||||
|
||||
const responder = useMemo(
|
||||
() =>
|
||||
PanResponder.create({
|
||||
onStartShouldSetPanResponder: () => true,
|
||||
onStartShouldSetPanResponderCapture: () => true,
|
||||
onMoveShouldSetPanResponder: () => true,
|
||||
onMoveShouldSetPanResponderCapture: () => true,
|
||||
|
||||
onPanResponderGrant: (e: GestureResponderEvent) => {
|
||||
const touches = e.nativeEvent.touches as any[];
|
||||
gesture.fingers = touches.length;
|
||||
gesture.movedSinceTouch = 0;
|
||||
gesture.touchStartedAt = Date.now();
|
||||
gesture.touchStartX = touches[0]?.pageX ?? 0;
|
||||
gesture.touchStartY = touches[0]?.pageY ?? 0;
|
||||
snapshot(touches);
|
||||
},
|
||||
|
||||
onPanResponderMove: (e: GestureResponderEvent, _gs) => {
|
||||
const touches = e.nativeEvent.touches as any[];
|
||||
|
||||
// Phasenwechsel? → Re-Snapshot, damit nicht gesprungen wird
|
||||
if (touches.length !== gesture.fingers) {
|
||||
gesture.fingers = touches.length;
|
||||
snapshot(touches);
|
||||
return;
|
||||
}
|
||||
|
||||
gesture.movedSinceTouch += 1;
|
||||
|
||||
if (touches.length >= 2) {
|
||||
// Pinch + Pan via Focal
|
||||
const d = distance(touches);
|
||||
if (gesture.startDist === 0) {
|
||||
// Sicherheitsnetz falls Snapshot gemissed wurde
|
||||
snapshot(touches);
|
||||
return;
|
||||
}
|
||||
const factor = d / gesture.startDist;
|
||||
const f = focal(touches);
|
||||
const newScale = clamp(gesture.startScale * factor, MIN_SCALE, MAX_SCALE);
|
||||
// Focal-basierter Pan: zoomt um den Mittelpunkt der zwei Finger
|
||||
const newX = gesture.startX + (f.x - gesture.startFocalX);
|
||||
const newY = gesture.startY + (f.y - gesture.startFocalY);
|
||||
applyClamped(newScale, newX, newY);
|
||||
} else if (touches.length === 1 && view.scale > 1.02) {
|
||||
const dx = touches[0].pageX - gesture.startFocalX;
|
||||
const dy = touches[0].pageY - gesture.startFocalY;
|
||||
if (Math.abs(dx) < PAN_SLOP_AT_SCALE_1 && Math.abs(dy) < PAN_SLOP_AT_SCALE_1) return;
|
||||
applyClamped(view.scale, gesture.startX + dx, gesture.startY + dy);
|
||||
}
|
||||
},
|
||||
|
||||
onPanResponderRelease: (e: GestureResponderEvent) => {
|
||||
const elapsed = Date.now() - gesture.touchStartedAt;
|
||||
const dx = (e.nativeEvent.changedTouches?.[0]?.pageX ?? gesture.touchStartX) - gesture.touchStartX;
|
||||
const dy = (e.nativeEvent.changedTouches?.[0]?.pageY ?? gesture.touchStartY) - gesture.touchStartY;
|
||||
const wasTap =
|
||||
elapsed < 280 &&
|
||||
Math.abs(dx) < DOUBLE_TAP_DIST &&
|
||||
Math.abs(dy) < DOUBLE_TAP_DIST;
|
||||
if (wasTap) {
|
||||
const now = Date.now();
|
||||
if (now - lastTap.current.at < DOUBLE_TAP_MS) {
|
||||
// Doppel-Tap → Zoom-Toggle
|
||||
if (view.scale > 1.1) {
|
||||
applyClamped(1, 0, 0);
|
||||
} else {
|
||||
applyClamped(2.5, 0, 0);
|
||||
}
|
||||
lastTap.current = { at: 0, x: 0, y: 0 };
|
||||
} else {
|
||||
lastTap.current = { at: now, x: gesture.touchStartX, y: gesture.touchStartY };
|
||||
}
|
||||
}
|
||||
gesture.fingers = 0;
|
||||
gesture.startDist = 0;
|
||||
},
|
||||
|
||||
onPanResponderTerminate: () => {
|
||||
gesture.fingers = 0;
|
||||
gesture.startDist = 0;
|
||||
},
|
||||
}),
|
||||
[],
|
||||
);
|
||||
|
||||
return (
|
||||
<View
|
||||
style={StyleSheet.absoluteFill}
|
||||
collapsable={false}
|
||||
{...responder.panHandlers}
|
||||
>
|
||||
<Animated.View pointerEvents="none" style={StyleSheet.absoluteFill}>
|
||||
<Animated.Image
|
||||
source={{ uri }}
|
||||
style={[
|
||||
style,
|
||||
{
|
||||
transform: [{ translateX: tx }, { translateY: ty }, { scale }],
|
||||
},
|
||||
]}
|
||||
resizeMode="contain"
|
||||
/>
|
||||
</Animated.View>
|
||||
</View>
|
||||
);
|
||||
};
|
||||
|
||||
export default ZoomableImage;
|
||||
@@ -20,9 +20,14 @@ import {
|
||||
Modal,
|
||||
ToastAndroid,
|
||||
AppState,
|
||||
NativeModules,
|
||||
Alert,
|
||||
} from 'react-native';
|
||||
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||
import RNFS from 'react-native-fs';
|
||||
import { SvgUri } from 'react-native-svg';
|
||||
import { Dimensions } from 'react-native';
|
||||
import ZoomableImage from '../components/ZoomableImage';
|
||||
import rvs, { RVSMessage, ConnectionState } from '../services/rvs';
|
||||
import audioService from '../services/audio';
|
||||
import wakeWordService from '../services/wakeword';
|
||||
@@ -49,6 +54,7 @@ interface Attachment {
|
||||
uri?: string; // Lokaler Pfad (file://) fuer Anzeige
|
||||
mimeType?: string;
|
||||
serverPath?: string; // Pfad auf dem Server (/shared/uploads/...) fuer Re-Download
|
||||
deleted?: boolean; // Datei wurde nachtraeglich geloescht (Diagnostic-Manager)
|
||||
}
|
||||
|
||||
interface ChatMessage {
|
||||
@@ -65,6 +71,22 @@ interface ChatMessage {
|
||||
* gespiegelt damit wir die EXAKT richtige Placeholder-Bubble ersetzen,
|
||||
* auch wenn mehrere Aufnahmen parallel offen sind. */
|
||||
audioRequestId?: string;
|
||||
/** Skill-Created-Bubble: ARIA hat einen neuen Skill angelegt */
|
||||
skillCreated?: {
|
||||
name: string;
|
||||
description: string;
|
||||
execution: string;
|
||||
active: boolean;
|
||||
setupError?: string;
|
||||
};
|
||||
/** Trigger-Created-Bubble: ARIA hat einen neuen Trigger angelegt */
|
||||
triggerCreated?: {
|
||||
name: string;
|
||||
type: 'timer' | 'watcher' | string;
|
||||
message: string;
|
||||
fires_at?: string;
|
||||
condition?: string;
|
||||
};
|
||||
}
|
||||
|
||||
// --- Konstanten ---
|
||||
@@ -80,6 +102,23 @@ const capMessages = (msgs: ChatMessage[]): ChatMessage[] =>
|
||||
const DEFAULT_ATTACHMENT_DIR = `${RNFS.DocumentDirectoryPath}/chat_attachments`;
|
||||
const STORAGE_PATH_KEY = 'aria_attachment_storage_path';
|
||||
|
||||
const { FileOpener } = NativeModules as {
|
||||
FileOpener?: { open: (filePath: string, mimeType: string) => Promise<boolean> };
|
||||
};
|
||||
|
||||
/** Datei mit Android-Intent-Picker oeffnen (System waehlt App nach MIME). */
|
||||
async function openFileWithIntent(filePath: string, mimeType: string): Promise<void> {
|
||||
if (!FileOpener) {
|
||||
ToastAndroid.show('FileOpener Native Module fehlt', ToastAndroid.SHORT);
|
||||
return;
|
||||
}
|
||||
try {
|
||||
await FileOpener.open(filePath, mimeType || 'application/octet-stream');
|
||||
} catch (err: any) {
|
||||
ToastAndroid.show(`Oeffnen fehlgeschlagen: ${err?.message || err}`, ToastAndroid.LONG);
|
||||
}
|
||||
}
|
||||
|
||||
/** Image-Vorschau in der Chat-Bubble. Misst die echte Bild-Dimension via
|
||||
* Image.getSize + setzt aspectRatio dynamisch — dadurch passt sich die
|
||||
* Bubble ans Bild an (kein "Strich" mehr bei breiten oder hohen Bildern). */
|
||||
@@ -95,7 +134,9 @@ const ChatImage: React.FC<{
|
||||
onError: () => void;
|
||||
}> = ({ uri, onPress, onError }) => {
|
||||
const [aspectRatio, setAspectRatio] = useState<number>(4 / 3);
|
||||
const isSvg = /\.svg(?:\?|$)/i.test(uri);
|
||||
useEffect(() => {
|
||||
if (isSvg) return; // SvgUri hat kein getSize
|
||||
let cancelled = false;
|
||||
Image.getSize(uri, (w, h) => {
|
||||
if (!cancelled && w > 0 && h > 0) {
|
||||
@@ -106,7 +147,16 @@ const ChatImage: React.FC<{
|
||||
}
|
||||
}, () => {});
|
||||
return () => { cancelled = true; };
|
||||
}, [uri]);
|
||||
}, [uri, isSvg]);
|
||||
if (isSvg) {
|
||||
return (
|
||||
<TouchableOpacity onPress={onPress} activeOpacity={0.8}>
|
||||
<View style={[CHAT_IMAGE_STYLE, { height: 260, alignItems: 'center', justifyContent: 'center' }]}>
|
||||
<SvgUri uri={uri} width="100%" height="100%" onError={onError} />
|
||||
</View>
|
||||
</TouchableOpacity>
|
||||
);
|
||||
}
|
||||
return (
|
||||
<TouchableOpacity onPress={onPress} activeOpacity={0.8}>
|
||||
<Image
|
||||
@@ -159,6 +209,7 @@ const ChatScreen: React.FC = () => {
|
||||
const [fullscreenImage, setFullscreenImage] = useState<string | null>(null);
|
||||
const [searchQuery, setSearchQuery] = useState('');
|
||||
const [searchVisible, setSearchVisible] = useState(false);
|
||||
const [searchIndex, setSearchIndex] = useState(0); // welcher Treffer aktiv ist
|
||||
const [pendingAttachments, setPendingAttachments] = useState<{file: any, isPhoto: boolean}[]>([]);
|
||||
const [agentActivity, setAgentActivity] = useState<{activity: string, tool: string}>({activity: 'idle', tool: ''});
|
||||
// Service-Status (Gamebox: F5-TTS / Whisper Lade-Status) + Banner-Sichtbarkeit
|
||||
@@ -179,6 +230,10 @@ const ChatScreen: React.FC = () => {
|
||||
|
||||
const flatListRef = useRef<FlatList>(null);
|
||||
const messageIdCounter = useRef(0);
|
||||
// ServerPaths fuer die der User auf "oeffnen" geklickt hat — beim
|
||||
// file_response wird die Datei nach dem Speichern direkt mit dem System-
|
||||
// Intent geoeffnet (PDF-Viewer, Galerie, etc.).
|
||||
const autoOpenPaths = useRef<Set<string>>(new Set());
|
||||
|
||||
// Eindeutige Message-ID generieren
|
||||
const nextId = (): string => {
|
||||
@@ -349,11 +404,148 @@ const ChatScreen: React.FC = () => {
|
||||
return;
|
||||
}
|
||||
|
||||
// skill_created: ARIA hat einen neuen Skill angelegt → eigene Bubble
|
||||
// chat_cleared: Diagnostic hat die History komplett geleert
|
||||
// → lokal auch loeschen (visuell + Persistenz)
|
||||
if (message.type === 'chat_cleared') {
|
||||
console.log('[Chat] chat_cleared — leere lokale Anzeige + Storage');
|
||||
setMessages([]);
|
||||
AsyncStorage.removeItem(CHAT_STORAGE_KEY).catch(() => {});
|
||||
AsyncStorage.removeItem('aria_chat_last_sync').catch(() => {});
|
||||
return;
|
||||
}
|
||||
|
||||
// chat_history_response: kompletter Server-Stand. App ersetzt ihre
|
||||
// persistierte Chat-History damit. Lokal-only Bubbles (laufende
|
||||
// Voice-Aufnahmen ohne STT-Result, Skill-Created-Events ohne
|
||||
// text) bleiben erhalten — die sind durch fehlendes 'text' oder
|
||||
// skillCreated/audioRequestId klar als "lokal" erkennbar.
|
||||
if (message.type === 'chat_history_response') {
|
||||
const p = (message.payload || {}) as any;
|
||||
const incoming = (p.messages || []) as Array<any>;
|
||||
console.log(`[Chat] Server-Sync: ${incoming.length} Nachrichten vom Server`);
|
||||
const fromServer: ChatMessage[] = incoming.map(m => {
|
||||
const role = m.role === 'user' ? 'user' : 'aria';
|
||||
const files = Array.isArray(m.files) ? m.files : [];
|
||||
const attachments = files.map((f: any) => ({
|
||||
type: (typeof f.mimeType === 'string' && f.mimeType.startsWith('image/')) ? 'image' : 'file',
|
||||
name: f.name || 'datei',
|
||||
size: f.size || 0,
|
||||
mimeType: f.mimeType || '',
|
||||
serverPath: f.serverPath || '',
|
||||
})) as Attachment[];
|
||||
return {
|
||||
id: nextId(),
|
||||
sender: role as 'user' | 'aria',
|
||||
text: m.text || '',
|
||||
timestamp: m.ts || Date.now(),
|
||||
attachments: attachments.length ? attachments : undefined,
|
||||
};
|
||||
});
|
||||
const maxTs = incoming.reduce((mx: number, m: any) => Math.max(mx, m.ts || 0), 0);
|
||||
setMessages(prev => {
|
||||
// Lokal-only Bubbles erkennen + behalten:
|
||||
// - Skill-Created-Notifications (skillCreated gesetzt)
|
||||
// - Laufende Sprachnachrichten ohne STT-Result (audioRequestId
|
||||
// gesetzt UND text leer/Placeholder)
|
||||
const localOnly = prev.filter(m =>
|
||||
m.skillCreated ||
|
||||
m.triggerCreated ||
|
||||
(m.audioRequestId && (!m.text || m.text === '🎙 Aufnahme...' || m.text === 'Aufnahme...'))
|
||||
);
|
||||
// Server-Stand + lokal-only (chronologisch sortiert)
|
||||
const merged = [...fromServer, ...localOnly].sort((a, b) => a.timestamp - b.timestamp);
|
||||
return capMessages(merged);
|
||||
});
|
||||
if (maxTs > 0) {
|
||||
AsyncStorage.setItem('aria_chat_last_sync', String(maxTs)).catch(() => {});
|
||||
} else {
|
||||
// Server leer → unsere lastSync auch zuruecksetzen
|
||||
AsyncStorage.removeItem('aria_chat_last_sync').catch(() => {});
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (message.type === 'skill_created') {
|
||||
const p = (message.payload || {}) as any;
|
||||
const skillMsg: ChatMessage = {
|
||||
id: nextId(),
|
||||
sender: 'aria',
|
||||
text: '',
|
||||
timestamp: Date.now(),
|
||||
skillCreated: {
|
||||
name: String(p.name || '(unbenannt)'),
|
||||
description: String(p.description || ''),
|
||||
execution: String(p.execution || 'bash'),
|
||||
active: p.active !== false,
|
||||
setupError: p.setup_error ? String(p.setup_error) : undefined,
|
||||
},
|
||||
};
|
||||
setMessages(prev => capMessages([...prev, skillMsg]));
|
||||
return;
|
||||
}
|
||||
|
||||
// trigger_created: ARIA hat einen Trigger angelegt → eigene Bubble
|
||||
if (message.type === 'trigger_created') {
|
||||
const p = (message.payload || {}) as any;
|
||||
const triggerMsg: ChatMessage = {
|
||||
id: nextId(),
|
||||
sender: 'aria',
|
||||
text: '',
|
||||
timestamp: Date.now(),
|
||||
triggerCreated: {
|
||||
name: String(p.name || '(unbenannt)'),
|
||||
type: String(p.type || 'timer'),
|
||||
message: String(p.message || ''),
|
||||
fires_at: p.fires_at ? String(p.fires_at) : undefined,
|
||||
condition: p.condition ? String(p.condition) : undefined,
|
||||
},
|
||||
};
|
||||
setMessages(prev => capMessages([...prev, triggerMsg]));
|
||||
return;
|
||||
}
|
||||
|
||||
// file_deleted: Datei wurde geloescht (vom Diagnostic User) → Bubble updaten
|
||||
if (message.type === 'file_deleted') {
|
||||
const p = (message.payload?.path as string) || '';
|
||||
if (!p) return;
|
||||
setMessages(prev => prev.map(m => ({
|
||||
...m,
|
||||
attachments: m.attachments?.map(a =>
|
||||
a.serverPath === p ? { ...a, deleted: true } : a
|
||||
),
|
||||
})));
|
||||
return;
|
||||
}
|
||||
|
||||
// file_list_response: wird vom Datei-Manager im SettingsScreen verarbeitet.
|
||||
|
||||
// file_from_aria: ARIA hat eine Datei rausgegeben → als ARIA-Bubble anzeigen
|
||||
if (message.type === 'file_from_aria') {
|
||||
const p = message.payload || {};
|
||||
const ariaMsg: ChatMessage = {
|
||||
id: nextId(),
|
||||
sender: 'aria',
|
||||
text: '',
|
||||
timestamp: Date.now(),
|
||||
attachments: [{
|
||||
type: (typeof p.mimeType === 'string' && p.mimeType.startsWith('image/')) ? 'image' : 'file',
|
||||
name: (p.name as string) || 'datei',
|
||||
size: (p.size as number) || 0,
|
||||
mimeType: (p.mimeType as string) || '',
|
||||
serverPath: (p.serverPath as string) || '',
|
||||
}],
|
||||
};
|
||||
setMessages(prev => capMessages([...prev, ariaMsg]));
|
||||
return;
|
||||
}
|
||||
|
||||
// file_response: Re-Download von Server — lokal speichern
|
||||
if (message.type === 'file_response') {
|
||||
const reqId = (message.payload.requestId as string) || '';
|
||||
const b64 = (message.payload.base64 as string) || '';
|
||||
const serverPath = (message.payload.serverPath as string) || '';
|
||||
const mimeType = (message.payload.mimeType as string) || '';
|
||||
if (b64 && reqId) {
|
||||
const fileName = (message.payload.name as string) || 'download';
|
||||
persistAttachment(b64, reqId, fileName).then(filePath => {
|
||||
@@ -363,6 +555,11 @@ const ChatScreen: React.FC = () => {
|
||||
a.serverPath === serverPath ? { ...a, uri: filePath } : a
|
||||
),
|
||||
})));
|
||||
// Wenn der User dieses File explizit oeffnen wollte → Intent-Picker
|
||||
if (serverPath && autoOpenPaths.current.has(serverPath)) {
|
||||
autoOpenPaths.current.delete(serverPath);
|
||||
openFileWithIntent(filePath.replace(/^file:\/\//, ''), mimeType);
|
||||
}
|
||||
}).catch(() => {});
|
||||
}
|
||||
return;
|
||||
@@ -373,6 +570,13 @@ const ChatScreen: React.FC = () => {
|
||||
const dbgText = ((message.payload.text as string) || '').slice(0, 60);
|
||||
console.log('[Chat] chat-event sender=%s text=%s', sender || '(none)', dbgText);
|
||||
|
||||
// last-sync tracken — so dass beim Reconnect nicht wieder dieselbe
|
||||
// Nachricht aus dem Server-Backup nachgeladen wird
|
||||
if (sender === 'aria' || sender === 'user' || sender === 'stt') {
|
||||
const ts = message.timestamp || Date.now();
|
||||
AsyncStorage.setItem('aria_chat_last_sync', String(ts)).catch(() => {});
|
||||
}
|
||||
|
||||
// STT-Ergebnis: Transkribierten Text in die Sprach-Bubble schreiben.
|
||||
// WICHTIG: Nur die ERSTE noch unaufgeloeste Aufnahme matchen — sonst
|
||||
// wuerde bei zwei kurz hintereinander gesendeten Audios beide Bubbles
|
||||
@@ -540,6 +744,14 @@ const ChatScreen: React.FC = () => {
|
||||
|
||||
const unsubState = rvs.onStateChange((state) => {
|
||||
setConnectionState(state);
|
||||
// Bei (re)connect: KOMPLETTEN Server-Stand holen. Server ist die
|
||||
// Source-of-Truth — wenn er leer ist (z.B. nach "Konversation
|
||||
// zuruecksetzen"), soll die App das spiegeln, auch wenn sie offline
|
||||
// war als das passiert ist. since=0 + limit=200 → die letzten 200
|
||||
// Nachrichten vom Server, oder leeres Array wenn Server leer.
|
||||
if (state === 'connected') {
|
||||
rvs.send('chat_history_request' as any, { since: 0, limit: 200 });
|
||||
}
|
||||
});
|
||||
|
||||
// Initalen Status setzen
|
||||
@@ -723,19 +935,70 @@ const ChatScreen: React.FC = () => {
|
||||
// Inverted FlatList: neueste Nachrichten unten, kein manuelles Scrollen noetig
|
||||
const invertedMessages = useMemo(() => [...messages].reverse(), [messages]);
|
||||
|
||||
// Such-Treffer: alle Message-IDs die zur Query passen, in chronologischer
|
||||
// Reihenfolge (aelteste zuerst). Bei Query-Change resetten wir den Index.
|
||||
const searchMatchIds = useMemo(() => {
|
||||
const q = searchQuery.trim().toLowerCase();
|
||||
if (!q) return [] as string[];
|
||||
return messages
|
||||
.filter(m => (m.text || '').toLowerCase().includes(q))
|
||||
.map(m => m.id);
|
||||
}, [messages, searchQuery]);
|
||||
|
||||
useEffect(() => {
|
||||
setSearchIndex(0);
|
||||
}, [searchQuery]);
|
||||
|
||||
// Bei Index-Wechsel zu der entsprechenden Bubble scrollen.
|
||||
// FlatList ist `inverted` → viewPosition 0.5 (mitte) ist beim inverted-Render
|
||||
// tatsaechlich die Mitte des sichtbaren Bereichs. Wir verzoegern minimal
|
||||
// damit Layout sicher fertig ist.
|
||||
useEffect(() => {
|
||||
if (!searchMatchIds.length) return;
|
||||
const id = searchMatchIds[searchIndex];
|
||||
if (!id) return;
|
||||
const idx = invertedMessages.findIndex(m => m.id === id);
|
||||
if (idx < 0 || !flatListRef.current) return;
|
||||
const tryScroll = () => {
|
||||
try {
|
||||
flatListRef.current?.scrollToIndex({ index: idx, animated: true, viewPosition: 0.5 });
|
||||
} catch {
|
||||
// wird von onScrollToIndexFailed nochmal versucht
|
||||
}
|
||||
};
|
||||
// requestAnimationFrame statt setTimeout 0 — wartet auf naechsten Layout-Frame
|
||||
requestAnimationFrame(tryScroll);
|
||||
}, [searchIndex, searchMatchIds, invertedMessages]);
|
||||
|
||||
const activeSearchId = searchMatchIds[searchIndex] || '';
|
||||
const gotoSearchPrev = () => {
|
||||
if (!searchMatchIds.length) return;
|
||||
setSearchIndex(i => (i - 1 + searchMatchIds.length) % searchMatchIds.length);
|
||||
};
|
||||
const gotoSearchNext = () => {
|
||||
if (!searchMatchIds.length) return;
|
||||
setSearchIndex(i => (i + 1) % searchMatchIds.length);
|
||||
};
|
||||
|
||||
// GPS-Position holen (optional)
|
||||
const getCurrentLocation = useCallback((): Promise<{ lat: number; lon: number } | null> => {
|
||||
if (!gpsEnabled) return Promise.resolve(null);
|
||||
if (!gpsEnabled) {
|
||||
console.log('[GPS] gpsEnabled=false → kein Standort');
|
||||
return Promise.resolve(null);
|
||||
}
|
||||
|
||||
return new Promise((resolve) => {
|
||||
Geolocation.getCurrentPosition(
|
||||
(position) => {
|
||||
resolve({
|
||||
const loc = {
|
||||
lat: position.coords.latitude,
|
||||
lon: position.coords.longitude,
|
||||
});
|
||||
};
|
||||
console.log('[GPS] Position: lat=%s lon=%s', loc.lat, loc.lon);
|
||||
resolve(loc);
|
||||
},
|
||||
(_error) => {
|
||||
(error) => {
|
||||
console.warn('[GPS] getCurrentPosition Fehler:', error?.code, error?.message);
|
||||
resolve(null);
|
||||
},
|
||||
{ enableHighAccuracy: false, timeout: 5000 },
|
||||
@@ -884,6 +1147,7 @@ const ChatScreen: React.FC = () => {
|
||||
// Alle Pending Anhaenge + Text senden
|
||||
const sendPendingAttachments = useCallback(async (messageText: string) => {
|
||||
if (pendingAttachments.length === 0) return;
|
||||
console.log('[Chat] sendPendingAttachments: %d Anhang/Anhaenge', pendingAttachments.length);
|
||||
const location = await getCurrentLocation();
|
||||
const msgId = nextId();
|
||||
|
||||
@@ -933,6 +1197,8 @@ const ChatScreen: React.FC = () => {
|
||||
}
|
||||
|
||||
// An RVS senden
|
||||
console.log('[Chat] sende file: name=%s mime=%s size=%s b64Bytes=%s',
|
||||
name, mimeType, file.size, base64.length);
|
||||
rvs.send('file', {
|
||||
name,
|
||||
type: mimeType,
|
||||
@@ -965,13 +1231,68 @@ const ChatScreen: React.FC = () => {
|
||||
hour: '2-digit',
|
||||
minute: '2-digit',
|
||||
});
|
||||
const isSearchHit = activeSearchId === item.id;
|
||||
const searchHighlightStyle = isSearchHit
|
||||
? { borderWidth: 2, borderColor: '#FFD60A' }
|
||||
: null;
|
||||
|
||||
// Spezial-Bubble: ARIA hat einen Trigger angelegt
|
||||
if (item.triggerCreated) {
|
||||
const t = item.triggerCreated;
|
||||
const detailLine = t.type === 'timer'
|
||||
? `feuert: ${t.fires_at || '?'}`
|
||||
: `wenn: ${t.condition || '?'}`;
|
||||
return (
|
||||
<View style={[styles.messageBubble, styles.ariaBubble, {borderLeftWidth: 3, borderLeftColor: '#FFD60A'}, searchHighlightStyle]}>
|
||||
<Text style={{color: '#FFD60A', fontWeight: 'bold', fontSize: 14}}>
|
||||
{'⏰ ARIA hat einen Trigger angelegt'}
|
||||
</Text>
|
||||
<Text style={{color: '#E0E0F0', marginTop: 4, fontSize: 14}}>
|
||||
<Text style={{fontWeight: 'bold'}}>{t.name}</Text>
|
||||
<Text style={{color: '#8888AA', fontSize: 12}}>{` (${t.type})`}</Text>
|
||||
</Text>
|
||||
<Text style={{color: '#8888AA', fontSize: 12, marginTop: 2, fontFamily: 'monospace'}}>{detailLine}</Text>
|
||||
<Text style={{color: '#888', fontSize: 12, marginTop: 2}}>{`"${t.message}"`}</Text>
|
||||
<Text style={{color: '#555570', fontSize: 10, marginTop: 6}}>ARIA-Trigger · {time}</Text>
|
||||
</View>
|
||||
);
|
||||
}
|
||||
|
||||
// Spezial-Bubble: ARIA hat einen Skill erstellt
|
||||
if (item.skillCreated) {
|
||||
const s = item.skillCreated;
|
||||
return (
|
||||
<View style={[styles.messageBubble, styles.ariaBubble, {borderLeftWidth: 3, borderLeftColor: '#FFD60A'}, searchHighlightStyle]}>
|
||||
<Text style={{color: '#FFD60A', fontWeight: 'bold', fontSize: 14}}>
|
||||
{'🛠 ARIA hat einen neuen Skill erstellt'}
|
||||
</Text>
|
||||
<Text style={{color: '#E0E0F0', marginTop: 4, fontSize: 14}}>
|
||||
<Text style={{fontWeight: 'bold'}}>{s.name}</Text>
|
||||
<Text style={{color: '#8888AA', fontSize: 12}}>{` (${s.execution}, ${s.active ? 'aktiv' : 'deaktiviert'})`}</Text>
|
||||
</Text>
|
||||
<Text style={{color: '#8888AA', fontSize: 12, marginTop: 2}}>{s.description}</Text>
|
||||
{s.setupError && (
|
||||
<Text style={{color: '#FF6B6B', fontSize: 11, marginTop: 4}}>
|
||||
{'⚠ Setup-Fehler: '}{s.setupError.slice(0, 200)}
|
||||
</Text>
|
||||
)}
|
||||
<Text style={{color: '#555570', fontSize: 10, marginTop: 6}}>ARIA-Skill · {time}</Text>
|
||||
</View>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<View style={[styles.messageBubble, isUser ? styles.userBubble : styles.ariaBubble]}>
|
||||
<View style={[styles.messageBubble, isUser ? styles.userBubble : styles.ariaBubble, searchHighlightStyle]}>
|
||||
{/* Anhang-Vorschau */}
|
||||
{item.attachments?.map((att, idx) => (
|
||||
<View key={idx}>
|
||||
{att.type === 'image' && att.uri ? (
|
||||
{att.deleted ? (
|
||||
<View style={[styles.attachmentFile, {opacity: 0.6}]}>
|
||||
<Text style={styles.attachmentFileIcon}>{'🗑️'}</Text>
|
||||
<Text style={[styles.attachmentFileName, {textDecorationLine: 'line-through'}]} numberOfLines={1}>{att.name}</Text>
|
||||
<Text style={[styles.attachmentFileSize, {color: '#FF9500'}]}>(geloescht)</Text>
|
||||
</View>
|
||||
) : att.type === 'image' && att.uri ? (
|
||||
<ChatImage
|
||||
uri={att.uri}
|
||||
onPress={() => setFullscreenImage(att.uri || null)}
|
||||
@@ -999,7 +1320,22 @@ const ChatScreen: React.FC = () => {
|
||||
</Text>
|
||||
</TouchableOpacity>
|
||||
) : (
|
||||
<View style={styles.attachmentFile}>
|
||||
<TouchableOpacity
|
||||
style={styles.attachmentFile}
|
||||
onPress={() => {
|
||||
// Lokal vorhanden \u2192 direkt mit System-Intent oeffnen
|
||||
if (att.uri) {
|
||||
openFileWithIntent(att.uri.replace(/^file:\/\//, ''), att.mimeType || '');
|
||||
return;
|
||||
}
|
||||
// Sonst: file_request \u2192 bei file_response wird die Datei
|
||||
// gespeichert UND geoeffnet (autoOpenPaths-Tracking).
|
||||
if (att.serverPath) {
|
||||
autoOpenPaths.current.add(att.serverPath);
|
||||
rvs.send('file_request' as any, { serverPath: att.serverPath, requestId: item.id });
|
||||
}
|
||||
}}
|
||||
>
|
||||
<Text style={styles.attachmentFileIcon}>
|
||||
{att.mimeType?.includes('pdf') ? '\uD83D\uDCC4' :
|
||||
att.mimeType?.includes('word') || att.mimeType?.includes('document') ? '\uD83D\uDCC3' :
|
||||
@@ -1009,12 +1345,10 @@ const ChatScreen: React.FC = () => {
|
||||
<Text style={styles.attachmentFileName} numberOfLines={1}>{att.name}</Text>
|
||||
{att.size ? <Text style={styles.attachmentFileSize}>{Math.round(att.size / 1024)}KB</Text> : null}
|
||||
{!att.uri && att.serverPath && (
|
||||
<TouchableOpacity onPress={() => rvs.send('file_request' as any, { serverPath: att.serverPath, requestId: item.id })}>
|
||||
<Text style={[styles.attachmentFileSize, {color: '#0096FF'}]}>(laden)</Text>
|
||||
</TouchableOpacity>
|
||||
<Text style={[styles.attachmentFileSize, {color: '#0096FF'}]}>(tippen zum oeffnen)</Text>
|
||||
)}
|
||||
{!att.uri && !att.serverPath && <Text style={styles.attachmentFileSize}>(nicht verfuegbar)</Text>}
|
||||
</View>
|
||||
</TouchableOpacity>
|
||||
)}
|
||||
</View>
|
||||
))}
|
||||
@@ -1029,10 +1363,16 @@ const ChatScreen: React.FC = () => {
|
||||
{!isUser && item.text.length > 0 && (
|
||||
<TouchableOpacity
|
||||
style={styles.playButton}
|
||||
onPress={() => {
|
||||
if (item.audioPath) {
|
||||
audioService.playFromPath(item.audioPath);
|
||||
} else {
|
||||
onPress={async () => {
|
||||
// Erst lokalen Cache pruefen — audioPath kann auf eine geloeschte
|
||||
// Datei zeigen (TTS-Cache geleert oder Auto-Cleanup). In dem Fall
|
||||
// ueber RVS neu rendern lassen statt stumm zu bleiben.
|
||||
const cachePath = item.audioPath?.replace(/^file:\/\//, '') || '';
|
||||
const cached = cachePath ? await RNFS.exists(cachePath).catch(() => false) : false;
|
||||
if (cached) {
|
||||
audioService.playFromPath(item.audioPath!);
|
||||
return;
|
||||
}
|
||||
// messageId mitschicken damit die Bridge das generierte Audio
|
||||
// wieder mit der Nachricht verknuepft (fuer den naechsten Replay aus Cache)
|
||||
rvs.send('tts_request' as any, {
|
||||
@@ -1041,7 +1381,6 @@ const ChatScreen: React.FC = () => {
|
||||
speed: ttsSpeedRef.current,
|
||||
messageId: item.messageId || '',
|
||||
});
|
||||
}
|
||||
}}
|
||||
>
|
||||
<Text style={styles.playButtonText}>{'\uD83D\uDD0A'}</Text>
|
||||
@@ -1117,7 +1456,7 @@ const ChatScreen: React.FC = () => {
|
||||
);
|
||||
})()}
|
||||
|
||||
{/* Suchleiste */}
|
||||
{/* Suchleiste mit Treffer-Navigation */}
|
||||
{searchVisible && (
|
||||
<View style={styles.searchBar}>
|
||||
<TextInput
|
||||
@@ -1128,17 +1467,47 @@ const ChatScreen: React.FC = () => {
|
||||
placeholderTextColor="#555570"
|
||||
autoFocus
|
||||
/>
|
||||
{searchQuery ? (
|
||||
<Text style={{color: searchMatchIds.length ? '#0096FF' : '#555570', fontSize: 12, paddingHorizontal: 6}}>
|
||||
{searchMatchIds.length ? `${searchIndex + 1}/${searchMatchIds.length}` : '0/0'}
|
||||
</Text>
|
||||
) : null}
|
||||
<TouchableOpacity
|
||||
onPress={gotoSearchPrev}
|
||||
disabled={!searchMatchIds.length}
|
||||
style={{paddingHorizontal: 6, opacity: searchMatchIds.length ? 1 : 0.3}}
|
||||
>
|
||||
<Text style={{color: '#0096FF', fontSize: 18}}>{'▲'}</Text>
|
||||
</TouchableOpacity>
|
||||
<TouchableOpacity
|
||||
onPress={gotoSearchNext}
|
||||
disabled={!searchMatchIds.length}
|
||||
style={{paddingHorizontal: 6, opacity: searchMatchIds.length ? 1 : 0.3}}
|
||||
>
|
||||
<Text style={{color: '#0096FF', fontSize: 18}}>{'▼'}</Text>
|
||||
</TouchableOpacity>
|
||||
<TouchableOpacity onPress={() => { setSearchVisible(false); setSearchQuery(''); }}>
|
||||
<Text style={{color: '#FF3B30', fontSize: 14, paddingHorizontal: 8}}>X</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
)}
|
||||
|
||||
{/* Nachrichtenliste */}
|
||||
{/* Nachrichtenliste — Suche FILTERT NICHT mehr, sondern hebt aktiven
|
||||
Treffer hervor (siehe renderMessage: activeSearchId-Border). */}
|
||||
<FlatList
|
||||
ref={flatListRef}
|
||||
inverted
|
||||
data={searchQuery ? messages.filter(m => m.text.toLowerCase().includes(searchQuery.toLowerCase())).reverse() : invertedMessages}
|
||||
data={invertedMessages}
|
||||
onScrollToIndexFailed={(info) => {
|
||||
// FlatList kennt das Item-Layout noch nicht. Zuerst grob in die
|
||||
// Naehe scrollen (Average-Item-Hoehe-Schaetzung), dann nach 250ms
|
||||
// praezise nochmal versuchen.
|
||||
const offset = info.averageItemLength * info.index;
|
||||
try { flatListRef.current?.scrollToOffset({ offset, animated: false }); } catch {}
|
||||
setTimeout(() => {
|
||||
try { flatListRef.current?.scrollToIndex({ index: info.index, animated: true, viewPosition: 0.5 }); } catch {}
|
||||
}, 250);
|
||||
}}
|
||||
keyExtractor={item => item.id}
|
||||
renderItem={renderMessage}
|
||||
contentContainerStyle={styles.messageList}
|
||||
@@ -1162,10 +1531,12 @@ const ChatScreen: React.FC = () => {
|
||||
? '\u270D\uFE0F ARIA schreibt...'
|
||||
: '\uD83D\uDCAD ARIA denkt...'}
|
||||
</Text>
|
||||
<View style={{flexDirection: 'row', gap: 6}}>
|
||||
<TouchableOpacity style={styles.thinkingCancel} onPress={cancelRequest}>
|
||||
<Text style={styles.thinkingCancelText}>Abbrechen</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
</View>
|
||||
)}
|
||||
|
||||
{/* Pending Anhaenge Vorschau */}
|
||||
@@ -1270,19 +1641,32 @@ const ChatScreen: React.FC = () => {
|
||||
|
||||
{/* Bild-Vollbild Modal */}
|
||||
<Modal visible={!!fullscreenImage} transparent animationType="fade" onRequestClose={() => setFullscreenImage(null)}>
|
||||
<View style={styles.fullscreenOverlay}>
|
||||
{fullscreenImage && (
|
||||
/\.svg(?:\?|$)/i.test(fullscreenImage) ? (
|
||||
// SVG: bisher keine Pinch-Zoom — Tap zum Schliessen
|
||||
<TouchableOpacity style={styles.fullscreenImage} activeOpacity={1} onPress={() => setFullscreenImage(null)}>
|
||||
<SvgUri uri={fullscreenImage} width="100%" height="100%" preserveAspectRatio="xMidYMid meet" />
|
||||
</TouchableOpacity>
|
||||
) : (
|
||||
// Pixel-Bild: Pinch-Zoom + Pan ueber ZoomableImage
|
||||
<ZoomableImage
|
||||
uri={fullscreenImage}
|
||||
containerWidth={Dimensions.get('window').width}
|
||||
containerHeight={Dimensions.get('window').height}
|
||||
style={styles.fullscreenImage}
|
||||
/>
|
||||
)
|
||||
)}
|
||||
{/* Close-Button oben rechts — die TouchableOpacity-uebergreifend funktioniert
|
||||
wegen ZoomableImage-PanResponder nicht zuverlaessig fuer Tap-to-Close */}
|
||||
<TouchableOpacity
|
||||
style={styles.fullscreenOverlay}
|
||||
activeOpacity={1}
|
||||
style={{ position: 'absolute', top: 32, right: 16, padding: 12, backgroundColor: 'rgba(0,0,0,0.5)', borderRadius: 24 }}
|
||||
onPress={() => setFullscreenImage(null)}
|
||||
>
|
||||
{fullscreenImage && (
|
||||
<Image
|
||||
source={{ uri: fullscreenImage }}
|
||||
style={styles.fullscreenImage}
|
||||
resizeMode="contain"
|
||||
/>
|
||||
)}
|
||||
<Text style={{ color: '#FFF', fontSize: 22 }}>{'✕'}</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
</Modal>
|
||||
|
||||
{/* Datei-Upload Modal */}
|
||||
|
||||
@@ -18,6 +18,7 @@ import {
|
||||
ToastAndroid,
|
||||
ActivityIndicator,
|
||||
Modal,
|
||||
PermissionsAndroid,
|
||||
} from 'react-native';
|
||||
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||
import RNFS from 'react-native-fs';
|
||||
@@ -49,6 +50,9 @@ import {
|
||||
TTS_SPEED_MAX,
|
||||
TTS_SPEED_STORAGE_KEY,
|
||||
} from '../services/audio';
|
||||
import audioService from '../services/audio';
|
||||
import gpsTrackingService from '../services/gpsTracking';
|
||||
import { isVerboseLogging, setVerboseLogging } from '../services/logger';
|
||||
import {
|
||||
isWakeReadySoundEnabled,
|
||||
setWakeReadySoundEnabled,
|
||||
@@ -95,6 +99,7 @@ const SETTINGS_SECTIONS = [
|
||||
{ id: 'wake_word', icon: '👂', label: 'Wake-Word', desc: 'Wake-Word-Auswahl' },
|
||||
{ id: 'voice_output', icon: '🔊', label: 'Sprachausgabe', desc: 'Stimmen, Pre-Roll, Geschwindigkeit' },
|
||||
{ id: 'storage', icon: '📁', label: 'Speicher', desc: 'Anhang-Speicherort, Auto-Download' },
|
||||
{ id: 'files', icon: '📂', label: 'Dateien', desc: 'ARIA- und User-Dateien — anzeigen, löschen' },
|
||||
{ id: 'protocol', icon: '📜', label: 'Protokoll', desc: 'Privatsphaere, Backup' },
|
||||
{ id: 'about', icon: 'ℹ️', label: 'Ueber', desc: 'App-Version, Update' },
|
||||
] as const;
|
||||
@@ -117,6 +122,7 @@ const SettingsScreen: React.FC = () => {
|
||||
const [manualPort, setManualPort] = useState('8765');
|
||||
const [currentMode, setCurrentMode] = useState('normal');
|
||||
const [gpsEnabled, setGpsEnabled] = useState(false);
|
||||
const [gpsTracking, setGpsTracking] = useState(gpsTrackingService.isActive());
|
||||
const [scannerVisible, setScannerVisible] = useState(false);
|
||||
const [logTab, setLogTab] = useState<LogTab>('live');
|
||||
const [logs, setLogs] = useState<LogEntry[]>([]);
|
||||
@@ -134,6 +140,8 @@ const SettingsScreen: React.FC = () => {
|
||||
const [vadSilenceDb, setVadSilenceDb] = useState<number | null>(null);
|
||||
const [showVadInfo, setShowVadInfo] = useState(false);
|
||||
const [apkCacheInfo, setApkCacheInfo] = useState<{count: number, totalMB: number} | null>(null);
|
||||
const [ttsCacheInfo, setTtsCacheInfo] = useState<{count: number, totalMB: number} | null>(null);
|
||||
const [verboseLogging, setVerboseLoggingState] = useState<boolean>(isVerboseLogging());
|
||||
const [ttsSpeed, setTtsSpeed] = useState<number>(TTS_SPEED_DEFAULT);
|
||||
const [wakeKeyword, setWakeKeyword] = useState<string>(DEFAULT_KEYWORD);
|
||||
const [wakeStatus, setWakeStatus] = useState<string>('');
|
||||
@@ -142,6 +150,16 @@ const SettingsScreen: React.FC = () => {
|
||||
const [xttsVoice, setXttsVoice] = useState('');
|
||||
const [loadingVoice, setLoadingVoice] = useState<string | null>(null);
|
||||
const [availableVoices, setAvailableVoices] = useState<Array<{name: string, size: number}>>([]);
|
||||
// Datei-Manager
|
||||
const [fileManagerOpen, setFileManagerOpen] = useState(false);
|
||||
const [fileManagerFiles, setFileManagerFiles] = useState<Array<{name: string; path: string; size: number; mtime: number; fromAria: boolean}>>([]);
|
||||
const [fileManagerLoading, setFileManagerLoading] = useState(false);
|
||||
const [fileManagerError, setFileManagerError] = useState('');
|
||||
const [fileManagerSearch, setFileManagerSearch] = useState('');
|
||||
const [fileManagerFilter, setFileManagerFilter] = useState<'all' | 'aria' | 'user'>('all');
|
||||
const [fileManagerSelected, setFileManagerSelected] = useState<Set<string>>(new Set());
|
||||
const fileZipPending = useRef<string | null>(null); // requestId fuer ZIP-Antwort
|
||||
const [fileZipBusy, setFileZipBusy] = useState(false);
|
||||
const [voiceCloneVisible, setVoiceCloneVisible] = useState(false);
|
||||
const [tempPath, setTempPath] = useState('');
|
||||
// Sub-Screen Navigation: null = Hauptmenue, sonst eine der Section-IDs.
|
||||
@@ -172,6 +190,11 @@ const SettingsScreen: React.FC = () => {
|
||||
AsyncStorage.getItem('aria_gps_enabled').then(saved => {
|
||||
if (saved !== null) setGpsEnabled(saved === 'true');
|
||||
});
|
||||
// gpsTrackingService status syncen + auf Aenderungen lauschen
|
||||
setGpsTracking(gpsTrackingService.isActive());
|
||||
const offGps = gpsTrackingService.onChange(setGpsTracking);
|
||||
// Persistierten Status wiederherstellen (war Tracking beim letzten Mal an?)
|
||||
gpsTrackingService.restoreFromStorage().catch(() => {});
|
||||
AsyncStorage.getItem(TTS_PREROLL_STORAGE_KEY).then(saved => {
|
||||
if (saved != null) {
|
||||
const n = parseFloat(saved);
|
||||
@@ -223,11 +246,16 @@ const SettingsScreen: React.FC = () => {
|
||||
});
|
||||
isWakeReadySoundEnabled().then(setWakeReadySound);
|
||||
updateService.getApkCacheSize().then(setApkCacheInfo).catch(() => {});
|
||||
audioService.getTtsCacheSize().then(setTtsCacheInfo).catch(() => {});
|
||||
AsyncStorage.getItem('aria_xtts_voice').then(saved => {
|
||||
if (saved) setXttsVoice(saved);
|
||||
});
|
||||
// Voice-Liste vom XTTS-Server holen (via RVS)
|
||||
rvs.send('xtts_list_voices' as any, {});
|
||||
return () => {
|
||||
// gpsTrackingService-Listener abmelden (Variable offGps oben definiert)
|
||||
try { offGps(); } catch {}
|
||||
};
|
||||
}, []);
|
||||
|
||||
// Speichergroesse berechnen
|
||||
@@ -365,6 +393,67 @@ const SettingsScreen: React.FC = () => {
|
||||
setAvailableVoices(voices);
|
||||
}
|
||||
|
||||
// Datei-Manager: Liste empfangen
|
||||
if (message.type === ('file_list_response' as any)) {
|
||||
const p: any = message.payload || {};
|
||||
if (p.ok) {
|
||||
setFileManagerFiles(p.files || []);
|
||||
} else {
|
||||
setFileManagerError(p.error || 'Unbekannter Fehler');
|
||||
}
|
||||
setFileManagerLoading(false);
|
||||
}
|
||||
|
||||
// Datei-Manager: Datei wurde geloescht (vom Diagnostic oder dieser App)
|
||||
if (message.type === ('file_deleted' as any)) {
|
||||
const p: any = message.payload || {};
|
||||
if (p.path) {
|
||||
setFileManagerFiles(prev => prev.filter(f => f.path !== p.path));
|
||||
setFileManagerSelected(prev => {
|
||||
if (!prev.has(p.path)) return prev;
|
||||
const next = new Set(prev);
|
||||
next.delete(p.path);
|
||||
return next;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
// ARIA bittet um GPS-Tracking An/Aus (Tool request_location_tracking)
|
||||
if (message.type === ('location_tracking' as any)) {
|
||||
const p: any = message.payload || {};
|
||||
const on = !!p.on;
|
||||
const reason = (p.reason as string) || 'ARIA';
|
||||
if (on) {
|
||||
gpsTrackingService.start(reason).catch(() => {});
|
||||
} else {
|
||||
gpsTrackingService.stop(reason);
|
||||
}
|
||||
}
|
||||
|
||||
// Datei-Manager: ZIP-Response (Multi-Download)
|
||||
if (message.type === ('file_zip_response' as any)) {
|
||||
const p: any = message.payload || {};
|
||||
if (p.requestId && p.requestId !== fileZipPending.current) return; // veraltet
|
||||
fileZipPending.current = null;
|
||||
setFileZipBusy(false);
|
||||
if (!p.ok || !p.data) {
|
||||
ToastAndroid.show('ZIP fehlgeschlagen: ' + (p.error || 'unbekannt'), ToastAndroid.LONG);
|
||||
return;
|
||||
}
|
||||
// base64 → in Downloads-Ordner schreiben
|
||||
(async () => {
|
||||
try {
|
||||
const ts = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);
|
||||
const dir = RNFS.DownloadDirectoryPath;
|
||||
const filePath = `${dir}/aria-files-${ts}.zip`;
|
||||
await RNFS.writeFile(filePath, p.data, 'base64');
|
||||
ToastAndroid.show(`ZIP gespeichert: ${filePath} (${Math.round((p.size||0)/1024)} KB)`, ToastAndroid.LONG);
|
||||
} catch (e: any) {
|
||||
ToastAndroid.show('ZIP speichern fehlgeschlagen: ' + e.message, ToastAndroid.LONG);
|
||||
}
|
||||
})();
|
||||
}
|
||||
|
||||
// Voice wurde gespeichert → Liste neu laden + ggf. auswaehlen
|
||||
if (message.type === ('xtts_voice_saved' as any)) {
|
||||
const name = (message.payload as any).name as string;
|
||||
@@ -457,7 +546,29 @@ const SettingsScreen: React.FC = () => {
|
||||
|
||||
// --- GPS Toggle ---
|
||||
|
||||
const handleGPSToggle = useCallback((value: boolean) => {
|
||||
const handleGPSToggle = useCallback(async (value: boolean) => {
|
||||
if (value && Platform.OS === 'android') {
|
||||
try {
|
||||
const granted = await PermissionsAndroid.request(
|
||||
PermissionsAndroid.PERMISSIONS.ACCESS_COARSE_LOCATION,
|
||||
{
|
||||
title: 'ARIA — Standort an Anfragen anhaengen',
|
||||
message: 'Damit ARIA bei Anfragen wie "Wo ist der naechste...?" den '
|
||||
+ 'Standort kennt, darf die App den ungefaehren Standort lesen. '
|
||||
+ 'Wird nur bei jeder Anfrage einmal abgerufen, nicht im Hintergrund.',
|
||||
buttonPositive: 'Erlauben',
|
||||
buttonNegative: 'Abbrechen',
|
||||
},
|
||||
);
|
||||
if (granted !== PermissionsAndroid.RESULTS.GRANTED) {
|
||||
ToastAndroid.show('Standort-Berechtigung abgelehnt', ToastAndroid.SHORT);
|
||||
return;
|
||||
}
|
||||
} catch (err) {
|
||||
console.warn('[Settings] GPS-Permission Request gescheitert:', err);
|
||||
return;
|
||||
}
|
||||
}
|
||||
setGpsEnabled(value);
|
||||
AsyncStorage.setItem('aria_gps_enabled', String(value)).catch(() => {});
|
||||
}, []);
|
||||
@@ -536,6 +647,225 @@ const SettingsScreen: React.FC = () => {
|
||||
visible={voiceCloneVisible}
|
||||
onClose={() => setVoiceCloneVisible(false)}
|
||||
/>
|
||||
{/* Datei-Manager Modal */}
|
||||
<Modal
|
||||
visible={fileManagerOpen}
|
||||
animationType="slide"
|
||||
onRequestClose={() => setFileManagerOpen(false)}
|
||||
>
|
||||
<View style={{flex:1, backgroundColor:'#080810', paddingTop:24}}>
|
||||
<View style={{flexDirection:'row', alignItems:'center', padding:12, borderBottomWidth:1, borderColor:'#1E1E2E'}}>
|
||||
<TouchableOpacity onPress={() => setFileManagerOpen(false)} style={{padding:8}}>
|
||||
<Text style={{color:'#0096FF', fontSize:24}}>‹</Text>
|
||||
</TouchableOpacity>
|
||||
<Text style={{color:'#E0E0F0', fontSize:18, fontWeight:'600', flex:1, marginLeft:8}}>Dateien</Text>
|
||||
<TouchableOpacity
|
||||
onPress={() => {
|
||||
setFileManagerError('');
|
||||
setFileManagerLoading(true);
|
||||
rvs.send('file_list_request' as any, {});
|
||||
}}
|
||||
style={{padding:8}}
|
||||
>
|
||||
<Text style={{color:'#0096FF', fontSize:14}}>🔄</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
<View style={{padding:12}}>
|
||||
<TextInput
|
||||
style={{backgroundColor:'#1E1E2E', borderRadius:8, padding:10, color:'#E0E0F0', fontSize:14}}
|
||||
placeholder="Suche..."
|
||||
placeholderTextColor="#555570"
|
||||
value={fileManagerSearch}
|
||||
onChangeText={setFileManagerSearch}
|
||||
autoCapitalize="none"
|
||||
/>
|
||||
<View style={{flexDirection:'row', marginTop:8, gap:6}}>
|
||||
{(['all','aria','user'] as const).map(f => (
|
||||
<TouchableOpacity
|
||||
key={f}
|
||||
onPress={() => setFileManagerFilter(f)}
|
||||
style={{
|
||||
paddingVertical:6, paddingHorizontal:12, borderRadius:14,
|
||||
backgroundColor: fileManagerFilter === f ? '#0096FF' : '#1E1E2E',
|
||||
}}
|
||||
>
|
||||
<Text style={{color: fileManagerFilter === f ? '#fff' : '#8888AA', fontSize:12}}>
|
||||
{f === 'all' ? 'Alle' : f === 'aria' ? 'Von ARIA' : 'Von dir'}
|
||||
</Text>
|
||||
</TouchableOpacity>
|
||||
))}
|
||||
</View>
|
||||
</View>
|
||||
{fileManagerLoading ? (
|
||||
<Text style={{color:'#8888AA', textAlign:'center', marginTop:20}}>Lade...</Text>
|
||||
) : fileManagerError ? (
|
||||
<Text style={{color:'#FF6B6B', textAlign:'center', marginTop:20}}>{fileManagerError}</Text>
|
||||
) : (() => {
|
||||
// Visible files (Filter+Suche)
|
||||
let files = fileManagerFiles;
|
||||
if (fileManagerFilter === 'aria') files = files.filter(f => f.fromAria);
|
||||
else if (fileManagerFilter === 'user') files = files.filter(f => !f.fromAria);
|
||||
if (fileManagerSearch) {
|
||||
const q = fileManagerSearch.toLowerCase();
|
||||
files = files.filter(f => f.name.toLowerCase().includes(q));
|
||||
}
|
||||
const visiblePaths = files.map(f => f.path);
|
||||
const selectedHere = visiblePaths.filter(p => fileManagerSelected.has(p));
|
||||
const allSelected = visiblePaths.length > 0 && selectedHere.length === visiblePaths.length;
|
||||
const fmtSize = (b: number) => b < 1024 ? `${b} B` : b < 1024*1024 ? `${(b/1024).toFixed(1)} KB` : `${(b/1024/1024).toFixed(1)} MB`;
|
||||
|
||||
const toggleSelectAll = () => {
|
||||
setFileManagerSelected(prev => {
|
||||
const next = new Set(prev);
|
||||
if (allSelected) visiblePaths.forEach(p => next.delete(p));
|
||||
else visiblePaths.forEach(p => next.add(p));
|
||||
return next;
|
||||
});
|
||||
};
|
||||
const toggleOne = (p: string) => {
|
||||
setFileManagerSelected(prev => {
|
||||
const next = new Set(prev);
|
||||
if (next.has(p)) next.delete(p);
|
||||
else next.add(p);
|
||||
return next;
|
||||
});
|
||||
};
|
||||
const bulkDelete = () => {
|
||||
const paths = [...fileManagerSelected];
|
||||
if (!paths.length) return;
|
||||
Alert.alert(
|
||||
`${paths.length} Dateien löschen?`,
|
||||
'In allen Chat-Bubbles werden sie als gelöscht markiert.',
|
||||
[
|
||||
{ text: 'Abbrechen', style: 'cancel' },
|
||||
{ text: 'Löschen', style: 'destructive', onPress: () => {
|
||||
rvs.send('file_delete_batch_request' as any, { paths, requestId: 'batch-' + Date.now() });
|
||||
setFileManagerSelected(new Set());
|
||||
ToastAndroid.show(`${paths.length} Lösch-Befehle gesendet…`, ToastAndroid.SHORT);
|
||||
}},
|
||||
],
|
||||
);
|
||||
};
|
||||
const bulkDownload = () => {
|
||||
const paths = [...fileManagerSelected];
|
||||
if (!paths.length) return;
|
||||
// 1 Datei: einfach via file_request (existing pattern). ZIP nur bei 2+.
|
||||
if (paths.length === 1) {
|
||||
rvs.send('file_request' as any, { serverPath: paths[0], requestId: 'single-' + Date.now() });
|
||||
ToastAndroid.show('Datei wird heruntergeladen…', ToastAndroid.SHORT);
|
||||
return;
|
||||
}
|
||||
const reqId = 'zip-' + Date.now();
|
||||
fileZipPending.current = reqId;
|
||||
setFileZipBusy(true);
|
||||
rvs.send('file_zip_request' as any, { paths, requestId: reqId });
|
||||
ToastAndroid.show(`ZIP wird erstellt (${paths.length} Dateien)…`, ToastAndroid.LONG);
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
{/* Bulk-Bar */}
|
||||
<View style={{paddingHorizontal:12, paddingBottom:8, flexDirection:'row', alignItems:'center', gap:8, flexWrap:'wrap'}}>
|
||||
<TouchableOpacity onPress={toggleSelectAll} style={{flexDirection:'row', alignItems:'center', gap:6, paddingVertical:4}}>
|
||||
<View style={{
|
||||
width:18, height:18, borderRadius:3,
|
||||
borderWidth:2, borderColor: allSelected ? '#0096FF' : '#555570',
|
||||
backgroundColor: allSelected ? '#0096FF' : 'transparent',
|
||||
alignItems:'center', justifyContent:'center',
|
||||
}}>
|
||||
{allSelected && <Text style={{color:'#fff', fontSize:11, fontWeight:'bold'}}>✓</Text>}
|
||||
</View>
|
||||
<Text style={{color:'#E0E0F0', fontSize:13}}>Alle markieren</Text>
|
||||
</TouchableOpacity>
|
||||
{fileManagerSelected.size > 0 && (
|
||||
<>
|
||||
<Text style={{color:'#555570', fontSize:13}}>·</Text>
|
||||
<Text style={{color:'#0096FF', fontSize:13, fontWeight:'600'}}>{fileManagerSelected.size} ausgewählt</Text>
|
||||
<TouchableOpacity
|
||||
onPress={bulkDownload}
|
||||
disabled={fileZipBusy}
|
||||
style={{paddingVertical:4, paddingHorizontal:10, borderRadius:6, backgroundColor:'#0096FF22', opacity: fileZipBusy ? 0.5 : 1}}
|
||||
>
|
||||
<Text style={{color:'#0096FF', fontSize:12}}>{fileZipBusy ? '⏳ ZIP…' : (fileManagerSelected.size > 1 ? '⬇ ZIP' : '⬇ Download')}</Text>
|
||||
</TouchableOpacity>
|
||||
<TouchableOpacity
|
||||
onPress={bulkDelete}
|
||||
style={{paddingVertical:4, paddingHorizontal:10, borderRadius:6, backgroundColor:'#FF6B6B22'}}
|
||||
>
|
||||
<Text style={{color:'#FF6B6B', fontSize:12}}>🗑 Löschen</Text>
|
||||
</TouchableOpacity>
|
||||
</>
|
||||
)}
|
||||
</View>
|
||||
|
||||
<ScrollView style={{flex:1}} contentContainerStyle={{padding:12, paddingTop:0}}>
|
||||
{!files.length ? (
|
||||
<Text style={{color:'#555570', textAlign:'center', marginTop:20}}>Keine Dateien</Text>
|
||||
) : files.map(f => {
|
||||
const selected = fileManagerSelected.has(f.path);
|
||||
return (
|
||||
<TouchableOpacity
|
||||
key={f.path}
|
||||
onPress={() => toggleOne(f.path)}
|
||||
activeOpacity={0.7}
|
||||
style={{
|
||||
backgroundColor: selected ? '#1E2C44' : '#0D0D1A',
|
||||
padding:12, borderRadius:8, marginBottom:8,
|
||||
flexDirection:'row', alignItems:'center', gap:8,
|
||||
borderWidth: selected ? 1 : 0, borderColor:'#0096FF',
|
||||
}}
|
||||
>
|
||||
<View style={{
|
||||
width:18, height:18, borderRadius:3,
|
||||
borderWidth:2, borderColor: selected ? '#0096FF' : '#555570',
|
||||
backgroundColor: selected ? '#0096FF' : 'transparent',
|
||||
alignItems:'center', justifyContent:'center',
|
||||
}}>
|
||||
{selected && <Text style={{color:'#fff', fontSize:11, fontWeight:'bold'}}>✓</Text>}
|
||||
</View>
|
||||
<View style={{flex:1}}>
|
||||
<View style={{flexDirection:'row', alignItems:'center'}}>
|
||||
<View style={{
|
||||
backgroundColor: f.fromAria ? '#0096FF22' : '#34C75922',
|
||||
paddingHorizontal:6, paddingVertical:1, borderRadius:3, marginRight:6,
|
||||
}}>
|
||||
<Text style={{color: f.fromAria ? '#0096FF' : '#34C759', fontSize:9}}>
|
||||
{f.fromAria ? 'ARIA' : 'USER'}
|
||||
</Text>
|
||||
</View>
|
||||
<Text style={{color:'#E0E0F0', fontSize:13, flex:1}} numberOfLines={1}>{f.name}</Text>
|
||||
</View>
|
||||
<Text style={{color:'#555570', fontSize:10, marginTop:2}}>
|
||||
{fmtSize(f.size)} · {new Date(f.mtime).toLocaleString('de-DE')}
|
||||
</Text>
|
||||
</View>
|
||||
<TouchableOpacity
|
||||
onPress={() => {
|
||||
Alert.alert(
|
||||
'Datei löschen?',
|
||||
`"${f.name}"\n\nIn allen Chat-Bubbles wird sie als gelöscht markiert.`,
|
||||
[
|
||||
{ text: 'Abbrechen', style: 'cancel' },
|
||||
{ text: 'Löschen', style: 'destructive', onPress: () => {
|
||||
rvs.send('file_delete_request' as any, { path: f.path });
|
||||
ToastAndroid.show('Lösch-Befehl gesendet…', ToastAndroid.SHORT);
|
||||
}},
|
||||
],
|
||||
);
|
||||
}}
|
||||
style={{padding:8}}
|
||||
>
|
||||
<Text style={{color:'#FF6B6B', fontSize:18}}>🗑</Text>
|
||||
</TouchableOpacity>
|
||||
</TouchableOpacity>
|
||||
);
|
||||
})}
|
||||
</ScrollView>
|
||||
</>
|
||||
);
|
||||
})()}
|
||||
</View>
|
||||
</Modal>
|
||||
<ScrollView style={styles.container} contentContainerStyle={styles.content}>
|
||||
|
||||
{currentSection === null && (
|
||||
@@ -697,6 +1027,29 @@ const SettingsScreen: React.FC = () => {
|
||||
thumbColor={gpsEnabled ? '#FFFFFF' : '#666680'}
|
||||
/>
|
||||
</View>
|
||||
|
||||
{/* GPS-Tracking (kontinuierlich) — fuer near()-Watcher */}
|
||||
<View style={[styles.toggleRow, {marginTop: 12, borderTopWidth: 1, borderTopColor: '#1E1E2E', paddingTop: 12}]}>
|
||||
<View style={styles.toggleInfo}>
|
||||
<Text style={styles.toggleLabel}>GPS-Tracking (kontinuierlich)</Text>
|
||||
<Text style={styles.toggleHint}>
|
||||
Sendet alle ~15s deine Position an ARIA (wenn du dich {'>'}30m bewegt
|
||||
hast). Nur noetig fuer GPS-basierte Trigger wie Blitzer-Warner
|
||||
(near()-Conditions). ARIA kann das auch selbst an-/abschalten wenn
|
||||
sie einen GPS-Watcher anlegt. Akku-Verbrauch erhoeht — bei langer
|
||||
Fahrt einplanen.
|
||||
</Text>
|
||||
</View>
|
||||
<Switch
|
||||
value={gpsTracking}
|
||||
onValueChange={(v) => {
|
||||
if (v) gpsTrackingService.start('manuell').catch(() => {});
|
||||
else gpsTrackingService.stop('manuell');
|
||||
}}
|
||||
trackColor={{ false: '#2A2A3E', true: '#FF9500' }}
|
||||
thumbColor={gpsTracking ? '#FFFFFF' : '#666680'}
|
||||
/>
|
||||
</View>
|
||||
</View>
|
||||
</>)}
|
||||
|
||||
@@ -1228,11 +1581,123 @@ const SettingsScreen: React.FC = () => {
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
|
||||
{/* === TTS-Cache === */}
|
||||
<Text style={[styles.sectionTitle, {marginTop: 16}]}>TTS-Cache</Text>
|
||||
<View style={styles.card}>
|
||||
<Text style={styles.toggleHint}>
|
||||
Gespeicherte Sprachausgaben (WAV pro Antwort) — werden fuer den
|
||||
Play-Button und Auto-Resume nach Anrufen genutzt. Loeschen
|
||||
unterbricht keine laufende Wiedergabe, alte Antworten lassen sich
|
||||
danach nur nicht mehr abspielen.
|
||||
</Text>
|
||||
<Text style={[styles.storageSizeText, {marginTop: 8}]}>
|
||||
{ttsCacheInfo === null ? '...' :
|
||||
ttsCacheInfo.count === 0 ? 'leer' :
|
||||
`${ttsCacheInfo.count} WAV${ttsCacheInfo.count === 1 ? '' : 's'} · ${ttsCacheInfo.totalMB.toFixed(1)}MB`}
|
||||
</Text>
|
||||
<TouchableOpacity
|
||||
style={[styles.clearButton, {marginTop: 8, backgroundColor: 'rgba(255,59,48,0.15)'}]}
|
||||
onPress={async () => {
|
||||
const res = await audioService.clearTtsCache();
|
||||
ToastAndroid.show(
|
||||
res.removed === 0
|
||||
? 'TTS-Cache war schon leer'
|
||||
: `${res.removed} WAV${res.removed === 1 ? '' : 's'} geloescht (${res.freedMB.toFixed(1)}MB frei)`,
|
||||
ToastAndroid.SHORT,
|
||||
);
|
||||
const info = await audioService.getTtsCacheSize();
|
||||
setTtsCacheInfo(info);
|
||||
}}
|
||||
>
|
||||
<Text style={[styles.clearButtonText, {color: '#FF3B30'}]}>TTS-Cache leeren</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
|
||||
{/* === Reparatur === */}
|
||||
<Text style={[styles.sectionTitle, {marginTop: 16}]}>Reparatur</Text>
|
||||
<View style={styles.card}>
|
||||
<Text style={styles.toggleHint}>
|
||||
Container gezielt neu starten — wenn die Voice-Bridge, das Gehirn
|
||||
oder die Vector-DB haengt. Restart dauert wenige Sekunden,
|
||||
laufende Anfragen gehen verloren.
|
||||
</Text>
|
||||
{[
|
||||
{ name: 'aria-bridge', label: '🚨 aria-bridge neu (Voice + RVS)' },
|
||||
{ name: 'aria-brain', label: '🚨 aria-brain neu (Agent + Memory)' },
|
||||
{ name: 'aria-qdrant', label: '🚨 aria-qdrant neu (Vector-DB)' },
|
||||
].map(c => (
|
||||
<TouchableOpacity
|
||||
key={c.name}
|
||||
style={[styles.clearButton, {marginTop: 8, backgroundColor: 'rgba(255,59,48,0.10)'}]}
|
||||
onPress={() => {
|
||||
Alert.alert(
|
||||
`${c.name} neu starten?`,
|
||||
'Restart in wenigen Sekunden. Laufende Anfragen gehen verloren.',
|
||||
[
|
||||
{ text: 'Abbrechen', style: 'cancel' },
|
||||
{ text: 'Neu starten', style: 'destructive', onPress: () => {
|
||||
rvs.send('container_restart' as any, { name: c.name });
|
||||
ToastAndroid.show(`${c.name} wird neu gestartet…`, ToastAndroid.LONG);
|
||||
}},
|
||||
],
|
||||
);
|
||||
}}
|
||||
>
|
||||
<Text style={[styles.clearButtonText, {color: '#FF3B30'}]}>{c.label}</Text>
|
||||
</TouchableOpacity>
|
||||
))}
|
||||
</View>
|
||||
|
||||
</>)}
|
||||
|
||||
{/* === Datei-Manager === */}
|
||||
{currentSection === 'files' && (<>
|
||||
<Text style={styles.sectionTitle}>Dateien</Text>
|
||||
<View style={styles.card}>
|
||||
<Text style={styles.toggleHint}>
|
||||
Alle Dateien aus <Text style={{fontFamily:'monospace'}}>/shared/uploads/</Text>
|
||||
— was ARIA generiert hat und was du hochgeladen hast.
|
||||
Beim Löschen wird die Bubble in App + Diagnostic als gelöscht markiert.
|
||||
</Text>
|
||||
<TouchableOpacity
|
||||
style={[styles.clearButton, {marginTop: 8, backgroundColor: 'rgba(0,150,255,0.15)'}]}
|
||||
onPress={() => {
|
||||
setFileManagerError('');
|
||||
setFileManagerLoading(true);
|
||||
setFileManagerOpen(true);
|
||||
rvs.send('file_list_request' as any, {});
|
||||
}}
|
||||
>
|
||||
<Text style={[styles.clearButtonText, {color: '#0096FF'}]}>{'📂 Datei-Manager öffnen'}</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
</>)}
|
||||
|
||||
{/* === Logs === */}
|
||||
{currentSection === 'protocol' && (<>
|
||||
<Text style={styles.sectionTitle}>Protokoll</Text>
|
||||
|
||||
{/* Verbose-Logging-Toggle */}
|
||||
<View style={styles.card}>
|
||||
<View style={styles.toggleRow}>
|
||||
<Text style={styles.toggleLabel}>Verbose Logging</Text>
|
||||
<Switch
|
||||
value={verboseLogging}
|
||||
onValueChange={(v) => {
|
||||
setVerboseLogging(v);
|
||||
setVerboseLoggingState(v);
|
||||
}}
|
||||
trackColor={{ false: '#3A3A52', true: '#0096FF' }}
|
||||
thumbColor={verboseLogging ? '#FFFFFF' : '#666680'}
|
||||
/>
|
||||
</View>
|
||||
<Text style={styles.toggleHint}>
|
||||
Wenn aus: console.log wird global stummgeschaltet (Speicher schonen).
|
||||
Warnungen und Fehler bleiben immer aktiv. Bei Bedarf einschalten zum
|
||||
Debuggen via adb logcat.
|
||||
</Text>
|
||||
</View>
|
||||
|
||||
<View style={styles.card}>
|
||||
{/* Tab-Umschalter */}
|
||||
<View style={styles.tabRow}>
|
||||
@@ -1319,7 +1784,8 @@ const SettingsScreen: React.FC = () => {
|
||||
<Text style={styles.aboutTitle}>ARIA Cockpit</Text>
|
||||
<Text style={styles.aboutVersion}>Version {require('../../package.json').version}</Text>
|
||||
<Text style={styles.aboutInfo}>
|
||||
Stefans Kommandozentrale f{'\u00FC'}r ARIA.{'\n'}
|
||||
ARIA \u2014 Autonomous Reasoning & Intelligence Assistant.{'\n'}
|
||||
Stefans Kommandozentrale.{'\n'}
|
||||
Gebaut mit React Native + TypeScript.
|
||||
</Text>
|
||||
<TouchableOpacity
|
||||
|
||||
+215
-15
@@ -41,6 +41,8 @@ const { AudioFocus, PcmStreamPlayer } = NativeModules as {
|
||||
requestDuck: () => Promise<boolean>;
|
||||
requestExclusive: () => Promise<boolean>;
|
||||
release: () => Promise<boolean>;
|
||||
kickReleaseMedia: () => Promise<boolean>;
|
||||
getMode?: () => Promise<number>;
|
||||
};
|
||||
PcmStreamPlayer?: {
|
||||
start: (sampleRate: number, channels: number, prerollSeconds: number) => Promise<boolean>;
|
||||
@@ -301,6 +303,12 @@ class AudioService {
|
||||
console.warn('[Audio] PcmPlaybackFinished-Subscription fehlgeschlagen:', err);
|
||||
}
|
||||
}
|
||||
// App-Start: orphaned aria_tts_*.wav / aria_recording_*.mp4 aus dem Cache
|
||||
// wegraeumen. Sammeln sich an wenn Sound mid-playback gestoppt wird (Anruf,
|
||||
// Mute, Barge-In) — der completion-callback feuert dann nicht und die Datei
|
||||
// bleibt liegen. 5min-Threshold damit gerade aktiv geschriebene Files sicher
|
||||
// sind. cleanupOnStartup ist async, blockt den Constructor nicht.
|
||||
this._cleanupStaleCacheFiles(5 * 60 * 1000).catch(() => {});
|
||||
}
|
||||
|
||||
/** AudioFocus mit kleiner Verzoegerung freigeben — Spotify/YouTube
|
||||
@@ -310,13 +318,19 @@ class AudioService {
|
||||
* unterdrueckt — der Focus bleibt fuer die ganze Konversation gehalten. */
|
||||
private _releaseFocusDeferred(): void {
|
||||
if (this._conversationFocusActive) {
|
||||
console.log('[Audio] _releaseFocusDeferred: Conversation aktiv → kein Release');
|
||||
this._cancelDeferredFocusRelease();
|
||||
return;
|
||||
}
|
||||
this._cancelDeferredFocusRelease();
|
||||
console.log('[Audio] _releaseFocusDeferred: in %dms', this.FOCUS_RELEASE_DELAY_MS);
|
||||
this.focusReleaseTimer = setTimeout(() => {
|
||||
this.focusReleaseTimer = null;
|
||||
if (this._conversationFocusActive) return;
|
||||
if (this._conversationFocusActive) {
|
||||
console.log('[Audio] Focus-Release abgebrochen (Conversation jetzt aktiv)');
|
||||
return;
|
||||
}
|
||||
console.log('[Audio] AudioFocus jetzt released');
|
||||
AudioFocus?.release().catch(() => {});
|
||||
}, this.FOCUS_RELEASE_DELAY_MS);
|
||||
}
|
||||
@@ -347,19 +361,69 @@ class AudioService {
|
||||
this._releaseFocusDeferred();
|
||||
}
|
||||
|
||||
/** TTS-Wiedergabe haart stoppen — z.B. wenn ein Anruf reinkommt.
|
||||
* Released auch sofort den AudioFocus damit der Anruf-Klingelton hoerbar ist. */
|
||||
/** TTS-Wiedergabe haart stoppen — z.B. fuer Barge-In. Buffer wird geleert,
|
||||
* kein Auto-Resume. Released auch sofort den AudioFocus. */
|
||||
haltAllPlayback(reason: string = ''): void {
|
||||
console.log('[Audio] haltAllPlayback: %s', reason || '(no reason)');
|
||||
this._conversationFocusActive = false;
|
||||
this.stopPlayback();
|
||||
}
|
||||
|
||||
/** Speziell fuer Anrufe: AudioTrack stoppen + Focus releasen, ABER pcm-
|
||||
* Buffer + messageId behalten damit weitere Chunks der unterbrochenen
|
||||
* Antwort weiter gesammelt werden. isFinal schreibt dann die WAV trotz
|
||||
* Anruf — und resumeFromInterruption findet sie. */
|
||||
pauseForCall(reason: string = ''): void {
|
||||
console.log('[Audio] pauseForCall: %s', reason || '(no reason)');
|
||||
this._conversationFocusActive = false;
|
||||
this._pausedForCall = true;
|
||||
// Queue + isPlaying ruecksetzen — sonst klemmt der naechste Play-Button
|
||||
// (playAudio sieht isPlaying=true und ruft _playNext nicht mehr auf).
|
||||
this.audioQueue = [];
|
||||
this.isPlaying = false;
|
||||
// Foreground-Service stoppen — Notification waere sonst irrefuehrend
|
||||
stopBackgroundAudio().catch(() => {});
|
||||
// SoundPool/RNSound (Resume-Sound, Play-Button) stoppen — nicht relevant fuer Auto-Resume
|
||||
if (this.currentSound) {
|
||||
try { this.currentSound.stop(); this.currentSound.release(); } catch {}
|
||||
this.currentSound = null;
|
||||
}
|
||||
if (this.resumeSound) {
|
||||
try { this.resumeSound.stop(); this.resumeSound.release(); } catch {}
|
||||
this.resumeSound = null;
|
||||
}
|
||||
// AudioTrack hart stoppen damit nichts mehr aus dem Lautsprecher kommt.
|
||||
// pcmStreamActive bleibt true, pcmBuffer/pcmMessageId BLEIBEN — damit
|
||||
// weitere Chunks gesammelt werden und isFinal die WAV schreiben kann.
|
||||
PcmStreamPlayer?.stop().catch(() => {});
|
||||
this._cancelDeferredFocusRelease();
|
||||
AudioFocus?.release().catch(() => {});
|
||||
}
|
||||
|
||||
/** Anruf vorbei → weitere Chunks duerfen wieder abgespielt werden.
|
||||
* resumeFromInterruption uebernimmt die Wiedergabe ab gemerkter Position. */
|
||||
endCallPause(): void {
|
||||
if (!this._pausedForCall) return;
|
||||
this._pausedForCall = false;
|
||||
console.log('[Audio] endCallPause');
|
||||
}
|
||||
|
||||
/** Bei Anruf: aktuelle Wiedergabe-Position merken damit wir nach dem
|
||||
* Auflegen von dort weitermachen koennen. Returnt Position in Sekunden
|
||||
* oder 0 wenn nichts spielte. */
|
||||
* oder 0 wenn nichts spielte.
|
||||
*
|
||||
* Idempotent: bei mehrfachem Aufruf (ringing → offhook) wird die Position
|
||||
* vom ersten Mal NICHT ueberschrieben. playbackStartTime laeuft stumpf
|
||||
* weiter obwohl das Audio gestoppt ist — der erste Halt ist der echte. */
|
||||
captureInterruption(): number {
|
||||
if (this.pausedMessageId) {
|
||||
console.log('[Audio] captureInterruption: bereits erfasst (msgId=%s pos=%ss) — skip',
|
||||
this.pausedMessageId, this.pausedPosition.toFixed(2));
|
||||
return this.pausedPosition;
|
||||
}
|
||||
if (!this.playbackStartTime || !this.currentPlaybackMsgId) {
|
||||
console.log('[Audio] captureInterruption: nichts spielte (startTime=%s, msgId=%s)',
|
||||
this.playbackStartTime, this.currentPlaybackMsgId || '(leer)');
|
||||
this.pausedPosition = 0;
|
||||
this.pausedMessageId = '';
|
||||
return 0;
|
||||
@@ -379,7 +443,12 @@ class AudioService {
|
||||
async resumeFromInterruption(maxWaitMs: number = 30000): Promise<boolean> {
|
||||
const msgId = this.pausedMessageId;
|
||||
const position = this.pausedPosition;
|
||||
if (!msgId) return false;
|
||||
if (!msgId) {
|
||||
console.log('[Audio] resumeFromInterruption: kein gemerkter Stand — skip');
|
||||
return false;
|
||||
}
|
||||
console.log('[Audio] resumeFromInterruption: starte fuer msgId=%s pos=%ss',
|
||||
msgId, position.toFixed(2));
|
||||
this.pausedMessageId = ''; // konsumieren
|
||||
const cachePath = `${RNFS.DocumentDirectoryPath}/tts_cache/${msgId}.wav`;
|
||||
const startTime = Date.now();
|
||||
@@ -413,6 +482,14 @@ class AudioService {
|
||||
this._firePlaybackStarted();
|
||||
this.isPlaying = true;
|
||||
this.resumeSound = sound;
|
||||
// Tracking auch fuer den Resume-Sound aktualisieren — sonst kann
|
||||
// captureInterruption bei einem zweiten Anruf die Position nicht
|
||||
// mehr ermitteln (playbackStartTime waere von der ersten Wiedergabe).
|
||||
const msgIdMatch = path.match(/([^/\\]+)\.wav$/i);
|
||||
if (msgIdMatch) this.currentPlaybackMsgId = msgIdMatch[1];
|
||||
// Virtuelle Start-Zeit so setzen, dass captureInterruption (das den
|
||||
// Leading-Silence-Offset wieder abzieht) die korrekte Position liefert.
|
||||
this.playbackStartTime = Date.now() - (positionSec + this.LEADING_SILENCE_SEC) * 1000;
|
||||
console.log('[Audio] Resume von Position %ss aus %s',
|
||||
positionSec.toFixed(2), path);
|
||||
sound.setCurrentTime(Math.max(0, positionSec));
|
||||
@@ -719,8 +796,13 @@ class AudioService {
|
||||
if (!base64Data) return;
|
||||
// Mute-Flag respektieren — robust gegen Race-Conditions zwischen User-
|
||||
// Klick auf Mute und einem TTS-Chunk der im selben Tick eintrifft.
|
||||
if (this._muted) return;
|
||||
if (this._muted) {
|
||||
console.log('[Audio] playAudio: muted=true → skip');
|
||||
return;
|
||||
}
|
||||
this.audioQueue.push(base64Data);
|
||||
console.log('[Audio] playAudio: queued (queue=%d isPlaying=%s pausedForCall=%s)',
|
||||
this.audioQueue.length, this.isPlaying, this._pausedForCall);
|
||||
if (!this.isPlaying) {
|
||||
this._playNext();
|
||||
}
|
||||
@@ -786,9 +868,16 @@ class AudioService {
|
||||
final?: boolean;
|
||||
silent?: boolean;
|
||||
}): Promise<string> {
|
||||
// _stoppedMessageId: User hat diese Antwort mid-Wiedergabe gestoppt
|
||||
// (Mute geklickt). Auch wenn Mute jetzt wieder aus ist, soll diese
|
||||
// Antwort nicht weiterspielen. Erst eine neue messageId resetted das.
|
||||
const incomingMsgId = payload.messageId || '';
|
||||
const stoppedByUser = !!this._stoppedMessageId && incomingMsgId === this._stoppedMessageId;
|
||||
// Globaler Mute-Flag uebersteuert das per-Call silent — verhindert
|
||||
// Race-Conditions wenn der User zwischen Chunks den Mute-Knopf drueckt.
|
||||
const silent = !!payload.silent || this._muted;
|
||||
// _pausedForCall: AudioTrack ist gestoppt waehrend Anruf — Chunks weiter
|
||||
// sammeln (fuer WAV-Cache), aber NICHT in den Player schicken.
|
||||
const silent = !!payload.silent || this._muted || this._pausedForCall || stoppedByUser;
|
||||
if (!silent && !PcmStreamPlayer) {
|
||||
console.warn('[Audio] PcmStreamPlayer Native Module nicht verfuegbar');
|
||||
return '';
|
||||
@@ -829,6 +918,13 @@ class AudioService {
|
||||
this.pausedMessageId = '';
|
||||
this.pausedPosition = 0;
|
||||
}
|
||||
// Stop-Marker zuruecksetzen wenn neue messageId — neue Antwort darf
|
||||
// wieder normal abspielen, egal ob Mute zwischendurch aktiv war.
|
||||
if (this._stoppedMessageId && this._stoppedMessageId !== messageId) {
|
||||
console.log('[Audio] Neue Antwort (msgId=%s) — Stop-Marker fuer %s zurueckgesetzt',
|
||||
messageId, this._stoppedMessageId);
|
||||
this._stoppedMessageId = '';
|
||||
}
|
||||
this.pcmStreamActive = true;
|
||||
this.pcmMessageId = messageId;
|
||||
this.pcmSampleRate = sampleRate;
|
||||
@@ -946,7 +1042,10 @@ class AudioService {
|
||||
}
|
||||
}
|
||||
|
||||
/** Audio aus lokaler Datei (file:// Pfad) in die Queue und abspielen. */
|
||||
/** Audio aus lokaler Datei (file:// Pfad) in die Queue und abspielen.
|
||||
* Setzt zusaetzlich playbackStartTime + currentPlaybackMsgId damit ein
|
||||
* Anruf waehrend dieses Playbacks korrekt erfasst wird (ohne dieses
|
||||
* Tracking liefert captureInterruption nichts → kein Auto-Resume). */
|
||||
async playFromPath(filePath: string): Promise<void> {
|
||||
if (!filePath) return;
|
||||
try {
|
||||
@@ -955,6 +1054,14 @@ class AudioService {
|
||||
console.warn('[Audio] Cache-Datei existiert nicht mehr:', cleanPath);
|
||||
return;
|
||||
}
|
||||
// Dateiname ohne .wav als messageId nehmen (egal ob UUID oder andere ID)
|
||||
const fileMatch = cleanPath.match(/([^/\\]+)\.wav$/i);
|
||||
const msgId = fileMatch ? fileMatch[1] : '';
|
||||
console.log('[Audio] playFromPath: cleanPath=%s → msgId=%s', cleanPath, msgId || '(leer)');
|
||||
if (msgId) {
|
||||
this.currentPlaybackMsgId = msgId;
|
||||
this.playbackStartTime = Date.now() - this.LEADING_SILENCE_SEC * 1000;
|
||||
}
|
||||
const b64 = await RNFS.readFile(cleanPath, 'base64');
|
||||
this.playAudio(b64);
|
||||
} catch (err) {
|
||||
@@ -983,9 +1090,15 @@ class AudioService {
|
||||
}
|
||||
|
||||
private _firePlaybackStarted(): void {
|
||||
// Tracking fuer Auto-Resume nach Anruf-Pause
|
||||
// Tracking fuer Auto-Resume nach Anruf-Pause: NUR setzen wenn ein
|
||||
// PCM-Stream laeuft (Live-TTS). Bei Play-Button / Resume-Sound hat der
|
||||
// Caller (playFromPath / _playFromPathAtPosition) das Tracking schon
|
||||
// korrekt mit der msgId aus dem Pfad gesetzt — sonst wuerden wir hier
|
||||
// mit leerem pcmMessageId ueberschreiben.
|
||||
if (this.pcmMessageId) {
|
||||
this.playbackStartTime = Date.now();
|
||||
this.currentPlaybackMsgId = this.pcmMessageId || '';
|
||||
this.currentPlaybackMsgId = this.pcmMessageId;
|
||||
}
|
||||
this.playbackStartedListeners.forEach(cb => {
|
||||
try { cb(); } catch (e) { console.warn('[Audio] playbackStarted listener err:', e); }
|
||||
});
|
||||
@@ -1038,11 +1151,13 @@ class AudioService {
|
||||
}
|
||||
|
||||
this.currentSound = sound;
|
||||
console.log('[Audio] Sound.play startet (path=%s)', soundPath);
|
||||
|
||||
// Naechstes Audio schon vorbereiten waehrend dieses abspielt
|
||||
this._preloadNext();
|
||||
|
||||
sound.play((success) => {
|
||||
console.log('[Audio] Sound.play callback: success=%s queue=%d', success, this.audioQueue.length);
|
||||
if (!success) console.warn('[Audio] Wiedergabe fehlgeschlagen');
|
||||
sound.release();
|
||||
this.currentSound = null;
|
||||
@@ -1074,14 +1189,43 @@ class AudioService {
|
||||
* — die Bridge kann einen Chunk im selben JS-Tick liefern in dem der
|
||||
* User Mute geklickt hat. */
|
||||
private _muted: boolean = false;
|
||||
/** Anruf laeuft → Chunks werden nur in den Cache-Buffer gepusht, nicht
|
||||
* abgespielt. Wird in pauseForCall gesetzt, in endCallPause/resumeFrom-
|
||||
* Interruption zurueckgenommen. */
|
||||
private _pausedForCall: boolean = false;
|
||||
/** Wenn der User mid-Wiedergabe Mute drueckt: messageId der ABGEBROCHENEN
|
||||
* Antwort merken. Folge-Chunks dieser msgId werden silent ignoriert, auch
|
||||
* wenn der User Mute wieder ausschaltet — kein "Resume mid-Antwort". Eine
|
||||
* NEUE messageId resetted das, dann spielt's wieder normal. */
|
||||
private _stoppedMessageId: string = '';
|
||||
setMuted(muted: boolean): void {
|
||||
console.log('[Audio] setMuted: %s (currentSound=%s pcmStreamActive=%s)',
|
||||
muted, this.currentSound ? 'aktiv' : 'null', this.pcmStreamActive);
|
||||
this._muted = muted;
|
||||
if (muted) this.stopPlayback();
|
||||
if (muted) {
|
||||
// Aktuell laufende Antwort als "verworfen" markieren — nachfolgende
|
||||
// chunks dieser msgId werden silent gehalten auch wenn der User Mute
|
||||
// gleich wieder ausschaltet. Erst eine NEUE Antwort darf wieder reden.
|
||||
const activeMsgId = this.pcmMessageId || this.currentPlaybackMsgId;
|
||||
if (activeMsgId) {
|
||||
this._stoppedMessageId = activeMsgId;
|
||||
console.log('[Audio] Antwort %s als gestoppt markiert', activeMsgId);
|
||||
}
|
||||
this.stopPlayback();
|
||||
}
|
||||
}
|
||||
isMuted(): boolean { return this._muted; }
|
||||
|
||||
/** Laufende Wiedergabe stoppen + Queue leeren */
|
||||
stopPlayback(): void {
|
||||
// Idempotent: wenn nichts mehr aktiv ist, NICHT noch einen Focus-Release/
|
||||
// Kick-Cycle anstossen — Re-Renders triggern setMuted oft mehrfach hinter-
|
||||
// einander, und jeder weitere Kick lässt Spotify nochmal kurz pausieren.
|
||||
const hasAnything = !!(this.currentSound || this.resumeSound || this.preloadedSound
|
||||
|| this.pcmStreamActive || this.audioQueue.length || this.isPlaying);
|
||||
if (!hasAnything) return;
|
||||
console.log('[Audio] stopPlayback: currentSound=%s queue=%d pcm=%s',
|
||||
this.currentSound ? 'aktiv' : 'null', this.audioQueue.length, this.pcmStreamActive);
|
||||
// Foreground-Service auch stoppen — sonst bleibt die Notification haengen
|
||||
// wenn Wiedergabe abgebrochen wird (Anruf, Cancel, Barge-In).
|
||||
stopBackgroundAudio().catch(() => {});
|
||||
@@ -1092,6 +1236,11 @@ class AudioService {
|
||||
this.currentSound.release();
|
||||
this.currentSound = null;
|
||||
}
|
||||
if (this.resumeSound) {
|
||||
this.resumeSound.stop();
|
||||
this.resumeSound.release();
|
||||
this.resumeSound = null;
|
||||
}
|
||||
if (this.preloadedSound) {
|
||||
this.preloadedSound.release();
|
||||
this.preloadedSound = null;
|
||||
@@ -1107,7 +1256,11 @@ class AudioService {
|
||||
this.pcmBuffer = [];
|
||||
this.pcmBytesCollected = 0;
|
||||
this.pcmMessageId = '';
|
||||
// Audio-Focus sofort freigeben — User hat explizit abgebrochen
|
||||
// Audio-Focus sofort freigeben — User hat explizit abgebrochen.
|
||||
// Unser Focus war TRANSIENT, Spotify resumed darum automatisch beim
|
||||
// Abandon. Den frueheren kickReleaseMedia haben wir entfernt: er
|
||||
// requestete USAGE_MEDIA mit GAIN (permanent), was Spotify als
|
||||
// "user-action stopp" interpretierte und Auto-Resume verhinderte.
|
||||
this._cancelDeferredFocusRelease();
|
||||
AudioFocus?.release().catch(() => {});
|
||||
}
|
||||
@@ -1149,19 +1302,29 @@ class AudioService {
|
||||
}
|
||||
}
|
||||
|
||||
/** Alte Aufnahme- und TTS-Files aus dem Cache loeschen (>30s alt). */
|
||||
private async _cleanupStaleCacheFiles(): Promise<void> {
|
||||
/** Alte Aufnahme- und TTS-Files aus dem Cache loeschen.
|
||||
* Default 30s — verwendet beim Mikro-Start (kurze Lebensdauer reicht).
|
||||
* App-Start nutzt 5min damit gerade aktive Files nicht erwischt werden. */
|
||||
private async _cleanupStaleCacheFiles(maxAgeMs: number = 30000): Promise<void> {
|
||||
try {
|
||||
const files = await RNFS.readDir(RNFS.CachesDirectoryPath);
|
||||
const now = Date.now();
|
||||
let removed = 0;
|
||||
let freedBytes = 0;
|
||||
for (const f of files) {
|
||||
if (!f.isFile()) continue;
|
||||
if (!f.name.startsWith('aria_recording_') && !f.name.startsWith('aria_tts_')) continue;
|
||||
const age = now - (f.mtime ? f.mtime.getTime() : 0);
|
||||
if (age > 30000) {
|
||||
if (age > maxAgeMs) {
|
||||
freedBytes += parseInt(f.size as any, 10) || 0;
|
||||
await RNFS.unlink(f.path).catch(() => {});
|
||||
removed += 1;
|
||||
}
|
||||
}
|
||||
if (removed > 0) {
|
||||
console.log('[Audio] Cache-Cleanup: %d Files entfernt, %.1fMB freigegeben',
|
||||
removed, freedBytes / 1024 / 1024);
|
||||
}
|
||||
} catch {
|
||||
// silent — cleanup ist best-effort
|
||||
}
|
||||
@@ -1188,6 +1351,43 @@ class AudioService {
|
||||
// silent
|
||||
}
|
||||
}
|
||||
|
||||
/** Aktuelle Groesse des TTS-Caches. */
|
||||
async getTtsCacheSize(): Promise<{ count: number; totalMB: number }> {
|
||||
let count = 0;
|
||||
let total = 0;
|
||||
try {
|
||||
const dir = `${RNFS.DocumentDirectoryPath}/tts_cache`;
|
||||
if (await RNFS.exists(dir)) {
|
||||
const files = await RNFS.readDir(dir);
|
||||
for (const f of files) {
|
||||
if (!f.isFile() || !f.name.endsWith('.wav')) continue;
|
||||
count += 1;
|
||||
total += parseInt(f.size as any, 10) || 0;
|
||||
}
|
||||
}
|
||||
} catch {}
|
||||
return { count, totalMB: total / 1024 / 1024 };
|
||||
}
|
||||
|
||||
/** TTS-Cache komplett leeren (Settings-Button). */
|
||||
async clearTtsCache(): Promise<{ removed: number; freedMB: number }> {
|
||||
let removed = 0;
|
||||
let freed = 0;
|
||||
try {
|
||||
const dir = `${RNFS.DocumentDirectoryPath}/tts_cache`;
|
||||
if (!(await RNFS.exists(dir))) return { removed: 0, freedMB: 0 };
|
||||
const files = await RNFS.readDir(dir);
|
||||
for (const f of files) {
|
||||
if (!f.isFile() || !f.name.endsWith('.wav')) continue;
|
||||
const size = parseInt(f.size as any, 10) || 0;
|
||||
await RNFS.unlink(f.path).catch(() => {});
|
||||
removed += 1;
|
||||
freed += size;
|
||||
}
|
||||
} catch {}
|
||||
return { removed, freedMB: freed / 1024 / 1024 };
|
||||
}
|
||||
}
|
||||
|
||||
// Singleton
|
||||
|
||||
@@ -0,0 +1,138 @@
|
||||
/**
|
||||
* GPS-Tracking-Service.
|
||||
*
|
||||
* Wenn aktiv: pushed alle paar Sekunden die aktuelle Position als
|
||||
* `location_update {lat, lon}` an den RVS-Server, damit Brain-Watcher
|
||||
* mit `near()`-Conditions etwas zum Vergleichen haben.
|
||||
*
|
||||
* Default: AUS. Wird entweder vom User manuell in Settings angeschaltet
|
||||
* oder von ARIA via location_tracking-RVS-Message (Brain-Tool
|
||||
* `request_location_tracking`).
|
||||
*
|
||||
* Energie-Schutz: distanceFilter 30m, interval 15s. Echte Fahrt-Updates
|
||||
* (Geschwindigkeit) kommen sauber durch, stationaer wird kaum gesendet.
|
||||
*/
|
||||
|
||||
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||
import { PermissionsAndroid, Platform, ToastAndroid } from 'react-native';
|
||||
import Geolocation from '@react-native-community/geolocation';
|
||||
import rvs from './rvs';
|
||||
|
||||
type Listener = (active: boolean) => void;
|
||||
|
||||
class GpsTrackingService {
|
||||
private watchId: number | null = null;
|
||||
private active = false;
|
||||
private listeners: Set<Listener> = new Set();
|
||||
// Defensive: nicht zu schnell oeffentlich togglen
|
||||
private lastChangeAt = 0;
|
||||
|
||||
isActive(): boolean {
|
||||
return this.active;
|
||||
}
|
||||
|
||||
onChange(cb: Listener): () => void {
|
||||
this.listeners.add(cb);
|
||||
return () => { this.listeners.delete(cb); };
|
||||
}
|
||||
|
||||
private notify() {
|
||||
for (const cb of this.listeners) {
|
||||
try { cb(this.active); } catch {}
|
||||
}
|
||||
}
|
||||
|
||||
/** Beim App-Start: gespeicherten Zustand wiederherstellen (Default off). */
|
||||
async restoreFromStorage(): Promise<void> {
|
||||
try {
|
||||
const v = await AsyncStorage.getItem('aria_gps_tracking');
|
||||
if (v === 'true') {
|
||||
console.log('[gps-track] Restore: war an, starte wieder');
|
||||
this.start('Beim Start wiederhergestellt');
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
|
||||
private async ensurePermission(): Promise<boolean> {
|
||||
if (Platform.OS !== 'android') return true;
|
||||
try {
|
||||
const granted = await PermissionsAndroid.request(
|
||||
PermissionsAndroid.PERMISSIONS.ACCESS_FINE_LOCATION,
|
||||
{
|
||||
title: 'GPS-Tracking',
|
||||
message: 'ARIA braucht laufende Standort-Updates damit GPS-Watcher (Blitzer-Warner, near()) funktionieren.',
|
||||
buttonPositive: 'Erlauben',
|
||||
buttonNegative: 'Abbrechen',
|
||||
},
|
||||
);
|
||||
return granted === PermissionsAndroid.RESULTS.GRANTED;
|
||||
} catch (e) {
|
||||
console.warn('[gps-track] Permission-Fehler:', e);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
async start(reason: string = ''): Promise<boolean> {
|
||||
if (this.active) return true;
|
||||
const ok = await this.ensurePermission();
|
||||
if (!ok) {
|
||||
ToastAndroid.show('GPS-Tracking: Berechtigung abgelehnt', ToastAndroid.LONG);
|
||||
return false;
|
||||
}
|
||||
try {
|
||||
this.watchId = Geolocation.watchPosition(
|
||||
(pos) => {
|
||||
const lat = pos.coords.latitude;
|
||||
const lon = pos.coords.longitude;
|
||||
rvs.send('location_update' as any, { lat, lon });
|
||||
},
|
||||
(err) => {
|
||||
console.warn('[gps-track] watchPosition error:', err?.code, err?.message);
|
||||
},
|
||||
{
|
||||
enableHighAccuracy: true,
|
||||
distanceFilter: 30, // erst senden wenn 30m gewandert
|
||||
interval: 15000, // (Android) gewuenschte Frequenz
|
||||
fastestInterval: 10000, // (Android) max Frequenz
|
||||
} as any,
|
||||
);
|
||||
this.active = true;
|
||||
this.lastChangeAt = Date.now();
|
||||
this.notify();
|
||||
AsyncStorage.setItem('aria_gps_tracking', 'true').catch(() => {});
|
||||
ToastAndroid.show(
|
||||
reason ? `GPS-Tracking aktiv (${reason})` : 'GPS-Tracking aktiv',
|
||||
ToastAndroid.SHORT,
|
||||
);
|
||||
console.log('[gps-track] gestartet', reason ? `(${reason})` : '');
|
||||
return true;
|
||||
} catch (e: any) {
|
||||
console.warn('[gps-track] start fehlgeschlagen:', e?.message);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
stop(reason: string = ''): void {
|
||||
if (!this.active) return;
|
||||
if (this.watchId !== null) {
|
||||
try { Geolocation.clearWatch(this.watchId); } catch {}
|
||||
this.watchId = null;
|
||||
}
|
||||
this.active = false;
|
||||
this.lastChangeAt = Date.now();
|
||||
this.notify();
|
||||
AsyncStorage.setItem('aria_gps_tracking', 'false').catch(() => {});
|
||||
ToastAndroid.show(
|
||||
reason ? `GPS-Tracking aus (${reason})` : 'GPS-Tracking aus',
|
||||
ToastAndroid.SHORT,
|
||||
);
|
||||
console.log('[gps-track] gestoppt', reason ? `(${reason})` : '');
|
||||
}
|
||||
|
||||
async toggle(reason: string = ''): Promise<void> {
|
||||
if (this.active) this.stop(reason);
|
||||
else await this.start(reason);
|
||||
}
|
||||
}
|
||||
|
||||
export default new GpsTrackingService();
|
||||
@@ -0,0 +1,41 @@
|
||||
/**
|
||||
* Verbose-Logging-Toggle: console.log laesst sich global stummschalten.
|
||||
* console.warn/console.error bleiben immer an — Fehler will man immer sehen.
|
||||
*
|
||||
* Default: an (true). Toggle ueber Settings → Protokoll → Verbose Logging.
|
||||
* Beim Start wird der gespeicherte Wert geladen, vorher loggen wir normal.
|
||||
*/
|
||||
|
||||
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||
|
||||
export const VERBOSE_LOGGING_KEY = 'aria_verbose_logging';
|
||||
|
||||
// Original-console.log retten, damit wir die Wrapper jederzeit wieder
|
||||
// "scharf" stellen koennen (sonst waere ein Toggle-an nach -aus tot).
|
||||
const originalLog = console.log.bind(console);
|
||||
const noop = () => {};
|
||||
|
||||
let _verbose = true;
|
||||
|
||||
function applyState(): void {
|
||||
console.log = _verbose ? originalLog : noop;
|
||||
}
|
||||
|
||||
/** Wert aus AsyncStorage laden und anwenden. Beim App-Start aufrufen. */
|
||||
export async function initLogger(): Promise<void> {
|
||||
try {
|
||||
const v = await AsyncStorage.getItem(VERBOSE_LOGGING_KEY);
|
||||
_verbose = v !== 'false'; // default: true
|
||||
} catch {}
|
||||
applyState();
|
||||
}
|
||||
|
||||
export function isVerboseLogging(): boolean {
|
||||
return _verbose;
|
||||
}
|
||||
|
||||
export function setVerboseLogging(verbose: boolean): void {
|
||||
_verbose = verbose;
|
||||
applyState();
|
||||
AsyncStorage.setItem(VERBOSE_LOGGING_KEY, String(verbose)).catch(() => {});
|
||||
}
|
||||
@@ -189,22 +189,32 @@ class PhoneCallService {
|
||||
private _haltForCall(toast: string): void {
|
||||
// Position merken bevor wir den Stream killen — fuer Auto-Resume.
|
||||
audioService.captureInterruption();
|
||||
audioService.haltAllPlayback(toast);
|
||||
// pauseForCall (statt haltAllPlayback): pcmBuffer + messageId bleiben,
|
||||
// weitere Chunks werden weiter gesammelt damit isFinal die WAV schreibt.
|
||||
audioService.pauseForCall(toast);
|
||||
wakeWordService.pauseForCall().catch(() => {});
|
||||
ToastAndroid.show(toast, ToastAndroid.SHORT);
|
||||
}
|
||||
|
||||
private _resumeAfterCall(toast: string): void {
|
||||
// Anruf-Pause aufheben — neue Chunks duerfen wieder direkt abgespielt
|
||||
// werden (falls die Bridge mid-Anruf isFinal noch nicht geschickt hat).
|
||||
audioService.endCallPause();
|
||||
wakeWordService.resumeFromCall().catch(() => {});
|
||||
ToastAndroid.show(toast, ToastAndroid.SHORT);
|
||||
// Auto-Resume: ab gemerkter Position weiterspielen wenn ARIA vor dem
|
||||
// Anruf gerade redete. Wartet bis zu 30s auf den WAV-Cache (falls
|
||||
// final-Marker erst nach dem Anruf-Ende kam).
|
||||
// 800ms warten bevor Auto-Resume — sonst kollidiert ARIA's neuer Focus-
|
||||
// Request mit Spotify's Auto-Resume nach Anruf-Ende. System haengt nach
|
||||
// dem Auflegen noch im IN_CALL-Mode-Uebergang, Spotify schaut auf Focus-
|
||||
// Gain und wuerde sofort wieder LOSS sehen → bleibt pausiert.
|
||||
// Mit Delay: Spotify resumed kurz, dann pausiert ARIA wieder ordnungs-
|
||||
// gemaess. Wenn ARIA nichts pending hat, bleibt Spotify einfach an.
|
||||
setTimeout(() => {
|
||||
audioService.resumeFromInterruption(30000).then(ok => {
|
||||
if (ok) {
|
||||
console.log('[PhoneCall] Auto-Resume von gemerkter Position gestartet');
|
||||
}
|
||||
}).catch(() => {});
|
||||
}, 800);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -0,0 +1,35 @@
|
||||
# ════════════════════════════════════════════════════════════
# ARIA Brain — agent + memory container
#
# FastAPI server with vector-DB memory (Qdrant).
# Talks to the Bridge and Diagnostic via HTTP/WebSocket.
# LLM calls go through the proxy (claude-max-api-proxy).
# ════════════════════════════════════════════════════════════

FROM python:3.12-slim

# System tools that skills may need (curl, jq, git, ssh client,
# build basics for venv compiles). Deliberately minimal — everything
# else is brought along by the skill itself (see execution=local-bin).
RUN apt-get update && apt-get install -y --no-install-recommends \
    curl \
    jq \
    git \
    openssh-client \
    ca-certificates \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Install dependencies first so this layer is cached across code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# Embedding-model cache and skills live under /data (volume)
ENV SENTENCE_TRANSFORMERS_HOME=/data/_models
ENV ARIA_DATA_DIR=/data

EXPOSE 8080

CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080"]
|
||||
@@ -0,0 +1,579 @@
|
||||
"""
|
||||
Conversation-Loop. Eine Anfrage von Stefan, eine Antwort von ARIA.
|
||||
|
||||
Pro Turn:
|
||||
1. user-Turn an die laufende Conversation appenden
|
||||
2. Hot Memory holen (alle pinned Punkte)
|
||||
3. Cold Memory holen (Top-K semantisch zur user-Nachricht)
|
||||
4. System-Prompt aus Hot+Cold bauen
|
||||
5. Messages = [system, *window, user]
|
||||
6. Claude via Proxy aufrufen
|
||||
7. Assistant-Reply in Conversation appenden + zurueckgeben
|
||||
|
||||
Memory-Destillat laeuft asynchron NACH dem Reply, gesteuert vom
|
||||
/chat-Endpoint ueber BackgroundTasks.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from conversation import Conversation, Turn
|
||||
from memory import Embedder, VectorStore, MemoryPoint
|
||||
from prompts import build_system_prompt
|
||||
from proxy_client import ProxyClient, Message as ProxyMessage
|
||||
import skills as skills_mod
|
||||
import triggers as triggers_mod
|
||||
import watcher as watcher_mod
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Meta-tools that are always offered to the model, independent of installed
# skills: ARIA can create new skills, manage triggers (timers/watchers) and
# ask the app to toggle GPS tracking. Entries follow the OpenAI
# function-calling tool schema; the German description strings are part of
# the prompt contract and must not be altered.
META_TOOLS = [
    # skill_create — self-extension: ARIA writes a new Python skill that
    # runs in its own venv. The description embeds the policy for when a
    # skill is warranted.
    {
        "type": "function",
        "function": {
            "name": "skill_create",
            "description": (
                "Erstelle einen neuen Skill (wiederverwendbare Faehigkeit). "
                "Skills sind IMMER Python — jeder Skill bekommt seine eigene venv "
                "mit den pip_packages die er braucht.\n\n"
                "HARTE REGEL — IMMER Skill anlegen wenn: die Loesung erfordert eine "
                "pip-Library. Sonst muesste der Install bei jedem Container-Restart "
                "neu laufen (Brain hat keinen persistenten State ausser /data/skills/).\n\n"
                "Sonst NUR wenn ALLE Kriterien erfuellt sind:\n"
                " 1) wiederkehrend (Aufgabe kommt realistisch nochmal),\n"
                " 2) nicht-trivial (mehrere Schritte),\n"
                " 3) parametrisierbar (nimmt Eingaben, gibt Ergebnis),\n"
                " 4) wiederverwendbar als ganzes Paket.\n"
                "NICHT fuer einzelne Shell-Befehle (date, hostname, ls etc.) und "
                "nicht fuer Einmal-Faelle. Stefan kann Skill-Erstellung explizit "
                "triggern (\"bau daraus einen Skill\").\n\n"
                "Wenn etwas nur via apt-Paket geht — Stefan fragen ob es ins "
                "Brain-Dockerfile soll, NICHT als Skill bauen."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    "name": {"type": "string", "description": "kurz, kebab-case, a-z 0-9 - _"},
                    "description": {"type": "string", "description": "Was kann der Skill? 1 Satz."},
                    "entry_code": {
                        "type": "string",
                        "description": (
                            "Python-Code. Args lesen via os.environ['ARG_NAME']. "
                            "Resultat per print() (stdout) zurueck. Bei Fehler: "
                            "non-zero exit (sys.exit(1) o.ae.)."
                        ),
                    },
                    "readme": {"type": "string", "description": "Markdown — was macht der Skill, Beispiel-Aufrufe"},
                    "pip_packages": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "pip-Pakete die in der venv installiert werden (z.B. requests, yt-dlp, pypdf)",
                    },
                    "args": {
                        "type": "array",
                        "items": {"type": "object"},
                        "description": "Argumente-Schema [{name, type, required, description}]",
                    },
                },
                "required": ["name", "description", "entry_code"],
            },
        },
    },
    # skill_list — introspection; the skill list is also in the system prompt.
    {
        "type": "function",
        "function": {
            "name": "skill_list",
            "description": "Zeigt alle Skills (inkl. deaktivierte). Sollte selten noetig sein — die Liste steht eh im System-Prompt.",
            "parameters": {"type": "object", "properties": {}},
        },
    },
    # trigger_timer — one-shot reminder; exactly one of in_seconds/fires_at.
    {
        "type": "function",
        "function": {
            "name": "trigger_timer",
            "description": (
                "Lege einen Timer-Trigger an — feuert EINMALIG und ruft dich dann selbst auf "
                "(Push-Nachricht an Stefan). Use-Case: 'erinnere mich in 10min', "
                "'sag mir um 14:30 Bescheid'. Genau EINES von `in_seconds` ODER `fires_at` "
                "muss gesetzt sein."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    "name": {"type": "string", "description": "kurzer kebab-case-Name, a-z 0-9 - _"},
                    "in_seconds": {
                        "type": "integer",
                        "description": (
                            "Relativ ab jetzt in Sekunden. Bevorzugt bei Angaben wie "
                            "'in 2 Minuten' (=120), 'in 1 Stunde' (=3600). "
                            "Server berechnet daraus den absoluten Feuer-Zeitpunkt."
                        ),
                    },
                    "fires_at": {
                        "type": "string",
                        "description": (
                            "Absoluter ISO-Timestamp UTC fuer feste Termine, z.B. "
                            "'2026-05-12T14:30:00Z'. Die aktuelle Zeit findest du im "
                            "System-Prompt unter '## Aktuelle Zeit'. Fuer relative Angaben "
                            "lieber `in_seconds` nutzen."
                        ),
                    },
                    "message": {"type": "string", "description": "Was soll bei der Erinnerung gesagt werden"},
                },
                "required": ["name", "message"],
            },
        },
    },
    # trigger_watcher — polled boolean condition with throttle.
    {
        "type": "function",
        "function": {
            "name": "trigger_watcher",
            "description": (
                "Lege einen Watcher-Trigger an — pollt alle paar Minuten eine Condition, "
                "feuert wenn sie wahr wird (mit Throttle damit's nicht spammt). "
                "Use-Case: 'sag bescheid wenn Disk unter 5GB', 'pingt mich wenn um 8 Uhr'. "
                "Welche Variablen verfuegbar sind und ihre Bedeutung steht im System-Prompt."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    "name": {"type": "string", "description": "kurzer Name"},
                    "condition": {
                        "type": "string",
                        "description": (
                            "Boolescher Ausdruck mit den erlaubten Variablen, z.B. "
                            "'disk_free_gb < 5', 'hour_of_day == 8 and day_of_week == \"mon\"'. "
                            "Operatoren: < > <= >= == != and or not"
                        ),
                    },
                    "message": {"type": "string", "description": "Was soll bei Erfuellung gesagt werden"},
                    "check_interval_sec": {
                        "type": "integer",
                        "description": "Wie oft Condition pruefen (Default 300 = alle 5min, min 30)",
                    },
                    "throttle_sec": {
                        "type": "integer",
                        "description": "Mindestabstand zwischen 2 Feuerungen (Default 3600 = max 1x/h)",
                    },
                },
                "required": ["name", "condition", "message"],
            },
        },
    },
    # trigger_cancel — delete a timer or watcher by name.
    {
        "type": "function",
        "function": {
            "name": "trigger_cancel",
            "description": "Loescht einen Trigger (Timer abbrechen oder Watcher entfernen).",
            "parameters": {
                "type": "object",
                "properties": {"name": {"type": "string"}},
                "required": ["name"],
            },
        },
    },
    # trigger_list — introspection; also visible in the diagnostic UI.
    {
        "type": "function",
        "function": {
            "name": "trigger_list",
            "description": "Zeigt alle Trigger (active + inaktiv). Selten noetig — Stefan sieht sie im Diagnostic.",
            "parameters": {"type": "object", "properties": {}},
        },
    },
    # request_location_tracking — ask the app to toggle continuous GPS
    # updates (handled client-side via a side-channel event, see
    # _dispatch_tool).
    {
        "type": "function",
        "function": {
            "name": "request_location_tracking",
            "description": (
                "Bittet die App, das kontinuierliche GPS-Tracking zu aktivieren oder zu "
                "deaktivieren. Default ist AUS (Akku-Schutz). Nutze das wenn du einen "
                "GPS-basierten Watcher anlegst (z.B. `near(...)`), sonst hat die App "
                "veraltete Position und der Watcher feuert nie. Auch wieder ausschalten "
                "wenn der letzte GPS-Watcher geloescht wurde."
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    "on": {"type": "boolean", "description": "true = Tracking an, false = aus"},
                    "reason": {"type": "string", "description": "Kurzer Grund (wird in App-Notification angezeigt)"},
                },
                "required": ["on"],
            },
        },
    },
]
|
||||
|
||||
|
||||
def _skill_to_tool(s: dict) -> dict:
|
||||
"""Mappt einen Skill auf ein OpenAI-Function-Tool."""
|
||||
args = s.get("args") or []
|
||||
props = {}
|
||||
required = []
|
||||
for a in args:
|
||||
if not isinstance(a, dict):
|
||||
continue
|
||||
name = a.get("name") or ""
|
||||
if not name:
|
||||
continue
|
||||
props[name] = {
|
||||
"type": a.get("type", "string"),
|
||||
"description": a.get("description", ""),
|
||||
}
|
||||
if a.get("required"):
|
||||
required.append(name)
|
||||
return {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": f"run_{s['name']}",
|
||||
"description": s.get("description", "(ohne Beschreibung)"),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": props,
|
||||
"required": required,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class Agent:
|
||||
    def __init__(self, store: VectorStore, embedder: Embedder,
                 conversation: Conversation, proxy: ProxyClient,
                 cold_k: int = 5):
        """Wire the agent to its collaborators.

        store / embedder: vector memory and the embedding backend used for
            cold (semantic) retrieval.
        conversation: rolling turn window shared across requests.
        proxy: LLM client (Claude via the proxy).
        cold_k: number of non-pinned memories retrieved per turn.
        """
        self.store = store
        self.embedder = embedder
        self.conversation = conversation
        self.proxy = proxy
        self.cold_k = cold_k
        # Side-channel events created during a turn (e.g. skill_create).
        # The /chat endpoint attaches them to the response so Stefan gets a
        # visible bubble in the app and the diagnostic UI.
        self._pending_events: list[dict] = []
|
||||
|
||||
def pop_events(self) -> list[dict]:
|
||||
"""Holt die Events des letzten chat()-Calls und leert die Liste."""
|
||||
events = self._pending_events
|
||||
self._pending_events = []
|
||||
return events
|
||||
|
||||
    # ── Main path: one user turn → tool loop → final reply ──

    MAX_TOOL_ITERATIONS = 8  # guard against endless tool-call loops

    def chat(self, user_message: str, source: str = "") -> str:
        """Run one full conversation turn and return ARIA's final text reply.

        Appends the user turn, builds the system prompt from hot (pinned)
        and cold (semantic top-k) memory plus skills/triggers, then loops
        Claude tool-calls until a plain text reply arrives.

        Raises ValueError on an empty message and RuntimeError when the
        proxy yields an empty final reply.
        """
        user_message = (user_message or "").strip()
        if not user_message:
            raise ValueError("Leere Nachricht")

        # Drop any events left over from a previous turn
        self._pending_events = []

        # 1. Append the user turn to the conversation
        self.conversation.add("user", user_message, source=source)

        # 2. Hot memory (all pinned points)
        hot = self.store.list_pinned()

        # 3. Cold memory (semantic top-k) — best-effort, degrade to empty
        try:
            qvec = self.embedder.embed(user_message)
            cold = self.store.search(qvec, k=self.cold_k, exclude_pinned=True)
        except Exception as exc:
            logger.warning("Cold-Search fehlgeschlagen: %s", exc)
            cold = []

        # 4. Fetch active skills + build the tool list
        all_skills = skills_mod.list_skills(active_only=False)
        active_skills = [s for s in all_skills if s.get("active", True)]
        tools = list(META_TOOLS) + [_skill_to_tool(s) for s in active_skills]

        # Trigger list + condition variable/function info for the system prompt
        all_triggers = triggers_mod.list_triggers(active_only=False)
        condition_vars = watcher_mod.describe_variables()
        condition_funcs = watcher_mod.describe_functions()

        # 5. System prompt + window messages
        system_prompt = build_system_prompt(hot, cold, skills=all_skills,
                                            triggers=all_triggers,
                                            condition_vars=condition_vars,
                                            condition_funcs=condition_funcs)
        messages = [ProxyMessage(role="system", content=system_prompt)]
        for t in self.conversation.window():
            messages.append(ProxyMessage(role=t.role, content=t.content))

        logger.info("chat: pinned=%d cold=%d skills=%d/%d window=%d prompt_chars=%d",
                    len(hot), len(cold), len(active_skills), len(all_skills),
                    len(self.conversation.window()), len(system_prompt))

        # 6. Tool-use loop
        final_reply = ""
        for iteration in range(self.MAX_TOOL_ITERATIONS):
            result = self.proxy.chat_full(messages, tools=tools)
            if result.tool_calls:
                # Append the assistant turn WITH tool_calls to the local
                # messages only (not to the persistent conversation!)
                messages.append(ProxyMessage(
                    role="assistant",
                    content=result.content or None,
                    tool_calls=[{
                        "id": tc["id"], "type": "function",
                        "function": {"name": tc["name"], "arguments": json.dumps(tc["arguments"])},
                    } for tc in result.tool_calls],
                ))
                # Execute the tools and feed results back as role=tool
                for tc in result.tool_calls:
                    tool_result = self._dispatch_tool(tc["name"], tc["arguments"])
                    messages.append(ProxyMessage(
                        role="tool",
                        tool_call_id=tc["id"],
                        name=tc["name"],
                        content=tool_result[:8000],
                    ))
                continue  # next iteration with the tool results
            # No more tool calls → final reply
            final_reply = (result.content or "").strip()
            break
        else:
            # for/else: loop limit reached without a break
            final_reply = "[Tool-Loop-Limit erreicht — ARIA hat zu viele Tool-Calls gemacht ohne fertig zu werden]"
            logger.warning("Tool-Loop hit MAX_TOOL_ITERATIONS=%d", self.MAX_TOOL_ITERATIONS)

        if not final_reply:
            raise RuntimeError("Leerer Reply vom Proxy")

        # 7. Append the assistant turn (final reply) to the conversation
        self.conversation.add("assistant", final_reply)
        return final_reply
|
||||
|
||||
    # ── Tool dispatcher ───────────────────────────────────────

    def _dispatch_tool(self, name: str, arguments: dict) -> str:
        """Execute one tool call and return a short text result.

        Never raises — errors are reported as text so Claude can keep
        going. The returned strings are user/model-facing and deliberately
        kept in German.
        """
        try:
            if name == "skill_create":
                # ARIA skills are always Python — `execution` is no longer
                # part of the tool schema, so it is fixed here.
                manifest = skills_mod.create_skill(
                    name=arguments["name"],
                    description=arguments["description"],
                    execution="local-venv",
                    entry_code=arguments["entry_code"],
                    readme=arguments.get("readme", ""),
                    args=arguments.get("args", []),
                    pip_packages=arguments.get("pip_packages", []),
                    author="aria",
                )
                # Side-channel event: Stefan should see when ARIA creates something
                self._pending_events.append({
                    "type": "skill_created",
                    "skill": {
                        "name": manifest["name"],
                        "description": manifest.get("description", ""),
                        "execution": manifest.get("execution", ""),
                        "active": manifest.get("active", True),
                        "setup_error": manifest.get("setup_error"),
                    },
                })
                return f"OK — Skill '{manifest['name']}' erstellt (active={manifest['active']})."
            if name == "skill_list":
                items = skills_mod.list_skills(active_only=False)
                if not items:
                    return "(keine Skills vorhanden)"
                return "\n".join(
                    f"- {s['name']} ({s['execution']}) {'aktiv' if s.get('active', True) else 'DEAKTIVIERT'}: {s.get('description', '')}"
                    for s in items
                )
            if name.startswith("run_"):
                # Dynamic skill invocation — tool name maps to run_<skill>.
                skill_name = name[len("run_"):]
                res = skills_mod.run_skill(skill_name, args=arguments)
                snippet = (res.get("stdout") or "")[:2000] or "(kein stdout)"
                err = (res.get("stderr") or "")[:500]
                marker = "OK" if res["ok"] else f"FEHLER (exit={res['exit_code']})"
                out = f"{marker} · {res['duration_sec']}s\nstdout:\n{snippet}"
                if err:
                    out += f"\nstderr:\n{err}"
                return out
            if name == "trigger_timer":
                fires_at_iso = arguments.get("fires_at")
                in_seconds = arguments.get("in_seconds")
                # Relative form wins only when no absolute timestamp given;
                # the server computes the absolute fire time from `now`.
                if not fires_at_iso and in_seconds is not None:
                    from datetime import datetime as _dt, timezone as _tz, timedelta as _td
                    try:
                        secs = int(in_seconds)
                    except (TypeError, ValueError):
                        return "FEHLER: in_seconds muss eine ganze Zahl sein."
                    if secs < 1:
                        return "FEHLER: in_seconds muss >= 1 sein."
                    fires_at_iso = (_dt.now(_tz.utc) + _td(seconds=secs)).isoformat(timespec="seconds")
                if not fires_at_iso:
                    return "FEHLER: entweder `in_seconds` ODER `fires_at` muss gesetzt sein."
                t = triggers_mod.create_timer(
                    name=arguments["name"],
                    fires_at_iso=fires_at_iso,
                    message=arguments["message"],
                    author="aria",
                )
                self._pending_events.append({
                    "type": "trigger_created",
                    "trigger": {"name": t["name"], "type": "timer",
                                "fires_at": t["fires_at"], "message": t["message"]},
                })
                return f"OK — Timer '{t['name']}' angelegt, feuert um {t['fires_at']}."
            if name == "trigger_watcher":
                t = triggers_mod.create_watcher(
                    name=arguments["name"],
                    condition=arguments["condition"],
                    message=arguments["message"],
                    check_interval_sec=int(arguments.get("check_interval_sec", 300)),
                    throttle_sec=int(arguments.get("throttle_sec", 3600)),
                    author="aria",
                )
                self._pending_events.append({
                    "type": "trigger_created",
                    "trigger": {"name": t["name"], "type": "watcher",
                                "condition": t["condition"], "message": t["message"]},
                })
                return f"OK — Watcher '{t['name']}' angelegt: feuert wenn '{t['condition']}'."
            if name == "trigger_cancel":
                # ValueError from delete() means "unknown trigger" here and is
                # reported as text rather than falling through to the generic
                # exception handler.
                try:
                    triggers_mod.delete(arguments["name"])
                    return f"OK — Trigger '{arguments['name']}' geloescht."
                except ValueError as e:
                    return f"FEHLER: {e}"
            if name == "request_location_tracking":
                # Only queues a side-channel event; the app performs the
                # actual GPS toggle when it receives the /chat response.
                on = bool(arguments.get("on", False))
                reason = (arguments.get("reason") or "").strip()
                self._pending_events.append({
                    "type": "location_tracking",
                    "on": on,
                    "reason": reason,
                })
                return f"OK — Tracking-Request gesendet (on={on}). App wird in Kuerze umschalten."
            if name == "trigger_list":
                items = triggers_mod.list_triggers(active_only=False)
                if not items:
                    return "(keine Trigger vorhanden)"
                lines = []
                for t in items:
                    state = "aktiv" if t.get("active", True) else "DEAKTIVIERT"
                    if t["type"] == "timer":
                        lines.append(f"- {t['name']} (timer, {state}): feuert {t.get('fires_at')} — \"{t.get('message','')[:50]}\"")
                    elif t["type"] == "watcher":
                        lines.append(f"- {t['name']} (watcher, {state}): cond=\"{t.get('condition')}\", throttle={t.get('throttle_sec')}s")
                    else:
                        lines.append(f"- {t['name']} ({t['type']}, {state})")
                return "\n".join(lines)
            return f"Unbekanntes Tool: {name}"
        except Exception as exc:
            logger.exception("Tool '%s' fehlgeschlagen", name)
            return f"FEHLER: {exc}"
|
||||
|
||||
# ── Memory-Destillat (laeuft im Hintergrund) ──────────────
|
||||
|
||||
def distill_old_turns(self) -> dict:
    """Distill the N oldest turns into fact memories (background job).

    Pattern: a separate LLM call returns 3-7 JSON facts which are stored
    as type=fact, source=distilled memory points. The turns are removed
    from the rolling window only after successful writing — a run where
    every single write failed must keep the turns so it can be retried.

    Returns:
        dict with "distilled" (facts written) plus either
        "removed_turns" on success or "reason"/"error" details.
    """
    if not self.conversation.needs_distill():
        return {"distilled": 0, "reason": "kein Bedarf"}

    old_turns = self.conversation.take_oldest_for_distill()
    if not old_turns:
        return {"distilled": 0, "reason": "keine alten Turns"}

    # Build the conversation slice as plain text; cap at 30k chars so the
    # prompt cannot explode on very long turns.
    transcript = "\n".join(
        f"[{t.role.upper()}] {t.content}" for t in old_turns
    )[:30000]

    system = (
        "Du extrahierst aus einer Konversation zwischen Stefan und ARIA die "
        "wichtigsten dauerhaft relevanten Fakten — keine Smalltalk-Details, "
        "keine flüchtigen Zustände. Antworte AUSSCHLIESSLICH mit gültigem JSON "
        "im Format: {\"facts\": [{\"title\": \"kurz, max 80 Zeichen\", "
        "\"content\": \"1-3 Sätze, konkret und nützlich\"}]}. "
        "Mindestens 0, höchstens 7 Facts. Wenn nichts wichtig genug ist: leeres Array."
    )
    user = (
        "Hier ist der Konversations-Abschnitt:\n\n"
        f"{transcript}\n\n"
        "Extrahiere die wichtigsten Fakten als JSON."
    )

    try:
        raw = self.proxy.chat([
            ProxyMessage(role="system", content=system),
            ProxyMessage(role="user", content=user),
        ])
    except Exception as exc:
        logger.warning("Destillat-Call fehlgeschlagen: %s — Turns bleiben", exc)
        return {"distilled": 0, "error": str(exc)}

    facts = self._parse_facts(raw)
    if facts is None:
        logger.warning("Destillat lieferte unparsbares JSON: %r", raw[:200])
        return {"distilled": 0, "error": "JSON parse failed", "raw": raw[:200]}

    # Write the facts into the vector DB, counting successes and failures.
    created = 0
    failed = 0
    for f in facts:
        if not isinstance(f, dict):
            # Defensive: the model may emit non-object list entries.
            continue
        content = (f.get("content") or "").strip()
        if not content:
            continue
        title = (f.get("title") or "").strip()[:120] or "Fakt"
        point = MemoryPoint(
            id="",
            type="fact",
            title=title,
            content=content,
            pinned=False,
            category="konversation",
            source="distilled",
            tags=[],
        )
        try:
            vec = self.embedder.embed(content)
            self.store.upsert(point, vec)
            created += 1
        except Exception as exc:
            failed += 1
            logger.warning("Fakt schreiben fehlgeschlagen: %s", exc)

    # BUG FIX: previously the distill was committed even when every write
    # failed, silently dropping undistilled history. Keep the turns in the
    # window in that case so a later run can retry. (created == 0 with no
    # failures means the model legitimately found nothing — commit then.)
    if failed and created == 0:
        logger.warning("Destillat: alle %d Fact-Writes fehlgeschlagen — Turns bleiben", failed)
        return {"distilled": 0, "error": f"{failed} Fact-Writes fehlgeschlagen"}

    # Only after successful writing remove the turns from the window.
    last_ts = old_turns[-1].ts
    self.conversation.commit_distill(last_ts)
    logger.info("Destillat: %d Facts geschrieben, %d Turns aus Window entfernt",
                created, len(old_turns))
    return {"distilled": created, "removed_turns": len(old_turns)}
|
||||
|
||||
@staticmethod
def _parse_facts(raw: str) -> Optional[list]:
    """Extract the "facts" list from a (possibly code-fenced) JSON reply.

    Returns None when the input is empty, contains no JSON object,
    fails to parse, or carries no list under the "facts" key.
    """
    if not raw:
        return None
    text = raw.strip()
    # The model may wrap its JSON in Markdown fences — peel them off.
    if text.startswith("```"):
        # Drop the opening "```json" / "```" line.
        text = text.split("\n", 1)[1] if "\n" in text else text[3:]
        if text.endswith("```"):
            text = text[:-3]
        text = text.strip()
    # Keep only the outermost {...} span.
    lbrace = text.find("{")
    rbrace = text.rfind("}")
    if lbrace == -1 or rbrace == -1 or rbrace < lbrace:
        return None
    try:
        payload = json.loads(text[lbrace:rbrace + 1])
    except Exception:
        return None
    if not isinstance(payload, dict):
        return None
    fact_list = payload.get("facts")
    return fact_list if isinstance(fact_list, list) else None
|
||||
@@ -0,0 +1,169 @@
|
||||
"""
|
||||
Background-Loop fuer Triggers.
|
||||
|
||||
Laeuft alle TICK_SEC Sekunden in einem asyncio Task, geht ueber alle
|
||||
active Triggers und entscheidet ob sie feuern muessen.
|
||||
|
||||
Feuern bedeutet:
|
||||
1. Trigger-Manifest update (fire_count++, last_fired_at, ggf. deaktivieren)
|
||||
2. Log-Eintrag schreiben
|
||||
3. agent.chat() mit einem system-Praefix aufrufen (NICHT als 'user'!)
|
||||
→ ARIA bekommt das wie eine Push-Nachricht und kann antworten
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import Optional
|
||||
|
||||
import triggers as triggers_mod
|
||||
import watcher as watcher_mod
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
TICK_SEC = 30
|
||||
|
||||
|
||||
def _now_iso() -> str:
    """Current UTC time as a timezone-aware ISO-8601 string."""
    current = datetime.now(timezone.utc)
    return current.isoformat()
|
||||
|
||||
|
||||
def _parse_iso(s: str) -> Optional[datetime]:
    """Parse an ISO-8601 string (a trailing "Z" is tolerated).

    Returns None for empty input or anything fromisoformat rejects.
    """
    if not s:
        return None
    try:
        normalized = s.replace("Z", "+00:00")
        result = datetime.fromisoformat(normalized)
    except Exception:
        result = None
    return result
|
||||
|
||||
|
||||
def _should_fire(trigger: dict, vars_: dict, now: datetime) -> bool:
    """Decide whether a trigger is due right now (pure read, no writes)."""
    if not trigger.get("active", True):
        return False

    kind = trigger.get("type", "")

    if kind == "timer":
        due_at = _parse_iso(trigger.get("fires_at", ""))
        if not due_at:
            return False
        if due_at.tzinfo is None:
            due_at = due_at.replace(tzinfo=timezone.utc)
        return now >= due_at

    if kind == "watcher":
        # Honour the per-watcher check interval (avoid hectic polling).
        interval = int(trigger.get("check_interval_sec", 300))
        checked_at = _parse_iso(trigger.get("last_checked_at", ""))
        if checked_at:
            if checked_at.tzinfo is None:
                checked_at = checked_at.replace(tzinfo=timezone.utc)
            if (now - checked_at).total_seconds() < interval:
                return False
        # Throttle: only fire again once enough time has passed.
        fired_at = _parse_iso(trigger.get("last_fired_at", ""))
        cool_down = int(trigger.get("throttle_sec", 3600))
        if fired_at:
            if fired_at.tzinfo is None:
                fired_at = fired_at.replace(tzinfo=timezone.utc)
            if (now - fired_at).total_seconds() < cool_down:
                return False
        # Finally evaluate the condition expression.
        expr = (trigger.get("condition") or "").strip()
        if not expr:
            return False
        try:
            return watcher_mod.evaluate(expr, vars_)
        except Exception as e:
            logger.warning("Trigger %s: Condition '%s' fehlerhaft: %s",
                           trigger.get("name"), expr, e)
            return False

    # "cron" is not implemented yet; unknown types never fire.
    return False
|
||||
|
||||
|
||||
async def _fire(trigger: dict, agent_factory) -> None:
    """Fire a trigger: update its manifest, log it, and notify ARIA.

    BUG FIX: agent.chat() is a synchronous, potentially slow call. Running
    it inline in this coroutine blocked the whole event loop — including
    the next trigger tick — despite _fire being spawned as its own task.
    It now runs on a worker thread via asyncio.to_thread.
    """
    name = trigger.get("name", "?")
    message = trigger.get("message") or "(ohne Nachricht)"
    ttype = trigger.get("type", "?")

    # Update the manifest (fire_count++, last_fired_at, maybe deactivate).
    try:
        triggers_mod.mark_fired(name)
    except Exception as e:
        logger.warning("mark_fired %s: %s", name, e)

    # Append to the trigger's log.
    triggers_mod.append_log(name, {"event": "fired", "type": ttype, "message": message})

    # System-style message for ARIA: a push notification, not a user turn.
    prompt = (
        f"[Trigger ausgelöst: '{name}', Typ: {ttype}] "
        f"Geplante Nachricht: \"{message}\". "
        f"Sage Stefan jetzt diese Information, in deinem Stil. "
        f"Wenn der Trigger ein Watcher war (Bedingung wurde erfuellt), "
        f"erwaehne kurz worum es geht. Antworte direkt, keine Rueckfrage."
    )

    try:
        agent = agent_factory()
        # Off-loop execution keeps the trigger loop responsive.
        reply = await asyncio.to_thread(agent.chat, prompt, source="trigger")
        logger.info("[trigger] %s gefeuert → ARIA-Reply: %s", name, reply[:80])
        triggers_mod.append_log(name, {"event": "reply", "text": reply[:500]})
    except Exception as e:
        logger.exception("Trigger %s feuern fehlgeschlagen: %s", name, e)
        triggers_mod.append_log(name, {"event": "error", "error": str(e)[:300]})
|
||||
|
||||
|
||||
def _watcher_check_due(trigger: dict, now: datetime) -> bool:
    """True when the watcher's check interval has elapsed (or it was never checked)."""
    interval = int(trigger.get("check_interval_sec", 300))
    last_checked = _parse_iso(trigger.get("last_checked_at", ""))
    if last_checked is None:
        return True
    if last_checked.tzinfo is None:
        last_checked = last_checked.replace(tzinfo=timezone.utc)
    return (now - last_checked).total_seconds() >= interval


async def _tick(agent_factory) -> None:
    """One check pass: walk all active triggers, fire whatever is due.

    BUG FIX: last_checked_at used to be written for every watcher *before*
    the triggers were evaluated. _should_fire compares last_checked_at
    against `now`, so the freshly written timestamp made every watcher look
    "just checked" and watchers could never fire. We now evaluate first and
    stamp only the watchers whose check interval had actually elapsed, so
    the interval is still honoured on subsequent ticks.
    """
    try:
        all_triggers = triggers_mod.list_triggers(active_only=True)
    except Exception as e:
        logger.warning("triggers.list: %s", e)
        return
    if not all_triggers:
        return
    now = datetime.now(timezone.utc)

    # Collect condition variables once per tick (disk stat etc. is expensive).
    try:
        vars_ = watcher_mod.collect_variables()
    except Exception as e:
        logger.warning("collect_variables: %s", e)
        vars_ = {}

    for trigger in all_triggers:
        try:
            due = _should_fire(trigger, vars_, now)
        except Exception as e:
            logger.warning("Trigger-Check %s: %s", trigger.get("name"), e)
            continue

        # Stamp last_checked_at only for watchers that really got checked
        # this tick; stamping unchecked watchers would postpone their next
        # check forever when TICK_SEC < check_interval_sec.
        if trigger.get("type") == "watcher" and _watcher_check_due(trigger, now):
            try:
                trigger["last_checked_at"] = _now_iso()
                triggers_mod.write(trigger["name"], trigger)
            except Exception:
                pass  # best effort — a failed stamp only causes an extra check

        if due:
            # Fire as its own task — a slow ARIA reply must not block the
            # next tick.
            asyncio.create_task(_fire(trigger, agent_factory))
|
||||
|
||||
|
||||
async def run_loop(agent_factory) -> None:
    """Endless trigger loop — started and cancelled by the main lifespan."""
    logger.info("Trigger-Loop gestartet (TICK_SEC=%d)", TICK_SEC)
    while True:
        try:
            await _tick(agent_factory)
        except Exception as err:
            # A broken tick must never kill the loop — log and carry on.
            logger.exception("Tick-Fehler: %s", err)
        await asyncio.sleep(TICK_SEC)
|
||||
@@ -0,0 +1,130 @@
|
||||
"""
|
||||
Conversation-State — ein einziger Rolling-Window-State fuer ARIAs
|
||||
laufendes Gespraech mit Stefan.
|
||||
|
||||
Stefan-Entscheidung: KEINE Sessions, KEIN Multi-Thread. EIN Strang,
|
||||
intern rollend. Was rausfaellt, wird ggf. destilliert und landet
|
||||
als type=fact Memory in der Vector-DB.
|
||||
|
||||
Persistenz: append-only JSONL unter /data/conversation.jsonl.
|
||||
Bei Restart wird die letzte N gelesen (komplett vermeidet Memory-
|
||||
Overhead bei sehr langen Verlaeufen).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from dataclasses import dataclass, field, asdict
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CONVERSATION_FILE = Path(os.environ.get("CONVERSATION_FILE", "/data/conversation.jsonl"))
|
||||
|
||||
|
||||
@dataclass
class Turn:
    """A single utterance in the rolling conversation window."""

    role: str  # "user" | "assistant"
    content: str
    # Creation timestamp, ISO-8601 in UTC (used for distill markers / ordering).
    ts: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
    source: str = ""  # "app" / "diagnostic" / "stt" — optional
|
||||
|
||||
|
||||
class Conversation:
    """In-memory rolling window, with JSONL persistence.

    A single conversation strand: turns are appended to an append-only
    JSONL file and replayed on restart; "distill" marker records drop
    everything up to a timestamp from the window.
    """

    def __init__(self, max_window: int = 50, distill_threshold: int = 60,
                 distill_count: int = 30):
        """Create the window and replay the persisted JSONL file.

        max_window: how many turns go into the LLM prompt.
        distill_threshold: turn count above which distillation is needed.
        distill_count: how many oldest turns one distill run takes.
        """
        self.max_window = max_window
        self.distill_threshold = distill_threshold
        self.distill_count = distill_count
        self.turns: List[Turn] = []
        self._load()

    def _load(self):
        """Replay the JSONL file into self.turns (best effort, skips bad lines)."""
        if not CONVERSATION_FILE.exists():
            return
        try:
            lines = CONVERSATION_FILE.read_text(encoding="utf-8").splitlines()
        except Exception as exc:
            logger.warning("Konversation laden fehlgeschlagen: %s", exc)
            return
        loaded: List[Turn] = []
        for line in lines:
            line = line.strip()
            if not line:
                continue
            try:
                obj = json.loads(line)
            except Exception:
                # Corrupt line — skip rather than fail the whole load.
                continue
            if obj.get("op") == "distill":
                # Marker: everything up to this timestamp has been distilled.
                drop_until_ts = obj.get("ts", "")
                if drop_until_ts:
                    # ISO-8601 strings compare chronologically as strings.
                    loaded = [t for t in loaded if t.ts > drop_until_ts]
                continue
            role = obj.get("role")
            content = obj.get("content")
            if role in ("user", "assistant") and isinstance(content, str):
                loaded.append(Turn(role=role, content=content,
                                   ts=obj.get("ts", ""),
                                   source=obj.get("source", "")))
        self.turns = loaded
        logger.info("Konversation geladen: %d Turns aus %s", len(self.turns), CONVERSATION_FILE)

    def _append_to_file(self, record: dict):
        """Append one JSON record to the persistence file (best effort)."""
        try:
            CONVERSATION_FILE.parent.mkdir(parents=True, exist_ok=True)
            with CONVERSATION_FILE.open("a", encoding="utf-8") as f:
                f.write(json.dumps(record, ensure_ascii=False) + "\n")
        except Exception as exc:
            logger.warning("Konversation persist fehlgeschlagen: %s", exc)

    def add(self, role: str, content: str, source: str = "") -> Turn:
        """Append a turn to the window and persist it; returns the Turn."""
        t = Turn(role=role, content=content, source=source)
        self.turns.append(t)
        self._append_to_file({
            "ts": t.ts, "role": t.role, "content": t.content, "source": t.source,
        })
        return t

    def window(self) -> List[Turn]:
        """The last max_window turns — these go into the LLM prompt."""
        return self.turns[-self.max_window:]

    def needs_distill(self) -> bool:
        """True when the window has grown past the distill threshold."""
        return len(self.turns) > self.distill_threshold

    def take_oldest_for_distill(self) -> List[Turn]:
        """Return the N oldest turns for the distillation call.
        Does NOT remove them — commit_distill does that after a successful call."""
        return self.turns[: self.distill_count]

    def commit_distill(self, last_distilled_ts: str):
        """Write a distill marker and drop the turns from the in-memory window."""
        self._append_to_file({"op": "distill", "ts": last_distilled_ts})
        self.turns = [t for t in self.turns if t.ts > last_distilled_ts]
        logger.info("Distill commit bei ts=%s — Window jetzt %d Turns", last_distilled_ts, len(self.turns))

    def reset(self):
        """Hard reset — use with care (diagnostic button). Deletes file and window."""
        try:
            if CONVERSATION_FILE.exists():
                CONVERSATION_FILE.unlink()
        except Exception:
            pass  # best effort — an undeletable file still gets an empty window
        self.turns = []
        logger.warning("Konversation komplett zurueckgesetzt")

    def stats(self) -> dict:
        """Small status snapshot for the diagnostic UI."""
        return {
            "turns": len(self.turns),
            "max_window": self.max_window,
            "distill_threshold": self.distill_threshold,
            "needs_distill": self.needs_distill(),
        }
|
||||
@@ -0,0 +1,652 @@
|
||||
"""
|
||||
ARIA Brain — FastAPI-Einstieg.
|
||||
|
||||
Phase B Punkt 1: nur Skeleton.
|
||||
- /health → Liveness
|
||||
- /memory/list → alle Punkte (gefiltert)
|
||||
- /memory/pinned → Hot Memory
|
||||
- /memory/search?q=...&k=5 → semantische Suche
|
||||
- /memory/save → neuen Punkt anlegen
|
||||
- /memory/update/{id} → Punkt aendern (re-embed wenn content geaendert)
|
||||
- /memory/delete/{id} → Punkt loeschen
|
||||
- /memory/stats → Anzahl Punkte pro Type
|
||||
|
||||
/chat (Conversation-Loop) und /skills/* kommen in spaeteren Phasen.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
from typing import List, Optional
|
||||
|
||||
import asyncio
|
||||
from contextlib import asynccontextmanager
|
||||
|
||||
from fastapi import FastAPI, HTTPException, BackgroundTasks, Request
|
||||
from fastapi.responses import Response
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from memory import Embedder, VectorStore, MemoryPoint
|
||||
from conversation import Conversation
|
||||
from proxy_client import ProxyClient
|
||||
from agent import Agent
|
||||
import skills as skills_mod
|
||||
import metrics as metrics_mod
|
||||
import triggers as triggers_mod
|
||||
import watcher as watcher_mod
|
||||
import background as background_mod
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s")
|
||||
logger = logging.getLogger("aria-brain")
|
||||
|
||||
QDRANT_HOST = os.environ.get("QDRANT_HOST", "aria-qdrant")
|
||||
QDRANT_PORT = int(os.environ.get("QDRANT_PORT", "6333"))
|
||||
|
||||
@asynccontextmanager
async def lifespan(app: FastAPI):
    """On brain startup: launch the trigger background loop; stop it on shutdown."""
    loop_task = asyncio.create_task(background_mod.run_loop(agent))
    logger.info("Lifespan: Trigger-Loop gestartet")
    try:
        yield
    finally:
        # Cancel the loop and wait for it to unwind cleanly.
        loop_task.cancel()
        try:
            await loop_task
        except asyncio.CancelledError:
            pass
        logger.info("Lifespan: Trigger-Loop gestoppt")
|
||||
|
||||
|
||||
app = FastAPI(title="ARIA Brain", version="0.1.0", lifespan=lifespan)

# Lazily-created process-wide singletons; use the accessor functions below
# instead of touching these directly.
_embedder: Optional[Embedder] = None
_store: Optional[VectorStore] = None
_conversation: Optional[Conversation] = None
_proxy: Optional[ProxyClient] = None
_agent: Optional[Agent] = None
|
||||
|
||||
|
||||
def embedder() -> Embedder:
    """Return the process-wide Embedder, creating it on first use."""
    global _embedder
    if _embedder is not None:
        return _embedder
    _embedder = Embedder()
    return _embedder
|
||||
|
||||
|
||||
def store() -> VectorStore:
    """Return the process-wide VectorStore, creating it on first use."""
    global _store
    if _store is not None:
        return _store
    _store = VectorStore(host=QDRANT_HOST, port=QDRANT_PORT)
    return _store
|
||||
|
||||
|
||||
def conversation() -> Conversation:
    """Return the process-wide Conversation, creating it on first use."""
    global _conversation
    if _conversation is not None:
        return _conversation
    _conversation = Conversation()
    return _conversation
|
||||
|
||||
|
||||
def proxy_client() -> ProxyClient:
    """Return the process-wide ProxyClient, creating it on first use."""
    global _proxy
    if _proxy is not None:
        return _proxy
    _proxy = ProxyClient()
    return _proxy
|
||||
|
||||
|
||||
def agent() -> Agent:
    """Return the process-wide Agent, wiring up its collaborators on first use."""
    global _agent
    if _agent is not None:
        return _agent
    _agent = Agent(store(), embedder(), conversation(), proxy_client())
    return _agent
|
||||
|
||||
|
||||
# ─── Pydantic-Schemas ─────────────────────────────────────────────────
|
||||
|
||||
class MemoryIn(BaseModel):
    """Request body for /memory/save — a new memory point."""

    type: str = Field(..., description="identity|rule|preference|tool|skill|fact|conversation|reminder")
    title: str
    content: str
    pinned: bool = False  # pinned points form the "hot" memory (see /memory/pinned)
    category: str = ""
    source: str = "manual"
    tags: List[str] = Field(default_factory=list)
    conversation_id: Optional[str] = None
|
||||
|
||||
|
||||
class MemoryUpdate(BaseModel):
    """Partial update for /memory/update — None fields are left unchanged."""

    title: Optional[str] = None
    content: Optional[str] = None
    pinned: Optional[bool] = None
    category: Optional[str] = None
    tags: Optional[List[str]] = None
|
||||
|
||||
|
||||
class MemoryOut(BaseModel):
    """Serialized memory point as returned by the /memory/* endpoints."""

    id: str
    type: str
    title: str
    content: str
    pinned: bool
    category: str
    source: str
    tags: List[str]
    created_at: str
    updated_at: str
    conversation_id: Optional[str] = None
    # Similarity score — presumably only populated on search results; None otherwise.
    score: Optional[float] = None

    @classmethod
    def from_point(cls, p: MemoryPoint) -> "MemoryOut":
        """Build a MemoryOut from a storage-layer MemoryPoint (fields map 1:1)."""
        return cls(**p.__dict__)
|
||||
|
||||
|
||||
# ─── Health ───────────────────────────────────────────────────────────
|
||||
|
||||
@app.get("/health")
def health():
    """Liveness probe: memory count on success, degraded status when Qdrant fails."""
    qdrant = f"{QDRANT_HOST}:{QDRANT_PORT}"
    try:
        n = store().count()
    except Exception as exc:
        return {"status": "degraded", "error": str(exc), "qdrant": qdrant}
    return {"status": "ok", "memory_count": n, "qdrant": qdrant}
|
||||
|
||||
|
||||
# ─── Memory-Endpoints ─────────────────────────────────────────────────
|
||||
|
||||
@app.get("/memory/stats")
def memory_stats():
    """Counts per memory type plus the pinned total."""
    points = store().list_all()
    by_type: dict = {}
    pinned_count = 0
    for point in points:
        by_type[point.type] = by_type.get(point.type, 0) + 1
        if point.pinned:
            pinned_count += 1
    return {"total": len(points), "pinned": pinned_count, "by_type": by_type}
|
||||
|
||||
|
||||
@app.get("/memory/list", response_model=List[MemoryOut])
def memory_list(type: Optional[str] = None, limit: int = 200):
    """List memory points, optionally filtered by type."""
    s = store()
    if type:
        points = s.list_by_type(type, limit=limit)
    else:
        points = s.list_all(limit=limit)
    return [MemoryOut.from_point(p) for p in points]
|
||||
|
||||
|
||||
@app.get("/memory/pinned", response_model=List[MemoryOut])
def memory_pinned():
    """Hot memory: every pinned point."""
    pinned_points = store().list_pinned()
    return [MemoryOut.from_point(p) for p in pinned_points]
|
||||
|
||||
|
||||
@app.get("/memory/search", response_model=List[MemoryOut])
def memory_search(q: str, k: int = 5, type: Optional[str] = None, include_pinned: bool = False):
    """Semantic search over the vector store (pinned points excluded by default)."""
    query_vec = embedder().embed(q)
    hits = store().search(query_vec, k=k, type_filter=type, exclude_pinned=not include_pinned)
    return [MemoryOut.from_point(h) for h in hits]
|
||||
|
||||
|
||||
@app.post("/memory/save", response_model=MemoryOut)
def memory_save(body: MemoryIn):
    """Create a new memory point (content is embedded before the upsert)."""
    point = MemoryPoint(
        id="",
        type=body.type,
        title=body.title,
        content=body.content,
        pinned=body.pinned,
        category=body.category,
        source=body.source,
        tags=body.tags,
        conversation_id=body.conversation_id,
    )
    s = store()
    vec = embedder().embed(body.content)
    pid = s.upsert(point, vec)
    return MemoryOut.from_point(s.get(pid))
|
||||
|
||||
|
||||
@app.patch("/memory/update/{point_id}", response_model=MemoryOut)
def memory_update(point_id: str, body: MemoryUpdate):
    """Patch a memory point; re-embed only when the content actually changed.

    Raises:
        HTTPException 404 when the point does not exist.
    """
    from datetime import datetime, timezone

    s = store()
    existing = s.get(point_id)
    if not existing:
        raise HTTPException(404, f"Memory {point_id} nicht gefunden")

    content_changed = body.content is not None and body.content != existing.content
    if body.title is not None:
        existing.title = body.title
    if body.content is not None:
        existing.content = body.content
    if body.pinned is not None:
        existing.pinned = body.pinned
    if body.category is not None:
        existing.category = body.category
    if body.tags is not None:
        existing.tags = body.tags

    if content_changed:
        # Content changed → the vector must be recomputed.
        s.upsert(existing, embedder().embed(existing.content))
    else:
        # Vector unchanged — rewrite only the payload.
        # (Cleaned up: dropped an unused qdrant models import and replaced
        # the unreadable __import__("datetime") chain with a local import.)
        from memory.vector_store import COLLECTION
        s.client.set_payload(
            collection_name=COLLECTION,
            payload=existing.to_payload()
            | {"updated_at": datetime.now(timezone.utc).isoformat()},
            points=[point_id],
        )
    return MemoryOut.from_point(s.get(point_id))
|
||||
|
||||
|
||||
@app.delete("/memory/delete/{point_id}")
def memory_delete(point_id: str):
    """Delete a memory point; 404 when it does not exist."""
    s = store()
    existing = s.get(point_id)
    if not existing:
        raise HTTPException(404, f"Memory {point_id} nicht gefunden")
    s.delete(point_id)
    return {"deleted": point_id}
|
||||
|
||||
|
||||
# ─── Migration aus brain-import/ ──────────────────────────────────────
|
||||
|
||||
IMPORT_DIR = os.environ.get("IMPORT_DIR", "/import")
|
||||
|
||||
|
||||
@app.post("/memory/migrate")
def memory_migrate():
    """Read /import/*.md and write atomic memory points into the DB.

    Idempotent: re-runs replace points sharing the same migration_key."""
    from pathlib import Path
    from migration import run_migration
    return run_migration(Path(IMPORT_DIR), store(), embedder())
|
||||
|
||||
|
||||
@app.get("/memory/import-files")
def memory_import_files():
    """List what lies under /import/ — for the diagnostic UI."""
    from pathlib import Path
    import_dir = Path(IMPORT_DIR)
    if not import_dir.exists():
        return {"import_dir": str(import_dir), "exists": False, "files": []}
    files = []
    for entry in sorted(import_dir.iterdir()):
        if not entry.is_file():
            continue
        try:
            files.append({"name": entry.name, "size": entry.stat().st_size})
        except Exception:
            # File vanished between listing and stat — best effort.
            pass
    return {"import_dir": str(import_dir), "exists": True, "files": files}
|
||||
|
||||
|
||||
# ─── Bootstrap-Snapshot ───────────────────────────────────────────────
|
||||
# "Bootstrap" = alle pinned Memories. Export/Import zum schnellen
|
||||
# Wiederherstellen einer schlanken ARIA nach Wipe.
|
||||
|
||||
@app.get("/memory/export-bootstrap")
def memory_export_bootstrap():
    """Return all pinned memories as JSON — for browser download.

    Cleaned up: the timestamp used a nested __import__("datetime") chain;
    a plain local import does the same thing readably.
    """
    from datetime import datetime, timezone

    pinned = store().list_pinned()
    return {
        "version": 1,
        "exported_at": datetime.now(timezone.utc).isoformat(),
        "count": len(pinned),
        "memories": [
            {
                "type": p.type,
                "title": p.title,
                "content": p.content,
                "pinned": True,
                "category": p.category,
                "source": p.source,
                "tags": p.tags,
            }
            for p in pinned
        ],
    }
|
||||
|
||||
|
||||
class BootstrapBundle(BaseModel):
    """Import payload for /memory/import-bootstrap (pinned memories only)."""

    version: int = 1
    memories: List[dict]
|
||||
|
||||
|
||||
@app.post("/memory/import-bootstrap")
def memory_import_bootstrap(body: BootstrapBundle):
    """Delete all pinned memories and import the ones from the bundle.
    Cold memory (unpinned) is left untouched.

    If the bundle carries no memories: delete-only is NOT allowed — the
    caller is expected to export first and then import.
    """
    if not body.memories:
        raise HTTPException(400, "Bundle hat keine memories — Abbruch zur Sicherheit")

    s = store()
    e = embedder()

    # Delete every currently pinned point.
    from qdrant_client.http import models as qm
    from memory.vector_store import COLLECTION
    s.client.delete(
        collection_name=COLLECTION,
        points_selector=qm.FilterSelector(filter=qm.Filter(must=[
            qm.FieldCondition(key="pinned", match=qm.MatchValue(value=True))
        ])),
    )

    # Feed in the new points (entries without usable content are skipped).
    created = 0
    for m in body.memories:
        content = (m.get("content") or "").strip()
        if not content:
            continue
        point = MemoryPoint(
            id="",
            type=m.get("type", "fact"),
            title=m.get("title", "(ohne Titel)"),
            content=content,
            pinned=True,
            category=m.get("category", ""),
            source=m.get("source", "bootstrap-import"),
            tags=list(m.get("tags", [])),
        )
        vec = e.embed(content)
        s.upsert(point, vec)
        created += 1

    return {"created": created, "deleted_previous_pinned": True}
|
||||
|
||||
|
||||
# ─── Conversation-Loop ──────────────────────────────────────────────
|
||||
|
||||
class ChatIn(BaseModel):
    """Request body for /chat."""

    message: str
    source: str = ""  # "app" / "diagnostic" / "stt" — optional
|
||||
|
||||
|
||||
class ChatOut(BaseModel):
    """Response of /chat."""

    reply: str
    turns: int        # current size of the rolling conversation window
    distilling: bool  # True when a background distill task was scheduled
    events: list = Field(default_factory=list)  # events popped from the agent
|
||||
|
||||
|
||||
@app.post("/chat", response_model=ChatOut)
def chat(body: ChatIn, background: BackgroundTasks):
    """Main path. The reply is produced synchronously; memory distillation
    runs in the background after the response has gone out."""
    a = agent()
    try:
        reply = a.chat(body.message, source=body.source)
    except ValueError as exc:
        raise HTTPException(400, str(exc))
    except RuntimeError as exc:
        logger.error("chat fehlgeschlagen: %s", exc)
        raise HTTPException(502, str(exc))

    distilling = a.conversation.needs_distill()
    if distilling:
        background.add_task(a.distill_old_turns)
    return ChatOut(
        reply=reply,
        turns=len(a.conversation.turns),
        distilling=distilling,
        events=a.pop_events(),
    )
|
||||
|
||||
|
||||
@app.get("/conversation/stats")
def conversation_stats():
    """Rolling-window statistics of the single conversation."""
    return conversation().stats()
|
||||
|
||||
|
||||
@app.post("/conversation/reset")
def conversation_reset():
    """Hard reset — the rolling-window history is emptied completely.
    Distilled facts stay in the DB."""
    conversation().reset()
    return {"ok": True, "turns": 0}
|
||||
|
||||
|
||||
@app.post("/conversation/distill")
def conversation_distill_now():
    """Manual distillation trigger — for tests or before a deliberate reset."""
    return agent().distill_old_turns()
|
||||
|
||||
|
||||
# ─── Call-Metrics (Token / Quota-Monitoring) ────────────────────────
|
||||
|
||||
@app.get("/metrics/calls")
def metrics_calls():
    """Aggregates for 1h / 5h / 24h / 30d.
    Each window: {window_seconds, calls, tokens_in, tokens_out, by_model}."""
    return metrics_mod.stats()
|
||||
|
||||
|
||||
# ─── Triggers (passive Aufweck-Quellen) ─────────────────────────────
|
||||
|
||||
class TriggerTimerBody(BaseModel):
    """Request body for creating a one-shot timer trigger."""

    name: str
    fires_at: str  # ISO timestamp
    message: str
    author: str = "stefan"
|
||||
|
||||
|
||||
class TriggerWatcherBody(BaseModel):
    """Request body for creating a condition-watcher trigger."""

    name: str
    condition: str
    message: str
    check_interval_sec: int = 300  # how often the condition is evaluated
    throttle_sec: int = 3600       # minimum pause between two firings
    author: str = "stefan"
|
||||
|
||||
|
||||
class TriggerPatch(BaseModel):
    """Partial update for a trigger — None fields are left unchanged."""

    active: bool | None = None
    message: str | None = None
    condition: str | None = None
    throttle_sec: int | None = None
    check_interval_sec: int | None = None
    fires_at: str | None = None
|
||||
|
||||
|
||||
@app.get("/triggers/list")
def triggers_list(active_only: bool = False):
    """List all trigger manifests (optionally only the active ones)."""
    return {"triggers": triggers_mod.list_triggers(active_only=active_only)}
|
||||
|
||||
|
||||
@app.get("/triggers/conditions")
def triggers_conditions():
    """Available variables + functions for watcher conditions (with current values)."""
    snapshot = watcher_mod.collect_variables()
    # near() lives in the variables as a callable — filter it out for the UI.
    current = {key: val for key, val in snapshot.items() if not callable(val)}
    return {
        "variables": watcher_mod.describe_variables(),
        "functions": watcher_mod.describe_functions(),
        "current": current,
    }
|
||||
|
||||
|
||||
@app.get("/triggers/{name}")
def triggers_get(name: str):
    """Fetch one trigger manifest; 404 when unknown."""
    trigger = triggers_mod.read(name)
    if trigger is None:
        raise HTTPException(404, f"Trigger '{name}' nicht gefunden")
    return trigger
|
||||
|
||||
|
||||
@app.get("/triggers/{name}/logs")
def triggers_get_logs(name: str, limit: int = 50):
    """Most recent log entries of one trigger."""
    return {"logs": triggers_mod.list_logs(name, limit=limit)}
|
||||
|
||||
|
||||
@app.post("/triggers/timer")
def triggers_create_timer(body: TriggerTimerBody):
    """Create a one-shot timer trigger; 400 on validation errors."""
    try:
        return triggers_mod.create_timer(
            name=body.name,
            fires_at_iso=body.fires_at,
            message=body.message,
            author=body.author,
        )
    except ValueError as exc:
        raise HTTPException(400, str(exc))
|
||||
|
||||
|
||||
@app.post("/triggers/watcher")
def triggers_create_watcher(body: TriggerWatcherBody):
    """Create a condition-watcher trigger; 400 on validation errors."""
    try:
        return triggers_mod.create_watcher(
            name=body.name,
            condition=body.condition,
            message=body.message,
            check_interval_sec=body.check_interval_sec,
            throttle_sec=body.throttle_sec,
            author=body.author,
        )
    except ValueError as exc:
        raise HTTPException(400, str(exc))
|
||||
|
||||
|
||||
@app.patch("/triggers/{name}")
def triggers_patch(name: str, body: TriggerPatch):
    """Partially update a trigger; 404 when unknown."""
    changes = {key: val for key, val in body.model_dump().items() if val is not None}
    try:
        return triggers_mod.update(name, changes)
    except ValueError as exc:
        raise HTTPException(404, str(exc))
|
||||
|
||||
|
||||
@app.delete("/triggers/{name}")
|
||||
def triggers_delete(name: str):
|
||||
try:
|
||||
triggers_mod.delete(name)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(404, str(exc))
|
||||
return {"deleted": name}
|
||||
|
||||
|
||||
# ─── Skills ─────────────────────────────────────────────────────────
|
||||
|
||||
class SkillCreate(BaseModel):
    """Request body for POST /skills/create (passed through to skills_mod.create_skill)."""
    name: str
    description: str
    execution: str  # local-venv | local-bin | bash
    entry_code: str
    readme: str = ""
    args: list = Field(default_factory=list)
    requires: dict = Field(default_factory=dict)
    pip_packages: list = Field(default_factory=list)
    author: str = "stefan"
|
||||
|
||||
|
||||
class SkillRun(BaseModel):
    """Request body for POST /skills/run (forwarded to skills_mod.run_skill)."""
    name: str
    args: dict = Field(default_factory=dict)  # forwarded as args= to run_skill
    timeout_sec: int = 300  # forwarded as timeout_sec= to run_skill
|
||||
|
||||
|
||||
class SkillPatch(BaseModel):
    """Partial update for PATCH /skills/{name}; None fields are dropped by the handler."""
    description: str | None = None
    active: bool | None = None
    args: list | None = None
|
||||
|
||||
|
||||
@app.get("/skills/list")
|
||||
def skills_list(active_only: bool = False):
|
||||
return {"skills": skills_mod.list_skills(active_only=active_only)}
|
||||
|
||||
|
||||
@app.get("/skills/{name}")
|
||||
def skills_get(name: str):
|
||||
m = skills_mod.read_manifest(name)
|
||||
if m is None:
|
||||
raise HTTPException(404, f"Skill '{name}' nicht gefunden")
|
||||
readme = skills_mod.read_readme(name)
|
||||
return {"manifest": m, "readme": readme}
|
||||
|
||||
|
||||
@app.post("/skills/create")
|
||||
def skills_create(body: SkillCreate):
|
||||
try:
|
||||
return skills_mod.create_skill(
|
||||
name=body.name,
|
||||
description=body.description,
|
||||
execution=body.execution,
|
||||
entry_code=body.entry_code,
|
||||
readme=body.readme,
|
||||
args=body.args,
|
||||
requires=body.requires,
|
||||
pip_packages=body.pip_packages,
|
||||
author=body.author,
|
||||
)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(400, str(exc))
|
||||
|
||||
|
||||
@app.post("/skills/run")
|
||||
def skills_run(body: SkillRun):
|
||||
try:
|
||||
return skills_mod.run_skill(body.name, args=body.args, timeout_sec=body.timeout_sec)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(400, str(exc))
|
||||
|
||||
|
||||
@app.patch("/skills/{name}")
|
||||
def skills_patch(name: str, body: SkillPatch):
|
||||
patch = {k: v for k, v in body.model_dump().items() if v is not None}
|
||||
try:
|
||||
return skills_mod.update_skill(name, patch)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(404, str(exc))
|
||||
|
||||
|
||||
@app.delete("/skills/{name}")
|
||||
def skills_delete(name: str):
|
||||
try:
|
||||
skills_mod.delete_skill(name)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(404, str(exc))
|
||||
return {"deleted": name}
|
||||
|
||||
|
||||
@app.get("/skills/{name}/logs")
|
||||
def skills_logs(name: str, limit: int = 50):
|
||||
return {"logs": skills_mod.list_logs(name, limit=limit)}
|
||||
|
||||
|
||||
@app.get("/skills/{name}/export")
|
||||
def skills_export(name: str):
|
||||
try:
|
||||
data = skills_mod.export_skill(name)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(404, str(exc))
|
||||
return Response(
|
||||
content=data,
|
||||
media_type="application/gzip",
|
||||
headers={"Content-Disposition": f'attachment; filename="skill-{name}.tar.gz"'},
|
||||
)
|
||||
|
||||
|
||||
@app.post("/skills/import")
|
||||
async def skills_import(request: Request, overwrite: bool = False):
|
||||
data = await request.body()
|
||||
if not data:
|
||||
raise HTTPException(400, "Leerer Body")
|
||||
try:
|
||||
manifest = skills_mod.import_skill(data, overwrite=overwrite)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(400, str(exc))
|
||||
return {"imported": manifest}
|
||||
@@ -0,0 +1,4 @@
|
||||
from .embedder import Embedder
|
||||
from .vector_store import VectorStore, MemoryPoint, MemoryType
|
||||
|
||||
__all__ = ["Embedder", "VectorStore", "MemoryPoint", "MemoryType"]
|
||||
@@ -0,0 +1,42 @@
|
||||
"""
|
||||
Lokaler Embedder fuer Memory-Texte.
|
||||
|
||||
Nutzt sentence-transformers (paraphrase-multilingual-MiniLM-L12-v2):
|
||||
- Deutsch + Englisch
|
||||
- 384-dimensionale Vektoren
|
||||
- Laeuft auf CPU, ~30ms pro kurzer Text
|
||||
- Modell wird beim ersten Aufruf in /data/_models gecached
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import List
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
MODEL_NAME = "paraphrase-multilingual-MiniLM-L12-v2"
|
||||
VECTOR_DIM = 384
|
||||
|
||||
|
||||
class Embedder:
    """Lazy wrapper around a sentence-transformers model.

    The heavy import and model load only happen on first use, so importing
    this module stays cheap.
    """

    def __init__(self, model_name: str = MODEL_NAME):
        self.model_name = model_name
        self._model = None

    def _load(self):
        # Import lazily — sentence_transformers is heavy and only needed here.
        if self._model is not None:
            return
        logger.info("Lade Embedding-Modell %s ...", self.model_name)
        from sentence_transformers import SentenceTransformer
        self._model = SentenceTransformer(self.model_name)
        logger.info("Embedding-Modell geladen.")

    def embed(self, text: str) -> List[float]:
        """Embed one text into a normalized vector (list of floats)."""
        self._load()
        encoded = self._model.encode(text, convert_to_numpy=True, normalize_embeddings=True)
        return encoded.tolist()

    def embed_batch(self, texts: List[str]) -> List[List[float]]:
        """Embed several texts at once — faster than per-text embed() calls."""
        self._load()
        encoded = self._model.encode(texts, convert_to_numpy=True, normalize_embeddings=True)
        return encoded.tolist()
|
||||
@@ -0,0 +1,209 @@
|
||||
"""
|
||||
Vector-Store-Wrapper um Qdrant.
|
||||
|
||||
Eine Collection "aria_memory" haelt ALLE Memory-Punkte.
|
||||
Trennung nach Type/Pinned-Status via Payload-Filter.
|
||||
|
||||
Punkt-Schema (Payload):
|
||||
type — identity | rule | preference | tool | skill | fact | conversation | reminder
|
||||
category — frei, fuer UI-Gruppierung
|
||||
title — kurze Ueberschrift
|
||||
content — eigentlicher Text (wird embedded)
|
||||
pinned — bool, True = Hot Memory (immer in Prompt)
|
||||
source — import | conversation | manual
|
||||
tags — Liste von Strings
|
||||
created_at, updated_at — ISO-Strings
|
||||
conversation_id — optional, nur fuer type=conversation
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import uuid
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from enum import Enum
|
||||
from typing import List, Optional
|
||||
|
||||
from qdrant_client import QdrantClient
|
||||
from qdrant_client.http import models as qm
|
||||
|
||||
from .embedder import VECTOR_DIM
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
COLLECTION = "aria_memory"
|
||||
|
||||
|
||||
class MemoryType(str, Enum):
    """Closed set of values for the 'type' payload field of a memory point."""
    IDENTITY = "identity"
    RULE = "rule"
    PREFERENCE = "preference"
    TOOL = "tool"
    SKILL = "skill"
    FACT = "fact"
    CONVERSATION = "conversation"
    REMINDER = "reminder"
|
||||
|
||||
|
||||
@dataclass
class MemoryPoint:
    """One memory entry plus its mapping to/from a Qdrant payload."""
    id: str
    type: str
    title: str
    content: str
    pinned: bool = False
    category: str = ""
    source: str = "manual"
    tags: List[str] = field(default_factory=list)
    created_at: str = ""
    updated_at: str = ""
    conversation_id: Optional[str] = None
    score: Optional[float] = None  # only set on search results

    def to_payload(self) -> dict:
        """Payload dict for Qdrant; conversation_id only when present."""
        payload = dict(
            type=self.type,
            title=self.title,
            content=self.content,
            pinned=self.pinned,
            category=self.category,
            source=self.source,
            tags=self.tags,
            created_at=self.created_at,
            updated_at=self.updated_at,
        )
        if self.conversation_id:
            payload["conversation_id"] = self.conversation_id
        return payload

    @classmethod
    def from_qdrant(cls, point) -> "MemoryPoint":
        """Build a MemoryPoint from a Qdrant point/scored-point object."""
        data = point.payload or {}
        kwargs = dict(
            id=str(point.id),
            type=data.get("type", "fact"),
            title=data.get("title", ""),
            content=data.get("content", ""),
            pinned=data.get("pinned", False),
            category=data.get("category", ""),
            source=data.get("source", "manual"),
            tags=data.get("tags", []),
            created_at=data.get("created_at", ""),
            updated_at=data.get("updated_at", ""),
            conversation_id=data.get("conversation_id"),
            score=getattr(point, "score", None),
        )
        return cls(**kwargs)
|
||||
|
||||
|
||||
def _now() -> str:
|
||||
return datetime.now(timezone.utc).isoformat()
|
||||
|
||||
|
||||
class VectorStore:
    """Wrapper around the single Qdrant collection holding all memory points.

    Separation by type/pinned status happens via payload filters, not via
    separate collections.
    """

    def __init__(self, host: str, port: int = 6333):
        self.client = QdrantClient(host=host, port=port)
        self._ensure_collection()

    def _ensure_collection(self):
        """Create the collection + payload indexes on first run (no-op afterwards)."""
        existing = [c.name for c in self.client.get_collections().collections]
        if COLLECTION not in existing:
            logger.info("Erstelle Collection %s ...", COLLECTION)
            self.client.create_collection(
                collection_name=COLLECTION,
                vectors_config=qm.VectorParams(size=VECTOR_DIM, distance=qm.Distance.COSINE),
            )
            # Indexes for the fields we commonly filter on; "pinned" is the only bool.
            for field_name in ("type", "pinned", "category", "source", "migration_key"):
                self.client.create_payload_index(
                    collection_name=COLLECTION,
                    field_name=field_name,
                    field_schema=qm.PayloadSchemaType.KEYWORD if field_name != "pinned"
                    else qm.PayloadSchemaType.BOOL,
                )

    # ─── Write operations ────────────────────────────────────────────

    def upsert(self, point: MemoryPoint, vector: List[float]) -> str:
        """Insert or overwrite one point.

        Mutates `point` in place: assigns a fresh UUID when id is empty and
        maintains created_at/updated_at. Returns the point id.
        """
        if not point.id:
            point.id = str(uuid.uuid4())
        if not point.created_at:
            point.created_at = _now()
        point.updated_at = _now()

        self.client.upsert(
            collection_name=COLLECTION,
            points=[qm.PointStruct(id=point.id, vector=vector, payload=point.to_payload())],
        )
        return point.id

    def delete(self, point_id: str):
        """Remove one point by id."""
        self.client.delete(
            collection_name=COLLECTION,
            points_selector=qm.PointIdsList(points=[point_id]),
        )

    # ─── Read operations ─────────────────────────────────────────────

    def get(self, point_id: str) -> Optional[MemoryPoint]:
        """Fetch one point by id, or None when it does not exist."""
        result = self.client.retrieve(collection_name=COLLECTION, ids=[point_id], with_payload=True)
        if not result:
            return None
        return MemoryPoint.from_qdrant(result[0])

    def list_pinned(self) -> List[MemoryPoint]:
        """All pinned points — the Hot Memory set."""
        return self._scroll(flt=qm.Filter(must=[
            qm.FieldCondition(key="pinned", match=qm.MatchValue(value=True))
        ]))

    def list_by_type(self, type_: str, limit: int = 100) -> List[MemoryPoint]:
        """All points of one type, up to `limit`."""
        return self._scroll(
            flt=qm.Filter(must=[
                qm.FieldCondition(key="type", match=qm.MatchValue(value=type_))
            ]),
            limit=limit,
        )

    def list_all(self, limit: int = 1000) -> List[MemoryPoint]:
        """Every point (unfiltered), up to `limit`."""
        return self._scroll(flt=None, limit=limit)

    def _scroll(self, flt, limit: int = 1000) -> List[MemoryPoint]:
        # Parameter renamed from `filter` to `flt` to avoid shadowing the builtin.
        points, _ = self.client.scroll(
            collection_name=COLLECTION,
            scroll_filter=flt,
            limit=limit,
            with_payload=True,
            with_vectors=False,
        )
        return [MemoryPoint.from_qdrant(p) for p in points]

    def search(
        self,
        query_vector: List[float],
        k: int = 5,
        type_filter: Optional[str] = None,
        exclude_pinned: bool = True,
    ) -> List[MemoryPoint]:
        """Semantic search. By default pinned points are excluded —
        they go into the prompt separately via list_pinned."""
        must = []
        must_not = []
        if type_filter:
            must.append(qm.FieldCondition(key="type", match=qm.MatchValue(value=type_filter)))
        if exclude_pinned:
            must_not.append(qm.FieldCondition(key="pinned", match=qm.MatchValue(value=True)))

        flt = qm.Filter(must=must or None, must_not=must_not or None)

        results = self.client.search(
            collection_name=COLLECTION,
            query_vector=query_vector,
            query_filter=flt if (must or must_not) else None,
            limit=k,
            with_payload=True,
        )
        return [MemoryPoint.from_qdrant(p) for p in results]

    def count(self) -> int:
        """Exact number of points in the collection."""
        return self.client.count(collection_name=COLLECTION, exact=True).count
|
||||
@@ -0,0 +1,133 @@
|
||||
"""
|
||||
Call-Metrics fuer den Proxy-Client.
|
||||
|
||||
Pro Claude-Call wird ein Eintrag in /data/metrics.jsonl angehaengt:
|
||||
|
||||
{"ts": <ms>, "model": "...", "in": <tokens_in_estimate>, "out": <tokens_out_estimate>}
|
||||
|
||||
Tokens-Schaetzung: characters / 4 (Anthropic-Default-Heuristik). Nicht exakt
|
||||
aber gut genug fuer Quota-Monitoring. Wir summieren nicht in-memory weil
|
||||
der Brain-Container neugestartet werden kann — alles auf Disk.
|
||||
|
||||
Auswertung via aggregate(window_seconds) — liefert {calls, tokens_in, tokens_out}
|
||||
fuer die letzten N Sekunden. Lazy gelesen, keine grossen Datenmengen erwartet
|
||||
(bei 1000 Calls/Tag ~70 KB pro Monat).
|
||||
|
||||
Auto-Rotate: bei > 50k Zeilen werden die aeltesten 25k weggeschnitten.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
METRICS_FILE = Path(os.environ.get("METRICS_FILE", "/data/metrics.jsonl"))
|
||||
ROTATE_AT = 50_000
|
||||
ROTATE_KEEP = 25_000
|
||||
|
||||
|
||||
def _estimate_tokens(text: str) -> int:
|
||||
"""Anthropic-Default: ~4 chars pro Token. Grob genug."""
|
||||
if not text:
|
||||
return 0
|
||||
return max(1, len(text) // 4)
|
||||
|
||||
|
||||
def _messages_tokens(messages: list) -> int:
    """Sum the token estimate over a list of messages.

    Accepts pydantic-style objects (anything with a .content attribute) and
    plain dicts; dict contents that are not strings are skipped.
    """
    total = 0
    for msg in messages:
        if hasattr(msg, "content"):
            # Model object: always estimate, treating None as empty text.
            total += _estimate_tokens(msg.content or "")
        elif isinstance(msg, dict):
            text = msg.get("content") or ""
            if isinstance(text, str):
                total += _estimate_tokens(text)
    return total
|
||||
|
||||
|
||||
def log_call(model: str, messages_in: list, reply_text: str = "") -> None:
    """Append one call metric to METRICS_FILE.

    Robust by design: metric logging must never break a call, so every
    failure is swallowed and only logged as a warning.
    """
    try:
        tokens_in = _messages_tokens(messages_in)
        tokens_out = _estimate_tokens(reply_text)
        line = json.dumps({
            "ts": int(time.time() * 1000),
            "model": model,
            "in": tokens_in,
            "out": tokens_out,
        })
        METRICS_FILE.parent.mkdir(parents=True, exist_ok=True)
        with METRICS_FILE.open("a", encoding="utf-8") as f:
            f.write(line + "\n")
        # Cheap rotation policy: only probe the file on every 1000th call.
        # (The previous check keyed on token counts, which fired essentially
        # at random instead of per call count as its comment claimed.)
        call_no = getattr(log_call, "_call_no", 0) + 1
        log_call._call_no = call_no
        if call_no % 1000 == 0:
            _maybe_rotate()
    except Exception as exc:
        logger.warning("metrics.log_call: %s", exc)
|
||||
|
||||
|
||||
def _maybe_rotate() -> None:
    """Trim the metrics file to the newest ROTATE_KEEP lines once it
    exceeds ROTATE_AT lines. Failures are logged, never raised."""
    try:
        if not METRICS_FILE.exists():
            return
        with METRICS_FILE.open("r", encoding="utf-8") as fh:
            all_lines = fh.readlines()
        if len(all_lines) <= ROTATE_AT:
            return
        tail = all_lines[-ROTATE_KEEP:]
        METRICS_FILE.write_text("".join(tail), encoding="utf-8")
        logger.info("metrics rotated: %d → %d Zeilen", len(all_lines), len(tail))
    except Exception as exc:
        logger.warning("metrics rotate: %s", exc)
|
||||
|
||||
|
||||
def aggregate(window_seconds: int) -> dict:
    """Aggregate call counts and token estimates over the last N seconds.

    Reads METRICS_FILE lazily; malformed or out-of-window lines are skipped.
    """
    cutoff_ms = int(time.time() * 1000) - window_seconds * 1000
    calls = 0
    tokens_in = 0
    tokens_out = 0
    by_model: dict[str, int] = {}
    if METRICS_FILE.exists():
        try:
            for line in METRICS_FILE.read_text(encoding="utf-8").splitlines():
                line = line.strip()
                if not line:
                    continue
                try:
                    entry = json.loads(line)
                except Exception:
                    # Broken line (partial write) — ignore.
                    continue
                if entry.get("ts", 0) < cutoff_ms:
                    continue
                calls += 1
                tokens_in += int(entry.get("in") or 0)
                tokens_out += int(entry.get("out") or 0)
                model_name = entry.get("model", "?")
                by_model[model_name] = by_model.get(model_name, 0) + 1
        except Exception as exc:
            logger.warning("metrics aggregate: %s", exc)
    return {
        "window_seconds": window_seconds,
        "calls": calls,
        "tokens_in": tokens_in,
        "tokens_out": tokens_out,
        "by_model": by_model,
    }
|
||||
|
||||
|
||||
def stats() -> dict:
    """Complete snapshot over the standard monitoring windows."""
    windows = {
        "h1": 3600,
        "h5": 5 * 3600,
        "h24": 24 * 3600,
        "d30": 30 * 24 * 3600,
    }
    return {label: aggregate(seconds) for label, seconds in windows.items()}
|
||||
@@ -0,0 +1,399 @@
|
||||
"""
|
||||
Migration aus aria-data/brain-import/ → Vector-DB.
|
||||
|
||||
Parst die mitgelieferten Markdown-Dateien (AGENT.md, USER.md, TOOLING.md)
|
||||
und zerlegt sie in atomare Memory-Punkte. Jeder Punkt bekommt:
|
||||
|
||||
source = "import"
|
||||
migration_key = stabiler Identifier (z.B. "agent.md/rule-1") fuer Idempotenz
|
||||
pinned = True
|
||||
|
||||
Beim Re-Run werden vorhandene Punkte mit gleicher migration_key entfernt
|
||||
und neu geschrieben.
|
||||
|
||||
Mapping pro Datei:
|
||||
|
||||
AGENT.md
|
||||
"Identitaet" → 1 Punkt type=identity
|
||||
"Persoenlichkeit" (Intro) → 1 Punkt type=identity
|
||||
"Kern-Eigenschaften" (Liste) → 1 Punkt pro Bullet type=identity
|
||||
"Tool-Freigaben" → 1 Punkt type=tool
|
||||
"Sicherheitsregeln" (Liste) → 1 Punkt pro Bullet type=rule
|
||||
"Arbeitsprinzipien" (Liste) → 1 Punkt pro Bullet type=rule
|
||||
"Dateien an Stefan zurueckgeben"→ 1 Punkt type=skill
|
||||
"Stimme" → 1 Punkt type=tool
|
||||
|
||||
USER.md
|
||||
"Allgemein" (Liste) → 1 Punkt pro Bullet type=preference
|
||||
"Bestaetigung erforderlich" → 1 Punkt type=preference
|
||||
"Autonomes Arbeiten OK fuer" → 1 Punkt type=preference
|
||||
"Tools & Infrastruktur" → 1 Punkt type=preference
|
||||
|
||||
TOOLING.md
|
||||
gesamter Inhalt → 1 Punkt type=tool, title="Tooling-Stack"
|
||||
|
||||
BOOTSTRAP.md ist eine Variante von AGENT.md — wird (vorerst) ignoriert
|
||||
damit keine doppelten Punkte landen.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
from memory import Embedder, VectorStore, MemoryPoint
|
||||
from memory.vector_store import COLLECTION
|
||||
from qdrant_client.http import models as qm
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class _Block:
    # One markdown section: the heading text plus the raw body that follows it.
    title: str
    content: str
|
||||
|
||||
|
||||
def _split_h2(md: str) -> List[_Block]:
    """Split markdown into H2 sections; content before the first H2 is dropped."""
    sections: List[_Block] = []
    active: Optional[_Block] = None
    for raw in md.splitlines():
        match = re.match(r"^##\s+(.+?)\s*$", raw)
        if match and not raw.startswith("### "):
            # New section: close the previous one and open a fresh block.
            if active:
                sections.append(active)
            active = _Block(title=match.group(1).strip(), content="")
        elif active is not None:
            active.content += raw + "\n"
    if active:
        sections.append(active)
    return sections
|
||||
|
||||
|
||||
def _split_h3(content: str) -> List[_Block]:
    """Split an H2 block into H3 subsections plus an '_intro' block for any
    text preceding the first H3 heading.

    Order matters: the pre-H3 header lines are flushed exactly once — either
    when the first H3 is found, or at the end when no H3 exists at all.
    """
    blocks: List[_Block] = []
    header_lines: List[str] = []
    current: Optional[_Block] = None
    for line in content.splitlines():
        m = re.match(r"^###\s+(.+?)\s*$", line)
        if m:
            # First H3 seen: emit the accumulated intro text (if any) first.
            if current is None and header_lines:
                blocks.append(_Block(title="_intro", content="\n".join(header_lines).strip()))
            if current:
                blocks.append(current)
            current = _Block(title=m.group(1).strip(), content="")
            continue
        if current is None:
            header_lines.append(line)
        else:
            current.content += line + "\n"
    if current:
        blocks.append(current)
    elif header_lines:
        # No H3 at all: the whole content becomes the intro block.
        blocks.append(_Block(title="_intro", content="\n".join(header_lines).strip()))
    return blocks
|
||||
|
||||
|
||||
def _extract_bullets(content: str) -> List[tuple[str, str]]:
    """Find "- **Title** — Body" or "N. **Title** — Body" bullets.

    Continuation lines (indented, blank, or plain non-bullet text) attach to
    the current bullet. Bullets without a bold **Title** become single-line
    entries whose title is their text truncated to 60 characters.

    Returns: list of (title, full_bullet_text).
    """
    bullets: List[tuple[str, str]] = []
    current_lines: List[str] = []
    current_title: Optional[str] = None

    def flush():
        # Emit the bullet collected so far (no-op while nothing is open).
        if current_title and current_lines:
            bullets.append((current_title, "\n".join(current_lines).strip()))

    for line in content.splitlines():
        m = re.match(r"^\s*(?:[-*]|\d+\.)\s+\*\*([^*]+?)\*\*\s*[—\-:]?\s*(.*)$", line)
        if m:
            flush()
            current_title = m.group(1).strip()
            current_lines = [line]
            continue
        # Indented or blank follow-up lines belong to the current bullet
        if current_title and (line.startswith(" ") or line.startswith("\t") or not line.strip()):
            current_lines.append(line)
            continue
        # Plain (non-bullet) text also continues the current bullet
        if current_title and not re.match(r"^\s*(?:[-*]|\d+\.)\s+", line):
            current_lines.append(line)
            continue
        # New bullet without the **Title** format
        if re.match(r"^\s*(?:[-*]|\d+\.)\s+", line):
            flush()
            text = re.sub(r"^\s*(?:[-*]|\d+\.)\s+", "", line).strip()
            short_title = (text[:60] + "…") if len(text) > 60 else text
            bullets.append((short_title, line.strip()))
            current_title = None
            current_lines = []
    flush()
    return bullets
|
||||
|
||||
|
||||
# ─── Pro Datei eine Parser-Funktion ──────────────────────────────────
|
||||
|
||||
def _parse_agent_md(md: str, source_file: str) -> List[MemoryPoint]:
    """Map AGENT.md/BOOTSTRAP.md H2 sections to typed memory points.

    Section titles are matched case-insensitively; unknown sections fall
    through to a generic unpinned 'fact' point (see the else branch).
    """
    points: List[MemoryPoint] = []
    h2_blocks = _split_h2(md)
    for h2 in h2_blocks:
        title = h2.title
        content = h2.content.strip()
        if not content:
            continue

        if title.lower() == "identitaet" or title.lower() == "identität":
            points.append(_mk(
                type_="identity", title="ARIA — Identitaet",
                content=f"## {title}\n\n{content}",
                category="persoenlichkeit",
                migration_key=f"{source_file}/identity",
            ))

        elif title.lower() == "persoenlichkeit" or title.lower() == "persönlichkeit":
            # Split intro paragraph from the "Kern-Eigenschaften" bullet list
            sub = _split_h3(content)
            for s in sub:
                if s.title == "_intro" and s.content.strip():
                    points.append(_mk(
                        type_="identity", title="Persoenlichkeit — Grundsatz",
                        content=s.content.strip(),
                        category="persoenlichkeit",
                        migration_key=f"{source_file}/personality-intro",
                    ))
                elif s.title.lower().startswith("kern"):
                    # One point per core-trait bullet
                    for idx, (btitle, btext) in enumerate(_extract_bullets(s.content), 1):
                        points.append(_mk(
                            type_="identity", title=f"Eigenschaft: {btitle}",
                            content=btext, category="persoenlichkeit",
                            migration_key=f"{source_file}/personality-trait-{idx}",
                        ))

        elif "sicherheitsregel" in title.lower():
            # One rule point per security bullet
            for idx, (btitle, btext) in enumerate(_extract_bullets(content), 1):
                points.append(_mk(
                    type_="rule", title=f"Sicherheit: {btitle}",
                    content=btext, category="sicherheit",
                    migration_key=f"{source_file}/security-{idx}",
                ))

        elif "arbeitsprinzipien" in title.lower() or "arbeitsprinzip" in title.lower():
            # One rule point per working-principle bullet
            for idx, (btitle, btext) in enumerate(_extract_bullets(content), 1):
                points.append(_mk(
                    type_="rule", title=f"Prinzip: {btitle}",
                    content=btext, category="arbeitsweise",
                    migration_key=f"{source_file}/work-principle-{idx}",
                ))

        elif "tool-freigaben" in title.lower() or "tool freigaben" in title.lower():
            points.append(_mk(
                type_="tool", title="Tool-Freigaben — Vollzugriff",
                content=content, category="infrastruktur",
                migration_key=f"{source_file}/tool-access",
            ))

        elif "dateien an stefan" in title.lower() or "dateien zurueckgeben" in title.lower() or "dateien zur" in title.lower():
            points.append(_mk(
                type_="skill", title="Dateien an User zurueckgeben",
                content=content, category="ausgabe",
                migration_key=f"{source_file}/file-return-skill",
            ))

        elif title.lower() == "stimme":
            points.append(_mk(
                type_="tool", title="Stimme (F5-TTS)",
                content=content, category="infrastruktur",
                migration_key=f"{source_file}/voice",
            ))

        # Permanent permissions (found in BOOTSTRAP) — stored as a rule.
        # NOTE: must stay below the "tool-freigaben" branch, which also
        # contains the substring "freigaben".
        elif "freigaben" in title.lower():
            points.append(_mk(
                type_="rule", title=title,
                content=content, category="freigaben",
                migration_key=f"{source_file}/permissions",
            ))

        else:
            # Unknown block: store as a generic fact, NOT pinned
            logger.info("Unbekannter H2-Block '%s' in %s — als fact (unpinned)", title, source_file)
            points.append(_mk(
                type_="fact", title=f"{source_file}: {title}",
                content=content, pinned=False,
                migration_key=f"{source_file}/section-{title.lower().replace(' ', '-')}",
            ))
    return points
|
||||
|
||||
|
||||
def _parse_user_md(md: str, source_file: str) -> List[MemoryPoint]:
    """Map USER.md H2 sections to preference points.

    'Allgemein' is exploded into one point per bullet; every other section
    becomes a single point. Template placeholder sections/bullets (the
    USER.md.example markers) are skipped.
    """
    points: List[MemoryPoint] = []
    for h2 in _split_h2(md):
        title = h2.title
        content = h2.content.strip()
        if not content:
            continue
        # Filter out template placeholders: example lines containing <Tag>
        if "<Beispiel-Tool>" in content or "<Username>" in title:
            continue
        if title.lower() == "allgemein":
            for idx, (btitle, btext) in enumerate(_extract_bullets(content), 1):
                # Skip template placeholder bullets
                if "<z.B." in btext or "<XYZ>" in btext:
                    continue
                points.append(_mk(
                    type_="preference", title=f"User: {btitle}",
                    content=btext, category="allgemein",
                    migration_key=f"{source_file}/general-{idx}",
                ))
        else:
            # Derive a slug-like category key from the section title
            cat_key = re.sub(r"[^a-z0-9]+", "-", title.lower()).strip("-") or "allgemein"
            points.append(_mk(
                type_="preference", title=title,
                content=content, category=cat_key,
                migration_key=f"{source_file}/{cat_key}",
            ))
    return points
|
||||
|
||||
|
||||
def _parse_tooling_md(md: str, source_file: str) -> List[MemoryPoint]:
    """TOOLING.md is not split: the whole file becomes one tool point."""
    stripped = md.strip()
    if not stripped:
        return []
    point = _mk(
        type_="tool", title="Tooling-Stack (VM)",
        content=stripped, category="infrastruktur",
        migration_key=f"{source_file}/tooling-full",
    )
    return [point]
|
||||
|
||||
|
||||
# ─── Helper ─────────────────────────────────────────────────────────
|
||||
|
||||
def _mk(
    type_: str,
    title: str,
    content: str,
    migration_key: str,
    pinned: bool = True,
    category: str = "",
) -> MemoryPoint:
    """Build an import-sourced MemoryPoint and stash its migration key on it."""
    point = MemoryPoint(
        id="",
        type=type_,
        title=title,
        content=content.strip(),
        pinned=pinned,
        category=category,
        source="import",
        tags=[],
    )
    # The migration key is not a dataclass field; run_migration reads it back
    # via getattr and injects it into the Qdrant payload manually.
    point._migration_key = migration_key
    return point
|
||||
|
||||
|
||||
# ─── Eintrittspunkt ─────────────────────────────────────────────────
|
||||
|
||||
def run_migration(
    import_dir: Path,
    store: VectorStore,
    embedder: Embedder,
) -> dict:
    """Read all known .md files from import_dir, parse them, write to the DB.

    Idempotent: existing points with the same migration_key are deleted and
    rewritten.

    Returns: {"created": int, "files": [...], "import_dir": str} on success;
    the early-exit paths additionally carry "updated"/"skipped"/"error" keys.
    """
    # Hoisted out of the write loop below — these used to be re-imported
    # on every single point.
    import uuid as _uuid
    from datetime import datetime, timezone

    if not import_dir.exists():
        return {"created": 0, "updated": 0, "skipped": 0, "files": [], "error": f"{import_dir} nicht gefunden"}

    parsers = {
        "AGENT.md": _parse_agent_md,
        "BOOTSTRAP.md": _parse_agent_md,  # same parser, entries may overlap
        "USER.md": _parse_user_md,
        "USER.md.example": _parse_user_md,
        "TOOLING.md": _parse_tooling_md,
        "TOOLING.md.example": _parse_tooling_md,
    }

    # USER.md takes precedence over USER.md.example (same for TOOLING)
    file_priority = ["AGENT.md", "BOOTSTRAP.md", "USER.md", "USER.md.example",
                     "TOOLING.md", "TOOLING.md.example"]
    seen_kinds: set[str] = set()  # "USER" / "TOOLING" — processed once only

    points: List[MemoryPoint] = []
    processed_files: List[str] = []

    for fname in file_priority:
        fp = import_dir / fname
        if not fp.exists():
            continue
        kind = fname.split(".")[0]  # "AGENT", "BOOTSTRAP", "USER", "TOOLING"
        # Use the .example variant only when the real file is missing
        if kind in ("USER", "TOOLING") and kind in seen_kinds:
            continue
        seen_kinds.add(kind)
        parser = parsers.get(fname)
        if not parser:
            continue
        try:
            md = fp.read_text(encoding="utf-8")
            file_points = parser(md, fname)
            points.extend(file_points)
            processed_files.append(f"{fname} ({len(file_points)})")
            logger.info("Migration: %s → %d Punkte", fname, len(file_points))
        except Exception as exc:
            logger.exception("Migration: %s fehlgeschlagen", fname)
            processed_files.append(f"{fname} (FEHLER: {exc})")

    if not points:
        return {"created": 0, "updated": 0, "skipped": 0, "files": processed_files}

    # Idempotency: first delete old migration points with the same keys
    migration_keys = [getattr(p, "_migration_key", None) for p in points]
    migration_keys = [k for k in migration_keys if k]
    if migration_keys:
        store.client.delete(
            collection_name=COLLECTION,
            points_selector=qm.FilterSelector(filter=qm.Filter(must=[
                qm.FieldCondition(key="migration_key", match=qm.MatchAny(any=migration_keys))
            ])),
        )
        logger.info("Migration: %d alte Punkte mit gleicher migration_key entfernt", len(migration_keys))

    # Embed all contents in one batch (much faster than per-point calls)
    texts = [p.content for p in points]
    vectors = embedder.embed_batch(texts)

    created = 0
    for p, vec in zip(points, vectors):
        payload = p.to_payload()
        # migration_key is not a MemoryPoint field — inject it into the
        # payload manually so the payload index can filter on it.
        mkey = getattr(p, "_migration_key", None)
        if mkey:
            payload["migration_key"] = mkey
        pid = str(_uuid.uuid4())
        now = datetime.now(timezone.utc).isoformat()
        payload["created_at"] = now
        payload["updated_at"] = now
        store.client.upsert(
            collection_name=COLLECTION,
            points=[qm.PointStruct(id=pid, vector=vec, payload=payload)],
        )
        created += 1

    return {
        "created": created,
        "files": processed_files,
        "import_dir": str(import_dir),
    }
|
||||
@@ -0,0 +1,214 @@
|
||||
"""
|
||||
System-Prompt-Bau aus Memory-Punkten.
|
||||
|
||||
Strategie:
|
||||
1. Alle pinned Punkte (Hot Memory) — gruppiert nach Type — in den
|
||||
System-Prompt schreiben. IMMER drin.
|
||||
2. Top-K semantisch aehnliche Punkte (Cold Memory) zur aktuellen
|
||||
User-Nachricht — als "Moeglicherweise relevant" eingehaengt.
|
||||
3. Aktive Skills als kompakte Liste (nur Name + Description) — damit
|
||||
ARIA weiss was sie hat.
|
||||
|
||||
Phase B Punkt 1: nur Hot-Memory-Bau, Skills + Cold-Search kommen
|
||||
mit dem Conversation-Loop in spaeteren Phasen.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from typing import List
|
||||
|
||||
from memory import MemoryPoint
|
||||
|
||||
|
||||
def build_time_section() -> str:
    """Render the current-time block so ARIA can schedule timers and
    reason about watcher conditions such as ``hour_of_day``."""
    utc_now = datetime.now(timezone.utc)
    # Europe/Berlin without zoneinfo: CEST (UTC+2) roughly March-October,
    # CET (UTC+1) otherwise. Display-only precision is acceptable here —
    # the VM runs on UTC, this is just for human-readable context.
    offset_hours = 2 if 3 <= utc_now.month <= 10 else 1
    berlin_now = utc_now + timedelta(hours=offset_hours)
    section = "\n".join([
        "## Aktuelle Zeit",
        f"- UTC: {utc_now.isoformat(timespec='seconds')}",
        f"- Lokal (Europa/Berlin, UTC+{offset_hours}): "
        f"{berlin_now.strftime('%Y-%m-%d %H:%M:%S')} ({berlin_now.strftime('%A')})",
        "",
        "Nutze das fuer Trigger-Timestamps und um Watcher-Conditions wie "
        "`hour_of_day == 8` einzuordnen. Fuer relative Angaben "
        "('in 10min', 'in 2 Stunden') nutze beim `trigger_timer` den "
        "`in_seconds`-Parameter — Server rechnet dann selbst.",
    ])
    return section
|
||||
|
||||
# Maps a memory-point type to its section heading in the system prompt.
# Types without an entry fall back to a generic "## {type}" heading
# (see build_hot_memory_section).
TYPE_HEADINGS = {
    "identity": "## Wer du bist",
    "rule": "## Sicherheitsregeln & Prinzipien",
    "preference": "## Benutzer-Praeferenzen",
    "tool": "## Tool-Freigaben",
    "skill": "## Deine Skills",
}
|
||||
|
||||
|
||||
def build_hot_memory_section(pinned: List[MemoryPoint]) -> str:
    """Render the always-in-prompt block from pinned (hot) memory points."""
    by_type: dict[str, List[MemoryPoint]] = {}
    for point in pinned:
        by_type.setdefault(point.type, []).append(point)

    out: List[str] = []

    def emit(heading: str, items: List[MemoryPoint]) -> None:
        # One "##" heading per type, one "###" entry per point.
        out.append(heading)
        for point in items:
            out.extend((f"### {point.title}", point.content.strip(), ""))

    # Fixed ordering first: identity → rule → preference → tool → skill.
    for kind in ("identity", "rule", "preference", "tool", "skill"):
        items = by_type.pop(kind, [])
        if items:
            emit(TYPE_HEADINGS.get(kind, f"## {kind}"), items)

    # Remaining (unexpected) types keep insertion order with a generic heading.
    for kind, items in by_type.items():
        emit(f"## {kind}", items)

    return "\n".join(out).strip()
|
||||
|
||||
|
||||
def build_cold_memory_section(matches: List[MemoryPoint]) -> str:
    """Render the 'possibly relevant' block from semantic-search hits.

    Returns "" when there are no hits so callers can skip the section."""
    if not matches:
        return ""
    out = ["## Moeglicherweise relevant (aus Gedaechtnis)"]
    for hit in matches:
        # Only annotate the score when the search actually produced one.
        suffix = "" if hit.score is None else f" [score={hit.score:.2f}]"
        out.append(f"- **{hit.title}**{suffix}")
        out.append(f"  {hit.content.strip()}")
    return "\n".join(out)
|
||||
|
||||
|
||||
def build_skills_section(skills: List[dict]) -> str:
    """Render the skills inventory (active AND disabled entries, so ARIA
    never builds a duplicate) plus the policy for when a new skill pays off."""
    out = ["## Deine Skills"]
    if not skills:
        out.append("(noch keine Skills vorhanden)")
    else:
        for skill in skills:
            # Disabled skills stay listed but are clearly marked uncallable.
            flag = "" if skill.get("active", True) else " [DEAKTIVIERT — kann nicht aufgerufen werden]"
            out.append(f"- **{skill.get('name', '?')}**{flag} — {skill.get('description', '(ohne Beschreibung)')}")
        out.append("")
        out.append("Wenn ein vorhandener Skill zur Aufgabe passt: nutze ihn via Tool-Call.")

    # Static guidance: hard rule (pip dependency → always a skill) plus the
    # four-part test for everything else.
    out.extend([
        "",
        "### Wann lohnt sich ein neuer Skill?",
        "",
        "**Skills sind IMMER Python** — eigene venv pro Skill mit den noetigen "
        "pip-Paketen. Kein apt im Skill, kein systemweiter Install. Python deckt "
        "in der Regel alles ab (yt-dlp, requests, pypdf, pillow, openpyxl, "
        "static-ffmpeg, beautifulsoup4, …). Falls etwas WIRKLICH nur via apt geht: "
        "Stefan fragen ob es ins Brain-Dockerfile soll.",
        "",
        "**Harte Regel — IMMER Skill anlegen wenn:** die Loesung erfordert eine "
        "pip-Library. Begruendung: Brain-Container hat keinen persistenten State "
        "ausser /data/skills/. Ohne Skill wuerde der Install bei jedem "
        "Container-Restart wiederholt.",
        "",
        "**Sonst — Skill nur wenn alle vier zutreffen:**",
        "",
        "1. **Wiederkehrend** — die Aufgabe wird realistisch nochmal gestellt. "
        "Einmal-Faelle (\"wie spaet ist es jetzt\") kein Skill.",
        "2. **Nicht-trivial** — mehrere Schritte. Ein einzelner Shell-Befehl "
        "(`date`, `hostname`, `ls`) ist KEIN Skill — das macht Bash direkt.",
        "3. **Parametrisierbar** — der Skill nimmt Eingaben (URL, Datei, Suchbegriff) "
        "und gibt ein nuetzliches Ergebnis zurueck.",
        "4. **Wiederverwendbar als ganzes** — Stefan wuerde es zukuenftig per Name "
        "ansprechen (\"mach mir den YouTube zu MP3\") statt jedes Mal zu erklaeren.",
        "",
        "Wenn nichts installiert werden muss UND nicht alle vier zutreffen: einfach "
        "die Aufgabe loesen ohne Skill anzulegen. Stefan kann jederzeit sagen "
        "'bau daraus einen Skill'.",
    ])
    return "\n".join(out)
|
||||
|
||||
|
||||
def build_triggers_section(
    triggers: List[dict],
    condition_vars: List[dict],
    condition_funcs: List[dict] | None = None,
) -> str:
    """Render the triggers block: current triggers (passive wake-up sources),
    the condition variables/functions available to watchers, and usage rules.

    Parameters:
        triggers: trigger manifests (dicts with type/name/message/...).
        condition_vars: [{"name", "type", "desc"}, ...] for watcher conditions.
        condition_funcs: optional [{"signature", "desc"}, ...].

    Robustness fix: trigger fields are read with .get() throughout — a
    malformed manifest (missing "type" or "name") no longer raises KeyError
    and crashes the whole prompt build; it is simply rendered degraded.
    """
    lines = ["## Trigger (passive Aufweck-Quellen)"]
    lines.append("")
    lines.append("Trigger sind ANDERS als Skills: das System ruft DICH wenn ein Event passiert. "
                 "Du legst sie an wenn Stefan sagt 'erinner mich an X' oder 'sag bescheid wenn Y'.")
    lines.append("")
    if triggers:
        lines.append("### Aktuelle Trigger")
        for t in triggers:
            active = t.get("active", True)
            mark = "" if active else " [INAKTIV]"
            ttype = t.get("type")
            tname = t.get("name", "?")
            if ttype == "timer":
                lines.append(f"- **{tname}**{mark} (timer) feuert {t.get('fires_at')}: \"{t.get('message','')[:80]}\"")
            elif ttype == "watcher":
                lines.append(f"- **{tname}**{mark} (watcher) cond=`{t.get('condition')}`: \"{t.get('message','')[:80]}\"")
        lines.append("")
    lines.append("### Verfuegbare Condition-Variablen (fuer Watcher)")
    for v in condition_vars:
        lines.append(f"- `{v['name']}` ({v['type']}) — {v['desc']}")
    if condition_funcs:
        lines.append("")
        lines.append("### Verfuegbare Funktionen in Conditions")
        for fn in condition_funcs:
            lines.append(f"- `{fn['signature']}` — {fn['desc']}")
    lines.append("")
    lines.append("Operatoren in Conditions: `<` `>` `<=` `>=` `==` `!=` `and` `or` `not`. "
                 "Beispiele: `disk_free_gb < 5 and hour_of_day >= 8`, "
                 "`day_of_week == \"mon\"`, `near(53.123, 7.456, 500)`. "
                 "Funktionen nur mit Konstanten als Argumenten (keine Variablen, "
                 "keine geschachtelten Funktionen).")
    lines.append("")
    lines.append("### Wann welcher Typ?")
    lines.append("- **Timer** fuer einmalige Erinnerungen mit konkreter Zeit ('in 10min', 'um 14:30').")
    lines.append("- **Watcher** fuer 'wenn X passiert' (Disk voll, bestimmte Tageszeit, GPS-Naehe).")
    lines.append("- ARIA legt Trigger NUR auf Stefan-Wunsch an, nicht eigenmaechtig.")
    lines.append("")
    lines.append("### GPS-Watcher mit near()")
    lines.append(
        "Wenn du einen Watcher mit `near()` anlegst: die App sendet GPS-Position "
        "nur kontinuierlich wenn Tracking AN ist (Default: AUS, Akku-Schutz). "
        "Rufe dafuer `request_location_tracking(on=true, reason=\"...\")` auf "
        "bevor oder gleich nach dem trigger_watcher. Sonst hat current_lat/lon "
        "veraltete Werte und der Watcher feuert nie. "
        "Beim Loeschen des letzten GPS-Watchers (trigger_cancel) wieder "
        "`request_location_tracking(on=false)` aufrufen.")
    return "\n".join(lines)
|
||||
|
||||
|
||||
def build_system_prompt(
    pinned: List[MemoryPoint],
    cold: List[MemoryPoint] | None = None,
    skills: List[dict] | None = None,
    triggers: List[dict] | None = None,
    condition_vars: List[dict] | None = None,
    condition_funcs: List[dict] | None = None,
) -> str:
    """Assemble the complete system prompt: hot memory, current time, and —
    when provided — skills, trigger documentation, and cold-memory hits."""
    sections = [build_hot_memory_section(pinned), "", build_time_section()]
    # Optional sections, each preceded by a blank spacer line.
    if skills:
        sections.extend(("", build_skills_section(skills)))
    if condition_vars:
        # Trigger docs are only useful once condition variables exist.
        sections.extend(("", build_triggers_section(triggers or [], condition_vars, condition_funcs)))
    if cold:
        sections.extend(("", build_cold_memory_section(cold)))
    return "\n".join(sections).strip()
|
||||
@@ -0,0 +1,149 @@
|
||||
"""
|
||||
Claude-Aufruf ueber den lokalen Proxy.
|
||||
|
||||
Der Proxy (claude-max-api-proxy) bietet eine OpenAI-kompatible API
|
||||
unter http://proxy:3456/v1/chat/completions. Wir nutzen non-streaming
|
||||
mit einem laengeren Timeout — Claude Code spawnt pro Anfrage einen
|
||||
neuen CLI-Prozess (Cold-Start), das dauert.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
import httpx
|
||||
from pydantic import BaseModel
|
||||
|
||||
import metrics
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
RUNTIME_CONFIG_FILE = Path("/shared/config/runtime.json")
|
||||
ENV_MODEL = os.environ.get("BRAIN_MODEL", "claude-sonnet-4")
|
||||
PROXY_URL = os.environ.get("PROXY_URL", "http://proxy:3456")
|
||||
PROXY_TIMEOUT_SEC = float(os.environ.get("PROXY_TIMEOUT_SEC", "300"))
|
||||
|
||||
|
||||
def _read_model_from_runtime() -> str:
    """Return brainModel from runtime.json, falling back to ENV BRAIN_MODEL.

    Any read/parse failure is logged and swallowed — a broken config file
    must never prevent the proxy client from starting."""
    try:
        if not RUNTIME_CONFIG_FILE.exists():
            return ENV_MODEL
        raw = RUNTIME_CONFIG_FILE.read_text(encoding="utf-8")
        configured = (json.loads(raw).get("brainModel") or "").strip()
        if configured:
            return configured
    except Exception as exc:
        logger.warning("runtime.json lesen fehlgeschlagen: %s", exc)
    return ENV_MODEL
|
||||
|
||||
|
||||
DEFAULT_MODEL = _read_model_from_runtime()
|
||||
|
||||
|
||||
class Message(BaseModel):
    """One chat message in the OpenAI wire format sent to the proxy.

    Optional fields stay None and are dropped via model_dump(exclude_none=True)
    in ProxyClient.chat_full, so e.g. a role="tool" message goes out without
    a tool_calls key."""
    role: str  # "system" | "user" | "assistant" | "tool"
    content: Optional[str] = None
    tool_calls: Optional[list] = None  # assistant-side tool invocations, passed through verbatim
    tool_call_id: Optional[str] = None  # presumably links a role="tool" reply to its call — OpenAI convention
    name: Optional[str] = None  # only for role=tool
|
||||
|
||||
|
||||
class ProxyResult(BaseModel):
    """Parsed result of one /v1/chat/completions round-trip (see chat_full)."""
    content: str = ""  # concatenated text content; "" when the reply was tool-calls only
    tool_calls: list = []  # each: {"id", "name", "arguments" (dict)}
    finish_reason: str = ""  # e.g. "stop" / "tool_calls"; "" when absent in the response
|
||||
|
||||
|
||||
class ProxyClient:
    """Thin OpenAI-compatible client for the local claude-max-api-proxy.

    Non-streaming with a long timeout (PROXY_TIMEOUT_SEC) — the proxy spawns
    a fresh CLI process per request (cold start), which is slow."""

    def __init__(self, base_url: str = PROXY_URL, model: str = DEFAULT_MODEL):
        self.base_url = base_url.rstrip("/")
        self.model = model
        # Persistent client connection — avoids a TCP handshake on every call
        self._client = httpx.Client(timeout=PROXY_TIMEOUT_SEC)

    def chat(self, messages: List[Message], model: Optional[str] = None) -> str:
        """Convenience wrapper: simple chat without tools; returns only the
        reply string. Raises RuntimeError on an empty reply."""
        result = self.chat_full(messages, tools=None, model=model)
        if not result.content:
            raise RuntimeError("Proxy lieferte leeren content")
        return result.content

    def chat_full(
        self,
        messages: List[Message],
        tools: Optional[list] = None,
        model: Optional[str] = None,
    ) -> ProxyResult:
        """Full chat — may return tool calls (when tools are supplied).

        tools format is OpenAI-style:
        [{"type":"function","function":{"name":..,"description":..,"parameters":{...}}}, ...]

        Raises RuntimeError for: unreachable proxy, non-200 status, invalid
        JSON, or a response without choices.
        """
        url = f"{self.base_url}/v1/chat/completions"
        # Pydantic dump with exclude_none so role=tool works without tool_calls
        payload = {
            "model": model or self.model,
            "messages": [m.model_dump(exclude_none=True) for m in messages],
        }
        if tools:
            payload["tools"] = tools
        logger.info("Proxy → %s (%d Messages, %d tools, model=%s)",
                    url, len(messages), len(tools or []), payload["model"])
        try:
            r = self._client.post(url, json=payload)
        except httpx.RequestError as exc:
            raise RuntimeError(f"Proxy unreachable: {exc}") from exc
        if r.status_code != 200:
            raise RuntimeError(f"Proxy HTTP {r.status_code}: {r.text[:300]}")
        try:
            data = r.json()
        except Exception as exc:
            raise RuntimeError(f"Proxy invalid JSON: {exc}") from exc

        choices = data.get("choices") or []
        if not choices:
            raise RuntimeError(f"Proxy ohne choices: {str(data)[:300]}")

        msg = choices[0].get("message") or {}
        finish_reason = choices[0].get("finish_reason", "")

        content = msg.get("content") or ""
        if isinstance(content, list):
            # Some backends return content as a list of typed parts;
            # keep only the text parts and join them.
            content = "".join(
                part.get("text", "") for part in content if isinstance(part, dict) and part.get("type") == "text"
            )

        tool_calls_raw = msg.get("tool_calls") or []
        tool_calls = []
        import json as _json
        for tc in tool_calls_raw:
            fn = tc.get("function") or {}
            args_raw = fn.get("arguments", "{}")
            args: dict
            if isinstance(args_raw, dict):
                args = args_raw
            else:
                # Arguments usually arrive as a JSON string; if it does not
                # parse, hand the raw string to the caller instead of failing.
                try:
                    args = _json.loads(args_raw)
                except Exception:
                    args = {"_raw": args_raw}
            tool_calls.append({
                "id": tc.get("id", ""),
                "name": fn.get("name", ""),
                "arguments": args,
            })

        # Record call metric — token estimate for quota monitoring
        metrics.log_call(payload["model"], messages, content or "")

        return ProxyResult(content=content or "", tool_calls=tool_calls, finish_reason=finish_reason)

    def close(self) -> None:
        """Close the persistent HTTP connection; never raises."""
        try:
            self._client.close()
        except Exception:
            pass
|
||||
@@ -0,0 +1,14 @@
|
||||
fastapi==0.115.0
|
||||
uvicorn[standard]==0.32.0
|
||||
pydantic==2.9.2
|
||||
httpx==0.27.2
|
||||
websockets==13.1
|
||||
|
||||
# Vector-DB
|
||||
qdrant-client==1.12.1
|
||||
|
||||
# Embeddings (laeuft auf CPU, ~120MB Modell)
|
||||
sentence-transformers==3.2.1
|
||||
|
||||
# Utility
|
||||
python-multipart==0.0.12
|
||||
@@ -0,0 +1,373 @@
|
||||
"""
|
||||
Skill-Manager — Filesystem-Layer fuer ARIAs Faehigkeiten.
|
||||
|
||||
Layout:
|
||||
/data/skills/<name>/
|
||||
skill.json - Manifest
|
||||
README.md - Beschreibung (vom Stil her: was, wann, wie aufrufen)
|
||||
run.sh - Entry-Point (sh, python -m, was auch immer)
|
||||
requirements.txt - optional, fuer local-venv
|
||||
venv/ - automatisch erzeugt bei local-venv
|
||||
bin/ - statische Binaries (fuer local-bin)
|
||||
logs/ - <ts>.json Run-Logs (append-only pro Run)
|
||||
|
||||
Manifest (skill.json):
|
||||
{
|
||||
"name": "youtube2mp3",
|
||||
"description": "Konvertiert YouTube-Video-URL zu MP3",
|
||||
"execution": "local-venv" | "local-bin" | "bash",
|
||||
"entry": "run.sh",
|
||||
"args": [{"name": "url", "required": true}, ...],
|
||||
"requires": {"pip": [...], "binaries": [...]},
|
||||
"active": true,
|
||||
"created_at": "ISO",
|
||||
"updated_at": "ISO",
|
||||
"last_used": null | "ISO",
|
||||
"use_count": 0,
|
||||
"version": "1.0",
|
||||
"author": "aria" | "stefan"
|
||||
}
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import time
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
SKILLS_DIR = Path(os.environ.get("SKILLS_DIR", "/data/skills"))
|
||||
SHARED_UPLOADS = Path("/shared/uploads")
|
||||
|
||||
VALID_EXECUTIONS = {"local-venv", "local-bin", "bash"}
|
||||
NAME_RE = re.compile(r"^[a-zA-Z0-9_-]{2,60}$")
|
||||
|
||||
|
||||
def _now() -> str:
|
||||
return datetime.now(timezone.utc).isoformat()
|
||||
|
||||
|
||||
def _safe_name(name: str) -> str:
    """Validate a skill name (2-60 chars of [a-zA-Z0-9_-]); raise ValueError otherwise."""
    if isinstance(name, str) and NAME_RE.match(name):
        return name
    raise ValueError(f"Ungültiger Skill-Name: {name!r}")
|
||||
|
||||
|
||||
def _skill_dir(name: str) -> Path:
    """Directory of a skill, with the name validated first."""
    return SKILLS_DIR.joinpath(_safe_name(name))
|
||||
|
||||
|
||||
# ─── Listing ────────────────────────────────────────────────────────
|
||||
|
||||
def list_skills(active_only: bool = False) -> list[dict]:
    """Return all skill manifests sorted by directory name.

    Directories without a readable skill.json are skipped; with
    active_only=True, deactivated skills are filtered out too."""
    if not SKILLS_DIR.exists():
        return []
    manifests: list[dict] = []
    for child in sorted(SKILLS_DIR.iterdir()):
        if not child.is_dir():
            continue
        manifest = read_manifest(child.name)
        if manifest is None:
            continue
        if active_only and not manifest.get("active", True):
            continue
        manifests.append(manifest)
    return manifests
|
||||
|
||||
|
||||
def read_manifest(name: str) -> Optional[dict]:
    """Load skill.json for *name*; None when missing or unreadable.

    The broad except also covers an invalid name (ValueError from
    _safe_name) — callers get None instead of an exception."""
    try:
        manifest_file = _skill_dir(name) / "skill.json"
        if manifest_file.exists():
            return json.loads(manifest_file.read_text(encoding="utf-8"))
    except Exception as exc:
        logger.warning("Manifest lesen %s: %s", name, exc)
    return None
|
||||
|
||||
|
||||
def write_manifest(name: str, manifest: dict) -> None:
    """Persist skill.json for *name*, stamping updated_at (mutates *manifest*).

    Written atomically (tmp file + rename) so a crash or full disk mid-write
    can never leave a truncated skill.json behind — the same pattern the
    trigger store uses. A corrupt manifest would otherwise make the skill
    invisible to read_manifest/list_skills.
    """
    d = _skill_dir(name)
    d.mkdir(parents=True, exist_ok=True)
    manifest["updated_at"] = _now()
    target = d / "skill.json"
    tmp = target.with_suffix(".tmp")
    tmp.write_text(json.dumps(manifest, indent=2, ensure_ascii=False), encoding="utf-8")
    # rename is atomic on POSIX filesystems — readers see old or new, never half
    tmp.replace(target)
|
||||
|
||||
|
||||
def read_readme(name: str) -> str:
    """README.md contents of a skill, or '' when the file is absent."""
    readme = _skill_dir(name) / "README.md"
    if not readme.exists():
        return ""
    return readme.read_text(encoding="utf-8")
|
||||
|
||||
|
||||
# ─── Create / Update / Delete ────────────────────────────────────────
|
||||
|
||||
def create_skill(
    name: str,
    description: str,
    execution: str,
    entry_code: str,
    readme: str = "",
    args: Optional[list] = None,
    requires: Optional[dict] = None,
    pip_packages: Optional[list[str]] = None,
    author: str = "aria",
) -> dict:
    """Create a new skill on disk. Raises ValueError on invalid inputs
    (bad name, unknown execution mode, or an already-existing skill).

    entry_code is written to run.py (local-venv) or run.sh (everything else).
    For local-venv a venv is created immediately and pip_packages installed;
    if that fails the skill is kept but marked inactive with a setup_error.

    Returns the manifest dict that was persisted.
    """
    name = _safe_name(name)
    if execution not in VALID_EXECUTIONS:
        raise ValueError(f"execution muss eines von {VALID_EXECUTIONS} sein")
    d = _skill_dir(name)
    if d.exists():
        raise ValueError(f"Skill '{name}' existiert bereits — erst loeschen oder updaten")

    d.mkdir(parents=True)
    (d / "logs").mkdir()

    # Entry file: run.sh or run.py depending on execution mode
    if execution == "local-venv":
        entry_path = d / "run.py"
        entry_path.write_text(entry_code, encoding="utf-8")
        entry_name = "run.py"
        # requirements.txt is also kept on disk so import_skill can rebuild the venv
        (d / "requirements.txt").write_text("\n".join(pip_packages or []) + "\n", encoding="utf-8")
    else:
        entry_path = d / "run.sh"
        # Prepend a shebang (plus strict mode) when the code has none
        content = entry_code if entry_code.startswith("#!") else "#!/usr/bin/env bash\nset -euo pipefail\n" + entry_code
        entry_path.write_text(content, encoding="utf-8")
        entry_path.chmod(0o755)
        entry_name = "run.sh"

    # README — fall back to a minimal name+description stub
    (d / "README.md").write_text(readme or f"# {name}\n\n{description}\n", encoding="utf-8")

    manifest = {
        "name": name,
        "description": description,
        "execution": execution,
        "entry": entry_name,
        "args": args or [],
        "requires": requires or {},
        "active": True,
        "created_at": _now(),
        "updated_at": _now(),
        "last_used": None,
        "use_count": 0,
        "version": "1.0",
        "author": author,
    }
    write_manifest(name, manifest)

    # Build the venv for local-venv skills
    if execution == "local-venv":
        try:
            _setup_venv(d, pip_packages or [])
        except Exception as exc:
            # venv build failed → skill stays on disk, but deactivated
            manifest["active"] = False
            manifest["setup_error"] = str(exc)[:500]
            write_manifest(name, manifest)
            logger.warning("Skill %s: venv-Setup fehlgeschlagen → deaktiviert: %s", name, exc)

    logger.info("Skill erstellt: %s (%s)", name, execution)
    return manifest
|
||||
|
||||
|
||||
def _setup_venv(skill_dir: Path, pip_packages: list[str]) -> None:
    """Create the skill's venv and install its pip packages.

    Fix: use sys.executable (the interpreter running the brain) instead of a
    bare "python" — a `python` shim may not exist on PATH in slim container
    images, and even when it does it can resolve to a different interpreter
    version than the one the brain runs on.

    Raises subprocess.CalledProcessError / TimeoutExpired on failure; callers
    deactivate the skill in that case.
    """
    import sys  # local import: only needed here, keeps module imports unchanged

    venv = skill_dir / "venv"
    logger.info("venv erstellen: %s", venv)
    subprocess.run([sys.executable, "-m", "venv", str(venv)], check=True, timeout=120)
    pip = venv / "bin" / "pip"
    if pip_packages:
        subprocess.run([str(pip), "install", "--no-cache-dir", *pip_packages], check=True, timeout=600)
|
||||
|
||||
|
||||
def update_skill(name: str, patch: dict) -> dict:
    """Apply a whitelisted patch to a skill manifest and persist it.

    Raises ValueError when the skill does not exist. Unknown patch keys
    are silently ignored."""
    manifest = read_manifest(name)
    if manifest is None:
        raise ValueError(f"Skill '{name}' nicht gefunden")
    # Only these fields may be changed from the outside; name/author/stats stay fixed.
    allowed = {"description", "args", "requires", "active", "version", "entry"}
    manifest.update({k: v for k, v in patch.items() if k in allowed})
    write_manifest(name, manifest)
    return manifest
|
||||
|
||||
|
||||
def delete_skill(name: str) -> None:
    """Remove a skill directory entirely (manifest, code, venv, logs).

    Raises ValueError when the skill does not exist."""
    skill_path = _skill_dir(name)
    if not skill_path.exists():
        raise ValueError(f"Skill '{name}' nicht gefunden")
    shutil.rmtree(skill_path)
    logger.info("Skill geloescht: %s", name)
|
||||
|
||||
|
||||
# ─── Run ────────────────────────────────────────────────────────────
|
||||
|
||||
def run_skill(name: str, args: Optional[dict] = None, timeout_sec: int = 300) -> dict:
    """Execute a skill. Args are passed as ENV vars (prefix ARG_,
    e.g. ARG_URL for args["url"]).

    Raises ValueError for an unknown or deactivated skill. A run log is
    written to <skill>/logs/<ts>-<id>.json (best effort) and the manifest's
    last_used/use_count stats are updated.

    Returns: {ok, exit_code, stdout, stderr, duration_sec, log_path, ...}
    (exit_code is -1 and timed_out True when the timeout fired).
    """
    manifest = read_manifest(name)
    if manifest is None:
        raise ValueError(f"Skill '{name}' nicht gefunden")
    if not manifest.get("active", True):
        raise ValueError(f"Skill '{name}' ist deaktiviert")

    d = _skill_dir(name)
    entry = manifest.get("entry", "run.sh")
    exec_mode = manifest.get("execution", "bash")

    env = os.environ.copy()
    # Skill args as ENV vars; keys that are not valid identifiers are dropped
    for k, v in (args or {}).items():
        if not re.match(r"^[a-zA-Z][a-zA-Z0-9_]*$", k):
            continue
        env[f"ARG_{k.upper()}"] = str(v)
    env["SKILL_DIR"] = str(d)
    env["SHARED_UPLOADS"] = str(SHARED_UPLOADS)

    # Build the command for the skill's execution mode
    if exec_mode == "local-venv":
        python = d / "venv" / "bin" / "python"
        cmd = [str(python), str(d / entry)]
    elif exec_mode == "local-bin":
        # Skill ships its own bin/ — prepend it to PATH
        env["PATH"] = f"{d / 'bin'}:{env.get('PATH', '')}"
        cmd = [str(d / entry)]
    else:  # bash
        cmd = [str(d / entry)]

    # Unique log id: unix timestamp + short random suffix
    log_id = f"{int(time.time())}-{uuid.uuid4().hex[:8]}"
    log_path = d / "logs" / f"{log_id}.json"

    t0 = time.time()
    try:
        proc = subprocess.run(
            cmd, env=env, cwd=str(d),
            capture_output=True, text=True, timeout=timeout_sec,
        )
        out_text = proc.stdout
        err_text = proc.stderr
        exit_code = proc.returncode
        timed_out = False
    except subprocess.TimeoutExpired as exc:
        # Keep whatever output the process produced before being killed
        out_text = exc.stdout or ""
        err_text = (exc.stderr or "") + f"\n[TIMEOUT {timeout_sec}s]"
        exit_code = -1
        timed_out = True
    duration = time.time() - t0

    # Write the run log (output truncated so logs cannot explode)
    record = {
        "ts": _now(),
        "args": args or {},
        "exit_code": exit_code,
        "duration_sec": round(duration, 2),
        "stdout": (out_text or "")[:8000],
        "stderr": (err_text or "")[:8000],
        "timed_out": timed_out,
    }
    try:
        log_path.write_text(json.dumps(record, indent=2, ensure_ascii=False), encoding="utf-8")
    except Exception:
        # Logging is best effort — the caller still gets the record below
        pass

    # Update usage stats
    manifest["last_used"] = _now()
    manifest["use_count"] = int(manifest.get("use_count", 0)) + 1
    write_manifest(name, manifest)

    record["ok"] = exit_code == 0
    record["log_path"] = str(log_path)
    return record
|
||||
|
||||
|
||||
def list_logs(name: str, limit: int = 50) -> list[dict]:
    """Most recent run logs of a skill, newest first, up to *limit*.

    Corrupt or non-dict log files are skipped silently."""
    logs_dir = _skill_dir(name) / "logs"
    if not logs_dir.exists():
        return []
    records: list[dict] = []
    for log_file in sorted(logs_dir.glob("*.json"), reverse=True)[:limit]:
        try:
            rec = json.loads(log_file.read_text(encoding="utf-8"))
            rec["log_id"] = log_file.stem
        except Exception:
            continue
        records.append(rec)
    return records
|
||||
|
||||
|
||||
# ─── Export / Import ────────────────────────────────────────────────
|
||||
|
||||
def export_skill(name: str) -> bytes:
    """Pack a skill as an in-memory tar.gz and return the bytes.

    venv/, logs/ and __pycache__/ are excluded — venv and logs are rebuilt
    on import. Raises ValueError when the skill does not exist."""
    import io
    import tarfile
    skill_path = _skill_dir(name)
    if not skill_path.exists():
        raise ValueError(f"Skill '{name}' nicht gefunden")
    excluded = ("venv", "logs", "__pycache__")
    sink = io.BytesIO()
    with tarfile.open(fileobj=sink, mode="w:gz") as archive:
        for child in skill_path.iterdir():
            if child.name not in excluded:
                archive.add(child, arcname=f"{name}/{child.name}")
    return sink.getvalue()
|
||||
|
||||
|
||||
def import_skill(tar_bytes: bytes, overwrite: bool = False) -> dict:
    """Import a skill from a tar.gz archive; returns its manifest.

    Security fix: the old guard compared resolved paths with a plain string
    startswith, which a sibling directory like /data/skills-evil would pass;
    it also allowed symlink/hardlink members. Now every member must resolve
    strictly inside SKILLS_DIR (Path.relative_to) and link members are
    rejected — all validated BEFORE anything is extracted.

    Raises ValueError on an empty archive, an invalid skill name, an unsafe
    member path, or an existing skill when overwrite is False.
    """
    import io
    import tarfile
    SKILLS_DIR.mkdir(parents=True, exist_ok=True)
    with tarfile.open(fileobj=io.BytesIO(tar_bytes), mode="r:gz") as tar:
        members = tar.getmembers()
        if not members:
            raise ValueError("Leeres Archiv")
        # The archive's root directory is the skill name
        root = members[0].name.split("/", 1)[0]
        name = _safe_name(root)
        d = _skill_dir(name)
        if d.exists():
            if not overwrite:
                raise ValueError(f"Skill '{name}' existiert bereits — overwrite=true setzen")
            shutil.rmtree(d)
        # Validate ALL members before extracting anything
        base = SKILLS_DIR.resolve()
        for m in members:
            if m.issym() or m.islnk():
                raise ValueError(f"Unsicherer Pfad im Archiv: {m.name}")
            target = (SKILLS_DIR / m.name).resolve()
            try:
                # Proper containment check — not a string prefix test
                target.relative_to(base)
            except ValueError:
                raise ValueError(f"Unsicherer Pfad im Archiv: {m.name}") from None
        tar.extractall(SKILLS_DIR)
    # Recreate logs/ — export deliberately strips it
    (d / "logs").mkdir(exist_ok=True)
    # Rebuild the venv for local-venv skills (the venv is never shipped)
    manifest = read_manifest(name) or {}
    if manifest.get("execution") == "local-venv":
        req_file = d / "requirements.txt"
        pip_packages: list[str] = []
        if req_file.exists():
            pip_packages = [l.strip() for l in req_file.read_text().splitlines() if l.strip() and not l.startswith("#")]
        try:
            _setup_venv(d, pip_packages)
        except Exception as exc:
            logger.warning("Skill-Import %s: venv-Setup fehlgeschlagen: %s", name, exc)
            manifest["active"] = False
            manifest["setup_error"] = str(exc)[:500]
            write_manifest(name, manifest)
    return manifest
|
||||
@@ -0,0 +1,229 @@
|
||||
"""
|
||||
Triggers — passive Aufweck-Quellen fuer ARIA.
|
||||
|
||||
Skills sind aktiv (ARIA ruft sie). Triggers sind passiv — das System ruft
|
||||
ARIA wenn ein Event passiert. Drei Typen:
|
||||
|
||||
timer Einmalig zu einem festen Zeitpunkt
|
||||
watcher Recurring: Condition pruefen, bei True → feuern (mit Throttle)
|
||||
cron Cron-Expression (vorerst nicht implementiert, Platzhalter)
|
||||
|
||||
Layout:
|
||||
/data/triggers/<name>.json Manifest pro Trigger
|
||||
/data/triggers/logs/<name>.jsonl Append-only Log pro Feuerung
|
||||
|
||||
Polling-Kosten: Brain-internes Background-Polling (kein LLM-Call).
|
||||
ARIA wird nur aufgeweckt wenn ein Trigger tatsaechlich feuert.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
TRIGGERS_DIR = Path(os.environ.get("TRIGGERS_DIR", "/data/triggers"))
|
||||
LOGS_DIR = TRIGGERS_DIR / "logs"
|
||||
NAME_RE = re.compile(r"^[a-zA-Z0-9_-]{2,60}$")
|
||||
VALID_TYPES = {"timer", "watcher", "cron"}
|
||||
|
||||
|
||||
def _now_iso() -> str:
|
||||
return datetime.now(timezone.utc).isoformat()
|
||||
|
||||
|
||||
def _safe_name(name: str) -> str:
    """Validate a trigger name against NAME_RE; raise ValueError on failure."""
    if isinstance(name, str) and NAME_RE.match(name):
        return name
    raise ValueError(f"Ungueltiger Trigger-Name: {name!r}")
|
||||
|
||||
|
||||
def _path(name: str) -> Path:
    """Manifest file path of a (validated) trigger name."""
    return TRIGGERS_DIR.joinpath(f"{_safe_name(name)}.json")
|
||||
|
||||
|
||||
def _ensure_dirs():
    """Create the triggers and logs directories if missing (idempotent)."""
    for directory in (TRIGGERS_DIR, LOGS_DIR):
        directory.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
# ─── CRUD ───────────────────────────────────────────────────────────
|
||||
|
||||
def list_triggers(active_only: bool = False) -> list[dict]:
    """All trigger manifests, sorted by filename.

    Unreadable manifests are logged and skipped; with active_only=True,
    inactive triggers are filtered out."""
    if not TRIGGERS_DIR.exists():
        return []
    result: list[dict] = []
    for manifest_file in sorted(TRIGGERS_DIR.glob("*.json")):
        try:
            trigger = json.loads(manifest_file.read_text(encoding="utf-8"))
        except Exception as e:
            logger.warning("Trigger lesen %s: %s", manifest_file, e)
            continue
        if active_only and not trigger.get("active", True):
            continue
        result.append(trigger)
    return result
|
||||
|
||||
|
||||
def read(name: str) -> Optional[dict]:
    """Load one trigger manifest; None when absent or unreadable."""
    manifest_file = _path(name)
    if not manifest_file.exists():
        return None
    try:
        return json.loads(manifest_file.read_text(encoding="utf-8"))
    except Exception as e:
        logger.warning("Trigger %s lesen: %s", name, e)
        return None
|
||||
|
||||
|
||||
def write(name: str, data: dict) -> None:
    """Persist a trigger manifest atomically, stamping updated_at.

    Writes to a .tmp sibling first and renames over the target so readers
    never observe a half-written file."""
    _ensure_dirs()
    data["updated_at"] = _now_iso()
    final = _path(name)
    staging = final.with_suffix(".tmp")
    staging.write_text(json.dumps(data, indent=2, ensure_ascii=False), encoding="utf-8")
    staging.replace(final)
|
||||
|
||||
|
||||
def delete(name: str) -> None:
    """Remove a trigger manifest and its fire-log (if any).

    Raises ValueError when the trigger does not exist."""
    manifest_file = _path(name)
    if not manifest_file.exists():
        raise ValueError(f"Trigger '{name}' nicht gefunden")
    manifest_file.unlink()
    # Drop the jsonl fire-log too, so a re-created trigger starts fresh
    log_file = LOGS_DIR / f"{_safe_name(name)}.jsonl"
    if log_file.exists():
        log_file.unlink()
|
||||
|
||||
|
||||
def update(name: str, patch: dict) -> dict:
    """Apply a whitelisted subset of *patch* to a trigger and persist it.

    Keys outside the whitelist are silently ignored.

    Raises:
        ValueError: if the trigger does not exist.
    """
    data = read(name)
    if data is None:
        raise ValueError(f"Trigger '{name}' nicht gefunden")
    allowed = {"active", "message", "condition", "throttle_sec",
               "check_interval_sec", "fires_at"}
    data.update({k: v for k, v in patch.items() if k in allowed})
    write(name, data)
    return data
|
||||
|
||||
|
||||
# ─── Create-Helpers (typ-spezifisch) ────────────────────────────────
|
||||
|
||||
def create_timer(
    name: str,
    fires_at_iso: str,
    message: str,
    author: str = "aria",
) -> dict:
    """Create a one-shot timer trigger that fires at *fires_at_iso*.

    Args:
        name: trigger name (validated against NAME_RE).
        fires_at_iso: ISO-8601 timestamp; a trailing "Z" is accepted.
        message: text delivered when the timer fires.
        author: creator of the trigger.

    Returns:
        The persisted trigger dict.

    Raises:
        ValueError: on invalid name, duplicate trigger, or bad timestamp.
    """
    _safe_name(name)
    if _path(name).exists():
        raise ValueError(f"Trigger '{name}' existiert schon")
    # Validate the ISO timestamp. The "Z" suffix is normalized because
    # datetime.fromisoformat() only accepts it from Python 3.11 onward.
    try:
        datetime.fromisoformat(fires_at_iso.replace("Z", "+00:00"))
    except Exception as e:
        # FIX: chain the original parse error instead of discarding it,
        # so logs show *why* the timestamp was rejected.
        raise ValueError(f"fires_at_iso ungueltig: {fires_at_iso}") from e
    data = {
        "name": name,
        "type": "timer",
        "active": True,
        "author": author,
        "created_at": _now_iso(),
        "fires_at": fires_at_iso,
        "message": message,
        "fire_count": 0,
        "last_fired_at": None,
    }
    write(name, data)
    logger.info("Trigger angelegt: %s (timer, fires_at=%s)", name, fires_at_iso)
    return data
|
||||
|
||||
|
||||
def create_watcher(
    name: str,
    condition: str,
    message: str,
    check_interval_sec: int = 300,
    throttle_sec: int = 3600,
    author: str = "aria",
) -> dict:
    """Create a watcher trigger that fires when *condition* becomes true.

    The condition is parsed up front so syntax errors surface immediately.
    Intervals are clamped: checks happen at most every 30 s, and the
    throttle is never negative.

    Raises:
        ValueError: on invalid name, duplicate trigger, or bad condition.
    """
    _safe_name(name)
    if _path(name).exists():
        raise ValueError(f"Trigger '{name}' existiert schon")
    # Parse the condition to validate its syntax (raises on error);
    # the parsed result itself is discarded.
    from watcher import parse_condition
    parse_condition(condition)
    check_interval_sec = max(check_interval_sec, 30)
    throttle_sec = max(throttle_sec, 0)
    data = {
        "name": name,
        "type": "watcher",
        "active": True,
        "author": author,
        "created_at": _now_iso(),
        "condition": condition,
        "check_interval_sec": int(check_interval_sec),
        "throttle_sec": int(throttle_sec),
        "message": message,
        "fire_count": 0,
        "last_fired_at": None,
        "last_checked_at": None,
    }
    write(name, data)
    logger.info("Trigger angelegt: %s (watcher, cond='%s')", name, condition)
    return data
|
||||
|
||||
|
||||
# ─── Feuern + Log ───────────────────────────────────────────────────
|
||||
|
||||
def mark_fired(name: str) -> dict:
    """Record a firing: bump fire_count, stamp last_fired_at, persist.

    Timer triggers are one-shot and are deactivated after firing.

    Raises:
        ValueError: if the trigger does not exist.
    """
    data = read(name)
    if data is None:
        raise ValueError(f"Trigger '{name}' nicht gefunden")
    previous_count = int(data.get("fire_count", 0))
    data["fire_count"] = previous_count + 1
    data["last_fired_at"] = _now_iso()
    if data.get("type") == "timer":
        data["active"] = False  # one-shot: never fires twice
    write(name, data)
    return data
|
||||
|
||||
|
||||
def append_log(name: str, entry: dict) -> None:
    """Append *entry* (plus a timestamp) as one JSONL line to the trigger log.

    Failures are logged, never raised — logging must not break the caller.
    """
    _ensure_dirs()
    log_file = LOGS_DIR / f"{_safe_name(name)}.jsonl"
    record = {"ts": _now_iso(), **entry}
    try:
        with log_file.open("a", encoding="utf-8") as fh:
            fh.write(json.dumps(record, ensure_ascii=False) + "\n")
    except Exception as e:
        logger.warning("Trigger-Log append %s: %s", name, e)
|
||||
|
||||
|
||||
def list_logs(name: str, limit: int = 50) -> list[dict]:
    """Return up to the last *limit* parsed log entries for a trigger.

    Malformed lines are skipped; any read error yields an empty list.

    Args:
        name: trigger name (validated against NAME_RE).
        limit: maximum number of trailing entries; non-positive returns [].
    """
    # FIX: with limit == 0 the old slice lines[-0:] evaluated to lines[0:]
    # and returned ALL lines instead of none (negative limits were similarly
    # wrong). Guard non-positive limits explicitly.
    if limit <= 0:
        return []
    log_file = LOGS_DIR / f"{_safe_name(name)}.jsonl"
    if not log_file.exists():
        return []
    try:
        lines = log_file.read_text(encoding="utf-8").splitlines()
    except Exception:
        return []
    out: list[dict] = []
    for line in lines[-limit:]:
        try:
            out.append(json.loads(line))
        except Exception:
            pass  # skip unparseable lines, keep the rest
    return out
|
||||
@@ -0,0 +1,310 @@
|
||||
"""
|
||||
Built-in Condition-Variablen + sicherer Mini-Parser fuer Watcher-Triggers.
|
||||
|
||||
Erlaubte Variablen + die EINE Funktion `near(lat, lon, radius_m)` kommen
|
||||
aus diesem Modul. Condition-Ausdruck ist ein sicheres Subset von Python
|
||||
(kein eval, kein exec): nur Vergleiche, Boolean-Operatoren, Whitelisted
|
||||
Funktionen, Variablen aus describe_variables(), Konstanten (Zahl/Bool/Str).
|
||||
|
||||
Beispiele:
|
||||
disk_free_gb < 5
|
||||
hour_of_day == 8 and day_of_week == "mon"
|
||||
is_weekend and minute_of_hour == 0
|
||||
near(53.123, 7.456, 500)
|
||||
current_lat and location_age_sec < 120
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import ast
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import shutil
|
||||
import time
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
STATE_DIR = Path("/shared/state")
|
||||
|
||||
|
||||
# ─── State-Helfer (gemeinsam mit Bridge: /shared/state/*.json) ──────
|
||||
|
||||
def _read_state(name: str) -> dict | None:
    """Load /shared/state/<name>.json; None when missing or unparseable."""
    state_file = STATE_DIR / f"{name}.json"
    if not state_file.exists():
        return None
    try:
        raw = state_file.read_text(encoding="utf-8")
        return json.loads(raw)
    except Exception:
        return None
|
||||
|
||||
|
||||
# ─── Variablen-Quellen ──────────────────────────────────────────────
|
||||
|
||||
def _disk_stats() -> tuple[float, float]:
|
||||
"""Returns (free_gb, free_pct). Schaut /shared (geteiltes Volume) — sonst /."""
|
||||
target = "/shared" if os.path.exists("/shared") else "/"
|
||||
try:
|
||||
st = shutil.disk_usage(target)
|
||||
free_gb = st.free / (1024 ** 3)
|
||||
free_pct = 100.0 * st.free / st.total if st.total else 0.0
|
||||
return free_gb, free_pct
|
||||
except Exception as e:
|
||||
logger.warning("disk_usage: %s", e)
|
||||
return 0.0, 0.0
|
||||
|
||||
|
||||
def _uptime_sec() -> int:
|
||||
try:
|
||||
with open("/proc/uptime", "r") as f:
|
||||
return int(float(f.read().split()[0]))
|
||||
except Exception:
|
||||
return 0
|
||||
|
||||
|
||||
def _ram_free_mb() -> int:
|
||||
"""Container-RAM: MemAvailable aus /proc/meminfo (kB → MB)."""
|
||||
try:
|
||||
with open("/proc/meminfo", "r") as f:
|
||||
for line in f:
|
||||
if line.startswith("MemAvailable:"):
|
||||
return int(line.split()[1]) // 1024
|
||||
except Exception:
|
||||
pass
|
||||
return 0
|
||||
|
||||
|
||||
def _cpu_load_1min() -> float:
|
||||
"""load avg ueber 1 Minute (linux). Vorsicht: das ist die HOST-load,
|
||||
nicht container-spezifisch."""
|
||||
try:
|
||||
with open("/proc/loadavg", "r") as f:
|
||||
return float(f.read().split()[0])
|
||||
except Exception:
|
||||
return 0.0
|
||||
|
||||
|
||||
_DAYS = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
|
||||
|
||||
|
||||
def _gps_state() -> dict[str, Any]:
    """Last known position from /shared/state/location.json.

    Returns a dict with current_lat / current_lon (floats or None) and
    location_age_sec (-1 when no timestamp is recorded).
    """
    data = _read_state("location") or {}
    now = int(time.time())
    ts = data.get("ts_unix")
    age = int(now - ts) if isinstance(ts, (int, float)) else -1

    def _coord(value: Any) -> float | None:
        # Non-numeric (missing, string, None) coordinates become None.
        return float(value) if isinstance(value, (int, float)) else None

    return {
        "current_lat": _coord(data.get("lat")),
        "current_lon": _coord(data.get("lon")),
        "location_age_sec": age,
    }
|
||||
|
||||
|
||||
def _user_activity_age() -> int:
    """Seconds since the last user action (chat or voice); -1 if never."""
    ts = (_read_state("activity") or {}).get("last_user_ts")
    if isinstance(ts, (int, float)):
        return int(time.time() - ts)
    return -1
|
||||
|
||||
|
||||
def collect_variables() -> dict[str, Any]:
    """Return a snapshot of all built-in condition variables plus the near() helper.

    The returned dict is used as the locals namespace when evaluating a
    watcher condition; it therefore also carries the callable "near".
    """
    free_gb, free_pct = _disk_stats()
    now = datetime.now()
    gps = _gps_state()

    # Memory counts from the vector DB (lazy import to avoid a circular import)
    memory_count = 0
    pinned_count = 0
    try:
        from main import store  # type: ignore
        s = store()
        memory_count = s.count()
        try:
            pinned_count = len(s.list_pinned())
        except Exception:
            pass
    except Exception:
        pass

    vars_: dict[str, Any] = {
        # Disk + system
        "disk_free_gb": round(free_gb, 2),
        "disk_free_pct": round(free_pct, 1),
        "ram_free_mb": _ram_free_mb(),
        "cpu_load_1min": round(_cpu_load_1min(), 2),
        "uptime_sec": _uptime_sec(),

        # Time (local clock)
        "hour_of_day": now.hour,
        "minute_of_hour": now.minute,
        "day_of_month": now.day,
        "month": now.month,
        "year": now.year,
        "day_of_week": _DAYS[now.weekday()],
        "is_weekend": now.weekday() >= 5,
        "unix_timestamp": int(time.time()),

        # GPS
        "current_lat": gps["current_lat"],
        "current_lon": gps["current_lon"],
        "location_age_sec": gps["location_age_sec"],

        # Activity
        "last_user_message_ago_sec": _user_activity_age(),

        # Memory
        "memory_count": memory_count,
        "pinned_count": pinned_count,

        # rvs_connected: the brain cannot determine this reliably yet
        # (the bridge would have to write its own heartbeat state — later)
        "rvs_connected": False,
    }

    # Function helper — recognized by the parser as an ast.Call named "near".
    # Closure over the GPS values so eval() needs no extra variables.
    def _near(lat: float, lon: float, radius_m: float) -> bool:
        """Haversine distance: True if the current position is < radius_m from the point."""
        cur_lat = vars_.get("current_lat")
        cur_lon = vars_.get("current_lon")
        if cur_lat is None or cur_lon is None:
            return False
        try:
            R = 6371000.0  # mean Earth radius in metres
            phi1 = math.radians(float(cur_lat))
            phi2 = math.radians(float(lat))
            dphi = math.radians(float(lat) - float(cur_lat))
            dlam = math.radians(float(lon) - float(cur_lon))
            a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlam / 2) ** 2
            distance = 2 * R * math.asin(math.sqrt(a))
            return distance < float(radius_m)
        except Exception:
            return False

    vars_["near"] = _near
    return vars_
|
||||
|
||||
|
||||
def describe_variables() -> list[dict]:
    """Variable descriptions — used for the system prompt and the UI.

    Each entry has "name", "type" and a human-readable "desc"; the set of
    names here is also the whitelist enforced by parse_condition().
    """
    return [
        # Disk / System
        {"name": "disk_free_gb", "type": "number", "desc": "freier Plattenplatz in GB (auf /shared)"},
        {"name": "disk_free_pct", "type": "number", "desc": "freier Plattenplatz in Prozent"},
        {"name": "ram_free_mb", "type": "number", "desc": "freier RAM im Brain-Container (MB)"},
        {"name": "cpu_load_1min", "type": "number", "desc": "Load-Avg 1min (Host)"},
        {"name": "uptime_sec", "type": "number", "desc": "Sekunden seit Brain-Start"},
        # Time
        {"name": "hour_of_day", "type": "number", "desc": "0..23, lokale Zeit"},
        {"name": "minute_of_hour", "type": "number", "desc": "0..59"},
        {"name": "day_of_month", "type": "number", "desc": "1..31"},
        {"name": "month", "type": "number", "desc": "1..12"},
        {"name": "year", "type": "number", "desc": "z.B. 2026"},
        {"name": "day_of_week", "type": "string", "desc": "mon|tue|wed|thu|fri|sat|sun"},
        {"name": "is_weekend", "type": "bool", "desc": "True wenn Samstag oder Sonntag"},
        {"name": "unix_timestamp", "type": "number", "desc": "Sekunden seit Epoche (UTC)"},
        # GPS
        {"name": "current_lat", "type": "number", "desc": "letzte bekannte Breitengrad (oder None)"},
        {"name": "current_lon", "type": "number", "desc": "letzte bekannte Laengengrad (oder None)"},
        {"name": "location_age_sec", "type": "number", "desc": "Sekunden seit letzter Position (-1 = nie)"},
        # Activity
        {"name": "last_user_message_ago_sec", "type": "number",
         "desc": "Sekunden seit letztem User-Input (-1 = nie)"},
        # Memory
        {"name": "memory_count", "type": "number", "desc": "Anzahl Memories total"},
        {"name": "pinned_count", "type": "number", "desc": "Anzahl pinned (Hot Memory)"},
        {"name": "rvs_connected", "type": "bool", "desc": "RVS-Verbindung (z.Zt. immer False)"},
    ]
|
||||
|
||||
|
||||
def describe_functions() -> list[dict]:
    """Whitelisted functions available in condition expressions.

    The names listed here feed _ALLOWED_FUNCTIONS, which parse_condition()
    checks every ast.Call against.
    """
    return [
        {
            "name": "near",
            "signature": "near(lat, lon, radius_m)",
            "desc": "True wenn die aktuelle GPS-Position innerhalb von radius_m Metern "
                    "vom Punkt (lat, lon) liegt. Haversine. Bei unbekannter Position: False.",
        },
    ]
|
||||
|
||||
|
||||
_ALLOWED_FUNCTIONS = {f["name"] for f in describe_functions()}
|
||||
|
||||
|
||||
# ─── Sicherer Condition-Parser ──────────────────────────────────────
|
||||
|
||||
_ALLOWED_NODES = (
|
||||
ast.Expression, ast.BoolOp, ast.UnaryOp, ast.Compare,
|
||||
ast.Name, ast.Constant, ast.Load,
|
||||
ast.And, ast.Or, ast.Not,
|
||||
ast.Eq, ast.NotEq, ast.Lt, ast.LtE, ast.Gt, ast.GtE,
|
||||
ast.Call,
|
||||
)
|
||||
|
||||
|
||||
def parse_condition(expr: str) -> ast.Expression:
    """Parse a condition expression and validate it against the safe subset.

    Allowed: comparisons, boolean operators, whitelisted function calls,
    known variable names, and int/float/str/bool/None constants.

    Returns:
        The validated ast.Expression tree (ready for compile()).

    Raises:
        ValueError: for empty/oversized input, syntax errors, or any
            construct outside the whitelist.
    """
    expr = (expr or "").strip()
    if not expr:
        raise ValueError("Leere Condition")
    if len(expr) > 500:
        raise ValueError("Condition zu lang (>500 Zeichen)")
    try:
        tree = ast.parse(expr, mode="eval")
    except SyntaxError as e:
        # FIX: chain the original SyntaxError so tracebacks show the cause.
        raise ValueError(f"Condition Syntax-Fehler: {e}") from e
    # FIX: additionally accept unary +/- operator nodes so signed numeric
    # literals (e.g. negative latitudes in near()) parse. The Call-argument
    # check below already allowed ast.UnaryOp, but the node whitelist
    # rejected the ast.USub operator itself, making every negative constant
    # fail validation.
    allowed_node_types = _ALLOWED_NODES + (ast.USub, ast.UAdd)
    allowed_names = {v["name"] for v in describe_variables()}
    for node in ast.walk(tree):
        if not isinstance(node, allowed_node_types):
            raise ValueError(f"Verbotener Ausdruck: {type(node).__name__}")
        if isinstance(node, ast.Call):
            # Only a bare function name — no attribute access (foo.bar())
            if not isinstance(node.func, ast.Name):
                raise ValueError("Funktionsaufruf nur ueber direkten Namen erlaubt")
            if node.func.id not in _ALLOWED_FUNCTIONS:
                raise ValueError(f"Verbotene Funktion: {node.func.id}")
            # Arguments must be constants or plain names (UnaryOp for signs)
            for a in node.args:
                if not isinstance(a, (ast.Constant, ast.Name, ast.UnaryOp)):
                    raise ValueError(f"Argument-Typ in {node.func.id}() nicht erlaubt")
            if node.keywords:
                raise ValueError("Keyword-Argumente in Funktionen nicht erlaubt")
        if isinstance(node, ast.Name):
            if (node.id not in allowed_names
                    and node.id not in _ALLOWED_FUNCTIONS
                    and node.id not in ("True", "False")):
                raise ValueError(f"Unbekannte Variable: {node.id}")
        if isinstance(node, ast.Constant):
            if not isinstance(node.value, (int, float, str, bool)) and node.value is not None:
                raise ValueError(f"Verbotener Konstant-Typ: {type(node.value).__name__}")
    return tree
|
||||
|
||||
|
||||
def evaluate(expr: str, variables: dict[str, Any] | None = None) -> bool:
    """Evaluate a condition against the current variables.

    Runtime errors during evaluation are logged and yield False
    (defensive); validation errors from parse_condition() still propagate.
    """
    tree = parse_condition(expr)
    scope = collect_variables() if variables is None else variables
    code = compile(tree, "<condition>", "eval")
    try:
        # Empty builtins; the variable snapshot (incl. near()) is the only
        # namespace the expression can touch.
        outcome = eval(code, {"__builtins__": {}}, scope)
    except Exception as e:
        logger.warning("Condition '%s' eval-Fehler: %s", expr, e)
        return False
    return bool(outcome)
|
||||
@@ -52,6 +52,59 @@ Fuer Web-Anfragen: **WebFetch** oder **Bash mit curl**. Niemals sagen "ich habe
|
||||
4. **Regelmaessig committen** — mit sinnvollen Commit-Messages.
|
||||
5. **Tageslog fuehren** — was wurde getan, was ist offen.
|
||||
|
||||
## Dateien an Stefan zurueckgeben — KRITISCH
|
||||
|
||||
**Das ist die EINZIGE Methode wie Stefan an Dateien rankommt. Ohne
|
||||
diese Schritte sieht und bekommt er die Datei NICHT.**
|
||||
|
||||
### Regel 1 — Speicher-Ort
|
||||
|
||||
Dateien fuer Stefan AUSSCHLIESSLICH unter `/shared/uploads/` speichern.
|
||||
|
||||
NIEMALS in:
|
||||
- `/home/node/.openclaw/workspace/...` (das ist NUR dein Arbeitsverzeichnis,
|
||||
Stefan hat keinen Zugriff darauf)
|
||||
- `/tmp/...`, `/root/...`, oder sonst irgendwo
|
||||
|
||||
Dateinamen mit `aria_`-Prefix damit Cleanup-Scripts sie zuordnen koennen:
|
||||
|
||||
```
|
||||
/shared/uploads/aria_<beschreibender_name>.<ext>
|
||||
```
|
||||
|
||||
Beispiele: `aria_termin_zusage.pdf`, `aria_einkaufsliste.md`,
|
||||
`aria_logs_2026-05-10.zip`.
|
||||
|
||||
### Regel 2 — Marker im Antworttext
|
||||
|
||||
Am Ende deiner Antwort EINMALIG den Marker setzen:
|
||||
|
||||
```
|
||||
[FILE: /shared/uploads/aria_<name>.<ext>]
|
||||
```
|
||||
|
||||
OHNE diesen Marker erscheint die Datei NICHT in der App / Diagnostic.
|
||||
|
||||
Mehrere Dateien: mehrere `[FILE: ...]`-Marker am Ende, jeder in
|
||||
eigener Zeile.
|
||||
|
||||
### Beispiel — kompletter Workflow
|
||||
|
||||
User: "Schreib mir ein Lasagne-Rezept als md-Datei"
|
||||
|
||||
1. Du schreibst die Datei: `Write` Tool mit Pfad `/shared/uploads/aria_lasagne.md`
|
||||
2. Antwort an Stefan:
|
||||
|
||||
```
|
||||
Hier dein Lasagne-Rezept — Ragu am Vortag, echter Parmesan,
|
||||
Ruhezeit nicht skippen. Beim Schichten Bechamel auf jede Lage.
|
||||
|
||||
[FILE: /shared/uploads/aria_lasagne.md]
|
||||
```
|
||||
|
||||
Der Marker wird automatisch aus dem sichtbaren Text entfernt und
|
||||
als Anhang-Bubble angezeigt. Stefan tippt drauf → oeffnet die Datei.
|
||||
|
||||
## Stimme
|
||||
|
||||
TTS laeuft ueber F5-TTS (Voice Cloning, Gaming-PC). Stefan kann eigene
|
||||
@@ -78,6 +78,97 @@ Wenn ein Tool nicht klappt, probiere die Alternative. Niemals sagen "ich habe ke
|
||||
- Destruktive Operationen (Dateien loeschen, Datenbanken droppen)
|
||||
- Push auf main
|
||||
|
||||
## Dateien an Stefan zurueckgeben — KRITISCH
|
||||
|
||||
**Das ist die EINZIGE Methode wie Stefan an Dateien rankommt. Ohne diese
|
||||
Schritte sieht und bekommt er die Datei NICHT.**
|
||||
|
||||
### Regel 1 — Speicher-Ort
|
||||
|
||||
Dateien fuer Stefan AUSSCHLIESSLICH unter `/shared/uploads/` speichern.
|
||||
|
||||
NIEMALS in:
|
||||
- `/home/node/.openclaw/workspace/...` (NUR dein Arbeitsverzeichnis,
|
||||
Stefan hat keinen Zugriff)
|
||||
- `/tmp/...`, `/root/...`, oder sonst irgendwo
|
||||
|
||||
Dateinamen mit `aria_`-Prefix:
|
||||
|
||||
```
|
||||
/shared/uploads/aria_<beschreibender_name>.<ext>
|
||||
```
|
||||
|
||||
Beispiele: `aria_termin_zusage.pdf`, `aria_einkaufsliste.md`,
|
||||
`aria_logs_2026-05-10.zip`.
|
||||
|
||||
### Regel 2 — Marker im Antworttext
|
||||
|
||||
Am Ende deiner Antwort EINMALIG den Marker setzen:
|
||||
|
||||
```
|
||||
[FILE: /shared/uploads/aria_<name>.<ext>]
|
||||
```
|
||||
|
||||
OHNE diesen Marker erscheint die Datei NICHT in der App / Diagnostic.
|
||||
|
||||
Mehrere Dateien: mehrere `[FILE: ...]`-Marker am Ende, jeder in
|
||||
eigener Zeile.
|
||||
|
||||
**WICHTIG — Datei MUSS existieren bevor du den Marker setzt.**
|
||||
Marker fuer nicht-existente Pfade werden silent gefiltert + Stefan
|
||||
bekommt einen Hinweis dass du eine Datei versprochen aber nicht
|
||||
erstellt hast. Wenn du z.B. eine MIDI-Datei nicht generieren kannst,
|
||||
sag das offen statt nur den Marker zu setzen. Verifiziere zur Not
|
||||
mit `Bash` + `ls -la /shared/uploads/aria_<name>.<ext>` dass die
|
||||
Datei wirklich da ist.
|
||||
|
||||
### Beispiel — kompletter Workflow
|
||||
|
||||
User: "Schreib mir ein Lasagne-Rezept als md-Datei"
|
||||
|
||||
1. Du schreibst: `Write` Tool mit Pfad `/shared/uploads/aria_lasagne.md`
|
||||
2. Antwort an Stefan:
|
||||
|
||||
```
|
||||
Hier dein Lasagne-Rezept — Ragu am Vortag, echter Parmesan,
|
||||
Ruhezeit nicht skippen. Beim Schichten Bechamel auf jede Lage.
|
||||
|
||||
[FILE: /shared/uploads/aria_lasagne.md]
|
||||
```
|
||||
|
||||
Der Marker wird automatisch aus dem sichtbaren Text entfernt und
|
||||
als Anhang-Bubble angezeigt. Stefan tippt drauf → oeffnet die Datei
|
||||
im jeweiligen Standard-Programm.
|
||||
|
||||
### Externe Bilder/Dateien — IMMER runterladen, nicht nur verlinken
|
||||
|
||||
Wenn Stefan ein Bild oder eine Datei aus dem Netz haben will (Wikipedia,
|
||||
Wiki Commons, ein Beispiel-PDF, etc.):
|
||||
|
||||
NICHT NUR die URL in die Antwort schreiben — das Bild ist dann nur
|
||||
solange sichtbar wie der externe Server lebt.
|
||||
|
||||
STATTDESSEN:
|
||||
1. Mit `Bash` + curl/wget herunterladen nach `/shared/uploads/aria_<name>.<ext>`
|
||||
2. Mit `[FILE: ...]`-Marker als Anhang ausspielen
|
||||
|
||||
Beispiel — User: "Zeig mir ein Bild von Micky Maus"
|
||||
|
||||
```bash
|
||||
curl -sL "https://upload.wikimedia.org/wikipedia/commons/7/7f/Mickey_Mouse.svg" \
|
||||
-o /shared/uploads/aria_mickey_mouse.svg
|
||||
```
|
||||
|
||||
Antwort:
|
||||
```
|
||||
Hier Micky Maus — offizielles SVG von Wikimedia Commons (Public Domain).
|
||||
|
||||
[FILE: /shared/uploads/aria_mickey_mouse.svg]
|
||||
```
|
||||
|
||||
So bleibt das Bild permanent im Chat-Verlauf, auch wenn die Wiki-URL
|
||||
spaeter offline geht oder umgezogen wird.
|
||||
|
||||
## Stimme
|
||||
|
||||
TTS laeuft ueber F5-TTS auf der Gamebox (Voice Cloning). Stefan kann
|
||||
@@ -1,10 +1,10 @@
|
||||
# Stefan — Benutzer-Praeferenzen
|
||||
# <Username> — Benutzer-Praeferenzen
|
||||
|
||||
## Allgemein
|
||||
|
||||
- **Sprache:** Deutsch
|
||||
- **Kommunikation:** Direkt, kein Bullshit, Humor willkommen
|
||||
- **Rolle:** Chef, Auftraggeber, Entwickler bei HackerSoft Oldenburg
|
||||
- **Sprache:** <z.B. Deutsch>
|
||||
- **Kommunikation:** <z.B. Direkt, kein Bullshit, Humor willkommen>
|
||||
- **Rolle:** <z.B. Chef, Auftraggeber, Entwickler bei XYZ>
|
||||
|
||||
## Bestaetigung erforderlich fuer
|
||||
|
||||
@@ -12,7 +12,6 @@
|
||||
- Push auf main
|
||||
- Aenderungen an Kundensystemen
|
||||
- Server-Befehle die nicht rueckgaengig gemacht werden koennen
|
||||
- Windows neu installieren (erst Daten sichern!)
|
||||
|
||||
## Autonomes Arbeiten OK fuer
|
||||
|
||||
@@ -28,8 +27,10 @@
|
||||
|
||||
| Tool | Zweck |
|
||||
|------|-------|
|
||||
| **Proxmox** | VM-Infrastruktur (ARIAs Zuhause) |
|
||||
| **Gitea** | Code-Hosting (gitea.hackersoft.de) |
|
||||
| **OpenCRM** | Kundenverwaltung |
|
||||
| **STARFACE** | Telefonie |
|
||||
| **RustDesk** | Remote IT-Support bei Kunden |
|
||||
| **<Beispiel-Tool>** | <Zweck> |
|
||||
|
||||
<!--
|
||||
Diese Datei ist eine Vorlage. Lokal als USER.md kopieren und mit
|
||||
eigenen Praeferenzen + Tool-Stack fuellen. USER.md selbst ist via
|
||||
.gitignore vom Repo ausgeschlossen.
|
||||
-->
|
||||
@@ -1,14 +0,0 @@
|
||||
# Bridge → aria-core (OpenClaw Gateway)
|
||||
# Bridge teilt Netzwerk mit aria-core (network_mode: service:aria)
|
||||
# → localhost ist aria-core
|
||||
ARIA_CORE_WS=ws://127.0.0.1:18789
|
||||
|
||||
# Wake-Word
|
||||
WAKE_WORD=aria
|
||||
|
||||
# Whisper STT — wird zur Laufzeit in der Diagnostic (Sektion "Whisper") umgeschaltet
|
||||
# und in /shared/config/voice_config.json gespeichert. Der Wert hier ist nur der
|
||||
# Initial-Default beim ersten Start.
|
||||
# Optionen: tiny | base | small | medium | large-v3
|
||||
WHISPER_MODEL=medium
|
||||
WHISPER_LANGUAGE=de
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"version": 1,
|
||||
"profiles": {
|
||||
"openai-proxy": {
|
||||
"provider": "openai",
|
||||
"default": true,
|
||||
"apiKey": "not-needed",
|
||||
"baseUrl": "http://proxy:3456/v1"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,6 +0,0 @@
|
||||
# OpenClaw (aria-core) Konfiguration
|
||||
# Diese Datei wird als /workspace/.env in den Container gemountet
|
||||
#
|
||||
# WICHTIG: ANTHROPIC_API_KEY und ANTHROPIC_BASE_URL absichtlich NICHT gesetzt!
|
||||
# OpenClaw wuerde sonst die echte Anthropic API direkt anrufen (401 weil kein API Key).
|
||||
# Stattdessen nur den OpenAI-kompatiblen Proxy nutzen.
|
||||
@@ -1,137 +0,0 @@
|
||||
# OpenClaw Tool-Permissions — Stand 2026-03-15
|
||||
|
||||
## Das Problem (GELÖST)
|
||||
|
||||
ARIA hat ZWEI Tool-Systeme gleichzeitig: Claude Code Tools UND OpenClaw-native Tools.
|
||||
Das Model hat aber nur Zugriff auf **Claude Code Tools** (über den Proxy), nicht auf OpenClaw-native Tools.
|
||||
|
||||
### Root Cause: DREI Probleme gleichzeitig
|
||||
|
||||
```
|
||||
OpenClaw (aria-core) → API Request → claude-max-api-proxy (aria-proxy) → Claude Code CLI (--print Mode)
|
||||
↓
|
||||
Tools: WebFetch, Bash, etc. (Claude Code)
|
||||
NICHT: web_fetch, exec (OpenClaw-nativ)
|
||||
```
|
||||
|
||||
**Problem 1: Proxy benutzt `--print` Modus**
|
||||
- `claude-max-api-proxy` ruft Claude Code CLI mit `--print --output-format stream-json` auf
|
||||
- Der Prompt wird als einziger String übergeben, keine Tool-Definitionen von OpenClaw
|
||||
- Das Model sieht NUR Claude Code's eingebaute Tools (WebFetch, Bash, etc.)
|
||||
- OpenClaw-native Tools (web_fetch, exec) existieren NUR auf Gateway-Ebene, kommen nie beim Model an
|
||||
|
||||
**Problem 2: BOOTSTRAP.md hat die falschen Tools angewiesen**
|
||||
- BOOTSTRAP.md sagte: "NIEMALS WebFetch benutzen, stattdessen web_fetch"
|
||||
- Aber web_fetch existiert nicht im Claude Code CLI Kontext
|
||||
- Und WebFetch war das einzige Tool das funktioniert hätte
|
||||
- → Model hatte keine Tools die es benutzen "durfte"
|
||||
|
||||
**Problem 3: settings.json im Proxy war leer**
|
||||
- `/root/.claude/settings.json` enthielt nur `{}` (keine Permissions)
|
||||
- Claude Code CLI im headless-Modus kann keine Tool-Genehmigungen erteilen
|
||||
- → Selbst wenn das Model WebFetch benutzen wollte, war es nicht vorab genehmigt
|
||||
|
||||
## Die Lösung
|
||||
|
||||
### Fix 1: BOOTSTRAP.md + AGENT.md umgeschrieben
|
||||
|
||||
**Vorher (FALSCH):**
|
||||
- "NIEMALS WebFetch benutzen — hat Permission-Probleme"
|
||||
- "Benutze web_fetch (OpenClaw-nativ)"
|
||||
|
||||
**Nachher (KORREKT):**
|
||||
- "WebFetch — URLs abrufen, Webseiten lesen, APIs aufrufen, Wetter abfragen"
|
||||
- "Bash — Shell-Befehle ausfuehren (curl, ssh, docker, etc.)"
|
||||
- "Niemals sagen 'ich habe keinen Zugriff' — du hast Zugriff auf alles"
|
||||
|
||||
### Fix 2: `CLAUDE_CODE_BUBBLEWRAP=1` + `--dangerously-skip-permissions`
|
||||
|
||||
**Der Schlüssel-Fix.** Zwei Zeilen in `docker-compose.yml`:
|
||||
|
||||
```yaml
|
||||
# 1. sed-Patch: --dangerously-skip-permissions in manager.js einfügen
|
||||
sed -i 's/"--no-session-persistence",/"--no-session-persistence","--dangerously-skip-permissions",/' $$DIST/subprocess/manager.js &&
|
||||
|
||||
# 2. Environment-Variable: Root-Check umgehen
|
||||
environment:
|
||||
- CLAUDE_CODE_BUBBLEWRAP=1
|
||||
```
|
||||
|
||||
**Warum beides nötig:**
|
||||
- `--dangerously-skip-permissions` umgeht alle Tool-Permission-Checks in Claude Code CLI
|
||||
- Aber: Claude Code CLI blockiert dieses Flag wenn es als root läuft
|
||||
- `CLAUDE_CODE_BUBBLEWRAP=1` überspringt den Root-Check (gefunden im minifizierten `cli.js`)
|
||||
- Proxy-Container (`node:22-alpine`) läuft als root → ohne BUBBLEWRAP geht's nicht
|
||||
|
||||
**Resultierende CLI-Argumente:**
|
||||
```
|
||||
claude --print --output-format stream-json --verbose --include-partial-messages \
|
||||
--model opus --no-session-persistence --dangerously-skip-permissions "prompt"
|
||||
```
|
||||
|
||||
## Wie der Proxy intern funktioniert
|
||||
|
||||
```
|
||||
openai-to-cli.js: OpenAI Messages → einzelner Prompt-String
|
||||
system → <system>...</system>
|
||||
user → direkt
|
||||
assistant → <previous_response>...</previous_response>
|
||||
|
||||
subprocess/manager.js: Spawnt `claude --print ... --dangerously-skip-permissions "{prompt}"`
|
||||
|
||||
cli-to-openai.js: Claude CLI JSON-Stream → OpenAI Chat Completion Chunks
|
||||
```
|
||||
|
||||
Der Proxy leitet KEINE Tool-Definitionen von OpenClaw weiter.
|
||||
Tool-Calls passieren INTERN in der Claude Code CLI und sind für OpenClaw transparent.
|
||||
|
||||
## Permission-Architektur
|
||||
|
||||
**Granulare Tool-Kontrolle ist NICHT möglich.** Es ist Alles-oder-Nichts:
|
||||
- `--dangerously-skip-permissions` AN → ARIA kann alle Claude Code Tools benutzen
|
||||
- `--dangerously-skip-permissions` AUS → ARIA kann keine Tools benutzen
|
||||
|
||||
OpenClaw's eigene Permissions (`tools.allow/deny` in `openclaw.json`) haben **keinen Effekt** auf die
|
||||
Claude Code Tools — die laufen komplett auf Proxy-Seite.
|
||||
|
||||
## Was NICHT funktioniert hat (17 Versuche)
|
||||
|
||||
1. **settings.json in aria-core** — OpenClaw benutzt NICHT Claude Code's settings.json
|
||||
2. **tools.allow mit PascalCase** (WebFetch, Grep) — OpenClaw kennt diese Namen nicht
|
||||
3. **tools.allow mit snake_case** (web_fetch) — Nur exec, read, write, edit erkannt
|
||||
4. **tools.allow mit Wildcard** `["*"]` — Hat nicht geholfen
|
||||
5. **tools.allow leer + tools.profile: "full"** — Nur ohne andere Fehler
|
||||
6. **System-Prompt Anweisung allein** — Reicht nicht wenn Tools blockiert sind
|
||||
7. **exec-approvals Wildcard allein** — Reicht nicht bei Config-Validation-Error
|
||||
8. **`openclaw config unset tools.exec.ask`** — CLI kennt den Pfad nicht
|
||||
9. **BOOTSTRAP.md mit OpenClaw-Tool-Namen** — Tools existieren nur auf Gateway-Ebene
|
||||
10. **settings.json im Proxy ohne BOOTSTRAP.md Fix** — BOOTSTRAP.md verbot die Tools
|
||||
11. **tools.byProvider.proxy.profile full** — Kein Effekt
|
||||
12. **settings.json + BOOTSTRAP.md ohne --dangerously-skip-permissions** — `--print` ignoriert settings.json
|
||||
13. **Manuelles `docker exec sed`** — Wird bei jedem Restart überschrieben
|
||||
14. **`--dangerously-skip-permissions` ohne BUBBLEWRAP** — Root-Check blockiert
|
||||
15. **`--allowedTools`** — Variadisches Argument frisst den Prompt
|
||||
16. **`--permission-mode bypassPermissions`** — Gleicher Root-Check
|
||||
17. **Non-Root User (`su node`)** — Auth-Pfad-Probleme, Credentials unerreichbar
|
||||
|
||||
## Wichtige Pfade
|
||||
|
||||
### aria-core (OpenClaw)
|
||||
- `/home/node/.openclaw/openclaw.json` — OpenClaw Haupt-Config
|
||||
- `/home/node/.openclaw/exec-approvals.json` — Exec Approvals
|
||||
- `/tmp/openclaw/openclaw-YYYY-MM-DD.log` — Tages-Log
|
||||
|
||||
### aria-proxy (Claude Code CLI)
|
||||
- `/root/.claude/.credentials.json` — Auth Credentials (NICHT in /root/.config/claude/)
|
||||
- `/usr/local/lib/node_modules/claude-max-api-proxy/dist/` — Proxy Source
|
||||
- `/usr/local/lib/node_modules/@anthropic-ai/claude-code/cli.js` — Claude Code CLI (enthält Root-Check)
|
||||
|
||||
## OpenClaw CLI Referenz
|
||||
|
||||
```bash
|
||||
openclaw config get/set/unset <path> # Config verwalten
|
||||
openclaw approvals get # Exec-Approvals anzeigen
|
||||
openclaw approvals allowlist add # Exec-Pattern freigeben
|
||||
openclaw doctor [--fix] # Health Check
|
||||
openclaw gateway status # Gateway-Status
|
||||
```
|
||||
+24
-113
@@ -1,104 +1,23 @@
|
||||
#!/bin/bash
|
||||
# ════════════════════════════════════════════════
|
||||
# ARIA — Ersteinrichtung nach docker compose up
|
||||
# Einmalig ausfuehren, danach persistiert alles.
|
||||
#
|
||||
# OpenClaw (aria-core) ist abgerissen — das Setup macht jetzt
|
||||
# nur noch den SSH-Key fuer den Zugriff auf die VM (aria-wohnung).
|
||||
# Brain + Proxy teilen sich denselben Key, beide haben aria-data/ssh
|
||||
# als Volume gemountet.
|
||||
# ════════════════════════════════════════════════
|
||||
set -e
|
||||
|
||||
echo "=== ARIA Setup ==="
|
||||
echo ""
|
||||
|
||||
# Warten bis aria-core laeuft
|
||||
echo "[1/7] Warte auf aria-core..."
|
||||
until docker inspect -f '{{.State.Running}}' aria-core 2>/dev/null | grep -q true; do
|
||||
sleep 2
|
||||
echo " ... warte..."
|
||||
done
|
||||
echo " aria-core laeuft."
|
||||
|
||||
# Permissions fixen — Docker-Volumes gehoeren root, OpenClaw laeuft als node
|
||||
echo ""
|
||||
echo "[2/7] Fixe Permissions auf /home/node/.openclaw und /home/node/.claude..."
|
||||
docker exec -u root aria-core chown -R node:node /home/node/.openclaw
|
||||
docker exec -u root aria-core chown -R node:node /home/node/.claude 2>/dev/null || true
|
||||
docker exec -u root aria-core chmod 700 /home/node/.openclaw
|
||||
echo " Permissions OK."
|
||||
|
||||
# OpenClaw Config schreiben — Custom Provider fuer claude-max-api-proxy
|
||||
echo ""
|
||||
echo "[3/7] Schreibe openclaw.json (Proxy-Provider + Model + Tools)..."
|
||||
docker exec aria-core sh -c 'cat > /home/node/.openclaw/openclaw.json << '"'"'INNEREOF'"'"'
|
||||
{
|
||||
"meta": {
|
||||
"lastTouchedVersion": "2026.3.8"
|
||||
},
|
||||
"gateway": {
|
||||
"mode": "local"
|
||||
},
|
||||
"agents": {
|
||||
"defaults": {
|
||||
"model": {
|
||||
"primary": "proxy/claude-sonnet-4"
|
||||
},
|
||||
"compaction": {
|
||||
"mode": "safeguard"
|
||||
},
|
||||
"timeoutSeconds": 900,
|
||||
"maxConcurrent": 4,
|
||||
"subagents": {
|
||||
"maxConcurrent": 8
|
||||
}
|
||||
}
|
||||
},
|
||||
"models": {
|
||||
"providers": {
|
||||
"proxy": {
|
||||
"api": "openai-completions",
|
||||
"baseUrl": "http://proxy:3456/v1",
|
||||
"apiKey": "not-needed",
|
||||
"models": [
|
||||
{ "id": "claude-sonnet-4", "name": "claude-sonnet-4" },
|
||||
{ "id": "claude-opus-4", "name": "claude-opus-4" }
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"tools": {
|
||||
"profile": "full",
|
||||
"web": {
|
||||
"fetch": {
|
||||
"enabled": true
|
||||
}
|
||||
},
|
||||
"exec": {
|
||||
"host": "gateway"
|
||||
}
|
||||
},
|
||||
"messages": {
|
||||
"ackReactionScope": "all"
|
||||
},
|
||||
"commands": {
|
||||
"native": "auto",
|
||||
"nativeSkills": "auto",
|
||||
"restart": true,
|
||||
"ownerDisplay": "raw"
|
||||
}
|
||||
}
|
||||
INNEREOF'
|
||||
echo " Config geschrieben."
|
||||
|
||||
# Exec-Approvals Wildcard — erlaubt Tool-Ausfuehrung im headless-Modus
|
||||
echo ""
|
||||
echo "[4/7] Setze exec-approvals Wildcard..."
|
||||
docker exec aria-core openclaw approvals allowlist add --agent "*" "*" 2>/dev/null || true
|
||||
echo " Approvals gesetzt."
|
||||
|
||||
# SSH-Key generieren fuer VM-Zugriff
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
SSH_DIR="$SCRIPT_DIR/aria-data/ssh"
|
||||
echo ""
|
||||
echo "[5/7] SSH-Key fuer VM-Zugriff..."
|
||||
|
||||
echo "=== ARIA Setup ==="
|
||||
|
||||
mkdir -p "$SSH_DIR"
|
||||
|
||||
if [ ! -f "$SSH_DIR/id_ed25519" ]; then
|
||||
echo "Generiere SSH-Key fuer aria-wohnung..."
|
||||
ssh-keygen -t ed25519 -f "$SSH_DIR/id_ed25519" -N "" -C "aria@aria-wohnung"
|
||||
cat > "$SSH_DIR/config" << 'SSHEOF'
|
||||
Host aria-wohnung
|
||||
@@ -108,34 +27,26 @@ Host aria-wohnung
|
||||
StrictHostKeyChecking accept-new
|
||||
SSHEOF
|
||||
chmod 600 "$SSH_DIR/id_ed25519"
|
||||
chmod 644 "$SSH_DIR/id_ed25519.pub"
|
||||
chmod 644 "$SSH_DIR/config"
|
||||
echo " Key generiert."
|
||||
# Public Key direkt in root's authorized_keys eintragen (Script laeuft als root auf der VM)
|
||||
chmod 644 "$SSH_DIR/id_ed25519.pub" "$SSH_DIR/config"
|
||||
|
||||
# Public Key direkt in /root/.ssh/authorized_keys eintragen
|
||||
# (Script laeuft als root auf der VM aria-wohnung)
|
||||
if [ -w /root/.ssh ] || [ -w /root ]; then
|
||||
mkdir -p /root/.ssh
|
||||
chmod 700 /root/.ssh
|
||||
cat "$SSH_DIR/id_ed25519.pub" >> /root/.ssh/authorized_keys
|
||||
chmod 600 /root/.ssh/authorized_keys
|
||||
echo " Public Key in /root/.ssh/authorized_keys eingetragen."
|
||||
else
|
||||
echo " Hinweis: konnte /root/.ssh/authorized_keys nicht schreiben."
|
||||
echo " Pubkey manuell eintragen:"
|
||||
cat "$SSH_DIR/id_ed25519.pub"
|
||||
fi
|
||||
else
|
||||
echo " Key existiert bereits."
|
||||
echo "SSH-Key existiert bereits — uebersprungen."
|
||||
fi
|
||||
|
||||
# Permissions im Container fixen
|
||||
echo ""
|
||||
echo "[6/7] Fixe SSH-Permissions..."
|
||||
docker exec -u root aria-core chown -R node:node /home/node/.ssh 2>/dev/null || true
|
||||
|
||||
# Neustart damit Gateway die Config laedt
|
||||
echo ""
|
||||
echo "[7/7] Starte aria-core neu..."
|
||||
docker restart aria-core
|
||||
|
||||
echo ""
|
||||
echo "=== Setup fertig ==="
|
||||
echo ""
|
||||
echo "Teste mit: docker logs aria-core --tail 20"
|
||||
echo "Erwartete Zeile: 'agent model: proxy/claude-sonnet-4'"
|
||||
echo ""
|
||||
echo "SSH-Test: docker exec aria-core ssh aria-wohnung hostname"
|
||||
echo "Tool-Test: Neue Session anlegen, dann 'Wie wird das Wetter in Bremen?' fragen"
|
||||
echo "Naechster Schritt: docker compose up -d"
|
||||
echo "Test: docker exec aria-brain ssh aria-wohnung hostname"
|
||||
|
||||
+564
-57
@@ -16,9 +16,12 @@ import asyncio
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
import mimetypes
|
||||
import os
|
||||
import re
|
||||
import signal
|
||||
import ssl
|
||||
import time
|
||||
import sys
|
||||
import tempfile
|
||||
import uuid
|
||||
@@ -46,7 +49,6 @@ logger = logging.getLogger("aria-bridge")
|
||||
|
||||
# ── Konfiguration ───────────────────────────────────────────
|
||||
|
||||
CONFIG_PATH = Path("/config/aria.env")
|
||||
VOICES_DIR = Path("/voices")
|
||||
CORE_WS_URL = os.getenv("ARIA_CORE_WS", "ws://127.0.0.1:18789")
|
||||
CORE_AUTH_TOKEN = os.getenv("ARIA_AUTH_TOKEN", "") # OpenClaw Gateway Token
|
||||
@@ -66,38 +68,22 @@ BLOCK_SIZE = 1280 # 80ms bei 16kHz — gut fuer Wake-Word-Erkennung
|
||||
RECORD_SECONDS = 8 # Max. Aufnahmedauer nach Wake-Word
|
||||
|
||||
def load_config() -> dict[str, str]:
|
||||
"""Laedt Konfiguration.
|
||||
|
||||
Reihenfolge (hoechste Prioritaet zuletzt):
|
||||
1. /config/aria.env (bind-mount)
|
||||
2. /shared/config/runtime.json (zentral gepflegt ueber Diagnostic UI)
|
||||
|
||||
Werte aus runtime.json ueberschreiben die env-Datei.
|
||||
"""Laedt Konfiguration ausschliesslich aus /shared/config/runtime.json
|
||||
(zentral gepflegt ueber Diagnostic UI). Tokens + RVS-Settings kommen
|
||||
via ENV (siehe docker-compose).
|
||||
"""
|
||||
config: dict[str, str] = {}
|
||||
if CONFIG_PATH.exists():
|
||||
for line in CONFIG_PATH.read_text().splitlines():
|
||||
line = line.strip()
|
||||
if not line or line.startswith("#"):
|
||||
continue
|
||||
if "=" in line:
|
||||
key, _, value = line.partition("=")
|
||||
config[key.strip()] = value.strip()
|
||||
logger.info("Konfiguration geladen aus %s", CONFIG_PATH)
|
||||
else:
|
||||
logger.warning("Keine Konfiguration gefunden: %s", CONFIG_PATH)
|
||||
|
||||
# Runtime-Overrides aus zentralem Shared-Volume (Diagnostic UI)
|
||||
runtime_path = Path("/shared/config/runtime.json")
|
||||
if runtime_path.exists():
|
||||
try:
|
||||
runtime = json.loads(runtime_path.read_text())
|
||||
overrides = {k: str(v) for k, v in runtime.items() if v not in (None, "")}
|
||||
if overrides:
|
||||
config.update(overrides)
|
||||
logger.info("Runtime-Overrides geladen: %s", sorted(overrides.keys()))
|
||||
config = {k: str(v) for k, v in runtime.items() if v not in (None, "")}
|
||||
if config:
|
||||
logger.info("Runtime-Config geladen: %s", sorted(config.keys()))
|
||||
except Exception as e:
|
||||
logger.warning("runtime.json konnte nicht gelesen werden: %s", e)
|
||||
else:
|
||||
logger.info("Keine runtime.json — Diagnostic schreibt sie beim ersten Konfigurieren")
|
||||
return config
|
||||
|
||||
|
||||
@@ -547,6 +533,13 @@ class ARIABridge:
|
||||
# Beeinflusst das Timeout fuer stt_request — bei "loading" warten wir laenger,
|
||||
# weil das Modell beim ersten Request noch ~1-2 Min runtergeladen werden kann.
|
||||
self._remote_stt_ready: bool = False
|
||||
# User-Message-Counter fuer Auto-Compact. Bei zu langer Konversation
|
||||
# sprengt die argv-Liste beim Claude-Subprocess-Spawn (E2BIG). Bei
|
||||
# COMPACT_AFTER erreicht → Sessions reset + Container restart.
|
||||
# Counter ueberlebt Bridge-Restart nicht (frischer Zaehler beim Start ok).
|
||||
# _user_message_count + _compact_after entfallen — Auto-Compact war
|
||||
# aria-core-spezifisch (E2BIG-Schutz). Der neue Brain-Loop kennt
|
||||
# diese Begrenzung nicht.
|
||||
# Pending Files: wenn die App ein Bild + Text gleichzeitig schickt, kommen
|
||||
# zwei separate RVS-Events ('file' und 'chat') — wir buffern die Files
|
||||
# kurz und mergen sie mit dem nachfolgenden Chat-Text zu einer einzigen
|
||||
@@ -590,7 +583,7 @@ class ARIABridge:
|
||||
logger.info("RVS: %s (Token: %s...)", self.rvs_url, self.rvs_token[:8])
|
||||
else:
|
||||
logger.warning("RVS nicht konfiguriert — App-Verbindung deaktiviert")
|
||||
logger.warning(" Setze RVS_HOST, RVS_PORT, RVS_TOKEN in /config/aria.env")
|
||||
logger.warning(" Setze RVS_HOST, RVS_PORT, RVS_TOKEN in der .env auf der VM")
|
||||
logger.info("Modus: %s %s", self.current_mode.config.emoji, self.current_mode.config.name)
|
||||
|
||||
# ── aria-core Verbindung (OpenClaw Gateway Protokoll) ───
|
||||
@@ -677,7 +670,10 @@ class ARIABridge:
|
||||
while self.running:
|
||||
try:
|
||||
logger.info("[core] Verbinde: %s", self.ws_url)
|
||||
async with websockets.connect(self.ws_url) as ws:
|
||||
# max_size=50MB damit grosse Bilder/Voice-Uploads durchgehen.
|
||||
# Python-websockets Default ist nur 1 MiB → 5MB JPEG sprengt
|
||||
# das Limit, Connection wird silent gedroppt.
|
||||
async with websockets.connect(self.ws_url, max_size=50 * 1024 * 1024) as ws:
|
||||
# OpenClaw Handshake durchfuehren
|
||||
if not await self._openclaw_handshake(ws):
|
||||
logger.error("[core] Handshake fehlgeschlagen — Reconnect")
|
||||
@@ -783,13 +779,29 @@ class ARIABridge:
|
||||
await self._emit_activity("idle", "")
|
||||
if not text:
|
||||
logger.warning("[core] chat final ohne Text: %s", json.dumps(payload)[:200])
|
||||
# App+Diagnostic informieren statt stumm — sonst wartet die
|
||||
# UI ewig auf eine Antwort die nicht kommt. Passiert z.B.
|
||||
# wenn Claude-Vision das Bild ablehnt (leere Antwort)
|
||||
# oder die Antwort nur aus Tool-Calls ohne Final-Text bestand.
|
||||
await self._send_to_rvs({
|
||||
"type": "chat",
|
||||
"payload": {
|
||||
"text": "[Hinweis] Antwort ohne Text — moeglicherweise Bild zu gross fuer Vision-API oder reine Tool-Ausfuehrung.",
|
||||
"sender": "aria",
|
||||
},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
return
|
||||
logger.info("[core] Antwort: '%s'", text[:80])
|
||||
await self._process_core_response(text, payload)
|
||||
return
|
||||
|
||||
if state == "error":
|
||||
error = payload.get("error", "Unbekannt")
|
||||
# OpenClaw nutzt errorMessage statt error bei state=error.
|
||||
error = (payload.get("error")
|
||||
or payload.get("errorMessage")
|
||||
or payload.get("message")
|
||||
or "Unbekannt")
|
||||
logger.error("[core] Chat-Fehler: %s", error)
|
||||
self._last_chat_final_at = asyncio.get_event_loop().time()
|
||||
await self._emit_activity("idle", "")
|
||||
@@ -825,7 +837,12 @@ class ARIABridge:
|
||||
return
|
||||
|
||||
if event_name == "chat:error":
|
||||
error = payload.get("error", payload.get("message", "Unbekannt"))
|
||||
# OpenClaw legt den echten Text manchmal in errorMessage ab
|
||||
# (state=error). Vorher wurde nur error/message gechecked → "Unbekannt".
|
||||
error = (payload.get("error")
|
||||
or payload.get("errorMessage")
|
||||
or payload.get("message")
|
||||
or "Unbekannt")
|
||||
logger.error("[core] Chat-Fehler (legacy): %s", error)
|
||||
await self._send_to_rvs({
|
||||
"type": "chat",
|
||||
@@ -858,6 +875,139 @@ class ARIABridge:
|
||||
pass
|
||||
return payload.get("text", "")
|
||||
|
||||
# File-Marker-Pattern: `[FILE: /pfad/zur/datei.ext]` (Pfad kann Spaces
|
||||
# enthalten, Endung beliebig). Mehrfach im Text moeglich.
|
||||
_FILE_MARKER_RE = re.compile(r"\[FILE:\s*(/shared/uploads/[^\]]+?)\s*\]", re.IGNORECASE)
|
||||
|
||||
def _extract_file_markers(self, text: str) -> tuple[str, list[dict], list[str]]:
|
||||
"""Sucht [FILE: /shared/uploads/...]-Marker.
|
||||
Returns (cleaned_text, valid_files, missing_paths)."""
|
||||
files: list[dict] = []
|
||||
missing: list[str] = []
|
||||
for m in self._FILE_MARKER_RE.finditer(text):
|
||||
path = m.group(1).strip()
|
||||
if not path.startswith("/shared/uploads/"):
|
||||
logger.warning("[core] FILE-Marker mit unerlaubtem Pfad ignoriert: %s", path)
|
||||
continue
|
||||
if not os.path.isfile(path):
|
||||
logger.warning("[core] FILE-Marker zeigt auf nicht existente Datei: %s", path)
|
||||
missing.append(path)
|
||||
continue
|
||||
name = os.path.basename(path)
|
||||
mime, _ = mimetypes.guess_type(path)
|
||||
size = os.path.getsize(path)
|
||||
files.append({
|
||||
"serverPath": path,
|
||||
"name": name,
|
||||
"mimeType": mime or "application/octet-stream",
|
||||
"size": size,
|
||||
})
|
||||
cleaned = self._FILE_MARKER_RE.sub("", text).strip()
|
||||
# Zwei aufeinanderfolgende Leerzeilen → eine
|
||||
cleaned = re.sub(r"\n{3,}", "\n\n", cleaned)
|
||||
return cleaned, files, missing
|
||||
|
||||
async def _broadcast_aria_file(self, file_info: dict) -> None:
|
||||
"""ARIA hat eine Datei fuer den User erstellt — App+Diagnostic informieren."""
|
||||
logger.info("[rvs] ARIA-Datei rausgeben: %s (%s, %dKB)",
|
||||
file_info["name"], file_info["mimeType"], file_info["size"] // 1024)
|
||||
try:
|
||||
await self._send_to_rvs({
|
||||
"type": "file_from_aria",
|
||||
"payload": file_info,
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] file_from_aria broadcast fehlgeschlagen: %s", e)
|
||||
|
||||
def _persist_state(self, key: str, data: dict) -> None:
|
||||
"""Atomic-Write in /shared/state/<key>.json — fuer Brain-Watcher.
|
||||
Wird genutzt fuer location + activity-Tracking."""
|
||||
try:
|
||||
import time as _time
|
||||
data = dict(data)
|
||||
data["ts_unix"] = int(_time.time())
|
||||
Path("/shared/state").mkdir(parents=True, exist_ok=True)
|
||||
target = Path(f"/shared/state/{key}.json")
|
||||
tmp = target.with_suffix(".tmp")
|
||||
tmp.write_text(json.dumps(data), encoding="utf-8")
|
||||
tmp.replace(target)
|
||||
except Exception as e:
|
||||
logger.warning("[state] %s schreiben fehlgeschlagen: %s", key, e)
|
||||
|
||||
def _persist_location(self, location: Optional[dict]) -> None:
|
||||
"""Speichert die letzte bekannte GPS-Position fuer Watcher.
|
||||
Erwartet {lat, lon} oder {lat, lng}. Nicht-Dicts und fehlende
|
||||
Koordinaten werden ignoriert."""
|
||||
if not isinstance(location, dict):
|
||||
return
|
||||
try:
|
||||
lat = location.get("lat")
|
||||
lon = location.get("lon") or location.get("lng")
|
||||
if lat is None or lon is None:
|
||||
return
|
||||
self._persist_state("location", {
|
||||
"lat": float(lat),
|
||||
"lon": float(lon),
|
||||
})
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def _persist_user_activity(self) -> None:
|
||||
"""Markiert dass der User gerade etwas gemacht hat (Chat/Voice).
|
||||
Watcher: last_user_message_ago_sec basiert darauf."""
|
||||
self._persist_state("activity", {"last_user_ts": int(time.time())})
|
||||
|
||||
def _append_chat_backup(self, entry: dict) -> None:
|
||||
"""Schreibt eine Zeile in /shared/config/chat_backup.jsonl.
|
||||
Wird von Diagnostic + App als History-Quelle gelesen.
|
||||
entry braucht mindestens {role, text}; ts wird ergaenzt."""
|
||||
try:
|
||||
line = {"ts": int(asyncio.get_event_loop().time() * 1000)}
|
||||
line.update(entry)
|
||||
Path("/shared/config").mkdir(parents=True, exist_ok=True)
|
||||
with open("/shared/config/chat_backup.jsonl", "a", encoding="utf-8") as f:
|
||||
f.write(json.dumps(line, ensure_ascii=False) + "\n")
|
||||
except Exception as e:
|
||||
logger.warning("[backup] chat_backup-Write fehlgeschlagen: %s", e)
|
||||
|
||||
def _read_chat_backup_since(self, since_ms: int, limit: int = 100) -> list[dict]:
|
||||
"""Liest chat_backup.jsonl, gibt Eintraege > since_ms zurueck, max limit neueste.
|
||||
File-deleted-Marker werden honoriert: vor einem file_deleted-Marker liegende
|
||||
Eintraege mit gleichem Pfad werden als deleted markiert."""
|
||||
path = Path("/shared/config/chat_backup.jsonl")
|
||||
if not path.exists():
|
||||
return []
|
||||
try:
|
||||
lines = path.read_text(encoding="utf-8").splitlines()
|
||||
except Exception as e:
|
||||
logger.warning("[backup] Lesen fehlgeschlagen: %s", e)
|
||||
return []
|
||||
out: list[dict] = []
|
||||
for raw in lines:
|
||||
raw = raw.strip()
|
||||
if not raw:
|
||||
continue
|
||||
try:
|
||||
obj = json.loads(raw)
|
||||
except Exception:
|
||||
continue
|
||||
ts = obj.get("ts") or 0
|
||||
if ts <= since_ms:
|
||||
continue
|
||||
# file_deleted-Marker: nicht als Chat ausliefern, aber an die App schicken
|
||||
# damit sie ihre Bubbles updaten kann (separater Pfad existiert ja schon)
|
||||
if obj.get("type") == "file_deleted":
|
||||
continue
|
||||
role = obj.get("role")
|
||||
if role not in ("user", "assistant"):
|
||||
continue
|
||||
out.append(obj)
|
||||
# Auf "limit" neueste cappen
|
||||
if len(out) > limit:
|
||||
out = out[-limit:]
|
||||
return out
|
||||
|
||||
async def _process_core_response(self, text: str, payload: dict) -> None:
|
||||
"""Verarbeitet eine fertige Antwort von aria-core.
|
||||
|
||||
@@ -872,6 +1022,34 @@ class ARIABridge:
|
||||
logger.info("[core] NO_REPLY empfangen — Antwort still verworfen")
|
||||
return
|
||||
|
||||
# Antwort in chat_backup.jsonl loggen (cleaned text, ohne File-Marker)
|
||||
# — passiert weiter unten nach extract_file_markers
|
||||
|
||||
# File-Marker `[FILE: /shared/uploads/aria_xyz.pdf]` extrahieren —
|
||||
# ARIA legt damit Dateien fuer den User bereit (Bilder, PDFs, etc.).
|
||||
# Der Marker wird aus dem Antworttext entfernt (TTS soll ihn nicht
|
||||
# vorlesen) und parallel als file_from_aria-Event geschickt.
|
||||
text, aria_files, missing_files = self._extract_file_markers(text)
|
||||
for f in aria_files:
|
||||
await self._broadcast_aria_file(f)
|
||||
# Bei fehlenden Files: User informieren (sonst sieht er nur stille
|
||||
# Verluste — ARIA hat den Marker hingeschrieben aber das File nicht
|
||||
# tatsaechlich angelegt).
|
||||
if missing_files:
|
||||
missing_list = "\n".join(f" • {os.path.basename(p)}" for p in missing_files)
|
||||
text = (text + "\n\n[Hinweis] Folgende Dateien hat ARIA zwar erwaehnt "
|
||||
f"aber nicht erstellt:\n{missing_list}\n"
|
||||
"Bitte ARIA bitten, sie wirklich zu schreiben.").strip()
|
||||
|
||||
# Antwort in chat_backup.jsonl loggen (gecleanter Text, ohne File-Marker)
|
||||
# File-Marker werden separat als file_from_aria-Events ausgeliefert.
|
||||
self._append_chat_backup({
|
||||
"role": "assistant",
|
||||
"text": text,
|
||||
"files": [{"serverPath": f["serverPath"], "name": f["name"],
|
||||
"mimeType": f["mimeType"], "size": f["size"]} for f in aria_files],
|
||||
})
|
||||
|
||||
metadata = payload.get("metadata", {})
|
||||
is_critical = metadata.get("critical", False)
|
||||
requested_voice = metadata.get("voice")
|
||||
@@ -947,6 +1125,12 @@ class ARIABridge:
|
||||
except Exception as e:
|
||||
logger.error("[core] XTTS-Request fehlgeschlagen: %s — kein Audio", e)
|
||||
|
||||
# ARIA ist fertig — App's "ARIA denkt..." Indicator zurueck auf idle.
|
||||
# _last_chat_final_at bewusst NICHT setzen: die 3s-Cooldown war fuer
|
||||
# trailing OpenClaw-Activity-Events; bei Voice-Chat wuerde sie die
|
||||
# naechste thinking-Welle unterdruecken.
|
||||
await self._emit_activity("idle", "")
|
||||
|
||||
# ── Mode Persistence (global, nicht pro Geraet) ──────
|
||||
_MODE_FILE = "/shared/config/mode.json"
|
||||
|
||||
@@ -1095,31 +1279,118 @@ class ARIABridge:
|
||||
return True
|
||||
|
||||
async def send_to_core(self, text: str, source: str = "bridge") -> None:
|
||||
"""Sendet Text an aria-core (OpenClaw chat.send Protokoll)."""
|
||||
if self.ws_core is None:
|
||||
logger.error("[core] Nicht verbunden — Nachricht verworfen: '%s'", text[:60])
|
||||
"""Sendet Text an aria-brain (HTTP /chat) und broadcastet die Antwort.
|
||||
|
||||
Nicht-Streaming: wir warten bis Brain fertig ist, dann pushen wir
|
||||
die komplette Reply via RVS an alle Clients (App + Diagnostic).
|
||||
TTS wird vom Bridge-Code separat angestossen (gleiche Logik wie
|
||||
vorher mit aria-core).
|
||||
"""
|
||||
brain_url = os.environ.get("BRAIN_URL", "http://aria-brain:8080")
|
||||
url = f"{brain_url}/chat"
|
||||
payload = json.dumps({"message": text, "source": source}).encode("utf-8")
|
||||
logger.info("[brain] chat ← %s '%s'", source, text[:80])
|
||||
|
||||
# User-Nachricht in chat_backup.jsonl loggen — wird beim App-Reconnect
|
||||
# / Diagnostic-Reload als History-Quelle gelesen.
|
||||
self._append_chat_backup({"role": "user", "text": text, "source": source})
|
||||
|
||||
# agent_activity → thinking. _emit_activity statt direktem _send_to_rvs
|
||||
# damit der State-Cache fuer die spaetere idle-Dedup richtig steht.
|
||||
await self._emit_activity("thinking", "")
|
||||
|
||||
def _do_call():
|
||||
try:
|
||||
req = urllib.request.Request(
|
||||
url, data=payload, method="POST",
|
||||
headers={"Content-Type": "application/json"},
|
||||
)
|
||||
# Cold-Start kann lange dauern, 5min Timeout
|
||||
with urllib.request.urlopen(req, timeout=300) as resp:
|
||||
return resp.status, resp.read().decode("utf-8", errors="ignore")
|
||||
except Exception as exc:
|
||||
return None, str(exc)
|
||||
|
||||
status, body = await asyncio.get_event_loop().run_in_executor(None, _do_call)
|
||||
if status != 200:
|
||||
logger.error("[brain] /chat fehlgeschlagen: status=%s body=%s", status, body[:200])
|
||||
await self._emit_activity("idle", "")
|
||||
await self._send_to_rvs({
|
||||
"type": "chat",
|
||||
"payload": {
|
||||
"text": f"[Brain-Fehler] {body[:200] or 'unbekannt'}",
|
||||
"sender": "aria",
|
||||
},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
return
|
||||
|
||||
# Aktive Session vom Diagnostic holen
|
||||
self._fetch_active_session()
|
||||
try:
|
||||
data = json.loads(body)
|
||||
except Exception:
|
||||
logger.error("[brain] /chat lieferte ungueltiges JSON: %s", body[:200])
|
||||
await self._emit_activity("idle", "")
|
||||
return
|
||||
|
||||
req_id = self._next_req_id()
|
||||
message = json.dumps({
|
||||
"type": "req",
|
||||
"id": req_id,
|
||||
"method": "chat.send",
|
||||
"params": {
|
||||
"sessionKey": self._session_key,
|
||||
"message": text,
|
||||
"idempotencyKey": str(uuid.uuid4()),
|
||||
reply = (data.get("reply") or "").strip()
|
||||
if not reply:
|
||||
logger.warning("[brain] /chat: leerer Reply")
|
||||
await self._emit_activity("idle", "")
|
||||
return
|
||||
|
||||
# Side-Channel-Events VOR der Chat-Bubble broadcasten (z.B. skill_created)
|
||||
# damit sie in der UI vor der Reply auftauchen
|
||||
for event in data.get("events", []) or []:
|
||||
etype = event.get("type")
|
||||
if etype == "skill_created":
|
||||
await self._send_to_rvs({
|
||||
"type": "skill_created",
|
||||
"payload": event.get("skill", {}),
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
logger.info("[brain] ARIA hat einen Skill erstellt: %s",
|
||||
event.get("skill", {}).get("name"))
|
||||
elif etype == "trigger_created":
|
||||
await self._send_to_rvs({
|
||||
"type": "trigger_created",
|
||||
"payload": event.get("trigger", {}),
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
logger.info("[brain] ARIA hat einen Trigger angelegt: %s",
|
||||
event.get("trigger", {}).get("name"))
|
||||
elif etype == "location_tracking":
|
||||
# ARIA bittet die App das GPS-Tracking ein-/auszuschalten
|
||||
await self._send_to_rvs({
|
||||
"type": "location_tracking",
|
||||
"payload": {
|
||||
"on": bool(event.get("on")),
|
||||
"reason": event.get("reason") or "",
|
||||
},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
logger.info("[brain] location_tracking Request: on=%s (%s)",
|
||||
event.get("on"), event.get("reason", ""))
|
||||
|
||||
# _process_core_response uebernimmt alles weitere:
|
||||
# File-Marker extrahieren + broadcasten, NO_REPLY-Check, Chat-
|
||||
# Broadcast an RVS, TTS, agent_activity idle. Wir geben das
|
||||
# raw payload mit dem reply rein damit Mode/voice-Metadata
|
||||
# passend behandelt wird (hier minimal, weil Brain noch keine
|
||||
# metadata mitschickt).
|
||||
try:
|
||||
await self._process_core_response(reply, {})
|
||||
except Exception:
|
||||
logger.exception("[brain] _process_core_response Fehler")
|
||||
await self._emit_activity("idle", "")
|
||||
# Originaler Fallback-Send (toter Code, _emit_activity uebernimmt jetzt)
|
||||
await self._send_to_rvs({
|
||||
"type": "agent_activity",
|
||||
"payload": {"activity": "idle"},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
|
||||
try:
|
||||
await self.ws_core.send(message)
|
||||
logger.info("[core] chat.send (%s, id=%s): '%s'", source, req_id, text[:80])
|
||||
except Exception:
|
||||
logger.exception("[core] Sendefehler")
|
||||
if data.get("distilling"):
|
||||
logger.info("[brain] Destillat laeuft im Hintergrund")
|
||||
|
||||
# ── RVS Verbindung (App-Relay) ──────────────────────────
|
||||
|
||||
@@ -1141,7 +1412,8 @@ class ARIABridge:
|
||||
try:
|
||||
url = f"{current_url}?token={self.rvs_token}"
|
||||
logger.info("[rvs] Verbinde: %s", current_url)
|
||||
async with websockets.connect(url) as ws:
|
||||
# max_size=50MB (siehe core-Connect oben — gleicher Grund).
|
||||
async with websockets.connect(url, max_size=50 * 1024 * 1024) as ws:
|
||||
self.ws_rvs = ws
|
||||
retry_delay = 2
|
||||
logger.info("[rvs] Verbunden — warte auf App-Nachrichten")
|
||||
@@ -1258,6 +1530,9 @@ class ARIABridge:
|
||||
if text:
|
||||
interrupted = bool(payload.get("interrupted", False))
|
||||
location = payload.get("location") or None
|
||||
# State persist fuer Brain-Watcher (current_lat, ..., last_user_ts)
|
||||
self._persist_location(location)
|
||||
self._persist_user_activity()
|
||||
# Wenn Files gerade gepuffert sind (Bild + Text gleichzeitig
|
||||
# gesendet), mergen wir sie zu einer einzigen Anfrage statt
|
||||
# zwei separater send_to_core-Calls.
|
||||
@@ -1461,6 +1736,31 @@ class ARIABridge:
|
||||
size_kb = len(file_b64) // 1365
|
||||
logger.info("[rvs] Datei gespeichert: %s (%dKB)", file_path, size_kb)
|
||||
|
||||
# Pixel-Bilder fuer Claude-Vision shrinken wenn > 2 MB. SVG/PDF/ZIP
|
||||
# bleiben unangetastet (Vision laeuft eh nur auf Raster-Formaten).
|
||||
CLAUDE_VISION_FORMATS = ("image/jpeg", "image/jpg", "image/png", "image/webp", "image/gif")
|
||||
if file_type.lower() in CLAUDE_VISION_FORMATS:
|
||||
file_size_bytes = os.path.getsize(file_path)
|
||||
if file_size_bytes > 2 * 1024 * 1024:
|
||||
try:
|
||||
from PIL import Image
|
||||
with Image.open(file_path) as img:
|
||||
orig_w, orig_h = img.size
|
||||
# Anthropic-Empfehlung: max 1568px lange Seite. RGB-Konvertierung
|
||||
# falls RGBA/Palette (JPEG braucht RGB).
|
||||
img.thumbnail((1568, 1568), Image.Resampling.LANCZOS)
|
||||
if img.mode in ("RGBA", "P"):
|
||||
img = img.convert("RGB")
|
||||
img.save(file_path, "JPEG", quality=85, optimize=True)
|
||||
new_size_bytes = os.path.getsize(file_path)
|
||||
logger.info("[rvs] Bild verkleinert: %dx%d → %dx%d, %.1fMB → %.1fMB",
|
||||
orig_w, orig_h, img.size[0], img.size[1],
|
||||
file_size_bytes / 1024 / 1024,
|
||||
new_size_bytes / 1024 / 1024)
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] Bild-Resize fehlgeschlagen (%s) — Original wird genutzt: %s",
|
||||
file_name, e)
|
||||
|
||||
# In Pending-Queue + Flush-Timer (anti-spam Buffering)
|
||||
self._pending_files.append((file_path, file_name, file_type, size_kb, int(width or 0), int(height or 0)))
|
||||
if self._pending_files_flush_task and not self._pending_files_flush_task.done():
|
||||
@@ -1478,6 +1778,200 @@ class ARIABridge:
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] file_saved konnte nicht an App gesendet werden: %s", e)
|
||||
|
||||
elif msg_type == "chat_history_request":
|
||||
# App holt verpasste Nachrichten beim Reconnect.
|
||||
# payload: {since: <ts_ms>}, default 0 = alles
|
||||
since = int(payload.get("since") or 0)
|
||||
limit = int(payload.get("limit") or 100)
|
||||
logger.info("[rvs] chat_history_request since=%d limit=%d", since, limit)
|
||||
messages = self._read_chat_backup_since(since, limit=limit)
|
||||
await self._send_to_rvs({
|
||||
"type": "chat_history_response",
|
||||
"payload": {"messages": messages, "since": since},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
return
|
||||
|
||||
elif msg_type == "file_list_request":
|
||||
# App fragt die Liste aller /shared/uploads/-Dateien an.
|
||||
logger.info("[rvs] file_list_request von App")
|
||||
try:
|
||||
req = urllib.request.Request(
|
||||
"http://localhost:3001/api/files-list",
|
||||
method="GET",
|
||||
)
|
||||
def _do_list():
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||
return json.loads(resp.read().decode("utf-8", errors="ignore"))
|
||||
except Exception as e:
|
||||
return {"ok": False, "error": str(e)}
|
||||
d = await asyncio.get_event_loop().run_in_executor(None, _do_list)
|
||||
await self._send_to_rvs({
|
||||
"type": "file_list_response",
|
||||
"payload": d,
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] file_list_request: %s", e)
|
||||
return
|
||||
|
||||
elif msg_type == "file_delete_batch_request":
|
||||
# App will mehrere Dateien auf einmal loeschen.
|
||||
paths = payload.get("paths") or []
|
||||
req_id = payload.get("requestId", "")
|
||||
logger.warning("[rvs] file_delete_batch_request: %d Pfade", len(paths))
|
||||
try:
|
||||
body_bytes = json.dumps({"paths": paths}).encode("utf-8")
|
||||
req = urllib.request.Request(
|
||||
"http://localhost:3001/api/files-delete-batch",
|
||||
data=body_bytes, method="POST",
|
||||
headers={"Content-Type": "application/json"},
|
||||
)
|
||||
def _do_delete():
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=30) as resp:
|
||||
return resp.status, resp.read().decode("utf-8", errors="ignore")
|
||||
except Exception as e:
|
||||
return None, str(e)
|
||||
status, body = await asyncio.get_event_loop().run_in_executor(None, _do_delete)
|
||||
logger.info("[rvs] file_delete_batch result: status=%s", status)
|
||||
# Server broadcastet file_deleted pro Pfad — App kriegt das via persistente RVS.
|
||||
# Wir bestaetigen zusaetzlich mit Counts.
|
||||
try: d = json.loads(body or "{}")
|
||||
except: d = {}
|
||||
await self._send_to_rvs({
|
||||
"type": "file_delete_batch_response",
|
||||
"payload": {
|
||||
"requestId": req_id,
|
||||
"deleted": len(d.get("deleted", [])),
|
||||
"errors": d.get("errors", []),
|
||||
},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] file_delete_batch_request: %s", e)
|
||||
return
|
||||
|
||||
elif msg_type == "file_zip_request":
|
||||
# App will mehrere Dateien als ZIP. Bridge holt ZIP von Diagnostic
|
||||
# via HTTP, kodiert base64 und schickt zurueck. Cap auf 30 MB
|
||||
# ZIP-Groesse damit RVS nicht erstickt.
|
||||
paths = payload.get("paths") or []
|
||||
req_id = payload.get("requestId", "")
|
||||
logger.warning("[rvs] file_zip_request: %d Pfade (req=%s)", len(paths), req_id)
|
||||
|
||||
def _do_zip():
|
||||
try:
|
||||
body_bytes = json.dumps({"paths": paths}).encode("utf-8")
|
||||
req = urllib.request.Request(
|
||||
"http://localhost:3001/api/files-download-zip",
|
||||
data=body_bytes, method="POST",
|
||||
headers={"Content-Type": "application/json"},
|
||||
)
|
||||
with urllib.request.urlopen(req, timeout=120) as resp:
|
||||
if resp.status != 200:
|
||||
return None, f"HTTP {resp.status}"
|
||||
data = resp.read()
|
||||
if len(data) > 30 * 1024 * 1024:
|
||||
return None, f"ZIP zu gross ({len(data) // (1024*1024)} MB > 30 MB)"
|
||||
return data, None
|
||||
except Exception as e:
|
||||
return None, str(e)
|
||||
|
||||
data, err = await asyncio.get_event_loop().run_in_executor(None, _do_zip)
|
||||
if err or data is None:
|
||||
await self._send_to_rvs({
|
||||
"type": "file_zip_response",
|
||||
"payload": {"requestId": req_id, "ok": False, "error": err or "leer"},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
return
|
||||
import base64 as _b64
|
||||
await self._send_to_rvs({
|
||||
"type": "file_zip_response",
|
||||
"payload": {
|
||||
"requestId": req_id, "ok": True,
|
||||
"size": len(data),
|
||||
"data": _b64.b64encode(data).decode("ascii"),
|
||||
},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
return
|
||||
|
||||
elif msg_type == "file_delete_request":
|
||||
# App will eine Datei loeschen — leite an Diagnostic.
|
||||
p = payload.get("path", "")
|
||||
logger.warning("[rvs] file_delete_request von App: %s", p)
|
||||
try:
|
||||
body_bytes = json.dumps({"path": p}).encode("utf-8")
|
||||
req = urllib.request.Request(
|
||||
"http://localhost:3001/api/files-delete",
|
||||
data=body_bytes,
|
||||
method="POST",
|
||||
headers={"Content-Type": "application/json"},
|
||||
)
|
||||
def _do_delete():
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||
return resp.status, resp.read().decode("utf-8", errors="ignore")
|
||||
except Exception as e:
|
||||
return None, str(e)
|
||||
status, body = await asyncio.get_event_loop().run_in_executor(None, _do_delete)
|
||||
logger.info("[rvs] file_delete_request %s: status=%s", p, status)
|
||||
# Diagnostic broadcastet file_deleted via sendToRVS_raw — kommt
|
||||
# ueber den persistenten WS-Path zur App. Wir bestaetigen
|
||||
# zusaetzlich, damit der Caller sicher ist dass es durch ist.
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] file_delete_request: %s", e)
|
||||
return
|
||||
|
||||
elif msg_type == "location_update":
|
||||
# Live-GPS-Update von der App (nicht an Chat gekoppelt). Wird in
|
||||
# /shared/state/location.json geschrieben, damit Watcher-Trigger
|
||||
# near()-Conditions auswerten koennen.
|
||||
lat = payload.get("lat")
|
||||
lon = payload.get("lon") or payload.get("lng")
|
||||
if lat is not None and lon is not None:
|
||||
self._persist_location({"lat": lat, "lon": lon})
|
||||
logger.debug("[gps] location_update: %.5f, %.5f", float(lat), float(lon))
|
||||
return
|
||||
|
||||
elif msg_type == "container_restart":
|
||||
# App-Button "Container neu" — leitet generisch an Diagnostic
|
||||
# weiter. Whitelist ist im Diagnostic-Server.
|
||||
name = payload.get("name", "")
|
||||
logger.warning("[rvs] container_restart Request von App: %s", name)
|
||||
try:
|
||||
body_bytes = json.dumps({"name": name}).encode("utf-8")
|
||||
req = urllib.request.Request(
|
||||
"http://localhost:3001/api/container-restart",
|
||||
data=body_bytes,
|
||||
method="POST",
|
||||
headers={"Content-Type": "application/json"},
|
||||
)
|
||||
def _do_restart():
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=45) as resp:
|
||||
return resp.status, resp.read().decode("utf-8", errors="ignore")
|
||||
except Exception as e:
|
||||
return None, str(e)
|
||||
status, body = await asyncio.get_event_loop().run_in_executor(None, _do_restart)
|
||||
logger.info("[rvs] container_restart %s Result: status=%s", name, status)
|
||||
ok = status == 200
|
||||
await self._send_to_rvs({
|
||||
"type": "chat",
|
||||
"payload": {
|
||||
"text": f"[Container] {name} neu gestartet." if ok
|
||||
else f"[Container] Restart {name} fehlgeschlagen: {body[:200]}",
|
||||
"sender": "aria",
|
||||
},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] container_restart Weiterleitung fehlgeschlagen: %s", e)
|
||||
return
|
||||
|
||||
elif msg_type == "file_request":
|
||||
# App fordert eine Datei an (Re-Download nach Cache-Leerung)
|
||||
server_path = payload.get("serverPath", "")
|
||||
@@ -1495,6 +1989,7 @@ class ARIABridge:
|
||||
return
|
||||
with open(server_path, "rb") as f:
|
||||
file_b64 = base64.b64encode(f.read()).decode("ascii")
|
||||
mime, _ = mimetypes.guess_type(server_path)
|
||||
logger.info("[rvs] Re-Download: %s (%dKB)", server_path, len(file_b64) // 1365)
|
||||
await self._send_to_rvs({
|
||||
"type": "file_response",
|
||||
@@ -1503,6 +1998,7 @@ class ARIABridge:
|
||||
"serverPath": server_path,
|
||||
"base64": file_b64,
|
||||
"name": os.path.basename(server_path),
|
||||
"mimeType": mime or "application/octet-stream",
|
||||
},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
@@ -1530,6 +2026,9 @@ class ARIABridge:
|
||||
interrupted = bool(payload.get("interrupted", False))
|
||||
audio_request_id = payload.get("audioRequestId", "") or ""
|
||||
location = payload.get("location") or None
|
||||
# State persist fuer Brain-Watcher (current_lat etc.)
|
||||
self._persist_location(location)
|
||||
self._persist_user_activity()
|
||||
logger.info("[rvs] Audio empfangen: %s, %dms, %dKB%s%s%s",
|
||||
mime_type, duration_ms, len(audio_b64) // 1365,
|
||||
" [BARGE-IN]" if interrupted else "",
|
||||
@@ -1620,13 +2119,11 @@ class ARIABridge:
|
||||
|
||||
if text.strip():
|
||||
logger.info("[rvs] STT Ergebnis: '%s'", text[:80])
|
||||
# Hints (Barge-In, GPS) als Praefix vorschalten — gemeinsamer Helper
|
||||
# mit dem chat-Pfad damit das Verhalten konsistent ist.
|
||||
core_text = self._build_core_text(text, interrupted, location)
|
||||
# ERST an aria-core senden (wichtigster Schritt)
|
||||
await self.send_to_core(core_text, source="app-voice" + (" [barge-in]" if interrupted else ""))
|
||||
# STT-Text an RVS senden (fuer Anzeige in App + Diagnostic)
|
||||
# sender="stt" damit Bridge es ignoriert (kein Loop)
|
||||
|
||||
# Reihenfolge wichtig: STT-Text ZUERST broadcasten damit die App
|
||||
# die Voice-Bubble sofort mit dem erkannten Text aktualisieren
|
||||
# kann — send_to_core blockt danach synchron auf Brain (kann
|
||||
# dauern), wuerde sonst die Anzeige verzoegern.
|
||||
try:
|
||||
stt_payload = {
|
||||
"text": text,
|
||||
@@ -1634,6 +2131,11 @@ class ARIABridge:
|
||||
}
|
||||
if audio_request_id:
|
||||
stt_payload["audioRequestId"] = audio_request_id
|
||||
# GPS aus dem Original-Audio-Payload mitgeben — Diagnostic
|
||||
# zeigt sie sonst nicht an (App sendet location nur einmal,
|
||||
# die im audio-Payload). Reine Anzeige-Information.
|
||||
if location:
|
||||
stt_payload["location"] = location
|
||||
ok = await self._send_to_rvs({
|
||||
"type": "chat",
|
||||
"payload": stt_payload,
|
||||
@@ -1645,6 +2147,10 @@ class ARIABridge:
|
||||
logger.warning("[rvs] STT-Text NICHT broadcastet — _send_to_rvs lieferte False")
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] STT-Text konnte nicht an RVS gesendet werden: %s", e)
|
||||
|
||||
# Dann an Brain — der blockt synchron bis ARIA fertig ist.
|
||||
core_text = self._build_core_text(text, interrupted, location)
|
||||
await self.send_to_core(core_text, source="app-voice" + (" [barge-in]" if interrupted else ""))
|
||||
else:
|
||||
logger.info("[rvs] Keine Sprache erkannt — ignoriert")
|
||||
|
||||
@@ -1896,7 +2402,8 @@ class ARIABridge:
|
||||
self.running = True
|
||||
|
||||
tasks = [
|
||||
asyncio.create_task(self.connect_to_core()),
|
||||
# connect_to_core entfaellt — Bridge ruft jetzt aria-brain ueber
|
||||
# HTTP (siehe send_to_core). Keine persistente WS-Verbindung mehr.
|
||||
asyncio.create_task(self.connect_to_rvs()),
|
||||
]
|
||||
|
||||
|
||||
@@ -16,3 +16,6 @@ sounddevice
|
||||
|
||||
# Wake-Word Erkennung
|
||||
openwakeword
|
||||
|
||||
# Bild-Resizing (zu grosse Pixel-Bilder shrinken bevor Claude-Vision sie sieht — 5MB-Limit)
|
||||
Pillow
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
FROM node:22-alpine
|
||||
WORKDIR /app
|
||||
# zip fuer Multi-Datei-Downloads (Brain-Export nutzt tar.gz, Datei-Manager zip)
|
||||
RUN apk add --no-cache zip
|
||||
COPY package.json ./
|
||||
RUN npm install --production
|
||||
COPY . .
|
||||
|
||||
+2002
-486
File diff suppressed because it is too large
Load Diff
+623
-709
File diff suppressed because it is too large
Load Diff
+44
-37
@@ -28,38 +28,40 @@ services:
|
||||
networks:
|
||||
- aria-net
|
||||
|
||||
# ─── OpenClaw (ARIA Gehirn) ─────────────────────────────
|
||||
aria:
|
||||
image: ghcr.io/openclaw/openclaw:latest
|
||||
container_name: aria-core
|
||||
hostname: aria-wohnung
|
||||
privileged: true # ARIAs Wohnung — sie hat die Schlüssel
|
||||
# ─── Qdrant (Vector-DB fuer ARIAs Gedaechtnis) ────────
|
||||
# Storage liegt im Repo-Bind-Mount aria-data/brain/qdrant.
|
||||
# Damit Backup/Export/Import komplett ueber das Filesystem gehen.
|
||||
qdrant:
|
||||
image: qdrant/qdrant:latest
|
||||
container_name: aria-qdrant
|
||||
volumes:
|
||||
- ./aria-data/brain/qdrant:/qdrant/storage
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- aria-net
|
||||
|
||||
# ─── ARIA Brain (Agent + Memory) ─────────────────────────
|
||||
# Loest das alte aria-core (OpenClaw) ab. Vector-DB-basiertes
|
||||
# Memory, eigener Agent-Loop, SSH zur aria-wohnung-VM.
|
||||
brain:
|
||||
build: ./aria-brain
|
||||
container_name: aria-brain
|
||||
hostname: aria-wohnung-brain # damit ssh known_hosts stabil bleibt
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway" # Zugriff auf die VM via SSH
|
||||
depends_on:
|
||||
- qdrant
|
||||
- proxy
|
||||
ports:
|
||||
- "3001:3001" # Diagnostic Web-UI (laeuft im shared network)
|
||||
environment:
|
||||
- CANVAS_HOST=127.0.0.1
|
||||
- OPENCLAW_GATEWAY_TOKEN=${ARIA_AUTH_TOKEN}
|
||||
- DEFAULT_MODEL=proxy/claude-sonnet-4
|
||||
- RATE_LIMIT_PER_USER=30
|
||||
- DISPLAY=:0
|
||||
- QDRANT_HOST=aria-qdrant
|
||||
- QDRANT_PORT=6333
|
||||
- PROXY_URL=http://proxy:3456
|
||||
- ARIA_AUTH_TOKEN=${ARIA_AUTH_TOKEN:-}
|
||||
volumes:
|
||||
- openclaw-config:/home/node/.openclaw # OpenClaw Config (persistiert Model + Auth)
|
||||
- ./aria-data/brain:/home/node/.openclaw/workspace/memory
|
||||
- ./aria-data/skills:/home/node/.openclaw/workspace/skills
|
||||
- ./aria-data/config/AGENT.md:/home/node/.openclaw/workspace/AGENT.md
|
||||
- ./aria-data/config/USER.md:/home/node/.openclaw/workspace/USER.md
|
||||
- ./aria-data/config/BOOTSTRAP.md:/home/node/.openclaw/workspace/BOOTSTRAP.md
|
||||
- ./aria-data/config/BOOTSTRAP.md:/home/node/.openclaw/workspace/CLAUDE.md
|
||||
- ./aria-data/config/openclaw.env:/home/node/.openclaw/workspace/.env
|
||||
- claude-config:/home/node/.claude # Claude Code Settings (Permissions)
|
||||
- ./aria-data/ssh:/home/node/.ssh # SSH Keys fuer VM-Zugriff
|
||||
- /tmp/.X11-unix:/tmp/.X11-unix
|
||||
- /var/run/docker.sock:/var/run/docker.sock # VM von innen verwalten
|
||||
- aria-shared:/shared # Shared Volume fuer Datei-Austausch (Bridge <> Core)
|
||||
- ./aria-data/brain/data:/data # Memory-Cache + Skills + Models (bind-mount fuer Export)
|
||||
- ./aria-data/brain-import:/import:ro # Quell-MDs fuer den initialen Memory-Import (read-only)
|
||||
- ./aria-data/ssh:/root/.ssh # SSH-Keys fuer aria-wohnung (geteilt mit Proxy)
|
||||
- aria-shared:/shared # gleicher Austausch-Speicher wie Bridge
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- aria-net
|
||||
@@ -69,11 +71,13 @@ services:
|
||||
build: ./bridge
|
||||
container_name: aria-bridge
|
||||
depends_on:
|
||||
- aria
|
||||
network_mode: "service:aria" # Teilt Netzwerk mit aria-core → localhost:18789
|
||||
- brain
|
||||
networks:
|
||||
- aria-net
|
||||
ports:
|
||||
- "3001:3001" # Diagnostic Web-UI (Diagnostic teilt Netzwerk mit Bridge)
|
||||
volumes:
|
||||
- ./aria-data/config/aria.env:/config/aria.env
|
||||
- aria-shared:/shared # Shared Volume fuer Datei-Austausch (Bridge <> Core)
|
||||
- aria-shared:/shared # Shared Volume fuer Datei-Austausch
|
||||
# Audio-Zugriff
|
||||
- /run/user/1000/pulse:/run/user/1000/pulse
|
||||
- /dev/snd:/dev/snd
|
||||
@@ -81,6 +85,7 @@ services:
|
||||
- /dev/snd
|
||||
environment:
|
||||
- PULSE_SERVER=unix:/run/user/1000/pulse/native
|
||||
- BRAIN_URL=http://aria-brain:8080
|
||||
- ARIA_AUTH_TOKEN=${ARIA_AUTH_TOKEN:-}
|
||||
- RVS_HOST=${RVS_HOST:-}
|
||||
- RVS_PORT=${RVS_PORT:-443}
|
||||
@@ -90,19 +95,23 @@ services:
|
||||
restart: unless-stopped
|
||||
|
||||
# ─── Diagnostic (Selbstcheck-UI und Einstellungen) ────
|
||||
# Teilt Netzwerk mit Bridge, damit der Diagnostic-Server die
|
||||
# Bridge auf localhost erreichen kann.
|
||||
diagnostic:
|
||||
build: ./diagnostic
|
||||
container_name: aria-diagnostic
|
||||
depends_on:
|
||||
- aria
|
||||
network_mode: "service:aria" # Teilt Netzwerk mit aria-core → localhost:18789
|
||||
- bridge
|
||||
network_mode: "service:bridge"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock # Container Restart + Brain-Export/Import
|
||||
- ./aria-data/config/diag-state:/data # Persistenter State (aktive Session etc.)
|
||||
- aria-shared:/shared # Shared Volume (Uploads + Config)
|
||||
- aria-shared:/shared # Shared Volume (Uploads + Config + Voices)
|
||||
- ./aria-data/brain:/brain # Brain-Export/Import (tar.gz aus Bind-Mount)
|
||||
environment:
|
||||
- ARIA_AUTH_TOKEN=${ARIA_AUTH_TOKEN:-}
|
||||
- PROXY_URL=http://proxy:3456
|
||||
- BRAIN_URL=http://aria-brain:8080
|
||||
- RVS_HOST=${RVS_HOST:-}
|
||||
- RVS_PORT=${RVS_PORT:-443}
|
||||
- RVS_TLS=${RVS_TLS:-true}
|
||||
@@ -111,9 +120,7 @@ services:
|
||||
restart: unless-stopped
|
||||
|
||||
volumes:
|
||||
openclaw-config: # Persistiert ~/.openclaw (Model, Auth, Sessions)
|
||||
claude-config: # Persistiert ~/.claude (Permissions, Settings)
|
||||
aria-shared: # Datei-Austausch zwischen Bridge und Core
|
||||
aria-shared: # Datei-Austausch zwischen Bridge / Brain / Diagnostic
|
||||
|
||||
networks:
|
||||
aria-net:
|
||||
|
||||
@@ -0,0 +1,31 @@
|
||||
#!/bin/bash
|
||||
# ════════════════════════════════════════════════════════════
|
||||
# ARIA — Setup-Script
|
||||
#
|
||||
# Aktuell nur noch der .env-Bootstrap (Tokens + RVS). Alle weiteren
|
||||
# Settings landen ueber die Diagnostic in /shared/config/runtime.json
|
||||
# (persistent in der "Datenbank").
|
||||
#
|
||||
# Im Phase-A-Cleanup-Status: System-Prompt-Files liegen unter
|
||||
# aria-data/brain-import/ und werden vom neuen Agent-Framework
|
||||
# spaeter importiert. OpenClaw laeuft noch ohne Persoenlichkeit.
|
||||
# ════════════════════════════════════════════════════════════
|
||||
|
||||
set -e
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
if [ ! -f .env ]; then
|
||||
if [ -f .env.example ]; then
|
||||
cp .env.example .env
|
||||
echo "✓ .env erstellt aus .env.example — Tokens jetzt eintragen!"
|
||||
else
|
||||
echo "⚠ Keine .env.example gefunden — manuell anlegen."
|
||||
fi
|
||||
else
|
||||
echo ".env existiert bereits — uebersprungen."
|
||||
fi
|
||||
|
||||
# ── Brain-Verzeichnisse anlegen (Bind-Mounts fuer aria-brain + aria-qdrant)
|
||||
# Inhalt ist gitignored — wird ueber Diagnostic-Export/Import gesichert.
|
||||
mkdir -p aria-data/brain/data aria-data/brain/qdrant
|
||||
echo "✓ aria-data/brain/{data,qdrant} bereit"
|
||||
@@ -55,6 +55,15 @@ Wichtige Mechanismen:
|
||||
|
||||
### Bugs / Fixes
|
||||
|
||||
- [x] **Timer "in 2 Minuten" wird wieder angelegt**: ARIA hatte keine Moeglichkeit die aktuelle Zeit zu kennen — kein Bash-Tool, kein Time-Tool, kein Timestamp im System-Prompt. Die Tool-Beschreibung von `trigger_timer` empfahl sogar `date -u -d '+10 minutes'` via Bash, aber Bash gab's nicht. Folge: LLM liess den Tool-Call entweder weg oder riet einen Cutoff-Zeitstempel (Vergangenheit) → Background-Loop feuerte beim naechsten 30s-Tick sofort statt in 2min. Fix: (1) `build_time_section()` in `prompts.py` injiziert UTC + lokale Europa/Berlin-Zeit als `## Aktuelle Zeit`-Block oben im System-Prompt. (2) `trigger_timer` akzeptiert jetzt `in_seconds` als Alternative zu `fires_at` — Server rechnet den absoluten Timestamp, ARIA muss nicht ISO-rechnen
|
||||
- [x] **"ARIA denkt..." haengt nach Brain-Antwort** (App + Diagnostic): `send_to_core` schickte `thinking` direkt via `_send_to_rvs`, hat aber `_last_activity_state` nicht gepflegt — der spaetere `_emit_activity("idle")` wurde dedupliziert und verschluckt. Fix: durchgehend `_emit_activity` fuer beide Zustaende
|
||||
- [x] **Such-Scroll in App-Chat springt jetzt zur Treffer-Bubble**: `scrollToIndex` wurde zu frueh gerufen + `viewPosition: 0.4` schoss vorbei. Fix: `requestAnimationFrame` + `viewPosition: 0.5` + `onScrollToIndexFailed`-Fallback mit averageItemLength-Schaetzung + 250ms-Retry
|
||||
- [x] **STT-Bubble bekommt den Text jetzt sofort** (nicht erst mit ARIAs Antwort): `_process_app_audio` rief erst `send_to_core` (blockt synchron) und DANN STT-Broadcast. Fix: Reihenfolge getauscht — STT raus, dann Core-Call
|
||||
- [x] **ARIA-Antworten landen wieder in der Diagnostic**: `if (sender === 'aria') return;` im `rvs_chat`-Handler war OpenClaw-Leiche und filterte die neuen Brain-Antworten weg. Fix: aria → received-Bubble
|
||||
- [x] **Brain-Card im Main-Tab zeigt jetzt Live-Status**: `updateState` ueberschrieb die Card mit altem `state.gateway`-Text aus OpenClaw-Zeiten. Fix: `updateState` laesst Brain-Card unangetastet, `loadBrainStatus` synchronisiert beide Cards (Main + Gehirn-Tab) alle 15s
|
||||
- [x] **App-Chat-Sync zeigte veralteten Stand**: `since:lastSync` war diff-only — wenn Server geleert war, blieb die App-History stehen. Fix: `since:0, limit:200` komplett-Replace (Server = Source of Truth). Lokal-only Bubbles (Skill-Notifications, laufende Voice ohne STT) bleiben erhalten
|
||||
- [x] **Konversation-Reset leert jetzt beides**: vorher leerte der Button nur das Brain-Memory, `chat_backup.jsonl` blieb. Fix: ein Button feuert `Promise.all` auf `/api/brain/conversation/reset` + `/api/chat-history-clear`, plus `chat_cleared`-Broadcast via RVS damit App + Diagnostic sich live leeren
|
||||
- [x] **JS-Crashes beim Diagnostic-Laden behoben**: Ghost-IDs aus OpenClaw-Zeiten (`gw-dot`, `openclaw-config`, `btn-core-term`, `core-auth`, `perms-status`, `rc-compact-after`) wurden null-referenziert. Fix: null-safe oder Code raus
|
||||
- [x] Diagnostic: "ARIA denkt..." bleibt nicht mehr stehen
|
||||
- [x] App: "ARIA denkt..." Indicator + Abbrechen-Button (Bridge spiegelt agent_activity via RVS)
|
||||
- [x] Textnachrichten werden von ARIA beantwortet (Bridge chat handler fix)
|
||||
@@ -85,6 +94,36 @@ Wichtige Mechanismen:
|
||||
- [x] **App-Resume-Cooldown**: Wechsel von Background → Foreground triggert keinen falschen Wake-Word-Trigger mehr. AppState-Listener setzt 1.5s Cooldown in dem onWakeDetected-Events ignoriert werden (Audio-Pegel-Spike beim AudioFocus-Switch sonst als Wake-Word interpretiert)
|
||||
- [x] Background-Mikro robust: acquireBackgroundAudio('rec'/'wake') wird jetzt VOR AudioRecord.startRecording gerufen — Foreground-Service mit foregroundServiceType=microphone muss aktiv sein bevor das Mikro greift, sonst blockiert Android ab 11+ den Background-Zugriff
|
||||
- [x] **Stille-Pegel manuell setzbar** (Settings → Spracheingabe): Override-Wert in dB von -55 bis -15, default "automatisch". Info-Button mit Modal erklaert die Skala (niedriger = sensibler, hoeher = robuster gegen Hintergrundlaerm). Bei manuell gesetztem Wert wird die adaptive Baseline ignoriert
|
||||
- [x] **Kurze TTS-Texte (1-3 Worte) spielen jetzt ab** — auf OnePlus A12 stallte AudioTrack mit `pos=0` weil der Default-Start-Threshold `bufferSize/2` (= 2s) bei kurzen Streams nie ueberschritten wurde. Fix: `setStartThresholdInFrames(100ms)` direkt nach dem Track-Build (API 31+). Buffer auf 4s entkoppelt von Pre-Roll, `play()` wird beim allerersten data-chunk gerufen
|
||||
- [x] **Mute-Button stoppt jetzt auch laufenden PCM-Stream** — `pcmStreamActive` wurde beim isFinal-Chunk schon false gesetzt, der AudioTrack spielte aber noch sekundenlang aus seinem Buffer. `stopPlayback()` uebersprang darum `PcmStreamPlayer.stop()`. Fix: stop() immer rufen (ist idempotent), kein Flag-Check mehr
|
||||
- [x] **GPS-Permission im Manifest + Runtime-Request** beim Settings-Toggle — vorher fehlten ACCESS_COARSE_LOCATION / ACCESS_FINE_LOCATION komplett. `Geolocation.getCurrentPosition` schlug lautlos fehl, App sendete nie ein location-Feld
|
||||
- [x] **GPS-Position auch im STT-Payload an Diagnostic** — die App sendet location einmal im audio-Payload. Die Bridge nutzte sie zwar (ging in aria-core's Kontext rein), reichte sie aber nicht im STT-broadcast an Diagnostic durch. Diagnostic zeigte darum bei Spracheingaben nie den GPS-Block, obwohl der "GPS einblenden"-Toggle aktiv war
|
||||
- [x] **Auto-Resume nach Anruf — pcmBuffer bleibt erhalten**: `haltAllPlayback` leerte den pcmBuffer mid-Anruf, isFinal schrieb dann eine leere WAV. Neue `pauseForCall`-Methode statt `haltAllPlayback`: AudioTrack stoppt + Focus released, `pcmBuffer` und `pcmMessageId` bleiben — chunks werden weiter gesammelt damit isFinal die WAV schreibt und resumeFromInterruption sie findet. Plus `captureInterruption` idempotent gemacht (ringing → offhook ueberschreibt nicht)
|
||||
- [x] **Replay-Resume nach Anruf**: `_firePlaybackStarted` ueberschrieb `currentPlaybackMsgId` mit leerem pcmMessageId — captureInterruption hatte nichts zu merken. Plus Regex `[0-9a-f-]+\.wav` matchte nicht alle Dateinamen. Plus `_playFromPathAtPosition` aktualisiert jetzt das Tracking damit ein zweiter Anruf in derselben Antwort auch funktioniert
|
||||
- [x] **`pauseForCall` setzt `isPlaying` zurueck**: vorher haengten weitere Play-Button-Klicks nach Anruf, weil `playAudio` bei `isPlaying=true` den `_playNext`-Pfad ueberspringt
|
||||
- [x] **Play-Button rendert neu wenn Cache-Datei weg ist**: vorher checkte der Button nur `if (item.audioPath)` — auf eine geloeschte Cache-Datei zeigte das aber stillschweigend ins Leere. Jetzt RNFS.exists-Check mit Fallback auf `tts_request` an die Bridge → F5-TTS rendert neu, WAV wandert zurueck in den Cache
|
||||
- [x] **Bridge WebSocket max_size 50 MB**: Python `websockets.connect` hat 1 MiB Default — Stefan's 4MB JPEG (5.78 MB Base64) sprengte das, Bridge-Connection wurde silent gedroppt. f5tts/whisper-bridges hatten max_size schon, nur aria_bridge war vergessen
|
||||
- [x] **Bridge resized Bilder >2 MB serverseitig auf 1568px**: Claude-Vision-API hat ~5 MB Base64-Limit. Galerie-Bilder via `react-native-image-picker` sind clientseitig schon klein, Buroklammer/DocumentPicker reichte das rohe File durch — Claude lieferte leere Antwort. Pillow im Bridge-Container, nur fuer JPEG/PNG/WebP/GIF (PDFs/ZIPs/SVGs unangetastet)
|
||||
- [x] **Bridge `chat:error` liest auch `errorMessage`**: OpenClaw legt bei state=error den Text dort statt in `error` ab → Bridge meldete generisches "[Fehler] Unbekannt", echter Fehler nur in Container-Logs. Plus: `chat:final` ohne text wird jetzt mit Hinweis-Bubble an die App gemeldet (statt stumm), z.B. wenn Vision das Bild silent ablehnt
|
||||
- [x] **Cache-Cleanup beim App-Start** — orphane `aria_tts_*.wav` Files (>5 min) im CachesDirectoryPath werden weggeraeumt, sammeln sich sonst an wenn Sound mid-playback gestoppt wird (Anruf, Mute, Barge-In) und der completion-Callback nicht feuert. Plus neuer Settings-Button "TTS-Cache leeren" mit Live-Groessenanzeige
|
||||
- [x] **Verbose-Logging-Toggle in Settings → Protokoll**: `console.log` global stummschaltbar (warn/error bleiben aktiv) — spart adb-logcat-Speicher wenn alles laeuft
|
||||
- [x] **800 ms-Delay vor Anruf-Auto-Resume**: ARIA's neuer Focus-Request kollidierte sonst mit Spotify's Auto-Resume nach Anruf-Ende. System haengt noch im IN_CALL→NORMAL-Mode-Uebergang, Spotify sieht Loss → Loss und bleibt pausiert. Mit Delay schafft Spotify den Resume-Schritt, dann pausiert ARIA wieder ordnungsgemaess
|
||||
- [x] **Mute-Button = Stop fuer aktuelle Antwort**: vorher startete eine NEUE PCM-Chunk-Sequenz nach Mute-aus die alte Antwort weiter wo sie war (funktionierte 2x, dann nicht mehr weil isFinal schon kam). Jetzt mit `_stoppedMessageId`-Tracking: bei Mute wird die aktive msgId gemerkt, alle weiteren chunks dieser msgId bleiben silent — auch wenn Mute zurueckgenommen wird. Reset bei neuer msgId, neue Antworten spielen normal
|
||||
- [x] **Spotify resumed nach Mute-Stop**: `stopPlayback` released seinen TRANSIENT-Focus (USAGE_ASSISTANT) sauber → Spotify bekommt GAIN-Event und resumed automatisch. Ein zwischenzeitlich eingebauter `kickReleaseMedia` (USAGE_MEDIA + GAIN) verhinderte das Auto-Resume sogar (Spotify interpretierte es als "user-action stopp") — wieder rausgenommen
|
||||
- [x] **ARIA kann Dateien an User zurueckgeben** (PDFs, Bilder, Office-Docs, Markdown, ZIPs, ...): ARIA setzt am Antwort-Ende `[FILE: /shared/uploads/aria_<name>.<ext>]` Marker, Bridge parsed sie raus (TTS liest's nicht vor) und sendet `file_from_aria`-Event ueber RVS. App zeigt Anhang-Bubble + Klick oeffnet via Android-Intent-Picker (`FileOpenerModule`, FileProvider), Diagnostic zeigt Bubble + PDFs/Bilder neuer Tab, andere als Download. Mehrere Marker = mehrere Bubbles, nicht-existente Marker werden mit Hinweis an User gemeldet (statt silent gedroppt)
|
||||
- [x] **External Bilder/Dateien werden serverseitig persistiert**: ARIA laed externe URLs (Wikipedia, Wiki Commons) mit curl runter und gibt sie via `[FILE: ...]`-Marker zurueck — bleibt permanent im Chat auch wenn die Online-Quelle stirbt. System-Prompt instruiert sie das Pattern zu nutzen
|
||||
- [x] **ARIA-Datei-Bubbles ueberleben Browser-Refresh**: Diagnostic-Server parsed beim `load_chat_history` die Marker aus dem OpenClaw-Session-File und schickt `aria_file`-Eintraege mit, sodass die Anhang-Bubbles nach F5 wiederhergestellt werden. Plus: `/shared/uploads/`-Bildpfade werden im History-Render auch als Inline-Image gerendert (vorher nur in live-Bubbles)
|
||||
- [x] **"ARIA reparieren"-Button** in App + Diagnostic: triggert `openclaw doctor --fix` ueber RVS → Bridge → Diagnostic HTTP-API. Fix fuer stuck Runs ohne SSH
|
||||
- [x] **"ARIA hart neu starten"-Button**: docker compose-Restart ueber Docker-Socket-API im Diagnostic-Server. Mit Confirmation in der App, fuer Faelle wo doctor nicht reicht (alive aber haengender Run)
|
||||
- [x] **Auto-Compact nach N Messages**: bei zu langer Session wirft Linux beim Subprocess-spawn E2BIG (Argument list too long, ~128KB-2MB Limit). Bridge zaehlt User-Messages; bei `COMPACT_AFTER_MESSAGES` (env, default 140) werden Sessions geleert + Container neu gestartet, User bekommt Hinweis-Bubble. Plus manueller "🧹 Konversation komprimieren"-Button in App-Settings und Diagnostic
|
||||
- [x] **`[FILE: ...]`-Marker-Filter ueberall in Diagnostic**: Filter direkt in `addChat` damit er fuer alle Code-Pfade greift (chat_final, proxy_result, History-Load, ...) — vorher rutschten Marker als Text durch wenn sie nicht ueber chat_final kamen
|
||||
- [x] **Mehrere `[FILE: ...]`-Marker in einer Antwort**: Bridge zerlegt sauber in mehrere file_from_aria-Events, ARIA muss nicht selbst zwei Antworten posten. Bei nicht-existenten Files erscheint ein User-Hinweis statt silent skip
|
||||
- [x] **Inline-Bilder in Chat-Nachrichten** (App): ``- und plain-`https://image.png`-URLs werden als Image-Vorschau unter dem Text gerendert. Mit `react-native-svg` auch SVG-URLs inline
|
||||
- [x] **SVG-Anhaenge** werden korrekt gerendert: ChatImage-Komponente erkennt `.svg`-Endung und nutzt SvgUri statt Image (RN-Image kann SVG nicht). Vollbild-Modal genauso, mit `preserveAspectRatio="xMidYMid meet"` damit SVGs nicht gestreckt werden
|
||||
- [x] **Pinch-Zoom + Pan im Vollbild-Modal** (App): neue `ZoomableImage`-Komponente, reine RN-Implementation mit PanResponder+Animated, ohne externe Lib. 2-Finger-Pinch 1x..5x, 1-Finger-Pan wenn gezoomt, Doppel-Tap toggelt 1x↔2.5x. Plus ✕-Close-Button damit Tap-to-Close nicht mit Pan-Gesten kollidiert
|
||||
- [x] **ARIA-Abkuerzung ausgeschrieben**: in App → Einstellungen → Ueber und Diagnostic → Einstellungen ist jetzt erklaert: "ARIA — Autonomous Reasoning & Intelligence Assistant"
|
||||
- [x] **`init.sh`** legt fehlende Config-Dateien aus *.example-Vorlagen an — frischer Clone laeuft ohne Anleitung an
|
||||
- [x] **`USER.md` privat**: aus dem Repo genommen (enthielt interne Tool-Liste mit Gitea-URL etc.). Vorlage als `USER.md.example` checked-in, lokales File via `.gitignore` ausgeschlossen
|
||||
|
||||
### App Features
|
||||
|
||||
@@ -119,7 +158,7 @@ Wichtige Mechanismen:
|
||||
- [x] Piper komplett entfernt
|
||||
- [x] Gespraechsmodus: Speech-Gate strenger (-28dB / 500ms)
|
||||
- [x] Diagnostic: Archivierte Session-Versionen (.reset.*) angezeigt + exportierbar
|
||||
- [x] tools/export-jsonl-to-md.js: CLI-Konverter fuer Session-JSONL zu Markdown
|
||||
- [x] tools/export-jsonl-to-md.js: CLI-Konverter fuer Session-JSONL zu Markdown (mit OpenClaw raus)
|
||||
- [x] NO_REPLY-Filter in Bridge + Diagnostic
|
||||
- [x] Audio-Ducking + Exklusiv-Focus (Kotlin AudioFocusModule)
|
||||
- [x] TTS-Cleanup serverseitig: Code-Bloecke raus, Einheiten ausgeschrieben, Abkuerzungen buchstabiert, URLs zu "ein Link"
|
||||
@@ -182,9 +221,75 @@ Wichtige Mechanismen:
|
||||
- [x] RVS Nachrichten vom Smartphone gehen durch
|
||||
- [x] SSH Volume read-write fuer Proxy (kein -F Workaround mehr)
|
||||
|
||||
## Offen
|
||||
## Brain — Phase B (komplett)
|
||||
|
||||
### Bugs
|
||||
Der grosse Refactor weg von OpenClaw zu eigener Brain-Architektur — alle 4 Punkte
|
||||
durch. ARIA hat jetzt eigenes Gedaechtnis (Vector-DB), eigenen Loop, eigene
|
||||
Skills mit Tool-Use.
|
||||
|
||||
### Infrastruktur
|
||||
|
||||
- [x] aria-brain Container (FastAPI + Qdrant + sentence-transformers, MiniLM multilingual)
|
||||
- [x] aria-core (OpenClaw) abgerissen — Tag `v0.1.2.0` als Archiv
|
||||
- [x] docker-compose komplett umgebaut: brain + qdrant + bridge + diagnostic + proxy
|
||||
- [x] Voice-Bridge: aria-core-Logik raus (doctor_fix, aria_restart, compact_after) → durch Brain-HTTP-Call ersetzt
|
||||
- [x] Sprachmodell-Setting in runtime.json (brainModel) — Diagnostic kann Modell live wechseln, Brain-Restart noetig
|
||||
|
||||
### Memory / Vector-DB
|
||||
|
||||
- [x] Memory CRUD via Diagnostic-Gehirn-Tab (Add/Edit/Delete + Suche + Type/Pinned-Filter)
|
||||
- [x] **Migration aus brain-import/** (Phase B Punkt 2) — Parser fuer AGENT.md/USER.md/TOOLING.md, atomare Punkte mit migration_key (idempotent)
|
||||
- [x] **Bootstrap-Snapshot** (Phase B Punkt 2) — Export/Import nur pinned Memories als JSON
|
||||
- [x] **Komplettes Gehirn** Export/Import als tar.gz (Memories + Skills + Qdrant)
|
||||
|
||||
### Conversation-Loop (Phase B Punkt 3)
|
||||
|
||||
- [x] Single-Chat UI + Rolling Window (50 Turns)
|
||||
- [x] Memory-Destillat: bei >60 Turns automatisch 30 aelteste → fact-Memories via Claude-Call
|
||||
- [x] Hot Memory (pinned) + Cold Memory (Top-5 semantisch) im System-Prompt
|
||||
- [x] Manueller Destillat-Trigger + Konversation-Reset (Brain + Diagnostic chat_backup gleichzeitig)
|
||||
- [x] Bridge schreibt chat_backup.jsonl bei jedem Turn (User + ARIA + ARIA-Files)
|
||||
- [x] App-Chat-Sync: kompletter Server-Sync bei Reconnect (Server = Source of Truth). Wenn Server leer → App leert auch. Lokal-only Bubbles (Skill-Notifications, laufende Voice ohne STT) bleiben erhalten. Plus chat_cleared Live-Update wenn Diagnostic die History wiped.
|
||||
|
||||
### Skills-System (Phase B Punkt 4)
|
||||
|
||||
- [x] Python-only Skills (local-venv pro Skill, eigene pip-Pakete)
|
||||
- [x] Tool-Use im Brain: skill_create als Meta-Tool, dynamische run_<skill> pro aktivem Skill
|
||||
- [x] Harte Schwelle dokumentiert: pip-Install → IMMER Skill (Brain hat keine Persistenz ausser /data/skills/)
|
||||
- [x] Diagnostic Skills-Tab: Liste, README, Logs pro Run, Activate/Deactivate/Delete, Export/Import als tar.gz
|
||||
- [x] skill_created Live-Notification: gelbe Bubble in App + Diagnostic sobald ARIA selbst einen Skill anlegt
|
||||
|
||||
### Triggers-System (Phase B Punkt 5)
|
||||
|
||||
- [x] **Filesystem-Layer** unter `/data/triggers/<name>.json` + `logs/<name>.jsonl` pro Trigger
|
||||
- [x] **Timer** (one-shot, ISO-Timestamp) — "erinner mich in 10 Minuten an X" → ARIA legt via `trigger_timer`-Tool an, Background-Loop feuert zum Stichzeitpunkt einmal
|
||||
- [x] **Watcher** (recurring) — feuert wenn `condition` true wird, mit Throttle (min_seconds_between_fires) gegen Spam. Checks alle 30s
|
||||
- [x] **Sicherer Condition-Parser** via Python `ast`-Module (Whitelist statt `eval`): nur `<` `>` `<=` `>=` `==` `!=` `and` `or` `not`, Konstanten + Variablennamen aus Whitelist
|
||||
- [x] **Built-in Variablen**: `disk_free_gb`, `disk_free_pct`, `ram_free_mb`, `cpu_load_1min`, `uptime_sec`, `hour_of_day`, `minute_of_hour`, `day_of_month`, `month`, `year`, `day_of_week`, `is_weekend`, `unix_timestamp`, `current_lat`, `current_lon`, `location_age_sec`, `last_user_message_ago_sec`, `memory_count`, `pinned_count`, `rvs_connected`
|
||||
- [x] **near(lat, lon, radius_m) Funktion** im Parser (Haversine) — GPS-Geofencing fuer Blitzer-Warner / Ankunft-Erinnerungen
|
||||
- [x] **Background-Loop** im Brain-Container (Lifespan async task): laeuft alle 30s, prueft alle aktiven Trigger, ruft bei Match `agent.chat(prompt, source="trigger")` mit System-Praefix → ARIA reagiert wie auf eine Frage von Stefan, kann TTS sprechen / Skills starten / weitere Trigger anlegen
|
||||
- [x] **Diagnostic Trigger-Tab**: Liste aktiver Trigger mit Logs, Anlegen-Modal mit Type-Dropdown, Live-Anzeige aller verfuegbaren Variablen + Funktionen, Beispiele
|
||||
- [x] **App Live-Notification**: `trigger_created`-Bubble (gelb) sobald ARIA selbst einen Trigger anlegt — User sieht sofort dass die Bitte angekommen ist
|
||||
- [x] **GPS-Tracking via App** (`@react-native-community/geolocation` watchPosition, distanceFilter 30m, interval 15s) — Singleton-Service in `gpsTracking.ts`, Toggle in Settings → Standort, persistiert AsyncStorage, Restore beim App-Start
|
||||
- [x] **`request_location_tracking`-Tool**: ARIA kann das Tracking via `location_tracking`-Event an-/ausschalten — Bridge forwarded an App, App startet/stoppt watchPosition. ARIA tut das automatisch wenn sie einen Watcher mit `near()` anlegt
|
||||
- [x] **`location_update`-Forwarding**: App schickt alle 15s/30m ein `location_update {lat,lon}`, Bridge persistiert in `/shared/state/location.json`, Watcher liest beim Check
|
||||
- [x] **Activity-Persistenz**: `/shared/state/activity.json` traegt User-Message-Zeitstempel, damit `last_user_message_ago_sec` als Variable verfuegbar ist
|
||||
- [x] **`trigger_cancel`** + **`trigger_list`** als Tools — ARIA kann eigene Trigger verwalten
|
||||
- [x] **Triggers-Block im System-Prompt**: aktive Trigger + verfuegbare Variablen + Funktionen werden bei jedem Chat-Turn injiziert, dazu Hinweis dass GPS-Watcher `request_location_tracking` mit-aufrufen sollen
|
||||
- [x] **Aktuelle-Zeit-Block im System-Prompt**: UTC + lokale Europa/Berlin-Zeit (Sommer/Winter-Heuristik) wird bei jedem Chat-Turn oben mit-injiziert, damit Timer-fires_at und Watcher mit `hour_of_day` ueberhaupt sinnvoll sind. `trigger_timer` akzeptiert zusaetzlich `in_seconds` (Server rechnet) — ARIA muss bei relativen Angaben ('in 2 Minuten') nicht selbst ISO-rechnen
|
||||
|
||||
### Diagnostic / App Features (drumherum)
|
||||
|
||||
- [x] Datei-Manager (Diagnostic + App-Modal): /shared/uploads/ verwalten, Multi-Select + Select-All + Bulk-Download als ZIP + Bulk-Delete
|
||||
- [x] Wipe-All-Button (Memory + Stimmen + Settings)
|
||||
- [x] Voice Export/Import pro Stimme (Diagnostic + XTTS-Bridge auf Gamebox)
|
||||
- [x] F5/Whisper-Settings als JSON-Bundle Export/Import
|
||||
- [x] App Chat-Suche umgebaut: Highlight + Next/Prev statt Filter
|
||||
- [x] App Pinch-Zoom in Bildern rewriten (Multi-Touch-Race-Bugs)
|
||||
- [x] Info-Buttons mit Modal-Erklaerungen im Gehirn-Tab
|
||||
- [x] Token/Call-Metrics + Subscription-Quota-Tracking: pro Claude-Call ein Log-Eintrag mit Token-Schaetzung (chars/4). Gehirn-Tab zeigt 1h/5h/24h/30d-Aggregat + Progress-Bar gegen Plan-Limit (Pro=45/5h, Max 5x=225/5h, Max 20x=900/5h, Custom). Warn-Schwelle 80%, kritisch 90%.
|
||||
|
||||
## Offen
|
||||
|
||||
### App Features
|
||||
- [ ] Chat-History zuverlaessiger laden (AsyncStorage Race Condition)
|
||||
@@ -192,9 +297,10 @@ Wichtige Mechanismen:
|
||||
|
||||
### Architektur
|
||||
- [ ] Bilder: Claude Vision direkt nutzen (aktuell nur Dateipfad an ARIA)
|
||||
- [ ] Auto-Compacting und Memory/Brain Verwaltung (SQLite?)
|
||||
- [ ] Diagnostic: System-Info Tab (Container-Status, Disk, RAM, CPU)
|
||||
- [ ] RVS Zombie-Connections endgueltig loesen
|
||||
- [ ] Alle .env-Variablen ueber Diagnostic konfigurierbar machen (Fallback .env bleibt fuer initialen Bootstrap)
|
||||
- [ ] Gamebox: kleine Web-Oberflaeche fuer Credentials/Server-Config oder zentral aus Diagnostic per RVS push
|
||||
- [ ] Root-Cause OpenClaw Session-Reset: Herausfinden warum Sessions beim ersten chat.send nach Container-Restart verworfen werden
|
||||
- [ ] Erste Skills bauen lassen (yt-dlp, pdf-extract, image-resize, etc.) — durch normale Anfragen, ARIA legt sie selbst an
|
||||
- [ ] Tool-Use-Verifikation: Live-Test ob claude-max-api-proxy `tools` und `tool_calls` sauber durchreicht
|
||||
- [ ] Heartbeat (periodische Selbst-Checks)
|
||||
- [ ] Lokales LLM als Waechter (Triage vor Claude-Call)
|
||||
|
||||
@@ -18,6 +18,18 @@ const ALLOWED_TYPES = new Set([
|
||||
"update_check", "update_available", "update_download", "update_data",
|
||||
"agent_activity", "cancel_request",
|
||||
"audio_pcm",
|
||||
"file_from_aria",
|
||||
"container_restart",
|
||||
"file_list_request", "file_list_response",
|
||||
"file_delete_request", "file_deleted",
|
||||
"xtts_export_voice", "xtts_voice_exported",
|
||||
"xtts_import_voice", "xtts_voice_imported",
|
||||
"skill_created",
|
||||
"trigger_created",
|
||||
"location_update", "location_tracking",
|
||||
"chat_history_request", "chat_history_response", "chat_cleared",
|
||||
"file_delete_batch_request", "file_delete_batch_response",
|
||||
"file_zip_request", "file_zip_response",
|
||||
"xtts_delete_voice",
|
||||
"voice_preload", "voice_ready",
|
||||
"stt_request", "stt_response",
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
#!/usr/bin/env node
/**
 * Exports an OpenClaw session JSONL file (including .reset.* archives) as Markdown.
 *
 * Usage:
 *   node export-jsonl-to-md.js <input.jsonl> [output.md]
 *
 * Or piped straight out of the aria-core container:
 *   docker exec aria-core cat /home/node/.openclaw/agents/main/sessions/<ID>.jsonl.reset.<TS> \
 *     | node export-jsonl-to-md.js - > output.md
 */

const fs = require("fs");

const [, , srcPath, dstPath] = process.argv;

if (!srcPath) {
  console.error("Usage: export-jsonl-to-md.js <input.jsonl|-> [output.md]");
  process.exit(1);
}

// "-" means read from stdin (fd 0).
const rawInput = srcPath === "-" ? fs.readFileSync(0, "utf-8") : fs.readFileSync(srcPath, "utf-8");

// Pull plain text out of a message's content (string or content-part array).
function extractText(content) {
  if (typeof content === "string") return content;
  if (Array.isArray(content)) {
    return content
      .filter(part => part.type === "text")
      .map(part => part.text || "")
      .join("\n");
  }
  return "";
}

// Strip transport/metadata prefixes so only the conversational text remains.
function cleanText(role, text) {
  if (role === "user") {
    let t = text.replace(/^Sender \(untrusted metadata\):[\s\S]*?```[\s\S]*?```\s*\n*/m, "").trim();
    return t.replace(/^\[.*?\]\s*/, "").trim();
  }
  return text.replace(/^\[\[reply_to_\w+\]\]\s*/g, "").trim();
}

// "YYYY-MM-DD HH:MM:SS" from an ISO timestamp, or "" when absent.
function formatWhen(ts) {
  return ts ? new Date(ts).toISOString().replace("T", " ").slice(0, 19) : "";
}

const sections = [];
for (const rawLine of rawInput.split("\n")) {
  if (!rawLine.trim()) continue;

  let record;
  try {
    record = JSON.parse(rawLine);
  } catch {
    continue; // tolerate malformed lines
  }
  if (record.type !== "message" || !record.message) continue;

  const role = record.message.role;
  if (role !== "user" && role !== "assistant") continue;

  const body = cleanText(role, extractText(record.message.content));
  if (!body) continue;

  const when = formatWhen(record.message.timestamp || record.timestamp || 0);
  const heading = role === "user" ? "## 🧑 User" : "## 🤖 ARIA";
  sections.push(`${heading}${when ? ` — ${when}` : ""}\n\n${body}`);
}

const exportedAt = new Date().toISOString().replace("T", " ").slice(0, 19);
const title = srcPath === "-" ? "Session" : srcPath.split("/").pop().replace(/\.jsonl.*/, "");

const markdown = [
  `# Session: ${title}`,
  ``,
  `Exportiert: ${exportedAt} `,
  `Quelle: ${srcPath === "-" ? "stdin" : srcPath}`,
  `Nachrichten: ${sections.length}`,
  ``,
  `---`,
  ``,
  sections.join("\n\n---\n\n"),
  ``,
].join("\n");

if (dstPath) {
  fs.writeFileSync(dstPath, markdown);
  console.error(`OK: ${sections.length} Nachrichten → ${dstPath}`);
} else {
  process.stdout.write(markdown);
}
|
||||
@@ -661,6 +661,76 @@ async def handle_delete_voice(ws, payload: dict) -> None:
|
||||
logger.exception("handle_delete_voice Fehler")
|
||||
|
||||
|
||||
async def handle_export_voice(ws, payload: dict) -> None:
    """Bundle a voice (.wav + .txt) into a tar.gz and send it back base64-encoded.

    Replies with an ``xtts_voice_exported`` event: ``{"ok": True, "name", "data"}``
    on success, or ``{"ok": False, "error": ...}`` on any failure.
    """
    name = (payload.get("name") or "").strip()
    if not name:
        await _send(ws, "xtts_voice_exported", {"ok": False, "error": "name fehlt"})
        return
    try:
        wav_path, txt_path = voice_paths(name)
        if not wav_path.exists():
            await _send(ws, "xtts_voice_exported", {"ok": False, "name": name, "error": "Stimme nicht gefunden"})
            return

        import io, tarfile

        # Build the archive entirely in memory; voices are small enough for that.
        archive_buf = io.BytesIO()
        with tarfile.open(fileobj=archive_buf, mode="w:gz") as archive:
            archive.add(wav_path, arcname=wav_path.name)
            if txt_path.exists():
                archive.add(txt_path, arcname=txt_path.name)

        raw = archive_buf.getvalue()
        encoded = base64.b64encode(raw).decode("ascii")
        logger.info("Voice exportiert: %s (%d KB tar.gz)", name, len(raw) // 1024)
        await _send(ws, "xtts_voice_exported", {"ok": True, "name": name, "data": encoded})
    except Exception as e:
        logger.exception("handle_export_voice Fehler")
        await _send(ws, "xtts_voice_exported", {"ok": False, "name": name, "error": str(e)[:200]})
|
||||
|
||||
|
||||
async def handle_import_voice(ws, payload: dict) -> None:
    """Receive a tar.gz containing <name>.wav (plus optional <name>.txt) and
    install it into VOICES_DIR, overwriting an existing voice of the same name.

    Replies with an ``xtts_voice_imported`` event and refreshes the voice list
    on success.
    """
    name = (payload.get("name") or "").strip()
    data_b64 = payload.get("data") or ""
    if not name or not data_b64:
        await _send(ws, "xtts_voice_imported", {"ok": False, "error": "name/data fehlt"})
        return
    try:
        import io, tarfile

        VOICES_DIR.mkdir(parents=True, exist_ok=True)
        safe = sanitize_voice_name(name)
        archive_bytes = base64.b64decode(data_b64)

        got_wav = False
        with tarfile.open(fileobj=io.BytesIO(archive_bytes), mode="r:gz") as archive:
            for entry in archive.getmembers():
                if not entry.isfile():
                    continue
                # Only the basename is used — prevents path traversal via
                # crafted member names like "../../etc/passwd".
                leaf = Path(entry.name).name.lower()
                if leaf.endswith(".wav"):
                    target = VOICES_DIR / f"{safe}.wav"
                elif leaf.endswith(".txt"):
                    target = VOICES_DIR / f"{safe}.txt"
                else:
                    continue
                handle = archive.extractfile(entry)
                if handle is None:
                    continue
                with open(target, "wb") as out:
                    out.write(handle.read())
                if leaf.endswith(".wav"):
                    got_wav = True

        if not got_wav:
            await _send(ws, "xtts_voice_imported", {"ok": False, "name": name, "error": "Kein .wav im Archiv"})
            return

        logger.info("Voice importiert: %s", name)
        await _send(ws, "xtts_voice_imported", {"ok": True, "name": name})
        await handle_list_voices(ws)
    except Exception as e:
        logger.exception("handle_import_voice Fehler")
        await _send(ws, "xtts_voice_imported", {"ok": False, "name": name, "error": str(e)[:200]})
|
||||
|
||||
|
||||
# Last voice set via Diagnostic (prevents endless re-preloading on every config message)
_last_diag_voice = ""
|
||||
|
||||
@@ -781,6 +851,10 @@ async def run_loop(runner: F5Runner) -> None:
|
||||
asyncio.create_task(handle_list_voices(ws))
|
||||
elif mtype == "xtts_delete_voice":
|
||||
asyncio.create_task(handle_delete_voice(ws, payload))
|
||||
elif mtype == "xtts_export_voice":
|
||||
asyncio.create_task(handle_export_voice(ws, payload))
|
||||
elif mtype == "xtts_import_voice":
|
||||
asyncio.create_task(handle_import_voice(ws, payload))
|
||||
elif mtype == "voice_preload":
|
||||
asyncio.create_task(handle_voice_preload(ws, payload, runner))
|
||||
elif mtype == "stt_response":
|
||||
|
||||
Reference in New Issue
Block a user