Compare commits
171 Commits
.gitignore (+11 −8)

@@ -9,15 +9,19 @@
.env.*
!.env.example
!.env.*.example
aria-data/config/*.env
!aria-data/config/*.env.example
!aria-data/config/openclaw.env

# ── ARIAs Gedächtnis (nur per tar gesichert) ────
aria-data/brain/
# Privater User-Profile-Snippet (Tool-Stack, interne URLs) —
# liegt jetzt in brain-import/ (frueher aria-data/config/USER.md).
# USER.md.example ist Repo-Inhalt, USER.md lokal selbst anlegen.
aria-data/brain-import/USER.md

# ── Stimmen (große Binärdateien) ─────────────────
aria-data/voices/
# ── ARIAs Gedächtnis (Vector-DB, Skills, Models) ──
# Backup via Diagnostic → Gehirn-Export (tar.gz), nicht via Git.
aria-data/brain/data/
aria-data/brain/qdrant/

# Diagnostic-State (aktive Session etc.)
aria-data/config/diag-state/

# ── Node / npm ──────────────────────────────────
node_modules/

@@ -46,7 +50,6 @@ desktop/dist/
__pycache__/
*.pyc
*.pyo
bridge/__pycache__/

# ── macOS ────────────────────────────────────────
.DS_Store

Binary file not shown.
README.md

@@ -57,38 +57,44 @@ ARIA hat zwei Rollen:
│ ┌─────────────────────────────────────────────────┐ │
│ │ [proxy] claude-max-api-proxy Container │ │
│ │ Claude Max Sub → lokale API │ │
│ │ Port 3456, mit sed-Patches fuer │ │
│ │ Tool-Permissions + Host-Binding │ │
│ │ │ │
│ │ [aria] OpenClaw Container (aria-core) │ │
│ │ Gateway, Sessions, Memory, Skills │ │
│ │ Liest BOOTSTRAP.md + AGENT.md │ │
│ │ [qdrant] Vector-DB fuer ARIAs Gedaechtnis │ │
│ │ Bind-Mount: aria-data/brain/qdrant/ │ │
│ │ │ │
│ │ [brain] ARIA Agent + Memory Container │ │
│ │ FastAPI auf Port 8080 │ │
│ │ Eigener Agent-Loop, Skills, │ │
│ │ Vector-Memory, SSH-Zugriff zur VM │ │
│ │ Bind-Mount: aria-data/brain/data/ │ │
│ │ │ │
│ │ [bridge] ARIA Voice Bridge Container │ │
│ │ Wake-Word (lokales Mikro auf VM) │ │
│ │ STT primaer remote (Gamebox-Whisper) │ │
│ │ Fallback: lokales faster-whisper (CPU) │ │
│ │ TTS via F5-TTS auf Gamebox │ │
│ │ Bruecke: App <> RVS <> Bridge <> ARIA │ │
│ │ Wake-Word, STT, TTS-Forwarding │ │
│ │ Spricht mit Brain via HTTP/8080 │ │
│ │ │ │
│ │ [diagnostic] Selbstcheck-UI + Einstellungen │ │
│ │ Gateway + RVS + Proxy Status │ │
│ │ Chat, Sessions, Login, Logs │ │
│ │ Port 3001 (im Netzwerk der Bridge) │ │
│ │ Chat, Gehirn, Dateien, Logs │ │
│ └──────────────────┬──────────────────────────────┘ │
│ │ Volume Mount │
│ ▼ │
│ ┌─────────────────────────────────────────────────┐ │
│ │ ./aria-data/ — Ein tar = vollstaendiges Backup │ │
│ │ ./aria-data/ — Konfiguration + SSH-Keys │ │
│ │ ./aria-data/brain/ — Vector-DB + Skills (gitignored)│
│ │ Backup via Diagnostic → "Gehirn-Export" (tar.gz) │ │
│ └─────────────────────────────────────────────────┘ │
└─────────────────────────────────────────────────────────┘
```

> OpenClaw (formerly `aria-core`) has been torn out; ARIA now runs on its own agent framework in the
> `aria-brain` container: its own tools, skills and vector memory instead of sessions. The last
> OpenClaw state is archived as Git tag `v0.1.2.0`.

**Four separate deployments:**

| What | Where | How |
|------|-------|-----|
| RVS | Data center | `cd rvs && docker compose up -d` |
| ARIA Core | Debian 13 VM | `docker compose up -d && ./aria-setup.sh` |
| ARIA Brain/Bridge/Diagnostic | Debian 13 VM | `./init.sh && ./aria-setup.sh && docker compose up -d` |
| Gamebox stack (F5-TTS + Whisper) | Gamebox (GPU) | `cd xtts && docker compose up -d` |
| Android app | Stefan's phone | Install the APK (auto-update via RVS) |

@@ -114,12 +120,12 @@ apt install -y docker.io docker-compose-plugin git curl jq
git clone git@gitea.hackersoft.de:aria/aria.git ~/ARIA-AGENT
cd ~/ARIA-AGENT
cp .env.example .env
bash init.sh # legt USER.md aus Vorlage an (idempotent, schadet nicht)
```

Edit the `.env` file (see `.env.example` for details):
```bash
# Gateway-Auth: Alle Services die mit aria-core reden brauchen diesen Token
# Diagnostic, Bridge, App nutzen ihn fuer den WebSocket-Handshake
# Auth-Token: Alle ARIA-Services nutzen ihn fuer interne Auth
ARIA_AUTH_TOKEN= # openssl rand -hex 32

# RVS-Verbindung: Hostname + Port deines Rendezvous-Servers
@@ -128,17 +134,18 @@ RVS_PORT=443
RVS_TLS=true
RVS_TLS_FALLBACK=true

# Pairing-Token: Verbindet App, Bridge, Diagnostic und XTTS im gleichen RVS-Room
# Pairing-Token: Verbindet App, Bridge, Diagnostic und Gamebox im gleichen RVS-Room
# MUSS auf allen Geraeten identisch sein (ARIA-VM, Gaming-PC, App)
# Wird von generate-token.sh automatisch generiert und eingetragen
RVS_TOKEN= # ./generate-token.sh

# Optional: SSH-Host des RVS-Servers fuer Auto-Update (z.B. root@aria-rvs)
RVS_UPDATE_HOST=
```

All other settings (voices, modes, wake word, F5-TTS tuning) live in
`/shared/config/runtime.json` and are maintained through the Diagnostic UI, not
in `.env`. A full reset is possible at any time via "🗑 ALLES löschen" in the
Diagnostic settings tab.

**Two tokens, two purposes:**
- **ARIA_AUTH_TOKEN**: authentication against the OpenClaw gateway (aria-core). Whoever holds this token can give ARIA commands.
- **ARIA_AUTH_TOKEN**: internal auth token between ARIA's containers.
- **RVS_TOKEN**: pairing token for the rendezvous server. All devices with the same token end up in the same "room" and can talk to each other. The app receives this token via QR code.

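Both tokens can be set from the repo root in one go. A minimal sketch, assuming `.env` was copied from `.env.example` and therefore already contains an `ARIA_AUTH_TOKEN=` line:

```bash
# Internal auth token shared by ARIA's containers (same value everywhere)
sed -i "s/^ARIA_AUTH_TOKEN=.*/ARIA_AUTH_TOKEN=$(openssl rand -hex 32)/" .env

# Pairing token: generate-token.sh writes RVS_TOKEN into .env and prints the QR code
./generate-token.sh
```
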
### 2. Log in the Claude CLI (proxy auth)

@@ -156,48 +163,24 @@ claude login
**Important:** The directory `~/.claude/` (not `~/.config/claude/`!) is mounted
into the proxy as a volume. The credentials survive container restarts.

### 3. Configure the Voice Bridge
### 3. Generate the SSH key for aria-wohnung + RVS token + containers

```bash
cp aria-data/config/aria.env.example aria-data/config/aria.env
# Bei Bedarf anpassen (Whisper-Modell als Fallback, Sprache, Wake-Word)
```
# SSH-Key fuer den Zugriff von ARIA auf die VM (aria-wohnung)
./aria-setup.sh

STT runs primarily on the Gamebox (faster-whisper on GPU), TTS exclusively
via F5-TTS on the Gamebox; see the section "Gamebox-Stack — F5-TTS + Whisper"
further down.

### 5. Generate the RVS token & start the containers

```bash
# Token generieren — schreibt RVS_TOKEN in .env, zeigt QR-Code
# RVS-Token generieren — schreibt RVS_TOKEN in .env, zeigt QR-Code
./generate-token.sh

# Alle Container starten
docker compose up -d
```

### 6. Run the ARIA setup (once!)
`aria-setup.sh` generates the ed25519 key in `aria-data/ssh/` and adds the
public key to `/root/.ssh/authorized_keys` (the script runs as root on the VM
aria-wohnung). Brain and proxy use the same key.

```bash
./aria-setup.sh
```

This script is **essential**; it does the following:
1. Waits until aria-core is running
2. Fixes volume permissions (Docker → node user)
3. Writes `openclaw.json` (proxy provider, model config, 900 s timeout)
4. Sets the exec-approvals wildcard (tool execution in headless mode)
5. Generates the SSH key for VM access (`aria-data/ssh/`)
6. Fixes SSH permissions inside the container
7. Restarts aria-core

**Add the SSH key on the VM** (the script prints it):
```bash
cat ~/ARIA-AGENT/aria-data/ssh/id_ed25519.pub >> /root/.ssh/authorized_keys
```

### 7. Connect the app
### 4. Connect the app

Open the app → scan the QR code → "ARIA, hoerst du mich?"

@@ -205,20 +188,19 @@ Der QR-Code enthaelt: Host, Port, Token, TLS-Flag — einmal scannen, nie wieder

Show an existing token as a QR code again: `./generate-token.sh show`

### 8. Check Diagnostic
### 5. Check Diagnostic

```bash
# Im Browser:
http://<VM-IP>:3001
```

The Diagnostic UI shows:
- Gateway connection (green dot = OK)
- RVS connection
- Proxy status + Claude login
- Chat test (write to ARIA directly)
- Session management
- Container logs
The Diagnostic UI has four top-level tabs:

- **Main**: live chat test, status (Brain / RVS / proxy), end-to-end trace
- **Gehirn** (brain): memory management (vector DB), skills, export/import of the complete brain as tar.gz
- **Dateien** (files): all files from `/shared/uploads/` (generated by ARIA or uploaded), with download/delete
- **Einstellungen** (settings): repair (container restart), wipe, speech output, Whisper, runtime config, app onboarding (QR), full reset

---

@@ -226,7 +208,7 @@ Die Diagnostic-UI zeigt:

The proxy is the centerpiece: it turns the Claude Max subscription into a local API.

**Flow:** `OpenClaw (aria-core) → HTTP → claude-max-api-proxy → Claude Code CLI (--print) → Anthropic API`
**Flow:** `aria-brain → HTTP → claude-max-api-proxy → Claude Code CLI (--print) → Anthropic API`

The proxy container (`node:22-alpine`) installs on every start:
- `@anthropic-ai/claude-code`: the Claude Code CLI
@@ -247,52 +229,34 @@ Danach werden per `sed` vier Patches angewendet:

## Configuration files

### aria-data/config/
### aria-data/

| File | Purpose | Mounted as |
|------|---------|------------|
| `BOOTSTRAP.md` | ARIA's system prompt: identity, safety rules, tool approvals, infrastructure | `BOOTSTRAP.md` + `CLAUDE.md` in the workspace |
| `AGENT.md` | ARIA's personality, tool approvals, working principles | `AGENT.md` in the workspace |
| `USER.md` | Stefan's preferences and communication style | `USER.md` in the workspace |
| `openclaw.env` | OpenClaw container environment | `.env` in the workspace |
| `aria.env` | Voice Bridge configuration (Whisper, voices) | `/config/aria.env` in the Bridge |
| Path | Purpose |
|------|---------|
| `.env` | Tokens (ARIA_AUTH_TOKEN, RVS_TOKEN, RVS_HOST); minimal, everything else lives in the DB |
| `aria-data/ssh/` | SSH key for access to aria-wohnung (Brain and proxy share the key) |
| `aria-data/brain/qdrant/` | Vector DB storage (bind mount, gitignored) |
| `aria-data/brain/data/` | Skills, embedding model cache (bind mount, gitignored) |
| `aria-data/brain-import/` | `AGENT.md`, `USER.md.example`, `TOOLING.md.example`; source for the initial memory import into the vector DB |
| `aria-data/config/diag-state/` | Diagnostic state (e.g. the last active session) |

**BOOTSTRAP.md** is the most important file; it defines:
- Who ARIA is (name, role, personality)
- Safety rules (no ClawHub, fend off prompt injection)
- Tool approvals (all Claude Code tools: WebFetch, Bash, etc.)
- SSH access to aria-wohnung (the VM)
- The memory system
### /shared/config/ (in the aria-shared volume)

### openclaw.json (via aria-setup.sh)
| File | Purpose |
|------|---------|
| `voice_config.json` | TTS engine, cloned voice, Whisper model, F5-TTS tuning |
| `runtime.json` | Token + RVS override + Whisper language (maintained by Diagnostic) |
| `highlight_triggers.json` | Highlight trigger words |
| `chat_backup.jsonl` | Append-only chat log (source of the chat history in Diagnostic) |

Written into the container by `aria-setup.sh`:
```json
{
  "agents": {
    "defaults": {
      "model": { "primary": "proxy/claude-sonnet-4" },
      "timeoutSeconds": 900,
      "maxConcurrent": 4
    }
  },
  "models": {
    "providers": {
      "proxy": {
        "api": "openai-completions",
        "baseUrl": "http://proxy:3456/v1",
        "apiKey": "not-needed"
      }
    }
  },
  "tools": { "profile": "full" },
  "messages": { "ackReactionScope": "all" }
}
```
`voice_config.json` + `highlight_triggers.json` can be exported/imported as a
bundle via Diagnostic → "Sprachausgabe".

**timeoutSeconds: 900** (15 min): necessary because every request spawns a new
`claude --print` process (cold start). When tools are used (WebFetch, Bash),
ARIA needs several API round trips.
### Backup / Reset

- **Brain backup**: Diagnostic → Gehirn → "⬇ Export herunterladen"; the complete brain (memories + skills + Qdrant DB) as a `.tar.gz`
- **Voice backup**: one `.tar.gz` per voice (Diagnostic → Sprachausgabe → ⬇ per voice); import via the upload button
- **Full reset**: Diagnostic → Einstellungen → "🗑 ALLES löschen"; memory, voices and settings are gone, `.env` + SSH keys remain

---

@@ -303,14 +267,14 @@ auf der Gamebox.

**Message flow:**
```
Text: App → RVS → Bridge → chat.send → aria-core
Text: App → RVS → Bridge → aria-brain (HTTP)
Audio: App → RVS → Bridge → stt_request (RVS) → whisper-bridge (Gamebox)
→ stt_response → Bridge → chat.send → aria-core
→ stt_response → Bridge → aria-brain
Fallback bei Timeout: lokales faster-whisper (CPU)
Datei: App → RVS → Bridge → /shared/uploads/ → chat.send (mit Pfad) → aria-core
Datei: App → RVS → Bridge → /shared/uploads/ → aria-brain (mit Pfad)

aria-core → Antwort → Gateway → Diagnostic → RVS → App
→ Bridge → xtts_request (RVS) → f5tts-bridge
aria-brain → Antwort → Bridge → RVS → App
→ xtts_request (RVS) → f5tts-bridge
→ audio_pcm Stream → RVS → App AudioTrack
```

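To verify the Bridge → Brain hop in this flow by hand, a quick check along these lines should work. This is a sketch: it assumes the compose service name `brain` resolves inside the shared Docker network and reuses the `/health` endpoint listed under the dev commands further down.

```bash
# Is the Brain API reachable from inside the Bridge container?
docker exec aria-bridge curl -s http://brain:8080/health

# Same check from the Brain container itself
docker exec aria-brain curl -s http://localhost:8080/health
```
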
@@ -342,34 +306,23 @@ aria-core → Antwort → Gateway → Diagnostic → RVS → App

## Diagnostic: self-check UI and settings

Reachable at `http://<VM-IP>:3001`. Shares its network with aria-core.
Reachable at `http://<VM-IP>:3001`. Shares its network with the Bridge.

### Features
### Tabs

- **Status cards**: gateway (handshake), RVS (TLS fallback), proxy (auth)
- **Disk-full banner**: red overlay when the VM disk runs low, with copyable cleanup commands (safe + aggressive)
- **Chat test**: send messages to ARIA directly (gateway or via RVS), full-screen mode
- **"ARIA denkt..." indicator**: shows live what ARIA is currently doing (thinking, tool, writing)
- **Cancel button**: stops running requests + doctor --fix
- **Session management**: list, switch, create, delete sessions, export as Markdown (⬇ button)
- **Chat history**: shown on load and on session switch (read-only from JSONL)
- **TTS diagnostics tab**: test voices, check status, show errors
- **Settings**: TTS-active toggle, F5-TTS voice (cloned), operating modes, Whisper model (tiny…large-v3, hot reload on the Gamebox)
- **Voice status**: when the global voice is switched, a status text shows "Lade…" → "bereit (X.Ys)", triggered via `voice_preload`/`voice_ready`
- **Voice cloning**: upload audio samples, the reference text is transcribed automatically via Whisper
- **Main**: Brain/RVS/proxy status, chat test, "ARIA denkt..." indicator, end-to-end trace, container logs
- **Gehirn**: memory browser (vector DB), search + filter, edit/add/delete, brain export/import (tar.gz), skills (planned)
- **Dateien**: browser for `/shared/uploads/`; download or delete files generated by ARIA or uploaded (live update of the chat bubbles)
- **Einstellungen**: repair (container restart for Brain/Bridge/Qdrant), full reset, operating modes, speech output + voice cloning + F5-TTS tuning, Whisper, onboarding QR, app cleanup

### What else is in there

- **Disk-full banner** with copyable cleanup commands (safe + aggressive)
- **Voice cloning**: upload audio samples, Whisper transcribes the reference text automatically
- **Voice export/import**: carry individual voices between Gameboxes as `.tar.gz`
- **Settings export/import**: `voice_config.json` + `highlight_triggers.json` as a JSON bundle
- **Claude login**: browser terminal for logging in to the proxy
- **Core terminal**: shell into aria-core (openclaw CLI)
- **Container logs**: real-time logs of all containers (filtered by tab + pipeline)
- **SSH terminal**: direct SSH access to aria-wohnung
- **Watchdog**: detects stuck runs (2 min warning → 5 min doctor --fix → 8 min container restart)

### Session management

The session selected in Diagnostic applies **globally**: Bridge and app use
the same session. The active session is persisted under `/data/active-session`
and survives container restarts.

API endpoint for other services: `GET http://localhost:3001/api/session`
- **SSH terminal**: direct SSH access to aria-wohnung

---

@@ -378,10 +331,13 @@ API-Endpoint fuer andere Services: `GET http://localhost:3001/api/session`

### Features

- Text chat with ARIA
- **Voice recording**: push-to-talk (hold) or tap-to-talk (tap, auto-stop on silence)
- **Voice recording**: tap-to-talk (tap to start, tap to stop, auto-stop on silence via VAD)
- **Conversation mode** (ear button): after every ARIA answer, recording starts automatically, like a natural back-and-forth conversation
- **Wake word** (optional, Picovoice Porcupine on-device): "Jarvis", "Computer" etc.; the microphone listens passively, a conversation starts on the keyword. Custom wake words are possible via the Picovoice Console. Without an API key the ear button falls back to direct recording.
- **VAD (voice activity detection)**: configurable silence tolerance (1.0–8.0 s, default 2.8 s) before auto-stop kicks in. Max recording 120 s.
- **Wake word** (on-device, openWakeWord ONNX): "Hey Jarvis", "Alexa", "Hey Mycroft", "Hey Rhasspy"; the microphone listens passively, a conversation starts on the keyword. Entirely on-device via ONNX Runtime: no API key, no cloud round trip, audio never leaves the device.
- **VAD (voice activity detection)**: adaptive threshold (baseline from the first 500 ms of mic level + 6 dB offset). Configurable silence tolerance (1.0–8.0 s, default 2.8 s) before auto-stop kicks in. Max recording configurable (1–30 min, default 5 min)
- **Barge-in**: if you send a new voice/text message while ARIA is answering, she is interrupted and gets the hint "this is a correction"
- **Wake word during TTS**: you can say "Computer" while ARIA is still talking; the AcousticEchoCanceler prevents ARIA's own voice from triggering the wake word
- **Call pause + auto-resume**: TTS goes silent during a classic phone call or a VoIP call (WhatsApp/Signal/Discord). After hanging up, ARIA continues from the **exact spot** where she was interrupted; the app measures the position from playback start and uses the WAV cache of the answer
- **Speech gate**: a recording is discarded if no speech is detected
- **STT (speech-to-text)**: 16 kHz mono → Bridge → Gamebox Whisper (CUDA) → text in the chat. Near real time.
- **"ARIA denkt..." indicator**: shows the core's status live (thinking, tool, writing) + cancel button
@@ -394,53 +350,53 @@ API-Endpoint fuer andere Services: `GET http://localhost:3001/api/session`

- **Multiple attachments**: collect images + files, add text, then send them together
- **Paste support**: insert images from the clipboard (Diagnostic)
- **Attachments**: the Bridge stores them in the shared volume, ARIA can access them, re-download via RVS
- **Settings**: TTS active, F5-TTS voice, pre-roll buffer, silence tolerance, storage location, auto-download, GPS
- **Settings**: TTS active, F5-TTS voice, pre-roll buffer, silence tolerance, storage location, auto-download, GPS, verbose logging
- **Auto-update**: checks for a new version on start + via button, download + installation via RVS (FileProvider)
- GPS position (optional)
- GPS position (optional, with runtime permission request); included in every chat/audio payload and can be shown in Diagnostic as a debug block
- QR code scanner for token pairing
- **Receiving ARIA files**: if ARIA creates a PDF/image/Markdown/ZIP for you (marker `[FILE: /shared/uploads/aria_*]` in the answer), it appears as its own attachment bubble. Tap → loaded via RVS + opened with the Android intent picker (PDF viewer, image viewer, default app). Inline images from Markdown `` syntax are rendered directly below the text (PNG/JPG via Image, SVG via react-native-svg)
- **Full screen with pinch zoom**: images in the full-screen modal are pinch-zoomable (1x..5x), one-finger pan when zoomed, double-tap toggles 1x↔2.5x, all without an external lib
- **Container restart buttons** (Settings → Reparatur): restart aria-bridge / aria-brain / aria-qdrant individually (~5 s downtime each). Goes via RVS → Bridge → Diagnostic → Docker socket API.
- **Cache cleanup**: on app start, orphaned TTS WAVs are cleared from the cache. Plus settings buttons "TTS-Cache leeren", "Update-Cache leeren", "Anhang-Cache leeren"

### Setting up the wake word (Picovoice Porcupine)
### Wake word (openWakeWord, on-device)

The wake word runs entirely **on-device** in the app; no audio leaves your phone
for detection. Picovoice currently offers a **7-day free trial** without a credit card
and without auto-renewal, paid afterwards (see [picovoice.ai/pricing](https://picovoice.ai/pricing)).
If you want to skip the wake word: the ear button also works without an AccessKey
(direct recording instead of passive listening, see below).
Wake-word detection runs entirely **on-device** via [openWakeWord](https://github.com/dscripka/openWakeWord)
with ONNX Runtime: no API key, no cloud round trip, not a cent in license fees,
and the audio never leaves the device.

**1) Get an AccessKey** (once, ~2 minutes):
**Bundled wake words** (ONNX files in `android/android/app/src/main/assets/openwakeword/`):
- `Hey Jarvis` (default, openWakeWord original)
- `Computer` (Star Trek style, community model)
- `Alexa`, `Hey Mycroft`, `Hey Rhasspy` (openWakeWord originals)

1. Register at [console.picovoice.ai](https://console.picovoice.ai) (email + password, no credit card for the trial).
2. After logging in, copy the **AccessKey** from the dashboard (a long Base64 string).

**2) Enter the AccessKey in the app:**

- App → **Einstellungen** → **Wake-Word** section
- Paste the AccessKey, choose a **keyword** (default: `jarvis`)
- Save → the app initializes Porcupine automatically

**Built-in keywords** (available immediately, no training needed):
`jarvis`, `computer`, `picovoice`, `porcupine`, `bumblebee`, `terminator`,
`alexa`, `hey google`, `ok google`, `hey siri`

**3) Create your own wake word** ("ARIA", "Hey Stefan", whatever you like):

1. [console.picovoice.ai](https://console.picovoice.ai) → **Porcupine** → **Train Wake Word**
2. Enter the word (e.g. `ARIA`), choose language `German`, platform `Android`
3. Press **Train**; Picovoice trains the model in ~1–2 minutes
4. Download the finished `.ppn` file
5. *(Custom upload in the app is phase 2; currently only built-in keywords.
`.ppn` files can already be placed into the app bundle manually, the UI
for it comes with the next Diagnostic update.)*
Community models come from [fwartner/home-assistant-wakewords-collection](https://github.com/fwartner/home-assistant-wakewords-collection).

**Usage:**
- App → **Einstellungen** → **Wake-Word** → choose the desired keyword → **Speichern + Aktivieren**
- Tap the **ear button (👂)** in the status bar → the wake word is armed, the app listens passively
- Say the wake word → the icon switches to 🎙️, a normal conversation runs
- Say the wake word → the icon switches to 🎙️, plus a **ready sound** (ding-dong, optional in settings) and the toast "🎤 sprich jetzt" as soon as the mic is actually open
- After every ARIA answer the mic opens again; silence → back to 👂
- Tap again → ear off (🔇)

**Without an AccessKey:** the ear button starts direct recording instead (the mic
is active immediately, no passive listening). Also a valid mode, just without
hands-free operation via keyword.
**Training your own wake words** (free, ~30 min):

1. Open the openWakeWord training notebook on Colab (link in the
[openWakeWord repo](https://github.com/dscripka/openWakeWord) under "Training Custom Models")
2. Enter the wake-word phrase (e.g. "ARIA", "Hey Stefan") and run the notebook;
it generates synthetic training examples and trains the model.
3. Download the resulting `.onnx` file
4. Place the file in `android/android/app/src/main/assets/openwakeword/`
5. In `android/src/services/wakeword.ts`, add the file name (without `.onnx`) to the
`WAKE_KEYWORDS` list
6. Rebuild the APK

*(Diagnostic upload for custom `.onnx` files without a rebuild comes later.)*

**Tuning** (in [wakeword.ts](android/src/services/wakeword.ts)):
- `DEFAULT_THRESHOLD = 0.5`: score threshold (raise to 0.6–0.7 on false positives)
- `DEFAULT_PATIENCE = 2`: how many frames above the threshold are required
- `DEFAULT_DEBOUNCE_MS = 1500`: minimum gap between two triggers

### Initial setup (dev machine, once)

@@ -509,18 +465,44 @@ Der Update-Flow:

```
App (Mikrofon) → AAC/MP4 Aufnahme → Base64 → RVS → Bridge
Bridge: FFmpeg (16kHz PCM) → Whisper STT → Text → aria-core
Bridge: FFmpeg (16kHz PCM) → Whisper STT → Text → aria-brain
Bridge: STT-Ergebnis → RVS → App (Placeholder wird durch transkribierten Text ersetzt)
aria-core → Antwort → Bridge → XTTS (Gaming-PC) → PCM-Stream → RVS → App
aria-brain → Antwort → Bridge → F5-TTS (Gaming-PC) → PCM-Stream → RVS → App
App: AudioTrack MODE_STREAM (nahtlos), Cache als WAV pro Message
```

### Audio behavior in the app

| Phase | Other app (Spotify) | ARIA mic |
|-------|---------------------|----------|
| Idle / ear off | plays freely | off |
| Wake word listening (armed) | plays freely | passive (openWakeWord) |
| User recording running | paused (EXCLUSIVE) | recording |
| Recording finished | resumed | off |
| ARIA thinking/writing (~20 s) | plays freely | off |
| TTS starts | paused (DUCK) | off (or barge) |
| TTS playing (incl. GPU pauses) | stays paused | barge on wake word |
| TTS finished | resumed after 800 ms | (conversation window) |
| Incoming call (incl. VoIP) | — | mic paused |
| Call over (auto-resume) | pauses again | off |
| New question during the call | — | (resume discarded) |

Mechanisms: underrun protection in the PcmStreamPlayer (silence fill during
render pauses), conversation focus on wake word, foreground service with
mediaPlayback|microphone, call detection via TelephonyManager plus an
AudioFocus loss listener with a polling fallback (VoIP). During a call the
playback position is remembered; after hanging up, ARIA continues from the
exact spot (or discards that if the user asked a new question by text during
the call). The PcmPlaybackFinished event from native code ensures Spotify
stays paused until ARIA has actually gone silent.

### File pipeline (images & attachments)

```
App (Kamera/Dateimanager) → Base64 → RVS → Bridge
Bridge: Speichert in /shared/uploads/ (Shared Volume, fuer aria-core sichtbar)
Bridge: chat.send → "Stefan hat ein Bild geschickt: foto.jpg — liegt unter /shared/uploads/..."
Bridge: Speichert in /shared/uploads/ (Shared Volume, fuer aria-brain sichtbar)
Bridge: aria-brain → "Stefan hat ein Bild geschickt: foto.jpg — liegt unter /shared/uploads/..."
ARIA: Kann Datei per Bash/Read-Tool oeffnen und analysieren
```

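A quick way to see what actually landed in the shared volume after an upload (a sketch; it reuses the container name and path from the pipeline above):

```bash
# Files the Bridge stored for ARIA, plus anything ARIA generated herself
docker exec aria-brain ls -lh /shared/uploads/
```
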
@@ -550,35 +532,34 @@ ist in den App-Einstellungen konfigurierbar.

## Data directory: aria-data/

Everything ARIA knows, can do and is lives here. One `tar` = a complete backup.

```
aria-data/
├── brain/ ← ARIAs Gedaechtnis (OpenClaw Memory)
│ ├── MEMORY.md ← Langzeitgedaechtnis
│ └── memory/ ← Tageslogbuecher
├── brain/ ← ARIAs Gehirn — Bind-Mount, GITIGNORED
│ ├── qdrant/ ← Vector-DB Storage (Memories, Skills-Embeddings)
│ └── data/ ← Skills, Embedding-Modell-Cache
│ └── skills/<name>/ ← Pro Skill ein Ordner mit Manifest, Code, venv
│
├── skills/ ← ARIAs Faehigkeiten (selbst geschrieben!)
├── brain-import/ ← Quell-Dateien fuer den initialen Import in die DB
│ ├── AGENT.md ← Persoenlichkeit (wird Memory-Punkte vom Typ identity/rule)
│ ├── BOOTSTRAP.md
│ ├── TOOLING.md.example
│ └── USER.md.example
│
├── config/
│ ├── BOOTSTRAP.md ← System-Prompt (Identitaet, Regeln, Tools)
│ ├── AGENT.md ← Persoenlichkeit & Arbeitsprinzipien
│ ├── USER.md ← Stefans Praeferenzen
│ ├── openclaw.env ← OpenClaw Environment
│ ├── aria.env ← Voice Bridge Config
│ └── diag-state/ ← Diagnostic persistenter State
│
│ (im Shared Volume /shared/config/):
│ ├── voice_config.json ← TTS-Einstellungen (Stimme, Speed, Engine)
│ ├── highlight_triggers.json ← Highlight-Trigger Woerter
│ └── chat_backup.jsonl ← Nachrichten-Backup (on-the-fly)
│
└── ssh/ ← SSH Keys fuer VM-Zugriff
├── id_ed25519 ← Private Key (generiert von aria-setup.sh)
├── id_ed25519.pub ← Public Key (muss in VM authorized_keys!)
└── config ← SSH Config (Host aria-wohnung)
└── ssh/ ← SSH Keys (Brain + Proxy teilen sich)
├── id_ed25519
├── id_ed25519.pub
└── config ← Host aria-wohnung
```

`aria-data/brain/` (vector DB + skills) is gitignored; backups run via the
brain-export button in Diagnostic, not via Git.

Settings in the shared volume (`/shared/config/`): `voice_config.json`,
`highlight_triggers.json`, `runtime.json`, `chat_backup.jsonl`.

**Backup:**
```bash
tar -czf aria-backup-$(date +%Y%m%d).tar.gz aria-data/
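# Restore (a sketch, assuming the backup was created as above from ~/ARIA-AGENT):
# stop the stack, unpack the archive in place, start again.
docker compose down
tar -xzf aria-backup-YYYYMMDD.tar.gz   # replace YYYYMMDD with the actual file name
docker compose up -d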
@@ -725,16 +706,15 @@ dem Cache wiederverwendet.

## Docker Volumes

| Volume | Path in the container | Purpose |
|--------|-----------------------|---------|
| `openclaw-config` | `/home/node/.openclaw` | OpenClaw config, sessions, auth |
| `claude-config` | `/home/node/.claude` | Claude Code settings, permissions |
| `~/.claude` (bind) | `/root/.claude` (proxy) | Claude CLI credentials |
| `./aria-data/ssh` (bind) | `/root/.ssh`, `/home/node/.ssh` | SSH keys |
| `./aria-data/brain` (bind) | `/home/node/.openclaw/workspace/memory` | Memory |
| `./aria-data/skills` (bind) | `/home/node/.openclaw/workspace/skills` | Skills |
| `aria-shared` | `/shared` (core + bridge + proxy + diag) | File exchange, config, uploads |
| `./aria-data/config/diag-state` (bind) | `/data` (diagnostic) | Persistent state (active session) |
| Volume / bind | Path in the container | Purpose |
|---------------|-----------------------|---------|
| `~/.claude` (bind) | `/root/.claude` (proxy) | Claude CLI credentials |
| `./aria-data/ssh` (bind) | `/root/.ssh` (proxy, brain) | SSH keys for aria-wohnung |
| `./aria-data/brain/qdrant` (bind) | `/qdrant/storage` (qdrant) | Vector DB storage |
| `./aria-data/brain/data` (bind) | `/data` (brain) | Skills + embedding model cache |
| `./aria-data/brain` (bind) | `/brain` (diagnostic) | Brain export/import endpoints |
| `aria-shared` | `/shared` (brain, bridge, proxy, diagnostic) | File exchange, config, uploads |
| `./aria-data/config/diag-state` (bind) | `/data` (diagnostic) | Diagnostic persistent state |

---

@@ -763,22 +743,21 @@ docker compose down

# Einzelnen Container neu bauen
docker compose up -d --build diagnostic
docker compose up -d --build bridge
docker compose up -d --build bridge brain

# Logs
docker compose logs -f # alle
docker compose logs -f aria # nur aria-core
docker compose logs -f proxy # nur proxy
docker compose logs -f # alle
docker compose logs -f brain # nur Agent + Memory
docker compose logs -f qdrant # nur Vector-DB
docker compose logs -f bridge # nur Voice-Bridge
docker compose logs -f proxy # nur Claude-Proxy

# Setup wiederholen (nach Config-Aenderungen)
./aria-setup.sh
# SSH-Test (Brain zu aria-wohnung)
docker exec aria-brain ssh aria-wohnung hostname

# SSH-Test
docker exec aria-core ssh aria-wohnung hostname

# Tool-Test
# Neue Session in Diagnostic anlegen, dann:
# "Wie wird das Wetter in Bremen?"
# Brain-API direkt testen
docker exec aria-brain curl localhost:8080/health
docker exec aria-brain curl localhost:8080/memory/stats
```

---

@@ -788,9 +767,10 @@ docker exec aria-core ssh aria-wohnung hostname

- **Proxy cold start**: every message spawns a new `claude --print` process. This makes ARIA slower than the direct Claude CLI. The timeout is set to 900 s (15 min).
- **No streaming to the app**: the app only shows the finished answer, no streaming tokens.
- **Wake word in the app: built-in keywords only**: `jarvis`, `computer` etc. work immediately; custom wake words (`.ppn` from the Picovoice Console) currently still have to be placed into the app bundle manually. The upload UI in Diagnostic is phase 2.
- **Wake word in the app: built-in keywords only**: `Hey Jarvis`, `Alexa`, `Hey Mycroft`, `Hey Rhasspy` work immediately; custom wake words currently still have to be placed into the app bundle as an `.onnx` file and added to the list in `wakeword.ts`. The Diagnostic upload UI is phase 2.
- **Audio format**: the app records AAC/MP4, the Bridge converts it via FFmpeg to 16 kHz PCM.
- **RVS zombie connections**: WebSocket connections occasionally die without an error message. The Bridge has a ping check (5 s), Diagnostic uses a fresh connection per request.
@@ -816,7 +796,7 @@ docker exec aria-core ssh aria-wohnung hostname
- [x] SSH access to the VM (aria-wohnung)
- [x] Diagnostic web UI + settings
- [x] Session management + chat history
- [x] Voice settings (Ramona/Thorsten, speed, highlight triggers), replaced by XTTS v2 voice cloning
- [x] Voice settings (formerly Piper Ramona/Thorsten, highlight triggers), replaced by XTTS, then F5-TTS voice cloning
- [x] Piper removed completely; only XTTS v2 as TTS (gaming PC)
- [x] Streaming TTS: PCM chunks straight into AudioTrack, seamless playback
- [x] TTS sentence by sentence for long texts
@@ -843,9 +823,35 @@ docker exec aria-core ssh aria-wohnung hostname
- [x] Whisper STT offloaded to the Gamebox (CUDA float16, near real time)
- [x] **F5-TTS replaces XTTS**: better voice-cloning quality, Whisper-auto-transcribed reference text
- [x] Audio pause instead of ducking (TRANSIENT instead of MAY_DUCK) + release-timing fix
- [x] VAD silence tolerance and max recording configurable (1-8 s, 120 s)
- [x] VAD silence tolerance configurable (1-8 s) + adaptive mic baseline + max recording configurable (1-30 min)
- [x] Barge-in: the user can interrupt ARIA during an answer, aria-core gets a context hint
- [x] Call pause + auto-resume: TTS goes silent during a call and continues from the remembered position after hanging up (Date.now() tracking + WAV cache of the answer)
- [x] PcmPlaybackFinished event: audio focus is only released once AudioTrack is actually done; no more Spotify mid-TTS
- [x] Edge case: a new question during a phone call discards the pending auto-resume, the newest answer wins
- [x] Settings sub-screens: 8 categories instead of one long list
- [x] APK ABI split arm64-v8a: 35 MB instead of 136 MB
- [x] Voice-message bubble: audioRequestId instead of substring match; no more swapped bubbles with parallel recordings
- [x] Ready sound (airplane ding-dong) when the mic is open after the wake word; acoustic confirmation, can be disabled in settings
- [x] Wake word in parallel with TTS via AcousticEchoCanceler; saying "Computer" while ARIA speaks stops her and opens the mic
- [x] Send GPS position with messages (toggle in settings); ARIA only uses it for location-related questions, visible in the chat only in her answer
- [x] Voice messages without an STT result are removed automatically after a timeout (scales with recording length)
- [x] Background audio service: TTS, wake-word listening + recording keep running with the app minimized (foreground service with mediaPlayback|microphone, dynamic notification)
- [x] Disk-full banner in Diagnostic with copyable cleanup commands
- [x] Porcupine wake word on-device in the app (built-in keywords + state icon)
- [x] Wake word on-device via openWakeWord (ONNX Runtime, no API key) + state icon

### Phase A refactor: OpenClaw out, ARIA's own brain in

- [x] aria-brain container skeleton (FastAPI, Qdrant, sentence-transformers)
- [x] Diagnostic: brain tab (memory search/filter, add/edit/delete)
- [x] Diagnostic: brain export/import as tar.gz
- [x] Diagnostic: file manager (list, search, download, delete with live bubble update)
- [x] App: file manager as a modal in the settings
- [x] Diagnostic: full reset (wipe all)
- [x] Voice export/import (individual voices + F5/Whisper settings as a bundle)
- [x] aria-core (OpenClaw) completely torn out; tag `v0.1.2.0` kept as an archive
- [ ] **Phase B item 2:** migrate `aria-data/brain-import/` → atomic memory points
- [ ] **Phase B item 3:** brain conversation loop (single chat + rolling window + memory distillate)
- [ ] **Phase B item 4:** skills system (manifest, venv, README per skill, Diagnostic tab)

### Phase 2: ARIA becomes productive

@@ -854,12 +860,11 @@ docker exec aria-core ssh aria-wohnung hostname
- [ ] Set up the VM (desktop, browser, tools)
- [ ] Heartbeat (periodic self-checks)
- [ ] Local LLM as a gatekeeper (triage before the Claude call)
- [ ] Auto-compacting / memory management

### Phase 3: extensions

- [ ] STARFACE telephony skill
- [ ] Desktop client (Tauri)
- [ ] bKVM remote IT support
- [ ] Custom `.ppn` upload for the wake word via Diagnostic (own trigger words)
- [ ] Custom `.onnx` upload for the wake word via Diagnostic (without an app rebuild)
- [ ] Claude Vision directly (image analysis without the file-path detour)

App.tsx

@@ -13,6 +13,7 @@ import { createBottomTabNavigator } from '@react-navigation/bottom-tabs';
import ChatScreen from './src/screens/ChatScreen';
import SettingsScreen from './src/screens/SettingsScreen';
import rvs from './src/services/rvs';
import { initLogger } from './src/services/logger';

// --- Navigation ---

@@ -44,6 +45,10 @@ const TAB_ICONS: Record<string, { active: string; inactive: string }> = {
const App: React.FC = () => {
  // Beim Start: gespeicherte RVS-Konfiguration laden und verbinden
  useEffect(() => {
    // Verbose-Logging-Setting laden BEVOR andere Module loslegen.
    // initLogger ist async aber blockt nichts — solange er noch laueft,
    // loggen wir normal (Default an), danach respektiert console.log das Setting.
    initLogger().catch(() => {});
    const initConnection = async () => {
      const config = await rvs.loadConfig();
      if (config) {

build.gradle

@@ -79,8 +79,8 @@ android {
        applicationId "com.ariacockpit"
        minSdkVersion rootProject.ext.minSdkVersion
        targetSdkVersion rootProject.ext.targetSdkVersion
        versionCode 605
        versionName "0.0.6.5"
        versionCode 10202
        versionName "0.1.2.2"
        // Fallback fuer Libraries mit Product Flavors
        missingDimensionStrategy 'react-native-camera', 'general'
    }
@@ -104,6 +104,19 @@ android {
            proguardFiles getDefaultProguardFile("proguard-android.txt"), "proguard-rules.pro"
        }
    }

    // ABI-Split: nur arm64-v8a (jedes Android-Phone seit ~2017). Bringt die
    // APK von ~136 MB auf ~35 MB — relevant weil ONNX Runtime + die anderen
    // Native-Libs sonst pro Architektur dazukommen. Wer 32-bit oder Emulator
    // braucht, kann hier "armeabi-v7a", "x86_64" etc. ergaenzen.
    splits {
        abi {
            enable true
            reset()
            include "arm64-v8a"
            universalApk false
        }
    }
}

dependencies {
@@ -111,6 +124,9 @@ dependencies {
    implementation("com.facebook.react:react-android")
    implementation("com.facebook.react:flipper-integration")

    // ONNX Runtime fuer on-device Wake-Word (openWakeWord ONNX-Modelle in assets/openwakeword/)
    implementation("com.microsoft.onnxruntime:onnxruntime-android:1.17.1")

    if (hermesEnabled.toBoolean()) {
        implementation("com.facebook.react:hermes-android")
    } else {

AndroidManifest.xml

@@ -4,6 +4,19 @@
    <uses-permission android:name="android.permission.CAMERA" />
    <uses-permission android:name="android.permission.RECORD_AUDIO" />
    <uses-permission android:name="android.permission.REQUEST_INSTALL_PACKAGES" />
    <!-- Anruf-State lesen damit TTS bei klingelndem Telefon pausiert -->
    <uses-permission android:name="android.permission.READ_PHONE_STATE" />
    <!-- Optional: GPS-Position der Frage anhaengen (nur wenn User in Settings aktiviert) -->
    <uses-permission android:name="android.permission.ACCESS_COARSE_LOCATION" />
    <uses-permission android:name="android.permission.ACCESS_FINE_LOCATION" />
    <!-- Foreground-Service damit TTS auch bei minimierter App weiterlaeuft.
         FOREGROUND_SERVICE_MICROPHONE ist Pflicht ab Android 14 wenn der
         Service waehrend des Backgrounds aufs Mikro zugreift (Wake-Word,
         Aufnahme im Gespraechsmodus). -->
    <uses-permission android:name="android.permission.FOREGROUND_SERVICE" />
    <uses-permission android:name="android.permission.FOREGROUND_SERVICE_MEDIA_PLAYBACK" />
    <uses-permission android:name="android.permission.FOREGROUND_SERVICE_MICROPHONE" />
    <uses-permission android:name="android.permission.POST_NOTIFICATIONS" />

    <application
        android:name=".MainApplication"
@@ -35,5 +48,10 @@
                android:name="android.support.FILE_PROVIDER_PATHS"
                android:resource="@xml/file_paths" />
        </provider>

        <service
            android:name=".AriaPlaybackService"
            android:exported="false"
            android:foregroundServiceType="mediaPlayback|microphone" />
    </application>
</manifest>

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
ApkInstallerPackage.kt

@@ -7,7 +7,7 @@ import com.facebook.react.uimanager.ViewManager

class ApkInstallerPackage : ReactPackage {
    override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
        return listOf(ApkInstallerModule(reactContext))
        return listOf(ApkInstallerModule(reactContext), FileOpenerModule(reactContext))
    }

    override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {

AriaPlaybackService.kt (new file)

@@ -0,0 +1,108 @@
package com.ariacockpit

import android.app.Notification
import android.app.NotificationChannel
import android.app.NotificationManager
import android.app.PendingIntent
import android.app.Service
import android.content.Intent
import android.os.Build
import android.os.IBinder
import android.util.Log
import androidx.core.app.NotificationCompat

/**
 * Foreground-Service der den App-Prozess waehrend TTS-Wiedergabe am Leben
 * haelt — Android killt sonst den Prozess sobald die App im Hintergrund ist
 * und ARIA verstummt mitten im Satz.
 *
 * Notification ist persistent (ongoing) waehrend der Service laeuft.
 * Tap auf die Notification bringt MainActivity zurueck nach vorne.
 *
 * foregroundServiceType="mediaPlayback" ist Pflicht ab Android 14, sonst
 * wirft startForeground() eine SecurityException.
 */
class AriaPlaybackService : Service() {
    companion object {
        private const val TAG = "AriaPlaybackService"
        private const val CHANNEL_ID = "aria_playback"
        private const val NOTIFICATION_ID = 1042
        const val EXTRA_REASON = "reason" // "tts" | "wake" | "rec" | ""
    }

    private var currentReason: String = ""

    override fun onCreate() {
        super.onCreate()
        ensureNotificationChannel()
    }

    override fun onStartCommand(intent: Intent?, flags: Int, startId: Int): Int {
        val reason = intent?.getStringExtra(EXTRA_REASON) ?: ""
        currentReason = reason
        Log.i(TAG, "Foreground-Service start/update (reason=$reason)")
        try {
            startForeground(NOTIFICATION_ID, buildNotification(reason))
        } catch (e: Exception) {
            Log.e(TAG, "startForeground fehlgeschlagen", e)
            stopSelf()
        }
        // START_NOT_STICKY: wenn Android den Service killt, NICHT automatisch
        // wieder starten — die App entscheidet wann der Service noetig ist.
        return START_NOT_STICKY
    }

    override fun onDestroy() {
        Log.i(TAG, "Foreground-Service gestoppt")
        super.onDestroy()
    }

    override fun onBind(intent: Intent?): IBinder? = null

    private fun ensureNotificationChannel() {
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
            val nm = getSystemService(NotificationManager::class.java) ?: return
            if (nm.getNotificationChannel(CHANNEL_ID) == null) {
                val channel = NotificationChannel(
                    CHANNEL_ID,
                    "ARIA Audio-Wiedergabe",
                    NotificationManager.IMPORTANCE_LOW,
                ).apply {
                    description = "Notification waehrend ARIA spricht (haelt die App im Hintergrund am Leben)"
                    setShowBadge(false)
                }
                nm.createNotificationChannel(channel)
            }
        }
    }

    private fun buildNotification(reason: String): Notification {
        val launchIntent = Intent(this, MainActivity::class.java).apply {
            flags = Intent.FLAG_ACTIVITY_NEW_TASK or Intent.FLAG_ACTIVITY_CLEAR_TOP
        }
        val pendingFlags = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M)
            PendingIntent.FLAG_IMMUTABLE or PendingIntent.FLAG_UPDATE_CURRENT
        else
            PendingIntent.FLAG_UPDATE_CURRENT
        val pendingIntent = PendingIntent.getActivity(this, 0, launchIntent, pendingFlags)

        val (title, body) = when (reason) {
            "tts" -> "ARIA spricht" to "Antwort wird abgespielt — antippen oeffnet die App"
            "rec" -> "ARIA hoert zu" to "Sprachaufnahme laeuft — antippen oeffnet die App"
            "wake" -> "ARIA bereit" to "Wake-Word lauscht passiv — antippen oeffnet die App"
            else -> "ARIA aktiv" to "Hintergrund-Modus — antippen oeffnet die App"
        }

        return NotificationCompat.Builder(this, CHANNEL_ID)
            .setContentTitle(title)
            .setContentText(body)
            .setSmallIcon(R.mipmap.ic_launcher)
            .setContentIntent(pendingIntent)
            .setOngoing(true)
            .setShowWhen(false)
            .setPriority(NotificationCompat.PRIORITY_LOW)
            .setCategory(NotificationCompat.CATEGORY_SERVICE)
            .setVisibility(NotificationCompat.VISIBILITY_PUBLIC)
            .build()
    }
}

@@ -5,26 +5,71 @@ import android.media.AudioAttributes
import android.media.AudioFocusRequest
import android.media.AudioManager
import android.os.Build
import android.util.Log
import com.facebook.react.bridge.Arguments
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReactContextBaseJavaModule
import com.facebook.react.bridge.ReactMethod
import com.facebook.react.modules.core.DeviceEventManagerModule

/**
 * Steuert Audio-Focus fuer Ducking/Muten anderer Apps.
 * Steuert Audio-Focus fuer Ducking/Muten anderer Apps + emittiert Loss-Events
 * an JS damit ARIA bei VoIP-Anrufen (WhatsApp/Signal/Discord/...) aufhoert
 * zu sprechen — diese Anrufe gehen nicht ueber TelephonyManager, sondern
 * requestn AudioFocus_GAIN_TRANSIENT_EXCLUSIVE was wir hier mitbekommen.
 *
 * - requestDuck()      → andere Apps werden leiser (ARIA spricht TTS)
 * - requestExclusive() → andere Apps werden pausiert (Mikrofon-Aufnahme)
 * - release()          → Focus abgeben, andere Apps duerfen wieder
 *
 * Events:
 * - "AudioFocusChanged" mit type:
 *     "loss"           — endgueltiger Verlust (Anruf, andere App permanent)
 *     "loss_transient" — vorruebergehender Verlust (kurze Unterbrechung)
 *     "gain"           — Fokus zurueck
 */
class AudioFocusModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
    override fun getName() = "AudioFocus"

    companion object { private const val TAG = "AudioFocus" }

    private var currentRequest: AudioFocusRequest? = null

    private fun audioManager(): AudioManager? =
        reactApplicationContext.getSystemService(Context.AUDIO_SERVICE) as? AudioManager

    private fun emitFocusChange(type: String) {
        try {
            val params = Arguments.createMap().apply { putString("type", type) }
            reactApplicationContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
                .emit("AudioFocusChanged", params)
        } catch (e: Exception) {
            Log.w(TAG, "emit failed: ${e.message}")
        }
    }

    private val focusListener = AudioManager.OnAudioFocusChangeListener { focusChange ->
        when (focusChange) {
            AudioManager.AUDIOFOCUS_LOSS -> {
                Log.i(TAG, "AUDIOFOCUS_LOSS (z.B. Anruf, anderer Player permanent)")
                emitFocusChange("loss")
            }
            AudioManager.AUDIOFOCUS_LOSS_TRANSIENT -> {
                Log.i(TAG, "AUDIOFOCUS_LOSS_TRANSIENT (kurze Unterbrechung)")
                emitFocusChange("loss_transient")
            }
            AudioManager.AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK -> {
                // Notification-Sound o.ae. — wir ignorieren das, ARIA macht weiter
                Log.d(TAG, "AUDIOFOCUS_LOSS_CAN_DUCK ignoriert")
            }
            AudioManager.AUDIOFOCUS_GAIN -> {
                Log.i(TAG, "AUDIOFOCUS_GAIN")
                emitFocusChange("gain")
            }
        }
    }

    private fun requestFocus(durationHint: Int, usage: Int, promise: Promise) {
        val am = audioManager()
        if (am == null) {
@@ -41,13 +86,13 @@ class AudioFocusModule(reactContext: ReactApplicationContext) : ReactContextBase
                .build()
            val req = AudioFocusRequest.Builder(durationHint)
                .setAudioAttributes(attrs)
                .setOnAudioFocusChangeListener { /* kein Callback noetig */ }
                .setOnAudioFocusChangeListener(focusListener)
                .build()
            currentRequest = req
            am.requestAudioFocus(req)
        } else {
            @Suppress("DEPRECATION")
            am.requestAudioFocus(null, AudioManager.STREAM_MUSIC, durationHint)
            am.requestAudioFocus(focusListener, AudioManager.STREAM_MUSIC, durationHint)
        }

        promise.resolve(result == AudioManager.AUDIOFOCUS_REQUEST_GRANTED)
@@ -86,14 +131,82 @@ class AudioFocusModule(reactContext: ReactApplicationContext) : ReactContextBase
        promise.resolve(true)
    }

    /** Den USAGE_MEDIA-Focus-Stack im System aufmischen, damit Spotify/YouTube
     * resumen wenn ein anderer Player (z.B. react-native-sound) seinen Focus
     * nicht ordnungsgemaess released hat. Strategie: kurz selbst USAGE_MEDIA
     * GAIN beanspruchen — das System invalidiert dabei den haengenden Stack-
     * Eintrag des anderen Players — und sofort wieder abandonen. Spotify
     * bekommt den Focus-Gain und resumed.
     *
     * Workaround fuer das react-native-sound-Bug: Sound.stop()/release()
     * laesst den AudioFocusRequest haengen.
     */
    @ReactMethod
    fun kickReleaseMedia(promise: Promise) {
        val am = audioManager()
        if (am == null) {
            promise.resolve(false)
            return
        }
        // Async laufen lassen — wir wollen einen request, Pause, dann abandon.
        // Ohne Pause merkt das System (und damit Spotify) die kurze Owner-
        // Wechsel oft gar nicht. 250ms reicht erfahrungsgemaess fuer den
        // Focus-Stack-Refresh.
        Thread {
            try {
                if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
                    val attrs = AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                        .build()
                    val kickListener = AudioManager.OnAudioFocusChangeListener { /* ignorieren */ }
                    val kickReq = AudioFocusRequest.Builder(AudioManager.AUDIOFOCUS_GAIN)
                        .setAudioAttributes(attrs)
                        .setOnAudioFocusChangeListener(kickListener)
                        .build()
                    am.requestAudioFocus(kickReq)
                    Thread.sleep(250)
                    am.abandonAudioFocusRequest(kickReq)
                } else {
                    val kickListener = AudioManager.OnAudioFocusChangeListener { /* ignorieren */ }
                    @Suppress("DEPRECATION")
                    am.requestAudioFocus(kickListener, AudioManager.STREAM_MUSIC, AudioManager.AUDIOFOCUS_GAIN)
                    Thread.sleep(250)
                    @Suppress("DEPRECATION")
                    am.abandonAudioFocus(kickListener)
                }
                Log.i(TAG, "kickReleaseMedia: USAGE_MEDIA-Stack aufgemischt (250ms Pause)")
            } catch (e: Exception) {
                Log.w(TAG, "kickReleaseMedia failed: ${e.message}")
            }
        }.start()
        promise.resolve(true)
    }

    private fun release() {
        val am = audioManager() ?: return
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
            currentRequest?.let { am.abandonAudioFocusRequest(it) }
        } else {
            @Suppress("DEPRECATION")
            am.abandonAudioFocus(null)
            am.abandonAudioFocus(focusListener)
        }
        currentRequest = null
    }

    /** Aktueller Audio-Mode: NORMAL=0, IN_CALL=2, IN_COMMUNICATION=3, CALL_SCREENING=4.
     * IN_COMMUNICATION ist der typische VoIP-Anruf-Mode (WhatsApp, Signal, etc.) —
     * kann gepollt werden um zu erkennen wann der Anruf vorbei ist (zurueck NORMAL). */
    @ReactMethod
    fun getMode(promise: Promise) {
        val am = audioManager()
        if (am == null) {
            promise.resolve(0)
            return
        }
        promise.resolve(am.mode)
    }

    @ReactMethod fun addListener(eventName: String) {}
    @ReactMethod fun removeListeners(count: Int) {}
}
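The JS consumer of these events is not part of this diff; a minimal sketch of how the app side might subscribe to "AudioFocusChanged" and poll getMode() to notice the end of a VoIP call (pauseTts/resumeTts and the 1-second poll are assumptions, not from the commits):

```ts
import { NativeModules, NativeEventEmitter } from 'react-native';

const { AudioFocus } = NativeModules;
const emitter = new NativeEventEmitter(AudioFocus);

export function watchAudioFocus(pauseTts: () => void, resumeTts: () => void) {
  return emitter.addListener('AudioFocusChanged', ({ type }) => {
    if (type === 'loss' || type === 'loss_transient') {
      pauseTts();
      // Poll getMode() once per second: 3 = MODE_IN_COMMUNICATION (VoIP call),
      // back to 0 = MODE_NORMAL means the call is over.
      const poll = setInterval(async () => {
        if ((await AudioFocus.getMode()) === 0) {
          clearInterval(poll);
          resumeTts();
        }
      }, 1000);
    }
  });
}
```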
@@ -0,0 +1,59 @@
package com.ariacockpit

import android.content.Intent
import android.os.Build
import android.util.Log
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReactContextBaseJavaModule
import com.facebook.react.bridge.ReactMethod

/**
 * RN-Bridge fuer den AriaPlaybackService.
 *
 * Wird vom JS waehrend einer TTS-Wiedergabe gestartet damit Android den
 * App-Prozess nicht killt wenn die App im Hintergrund ist (= ARIA spricht
 * weiter, auch wenn Stefan die App minimiert hat).
 *
 * Service stoppt entweder explizit per stop() oder wird von Android
 * mitgekillt wenn der Prozess weg ist (was bei Foreground-Service nur
 * passiert wenn der User die App force-stopped).
 */
class BackgroundAudioModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
    override fun getName() = "BackgroundAudio"

    companion object { private const val TAG = "BackgroundAudio" }

    @ReactMethod
    fun start(reason: String, promise: Promise) {
        try {
            val ctx = reactApplicationContext
            val intent = Intent(ctx, AriaPlaybackService::class.java)
            intent.putExtra(AriaPlaybackService.EXTRA_REASON, reason ?: "")
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
                ctx.startForegroundService(intent)
            } else {
                ctx.startService(intent)
            }
            promise.resolve(true)
        } catch (e: Exception) {
            Log.w(TAG, "start fehlgeschlagen: ${e.message}")
            promise.reject("START_FAILED", e.message ?: "Unbekannter Fehler", e)
        }
    }

    @ReactMethod
    fun stop(promise: Promise) {
        try {
            val ctx = reactApplicationContext
            ctx.stopService(Intent(ctx, AriaPlaybackService::class.java))
            promise.resolve(true)
        } catch (e: Exception) {
            Log.w(TAG, "stop fehlgeschlagen: ${e.message}")
            promise.reject("STOP_FAILED", e.message ?: "Unbekannter Fehler", e)
        }
    }

    @ReactMethod fun addListener(eventName: String) {}
    @ReactMethod fun removeListeners(count: Int) {}
}
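A sketch of how the JS layer could bracket one TTS playback with this service so Android keeps the process alive in the background (the playTts callback is illustrative, not part of the commits):

```ts
import { NativeModules } from 'react-native';

const { BackgroundAudio } = NativeModules;

// Keep the foreground service alive for exactly the duration of one playback.
export async function speakInBackground(playTts: () => Promise<void>) {
  await BackgroundAudio.start('tts');
  try {
    await playTts();              // hypothetical playback routine
  } finally {
    await BackgroundAudio.stop(); // stop the service even if playback throws
  }
}
```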
@@ -0,0 +1,16 @@
package com.ariacockpit

import com.facebook.react.ReactPackage
import com.facebook.react.bridge.NativeModule
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.uimanager.ViewManager

class BackgroundAudioPackage : ReactPackage {
    override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
        return listOf(BackgroundAudioModule(reactContext))
    }

    override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
        return emptyList()
    }
}
@@ -0,0 +1,55 @@
package com.ariacockpit

import android.content.Intent
import android.net.Uri
import android.os.Build
import androidx.core.content.FileProvider
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReactContextBaseJavaModule
import com.facebook.react.bridge.ReactMethod
import java.io.File

/**
 * Oeffnet eine beliebige Datei (PDF, Bild, Office-Doc, ...) mit der vom User
 * gewaehlten App via Android-Intent-Picker. Nutzt FileProvider damit auch
 * Android 7+ (content:// statt file://) das URI lesen darf.
 *
 * MIME-Type wird vom Caller bestimmt — App-Auswahl ist davon abhaengig (PDF
 * geht an PDF-Viewer, image/jpeg an Galerie, etc.).
 */
class FileOpenerModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
    override fun getName() = "FileOpener"

    @ReactMethod
    fun open(filePath: String, mimeType: String, promise: Promise) {
        try {
            val cleanPath = filePath.removePrefix("file://")
            val file = File(cleanPath)
            if (!file.exists()) {
                promise.reject("FILE_NOT_FOUND", "Datei nicht gefunden: $cleanPath")
                return
            }
            val context = reactApplicationContext
            val uri: Uri = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
                FileProvider.getUriForFile(context, "${context.packageName}.fileprovider", file)
            } else {
                Uri.fromFile(file)
            }
            val safeMime = if (mimeType.isBlank()) "application/octet-stream" else mimeType
            val intent = Intent(Intent.ACTION_VIEW).apply {
                setDataAndType(uri, safeMime)
                addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
                addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION)
            }
            // Chooser zeigt Android-Auswahl falls mehrere Apps das MIME oeffnen koennen.
            val chooser = Intent.createChooser(intent, "Oeffnen mit").apply {
                addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
            }
            context.startActivity(chooser)
            promise.resolve(true)
        } catch (e: Exception) {
            promise.reject("OPEN_ERROR", e.message, e)
        }
    }
}
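A possible call from JS, assuming the file was already downloaded locally (the path and the wrapper name here are made-up examples):

```ts
import { NativeModules } from 'react-native';

const { FileOpener } = NativeModules;

// Shows the Android chooser; rejects with FILE_NOT_FOUND or OPEN_ERROR.
export function openDownloadedPdf(localPath: string) {
  return FileOpener.open(localPath, 'application/pdf');
}
```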
@@ -21,6 +21,9 @@ class MainApplication : Application(), ReactApplication {
              add(ApkInstallerPackage())
              add(AudioFocusPackage())
              add(PcmStreamPlayerPackage())
              add(OpenWakeWordPackage())
              add(PhoneCallPackage())
              add(BackgroundAudioPackage())
            }

        override fun getJSMainModuleName(): String = "index"
@@ -0,0 +1,413 @@
package com.ariacockpit

import ai.onnxruntime.OnnxTensor
import ai.onnxruntime.OrtEnvironment
import ai.onnxruntime.OrtSession
import android.Manifest
import android.content.pm.PackageManager
import android.media.AudioFormat
import android.media.AudioRecord
import android.media.MediaRecorder
import android.media.audiofx.AcousticEchoCanceler
import android.media.audiofx.AutomaticGainControl
import android.media.audiofx.NoiseSuppressor
import android.util.Log
import androidx.core.content.ContextCompat
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReactContextBaseJavaModule
import com.facebook.react.bridge.ReactMethod
import com.facebook.react.modules.core.DeviceEventManagerModule
import java.nio.FloatBuffer
import java.util.concurrent.atomic.AtomicBoolean

/**
 * Wake-Word Erkennung on-device via openWakeWord (https://github.com/dscripka/openWakeWord).
 *
 * Drei-stufige ONNX Pipeline:
 *  1. Audio (16kHz mono int16, 1280-Sample-Chunks) → Melspectrogram → 32-mel Frames
 *  2. 76 Mel-Frames Sliding Window (stride 8) → Speech-Embedding → 96-dim Vektor
 *  3. Letzte 16 Embeddings (~1.28s Kontext) → Wake-Word-Klassifikator → Sigmoid-Score
 *
 * Modelle liegen in assets/openwakeword/ (mel + embedding shared, plus pro Keyword
 * ein eigenes .onnx). Erkennung feuert nach `patience` aufeinanderfolgenden
 * Frames ueber `threshold` und unterdrueckt Wiederholungen fuer `debounceMs`.
 *
 * Emittiert "WakeWordDetected" als RN-Event wenn ein Trigger erkannt wurde.
 */
class OpenWakeWordModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
    override fun getName() = "OpenWakeWord"

    companion object {
        private const val TAG = "OpenWakeWord"
        private const val SAMPLE_RATE = 16000
        private const val CHUNK_SAMPLES = 1280          // 80ms @ 16kHz
        private const val MEL_FRAMES_PER_EMBEDDING = 76 // Embedding-Fenster
        private const val EMBEDDING_STRIDE = 8          // Slide um 8 Mel-Frames
        private const val EMBEDDING_DIM = 96
        private const val MEL_BINS = 32
        private const val DEFAULT_WW_INPUT_FRAMES = 16  // Fallback wenn Modell-Metadata fehlt
    }

    private val env: OrtEnvironment = OrtEnvironment.getEnvironment()
    private var melSession: OrtSession? = null
    private var embSession: OrtSession? = null
    private var wwSession: OrtSession? = null

    private var melInputName: String = "input"
    private var embInputName: String = "input_1"
    private var wwInputName: String = "input"
    // Anzahl Embedding-Frames die der Wake-Word-Klassifikator pro Inferenz erwartet —
    // hey_jarvis hat 16, andere Community-Modelle koennen abweichen (z.B. 28).
    // Wird beim init() aus den Modell-Metadaten gelesen.
    private var wwInputFrames: Int = DEFAULT_WW_INPUT_FRAMES

    // Konfiguration
    private var threshold: Float = 0.5f
    private var patience: Int = 2
    private var debounceMs: Long = 1500
    private var modelName: String = "hey_jarvis"

    // Audio-Capture-Thread
    private var audioRecord: AudioRecord? = null
    private val running = AtomicBoolean(false)
    private var captureThread: Thread? = null

    // Audio-Effects: Echo-Cancellation (gegen ARIAs eigene TTS-Stimme die sonst
    // das Wake-Word triggern wuerde) + Noise-Suppression. Per VOICE_COMMUNICATION
    // Audio-Source schon vorhanden, aber explizites Aktivieren ist robuster.
    private var aec: AcousticEchoCanceler? = null
    private var ns: NoiseSuppressor? = null
    private var agc: AutomaticGainControl? = null

    // Inferenz-State
    private val melBuffer: ArrayList<FloatArray> = ArrayList(256)   // Liste von 32-dim Frames
    private var melProcessedIdx: Int = 0
    private val embBuffer: ArrayDeque<FloatArray> = ArrayDeque(32)  // Ringpuffer letzter Embeddings
    private var consecutiveAboveThreshold: Int = 0
    private var lastDetectionMs: Long = 0L

    /**
     * Initialisiert die ONNX-Sessions fuer ein bestimmtes Wake-Word.
     * modelName: dateiname ohne Suffix (z.B. "hey_jarvis", "alexa", "hey_mycroft", "hey_rhasspy")
     */
    @ReactMethod
    fun init(modelName: String, threshold: Double, patience: Int, debounceMs: Int, promise: Promise) {
        try {
            disposeSessions()
            this.modelName = modelName
            this.threshold = threshold.toFloat()
            this.patience = patience.coerceAtLeast(1)
            this.debounceMs = debounceMs.toLong()

            val ctx = reactApplicationContext
            val melBytes = ctx.assets.open("openwakeword/melspectrogram.onnx").use { it.readBytes() }
            val embBytes = ctx.assets.open("openwakeword/embedding_model.onnx").use { it.readBytes() }
            val wwBytes = ctx.assets.open("openwakeword/$modelName.onnx").use { it.readBytes() }

            val opts = OrtSession.SessionOptions()
            melSession = env.createSession(melBytes, opts)
            embSession = env.createSession(embBytes, opts)
            wwSession = env.createSession(wwBytes, opts)

            melInputName = melSession!!.inputNames.first()
            embInputName = embSession!!.inputNames.first()
            wwInputName = wwSession!!.inputNames.first()

            // WW-Input-Frame-Count aus dem Modell lesen — variiert pro Keyword.
            // Erwartete Form: (1, N, 96), N steht in der Modell-Metadaten.
            val wwInputInfo = wwSession!!.inputInfo[wwInputName]
            val wwShape = (wwInputInfo?.info as? ai.onnxruntime.TensorInfo)?.shape
            wwInputFrames = wwShape?.getOrNull(1)?.toInt()?.takeIf { it > 0 } ?: DEFAULT_WW_INPUT_FRAMES

            Log.i(TAG, "Init OK: model=$modelName wwFrames=$wwInputFrames threshold=$threshold patience=$patience " +
                "debounce=${debounceMs}ms (inputs: mel=$melInputName emb=$embInputName ww=$wwInputName)")
            promise.resolve(true)
        } catch (e: Exception) {
            Log.e(TAG, "Init fehlgeschlagen: ${e.message}", e)
            disposeSessions()
            promise.reject("INIT_FAILED", e.message ?: "Unbekannter Fehler", e)
        }
    }

    @ReactMethod
    fun start(promise: Promise) {
        if (running.get()) {
            promise.resolve(true)
            return
        }
        if (melSession == null || embSession == null || wwSession == null) {
            promise.reject("NOT_INITIALIZED", "init() muss vor start() aufgerufen werden")
            return
        }
        // Berechtigung pruefen — der App-Code holt die ueblicherweise schon vorher,
        // aber wir bestehen hier explizit darauf damit AudioRecord nicht stumm
        // failt.
        val perm = ContextCompat.checkSelfPermission(reactApplicationContext, Manifest.permission.RECORD_AUDIO)
        if (perm != PackageManager.PERMISSION_GRANTED) {
            promise.reject("NO_MIC_PERMISSION", "RECORD_AUDIO Permission fehlt")
            return
        }

        try {
            val minBuf = AudioRecord.getMinBufferSize(
                SAMPLE_RATE,
                AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT,
            ).coerceAtLeast(CHUNK_SAMPLES * 2 * 4)

            // VOICE_COMMUNICATION-Source: aktiviert auf den meisten Android-Geraeten
            // automatisch Echo-Cancellation + Noise-Suppression. Wichtig damit
            // ARIAs eigene Stimme nicht das Wake-Word triggert wenn parallel
            // zur TTS-Wiedergabe gelauscht wird.
            val record = AudioRecord(
                MediaRecorder.AudioSource.VOICE_COMMUNICATION,
                SAMPLE_RATE,
                AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT,
                minBuf,
            )
            if (record.state != AudioRecord.STATE_INITIALIZED) {
                record.release()
                promise.reject("AUDIO_INIT", "AudioRecord nicht initialisiert (Mikro belegt?)")
                return
            }
            audioRecord = record

            // Audio-Effects ZUSAETZLICH explizit aktivieren — manche Geraete
            // benoetigen das, obwohl VOICE_COMMUNICATION es eigentlich schon
            // mitbringt. Failure ist nicht kritisch (continue ohne Effects).
            try {
                if (AcousticEchoCanceler.isAvailable()) {
                    aec = AcousticEchoCanceler.create(record.audioSessionId)?.apply { enabled = true }
                    Log.i(TAG, "AEC aktiviert (enabled=${aec?.enabled})")
                }
            } catch (e: Exception) { Log.w(TAG, "AEC failed: ${e.message}") }
            try {
                if (NoiseSuppressor.isAvailable()) {
                    ns = NoiseSuppressor.create(record.audioSessionId)?.apply { enabled = true }
                }
            } catch (e: Exception) { Log.w(TAG, "NS failed: ${e.message}") }
            try {
                if (AutomaticGainControl.isAvailable()) {
                    agc = AutomaticGainControl.create(record.audioSessionId)?.apply { enabled = true }
                }
            } catch (e: Exception) { Log.w(TAG, "AGC failed: ${e.message}") }

            resetInferenceState()
            running.set(true)
            record.startRecording()

            captureThread = Thread({ captureLoop() }, "OpenWakeWordCapture").apply {
                isDaemon = true
                start()
            }

            Log.i(TAG, "Lauschen gestartet (model=$modelName)")
            promise.resolve(true)
        } catch (e: Exception) {
            Log.e(TAG, "start fehlgeschlagen", e)
            running.set(false)
            audioRecord?.release()
            audioRecord = null
            promise.reject("START_FAILED", e.message ?: "Unbekannter Fehler", e)
        }
    }

    private fun releaseAudioEffects() {
        try { aec?.release() } catch (_: Exception) {}
        try { ns?.release() } catch (_: Exception) {}
        try { agc?.release() } catch (_: Exception) {}
        aec = null; ns = null; agc = null
    }

    @ReactMethod
    fun stop(promise: Promise) {
        running.set(false)
        try {
            captureThread?.join(1500)
        } catch (_: InterruptedException) {}
        captureThread = null
        try { audioRecord?.stop() } catch (_: Exception) {}
        try { audioRecord?.release() } catch (_: Exception) {}
        audioRecord = null
        releaseAudioEffects()
        Log.i(TAG, "Lauschen gestoppt")
        promise.resolve(true)
    }

    @ReactMethod
    fun dispose(promise: Promise) {
        running.set(false)
        try { captureThread?.join(1000) } catch (_: InterruptedException) {}
        captureThread = null
        try { audioRecord?.stop() } catch (_: Exception) {}
        try { audioRecord?.release() } catch (_: Exception) {}
        audioRecord = null
        releaseAudioEffects()
        disposeSessions()
        promise.resolve(true)
    }

    @ReactMethod
    fun isAvailable(promise: Promise) {
        // Wake-Word ist immer verfuegbar (kein API-Key, alles on-device)
        promise.resolve(true)
    }

    // RN-Event-Subscriptions — RN-Konvention, sonst Warnung im Debug-Build
    @ReactMethod fun addListener(eventName: String) {}
    @ReactMethod fun removeListeners(count: Int) {}

    private fun disposeSessions() {
        try { melSession?.close() } catch (_: Exception) {}
        try { embSession?.close() } catch (_: Exception) {}
        try { wwSession?.close() } catch (_: Exception) {}
        melSession = null
        embSession = null
        wwSession = null
    }

    private fun resetInferenceState() {
        melBuffer.clear()
        melProcessedIdx = 0
        embBuffer.clear()
        consecutiveAboveThreshold = 0
        lastDetectionMs = 0L
    }

    private fun emitDetected() {
        val params = com.facebook.react.bridge.Arguments.createMap().apply {
            putString("model", modelName)
        }
        try {
            reactApplicationContext
                .getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
                .emit("WakeWordDetected", params)
        } catch (e: Exception) {
            Log.w(TAG, "emit fehlgeschlagen: ${e.message}")
        }
    }

    private fun captureLoop() {
        val buf = ShortArray(CHUNK_SAMPLES)
        val record = audioRecord ?: return
        Log.i(TAG, "Capture-Loop gestartet")
        while (running.get()) {
            var read = 0
            while (read < CHUNK_SAMPLES && running.get()) {
                val n = record.read(buf, read, CHUNK_SAMPLES - read)
                if (n <= 0) {
                    Log.w(TAG, "AudioRecord.read returned $n — Loop ende")
                    running.set(false)
                    return
                }
                read += n
            }
            if (!running.get()) break
            try {
                processChunk(buf)
            } catch (e: Exception) {
                Log.w(TAG, "processChunk: ${e.message}")
            }
        }
        Log.i(TAG, "Capture-Loop beendet")
    }

    /** Verarbeitet einen 1280-Sample int16 Audio-Chunk. */
    private fun processChunk(audio: ShortArray) {
        // 1) Audio → mel (output (1, 1, frames, 32))
        val floats = FloatArray(audio.size) { audio[it].toFloat() }
        val melTensor = OnnxTensor.createTensor(
            env,
            FloatBuffer.wrap(floats),
            longArrayOf(1L, audio.size.toLong()),
        )
        val melResult = melSession!!.run(mapOf(melInputName to melTensor))
        val melOut = melResult.get(0).value
        melTensor.close()
        @Suppress("UNCHECKED_CAST")
        val mel4 = melOut as Array<Array<Array<FloatArray>>>
        val frames = mel4[0][0]
        // openWakeWord wendet `mel/10 + 2` an, bevor es ans Embedding-Modell geht
        for (frame in frames) {
            val scaled = FloatArray(frame.size) { frame[it] / 10f + 2f }
            melBuffer.add(scaled)
        }
        melResult.close()

        // 2) Sliding window: alle vollstaendigen 76-Frame-Fenster verarbeiten
        while (melBuffer.size >= melProcessedIdx + MEL_FRAMES_PER_EMBEDDING) {
            val flat = FloatArray(MEL_FRAMES_PER_EMBEDDING * MEL_BINS)
            var pos = 0
            for (i in 0 until MEL_FRAMES_PER_EMBEDDING) {
                val src = melBuffer[melProcessedIdx + i]
                System.arraycopy(src, 0, flat, pos, MEL_BINS)
                pos += MEL_BINS
            }
            val embIn = OnnxTensor.createTensor(
                env,
                FloatBuffer.wrap(flat),
                longArrayOf(1L, MEL_FRAMES_PER_EMBEDDING.toLong(), MEL_BINS.toLong(), 1L),
            )
            val embRes = embSession!!.run(mapOf(embInputName to embIn))
            val embOut = embRes.get(0).value
            embIn.close()
            // Erwartete Output-Form: (1, 1, 1, 96) — rank-4, NICHT (1, 96).
            // Die Google-Embedding-Pipeline behaelt extra Dimensionen.
            @Suppress("UNCHECKED_CAST")
            val embArr = embOut as Array<Array<Array<FloatArray>>>
            embBuffer.addLast(embArr[0][0][0].copyOf())
            while (embBuffer.size > wwInputFrames) embBuffer.removeFirst()
            embRes.close()

            melProcessedIdx += EMBEDDING_STRIDE
        }
        // Mel-Buffer trimmen — verhindert Memory-Wachstum
        if (melProcessedIdx > MEL_FRAMES_PER_EMBEDDING) {
            val keepFrom = melProcessedIdx - MEL_FRAMES_PER_EMBEDDING
            val newList = ArrayList<FloatArray>(melBuffer.size - keepFrom)
            for (i in keepFrom until melBuffer.size) newList.add(melBuffer[i])
            melBuffer.clear()
            melBuffer.addAll(newList)
            melProcessedIdx = MEL_FRAMES_PER_EMBEDDING
        }

        // 3) Klassifikation — sobald wir 16 Embeddings haben
        if (embBuffer.size < wwInputFrames) return
        val flatEmb = FloatArray(wwInputFrames * EMBEDDING_DIM)
        var p = 0
        // Letzte wwInputFrames Embeddings nehmen (embBuffer ist auf wwInputFrames begrenzt)
        for (e in embBuffer) {
            System.arraycopy(e, 0, flatEmb, p, EMBEDDING_DIM)
            p += EMBEDDING_DIM
        }
        val wwIn = OnnxTensor.createTensor(
            env,
            FloatBuffer.wrap(flatEmb),
            longArrayOf(1L, wwInputFrames.toLong(), EMBEDDING_DIM.toLong()),
        )
        val wwRes = wwSession!!.run(mapOf(wwInputName to wwIn))
        val wwOut = wwRes.get(0).value
        wwIn.close()
        // Erwartete Output-Form: (1, 1) → Array<FloatArray>
        @Suppress("UNCHECKED_CAST")
        val score = (wwOut as Array<FloatArray>)[0][0]
        wwRes.close()

        if (score >= threshold) {
            consecutiveAboveThreshold++
            if (consecutiveAboveThreshold >= patience) {
                val now = System.currentTimeMillis()
                if (now - lastDetectionMs >= debounceMs) {
                    lastDetectionMs = now
                    consecutiveAboveThreshold = 0
                    Log.i(TAG, "Wake-Word erkannt! score=$score model=$modelName")
                    emitDetected()
                }
            }
        } else {
            consecutiveAboveThreshold = 0
        }
    }
}
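The JS counterpart is not in this diff; a minimal sketch of the expected call sequence, reusing the tuning values that match the Kotlin defaults (the teardown closure and onDetected callback are assumptions):

```ts
import { NativeModules, NativeEventEmitter } from 'react-native';

const { OpenWakeWord } = NativeModules;
const emitter = new NativeEventEmitter(OpenWakeWord);

export async function startWakeWordListening(onDetected: (model: string) => void) {
  // threshold 0.5, patience 2 frames, 1500 ms debounce: same as the native defaults
  await OpenWakeWord.init('hey_jarvis', 0.5, 2, 1500);
  await OpenWakeWord.start();
  const sub = emitter.addListener('WakeWordDetected', e => onDetected(e.model));
  // teardown for unmount
  return async () => {
    sub.remove();
    await OpenWakeWord.stop();
    await OpenWakeWord.dispose();
  };
}
```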
@@ -0,0 +1,16 @@
package com.ariacockpit

import com.facebook.react.ReactPackage
import com.facebook.react.bridge.NativeModule
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.uimanager.ViewManager

class OpenWakeWordPackage : ReactPackage {
    override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
        return listOf(OpenWakeWordModule(reactContext))
    }

    override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
        return emptyList()
    }
}
@@ -4,12 +4,15 @@ import android.media.AudioAttributes
import android.media.AudioFormat
import android.media.AudioManager
import android.media.AudioTrack
import android.os.Build
import android.util.Base64
import android.util.Log
import com.facebook.react.bridge.Arguments
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReactContextBaseJavaModule
import com.facebook.react.bridge.ReactMethod
import com.facebook.react.modules.core.DeviceEventManagerModule
import java.util.concurrent.LinkedBlockingQueue

/**
@@ -76,9 +79,12 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
        val encoding = AudioFormat.ENCODING_PCM_16BIT
        val minBuf = AudioTrack.getMinBufferSize(sampleRate, channelConfig, encoding)
        val bytesPerSecond = sampleRate * channels * 2 // 16-bit = 2 bytes
        // Buffer muss mindestens PREROLL + etwas Spielraum fassen.
        val prerollTarget = (bytesPerSecond * prerollSec).toInt()
        val bufferSize = (minBuf * 32).coerceAtLeast(prerollTarget * 2)
        // Buffer entkoppelt von Preroll — fester ~4s-Buffer. OnePlus A12
        // mit USAGE_ASSISTANT laeuft AudioTrack erst ab ~3s gepufferter
        // Daten an. Wir padden Kurztexte vor play() auf 3s (siehe Block
        // nach mainLoop), Buffer braucht ~1s Headroom weil write() blockt.
        val bufferSize = (bytesPerSecond * 4).coerceAtLeast(minBuf * 8)
        prerollBytes = prerollTarget
        bytesBuffered = 0
        playbackStarted = false
@@ -102,7 +108,20 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
            .setTransferMode(AudioTrack.MODE_STREAM)
            .build()

        // AudioTrack erstellen — play() wird erst aufgerufen wenn Pre-Roll erreicht.
        // Start-Threshold runterdrehen: Default ist bufferSize/2 (= 2s bei 4s
        // Buffer). AudioTrack startet sonst nicht bevor 2s im Puffer sind —
        // bei kurzen TTS-Antworten (3 Worte ~ 1.4s) bleibt pos auf 0 stehen.
        // 0.1s reicht damit AudioTrack sofort mit dem ersten Chunk anlaeuft.
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
            try {
                val startFrames = (sampleRate / 10).coerceAtLeast(1) // 100ms
                newTrack.setStartThresholdInFrames(startFrames)
                Log.i(TAG, "Start-Threshold gesetzt: ${startFrames} frames (~100ms)")
            } catch (e: Exception) {
                Log.w(TAG, "setStartThresholdInFrames failed: ${e.message}")
            }
        }

        track = newTrack
        queue.clear()
        writerShouldStop = false
@@ -137,31 +156,69 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
                    Log.w(TAG, "play() sofort failed: ${e.message}")
                }
            }
            // Idle-Cutoff: wenn endRequested NICHT kam aber lange nichts mehr
            // reinkommt, brechen wir ab (Bridge-Crash, verlorener final).
            // 120s damit lange F5-TTS-Render-Pausen zwischen Saetzen (z.B. bei
            // Modell-Wechsel oder kalter GPU) nicht den Stream abreissen.
            var idleMs = 0L
            val maxIdleMs = 120_000L
            // Zielpufferfuellung — unter diesem Wasserstand fuettern wir
            // Stille rein damit AudioTrack nicht underrunt waehrend die
            // Bridge den naechsten Satz rendert. Spotify/YouTube reagieren
            // sonst mit eigenmaechtiger Wiederaufnahme nach ~10s Stille.
            val underrunGuardFrames = sampleRate / 10 // ~100ms
            val silenceFillFrames = sampleRate / 20   // ~50ms pro Refill

            mainLoop@ while (!writerShouldStop) {
                val data = queue.poll(50, java.util.concurrent.TimeUnit.MILLISECONDS)
                if (data == null) {
                    if (endRequested) {
                        // Falls wir vor Pre-Roll enden (kurzer Text): trotzdem abspielen
                        // Falls play() noch gar nicht lief (Stream ohne data
                        // ueberhaupt — sehr seltene Edge-Case): jetzt anstossen
                        // damit das finally{}-Wait nicht endlos blockt.
                        if (!playbackStarted) {
                            try {
                                t.play()
                                playbackStarted = true
                                Log.i(TAG, "Playback gestartet VOR Pre-Roll (kurzer Text, ${bytesBuffered}B gepuffert)")
                            } catch (e: Exception) {
                                Log.w(TAG, "play() fallback failed: ${e.message}")
                            }
                            try { t.play(); playbackStarted = true } catch (_: Exception) {}
                        }
                        break@mainLoop
                    }
                    // Underrun-Schutz: Stille reinfuettern wenn der AudioTrack-
                    // Puffer leerzulaufen droht. Spotify resumed sonst nach
                    // ~10s Pause auf eigene Faust, obwohl wir den Fokus halten.
                    if (playbackStarted) {
                        val framesWritten = bytesBuffered / streamBytesPerFrame
                        val framesPlayed = t.playbackHeadPosition.toLong()
                        val framesInBuffer = framesWritten - framesPlayed
                        if (framesInBuffer < underrunGuardFrames) {
                            val fillBytes = silenceFillFrames * streamBytesPerFrame
                            val silence = ByteArray(fillBytes)
                            var silOff = 0
                            while (silOff < silence.size && !writerShouldStop) {
                                val w = t.write(silence, silOff, silence.size - silOff)
                                if (w <= 0) break
                                silOff += w
                            }
                            bytesBuffered += silence.size
                        }
                    }
                    idleMs += 50L
                    if (idleMs >= maxIdleMs) {
                        Log.w(TAG, "Idle-Cutoff: ${maxIdleMs}ms keine Daten — Stream wird beendet")
                        break@mainLoop
                    }
                    continue@mainLoop
                }
                idleMs = 0L

                // Pre-Roll Check: play() erst wenn genug gepuffert
                if (!playbackStarted && bytesBuffered + data.size >= prerollBytes) {
                // play() beim ALLERERSTEN data-chunk aufrufen — egal wie wenig
                // Daten da sind. Sonst stallt AudioTrack auf OnePlus A12 wenn
                // play() erst gerufen wird nachdem der Buffer komplett gefuellt
                // ist. Pre-Roll als "Vorrat aufbauen" passiert dann waehrend
                // der Track schon spielt — Underrun-Schutz fuettert ggf. Stille.
                if (!playbackStarted) {
                    try {
                        t.play()
                        playbackStarted = true
                        Log.i(TAG, "Playback gestartet nach Pre-Roll ${bytesBuffered + data.size} Bytes")
                        Log.i(TAG, "Playback gestartet beim 1. Chunk (${bytesBuffered}B leading + ${data.size}B data)")
                    } catch (e: Exception) {
                        Log.w(TAG, "play() failed: ${e.message}")
                    }
@@ -197,12 +254,21 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
                val totalFrames = (bytesBuffered / streamBytesPerFrame).toInt()
                var lastPos = -1
                var stalledCount = 0
                var retried = false
                while (!writerShouldStop) {
                    val pos = t.playbackHeadPosition
                    if (pos >= totalFrames) break
                    // Safety: wenn Position 2s nicht mehr vorwaerts → AudioTrack hing
                    if (pos == lastPos) {
                        stalledCount++
                        // Nach 500ms Stillstand: AudioTrack-Quirk auf manchen
                        // Geraeten (OnePlus A12) — play() nochmal anstossen.
                        if (stalledCount == 10 && pos == 0 && !retried) {
                            retried = true
                            Log.w(TAG, "playback nicht angefahren — retry play()")
                            try { t.play() } catch (e: Exception) {
                                Log.w(TAG, "retry play() failed: ${e.message}")
                            }
                        }
                        if (stalledCount > 40) {
                            Log.w(TAG, "playback stalled at $pos/$totalFrames — give up")
                            break
@@ -217,6 +283,17 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
                } catch (_: Exception) {}
                try { t.stop() } catch (_: Exception) {}
                try { t.release() } catch (_: Exception) {}
                // RN-Event: AudioTrack ist wirklich durch (alle Samples gespielt).
                // JS released erst JETZT den AudioFocus — sonst spielt Spotify
                // beim end()-Cap waehrend ARIA noch redet (15s+ je nach Buffer).
                try {
                    val params = Arguments.createMap()
                    reactApplicationContext
                        .getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
                        .emit("PcmPlaybackFinished", params)
                } catch (e: Exception) {
                    Log.w(TAG, "PlaybackFinished emit failed: ${e.message}")
                }
            }
        }, "PcmStreamWriter").apply { start() }

@@ -273,6 +350,9 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
        promise.resolve(true)
    }

    @ReactMethod fun addListener(eventName: String) {}
    @ReactMethod fun removeListeners(count: Int) {}

    private fun stopInternal() {
        writerShouldStop = true
        endRequested = true
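The new "PcmPlaybackFinished" event exists so that JS can hold the audio focus until the last sample has actually left the AudioTrack. A rough sketch of that hand-off (releaseAriaAudioFocus() stands in for whatever release path the AudioFocus module exposes to JS; it is not defined in this diff):

```ts
import { NativeModules, NativeEventEmitter } from 'react-native';

const { PcmStreamPlayer } = NativeModules;
const emitter = new NativeEventEmitter(PcmStreamPlayer);

// Release focus only once the track has fully drained, not when end() is
// called, otherwise Spotify resumes while ARIA is still speaking.
const sub = emitter.addListener('PcmPlaybackFinished', () => {
  releaseAriaAudioFocus(); // hypothetical wrapper around the AudioFocus module
});
```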
@@ -0,0 +1,126 @@
package com.ariacockpit

import android.Manifest
import android.content.Context
import android.content.pm.PackageManager
import android.os.Build
import android.telephony.PhoneStateListener
import android.telephony.TelephonyCallback
import android.telephony.TelephonyManager
import android.util.Log
import androidx.core.content.ContextCompat
import com.facebook.react.bridge.Arguments
import com.facebook.react.bridge.Promise
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.bridge.ReactContextBaseJavaModule
import com.facebook.react.bridge.ReactMethod
import com.facebook.react.modules.core.DeviceEventManagerModule

/**
 * Lauscht auf Anruf-Statusaenderungen — wenn das Telefon klingelt oder ein
 * Anruf laeuft, sendet das Modul ein "PhoneCallStateChanged"-Event an JS.
 *
 * JS-Side stoppt dann die TTS-Wiedergabe damit ARIA nicht mitten ins Gespraech
 * weiterredet. Ohne READ_PHONE_STATE-Permission failt start() leise — der Rest
 * der App funktioniert wie bisher.
 *
 * State-Strings: "idle" | "ringing" | "offhook"
 */
class PhoneCallModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
    override fun getName() = "PhoneCall"

    companion object { private const val TAG = "PhoneCall" }

    private var telephonyManager: TelephonyManager? = null
    private var legacyListener: PhoneStateListener? = null
    private var modernCallback: Any? = null // TelephonyCallback ab API 31
    private var lastState: Int = TelephonyManager.CALL_STATE_IDLE

    @ReactMethod
    fun start(promise: Promise) {
        try {
            val perm = ContextCompat.checkSelfPermission(reactApplicationContext, Manifest.permission.READ_PHONE_STATE)
            if (perm != PackageManager.PERMISSION_GRANTED) {
                Log.w(TAG, "READ_PHONE_STATE Permission fehlt — Anruf-Erkennung inaktiv")
                promise.resolve(false)
                return
            }
            val tm = reactApplicationContext.getSystemService(Context.TELEPHONY_SERVICE) as? TelephonyManager
            if (tm == null) {
                Log.w(TAG, "TelephonyManager nicht verfuegbar")
                promise.resolve(false)
                return
            }
            telephonyManager = tm

            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
                val cb = object : TelephonyCallback(), TelephonyCallback.CallStateListener {
                    override fun onCallStateChanged(state: Int) {
                        handleStateChange(state)
                    }
                }
                tm.registerTelephonyCallback(reactApplicationContext.mainExecutor, cb)
                modernCallback = cb
            } else {
                @Suppress("DEPRECATION")
                val l = object : PhoneStateListener() {
                    override fun onCallStateChanged(state: Int, phoneNumber: String?) {
                        handleStateChange(state)
                    }
                }
                @Suppress("DEPRECATION")
                tm.listen(l, PhoneStateListener.LISTEN_CALL_STATE)
                legacyListener = l
            }
            Log.i(TAG, "PhoneCall-Listener aktiv")
            promise.resolve(true)
        } catch (e: Exception) {
            Log.e(TAG, "start fehlgeschlagen", e)
            promise.reject("START_FAILED", e.message ?: "Unbekannter Fehler", e)
        }
    }

    @ReactMethod
    fun stop(promise: Promise) {
        try {
            val tm = telephonyManager
            if (tm != null) {
                if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
                    (modernCallback as? TelephonyCallback)?.let { tm.unregisterTelephonyCallback(it) }
                    modernCallback = null
                } else {
                    @Suppress("DEPRECATION")
                    legacyListener?.let { tm.listen(it, PhoneStateListener.LISTEN_NONE) }
                    legacyListener = null
                }
            }
            telephonyManager = null
            lastState = TelephonyManager.CALL_STATE_IDLE
            promise.resolve(true)
        } catch (e: Exception) {
            promise.reject("STOP_FAILED", e.message ?: "")
        }
    }

    private fun handleStateChange(state: Int) {
        if (state == lastState) return
        lastState = state
        val name = when (state) {
            TelephonyManager.CALL_STATE_RINGING -> "ringing"
            TelephonyManager.CALL_STATE_OFFHOOK -> "offhook"
            TelephonyManager.CALL_STATE_IDLE -> "idle"
            else -> return
        }
        Log.i(TAG, "Telefon-State: $name")
        val params = Arguments.createMap().apply { putString("state", name) }
        try {
            reactApplicationContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
                .emit("PhoneCallStateChanged", params)
        } catch (e: Exception) {
            Log.w(TAG, "Event-emit fehlgeschlagen: ${e.message}")
        }
    }

    @ReactMethod fun addListener(eventName: String) {}
    @ReactMethod fun removeListeners(count: Int) {}
}
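A sketch of the JS listener that pauses TTS while a classic phone call is ringing or active (stopTts/resumeTts are placeholders for the app's own handlers):

```ts
import { NativeModules, NativeEventEmitter } from 'react-native';

const { PhoneCall } = NativeModules;
const emitter = new NativeEventEmitter(PhoneCall);

export async function watchPhoneCalls(stopTts: () => void, resumeTts: () => void) {
  const ok = await PhoneCall.start();   // resolves false if READ_PHONE_STATE is missing
  if (!ok) return () => {};
  const sub = emitter.addListener('PhoneCallStateChanged', ({ state }) => {
    if (state === 'ringing' || state === 'offhook') stopTts();
    else if (state === 'idle') resumeTts();
  });
  return () => { sub.remove(); PhoneCall.stop(); };
}
```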
@@ -0,0 +1,16 @@
package com.ariacockpit

import com.facebook.react.ReactPackage
import com.facebook.react.bridge.NativeModule
import com.facebook.react.bridge.ReactApplicationContext
import com.facebook.react.uimanager.ViewManager

class PhoneCallPackage : ReactPackage {
    override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
        return listOf(PhoneCallModule(reactContext))
    }

    override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
        return emptyList()
    }
}
Binary file not shown.
@@ -1,4 +1,8 @@
<?xml version="1.0" encoding="utf-8"?>
<paths>
    <cache-path name="cache" path="." />
    <files-path name="files" path="." />
    <external-path name="external" path="." />
    <external-files-path name="external_files" path="." />
    <external-cache-path name="external_cache" path="." />
</paths>
+15
-2
@@ -167,10 +167,23 @@ export CI=true

if [ "$MODE" = "debug" ]; then
  ./gradlew assembleDebug
  APK_PATH="app/build/outputs/apk/debug/app-debug.apk"
  OUT_DIR="app/build/outputs/apk/debug"
else
  ./gradlew assembleRelease
  APK_PATH="app/build/outputs/apk/release/app-release.apk"
  OUT_DIR="app/build/outputs/apk/release"
fi

# Mit ABI-Splits heisst die APK z.B. app-arm64-v8a-release.apk statt
# app-release.apk. arm64-v8a-Variante zuerst probieren (das ist unser
# Standard), Universal-APK als Fallback falls Splits deaktiviert sind.
if [ -f "$OUT_DIR/app-arm64-v8a-${MODE}.apk" ]; then
  APK_PATH="$OUT_DIR/app-arm64-v8a-${MODE}.apk"
elif [ -f "$OUT_DIR/app-${MODE}.apk" ]; then
  APK_PATH="$OUT_DIR/app-${MODE}.apk"
else
  echo -e "${RED}Keine passende APK in $OUT_DIR gefunden${NC}"
  cd ..
  exit 1
fi

cd ..
+18
-19
@@ -1,6 +1,6 @@
{
  "name": "aria-cockpit",
  "version": "0.0.6.5",
  "version": "0.1.2.2",
  "private": true,
  "scripts": {
    "android": "react-native run-android",
@@ -10,33 +10,32 @@
    "build:apk": "cd android && ./gradlew assembleRelease"
  },
  "dependencies": {
    "@react-native-async-storage/async-storage": "^1.21.0",
    "@react-native-community/geolocation": "^3.2.1",
    "@react-navigation/bottom-tabs": "^6.5.11",
    "@react-navigation/native": "^6.1.9",
    "react": "18.2.0",
    "react-native": "0.73.4",
    "@react-navigation/native": "^6.1.9",
    "@react-navigation/bottom-tabs": "^6.5.11",
    "react-native-screens": "3.27.0",
    "react-native-safe-area-context": "^4.8.2",
    "react-native-audio-recorder-player": "^3.6.7",
    "react-native-camera-kit": "^13.0.0",
    "react-native-document-picker": "^9.1.1",
    "react-native-sound": "^0.11.2",
    "@react-native-community/geolocation": "^3.2.1",
    "react-native-fs": "^2.20.0",
    "react-native-image-picker": "^7.1.0",
    "react-native-permissions": "^4.1.4",
    "react-native-camera-kit": "^13.0.0",
    "@react-native-async-storage/async-storage": "^1.21.0",
    "react-native-fs": "^2.20.0",
    "react-native-audio-recorder-player": "^3.6.7",
    "@picovoice/porcupine-react-native": "3.0.5",
    "@picovoice/react-native-voice-processor": "1.2.3"
    "react-native-safe-area-context": "^4.8.2",
    "react-native-screens": "3.27.0",
    "react-native-sound": "^0.11.2",
    "react-native-svg": "^14.1.0"
  },
  "devDependencies": {
    "typescript": "^5.3.3",
    "@react-native/eslint-config": "^0.73.2",
    "@react-native/metro-config": "^0.73.5",
    "@react-native/typescript-config": "^0.73.1",
    "@types/jest": "^29.5.11",
    "@types/react": "^18.2.48",
    "@types/react-native": "^0.73.0",
    "@react-native/eslint-config": "^0.73.2",
    "@react-native/typescript-config": "^0.73.1",
    "@react-native/metro-config": "^0.73.5",
    "metro-react-native-babel-preset": "^0.77.0",
    "jest": "^29.7.0",
    "@types/jest": "^29.5.11"
    "metro-react-native-babel-preset": "^0.77.0",
    "typescript": "^5.3.3"
  }
}
Binary file not shown.
@@ -1,104 +1,87 @@
/**
 * MessageText — rendert Chat-Text mit Auto-Linkifizierung:
 * - http(s)://...              → tippbar, oeffnet im Browser
 * - mailto: oder plain E-Mail  → tippbar, oeffnet Mail-App
 * - Telefonnummern             → tippbar, oeffnet Android-Dialer
 * MessageText — selektierbarer Chat-Text mit Android-Auto-Linkifizierung,
 * plus Inline-Image-Rendering wenn der Text Bild-URLs enthaelt.
 *
 * Text ist durchgaengig markierbar/kopierbar (selectable).
 * - Markdown-Syntax `` und plain `https://...image.png` werden
 *   erkannt — die URL bleibt im Text sichtbar (klickbar via Linkify),
 *   zusaetzlich wird das Bild als <Image> oder <SvgUri> drunter gerendert.
 * - Wir nutzen Androids dataDetectorType="all" (System macht Phone/URL/Email
 *   automatisch klickbar) und ein einzelnes <Text selectable> ohne nested
 *   <Text> mit eigenem onPress — Nested Text mit onPress fing die Long-Press-
 *   Geste ab, damit war Markieren+Kopieren defekt.
 */

import React from 'react';
import { Text, Linking, TextStyle, StyleProp } from 'react-native';

// Regex kombiniert URL | Email | Telefonnummer.
// Gruppenreihenfolge ist wichtig fuer die Erkennung unten.
//
// URL: http://... oder https://... bis zum ersten Whitespace / Anfuehrungszeichen.
// Email: simpler Standard-Match (kein RFC-kompatibel aber gut genug).
// Telefon: internationale Form (+49..., 0049..., 0176...), darf Leerzeichen
//          / Bindestriche / Schraegstriche / Klammern enthalten, mindestens 7
//          Ziffern insgesamt. Vermeidet banale Zahlen (Uhrzeiten, Datum).
const LINK_REGEX = new RegExp(
  '(https?:\\/\\/[^\\s<>"]+)' + // 1: URL
  '|([A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,})' + // 2: Email
  '|((?:\\+|00)\\d[\\d\\s()\\-\\/]{6,}\\d|0\\d{2,4}[\\s\\/\\-]?[\\d\\s\\-\\/]{5,}\\d)', // 3: Telefon
  'g',
);

const LINK_STYLE = { color: '#0096FF', textDecorationLine: 'underline' } as TextStyle;

interface Segment {
  text: string;
  kind: 'text' | 'url' | 'email' | 'phone';
}

function tokenize(raw: string): Segment[] {
  const out: Segment[] = [];
  let lastEnd = 0;
  LINK_REGEX.lastIndex = 0;
  let m: RegExpExecArray | null;
  while ((m = LINK_REGEX.exec(raw)) !== null) {
    if (m.index > lastEnd) {
      out.push({ text: raw.slice(lastEnd, m.index), kind: 'text' });
    }
    if (m[1]) out.push({ text: m[1], kind: 'url' });
    else if (m[2]) out.push({ text: m[2], kind: 'email' });
    else if (m[3]) out.push({ text: m[3], kind: 'phone' });
    lastEnd = LINK_REGEX.lastIndex;
  }
  if (lastEnd < raw.length) out.push({ text: raw.slice(lastEnd), kind: 'text' });
  return out;
}

function onPress(seg: Segment) {
  try {
    if (seg.kind === 'url') {
      Linking.openURL(seg.text);
    } else if (seg.kind === 'email') {
      Linking.openURL(`mailto:${seg.text}`);
    } else if (seg.kind === 'phone') {
      // Android-Dialer erwartet tel:-Schema ohne Leerzeichen/Bindestriche
      const clean = seg.text.replace(/[\s\-\/()]/g, '');
      Linking.openURL(`tel:${clean}`);
    }
  } catch {}
}
import React, { useEffect, useState } from 'react';
import { View, Text, Image, TextStyle, StyleProp } from 'react-native';
import { SvgUri } from 'react-native-svg';

interface Props {
  text: string;
  style?: StyleProp<TextStyle>;
}

const MessageText: React.FC<Props> = ({ text, style }) => {
  const segments = React.useMemo(() => tokenize(text), [text]);
// Bild-URL-Pattern: http(s)://... endend auf gaengige Bild-Endungen.
const IMG_URL_RE = /https?:\/\/[^\s)<"']+\.(?:jpe?g|png|gif|webp|bmp|ico|svg)(?:\?[^\s)<"']*)?/gi;

function extractImageUrls(text: string): string[] {
  const urls = new Set<string>();
  const matches = text.match(IMG_URL_RE);
  if (matches) matches.forEach(u => urls.add(u));
  return Array.from(urls);
}

const SVG_RE = /\.svg(?:\?|$)/i;

/** Image mit dynamischer Aspect-Ratio aus echten Bilddimensionen.
 * SVGs werden ueber react-native-svg gerendert (kein Image.getSize). */
const InlineImage: React.FC<{ uri: string }> = ({ uri }) => {
  const isSvg = SVG_RE.test(uri);
  const [aspectRatio, setAspectRatio] = useState<number>(1);
  const [failed, setFailed] = useState(false);
  useEffect(() => {
    if (isSvg) return; // Image.getSize geht fuer SVG nicht
    let cancelled = false;
    Image.getSize(
      uri,
      (w, h) => { if (!cancelled && w > 0 && h > 0) setAspectRatio(Math.max(0.5, Math.min(2.5, w / h))); },
      () => { if (!cancelled) setFailed(true); },
    );
    return () => { cancelled = true; };
  }, [uri, isSvg]);
  if (failed) return null;
  if (isSvg) {
    return (
      <View style={{ marginTop: 8, width: 260, height: 260, backgroundColor: '#0D0D1A', borderRadius: 8, alignItems: 'center', justifyContent: 'center' }}>
        <SvgUri uri={uri} width="100%" height="100%" onError={() => setFailed(true)} />
      </View>
    );
  }
  return (
    <Text
      style={style}
      selectable
      // dataDetectorType ist Android-only und macht Phone/URL/Email zusaetzlich
      // ueber System-Detection klickbar — als Fallback falls unsere Regex-
      // Tokens nicht passen.
      dataDetectorType="all"
    >
      {segments.map((seg, i) => {
        if (seg.kind === 'text') {
          return <Text key={i} selectable>{seg.text}</Text>;
        }
        return (
          <Text
            key={i}
            selectable
            style={LINK_STYLE}
            onPress={() => onPress(seg)}
            // Long-Press soll an den Parent durch fuer Selection
            onLongPress={undefined}
            suppressHighlighting={false}
          >
            {seg.text}
          </Text>
        );
      })}
    </Text>
    <Image
      source={{ uri }}
      style={{ width: 260, aspectRatio, borderRadius: 8, marginTop: 8, backgroundColor: '#0D0D1A' }}
      resizeMode="cover"
      onError={() => setFailed(true)}
    />
  );
};

const MessageText: React.FC<Props> = ({ text, style }) => {
  const imageUrls = extractImageUrls(text || '');
  if (imageUrls.length === 0) {
    return (
      <Text style={style} selectable dataDetectorType="all">
        {text}
      </Text>
    );
  }
  return (
    <View>
      <Text style={style} selectable dataDetectorType="all">
        {text}
      </Text>
      {imageUrls.map(u => <InlineImage key={u} uri={u} />)}
    </View>
  );
};
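The call site is not shown in this diff; usage stays the same as before the rewrite, roughly like this (message and styles names are illustrative):

```tsx
// Image URLs contained in the text now render inline below the selectable text.
<MessageText text={message.text} style={styles.messageBody} />
```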
|
||||
@@ -44,7 +44,6 @@ const VoiceButton: React.FC<VoiceButtonProps> = ({
|
||||
const [meterDb, setMeterDb] = useState(-160);
|
||||
const pulseAnim = useRef(new Animated.Value(1)).current;
|
||||
const durationTimer = useRef<ReturnType<typeof setInterval> | null>(null);
|
||||
const isLongPress = useRef(false);
|
||||
|
||||
// Puls-Animation starten/stoppen
|
||||
useEffect(() => {
|
||||
@@ -117,31 +116,10 @@ const VoiceButton: React.FC<VoiceButtonProps> = ({
|
||||
if (disabled || isRecording) return;
|
||||
const started = await audioService.startRecording(true); // autoStop = true
|
||||
if (started) {
|
||||
isLongPress.current = false;
|
||||
setIsRecording(true);
|
||||
}
|
||||
}, [disabled, isRecording]);
|
||||
|
||||
// Push-to-Talk: Lang druecken
|
||||
const handlePressIn = async () => {
|
||||
if (disabled || isRecording) return;
|
||||
isLongPress.current = true;
|
||||
const started = await audioService.startRecording(false); // kein autoStop
|
||||
if (started) {
|
||||
setIsRecording(true);
|
||||
}
|
||||
};
|
||||
|
||||
const handlePressOut = async () => {
|
||||
if (!isRecording || !isLongPress.current) return;
|
||||
isLongPress.current = false;
|
||||
setIsRecording(false);
|
||||
const result = await audioService.stopRecording();
|
||||
if (result && result.durationMs > 300) {
|
||||
onRecordingComplete(result);
|
||||
}
|
||||
};
|
||||
|
||||
// Tap-to-Talk: Einmal tippen startet mit Auto-Stop.
|
||||
// Guard gegen Doppel-Tap während asyncer Start/Stop.
|
||||
const tapBusy = useRef(false);
|
||||
@@ -162,7 +140,6 @@ const VoiceButton: React.FC<VoiceButtonProps> = ({
|
||||
// Aufnahme mit Auto-Stop starten
|
||||
const started = await audioService.startRecording(true);
|
||||
if (started) {
|
||||
isLongPress.current = false;
|
||||
setIsRecording(true);
|
||||
}
|
||||
}
|
||||
@@ -201,10 +178,6 @@ const VoiceButton: React.FC<VoiceButtonProps> = ({
|
||||
isRecording && styles.buttonOuterRecording,
|
||||
{ transform: [{ scale: pulseAnim }] },
|
||||
]}
|
||||
onStartShouldSetResponder={() => true}
|
||||
onResponderGrant={handlePressIn}
|
||||
onResponderRelease={handlePressOut}
|
||||
onResponderTerminate={handlePressOut}
|
||||
>
|
||||
<TouchableOpacity
|
||||
activeOpacity={0.8}
|
||||
|
||||
@@ -0,0 +1,224 @@
|
||||
/**
|
||||
* ZoomableImage — Pinch-to-Zoom + Pan fuers Vollbild-Modal.
|
||||
*
|
||||
* Reine RN-Implementation, ohne react-native-gesture-handler.
|
||||
*
|
||||
* - 2 Finger: Pinch (Zoom 1x..5x) + simultaner Pan via Focal-Punkt
|
||||
* - 1 Finger: Pan wenn schon gezoomt (>1.02x)
|
||||
* - Doppel-Tap (<300ms zw. zwei Single-Taps): Toggle 1x ↔ 2.5x
|
||||
*
|
||||
* Implementierungs-Hinweise zur alten Version (warum's nicht ging):
|
||||
* - `gestureState.numberActiveTouches` ist nicht zuverlaessig direkt
|
||||
* nach onPanResponderGrant. Wir lesen Finger-Anzahl jetzt
|
||||
* ausschliesslich aus `e.nativeEvent.touches.length`.
|
||||
* - Beim Wechsel von 2 → 1 Fingern bleib die Pinch-Referenz haengen.
|
||||
* Jetzt: bei jedem Finger-Wechsel re-snapshotten wir die Geste.
|
||||
* - Animated.Image bekommt jetzt pointerEvents="none" damit der View
|
||||
* GARANTIERT die Touches abbekommt.
|
||||
* - useNativeDriver ist bewusst AUS — sonst koennen wir setValue()
|
||||
* nicht synchron mit dem Pan-Responder zusammen nutzen.
|
||||
*/
|
||||
|
||||
import React, { useMemo, useRef } from 'react';
|
||||
import {
|
||||
Animated,
|
||||
PanResponder,
|
||||
GestureResponderEvent,
|
||||
ImageStyle,
|
||||
StyleProp,
|
||||
StyleSheet,
|
||||
View,
|
||||
} from 'react-native';
|
||||
|
||||
interface Props {
|
||||
uri: string;
|
||||
containerWidth: number;
|
||||
containerHeight: number;
|
||||
style?: StyleProp<ImageStyle>;
|
||||
}
|
||||
|
||||
const MIN_SCALE = 1;
|
||||
const MAX_SCALE = 5;
|
||||
const DOUBLE_TAP_MS = 300;
|
||||
const DOUBLE_TAP_DIST = 30; // Bewegung max. damit ein Tap als Tap gilt
|
||||
const PAN_SLOP_AT_SCALE_1 = 4; // Mikro-Movement nicht als Pan werten
|
||||
|
||||
const ZoomableImage: React.FC<Props> = ({ uri, containerWidth, containerHeight, style }) => {
|
||||
// Animated-Werte fuer die Render-Transformation
|
||||
const scale = useRef(new Animated.Value(1)).current;
|
||||
const tx = useRef(new Animated.Value(0)).current;
|
||||
const ty = useRef(new Animated.Value(0)).current;
|
||||
|
||||
// Logische Zustaende — wir lesen Animated.Value nicht zurueck (waere async)
|
||||
const view = useRef({ scale: 1, x: 0, y: 0 }).current;
|
||||
|
||||
// Geste-Snapshot: was war zu Beginn dieser Geste-Phase
|
||||
const gesture = useRef({
|
||||
fingers: 0, // aktuelle Finger-Anzahl
|
||||
startScale: 1,
|
||||
startX: 0,
|
||||
startY: 0,
|
||||
startDist: 0, // Pinch-Referenz-Distanz
|
||||
startFocalX: 0,
|
||||
startFocalY: 0,
|
||||
movedSinceTouch: 0, // fuer Tap-Erkennung
|
||||
touchStartedAt: 0,
|
||||
touchStartX: 0,
|
||||
touchStartY: 0,
|
||||
}).current;
|
||||
|
||||
// Doppel-Tap
|
||||
const lastTap = useRef({ at: 0, x: 0, y: 0 });
|
||||
|
||||
const clamp = (v: number, lo: number, hi: number) => Math.max(lo, Math.min(hi, v));
|
||||
|
||||
const applyClamped = (s: number, x: number, y: number) => {
|
||||
const ns = clamp(s, MIN_SCALE, MAX_SCALE);
|
||||
    // Translation auf den verfuegbaren Bereich begrenzen
|
||||
const maxX = Math.max(0, (containerWidth * ns - containerWidth) / 2);
|
||||
const maxY = Math.max(0, (containerHeight * ns - containerHeight) / 2);
|
||||
const nx = clamp(x, -maxX, maxX);
|
||||
const ny = clamp(y, -maxY, maxY);
|
||||
view.scale = ns;
|
||||
view.x = nx;
|
||||
view.y = ny;
|
||||
scale.setValue(ns);
|
||||
tx.setValue(nx);
|
||||
ty.setValue(ny);
|
||||
};
|
||||
|
||||
const distance = (touches: any[]) =>
|
||||
Math.hypot(touches[0].pageX - touches[1].pageX, touches[0].pageY - touches[1].pageY);
|
||||
|
||||
const focal = (touches: any[]) => ({
|
||||
x: (touches[0].pageX + touches[1].pageX) / 2,
|
||||
y: (touches[0].pageY + touches[1].pageY) / 2,
|
||||
});
|
||||
|
||||
// Snapshot vor jedem Phasenwechsel (1↔2 Finger) — verhindert Spruenge
|
||||
const snapshot = (touches: any[]) => {
|
||||
gesture.startScale = view.scale;
|
||||
gesture.startX = view.x;
|
||||
gesture.startY = view.y;
|
||||
if (touches.length >= 2) {
|
||||
gesture.startDist = distance(touches);
|
||||
const f = focal(touches);
|
||||
gesture.startFocalX = f.x;
|
||||
gesture.startFocalY = f.y;
|
||||
} else if (touches.length === 1) {
|
||||
gesture.startDist = 0;
|
||||
gesture.startFocalX = touches[0].pageX;
|
||||
gesture.startFocalY = touches[0].pageY;
|
||||
}
|
||||
};
|
||||
|
||||
const responder = useMemo(
|
||||
() =>
|
||||
PanResponder.create({
|
||||
onStartShouldSetPanResponder: () => true,
|
||||
onStartShouldSetPanResponderCapture: () => true,
|
||||
onMoveShouldSetPanResponder: () => true,
|
||||
onMoveShouldSetPanResponderCapture: () => true,
|
||||
|
||||
onPanResponderGrant: (e: GestureResponderEvent) => {
|
||||
const touches = e.nativeEvent.touches as any[];
|
||||
gesture.fingers = touches.length;
|
||||
gesture.movedSinceTouch = 0;
|
||||
gesture.touchStartedAt = Date.now();
|
||||
gesture.touchStartX = touches[0]?.pageX ?? 0;
|
||||
gesture.touchStartY = touches[0]?.pageY ?? 0;
|
||||
snapshot(touches);
|
||||
},
|
||||
|
||||
onPanResponderMove: (e: GestureResponderEvent, _gs) => {
|
||||
const touches = e.nativeEvent.touches as any[];
|
||||
|
||||
// Phasenwechsel? → Re-Snapshot, damit nicht gesprungen wird
|
||||
if (touches.length !== gesture.fingers) {
|
||||
gesture.fingers = touches.length;
|
||||
snapshot(touches);
|
||||
return;
|
||||
}
|
||||
|
||||
gesture.movedSinceTouch += 1;
|
||||
|
||||
if (touches.length >= 2) {
|
||||
// Pinch + Pan via Focal
|
||||
const d = distance(touches);
|
||||
if (gesture.startDist === 0) {
|
||||
            // Sicherheitsnetz, falls der Snapshot verpasst wurde
|
||||
snapshot(touches);
|
||||
return;
|
||||
}
|
||||
const factor = d / gesture.startDist;
|
||||
const f = focal(touches);
|
||||
const newScale = clamp(gesture.startScale * factor, MIN_SCALE, MAX_SCALE);
|
||||
          // Focal-basierter Pan: folgt dem Mittelpunkt der zwei Finger
|
||||
const newX = gesture.startX + (f.x - gesture.startFocalX);
|
||||
const newY = gesture.startY + (f.y - gesture.startFocalY);
|
||||
applyClamped(newScale, newX, newY);
|
||||
} else if (touches.length === 1 && view.scale > 1.02) {
|
||||
const dx = touches[0].pageX - gesture.startFocalX;
|
||||
const dy = touches[0].pageY - gesture.startFocalY;
|
||||
if (Math.abs(dx) < PAN_SLOP_AT_SCALE_1 && Math.abs(dy) < PAN_SLOP_AT_SCALE_1) return;
|
||||
applyClamped(view.scale, gesture.startX + dx, gesture.startY + dy);
|
||||
}
|
||||
},
|
||||
|
||||
onPanResponderRelease: (e: GestureResponderEvent) => {
|
||||
const elapsed = Date.now() - gesture.touchStartedAt;
|
||||
const dx = (e.nativeEvent.changedTouches?.[0]?.pageX ?? gesture.touchStartX) - gesture.touchStartX;
|
||||
const dy = (e.nativeEvent.changedTouches?.[0]?.pageY ?? gesture.touchStartY) - gesture.touchStartY;
|
||||
const wasTap =
|
||||
elapsed < 280 &&
|
||||
Math.abs(dx) < DOUBLE_TAP_DIST &&
|
||||
Math.abs(dy) < DOUBLE_TAP_DIST;
|
||||
if (wasTap) {
|
||||
const now = Date.now();
|
||||
if (now - lastTap.current.at < DOUBLE_TAP_MS) {
|
||||
// Doppel-Tap → Zoom-Toggle
|
||||
if (view.scale > 1.1) {
|
||||
applyClamped(1, 0, 0);
|
||||
} else {
|
||||
applyClamped(2.5, 0, 0);
|
||||
}
|
||||
lastTap.current = { at: 0, x: 0, y: 0 };
|
||||
} else {
|
||||
lastTap.current = { at: now, x: gesture.touchStartX, y: gesture.touchStartY };
|
||||
}
|
||||
}
|
||||
gesture.fingers = 0;
|
||||
gesture.startDist = 0;
|
||||
},
|
||||
|
||||
onPanResponderTerminate: () => {
|
||||
gesture.fingers = 0;
|
||||
gesture.startDist = 0;
|
||||
},
|
||||
}),
|
||||
[],
|
||||
);
|
||||
|
||||
return (
|
||||
<View
|
||||
style={StyleSheet.absoluteFill}
|
||||
collapsable={false}
|
||||
{...responder.panHandlers}
|
||||
>
|
||||
<Animated.View pointerEvents="none" style={StyleSheet.absoluteFill}>
|
||||
<Animated.Image
|
||||
source={{ uri }}
|
||||
style={[
|
||||
style,
|
||||
{
|
||||
transform: [{ translateX: tx }, { translateY: ty }, { scale }],
|
||||
},
|
||||
]}
|
||||
resizeMode="contain"
|
||||
/>
|
||||
</Animated.View>
|
||||
</View>
|
||||
);
|
||||
};
|
||||
|
||||
export default ZoomableImage;
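Zur Einordnung eine minimale Verwendungs-Skizze — Annahme: Vollbild-Modal mit Fenster-Dimensionen als Container; Komponenten-, Prop- und Style-Namen sind rein illustrativ und nicht Teil dieses Diffs:

// Verwendungs-Skizze (illustrativ): ZoomableImage in einem Vollbild-Modal einbetten.
import React from 'react';
import { Modal, View, Dimensions, StyleSheet } from 'react-native';
import ZoomableImage from '../components/ZoomableImage';

const sketchStyles = StyleSheet.create({
  overlay: { flex: 1, backgroundColor: '#000' },
  image: { width: '100%', height: '100%' },
});

// uri = file://- oder https-Pfad des Bildes; onClose schliesst das Modal.
const FullscreenImageSketch: React.FC<{ uri: string | null; onClose: () => void }> = ({ uri, onClose }) => (
  <Modal visible={!!uri} transparent animationType="fade" onRequestClose={onClose}>
    <View style={sketchStyles.overlay}>
      {uri && (
        <ZoomableImage
          uri={uri}
          containerWidth={Dimensions.get('window').width}
          containerHeight={Dimensions.get('window').height}
          style={sketchStyles.image}
        />
      )}
    </View>
  </Modal>
);

export default FullscreenImageSketch;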
|
||||
@@ -19,12 +19,24 @@ import {
|
||||
ScrollView,
|
||||
Modal,
|
||||
ToastAndroid,
|
||||
AppState,
|
||||
NativeModules,
|
||||
Alert,
|
||||
} from 'react-native';
|
||||
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||
import RNFS from 'react-native-fs';
|
||||
import { SvgUri } from 'react-native-svg';
|
||||
import { Dimensions } from 'react-native';
|
||||
import ZoomableImage from '../components/ZoomableImage';
|
||||
import rvs, { RVSMessage, ConnectionState } from '../services/rvs';
|
||||
import audioService from '../services/audio';
|
||||
import wakeWordService from '../services/wakeword';
|
||||
import phoneCallService from '../services/phoneCall';
|
||||
import { playWakeReadySound } from '../services/wakeReadySound';
|
||||
import {
|
||||
acquireBackgroundAudio,
|
||||
releaseBackgroundAudio,
|
||||
} from '../services/backgroundAudio';
|
||||
import updateService from '../services/updater';
|
||||
import VoiceButton from '../components/VoiceButton';
|
||||
import FileUpload, { FileData } from '../components/FileUpload';
|
||||
@@ -42,6 +54,7 @@ interface Attachment {
|
||||
uri?: string; // Lokaler Pfad (file://) fuer Anzeige
|
||||
mimeType?: string;
|
||||
serverPath?: string; // Pfad auf dem Server (/shared/uploads/...) fuer Re-Download
|
||||
deleted?: boolean; // Datei wurde nachtraeglich geloescht (Diagnostic-Manager)
|
||||
}
|
||||
|
||||
interface ChatMessage {
|
||||
@@ -54,6 +67,18 @@ interface ChatMessage {
|
||||
messageId?: string;
|
||||
/** Lokaler Pfad zur gecachten TTS-Audio-Datei (file://...) */
|
||||
audioPath?: string;
|
||||
/** Korrelations-ID fuer Sprachnachrichten — wird mit dem STT-Result zurueck-
|
||||
* gespiegelt damit wir die EXAKT richtige Placeholder-Bubble ersetzen,
|
||||
* auch wenn mehrere Aufnahmen parallel offen sind. */
|
||||
audioRequestId?: string;
|
||||
/** Skill-Created-Bubble: ARIA hat einen neuen Skill angelegt */
|
||||
skillCreated?: {
|
||||
name: string;
|
||||
description: string;
|
||||
execution: string;
|
||||
active: boolean;
|
||||
setupError?: string;
|
||||
};
|
||||
}
|
||||
|
||||
// --- Konstanten ---
|
||||
@@ -69,6 +94,73 @@ const capMessages = (msgs: ChatMessage[]): ChatMessage[] =>
|
||||
const DEFAULT_ATTACHMENT_DIR = `${RNFS.DocumentDirectoryPath}/chat_attachments`;
|
||||
const STORAGE_PATH_KEY = 'aria_attachment_storage_path';
|
||||
|
||||
const { FileOpener } = NativeModules as {
|
||||
FileOpener?: { open: (filePath: string, mimeType: string) => Promise<boolean> };
|
||||
};
|
||||
|
||||
/** Datei mit Android-Intent-Picker oeffnen (System waehlt App nach MIME). */
|
||||
async function openFileWithIntent(filePath: string, mimeType: string): Promise<void> {
|
||||
if (!FileOpener) {
|
||||
ToastAndroid.show('FileOpener Native Module fehlt', ToastAndroid.SHORT);
|
||||
return;
|
||||
}
|
||||
try {
|
||||
await FileOpener.open(filePath, mimeType || 'application/octet-stream');
|
||||
} catch (err: any) {
|
||||
ToastAndroid.show(`Oeffnen fehlgeschlagen: ${err?.message || err}`, ToastAndroid.LONG);
|
||||
}
|
||||
}
|
||||
|
||||
/** Image-Vorschau in der Chat-Bubble. Misst die echte Bild-Dimension via
|
||||
* Image.getSize + setzt aspectRatio dynamisch — dadurch passt sich die
|
||||
* Bubble ans Bild an (kein "Strich" mehr bei breiten oder hohen Bildern). */
|
||||
const CHAT_IMAGE_STYLE = {
|
||||
width: 260,
|
||||
borderRadius: 8,
|
||||
marginBottom: 6,
|
||||
backgroundColor: '#0D0D1A',
|
||||
} as const;
|
||||
const ChatImage: React.FC<{
|
||||
uri: string;
|
||||
onPress: () => void;
|
||||
onError: () => void;
|
||||
}> = ({ uri, onPress, onError }) => {
|
||||
const [aspectRatio, setAspectRatio] = useState<number>(4 / 3);
|
||||
const isSvg = /\.svg(?:\?|$)/i.test(uri);
|
||||
useEffect(() => {
|
||||
if (isSvg) return; // SvgUri hat kein getSize
|
||||
let cancelled = false;
|
||||
Image.getSize(uri, (w, h) => {
|
||||
if (!cancelled && w > 0 && h > 0) {
|
||||
// Aspect-Ratio capen damit sehr lange Panorama-Bilder oder hohe
|
||||
// Screenshot-Streifen die Bubble nicht sprengen
|
||||
const r = Math.max(0.5, Math.min(2.5, w / h));
|
||||
setAspectRatio(r);
|
||||
}
|
||||
}, () => {});
|
||||
return () => { cancelled = true; };
|
||||
}, [uri, isSvg]);
|
||||
if (isSvg) {
|
||||
return (
|
||||
<TouchableOpacity onPress={onPress} activeOpacity={0.8}>
|
||||
<View style={[CHAT_IMAGE_STYLE, { height: 260, alignItems: 'center', justifyContent: 'center' }]}>
|
||||
<SvgUri uri={uri} width="100%" height="100%" onError={onError} />
|
||||
</View>
|
||||
</TouchableOpacity>
|
||||
);
|
||||
}
|
||||
return (
|
||||
<TouchableOpacity onPress={onPress} activeOpacity={0.8}>
|
||||
<Image
|
||||
source={{ uri }}
|
||||
style={[CHAT_IMAGE_STYLE, { aspectRatio }]}
|
||||
resizeMode="cover"
|
||||
onError={onError}
|
||||
/>
|
||||
</TouchableOpacity>
|
||||
);
|
||||
};
|
||||
|
||||
async function getAttachmentDir(): Promise<string> {
|
||||
try {
|
||||
const saved = await AsyncStorage.getItem(STORAGE_PATH_KEY);
|
||||
@@ -129,6 +221,10 @@ const ChatScreen: React.FC = () => {
|
||||
|
||||
const flatListRef = useRef<FlatList>(null);
|
||||
const messageIdCounter = useRef(0);
|
||||
  // ServerPaths, fuer die der User auf "oeffnen" geklickt hat — beim
|
||||
// file_response wird die Datei nach dem Speichern direkt mit dem System-
|
||||
// Intent geoeffnet (PDF-Viewer, Galerie, etc.).
|
||||
const autoOpenPaths = useRef<Set<string>>(new Set());
|
||||
|
||||
// Eindeutige Message-ID generieren
|
||||
const nextId = (): string => {
|
||||
@@ -136,20 +232,24 @@ const ChatScreen: React.FC = () => {
|
||||
return `msg_${Date.now()}_${messageIdCounter.current}`;
|
||||
};
|
||||
|
||||
// TTS-Settings beim Mount + bei Screen-Fokus neu laden (damit Settings-Toggle sofort greift)
|
||||
// TTS- + GPS-Settings beim Mount + alle 2s neu laden (damit Settings-Toggle
|
||||
// sofort greift, ohne Context- oder Event-System)
|
||||
useEffect(() => {
|
||||
const loadTtsSettings = async () => {
|
||||
const loadSettings = async () => {
|
||||
const enabled = await AsyncStorage.getItem('aria_tts_enabled');
|
||||
setTtsDeviceEnabled(enabled !== 'false'); // default true
|
||||
const muted = await AsyncStorage.getItem('aria_tts_muted');
|
||||
setTtsMuted(muted === 'true'); // default false
|
||||
const isMuted = muted === 'true';
|
||||
setTtsMuted(isMuted); // default false
|
||||
audioService.setMuted(isMuted); // service-internen Flag synchronisieren
|
||||
const voice = await AsyncStorage.getItem('aria_xtts_voice');
|
||||
localXttsVoiceRef.current = voice || '';
|
||||
ttsSpeedRef.current = await loadTtsSpeed();
|
||||
const gps = await AsyncStorage.getItem('aria_gps_enabled');
|
||||
setGpsEnabled(gps === 'true');
|
||||
};
|
||||
loadTtsSettings();
|
||||
// Poll alle 2s um Settings-Aenderung mitzubekommen (einfache Loesung ohne Context)
|
||||
const interval = setInterval(loadTtsSettings, 2000);
|
||||
loadSettings();
|
||||
const interval = setInterval(loadSettings, 2000);
|
||||
return () => clearInterval(interval);
|
||||
}, []);
|
||||
|
||||
@@ -159,6 +259,49 @@ const ChatScreen: React.FC = () => {
|
||||
const unsub = wakeWordService.onStateChange((s) => {
|
||||
setWakeWordState(s);
|
||||
setWakeWordActive(s !== 'off');
|
||||
// Conversation-Focus an Wake-Word-State koppeln: solange wir aktiv im
|
||||
// Dialog sind, soll Spotify dauerhaft gepaust bleiben (auch ueber
|
||||
// Render-Pausen + zwischen Antworten hinweg). Sobald wir zurueck nach
|
||||
// 'armed' oder 'off' fallen, darf Spotify wieder.
|
||||
if (s === 'conversing') audioService.acquireConversationFocus();
|
||||
else audioService.releaseConversationFocus();
|
||||
// Foreground-Service-Slot 'wake' — solange das Ohr ueberhaupt aktiv ist
|
||||
// (armed oder conversing), soll der App-Prozess im Hintergrund am Leben
|
||||
// bleiben damit Mikro-Lauschen + Aufnahme weiterlaufen.
|
||||
if (s !== 'off') acquireBackgroundAudio('wake').catch(() => {});
|
||||
else releaseBackgroundAudio('wake').catch(() => {});
|
||||
});
|
||||
return () => unsub();
|
||||
}, []);
|
||||
|
||||
// Anruf-Erkennung: TTS pausieren wenn das Telefon klingelt
|
||||
useEffect(() => {
|
||||
phoneCallService.start().catch(err =>
|
||||
console.warn('[Chat] phoneCall.start fehlgeschlagen', err));
|
||||
return () => { phoneCallService.stop().catch(() => {}); };
|
||||
}, []);
|
||||
|
||||
// App-Resume: kurzer Wake-Word-Cooldown — beim Wechsel Background→Foreground
|
||||
// gibt's haeufig Audio-Pegel-Spikes (AudioFocus-Switch, AudioTrack re-route)
|
||||
// die openWakeWord sonst faelschlich als Wake-Word interpretiert.
|
||||
useEffect(() => {
|
||||
let lastState: string = AppState.currentState;
|
||||
const sub = AppState.addEventListener('change', (next) => {
|
||||
if (lastState !== 'active' && next === 'active') {
|
||||
wakeWordService.setResumeCooldown(1500);
|
||||
}
|
||||
lastState = next;
|
||||
});
|
||||
return () => sub.remove();
|
||||
}, []);
|
||||
|
||||
// Recording-State an Background-Service-Slot 'rec' koppeln — damit das Mikro
|
||||
// auch im Hintergrund weiter aufnehmen darf (Android killt den App-Prozess
|
||||
// sonst und die Aufnahme bricht ab).
|
||||
useEffect(() => {
|
||||
const unsub = audioService.onStateChange((s) => {
|
||||
if (s === 'recording') acquireBackgroundAudio('rec').catch(() => {});
|
||||
else releaseBackgroundAudio('rec').catch(() => {});
|
||||
});
|
||||
return () => unsub();
|
||||
}, []);
|
||||
@@ -173,11 +316,15 @@ const ChatScreen: React.FC = () => {
|
||||
setTtsMuted(prev => {
|
||||
const next = !prev;
|
||||
AsyncStorage.setItem('aria_tts_muted', String(next));
|
||||
// Bei Muten sofort laufende Wiedergabe stoppen
|
||||
if (next) audioService.stopPlayback();
|
||||
// Ref synchron updaten — sonst kommen noch Chunks im selben Tick
|
||||
// mit canPlay=true durch (Race vor dem useEffect-Update).
|
||||
ttsCanPlayRef.current = ttsDeviceEnabled && !next;
|
||||
// Globalen Mute-Flag im audioService setzen — uebersteuert auch
|
||||
// payload.silent in handlePcmChunk und stoppt laufende Wiedergabe.
|
||||
audioService.setMuted(next);
|
||||
return next;
|
||||
});
|
||||
}, []);
|
||||
}, [ttsDeviceEnabled]);
|
||||
|
||||
// Chat-Verlauf aus AsyncStorage laden
|
||||
const isInitialLoad = useRef(true);
|
||||
@@ -248,11 +395,67 @@ const ChatScreen: React.FC = () => {
|
||||
return;
|
||||
}
|
||||
|
||||
// skill_created: ARIA hat einen neuen Skill angelegt → eigene Bubble
|
||||
if (message.type === 'skill_created') {
|
||||
const p = (message.payload || {}) as any;
|
||||
const skillMsg: ChatMessage = {
|
||||
id: nextId(),
|
||||
sender: 'aria',
|
||||
text: '',
|
||||
timestamp: Date.now(),
|
||||
skillCreated: {
|
||||
name: String(p.name || '(unbenannt)'),
|
||||
description: String(p.description || ''),
|
||||
execution: String(p.execution || 'bash'),
|
||||
active: p.active !== false,
|
||||
setupError: p.setup_error ? String(p.setup_error) : undefined,
|
||||
},
|
||||
};
|
||||
setMessages(prev => capMessages([...prev, skillMsg]));
|
||||
return;
|
||||
}
|
||||
|
||||
// file_deleted: Datei wurde geloescht (vom Diagnostic User) → Bubble updaten
|
||||
if (message.type === 'file_deleted') {
|
||||
const p = (message.payload?.path as string) || '';
|
||||
if (!p) return;
|
||||
setMessages(prev => prev.map(m => ({
|
||||
...m,
|
||||
attachments: m.attachments?.map(a =>
|
||||
a.serverPath === p ? { ...a, deleted: true } : a
|
||||
),
|
||||
})));
|
||||
return;
|
||||
}
|
||||
|
||||
// file_list_response: wird vom Datei-Manager im SettingsScreen verarbeitet.
|
||||
|
||||
// file_from_aria: ARIA hat eine Datei rausgegeben → als ARIA-Bubble anzeigen
|
||||
if (message.type === 'file_from_aria') {
|
||||
const p = message.payload || {};
|
||||
const ariaMsg: ChatMessage = {
|
||||
id: nextId(),
|
||||
sender: 'aria',
|
||||
text: '',
|
||||
timestamp: Date.now(),
|
||||
attachments: [{
|
||||
type: (typeof p.mimeType === 'string' && p.mimeType.startsWith('image/')) ? 'image' : 'file',
|
||||
name: (p.name as string) || 'datei',
|
||||
size: (p.size as number) || 0,
|
||||
mimeType: (p.mimeType as string) || '',
|
||||
serverPath: (p.serverPath as string) || '',
|
||||
}],
|
||||
};
|
||||
setMessages(prev => capMessages([...prev, ariaMsg]));
|
||||
return;
|
||||
}
|
||||
|
||||
// file_response: Re-Download von Server — lokal speichern
|
||||
if (message.type === 'file_response') {
|
||||
const reqId = (message.payload.requestId as string) || '';
|
||||
const b64 = (message.payload.base64 as string) || '';
|
||||
const serverPath = (message.payload.serverPath as string) || '';
|
||||
const mimeType = (message.payload.mimeType as string) || '';
|
||||
if (b64 && reqId) {
|
||||
const fileName = (message.payload.name as string) || 'download';
|
||||
persistAttachment(b64, reqId, fileName).then(filePath => {
|
||||
@@ -262,6 +465,11 @@ const ChatScreen: React.FC = () => {
|
||||
a.serverPath === serverPath ? { ...a, uri: filePath } : a
|
||||
),
|
||||
})));
|
||||
// Wenn der User dieses File explizit oeffnen wollte → Intent-Picker
|
||||
if (serverPath && autoOpenPaths.current.has(serverPath)) {
|
||||
autoOpenPaths.current.delete(serverPath);
|
||||
openFileWithIntent(filePath.replace(/^file:\/\//, ''), mimeType);
|
||||
}
|
||||
}).catch(() => {});
|
||||
}
|
||||
return;
|
||||
@@ -269,6 +477,8 @@ const ChatScreen: React.FC = () => {
|
||||
|
||||
if (message.type === 'chat') {
|
||||
const sender = (message.payload.sender as string) || '';
|
||||
const dbgText = ((message.payload.text as string) || '').slice(0, 60);
|
||||
console.log('[Chat] chat-event sender=%s text=%s', sender || '(none)', dbgText);
|
||||
|
||||
// STT-Ergebnis: Transkribierten Text in die Sprach-Bubble schreiben.
|
||||
// WICHTIG: Nur die ERSTE noch unaufgeloeste Aufnahme matchen — sonst
|
||||
@@ -276,17 +486,42 @@ const ChatScreen: React.FC = () => {
|
||||
// den gleichen Text bekommen (Bug: zweite Antwort ueberschreibt erste).
|
||||
if (sender === 'stt') {
|
||||
const sttText = (message.payload.text as string) || '';
|
||||
if (sttText) {
|
||||
setMessages(prev => {
|
||||
const idx = prev.findIndex(m =>
|
||||
m.sender === 'user' && m.text.includes('Spracheingabe wird verarbeitet')
|
||||
);
|
||||
if (idx < 0) return prev;
|
||||
const next = prev.slice();
|
||||
next[idx] = { ...next[idx], text: `\uD83C\uDFA4 ${sttText}` };
|
||||
return next;
|
||||
});
|
||||
const sttAudioReqId = (message.payload.audioRequestId as string) || '';
|
||||
if (!sttText) {
|
||||
return;
|
||||
}
|
||||
setMessages(prev => {
|
||||
const newText = `\uD83C\uDFA4 ${sttText}`;
|
||||
// Primaer: matche per audioRequestId (eindeutig pro Aufnahme).
|
||||
// So gibt's keine Verwechslung wenn zwei Audios kurz hintereinander
|
||||
// gesendet wurden und ihre STT-Results ueberlappen.
|
||||
if (sttAudioReqId) {
|
||||
const idxById = prev.findIndex(m => m.audioRequestId === sttAudioReqId);
|
||||
if (idxById >= 0) {
|
||||
const next = prev.slice();
|
||||
next[idxById] = { ...next[idxById], text: newText };
|
||||
return next;
|
||||
}
|
||||
}
|
||||
          // Fallback: alte Bridge-Version ohne audioRequestId — match per Substring,
|
||||
// nimmt die ERSTE noch unaufgeloeste Placeholder.
|
||||
const idx = prev.findIndex(m =>
|
||||
m.sender === 'user' && m.text.includes('Spracheingabe wird verarbeitet')
|
||||
);
|
||||
if (idx >= 0) {
|
||||
const next = prev.slice();
|
||||
next[idx] = { ...next[idx], text: newText };
|
||||
return next;
|
||||
}
|
||||
          // Letzter Fallback: gar keine Placeholder → neue Bubble einfuegen
|
||||
return capMessages([...prev, {
|
||||
id: nextId(),
|
||||
sender: 'user',
|
||||
text: newText,
|
||||
timestamp: message.timestamp,
|
||||
attachments: [{ type: 'audio', name: 'Sprachaufnahme' }],
|
||||
}]);
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -367,6 +602,8 @@ const ChatScreen: React.FC = () => {
|
||||
const activity = (message.payload.activity as string) || 'idle';
|
||||
const tool = (message.payload.tool as string) || '';
|
||||
setAgentActivity({ activity, tool });
|
||||
// Spotify darf waehrend "ARIA denkt/schreibt" weiterspielen — pausiert
|
||||
        // nur wenn TTS startet (dann akquiriert _firePlaybackStarted den Focus).
|
||||
}
|
||||
|
||||
// Voice-Config aus Diagnostic — setzt die lokale App-Stimme auf den
|
||||
@@ -448,7 +685,14 @@ const ChatScreen: React.FC = () => {
|
||||
// Conversation-Window: User hat X Sekunden um anzufangen, sonst Konversation aus
|
||||
const windowMs = await loadConvWindowMs();
|
||||
const started = await audioService.startRecording(true, windowMs);
|
||||
if (!started) {
|
||||
if (started) {
|
||||
// Erst JETZT signalisieren dass das Mikro wirklich offen ist —
|
||||
// vorher war's noch in der Init-Phase. So weiss der User exakt
|
||||
// ab wann er reden kann. "Bereit"-Sound (Ding-Dong) ist optional
|
||||
// ueber Settings → Wake-Word abschaltbar.
|
||||
ToastAndroid.show('🎤 Mikro offen — sprich jetzt', ToastAndroid.SHORT);
|
||||
playWakeReadySound().catch(() => {});
|
||||
} else {
|
||||
// Mikrofon nicht verfuegbar, naechsten Versuch
|
||||
wakeWordService.resume();
|
||||
}
|
||||
@@ -459,13 +703,17 @@ const ChatScreen: React.FC = () => {
|
||||
const result = await audioService.stopRecording();
|
||||
if (result && result.durationMs > 500) {
|
||||
// User hat im Fenster gesprochen → Sprachnachricht senden
|
||||
// Barge-In: laufende ARIA-Aktivitaet abbrechen wenn welche da ist.
|
||||
const wasInterrupted = interruptAriaIfBusy();
|
||||
const location = await getCurrentLocation();
|
||||
const audioRequestId = `audio_${Date.now()}_${Math.floor(Math.random() * 100000)}`;
|
||||
const userMsg: ChatMessage = {
|
||||
id: nextId(),
|
||||
sender: 'user',
|
||||
text: '🎙 Spracheingabe wird verarbeitet...',
|
||||
timestamp: Date.now(),
|
||||
attachments: [{ type: 'audio', name: 'Sprachaufnahme' }],
|
||||
audioRequestId,
|
||||
};
|
||||
setMessages(prev => capMessages([...prev, userMsg]));
|
||||
rvs.send('audio', {
|
||||
@@ -474,8 +722,11 @@ const ChatScreen: React.FC = () => {
|
||||
mimeType: result.mimeType,
|
||||
voice: localXttsVoiceRef.current,
|
||||
speed: ttsSpeedRef.current,
|
||||
interrupted: wasInterrupted,
|
||||
audioRequestId,
|
||||
...(location && { location }),
|
||||
});
|
||||
scheduleStaleAudioCleanup(audioRequestId, result.durationMs);
|
||||
// resume() wird durch onPlaybackFinished nach ARIAs Antwort getriggert.
|
||||
} else {
|
||||
// Kein Speech im Window → Konversation beenden (Ohr geht aus oder
|
||||
@@ -486,9 +737,47 @@ const ChatScreen: React.FC = () => {
|
||||
}
|
||||
});
|
||||
|
||||
// Barge-In via Wake-Word: User sagt "Computer" waehrend ARIA spricht.
|
||||
// Wake-Word-Service hat bei TTS-Start parallel zu lauschen begonnen
|
||||
// (mit AcousticEchoCanceler damit ARIAs eigene Stimme nicht triggert).
|
||||
const unsubBarge = wakeWordService.onBargeIn(async () => {
|
||||
console.log('[Chat] Barge-In via Wake-Word — TTS abbrechen + neue Aufnahme');
|
||||
audioService.haltAllPlayback('barge-in via wake-word');
|
||||
setAgentActivity({ activity: 'idle', tool: '' });
|
||||
rvs.send('cancel_request' as any, {});
|
||||
// Kurze Pause damit halt durchgreift, dann neue Aufnahme starten
|
||||
await new Promise(r => setTimeout(r, 150));
|
||||
const windowMs = await loadConvWindowMs();
|
||||
const started = await audioService.startRecording(true, windowMs);
|
||||
if (started) {
|
||||
ToastAndroid.show('🎤 Mikro offen — sprich jetzt', ToastAndroid.SHORT);
|
||||
playWakeReadySound().catch(() => {});
|
||||
}
|
||||
});
|
||||
|
||||
// TTS-Lifecycle: solange ARIA spricht und Wake-Word verfuegbar ist,
|
||||
// parallel mitlauschen — User kann "Computer" sagen statt manuell tappen.
|
||||
// PLUS: Foreground-Service-Slot 'tts' belegen damit Android den App-
|
||||
// Prozess nicht killt wenn die App im Hintergrund ist.
|
||||
const unsubTtsStart = audioService.onPlaybackStarted(() => {
|
||||
acquireBackgroundAudio('tts').catch(() => {});
|
||||
if (wakeWordService.isConversing() && wakeWordService.hasWakeWord()) {
|
||||
wakeWordService.startBargeListening().catch(() => {});
|
||||
}
|
||||
});
|
||||
const unsubTtsEnd = audioService.onPlaybackFinished(() => {
|
||||
releaseBackgroundAudio('tts').catch(() => {});
|
||||
// Vor naechster Aufnahme: barge-listening aus damit der AudioRecorder
|
||||
// das Mikro greifen kann.
|
||||
wakeWordService.stopBargeListening().catch(() => {});
|
||||
});
|
||||
|
||||
return () => {
|
||||
unsubWake();
|
||||
unsubSilence();
|
||||
unsubBarge();
|
||||
unsubTtsStart();
|
||||
unsubTtsEnd();
|
||||
};
|
||||
}, [wakeWordActive]);
|
||||
|
||||
@@ -543,17 +832,23 @@ const ChatScreen: React.FC = () => {
|
||||
|
||||
// GPS-Position holen (optional)
|
||||
const getCurrentLocation = useCallback((): Promise<{ lat: number; lon: number } | null> => {
|
||||
if (!gpsEnabled) return Promise.resolve(null);
|
||||
if (!gpsEnabled) {
|
||||
console.log('[GPS] gpsEnabled=false → kein Standort');
|
||||
return Promise.resolve(null);
|
||||
}
|
||||
|
||||
return new Promise((resolve) => {
|
||||
Geolocation.getCurrentPosition(
|
||||
(position) => {
|
||||
resolve({
|
||||
const loc = {
|
||||
lat: position.coords.latitude,
|
||||
lon: position.coords.longitude,
|
||||
});
|
||||
};
|
||||
console.log('[GPS] Position: lat=%s lon=%s', loc.lat, loc.lon);
|
||||
resolve(loc);
|
||||
},
|
||||
(_error) => {
|
||||
(error) => {
|
||||
console.warn('[GPS] getCurrentPosition Fehler:', error?.code, error?.message);
|
||||
resolve(null);
|
||||
},
|
||||
{ enableHighAccuracy: false, timeout: 5000 },
|
||||
@@ -563,6 +858,29 @@ const ChatScreen: React.FC = () => {
|
||||
|
||||
// --- Nachricht senden ---
|
||||
|
||||
// Aufraeumen von "verarbeitet"-Placeholder die nie ein STT-Result bekommen
|
||||
// haben (leere Aufnahme, Wake-Word-Echo, STT-Fehler etc). Timeout skaliert
|
||||
// mit der Aufnahmedauer — Whisper braucht auf der Gamebox grob real-time/5,
|
||||
// plus Bridge-Roundtrip + Network. Formel: 60s Buffer + 1x Aufnahmedauer.
|
||||
// Bei 5min Aufnahme = 6 min Wait, bei 5s Aufnahme = 65s. Sicher genug damit
|
||||
// langsame STTs nicht versehentlich aufgeraeumt werden.
|
||||
const scheduleStaleAudioCleanup = useCallback((audioRequestId: string, recordingMs: number) => {
|
||||
const timeoutMs = 60000 + recordingMs;
|
||||
setTimeout(() => {
|
||||
setMessages(prev => {
|
||||
const idx = prev.findIndex(m =>
|
||||
m.audioRequestId === audioRequestId &&
|
||||
m.text.includes('Spracheingabe wird verarbeitet')
|
||||
);
|
||||
if (idx < 0) return prev;
|
||||
console.log('[Chat] Sprachnachricht ohne STT-Result nach %dms entfernt: %s',
|
||||
timeoutMs, audioRequestId);
|
||||
ToastAndroid.show('Sprachnachricht nicht erkannt — entfernt', ToastAndroid.SHORT);
|
||||
return prev.filter((_, i) => i !== idx);
|
||||
});
|
||||
}, timeoutMs);
|
||||
}, []);
|
||||
|
||||
const sendTextMessage = useCallback(async () => {
|
||||
const text = inputText.trim();
|
||||
|
||||
@@ -576,6 +894,8 @@ const ChatScreen: React.FC = () => {
|
||||
|
||||
setInputText('');
|
||||
|
||||
// Barge-In: laufende ARIA-Aktivitaet abbrechen wenn welche da ist.
|
||||
const wasInterrupted = interruptAriaIfBusy();
|
||||
const location = await getCurrentLocation();
|
||||
|
||||
const userMsg: ChatMessage = {
|
||||
@@ -586,16 +906,17 @@ const ChatScreen: React.FC = () => {
|
||||
};
|
||||
setMessages(prev => capMessages([...prev, userMsg]));
|
||||
|
||||
console.log('[Chat] sende mit voice=%s speed=%s',
|
||||
localXttsVoiceRef.current || '(default)', ttsSpeedRef.current);
|
||||
console.log('[Chat] sende mit voice=%s speed=%s interrupted=%s',
|
||||
localXttsVoiceRef.current || '(default)', ttsSpeedRef.current, wasInterrupted);
|
||||
// An RVS senden — mit geraetelokaler Voice (Bridge nutzt sie fuer die Antwort)
|
||||
rvs.send('chat', {
|
||||
text,
|
||||
voice: localXttsVoiceRef.current,
|
||||
speed: ttsSpeedRef.current,
|
||||
interrupted: wasInterrupted,
|
||||
...(location && { location }),
|
||||
});
|
||||
}, [inputText, getCurrentLocation, pendingAttachments, sendPendingAttachments]);
|
||||
}, [inputText, getCurrentLocation, pendingAttachments, sendPendingAttachments, interruptAriaIfBusy]);
|
||||
|
||||
// Anfrage abbrechen — sofort lokalen Indicator weg, Bridge triggert doctor --fix
|
||||
const cancelRequest = useCallback(() => {
|
||||
@@ -603,15 +924,37 @@ const ChatScreen: React.FC = () => {
|
||||
rvs.send('cancel_request' as any, {});
|
||||
}, []);
|
||||
|
||||
// Barge-In: wenn der User waehrend ARIA arbeitet/spricht eine neue Sprach-
|
||||
// Nachricht aufnimmt, alte Aktivitaet sofort abbrechen — TTS verstummen,
|
||||
// aria-core-Run via cancel_request abbrechen. So kann man "ach vergiss es,
|
||||
// mach lieber X" sagen wie in einem echten Gespraech.
|
||||
const interruptAriaIfBusy = useCallback(() => {
|
||||
const speaking = audioService.isPlayingAudio();
|
||||
const thinking = agentActivity.activity !== 'idle';
|
||||
if (!speaking && !thinking) return false;
|
||||
console.log('[Chat] Barge-In: speaking=%s thinking=%s — interrupting ARIA',
|
||||
speaking, thinking);
|
||||
if (speaking) audioService.haltAllPlayback('user spricht (barge-in)');
|
||||
if (thinking) {
|
||||
setAgentActivity({ activity: 'idle', tool: '' });
|
||||
rvs.send('cancel_request' as any, {});
|
||||
}
|
||||
return true;
|
||||
}, [agentActivity]);
|
||||
|
||||
// Sprachaufnahme abgeschlossen
|
||||
const handleVoiceRecording = useCallback(async (result: RecordingResult) => {
|
||||
// Barge-In: laufende ARIA-Aktivitaet abbrechen falls aktiv.
|
||||
const wasInterrupted = interruptAriaIfBusy();
|
||||
const location = await getCurrentLocation();
|
||||
const audioRequestId = `audio_${Date.now()}_${Math.floor(Math.random() * 100000)}`;
|
||||
|
||||
const userMsg: ChatMessage = {
|
||||
id: nextId(),
|
||||
sender: 'user',
|
||||
text: '🎙 Spracheingabe wird verarbeitet...',
|
||||
timestamp: Date.now(),
|
||||
audioRequestId,
|
||||
};
|
||||
setMessages(prev => capMessages([...prev, userMsg]));
|
||||
|
||||
@@ -619,9 +962,25 @@ const ChatScreen: React.FC = () => {
|
||||
base64: result.base64,
|
||||
durationMs: result.durationMs,
|
||||
mimeType: result.mimeType,
|
||||
voice: localXttsVoiceRef.current,
|
||||
speed: ttsSpeedRef.current,
|
||||
interrupted: wasInterrupted,
|
||||
audioRequestId,
|
||||
...(location && { location }),
|
||||
});
|
||||
}, [getCurrentLocation]);
|
||||
scheduleStaleAudioCleanup(audioRequestId, result.durationMs);
|
||||
|
||||
// Manueller Mikro-Stop waehrend Wake-Word-Konversation: User hat explizit
|
||||
// den Knopf gedrueckt → er moechte nicht in den automatischen Multi-Turn-
|
||||
// Modus, sondern nach ARIAs Antwort zurueck zu passivem Wake-Word-Lauschen.
|
||||
// Bei VAD-Auto-Stop (Wake-Word-Pfad) laeuft das ueber den silence-callback
|
||||
// und endet mit resume() — der manuelle Stop hier ist der "ich bin fertig"-
|
||||
// Knopf.
|
||||
if (wakeWordService.isConversing()) {
|
||||
console.log('[Chat] Manueller Stop in Konversation → endConversation, zurueck zu armed');
|
||||
await wakeWordService.endConversation();
|
||||
}
|
||||
}, [getCurrentLocation, interruptAriaIfBusy, scheduleStaleAudioCleanup]);
|
||||
|
||||
// Datei auswaehlen → zur Pending-Liste hinzufuegen
|
||||
const handleFileSelected = useCallback(async (file: FileData) => {
|
||||
@@ -638,6 +997,7 @@ const ChatScreen: React.FC = () => {
|
||||
// Alle Pending Anhaenge + Text senden
|
||||
const sendPendingAttachments = useCallback(async (messageText: string) => {
|
||||
if (pendingAttachments.length === 0) return;
|
||||
console.log('[Chat] sendPendingAttachments: %d Anhang/Anhaenge', pendingAttachments.length);
|
||||
const location = await getCurrentLocation();
|
||||
const msgId = nextId();
|
||||
|
||||
@@ -687,6 +1047,8 @@ const ChatScreen: React.FC = () => {
|
||||
}
|
||||
|
||||
// An RVS senden
|
||||
console.log('[Chat] sende file: name=%s mime=%s size=%s b64Bytes=%s',
|
||||
name, mimeType, file.size, base64.length);
|
||||
rvs.send('file', {
|
||||
name,
|
||||
type: mimeType,
|
||||
@@ -720,17 +1082,44 @@ const ChatScreen: React.FC = () => {
|
||||
minute: '2-digit',
|
||||
});
|
||||
|
||||
// Spezial-Bubble: ARIA hat einen Skill erstellt
|
||||
if (item.skillCreated) {
|
||||
const s = item.skillCreated;
|
||||
return (
|
||||
<View style={[styles.messageBubble, styles.ariaBubble, {borderLeftWidth: 3, borderLeftColor: '#FFD60A'}]}>
|
||||
<Text style={{color: '#FFD60A', fontWeight: 'bold', fontSize: 14}}>
|
||||
{'🛠 ARIA hat einen neuen Skill erstellt'}
|
||||
</Text>
|
||||
<Text style={{color: '#E0E0F0', marginTop: 4, fontSize: 14}}>
|
||||
<Text style={{fontWeight: 'bold'}}>{s.name}</Text>
|
||||
<Text style={{color: '#8888AA', fontSize: 12}}>{` (${s.execution}, ${s.active ? 'aktiv' : 'deaktiviert'})`}</Text>
|
||||
</Text>
|
||||
<Text style={{color: '#8888AA', fontSize: 12, marginTop: 2}}>{s.description}</Text>
|
||||
{s.setupError && (
|
||||
<Text style={{color: '#FF6B6B', fontSize: 11, marginTop: 4}}>
|
||||
{'⚠ Setup-Fehler: '}{s.setupError.slice(0, 200)}
|
||||
</Text>
|
||||
)}
|
||||
<Text style={{color: '#555570', fontSize: 10, marginTop: 6}}>ARIA-Skill · {time}</Text>
|
||||
</View>
|
||||
);
|
||||
}
|
||||
|
||||
return (
|
||||
<View style={[styles.messageBubble, isUser ? styles.userBubble : styles.ariaBubble]}>
|
||||
{/* Anhang-Vorschau */}
|
||||
{item.attachments?.map((att, idx) => (
|
||||
<View key={idx}>
|
||||
{att.type === 'image' && att.uri ? (
|
||||
<TouchableOpacity onPress={() => setFullscreenImage(att.uri || null)} activeOpacity={0.8}>
|
||||
<Image
|
||||
source={{ uri: att.uri }}
|
||||
style={styles.attachmentImage}
|
||||
resizeMode="cover"
|
||||
{att.deleted ? (
|
||||
<View style={[styles.attachmentFile, {opacity: 0.6}]}>
|
||||
<Text style={styles.attachmentFileIcon}>{'🗑️'}</Text>
|
||||
<Text style={[styles.attachmentFileName, {textDecorationLine: 'line-through'}]} numberOfLines={1}>{att.name}</Text>
|
||||
<Text style={[styles.attachmentFileSize, {color: '#FF9500'}]}>(geloescht)</Text>
|
||||
</View>
|
||||
) : att.type === 'image' && att.uri ? (
|
||||
<ChatImage
|
||||
uri={att.uri}
|
||||
onPress={() => setFullscreenImage(att.uri || null)}
|
||||
onError={() => {
|
||||
setMessages(prev => prev.map(m =>
|
||||
m.id === item.id ? { ...m, attachments: m.attachments?.map((a, i) =>
|
||||
@@ -739,7 +1128,6 @@ const ChatScreen: React.FC = () => {
|
||||
));
|
||||
}}
|
||||
/>
|
||||
</TouchableOpacity>
|
||||
) : att.type === 'image' && !att.uri ? (
|
||||
<TouchableOpacity
|
||||
style={styles.attachmentFile}
|
||||
@@ -756,7 +1144,22 @@ const ChatScreen: React.FC = () => {
|
||||
</Text>
|
||||
</TouchableOpacity>
|
||||
) : (
|
||||
<View style={styles.attachmentFile}>
|
||||
<TouchableOpacity
|
||||
style={styles.attachmentFile}
|
||||
onPress={() => {
|
||||
                  // Lokal vorhanden → direkt mit System-Intent oeffnen
|
||||
if (att.uri) {
|
||||
openFileWithIntent(att.uri.replace(/^file:\/\//, ''), att.mimeType || '');
|
||||
return;
|
||||
}
|
||||
                  // Sonst: file_request → bei file_response wird die Datei
|
||||
// gespeichert UND geoeffnet (autoOpenPaths-Tracking).
|
||||
if (att.serverPath) {
|
||||
autoOpenPaths.current.add(att.serverPath);
|
||||
rvs.send('file_request' as any, { serverPath: att.serverPath, requestId: item.id });
|
||||
}
|
||||
}}
|
||||
>
|
||||
<Text style={styles.attachmentFileIcon}>
|
||||
{att.mimeType?.includes('pdf') ? '\uD83D\uDCC4' :
|
||||
att.mimeType?.includes('word') || att.mimeType?.includes('document') ? '\uD83D\uDCC3' :
|
||||
@@ -766,12 +1169,10 @@ const ChatScreen: React.FC = () => {
|
||||
<Text style={styles.attachmentFileName} numberOfLines={1}>{att.name}</Text>
|
||||
{att.size ? <Text style={styles.attachmentFileSize}>{Math.round(att.size / 1024)}KB</Text> : null}
|
||||
{!att.uri && att.serverPath && (
|
||||
<TouchableOpacity onPress={() => rvs.send('file_request' as any, { serverPath: att.serverPath, requestId: item.id })}>
|
||||
<Text style={[styles.attachmentFileSize, {color: '#0096FF'}]}>(laden)</Text>
|
||||
</TouchableOpacity>
|
||||
<Text style={[styles.attachmentFileSize, {color: '#0096FF'}]}>(tippen zum oeffnen)</Text>
|
||||
)}
|
||||
{!att.uri && !att.serverPath && <Text style={styles.attachmentFileSize}>(nicht verfuegbar)</Text>}
|
||||
</View>
|
||||
</TouchableOpacity>
|
||||
)}
|
||||
</View>
|
||||
))}
|
||||
@@ -786,19 +1187,24 @@ const ChatScreen: React.FC = () => {
|
||||
{!isUser && item.text.length > 0 && (
|
||||
<TouchableOpacity
|
||||
style={styles.playButton}
|
||||
onPress={() => {
|
||||
if (item.audioPath) {
|
||||
audioService.playFromPath(item.audioPath);
|
||||
} else {
|
||||
// messageId mitschicken damit die Bridge das generierte Audio
|
||||
// wieder mit der Nachricht verknuepft (fuer den naechsten Replay aus Cache)
|
||||
rvs.send('tts_request' as any, {
|
||||
text: item.text,
|
||||
voice: localXttsVoiceRef.current,
|
||||
speed: ttsSpeedRef.current,
|
||||
messageId: item.messageId || '',
|
||||
});
|
||||
onPress={async () => {
|
||||
// Erst lokalen Cache pruefen — audioPath kann auf eine geloeschte
|
||||
// Datei zeigen (TTS-Cache geleert oder Auto-Cleanup). In dem Fall
|
||||
// ueber RVS neu rendern lassen statt stumm zu bleiben.
|
||||
const cachePath = item.audioPath?.replace(/^file:\/\//, '') || '';
|
||||
const cached = cachePath ? await RNFS.exists(cachePath).catch(() => false) : false;
|
||||
if (cached) {
|
||||
audioService.playFromPath(item.audioPath!);
|
||||
return;
|
||||
}
|
||||
// messageId mitschicken damit die Bridge das generierte Audio
|
||||
// wieder mit der Nachricht verknuepft (fuer den naechsten Replay aus Cache)
|
||||
rvs.send('tts_request' as any, {
|
||||
text: item.text,
|
||||
voice: localXttsVoiceRef.current,
|
||||
speed: ttsSpeedRef.current,
|
||||
messageId: item.messageId || '',
|
||||
});
|
||||
}}
|
||||
>
|
||||
<Text style={styles.playButtonText}>{'\uD83D\uDD0A'}</Text>
|
||||
@@ -919,9 +1325,11 @@ const ChatScreen: React.FC = () => {
|
||||
? '\u270D\uFE0F ARIA schreibt...'
|
||||
: '\uD83D\uDCAD ARIA denkt...'}
|
||||
</Text>
|
||||
<TouchableOpacity style={styles.thinkingCancel} onPress={cancelRequest}>
|
||||
<Text style={styles.thinkingCancelText}>Abbrechen</Text>
|
||||
</TouchableOpacity>
|
||||
<View style={{flexDirection: 'row', gap: 6}}>
|
||||
<TouchableOpacity style={styles.thinkingCancel} onPress={cancelRequest}>
|
||||
<Text style={styles.thinkingCancelText}>Abbrechen</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
</View>
|
||||
)}
|
||||
|
||||
@@ -1027,19 +1435,32 @@ const ChatScreen: React.FC = () => {
|
||||
|
||||
{/* Bild-Vollbild Modal */}
|
||||
<Modal visible={!!fullscreenImage} transparent animationType="fade" onRequestClose={() => setFullscreenImage(null)}>
|
||||
<TouchableOpacity
|
||||
style={styles.fullscreenOverlay}
|
||||
activeOpacity={1}
|
||||
onPress={() => setFullscreenImage(null)}
|
||||
>
|
||||
<View style={styles.fullscreenOverlay}>
|
||||
{fullscreenImage && (
|
||||
<Image
|
||||
source={{ uri: fullscreenImage }}
|
||||
style={styles.fullscreenImage}
|
||||
resizeMode="contain"
|
||||
/>
|
||||
/\.svg(?:\?|$)/i.test(fullscreenImage) ? (
|
||||
// SVG: bisher keine Pinch-Zoom — Tap zum Schliessen
|
||||
<TouchableOpacity style={styles.fullscreenImage} activeOpacity={1} onPress={() => setFullscreenImage(null)}>
|
||||
<SvgUri uri={fullscreenImage} width="100%" height="100%" preserveAspectRatio="xMidYMid meet" />
|
||||
</TouchableOpacity>
|
||||
) : (
|
||||
// Pixel-Bild: Pinch-Zoom + Pan ueber ZoomableImage
|
||||
<ZoomableImage
|
||||
uri={fullscreenImage}
|
||||
containerWidth={Dimensions.get('window').width}
|
||||
containerHeight={Dimensions.get('window').height}
|
||||
style={styles.fullscreenImage}
|
||||
/>
|
||||
)
|
||||
)}
|
||||
</TouchableOpacity>
|
||||
          {/* Close-Button oben rechts — Tap-to-Close ueber den umschliessenden TouchableOpacity funktioniert
|
||||
              wegen des ZoomableImage-PanResponders nicht zuverlaessig */}
|
||||
<TouchableOpacity
|
||||
style={{ position: 'absolute', top: 32, right: 16, padding: 12, backgroundColor: 'rgba(0,0,0,0.5)', borderRadius: 24 }}
|
||||
onPress={() => setFullscreenImage(null)}
|
||||
>
|
||||
<Text style={{ color: '#FFF', fontSize: 22 }}>{'✕'}</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
</Modal>
|
||||
|
||||
{/* Datei-Upload Modal */}
|
||||
@@ -1142,9 +1563,11 @@ const styles = StyleSheet.create({
|
||||
color: '#E0E0F0',
|
||||
},
|
||||
attachmentImage: {
|
||||
width: '100%',
|
||||
minHeight: 200,
|
||||
maxHeight: 400,
|
||||
// Feste Breite + dynamische aspectRatio (in ChatImage gesetzt) damit die
|
||||
// Bubble sich ans Bild anpasst. Mit width: '100%' ohne explizite Parent-
|
||||
// Breite wuerde RN das Bild auf 0px schrumpfen → "Strich".
|
||||
width: 260,
|
||||
aspectRatio: 4 / 3,
|
||||
borderRadius: 8,
|
||||
marginBottom: 6,
|
||||
backgroundColor: '#0D0D1A',
|
||||
|
||||
@@ -17,6 +17,8 @@ import {
|
||||
Platform,
|
||||
ToastAndroid,
|
||||
ActivityIndicator,
|
||||
Modal,
|
||||
PermissionsAndroid,
|
||||
} from 'react-native';
|
||||
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||
import RNFS from 'react-native-fs';
|
||||
@@ -35,20 +37,36 @@ import {
|
||||
CONV_WINDOW_MIN_SEC,
|
||||
CONV_WINDOW_MAX_SEC,
|
||||
CONV_WINDOW_STORAGE_KEY,
|
||||
MAX_RECORDING_DEFAULT_SEC,
|
||||
MAX_RECORDING_MIN_SEC,
|
||||
MAX_RECORDING_MAX_SEC,
|
||||
MAX_RECORDING_STORAGE_KEY,
|
||||
VAD_SILENCE_DB_DEFAULT,
|
||||
VAD_SILENCE_DB_MIN,
|
||||
VAD_SILENCE_DB_MAX,
|
||||
VAD_SILENCE_DB_OVERRIDE_KEY,
|
||||
TTS_SPEED_DEFAULT,
|
||||
TTS_SPEED_MIN,
|
||||
TTS_SPEED_MAX,
|
||||
TTS_SPEED_STORAGE_KEY,
|
||||
} from '../services/audio';
|
||||
import audioService from '../services/audio';
|
||||
import { isVerboseLogging, setVerboseLogging } from '../services/logger';
|
||||
import {
|
||||
isWakeReadySoundEnabled,
|
||||
setWakeReadySoundEnabled,
|
||||
playWakeReadySound,
|
||||
} from '../services/wakeReadySound';
|
||||
import wakeWordService, {
|
||||
BUILTIN_KEYWORDS,
|
||||
WAKE_KEYWORDS,
|
||||
KEYWORD_LABELS,
|
||||
DEFAULT_KEYWORD,
|
||||
WAKE_ACCESS_KEY_STORAGE,
|
||||
WAKE_KEYWORD_STORAGE,
|
||||
} from '../services/wakeword';
|
||||
import ModeSelector from '../components/ModeSelector';
|
||||
import QRScanner from '../components/QRScanner';
|
||||
import VoiceCloneModal from '../components/VoiceCloneModal';
|
||||
import updateService from '../services/updater';
|
||||
|
||||
const STORAGE_PATH_KEY = 'aria_attachment_storage_path';
|
||||
const DEFAULT_STORAGE_PATH = `${RNFS.DocumentDirectoryPath}/chat_attachments`;
|
||||
@@ -72,6 +90,19 @@ interface EventEntry {
|
||||
|
||||
type LogTab = 'live' | 'events';
|
||||
|
||||
// Settings-Sub-Screens. Reihenfolge im Hauptmenue.
|
||||
const SETTINGS_SECTIONS = [
|
||||
{ id: 'connection', icon: '🔌', label: 'Verbindung', desc: 'Server, Token, Status, Verbindungslog' },
|
||||
{ id: 'general', icon: '⚙️', label: 'Allgemein', desc: 'Betriebsmodus, GPS-Standort' },
|
||||
{ id: 'voice_input', icon: '🎙️', label: 'Spracheingabe', desc: 'Stille-Toleranz, Aufnahmedauer' },
|
||||
{ id: 'wake_word', icon: '👂', label: 'Wake-Word', desc: 'Wake-Word-Auswahl' },
|
||||
{ id: 'voice_output', icon: '🔊', label: 'Sprachausgabe', desc: 'Stimmen, Pre-Roll, Geschwindigkeit' },
|
||||
{ id: 'storage', icon: '📁', label: 'Speicher', desc: 'Anhang-Speicherort, Auto-Download' },
|
||||
{ id: 'files', icon: '📂', label: 'Dateien', desc: 'ARIA- und User-Dateien — anzeigen, löschen' },
|
||||
{ id: 'protocol', icon: '📜', label: 'Protokoll', desc: 'Privatsphaere, Backup' },
|
||||
{ id: 'about', icon: 'ℹ️', label: 'Ueber', desc: 'App-Version, Update' },
|
||||
] as const;
|
||||
|
||||
// Container-Farben fuer Live-Logs
|
||||
const SOURCE_COLORS: Record<string, string> = {
|
||||
'aria-core': '#4A9EFF', // Blau
|
||||
@@ -102,17 +133,34 @@ const SettingsScreen: React.FC = () => {
|
||||
const [ttsPrerollSec, setTtsPrerollSec] = useState<number>(TTS_PREROLL_DEFAULT_SEC);
|
||||
const [vadSilenceSec, setVadSilenceSec] = useState<number>(VAD_SILENCE_DEFAULT_SEC);
|
||||
const [convWindowSec, setConvWindowSec] = useState<number>(CONV_WINDOW_DEFAULT_SEC);
|
||||
const [maxRecordingSec, setMaxRecordingSec] = useState<number>(MAX_RECORDING_DEFAULT_SEC);
|
||||
// null = automatisch (adaptive Baseline), sonst manueller dB-Override
|
||||
const [vadSilenceDb, setVadSilenceDb] = useState<number | null>(null);
|
||||
const [showVadInfo, setShowVadInfo] = useState(false);
|
||||
const [apkCacheInfo, setApkCacheInfo] = useState<{count: number, totalMB: number} | null>(null);
|
||||
const [ttsCacheInfo, setTtsCacheInfo] = useState<{count: number, totalMB: number} | null>(null);
|
||||
const [verboseLogging, setVerboseLoggingState] = useState<boolean>(isVerboseLogging());
|
||||
const [ttsSpeed, setTtsSpeed] = useState<number>(TTS_SPEED_DEFAULT);
|
||||
const [wakeAccessKey, setWakeAccessKey] = useState<string>('');
|
||||
const [wakeAccessKeyVisible, setWakeAccessKeyVisible] = useState(false);
|
||||
const [wakeKeyword, setWakeKeyword] = useState<string>(DEFAULT_KEYWORD);
|
||||
const [wakeStatus, setWakeStatus] = useState<string>('');
|
||||
const [wakeReadySound, setWakeReadySound] = useState<boolean>(true);
|
||||
const [editingPath, setEditingPath] = useState(false);
|
||||
const [xttsVoice, setXttsVoice] = useState('');
|
||||
const [loadingVoice, setLoadingVoice] = useState<string | null>(null);
|
||||
const [availableVoices, setAvailableVoices] = useState<Array<{name: string, size: number}>>([]);
|
||||
// Datei-Manager
|
||||
const [fileManagerOpen, setFileManagerOpen] = useState(false);
|
||||
const [fileManagerFiles, setFileManagerFiles] = useState<Array<{name: string; path: string; size: number; mtime: number; fromAria: boolean}>>([]);
|
||||
const [fileManagerLoading, setFileManagerLoading] = useState(false);
|
||||
const [fileManagerError, setFileManagerError] = useState('');
|
||||
const [fileManagerSearch, setFileManagerSearch] = useState('');
|
||||
const [fileManagerFilter, setFileManagerFilter] = useState<'all' | 'aria' | 'user'>('all');
|
||||
const [voiceCloneVisible, setVoiceCloneVisible] = useState(false);
|
||||
const [tempPath, setTempPath] = useState('');
|
||||
// Sub-Screen Navigation: null = Hauptmenue, sonst eine der Section-IDs.
|
||||
// So bleibt aller geteilte State im selben Component-Closure und wir
|
||||
  // brauchen kein react-navigation-Stack-Setup.
|
||||
const [currentSection, setCurrentSection] = useState<string | null>(null);
|
||||
|
||||
let logIdCounter = 0;
|
||||
|
||||
@@ -134,6 +182,9 @@ const SettingsScreen: React.FC = () => {
|
||||
AsyncStorage.getItem('aria_tts_enabled').then(saved => {
|
||||
if (saved !== null) setTtsEnabled(saved === 'true');
|
||||
});
|
||||
AsyncStorage.getItem('aria_gps_enabled').then(saved => {
|
||||
if (saved !== null) setGpsEnabled(saved === 'true');
|
||||
});
|
||||
AsyncStorage.getItem(TTS_PREROLL_STORAGE_KEY).then(saved => {
|
||||
if (saved != null) {
|
||||
const n = parseFloat(saved);
|
||||
@@ -158,18 +209,34 @@ const SettingsScreen: React.FC = () => {
|
||||
}
|
||||
}
|
||||
});
|
||||
AsyncStorage.getItem(MAX_RECORDING_STORAGE_KEY).then(saved => {
|
||||
if (saved != null) {
|
||||
const n = parseFloat(saved);
|
||||
if (isFinite(n) && n >= MAX_RECORDING_MIN_SEC && n <= MAX_RECORDING_MAX_SEC) {
|
||||
setMaxRecordingSec(n);
|
||||
}
|
||||
}
|
||||
});
|
||||
AsyncStorage.getItem(VAD_SILENCE_DB_OVERRIDE_KEY).then(saved => {
|
||||
if (saved != null && saved !== '') {
|
||||
const n = parseFloat(saved);
|
||||
if (isFinite(n) && n >= VAD_SILENCE_DB_MIN && n <= VAD_SILENCE_DB_MAX) {
|
||||
setVadSilenceDb(n);
|
||||
}
|
||||
}
|
||||
});
|
||||
AsyncStorage.getItem(TTS_SPEED_STORAGE_KEY).then(saved => {
|
||||
if (saved != null) {
|
||||
const n = parseFloat(saved);
|
||||
if (isFinite(n) && n >= TTS_SPEED_MIN && n <= TTS_SPEED_MAX) setTtsSpeed(n);
|
||||
}
|
||||
});
|
||||
AsyncStorage.getItem(WAKE_ACCESS_KEY_STORAGE).then(saved => {
|
||||
if (saved) setWakeAccessKey(saved);
|
||||
});
|
||||
AsyncStorage.getItem(WAKE_KEYWORD_STORAGE).then(saved => {
|
||||
if (saved) setWakeKeyword(saved);
|
||||
if (saved && (WAKE_KEYWORDS as readonly string[]).includes(saved)) setWakeKeyword(saved);
|
||||
});
|
||||
isWakeReadySoundEnabled().then(setWakeReadySound);
|
||||
updateService.getApkCacheSize().then(setApkCacheInfo).catch(() => {});
|
||||
audioService.getTtsCacheSize().then(setTtsCacheInfo).catch(() => {});
|
||||
AsyncStorage.getItem('aria_xtts_voice').then(saved => {
|
||||
if (saved) setXttsVoice(saved);
|
||||
});
|
||||
@@ -312,6 +379,25 @@ const SettingsScreen: React.FC = () => {
|
||||
setAvailableVoices(voices);
|
||||
}
|
||||
|
||||
// Datei-Manager: Liste empfangen
|
||||
if (message.type === ('file_list_response' as any)) {
|
||||
const p: any = message.payload || {};
|
||||
if (p.ok) {
|
||||
setFileManagerFiles(p.files || []);
|
||||
} else {
|
||||
setFileManagerError(p.error || 'Unbekannter Fehler');
|
||||
}
|
||||
setFileManagerLoading(false);
|
||||
}
|
||||
|
||||
// Datei-Manager: Datei wurde geloescht (vom Diagnostic oder dieser App)
|
||||
if (message.type === ('file_deleted' as any)) {
|
||||
const p: any = message.payload || {};
|
||||
if (p.path) {
|
||||
setFileManagerFiles(prev => prev.filter(f => f.path !== p.path));
|
||||
}
|
||||
}
|
||||
|
||||
// Voice wurde gespeichert → Liste neu laden + ggf. auswaehlen
|
||||
if (message.type === ('xtts_voice_saved' as any)) {
|
||||
const name = (message.payload as any).name as string;
|
||||
@@ -404,9 +490,31 @@ const SettingsScreen: React.FC = () => {
|
||||
|
||||
// --- GPS Toggle ---
|
||||
|
||||
const handleGPSToggle = useCallback((value: boolean) => {
|
||||
const handleGPSToggle = useCallback(async (value: boolean) => {
|
||||
if (value && Platform.OS === 'android') {
|
||||
try {
|
||||
const granted = await PermissionsAndroid.request(
|
||||
PermissionsAndroid.PERMISSIONS.ACCESS_COARSE_LOCATION,
|
||||
{
|
||||
title: 'ARIA — Standort an Anfragen anhaengen',
|
||||
message: 'Damit ARIA bei Anfragen wie "Wo ist der naechste...?" den '
|
||||
+ 'Standort kennt, darf die App den ungefaehren Standort lesen. '
|
||||
+ 'Wird nur bei jeder Anfrage einmal abgerufen, nicht im Hintergrund.',
|
||||
buttonPositive: 'Erlauben',
|
||||
buttonNegative: 'Abbrechen',
|
||||
},
|
||||
);
|
||||
if (granted !== PermissionsAndroid.RESULTS.GRANTED) {
|
||||
ToastAndroid.show('Standort-Berechtigung abgelehnt', ToastAndroid.SHORT);
|
||||
return;
|
||||
}
|
||||
} catch (err) {
|
||||
console.warn('[Settings] GPS-Permission Request gescheitert:', err);
|
||||
return;
|
||||
}
|
||||
}
|
||||
setGpsEnabled(value);
|
||||
    // Wert in AsyncStorage persistieren — der ChatScreen-Poll uebernimmt ihn beim naechsten Tick
|
||||
AsyncStorage.setItem('aria_gps_enabled', String(value)).catch(() => {});
|
||||
}, []);
|
||||
|
||||
// --- XTTS Voice ---
|
||||
@@ -483,9 +591,154 @@ const SettingsScreen: React.FC = () => {
|
||||
visible={voiceCloneVisible}
|
||||
onClose={() => setVoiceCloneVisible(false)}
|
||||
/>
|
||||
{/* Datei-Manager Modal */}
|
||||
<Modal
|
||||
visible={fileManagerOpen}
|
||||
animationType="slide"
|
||||
onRequestClose={() => setFileManagerOpen(false)}
|
||||
>
|
||||
<View style={{flex:1, backgroundColor:'#080810', paddingTop:24}}>
|
||||
<View style={{flexDirection:'row', alignItems:'center', padding:12, borderBottomWidth:1, borderColor:'#1E1E2E'}}>
|
||||
<TouchableOpacity onPress={() => setFileManagerOpen(false)} style={{padding:8}}>
|
||||
<Text style={{color:'#0096FF', fontSize:24}}>‹</Text>
|
||||
</TouchableOpacity>
|
||||
<Text style={{color:'#E0E0F0', fontSize:18, fontWeight:'600', flex:1, marginLeft:8}}>Dateien</Text>
|
||||
<TouchableOpacity
|
||||
onPress={() => {
|
||||
setFileManagerError('');
|
||||
setFileManagerLoading(true);
|
||||
rvs.send('file_list_request' as any, {});
|
||||
}}
|
||||
style={{padding:8}}
|
||||
>
|
||||
<Text style={{color:'#0096FF', fontSize:14}}>🔄</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
<View style={{padding:12}}>
|
||||
<TextInput
|
||||
style={{backgroundColor:'#1E1E2E', borderRadius:8, padding:10, color:'#E0E0F0', fontSize:14}}
|
||||
placeholder="Suche..."
|
||||
placeholderTextColor="#555570"
|
||||
value={fileManagerSearch}
|
||||
onChangeText={setFileManagerSearch}
|
||||
autoCapitalize="none"
|
||||
/>
|
||||
<View style={{flexDirection:'row', marginTop:8, gap:6}}>
|
||||
{(['all','aria','user'] as const).map(f => (
|
||||
<TouchableOpacity
|
||||
key={f}
|
||||
onPress={() => setFileManagerFilter(f)}
|
||||
style={{
|
||||
paddingVertical:6, paddingHorizontal:12, borderRadius:14,
|
||||
backgroundColor: fileManagerFilter === f ? '#0096FF' : '#1E1E2E',
|
||||
}}
|
||||
>
|
||||
<Text style={{color: fileManagerFilter === f ? '#fff' : '#8888AA', fontSize:12}}>
|
||||
{f === 'all' ? 'Alle' : f === 'aria' ? 'Von ARIA' : 'Von dir'}
|
||||
</Text>
|
||||
</TouchableOpacity>
|
||||
))}
|
||||
</View>
|
||||
</View>
|
||||
{fileManagerLoading ? (
|
||||
<Text style={{color:'#8888AA', textAlign:'center', marginTop:20}}>Lade...</Text>
|
||||
) : fileManagerError ? (
|
||||
<Text style={{color:'#FF6B6B', textAlign:'center', marginTop:20}}>{fileManagerError}</Text>
|
||||
) : (
|
||||
<ScrollView style={{flex:1}} contentContainerStyle={{padding:12}}>
|
||||
{(() => {
|
||||
let files = fileManagerFiles;
|
||||
if (fileManagerFilter === 'aria') files = files.filter(f => f.fromAria);
|
||||
else if (fileManagerFilter === 'user') files = files.filter(f => !f.fromAria);
|
||||
if (fileManagerSearch) {
|
||||
const q = fileManagerSearch.toLowerCase();
|
||||
files = files.filter(f => f.name.toLowerCase().includes(q));
|
||||
}
|
||||
if (!files.length) {
|
||||
return <Text style={{color:'#555570', textAlign:'center', marginTop:20}}>Keine Dateien</Text>;
|
||||
}
|
||||
const fmtSize = (b: number) => b < 1024 ? `${b} B` : b < 1024*1024 ? `${(b/1024).toFixed(1)} KB` : `${(b/1024/1024).toFixed(1)} MB`;
|
||||
return files.map(f => (
|
||||
<View key={f.path} style={{
|
||||
backgroundColor:'#0D0D1A', padding:12, borderRadius:8, marginBottom:8,
|
||||
flexDirection:'row', alignItems:'center', gap:8,
|
||||
}}>
|
||||
<View style={{flex:1}}>
|
||||
<View style={{flexDirection:'row', alignItems:'center'}}>
|
||||
<View style={{
|
||||
backgroundColor: f.fromAria ? '#0096FF22' : '#34C75922',
|
||||
paddingHorizontal:6, paddingVertical:1, borderRadius:3, marginRight:6,
|
||||
}}>
|
||||
<Text style={{color: f.fromAria ? '#0096FF' : '#34C759', fontSize:9}}>
|
||||
{f.fromAria ? 'ARIA' : 'USER'}
|
||||
</Text>
|
||||
</View>
|
||||
<Text style={{color:'#E0E0F0', fontSize:13, flex:1}} numberOfLines={1}>{f.name}</Text>
|
||||
</View>
|
||||
<Text style={{color:'#555570', fontSize:10, marginTop:2}}>
|
||||
{fmtSize(f.size)} · {new Date(f.mtime).toLocaleString('de-DE')}
|
||||
</Text>
|
||||
</View>
|
||||
<TouchableOpacity
|
||||
onPress={() => {
|
||||
Alert.alert(
|
||||
'Datei löschen?',
|
||||
`"${f.name}"\n\nIn allen Chat-Bubbles wird sie als gelöscht markiert.`,
|
||||
[
|
||||
{ text: 'Abbrechen', style: 'cancel' },
|
||||
{ text: 'Löschen', style: 'destructive', onPress: () => {
|
||||
rvs.send('file_delete_request' as any, { path: f.path });
|
||||
ToastAndroid.show('Lösch-Befehl gesendet…', ToastAndroid.SHORT);
|
||||
}},
|
||||
],
|
||||
);
|
||||
}}
|
||||
style={{padding:8}}
|
||||
>
|
||||
<Text style={{color:'#FF6B6B', fontSize:18}}>🗑</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
));
|
||||
})()}
|
||||
</ScrollView>
|
||||
)}
|
||||
</View>
|
||||
</Modal>
|
||||
<ScrollView style={styles.container} contentContainerStyle={styles.content}>
|
||||
|
||||
{currentSection === null && (
|
||||
<>
|
||||
{SETTINGS_SECTIONS.map(s => (
|
||||
<TouchableOpacity
|
||||
key={s.id}
|
||||
style={styles.menuItem}
|
||||
onPress={() => setCurrentSection(s.id)}
|
||||
>
|
||||
<Text style={styles.menuItemIcon}>{s.icon}</Text>
|
||||
<View style={styles.menuItemTextWrap}>
|
||||
<Text style={styles.menuItemLabel}>{s.label}</Text>
|
||||
<Text style={styles.menuItemDesc}>{s.desc}</Text>
|
||||
</View>
|
||||
<Text style={styles.menuItemChevron}>›</Text>
|
||||
</TouchableOpacity>
|
||||
))}
|
||||
</>
|
||||
)}
|
||||
|
||||
{currentSection !== null && (
|
||||
<TouchableOpacity
|
||||
style={styles.subScreenHeader}
|
||||
onPress={() => setCurrentSection(null)}
|
||||
>
|
||||
<Text style={styles.subScreenBack}>‹</Text>
|
||||
<Text style={styles.subScreenTitle}>
|
||||
{SETTINGS_SECTIONS.find(s => s.id === currentSection)?.label || ''}
|
||||
</Text>
|
||||
</TouchableOpacity>
|
||||
)}
|
||||
|
||||
{/* === Verbindung === */}
|
||||
{currentSection === 'connection' && (<>
|
||||
<Text style={styles.sectionTitle}>Verbindung</Text>
|
||||
<View style={styles.card}>
|
||||
{/* Status-Anzeige */}
|
||||
@@ -582,8 +835,10 @@ const SettingsScreen: React.FC = () => {
|
||||
<Text style={styles.clearButtonText}>Log l{'\u00F6'}schen</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
</>)}
|
||||
|
||||
{/* === Modus === */}
|
||||
{currentSection === 'general' && (<>
|
||||
<Text style={styles.sectionTitle}>Betriebsmodus</Text>
|
||||
<View style={styles.card}>
|
||||
<ModeSelector currentModeId={currentMode} onModeChange={handleModeChange} />
|
||||
@@ -596,7 +851,11 @@ const SettingsScreen: React.FC = () => {
|
||||
<View style={styles.toggleInfo}>
|
||||
<Text style={styles.toggleLabel}>GPS-Position mitsenden</Text>
|
||||
<Text style={styles.toggleHint}>
|
||||
Standort wird automatisch an Nachrichten angehaengt
|
||||
Position (lat/lon) wird mit jeder Nachricht an ARIA mitgeschickt.
|
||||
Sie sieht's nur intern und nutzt es bei standortbezogenen Fragen
|
||||
("wo bin ich?", "Wetter hier?"), erwaehnt es sonst nicht.
|
||||
Im Chat-Verlauf bleibt die Bubble unveraendert — nur ARIAs
|
||||
Antwort kann darauf eingehen.
|
||||
</Text>
|
||||
</View>
|
||||
<Switch
|
||||
@@ -607,8 +866,10 @@ const SettingsScreen: React.FC = () => {
|
||||
/>
|
||||
</View>
|
||||
</View>
|
||||
</>)}
|
||||
|
||||
{/* === Spracheingabe (geraetelokal) === */}
|
||||
{currentSection === 'voice_input' && (<>
|
||||
<Text style={styles.sectionTitle}>Spracheingabe</Text>
|
||||
<View style={styles.card}>
|
||||
<Text style={styles.toggleLabel}>Stille-Toleranz</Text>
|
||||
@@ -676,46 +937,146 @@ const SettingsScreen: React.FC = () => {
|
||||
<Text style={styles.prerollButtonText}>+1</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
</View>
|
||||
|
||||
{/* === Wake-Word (geraetelokal) === */}
|
||||
<Text style={styles.sectionTitle}>Wake-Word</Text>
|
||||
<View style={styles.card}>
|
||||
<Text style={[styles.toggleLabel, {marginTop: 24}]}>Maximale Aufnahmedauer</Text>
|
||||
<Text style={styles.toggleHint}>
|
||||
Wenn ein Picovoice-Access-Key eingetragen ist, hoert die App passiv
|
||||
auf das gewaehlte Wake-Word — du kannst dich mit anderen unterhalten,
|
||||
Musik laufen lassen und mit "{wakeKeyword}" eine Konversation mit
|
||||
ARIA starten. Ohne Key oder bei Fehlschlag startet das Ohr direkt
|
||||
eine Konversation (klassischer Modus).
|
||||
Notbremse: nach so vielen Minuten wird die Aufnahme automatisch beendet,
|
||||
auch wenn keine Stille erkannt wurde. Nuetzlich fuer lange Erklaerungen
|
||||
oder Diktate. Default: {Math.round(MAX_RECORDING_DEFAULT_SEC / 60)} Min, max {Math.round(MAX_RECORDING_MAX_SEC / 60)} Min.
|
||||
</Text>
|
||||
|
||||
<Text style={[styles.toggleLabel, {marginTop: 16}]}>Picovoice Access Key</Text>
|
||||
<View style={{flexDirection: 'row', alignItems: 'center', gap: 8, marginTop: 6}}>
|
||||
<TextInput
|
||||
style={[styles.input, {flex: 1}]}
|
||||
value={wakeAccessKey}
|
||||
onChangeText={setWakeAccessKey}
|
||||
placeholder="kostenlos auf console.picovoice.ai"
|
||||
placeholderTextColor="#666680"
|
||||
secureTextEntry={!wakeAccessKeyVisible}
|
||||
autoCapitalize="none"
|
||||
autoCorrect={false}
|
||||
/>
|
||||
<View style={styles.prerollRow}>
|
||||
<TouchableOpacity
|
||||
onPress={() => setWakeAccessKeyVisible(v => !v)}
|
||||
style={{padding: 8}}
|
||||
style={styles.prerollButton}
|
||||
onPress={() => {
|
||||
const next = Math.max(MAX_RECORDING_MIN_SEC, maxRecordingSec - 60);
|
||||
setMaxRecordingSec(next);
|
||||
AsyncStorage.setItem(MAX_RECORDING_STORAGE_KEY, String(next));
|
||||
}}
|
||||
disabled={maxRecordingSec <= MAX_RECORDING_MIN_SEC}
|
||||
>
|
||||
<Text style={{fontSize: 18}}>{wakeAccessKeyVisible ? '🙈' : '👁'}</Text>
|
||||
<Text style={styles.prerollButtonText}>−1m</Text>
|
||||
</TouchableOpacity>
|
||||
<Text style={styles.prerollValue}>{Math.round(maxRecordingSec / 60)} min</Text>
|
||||
<TouchableOpacity
|
||||
style={styles.prerollButton}
|
||||
onPress={() => {
|
||||
const next = Math.min(MAX_RECORDING_MAX_SEC, maxRecordingSec + 60);
|
||||
setMaxRecordingSec(next);
|
||||
AsyncStorage.setItem(MAX_RECORDING_STORAGE_KEY, String(next));
|
||||
}}
|
||||
disabled={maxRecordingSec >= MAX_RECORDING_MAX_SEC}
|
||||
>
|
||||
<Text style={styles.prerollButtonText}>+1m</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
|
||||
<View style={{flexDirection: 'row', alignItems: 'center', marginTop: 24, gap: 8}}>
|
||||
<Text style={styles.toggleLabel}>Stille-Pegel (dB)</Text>
|
||||
<TouchableOpacity onPress={() => setShowVadInfo(true)} style={styles.infoBtn}>
|
||||
<Text style={styles.infoBtnText}>i</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
<Text style={styles.toggleHint}>
|
||||
Welcher Mikro-Pegel als "Stille" gilt. Standard: automatisch (Baseline aus
|
||||
den ersten 500ms). Manuell setzen wenn Auto nicht zuverlaessig greift.
|
||||
</Text>
|
||||
<View style={styles.prerollRow}>
|
||||
<TouchableOpacity
|
||||
style={styles.prerollButton}
|
||||
onPress={() => {
|
||||
const next = vadSilenceDb == null
|
||||
? VAD_SILENCE_DB_DEFAULT - 1
|
||||
: Math.max(VAD_SILENCE_DB_MIN, vadSilenceDb - 1);
|
||||
setVadSilenceDb(next);
|
||||
AsyncStorage.setItem(VAD_SILENCE_DB_OVERRIDE_KEY, String(next));
|
||||
}}
|
||||
>
|
||||
<Text style={styles.prerollButtonText}>−1</Text>
|
||||
</TouchableOpacity>
|
||||
<Text style={styles.prerollValue}>
|
||||
{vadSilenceDb == null ? 'auto' : `${vadSilenceDb} dB`}
|
||||
</Text>
|
||||
<TouchableOpacity
|
||||
style={styles.prerollButton}
|
||||
onPress={() => {
|
||||
const next = vadSilenceDb == null
|
||||
? VAD_SILENCE_DB_DEFAULT + 1
|
||||
: Math.min(VAD_SILENCE_DB_MAX, vadSilenceDb + 1);
|
||||
setVadSilenceDb(next);
|
||||
AsyncStorage.setItem(VAD_SILENCE_DB_OVERRIDE_KEY, String(next));
|
||||
}}
|
||||
>
|
||||
<Text style={styles.prerollButtonText}>+1</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
{vadSilenceDb != null && (
|
||||
<TouchableOpacity
|
||||
onPress={() => {
|
||||
setVadSilenceDb(null);
|
||||
AsyncStorage.removeItem(VAD_SILENCE_DB_OVERRIDE_KEY);
|
||||
}}
|
||||
style={{alignSelf: 'center', marginTop: 8, paddingVertical: 6, paddingHorizontal: 12}}
|
||||
>
|
||||
<Text style={{color: '#0096FF', fontSize: 13}}>↻ Auf automatisch zuruecksetzen</Text>
|
||||
</TouchableOpacity>
|
||||
)}
|
||||
</View>
|
||||
|
||||
<Modal
|
||||
visible={showVadInfo}
|
||||
transparent
|
||||
animationType="fade"
|
||||
onRequestClose={() => setShowVadInfo(false)}
|
||||
>
|
||||
<View style={styles.modalOverlay}>
|
||||
<View style={styles.modalCard}>
|
||||
<Text style={styles.modalTitle}>Stille-Pegel (dB)</Text>
|
||||
<Text style={styles.modalText}>
|
||||
Lautstaerken werden in Dezibel (dB) gemessen — negative Werte, je
|
||||
hoeher (naeher an 0), desto lauter.{'\n\n'}
|
||||
<Text style={{fontWeight: '700'}}>Standard:</Text> automatisch.
|
||||
Die App misst die ersten 500ms Hintergrundpegel und setzt die
|
||||
Stille-Schwelle auf Baseline + 6 dB. Funktioniert in den meisten
|
||||
Umgebungen.{'\n\n'}
|
||||
<Text style={{fontWeight: '700'}}>Manuell:</Text> Pegel unter dem
|
||||
eingestellten Wert gilt als "Stille" → Aufnahme stoppt.{'\n\n'}
|
||||
<Text style={{fontWeight: '700'}}>Faustregel:</Text>{'\n'}
|
||||
• <Text style={{color: '#FFD60A'}}>−45 dB</Text> sehr empfindlich (stoppt schnell, auch bei Atmen){'\n'}
|
||||
• <Text style={{color: '#34C759'}}>−38 dB</Text> ausgewogen (typische Bueroumgebung){'\n'}
|
||||
• <Text style={{color: '#FF6B6B'}}>−25 dB</Text> unempfindlich (laute Umgebung, nur klare Sprache zaehlt){'\n\n'}
|
||||
<Text style={{color: '#8888AA'}}>Niedrigere Zahl (z.B. −50) = sensibler.{'\n'}
|
||||
Hoehere Zahl (z.B. −20) = robuster gegen Hintergrundlaerm,
|
||||
braucht aber lautere Sprache.</Text>
|
||||
</Text>
|
||||
<TouchableOpacity
|
||||
style={[styles.connectButton, {marginTop: 16, alignSelf: 'stretch'}]}
|
||||
onPress={() => setShowVadInfo(false)}
|
||||
>
|
||||
<Text style={styles.connectButtonText}>OK</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
</View>
|
||||
</Modal>
|
||||
</>)}
|
||||
|
||||
{/* === Wake-Word (komplett on-device, openWakeWord) === */}
|
||||
{currentSection === 'wake_word' && (<>
|
||||
<Text style={styles.sectionTitle}>Wake-Word</Text>
|
||||
<View style={styles.card}>
|
||||
<Text style={styles.toggleHint}>
|
||||
Lokale Erkennung via openWakeWord (ONNX, on-device). Kein API-Key,
|
||||
kein Cloud-Roundtrip — Audio verlaesst das Geraet nicht. Wenn das Ohr
|
||||
aktiv ist, hoerst du normal mit; sagst du das Wake-Word, startet eine
|
||||
Konversation mit ARIA.
|
||||
</Text>
|
||||
|
||||
<Text style={[styles.toggleLabel, {marginTop: 16}]}>Wake-Word</Text>
|
||||
<Text style={styles.toggleHint}>
|
||||
Built-In: sofort verwendbar. "ARIA" als Custom-Keyword kommt spaeter
|
||||
ueber Diagnostic-Upload.
|
||||
Eigene Wake-Words via openWakeWord-Notebook trainierbar (gratis).
|
||||
Custom-Upload ueber Diagnostic kommt in einer spaeteren Version.
|
||||
</Text>
|
||||
<View style={{flexDirection: 'row', flexWrap: 'wrap', gap: 6, marginTop: 8}}>
|
||||
{BUILTIN_KEYWORDS.map(kw => (
|
||||
{WAKE_KEYWORDS.map(kw => (
|
||||
<TouchableOpacity
|
||||
key={kw}
|
||||
style={[
|
||||
@@ -728,7 +1089,7 @@ const SettingsScreen: React.FC = () => {
|
||||
styles.keywordChipText,
|
||||
wakeKeyword === kw && styles.keywordChipTextActive,
|
||||
]}>
|
||||
{kw}
|
||||
{KEYWORD_LABELS[kw]}
|
||||
</Text>
|
||||
</TouchableOpacity>
|
||||
))}
|
||||
@@ -740,8 +1101,8 @@ const SettingsScreen: React.FC = () => {
|
||||
onPress={async () => {
|
||||
setWakeStatus('Initialisiere...');
|
||||
try {
|
||||
const ok = await wakeWordService.configure(wakeAccessKey, wakeKeyword);
|
||||
setWakeStatus(ok ? `✅ "${wakeKeyword}" bereit` : '❌ Fehlgeschlagen — Access Key pruefen');
|
||||
const ok = await wakeWordService.configure(wakeKeyword);
|
||||
setWakeStatus(ok ? `✅ "${KEYWORD_LABELS[wakeKeyword as keyof typeof KEYWORD_LABELS]}" bereit` : '❌ Init-Fehler — Logs pruefen');
|
||||
} catch (err: any) {
|
||||
setWakeStatus('❌ ' + String(err?.message || err).slice(0, 80));
|
||||
}
|
||||
@@ -754,9 +1115,36 @@ const SettingsScreen: React.FC = () => {
|
||||
{!!wakeStatus && (
|
||||
<Text style={{marginTop: 8, fontSize: 12, color: '#8888AA'}}>{wakeStatus}</Text>
|
||||
)}
|
||||
|
||||
<View style={[styles.toggleRow, {marginTop: 20, borderTopWidth: 1, borderTopColor: '#1E1E2E', paddingTop: 16}]}>
|
||||
<View style={styles.toggleInfo}>
|
||||
<Text style={styles.toggleLabel}>Bereit-Sound abspielen</Text>
|
||||
<Text style={styles.toggleHint}>
|
||||
Kurzer Ding-Dong wenn das Mikro nach Wake-Word offen ist —
|
||||
akustische Bestaetigung dass du jetzt sprechen darfst.
|
||||
</Text>
|
||||
</View>
|
||||
<Switch
|
||||
value={wakeReadySound}
|
||||
onValueChange={async (val) => {
|
||||
setWakeReadySound(val);
|
||||
await setWakeReadySoundEnabled(val);
|
||||
if (val) {
|
||||
// Direkt eine Vorschau abspielen damit der User weiss wie's klingt.
|
||||
// playWakeReadySound checked das gerade gesetzte Flag — wenn val=true,
|
||||
// wird abgespielt; bei false bleibt es still.
|
||||
setTimeout(() => playWakeReadySound().catch(() => {}), 150);
|
||||
}
|
||||
}}
|
||||
trackColor={{ false: '#2A2A3E', true: '#0096FF' }}
|
||||
thumbColor={wakeReadySound ? '#FFFFFF' : '#666680'}
|
||||
/>
|
||||
</View>
|
||||
</View>
|
||||
</>)}
|
||||
|
||||
{/* === Sprachausgabe (geraetelokal) === */}
|
||||
{currentSection === 'voice_output' && (<>
|
||||
<Text style={styles.sectionTitle}>Sprachausgabe</Text>
|
||||
<View style={styles.card}>
|
||||
<View style={styles.toggleRow}>
|
||||
@@ -899,7 +1287,10 @@ const SettingsScreen: React.FC = () => {
|
||||
)}
|
||||
</View>
|
||||
|
||||
</>)}
|
||||
|
||||
{/* === Speicher === */}
|
||||
{currentSection === 'storage' && (<>
|
||||
<Text style={styles.sectionTitle}>Anhang-Speicher</Text>
|
||||
<View style={styles.card}>
|
||||
<View style={styles.toggleRow}>
|
||||
@@ -974,8 +1365,154 @@ const SettingsScreen: React.FC = () => {
|
||||
)}
|
||||
</View>
|
||||
|
||||
{/* === Update-Cache === */}
|
||||
<Text style={[styles.sectionTitle, {marginTop: 16}]}>Update-Cache</Text>
|
||||
<View style={styles.card}>
|
||||
<Text style={styles.toggleHint}>
|
||||
Heruntergeladene APK-Dateien fuer App-Updates. Werden automatisch
|
||||
beim App-Start und vor jedem neuen Download geloescht — der Button
|
||||
ist fuer den Notfall (z.B. wenn ein Download haengen geblieben ist).
|
||||
</Text>
|
||||
<Text style={[styles.storageSizeText, {marginTop: 8}]}>
|
||||
{apkCacheInfo === null ? '...' :
|
||||
apkCacheInfo.count === 0 ? 'leer' :
|
||||
`${apkCacheInfo.count} APK${apkCacheInfo.count === 1 ? '' : 's'} · ${apkCacheInfo.totalMB.toFixed(1)}MB`}
|
||||
</Text>
|
||||
<TouchableOpacity
|
||||
style={[styles.clearButton, {marginTop: 8, backgroundColor: 'rgba(255,59,48,0.15)'}]}
|
||||
onPress={async () => {
|
||||
const res = await updateService.cleanupOldApks();
|
||||
ToastAndroid.show(
|
||||
res.removed === 0
|
||||
? 'Update-Cache war schon leer'
|
||||
: `${res.removed} APK${res.removed === 1 ? '' : 's'} geloescht (${res.freedMB.toFixed(1)}MB frei)`,
|
||||
ToastAndroid.SHORT,
|
||||
);
|
||||
const info = await updateService.getApkCacheSize();
|
||||
setApkCacheInfo(info);
|
||||
}}
|
||||
>
|
||||
<Text style={[styles.clearButtonText, {color: '#FF3B30'}]}>Update-Cache leeren</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
|
||||
{/* === TTS-Cache === */}
|
||||
<Text style={[styles.sectionTitle, {marginTop: 16}]}>TTS-Cache</Text>
|
||||
<View style={styles.card}>
|
||||
<Text style={styles.toggleHint}>
|
||||
Gespeicherte Sprachausgaben (WAV pro Antwort) — werden fuer den
|
||||
Play-Button und Auto-Resume nach Anrufen genutzt. Loeschen
|
||||
unterbricht keine laufende Wiedergabe, alte Antworten lassen sich
|
||||
danach nur nicht mehr abspielen.
|
||||
</Text>
|
||||
<Text style={[styles.storageSizeText, {marginTop: 8}]}>
|
||||
{ttsCacheInfo === null ? '...' :
|
||||
ttsCacheInfo.count === 0 ? 'leer' :
|
||||
`${ttsCacheInfo.count} WAV${ttsCacheInfo.count === 1 ? '' : 's'} · ${ttsCacheInfo.totalMB.toFixed(1)}MB`}
|
||||
</Text>
|
||||
<TouchableOpacity
|
||||
style={[styles.clearButton, {marginTop: 8, backgroundColor: 'rgba(255,59,48,0.15)'}]}
|
||||
onPress={async () => {
|
||||
const res = await audioService.clearTtsCache();
|
||||
ToastAndroid.show(
|
||||
res.removed === 0
|
||||
? 'TTS-Cache war schon leer'
|
||||
: `${res.removed} WAV${res.removed === 1 ? '' : 's'} geloescht (${res.freedMB.toFixed(1)}MB frei)`,
|
||||
ToastAndroid.SHORT,
|
||||
);
|
||||
const info = await audioService.getTtsCacheSize();
|
||||
setTtsCacheInfo(info);
|
||||
}}
|
||||
>
|
||||
<Text style={[styles.clearButtonText, {color: '#FF3B30'}]}>TTS-Cache leeren</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
|
||||
{/* === Reparatur === */}
|
||||
<Text style={[styles.sectionTitle, {marginTop: 16}]}>Reparatur</Text>
|
||||
<View style={styles.card}>
|
||||
<Text style={styles.toggleHint}>
|
||||
Container gezielt neu starten — wenn die Voice-Bridge, das Gehirn
|
||||
oder die Vector-DB haengt. Restart dauert wenige Sekunden,
|
||||
laufende Anfragen gehen verloren.
|
||||
</Text>
|
||||
{[
|
||||
{ name: 'aria-bridge', label: '🚨 aria-bridge neu (Voice + RVS)' },
|
||||
{ name: 'aria-brain', label: '🚨 aria-brain neu (Agent + Memory)' },
|
||||
{ name: 'aria-qdrant', label: '🚨 aria-qdrant neu (Vector-DB)' },
|
||||
].map(c => (
|
||||
<TouchableOpacity
|
||||
key={c.name}
|
||||
style={[styles.clearButton, {marginTop: 8, backgroundColor: 'rgba(255,59,48,0.10)'}]}
|
||||
onPress={() => {
|
||||
Alert.alert(
|
||||
`${c.name} neu starten?`,
|
||||
'Restart in wenigen Sekunden. Laufende Anfragen gehen verloren.',
|
||||
[
|
||||
{ text: 'Abbrechen', style: 'cancel' },
|
||||
{ text: 'Neu starten', style: 'destructive', onPress: () => {
|
||||
rvs.send('container_restart' as any, { name: c.name });
|
||||
ToastAndroid.show(`${c.name} wird neu gestartet…`, ToastAndroid.LONG);
|
||||
}},
|
||||
],
|
||||
);
|
||||
}}
|
||||
>
|
||||
<Text style={[styles.clearButtonText, {color: '#FF3B30'}]}>{c.label}</Text>
|
||||
</TouchableOpacity>
|
||||
))}
|
||||
</View>
|
||||
|
||||
</>)}
|
||||
|
||||
{/* === Datei-Manager === */}
|
||||
{currentSection === 'files' && (<>
|
||||
<Text style={styles.sectionTitle}>Dateien</Text>
|
||||
<View style={styles.card}>
|
||||
<Text style={styles.toggleHint}>
|
||||
Alle Dateien aus <Text style={{fontFamily:'monospace'}}>/shared/uploads/</Text>
|
||||
— was ARIA generiert hat und was du hochgeladen hast.
|
||||
Beim Löschen wird die Bubble in App + Diagnostic als gelöscht markiert.
|
||||
</Text>
|
||||
<TouchableOpacity
|
||||
style={[styles.clearButton, {marginTop: 8, backgroundColor: 'rgba(0,150,255,0.15)'}]}
|
||||
onPress={() => {
|
||||
setFileManagerError('');
|
||||
setFileManagerLoading(true);
|
||||
setFileManagerOpen(true);
|
||||
rvs.send('file_list_request' as any, {});
|
||||
}}
|
||||
>
|
||||
<Text style={[styles.clearButtonText, {color: '#0096FF'}]}>{'📂 Datei-Manager öffnen'}</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
</>)}
|
||||
|
||||
{/* === Logs === */}
|
||||
{currentSection === 'protocol' && (<>
|
||||
<Text style={styles.sectionTitle}>Protokoll</Text>
|
||||
|
||||
{/* Verbose-Logging-Toggle */}
|
||||
<View style={styles.card}>
|
||||
<View style={styles.toggleRow}>
|
||||
<Text style={styles.toggleLabel}>Verbose Logging</Text>
|
||||
<Switch
|
||||
value={verboseLogging}
|
||||
onValueChange={(v) => {
|
||||
setVerboseLogging(v);
|
||||
setVerboseLoggingState(v);
|
||||
}}
|
||||
trackColor={{ false: '#3A3A52', true: '#0096FF' }}
|
||||
thumbColor={verboseLogging ? '#FFFFFF' : '#666680'}
|
||||
/>
|
||||
</View>
|
||||
<Text style={styles.toggleHint}>
|
||||
Wenn aus: console.log wird global stummgeschaltet (Speicher schonen).
|
||||
Warnungen und Fehler bleiben immer aktiv. Bei Bedarf einschalten zum
|
||||
Debuggen via adb logcat.
|
||||
</Text>
|
||||
</View>
|
||||
|
||||
<View style={styles.card}>
|
||||
{/* Tab-Umschalter */}
|
||||
<View style={styles.tabRow}>
|
||||
@@ -1053,14 +1590,17 @@ const SettingsScreen: React.FC = () => {
|
||||
<Text style={styles.clearButtonText}>Protokoll l{'\u00F6'}schen</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
</>)}
|
||||
|
||||
{/* === About === */}
|
||||
{currentSection === 'about' && (<>
|
||||
<Text style={styles.sectionTitle}>{'\u00DC'}ber</Text>
|
||||
<View style={styles.card}>
|
||||
<Text style={styles.aboutTitle}>ARIA Cockpit</Text>
|
||||
<Text style={styles.aboutVersion}>Version {require('../../package.json').version}</Text>
|
||||
<Text style={styles.aboutInfo}>
|
||||
Stefans Kommandozentrale f{'\u00FC'}r ARIA.{'\n'}
ARIA {'\u2014'} Autonomous Reasoning & Intelligence Assistant.{'\n'}
Stefans Kommandozentrale.{'\n'}
Gebaut mit React Native + TypeScript.
|
||||
</Text>
|
||||
<TouchableOpacity
|
||||
@@ -1074,6 +1614,7 @@ const SettingsScreen: React.FC = () => {
|
||||
<Text style={styles.connectButtonText}>Auf Updates pr{'\u00FC'}fen</Text>
|
||||
</TouchableOpacity>
|
||||
</View>
|
||||
</>)}
|
||||
|
||||
{/* Platz am Ende */}
|
||||
<View style={styles.bottomSpacer} />
|
||||
@@ -1102,6 +1643,58 @@ const styles = StyleSheet.create({
|
||||
marginBottom: 8,
|
||||
marginLeft: 4,
|
||||
},
|
||||
menuItem: {
|
||||
flexDirection: 'row',
|
||||
alignItems: 'center',
|
||||
backgroundColor: '#1E1E2E',
|
||||
borderRadius: 10,
|
||||
paddingVertical: 14,
|
||||
paddingHorizontal: 14,
|
||||
marginBottom: 8,
|
||||
},
|
||||
menuItemIcon: {
|
||||
fontSize: 22,
|
||||
marginRight: 14,
|
||||
width: 28,
|
||||
textAlign: 'center',
|
||||
},
|
||||
menuItemTextWrap: {
|
||||
flex: 1,
|
||||
},
|
||||
menuItemLabel: {
|
||||
color: '#FFFFFF',
|
||||
fontSize: 16,
|
||||
fontWeight: '600',
|
||||
},
|
||||
menuItemDesc: {
|
||||
color: '#8888AA',
|
||||
fontSize: 12,
|
||||
marginTop: 2,
|
||||
},
|
||||
menuItemChevron: {
|
||||
color: '#8888AA',
|
||||
fontSize: 24,
|
||||
fontWeight: '300',
|
||||
marginLeft: 8,
|
||||
},
|
||||
subScreenHeader: {
|
||||
flexDirection: 'row',
|
||||
alignItems: 'center',
|
||||
paddingVertical: 8,
|
||||
marginBottom: 8,
|
||||
},
|
||||
subScreenBack: {
|
||||
color: '#0096FF',
|
||||
fontSize: 32,
|
||||
fontWeight: '300',
|
||||
marginRight: 12,
|
||||
lineHeight: 36,
|
||||
},
|
||||
subScreenTitle: {
|
||||
color: '#FFFFFF',
|
||||
fontSize: 20,
|
||||
fontWeight: '700',
|
||||
},
|
||||
card: {
|
||||
backgroundColor: '#12122A',
|
||||
borderRadius: 14,
|
||||
@@ -1459,6 +2052,48 @@ const styles = StyleSheet.create({
|
||||
textAlign: 'center',
|
||||
},
|
||||
|
||||
infoBtn: {
|
||||
width: 22,
|
||||
height: 22,
|
||||
borderRadius: 11,
|
||||
borderWidth: 1.5,
|
||||
borderColor: '#0096FF',
|
||||
alignItems: 'center',
|
||||
justifyContent: 'center',
|
||||
},
|
||||
infoBtnText: {
|
||||
color: '#0096FF',
|
||||
fontSize: 13,
|
||||
fontWeight: '700',
|
||||
fontStyle: 'italic',
|
||||
lineHeight: 16,
|
||||
},
|
||||
modalOverlay: {
|
||||
flex: 1,
|
||||
backgroundColor: 'rgba(0,0,0,0.7)',
|
||||
justifyContent: 'center',
|
||||
alignItems: 'center',
|
||||
padding: 20,
|
||||
},
|
||||
modalCard: {
|
||||
backgroundColor: '#1E1E2E',
|
||||
borderRadius: 14,
|
||||
padding: 20,
|
||||
maxWidth: 460,
|
||||
width: '100%',
|
||||
},
|
||||
modalTitle: {
|
||||
color: '#FFFFFF',
|
||||
fontSize: 18,
|
||||
fontWeight: '700',
|
||||
marginBottom: 12,
|
||||
},
|
||||
modalText: {
|
||||
color: '#E0E0F0',
|
||||
fontSize: 14,
|
||||
lineHeight: 20,
|
||||
},
|
||||
|
||||
keywordChip: {
|
||||
backgroundColor: '#1E1E2E',
|
||||
borderWidth: 1,
|
||||
|
||||
+555
-34
@@ -6,10 +6,11 @@
|
||||
* Nutzt react-native-audio-recorder-player fuer Aufnahme.
|
||||
*/
|
||||
|
||||
import { Platform, PermissionsAndroid, NativeModules } from 'react-native';
|
||||
import { Platform, PermissionsAndroid, NativeModules, ToastAndroid, NativeEventEmitter } from 'react-native';
|
||||
import Sound from 'react-native-sound';
|
||||
import RNFS from 'react-native-fs';
|
||||
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||
import { acquireBackgroundAudio, releaseBackgroundAudio, stopBackgroundAudio } from './backgroundAudio';
|
||||
import AudioRecorderPlayer, {
|
||||
AudioEncoderAndroidType,
|
||||
AudioSourceAndroidType,
|
||||
@@ -40,6 +41,8 @@ const { AudioFocus, PcmStreamPlayer } = NativeModules as {
|
||||
requestDuck: () => Promise<boolean>;
|
||||
requestExclusive: () => Promise<boolean>;
|
||||
release: () => Promise<boolean>;
|
||||
kickReleaseMedia: () => Promise<boolean>;
|
||||
getMode?: () => Promise<number>;
|
||||
};
|
||||
PcmStreamPlayer?: {
|
||||
start: (sampleRate: number, channels: number, prerollSeconds: number) => Promise<boolean>;
|
||||
@@ -72,11 +75,41 @@ const AUDIO_SAMPLE_RATE = 16000;
|
||||
const AUDIO_CHANNELS = 1;
|
||||
const AUDIO_ENCODING = 'audio/wav';
|
||||
|
||||
// VAD (Voice Activity Detection) — Stille-Erkennung
|
||||
const VAD_SILENCE_THRESHOLD_DB = -45; // dB unter dem als "Stille" gilt
|
||||
const VAD_SPEECH_THRESHOLD_DB = -28; // dB ueber dem als "Sprache" gilt (Sprach-Gate) — hoeher = weniger Umgebungsgeraeusche
|
||||
// VAD (Voice Activity Detection) — Stille-Erkennung.
|
||||
// Fallback-Werte falls die adaptive Baseline-Messung fehlschlaegt (z.B. weil
|
||||
// das Mikro keine metering-Updates liefert). Adaptive Werte werden zur
|
||||
// Laufzeit aus den ersten BASELINE_SAMPLES gemessen und auf baseline+offset
|
||||
// gesetzt — funktioniert in lauten wie leisen Umgebungen.
|
||||
const VAD_SILENCE_FALLBACK_DB = -38; // Fallback Stille-Schwelle
|
||||
const VAD_SPEECH_FALLBACK_DB = -22; // Fallback Sprach-Schwelle
|
||||
const VAD_SILENCE_OFFSET_DB = 6; // Sprache = Baseline + 6dB
|
||||
const VAD_SPEECH_OFFSET_DB = 12; // sicheres Speech = Baseline + 12dB
|
||||
const VAD_BASELINE_SAMPLES = 5; // 5 × 100ms = 500ms Baseline
|
||||
const VAD_SPEECH_MIN_MS = 500; // ms Sprache bevor Aufnahme zaehlt — laenger = keine Huestler/Klopfer mehr
|
||||
|
||||
// Override fuer die Stille-Schwelle — wenn gesetzt, wird die adaptive Baseline
|
||||
// ignoriert. Nuetzlich wenn die adaptive Logik in spezifischen Umgebungen
|
||||
// nicht zuverlaessig greift. Range -55..-15 dB. Speech-Schwelle wird auf
|
||||
// override+10 dB gesetzt (Speech muss klar lauter als Stille sein).
|
||||
export const VAD_SILENCE_DB_DEFAULT = -38; // wenn User Manuell-Modus waehlt
|
||||
export const VAD_SILENCE_DB_MIN = -85; // extrem empfindlich, praktisch alles gilt als Sprache
|
||||
export const VAD_SILENCE_DB_MAX = -15; // sehr unempfindlich, nur lautes Reden gilt
|
||||
export const VAD_SILENCE_DB_OVERRIDE_KEY = 'aria_vad_silence_db_override';
|
||||
|
||||
/** Liefert den manuellen Override-Wert oder null wenn "automatisch". */
export async function loadVadSilenceDbOverride(): Promise<number | null> {
try {
const raw = await AsyncStorage.getItem(VAD_SILENCE_DB_OVERRIDE_KEY);
if (raw == null || raw === '') return null;
const n = parseFloat(raw);
if (!isFinite(n)) return null;
if (n < VAD_SILENCE_DB_MIN || n > VAD_SILENCE_DB_MAX) return null;
return n;
} catch {
return null;
}
}
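
// Veranschaulichung (hypothetischer Helper, nicht Teil des Service-Codes):
// so ergeben sich die effektiven Schwellen aus Override bzw. gemessener
// Baseline — gleiche Offsets und Caps wie im Metering-Callback weiter unten.
function resolveVadThresholds(
  overrideDb: number | null,
  baselineDb: number,
): { silenceDb: number; speechDb: number } {
  if (overrideDb != null) {
    // Manueller Modus: Speech-Schwelle liegt 10 dB ueber der Stille-Schwelle.
    return { silenceDb: overrideDb, speechDb: overrideDb + 10 };
  }
  // Automatik: Baseline + Offset, auf sinnvolle Bereiche gekappt.
  const silenceDb = Math.max(-50, Math.min(baselineDb + VAD_SILENCE_OFFSET_DB, -28));
  const speechDb = Math.max(-40, Math.min(baselineDb + VAD_SPEECH_OFFSET_DB, -18));
  return { silenceDb, speechDb };
}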
|
||||
// VAD-Stille (in Sekunden) — wie lange Sprechpause toleriert wird, bevor
|
||||
// die Aufnahme automatisch beendet wird. Einstellbar in den App-Settings.
|
||||
export const VAD_SILENCE_DEFAULT_SEC = 2.8;
|
||||
@@ -138,7 +171,24 @@ async function loadVadSilenceMs(): Promise<number> {
|
||||
|
||||
// Max-Dauer einer Aufnahme (Notbremse gegen Runaway-Loops). Auf 2 Minuten
|
||||
// hochgezogen damit auch laengere Erklaerungen durchgehen.
|
||||
const MAX_RECORDING_MS = 120000;
|
||||
// Default 5 Minuten — konfigurierbar in den App-Settings (1-30 Minuten).
|
||||
export const MAX_RECORDING_DEFAULT_SEC = 300;
|
||||
export const MAX_RECORDING_MIN_SEC = 60;
|
||||
export const MAX_RECORDING_MAX_SEC = 1800;
|
||||
export const MAX_RECORDING_STORAGE_KEY = 'aria_max_recording_sec';
|
||||
|
||||
export async function loadMaxRecordingMs(): Promise<number> {
|
||||
try {
|
||||
const raw = await AsyncStorage.getItem(MAX_RECORDING_STORAGE_KEY);
|
||||
if (raw != null) {
|
||||
const n = parseFloat(raw);
|
||||
if (isFinite(n) && n >= MAX_RECORDING_MIN_SEC && n <= MAX_RECORDING_MAX_SEC) {
|
||||
return Math.round(n * 1000);
|
||||
}
|
||||
}
|
||||
} catch {}
|
||||
return MAX_RECORDING_DEFAULT_SEC * 1000;
|
||||
}
|
||||
|
||||
// Pre-Roll: Wie lange Audio im AudioTrack-Buffer liegt bevor play() startet.
|
||||
// Einstellbar via Diagnostic/Settings (Key: aria_tts_preroll_sec).
|
||||
@@ -198,6 +248,12 @@ class AudioService {
|
||||
private focusReleaseTimer: ReturnType<typeof setTimeout> | null = null;
|
||||
private readonly FOCUS_RELEASE_DELAY_MS = 800;
|
||||
|
||||
// Conversation-Mode: solange aktiv (Wake-Word Status 'conversing' ODER
|
||||
// wir wissen "ARIA spricht gerade in einem Multi-Turn-Dialog"), halten wir
|
||||
// den AudioFocus DAUERHAFT. Der per-Stream-Release wird unterdrueckt,
|
||||
// damit Spotify nicht in Render-Pausen oder zwischen Antworten zurueckkehrt.
|
||||
private _conversationFocusActive: boolean = false;
|
||||
|
||||
// VAD State
|
||||
private vadEnabled: boolean = false;
|
||||
private lastSpeechTime: number = 0;
|
||||
@@ -206,19 +262,75 @@ class AudioService {
|
||||
// Latch damit der Silence-Callback pro Aufnahme genau einmal feuert
|
||||
private silenceFired: boolean = false;
|
||||
private noSpeechTimer: ReturnType<typeof setTimeout> | null = null;
|
||||
// Adaptive Schwellen — werden in den ersten 500ms aus dem Mikro-Pegel
|
||||
// gemessen. baseline = avg dB der ersten 5 Samples, dann:
|
||||
// silence = baseline + VAD_SILENCE_OFFSET_DB (6dB ueber ambient)
|
||||
// speech = baseline + VAD_SPEECH_OFFSET_DB (12dB ueber ambient = klares Reden)
|
||||
// Funktioniert sowohl im stillen Buero als auch im lauten Cafe.
|
||||
private vadBaselineSamples: number[] = [];
|
||||
private vadAdaptiveSilenceDb: number = VAD_SILENCE_FALLBACK_DB;
|
||||
private vadAdaptiveSpeechDb: number = VAD_SPEECH_FALLBACK_DB;
|
||||
|
||||
// Interruption-Tracking fuer Auto-Resume nach Anruf:
|
||||
// - playbackStartTime: ms-Timestamp wenn AudioTrack tatsaechlich anfing
|
||||
// abzuspielen (= _firePlaybackStarted)
|
||||
// - currentPlaybackMsgId: welche Antwort lief gerade
|
||||
// - pausedPosition / pausedMessageId: bei captureInterruption gemerkt
|
||||
private playbackStartTime: number = 0;
|
||||
private currentPlaybackMsgId: string = '';
|
||||
private pausedPosition: number = 0; // Sekunden in der Audio-Datei
|
||||
private pausedMessageId: string = '';
|
||||
private resumeSound: Sound | null = null; // halten damit GC nicht zuschlaegt
|
||||
// Leading-Silence wird im Native vor den Chunks geschrieben — beim
|
||||
// Position-Berechnen vom playbackStarted abziehen
|
||||
private readonly LEADING_SILENCE_SEC = 0.3;
|
||||
|
||||
constructor() {
|
||||
this.recorder = new AudioRecorderPlayer();
|
||||
this.recorder.setSubscriptionDuration(0.1); // 100ms Metering-Updates
|
||||
// Native Event: AudioTrack hat alle Samples wirklich durchgespielt (nach
|
||||
// dem finally{}-Block im Writer-Thread). ERST jetzt darf AudioFocus
|
||||
// freigegeben werden — sonst spielt Spotify schon waehrend ARIA noch
|
||||
// redet (PcmStreamPlayer.end() returnt mit 15s-Cap viel zu frueh).
|
||||
if (PcmStreamPlayer) {
|
||||
try {
|
||||
const emitter = new NativeEventEmitter(NativeModules.PcmStreamPlayer as any);
|
||||
emitter.addListener('PcmPlaybackFinished', () => {
|
||||
console.log('[Audio] PcmPlaybackFinished — Focus jetzt freigeben');
|
||||
this._releaseFocusDeferred();
|
||||
});
|
||||
} catch (err) {
|
||||
console.warn('[Audio] PcmPlaybackFinished-Subscription fehlgeschlagen:', err);
|
||||
}
|
||||
}
|
||||
// App-Start: orphaned aria_tts_*.wav / aria_recording_*.mp4 aus dem Cache
|
||||
// wegraeumen. Sammeln sich an wenn Sound mid-playback gestoppt wird (Anruf,
|
||||
// Mute, Barge-In) — der completion-callback feuert dann nicht und die Datei
|
||||
// bleibt liegen. 5min-Threshold damit gerade aktiv geschriebene Files sicher
|
||||
// sind. cleanupOnStartup ist async, blockt den Constructor nicht.
|
||||
this._cleanupStaleCacheFiles(5 * 60 * 1000).catch(() => {});
|
||||
}
|
||||
|
||||
/** AudioFocus mit kleiner Verzoegerung freigeben — Spotify/YouTube
|
||||
* springen sonst im Gap zwischen zwei TTS-Streams (oder wenn ARIA
|
||||
* eine zweite Antwort direkt hinterherschickt) kurz wieder an. */
|
||||
* eine zweite Antwort direkt hinterherschickt) kurz wieder an.
|
||||
* Im Conversation-Mode (Wake-Word conversing) wird das Release komplett
|
||||
* unterdrueckt — der Focus bleibt fuer die ganze Konversation gehalten. */
|
||||
private _releaseFocusDeferred(): void {
|
||||
if (this._conversationFocusActive) {
|
||||
console.log('[Audio] _releaseFocusDeferred: Conversation aktiv → kein Release');
|
||||
this._cancelDeferredFocusRelease();
|
||||
return;
|
||||
}
|
||||
this._cancelDeferredFocusRelease();
|
||||
console.log('[Audio] _releaseFocusDeferred: in %dms', this.FOCUS_RELEASE_DELAY_MS);
|
||||
this.focusReleaseTimer = setTimeout(() => {
|
||||
this.focusReleaseTimer = null;
|
||||
if (this._conversationFocusActive) {
|
||||
console.log('[Audio] Focus-Release abgebrochen (Conversation jetzt aktiv)');
|
||||
return;
|
||||
}
|
||||
console.log('[Audio] AudioFocus jetzt released');
|
||||
AudioFocus?.release().catch(() => {});
|
||||
}, this.FOCUS_RELEASE_DELAY_MS);
|
||||
}
|
||||
@@ -230,6 +342,182 @@ class AudioService {
|
||||
}
|
||||
}
|
||||
|
||||
/** Conversation-Mode beginnt → AudioFocus dauerhaft halten (Spotify bleibt
|
||||
* pausiert). Idempotent: mehrfaches Aufrufen ist sicher. */
|
||||
acquireConversationFocus(): void {
|
||||
if (this._conversationFocusActive) return;
|
||||
this._conversationFocusActive = true;
|
||||
this._cancelDeferredFocusRelease();
|
||||
console.log('[Audio] Conversation-Focus aktiv (Spotify bleibt gepaust)');
|
||||
AudioFocus?.requestDuck().catch(() => {});
|
||||
}
|
||||
|
||||
/** Conversation-Mode endet → Focus darf wieder freigegeben werden
|
||||
* (verzoegert, damit eine direkt folgende Antwort nichts kaputtmacht). */
|
||||
releaseConversationFocus(): void {
|
||||
if (!this._conversationFocusActive) return;
|
||||
this._conversationFocusActive = false;
|
||||
console.log('[Audio] Conversation-Focus inaktiv');
|
||||
this._releaseFocusDeferred();
|
||||
}
|
||||
|
||||
/** TTS-Wiedergabe hart stoppen — z.B. fuer Barge-In. Buffer wird geleert,
|
||||
* kein Auto-Resume. Released auch sofort den AudioFocus. */
|
||||
haltAllPlayback(reason: string = ''): void {
|
||||
console.log('[Audio] haltAllPlayback: %s', reason || '(no reason)');
|
||||
this._conversationFocusActive = false;
|
||||
this.stopPlayback();
|
||||
}
|
||||
|
||||
/** Speziell fuer Anrufe: AudioTrack stoppen + Focus releasen, ABER pcm-
|
||||
* Buffer + messageId behalten damit weitere Chunks der unterbrochenen
|
||||
* Antwort weiter gesammelt werden. isFinal schreibt dann die WAV trotz
|
||||
* Anruf — und resumeFromInterruption findet sie. */
|
||||
pauseForCall(reason: string = ''): void {
|
||||
console.log('[Audio] pauseForCall: %s', reason || '(no reason)');
|
||||
this._conversationFocusActive = false;
|
||||
this._pausedForCall = true;
|
||||
// Queue + isPlaying ruecksetzen — sonst klemmt der naechste Play-Button
|
||||
// (playAudio sieht isPlaying=true und ruft _playNext nicht mehr auf).
|
||||
this.audioQueue = [];
|
||||
this.isPlaying = false;
|
||||
// Foreground-Service stoppen — Notification waere sonst irrefuehrend
|
||||
stopBackgroundAudio().catch(() => {});
|
||||
// SoundPool/RNSound (Resume-Sound, Play-Button) stoppen — nicht relevant fuer Auto-Resume
|
||||
if (this.currentSound) {
|
||||
try { this.currentSound.stop(); this.currentSound.release(); } catch {}
|
||||
this.currentSound = null;
|
||||
}
|
||||
if (this.resumeSound) {
|
||||
try { this.resumeSound.stop(); this.resumeSound.release(); } catch {}
|
||||
this.resumeSound = null;
|
||||
}
|
||||
// AudioTrack hart stoppen damit nichts mehr aus dem Lautsprecher kommt.
|
||||
// pcmStreamActive bleibt true, pcmBuffer/pcmMessageId BLEIBEN — damit
|
||||
// weitere Chunks gesammelt werden und isFinal die WAV schreiben kann.
|
||||
PcmStreamPlayer?.stop().catch(() => {});
|
||||
this._cancelDeferredFocusRelease();
|
||||
AudioFocus?.release().catch(() => {});
|
||||
}
|
||||
|
||||
/** Anruf vorbei → weitere Chunks duerfen wieder abgespielt werden.
|
||||
* resumeFromInterruption uebernimmt die Wiedergabe ab gemerkter Position. */
|
||||
endCallPause(): void {
|
||||
if (!this._pausedForCall) return;
|
||||
this._pausedForCall = false;
|
||||
console.log('[Audio] endCallPause');
|
||||
}
|
||||
|
||||
/** Bei Anruf: aktuelle Wiedergabe-Position merken damit wir nach dem
|
||||
* Auflegen von dort weitermachen koennen. Returnt Position in Sekunden
|
||||
* oder 0 wenn nichts spielte.
|
||||
*
|
||||
* Idempotent: bei mehrfachem Aufruf (ringing → offhook) wird die Position
|
||||
* vom ersten Mal NICHT ueberschrieben. playbackStartTime laeuft stumpf
|
||||
* weiter obwohl das Audio gestoppt ist — der erste Halt ist der echte. */
|
||||
captureInterruption(): number {
|
||||
if (this.pausedMessageId) {
|
||||
console.log('[Audio] captureInterruption: bereits erfasst (msgId=%s pos=%ss) — skip',
|
||||
this.pausedMessageId, this.pausedPosition.toFixed(2));
|
||||
return this.pausedPosition;
|
||||
}
|
||||
if (!this.playbackStartTime || !this.currentPlaybackMsgId) {
|
||||
console.log('[Audio] captureInterruption: nichts spielte (startTime=%s, msgId=%s)',
|
||||
this.playbackStartTime, this.currentPlaybackMsgId || '(leer)');
|
||||
this.pausedPosition = 0;
|
||||
this.pausedMessageId = '';
|
||||
return 0;
|
||||
}
const elapsedMs = Date.now() - this.playbackStartTime;
const positionSec = Math.max(0, elapsedMs / 1000 - this.LEADING_SILENCE_SEC);
this.pausedPosition = positionSec;
this.pausedMessageId = this.currentPlaybackMsgId;
console.log('[Audio] captureInterruption: msgId=%s pos=%ss',
this.pausedMessageId, positionSec.toFixed(2));
return positionSec;
}
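
// Rechenbeispiel (angenommene Werte, nur zur Veranschaulichung): lief die
// Wiedergabe seit 12.3 s, ergibt sich positionSec = 12.3 - 0.3 = 12.0 s
// (LEADING_SILENCE_SEC wird abgezogen). _playFromPathAtPosition setzt beim
// Resume playbackStartTime auf Date.now() - (12.0 + 0.3) * 1000, so dass ein
// zweiter Anruf beim naechsten captureInterruption wieder die fortlaufende
// Position liefert.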
|
||||
/** Nach Anruf-Ende: ab gemerkter Position weiterspielen. Wenn Cache noch
|
||||
* nicht geschrieben (final kam waehrend Anruf vielleicht doch nicht),
|
||||
* warten bis maxWaitMs und dann probieren. Returnt true wenn gestartet. */
|
||||
async resumeFromInterruption(maxWaitMs: number = 30000): Promise<boolean> {
|
||||
const msgId = this.pausedMessageId;
|
||||
const position = this.pausedPosition;
|
||||
if (!msgId) {
|
||||
console.log('[Audio] resumeFromInterruption: kein gemerkter Stand — skip');
|
||||
return false;
|
||||
}
|
||||
console.log('[Audio] resumeFromInterruption: starte fuer msgId=%s pos=%ss',
|
||||
msgId, position.toFixed(2));
|
||||
this.pausedMessageId = ''; // konsumieren
|
||||
const cachePath = `${RNFS.DocumentDirectoryPath}/tts_cache/${msgId}.wav`;
|
||||
const startTime = Date.now();
|
||||
while (Date.now() - startTime < maxWaitMs) {
|
||||
try {
|
||||
if (await RNFS.exists(cachePath)) {
|
||||
return await this._playFromPathAtPosition(cachePath, position);
|
||||
}
|
||||
} catch {}
|
||||
await new Promise(r => setTimeout(r, 500));
|
||||
}
console.warn('[Audio] resumeFromInterruption: WAV %s nicht binnen %dms verfuegbar',
msgId, maxWaitMs);
return false;
}
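
// Nutzungsskizze (Annahme: ein Telefon-State-Listener existiert an anderer
// Stelle der App — onCallStateChanged ist hier nur ein illustrativer Name):
//
//   onCallStateChanged(state => {
//     if (state === 'ringing' || state === 'offhook') {
//       audioService.captureInterruption();    // Position der laufenden Antwort merken
//       audioService.pauseForCall('call');      // AudioTrack stoppen, Chunks weiter sammeln
//     } else if (state === 'idle') {
//       audioService.endCallPause();
//       audioService.resumeFromInterruption();  // ab gemerkter Position weiterspielen
//     }
//   });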
|
||||
private async _playFromPathAtPosition(path: string, positionSec: number): Promise<boolean> {
|
||||
try {
|
||||
// Bestehende laufende Wiedergabe abbrechen damit wir sauber starten
|
||||
if (this.resumeSound) {
|
||||
try { this.resumeSound.stop(); this.resumeSound.release(); } catch {}
|
||||
this.resumeSound = null;
|
||||
}
|
||||
const sound = await new Promise<Sound>((resolve, reject) => {
|
||||
const s = new Sound(path.replace(/^file:\/\//, ''), '', (err) =>
|
||||
err ? reject(err) : resolve(s));
|
||||
});
|
||||
// Audio-Focus anfordern damit Spotify pausiert
|
||||
this._cancelDeferredFocusRelease();
|
||||
AudioFocus?.requestDuck().catch(() => {});
|
||||
this._firePlaybackStarted();
|
||||
this.isPlaying = true;
|
||||
this.resumeSound = sound;
|
||||
// Tracking auch fuer den Resume-Sound aktualisieren — sonst kann
|
||||
// captureInterruption bei einem zweiten Anruf die Position nicht
|
||||
// mehr ermitteln (playbackStartTime waere von der ersten Wiedergabe).
|
||||
const msgIdMatch = path.match(/([^/\\]+)\.wav$/i);
|
||||
if (msgIdMatch) this.currentPlaybackMsgId = msgIdMatch[1];
|
||||
// Virtuelle Start-Zeit so setzen, dass captureInterruption (das den
|
||||
// Leading-Silence-Offset wieder abzieht) die korrekte Position liefert.
|
||||
this.playbackStartTime = Date.now() - (positionSec + this.LEADING_SILENCE_SEC) * 1000;
|
||||
console.log('[Audio] Resume von Position %ss aus %s',
|
||||
positionSec.toFixed(2), path);
|
||||
sound.setCurrentTime(Math.max(0, positionSec));
|
||||
sound.play((success) => {
|
||||
if (!success) console.warn('[Audio] Resume-Wiedergabe fehlgeschlagen');
|
||||
try { sound.release(); } catch {}
|
||||
if (this.resumeSound === sound) this.resumeSound = null;
|
||||
this.isPlaying = false;
|
||||
this.playbackFinishedListeners.forEach(cb => {
|
||||
try { cb(); } catch (e) { console.warn('[Audio] cb err:', e); }
|
||||
});
|
||||
this._releaseFocusDeferred();
|
||||
});
|
||||
return true;
|
||||
} catch (err: any) {
|
||||
console.warn('[Audio] _playFromPathAtPosition fehlgeschlagen:', err?.message || err);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/** True wenn ARIA gerade was abspielt — egal ob WAV-Queue oder PCM-Stream.
|
||||
* Nuetzlich fuer "Barge-In": wenn der User spricht waehrend ARIA spricht,
|
||||
* soll die ARIA-Wiedergabe abgebrochen + die neue User-Message verarbeitet
|
||||
* werden ("ach vergiss es, mach lieber X"). */
|
||||
isPlayingAudio(): boolean {
return this.isPlaying || this.pcmStreamActive;
}
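
// Nutzungsskizze Barge-In (Annahme: Aufruf aus dem Wake-Word-Handler, wenn
// waehrend ARIAs Wiedergabe das Wake-Word erkannt wurde):
//
//   if (audioService.isPlayingAudio()) {
//     audioService.haltAllPlayback('barge-in');  // laufende Antwort verwerfen
//   }
//   // danach die neue Aufnahme fuer die User-Nachricht starten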
|
||||
// --- Berechtigungen ---
|
||||
|
||||
async requestMicrophonePermission(): Promise<boolean> {
|
||||
@@ -287,6 +575,12 @@ class AudioService {
|
||||
|
||||
this.recordingPath = `${RNFS.CachesDirectoryPath}/aria_recording_${Date.now()}.mp4`;
|
||||
|
||||
// Foreground-Service VOR dem AudioRecord starten — sonst blockt Android
|
||||
// den Background-Mic-Zugriff (foregroundServiceType=microphone muss zum
|
||||
// Zeitpunkt des startRecorder() schon aktiv sein, sonst greifen die
|
||||
// Background-Mic-Restrictions ab Android 11+).
|
||||
await acquireBackgroundAudio('rec');
|
||||
|
||||
// Aufnahme mit Metering starten
|
||||
await this.recorder.startRecorder(this.recordingPath, {
|
||||
AudioEncoderAndroid: AudioEncoderAndroidType.AAC,
|
||||
@@ -301,8 +595,36 @@ class AudioService {
|
||||
const db = e.currentMetering ?? -160;
|
||||
this.meterListeners.forEach(cb => cb(db));
|
||||
|
||||
// Adaptive Baseline: erste 5 Samples (~500ms) sammeln, dann Schwellen
|
||||
// anpassen. -160 (kein Metering) ignorieren — sonst wird die Baseline
|
||||
// sinnlos niedrig.
|
||||
if (this.vadBaselineSamples.length < VAD_BASELINE_SAMPLES) {
|
||||
if (db > -100) {
|
||||
this.vadBaselineSamples.push(db);
|
||||
if (this.vadBaselineSamples.length === VAD_BASELINE_SAMPLES) {
|
||||
// Minimum statt Mittelwert: robust gegen Spike-Samples (z.B. wenn
|
||||
// der User direkt nach Wake-Word sofort spricht oder das Wake-Word-
|
||||
// Echo noch im Mikro ist). Min ist der ruhigste Moment.
|
||||
const lowest = Math.min(...this.vadBaselineSamples);
|
||||
const rawSilence = lowest + VAD_SILENCE_OFFSET_DB;
|
||||
const rawSpeech = lowest + VAD_SPEECH_OFFSET_DB;
|
||||
// Cap auf einen vernuenftigen Bereich:
|
||||
// - Silence-Schwelle nicht ueber -28dB (sonst zaehlt Hintergrund-
|
||||
// geraeusch dauerhaft als "Sprache" → VAD feuert nie)
|
||||
// - Silence-Schwelle nicht unter -50dB (sonst zu strikt)
|
||||
this.vadAdaptiveSilenceDb = Math.max(-50, Math.min(rawSilence, -28));
|
||||
this.vadAdaptiveSpeechDb = Math.max(-40, Math.min(rawSpeech, -18));
|
||||
const msg = `VAD: ambient=${lowest.toFixed(0)}dB stille>${this.vadAdaptiveSilenceDb.toFixed(0)}dB`;
|
||||
console.log('[Audio] %s speech>%s (raw silence=%s speech=%s)',
|
||||
msg, this.vadAdaptiveSpeechDb.toFixed(1),
|
||||
rawSilence.toFixed(1), rawSpeech.toFixed(1));
|
||||
try { ToastAndroid.show(msg, ToastAndroid.SHORT); } catch {}
|
||||
}
|
||||
}
}
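// Rechenbeispiel (angenommene Pegel): ruhiges Zimmer mit lowest = -52 dB →
// rawSilence = -46, rawSpeech = -40 → nach den Caps silence = -46 dB,
// speech = -40 dB. Lautes Cafe mit lowest = -30 dB → rawSilence = -24,
// rawSpeech = -18 → der -28-dB-Cap greift: silence = -28 dB, speech = -18 dB.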
|
||||
// Sprach-Gate: Erkennen ob tatsaechlich gesprochen wird
|
||||
if (db > VAD_SPEECH_THRESHOLD_DB) {
|
||||
if (db > this.vadAdaptiveSpeechDb) {
|
||||
if (!this.speechDetected && this.speechStartTime === 0) {
|
||||
this.speechStartTime = Date.now();
|
||||
}
|
||||
@@ -317,7 +639,7 @@ class AudioService {
|
||||
|
||||
// VAD: Stille erkennen (nur wenn Sprache erkannt wurde)
|
||||
if (this.vadEnabled) {
|
||||
if (db > VAD_SILENCE_THRESHOLD_DB) {
|
||||
if (db > this.vadAdaptiveSilenceDb) {
|
||||
this.lastSpeechTime = Date.now();
|
||||
}
|
||||
}
|
||||
@@ -327,6 +649,23 @@ class AudioService {
|
||||
this.lastSpeechTime = Date.now();
|
||||
this.speechDetected = false;
|
||||
this.speechStartTime = 0;
|
||||
// VAD-Adaptive zurueckgesetzt: Baseline wird in den ersten 500ms neu
|
||||
// gemessen. Bis dahin gelten die Fallback-Schwellen.
|
||||
this.vadBaselineSamples = [];
|
||||
this.vadAdaptiveSilenceDb = VAD_SILENCE_FALLBACK_DB;
|
||||
this.vadAdaptiveSpeechDb = VAD_SPEECH_FALLBACK_DB;
|
||||
|
||||
// Manueller Override aus Settings — wenn gesetzt, wird die adaptive
|
||||
// Baseline-Messung uebersteuert. User-Wahl gewinnt vor Auto-Magic.
|
||||
const dbOverride = await loadVadSilenceDbOverride();
|
||||
if (dbOverride != null) {
|
||||
this.vadAdaptiveSilenceDb = dbOverride;
|
||||
this.vadAdaptiveSpeechDb = dbOverride + 10; // Speech klar ueber Stille
|
||||
this.vadBaselineSamples = new Array(VAD_BASELINE_SAMPLES).fill(0); // Baseline-Sammeln deaktivieren
|
||||
const msg = `VAD: manuell stille>${dbOverride}dB`;
|
||||
console.log('[Audio] %s', msg);
|
||||
try { ToastAndroid.show(msg, ToastAndroid.SHORT); } catch {}
|
||||
}
|
||||
this.setState('recording');
|
||||
|
||||
// Andere Apps waehrend der Aufnahme pausieren (Musik, Videos etc.)
|
||||
@@ -354,18 +693,19 @@ class AudioService {
|
||||
};
|
||||
if (autoStop) {
|
||||
const vadSilenceMs = await loadVadSilenceMs();
|
||||
const maxRecordingMs = await loadMaxRecordingMs();
|
||||
console.log('[Audio] startRecording: autoStop=true, VAD-Stille=%dms, MAX=%dms',
|
||||
vadSilenceMs, MAX_RECORDING_MS);
|
||||
vadSilenceMs, maxRecordingMs);
|
||||
this.vadTimer = setInterval(() => {
|
||||
const silenceDuration = Date.now() - this.lastSpeechTime;
|
||||
if (silenceDuration >= vadSilenceMs) {
|
||||
fireSilenceOnce(`VAD ${silenceDuration}ms Stille (Schwelle=${vadSilenceMs}ms)`);
|
||||
}
|
||||
}, 200);
|
||||
// Notbremse: Nach MAX_RECORDING_MS zwangsweise stoppen
|
||||
// Notbremse: Nach maxRecordingMs zwangsweise stoppen
|
||||
this.maxDurationTimer = setTimeout(() => {
|
||||
fireSilenceOnce(`Max-Dauer ${MAX_RECORDING_MS}ms`);
|
||||
}, MAX_RECORDING_MS);
|
||||
fireSilenceOnce(`Max-Dauer ${maxRecordingMs}ms`);
|
||||
}, maxRecordingMs);
|
||||
}
|
||||
|
||||
// Conversation-Window: Wenn der User innerhalb noSpeechTimeoutMs nicht
|
||||
@@ -454,8 +794,15 @@ class AudioService {
|
||||
/** Base64-kodiertes Audio in die Queue stellen und abspielen */
|
||||
async playAudio(base64Data: string): Promise<void> {
|
||||
if (!base64Data) return;
|
||||
|
||||
// Mute-Flag respektieren — robust gegen Race-Conditions zwischen User-
|
||||
// Klick auf Mute und einem TTS-Chunk der im selben Tick eintrifft.
|
||||
if (this._muted) {
|
||||
console.log('[Audio] playAudio: muted=true → skip');
|
||||
return;
|
||||
}
|
||||
this.audioQueue.push(base64Data);
|
||||
console.log('[Audio] playAudio: queued (queue=%d isPlaying=%s pausedForCall=%s)',
|
||||
this.audioQueue.length, this.isPlaying, this._pausedForCall);
|
||||
if (!this.isPlaying) {
|
||||
this._playNext();
|
||||
}
|
||||
@@ -521,7 +868,16 @@ class AudioService {
|
||||
final?: boolean;
|
||||
silent?: boolean;
|
||||
}): Promise<string> {
|
||||
const silent = !!payload.silent;
|
||||
// _stoppedMessageId: User hat diese Antwort mid-Wiedergabe gestoppt
|
||||
// (Mute geklickt). Auch wenn Mute jetzt wieder aus ist, soll diese
|
||||
// Antwort nicht weiterspielen. Erst eine neue messageId resetted das.
|
||||
const incomingMsgId = payload.messageId || '';
|
||||
const stoppedByUser = !!this._stoppedMessageId && incomingMsgId === this._stoppedMessageId;
|
||||
// Globaler Mute-Flag uebersteuert das per-Call silent — verhindert
|
||||
// Race-Conditions wenn der User zwischen Chunks den Mute-Knopf drueckt.
|
||||
// _pausedForCall: AudioTrack ist gestoppt waehrend Anruf — Chunks weiter
|
||||
// sammeln (fuer WAV-Cache), aber NICHT in den Player schicken.
|
||||
const silent = !!payload.silent || this._muted || this._pausedForCall || stoppedByUser;
|
||||
if (!silent && !PcmStreamPlayer) {
|
||||
console.warn('[Audio] PcmStreamPlayer Native Module nicht verfuegbar');
|
||||
return '';
|
||||
@@ -547,6 +903,28 @@ class AudioService {
|
||||
this.pcmBuffer = [];
|
||||
this.pcmBytesCollected = 0;
|
||||
}
|
||||
// Resume-Sound stoppen falls noch aktiv (User hat nach Anruf eine
|
||||
// neue Frage gestellt — die alte interruptierte Antwort ist obsolet).
|
||||
if (this.resumeSound) {
|
||||
try { this.resumeSound.stop(); this.resumeSound.release(); } catch {}
|
||||
this.resumeSound = null;
|
||||
}
|
||||
// Pending Auto-Resume verwerfen wenn die neue Antwort eine andere
|
||||
// messageId hat. Sonst spielt nach 30s-Wartezeit der Resume die
|
||||
// ueberholte Antwort ab.
|
||||
if (this.pausedMessageId && this.pausedMessageId !== messageId) {
|
||||
console.log('[Audio] Neue TTS-Antwort (msgId=%s) — Auto-Resume fuer %s verworfen',
|
||||
messageId, this.pausedMessageId);
|
||||
this.pausedMessageId = '';
|
||||
this.pausedPosition = 0;
|
||||
}
|
||||
// Stop-Marker zuruecksetzen wenn neue messageId — neue Antwort darf
|
||||
// wieder normal abspielen, egal ob Mute zwischendurch aktiv war.
|
||||
if (this._stoppedMessageId && this._stoppedMessageId !== messageId) {
|
||||
console.log('[Audio] Neue Antwort (msgId=%s) — Stop-Marker fuer %s zurueckgesetzt',
|
||||
messageId, this._stoppedMessageId);
|
||||
this._stoppedMessageId = '';
|
||||
}
|
||||
this.pcmStreamActive = true;
|
||||
this.pcmMessageId = messageId;
|
||||
this.pcmSampleRate = sampleRate;
|
||||
@@ -564,6 +942,7 @@ class AudioService {
|
||||
}
|
||||
this._cancelDeferredFocusRelease();
|
||||
AudioFocus?.requestDuck().catch(() => {});
|
||||
this._firePlaybackStarted();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -580,13 +959,16 @@ class AudioService {
|
||||
|
||||
if (isFinal) {
|
||||
if (!silent) {
|
||||
// end() resolved jetzt erst wenn der native Writer-Thread fertig
|
||||
// ist (alle Samples ausgespielt) — danach AudioFocus verzoegert
|
||||
// freigeben, damit Spotify/YouTube nicht im Mikro-Gap zwischen zwei
|
||||
// ARIA-Antworten wieder hochdrehen. Wenn ein neuer Stream innerhalb
|
||||
// FOCUS_RELEASE_DELAY_MS startet, wird das Release abgebrochen.
|
||||
// end() signalisiert dem Writer "keine weiteren Chunks". Aber WIR
|
||||
// releasen den AudioFocus NICHT hier — der writer braucht u.U. noch
|
||||
// 30+ Sekunden bis der Buffer wirklich abgespielt ist. Den release
|
||||
// triggert das native Event "PcmPlaybackFinished" wenn AudioTrack
|
||||
// wirklich am Ende ist (siehe ensurePlaybackFinishedListener).
|
||||
try { await PcmStreamPlayer!.end(); } catch {}
|
||||
this._releaseFocusDeferred();
|
||||
// playbackFinished-Listener informieren (UI-Logik)
|
||||
this.playbackFinishedListeners.forEach(cb => {
|
||||
try { cb(); } catch (e) { console.warn('[Audio] playbackFinished cb err:', e); }
|
||||
});
|
||||
}
|
||||
this.pcmStreamActive = false;
|
||||
|
||||
@@ -660,7 +1042,10 @@ class AudioService {
|
||||
}
|
||||
}
|
||||
|
||||
/** Audio aus lokaler Datei (file:// Pfad) in die Queue und abspielen. */
|
||||
/** Audio aus lokaler Datei (file:// Pfad) in die Queue und abspielen.
|
||||
* Setzt zusaetzlich playbackStartTime + currentPlaybackMsgId damit ein
|
||||
* Anruf waehrend dieses Playbacks korrekt erfasst wird (ohne dieses
|
||||
* Tracking liefert captureInterruption nichts → kein Auto-Resume). */
|
||||
async playFromPath(filePath: string): Promise<void> {
|
||||
if (!filePath) return;
|
||||
try {
|
||||
@@ -669,6 +1054,14 @@ class AudioService {
|
||||
console.warn('[Audio] Cache-Datei existiert nicht mehr:', cleanPath);
|
||||
return;
|
||||
}
|
||||
// Dateiname ohne .wav als messageId nehmen (egal ob UUID oder andere ID)
|
||||
const fileMatch = cleanPath.match(/([^/\\]+)\.wav$/i);
|
||||
const msgId = fileMatch ? fileMatch[1] : '';
|
||||
console.log('[Audio] playFromPath: cleanPath=%s → msgId=%s', cleanPath, msgId || '(leer)');
|
||||
if (msgId) {
|
||||
this.currentPlaybackMsgId = msgId;
|
||||
this.playbackStartTime = Date.now() - this.LEADING_SILENCE_SEC * 1000;
|
||||
}
|
||||
const b64 = await RNFS.readFile(cleanPath, 'base64');
|
||||
this.playAudio(b64);
|
||||
} catch (err) {
|
||||
@@ -678,6 +1071,7 @@ class AudioService {
|
||||
|
||||
// Callback wenn alle Audio-Teile abgespielt sind
|
||||
private playbackFinishedListeners: (() => void)[] = [];
|
||||
private playbackStartedListeners: (() => void)[] = [];
|
||||
|
||||
onPlaybackFinished(callback: () => void): () => void {
|
||||
this.playbackFinishedListeners.push(callback);
|
||||
@@ -686,6 +1080,30 @@ class AudioService {
|
||||
};
|
||||
}
|
||||
|
||||
/** Callback wenn ARIAs TTS-Wiedergabe startet — fuer Wake-Word-parallel-
|
||||
* Listening waehrend ARIA spricht (Barge-In via "Computer" sagen). */
|
||||
onPlaybackStarted(callback: () => void): () => void {
|
||||
this.playbackStartedListeners.push(callback);
|
||||
return () => {
|
||||
this.playbackStartedListeners = this.playbackStartedListeners.filter(cb => cb !== callback);
|
||||
};
|
||||
}
|
||||
|
||||
private _firePlaybackStarted(): void {
|
||||
// Tracking fuer Auto-Resume nach Anruf-Pause: NUR setzen wenn ein
|
||||
// PCM-Stream laeuft (Live-TTS). Bei Play-Button / Resume-Sound hat der
|
||||
// Caller (playFromPath / _playFromPathAtPosition) das Tracking schon
|
||||
// korrekt mit der msgId aus dem Pfad gesetzt — sonst wuerden wir hier
|
||||
// mit leerem pcmMessageId ueberschreiben.
|
||||
if (this.pcmMessageId) {
|
||||
this.playbackStartTime = Date.now();
|
||||
this.currentPlaybackMsgId = this.pcmMessageId;
|
||||
}
|
||||
this.playbackStartedListeners.forEach(cb => {
|
||||
try { cb(); } catch (e) { console.warn('[Audio] playbackStarted listener err:', e); }
|
||||
});
|
||||
}
|
||||
|
||||
/** Naechstes Audio aus der Queue abspielen */
|
||||
private async _playNext(): Promise<void> {
|
||||
if (this.audioQueue.length === 0) {
|
||||
@@ -698,10 +1116,11 @@ class AudioService {
|
||||
return;
|
||||
}
|
||||
|
||||
// Beim ersten Playback-Start: andere Apps ducken
|
||||
// Beim ersten Playback-Start: andere Apps ducken + Listener informieren
|
||||
if (!this.isPlaying) {
|
||||
this._cancelDeferredFocusRelease();
|
||||
AudioFocus?.requestDuck().catch(() => {});
|
||||
this._firePlaybackStarted();
|
||||
}
|
||||
this.isPlaying = true;
|
||||
|
||||
@@ -732,11 +1151,13 @@ class AudioService {
|
||||
}
|
||||
|
||||
this.currentSound = sound;
|
||||
console.log('[Audio] Sound.play startet (path=%s)', soundPath);
|
||||
|
||||
// Naechstes Audio schon vorbereiten waehrend dieses abspielt
|
||||
this._preloadNext();
|
||||
|
||||
sound.play((success) => {
|
||||
console.log('[Audio] Sound.play callback: success=%s queue=%d', success, this.audioQueue.length);
|
||||
if (!success) console.warn('[Audio] Wiedergabe fehlgeschlagen');
|
||||
sound.release();
|
||||
this.currentSound = null;
|
||||
@@ -763,8 +1184,51 @@ class AudioService {
|
||||
}
|
||||
}
|
||||
|
||||
/** Mute: alle eingehenden TTS-Chunks/WAVs werden ignoriert bis wieder
|
||||
* unmuted. Robuster als ein React-Ref weil hier kein Re-Render-Race ist
|
||||
* — die Bridge kann einen Chunk im selben JS-Tick liefern in dem der
|
||||
* User Mute geklickt hat. */
|
||||
private _muted: boolean = false;
|
||||
/** Anruf laeuft → Chunks werden nur in den Cache-Buffer gepusht, nicht
|
||||
* abgespielt. Wird in pauseForCall gesetzt, in endCallPause/resumeFrom-
|
||||
* Interruption zurueckgenommen. */
|
||||
private _pausedForCall: boolean = false;
|
||||
/** Wenn der User mid-Wiedergabe Mute drueckt: messageId der ABGEBROCHENEN
|
||||
* Antwort merken. Folge-Chunks dieser msgId werden silent ignoriert, auch
|
||||
* wenn der User Mute wieder ausschaltet — kein "Resume mid-Antwort". Eine
|
||||
* NEUE messageId resetted das, dann spielt's wieder normal. */
|
||||
private _stoppedMessageId: string = '';
|
||||
setMuted(muted: boolean): void {
|
||||
console.log('[Audio] setMuted: %s (currentSound=%s pcmStreamActive=%s)',
|
||||
muted, this.currentSound ? 'aktiv' : 'null', this.pcmStreamActive);
|
||||
this._muted = muted;
|
||||
if (muted) {
|
||||
// Aktuell laufende Antwort als "verworfen" markieren — nachfolgende
|
||||
// chunks dieser msgId werden silent gehalten auch wenn der User Mute
|
||||
// gleich wieder ausschaltet. Erst eine NEUE Antwort darf wieder reden.
|
||||
const activeMsgId = this.pcmMessageId || this.currentPlaybackMsgId;
|
||||
if (activeMsgId) {
|
||||
this._stoppedMessageId = activeMsgId;
|
||||
console.log('[Audio] Antwort %s als gestoppt markiert', activeMsgId);
|
||||
}
|
||||
this.stopPlayback();
|
||||
}
|
||||
}
|
||||
isMuted(): boolean { return this._muted; }
|
||||
|
||||
/** Laufende Wiedergabe stoppen + Queue leeren */
|
||||
stopPlayback(): void {
|
||||
// Idempotent: wenn nichts mehr aktiv ist, NICHT noch einen Focus-Release/
|
||||
// Kick-Cycle anstossen — Re-Renders triggern setMuted oft mehrfach hinter-
|
||||
// einander, und jeder weitere Kick lässt Spotify nochmal kurz pausieren.
|
||||
const hasAnything = !!(this.currentSound || this.resumeSound || this.preloadedSound
|
||||
|| this.pcmStreamActive || this.audioQueue.length || this.isPlaying);
|
||||
if (!hasAnything) return;
|
||||
console.log('[Audio] stopPlayback: currentSound=%s queue=%d pcm=%s',
|
||||
this.currentSound ? 'aktiv' : 'null', this.audioQueue.length, this.pcmStreamActive);
|
||||
// Foreground-Service auch stoppen — sonst bleibt die Notification haengen
|
||||
// wenn Wiedergabe abgebrochen wird (Anruf, Cancel, Barge-In).
|
||||
stopBackgroundAudio().catch(() => {});
|
||||
this.audioQueue = [];
|
||||
this.isPlaying = false;
|
||||
if (this.currentSound) {
|
||||
@@ -772,21 +1236,31 @@ class AudioService {
|
||||
this.currentSound.release();
|
||||
this.currentSound = null;
|
||||
}
|
||||
if (this.resumeSound) {
|
||||
this.resumeSound.stop();
|
||||
this.resumeSound.release();
|
||||
this.resumeSound = null;
|
||||
}
|
||||
if (this.preloadedSound) {
|
||||
this.preloadedSound.release();
|
||||
this.preloadedSound = null;
|
||||
if (this.preloadedPath) RNFS.unlink(this.preloadedPath).catch(() => {});
|
||||
this.preloadedPath = '';
|
||||
}
|
||||
// PCM-Stream ebenfalls hart stoppen (Cancel/Abbruch)
|
||||
if (this.pcmStreamActive) {
|
||||
PcmStreamPlayer?.stop().catch(() => {});
|
||||
this.pcmStreamActive = false;
|
||||
this.pcmBuffer = [];
|
||||
this.pcmBytesCollected = 0;
|
||||
this.pcmMessageId = '';
|
||||
}
|
||||
// Audio-Focus sofort freigeben — User hat explizit abgebrochen
|
||||
// PCM-Stream ebenfalls hart stoppen (Cancel/Abbruch).
|
||||
// pcmStreamActive wird beim isFinal-Chunk schon false gesetzt — der
|
||||
// AudioTrack spielt aber noch sekundenlang aus seinem Buffer ab. Daher
|
||||
// IMMER stop() aufrufen, ohne den Flag zu pruefen (ist idempotent).
|
||||
PcmStreamPlayer?.stop().catch(() => {});
|
||||
this.pcmStreamActive = false;
|
||||
this.pcmBuffer = [];
|
||||
this.pcmBytesCollected = 0;
|
||||
this.pcmMessageId = '';
|
||||
// Audio-Focus sofort freigeben — User hat explizit abgebrochen.
|
||||
// Unser Focus war TRANSIENT, Spotify resumed darum automatisch beim
|
||||
// Abandon. Den frueheren kickReleaseMedia haben wir entfernt: er
|
||||
// requestete USAGE_MEDIA mit GAIN (permanent), was Spotify als
|
||||
// "user-action stopp" interpretierte und Auto-Resume verhinderte.
|
||||
this._cancelDeferredFocusRelease();
|
||||
AudioFocus?.release().catch(() => {});
|
||||
}
|
||||
@@ -828,19 +1302,29 @@ class AudioService {
|
||||
}
|
||||
}
|
||||
|
||||
/** Alte Aufnahme- und TTS-Files aus dem Cache loeschen (>30s alt). */
|
||||
private async _cleanupStaleCacheFiles(): Promise<void> {
|
||||
/** Alte Aufnahme- und TTS-Files aus dem Cache loeschen.
|
||||
* Default 30s — verwendet beim Mikro-Start (kurze Lebensdauer reicht).
|
||||
* App-Start nutzt 5min damit gerade aktive Files nicht erwischt werden. */
|
||||
private async _cleanupStaleCacheFiles(maxAgeMs: number = 30000): Promise<void> {
|
||||
try {
|
||||
const files = await RNFS.readDir(RNFS.CachesDirectoryPath);
|
||||
const now = Date.now();
|
||||
let removed = 0;
|
||||
let freedBytes = 0;
|
||||
for (const f of files) {
|
||||
if (!f.isFile()) continue;
|
||||
if (!f.name.startsWith('aria_recording_') && !f.name.startsWith('aria_tts_')) continue;
|
||||
const age = now - (f.mtime ? f.mtime.getTime() : 0);
|
||||
if (age > 30000) {
|
||||
if (age > maxAgeMs) {
|
||||
freedBytes += parseInt(f.size as any, 10) || 0;
|
||||
await RNFS.unlink(f.path).catch(() => {});
|
||||
removed += 1;
|
||||
}
|
||||
}
|
||||
if (removed > 0) {
|
||||
console.log('[Audio] Cache-Cleanup: %d Files entfernt, %.1fMB freigegeben',
|
||||
removed, freedBytes / 1024 / 1024);
|
||||
}
|
||||
} catch {
|
||||
// silent — cleanup ist best-effort
|
||||
}
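For illustration, assuming hypothetical call sites inside AudioService, the two cleanup horizons described above would look like this:

// Hypothetical call sites (sketch only):
//   on mic start — short horizon, default parameter:
//     await this._cleanupStaleCacheFiles();
//   on app start — longer horizon so files that are still playing survive:
//     await this._cleanupStaleCacheFiles(5 * 60 * 1000);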
|
||||
@@ -867,6 +1351,43 @@ class AudioService {
|
||||
// silent
|
||||
}
|
||||
}
|
||||
|
||||
/** Aktuelle Groesse des TTS-Caches. */
|
||||
async getTtsCacheSize(): Promise<{ count: number; totalMB: number }> {
|
||||
let count = 0;
|
||||
let total = 0;
|
||||
try {
|
||||
const dir = `${RNFS.DocumentDirectoryPath}/tts_cache`;
|
||||
if (await RNFS.exists(dir)) {
|
||||
const files = await RNFS.readDir(dir);
|
||||
for (const f of files) {
|
||||
if (!f.isFile() || !f.name.endsWith('.wav')) continue;
|
||||
count += 1;
|
||||
total += parseInt(f.size as any, 10) || 0;
|
||||
}
|
||||
}
|
||||
} catch {}
|
||||
return { count, totalMB: total / 1024 / 1024 };
|
||||
}
|
||||
|
||||
/** TTS-Cache komplett leeren (Settings-Button). */
|
||||
async clearTtsCache(): Promise<{ removed: number; freedMB: number }> {
|
||||
let removed = 0;
|
||||
let freed = 0;
|
||||
try {
|
||||
const dir = `${RNFS.DocumentDirectoryPath}/tts_cache`;
|
||||
if (!(await RNFS.exists(dir))) return { removed: 0, freedMB: 0 };
|
||||
const files = await RNFS.readDir(dir);
|
||||
for (const f of files) {
|
||||
if (!f.isFile() || !f.name.endsWith('.wav')) continue;
|
||||
const size = parseInt(f.size as any, 10) || 0;
|
||||
await RNFS.unlink(f.path).catch(() => {});
|
||||
removed += 1;
|
||||
freed += size;
|
||||
}
|
||||
} catch {}
|
||||
return { removed, freedMB: freed / 1024 / 1024 };
|
||||
}
|
||||
}
|
||||
|
||||
// Singleton
|
||||
|
||||
@@ -0,0 +1,76 @@
|
||||
/**
 * Background-Audio: ARIAs TTS, Mic-Aufnahme und Wake-Word-Lauschen sollen
 * auch bei minimierter App weiterlaufen. Wir starten dafuer einen Foreground-
 * Service mit foregroundServiceType=mediaPlayback|microphone, der eine
 * persistente Notification zeigt waehrend irgendein Audio-Slot aktiv ist.
 *
 * Mehrere Komponenten koennen den Service unabhaengig "halten":
 *   - 'tts'  : ARIA spricht
 *   - 'rec'  : Aufnahme laeuft
 *   - 'wake' : Wake-Word lauscht passiv (Ohr aktiv)
 *
 * Solange mindestens ein Slot aktiv ist, laeuft der Service. Wenn alle
 * Slots leer sind, wird er gestoppt. Der Notification-Text passt sich an
 * den hoechstprioren Slot an (tts > rec > wake).
 */

import { NativeModules } from 'react-native';

interface BackgroundAudioNative {
  start(reason: string): Promise<boolean>;
  stop(): Promise<boolean>;
}

const { BackgroundAudio } = NativeModules as { BackgroundAudio?: BackgroundAudioNative };

type Slot = 'tts' | 'rec' | 'wake';

const slots = new Set<Slot>();

// Prioritaet fuer den Notification-Text — hoechste zuerst.
const PRIORITY: Slot[] = ['tts', 'rec', 'wake'];

function topReason(): string {
  for (const s of PRIORITY) {
    if (slots.has(s)) return s;
  }
  return '';
}

async function applyState(): Promise<void> {
  if (!BackgroundAudio) return;
  if (slots.size === 0) {
    try { await BackgroundAudio.stop(); } catch {}
    console.log('[BackgroundAudio] Service gestoppt (keine Slots)');
    return;
  }
  const reason = topReason();
  try {
    await BackgroundAudio.start(reason);
    console.log('[BackgroundAudio] Service aktiv (slot=%s, slots=%s)',
      reason, [...slots].join('+'));
  } catch (err: any) {
    console.warn('[BackgroundAudio] start fehlgeschlagen:', err?.message || err);
  }
}

export async function acquireBackgroundAudio(slot: Slot): Promise<void> {
  if (slots.has(slot)) return;
  slots.add(slot);
  await applyState();
}

export async function releaseBackgroundAudio(slot: Slot): Promise<void> {
  if (!slots.has(slot)) return;
  slots.delete(slot);
  await applyState();
}

export function backgroundAudioActive(): boolean {
  return slots.size > 0;
}

// --- Legacy API (nur tts-Slot) — fuer Aufruf-Sites die noch nichts vom Slot-
// system wissen. Mappt auf den 'tts'-Slot. ---
export const startBackgroundAudio = () => acquireBackgroundAudio('tts');
export const stopBackgroundAudio = () => releaseBackgroundAudio('tts');
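A minimal usage sketch of the slot API above, assuming a hypothetical recorder wrapper as the call site:

import { acquireBackgroundAudio, releaseBackgroundAudio } from './backgroundAudio';

// Hypothetical recorder wrapper: hold the 'rec' slot only while the recording
// runs, so the foreground service (and its notification) goes away as soon as
// no slot is active anymore.
async function recordWithForegroundService(record: () => Promise<void>): Promise<void> {
  await acquireBackgroundAudio('rec');
  try {
    await record();
  } finally {
    await releaseBackgroundAudio('rec');
  }
}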
@@ -0,0 +1,41 @@
|
||||
/**
 * Verbose-Logging-Toggle: console.log laesst sich global stummschalten.
 * console.warn/console.error bleiben immer an — Fehler will man immer sehen.
 *
 * Default: an (true). Toggle ueber Settings → Protokoll → Verbose Logging.
 * Beim Start wird der gespeicherte Wert geladen, vorher loggen wir normal.
 */

import AsyncStorage from '@react-native-async-storage/async-storage';

export const VERBOSE_LOGGING_KEY = 'aria_verbose_logging';

// Original-console.log retten, damit wir die Wrapper jederzeit wieder
// "scharf" stellen koennen (sonst waere ein Toggle-an nach -aus tot).
const originalLog = console.log.bind(console);
const noop = () => {};

let _verbose = true;

function applyState(): void {
  console.log = _verbose ? originalLog : noop;
}

/** Wert aus AsyncStorage laden und anwenden. Beim App-Start aufrufen. */
export async function initLogger(): Promise<void> {
  try {
    const v = await AsyncStorage.getItem(VERBOSE_LOGGING_KEY);
    _verbose = v !== 'false'; // default: true
  } catch {}
  applyState();
}

export function isVerboseLogging(): boolean {
  return _verbose;
}

export function setVerboseLogging(verbose: boolean): void {
  _verbose = verbose;
  applyState();
  AsyncStorage.setItem(VERBOSE_LOGGING_KEY, String(verbose)).catch(() => {});
}
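A minimal wiring sketch, assuming an app entry point and a hypothetical Settings switch handler:

import { initLogger, isVerboseLogging, setVerboseLogging } from './logger';

// App start: load the persisted value before the first screens mount.
initLogger().catch(() => {});

// Hypothetical handler behind Settings → Protokoll → Verbose Logging:
function onVerboseToggle(value: boolean): void {
  setVerboseLogging(value);
  // console.warn stays active even when verbose logging is off.
  console.warn('[Settings] Verbose logging is now %s', isVerboseLogging() ? 'on' : 'off');
}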
@@ -0,0 +1,222 @@
|
||||
/**
|
||||
* PhoneCall-Service — pausiert ARIA bei Telefonaten:
|
||||
*
|
||||
* 1. Klassischer Mobilfunk-Anruf via TelephonyManager (PhoneCallModule.kt)
|
||||
* Status: idle / ringing / offhook
|
||||
*
|
||||
* 2. VoIP-Anrufe (WhatsApp, Signal, Discord, Telegram, Teams, ...) via
|
||||
* AudioFocus-Loss-Event (AudioFocusModule.kt). Diese Apps requesten
|
||||
* AUDIOFOCUS_GAIN_TRANSIENT_EXCLUSIVE wenn ein Anruf reinkommt — wir
|
||||
* bekommen ein "loss" Event und reagieren genauso wie auf RINGING.
|
||||
*
|
||||
* In beiden Faellen wird audioService.haltAllPlayback() + wakeWordService.
|
||||
* pauseForCall() gerufen. Bei call-end (idle / focus-gain) → resumeFromCall.
|
||||
*
|
||||
* Permission READ_PHONE_STATE ist nur fuer Pfad 1 noetig — Pfad 2 braucht
|
||||
* keine extra Berechtigung weil unser eigener AudioFocus-Listener feuert.
|
||||
*/
|
||||
|
||||
import {
|
||||
NativeEventEmitter,
|
||||
NativeModules,
|
||||
PermissionsAndroid,
|
||||
Platform,
|
||||
ToastAndroid,
|
||||
} from 'react-native';
|
||||
import audioService from './audio';
|
||||
import wakeWordService from './wakeword';
|
||||
|
||||
interface PhoneCallNative {
|
||||
start(): Promise<boolean>;
|
||||
stop(): Promise<boolean>;
|
||||
}
|
||||
|
||||
const { PhoneCall } = NativeModules as { PhoneCall?: PhoneCallNative };
|
||||
|
||||
type PhoneState = 'idle' | 'ringing' | 'offhook';
|
||||
|
||||
class PhoneCallService {
|
||||
private started: boolean = false;
|
||||
private subscription: { remove: () => void } | null = null;
|
||||
private focusSubscription: { remove: () => void } | null = null;
|
||||
private lastState: PhoneState = 'idle';
|
||||
/** Damit Resume nach VoIP-Loss nicht doppelt feuert wenn auch
|
||||
* TelephonyManager-IDLE-Event kommt. */
|
||||
private interruptedByFocus: boolean = false;
|
||||
|
||||
async start(): Promise<boolean> {
|
||||
if (this.started || Platform.OS !== 'android') return false;
|
||||
|
||||
// 1. AudioFocus-Listener IMMER registrieren — faengt VoIP-Calls (WhatsApp,
|
||||
// Signal, Discord etc.) ab, braucht keine Permission.
|
||||
try {
|
||||
const focusEmitter = new NativeEventEmitter(NativeModules.AudioFocus as any);
|
||||
this.focusSubscription = focusEmitter.addListener(
|
||||
'AudioFocusChanged',
|
||||
(e: { type: 'loss' | 'loss_transient' | 'gain' }) => this._onFocusChanged(e.type),
|
||||
);
|
||||
console.log('[PhoneCall] AudioFocus-Listener aktiv (fuer VoIP-Calls)');
|
||||
} catch (err: any) {
|
||||
console.warn('[PhoneCall] AudioFocus-Subscription gescheitert', err?.message || err);
|
||||
}
|
||||
|
||||
// 2. TelephonyManager-Listener — fuer klassische Mobilfunk-Anrufe
|
||||
if (PhoneCall) {
|
||||
try {
|
||||
const granted = await PermissionsAndroid.request(
|
||||
PermissionsAndroid.PERMISSIONS.READ_PHONE_STATE,
|
||||
{
|
||||
title: 'ARIA Cockpit — Anruf-Erkennung',
|
||||
message: 'Damit ARIA bei einem eingehenden Anruf nicht weiterredet, '
|
||||
+ 'darf die App den Anruf-Status sehen (Klingeln/Aktiv/Aufgelegt). '
|
||||
+ 'Es werden keine Anrufdaten gelesen oder gespeichert.',
|
||||
buttonPositive: 'Erlauben',
|
||||
buttonNegative: 'Spaeter',
|
||||
},
|
||||
);
|
||||
if (granted === PermissionsAndroid.RESULTS.GRANTED) {
|
||||
const ok = await PhoneCall.start();
|
||||
if (ok) {
|
||||
const emitter = new NativeEventEmitter(NativeModules.PhoneCall as any);
|
||||
this.subscription = emitter.addListener(
|
||||
'PhoneCallStateChanged',
|
||||
(e: { state: PhoneState }) => this._onStateChanged(e.state),
|
||||
);
|
||||
console.log('[PhoneCall] TelephonyManager-Listener aktiv');
|
||||
}
|
||||
} else {
|
||||
console.warn('[PhoneCall] READ_PHONE_STATE abgelehnt — VoIP-Calls werden trotzdem ueber AudioFocus erkannt');
|
||||
}
|
||||
} catch (err: any) {
|
||||
console.warn('[PhoneCall] TelephonyManager-Setup gescheitert:', err?.message || err);
|
||||
}
|
||||
}
|
||||
|
||||
this.started = true;
|
||||
return true;
|
||||
}
|
||||
|
||||
async stop(): Promise<void> {
|
||||
if (!this.started) return;
|
||||
try { this.subscription?.remove(); } catch {}
|
||||
try { this.focusSubscription?.remove(); } catch {}
|
||||
this.subscription = null;
|
||||
this.focusSubscription = null;
|
||||
if (PhoneCall) {
|
||||
try { await PhoneCall.stop(); } catch {}
|
||||
}
|
||||
this.started = false;
|
||||
this.lastState = 'idle';
|
||||
this.interruptedByFocus = false;
|
||||
}
|
||||
|
||||
private _onStateChanged(state: PhoneState): void {
|
||||
if (state === this.lastState) return;
|
||||
const prev = this.lastState;
|
||||
console.log('[PhoneCall] State: %s → %s', prev, state);
|
||||
this.lastState = state;
|
||||
if (state === 'ringing' || state === 'offhook') {
|
||||
this._haltForCall(state === 'ringing' ? 'Anruf — ARIA pausiert' : 'Im Gespraech — ARIA pausiert');
|
||||
} else if (state === 'idle' && prev !== 'idle') {
|
||||
// Wenn schon durch AudioFocus-Loss pausiert wurde, NICHT doppelt resumen.
|
||||
// Der Focus-Gain-Event triggert das Resume.
|
||||
if (!this.interruptedByFocus) {
|
||||
this._resumeAfterCall('Anruf beendet — ARIA wieder aktiv');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** AudioFocus-Loss = irgendeine andere App hat den Focus uebernommen.
|
||||
* Das passiert bei VoIP-Anrufen (was wir wollen) ABER auch bei normalen
|
||||
* Audio-Playern (anderer Player startet, Notification-Sound, sogar
|
||||
* unsere eigenen Sound-Calls beim Play-Button). Daher checken wir den
|
||||
* AudioMode — nur IN_CALL (2) oder IN_COMMUNICATION (3) zaehlt als Anruf. */
|
||||
private async _onFocusChanged(type: 'loss' | 'loss_transient' | 'gain'): Promise<void> {
|
||||
if (type === 'loss' || type === 'loss_transient') {
|
||||
// Schon durch klassischen TelephonyManager pausiert? Dann nichts doppeln.
|
||||
if (this.lastState === 'ringing' || this.lastState === 'offhook') return;
|
||||
// Mode pruefen — nur echte Anrufe behandeln.
|
||||
let mode = -1;
|
||||
try { mode = await (NativeModules.AudioFocus as any)?.getMode?.(); } catch {}
|
||||
if (mode !== 2 && mode !== 3) {
|
||||
// NORMAL-Mode → kein Anruf (Stefan hat z.B. Play-Button gedrueckt
|
||||
// oder Spotify hat sich neu reingedraengelt). Keine Toasts.
|
||||
console.log('[PhoneCall] FOCUS_LOSS ignoriert (AudioMode=%d, kein Call)', mode);
|
||||
return;
|
||||
}
|
||||
this.interruptedByFocus = true;
|
||||
this._haltForCall('Anruf erkannt (VoIP) — ARIA pausiert');
|
||||
// Pollen, weil GAIN nicht zuverlaessig kommt (wir releasen den Focus
|
||||
// selbst beim halt → kein automatischer GAIN). AudioMode != IN_COMMUNICATION
|
||||
// = Call vorbei.
|
||||
this._startVoipResumePoll();
|
||||
} else if (type === 'gain') {
|
||||
if (this.interruptedByFocus) {
|
||||
this.interruptedByFocus = false;
|
||||
this._stopVoipResumePoll();
|
||||
this._resumeAfterCall('Audio frei — ARIA wieder aktiv');
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Polling-Fallback: alle 3s checken ob AudioMode wieder NORMAL ist. */
|
||||
private voipPollTimer: ReturnType<typeof setInterval> | null = null;
|
||||
private _startVoipResumePoll(): void {
|
||||
if (this.voipPollTimer) return;
|
||||
this.voipPollTimer = setInterval(async () => {
|
||||
if (!this.interruptedByFocus) {
|
||||
this._stopVoipResumePoll();
|
||||
return;
|
||||
}
|
||||
try {
|
||||
const mode = await (NativeModules.AudioFocus as any)?.getMode?.();
|
||||
// 0 = MODE_NORMAL — Call ist vorbei
|
||||
if (typeof mode === 'number' && mode === 0) {
|
||||
this.interruptedByFocus = false;
|
||||
this._stopVoipResumePoll();
|
||||
this._resumeAfterCall('Anruf beendet — ARIA wieder aktiv');
|
||||
}
|
||||
} catch {}
|
||||
}, 3000);
|
||||
}
|
||||
private _stopVoipResumePoll(): void {
|
||||
if (this.voipPollTimer) {
|
||||
clearInterval(this.voipPollTimer);
|
||||
this.voipPollTimer = null;
|
||||
}
|
||||
}
|
||||
|
||||
private _haltForCall(toast: string): void {
|
||||
// Position merken bevor wir den Stream killen — fuer Auto-Resume.
|
||||
audioService.captureInterruption();
|
||||
// pauseForCall (statt haltAllPlayback): pcmBuffer + messageId bleiben,
|
||||
// weitere Chunks werden weiter gesammelt damit isFinal die WAV schreibt.
|
||||
audioService.pauseForCall(toast);
|
||||
wakeWordService.pauseForCall().catch(() => {});
|
||||
ToastAndroid.show(toast, ToastAndroid.SHORT);
|
||||
}
|
||||
|
||||
private _resumeAfterCall(toast: string): void {
|
||||
// Anruf-Pause aufheben — neue Chunks duerfen wieder direkt abgespielt
|
||||
// werden (falls die Bridge mid-Anruf isFinal noch nicht geschickt hat).
|
||||
audioService.endCallPause();
|
||||
wakeWordService.resumeFromCall().catch(() => {});
|
||||
ToastAndroid.show(toast, ToastAndroid.SHORT);
|
||||
// 800ms warten bevor Auto-Resume — sonst kollidiert ARIA's neuer Focus-
|
||||
// Request mit Spotify's Auto-Resume nach Anruf-Ende. System haengt nach
|
||||
// dem Auflegen noch im IN_CALL-Mode-Uebergang, Spotify schaut auf Focus-
|
||||
// Gain und wuerde sofort wieder LOSS sehen → bleibt pausiert.
|
||||
// Mit Delay: Spotify resumed kurz, dann pausiert ARIA wieder ordnungs-
|
||||
// gemaess. Wenn ARIA nichts pending hat, bleibt Spotify einfach an.
|
||||
setTimeout(() => {
|
||||
audioService.resumeFromInterruption(30000).then(ok => {
|
||||
if (ok) {
|
||||
console.log('[PhoneCall] Auto-Resume von gemerkter Position gestartet');
|
||||
}
|
||||
}).catch(() => {});
|
||||
}, 800);
|
||||
}
|
||||
}
|
||||
|
||||
const phoneCallService = new PhoneCallService();
|
||||
export default phoneCallService;
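A minimal wiring sketch, assuming a hypothetical hook in the root component: start the service once on mount, stop it on unmount.

import { useEffect } from 'react';
import phoneCallService from './phoneCall';

// Hypothetical hook used by the root App component.
export function usePhoneCallPause(): void {
  useEffect(() => {
    phoneCallService.start().catch(() => {});
    return () => {
      phoneCallService.stop().catch(() => {});
    };
  }, []);
}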
|
||||
@@ -50,28 +50,69 @@ class UpdateService {
|
||||
});
|
||||
}
|
||||
|
||||
/** Raeumt alte heruntergeladene APK-Dateien aus dem Cache auf. */
|
||||
private async cleanupOldApks(): Promise<void> {
|
||||
try {
|
||||
const files = await RNFS.readDir(RNFS.CachesDirectoryPath);
|
||||
const apks = files.filter(f => /\.apk$/i.test(f.name));
|
||||
let freed = 0;
|
||||
for (const f of apks) {
|
||||
try {
|
||||
const size = parseInt(f.size as any, 10) || 0;
|
||||
await RNFS.unlink(f.path);
|
||||
freed += size;
|
||||
console.log(`[Update] Alte APK geloescht: ${f.name} (${(size / 1024 / 1024).toFixed(1)}MB)`);
|
||||
} catch (err: any) {
|
||||
console.warn(`[Update] APK-Loeschen fehlgeschlagen: ${f.name} (${err?.message || err})`);
|
||||
}
|
||||
}
|
||||
if (apks.length > 0) {
|
||||
console.log(`[Update] Cleanup fertig: ${apks.length} APKs entfernt, ${(freed / 1024 / 1024).toFixed(1)}MB freigegeben`);
|
||||
}
|
||||
} catch (err: any) {
|
||||
console.warn(`[Update] Cleanup-Fehler: ${err?.message || err}`);
|
||||
/** Sucht ueberall wo .apk-Dateien herumliegen koennten. */
|
||||
private async _apkSearchDirs(): Promise<string[]> {
|
||||
const dirs = [RNFS.CachesDirectoryPath, RNFS.DocumentDirectoryPath];
|
||||
if ((RNFS as any).ExternalCachesDirectoryPath) {
|
||||
dirs.push((RNFS as any).ExternalCachesDirectoryPath);
|
||||
}
|
||||
if (RNFS.ExternalDirectoryPath) {
|
||||
dirs.push(RNFS.ExternalDirectoryPath);
|
||||
}
|
||||
return dirs;
|
||||
}
|
||||
|
||||
/** Raeumt alte heruntergeladene APK-Dateien aus den App-Verzeichnissen auf.
|
||||
* Public damit Settings den Button "Update-Cache leeren" benutzen kann. */
|
||||
async cleanupOldApks(keepCurrentName?: string): Promise<{ removed: number; freedMB: number }> {
|
||||
const dirs = await this._apkSearchDirs();
|
||||
let removed = 0;
|
||||
let freed = 0;
|
||||
for (const dir of dirs) {
|
||||
try {
|
||||
if (!(await RNFS.exists(dir))) continue;
|
||||
const files = await RNFS.readDir(dir);
|
||||
const apks = files.filter(f => /\.apk$/i.test(f.name));
|
||||
for (const f of apks) {
|
||||
if (keepCurrentName && f.name === keepCurrentName) continue;
|
||||
try {
|
||||
const size = parseInt(f.size as any, 10) || 0;
|
||||
await RNFS.unlink(f.path);
|
||||
removed += 1;
|
||||
freed += size;
|
||||
console.log(`[Update] APK geloescht: ${f.path} (${(size / 1024 / 1024).toFixed(1)}MB)`);
|
||||
} catch (err: any) {
|
||||
console.warn(`[Update] APK-Loeschen fehlgeschlagen: ${f.path} (${err?.message || err})`);
|
||||
}
|
||||
}
|
||||
} catch (err: any) {
|
||||
console.warn(`[Update] Cleanup-Fehler in ${dir}: ${err?.message || err}`);
|
||||
}
|
||||
}
|
||||
const freedMB = freed / 1024 / 1024;
|
||||
if (removed > 0) {
|
||||
console.log(`[Update] Cleanup fertig: ${removed} APK${removed === 1 ? '' : 's'} entfernt, ${freedMB.toFixed(1)}MB freigegeben`);
|
||||
}
|
||||
return { removed, freedMB };
|
||||
}
|
||||
|
||||
/** Aktuelle Groesse aller APK-Dateien in den App-Verzeichnissen (in MB). */
|
||||
async getApkCacheSize(): Promise<{ count: number; totalMB: number }> {
|
||||
const dirs = await this._apkSearchDirs();
|
||||
let count = 0;
|
||||
let total = 0;
|
||||
for (const dir of dirs) {
|
||||
try {
|
||||
if (!(await RNFS.exists(dir))) continue;
|
||||
const files = await RNFS.readDir(dir);
|
||||
for (const f of files) {
|
||||
if (!f.isFile() || !/\.apk$/i.test(f.name)) continue;
|
||||
count += 1;
|
||||
total += parseInt(f.size as any, 10) || 0;
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
return { count, totalMB: total / 1024 / 1024 };
|
||||
}
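A minimal sketch of a Settings handler using the two methods above, assuming the UpdateService singleton is the default export of './updates':

import updateService from './updates'; // assumed default export of the UpdateService singleton

// Hypothetical handler behind an "Update-Cache leeren" button in Settings.
async function onClearUpdateCache(): Promise<void> {
  const before = await updateService.getApkCacheSize();
  const result = await updateService.cleanupOldApks();
  console.log('[Settings] APK cache: %d file(s), %.1fMB → removed %d, freed %.1fMB',
    before.count, before.totalMB, result.removed, result.freedMB);
}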
|
||||
|
||||
/** Bei App-Start Update pruefen */
|
||||
|
||||
@@ -0,0 +1,71 @@
|
||||
/**
|
||||
* Spielt einen kurzen "Bereit"-Sound (Airplane Ding-Dong) wenn das Mikrofon
|
||||
* nach Wake-Word-Erkennung wirklich offen ist. Datei liegt in
|
||||
* android/app/src/main/res/raw/wake_ready_sound.mp3 — wird ueber Android's
|
||||
* Resource-System per react-native-sound abgespielt.
|
||||
*
|
||||
* Toggle: AsyncStorage-Key 'aria_wake_ready_sound_enabled' (default true).
|
||||
*/
|
||||
|
||||
import Sound from 'react-native-sound';
|
||||
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||
|
||||
export const WAKE_READY_SOUND_STORAGE_KEY = 'aria_wake_ready_sound_enabled';
|
||||
|
||||
Sound.setCategory('Playback', false);
|
||||
|
||||
let cachedSound: Sound | null = null;
|
||||
let cachedFailed = false;
|
||||
|
||||
function getSound(): Promise<Sound | null> {
|
||||
if (cachedFailed) return Promise.resolve(null);
|
||||
if (cachedSound) return Promise.resolve(cachedSound);
|
||||
return new Promise(resolve => {
|
||||
const s = new Sound('wake_ready_sound', Sound.MAIN_BUNDLE, (err) => {
|
||||
if (err) {
|
||||
console.warn('[WakeReadySound] Konnte nicht geladen werden:', err);
|
||||
cachedFailed = true;
|
||||
resolve(null);
|
||||
return;
|
||||
}
|
||||
cachedSound = s;
|
||||
resolve(s);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/** True wenn der User den "Bereit"-Sound aktiviert hat. Default: true. */
|
||||
export async function isWakeReadySoundEnabled(): Promise<boolean> {
|
||||
try {
|
||||
const raw = await AsyncStorage.getItem(WAKE_READY_SOUND_STORAGE_KEY);
|
||||
if (raw === null) return true; // Default an
|
||||
return raw === 'true';
|
||||
} catch {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
export async function setWakeReadySoundEnabled(enabled: boolean): Promise<void> {
|
||||
try {
|
||||
await AsyncStorage.setItem(WAKE_READY_SOUND_STORAGE_KEY, String(enabled));
|
||||
} catch {}
|
||||
}
|
||||
|
||||
/** Spielt den Bereit-Sound einmal ab — non-blocking. Wenn der User ihn
|
||||
* in den Settings deaktiviert hat oder die Datei nicht ladbar ist,
|
||||
* passiert einfach nichts. */
|
||||
export async function playWakeReadySound(): Promise<void> {
|
||||
if (!(await isWakeReadySoundEnabled())) return;
|
||||
const s = await getSound();
|
||||
if (!s) return;
|
||||
try {
|
||||
s.stop(() => {
|
||||
s.setCurrentTime(0);
|
||||
s.play((success) => {
|
||||
if (!success) console.warn('[WakeReadySound] Wiedergabe fehlgeschlagen');
|
||||
});
|
||||
});
|
||||
} catch (e) {
|
||||
console.warn('[WakeReadySound] play() Exception:', e);
|
||||
}
|
||||
}
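A minimal usage sketch, assuming a hypothetical wake-word handler that opens the recorder first and only then plays the ready cue:

import { playWakeReadySound } from './wakeReadySound';

// Hypothetical wake-word handler: signal "ready" once the mic is really open,
// not when the wake word was merely detected.
async function onWakeWordTriggered(startRecording: () => Promise<void>): Promise<void> {
  await startRecording();               // microphone is open from here on
  playWakeReadySound().catch(() => {}); // non-blocking ding; silent if disabled or not loadable
}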
|
||||
+229
-113
@@ -1,142 +1,154 @@
|
||||
/**
|
||||
* Gespraechsmodus / Wake Word Service
|
||||
*
|
||||
* Wake-Word-Engine: openWakeWord (https://github.com/dscripka/openWakeWord),
|
||||
* komplett on-device via ONNX Runtime in Native-Kotlin (siehe
|
||||
* OpenWakeWordModule.kt + assets/openwakeword/). Kein API-Key, kein Cloud-
|
||||
* Roundtrip, kein Cent Lizenzgebuehren.
|
||||
*
|
||||
* Drei Zustaende:
|
||||
* off — Ohr aus, nichts laeuft
|
||||
* armed — Ohr aktiv, Porcupine hoert passiv auf das Wake-Word.
|
||||
* Das Mikro ist von Porcupine belegt; AudioRecorder ist aus.
|
||||
* conversing — Wake-Word getriggert (oder Ohr-Tap ohne Wake-Word):
|
||||
* aktive Konversation. Porcupine pausiert (gibt Mikro frei),
|
||||
* armed — Ohr aktiv, openWakeWord hoert passiv auf das Wake-Word.
|
||||
* Das Mikro ist von OpenWakeWord belegt; AudioRecorder ist aus.
|
||||
* conversing — Wake-Word getriggert (oder Ohr-Tap manuell):
|
||||
* aktive Konversation. OpenWakeWord pausiert (gibt Mikro frei),
|
||||
* AudioRecorder uebernimmt fuer die Aufnahme.
|
||||
* Nach jeder ARIA-Antwort oeffnet das Mikro fuer X Sekunden
|
||||
* (Conversation-Window). Stille im Fenster → zurueck zu armed.
|
||||
*
|
||||
* Wake-Word fallback: ist kein Picovoice-Access-Key gesetzt, geht 'start'
|
||||
* direkt in 'conversing' (klassischer Gespraechsmodus). 'endConversation'
|
||||
* geht dann nach 'off' statt 'armed'.
|
||||
* Faellt das Native-Modul aus (alte App-Version, ONNX-Init-Fehler), geht
|
||||
* 'start' direkt in 'conversing' (klassischer Direkt-Aufnahme-Modus).
|
||||
*/
|
||||
|
||||
import { NativeEventEmitter, NativeModules, ToastAndroid } from 'react-native';
|
||||
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||
import { ToastAndroid } from 'react-native';
|
||||
import { acquireBackgroundAudio } from './backgroundAudio';
|
||||
|
||||
type WakeWordCallback = () => void;
|
||||
type StateCallback = (state: WakeWordState) => void;
|
||||
|
||||
export type WakeWordState = 'off' | 'armed' | 'conversing';
|
||||
|
||||
export const WAKE_ACCESS_KEY_STORAGE = 'aria_wake_access_key';
|
||||
export const WAKE_KEYWORD_STORAGE = 'aria_wake_keyword';
|
||||
|
||||
/** Built-In Keywords von Picovoice — pre-trained, sofort einsetzbar.
|
||||
* Custom Keywords (z.B. "ARIA") brauchen ein .ppn File aus der Picovoice
|
||||
* Console — wird spaeter ueber Diagnostic uploadbar. */
|
||||
export const BUILTIN_KEYWORDS = [
|
||||
'jarvis',
|
||||
/** Verfuegbare Wake-Words — entsprechen den .onnx Dateien in
|
||||
* android/app/src/main/assets/openwakeword/. Custom-Keywords (eigenes
|
||||
* Training via openwakeword Notebook) muessen aktuell als Asset eingebaut
|
||||
* werden — Diagnostic-Upload ist Phase 2. */
|
||||
export const WAKE_KEYWORDS = [
|
||||
'hey_jarvis',
|
||||
'computer',
|
||||
'picovoice',
|
||||
'porcupine',
|
||||
'bumblebee',
|
||||
'terminator',
|
||||
'alexa',
|
||||
'hey google',
|
||||
'ok google',
|
||||
'hey siri',
|
||||
'hey_mycroft',
|
||||
'hey_rhasspy',
|
||||
] as const;
|
||||
export type BuiltinKeyword = typeof BUILTIN_KEYWORDS[number];
|
||||
export const DEFAULT_KEYWORD: BuiltinKeyword = 'jarvis';
|
||||
export type WakeKeyword = typeof WAKE_KEYWORDS[number];
|
||||
export const DEFAULT_KEYWORD: WakeKeyword = 'hey_jarvis';
|
||||
|
||||
/** Hilfs-Mapping fuer die Anzeige im UI. */
|
||||
export const KEYWORD_LABELS: Record<WakeKeyword, string> = {
|
||||
hey_jarvis: 'Hey Jarvis',
|
||||
computer: 'Computer',
|
||||
alexa: 'Alexa',
|
||||
hey_mycroft: 'Hey Mycroft',
|
||||
hey_rhasspy: 'Hey Rhasspy',
|
||||
};
|
||||
|
||||
// Detection-Tuning — kann in Settings spaeter konfigurierbar werden.
|
||||
const DEFAULT_THRESHOLD = 0.5;
|
||||
const DEFAULT_PATIENCE = 2;
|
||||
const DEFAULT_DEBOUNCE_MS = 1500;
|
||||
|
||||
interface OpenWakeWordModule {
|
||||
init(modelName: string, threshold: number, patience: number, debounceMs: number): Promise<boolean>;
|
||||
start(): Promise<boolean>;
|
||||
stop(): Promise<boolean>;
|
||||
dispose(): Promise<boolean>;
|
||||
isAvailable(): Promise<boolean>;
|
||||
}
|
||||
|
||||
const { OpenWakeWord } = NativeModules as { OpenWakeWord?: OpenWakeWordModule };
|
||||
|
||||
class WakeWordService {
|
||||
private state: WakeWordState = 'off';
|
||||
private wakeCallbacks: WakeWordCallback[] = [];
|
||||
private stateCallbacks: StateCallback[] = [];
|
||||
/** Barge-In-Callbacks: feuern wenn Wake-Word WAEHREND ARIA spricht erkannt
|
||||
* wird. ChatScreen reagiert mit TTS-stop + neuer Aufnahme. */
|
||||
private bargeCallbacks: WakeWordCallback[] = [];
|
||||
/** True solange Wake-Word parallel zu TTS aktiv ist. */
|
||||
private bargeListening: boolean = false;
|
||||
/** Anruf-Pause: state wird gemerkt damit nach Auflegen wiederhergestellt wird. */
|
||||
private callPaused: boolean = false;
|
||||
private preCallState: WakeWordState = 'off';
|
||||
/** Cooldown nach App-Resume: kurze Phase in der Wake-Word-Detections
|
||||
* ignoriert werden. Beim Wechsel von Background nach Vordergrund gibt's
|
||||
* oft einen Audio-Pegel-Spike (AudioFocus-Switch, AudioTrack re-route),
|
||||
* der openWakeWord faelschlich triggern kann. */
|
||||
private cooldownUntilMs: number = 0;
|
||||
|
||||
// Picovoice Manager (lazy, da Native Module nicht in jedem Build verfuegbar ist)
|
||||
private porcupine: any = null;
|
||||
private accessKey: string = '';
|
||||
private keyword: string = DEFAULT_KEYWORD;
|
||||
private keyword: WakeKeyword = DEFAULT_KEYWORD;
|
||||
private nativeReady: boolean = false;
|
||||
private initInProgress: Promise<boolean> | null = null;
|
||||
private eventSub: { remove: () => void } | null = null;
|
||||
|
||||
/** Beim App-Start aufrufen — laedt Settings, baut Porcupine wenn Key da ist. */
|
||||
/** Beim App-Start aufrufen — laedt Settings, baut Native-Modul. */
|
||||
async loadFromStorage(): Promise<void> {
|
||||
try {
|
||||
const k = await AsyncStorage.getItem(WAKE_ACCESS_KEY_STORAGE);
|
||||
const w = await AsyncStorage.getItem(WAKE_KEYWORD_STORAGE);
|
||||
this.accessKey = (k || '').trim();
|
||||
this.keyword = (w || DEFAULT_KEYWORD).trim();
|
||||
if (this.accessKey) {
|
||||
// Vorinitialisieren — wirft sich nicht durch wenn etwas fehlt
|
||||
await this.initPorcupine();
|
||||
}
|
||||
const wt = (w || DEFAULT_KEYWORD).trim() as WakeKeyword;
|
||||
this.keyword = (WAKE_KEYWORDS as readonly string[]).includes(wt) ? wt : DEFAULT_KEYWORD;
|
||||
await this.initNative();
|
||||
} catch (err) {
|
||||
console.warn('[WakeWord] loadFromStorage', err);
|
||||
}
|
||||
}
|
||||
|
||||
/** Settings-Wechsel — neuer Key oder Keyword. Re-Init Porcupine. */
|
||||
async configure(accessKey: string, keyword: string): Promise<boolean> {
|
||||
this.accessKey = (accessKey || '').trim();
|
||||
this.keyword = (keyword || DEFAULT_KEYWORD).trim();
|
||||
await AsyncStorage.setItem(WAKE_ACCESS_KEY_STORAGE, this.accessKey);
|
||||
await AsyncStorage.setItem(WAKE_KEYWORD_STORAGE, this.keyword);
|
||||
/** Settings-Wechsel: anderes Wake-Word. Re-Init des Native-Moduls. */
|
||||
async configure(keyword: string): Promise<boolean> {
|
||||
const next: WakeKeyword = (WAKE_KEYWORDS as readonly string[]).includes(keyword)
|
||||
? (keyword as WakeKeyword)
|
||||
: DEFAULT_KEYWORD;
|
||||
this.keyword = next;
|
||||
await AsyncStorage.setItem(WAKE_KEYWORD_STORAGE, next);
|
||||
|
||||
// Laufende Instanz stoppen
|
||||
await this.disposePorcupine();
|
||||
if (!this.accessKey) {
|
||||
console.warn('[WakeWord] configure: kein Access Key gesetzt');
|
||||
return false;
|
||||
}
|
||||
|
||||
// Neu initialisieren
|
||||
const ok = await this.initPorcupine();
|
||||
// Laufende Instanz stoppen + neu initialisieren
|
||||
await this.disposeNative();
|
||||
const ok = await this.initNative();
|
||||
if (!ok) {
|
||||
ToastAndroid.show(
|
||||
`Wake-Word "${this.keyword}" konnte nicht initialisiert werden — Logs pruefen`,
|
||||
`Wake-Word "${KEYWORD_LABELS[next]}" konnte nicht initialisiert werden — Logs pruefen`,
|
||||
ToastAndroid.LONG,
|
||||
);
|
||||
}
|
||||
return ok;
|
||||
}
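A minimal sketch of a Settings picker handler calling the new configure(), assuming the exports introduced above:

import wakeWordService, { WAKE_KEYWORDS, KEYWORD_LABELS, WakeKeyword } from './wakeword';

// Hypothetical Settings handler: re-initialise the native engine with the
// selected keyword; configure() already shows a toast if the init fails.
async function onKeywordSelected(keyword: string): Promise<void> {
  if (!(WAKE_KEYWORDS as readonly string[]).includes(keyword)) return;
  const ok = await wakeWordService.configure(keyword);
  console.log('[Settings] wake word %s: %s',
    KEYWORD_LABELS[keyword as WakeKeyword], ok ? 'active' : 'init failed');
}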
|
||||
|
||||
private async initPorcupine(): Promise<boolean> {
|
||||
private async initNative(): Promise<boolean> {
|
||||
if (!OpenWakeWord) {
|
||||
console.warn('[WakeWord] OpenWakeWord Native-Modul nicht verfuegbar — Direkt-Aufnahme-Fallback aktiv');
|
||||
this.nativeReady = false;
|
||||
return false;
|
||||
}
|
||||
if (this.initInProgress) return this.initInProgress;
|
||||
this.initInProgress = (async () => {
|
||||
try {
|
||||
const porcupineRN = require('@picovoice/porcupine-react-native');
|
||||
const { PorcupineManager, BuiltInKeywords } = porcupineRN;
|
||||
// Manche Porcupine-Versionen wollen das BuiltInKeywords-Enum (Objekt
|
||||
// mit keys wie JARVIS, COMPUTER, HEY_GOOGLE), andere akzeptieren
|
||||
// den String direkt. Mappen mit Fallback auf String:
|
||||
const enumKey = this.keyword.toUpperCase().replace(/\s+/g, '_');
|
||||
const kw = (BuiltInKeywords && BuiltInKeywords[enumKey]) || this.keyword;
|
||||
console.log('[WakeWord] Porcupine init: keyword=%s (resolved=%s)',
|
||||
this.keyword, typeof kw === 'string' ? kw : '[enum]');
|
||||
this.porcupine = await PorcupineManager.fromBuiltInKeywords(
|
||||
this.accessKey,
|
||||
[kw],
|
||||
(keywordIndex: number) => {
|
||||
console.log('[WakeWord] Porcupine callback fired (index=%d)', keywordIndex);
|
||||
await OpenWakeWord.init(this.keyword, DEFAULT_THRESHOLD, DEFAULT_PATIENCE, DEFAULT_DEBOUNCE_MS);
|
||||
// Subscribe nur einmal
|
||||
if (!this.eventSub) {
|
||||
const emitter = new NativeEventEmitter(NativeModules.OpenWakeWord);
|
||||
this.eventSub = emitter.addListener('WakeWordDetected', () => {
|
||||
console.log('[WakeWord] Native Detection-Event empfangen');
|
||||
this.onWakeDetected().catch(err =>
|
||||
console.warn('[WakeWord] onWakeDetected crashed:', err));
|
||||
},
|
||||
// Error handler (wenn Porcupine im Background-Thread crashed,
|
||||
// z.B. beim Audio-Engine-Konflikt mit audio-recorder-player)
|
||||
(error: any) => {
|
||||
console.warn('[WakeWord] Porcupine runtime error:', error?.message || error);
|
||||
// Nicht in Loop crashen — state zurueck auf off damit der User
|
||||
// mit dem Aufnahme-Button wieder normal arbeiten kann
|
||||
this.setState('off');
|
||||
this.disposePorcupine().catch(() => {});
|
||||
},
|
||||
);
|
||||
console.log('[WakeWord] Porcupine init OK (keyword=%s, manager=%s)',
|
||||
this.keyword, this.porcupine ? 'created' : 'NULL');
|
||||
});
|
||||
}
|
||||
this.nativeReady = true;
|
||||
console.log('[WakeWord] Init OK (model=%s)', this.keyword);
|
||||
return true;
|
||||
} catch (err: any) {
|
||||
console.warn('[WakeWord] Porcupine init fehlgeschlagen:', err?.message || err);
|
||||
console.warn('[WakeWord] err details:', JSON.stringify({
|
||||
name: err?.name, code: err?.code, stack: err?.stack?.slice(0, 200),
|
||||
}));
|
||||
this.porcupine = null;
|
||||
console.warn('[WakeWord] Init fehlgeschlagen:', err?.message || err);
|
||||
this.nativeReady = false;
|
||||
return false;
|
||||
} finally {
|
||||
this.initInProgress = null;
|
||||
@@ -145,27 +157,28 @@ class WakeWordService {
|
||||
return this.initInProgress;
|
||||
}
|
||||
|
||||
private async disposePorcupine() {
|
||||
if (this.porcupine) {
|
||||
try { await this.porcupine.stop(); } catch {}
|
||||
try { await this.porcupine.delete(); } catch {}
|
||||
this.porcupine = null;
|
||||
}
|
||||
private async disposeNative(): Promise<void> {
|
||||
if (!OpenWakeWord) return;
|
||||
try { await OpenWakeWord.dispose(); } catch {}
|
||||
this.nativeReady = false;
|
||||
}
|
||||
|
||||
/** Ohr-Button gedrueckt — startet passives Lauschen oder direkt Konversation. */
|
||||
async start(): Promise<boolean> {
|
||||
if (this.state !== 'off') return true;
|
||||
if (this.porcupine) {
|
||||
// Passives Lauschen via Porcupine
|
||||
// Foreground-Service VOR dem Mic-Zugriff hochziehen damit Background-
|
||||
// Lauschen funktioniert (Android braucht foregroundServiceType=microphone
|
||||
// aktiv zum Zeitpunkt des AudioRecord.startRecording).
|
||||
await acquireBackgroundAudio('wake');
|
||||
if (this.nativeReady && OpenWakeWord) {
|
||||
try {
|
||||
await this.porcupine.start();
|
||||
console.log('[WakeWord] armed — warte auf Wake Word "%s"', this.keyword);
|
||||
ToastAndroid.show(`Lausche auf "${this.keyword}"`, ToastAndroid.SHORT);
|
||||
await OpenWakeWord.start();
|
||||
console.log('[WakeWord] armed — warte auf "%s"', this.keyword);
|
||||
ToastAndroid.show(`Lausche auf "${KEYWORD_LABELS[this.keyword]}"`, ToastAndroid.SHORT);
|
||||
this.setState('armed');
|
||||
return true;
|
||||
} catch (err: any) {
|
||||
console.warn('[WakeWord] Porcupine start fehlgeschlagen — Fallback Direkt-Konversation:',
|
||||
console.warn('[WakeWord] start fehlgeschlagen — Fallback Direkt-Aufnahme:',
|
||||
err?.message || err);
|
||||
ToastAndroid.show(
|
||||
`Wake-Word-Start failed: ${err?.message || err}`,
|
||||
@@ -173,14 +186,13 @@ class WakeWordService {
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// Kein Porcupine init → User explicit informieren
|
||||
console.warn('[WakeWord] Porcupine nicht initialisiert — Access Key fehlt? Fallback Direkt-Aufnahme');
|
||||
console.warn('[WakeWord] Native-Modul nicht bereit — Direkt-Aufnahme-Fallback');
|
||||
ToastAndroid.show(
|
||||
'Wake-Word nicht aktiv — direkte Aufnahme startet (Mikro hoert mit)',
|
||||
ToastAndroid.LONG,
|
||||
);
|
||||
}
|
||||
// Fallback: direkt in die Konversation (Mikro AKTIV, nicht passive)
|
||||
// Fallback: direkt in Konversation
|
||||
console.log('[WakeWord] Direkt-Aufnahme startet (kein Wake-Word)');
|
||||
this.setState('conversing');
|
||||
setTimeout(() => {
|
||||
@@ -194,21 +206,46 @@ class WakeWordService {
|
||||
/** Komplett ausschalten (Ohr abschalten) */
|
||||
async stop(): Promise<void> {
|
||||
console.log('[WakeWord] Ohr deaktiviert');
|
||||
if (this.porcupine) {
|
||||
try { await this.porcupine.stop(); } catch {}
|
||||
if (this.nativeReady && OpenWakeWord) {
|
||||
try { await OpenWakeWord.stop(); } catch {}
|
||||
}
|
||||
this.bargeListening = false;
|
||||
this.setState('off');
|
||||
}
|
||||
|
||||
/** Wake-Word getriggert: Porcupine pausieren, Konversation starten. */
|
||||
/** Cooldown setzen — alle Wake-Word-Detections in den naechsten ms ignorieren.
|
||||
* Wird beim App-Resume gerufen weil AppState-Wechsel Audio-Spikes erzeugen
|
||||
* die openWakeWord faelschlich als Trigger interpretiert. */
|
||||
setResumeCooldown(ms: number = 1500): void {
|
||||
this.cooldownUntilMs = Date.now() + ms;
|
||||
console.log('[WakeWord] Cooldown aktiv fuer %dms', ms);
|
||||
}
|
||||
|
||||
/** Wake-Word getriggert: Native-Modul pausieren, Konversation starten. */
|
||||
private async onWakeDetected(): Promise<void> {
|
||||
console.log('[WakeWord] Wake-Word "%s" erkannt!', this.keyword);
|
||||
ToastAndroid.show(`Wake-Word "${this.keyword}" erkannt — sprich jetzt`, ToastAndroid.SHORT);
|
||||
if (this.porcupine) {
|
||||
try { await this.porcupine.stop(); } catch {}
|
||||
const now = Date.now();
|
||||
if (now < this.cooldownUntilMs) {
|
||||
const left = this.cooldownUntilMs - now;
|
||||
console.log('[WakeWord] Trigger ignoriert (Cooldown noch %dms aktiv — wahrscheinlich App-Resume-Spike)', left);
|
||||
return;
|
||||
}
|
||||
console.log('[WakeWord] Wake-Word "%s" erkannt! (state=%s, barge=%s)',
|
||||
this.keyword, this.state, this.bargeListening);
|
||||
if (this.nativeReady && OpenWakeWord) {
|
||||
try { await OpenWakeWord.stop(); } catch {}
|
||||
}
|
||||
this.bargeListening = false;
|
||||
// Wenn wir bereits in 'conversing' sind und der Trigger waehrend ARIAs TTS
|
||||
// kam (Barge-In via Wake-Word), feuern wir einen separaten Callback damit
|
||||
// ChatScreen das TTS abbrechen + neue Aufnahme starten kann. Sonst normal.
|
||||
if (this.state === 'conversing') {
|
||||
this.bargeCallbacks.forEach(cb => {
|
||||
try { cb(); } catch (e) { console.warn('[WakeWord] barge cb err:', e); }
|
||||
});
|
||||
// Kein erneutes setState — wir bleiben in 'conversing'.
|
||||
return;
|
||||
}
|
||||
this.setState('conversing');
|
||||
// kurz warten damit Mikrofon frei ist
|
||||
setTimeout(() => {
|
||||
if (this.state === 'conversing') {
|
||||
this.wakeCallbacks.forEach(cb => cb());
|
||||
@@ -216,17 +253,83 @@ class WakeWordService {
|
||||
}, 200);
|
||||
}
|
||||
|
||||
/** Wake-Word PARALLEL zur TTS-Wiedergabe lauschen lassen — User kann
|
||||
* "Computer" sagen waehrend ARIA noch redet, AcousticEchoCanceler im
|
||||
* Native-Modul verhindert dass ARIAs eigene Stimme triggert.
|
||||
* Voraussetzung: AudioRecorder muss frei sein (Recording aus). Wenn der
|
||||
* AudioRecorder gerade laeuft, hat der Vorrang — Wake-Word geht nicht. */
|
||||
async startBargeListening(): Promise<void> {
|
||||
if (!this.nativeReady || !OpenWakeWord) return;
|
||||
if (this.state !== 'conversing') return;
|
||||
if (this.bargeListening) return;
|
||||
try {
|
||||
await OpenWakeWord.start();
|
||||
this.bargeListening = true;
|
||||
console.log('[WakeWord] Barge-Listening aktiv (parallel zu TTS)');
|
||||
} catch (err) {
|
||||
console.warn('[WakeWord] Barge-Listening start fehlgeschlagen:', err);
|
||||
}
|
||||
}
|
||||
|
||||
/** Barge-Listening wieder aus — z.B. wenn der AudioRecorder fuer die
|
||||
* naechste Aufnahme das Mikro braucht. */
|
||||
async stopBargeListening(): Promise<void> {
|
||||
if (!this.bargeListening) return;
|
||||
if (this.nativeReady && OpenWakeWord) {
|
||||
try { await OpenWakeWord.stop(); } catch {}
|
||||
}
|
||||
this.bargeListening = false;
|
||||
console.log('[WakeWord] Barge-Listening aus');
|
||||
}
|
||||
|
||||
/** Bei eingehendem Anruf: Wake-Word + Aufnahme stoppen, Pre-Call-State
|
||||
* merken. Telefonie-App belegt das Mikro waehrend des Anrufs, plus ARIA
|
||||
* soll nicht in laufende Telefonate reinhoeren. */
|
||||
async pauseForCall(): Promise<void> {
|
||||
if (this.callPaused) return;
|
||||
this.preCallState = this.state;
|
||||
if (this.state === 'off') {
|
||||
this.callPaused = true; // merken dass wir pausiert wurden
|
||||
return;
|
||||
}
|
||||
this.callPaused = true;
|
||||
if (this.nativeReady && OpenWakeWord) {
|
||||
try { await OpenWakeWord.stop(); } catch {}
|
||||
}
|
||||
this.bargeListening = false;
|
||||
console.log('[WakeWord] Anruf — Wake-Word pausiert (war: %s)', this.preCallState);
|
||||
}
|
||||
|
||||
/** Nach Auflegen: Pre-Call-State wiederherstellen. Aktive Konversation
|
||||
* geht zu armed zurueck (User soll nicht in einen halben Dialog springen). */
|
||||
async resumeFromCall(): Promise<void> {
|
||||
if (!this.callPaused) return;
|
||||
const restoreTo = this.preCallState;
|
||||
this.callPaused = false;
|
||||
this.preCallState = 'off';
|
||||
console.log('[WakeWord] Anruf zu Ende — restore state=%s', restoreTo);
|
||||
if (restoreTo === 'off') return;
|
||||
// Aktive Konversation war wahrscheinlich durch haltAllPlayback eh abgebrochen,
|
||||
// sicher zu armed degraden.
|
||||
if (restoreTo === 'conversing') this.setState('armed');
|
||||
if (this.nativeReady && OpenWakeWord) {
|
||||
try { await OpenWakeWord.start(); } catch (err) {
|
||||
console.warn('[WakeWord] Restore-Start fehlgeschlagen:', err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/** Konversation beenden — User hat im Window nichts gesagt.
|
||||
* Mit Wake-Word: zurueck zu 'armed' (Porcupine wieder an).
|
||||
* Mit Wake-Word: zurueck zu 'armed' (Listener wieder an).
|
||||
* Ohne: zurueck zu 'off'.
|
||||
*/
|
||||
async endConversation(): Promise<void> {
|
||||
if (this.state !== 'conversing') return;
|
||||
if (this.porcupine && this.accessKey) {
|
||||
if (this.nativeReady && OpenWakeWord) {
|
||||
try {
|
||||
await this.porcupine.start();
|
||||
await OpenWakeWord.start();
|
||||
console.log('[WakeWord] Konversation zu Ende — zurueck zu armed');
|
||||
ToastAndroid.show(`Lausche wieder auf "${this.keyword}"`, ToastAndroid.SHORT);
|
||||
ToastAndroid.show(`Lausche wieder auf "${KEYWORD_LABELS[this.keyword]}"`, ToastAndroid.SHORT);
|
||||
this.setState('armed');
|
||||
return;
|
||||
} catch (err) {
|
||||
@@ -259,10 +362,10 @@ class WakeWordService {
|
||||
}
|
||||
|
||||
hasWakeWord(): boolean {
|
||||
return !!this.porcupine;
|
||||
return this.nativeReady;
|
||||
}
|
||||
|
||||
getKeyword(): string {
|
||||
getKeyword(): WakeKeyword {
|
||||
return this.keyword;
|
||||
}
|
||||
|
||||
@@ -275,6 +378,19 @@ class WakeWordService {
|
||||
};
|
||||
}
|
||||
|
||||
/** Subscribe auf Barge-In-Events: Wake-Word erkannt waehrend ARIA noch
|
||||
* spricht. ChatScreen sollte dann TTS abbrechen + neue Aufnahme starten. */
|
||||
onBargeIn(callback: WakeWordCallback): () => void {
|
||||
this.bargeCallbacks.push(callback);
|
||||
return () => {
|
||||
this.bargeCallbacks = this.bargeCallbacks.filter(cb => cb !== callback);
|
||||
};
|
||||
}
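A minimal sketch of the barge-in flow described above, assuming hypothetical ChatScreen wiring: arm parallel listening when ARIA starts speaking, and on barge-in stop the TTS and start a new recording.

import audioService from './audio';
import wakeWordService from './wakeword';

// Hypothetical ChatScreen wiring; returns a cleanup function for unmount.
function wireBargeIn(startRecording: () => void): () => void {
  const offPlayback = audioService.onPlaybackStarted(() => {
    // ARIA just started talking — listen for the wake word in parallel.
    wakeWordService.startBargeListening().catch(() => {});
  });
  const offBarge = wakeWordService.onBargeIn(() => {
    // User interrupted: cut the running TTS and record the new request.
    audioService.stopPlayback();
    startRecording();
  });
  return () => { offPlayback(); offBarge(); };
}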
|
||||
|
||||
isBargeListening(): boolean {
|
||||
return this.bargeListening;
|
||||
}
|
||||
|
||||
onStateChange(callback: StateCallback): () => void {
|
||||
this.stateCallbacks.push(callback);
|
||||
return () => {
|
||||
|
||||
@@ -0,0 +1,35 @@
|
||||
# ════════════════════════════════════════════════════════════
|
||||
# ARIA Brain — Agent + Memory Container
|
||||
#
|
||||
# FastAPI-Server mit Vector-DB-Memory (Qdrant).
|
||||
# Spricht via HTTP/WebSocket mit Bridge und Diagnostic.
|
||||
# LLM-Calls gehen ueber den Proxy (claude-max-api-proxy).
|
||||
# ════════════════════════════════════════════════════════════
|
||||
|
||||
FROM python:3.12-slim
|
||||
|
||||
# System-Tools die Skills brauchen koennten (curl, jq, git, ssh-client,
|
||||
# Build-Basics fuer venv-Compiles). Bewusst sparsam — alles weitere
|
||||
# bringt der Skill selbst mit (siehe execution=local-bin).
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
curl \
|
||||
jq \
|
||||
git \
|
||||
openssh-client \
|
||||
ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY requirements.txt .
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
COPY . .
|
||||
|
||||
# Embedding-Model-Cache und Skills landen unter /data (Volume)
|
||||
ENV SENTENCE_TRANSFORMERS_HOME=/data/_models
|
||||
ENV ARIA_DATA_DIR=/data
|
||||
|
||||
EXPOSE 8080
|
||||
|
||||
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080"]
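A minimal client sketch for the Brain container above; the /chat path, payload and response fields are assumptions inferred from the agent module that follows, not a documented contract.

// Hypothetical TypeScript client for the Brain container (endpoint shape assumed).
interface BrainChatResponse { reply: string; events?: Array<Record<string, unknown>> }

async function askBrain(message: string, baseUrl = 'http://localhost:8080'): Promise<BrainChatResponse> {
  const res = await fetch(`${baseUrl}/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ message }),
  });
  if (!res.ok) throw new Error(`Brain returned HTTP ${res.status}`);
  return (await res.json()) as BrainChatResponse;
}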
|
||||
@@ -0,0 +1,385 @@
|
||||
"""
|
||||
Conversation-Loop. Eine Anfrage von Stefan, eine Antwort von ARIA.
|
||||
|
||||
Pro Turn:
|
||||
1. user-Turn an die laufende Conversation appenden
|
||||
2. Hot Memory holen (alle pinned Punkte)
|
||||
3. Cold Memory holen (Top-K semantisch zur user-Nachricht)
|
||||
4. System-Prompt aus Hot+Cold bauen
|
||||
5. Messages = [system, *window, user]
|
||||
6. Claude via Proxy aufrufen
|
||||
7. Assistant-Reply in Conversation appenden + zurueckgeben
|
||||
|
||||
Memory-Destillat laeuft asynchron NACH dem Reply, gesteuert vom
|
||||
/chat-Endpoint ueber BackgroundTasks.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from conversation import Conversation, Turn
|
||||
from memory import Embedder, VectorStore, MemoryPoint
|
||||
from prompts import build_system_prompt
|
||||
from proxy_client import ProxyClient, Message as ProxyMessage
|
||||
import skills as skills_mod
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Meta-Tool: ARIA kann selbst neue Skills bauen
|
||||
META_TOOLS = [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "skill_create",
|
||||
"description": (
|
||||
"Erstelle einen neuen Skill (wiederverwendbare Faehigkeit). "
|
||||
"Skills sind IMMER Python — jeder Skill bekommt seine eigene venv "
|
||||
"mit den pip_packages die er braucht.\n\n"
|
||||
"HARTE REGEL — IMMER Skill anlegen wenn: die Loesung erfordert eine "
|
||||
"pip-Library. Sonst muesste der Install bei jedem Container-Restart "
|
||||
"neu laufen (Brain hat keinen persistenten State ausser /data/skills/).\n\n"
|
||||
"Sonst NUR wenn ALLE Kriterien erfuellt sind:\n"
|
||||
" 1) wiederkehrend (Aufgabe kommt realistisch nochmal),\n"
|
||||
" 2) nicht-trivial (mehrere Schritte),\n"
|
||||
" 3) parametrisierbar (nimmt Eingaben, gibt Ergebnis),\n"
|
||||
" 4) wiederverwendbar als ganzes Paket.\n"
|
||||
"NICHT fuer einzelne Shell-Befehle (date, hostname, ls etc.) und "
|
||||
"nicht fuer Einmal-Faelle. Stefan kann Skill-Erstellung explizit "
|
||||
"triggern (\"bau daraus einen Skill\").\n\n"
|
||||
"Wenn etwas nur via apt-Paket geht — Stefan fragen ob es ins "
|
||||
"Brain-Dockerfile soll, NICHT als Skill bauen."
|
||||
),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"name": {"type": "string", "description": "kurz, kebab-case, a-z 0-9 - _"},
|
||||
"description": {"type": "string", "description": "Was kann der Skill? 1 Satz."},
|
||||
"entry_code": {
|
||||
"type": "string",
|
||||
"description": (
|
||||
"Python-Code. Args lesen via os.environ['ARG_NAME']. "
|
||||
"Resultat per print() (stdout) zurueck. Bei Fehler: "
|
||||
"non-zero exit (sys.exit(1) o.ae.)."
|
||||
),
|
||||
},
|
||||
"readme": {"type": "string", "description": "Markdown — was macht der Skill, Beispiel-Aufrufe"},
|
||||
"pip_packages": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "pip-Pakete die in der venv installiert werden (z.B. requests, yt-dlp, pypdf)",
|
||||
},
|
||||
"args": {
|
||||
"type": "array",
|
||||
"items": {"type": "object"},
|
||||
"description": "Argumente-Schema [{name, type, required, description}]",
|
||||
},
|
||||
},
|
||||
"required": ["name", "description", "entry_code"],
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "skill_list",
|
||||
"description": "Zeigt alle Skills (inkl. deaktivierte). Sollte selten noetig sein — die Liste steht eh im System-Prompt.",
|
||||
"parameters": {"type": "object", "properties": {}},
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
def _skill_to_tool(s: dict) -> dict:
|
||||
"""Mappt einen Skill auf ein OpenAI-Function-Tool."""
|
||||
args = s.get("args") or []
|
||||
props = {}
|
||||
required = []
|
||||
for a in args:
|
||||
if not isinstance(a, dict):
|
||||
continue
|
||||
name = a.get("name") or ""
|
||||
if not name:
|
||||
continue
|
||||
props[name] = {
|
||||
"type": a.get("type", "string"),
|
||||
"description": a.get("description", ""),
|
||||
}
|
||||
if a.get("required"):
|
||||
required.append(name)
|
||||
return {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": f"run_{s['name']}",
|
||||
"description": s.get("description", "(ohne Beschreibung)"),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": props,
|
||||
"required": required,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
class Agent:
|
||||
def __init__(self, store: VectorStore, embedder: Embedder,
|
||||
conversation: Conversation, proxy: ProxyClient,
|
||||
cold_k: int = 5):
|
||||
self.store = store
|
||||
self.embedder = embedder
|
||||
self.conversation = conversation
|
||||
self.proxy = proxy
|
||||
self.cold_k = cold_k
|
||||
# Side-Channel-Events die im Turn entstehen (z.B. skill_create).
|
||||
# Werden vom /chat-Endpoint in der Response mitgeschickt, damit
|
||||
# Stefan in der App und Diagnostic eine sichtbare Bubble bekommt.
|
||||
self._pending_events: list[dict] = []
|
||||
|
||||
def pop_events(self) -> list[dict]:
|
||||
"""Holt die Events des letzten chat()-Calls und leert die Liste."""
|
||||
events = self._pending_events
|
||||
self._pending_events = []
|
||||
return events
|
||||
|
||||
# ── Hauptpfad: ein User-Turn → Tool-Loop → finaler Reply ──
|
||||
|
||||
MAX_TOOL_ITERATIONS = 8 # Schutz vor Endlos-Loops
|
||||
|
||||
def chat(self, user_message: str, source: str = "") -> str:
|
||||
user_message = (user_message or "").strip()
|
||||
if not user_message:
|
||||
raise ValueError("Leere Nachricht")
|
||||
|
||||
# Events vom letzten Turn verwerfen
|
||||
self._pending_events = []
|
||||
|
||||
# 1. User-Turn an die Konversation
|
||||
self.conversation.add("user", user_message, source=source)
|
||||
|
||||
# 2. Hot Memory (alle pinned Punkte)
|
||||
hot = self.store.list_pinned()
|
||||
|
||||
# 3. Cold Memory (Top-K semantic)
|
||||
try:
|
||||
qvec = self.embedder.embed(user_message)
|
||||
cold = self.store.search(qvec, k=self.cold_k, exclude_pinned=True)
|
||||
except Exception as exc:
|
||||
logger.warning("Cold-Search fehlgeschlagen: %s", exc)
|
||||
cold = []
|
||||
|
||||
# 4. Aktive Skills holen + Tool-Liste bauen
|
||||
all_skills = skills_mod.list_skills(active_only=False)
|
||||
active_skills = [s for s in all_skills if s.get("active", True)]
|
||||
tools = list(META_TOOLS) + [_skill_to_tool(s) for s in active_skills]
|
||||
|
||||
# 5. System-Prompt + Window-Messages
|
||||
system_prompt = build_system_prompt(hot, cold, skills=all_skills)
|
||||
messages = [ProxyMessage(role="system", content=system_prompt)]
|
||||
for t in self.conversation.window():
|
||||
messages.append(ProxyMessage(role=t.role, content=t.content))
|
||||
|
||||
logger.info("chat: pinned=%d cold=%d skills=%d/%d window=%d prompt_chars=%d",
|
||||
len(hot), len(cold), len(active_skills), len(all_skills),
|
||||
len(self.conversation.window()), len(system_prompt))
|
||||
|
||||
# 6. Tool-Use-Loop
|
||||
final_reply = ""
|
||||
for iteration in range(self.MAX_TOOL_ITERATIONS):
|
||||
result = self.proxy.chat_full(messages, tools=tools)
|
||||
if result.tool_calls:
|
||||
# Assistant-Turn mit tool_calls in messages anhaengen (nicht in Conversation!)
|
||||
messages.append(ProxyMessage(
|
||||
role="assistant",
|
||||
content=result.content or None,
|
||||
tool_calls=[{
|
||||
"id": tc["id"], "type": "function",
|
||||
"function": {"name": tc["name"], "arguments": json.dumps(tc["arguments"])},
|
||||
} for tc in result.tool_calls],
|
||||
))
|
||||
# Tools ausfuehren + Ergebnis als role=tool zurueck
|
||||
for tc in result.tool_calls:
|
||||
tool_result = self._dispatch_tool(tc["name"], tc["arguments"])
|
||||
messages.append(ProxyMessage(
|
||||
role="tool",
|
||||
tool_call_id=tc["id"],
|
||||
name=tc["name"],
|
||||
content=tool_result[:8000],
|
||||
))
|
||||
continue # next iteration mit Tool-Results
|
||||
# Kein Tool-Call mehr → final reply
|
||||
final_reply = (result.content or "").strip()
|
||||
break
|
||||
else:
|
||||
# Loop-Limit erreicht
|
||||
final_reply = "[Tool-Loop-Limit erreicht — ARIA hat zu viele Tool-Calls gemacht ohne fertig zu werden]"
|
||||
logger.warning("Tool-Loop hit MAX_TOOL_ITERATIONS=%d", self.MAX_TOOL_ITERATIONS)
|
||||
|
||||
if not final_reply:
|
||||
raise RuntimeError("Leerer Reply vom Proxy")
|
||||
|
||||
# 7. Assistant-Turn (final reply) in die Conversation
|
||||
self.conversation.add("assistant", final_reply)
|
||||
return final_reply
|
||||
|
||||
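    # Editor's note (illustration, not part of the commit): after one tool round the
    # message list grows by exactly this pair, in the OpenAI-style format the proxy
    # expects — the tool name below is a hypothetical skill:
    #
    #   ProxyMessage(role="assistant", content=None, tool_calls=[{
    #       "id": "call_1", "type": "function",
    #       "function": {"name": "run_demo", "arguments": "{\"url\": \"...\"}"},
    #   }])
    #   ProxyMessage(role="tool", tool_call_id="call_1", name="run_demo",
    #                content="OK · 1.2s\nstdout:\n...")
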
    # ── Tool-Dispatcher ───────────────────────────────────────

    def _dispatch_tool(self, name: str, arguments: dict) -> str:
        """Fuehrt einen Tool-Call aus und gibt ein kurzes Text-Resultat zurueck.
        Niemals werfen — Fehler werden als Text-Resultat reportet damit Claude
        weitermachen kann."""
        try:
            if name == "skill_create":
                # ARIA-Skills sind immer Python — execution ist nicht mehr im Schema
                manifest = skills_mod.create_skill(
                    name=arguments["name"],
                    description=arguments["description"],
                    execution="local-venv",
                    entry_code=arguments["entry_code"],
                    readme=arguments.get("readme", ""),
                    args=arguments.get("args", []),
                    pip_packages=arguments.get("pip_packages", []),
                    author="aria",
                )
                # Side-Channel-Event: Stefan soll sehen wenn ARIA was anlegt
                self._pending_events.append({
                    "type": "skill_created",
                    "skill": {
                        "name": manifest["name"],
                        "description": manifest.get("description", ""),
                        "execution": manifest.get("execution", ""),
                        "active": manifest.get("active", True),
                        "setup_error": manifest.get("setup_error"),
                    },
                })
                return f"OK — Skill '{manifest['name']}' erstellt (active={manifest['active']})."
            if name == "skill_list":
                items = skills_mod.list_skills(active_only=False)
                if not items:
                    return "(keine Skills vorhanden)"
                return "\n".join(
                    f"- {s['name']} ({s['execution']}) {'aktiv' if s.get('active', True) else 'DEAKTIVIERT'}: {s.get('description', '')}"
                    for s in items
                )
            if name.startswith("run_"):
                skill_name = name[len("run_"):]
                res = skills_mod.run_skill(skill_name, args=arguments)
                snippet = (res.get("stdout") or "")[:2000] or "(kein stdout)"
                err = (res.get("stderr") or "")[:500]
                marker = "OK" if res["ok"] else f"FEHLER (exit={res['exit_code']})"
                out = f"{marker} · {res['duration_sec']}s\nstdout:\n{snippet}"
                if err:
                    out += f"\nstderr:\n{err}"
                return out
            return f"Unbekanntes Tool: {name}"
        except Exception as exc:
            logger.exception("Tool '%s' fehlgeschlagen", name)
            return f"FEHLER: {exc}"

    # ── Memory-Destillat (laeuft im Hintergrund) ──────────────

    def distill_old_turns(self) -> dict:
        """Nimmt die N aeltesten Turns und destilliert sie zu fact-Memories.

        Pattern: separater Claude-Call, lieferte 3-7 JSON-Facts, die als
        type=fact, source=distilled gespeichert werden. Erfolgreiches
        Schreiben → Turns aus dem Window entfernen.
        """
        if not self.conversation.needs_distill():
            return {"distilled": 0, "reason": "kein Bedarf"}

        old_turns = self.conversation.take_oldest_for_distill()
        if not old_turns:
            return {"distilled": 0, "reason": "keine alten Turns"}

        # Konversation als Klartext bauen
        transcript = "\n".join(
            f"[{t.role.upper()}] {t.content}" for t in old_turns
        )[:30000]  # Cap auf 30k Zeichen damit der Prompt nicht explodiert

        system = (
            "Du extrahierst aus einer Konversation zwischen Stefan und ARIA die "
            "wichtigsten dauerhaft relevanten Fakten — keine Smalltalk-Details, "
            "keine flüchtigen Zustände. Antworte AUSSCHLIESSLICH mit gültigem JSON "
            "im Format: {\"facts\": [{\"title\": \"kurz, max 80 Zeichen\", "
            "\"content\": \"1-3 Sätze, konkret und nützlich\"}]}. "
            "Mindestens 0, höchstens 7 Facts. Wenn nichts wichtig genug ist: leeres Array."
        )
        user = (
            "Hier ist der Konversations-Abschnitt:\n\n"
            f"{transcript}\n\n"
            "Extrahiere die wichtigsten Fakten als JSON."
        )

        try:
            raw = self.proxy.chat([
                ProxyMessage(role="system", content=system),
                ProxyMessage(role="user", content=user),
            ])
        except Exception as exc:
            logger.warning("Destillat-Call fehlgeschlagen: %s — Turns bleiben", exc)
            return {"distilled": 0, "error": str(exc)}

        facts = self._parse_facts(raw)
        if facts is None:
            logger.warning("Destillat lieferte unparsbares JSON: %r", raw[:200])
            return {"distilled": 0, "error": "JSON parse failed", "raw": raw[:200]}

        # Facts in die DB schreiben
        created = 0
        for f in facts:
            content = (f.get("content") or "").strip()
            if not content:
                continue
            title = (f.get("title") or "").strip()[:120] or "Fakt"
            point = MemoryPoint(
                id="",
                type="fact",
                title=title,
                content=content,
                pinned=False,
                category="konversation",
                source="distilled",
                tags=[],
            )
            try:
                vec = self.embedder.embed(content)
                self.store.upsert(point, vec)
                created += 1
            except Exception as exc:
                logger.warning("Fakt schreiben fehlgeschlagen: %s", exc)

        # Erst nach erfolgreichem Schreiben aus dem Window entfernen
        last_ts = old_turns[-1].ts
        self.conversation.commit_distill(last_ts)
        logger.info("Destillat: %d Facts geschrieben, %d Turns aus Window entfernt",
                    created, len(old_turns))
        return {"distilled": created, "removed_turns": len(old_turns)}

    @staticmethod
    def _parse_facts(raw: str) -> Optional[list]:
        if not raw:
            return None
        # JSON robust extrahieren — Claude kann Code-Fences setzen
        cleaned = raw.strip()
        if cleaned.startswith("```"):
            # ```json oder ``` rauswerfen
            cleaned = cleaned.split("\n", 1)[1] if "\n" in cleaned else cleaned[3:]
            if cleaned.endswith("```"):
                cleaned = cleaned[: -3]
            cleaned = cleaned.strip()
        # Erstes { bis letztes }
        start = cleaned.find("{")
        end = cleaned.rfind("}")
        if start == -1 or end == -1 or end < start:
            return None
        try:
            obj = json.loads(cleaned[start: end + 1])
        except Exception:
            return None
        facts = obj.get("facts") if isinstance(obj, dict) else None
        if not isinstance(facts, list):
            return None
        return facts
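

if __name__ == "__main__":
    # Editor's sketch (not part of the commit): quick self-check of the code-fence
    # handling in _parse_facts; the sample payload below is an invented example.
    _raw = '```json\n{"facts": [{"title": "Backup", "content": "Nightly backup at 3am."}]}\n```'
    print(Agent._parse_facts(_raw))
    # expected: [{'title': 'Backup', 'content': 'Nightly backup at 3am.'}]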
@@ -0,0 +1,130 @@
"""
Conversation-State — ein einziger Rolling-Window-State fuer ARIAs
laufendes Gespraech mit Stefan.

Stefan-Entscheidung: KEINE Sessions, KEIN Multi-Thread. EIN Strang,
intern rollend. Was rausfaellt, wird ggf. destilliert und landet
als type=fact Memory in der Vector-DB.

Persistenz: append-only JSONL unter /data/conversation.jsonl.
Bei Restart werden die letzten N Turns gelesen statt der kompletten
Historie (vermeidet Memory-Overhead bei sehr langen Verlaeufen).
"""

from __future__ import annotations

import json
import logging
import os
from dataclasses import dataclass, field, asdict
from datetime import datetime, timezone
from pathlib import Path
from typing import List, Optional

logger = logging.getLogger(__name__)

CONVERSATION_FILE = Path(os.environ.get("CONVERSATION_FILE", "/data/conversation.jsonl"))


@dataclass
class Turn:
    role: str  # "user" | "assistant"
    content: str
    ts: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
    source: str = ""  # "app" / "diagnostic" / "stt" — optional


class Conversation:
    """In-Memory Rolling Window, mit JSONL-Persistenz."""

    def __init__(self, max_window: int = 50, distill_threshold: int = 60,
                 distill_count: int = 30):
        self.max_window = max_window
        self.distill_threshold = distill_threshold
        self.distill_count = distill_count
        self.turns: List[Turn] = []
        self._load()

    def _load(self):
        if not CONVERSATION_FILE.exists():
            return
        try:
            lines = CONVERSATION_FILE.read_text(encoding="utf-8").splitlines()
        except Exception as exc:
            logger.warning("Konversation laden fehlgeschlagen: %s", exc)
            return
        loaded: List[Turn] = []
        for line in lines:
            line = line.strip()
            if not line:
                continue
            try:
                obj = json.loads(line)
            except Exception:
                continue
            if obj.get("op") == "distill":
                # Marker: bis hierhin wurde alles destilliert
                drop_until_ts = obj.get("ts", "")
                if drop_until_ts:
                    loaded = [t for t in loaded if t.ts > drop_until_ts]
                continue
            role = obj.get("role")
            content = obj.get("content")
            if role in ("user", "assistant") and isinstance(content, str):
                loaded.append(Turn(role=role, content=content,
                                   ts=obj.get("ts", ""),
                                   source=obj.get("source", "")))
        self.turns = loaded
        logger.info("Konversation geladen: %d Turns aus %s", len(self.turns), CONVERSATION_FILE)

    def _append_to_file(self, record: dict):
        try:
            CONVERSATION_FILE.parent.mkdir(parents=True, exist_ok=True)
            with CONVERSATION_FILE.open("a", encoding="utf-8") as f:
                f.write(json.dumps(record, ensure_ascii=False) + "\n")
        except Exception as exc:
            logger.warning("Konversation persist fehlgeschlagen: %s", exc)

    def add(self, role: str, content: str, source: str = "") -> Turn:
        t = Turn(role=role, content=content, source=source)
        self.turns.append(t)
        self._append_to_file({
            "ts": t.ts, "role": t.role, "content": t.content, "source": t.source,
        })
        return t

    def window(self) -> List[Turn]:
        """Die letzten max_window Turns — gehen in den LLM-Prompt."""
        return self.turns[-self.max_window:]

    def needs_distill(self) -> bool:
        return len(self.turns) > self.distill_threshold

    def take_oldest_for_distill(self) -> List[Turn]:
        """Gibt die N aeltesten Turns zurueck — fuer den Destillat-Call.
        Entfernt sie NICHT — das macht commit_distill nach erfolgreichem Call."""
        return self.turns[: self.distill_count]

    def commit_distill(self, last_distilled_ts: str):
        """Schreibt einen Distill-Marker, entfernt aus dem In-Memory-Window."""
        self._append_to_file({"op": "distill", "ts": last_distilled_ts})
        self.turns = [t for t in self.turns if t.ts > last_distilled_ts]
        logger.info("Distill commit bei ts=%s — Window jetzt %d Turns", last_distilled_ts, len(self.turns))

    def reset(self):
        """Hardes Reset — verwende vorsichtig (Diagnostic-Button)."""
        try:
            if CONVERSATION_FILE.exists():
                CONVERSATION_FILE.unlink()
        except Exception:
            pass
        self.turns = []
        logger.warning("Konversation komplett zurueckgesetzt")

    def stats(self) -> dict:
        return {
            "turns": len(self.turns),
            "max_window": self.max_window,
            "distill_threshold": self.distill_threshold,
            "needs_distill": self.needs_distill(),
        }
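

# Editor's note (illustration, not part of the commit): what /data/conversation.jsonl
# looks like on disk — one JSON object per line, plus the marker that commit_distill()
# appends. Timestamps are invented examples.
#
#   {"ts": "2025-01-01T10:00:00+00:00", "role": "user", "content": "Hallo ARIA", "source": "app"}
#   {"ts": "2025-01-01T10:00:04+00:00", "role": "assistant", "content": "Hallo Stefan!", "source": ""}
#   {"op": "distill", "ts": "2025-01-01T10:00:04+00:00"}
#
# On the next _load(), every turn with ts <= the marker's ts is dropped from the window.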
@@ -0,0 +1,518 @@
"""
ARIA Brain — FastAPI-Einstieg.

Phase B Punkt 1: nur Skeleton.
- /health                   → Liveness
- /memory/list              → alle Punkte (gefiltert)
- /memory/pinned            → Hot Memory
- /memory/search?q=...&k=5  → semantische Suche
- /memory/save              → neuen Punkt anlegen
- /memory/update/{id}       → Punkt aendern (re-embed wenn content geaendert)
- /memory/delete/{id}       → Punkt loeschen
- /memory/stats             → Anzahl Punkte pro Type

/chat (Conversation-Loop) und /skills/* kommen in spaeteren Phasen.
"""
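
# Editor's note (illustration, not part of the commit): calling the search endpoint
# from outside the container — host and port are assumptions, the query is invented.
#
#   import httpx
#   r = httpx.get("http://localhost:8000/memory/search", params={"q": "Backup", "k": 3})
#   for hit in r.json():
#       print(hit["score"], hit["title"])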
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import FastAPI, HTTPException, BackgroundTasks, Request
|
||||
from fastapi.responses import Response
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from memory import Embedder, VectorStore, MemoryPoint
|
||||
from conversation import Conversation
|
||||
from proxy_client import ProxyClient
|
||||
from agent import Agent
|
||||
import skills as skills_mod
|
||||
|
||||
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s")
|
||||
logger = logging.getLogger("aria-brain")
|
||||
|
||||
QDRANT_HOST = os.environ.get("QDRANT_HOST", "aria-qdrant")
|
||||
QDRANT_PORT = int(os.environ.get("QDRANT_PORT", "6333"))
|
||||
|
||||
app = FastAPI(title="ARIA Brain", version="0.1.0")
|
||||
|
||||
_embedder: Optional[Embedder] = None
|
||||
_store: Optional[VectorStore] = None
|
||||
_conversation: Optional[Conversation] = None
|
||||
_proxy: Optional[ProxyClient] = None
|
||||
_agent: Optional[Agent] = None
|
||||
|
||||
|
||||
def embedder() -> Embedder:
|
||||
global _embedder
|
||||
if _embedder is None:
|
||||
_embedder = Embedder()
|
||||
return _embedder
|
||||
|
||||
|
||||
def store() -> VectorStore:
|
||||
global _store
|
||||
if _store is None:
|
||||
_store = VectorStore(host=QDRANT_HOST, port=QDRANT_PORT)
|
||||
return _store
|
||||
|
||||
|
||||
def conversation() -> Conversation:
|
||||
global _conversation
|
||||
if _conversation is None:
|
||||
_conversation = Conversation()
|
||||
return _conversation
|
||||
|
||||
|
||||
def proxy_client() -> ProxyClient:
|
||||
global _proxy
|
||||
if _proxy is None:
|
||||
_proxy = ProxyClient()
|
||||
return _proxy
|
||||
|
||||
|
||||
def agent() -> Agent:
|
||||
global _agent
|
||||
if _agent is None:
|
||||
_agent = Agent(store(), embedder(), conversation(), proxy_client())
|
||||
return _agent
|
||||
|
||||
|
||||
# ─── Pydantic-Schemas ─────────────────────────────────────────────────
|
||||
|
||||
class MemoryIn(BaseModel):
|
||||
type: str = Field(..., description="identity|rule|preference|tool|skill|fact|conversation|reminder")
|
||||
title: str
|
||||
content: str
|
||||
pinned: bool = False
|
||||
category: str = ""
|
||||
source: str = "manual"
|
||||
tags: List[str] = Field(default_factory=list)
|
||||
conversation_id: Optional[str] = None
|
||||
|
||||
|
||||
class MemoryUpdate(BaseModel):
|
||||
title: Optional[str] = None
|
||||
content: Optional[str] = None
|
||||
pinned: Optional[bool] = None
|
||||
category: Optional[str] = None
|
||||
tags: Optional[List[str]] = None
|
||||
|
||||
|
||||
class MemoryOut(BaseModel):
|
||||
id: str
|
||||
type: str
|
||||
title: str
|
||||
content: str
|
||||
pinned: bool
|
||||
category: str
|
||||
source: str
|
||||
tags: List[str]
|
||||
created_at: str
|
||||
updated_at: str
|
||||
conversation_id: Optional[str] = None
|
||||
score: Optional[float] = None
|
||||
|
||||
@classmethod
|
||||
def from_point(cls, p: MemoryPoint) -> "MemoryOut":
|
||||
return cls(**p.__dict__)
|
||||
|
||||
|
||||
# ─── Health ───────────────────────────────────────────────────────────
|
||||
|
||||
@app.get("/health")
|
||||
def health():
|
||||
try:
|
||||
n = store().count()
|
||||
return {"status": "ok", "memory_count": n, "qdrant": f"{QDRANT_HOST}:{QDRANT_PORT}"}
|
||||
except Exception as exc:
|
||||
return {"status": "degraded", "error": str(exc), "qdrant": f"{QDRANT_HOST}:{QDRANT_PORT}"}
|
||||
|
||||
|
||||
# ─── Memory-Endpoints ─────────────────────────────────────────────────
|
||||
|
||||
@app.get("/memory/stats")
|
||||
def memory_stats():
|
||||
s = store()
|
||||
points = s.list_all()
|
||||
by_type = {}
|
||||
pinned = 0
|
||||
for p in points:
|
||||
by_type[p.type] = by_type.get(p.type, 0) + 1
|
||||
if p.pinned:
|
||||
pinned += 1
|
||||
return {"total": len(points), "pinned": pinned, "by_type": by_type}
|
||||
|
||||
|
||||
@app.get("/memory/list", response_model=List[MemoryOut])
|
||||
def memory_list(type: Optional[str] = None, limit: int = 200):
|
||||
s = store()
|
||||
points = s.list_by_type(type, limit=limit) if type else s.list_all(limit=limit)
|
||||
return [MemoryOut.from_point(p) for p in points]
|
||||
|
||||
|
||||
@app.get("/memory/pinned", response_model=List[MemoryOut])
|
||||
def memory_pinned():
|
||||
return [MemoryOut.from_point(p) for p in store().list_pinned()]
|
||||
|
||||
|
||||
@app.get("/memory/search", response_model=List[MemoryOut])
|
||||
def memory_search(q: str, k: int = 5, type: Optional[str] = None, include_pinned: bool = False):
|
||||
vec = embedder().embed(q)
|
||||
points = store().search(vec, k=k, type_filter=type, exclude_pinned=not include_pinned)
|
||||
return [MemoryOut.from_point(p) for p in points]
|
||||
|
||||
|
||||
@app.post("/memory/save", response_model=MemoryOut)
|
||||
def memory_save(body: MemoryIn):
|
||||
s = store()
|
||||
vec = embedder().embed(body.content)
|
||||
point = MemoryPoint(
|
||||
id="",
|
||||
type=body.type,
|
||||
title=body.title,
|
||||
content=body.content,
|
||||
pinned=body.pinned,
|
||||
category=body.category,
|
||||
source=body.source,
|
||||
tags=body.tags,
|
||||
conversation_id=body.conversation_id,
|
||||
)
|
||||
pid = s.upsert(point, vec)
|
||||
saved = s.get(pid)
|
||||
return MemoryOut.from_point(saved)
|
||||
|
||||
|
||||
@app.patch("/memory/update/{point_id}", response_model=MemoryOut)
|
||||
def memory_update(point_id: str, body: MemoryUpdate):
|
||||
s = store()
|
||||
existing = s.get(point_id)
|
||||
if not existing:
|
||||
raise HTTPException(404, f"Memory {point_id} nicht gefunden")
|
||||
|
||||
content_changed = body.content is not None and body.content != existing.content
|
||||
if body.title is not None:
|
||||
existing.title = body.title
|
||||
if body.content is not None:
|
||||
existing.content = body.content
|
||||
if body.pinned is not None:
|
||||
existing.pinned = body.pinned
|
||||
if body.category is not None:
|
||||
existing.category = body.category
|
||||
if body.tags is not None:
|
||||
existing.tags = body.tags
|
||||
|
||||
vec = embedder().embed(existing.content) if content_changed else None
|
||||
if vec is None:
|
||||
# Vektor unveraendert lassen — nur Payload neu schreiben
|
||||
from qdrant_client.http import models as qm
|
||||
from memory.vector_store import COLLECTION
|
||||
s.client.set_payload(
|
||||
collection_name=COLLECTION,
|
||||
payload=existing.to_payload() | {"updated_at": __import__("datetime").datetime.now(__import__("datetime").timezone.utc).isoformat()},
|
||||
points=[point_id],
|
||||
)
|
||||
saved = s.get(point_id)
|
||||
else:
|
||||
s.upsert(existing, vec)
|
||||
saved = s.get(point_id)
|
||||
return MemoryOut.from_point(saved)
|
||||
|
||||
|
||||
@app.delete("/memory/delete/{point_id}")
|
||||
def memory_delete(point_id: str):
|
||||
s = store()
|
||||
if not s.get(point_id):
|
||||
raise HTTPException(404, f"Memory {point_id} nicht gefunden")
|
||||
s.delete(point_id)
|
||||
return {"deleted": point_id}
|
||||
|
||||
|
||||
# ─── Migration aus brain-import/ ──────────────────────────────────────
|
||||
|
||||
IMPORT_DIR = os.environ.get("IMPORT_DIR", "/import")
|
||||
|
||||
|
||||
@app.post("/memory/migrate")
|
||||
def memory_migrate():
|
||||
"""Liest /import/*.md und schreibt atomare Memory-Punkte in die DB.
|
||||
Idempotent: bei Re-Run werden Punkte mit gleicher migration_key ersetzt."""
|
||||
from pathlib import Path
|
||||
from migration import run_migration
|
||||
s = store()
|
||||
e = embedder()
|
||||
result = run_migration(Path(IMPORT_DIR), s, e)
|
||||
return result
|
||||
|
||||
|
||||
@app.get("/memory/import-files")
|
||||
def memory_import_files():
|
||||
"""Listet was unter /import/ liegt — fuer die Diagnostic-UI."""
|
||||
from pathlib import Path
|
||||
d = Path(IMPORT_DIR)
|
||||
if not d.exists():
|
||||
return {"import_dir": str(d), "exists": False, "files": []}
|
||||
out = []
|
||||
for p in sorted(d.iterdir()):
|
||||
if p.is_file():
|
||||
try:
|
||||
out.append({"name": p.name, "size": p.stat().st_size})
|
||||
except Exception:
|
||||
pass
|
||||
return {"import_dir": str(d), "exists": True, "files": out}
|
||||
|
||||
|
||||
# ─── Bootstrap-Snapshot ───────────────────────────────────────────────
|
||||
# "Bootstrap" = alle pinned Memories. Export/Import zum schnellen
|
||||
# Wiederherstellen einer schlanken ARIA nach Wipe.
|
||||
|
||||
@app.get("/memory/export-bootstrap")
|
||||
def memory_export_bootstrap():
|
||||
"""Gibt alle pinned Memories als JSON zurueck — fuer Browser-Download."""
|
||||
s = store()
|
||||
pinned = s.list_pinned()
|
||||
return {
|
||||
"version": 1,
|
||||
"exported_at": __import__("datetime").datetime.now(
|
||||
__import__("datetime").timezone.utc
|
||||
).isoformat(),
|
||||
"count": len(pinned),
|
||||
"memories": [
|
||||
{
|
||||
"type": p.type,
|
||||
"title": p.title,
|
||||
"content": p.content,
|
||||
"pinned": True,
|
||||
"category": p.category,
|
||||
"source": p.source,
|
||||
"tags": p.tags,
|
||||
}
|
||||
for p in pinned
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
class BootstrapBundle(BaseModel):
|
||||
version: int = 1
|
||||
memories: List[dict]
|
||||
|
||||
|
||||
@app.post("/memory/import-bootstrap")
|
||||
def memory_import_bootstrap(body: BootstrapBundle):
|
||||
"""Loescht alle pinned Memories und importiert die im Bundle.
|
||||
Cold Memory (unpinned) bleibt unangetastet.
|
||||
|
||||
Wenn keine Memories im Bundle: nur loeschen ist NICHT erlaubt — der
|
||||
Caller soll erst exportieren und dann importieren.
|
||||
"""
|
||||
if not body.memories:
|
||||
raise HTTPException(400, "Bundle hat keine memories — Abbruch zur Sicherheit")
|
||||
|
||||
s = store()
|
||||
e = embedder()
|
||||
|
||||
# Alle aktuell pinned Punkte loeschen
|
||||
from qdrant_client.http import models as qm
|
||||
from memory.vector_store import COLLECTION
|
||||
s.client.delete(
|
||||
collection_name=COLLECTION,
|
||||
points_selector=qm.FilterSelector(filter=qm.Filter(must=[
|
||||
qm.FieldCondition(key="pinned", match=qm.MatchValue(value=True))
|
||||
])),
|
||||
)
|
||||
|
||||
# Neue Punkte einspeisen
|
||||
created = 0
|
||||
for m in body.memories:
|
||||
content = (m.get("content") or "").strip()
|
||||
if not content:
|
||||
continue
|
||||
point = MemoryPoint(
|
||||
id="",
|
||||
type=m.get("type", "fact"),
|
||||
title=m.get("title", "(ohne Titel)"),
|
||||
content=content,
|
||||
pinned=True,
|
||||
category=m.get("category", ""),
|
||||
source=m.get("source", "bootstrap-import"),
|
||||
tags=list(m.get("tags", [])),
|
||||
)
|
||||
vec = e.embed(content)
|
||||
s.upsert(point, vec)
|
||||
created += 1
|
||||
|
||||
return {"created": created, "deleted_previous_pinned": True}
|
||||
|
||||
|
||||
# ─── Conversation-Loop ──────────────────────────────────────────────
|
||||
|
||||
class ChatIn(BaseModel):
|
||||
message: str
|
||||
source: str = "" # "app" / "diagnostic" / "stt" — optional
|
||||
|
||||
|
||||
class ChatOut(BaseModel):
|
||||
reply: str
|
||||
turns: int
|
||||
distilling: bool
|
||||
events: list = Field(default_factory=list)
|
||||
|
||||
|
||||
@app.post("/chat", response_model=ChatOut)
|
||||
def chat(body: ChatIn, background: BackgroundTasks):
|
||||
"""Hauptpfad. Antwort kommt synchron. Memory-Destillat laeuft
|
||||
im Hintergrund nachdem die Response rausging."""
|
||||
a = agent()
|
||||
try:
|
||||
reply = a.chat(body.message, source=body.source)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(400, str(exc))
|
||||
except RuntimeError as exc:
|
||||
logger.error("chat fehlgeschlagen: %s", exc)
|
||||
raise HTTPException(502, str(exc))
|
||||
|
||||
needs_distill = a.conversation.needs_distill()
|
||||
if needs_distill:
|
||||
background.add_task(a.distill_old_turns)
|
||||
return ChatOut(
|
||||
reply=reply,
|
||||
turns=len(a.conversation.turns),
|
||||
distilling=needs_distill,
|
||||
events=a.pop_events(),
|
||||
)
|
||||
|
||||
|
||||
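
# Editor's note (illustration, not part of the commit): shape of a /chat response,
# derived from the ChatOut model above; the values are invented.
#
#   {
#     "reply": "Erledigt — das MP3 liegt unter /data/outbox/.",
#     "turns": 42,
#     "distilling": false,
#     "events": [{"type": "skill_created", "skill": {"name": "youtube_mp3", "active": true, ...}}]
#   }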
@app.get("/conversation/stats")
|
||||
def conversation_stats():
|
||||
return conversation().stats()
|
||||
|
||||
|
||||
@app.post("/conversation/reset")
|
||||
def conversation_reset():
|
||||
"""Hardes Reset — der Rolling-Window-Verlauf wird komplett geleert.
|
||||
Destillierte facts bleiben in der DB."""
|
||||
conversation().reset()
|
||||
return {"ok": True, "turns": 0}
|
||||
|
||||
|
||||
@app.post("/conversation/distill")
|
||||
def conversation_distill_now():
|
||||
"""Manueller Trigger fuer Destillat — fuer Tests oder vor einem
|
||||
bewussten Reset."""
|
||||
return agent().distill_old_turns()
|
||||
|
||||
|
||||
# ─── Skills ─────────────────────────────────────────────────────────
|
||||
|
||||
class SkillCreate(BaseModel):
|
||||
name: str
|
||||
description: str
|
||||
execution: str # local-venv | local-bin | bash
|
||||
entry_code: str
|
||||
readme: str = ""
|
||||
args: list = Field(default_factory=list)
|
||||
requires: dict = Field(default_factory=dict)
|
||||
pip_packages: list = Field(default_factory=list)
|
||||
author: str = "stefan"
|
||||
|
||||
|
||||
class SkillRun(BaseModel):
|
||||
name: str
|
||||
args: dict = Field(default_factory=dict)
|
||||
timeout_sec: int = 300
|
||||
|
||||
|
||||
class SkillPatch(BaseModel):
|
||||
description: str | None = None
|
||||
active: bool | None = None
|
||||
args: list | None = None
|
||||
|
||||
|
||||
@app.get("/skills/list")
|
||||
def skills_list(active_only: bool = False):
|
||||
return {"skills": skills_mod.list_skills(active_only=active_only)}
|
||||
|
||||
|
||||
@app.get("/skills/{name}")
|
||||
def skills_get(name: str):
|
||||
m = skills_mod.read_manifest(name)
|
||||
if m is None:
|
||||
raise HTTPException(404, f"Skill '{name}' nicht gefunden")
|
||||
readme = skills_mod.read_readme(name)
|
||||
return {"manifest": m, "readme": readme}
|
||||
|
||||
|
||||
@app.post("/skills/create")
|
||||
def skills_create(body: SkillCreate):
|
||||
try:
|
||||
return skills_mod.create_skill(
|
||||
name=body.name,
|
||||
description=body.description,
|
||||
execution=body.execution,
|
||||
entry_code=body.entry_code,
|
||||
readme=body.readme,
|
||||
args=body.args,
|
||||
requires=body.requires,
|
||||
pip_packages=body.pip_packages,
|
||||
author=body.author,
|
||||
)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(400, str(exc))
|
||||
|
||||
|
||||
@app.post("/skills/run")
|
||||
def skills_run(body: SkillRun):
|
||||
try:
|
||||
return skills_mod.run_skill(body.name, args=body.args, timeout_sec=body.timeout_sec)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(400, str(exc))
|
||||
|
||||
|
||||
@app.patch("/skills/{name}")
|
||||
def skills_patch(name: str, body: SkillPatch):
|
||||
patch = {k: v for k, v in body.model_dump().items() if v is not None}
|
||||
try:
|
||||
return skills_mod.update_skill(name, patch)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(404, str(exc))
|
||||
|
||||
|
||||
@app.delete("/skills/{name}")
|
||||
def skills_delete(name: str):
|
||||
try:
|
||||
skills_mod.delete_skill(name)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(404, str(exc))
|
||||
return {"deleted": name}
|
||||
|
||||
|
||||
@app.get("/skills/{name}/logs")
|
||||
def skills_logs(name: str, limit: int = 50):
|
||||
return {"logs": skills_mod.list_logs(name, limit=limit)}
|
||||
|
||||
|
||||
@app.get("/skills/{name}/export")
|
||||
def skills_export(name: str):
|
||||
try:
|
||||
data = skills_mod.export_skill(name)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(404, str(exc))
|
||||
return Response(
|
||||
content=data,
|
||||
media_type="application/gzip",
|
||||
headers={"Content-Disposition": f'attachment; filename="skill-{name}.tar.gz"'},
|
||||
)
|
||||
|
||||
|
||||
@app.post("/skills/import")
|
||||
async def skills_import(request: Request, overwrite: bool = False):
|
||||
data = await request.body()
|
||||
if not data:
|
||||
raise HTTPException(400, "Leerer Body")
|
||||
try:
|
||||
manifest = skills_mod.import_skill(data, overwrite=overwrite)
|
||||
except ValueError as exc:
|
||||
raise HTTPException(400, str(exc))
|
||||
return {"imported": manifest}
|
||||
@@ -0,0 +1,4 @@
from .embedder import Embedder
from .vector_store import VectorStore, MemoryPoint, MemoryType

__all__ = ["Embedder", "VectorStore", "MemoryPoint", "MemoryType"]
@@ -0,0 +1,42 @@
"""
Lokaler Embedder fuer Memory-Texte.

Nutzt sentence-transformers (paraphrase-multilingual-MiniLM-L12-v2):
- Deutsch + Englisch
- 384-dimensionale Vektoren
- Laeuft auf CPU, ~30ms pro kurzer Text
- Modell wird beim ersten Aufruf in /data/_models gecached
"""

from __future__ import annotations

import logging
from typing import List

logger = logging.getLogger(__name__)

MODEL_NAME = "paraphrase-multilingual-MiniLM-L12-v2"
VECTOR_DIM = 384


class Embedder:
    def __init__(self, model_name: str = MODEL_NAME):
        self.model_name = model_name
        self._model = None

    def _load(self):
        if self._model is None:
            logger.info("Lade Embedding-Modell %s ...", self.model_name)
            from sentence_transformers import SentenceTransformer
            self._model = SentenceTransformer(self.model_name)
            logger.info("Embedding-Modell geladen.")

    def embed(self, text: str) -> List[float]:
        self._load()
        vec = self._model.encode(text, convert_to_numpy=True, normalize_embeddings=True)
        return vec.tolist()

    def embed_batch(self, texts: List[str]) -> List[List[float]]:
        self._load()
        vecs = self._model.encode(texts, convert_to_numpy=True, normalize_embeddings=True)
        return vecs.tolist()
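

if __name__ == "__main__":
    # Editor's sketch (not part of the commit): smoke test for the embedder. The model
    # is downloaded on first use; the vector length follows from VECTOR_DIM.
    e = Embedder()
    v = e.embed("Stefan mag kurze Antworten.")
    print(len(v), VECTOR_DIM)  # -> 384 384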
@@ -0,0 +1,209 @@
"""
Vector-Store-Wrapper um Qdrant.

Eine Collection "aria_memory" haelt ALLE Memory-Punkte.
Trennung nach Type/Pinned-Status via Payload-Filter.

Punkt-Schema (Payload):
  type             — identity | rule | preference | tool | skill | fact | conversation | reminder
  category         — frei, fuer UI-Gruppierung
  title            — kurze Ueberschrift
  content          — eigentlicher Text (wird embedded)
  pinned           — bool, True = Hot Memory (immer in Prompt)
  source           — import | conversation | manual
  tags             — Liste von Strings
  created_at, updated_at — ISO-Strings
  conversation_id  — optional, nur fuer type=conversation
"""

|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import uuid
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from enum import Enum
|
||||
from typing import List, Optional
|
||||
|
||||
from qdrant_client import QdrantClient
|
||||
from qdrant_client.http import models as qm
|
||||
|
||||
from .embedder import VECTOR_DIM
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
COLLECTION = "aria_memory"
|
||||
|
||||
|
||||
class MemoryType(str, Enum):
|
||||
IDENTITY = "identity"
|
||||
RULE = "rule"
|
||||
PREFERENCE = "preference"
|
||||
TOOL = "tool"
|
||||
SKILL = "skill"
|
||||
FACT = "fact"
|
||||
CONVERSATION = "conversation"
|
||||
REMINDER = "reminder"
|
||||
|
||||
|
||||
@dataclass
|
||||
class MemoryPoint:
|
||||
id: str
|
||||
type: str
|
||||
title: str
|
||||
content: str
|
||||
pinned: bool = False
|
||||
category: str = ""
|
||||
source: str = "manual"
|
||||
tags: List[str] = field(default_factory=list)
|
||||
created_at: str = ""
|
||||
updated_at: str = ""
|
||||
conversation_id: Optional[str] = None
|
||||
score: Optional[float] = None # nur bei Search gesetzt
|
||||
|
||||
def to_payload(self) -> dict:
|
||||
p = {
|
||||
"type": self.type,
|
||||
"title": self.title,
|
||||
"content": self.content,
|
||||
"pinned": self.pinned,
|
||||
"category": self.category,
|
||||
"source": self.source,
|
||||
"tags": self.tags,
|
||||
"created_at": self.created_at,
|
||||
"updated_at": self.updated_at,
|
||||
}
|
||||
if self.conversation_id:
|
||||
p["conversation_id"] = self.conversation_id
|
||||
return p
|
||||
|
||||
@classmethod
|
||||
def from_qdrant(cls, point) -> "MemoryPoint":
|
||||
payload = point.payload or {}
|
||||
return cls(
|
||||
id=str(point.id),
|
||||
type=payload.get("type", "fact"),
|
||||
title=payload.get("title", ""),
|
||||
content=payload.get("content", ""),
|
||||
pinned=payload.get("pinned", False),
|
||||
category=payload.get("category", ""),
|
||||
source=payload.get("source", "manual"),
|
||||
tags=payload.get("tags", []),
|
||||
created_at=payload.get("created_at", ""),
|
||||
updated_at=payload.get("updated_at", ""),
|
||||
conversation_id=payload.get("conversation_id"),
|
||||
score=getattr(point, "score", None),
|
||||
)
|
||||
|
||||
|
||||
def _now() -> str:
|
||||
return datetime.now(timezone.utc).isoformat()
|
||||
|
||||
|
||||
class VectorStore:
|
||||
def __init__(self, host: str, port: int = 6333):
|
||||
self.client = QdrantClient(host=host, port=port)
|
||||
self._ensure_collection()
|
||||
|
||||
def _ensure_collection(self):
|
||||
existing = [c.name for c in self.client.get_collections().collections]
|
||||
if COLLECTION not in existing:
|
||||
logger.info("Erstelle Collection %s ...", COLLECTION)
|
||||
self.client.create_collection(
|
||||
collection_name=COLLECTION,
|
||||
vectors_config=qm.VectorParams(size=VECTOR_DIM, distance=qm.Distance.COSINE),
|
||||
)
|
||||
# Indexe fuer typische Filter-Felder
|
||||
for field_name in ("type", "pinned", "category", "source", "migration_key"):
|
||||
self.client.create_payload_index(
|
||||
collection_name=COLLECTION,
|
||||
field_name=field_name,
|
||||
field_schema=qm.PayloadSchemaType.KEYWORD if field_name != "pinned"
|
||||
else qm.PayloadSchemaType.BOOL,
|
||||
)
|
||||
|
||||
# ─── Schreib-Operationen ─────────────────────────────────────────
|
||||
|
||||
def upsert(self, point: MemoryPoint, vector: List[float]) -> str:
|
||||
if not point.id:
|
||||
point.id = str(uuid.uuid4())
|
||||
if not point.created_at:
|
||||
point.created_at = _now()
|
||||
point.updated_at = _now()
|
||||
|
||||
self.client.upsert(
|
||||
collection_name=COLLECTION,
|
||||
points=[qm.PointStruct(id=point.id, vector=vector, payload=point.to_payload())],
|
||||
)
|
||||
return point.id
|
||||
|
||||
def delete(self, point_id: str):
|
||||
self.client.delete(
|
||||
collection_name=COLLECTION,
|
||||
points_selector=qm.PointIdsList(points=[point_id]),
|
||||
)
|
||||
|
||||
# ─── Lese-Operationen ────────────────────────────────────────────
|
||||
|
||||
def get(self, point_id: str) -> Optional[MemoryPoint]:
|
||||
result = self.client.retrieve(collection_name=COLLECTION, ids=[point_id], with_payload=True)
|
||||
if not result:
|
||||
return None
|
||||
return MemoryPoint.from_qdrant(result[0])
|
||||
|
||||
def list_pinned(self) -> List[MemoryPoint]:
|
||||
"""Alle pinned Punkte — Hot Memory."""
|
||||
return self._scroll(filter=qm.Filter(must=[
|
||||
qm.FieldCondition(key="pinned", match=qm.MatchValue(value=True))
|
||||
]))
|
||||
|
||||
def list_by_type(self, type_: str, limit: int = 100) -> List[MemoryPoint]:
|
||||
return self._scroll(
|
||||
filter=qm.Filter(must=[
|
||||
qm.FieldCondition(key="type", match=qm.MatchValue(value=type_))
|
||||
]),
|
||||
limit=limit,
|
||||
)
|
||||
|
||||
def list_all(self, limit: int = 1000) -> List[MemoryPoint]:
|
||||
return self._scroll(filter=None, limit=limit)
|
||||
|
||||
def _scroll(self, filter, limit: int = 1000) -> List[MemoryPoint]:
|
||||
points, _ = self.client.scroll(
|
||||
collection_name=COLLECTION,
|
||||
scroll_filter=filter,
|
||||
limit=limit,
|
||||
with_payload=True,
|
||||
with_vectors=False,
|
||||
)
|
||||
return [MemoryPoint.from_qdrant(p) for p in points]
|
||||
|
||||
def search(
|
||||
self,
|
||||
query_vector: List[float],
|
||||
k: int = 5,
|
||||
type_filter: Optional[str] = None,
|
||||
exclude_pinned: bool = True,
|
||||
) -> List[MemoryPoint]:
|
||||
"""Semantische Search. Standard: pinned-Punkte ausgeschlossen
|
||||
(die kommen separat via list_pinned in den Prompt)."""
|
||||
must = []
|
||||
must_not = []
|
||||
if type_filter:
|
||||
must.append(qm.FieldCondition(key="type", match=qm.MatchValue(value=type_filter)))
|
||||
if exclude_pinned:
|
||||
must_not.append(qm.FieldCondition(key="pinned", match=qm.MatchValue(value=True)))
|
||||
|
||||
flt = qm.Filter(must=must or None, must_not=must_not or None)
|
||||
|
||||
results = self.client.search(
|
||||
collection_name=COLLECTION,
|
||||
query_vector=query_vector,
|
||||
query_filter=flt if (must or must_not) else None,
|
||||
limit=k,
|
||||
with_payload=True,
|
||||
)
|
||||
return [MemoryPoint.from_qdrant(p) for p in results]
|
||||
|
||||
def count(self) -> int:
|
||||
return self.client.count(collection_name=COLLECTION, exact=True).count
|
||||
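
# Editor's note (illustration, not part of the commit): a typical round trip through
# the VectorStore — upsert one point, then search with pinned points excluded. The
# host name matches the QDRANT_HOST default used elsewhere; the texts are invented.
#
#   from memory import Embedder, VectorStore, MemoryPoint
#   store = VectorStore(host="aria-qdrant")
#   emb = Embedder()
#   p = MemoryPoint(id="", type="fact", title="Backup", content="Backup laeuft um 3 Uhr.")
#   pid = store.upsert(p, emb.embed(p.content))
#   hits = store.search(emb.embed("Wann laeuft das Backup?"), k=3)
#   print([h.title for h in hits])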
@@ -0,0 +1,399 @@
"""
Migration aus aria-data/brain-import/ → Vector-DB.

Parst die mitgelieferten Markdown-Dateien (AGENT.md, USER.md, TOOLING.md)
und zerlegt sie in atomare Memory-Punkte. Jeder Punkt bekommt:

  source        = "import"
  migration_key = stabiler Identifier (z.B. "agent.md/rule-1") fuer Idempotenz
  pinned        = True

Beim Re-Run werden vorhandene Punkte mit gleicher migration_key entfernt
und neu geschrieben.

Mapping pro Datei:

AGENT.md
  "Identitaet"                     → 1 Punkt type=identity
  "Persoenlichkeit" (Intro)        → 1 Punkt type=identity
  "Kern-Eigenschaften" (Liste)     → 1 Punkt pro Bullet type=identity
  "Tool-Freigaben"                 → 1 Punkt type=tool
  "Sicherheitsregeln" (Liste)      → 1 Punkt pro Bullet type=rule
  "Arbeitsprinzipien" (Liste)      → 1 Punkt pro Bullet type=rule
  "Dateien an Stefan zurueckgeben" → 1 Punkt type=skill
  "Stimme"                         → 1 Punkt type=tool

USER.md
  "Allgemein" (Liste)              → 1 Punkt pro Bullet type=preference
  "Bestaetigung erforderlich"      → 1 Punkt type=preference
  "Autonomes Arbeiten OK fuer"     → 1 Punkt type=preference
  "Tools & Infrastruktur"          → 1 Punkt type=preference

TOOLING.md
  gesamter Inhalt                  → 1 Punkt type=tool, title="Tooling-Stack"

BOOTSTRAP.md ist eine Variante von AGENT.md — wird (vorerst) ignoriert
damit keine doppelten Punkte landen.
"""

|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
from memory import Embedder, VectorStore, MemoryPoint
|
||||
from memory.vector_store import COLLECTION
|
||||
from qdrant_client.http import models as qm
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class _Block:
|
||||
title: str
|
||||
content: str
|
||||
|
||||
|
||||
def _split_h2(md: str) -> List[_Block]:
|
||||
"""Zerlegt Markdown in H2-Bloecke. Inhalt vor dem ersten H2 wird verworfen."""
|
||||
blocks: List[_Block] = []
|
||||
current: Optional[_Block] = None
|
||||
for line in md.splitlines():
|
||||
m = re.match(r"^##\s+(.+?)\s*$", line)
|
||||
if m and not line.startswith("### "):
|
||||
if current:
|
||||
blocks.append(current)
|
||||
current = _Block(title=m.group(1).strip(), content="")
|
||||
continue
|
||||
if current is not None:
|
||||
current.content += line + "\n"
|
||||
if current:
|
||||
blocks.append(current)
|
||||
return blocks
|
||||
|
||||
|
||||
def _split_h3(content: str) -> List[_Block]:
|
||||
"""Zerlegt einen H2-Block in H3-Untersektionen + 'header'-Block davor."""
|
||||
blocks: List[_Block] = []
|
||||
header_lines: List[str] = []
|
||||
current: Optional[_Block] = None
|
||||
for line in content.splitlines():
|
||||
m = re.match(r"^###\s+(.+?)\s*$", line)
|
||||
if m:
|
||||
if current is None and header_lines:
|
||||
blocks.append(_Block(title="_intro", content="\n".join(header_lines).strip()))
|
||||
if current:
|
||||
blocks.append(current)
|
||||
current = _Block(title=m.group(1).strip(), content="")
|
||||
continue
|
||||
if current is None:
|
||||
header_lines.append(line)
|
||||
else:
|
||||
current.content += line + "\n"
|
||||
if current:
|
||||
blocks.append(current)
|
||||
elif header_lines:
|
||||
blocks.append(_Block(title="_intro", content="\n".join(header_lines).strip()))
|
||||
return blocks
|
||||
|
||||
|
||||
def _extract_bullets(content: str) -> List[tuple[str, str]]:
|
||||
"""Findet "- **Title** — Body" oder "N. **Title** — Body" Bullets.
|
||||
|
||||
Returns: Liste von (title, full_bullet_text).
|
||||
"""
|
||||
bullets: List[tuple[str, str]] = []
|
||||
current_lines: List[str] = []
|
||||
current_title: Optional[str] = None
|
||||
|
||||
def flush():
|
||||
if current_title and current_lines:
|
||||
bullets.append((current_title, "\n".join(current_lines).strip()))
|
||||
|
||||
for line in content.splitlines():
|
||||
m = re.match(r"^\s*(?:[-*]|\d+\.)\s+\*\*([^*]+?)\*\*\s*[—\-:]?\s*(.*)$", line)
|
||||
if m:
|
||||
flush()
|
||||
current_title = m.group(1).strip()
|
||||
current_lines = [line]
|
||||
continue
|
||||
# Folge-Zeilen mit Einrueckung gehoeren zum aktuellen Bullet
|
||||
if current_title and (line.startswith(" ") or line.startswith("\t") or not line.strip()):
|
||||
current_lines.append(line)
|
||||
continue
|
||||
if current_title and not re.match(r"^\s*(?:[-*]|\d+\.)\s+", line):
|
||||
current_lines.append(line)
|
||||
continue
|
||||
# Neuer Bullet ohne **Title** Format
|
||||
if re.match(r"^\s*(?:[-*]|\d+\.)\s+", line):
|
||||
flush()
|
||||
text = re.sub(r"^\s*(?:[-*]|\d+\.)\s+", "", line).strip()
|
||||
short_title = (text[:60] + "…") if len(text) > 60 else text
|
||||
bullets.append((short_title, line.strip()))
|
||||
current_title = None
|
||||
current_lines = []
|
||||
flush()
|
||||
return bullets
|
||||
|
||||
|
||||
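
# Editor's note (illustration, not part of the commit): what _extract_bullets returns
# for a single "- **Title** — Body" line; the sample text is invented.
#
#   _extract_bullets("- **Backups** — taeglich um 3 Uhr, Ziel ist das NAS")
#   # -> [("Backups", "- **Backups** — taeglich um 3 Uhr, Ziel ist das NAS")]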
# ─── Pro Datei eine Parser-Funktion ──────────────────────────────────
|
||||
|
||||
def _parse_agent_md(md: str, source_file: str) -> List[MemoryPoint]:
|
||||
points: List[MemoryPoint] = []
|
||||
h2_blocks = _split_h2(md)
|
||||
for h2 in h2_blocks:
|
||||
title = h2.title
|
||||
content = h2.content.strip()
|
||||
if not content:
|
||||
continue
|
||||
|
||||
if title.lower() == "identitaet" or title.lower() == "identität":
|
||||
points.append(_mk(
|
||||
type_="identity", title="ARIA — Identitaet",
|
||||
content=f"## {title}\n\n{content}",
|
||||
category="persoenlichkeit",
|
||||
migration_key=f"{source_file}/identity",
|
||||
))
|
||||
|
||||
elif title.lower() == "persoenlichkeit" or title.lower() == "persönlichkeit":
|
||||
# Intro-Absatz + Kern-Eigenschaften-Liste trennen
|
||||
sub = _split_h3(content)
|
||||
for s in sub:
|
||||
if s.title == "_intro" and s.content.strip():
|
||||
points.append(_mk(
|
||||
type_="identity", title="Persoenlichkeit — Grundsatz",
|
||||
content=s.content.strip(),
|
||||
category="persoenlichkeit",
|
||||
migration_key=f"{source_file}/personality-intro",
|
||||
))
|
||||
elif s.title.lower().startswith("kern"):
|
||||
for idx, (btitle, btext) in enumerate(_extract_bullets(s.content), 1):
|
||||
points.append(_mk(
|
||||
type_="identity", title=f"Eigenschaft: {btitle}",
|
||||
content=btext, category="persoenlichkeit",
|
||||
migration_key=f"{source_file}/personality-trait-{idx}",
|
||||
))
|
||||
|
||||
elif "sicherheitsregel" in title.lower():
|
||||
for idx, (btitle, btext) in enumerate(_extract_bullets(content), 1):
|
||||
points.append(_mk(
|
||||
type_="rule", title=f"Sicherheit: {btitle}",
|
||||
content=btext, category="sicherheit",
|
||||
migration_key=f"{source_file}/security-{idx}",
|
||||
))
|
||||
|
||||
elif "arbeitsprinzipien" in title.lower() or "arbeitsprinzip" in title.lower():
|
||||
for idx, (btitle, btext) in enumerate(_extract_bullets(content), 1):
|
||||
points.append(_mk(
|
||||
type_="rule", title=f"Prinzip: {btitle}",
|
||||
content=btext, category="arbeitsweise",
|
||||
migration_key=f"{source_file}/work-principle-{idx}",
|
||||
))
|
||||
|
||||
elif "tool-freigaben" in title.lower() or "tool freigaben" in title.lower():
|
||||
points.append(_mk(
|
||||
type_="tool", title="Tool-Freigaben — Vollzugriff",
|
||||
content=content, category="infrastruktur",
|
||||
migration_key=f"{source_file}/tool-access",
|
||||
))
|
||||
|
||||
elif "dateien an stefan" in title.lower() or "dateien zurueckgeben" in title.lower() or "dateien zur" in title.lower():
|
||||
points.append(_mk(
|
||||
type_="skill", title="Dateien an User zurueckgeben",
|
||||
content=content, category="ausgabe",
|
||||
migration_key=f"{source_file}/file-return-skill",
|
||||
))
|
||||
|
||||
elif title.lower() == "stimme":
|
||||
points.append(_mk(
|
||||
type_="tool", title="Stimme (F5-TTS)",
|
||||
content=content, category="infrastruktur",
|
||||
migration_key=f"{source_file}/voice",
|
||||
))
|
||||
|
||||
# Permanente Freigaben (in BOOTSTRAP) — als rule
|
||||
elif "freigaben" in title.lower():
|
||||
points.append(_mk(
|
||||
type_="rule", title=title,
|
||||
content=content, category="freigaben",
|
||||
migration_key=f"{source_file}/permissions",
|
||||
))
|
||||
|
||||
else:
|
||||
# Unbekannter Block: als generischer fact ablegen, NICHT pinned
|
||||
logger.info("Unbekannter H2-Block '%s' in %s — als fact (unpinned)", title, source_file)
|
||||
points.append(_mk(
|
||||
type_="fact", title=f"{source_file}: {title}",
|
||||
content=content, pinned=False,
|
||||
migration_key=f"{source_file}/section-{title.lower().replace(' ', '-')}",
|
||||
))
|
||||
return points
|
||||
|
||||
|
||||
def _parse_user_md(md: str, source_file: str) -> List[MemoryPoint]:
|
||||
points: List[MemoryPoint] = []
|
||||
for h2 in _split_h2(md):
|
||||
title = h2.title
|
||||
content = h2.content.strip()
|
||||
if not content:
|
||||
continue
|
||||
# Template-Platzhalter herausfiltern: Beispiel-Zeilen mit <Tag>
|
||||
if "<Beispiel-Tool>" in content or "<Username>" in title:
|
||||
continue
|
||||
if title.lower() == "allgemein":
|
||||
for idx, (btitle, btext) in enumerate(_extract_bullets(content), 1):
|
||||
# Template-Platzhalter ueberspringen
|
||||
if "<z.B." in btext or "<XYZ>" in btext:
|
||||
continue
|
||||
points.append(_mk(
|
||||
type_="preference", title=f"User: {btitle}",
|
||||
content=btext, category="allgemein",
|
||||
migration_key=f"{source_file}/general-{idx}",
|
||||
))
|
||||
else:
|
||||
cat_key = re.sub(r"[^a-z0-9]+", "-", title.lower()).strip("-") or "allgemein"
|
||||
points.append(_mk(
|
||||
type_="preference", title=title,
|
||||
content=content, category=cat_key,
|
||||
migration_key=f"{source_file}/{cat_key}",
|
||||
))
|
||||
return points
|
||||
|
||||
|
||||
def _parse_tooling_md(md: str, source_file: str) -> List[MemoryPoint]:
|
||||
md = md.strip()
|
||||
if not md:
|
||||
return []
|
||||
return [_mk(
|
||||
type_="tool", title="Tooling-Stack (VM)",
|
||||
content=md, category="infrastruktur",
|
||||
migration_key=f"{source_file}/tooling-full",
|
||||
)]
|
||||
|
||||
|
||||
# ─── Helper ─────────────────────────────────────────────────────────
|
||||
|
||||
def _mk(
|
||||
type_: str,
|
||||
title: str,
|
||||
content: str,
|
||||
migration_key: str,
|
||||
pinned: bool = True,
|
||||
category: str = "",
|
||||
) -> MemoryPoint:
|
||||
p = MemoryPoint(
|
||||
id="",
|
||||
type=type_,
|
||||
title=title,
|
||||
content=content.strip(),
|
||||
pinned=pinned,
|
||||
category=category,
|
||||
source="import",
|
||||
tags=[],
|
||||
)
|
||||
# migration_key wird ueber Payload-Index angesprochen — in to_payload manuell anhaengen
|
||||
setattr(p, "_migration_key", migration_key)
|
||||
return p
|
||||
|
||||
|
||||
# ─── Eintrittspunkt ─────────────────────────────────────────────────
|
||||
|
||||
def run_migration(
|
||||
import_dir: Path,
|
||||
store: VectorStore,
|
||||
embedder: Embedder,
|
||||
) -> dict:
|
||||
"""Liest alle .md-Dateien aus import_dir, parst sie, schreibt in DB.
|
||||
|
||||
Idempotent: vorhandene Punkte mit gleicher migration_key werden geloescht
|
||||
und neu geschrieben.
|
||||
|
||||
Returns: {"created": int, "updated": int, "skipped": int, "files": [...]}
|
||||
"""
|
||||
if not import_dir.exists():
|
||||
return {"created": 0, "updated": 0, "skipped": 0, "files": [], "error": f"{import_dir} nicht gefunden"}
|
||||
|
||||
parsers = {
|
||||
"AGENT.md": _parse_agent_md,
|
||||
"BOOTSTRAP.md": _parse_agent_md, # gleicher Parser, ggf. ueberlappende Eintraege
|
||||
"USER.md": _parse_user_md,
|
||||
"USER.md.example": _parse_user_md,
|
||||
"TOOLING.md": _parse_tooling_md,
|
||||
"TOOLING.md.example": _parse_tooling_md,
|
||||
}
|
||||
|
||||
# USER.md hat Vorrang vor USER.md.example
|
||||
file_priority = ["AGENT.md", "BOOTSTRAP.md", "USER.md", "USER.md.example",
|
||||
"TOOLING.md", "TOOLING.md.example"]
|
||||
seen_kinds: set[str] = set() # "USER" / "TOOLING" — nur einmal
|
||||
|
||||
points: List[MemoryPoint] = []
|
||||
processed_files: List[str] = []
|
||||
|
||||
for fname in file_priority:
|
||||
fp = import_dir / fname
|
||||
if not fp.exists():
|
||||
continue
|
||||
kind = fname.split(".")[0] # "AGENT", "BOOTSTRAP", "USER", "TOOLING"
|
||||
# USER.md.example nur wenn USER.md fehlt
|
||||
if kind in ("USER", "TOOLING") and kind in seen_kinds:
|
||||
continue
|
||||
seen_kinds.add(kind)
|
||||
parser = parsers.get(fname)
|
||||
if not parser:
|
||||
continue
|
||||
try:
|
||||
md = fp.read_text(encoding="utf-8")
|
||||
file_points = parser(md, fname)
|
||||
points.extend(file_points)
|
||||
processed_files.append(f"{fname} ({len(file_points)})")
|
||||
logger.info("Migration: %s → %d Punkte", fname, len(file_points))
|
||||
except Exception as exc:
|
||||
logger.exception("Migration: %s fehlgeschlagen", fname)
|
||||
processed_files.append(f"{fname} (FEHLER: {exc})")
|
||||
|
||||
if not points:
|
||||
return {"created": 0, "updated": 0, "skipped": 0, "files": processed_files}
|
||||
|
||||
# Erst alte Migration-Punkte mit gleicher migration_key loeschen
|
||||
migration_keys = [getattr(p, "_migration_key", None) for p in points]
|
||||
migration_keys = [k for k in migration_keys if k]
|
||||
if migration_keys:
|
||||
store.client.delete(
|
||||
collection_name=COLLECTION,
|
||||
points_selector=qm.FilterSelector(filter=qm.Filter(must=[
|
||||
qm.FieldCondition(key="migration_key", match=qm.MatchAny(any=migration_keys))
|
||||
])),
|
||||
)
|
||||
logger.info("Migration: %d alte Punkte mit gleicher migration_key entfernt", len(migration_keys))
|
||||
|
||||
# Embed in Batches
|
||||
texts = [p.content for p in points]
|
||||
vectors = embedder.embed_batch(texts)
|
||||
|
||||
created = 0
|
||||
for p, vec in zip(points, vectors):
|
||||
payload = p.to_payload()
|
||||
mkey = getattr(p, "_migration_key", None)
|
||||
if mkey:
|
||||
payload["migration_key"] = mkey
|
||||
from datetime import datetime, timezone
|
||||
import uuid as _uuid
|
||||
pid = str(_uuid.uuid4())
|
||||
now = datetime.now(timezone.utc).isoformat()
|
||||
payload["created_at"] = now
|
||||
payload["updated_at"] = now
|
||||
store.client.upsert(
|
||||
collection_name=COLLECTION,
|
||||
points=[qm.PointStruct(id=pid, vector=vec, payload=payload)],
|
||||
)
|
||||
created += 1
|
||||
|
||||
return {
|
||||
"created": created,
|
||||
"files": processed_files,
|
||||
"import_dir": str(import_dir),
|
||||
}
|
||||
@@ -0,0 +1,131 @@
"""
System-Prompt-Bau aus Memory-Punkten.

Strategie:
1. Alle pinned Punkte (Hot Memory) — gruppiert nach Type — in den
   System-Prompt schreiben. IMMER drin.
2. Top-K semantisch aehnliche Punkte (Cold Memory) zur aktuellen
   User-Nachricht — als "Moeglicherweise relevant" eingehaengt.
3. Aktive Skills als kompakte Liste (nur Name + Description) — damit
   ARIA weiss was sie hat.

Phase B Punkt 1: nur Hot-Memory-Bau, Skills + Cold-Search kommen
mit dem Conversation-Loop in spaeteren Phasen.
"""

|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from memory import MemoryPoint
|
||||
|
||||
TYPE_HEADINGS = {
|
||||
"identity": "## Wer du bist",
|
||||
"rule": "## Sicherheitsregeln & Prinzipien",
|
||||
"preference": "## Benutzer-Praeferenzen",
|
||||
"tool": "## Tool-Freigaben",
|
||||
"skill": "## Deine Skills",
|
||||
}
|
||||
|
||||
|
||||
def build_hot_memory_section(pinned: List[MemoryPoint]) -> str:
|
||||
"""Baue den 'IMMER-im-Prompt'-Block aus pinned Punkten."""
|
||||
grouped: dict[str, List[MemoryPoint]] = {}
|
||||
for p in pinned:
|
||||
grouped.setdefault(p.type, []).append(p)
|
||||
|
||||
parts: List[str] = []
|
||||
# Sortier-Reihenfolge: identity → rule → preference → tool → skill → Rest
|
||||
order = ["identity", "rule", "preference", "tool", "skill"]
|
||||
for t in order:
|
||||
items = grouped.pop(t, [])
|
||||
if not items:
|
||||
continue
|
||||
parts.append(TYPE_HEADINGS.get(t, f"## {t}"))
|
||||
for p in items:
|
||||
parts.append(f"### {p.title}")
|
||||
parts.append(p.content.strip())
|
||||
parts.append("")
|
||||
|
||||
# uebrige Types (falls jemand was anderes als pinned markiert)
|
||||
for t, items in grouped.items():
|
||||
parts.append(f"## {t}")
|
||||
for p in items:
|
||||
parts.append(f"### {p.title}")
|
||||
parts.append(p.content.strip())
|
||||
parts.append("")
|
||||
|
||||
return "\n".join(parts).strip()
|
||||
|
||||
|
||||
def build_cold_memory_section(matches: List[MemoryPoint]) -> str:
|
||||
"""Baue 'Moeglicherweise relevant'-Block aus Search-Treffern."""
|
||||
if not matches:
|
||||
return ""
|
||||
lines = ["## Moeglicherweise relevant (aus Gedaechtnis)"]
|
||||
for p in matches:
|
||||
score = f" [score={p.score:.2f}]" if p.score is not None else ""
|
||||
lines.append(f"- **{p.title}**{score}")
|
||||
lines.append(f" {p.content.strip()}")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def build_skills_section(skills: List[dict]) -> str:
|
||||
"""Listet alle Skills (aktiv + deaktiviert) damit ARIA weiss was es gibt
|
||||
und keine doppelt baut. Plus klare Schwelle wann ein Skill sich lohnt."""
|
||||
lines = ["## Deine Skills"]
|
||||
if skills:
|
||||
for s in skills:
|
||||
active = s.get("active", True)
|
||||
marker = "" if active else " [DEAKTIVIERT — kann nicht aufgerufen werden]"
|
||||
lines.append(f"- **{s.get('name', '?')}**{marker} — {s.get('description', '(ohne Beschreibung)')}")
|
||||
lines.append("")
|
||||
lines.append("Wenn ein vorhandener Skill zur Aufgabe passt: nutze ihn via Tool-Call.")
|
||||
else:
|
||||
lines.append("(noch keine Skills vorhanden)")
|
||||
|
||||
lines.append("")
|
||||
lines.append("### Wann lohnt sich ein neuer Skill?")
|
||||
lines.append("")
|
||||
lines.append("**Skills sind IMMER Python** — eigene venv pro Skill mit den noetigen "
|
||||
"pip-Paketen. Kein apt im Skill, kein systemweiter Install. Python deckt "
|
||||
"in der Regel alles ab (yt-dlp, requests, pypdf, pillow, openpyxl, "
|
||||
"static-ffmpeg, beautifulsoup4, …). Falls etwas WIRKLICH nur via apt geht: "
|
||||
"Stefan fragen ob es ins Brain-Dockerfile soll.")
|
||||
lines.append("")
|
||||
lines.append("**Harte Regel — IMMER Skill anlegen wenn:** die Loesung erfordert eine "
|
||||
"pip-Library. Begruendung: Brain-Container hat keinen persistenten State "
|
||||
"ausser /data/skills/. Ohne Skill wuerde der Install bei jedem "
|
||||
"Container-Restart wiederholt.")
|
||||
lines.append("")
|
||||
lines.append("**Sonst — Skill nur wenn alle vier zutreffen:**")
|
||||
lines.append("")
|
||||
lines.append("1. **Wiederkehrend** — die Aufgabe wird realistisch nochmal gestellt. "
|
||||
"Einmal-Faelle (\"wie spaet ist es jetzt\") kein Skill.")
|
||||
lines.append("2. **Nicht-trivial** — mehrere Schritte. Ein einzelner Shell-Befehl "
|
||||
"(`date`, `hostname`, `ls`) ist KEIN Skill — das macht Bash direkt.")
|
||||
lines.append("3. **Parametrisierbar** — der Skill nimmt Eingaben (URL, Datei, Suchbegriff) "
|
||||
"und gibt ein nuetzliches Ergebnis zurueck.")
|
||||
lines.append("4. **Wiederverwendbar als ganzes** — Stefan wuerde es zukuenftig per Name "
|
||||
"ansprechen (\"mach mir den YouTube zu MP3\") statt jedes Mal zu erklaeren.")
|
||||
lines.append("")
|
||||
lines.append("Wenn nichts installiert werden muss UND nicht alle vier zutreffen: einfach "
|
||||
"die Aufgabe loesen ohne Skill anzulegen. Stefan kann jederzeit sagen "
|
||||
"'bau daraus einen Skill'.")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def build_system_prompt(
    pinned: List[MemoryPoint],
    cold: List[MemoryPoint] | None = None,
    skills: List[dict] | None = None,
) -> str:
    """Complete system prompt: hot + cold + skills."""
    parts = [build_hot_memory_section(pinned)]
    if skills:
        parts.append("")
        parts.append(build_skills_section(skills))
    if cold:
        parts.append("")
        parts.append(build_cold_memory_section(cold))
    return "\n".join(parts).strip()
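

# Illustrative usage sketch (added for this write-up, not part of the original
# module): builds a prompt from an empty pinned list and one made-up skill
# manifest dict; the dict fields mirror the skill.json format used elsewhere.
if __name__ == "__main__":
    demo_skills = [{
        "name": "youtube2mp3",
        "description": "Konvertiert YouTube-Video-URL zu MP3",
        "active": True,
    }]
    print(build_system_prompt(pinned=[], skills=demo_skills))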
@@ -0,0 +1,144 @@
"""
Claude calls through the local proxy.

The proxy (claude-max-api-proxy) exposes an OpenAI-compatible API at
http://proxy:3456/v1/chat/completions. We use non-streaming requests with a
long timeout: Claude Code spawns a fresh CLI process per request (cold start),
and that takes a while.
"""

from __future__ import annotations

import json
import logging
import os
from pathlib import Path
from typing import List, Optional

import httpx
from pydantic import BaseModel

logger = logging.getLogger(__name__)

RUNTIME_CONFIG_FILE = Path("/shared/config/runtime.json")
ENV_MODEL = os.environ.get("BRAIN_MODEL", "claude-sonnet-4")
PROXY_URL = os.environ.get("PROXY_URL", "http://proxy:3456")
PROXY_TIMEOUT_SEC = float(os.environ.get("PROXY_TIMEOUT_SEC", "300"))


def _read_model_from_runtime() -> str:
    """Read brainModel from runtime.json; fall back to the BRAIN_MODEL env var."""
    try:
        if RUNTIME_CONFIG_FILE.exists():
            data = json.loads(RUNTIME_CONFIG_FILE.read_text(encoding="utf-8"))
            m = (data.get("brainModel") or "").strip()
            if m:
                return m
    except Exception as exc:
        logger.warning("runtime.json lesen fehlgeschlagen: %s", exc)
    return ENV_MODEL


DEFAULT_MODEL = _read_model_from_runtime()


class Message(BaseModel):
    role: str  # "system" | "user" | "assistant" | "tool"
    content: Optional[str] = None
    tool_calls: Optional[list] = None
    tool_call_id: Optional[str] = None
    name: Optional[str] = None  # only used for role=tool


class ProxyResult(BaseModel):
    content: str = ""
    tool_calls: list = []  # each: {"id", "name", "arguments" (dict)}
    finish_reason: str = ""


class ProxyClient:
    def __init__(self, base_url: str = PROXY_URL, model: str = DEFAULT_MODEL):
        self.base_url = base_url.rstrip("/")
        self.model = model
        # Persistent client connection, avoids a TCP handshake on every call
        self._client = httpx.Client(timeout=PROXY_TIMEOUT_SEC)

    def chat(self, messages: List[Message], model: Optional[str] = None) -> str:
        """Convenience helper: plain chat without tools; returns only the reply string."""
        result = self.chat_full(messages, tools=None, model=model)
        if not result.content:
            raise RuntimeError("Proxy lieferte leeren content")
        return result.content

    def chat_full(
        self,
        messages: List[Message],
        tools: Optional[list] = None,
        model: Optional[str] = None,
    ) -> ProxyResult:
        """Full chat; can return tool calls when tools are passed in.

        The tools format is OpenAI-style:
        [{"type":"function","function":{"name":..,"description":..,"parameters":{...}}}, ...]
        """
        url = f"{self.base_url}/v1/chat/completions"
        # Pydantic dump with exclude_none so role=tool works without tool_calls
        payload = {
            "model": model or self.model,
            "messages": [m.model_dump(exclude_none=True) for m in messages],
        }
        if tools:
            payload["tools"] = tools
        logger.info("Proxy → %s (%d Messages, %d tools, model=%s)",
                    url, len(messages), len(tools or []), payload["model"])
        try:
            r = self._client.post(url, json=payload)
        except httpx.RequestError as exc:
            raise RuntimeError(f"Proxy unreachable: {exc}") from exc
        if r.status_code != 200:
            raise RuntimeError(f"Proxy HTTP {r.status_code}: {r.text[:300]}")
        try:
            data = r.json()
        except Exception as exc:
            raise RuntimeError(f"Proxy invalid JSON: {exc}") from exc

        choices = data.get("choices") or []
        if not choices:
            raise RuntimeError(f"Proxy ohne choices: {str(data)[:300]}")

        msg = choices[0].get("message") or {}
        finish_reason = choices[0].get("finish_reason", "")

        content = msg.get("content") or ""
        if isinstance(content, list):
            content = "".join(
                part.get("text", "") for part in content if isinstance(part, dict) and part.get("type") == "text"
            )

        tool_calls_raw = msg.get("tool_calls") or []
        tool_calls = []
        for tc in tool_calls_raw:
            fn = tc.get("function") or {}
            args_raw = fn.get("arguments", "{}")
            args: dict
            if isinstance(args_raw, dict):
                args = args_raw
            else:
                try:
                    args = json.loads(args_raw)
                except Exception:
                    args = {"_raw": args_raw}
            tool_calls.append({
                "id": tc.get("id", ""),
                "name": fn.get("name", ""),
                "arguments": args,
            })

        return ProxyResult(content=content or "", tool_calls=tool_calls, finish_reason=finish_reason)

    def close(self):
        try:
            self._client.close()
        except Exception:
            pass
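

# Illustrative usage sketch (added for this write-up, not part of the original
# module): a minimal smoke test, assuming the proxy container is reachable
# under PROXY_URL; the message texts are made up.
if __name__ == "__main__":
    client = ProxyClient()
    try:
        print(client.chat([
            Message(role="system", content="Du bist ARIA."),
            Message(role="user", content="Sag kurz Hallo."),
        ]))
    finally:
        client.close()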
@@ -0,0 +1,14 @@
fastapi==0.115.0
uvicorn[standard]==0.32.0
pydantic==2.9.2
httpx==0.27.2
websockets==13.1

# Vector DB
qdrant-client==1.12.1

# Embeddings (runs on CPU, ~120MB model)
sentence-transformers==3.2.1

# Utility
python-multipart==0.0.12
@@ -0,0 +1,373 @@
"""
Skill manager: the filesystem layer for ARIA's abilities.

Layout:
    /data/skills/<name>/
        skill.json        - manifest
        README.md         - description (style: what, when, how to invoke)
        run.sh            - entry point (sh, python -m, whatever)
        requirements.txt  - optional, for local-venv
        venv/             - created automatically for local-venv
        bin/              - static binaries (for local-bin)
        logs/             - <ts>.json run logs (append-only per run)

Manifest (skill.json):
    {
      "name": "youtube2mp3",
      "description": "Konvertiert YouTube-Video-URL zu MP3",
      "execution": "local-venv" | "local-bin" | "bash",
      "entry": "run.sh",
      "args": [{"name": "url", "required": true}, ...],
      "requires": {"pip": [...], "binaries": [...]},
      "active": true,
      "created_at": "ISO",
      "updated_at": "ISO",
      "last_used": null | "ISO",
      "use_count": 0,
      "version": "1.0",
      "author": "aria" | "stefan"
    }
"""

from __future__ import annotations

import json
import logging
import os
import re
import shutil
import subprocess
import time
import uuid
from datetime import datetime, timezone
from pathlib import Path
from typing import Optional

logger = logging.getLogger(__name__)

SKILLS_DIR = Path(os.environ.get("SKILLS_DIR", "/data/skills"))
SHARED_UPLOADS = Path("/shared/uploads")

VALID_EXECUTIONS = {"local-venv", "local-bin", "bash"}
NAME_RE = re.compile(r"^[a-zA-Z0-9_-]{2,60}$")


def _now() -> str:
    return datetime.now(timezone.utc).isoformat()


def _safe_name(name: str) -> str:
    if not isinstance(name, str) or not NAME_RE.match(name):
        raise ValueError(f"Ungültiger Skill-Name: {name!r}")
    return name


def _skill_dir(name: str) -> Path:
    return SKILLS_DIR / _safe_name(name)


# ─── Listing ────────────────────────────────────────────────────────

def list_skills(active_only: bool = False) -> list[dict]:
    out: list[dict] = []
    if not SKILLS_DIR.exists():
        return out
    for entry in sorted(SKILLS_DIR.iterdir()):
        if not entry.is_dir():
            continue
        manifest = read_manifest(entry.name)
        if manifest is None:
            continue
        if active_only and not manifest.get("active", True):
            continue
        out.append(manifest)
    return out


def read_manifest(name: str) -> Optional[dict]:
    try:
        path = _skill_dir(name) / "skill.json"
        if not path.exists():
            return None
        return json.loads(path.read_text(encoding="utf-8"))
    except Exception as exc:
        logger.warning("Manifest lesen %s: %s", name, exc)
        return None


def write_manifest(name: str, manifest: dict) -> None:
    d = _skill_dir(name)
    d.mkdir(parents=True, exist_ok=True)
    manifest["updated_at"] = _now()
    (d / "skill.json").write_text(json.dumps(manifest, indent=2, ensure_ascii=False), encoding="utf-8")


def read_readme(name: str) -> str:
    path = _skill_dir(name) / "README.md"
    return path.read_text(encoding="utf-8") if path.exists() else ""


# ─── Create / Update / Delete ────────────────────────────────────────

def create_skill(
    name: str,
    description: str,
    execution: str,
    entry_code: str,
    readme: str = "",
    args: Optional[list] = None,
    requires: Optional[dict] = None,
    pip_packages: Optional[list[str]] = None,
    author: str = "aria",
) -> dict:
    """Create a new skill. Raises ValueError on invalid inputs.

    entry_code is written to run.sh or run.py depending on execution.
    For local-venv a venv is created right away and pip_packages are installed.
    """
    name = _safe_name(name)
    if execution not in VALID_EXECUTIONS:
        raise ValueError(f"execution muss eines von {VALID_EXECUTIONS} sein")
    d = _skill_dir(name)
    if d.exists():
        raise ValueError(f"Skill '{name}' existiert bereits — erst loeschen oder updaten")

    d.mkdir(parents=True)
    (d / "logs").mkdir()

    # Entry file: run.sh or run.py
    if execution == "local-venv":
        entry_path = d / "run.py"
        entry_path.write_text(entry_code, encoding="utf-8")
        entry_name = "run.py"
        (d / "requirements.txt").write_text("\n".join(pip_packages or []) + "\n", encoding="utf-8")
    else:
        entry_path = d / "run.sh"
        # Prepend a shebang if there is none
        content = entry_code if entry_code.startswith("#!") else "#!/usr/bin/env bash\nset -euo pipefail\n" + entry_code
        entry_path.write_text(content, encoding="utf-8")
        entry_path.chmod(0o755)
        entry_name = "run.sh"

    # README
    (d / "README.md").write_text(readme or f"# {name}\n\n{description}\n", encoding="utf-8")

    manifest = {
        "name": name,
        "description": description,
        "execution": execution,
        "entry": entry_name,
        "args": args or [],
        "requires": requires or {},
        "active": True,
        "created_at": _now(),
        "updated_at": _now(),
        "last_used": None,
        "use_count": 0,
        "version": "1.0",
        "author": author,
    }
    write_manifest(name, manifest)

    # Build the venv for local-venv
    if execution == "local-venv":
        try:
            _setup_venv(d, pip_packages or [])
        except Exception as exc:
            # venv setup failed → the skill still exists on disk, but disabled
            manifest["active"] = False
            manifest["setup_error"] = str(exc)[:500]
            write_manifest(name, manifest)
            logger.warning("Skill %s: venv-Setup fehlgeschlagen → deaktiviert: %s", name, exc)

    logger.info("Skill erstellt: %s (%s)", name, execution)
    return manifest


def _setup_venv(skill_dir: Path, pip_packages: list[str]) -> None:
    venv = skill_dir / "venv"
    logger.info("venv erstellen: %s", venv)
    subprocess.run(["python", "-m", "venv", str(venv)], check=True, timeout=120)
    pip = venv / "bin" / "pip"
    if pip_packages:
        subprocess.run([str(pip), "install", "--no-cache-dir", *pip_packages], check=True, timeout=600)


def update_skill(name: str, patch: dict) -> dict:
    manifest = read_manifest(name)
    if manifest is None:
        raise ValueError(f"Skill '{name}' nicht gefunden")
    allowed = {"description", "args", "requires", "active", "version", "entry"}
    for k, v in patch.items():
        if k in allowed:
            manifest[k] = v
    write_manifest(name, manifest)
    return manifest


def delete_skill(name: str) -> None:
    d = _skill_dir(name)
    if not d.exists():
        raise ValueError(f"Skill '{name}' nicht gefunden")
    shutil.rmtree(d)
    logger.info("Skill geloescht: %s", name)


# ─── Run ────────────────────────────────────────────────────────────

def run_skill(name: str, args: Optional[dict] = None, timeout_sec: int = 300) -> dict:
    """Run a skill. Args are passed as environment variables
    (prefix ARG_, e.g. ARG_URL for args["url"]).

    Returns: {ok, exit_code, stdout, stderr, duration_sec, log_path}
    """
    manifest = read_manifest(name)
    if manifest is None:
        raise ValueError(f"Skill '{name}' nicht gefunden")
    if not manifest.get("active", True):
        raise ValueError(f"Skill '{name}' ist deaktiviert")

    d = _skill_dir(name)
    entry = manifest.get("entry", "run.sh")
    exec_mode = manifest.get("execution", "bash")

    env = os.environ.copy()
    # Skill args as env vars
    for k, v in (args or {}).items():
        if not re.match(r"^[a-zA-Z][a-zA-Z0-9_]*$", k):
            continue
        env[f"ARG_{k.upper()}"] = str(v)
    env["SKILL_DIR"] = str(d)
    env["SHARED_UPLOADS"] = str(SHARED_UPLOADS)

    # Build the command
    if exec_mode == "local-venv":
        python = d / "venv" / "bin" / "python"
        cmd = [str(python), str(d / entry)]
    elif exec_mode == "local-bin":
        # The skill ships its own bin/; prepend it to PATH
        env["PATH"] = f"{d / 'bin'}:{env.get('PATH', '')}"
        cmd = [str(d / entry)]
    else:  # bash
        cmd = [str(d / entry)]

    log_id = f"{int(time.time())}-{uuid.uuid4().hex[:8]}"
    log_path = d / "logs" / f"{log_id}.json"

    t0 = time.time()
    try:
        proc = subprocess.run(
            cmd, env=env, cwd=str(d),
            capture_output=True, text=True, timeout=timeout_sec,
        )
        out_text = proc.stdout
        err_text = proc.stderr
        exit_code = proc.returncode
        timed_out = False
    except subprocess.TimeoutExpired as exc:
        out_text = exc.stdout or ""
        err_text = (exc.stderr or "") + f"\n[TIMEOUT {timeout_sec}s]"
        exit_code = -1
        timed_out = True
    duration = time.time() - t0

    # Write the run log (truncated so it cannot blow up)
    record = {
        "ts": _now(),
        "args": args or {},
        "exit_code": exit_code,
        "duration_sec": round(duration, 2),
        "stdout": (out_text or "")[:8000],
        "stderr": (err_text or "")[:8000],
        "timed_out": timed_out,
    }
    try:
        log_path.write_text(json.dumps(record, indent=2, ensure_ascii=False), encoding="utf-8")
    except Exception:
        pass

    # Update stats
    manifest["last_used"] = _now()
    manifest["use_count"] = int(manifest.get("use_count", 0)) + 1
    write_manifest(name, manifest)

    record["ok"] = exit_code == 0
    record["log_path"] = str(log_path)
    return record


def list_logs(name: str, limit: int = 50) -> list[dict]:
    d = _skill_dir(name) / "logs"
    if not d.exists():
        return []
    files = sorted(d.glob("*.json"), reverse=True)[:limit]
    out: list[dict] = []
    for f in files:
        try:
            data = json.loads(f.read_text(encoding="utf-8"))
            data["log_id"] = f.stem
            out.append(data)
        except Exception:
            continue
    return out


# ─── Export / Import ────────────────────────────────────────────────

def export_skill(name: str) -> bytes:
    """Pack a skill as tar.gz and return the bytes.
    venv and logs are excluded (they are rebuilt on import)."""
    import io
    import tarfile
    d = _skill_dir(name)
    if not d.exists():
        raise ValueError(f"Skill '{name}' nicht gefunden")
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w:gz") as tar:
        for path in d.iterdir():
            if path.name in ("venv", "logs", "__pycache__"):
                continue
            tar.add(path, arcname=f"{name}/{path.name}")
    return buf.getvalue()


def import_skill(tar_bytes: bytes, overwrite: bool = False) -> dict:
    """Import a skill from tar.gz. Returns the manifest."""
    import io
    import tarfile
    SKILLS_DIR.mkdir(parents=True, exist_ok=True)
    with tarfile.open(fileobj=io.BytesIO(tar_bytes), mode="r:gz") as tar:
        # Find the root name first (= skill name)
        members = tar.getmembers()
        if not members:
            raise ValueError("Leeres Archiv")
        root = members[0].name.split("/", 1)[0]
        name = _safe_name(root)
        d = _skill_dir(name)
        if d.exists():
            if not overwrite:
                raise ValueError(f"Skill '{name}' existiert bereits — overwrite=true setzen")
            shutil.rmtree(d)
        # Extract, preventing path traversal
        for m in members:
            target = SKILLS_DIR / m.name
            if not str(target.resolve()).startswith(str(SKILLS_DIR.resolve())):
                raise ValueError(f"Unsicherer Pfad im Archiv: {m.name}")
        tar.extractall(SKILLS_DIR)
    # Create the logs directory if it was missing
    (d / "logs").mkdir(exist_ok=True)
    # Rebuild the venv for local-venv skills
    manifest = read_manifest(name) or {}
    if manifest.get("execution") == "local-venv":
        req_file = d / "requirements.txt"
        pip_packages: list[str] = []
        if req_file.exists():
            pip_packages = [l.strip() for l in req_file.read_text().splitlines() if l.strip() and not l.startswith("#")]
        try:
            _setup_venv(d, pip_packages)
        except Exception as exc:
            logger.warning("Skill-Import %s: venv-Setup fehlgeschlagen: %s", name, exc)
            manifest["active"] = False
            manifest["setup_error"] = str(exc)[:500]
            write_manifest(name, manifest)
    return manifest
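

# Illustrative end-to-end sketch (added for this write-up, not part of the
# original module): create a throwaway bash skill, run it with one ARG_ value,
# and delete it again. Assumes SKILLS_DIR is writable.
if __name__ == "__main__":
    create_skill(
        name="hello-demo",
        description="Echoes a greeting (demo only)",
        execution="bash",
        entry_code='echo "Hallo ${ARG_NAME:-Welt}"\n',
    )
    result = run_skill("hello-demo", {"name": "Stefan"})
    print(result["exit_code"], result["stdout"].strip())
    delete_skill("hello-demo")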
@@ -52,15 +52,61 @@ Fuer Web-Anfragen: **WebFetch** oder **Bash mit curl**. Niemals sagen "ich habe
4. **Commit regularly**, with meaningful commit messages.
5. **Keep a daily log**: what was done, what is still open.

## Returning files to Stefan (CRITICAL)

**This is the ONLY way Stefan can get at files. Without these steps he
neither sees nor receives the file.**

### Rule 1: storage location

Store files for Stefan EXCLUSIVELY under `/shared/uploads/`.

NEVER in:
- `/home/node/.openclaw/workspace/...` (that is ONLY your working directory;
  Stefan has no access to it)
- `/tmp/...`, `/root/...`, or anywhere else

Use the `aria_` filename prefix so cleanup scripts can attribute the files:

```
/shared/uploads/aria_<beschreibender_name>.<ext>
```

Examples: `aria_termin_zusage.pdf`, `aria_einkaufsliste.md`,
`aria_logs_2026-05-10.zip`.

### Rule 2: marker in the reply text

Put the marker ONCE at the end of your reply:

```
[FILE: /shared/uploads/aria_<name>.<ext>]
```

WITHOUT this marker the file does NOT appear in the app / Diagnostic.

Multiple files: several `[FILE: ...]` markers at the end, each on its
own line.

### Example: the complete workflow

User: "Schreib mir ein Lasagne-Rezept als md-Datei"

1. You write the file: `Write` tool with path `/shared/uploads/aria_lasagne.md`
2. Reply to Stefan:

```
Hier dein Lasagne-Rezept — Ragu am Vortag, echter Parmesan,
Ruhezeit nicht skippen. Beim Schichten Bechamel auf jede Lage.

[FILE: /shared/uploads/aria_lasagne.md]
```

The marker is removed from the visible text automatically and shown as an
attachment bubble. Stefan taps it → the file opens.

## Voice

| Voice | Model | When |
|--------|--------|------|
| **Ramona** (female) | `de_DE-ramona-low` | Everyday use, replies, conversations (default) |
| **Thorsten** (male, deep) | `de_DE-thorsten-high` | Epic moments, alerts, special events |

**Thorsten speaks for:**
- Build successfully deployed
- Ticket solved / task completed
- Critical alert (server down, security warning)
- When Stefan says "So soll es sein"

TTS runs through F5-TTS (voice cloning, gaming PC). Stefan can clone his own
voices from audio samples (Diagnostic → Stimmen → Stimme klonen)
and select them in the app and Diagnostic.
@@ -78,12 +78,101 @@ Wenn ein Tool nicht klappt, probiere die Alternative. Niemals sagen "ich habe ke
- Destructive operations (deleting files, dropping databases)
- Push to main

## Returning files to Stefan (CRITICAL)

**This is the ONLY way Stefan can get at files. Without these steps he
neither sees nor receives the file.**

### Rule 1: storage location

Store files for Stefan EXCLUSIVELY under `/shared/uploads/`.

NEVER in:
- `/home/node/.openclaw/workspace/...` (ONLY your working directory;
  Stefan has no access)
- `/tmp/...`, `/root/...`, or anywhere else

Use the `aria_` filename prefix:

```
/shared/uploads/aria_<beschreibender_name>.<ext>
```

Examples: `aria_termin_zusage.pdf`, `aria_einkaufsliste.md`,
`aria_logs_2026-05-10.zip`.

### Rule 2: marker in the reply text

Put the marker ONCE at the end of your reply:

```
[FILE: /shared/uploads/aria_<name>.<ext>]
```

WITHOUT this marker the file does NOT appear in the app / Diagnostic.

Multiple files: several `[FILE: ...]` markers at the end, each on its
own line.

**IMPORTANT: the file MUST exist before you set the marker.**
Markers pointing at non-existent paths are silently filtered out, and Stefan
gets a note that you promised a file but did not create it. If you cannot
generate a MIDI file, for example, say so openly instead of just setting the
marker. If in doubt, verify with `Bash` + `ls -la /shared/uploads/aria_<name>.<ext>`
that the file is really there.

### Example: the complete workflow

User: "Schreib mir ein Lasagne-Rezept als md-Datei"

1. You write the file: `Write` tool with path `/shared/uploads/aria_lasagne.md`
2. Reply to Stefan:

```
Hier dein Lasagne-Rezept — Ragu am Vortag, echter Parmesan,
Ruhezeit nicht skippen. Beim Schichten Bechamel auf jede Lage.

[FILE: /shared/uploads/aria_lasagne.md]
```

The marker is removed from the visible text automatically and shown as an
attachment bubble. Stefan taps it → the file opens in the matching default
application.

### External images/files: ALWAYS download, never just link

When Stefan wants an image or a file from the web (Wikipedia,
Wikimedia Commons, a sample PDF, etc.):

Do NOT just put the URL into the reply; the image then stays visible only for
as long as the external server is alive.

INSTEAD:
1. Download it with `Bash` + curl/wget to `/shared/uploads/aria_<name>.<ext>`
2. Hand it out as an attachment with the `[FILE: ...]` marker

Example: User says "Zeig mir ein Bild von Micky Maus"

```bash
curl -sL "https://upload.wikimedia.org/wikipedia/commons/7/7f/Mickey_Mouse.svg" \
  -o /shared/uploads/aria_mickey_mouse.svg
```

Reply:
```
Hier Micky Maus — offizielles SVG von Wikimedia Commons (Public Domain).

[FILE: /shared/uploads/aria_mickey_mouse.svg]
```

That way the image stays in the chat history permanently, even if the wiki URL
later goes offline or moves.

## Voice

| Voice | Model | When |
|--------|--------|------|
| **Ramona** (female) | `de_DE-ramona-low` | Everyday use, replies, conversations (default) |
| **Thorsten** (male, deep) | `de_DE-thorsten-high` | Epic moments, alerts, special events |

TTS runs through F5-TTS on the Gamebox (voice cloning). Stefan can clone his
own voices from audio samples and select them in the app/Diagnostic.

## Memory

@@ -147,4 +236,4 @@ Danach den Eintrag in `memory/MEMORY.md` (Index) verlinken.
### Network
- **aria-net:** internal Docker network (proxy, aria-core)
- **RVS:** rendezvous server in the data center; relay for the Android app
- **Bridge:** Voice Bridge (Whisper STT + Piper TTS); shares its network namespace with aria-core
- **Bridge:** Voice Bridge (orchestrates STT/TTS via the Gamebox bridges); shares its network namespace with aria-core
@@ -1,10 +1,10 @@
# Stefan: User Preferences
# <Username>: User Preferences

## General

- **Language:** German
- **Communication:** direct, no bullshit, humour welcome
- **Role:** boss, client, developer at HackerSoft Oldenburg
- **Language:** <e.g. German>
- **Communication:** <e.g. direct, no bullshit, humour welcome>
- **Role:** <e.g. boss, client, developer at XYZ>

## Confirmation required for

@@ -12,7 +12,6 @@
- Push to main
- Changes to customer systems
- Server commands that cannot be undone
- Reinstalling Windows (back up the data first!)

## Autonomous work is OK for

@@ -28,8 +27,10 @@
| Tool | Purpose |
|------|-------|
| **Proxmox** | VM infrastructure (ARIA's home) |
| **Gitea** | Code hosting (gitea.hackersoft.de) |
| **OpenCRM** | Customer management |
| **STARFACE** | Telephony |
| **RustDesk** | Remote IT support at customer sites |
| **<example tool>** | <purpose> |

<!--
This file is a template. Copy it locally as USER.md and fill it with your own
preferences and tool stack. USER.md itself is excluded from the repo via
.gitignore.
-->
@@ -1,14 +0,0 @@
# Bridge → aria-core (OpenClaw gateway)
# The bridge shares its network namespace with aria-core (network_mode: service:aria)
# → localhost is aria-core
ARIA_CORE_WS=ws://127.0.0.1:18789

# Wake word
WAKE_WORD=aria

# Whisper STT: switched at runtime in the Diagnostic ("Whisper" section) and
# stored in /shared/config/voice_config.json. The value here is only the
# initial default on first start.
# Options: tiny | base | small | medium | large-v3
WHISPER_MODEL=medium
WHISPER_LANGUAGE=de
@@ -1,11 +0,0 @@
{
  "version": 1,
  "profiles": {
    "openai-proxy": {
      "provider": "openai",
      "default": true,
      "apiKey": "not-needed",
      "baseUrl": "http://proxy:3456/v1"
    }
  }
}
@@ -1,6 +0,0 @@
# OpenClaw (aria-core) configuration
# This file is mounted into the container as /workspace/.env
#
# IMPORTANT: ANTHROPIC_API_KEY and ANTHROPIC_BASE_URL are intentionally NOT set!
# Otherwise OpenClaw would call the real Anthropic API directly (401 because there is no API key).
# Use only the OpenAI-compatible proxy instead.
@@ -1,137 +0,0 @@
# OpenClaw Tool Permissions (as of 2026-03-15)

## The problem (SOLVED)

ARIA has TWO tool systems at the same time: Claude Code tools AND OpenClaw-native tools.
The model, however, only has access to the **Claude Code tools** (via the proxy), not to the OpenClaw-native tools.

### Root cause: THREE problems at once

```
OpenClaw (aria-core) → API Request → claude-max-api-proxy (aria-proxy) → Claude Code CLI (--print Mode)
                                                                              ↓
                                                     Tools: WebFetch, Bash, etc. (Claude Code)
                                                     NOT:   web_fetch, exec (OpenClaw-native)
```

**Problem 1: the proxy uses `--print` mode**
- `claude-max-api-proxy` invokes the Claude Code CLI with `--print --output-format stream-json`
- The prompt is passed as a single string; no tool definitions from OpenClaw are forwarded
- The model only sees Claude Code's built-in tools (WebFetch, Bash, etc.)
- OpenClaw-native tools (web_fetch, exec) exist only at the gateway level and never reach the model

**Problem 2: BOOTSTRAP.md pointed at the wrong tools**
- BOOTSTRAP.md said: "NIEMALS WebFetch benutzen, stattdessen web_fetch"
- But web_fetch does not exist in the Claude Code CLI context
- And WebFetch was the only tool that would have worked
- → The model had no tools it was "allowed" to use

**Problem 3: settings.json in the proxy was empty**
- `/root/.claude/settings.json` contained only `{}` (no permissions)
- The Claude Code CLI in headless mode cannot grant tool approvals interactively
- → Even if the model wanted to use WebFetch, it was not pre-approved

## The solution

### Fix 1: BOOTSTRAP.md + AGENT.md rewritten

**Before (WRONG):**
- "NIEMALS WebFetch benutzen — hat Permission-Probleme"
- "Benutze web_fetch (OpenClaw-nativ)"

**After (CORRECT):**
- "WebFetch — URLs abrufen, Webseiten lesen, APIs aufrufen, Wetter abfragen"
- "Bash — Shell-Befehle ausfuehren (curl, ssh, docker, etc.)"
- "Niemals sagen 'ich habe keinen Zugriff' — du hast Zugriff auf alles"

### Fix 2: `CLAUDE_CODE_BUBBLEWRAP=1` + `--dangerously-skip-permissions`

**The key fix.** Two lines in `docker-compose.yml`:

```yaml
# 1. sed patch: insert --dangerously-skip-permissions into manager.js
sed -i 's/"--no-session-persistence",/"--no-session-persistence","--dangerously-skip-permissions",/' $$DIST/subprocess/manager.js &&

# 2. Environment variable: bypass the root check
environment:
  - CLAUDE_CODE_BUBBLEWRAP=1
```

**Why both are needed:**
- `--dangerously-skip-permissions` bypasses all tool permission checks in the Claude Code CLI
- But: the Claude Code CLI blocks this flag when it runs as root
- `CLAUDE_CODE_BUBBLEWRAP=1` skips the root check (found in the minified `cli.js`)
- The proxy container (`node:22-alpine`) runs as root → without BUBBLEWRAP it does not work

**Resulting CLI arguments:**
```
claude --print --output-format stream-json --verbose --include-partial-messages \
  --model opus --no-session-persistence --dangerously-skip-permissions "prompt"
```

## How the proxy works internally

```
openai-to-cli.js:      OpenAI messages → a single prompt string
                       system    → <system>...</system>
                       user      → passed through directly
                       assistant → <previous_response>...</previous_response>

subprocess/manager.js: spawns `claude --print ... --dangerously-skip-permissions "{prompt}"`

cli-to-openai.js:      Claude CLI JSON stream → OpenAI chat completion chunks
```

The proxy forwards NO tool definitions from OpenClaw.
Tool calls happen INTERNALLY in the Claude Code CLI and are transparent to OpenClaw.

## Permission architecture

**Granular tool control is NOT possible.** It is all or nothing:
- `--dangerously-skip-permissions` ON → ARIA can use all Claude Code tools
- `--dangerously-skip-permissions` OFF → ARIA can use no tools at all

OpenClaw's own permissions (`tools.allow/deny` in `openclaw.json`) have **no effect** on the
Claude Code tools; those run entirely on the proxy side.

## What did NOT work (17 attempts)

1. **settings.json in aria-core**: OpenClaw does NOT use Claude Code's settings.json
2. **tools.allow with PascalCase** (WebFetch, Grep): OpenClaw does not know these names
3. **tools.allow with snake_case** (web_fetch): only exec, read, write, edit are recognised
4. **tools.allow with wildcard** `["*"]`: did not help
5. **tools.allow empty + tools.profile: "full"**: only works without other errors
6. **System prompt instruction alone**: not enough while the tools are blocked
7. **exec-approvals wildcard alone**: not enough with a config validation error present
8. **`openclaw config unset tools.exec.ask`**: the CLI does not know that path
9. **BOOTSTRAP.md with OpenClaw tool names**: the tools exist only at the gateway level
10. **settings.json in the proxy without the BOOTSTRAP.md fix**: BOOTSTRAP.md forbade the tools
11. **tools.byProvider.proxy.profile full**: no effect
12. **settings.json + BOOTSTRAP.md without --dangerously-skip-permissions**: `--print` ignores settings.json
13. **Manual `docker exec sed`**: overwritten on every restart
14. **`--dangerously-skip-permissions` without BUBBLEWRAP**: blocked by the root check
15. **`--allowedTools`**: the variadic argument swallows the prompt
16. **`--permission-mode bypassPermissions`**: same root check
17. **Non-root user (`su node`)**: auth path problems, credentials unreachable

## Important paths

### aria-core (OpenClaw)
- `/home/node/.openclaw/openclaw.json`: OpenClaw main config
- `/home/node/.openclaw/exec-approvals.json`: exec approvals
- `/tmp/openclaw/openclaw-YYYY-MM-DD.log`: daily log

### aria-proxy (Claude Code CLI)
- `/root/.claude/.credentials.json`: auth credentials (NOT in /root/.config/claude/)
- `/usr/local/lib/node_modules/claude-max-api-proxy/dist/`: proxy source
- `/usr/local/lib/node_modules/@anthropic-ai/claude-code/cli.js`: Claude Code CLI (contains the root check)

## OpenClaw CLI reference

```bash
openclaw config get/set/unset <path>   # manage config
openclaw approvals get                 # show exec approvals
openclaw approvals allowlist add       # allow an exec pattern
openclaw doctor [--fix]                # health check
openclaw gateway status                # gateway status
```
+29
-118
@@ -1,104 +1,23 @@
#!/bin/bash
# ════════════════════════════════════════════════
# ARIA: initial setup after docker compose up
# Run once; everything persists afterwards.
#
# OpenClaw (aria-core) has been torn out. The setup now only creates
# the SSH key for access to the VM (aria-wohnung).
# Brain + proxy share the same key; both have aria-data/ssh mounted
# as a volume.
# ════════════════════════════════════════════════
set -e

echo "=== ARIA Setup ==="
echo ""

# Wait until aria-core is running
echo "[1/7] Warte auf aria-core..."
until docker inspect -f '{{.State.Running}}' aria-core 2>/dev/null | grep -q true; do
  sleep 2
  echo " ... warte..."
done
echo " aria-core laeuft."

# Fix permissions: the Docker volumes belong to root, OpenClaw runs as node
echo ""
echo "[2/7] Fixe Permissions auf /home/node/.openclaw und /home/node/.claude..."
docker exec -u root aria-core chown -R node:node /home/node/.openclaw
docker exec -u root aria-core chown -R node:node /home/node/.claude 2>/dev/null || true
docker exec -u root aria-core chmod 700 /home/node/.openclaw
echo " Permissions OK."

# Write the OpenClaw config: custom provider for claude-max-api-proxy
echo ""
echo "[3/7] Schreibe openclaw.json (Proxy-Provider + Model + Tools)..."
docker exec aria-core sh -c 'cat > /home/node/.openclaw/openclaw.json << '"'"'INNEREOF'"'"'
{
  "meta": {
    "lastTouchedVersion": "2026.3.8"
  },
  "gateway": {
    "mode": "local"
  },
  "agents": {
    "defaults": {
      "model": {
        "primary": "proxy/claude-sonnet-4"
      },
      "compaction": {
        "mode": "safeguard"
      },
      "timeoutSeconds": 900,
      "maxConcurrent": 4,
      "subagents": {
        "maxConcurrent": 8
      }
    }
  },
  "models": {
    "providers": {
      "proxy": {
        "api": "openai-completions",
        "baseUrl": "http://proxy:3456/v1",
        "apiKey": "not-needed",
        "models": [
          { "id": "claude-sonnet-4", "name": "claude-sonnet-4" },
          { "id": "claude-opus-4", "name": "claude-opus-4" }
        ]
      }
    }
  },
  "tools": {
    "profile": "full",
    "web": {
      "fetch": {
        "enabled": true
      }
    },
    "exec": {
      "host": "gateway"
    }
  },
  "messages": {
    "ackReactionScope": "all"
  },
  "commands": {
    "native": "auto",
    "nativeSkills": "auto",
    "restart": true,
    "ownerDisplay": "raw"
  }
}
INNEREOF'
echo " Config geschrieben."

# Exec-approvals wildcard: allows tool execution in headless mode
echo ""
echo "[4/7] Setze exec-approvals Wildcard..."
docker exec aria-core openclaw approvals allowlist add --agent "*" "*" 2>/dev/null || true
echo " Approvals gesetzt."

# Generate the SSH key for VM access
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
SSH_DIR="$SCRIPT_DIR/aria-data/ssh"
echo ""
echo "[5/7] SSH-Key fuer VM-Zugriff..."

echo "=== ARIA Setup ==="

mkdir -p "$SSH_DIR"

if [ ! -f "$SSH_DIR/id_ed25519" ]; then
  echo "Generiere SSH-Key fuer aria-wohnung..."
  ssh-keygen -t ed25519 -f "$SSH_DIR/id_ed25519" -N "" -C "aria@aria-wohnung"
  cat > "$SSH_DIR/config" << 'SSHEOF'
Host aria-wohnung
@@ -108,34 +27,26 @@ Host aria-wohnung
  StrictHostKeyChecking accept-new
SSHEOF
  chmod 600 "$SSH_DIR/id_ed25519"
  chmod 644 "$SSH_DIR/id_ed25519.pub"
  chmod 644 "$SSH_DIR/config"
  echo " Key generiert."
  # Append the public key directly to root's authorized_keys (the script runs as root on the VM)
  mkdir -p /root/.ssh
  chmod 700 /root/.ssh
  cat "$SSH_DIR/id_ed25519.pub" >> /root/.ssh/authorized_keys
  chmod 600 /root/.ssh/authorized_keys
  echo " Public Key in /root/.ssh/authorized_keys eingetragen."
  chmod 644 "$SSH_DIR/id_ed25519.pub" "$SSH_DIR/config"

  # Append the public key directly to /root/.ssh/authorized_keys
  # (the script runs as root on the VM aria-wohnung)
  if [ -w /root/.ssh ] || [ -w /root ]; then
    mkdir -p /root/.ssh
    chmod 700 /root/.ssh
    cat "$SSH_DIR/id_ed25519.pub" >> /root/.ssh/authorized_keys
    chmod 600 /root/.ssh/authorized_keys
    echo " Public Key in /root/.ssh/authorized_keys eingetragen."
  else
    echo " Hinweis: konnte /root/.ssh/authorized_keys nicht schreiben."
    echo " Pubkey manuell eintragen:"
    cat "$SSH_DIR/id_ed25519.pub"
  fi
else
  echo " Key existiert bereits."
  echo "SSH-Key existiert bereits — uebersprungen."
fi

# Fix permissions inside the container
echo ""
echo "[6/7] Fixe SSH-Permissions..."
docker exec -u root aria-core chown -R node:node /home/node/.ssh 2>/dev/null || true

# Restart so the gateway picks up the config
echo ""
echo "[7/7] Starte aria-core neu..."
docker restart aria-core

echo ""
echo "=== Setup fertig ==="
echo ""
echo "Teste mit: docker logs aria-core --tail 20"
echo "Erwartete Zeile: 'agent model: proxy/claude-sonnet-4'"
echo ""
echo "SSH-Test: docker exec aria-core ssh aria-wohnung hostname"
echo "Tool-Test: Neue Session anlegen, dann 'Wie wird das Wetter in Bremen?' fragen"
echo "Naechster Schritt: docker compose up -d"
echo "Test: docker exec aria-brain ssh aria-wohnung hostname"

+529
-152
@@ -1,17 +1,13 @@
"""
ARIA Voice Bridge: main module.

Connects the Android app (via RVS) to ARIA-Core and provides
local speech input (wake word + Whisper STT) and speech output (Piper TTS).
Connects the Android app (via RVS) to ARIA-Core. Speech input runs
through the whisper-bridge (Gamebox, faster-whisper on CUDA), speech output
through the f5tts-bridge (voice cloning, sentence-wise PCM streaming).

Message flow:
    App → RVS → Bridge → aria-core
    aria-core → Bridge → RVS → App
                       → speaker (TTS)

Voices:
    - Ramona (de_DE-ramona-low): everyday use, conversations
    - Thorsten (de_DE-thorsten-high): epic moments, alerts
    aria-core → Bridge → f5tts-bridge → PCM → RVS → App
"""

from __future__ import annotations
@@ -20,7 +16,9 @@ import asyncio
import base64
import json
import logging
import mimetypes
import os
import re
import signal
import ssl
import sys
@@ -50,7 +48,6 @@ logger = logging.getLogger("aria-bridge")

# ── Configuration ───────────────────────────────────────────

CONFIG_PATH = Path("/config/aria.env")
VOICES_DIR = Path("/voices")
CORE_WS_URL = os.getenv("ARIA_CORE_WS", "ws://127.0.0.1:18789")
CORE_AUTH_TOKEN = os.getenv("ARIA_AUTH_TOKEN", "")  # OpenClaw gateway token
@@ -70,38 +67,22 @@ BLOCK_SIZE = 1280 # 80ms bei 16kHz — gut fuer Wake-Word-Erkennung
RECORD_SECONDS = 8  # max recording duration after the wake word

def load_config() -> dict[str, str]:
    """Loads the configuration.

    Order (highest priority last):
      1. /config/aria.env (bind mount)
      2. /shared/config/runtime.json (maintained centrally via the Diagnostic UI)

    Values from runtime.json override the env file.
    """Loads the configuration exclusively from /shared/config/runtime.json
    (maintained centrally via the Diagnostic UI). Tokens + RVS settings come
    in via ENV (see docker-compose).
    """
    config: dict[str, str] = {}
    if CONFIG_PATH.exists():
        for line in CONFIG_PATH.read_text().splitlines():
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            if "=" in line:
                key, _, value = line.partition("=")
                config[key.strip()] = value.strip()
        logger.info("Konfiguration geladen aus %s", CONFIG_PATH)
    else:
        logger.warning("Keine Konfiguration gefunden: %s", CONFIG_PATH)

    # Runtime overrides from the central shared volume (Diagnostic UI)
    runtime_path = Path("/shared/config/runtime.json")
    if runtime_path.exists():
        try:
            runtime = json.loads(runtime_path.read_text())
            overrides = {k: str(v) for k, v in runtime.items() if v not in (None, "")}
            if overrides:
                config.update(overrides)
                logger.info("Runtime-Overrides geladen: %s", sorted(overrides.keys()))
            config = {k: str(v) for k, v in runtime.items() if v not in (None, "")}
            if config:
                logger.info("Runtime-Config geladen: %s", sorted(config.keys()))
        except Exception as e:
            logger.warning("runtime.json konnte nicht gelesen werden: %s", e)
    else:
        logger.info("Keine runtime.json — Diagnostic schreibt sie beim ersten Konfigurieren")
    return config
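
# Note added for this write-up (assumption, not in the original file): the
# runtime.json read above is treated as a flat key/value map written by the
# Diagnostic UI, e.g. {"brainModel": "claude-sonnet-4", ...}; every non-empty
# value is stringified and taken over into the config dict.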
@@ -493,7 +474,7 @@ class ARIABridge:
        self.current_mode = self._load_persisted_mode()
        self.running = False

        # Components (TTS: always XTTS remote, Piper was removed)
        # Components (TTS: F5-TTS remote on the Gamebox, local TTS was removed)
        self.tts_enabled = True
        self.xtts_voice = ""
        self._f5tts_config: dict = {}
@@ -551,6 +532,22 @@ class ARIABridge:
        # Affects the timeout for stt_request: with "loading" we wait longer,
        # because the model may still need ~1-2 min to download on the first request.
        self._remote_stt_ready: bool = False
        # User message counter for auto-compact. With an overly long conversation
        # the argv list blows up when spawning the Claude subprocess (E2BIG).
        # When COMPACT_AFTER is reached → reset sessions + restart the container.
        # The counter does not survive a bridge restart (a fresh counter at start is fine).
        # _user_message_count + _compact_after are gone: auto-compact was
        # aria-core-specific (E2BIG protection). The new brain loop does not
        # have that limitation.
        # Pending files: when the app sends an image + text at the same time, two
        # separate RVS events arrive ('file' and 'chat'). We buffer the files
        # briefly and merge them with the following chat text into a single
        # request to aria-core. Otherwise ARIA answers twice (once "waiting for
        # instructions" for the file, once for the chat text).
        # List of tuples: (file_path, name, file_type, size_kb, width, height)
        self._pending_files: list[tuple[str, str, str, int, int, int]] = []
        self._pending_files_flush_task: Optional[asyncio.Task] = None
        self._PENDING_FILES_WINDOW_SEC: float = 0.8

    def initialize(self) -> None:
        """Initialises all components.
@@ -585,7 +582,7 @@ class ARIABridge:
            logger.info("RVS: %s (Token: %s...)", self.rvs_url, self.rvs_token[:8])
        else:
            logger.warning("RVS nicht konfiguriert — App-Verbindung deaktiviert")
            logger.warning(" Setze RVS_HOST, RVS_PORT, RVS_TOKEN in /config/aria.env")
            logger.warning(" Setze RVS_HOST, RVS_PORT, RVS_TOKEN in der .env auf der VM")
        logger.info("Modus: %s %s", self.current_mode.config.emoji, self.current_mode.config.name)

    # ── aria-core connection (OpenClaw gateway protocol) ───
@@ -672,7 +669,10 @@ class ARIABridge:
        while self.running:
            try:
                logger.info("[core] Verbinde: %s", self.ws_url)
                async with websockets.connect(self.ws_url) as ws:
                # max_size=50MB so that large image/voice uploads get through.
                # The python-websockets default is only 1 MiB → a 5MB JPEG blows
                # the limit and the connection is dropped silently.
                async with websockets.connect(self.ws_url, max_size=50 * 1024 * 1024) as ws:
                    # Perform the OpenClaw handshake
                    if not await self._openclaw_handshake(ws):
                        logger.error("[core] Handshake fehlgeschlagen — Reconnect")
@@ -778,13 +778,29 @@ class ARIABridge:
                    await self._emit_activity("idle", "")
                    if not text:
                        logger.warning("[core] chat final ohne Text: %s", json.dumps(payload)[:200])
                        # Inform app + Diagnostic instead of staying silent; otherwise the
                        # UI waits forever for a reply that never comes. Happens e.g.
                        # when Claude Vision rejects the image (empty reply) or the reply
                        # consisted only of tool calls without a final text.
                        await self._send_to_rvs({
                            "type": "chat",
                            "payload": {
                                "text": "[Hinweis] Antwort ohne Text — moeglicherweise Bild zu gross fuer Vision-API oder reine Tool-Ausfuehrung.",
                                "sender": "aria",
                            },
                            "timestamp": int(asyncio.get_event_loop().time() * 1000),
                        })
                        return
                    logger.info("[core] Antwort: '%s'", text[:80])
                    await self._process_core_response(text, payload)
                    return

                if state == "error":
                    error = payload.get("error", "Unbekannt")
                    # OpenClaw uses errorMessage instead of error for state=error.
                    error = (payload.get("error")
                             or payload.get("errorMessage")
                             or payload.get("message")
                             or "Unbekannt")
                    logger.error("[core] Chat-Fehler: %s", error)
                    self._last_chat_final_at = asyncio.get_event_loop().time()
                    await self._emit_activity("idle", "")
@@ -820,7 +836,12 @@ class ARIABridge:
            return

        if event_name == "chat:error":
            error = payload.get("error", payload.get("message", "Unbekannt"))
            # OpenClaw sometimes puts the real text into errorMessage
            # (state=error). Previously only error/message were checked → "Unbekannt".
            error = (payload.get("error")
                     or payload.get("errorMessage")
                     or payload.get("message")
                     or "Unbekannt")
            logger.error("[core] Chat-Fehler (legacy): %s", error)
            await self._send_to_rvs({
                "type": "chat",
@@ -853,6 +874,51 @@ class ARIABridge:
            pass
        return payload.get("text", "")

    # File marker pattern: `[FILE: /path/to/file.ext]` (the path may contain
    # spaces, any extension). May occur multiple times in one text.
    _FILE_MARKER_RE = re.compile(r"\[FILE:\s*(/shared/uploads/[^\]]+?)\s*\]", re.IGNORECASE)
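    # Illustrative example (added for this write-up): for the reply
    #   "Hier dein Rezept. [FILE: /shared/uploads/aria_lasagne.md]"
    # the regex yields one match with group(1) == "/shared/uploads/aria_lasagne.md".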

    def _extract_file_markers(self, text: str) -> tuple[str, list[dict], list[str]]:
        """Finds [FILE: /shared/uploads/...] markers.
        Returns (cleaned_text, valid_files, missing_paths)."""
        files: list[dict] = []
        missing: list[str] = []
        for m in self._FILE_MARKER_RE.finditer(text):
            path = m.group(1).strip()
            if not path.startswith("/shared/uploads/"):
                logger.warning("[core] FILE-Marker mit unerlaubtem Pfad ignoriert: %s", path)
                continue
            if not os.path.isfile(path):
                logger.warning("[core] FILE-Marker zeigt auf nicht existente Datei: %s", path)
                missing.append(path)
                continue
            name = os.path.basename(path)
            mime, _ = mimetypes.guess_type(path)
            size = os.path.getsize(path)
            files.append({
                "serverPath": path,
                "name": name,
                "mimeType": mime or "application/octet-stream",
                "size": size,
            })
        cleaned = self._FILE_MARKER_RE.sub("", text).strip()
        # Collapse runs of blank lines into a single one
        cleaned = re.sub(r"\n{3,}", "\n\n", cleaned)
        return cleaned, files, missing

    async def _broadcast_aria_file(self, file_info: dict) -> None:
        """ARIA created a file for the user: inform app + Diagnostic."""
        logger.info("[rvs] ARIA-Datei rausgeben: %s (%s, %dKB)",
                    file_info["name"], file_info["mimeType"], file_info["size"] // 1024)
        try:
            await self._send_to_rvs({
                "type": "file_from_aria",
                "payload": file_info,
                "timestamp": int(asyncio.get_event_loop().time() * 1000),
            })
        except Exception as e:
            logger.warning("[rvs] file_from_aria broadcast fehlgeschlagen: %s", e)

    async def _process_core_response(self, text: str, payload: dict) -> None:
        """Processes a finished reply from aria-core.
@@ -867,6 +933,22 @@ class ARIABridge:
            logger.info("[core] NO_REPLY empfangen — Antwort still verworfen")
            return

        # Extract file markers `[FILE: /shared/uploads/aria_xyz.pdf]`:
        # ARIA uses them to hand files to the user (images, PDFs, etc.).
        # The marker is removed from the reply text (TTS must not read it
        # aloud) and sent in parallel as a file_from_aria event.
        text, aria_files, missing_files = self._extract_file_markers(text)
        for f in aria_files:
            await self._broadcast_aria_file(f)
        # For missing files: inform the user (otherwise they only see silent
        # losses; ARIA wrote the marker but never actually created the file).
        if missing_files:
            missing_list = "\n".join(f" • {os.path.basename(p)}" for p in missing_files)
            text = (text + "\n\n[Hinweis] Folgende Dateien hat ARIA zwar erwaehnt "
                    f"aber nicht erstellt:\n{missing_list}\n"
                    "Bitte ARIA bitten, sie wirklich zu schreiben.").strip()

        metadata = payload.get("metadata", {})
        is_critical = metadata.get("critical", False)
        requested_voice = metadata.get("voice")
@@ -907,18 +989,13 @@ class ARIABridge:
            logger.info("[core] TTS unterdrueckt (Modus: %s)", self.current_mode.config.name)
            return

        # Determine the voice: app override for this request > global default voice
        # Determine the voice: app override (set by the last chat event) > global
        # default voice. The override is NOT consumed per reply; otherwise a
        # multi-turn reply from ARIA (tool use + final answer) would fall back to
        # the old default voice from the second TTS call on. The override stays
        # valid until the next chat event, where it is either replaced or cleared.
        xtts_voice = self._next_voice_override or getattr(self, 'xtts_voice', '')
        # Consume the override (it applies only to exactly this next reply)
        if self._next_voice_override:
            logger.info("[core] Nutze Voice-Override: %s", self._next_voice_override)
            self._next_voice_override = None

        # Take the speed from the app override as well (fallback 1.0)
        xtts_speed = self._next_speed_override or 1.0
        if self._next_speed_override:
            logger.info("[core] Nutze Speed-Override: %.2fx", self._next_speed_override)
            self._next_speed_override = None

        tts_text = tts_text_preview or text
        if not tts_text:
@@ -1024,32 +1101,178 @@ class ARIABridge:
|
||||
except Exception as e:
|
||||
logger.debug("[session] Diagnostic nicht erreichbar (%s) — nutze '%s'", e, self._session_key)
|
||||
|
||||
async def send_to_core(self, text: str, source: str = "bridge") -> None:
|
||||
"""Sendet Text an aria-core (OpenClaw chat.send Protokoll)."""
|
||||
if self.ws_core is None:
|
||||
logger.error("[core] Nicht verbunden — Nachricht verworfen: '%s'", text[:60])
|
||||
def _build_core_text(self, text: str, interrupted: bool = False,
|
||||
location: Optional[dict] = None) -> str:
|
||||
"""Baut den Text fuer aria-core mit allen relevanten Hints (Barge-In,
|
||||
GPS-Position). Hints sind in eckigen Klammern, der eigentliche User-
|
||||
Text folgt unverandert."""
|
||||
parts: list[str] = []
|
||||
if interrupted:
|
||||
parts.append(
|
||||
"[Hinweis: Stefan hat dich gerade unterbrochen waehrend du noch "
|
||||
"gesprochen oder gearbeitet hast. Folgendes ist eine Korrektur, "
|
||||
"Ergaenzung oder ein Themenwechsel zu deiner letzten Antwort.]"
|
||||
)
|
||||
if location and isinstance(location, dict):
|
||||
lat = location.get("lat")
|
||||
lon = location.get("lon") or location.get("lng")
|
||||
if lat is not None and lon is not None:
|
||||
parts.append(
|
||||
f"[Stefans aktuelle GPS-Position: {float(lat):.6f}, {float(lon):.6f}. "
|
||||
f"Nutze die nur wenn die Frage sich auf seinen Standort bezieht. "
|
||||
f"Erwaehne sie nicht von dir aus, ausser er fragt explizit danach.]"
|
||||
)
|
||||
if parts:
|
||||
return " ".join(parts) + " " + text
|
||||
return text
|
||||
|
||||
def _build_pending_files_message(self, user_text: str) -> str:
|
||||
"""Baut eine Anweisung an aria-core aus den gepufferten Files + optionalem
|
||||
User-Text. user_text leer → 'warte auf Anweisung'-Variante."""
|
||||
parts: list[str] = []
|
||||
for fp, name, ftype, kb, w, h in self._pending_files:
|
||||
dim = f" {w}x{h}px" if (w and h) else ""
|
||||
kind = "Bild" if ftype.startswith("image/") else "Datei"
|
||||
parts.append(f"- {kind}: {name}{dim} ({ftype}, {kb}KB) liegt unter {fp}")
|
||||
files_summary = "\n".join(parts)
|
||||
n = len(self._pending_files)
|
||||
anhang = "Anhang" if n == 1 else "Anhaenge"
|
||||
if user_text:
|
||||
return (f"Stefan hat dir {n} {anhang} geschickt:\n{files_summary}\n\n"
|
||||
f"Er sagt dazu: \"{user_text}\"")
|
||||
return (f"Stefan hat dir {n} {anhang} geschickt:\n{files_summary}\n\n"
|
||||
f"Warte auf seine Anweisung was du damit tun sollst.")
|
||||
|
||||
async def _flush_pending_files_after(self, delay: float) -> None:
|
||||
"""Wenn nach `delay`s kein chat-Text gekommen ist: Files alleine an
|
||||
aria-core senden ('warte auf Anweisung'-Variante)."""
|
||||
try:
|
||||
await asyncio.sleep(delay)
|
||||
except asyncio.CancelledError:
|
||||
return
|
||||
if not self._pending_files:
|
||||
return
|
||||
text = self._build_pending_files_message("")
|
||||
self._pending_files = []
|
||||
self._pending_files_flush_task = None
|
||||
await self.send_to_core(text, source="app-file")
|
||||
|
||||
# Aktive Session vom Diagnostic holen
|
||||
self._fetch_active_session()
|
||||
async def _flush_pending_files_with_text(self, user_text: str) -> bool:
|
||||
"""Wenn ein chat-Text reinkommt waehrend Files gepuffert sind:
|
||||
Files + Text zu einer einzigen aria-core-Nachricht mergen.
|
||||
Returns True wenn gemerged wurde (Caller soll dann nicht nochmal senden)."""
|
||||
if not self._pending_files:
|
||||
return False
|
||||
if self._pending_files_flush_task and not self._pending_files_flush_task.done():
|
||||
self._pending_files_flush_task.cancel()
|
||||
self._pending_files_flush_task = None
|
||||
text = self._build_pending_files_message(user_text)
|
||||
self._pending_files = []
|
||||
await self.send_to_core(text, source="app-file+chat")
|
||||
return True
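# Zeitlicher Ablauf des Datei-Puffers (Skizze; das konkrete Fenster steht in
# self._PENDING_FILES_WINDOW_SEC, laut Changelog ca. 0.8s):
#   t = 0         file-Event  -> Datei speichern, in _pending_files puffern,
#                                _flush_pending_files_after(Fenster) als Timer starten
#   t < Fenster   chat-Event  -> _flush_pending_files_with_text() cancelt den Timer
#                                und sendet Files + Text als EINE Core-Nachricht
#   t = Fenster   kein Chat   -> Timer feuert, _flush_pending_files_after sendet die
#                                Files mit der "Warte auf Anweisung"-Variante alleine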
|
||||
|
||||
req_id = self._next_req_id()
|
||||
message = json.dumps({
|
||||
"type": "req",
|
||||
"id": req_id,
|
||||
"method": "chat.send",
|
||||
"params": {
|
||||
"sessionKey": self._session_key,
|
||||
"message": text,
|
||||
"idempotencyKey": str(uuid.uuid4()),
|
||||
},
|
||||
async def send_to_core(self, text: str, source: str = "bridge") -> None:
|
||||
"""Sendet Text an aria-brain (HTTP /chat) und broadcastet die Antwort.
|
||||
|
||||
Nicht-Streaming: wir warten bis Brain fertig ist, dann pushen wir
|
||||
die komplette Reply via RVS an alle Clients (App + Diagnostic).
|
||||
TTS wird vom Bridge-Code separat angestossen (gleiche Logik wie
|
||||
vorher mit aria-core).
|
||||
"""
|
||||
brain_url = os.environ.get("BRAIN_URL", "http://aria-brain:8080")
|
||||
url = f"{brain_url}/chat"
|
||||
payload = json.dumps({"message": text, "source": source}).encode("utf-8")
|
||||
logger.info("[brain] chat ← %s '%s'", source, text[:80])
|
||||
|
||||
# agent_activity broadcasten (App + Diagnostic "ARIA denkt..." Indicator)
|
||||
await self._send_to_rvs({
|
||||
"type": "agent_activity",
|
||||
"payload": {"activity": "thinking"},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
|
||||
def _do_call():
|
||||
try:
|
||||
req = urllib.request.Request(
|
||||
url, data=payload, method="POST",
|
||||
headers={"Content-Type": "application/json"},
|
||||
)
|
||||
# Cold-Start kann lange dauern, 5min Timeout
|
||||
with urllib.request.urlopen(req, timeout=300) as resp:
|
||||
return resp.status, resp.read().decode("utf-8", errors="ignore")
|
||||
except Exception as exc:
|
||||
return None, str(exc)
|
||||
|
||||
status, body = await asyncio.get_event_loop().run_in_executor(None, _do_call)
|
||||
if status != 200:
|
||||
logger.error("[brain] /chat fehlgeschlagen: status=%s body=%s", status, body[:200])
|
||||
await self._send_to_rvs({
|
||||
"type": "agent_activity",
|
||||
"payload": {"activity": "idle"},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
await self._send_to_rvs({
|
||||
"type": "chat",
|
||||
"payload": {
|
||||
"text": f"[Brain-Fehler] {body[:200] or 'unbekannt'}",
|
||||
"sender": "aria",
|
||||
},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
return
|
||||
|
||||
try:
|
||||
await self.ws_core.send(message)
|
||||
logger.info("[core] chat.send (%s, id=%s): '%s'", source, req_id, text[:80])
|
||||
data = json.loads(body)
|
||||
except Exception:
|
||||
logger.exception("[core] Sendefehler")
|
||||
logger.error("[brain] /chat lieferte ungueltiges JSON: %s", body[:200])
|
||||
await self._send_to_rvs({
|
||||
"type": "agent_activity",
|
||||
"payload": {"activity": "idle"},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
return
|
||||
|
||||
reply = (data.get("reply") or "").strip()
|
||||
if not reply:
|
||||
logger.warning("[brain] /chat: leerer Reply")
|
||||
await self._send_to_rvs({
|
||||
"type": "agent_activity",
|
||||
"payload": {"activity": "idle"},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
return
|
||||
|
||||
# Side-Channel-Events VOR der Chat-Bubble broadcasten (z.B. skill_created)
|
||||
# damit sie in der UI vor der Reply auftauchen
|
||||
for event in data.get("events", []) or []:
|
||||
etype = event.get("type")
|
||||
if etype == "skill_created":
|
||||
await self._send_to_rvs({
|
||||
"type": "skill_created",
|
||||
"payload": event.get("skill", {}),
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
logger.info("[brain] ARIA hat einen Skill erstellt: %s",
|
||||
event.get("skill", {}).get("name"))
|
||||
|
||||
# _process_core_response uebernimmt alles weitere:
|
||||
# File-Marker extrahieren + broadcasten, NO_REPLY-Check, Chat-
|
||||
# Broadcast an RVS, TTS, agent_activity idle. Wir geben das
|
||||
# raw payload mit dem reply rein damit Mode/voice-Metadata
|
||||
# passend behandelt wird (hier minimal, weil Brain noch keine
|
||||
# metadata mitschickt).
|
||||
try:
|
||||
await self._process_core_response(reply, {})
|
||||
except Exception:
|
||||
logger.exception("[brain] _process_core_response Fehler")
|
||||
await self._send_to_rvs({
|
||||
"type": "agent_activity",
|
||||
"payload": {"activity": "idle"},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
|
||||
if data.get("distilling"):
|
||||
logger.info("[brain] Destillat laeuft im Hintergrund")
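# Vertrag von POST {BRAIN_URL}/chat, wie send_to_core ihn oben nutzt (Skizze; nur die
# hier tatsaechlich gelesenen Felder, weitere Felder von aria-brain sind aus diesem
# Diff nicht ersichtlich):
#   Request:  {"message": "<User-Text>", "source": "app" / "app-voice" / ...}
#   Response: {"reply": "<Antwort-Text>",
#              "events": [{"type": "skill_created", "skill": {...}}, ...],  # optional
#              "distilling": true}                                          # optional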
|
||||
|
||||
# ── RVS Verbindung (App-Relay) ──────────────────────────
|
||||
|
||||
@@ -1071,7 +1294,8 @@ class ARIABridge:
|
||||
try:
|
||||
url = f"{current_url}?token={self.rvs_token}"
|
||||
logger.info("[rvs] Verbinde: %s", current_url)
|
||||
async with websockets.connect(url) as ws:
|
||||
# max_size=50MB (siehe core-Connect oben — gleicher Grund).
|
||||
async with websockets.connect(url, max_size=50 * 1024 * 1024) as ws:
|
||||
self.ws_rvs = ws
|
||||
retry_delay = 2
|
||||
logger.info("[rvs] Verbunden — warte auf App-Nachrichten")
|
||||
@@ -1169,21 +1393,38 @@ class ARIABridge:
|
||||
if sender in ("aria", "stt"):
|
||||
return
|
||||
text = payload.get("text", "")
|
||||
# Voice-Override fuer die naechste ARIA-Antwort merken
|
||||
voice_override = payload.get("voice", "")
|
||||
if voice_override:
|
||||
self._next_voice_override = voice_override
|
||||
logger.info("[rvs] Voice-Override fuer naechste Antwort: %s", voice_override)
|
||||
# Voice-Override fuer Folgenachrichten setzen — gilt bis zum naechsten
|
||||
# chat-Event. Leerer String "" = explizit Default-Voice (override loeschen).
|
||||
# Field nicht gesendet = vorherigen Override unveraendert lassen (z.B. wenn
|
||||
# cancel_request oder anderer Service die App umgeht).
|
||||
if "voice" in payload:
|
||||
voice_override = payload.get("voice", "") or ""
|
||||
self._next_voice_override = voice_override or None
|
||||
logger.info("[rvs] Voice fuer Antworten: %s",
|
||||
self._next_voice_override or "(Default)")
|
||||
# Speed-Override (TTS-Wiedergabegeschwindigkeit, pro Geraet)
|
||||
try:
|
||||
speed = float(payload.get("speed", 0) or 0)
|
||||
if 0.1 <= speed <= 5.0:
|
||||
self._next_speed_override = speed
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
if "speed" in payload:
|
||||
try:
|
||||
speed = float(payload.get("speed", 0) or 0)
|
||||
self._next_speed_override = speed if 0.1 <= speed <= 5.0 else None
|
||||
except (TypeError, ValueError):
|
||||
self._next_speed_override = None
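# Kurzuebersicht der Override-Semantik fuer "voice" und "speed" (fasst den Code oben zusammen):
#   Feld fehlt im Payload      -> vorheriger Override bleibt unveraendert
#   Feld leer / 0 / ungueltig  -> Override loeschen, globale Default gilt wieder
#   Feld mit gueltigem Wert    -> Override gilt bis zum naechsten chat-Event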
|
||||
if text:
|
||||
logger.info("[rvs] App-Chat: '%s'", text[:80])
|
||||
await self.send_to_core(text, source="app")
|
||||
interrupted = bool(payload.get("interrupted", False))
|
||||
location = payload.get("location") or None
|
||||
# Wenn Files gerade gepuffert sind (Bild + Text gleichzeitig
|
||||
# gesendet), mergen wir sie zu einer einzigen Anfrage statt
|
||||
# zwei separater send_to_core-Calls.
|
||||
merged = await self._flush_pending_files_with_text(text)
|
||||
if merged:
|
||||
logger.info("[rvs] App-Chat (mit Anhaengen): '%s'", text[:80])
|
||||
else:
|
||||
core_text = self._build_core_text(text, interrupted, location)
|
||||
logger.info("[rvs] App-Chat%s%s: '%s'",
|
||||
" [BARGE-IN]" if interrupted else "",
|
||||
" [GPS]" if location else "",
|
||||
text[:80])
|
||||
await self.send_to_core(core_text, source="app" + (" [barge-in]" if interrupted else ""))
|
||||
return
|
||||
|
||||
if msg_type == "cancel_request":
|
||||
@@ -1342,70 +1583,165 @@ class ARIABridge:
|
||||
await self.ws_core.send(raw_message)
|
||||
|
||||
elif msg_type == "file":
|
||||
# Datei von der App → als Text-Nachricht an aria-core
|
||||
# Datei von der App: speichern + zu Pending-Queue hinzufuegen.
|
||||
# Wird mit dem nachfolgenden chat-Event (innerhalb PENDING_FILES_WINDOW)
|
||||
# zu einer einzigen aria-core-Anfrage gemerged. Sonst antwortet ARIA
|
||||
# zweimal: einmal "warte auf Anweisung" beim file, einmal auf den Chat.
|
||||
file_name = payload.get("name", "unbekannt")
|
||||
file_type = payload.get("type", "")
|
||||
file_b64 = payload.get("base64", "")
|
||||
file_size = payload.get("size", 0)
|
||||
width = payload.get("width", 0)
|
||||
height = payload.get("height", 0)
|
||||
logger.info("[rvs] Datei empfangen: %s (%s, %dKB)",
|
||||
file_name, file_type, len(file_b64) // 1365 if file_b64 else 0)
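# 1365 entspricht 1024 * 4/3: Base64 kodiert 3 Rohbytes als 4 Zeichen,
# len(file_b64) // 1365 ist daher grob die dekodierte Dateigroesse in KB.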
|
||||
|
||||
# Shared Volume: /shared/ ist in Bridge UND aria-core gemountet
|
||||
SHARED_DIR = "/shared/uploads"
|
||||
os.makedirs(SHARED_DIR, exist_ok=True)
|
||||
|
||||
if file_b64 and file_type.startswith("image/"):
|
||||
# Bild in Shared Volume speichern
|
||||
if not file_b64:
|
||||
text = f"Stefan hat eine Datei gesendet ({file_name}, {file_type}) aber die Daten sind leer angekommen."
|
||||
await self.send_to_core(text, source="app-file")
|
||||
return
|
||||
|
||||
if file_type.startswith("image/"):
|
||||
ext = ".jpg" if "jpeg" in file_type or "jpg" in file_type else ".png"
|
||||
safe_name = f"img_{int(asyncio.get_event_loop().time())}_{file_name.replace('/', '_')}"
|
||||
file_path = os.path.join(SHARED_DIR, safe_name if safe_name.endswith(ext) else safe_name + ext)
|
||||
with open(file_path, "wb") as f:
|
||||
f.write(base64.b64decode(file_b64))
|
||||
size_kb = len(file_b64) // 1365
|
||||
logger.info("[rvs] Bild gespeichert: %s (%dKB)", file_path, size_kb)
|
||||
# ERST an aria-core senden (wichtigster Schritt)
|
||||
text = (f"Stefan hat dir ein Bild geschickt: {file_name}"
|
||||
f"{f' ({width}x{height}px)' if width else ''}"
|
||||
f", {size_kb}KB."
|
||||
f" Das Bild liegt unter: {file_path}"
|
||||
f" Warte auf Stefans Anweisung was du damit tun sollst.")
|
||||
await self.send_to_core(text, source="app-file")
|
||||
# Dann App informieren (optional, darf nicht crashen)
|
||||
try:
|
||||
await self._send_to_rvs({
|
||||
"type": "file_saved",
|
||||
"payload": {"name": file_name, "serverPath": file_path, "mimeType": file_type},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] file_saved konnte nicht an App gesendet werden: %s", e)
|
||||
elif file_b64:
|
||||
# Andere Datei in Shared Volume speichern
|
||||
else:
|
||||
safe_name = f"file_{int(asyncio.get_event_loop().time())}_{file_name.replace('/', '_')}"
|
||||
file_path = os.path.join(SHARED_DIR, safe_name)
|
||||
with open(file_path, "wb") as f:
|
||||
f.write(base64.b64decode(file_b64))
|
||||
size_kb = len(file_b64) // 1365
|
||||
logger.info("[rvs] Datei gespeichert: %s (%dKB)", file_path, size_kb)
|
||||
# ERST an aria-core senden
|
||||
text = (f"Stefan hat dir eine Datei geschickt: {file_name}"
|
||||
f" ({file_type}, {size_kb}KB)."
|
||||
f" Die Datei liegt unter: {file_path}"
|
||||
f" Warte auf Stefans Anweisung was du damit tun sollst.")
|
||||
await self.send_to_core(text, source="app-file")
|
||||
try:
|
||||
await self._send_to_rvs({
|
||||
"type": "file_saved",
|
||||
"payload": {"name": file_name, "serverPath": file_path, "mimeType": file_type},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] file_saved konnte nicht an App gesendet werden: %s", e)
|
||||
else:
|
||||
text = f"Stefan hat eine Datei gesendet ({file_name}, {file_type}) aber die Daten sind leer angekommen."
|
||||
await self.send_to_core(text, source="app-file")
|
||||
with open(file_path, "wb") as f:
|
||||
f.write(base64.b64decode(file_b64))
|
||||
size_kb = len(file_b64) // 1365
|
||||
logger.info("[rvs] Datei gespeichert: %s (%dKB)", file_path, size_kb)
|
||||
|
||||
# Pixel-Bilder fuer Claude-Vision shrinken wenn > 2 MB. SVG/PDF/ZIP
|
||||
# bleiben unangetastet (Vision laeuft eh nur auf Raster-Formaten).
|
||||
CLAUDE_VISION_FORMATS = ("image/jpeg", "image/jpg", "image/png", "image/webp", "image/gif")
|
||||
if file_type.lower() in CLAUDE_VISION_FORMATS:
|
||||
file_size_bytes = os.path.getsize(file_path)
|
||||
if file_size_bytes > 2 * 1024 * 1024:
|
||||
try:
|
||||
from PIL import Image
|
||||
with Image.open(file_path) as img:
|
||||
orig_w, orig_h = img.size
|
||||
# Anthropic-Empfehlung: max 1568px lange Seite. RGB-Konvertierung
|
||||
# falls RGBA/Palette (JPEG braucht RGB).
|
||||
img.thumbnail((1568, 1568), Image.Resampling.LANCZOS)
|
||||
if img.mode in ("RGBA", "P"):
|
||||
img = img.convert("RGB")
|
||||
img.save(file_path, "JPEG", quality=85, optimize=True)
|
||||
new_size_bytes = os.path.getsize(file_path)
|
||||
logger.info("[rvs] Bild verkleinert: %dx%d → %dx%d, %.1fMB → %.1fMB",
|
||||
orig_w, orig_h, img.size[0], img.size[1],
|
||||
file_size_bytes / 1024 / 1024,
|
||||
new_size_bytes / 1024 / 1024)
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] Bild-Resize fehlgeschlagen (%s) — Original wird genutzt: %s",
|
||||
file_name, e)
|
||||
|
||||
# In Pending-Queue + Flush-Timer (anti-spam Buffering)
|
||||
self._pending_files.append((file_path, file_name, file_type, size_kb, int(width or 0), int(height or 0)))
|
||||
if self._pending_files_flush_task and not self._pending_files_flush_task.done():
|
||||
self._pending_files_flush_task.cancel()
|
||||
self._pending_files_flush_task = asyncio.create_task(
|
||||
self._flush_pending_files_after(self._PENDING_FILES_WINDOW_SEC)
|
||||
)
|
||||
|
||||
try:
|
||||
await self._send_to_rvs({
|
||||
"type": "file_saved",
|
||||
"payload": {"name": file_name, "serverPath": file_path, "mimeType": file_type},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] file_saved konnte nicht an App gesendet werden: %s", e)
|
||||
|
||||
elif msg_type == "file_list_request":
|
||||
# App fragt die Liste aller /shared/uploads/-Dateien an.
|
||||
logger.info("[rvs] file_list_request von App")
|
||||
try:
|
||||
req = urllib.request.Request(
|
||||
"http://localhost:3001/api/files-list",
|
||||
method="GET",
|
||||
)
|
||||
def _do_list():
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||
return json.loads(resp.read().decode("utf-8", errors="ignore"))
|
||||
except Exception as e:
|
||||
return {"ok": False, "error": str(e)}
|
||||
d = await asyncio.get_event_loop().run_in_executor(None, _do_list)
|
||||
await self._send_to_rvs({
|
||||
"type": "file_list_response",
|
||||
"payload": d,
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] file_list_request: %s", e)
|
||||
return
|
||||
|
||||
elif msg_type == "file_delete_request":
|
||||
# App will eine Datei loeschen — leite an Diagnostic.
|
||||
p = payload.get("path", "")
|
||||
logger.warning("[rvs] file_delete_request von App: %s", p)
|
||||
try:
|
||||
body_bytes = json.dumps({"path": p}).encode("utf-8")
|
||||
req = urllib.request.Request(
|
||||
"http://localhost:3001/api/files-delete",
|
||||
data=body_bytes,
|
||||
method="POST",
|
||||
headers={"Content-Type": "application/json"},
|
||||
)
|
||||
def _do_delete():
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||
return resp.status, resp.read().decode("utf-8", errors="ignore")
|
||||
except Exception as e:
|
||||
return None, str(e)
|
||||
status, body = await asyncio.get_event_loop().run_in_executor(None, _do_delete)
|
||||
logger.info("[rvs] file_delete_request %s: status=%s", p, status)
|
||||
# Diagnostic broadcastet file_deleted via sendToRVS_raw — kommt
|
||||
# ueber den persistenten WS-Path zur App. Wir bestaetigen
|
||||
# zusaetzlich, damit der Caller sicher ist dass es durch ist.
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] file_delete_request: %s", e)
|
||||
return
|
||||
|
||||
elif msg_type == "container_restart":
|
||||
# App-Button "Container neu" — leitet generisch an Diagnostic
|
||||
# weiter. Whitelist ist im Diagnostic-Server.
|
||||
name = payload.get("name", "")
|
||||
logger.warning("[rvs] container_restart Request von App: %s", name)
|
||||
try:
|
||||
body_bytes = json.dumps({"name": name}).encode("utf-8")
|
||||
req = urllib.request.Request(
|
||||
"http://localhost:3001/api/container-restart",
|
||||
data=body_bytes,
|
||||
method="POST",
|
||||
headers={"Content-Type": "application/json"},
|
||||
)
|
||||
def _do_restart():
|
||||
try:
|
||||
with urllib.request.urlopen(req, timeout=45) as resp:
|
||||
return resp.status, resp.read().decode("utf-8", errors="ignore")
|
||||
except Exception as e:
|
||||
return None, str(e)
|
||||
status, body = await asyncio.get_event_loop().run_in_executor(None, _do_restart)
|
||||
logger.info("[rvs] container_restart %s Result: status=%s", name, status)
|
||||
ok = status == 200
|
||||
await self._send_to_rvs({
|
||||
"type": "chat",
|
||||
"payload": {
|
||||
"text": f"[Container] {name} neu gestartet." if ok
|
||||
else f"[Container] Restart {name} fehlgeschlagen: {body[:200]}",
|
||||
"sender": "aria",
|
||||
},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] container_restart Weiterleitung fehlgeschlagen: %s", e)
|
||||
return
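# Hinweis (Skizze, frei benannter Helper): files-list, files-delete und
# container-restart oben folgen demselben Muster (blocking urllib im Executor).
# Das liesse sich z.B. so buendeln:
#
#   async def _diag_request(self, path: str, body: Optional[dict] = None,
#                           timeout: float = 10.0) -> tuple[Optional[int], str]:
#       """GET (body=None) bzw. POST an den Diagnostic-Server auf localhost:3001."""
#       def _call():
#           try:
#               data = json.dumps(body).encode("utf-8") if body is not None else None
#               req = urllib.request.Request(
#                   f"http://localhost:3001{path}", data=data,
#                   method="POST" if body is not None else "GET",
#                   headers={"Content-Type": "application/json"},
#               )
#               with urllib.request.urlopen(req, timeout=timeout) as resp:
#                   return resp.status, resp.read().decode("utf-8", errors="ignore")
#           except Exception as exc:
#               return None, str(exc)
#       return await asyncio.get_event_loop().run_in_executor(None, _call)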
|
||||
|
||||
elif msg_type == "file_request":
|
||||
# App fordert eine Datei an (Re-Download nach Cache-Leerung)
|
||||
@@ -1424,6 +1760,7 @@ class ARIABridge:
|
||||
return
|
||||
with open(server_path, "rb") as f:
|
||||
file_b64 = base64.b64encode(f.read()).decode("ascii")
|
||||
mime, _ = mimetypes.guess_type(server_path)
|
||||
logger.info("[rvs] Re-Download: %s (%dKB)", server_path, len(file_b64) // 1365)
|
||||
await self._send_to_rvs({
|
||||
"type": "file_response",
|
||||
@@ -1432,6 +1769,7 @@ class ARIABridge:
|
||||
"serverPath": server_path,
|
||||
"base64": file_b64,
|
||||
"name": os.path.basename(server_path),
|
||||
"mimeType": mime or "application/octet-stream",
|
||||
},
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
@@ -1444,20 +1782,28 @@ class ARIABridge:
|
||||
if not audio_b64:
|
||||
logger.warning("[rvs] Audio ohne Daten empfangen")
|
||||
return
|
||||
# Voice-Override fuer die kommende ARIA-Antwort (App-lokal gewaehlt)
|
||||
voice_override = payload.get("voice", "")
|
||||
if voice_override:
|
||||
self._next_voice_override = voice_override
|
||||
logger.info("[rvs] Voice-Override (via Audio): %s", voice_override)
|
||||
try:
|
||||
speed = float(payload.get("speed", 0) or 0)
|
||||
if 0.1 <= speed <= 5.0:
|
||||
self._next_speed_override = speed
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
logger.info("[rvs] Audio empfangen: %s, %dms, %dKB",
|
||||
mime_type, duration_ms, len(audio_b64) // 1365)
|
||||
asyncio.create_task(self._process_app_audio(audio_b64, mime_type))
|
||||
# Voice-Override fuer Folgenachrichten — gleiche Semantik wie beim chat-Event.
|
||||
if "voice" in payload:
|
||||
voice_override = payload.get("voice", "") or ""
|
||||
self._next_voice_override = voice_override or None
|
||||
logger.info("[rvs] Voice fuer Antworten (via Audio): %s",
|
||||
self._next_voice_override or "(Default)")
|
||||
if "speed" in payload:
|
||||
try:
|
||||
speed = float(payload.get("speed", 0) or 0)
|
||||
self._next_speed_override = speed if 0.1 <= speed <= 5.0 else None
|
||||
except (TypeError, ValueError):
|
||||
self._next_speed_override = None
|
||||
interrupted = bool(payload.get("interrupted", False))
|
||||
audio_request_id = payload.get("audioRequestId", "") or ""
|
||||
location = payload.get("location") or None
|
||||
logger.info("[rvs] Audio empfangen: %s, %dms, %dKB%s%s%s",
|
||||
mime_type, duration_ms, len(audio_b64) // 1365,
|
||||
" [BARGE-IN]" if interrupted else "",
|
||||
" [GPS]" if location else "",
|
||||
f" reqId={audio_request_id[:16]}" if audio_request_id else "")
|
||||
asyncio.create_task(self._process_app_audio(
|
||||
audio_b64, mime_type, interrupted, audio_request_id, location))
|
||||
|
||||
elif msg_type == "stt_response":
|
||||
# Antwort der whisper-bridge auf unseren stt_request
|
||||
@@ -1513,8 +1859,23 @@ class ARIABridge:
|
||||
_STT_REMOTE_TIMEOUT_READY_S = 45.0
|
||||
_STT_REMOTE_TIMEOUT_LOADING_S = 300.0
|
||||
|
||||
async def _process_app_audio(self, audio_b64: str, mime_type: str) -> None:
|
||||
"""App-Audio → STT → aria-core. Primaer via whisper-bridge (RVS), Fallback lokal."""
|
||||
async def _process_app_audio(self, audio_b64: str, mime_type: str,
|
||||
interrupted: bool = False,
|
||||
audio_request_id: str = "",
|
||||
location: Optional[dict] = None) -> None:
|
||||
"""App-Audio → STT → aria-core. Primaer via whisper-bridge (RVS), Fallback lokal.
|
||||
|
||||
interrupted=True wenn der User waehrend ARIA noch sprach/dachte aufgenommen hat
|
||||
(Barge-In). Wird als Hinweis-Praefix an aria-core mitgegeben damit ARIA die
|
||||
Korrektur/Unterbrechung in den Kontext einordnen kann statt als reine
|
||||
Folgefrage zu behandeln.
|
||||
|
||||
audio_request_id: Korrelations-ID die die App im audio-Event mitschickt — wird
|
||||
unveraendert ans STT-Result zurueckgegeben damit die App die EXAKT richtige
|
||||
'wird verarbeitet'-Bubble ersetzen kann (auch bei mehreren parallelen Aufnahmen).
|
||||
|
||||
location: Optional GPS-Position {lat, lon} — wird als Hinweis-Praefix mitgegeben
|
||||
damit ARIA bei standortbezogenen Fragen sie nutzen kann."""
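# Ablauf der audioRequestId-Korrelation (Skizze, Beispielwert frei gewaehlt):
#   App    -> Bridge : audio-Event mit Base64-Audio und audioRequestId "r-42"
#   Bridge -> RVS    : nach STT ein chat-Event mit sender="stt", dem Transkript und
#                      derselben audioRequestId "r-42"
#   App              : ersetzt damit exakt die passende "wird verarbeitet"-Bubble,
#                      auch wenn mehrere Aufnahmen parallel laufen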
|
||||
# Erst Remote versuchen
|
||||
text = await self._stt_remote(audio_b64, mime_type)
|
||||
if text is None:
|
||||
@@ -1526,19 +1887,34 @@ class ARIABridge:
|
||||
|
||||
if text.strip():
|
||||
logger.info("[rvs] STT Ergebnis: '%s'", text[:80])
|
||||
# Hints (Barge-In, GPS) als Praefix vorschalten — gemeinsamer Helper
|
||||
# mit dem chat-Pfad damit das Verhalten konsistent ist.
|
||||
core_text = self._build_core_text(text, interrupted, location)
|
||||
# ERST an aria-core senden (wichtigster Schritt)
|
||||
await self.send_to_core(text, source="app-voice")
|
||||
await self.send_to_core(core_text, source="app-voice" + (" [barge-in]" if interrupted else ""))
|
||||
# STT-Text an RVS senden (fuer Anzeige in App + Diagnostic)
|
||||
# sender="stt" damit Bridge es ignoriert (kein Loop)
|
||||
try:
|
||||
await self._send_to_rvs({
|
||||
stt_payload = {
|
||||
"text": text,
|
||||
"sender": "stt",
|
||||
}
|
||||
if audio_request_id:
|
||||
stt_payload["audioRequestId"] = audio_request_id
|
||||
# GPS aus dem Original-Audio-Payload mitgeben — Diagnostic
|
||||
# zeigt sie sonst nicht an (App sendet location nur einmal,
|
||||
# die im audio-Payload). Reine Anzeige-Information.
|
||||
if location:
|
||||
stt_payload["location"] = location
|
||||
ok = await self._send_to_rvs({
|
||||
"type": "chat",
|
||||
"payload": {
|
||||
"text": text,
|
||||
"sender": "stt",
|
||||
},
|
||||
"payload": stt_payload,
|
||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||
})
|
||||
if ok:
|
||||
logger.info("[rvs] STT-Text an RVS broadcastet (sender=stt)")
|
||||
else:
|
||||
logger.warning("[rvs] STT-Text NICHT broadcastet — _send_to_rvs lieferte False")
|
||||
except Exception as e:
|
||||
logger.warning("[rvs] STT-Text konnte nicht an RVS gesendet werden: %s", e)
|
||||
else:
|
||||
@@ -1792,7 +2168,8 @@ class ARIABridge:
|
||||
self.running = True
|
||||
|
||||
tasks = [
|
||||
asyncio.create_task(self.connect_to_core()),
|
||||
# connect_to_core entfaellt — Bridge ruft jetzt aria-brain ueber
|
||||
# HTTP (siehe send_to_core). Keine persistente WS-Verbindung mehr.
|
||||
asyncio.create_task(self.connect_to_rvs()),
|
||||
]
|
||||
|
||||
|
||||
@@ -16,3 +16,6 @@ sounddevice
|
||||
|
||||
# Wake-Word Erkennung
|
||||
openwakeword
|
||||
|
||||
# Bild-Resizing (zu grosse Pixel-Bilder shrinken bevor Claude-Vision sie sieht — 5MB-Limit)
|
||||
Pillow
|
||||
|
||||
+1348
-499
File diff suppressed because it is too large
+542
-739
File diff suppressed because it is too large
+45
-38
@@ -9,7 +9,7 @@ services:
|
||||
command: >-
|
||||
sh -c "apk add --no-cache openssh-client bash curl &&
|
||||
npm install -g @anthropic-ai/claude-code claude-max-api-proxy &&
|
||||
DIST=$(find /usr/local/lib -path '*/claude-max-api-proxy/dist' -type d | head -1) &&
|
||||
DIST=$$(find /usr/local/lib -path '*/claude-max-api-proxy/dist' -type d | head -1) &&
|
||||
sed -i 's/startServer({ port })/startServer({ port, host: process.env.HOST || \"127.0.0.1\" })/' $$DIST/server/standalone.js &&
|
||||
sed -i 's/if (model\.includes/if ((model||\"claude-sonnet-4\").includes/g' $$DIST/adapter/cli-to-openai.js &&
|
||||
sed -i '1i\\function _t(c){return typeof c===\"string\"?c:Array.isArray(c)?c.filter(function(b){return b.type===\"text\"}).map(function(b){return b.text||\"\"}).join(\"\"):String(c)}' $$DIST/adapter/openai-to-cli.js &&
|
||||
@@ -28,38 +28,40 @@ services:
|
||||
networks:
|
||||
- aria-net
|
||||
|
||||
# ─── OpenClaw (ARIA Gehirn) ─────────────────────────────
|
||||
aria:
|
||||
image: ghcr.io/openclaw/openclaw:latest
|
||||
container_name: aria-core
|
||||
hostname: aria-wohnung
|
||||
privileged: true # ARIAs Wohnung — sie hat die Schlüssel
|
||||
# ─── Qdrant (Vector-DB fuer ARIAs Gedaechtnis) ────────
|
||||
# Storage liegt im Repo-Bind-Mount aria-data/brain/qdrant.
|
||||
# Damit Backup/Export/Import komplett ueber das Filesystem gehen.
|
||||
qdrant:
|
||||
image: qdrant/qdrant:latest
|
||||
container_name: aria-qdrant
|
||||
volumes:
|
||||
- ./aria-data/brain/qdrant:/qdrant/storage
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- aria-net
|
||||
|
||||
# ─── ARIA Brain (Agent + Memory) ─────────────────────────
|
||||
# Loest das alte aria-core (OpenClaw) ab. Vector-DB-basiertes
|
||||
# Memory, eigener Agent-Loop, SSH zur aria-wohnung-VM.
|
||||
brain:
|
||||
build: ./aria-brain
|
||||
container_name: aria-brain
|
||||
hostname: aria-wohnung-brain # damit ssh known_hosts stabil bleibt
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway" # Zugriff auf die VM via SSH
|
||||
depends_on:
|
||||
- qdrant
|
||||
- proxy
|
||||
ports:
|
||||
- "3001:3001" # Diagnostic Web-UI (laeuft im shared network)
|
||||
environment:
|
||||
- CANVAS_HOST=127.0.0.1
|
||||
- OPENCLAW_GATEWAY_TOKEN=${ARIA_AUTH_TOKEN}
|
||||
- DEFAULT_MODEL=proxy/claude-sonnet-4
|
||||
- RATE_LIMIT_PER_USER=30
|
||||
- DISPLAY=:0
|
||||
- QDRANT_HOST=aria-qdrant
|
||||
- QDRANT_PORT=6333
|
||||
- PROXY_URL=http://proxy:3456
|
||||
- ARIA_AUTH_TOKEN=${ARIA_AUTH_TOKEN:-}
|
||||
volumes:
|
||||
- openclaw-config:/home/node/.openclaw # OpenClaw Config (persistiert Model + Auth)
|
||||
- ./aria-data/brain:/home/node/.openclaw/workspace/memory
|
||||
- ./aria-data/skills:/home/node/.openclaw/workspace/skills
|
||||
- ./aria-data/config/AGENT.md:/home/node/.openclaw/workspace/AGENT.md
|
||||
- ./aria-data/config/USER.md:/home/node/.openclaw/workspace/USER.md
|
||||
- ./aria-data/config/BOOTSTRAP.md:/home/node/.openclaw/workspace/BOOTSTRAP.md
|
||||
- ./aria-data/config/BOOTSTRAP.md:/home/node/.openclaw/workspace/CLAUDE.md
|
||||
- ./aria-data/config/openclaw.env:/home/node/.openclaw/workspace/.env
|
||||
- claude-config:/home/node/.claude # Claude Code Settings (Permissions)
|
||||
- ./aria-data/ssh:/home/node/.ssh # SSH Keys fuer VM-Zugriff
|
||||
- /tmp/.X11-unix:/tmp/.X11-unix
|
||||
- /var/run/docker.sock:/var/run/docker.sock # VM von innen verwalten
|
||||
- aria-shared:/shared # Shared Volume fuer Datei-Austausch (Bridge <> Core)
|
||||
- ./aria-data/brain/data:/data # Memory-Cache + Skills + Models (bind-mount fuer Export)
|
||||
- ./aria-data/brain-import:/import:ro # Quell-MDs fuer den initialen Memory-Import (read-only)
|
||||
- ./aria-data/ssh:/root/.ssh # SSH-Keys fuer aria-wohnung (geteilt mit Proxy)
|
||||
- aria-shared:/shared # gleicher Austausch-Speicher wie Bridge
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- aria-net
|
||||
@@ -69,11 +71,13 @@ services:
|
||||
build: ./bridge
|
||||
container_name: aria-bridge
|
||||
depends_on:
|
||||
- aria
|
||||
network_mode: "service:aria" # Teilt Netzwerk mit aria-core → localhost:18789
|
||||
- brain
|
||||
networks:
|
||||
- aria-net
|
||||
ports:
|
||||
- "3001:3001" # Diagnostic Web-UI (Diagnostic teilt Netzwerk mit Bridge)
|
||||
volumes:
|
||||
- ./aria-data/config/aria.env:/config/aria.env
|
||||
- aria-shared:/shared # Shared Volume fuer Datei-Austausch (Bridge <> Core)
|
||||
- aria-shared:/shared # Shared Volume fuer Datei-Austausch
|
||||
# Audio-Zugriff
|
||||
- /run/user/1000/pulse:/run/user/1000/pulse
|
||||
- /dev/snd:/dev/snd
|
||||
@@ -81,6 +85,7 @@ services:
|
||||
- /dev/snd
|
||||
environment:
|
||||
- PULSE_SERVER=unix:/run/user/1000/pulse/native
|
||||
- BRAIN_URL=http://aria-brain:8080
|
||||
- ARIA_AUTH_TOKEN=${ARIA_AUTH_TOKEN:-}
|
||||
- RVS_HOST=${RVS_HOST:-}
|
||||
- RVS_PORT=${RVS_PORT:-443}
|
||||
@@ -90,19 +95,23 @@ services:
|
||||
restart: unless-stopped
|
||||
|
||||
# ─── Diagnostic (Selbstcheck-UI und Einstellungen) ────
|
||||
# Teilt Netzwerk mit Bridge, damit der Diagnostic-Server die
|
||||
# Bridge auf localhost erreichen kann.
|
||||
diagnostic:
|
||||
build: ./diagnostic
|
||||
container_name: aria-diagnostic
|
||||
depends_on:
|
||||
- aria
|
||||
network_mode: "service:aria" # Teilt Netzwerk mit aria-core → localhost:18789
|
||||
- bridge
|
||||
network_mode: "service:bridge"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
||||
- /var/run/docker.sock:/var/run/docker.sock # Container Restart + Brain-Export/Import
|
||||
- ./aria-data/config/diag-state:/data # Persistenter State (aktive Session etc.)
|
||||
- aria-shared:/shared # Shared Volume (Uploads + Config)
|
||||
- aria-shared:/shared # Shared Volume (Uploads + Config + Voices)
|
||||
- ./aria-data/brain:/brain # Brain-Export/Import (tar.gz aus Bind-Mount)
|
||||
environment:
|
||||
- ARIA_AUTH_TOKEN=${ARIA_AUTH_TOKEN:-}
|
||||
- PROXY_URL=http://proxy:3456
|
||||
- BRAIN_URL=http://aria-brain:8080
|
||||
- RVS_HOST=${RVS_HOST:-}
|
||||
- RVS_PORT=${RVS_PORT:-443}
|
||||
- RVS_TLS=${RVS_TLS:-true}
|
||||
@@ -111,9 +120,7 @@ services:
|
||||
restart: unless-stopped
|
||||
|
||||
volumes:
|
||||
openclaw-config: # Persistiert ~/.openclaw (Model, Auth, Sessions)
|
||||
claude-config: # Persistiert ~/.claude (Permissions, Settings)
|
||||
aria-shared: # Datei-Austausch zwischen Bridge und Core
|
||||
aria-shared: # Datei-Austausch zwischen Bridge / Brain / Diagnostic
|
||||
|
||||
networks:
|
||||
aria-net:
|
||||
|
||||
@@ -0,0 +1,31 @@
|
||||
#!/bin/bash
|
||||
# ════════════════════════════════════════════════════════════
|
||||
# ARIA — Setup-Script
|
||||
#
|
||||
# Aktuell nur noch der .env-Bootstrap (Tokens + RVS). Alle weiteren
|
||||
# Settings landen ueber die Diagnostic in /shared/config/runtime.json
|
||||
# (persistent in der "Datenbank").
|
||||
#
|
||||
# Im Phase-A-Cleanup-Status: System-Prompt-Files liegen unter
|
||||
# aria-data/brain-import/ und werden vom neuen Agent-Framework
|
||||
# spaeter importiert. OpenClaw laeuft noch ohne Persoenlichkeit.
|
||||
# ════════════════════════════════════════════════════════════
|
||||
|
||||
set -e
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
if [ ! -f .env ]; then
|
||||
if [ -f .env.example ]; then
|
||||
cp .env.example .env
|
||||
echo "✓ .env erstellt aus .env.example — Tokens jetzt eintragen!"
|
||||
else
|
||||
echo "⚠ Keine .env.example gefunden — manuell anlegen."
|
||||
fi
|
||||
else
|
||||
echo ".env existiert bereits — uebersprungen."
|
||||
fi
|
||||
|
||||
# ── Brain-Verzeichnisse anlegen (Bind-Mounts fuer aria-brain + aria-qdrant)
|
||||
# Inhalt ist gitignored — wird ueber Diagnostic-Export/Import gesichert.
|
||||
mkdir -p aria-data/brain/data aria-data/brain/qdrant
|
||||
echo "✓ aria-data/brain/{data,qdrant} bereit"
|
||||
@@ -1,7 +1,123 @@
|
||||
# ARIA Issues & Features
|
||||
|
||||
## Audio-Verhalten in der App
|
||||
|
||||
So sollte die App in den verschiedenen Phasen mit fremden Audio-Apps
|
||||
(Spotify, YouTube, Podcasts etc.) und dem eigenen Mikro umgehen.
|
||||
Wenn was anders ist, ist's ein Bug.
|
||||
|
||||
| Phase | Andere App (Spotify) | ARIA-Mikro | Hintergrund-Service |
|
||||
|------------------------------|----------------------|---------------------|---------------------|
|
||||
| Idle / Ohr aus | spielt frei | aus | aus |
|
||||
| Wake-Word lauscht (armed) | spielt frei | passiv (openWakeWord) | aktiv ('wake') |
|
||||
| User-Aufnahme laeuft | pausiert (EXCLUSIVE) | Recording | aktiv ('rec') |
|
||||
| Aufnahme zu Ende | resumed | aus | (rec released) |
|
||||
| ARIA denkt/schreibt (~20s) | spielt frei | aus | (kein Slot) |
|
||||
| TTS startet | pausiert (DUCK) | aus (oder barge) | aktiv ('tts') |
|
||||
| TTS spielt (auch GPU-Pausen) | bleibt pausiert | barge wenn Wake-Word | aktiv |
|
||||
| TTS zu Ende | nach 800ms resumed | (Conversation-Window) | (tts released) |
|
||||
| Eingehender Anruf (auch VoIP) | — | Mikro pausiert | aus |
|
||||
| Anruf vorbei | — | Mikro wieder armed | aktiv ('wake') |
|
||||
| Anruf vorbei (Auto-Resume) | pausiert wieder | aus | aktiv ('tts') |
|
||||
| Neue Frage waehrend Anruf | — | Mikro pausiert | (rec waehrend Anruf nicht) |
|
||||
| Anruf vorbei nach neuer Frage | (siehe TTS-Phasen) | (siehe TTS-Phasen) | (tts gewinnt, alter Resume verworfen) |
|
||||
|
||||
Wichtige Mechanismen:
|
||||
- **Underrun-Schutz** im PcmStreamPlayer fuettert Stille rein wenn die
|
||||
Bridge in Render-Pausen liefert — Spotify bleibt durchgehend pausiert,
|
||||
auch zwischen den Saetzen einer langen Antwort.
|
||||
- **Conversation-Focus** (nur bei Wake-Word 'conversing') haelt den
|
||||
AudioFocus dauerhaft. Bei reinem Tap-to-Talk oder Text-Chat greift's
|
||||
nicht — Spotify darf in der Denk-Phase ruhig weiterspielen.
|
||||
- **Foreground-Service** (mediaPlayback|microphone) haelt App-Prozess
|
||||
am Leben damit TTS/Mikro/Wake-Word auch bei minimierter App weiter-
|
||||
laufen. Notification zeigt aktuellen Status ("ARIA spricht/hoert
|
||||
zu/bereit").
|
||||
- **Anruf-Erkennung** ueber TelephonyManager (klassisch) + AudioFocus-
|
||||
Loss-Listener mit Polling-Fallback (VoIP wie WhatsApp/Signal/Discord).
|
||||
- **Auto-Resume nach Anruf**: beim Halt wird die Wiedergabe-Position
|
||||
gemerkt (Date.now() - playbackStart - leadingSilence). Nach Auflegen
|
||||
wartet die App bis zu 30s auf den WAV-Cache und spielt dann ab der
|
||||
gemerkten Position weiter. Wenn das Telefonat länger als die Antwort
|
||||
dauerte, ist der Cache schon fertig — instant Resume (Rechen-Skizze dazu unter dieser Liste).
|
||||
- **Neue Frage waehrend Anruf** (Text-Chat geht trotz Telefonat): die
|
||||
neue Antwort ueberschreibt den pending Resume. _handlePcmChunkImpl
|
||||
stoppt einen ggf. laufenden resumeSound und setzt pausedMessageId
|
||||
zurueck wenn die neue Stream-messageId abweicht. Die letzte Antwort
|
||||
gewinnt immer.
|
||||
- **Audio-Ausgabe trotz aktivem Telefonat**: ARIA antwortet auch waehrend
|
||||
eines Telefonats per Lautsprecher (Telefon-Audio geht ueber separaten
|
||||
Stream zur Gegenseite). haltAllPlayback wird nur beim STATE-WECHSEL
|
||||
ringing/offhook gerufen — wenn der Anruf schon laeuft (offhook→offhook),
|
||||
triggert eine neue Frage keinen Halt mehr.
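
Zur Auto-Resume-Position eine kleine Rechen-Skizze (Python nur zur Illustration,
die App rechnet das in TS/Kotlin; Namen hier frei gewaehlt):

```python
import time
from typing import Optional

def resume_offset_ms(playback_start_ms: float,
                     leading_silence_ms: float = 200.0,
                     now_ms: Optional[float] = None) -> float:
    """Bereits abgespielte Zeit der unterbrochenen Antwort, ohne die
    kuenstliche Anfangs-Stille; ab dieser Position spielt die App nach
    dem Auflegen aus der gecachten WAV weiter."""
    now_ms = time.time() * 1000 if now_ms is None else now_ms
    return max(0.0, now_ms - playback_start_ms - leading_silence_ms)

# Beispiel: Antwort lief 12.5s bevor der Anruf kam, 200ms Leading-Silence
# -> Wiedereinstieg bei ca. 12.3s.
```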
|
||||
|
||||
## Erledigt
|
||||
|
||||
### Bugs / Fixes
|
||||
|
||||
- [x] Diagnostic: "ARIA denkt..." bleibt nicht mehr stehen
|
||||
- [x] App: "ARIA denkt..." Indicator + Abbrechen-Button (Bridge spiegelt agent_activity via RVS)
|
||||
- [x] Textnachrichten werden von ARIA beantwortet (Bridge chat handler fix)
|
||||
- [x] Voice-Auswahl funktioniert wieder: speaker_wav als Basename statt Pfad fuer daswer123 local-Mode
|
||||
- [x] Diagnostic-Voice-Wechsel resettet alle App-lokalen Voice-Overrides via type "config"
|
||||
- [x] Streaming TTS Stop-Race: Writer wartet auf playbackHeadPosition vor stop()/release() — keine abgeschnittenen Saetze mehr
|
||||
- [x] App: Audioausgabe hoert nicht mehr mitten im Satz auf (playbackHeadPosition wait + Stop-Race fix)
|
||||
- [x] AudioFocus.release wartet auf echten Playback-Ende — kein Volume-Hochfahren mehr mid-Antwort
|
||||
- [x] App Mute-/Auto-Playback-Bug: Closure-Bug geloest (ttsCanPlayRef live-gespiegelt, nicht mehr stale)
|
||||
- [x] App Zombie-Recording: Ohr-aus kill laufende Aufnahme damit der Aufnahme-Button weiter funktioniert
|
||||
- [x] Whisper transkribiert Voice-Uploads nicht mehr mit hardcoded "small" — aktuelles Modell wird behalten, kein unnoetiger Modell-Swap
|
||||
- [x] RVS/WebSocket maxPayload 50MB: voice_upload mit WAV als base64 sprengt kein Frame-Limit mehr
|
||||
- [x] Wake-Word Embedding rank-4 Fix (Pipeline-Bug der das Triggern verhinderte) + Frame-Count aus Modell-Metadaten lesen
|
||||
- [x] PCM-Underrun-Schutz: Stille-Fill in Render-Pausen verhindert Spotify-Auto-Resume nach 10s Stillstand
|
||||
- [x] Conversation-Focus-Lifecycle: AudioFocus haengt am Wake-Word-State 'conversing' statt an einzelnen Streams — Spotify bleibt durchgehend gepaust, auch zwischen mehreren Antworten
|
||||
- [x] Voice-Override behaelt Stimme ueber alle TTS-Calls einer Antwort (vorher: nach erstem TTS-Call zurueck auf Default)
|
||||
- [x] Sprachnachricht-Bubble defensiv: STT-Result fuegt neue Bubble hinzu wenn Placeholder fehlt (Race-Schutz)
|
||||
- [x] Bild + Text als EINE Anfrage: Bridge buffert files 800ms, merged mit folgendem chat-Text zu einem send_to_core (statt zwei getrennten ARIA-Antworten)
|
||||
- [x] Diagnostic→App: persistente RVS-Connection statt frische pro Send (Race-Probleme mit Zombie-WS geloest)
|
||||
- [x] Textauswahl in Bubbles wieder funktional (nested Text+onPress raus, dataDetectorType="all" macht Links automatisch klickbar)
|
||||
- [x] **Placeholder-Race bei parallelen Sprachnachrichten geloest**: jede Aufnahme bekommt eine eindeutige audioRequestId, Bridge gibt sie ans STT-Result zurueck — App matcht jetzt punktgenau die richtige Bubble statt per Substring
|
||||
- [x] Mikro-Offen-Toast "🎤 sprich jetzt" erscheint erst wenn audioService.startRecording wirklich erfolgreich war (statt ~400ms vorher beim Wake-Word-Detect)
|
||||
- [x] Sprachnachrichten ohne STT-Result werden nach 60s+Aufnahmedauer automatisch entfernt (sicher genug fuer 5-30min-Aufnahmen, schnell genug fuer leere Wake-Word-Echos)
|
||||
- [x] VAD adaptive Baseline robuster: minimum statt avg + Cap auf -50dB bis -28dB (Stille) / -40dB bis -18dB (Speech) — keine "tote" VAD-Konfiguration mehr bei lauter Umgebung oder Wake-Word-Echo
|
||||
- [x] Push-to-Talk raus, nur noch Tap-to-Talk (verhinderte Touch-Race-Probleme)
|
||||
- [x] Manueller Mikro-Stop beendet Wake-Word-Konversation: Tap auf Mikro-Knopf waehrend conversing → audio raus + zurueck zu armed (= Wake-Word lauscht wieder, kein Auto-Mikro nach ARIAs Antwort). VAD-Auto-Stop bleibt bei Multi-Turn
|
||||
- [x] **Wake-Word pausiert bei Anruf**: phoneCall ruft pauseForCall (openWakeWord.stop) bei RINGING/OFFHOOK, resumeFromCall bei IDLE. Pre-Call-State wird gemerkt — armed bleibt armed, conversing degraded zu armed (User soll nicht in halbem Dialog landen)
|
||||
- [x] **App-Resume-Cooldown**: Wechsel von Background → Foreground triggert keinen falschen Wake-Word-Trigger mehr. AppState-Listener setzt 1.5s Cooldown in dem onWakeDetected-Events ignoriert werden (Audio-Pegel-Spike beim AudioFocus-Switch sonst als Wake-Word interpretiert)
|
||||
- [x] Background-Mikro robust: acquireBackgroundAudio('rec'/'wake') wird jetzt VOR AudioRecord.startRecording gerufen — Foreground-Service mit foregroundServiceType=microphone muss aktiv sein bevor das Mikro greift, sonst blockiert Android ab 11+ den Background-Zugriff
|
||||
- [x] **Stille-Pegel manuell setzbar** (Settings → Spracheingabe): Override-Wert in dB von -55 bis -15, default "automatisch". Info-Button mit Modal erklaert die Skala (niedriger = sensibler, hoeher = robuster gegen Hintergrundlaerm). Bei manuell gesetztem Wert wird die adaptive Baseline ignoriert
|
||||
- [x] **Kurze TTS-Texte (1-3 Worte) spielen jetzt ab** — auf OnePlus A12 stallte AudioTrack mit `pos=0` weil der Default-Start-Threshold `bufferSize/2` (= 2s) bei kurzen Streams nie ueberschritten wurde. Fix: `setStartThresholdInFrames(100ms)` direkt nach dem Track-Build (API 31+). Buffer auf 4s entkoppelt von Pre-Roll, `play()` wird beim allerersten data-chunk gerufen
|
||||
- [x] **Mute-Button stoppt jetzt auch laufenden PCM-Stream** — `pcmStreamActive` wurde beim isFinal-Chunk schon false gesetzt, der AudioTrack spielte aber noch sekundenlang aus seinem Buffer. `stopPlayback()` uebersprang darum `PcmStreamPlayer.stop()`. Fix: stop() immer rufen (ist idempotent), kein Flag-Check mehr
|
||||
- [x] **GPS-Permission im Manifest + Runtime-Request** beim Settings-Toggle — vorher fehlten ACCESS_COARSE_LOCATION / ACCESS_FINE_LOCATION komplett. `Geolocation.getCurrentPosition` schlug lautlos fehl, App sendete nie ein location-Feld
|
||||
- [x] **GPS-Position auch im STT-Payload an Diagnostic** — die App sendet location einmal im audio-Payload. Die Bridge nutzte sie zwar (ging in aria-core's Kontext rein), reichte sie aber nicht im STT-broadcast an Diagnostic durch. Diagnostic zeigte darum bei Spracheingaben nie den GPS-Block, obwohl der "GPS einblenden"-Toggle aktiv war
|
||||
- [x] **Auto-Resume nach Anruf — pcmBuffer bleibt erhalten**: `haltAllPlayback` leerte den pcmBuffer mid-Anruf, isFinal schrieb dann eine leere WAV. Neue `pauseForCall`-Methode statt `haltAllPlayback`: AudioTrack stoppt + Focus released, `pcmBuffer` und `pcmMessageId` bleiben — chunks werden weiter gesammelt damit isFinal die WAV schreibt und resumeFromInterruption sie findet. Plus `captureInterruption` idempotent gemacht (ringing → offhook ueberschreibt nicht)
|
||||
- [x] **Replay-Resume nach Anruf**: `_firePlaybackStarted` ueberschrieb `currentPlaybackMsgId` mit leerem pcmMessageId — captureInterruption hatte nichts zu merken. Plus Regex `[0-9a-f-]+\.wav` matchte nicht alle Dateinamen. Plus `_playFromPathAtPosition` aktualisiert jetzt das Tracking damit ein zweiter Anruf in derselben Antwort auch funktioniert
|
||||
- [x] **`pauseForCall` setzt `isPlaying` zurueck**: vorher haengten weitere Play-Button-Klicks nach Anruf, weil `playAudio` bei `isPlaying=true` den `_playNext`-Pfad ueberspringt
|
||||
- [x] **Play-Button rendert neu wenn Cache-Datei weg ist**: vorher checkte der Button nur `if (item.audioPath)` — auf eine geloeschte Cache-Datei zeigte das aber stillschweigend ins Leere. Jetzt RNFS.exists-Check mit Fallback auf `tts_request` an die Bridge → F5-TTS rendert neu, WAV wandert zurueck in den Cache
|
||||
- [x] **Bridge WebSocket max_size 50 MB**: Python `websockets.connect` hat 1 MiB Default — Stefan's 4MB JPEG (5.78 MB Base64) sprengte das, Bridge-Connection wurde silent gedroppt. f5tts/whisper-bridges hatten max_size schon, nur aria_bridge war vergessen
|
||||
- [x] **Bridge resized Bilder >2 MB serverseitig auf 1568px**: Claude-Vision-API hat ~5 MB Base64-Limit. Galerie-Bilder via `react-native-image-picker` sind clientseitig schon klein, Bueroklammer/DocumentPicker reichte das rohe File durch — Claude lieferte leere Antwort. Pillow im Bridge-Container, nur fuer JPEG/PNG/WebP/GIF (PDFs/ZIPs/SVGs unangetastet)
|
||||
- [x] **Bridge `chat:error` liest auch `errorMessage`**: OpenClaw legt bei state=error den Text dort statt in `error` ab → Bridge meldete generisches "[Fehler] Unbekannt", echter Fehler nur in Container-Logs. Plus: `chat:final` ohne text wird jetzt mit Hinweis-Bubble an die App gemeldet (statt stumm), z.B. wenn Vision das Bild silent ablehnt
|
||||
- [x] **Cache-Cleanup beim App-Start** — orphane `aria_tts_*.wav` Files (>5 min) im CachesDirectoryPath werden weggeraeumt, sammeln sich sonst an wenn Sound mid-playback gestoppt wird (Anruf, Mute, Barge-In) und der completion-Callback nicht feuert. Plus neuer Settings-Button "TTS-Cache leeren" mit Live-Groessenanzeige
|
||||
- [x] **Verbose-Logging-Toggle in Settings → Protokoll**: `console.log` global stummschaltbar (warn/error bleiben aktiv) — spart adb-logcat-Speicher wenn alles laeuft
|
||||
- [x] **800 ms-Delay vor Anruf-Auto-Resume**: ARIA's neuer Focus-Request kollidierte sonst mit Spotify's Auto-Resume nach Anruf-Ende. System haengt noch im IN_CALL→NORMAL-Mode-Uebergang, Spotify sieht Loss → Loss und bleibt pausiert. Mit Delay schafft Spotify den Resume-Schritt, dann pausiert ARIA wieder ordnungsgemaess
|
||||
- [x] **Mute-Button = Stop fuer aktuelle Antwort**: vorher startete eine NEUE PCM-Chunk-Sequenz nach Mute-aus die alte Antwort weiter wo sie war (funktionierte 2x, dann nicht mehr weil isFinal schon kam). Jetzt mit `_stoppedMessageId`-Tracking: bei Mute wird die aktive msgId gemerkt, alle weiteren chunks dieser msgId bleiben silent — auch wenn Mute zurueckgenommen wird. Reset bei neuer msgId, neue Antworten spielen normal
|
||||
- [x] **Spotify resumed nach Mute-Stop**: `stopPlayback` released seinen TRANSIENT-Focus (USAGE_ASSISTANT) sauber → Spotify bekommt GAIN-Event und resumed automatisch. Ein zwischenzeitlich eingebauter `kickReleaseMedia` (USAGE_MEDIA + GAIN) verhinderte das Auto-Resume sogar (Spotify interpretierte es als "user-action stopp") — wieder rausgenommen
|
||||
- [x] **ARIA kann Dateien an User zurueckgeben** (PDFs, Bilder, Office-Docs, Markdown, ZIPs, ...): ARIA setzt am Antwort-Ende `[FILE: /shared/uploads/aria_<name>.<ext>]` Marker, Bridge parsed sie raus (TTS liest's nicht vor) und sendet `file_from_aria`-Event ueber RVS. App zeigt Anhang-Bubble + Klick oeffnet via Android-Intent-Picker (`FileOpenerModule`, FileProvider), Diagnostic zeigt Bubble + PDFs/Bilder neuer Tab, andere als Download. Mehrere Marker = mehrere Bubbles, nicht-existente Marker werden mit Hinweis an User gemeldet (statt silent gedroppt)
|
||||
- [x] **External Bilder/Dateien werden serverseitig persistiert**: ARIA laed externe URLs (Wikipedia, Wiki Commons) mit curl runter und gibt sie via `[FILE: ...]`-Marker zurueck — bleibt permanent im Chat auch wenn die Online-Quelle stirbt. System-Prompt instruiert sie das Pattern zu nutzen
|
||||
- [x] **ARIA-Datei-Bubbles ueberleben Browser-Refresh**: Diagnostic-Server parsed beim `load_chat_history` die Marker aus dem OpenClaw-Session-File und schickt `aria_file`-Eintraege mit, sodass die Anhang-Bubbles nach F5 wiederhergestellt werden. Plus: `/shared/uploads/`-Bildpfade werden im History-Render auch als Inline-Image gerendert (vorher nur in live-Bubbles)
|
||||
- [x] **"ARIA reparieren"-Button** in App + Diagnostic: triggert `openclaw doctor --fix` ueber RVS → Bridge → Diagnostic HTTP-API. Fix fuer stuck Runs ohne SSH
|
||||
- [x] **"ARIA hart neu starten"-Button**: docker compose-Restart ueber Docker-Socket-API im Diagnostic-Server. Mit Confirmation in der App, fuer Faelle wo doctor nicht reicht (alive aber haengender Run)
|
||||
- [x] **Auto-Compact nach N Messages**: bei zu langer Session wirft Linux beim Subprocess-spawn E2BIG (Argument list too long, ~128KB-2MB Limit). Bridge zaehlt User-Messages; bei `COMPACT_AFTER_MESSAGES` (env, default 140) werden Sessions geleert + Container neu gestartet, User bekommt Hinweis-Bubble. Plus manueller "🧹 Konversation komprimieren"-Button in App-Settings und Diagnostic
|
||||
- [x] **`[FILE: ...]`-Marker-Filter ueberall in Diagnostic**: Filter direkt in `addChat` damit er fuer alle Code-Pfade greift (chat_final, proxy_result, History-Load, ...) — vorher rutschten Marker als Text durch wenn sie nicht ueber chat_final kamen
|
||||
- [x] **Mehrere `[FILE: ...]`-Marker in einer Antwort**: Bridge zerlegt sauber in mehrere file_from_aria-Events, ARIA muss nicht selbst zwei Antworten posten. Bei nicht-existenten Files erscheint ein User-Hinweis statt silent skip
|
||||
- [x] **Inline-Bilder in Chat-Nachrichten** (App): Markdown-Bild-URLs und plain-`https://image.png`-URLs werden als Image-Vorschau unter dem Text gerendert. Mit `react-native-svg` auch SVG-URLs inline
|
||||
- [x] **SVG-Anhaenge** werden korrekt gerendert: ChatImage-Komponente erkennt `.svg`-Endung und nutzt SvgUri statt Image (RN-Image kann SVG nicht). Vollbild-Modal genauso, mit `preserveAspectRatio="xMidYMid meet"` damit SVGs nicht gestreckt werden
|
||||
- [x] **Pinch-Zoom + Pan im Vollbild-Modal** (App): neue `ZoomableImage`-Komponente, reine RN-Implementation mit PanResponder+Animated, ohne externe Lib. 2-Finger-Pinch 1x..5x, 1-Finger-Pan wenn gezoomt, Doppel-Tap toggelt 1x↔2.5x. Plus ✕-Close-Button damit Tap-to-Close nicht mit Pan-Gesten kollidiert
|
||||
- [x] **ARIA-Abkuerzung ausgeschrieben**: in App → Einstellungen → Ueber und Diagnostic → Einstellungen ist jetzt erklaert: "ARIA — Autonomous Reasoning & Intelligence Assistant"
|
||||
- [x] **`init.sh`** legt fehlende Config-Dateien aus *.example-Vorlagen an — frischer Clone laeuft ohne Anleitung an
|
||||
- [x] **`USER.md` privat**: aus dem Repo genommen (enthielt interne Tool-Liste mit Gitea-URL etc.). Vorlage als `USER.md.example` checked-in, lokales File via `.gitignore` ausgeschlossen
|
||||
|
||||
### App Features
|
||||
|
||||
- [x] Bildupload funktioniert (Shared Volume /shared/uploads/)
|
||||
- [x] Sprachnachrichten werden als Text angezeigt (STT → Chat-Bubble)
|
||||
- [x] Cache leeren + Auto-Download von Anhaengen
|
||||
@@ -11,13 +127,9 @@
|
||||
- [x] Ohr-Button → Gespraechsmodus (Auto-Aufnahme nach ARIA-Antwort)
|
||||
- [x] Play-Button in ARIA-Nachrichten fuer Sprachwiedergabe
|
||||
- [x] Chat-Suche in der App (Lupe in Statusleiste)
|
||||
- [x] Watchdog mit Container-Restart (2min Warnung → 5min doctor --fix → 8min Restart)
|
||||
- [x] Abbrechen-Button im Diagnostic Chat
|
||||
- [x] Nachrichten Backup on-the-fly (/shared/config/chat_backup.jsonl)
|
||||
- [x] Grosse Nachrichten satzweise aufteilen fuer TTS
|
||||
- [x] RVS Nachrichten vom Smartphone gehen durch
|
||||
- [x] Stimmen-Einstellungen (Ramona/Thorsten, Speed pro Stimme — durch XTTS/F5-TTS ersetzt)
|
||||
- [x] Highlight-Trigger konfigurierbar in Diagnostic
|
||||
- [x] Highlight-Trigger konfigurierbar in Diagnostic (spaeter komplett entfernt — war Piper-Relikt)
|
||||
- [x] XTTS v2 Integration (Gaming-PC, GPU, Voice Cloning) — durch F5-TTS ersetzt
|
||||
- [x] XTTS Voice Cloning (Audio-Samples hochladen, eigene Stimme)
|
||||
- [x] TTS Engine waehlbar (Piper/XTTS) — Piper raus, XTTS raus, jetzt nur F5-TTS
|
||||
@@ -25,23 +137,19 @@
|
||||
- [x] Auto-Update: APK-Installation via FileProvider
|
||||
- [x] Auto-Update: "Auf Updates pruefen" Button in App-Einstellungen
|
||||
- [x] Audio-Queue (sequentielle Wiedergabe, kein Ueberlappen)
|
||||
- [x] Textnachrichten werden von ARIA beantwortet (Bridge chat handler fix)
|
||||
- [x] Mehrere Anhaenge + Text vor dem Senden (Pending-Vorschau)
|
||||
- [x] Paste-Support fuer Bilder in Diagnostic Chat
|
||||
- [x] Markdown-Bereinigung fuer TTS (fett, kursiv, code, links, etc.)
|
||||
- [x] SSH Volume read-write fuer Proxy (kein -F Workaround mehr)
|
||||
- [x] Diagnostic: Sessions als Markdown exportieren (Download-Button)
|
||||
- [x] Speech Gate: Aufnahme wird verworfen wenn keine Sprache erkannt
|
||||
- [x] Session-Persistenz: Gewaehlte Session bleibt ueber Container-Restarts erhalten
|
||||
- [x] Diagnostic: "ARIA denkt..." bleibt nicht mehr stehen
|
||||
- [x] App: "ARIA denkt..." Indicator + Abbrechen-Button (Bridge spiegelt agent_activity via RVS)
|
||||
- [x] Whisper STT: Model-Auswahl in Diagnostic (tiny/base/small/medium/large-v3), Hot-Reload
|
||||
- [x] App: Audio-Aufnahme explizit 16kHz mono (spart Resample, optimal fuer Whisper)
|
||||
- [x] Streaming TTS: PCM-Stream → AudioTrack MODE_STREAM, keine WAV-Gaps
|
||||
- [x] Piper komplett entfernt
|
||||
- [x] Gespraechsmodus: Speech-Gate strenger (-28dB / 500ms)
|
||||
- [x] Diagnostic: Archivierte Session-Versionen (.reset.*) angezeigt + exportierbar
|
||||
- [x] tools/export-jsonl-to-md.js: CLI-Konverter fuer Session-JSONL zu Markdown
|
||||
- [x] tools/export-jsonl-to-md.js: CLI-Konverter fuer Session-JSONL zu Markdown (mit OpenClaw raus)
|
||||
- [x] NO_REPLY-Filter in Bridge + Diagnostic
|
||||
- [x] Audio-Ducking + Exklusiv-Focus (Kotlin AudioFocusModule)
|
||||
- [x] TTS-Cleanup serverseitig: Code-Bloecke raus, Einheiten ausgeschrieben, Abkuerzungen buchstabiert, URLs zu "ein Link"
|
||||
@@ -51,14 +159,11 @@
|
||||
- [x] Disk-Voll Banner in Diagnostic: rotes Overlay + copy-baren Cleanup-Befehlen (safe + aggressiv)
|
||||
- [x] cleanup.sh: kombinierter Docker-Aufraeum-Befehl (safe / --full)
|
||||
- [x] Streaming TTS Pre-Roll: AudioTrack play() startet erst wenn 2.5s gepuffert sind
|
||||
- [x] Streaming TTS Stop-Race: Writer wartet auf playbackHeadPosition vor stop()/release() — keine abgeschnittenen Saetze mehr
|
||||
- [x] Leading-Silence (200ms) am Stream-Anfang — AudioTrack faehrt sauber an
|
||||
- [x] Pre-Roll-Buffer einstellbar in App-Settings (1.0-6.0s, Default 3.5s)
|
||||
- [x] Fade-In auf erstem PCM-Chunk (120ms) — versteckt XTTS/F5-TTS Warmup-Glitches
|
||||
- [x] Decimal-zu-Worte fuer TTS (0.1 → null komma eins, mit IP-Schutz-Lookahead)
|
||||
- [x] Generic Acronym-Buchstabieren (XTTS → X T T S, USB → U S B, ueber expliziter Liste)
|
||||
- [x] Voice-Auswahl funktioniert wieder: speaker_wav als Basename statt Pfad fuer daswer123 local-Mode
|
||||
- [x] Diagnostic-Voice-Wechsel resettet alle App-lokalen Voice-Overrides via type "config"
|
||||
- [x] voice_preload/voice_ready: Stille Mini-Render bei Voice-Wechsel + Toast/Status "bereit"
|
||||
- [x] Whisper STT auf die Gamebox ausgelagert (faster-whisper CUDA, float16) — neuer aria-whisper-bridge Container
|
||||
- [x] aria-bridge: STT primaer remote (Gamebox), Fallback lokal nach 45s Timeout
|
||||
@@ -66,43 +171,70 @@
- [x] **F5-TTS fully replaces XTTS**: new aria-f5tts-bridge container, voice cloning, sentence-by-sentence streaming
- [x] Voice upload with automatic Whisper transcription: the user no longer has to type in a reference text
- [x] Audio pause instead of ducking: Spotify/YouTube pause completely during TTS (TRANSIENT instead of MAY_DUCK)
- [x] AudioFocus.release waits for the actual end of playback: volume no longer ramps back up mid-reply
- [x] VAD silence adjustable in app settings (1.0-8.0 s, default 2.8 s)
- [x] MAX_RECORDING raised to 120 s, allowing longer explanations
- [x] App: audio output no longer stops mid-sentence (playbackHeadPosition wait + stop-race fix)
- [x] F5-TTS: reference WAV preprocessing: loudness normalization to -16 LUFS + silence trim + 10 s clip for consistent cloning quality
- [x] F5-TTS: German fine-tune (aihpi/F5-TTS-German, Vocos variant) configurable via hf:// path in Diagnostic
- [x] Whisper no longer transcribes voice uploads with a hardcoded "small" model: the current model is kept, no unnecessary model swap
- [x] RVS/WebSocket maxPayload 50 MB: voice_upload with a base64 WAV no longer blows the frame limit
- [x] Dynamic STT timeout in aria-bridge: 300 s while whisper-bridge reports 'loading', 45 s once 'ready'
- [x] service_status broadcasts: f5tts/whisper report their loading state, banner in Diagnostic (bottom right) + app (top)
- [x] config_request pattern: bridges request the current voice config on connect, aria-bridge answers
- [x] F5-TTS tuning via Diagnostic (model ID, checkpoint, cfg_strength, nfe_step) instead of ENV vars, with hot reload on model switch
- [x] Conversation window: conversation mode ends after X seconds of silence (1.0-20.0 s, default 8 s, adjustable in settings)
- [x] Porcupine wake-word integration in the app (built-in keywords + custom later, configurable per device)
- [x] Porcupine wake-word integration in the app (replaced by openWakeWord)
- [x] HF cache as a bind mount instead of a Docker volume: no more .vhdx bloat on Docker Desktop / Windows
- [x] cleanup-windows.ps1 / .bat: VHDX cleanup via diskpart (without Hyper-V) with self-elevation
- [x] App mute/auto-playback bug: closure bug fixed (ttsCanPlayRef mirrored live, no longer stale)
- [x] App zombie recording: ear-off kills a running recording so the record button keeps working
- [x] App text rendering: messages selectable + autolink for URLs/e-mails/phone numbers (browser/mail/dialer)
- [x] TTS playback speed adjustable per device (Settings → 0.5-2.0x in 0.1 steps, default 1.0)
- [x] Diagnostic: voice preview modal (play icon before the delete X, text field with a default, play the WAV in the browser)
- [x] **Wake word fully on-device via openWakeWord (ONNX Runtime)**: Porcupine removed, no API key and no license fees. Bundled keywords: hey_jarvis, computer, alexa, hey_mycroft, hey_rhasspy
- [x] APK ABI split to arm64-v8a: from ~136 MB down to ~35 MB, much smaller auto-update downloads on the phone
- [x] PhoneStateListener: TTS pauses on incoming calls (READ_PHONE_STATE permission)
- [x] **VoIP calls** (WhatsApp/Signal/Discord/Teams) detected via an AudioFocus-loss listener + getMode polling fallback (every 3 s)
- [x] **Auto-resume after a call**: ARIA's interrupted reply continues from the remembered position after hang-up (Date.now() tracking + WAV cache, 30 s wait for the final marker after a short call)
- [x] **New question during a call** overrides a pending auto-resume: the latest reply wins, the old resumeSound is stopped
- [x] **Audio output during an active call** works (haltAllPlayback only on the state change idle→ringing/offhook, not on offhook→offhook)
- [x] **PcmPlaybackFinished event** in native code: AudioFocus is only released once the AudioTrack is really finished (before: end() cap after 0.5 s, so Spotify played 32 s alongside ARIA)
- [x] **More robust APK cache cleanup**: now scans CachesDirectoryPath + DocumentDirectoryPath + ExternalCachesDirectoryPath + ExternalDirectoryPath instead of only Caches. Plus a manual "Update-Cache leeren" button in Settings → Storage with a live display of the current size
- [x] Diagnostic chat: bubbly formatting, multi-line input field (textarea, Enter sends, Shift+Enter inserts a newline)
- [x] Adaptive VAD threshold: baseline from the first 500 ms of mic level, silence = baseline+6 dB / speech = baseline+12 dB (see the sketch after this list)
- [x] Maximum recording duration configurable in settings (1-30 min, default 5 min), allowing longer dictation
- [x] Barge-in: the user can interrupt ARIA during a reply/tool use, the old activity is cancelled, the bridge gives aria-core a context hint that this is a correction
- [x] Settings sub-screens: 8 categories (Connection, General, Voice Input, Wake Word, Voice Output, Storage, Log, About) instead of one long list
- [x] **Ready sound (airplane ding-dong) when the mic opens after the wake word**: acoustic confirmation instead of just a toast. Toggle in Settings → Wake Word, enabled by default
- [x] **Wake word in parallel with TTS** using AcousticEchoCanceler: the user says "Computer" while ARIA is speaking → TTS goes silent immediately, a new recording starts
- [x] **Send GPS position**: toggle in Settings → General → Location, persisted in AsyncStorage. When active, lat/lon is attached to every chat/audio message. The bridge prefixes the text for aria-core with a GPS hint (including the instruction to mention the position only when relevant)
- [x] **Background audio service**: TTS, wake-word listening AND recording keep running while the app is minimized. Foreground service with foregroundServiceType=mediaPlayback|microphone, persistent notification with dynamic text ("ARIA spricht" / "ARIA hoert zu" / "ARIA bereit")

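A minimal sketch of the adaptive VAD threshold item, assuming 16-bit PCM frames; the class name, frame size, and calibration constants are made up for illustration, and the actual logic lives in the app's native audio path.

```python
import math

FRAME_MS = 20          # assumed frame size
CALIBRATION_MS = 500   # first 500 ms establish the baseline

def dbfs(frame: list[int]) -> float:
    """RMS level of a 16-bit PCM frame in dBFS (0 dBFS = full scale)."""
    if not frame:
        return -96.0
    rms = math.sqrt(sum(s * s for s in frame) / len(frame))
    return 20 * math.log10(max(rms, 1.0) / 32768.0)

class AdaptiveVad:
    def __init__(self) -> None:
        self.calibrating = True
        self.elapsed_ms = 0
        self.levels: list[float] = []
        self.baseline = -60.0

    def push(self, frame: list[int]) -> str:
        level = dbfs(frame)
        if self.calibrating:
            self.levels.append(level)
            self.elapsed_ms += FRAME_MS
            if self.elapsed_ms >= CALIBRATION_MS:
                self.baseline = sum(self.levels) / len(self.levels)
                self.calibrating = False
            return "calibrating"
        # silence up to baseline+6 dB, speech from baseline+12 dB, undecided in between
        if level >= self.baseline + 12:
            return "speech"
        if level <= self.baseline + 6:
            return "silence"
        return "undecided"
```
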
### Infrastructure

- [x] Watchdog with container restart (2 min warning → 5 min doctor --fix → 8 min restart, see the sketch after this section)
- [x] On-the-fly message backup (/shared/config/chat_backup.jsonl)
- [x] RVS messages from the smartphone get through
- [x] SSH volume read-write for the proxy (no more -F workaround)

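The watchdog item above describes a 2/5/8-minute escalation ladder; a hedged sketch follows, in which the health probe, the container name, and the `doctor --fix` invocation are placeholders since the real watchdog script is not part of this diff.

```python
import subprocess
import time

CONTAINER = "aria-core"     # placeholder container name
CHECK_INTERVAL = 30         # seconds between health checks

def healthy() -> bool:
    """Placeholder health probe -- the real watchdog's check is not shown here."""
    out = subprocess.run(["docker", "inspect", "-f", "{{.State.Running}}", CONTAINER],
                         capture_output=True, text=True)
    return out.stdout.strip() == "true"

def watchdog() -> None:
    unhealthy_since = None
    fired: set[str] = set()
    while True:
        if healthy():
            unhealthy_since, fired = None, set()
        else:
            unhealthy_since = unhealthy_since or time.time()
            down = time.time() - unhealthy_since
            if down >= 120 and "warn" not in fired:      # 2 min: warn only
                print(f"WARN: {CONTAINER} unhealthy for 2 min")
                fired.add("warn")
            if down >= 300 and "fix" not in fired:       # 5 min: try doctor --fix
                subprocess.run(["docker", "exec", CONTAINER, "doctor", "--fix"])
                fired.add("fix")
            if down >= 480 and "restart" not in fired:   # 8 min: restart the container
                subprocess.run(["docker", "restart", CONTAINER])
                fired.add("restart")
        time.sleep(CHECK_INTERVAL)
```
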
## Open

### Bugs

- [ ] App: wake word "jarvis" does not trigger reliably (Porcupine debugging via ADB logcat still pending)
- [ ] App: crashes while listening, possibly on background noise (Porcupine + mic race; errorCallback now holds it back, long-run test still pending)

### Brain (Phase B: the big refactor is underway)

- [x] aria-brain container skeleton (FastAPI + Qdrant + sentence-transformers, see the sketch after this list)
- [x] Memory CRUD via the Diagnostic brain tab (add/edit/delete + search + filter)
- [x] Brain export/import as tar.gz (complete: memories + skills + Qdrant)
- [x] Voice bridge: aria-core-specific logic removed (doctor_fix, aria_restart, aria_session_reset, compact_after)
- [x] aria-core removed entirely from docker-compose.yml, watchdog removed
- [x] Diagnostic: wipe-all button (memory + voices + settings)
- [x] Voice export/import (Diagnostic + XTTS bridge on the gaming PC)
- [x] F5/Whisper settings as a JSON bundle export/import
- [x] File manager (Diagnostic + app modal): manage /shared/uploads/, deletes are mirrored live in the chat bubbles
- [ ] **Phase B item 2:** migrate `aria-data/brain-import/` → atomic memory points (Identity / Rules / Preferences / Tools)
- [ ] **Phase B item 3:** brain conversation loop (single-chat UI + rolling window + memory distillate)
- [ ] **Phase B item 4:** skills system (manifest, venv/local-bin, README per skill, Diagnostic skills tab, export/import)

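The brain skeleton item names FastAPI + Qdrant + sentence-transformers; as a rough illustration, a memory store with semantic search could look like the sketch below. Collection name, payload fields, port, and the embedding model are assumptions, not aria-brain's actual schema.

```python
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, PointStruct, VectorParams
from sentence_transformers import SentenceTransformer

# Assumed collection name and embedding model -- not taken from aria-brain itself.
COLLECTION = "aria_memories"
model = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")  # 384-dim embeddings
client = QdrantClient(url="http://localhost:6333")

def ensure_collection() -> None:
    if not client.collection_exists(COLLECTION):
        client.create_collection(
            COLLECTION,
            vectors_config=VectorParams(size=384, distance=Distance.COSINE),
        )

def add_memory(memory_id: int, text: str, category: str) -> None:
    vec = model.encode(text).tolist()
    client.upsert(COLLECTION, points=[
        PointStruct(id=memory_id, vector=vec, payload={"text": text, "category": category}),
    ])

def search_memories(query: str, limit: int = 5):
    vec = model.encode(query).tolist()
    hits = client.search(COLLECTION, query_vector=vec, limit=limit)
    return [(h.payload["text"], h.score) for h in hits]
```
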
### App Features

- [ ] Load chat history more reliably (AsyncStorage race condition)
- [ ] Background audio service (TTS while the app is minimized)
- [ ] Custom wake-word upload via Diagnostic (own .onnx files without an app rebuild)

### Architecture

- [ ] Images: use Claude Vision directly (currently only the file path is passed to ARIA)
- [ ] Auto-compacting and memory/brain management (SQLite?)
- [ ] Diagnostic: system info tab (container status, disk, RAM, CPU)
- [ ] Resolve RVS zombie connections for good
- [ ] Make all .env variables configurable via Diagnostic (the .env fallback stays for the initial bootstrap)
- [ ] Gamebox: small web UI for credentials/server config, or push it centrally from Diagnostic via RVS
- [ ] Root cause of the OpenClaw session reset: find out why sessions are discarded on the first chat.send after a container restart

@@ -18,6 +18,13 @@ const ALLOWED_TYPES = new Set([
"update_check", "update_available", "update_download", "update_data",
"agent_activity", "cancel_request",
"audio_pcm",
"file_from_aria",
"container_restart",
"file_list_request", "file_list_response",
"file_delete_request", "file_deleted",
"xtts_export_voice", "xtts_voice_exported",
"xtts_import_voice", "xtts_voice_imported",
"skill_created",
"xtts_delete_voice",
"voice_preload", "voice_ready",
"stt_request", "stt_response",

@@ -1,74 +0,0 @@
#!/usr/bin/env node
/**
 * Exports an OpenClaw session JSONL (including .reset.* files) as Markdown.
 *
 * Usage:
 *   node export-jsonl-to-md.js <input.jsonl> [output.md]
 *
 * Or straight from the aria-core container:
 *   docker exec aria-core cat /home/node/.openclaw/agents/main/sessions/<ID>.jsonl.reset.<TS> \
 *     | node export-jsonl-to-md.js - > output.md
 */

const fs = require("fs");

const inputArg = process.argv[2];
const outputArg = process.argv[3];

if (!inputArg) {
  console.error("Usage: export-jsonl-to-md.js <input.jsonl|-> [output.md]");
  process.exit(1);
}

const raw = inputArg === "-" ? fs.readFileSync(0, "utf-8") : fs.readFileSync(inputArg, "utf-8");
const lines = raw.split("\n").filter(l => l.trim());

const blocks = [];
for (const line of lines) {
  let obj;
  try { obj = JSON.parse(line); } catch { continue; }
  if (obj.type !== "message" || !obj.message) continue;
  const role = obj.message.role;
  if (role !== "user" && role !== "assistant") continue;

  let text = "";
  const content = obj.message.content;
  if (typeof content === "string") text = content;
  else if (Array.isArray(content)) text = content.filter(c => c.type === "text").map(c => c.text || "").join("\n");
  if (!text) continue;

  if (role === "user") {
    // Strip the untrusted-metadata preamble and any leading [tag] prefix.
    text = text.replace(/^Sender \(untrusted metadata\):[\s\S]*?```[\s\S]*?```\s*\n*/m, "").trim();
    text = text.replace(/^\[.*?\]\s*/, "").trim();
  } else {
    // Strip [[reply_to_*]] markers from assistant messages.
    text = text.replace(/^\[\[reply_to_\w+\]\]\s*/g, "").trim();
  }
  if (!text) continue;

  const ts = obj.message.timestamp || obj.timestamp || 0;
  const when = ts ? new Date(ts).toISOString().replace("T", " ").slice(0, 19) : "";
  const heading = role === "user" ? "## 🧑 User" : "## 🤖 ARIA";
  blocks.push(`${heading}${when ? ` — ${when}` : ""}\n\n${text}`);
}

const exportedAt = new Date().toISOString().replace("T", " ").slice(0, 19);
const title = inputArg === "-" ? "Session" : inputArg.split("/").pop().replace(/\.jsonl.*/, "");
const md = [
  `# Session: ${title}`,
  ``,
  `Exportiert: ${exportedAt} `,
  `Quelle: ${inputArg === "-" ? "stdin" : inputArg}`,
  `Nachrichten: ${blocks.length}`,
  ``,
  `---`,
  ``,
  blocks.join("\n\n---\n\n"),
  ``,
].join("\n");

if (outputArg) {
  fs.writeFileSync(outputArg, md);
  console.error(`OK: ${blocks.length} Nachrichten → ${outputArg}`);
} else {
  process.stdout.write(md);
}

@@ -661,6 +661,76 @@ async def handle_delete_voice(ws, payload: dict) -> None:
        logger.exception("handle_delete_voice Fehler")


async def handle_export_voice(ws, payload: dict) -> None:
    """Packs a voice (.wav + .txt) as tar.gz and sends it back base64-encoded."""
    name = (payload.get("name") or "").strip()
    if not name:
        await _send(ws, "xtts_voice_exported", {"ok": False, "error": "name fehlt"})
        return
    try:
        wav, txt = voice_paths(name)
        if not wav.exists():
            await _send(ws, "xtts_voice_exported", {"ok": False, "name": name, "error": "Stimme nicht gefunden"})
            return
        import io, tarfile
        buf = io.BytesIO()
        with tarfile.open(fileobj=buf, mode="w:gz") as tar:
            tar.add(wav, arcname=wav.name)
            if txt.exists():
                tar.add(txt, arcname=txt.name)
        data = base64.b64encode(buf.getvalue()).decode("ascii")
        logger.info("Voice exportiert: %s (%d KB tar.gz)", name, len(buf.getvalue()) // 1024)
        await _send(ws, "xtts_voice_exported", {"ok": True, "name": name, "data": data})
    except Exception as e:
        logger.exception("handle_export_voice Fehler")
        await _send(ws, "xtts_voice_exported", {"ok": False, "name": name, "error": str(e)[:200]})


async def handle_import_voice(ws, payload: dict) -> None:
    """Receives a tar.gz with <name>.wav (+ optional <name>.txt) and stores it
    in VOICES_DIR. Overwrites an existing voice of the same name."""
    name = (payload.get("name") or "").strip()
    data_b64 = payload.get("data") or ""
    if not name or not data_b64:
        await _send(ws, "xtts_voice_imported", {"ok": False, "error": "name/data fehlt"})
        return
    try:
        import io, tarfile
        VOICES_DIR.mkdir(parents=True, exist_ok=True)
        safe = sanitize_voice_name(name)
        data = base64.b64decode(data_b64)
        extracted_wav = False
        with tarfile.open(fileobj=io.BytesIO(data), mode="r:gz") as tar:
            for member in tar.getmembers():
                if not member.isfile():
                    continue
                base = Path(member.name).name  # prevent path traversal
                if base.lower().endswith(".wav"):
                    target = VOICES_DIR / f"{safe}.wav"
                    f = tar.extractfile(member)
                    if f is None:
                        continue
                    with open(target, "wb") as out:
                        out.write(f.read())
                    extracted_wav = True
                elif base.lower().endswith(".txt"):
                    target = VOICES_DIR / f"{safe}.txt"
                    f = tar.extractfile(member)
                    if f is None:
                        continue
                    with open(target, "wb") as out:
                        out.write(f.read())
        if not extracted_wav:
            await _send(ws, "xtts_voice_imported", {"ok": False, "name": name, "error": "Kein .wav im Archiv"})
            return
        logger.info("Voice importiert: %s", name)
        await _send(ws, "xtts_voice_imported", {"ok": True, "name": name})
        await handle_list_voices(ws)
    except Exception as e:
        logger.exception("handle_import_voice Fehler")
        await _send(ws, "xtts_voice_imported", {"ok": False, "name": name, "error": str(e)[:200]})


# Last voice set via Diagnostic (prevents an endless preload on every config message)
_last_diag_voice = ""

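For reference, a hedged sketch of a client exercising `handle_import_voice`: the `{"type": ..., "payload": ...}` envelope, the ws:// URL, and the port are assumptions inferred from the handler and the run_loop dispatch below, not a documented protocol.

```python
import asyncio, base64, io, json, tarfile
import websockets  # pip install websockets

async def import_voice(url: str, name: str, wav_path: str, ref_text: str = "") -> None:
    # Pack <name>.wav (+ optional <name>.txt) the way handle_import_voice expects it.
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w:gz") as tar:
        tar.add(wav_path, arcname=f"{name}.wav")
        if ref_text:
            txt = ref_text.encode("utf-8")
            info = tarfile.TarInfo(f"{name}.txt")
            info.size = len(txt)
            tar.addfile(info, io.BytesIO(txt))
    payload = {"name": name, "data": base64.b64encode(buf.getvalue()).decode("ascii")}
    async with websockets.connect(url, max_size=50 * 1024 * 1024) as ws:
        # Assumed message envelope: {"type": ..., "payload": ...}
        await ws.send(json.dumps({"type": "xtts_import_voice", "payload": payload}))
        reply = json.loads(await ws.recv())
        print(reply)

# Example (placeholder host/port):
# asyncio.run(import_voice("ws://gamebox:8765", "my_voice", "my_voice.wav"))
```
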
@@ -781,6 +851,10 @@ async def run_loop(runner: F5Runner) -> None:
            asyncio.create_task(handle_list_voices(ws))
        elif mtype == "xtts_delete_voice":
            asyncio.create_task(handle_delete_voice(ws, payload))
        elif mtype == "xtts_export_voice":
            asyncio.create_task(handle_export_voice(ws, payload))
        elif mtype == "xtts_import_voice":
            asyncio.create_task(handle_import_voice(ws, payload))
        elif mtype == "voice_preload":
            asyncio.create_task(handle_voice_preload(ws, payload, runner))
        elif mtype == "stt_response":