Compare commits
177 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| c3fefc60c0 | |||
| 7107ce4fdd | |||
| fa47068d6d | |||
| 07c761fc72 | |||
| 6821eaaa38 | |||
| 31aa86a2a9 | |||
| 87cb687610 | |||
| eb4059a887 | |||
| 415706036b | |||
| e2dd47255e | |||
| 3497aa23f8 | |||
| 8491fb2af7 | |||
| f61864282e | |||
| b2f7d6dda2 | |||
| eeedcc4781 | |||
| 5cf8cab5bd | |||
| 3ae9e19524 | |||
| 0ec4b00879 | |||
| b6b4b1b4d9 | |||
| 950a9d009c | |||
| 693542ef19 | |||
| d12f356ebe | |||
| 7b55d6a91f | |||
| aa077f60e6 | |||
| 094bd6e4f1 | |||
| 5b0b5eeac6 | |||
| 96a1f90ac3 | |||
| bfa06d78a7 | |||
| d16dcd34cc | |||
| dc2f4eb6d2 | |||
| 0f9a029269 | |||
| 70d1500096 | |||
| d0cb7acd10 | |||
| 0b58feee1e | |||
| 8be34e7284 | |||
| b56cef6298 | |||
| 0d203af8fb | |||
| 0468d0e603 | |||
| 7cfc2ba058 | |||
| da795d14f5 | |||
| d60c7e9110 | |||
| 83c99a5e65 | |||
| e438bb11ff | |||
| 8b4f75bf91 | |||
| d7e7386954 | |||
| 2100c64b91 | |||
| 74ebf59c6f | |||
| 53b49eacad | |||
| 0f11d23c75 | |||
| 311030bdaa | |||
| 1e05c66baa | |||
| 4082a6bf2a | |||
| 3485642b3e | |||
| 1240ae3829 | |||
| 2dd4d38dce | |||
| 7f862ce1f4 | |||
| 528fe97b59 | |||
| 3483d1bfce | |||
| 158423c155 | |||
| 087e91dca1 | |||
| 2de4cbc00f | |||
| 03fc465057 | |||
| b696b47feb | |||
| 6aae565541 | |||
| 214bd218a0 | |||
| 2afeee29ee | |||
| c8dee4c416 | |||
| f49f3c3b08 | |||
| c4bbb06710 | |||
| 4411cc4fff | |||
| 24a91887ef | |||
| 4e62b2919f | |||
| fa774156fe | |||
| 3b19f05c5b | |||
| fc3ecaacca | |||
| 08857093b5 | |||
| 62018b3e51 | |||
| 89e3a195a3 | |||
| f023ba0ac5 | |||
| a0570ef8f7 | |||
| facde1fef7 | |||
| 38106a2096 | |||
| a476afb311 | |||
| db4c7b9b72 | |||
| 3bc490b485 | |||
| dd6d70c46e | |||
| b1eaf42fef | |||
| fb9e5dcd10 | |||
| f95e71463f | |||
| 1088bff43d | |||
| cad68db2a2 | |||
| 50b10c8ac0 | |||
| a8b586ec92 | |||
| 632e1e4fa1 | |||
| 7e12816ebd | |||
| 8f64f8fb30 | |||
| b3ff3991c4 | |||
| a4ea387c98 | |||
| 68fbf74a23 | |||
| b857f778e9 | |||
| 31aa82b68c | |||
| de8eeb69e2 | |||
| f5970ce700 | |||
| ef1a4436ca | |||
| 981779cd9e | |||
| 3dcd2ae0b4 | |||
| 2750b867a3 | |||
| f6424add6c | |||
| 2dfd21d1d0 | |||
| 9d9ddc730b | |||
| 77ccee8331 | |||
| 175dcdf225 | |||
| 1549e9cd4f | |||
| 910e74b497 | |||
| 160c5c34b6 | |||
| a6638c0108 | |||
| 43c21d3ddc | |||
| b73c6c346e | |||
| b91ddc5bdf | |||
| 7d08c06720 | |||
| f066a2a555 | |||
| b55b0e7c42 | |||
| 70f806ef80 | |||
| 0773d9496d | |||
| 1a4857ed62 | |||
| 962d814318 | |||
| 9276a92c83 | |||
| d16896c4b4 | |||
| 20050d4077 | |||
| 79760d1b2e | |||
| 13f1103604 | |||
| 73b7a76ea8 | |||
| 17f3d8870e | |||
| 4feaacc7e4 | |||
| af7b2674f3 | |||
| 97442198ec | |||
| e3e841f2ab | |||
| 33185de42b | |||
| dbe547d4ea | |||
| 1a982c0d45 | |||
| dfba5ceb1f | |||
| 1a6f633836 | |||
| 7f7db100af | |||
| d646e9d58e | |||
| bef59ba134 | |||
| dbebfd44ff | |||
| 4d0b9e0d78 | |||
| 0c43a18402 | |||
| 5bdcc3c65b | |||
| 52795530f9 | |||
| 2eb0b4df90 | |||
| 0c18090351 | |||
| d6b54d3247 | |||
| ead28cf09a | |||
| f682aad4ff | |||
| e0c1a4bcd5 | |||
| a648dad96d | |||
| da5579038e | |||
| 4ba48940b9 | |||
| 568ef9ed10 | |||
| 7682a0ce58 | |||
| 3ca834e633 | |||
| 55ef207454 | |||
| 6651f5937d | |||
| e9e7dd804f | |||
| ec9530f17f | |||
| 97cb7be313 | |||
| 77e927ffcd | |||
| a9a87f12df | |||
| 2a56ac0290 | |||
| edc65ce645 | |||
| d7efaf93b3 | |||
| 31ff20c846 | |||
| 406f4cb3cc | |||
| fa0667088a | |||
| f55329706e | |||
| 6c7fd1d0e3 |
+11
-8
@@ -9,15 +9,19 @@
|
|||||||
.env.*
|
.env.*
|
||||||
!.env.example
|
!.env.example
|
||||||
!.env.*.example
|
!.env.*.example
|
||||||
aria-data/config/*.env
|
|
||||||
!aria-data/config/*.env.example
|
|
||||||
!aria-data/config/openclaw.env
|
|
||||||
|
|
||||||
# ── ARIAs Gedächtnis (nur per tar gesichert) ────
|
# Privater User-Profile-Snippet (Tool-Stack, interne URLs) —
|
||||||
aria-data/brain/
|
# liegt jetzt in brain-import/ (frueher aria-data/config/USER.md).
|
||||||
|
# USER.md.example ist Repo-Inhalt, USER.md lokal selbst anlegen.
|
||||||
|
aria-data/brain-import/USER.md
|
||||||
|
|
||||||
# ── Stimmen (große Binärdateien) ─────────────────
|
# ── ARIAs Gedächtnis (Vector-DB, Skills, Models) ──
|
||||||
aria-data/voices/
|
# Backup via Diagnostic → Gehirn-Export (tar.gz), nicht via Git.
|
||||||
|
aria-data/brain/data/
|
||||||
|
aria-data/brain/qdrant/
|
||||||
|
|
||||||
|
# Diagnostic-State (aktive Session etc.)
|
||||||
|
aria-data/config/diag-state/
|
||||||
|
|
||||||
# ── Node / npm ──────────────────────────────────
|
# ── Node / npm ──────────────────────────────────
|
||||||
node_modules/
|
node_modules/
|
||||||
@@ -46,7 +50,6 @@ desktop/dist/
|
|||||||
__pycache__/
|
__pycache__/
|
||||||
*.pyc
|
*.pyc
|
||||||
*.pyo
|
*.pyo
|
||||||
bridge/__pycache__/
|
|
||||||
|
|
||||||
# ── macOS ────────────────────────────────────────
|
# ── macOS ────────────────────────────────────────
|
||||||
.DS_Store
|
.DS_Store
|
||||||
|
|||||||
Binary file not shown.
@@ -57,38 +57,44 @@ ARIA hat zwei Rollen:
|
|||||||
│ ┌─────────────────────────────────────────────────┐ │
|
│ ┌─────────────────────────────────────────────────┐ │
|
||||||
│ │ [proxy] claude-max-api-proxy Container │ │
|
│ │ [proxy] claude-max-api-proxy Container │ │
|
||||||
│ │ Claude Max Sub → lokale API │ │
|
│ │ Claude Max Sub → lokale API │ │
|
||||||
│ │ Port 3456, mit sed-Patches fuer │ │
|
|
||||||
│ │ Tool-Permissions + Host-Binding │ │
|
|
||||||
│ │ │ │
|
│ │ │ │
|
||||||
│ │ [aria] OpenClaw Container (aria-core) │ │
|
│ │ [qdrant] Vector-DB fuer ARIAs Gedaechtnis │ │
|
||||||
│ │ Gateway, Sessions, Memory, Skills │ │
|
│ │ Bind-Mount: aria-data/brain/qdrant/ │ │
|
||||||
│ │ Liest BOOTSTRAP.md + AGENT.md │ │
|
│ │ │ │
|
||||||
|
│ │ [brain] ARIA Agent + Memory Container │ │
|
||||||
|
│ │ FastAPI auf Port 8080 │ │
|
||||||
|
│ │ Eigener Agent-Loop, Skills, │ │
|
||||||
|
│ │ Vector-Memory, SSH-Zugriff zur VM │ │
|
||||||
|
│ │ Bind-Mount: aria-data/brain/data/ │ │
|
||||||
│ │ │ │
|
│ │ │ │
|
||||||
│ │ [bridge] ARIA Voice Bridge Container │ │
|
│ │ [bridge] ARIA Voice Bridge Container │ │
|
||||||
│ │ Wake-Word (lokales Mikro auf VM) │ │
|
│ │ Wake-Word, STT, TTS-Forwarding │ │
|
||||||
│ │ STT primaer remote (Gamebox-Whisper) │ │
|
│ │ Spricht mit Brain via HTTP/8080 │ │
|
||||||
│ │ Fallback: lokales faster-whisper (CPU) │ │
|
|
||||||
│ │ TTS via F5-TTS auf Gamebox │ │
|
|
||||||
│ │ Bruecke: App <> RVS <> Bridge <> ARIA │ │
|
|
||||||
│ │ │ │
|
│ │ │ │
|
||||||
│ │ [diagnostic] Selbstcheck-UI + Einstellungen │ │
|
│ │ [diagnostic] Selbstcheck-UI + Einstellungen │ │
|
||||||
│ │ Gateway + RVS + Proxy Status │ │
|
│ │ Port 3001 (im Netzwerk der Bridge) │ │
|
||||||
│ │ Chat, Sessions, Login, Logs │ │
|
│ │ Chat, Gehirn, Dateien, Logs │ │
|
||||||
│ └──────────────────┬──────────────────────────────┘ │
|
│ └──────────────────┬──────────────────────────────┘ │
|
||||||
│ │ Volume Mount │
|
│ │ Volume Mount │
|
||||||
│ ▼ │
|
│ ▼ │
|
||||||
│ ┌─────────────────────────────────────────────────┐ │
|
│ ┌─────────────────────────────────────────────────┐ │
|
||||||
│ │ ./aria-data/ — Ein tar = vollstaendiges Backup │ │
|
│ │ ./aria-data/ — Konfiguration + SSH-Keys │ │
|
||||||
|
│ │ ./aria-data/brain/ — Vector-DB + Skills (gitignored)│
|
||||||
|
│ │ Backup via Diagnostic → "Gehirn-Export" (tar.gz) │ │
|
||||||
│ └─────────────────────────────────────────────────┘ │
|
│ └─────────────────────────────────────────────────┘ │
|
||||||
└─────────────────────────────────────────────────────────┘
|
└─────────────────────────────────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
> OpenClaw (frueher `aria-core`) ist abgerissen — ARIA laeuft jetzt mit eigenem Agent-Framework im
|
||||||
|
> `aria-brain` Container. Eigene Tools, Skills, Vector-Memory statt Sessions. Letzter OpenClaw-Stand
|
||||||
|
> ist als Git-Tag `v0.1.2.0` archiviert.
|
||||||
|
|
||||||
**Vier separate Deployments:**
|
**Vier separate Deployments:**
|
||||||
|
|
||||||
| Was | Wo | Wie |
|
| Was | Wo | Wie |
|
||||||
|-----|----|-----|
|
|-----|----|-----|
|
||||||
| RVS | Rechenzentrum | `cd rvs && docker compose up -d` |
|
| RVS | Rechenzentrum | `cd rvs && docker compose up -d` |
|
||||||
| ARIA Core | Debian 13 VM | `docker compose up -d && ./aria-setup.sh` |
|
| ARIA Brain/Bridge/Diagnostic | Debian 13 VM | `./init.sh && ./aria-setup.sh && docker compose up -d` |
|
||||||
| Gamebox-Stack (F5-TTS + Whisper) | Gamebox (GPU) | `cd xtts && docker compose up -d` |
|
| Gamebox-Stack (F5-TTS + Whisper) | Gamebox (GPU) | `cd xtts && docker compose up -d` |
|
||||||
| Android App | Stefans Handy | APK installieren (Auto-Update via RVS) |
|
| Android App | Stefans Handy | APK installieren (Auto-Update via RVS) |
|
||||||
|
|
||||||
@@ -114,12 +120,12 @@ apt install -y docker.io docker-compose-plugin git curl jq
|
|||||||
git clone git@gitea.hackersoft.de:aria/aria.git ~/ARIA-AGENT
|
git clone git@gitea.hackersoft.de:aria/aria.git ~/ARIA-AGENT
|
||||||
cd ~/ARIA-AGENT
|
cd ~/ARIA-AGENT
|
||||||
cp .env.example .env
|
cp .env.example .env
|
||||||
|
bash init.sh # legt USER.md aus Vorlage an (idempotent, schadet nicht)
|
||||||
```
|
```
|
||||||
|
|
||||||
`.env` Datei editieren (Details siehe `.env.example`):
|
`.env` Datei editieren (Details siehe `.env.example`):
|
||||||
```bash
|
```bash
|
||||||
# Gateway-Auth: Alle Services die mit aria-core reden brauchen diesen Token
|
# Auth-Token: Alle ARIA-Services nutzen ihn fuer interne Auth
|
||||||
# Diagnostic, Bridge, App nutzen ihn fuer den WebSocket-Handshake
|
|
||||||
ARIA_AUTH_TOKEN= # openssl rand -hex 32
|
ARIA_AUTH_TOKEN= # openssl rand -hex 32
|
||||||
|
|
||||||
# RVS-Verbindung: Hostname + Port deines Rendezvous-Servers
|
# RVS-Verbindung: Hostname + Port deines Rendezvous-Servers
|
||||||
@@ -128,17 +134,18 @@ RVS_PORT=443
|
|||||||
RVS_TLS=true
|
RVS_TLS=true
|
||||||
RVS_TLS_FALLBACK=true
|
RVS_TLS_FALLBACK=true
|
||||||
|
|
||||||
# Pairing-Token: Verbindet App, Bridge, Diagnostic und XTTS im gleichen RVS-Room
|
# Pairing-Token: Verbindet App, Bridge, Diagnostic und Gamebox im gleichen RVS-Room
|
||||||
# MUSS auf allen Geraeten identisch sein (ARIA-VM, Gaming-PC, App)
|
# MUSS auf allen Geraeten identisch sein (ARIA-VM, Gaming-PC, App)
|
||||||
# Wird von generate-token.sh automatisch generiert und eingetragen
|
|
||||||
RVS_TOKEN= # ./generate-token.sh
|
RVS_TOKEN= # ./generate-token.sh
|
||||||
|
|
||||||
# Optional: SSH-Host des RVS-Servers fuer Auto-Update (z.B. root@aria-rvs)
|
|
||||||
RVS_UPDATE_HOST=
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Alle anderen Einstellungen (Stimmen, Modi, Wake-Word, F5-TTS-Tuning) leben in
|
||||||
|
`/shared/config/runtime.json` und werden ueber die Diagnostic-UI gepflegt — nicht
|
||||||
|
in der `.env`. Komplett-Reset jederzeit moeglich via "🗑 ALLES löschen" im
|
||||||
|
Diagnostic-Einstellungen-Tab.
|
||||||
|
|
||||||
**Zwei Tokens, zwei Zwecke:**
|
**Zwei Tokens, zwei Zwecke:**
|
||||||
- **ARIA_AUTH_TOKEN**: Authentifizierung am OpenClaw Gateway (aria-core). Wer diesen Token hat, kann ARIA Befehle geben.
|
- **ARIA_AUTH_TOKEN**: Interner Auth-Token zwischen ARIAs Containern.
|
||||||
- **RVS_TOKEN**: Pairing-Token fuer den Rendezvous-Server. Alle Geraete mit dem gleichen Token landen im gleichen "Room" und koennen kommunizieren. Die App bekommt diesen Token per QR-Code.
|
- **RVS_TOKEN**: Pairing-Token fuer den Rendezvous-Server. Alle Geraete mit dem gleichen Token landen im gleichen "Room" und koennen kommunizieren. Die App bekommt diesen Token per QR-Code.
|
||||||
|
|
||||||
### 2. Claude CLI einloggen (Proxy-Auth)
|
### 2. Claude CLI einloggen (Proxy-Auth)
|
||||||
@@ -156,48 +163,24 @@ claude login
|
|||||||
**Wichtig:** Der Ordner `~/.claude/` (nicht `~/.config/claude/`!) wird als Volume
|
**Wichtig:** Der Ordner `~/.claude/` (nicht `~/.config/claude/`!) wird als Volume
|
||||||
in den Proxy gemountet. Die Credentials ueberleben Container-Restarts.
|
in den Proxy gemountet. Die Credentials ueberleben Container-Restarts.
|
||||||
|
|
||||||
### 3. Voice Bridge konfigurieren
|
### 3. SSH-Key fuer aria-wohnung generieren + RVS-Token + Container
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cp aria-data/config/aria.env.example aria-data/config/aria.env
|
# SSH-Key fuer den Zugriff von ARIA auf die VM (aria-wohnung)
|
||||||
# Bei Bedarf anpassen (Whisper-Modell als Fallback, Sprache, Wake-Word)
|
./aria-setup.sh
|
||||||
```
|
|
||||||
|
|
||||||
STT laeuft primaer auf der Gamebox (faster-whisper auf GPU), TTS ausschliesslich
|
# RVS-Token generieren — schreibt RVS_TOKEN in .env, zeigt QR-Code
|
||||||
ueber F5-TTS auf der Gamebox — siehe Abschnitt "Gamebox-Stack — F5-TTS + Whisper"
|
|
||||||
weiter unten.
|
|
||||||
|
|
||||||
### 5. RVS-Token generieren & Container starten
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Token generieren — schreibt RVS_TOKEN in .env, zeigt QR-Code
|
|
||||||
./generate-token.sh
|
./generate-token.sh
|
||||||
|
|
||||||
# Alle Container starten
|
# Alle Container starten
|
||||||
docker compose up -d
|
docker compose up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
### 6. ARIA Setup ausfuehren (einmalig!)
|
`aria-setup.sh` generiert den ed25519-Key in `aria-data/ssh/` und traegt den
|
||||||
|
Public-Key in `/root/.ssh/authorized_keys` ein (Script laeuft als root auf der VM
|
||||||
|
aria-wohnung). Brain + Proxy nutzen den gleichen Key.
|
||||||
|
|
||||||
```bash
|
### 4. App verbinden
|
||||||
./aria-setup.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
Dieses Script ist **essentiell** — es macht:
|
|
||||||
1. Wartet bis aria-core laeuft
|
|
||||||
2. Fixt Volume-Permissions (Docker → node User)
|
|
||||||
3. Schreibt `openclaw.json` (Proxy-Provider, Model-Config, Timeout 900s)
|
|
||||||
4. Setzt exec-approvals Wildcard (Tool-Ausfuehrung im headless-Modus)
|
|
||||||
5. Generiert SSH-Key fuer VM-Zugriff (`aria-data/ssh/`)
|
|
||||||
6. Fixt SSH-Permissions im Container
|
|
||||||
7. Startet aria-core neu
|
|
||||||
|
|
||||||
**SSH-Key auf der VM eintragen** (wird vom Script angezeigt):
|
|
||||||
```bash
|
|
||||||
cat ~/ARIA-AGENT/aria-data/ssh/id_ed25519.pub >> /root/.ssh/authorized_keys
|
|
||||||
```
|
|
||||||
|
|
||||||
### 7. App verbinden
|
|
||||||
|
|
||||||
App oeffnen → QR-Code scannen → "ARIA, hoerst du mich?"
|
App oeffnen → QR-Code scannen → "ARIA, hoerst du mich?"
|
||||||
|
|
||||||
@@ -205,20 +188,21 @@ Der QR-Code enthaelt: Host, Port, Token, TLS-Flag — einmal scannen, nie wieder
|
|||||||
|
|
||||||
Bestehendes Token nochmal als QR anzeigen: `./generate-token.sh show`
|
Bestehendes Token nochmal als QR anzeigen: `./generate-token.sh show`
|
||||||
|
|
||||||
### 8. Diagnostic pruefen
|
### 5. Diagnostic pruefen
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Im Browser:
|
# Im Browser:
|
||||||
http://<VM-IP>:3001
|
http://<VM-IP>:3001
|
||||||
```
|
```
|
||||||
|
|
||||||
Die Diagnostic-UI zeigt:
|
Die Diagnostic-UI hat sechs Top-Tabs:
|
||||||
- Gateway-Verbindung (gruener Punkt = OK)
|
|
||||||
- RVS-Verbindung
|
- **Main** — Live-Chat-Test, Status (Brain / RVS / Proxy), End-to-End-Trace
|
||||||
- Proxy-Status + Claude Login
|
- **Gehirn** — Memory-Verwaltung (Vector-DB), Token/Call-Metrics (Subscription-Quota), Bootstrap & Migration, Komplett-Gehirn Export/Import
|
||||||
- Chat-Test (direkt an ARIA schreiben)
|
- **Skills** — Liste mit Logs, Run, Activate/Deactivate, Export/Import als tar.gz
|
||||||
- Session-Verwaltung
|
- **Trigger** — Timer + Watcher anlegen/anzeigen/loeschen, Live-Variablen-Anzeige (disk_free, current_lat, hour_of_day, …), near(lat, lon, m) als Condition-Funktion
|
||||||
- Container-Logs
|
- **Dateien** — alle Dateien aus `/shared/uploads/` mit Multi-Select, Bulk-Download (ZIP) + Bulk-Delete
|
||||||
|
- **Einstellungen** — Reparatur (Container-Restart), Wipe, Sprachausgabe, Whisper, Sprachmodell, Runtime-Config, App-Onboarding (QR), Komplett-Reset
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -226,7 +210,7 @@ Die Diagnostic-UI zeigt:
|
|||||||
|
|
||||||
Der Proxy ist das Herzsttueck: Er macht aus der Claude Max Subscription eine lokale API.
|
Der Proxy ist das Herzsttueck: Er macht aus der Claude Max Subscription eine lokale API.
|
||||||
|
|
||||||
**Ablauf:** `OpenClaw (aria-core) → HTTP → claude-max-api-proxy → Claude Code CLI (--print) → Anthropic API`
|
**Ablauf:** `aria-brain → HTTP → claude-max-api-proxy → Claude Code CLI (--print) → Anthropic API`
|
||||||
|
|
||||||
Der Proxy-Container (`node:22-alpine`) installiert bei jedem Start:
|
Der Proxy-Container (`node:22-alpine`) installiert bei jedem Start:
|
||||||
- `@anthropic-ai/claude-code` — Claude Code CLI
|
- `@anthropic-ai/claude-code` — Claude Code CLI
|
||||||
@@ -247,52 +231,34 @@ Danach werden per `sed` vier Patches angewendet:
|
|||||||
|
|
||||||
## Konfigurationsdateien
|
## Konfigurationsdateien
|
||||||
|
|
||||||
### aria-data/config/
|
### aria-data/
|
||||||
|
|
||||||
| Datei | Zweck | Gemountet als |
|
| Pfad | Zweck |
|
||||||
|-------|-------|---------------|
|
|------|-------|
|
||||||
| `BOOTSTRAP.md` | ARIAs System-Prompt: Identitaet, Sicherheitsregeln, Tool-Freigaben, Infrastruktur | `BOOTSTRAP.md` + `CLAUDE.md` im Workspace |
|
| `.env` | Tokens (ARIA_AUTH_TOKEN, RVS_TOKEN, RVS_HOST) — minimal, alles andere lebt in der DB |
|
||||||
| `AGENT.md` | ARIAs Persoenlichkeit, Tool-Freigaben, Arbeitsprinzipien | `AGENT.md` im Workspace |
|
| `aria-data/ssh/` | SSH-Key fuer den Zugriff auf aria-wohnung (Brain + Proxy teilen den Key) |
|
||||||
| `USER.md` | Stefans Praeferenzen, Kommunikationsstil | `USER.md` im Workspace |
|
| `aria-data/brain/qdrant/` | Vector-DB-Storage (Bind-Mount, gitignored) |
|
||||||
| `openclaw.env` | OpenClaw Container-Environment | `.env` im Workspace |
|
| `aria-data/brain/data/` | Skills, Embedding-Modell-Cache (Bind-Mount, gitignored) |
|
||||||
| `aria.env` | Voice Bridge Konfiguration (Whisper, Stimmen) | `/config/aria.env` in Bridge |
|
| `aria-data/brain-import/` | `AGENT.md`, `USER.md.example`, `TOOLING.md.example` — Quelle fuer den initialen Memory-Import in die Vector-DB |
|
||||||
|
| `aria-data/config/diag-state/` | Diagnostic State (z.B. zuletzt aktive Session) |
|
||||||
|
|
||||||
**BOOTSTRAP.md** ist die wichtigste Datei — sie definiert:
|
### /shared/config/ (im aria-shared Volume)
|
||||||
- Wer ARIA ist (Name, Rolle, Persoenlichkeit)
|
|
||||||
- Sicherheitsregeln (kein ClawHub, Prompt Injection abwehren)
|
|
||||||
- Tool-Freigaben (alle Claude Code Tools: WebFetch, Bash, etc.)
|
|
||||||
- SSH-Zugriff auf aria-wohnung (VM)
|
|
||||||
- Gedaechtnis-System
|
|
||||||
|
|
||||||
### openclaw.json (via aria-setup.sh)
|
| Datei | Zweck |
|
||||||
|
|-------|-------|
|
||||||
|
| `voice_config.json` | TTS-Engine, geclonte Stimme, Whisper-Modell, F5-TTS-Tuning |
|
||||||
|
| `runtime.json` | Token + RVS-Override + Whisper-Sprache (durch Diagnostic gepflegt) |
|
||||||
|
| `highlight_triggers.json` | Highlight-Trigger-Woerter |
|
||||||
|
| `chat_backup.jsonl` | Append-only Chat-Log (Quelle fuer die Chat-History in Diagnostic) |
|
||||||
|
|
||||||
Wird von `aria-setup.sh` in den Container geschrieben:
|
`voice_config.json` + `highlight_triggers.json` lassen sich via Diagnostic →
|
||||||
```json
|
"Sprachausgabe" als Bundle exportieren/importieren.
|
||||||
{
|
|
||||||
"agents": {
|
|
||||||
"defaults": {
|
|
||||||
"model": { "primary": "proxy/claude-sonnet-4" },
|
|
||||||
"timeoutSeconds": 900,
|
|
||||||
"maxConcurrent": 4
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"models": {
|
|
||||||
"providers": {
|
|
||||||
"proxy": {
|
|
||||||
"api": "openai-completions",
|
|
||||||
"baseUrl": "http://proxy:3456/v1",
|
|
||||||
"apiKey": "not-needed"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"tools": { "profile": "full" },
|
|
||||||
"messages": { "ackReactionScope": "all" }
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
**timeoutSeconds: 900** (15 Min) — notwendig weil jede Anfrage einen neuen
|
### Backup / Reset
|
||||||
`claude --print` Prozess spawnt (Cold Start). Bei Tool-Nutzung (WebFetch, Bash)
|
|
||||||
braucht ARIA mehrere API-Roundtrips.
|
- **Gehirn-Backup**: Diagnostic → Gehirn → "⬇ Export herunterladen" — komplettes Brain (Memories + Skills + Qdrant-DB) als `.tar.gz`
|
||||||
|
- **Stimmen-Backup**: pro Stimme ein `.tar.gz` (Diagnostic → Sprachausgabe → ⬇ pro Stimme); Import via Upload-Button
|
||||||
|
- **Komplett-Reset**: Diagnostic → Einstellungen → "🗑 ALLES löschen" — Memory + Stimmen + Settings weg; `.env` + SSH-Keys bleiben
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -303,14 +269,14 @@ auf der Gamebox.
|
|||||||
|
|
||||||
**Nachrichtenfluss:**
|
**Nachrichtenfluss:**
|
||||||
```
|
```
|
||||||
Text: App → RVS → Bridge → chat.send → aria-core
|
Text: App → RVS → Bridge → aria-brain (HTTP)
|
||||||
Audio: App → RVS → Bridge → stt_request (RVS) → whisper-bridge (Gamebox)
|
Audio: App → RVS → Bridge → stt_request (RVS) → whisper-bridge (Gamebox)
|
||||||
→ stt_response → Bridge → chat.send → aria-core
|
→ stt_response → Bridge → aria-brain
|
||||||
Fallback bei Timeout: lokales faster-whisper (CPU)
|
Fallback bei Timeout: lokales faster-whisper (CPU)
|
||||||
Datei: App → RVS → Bridge → /shared/uploads/ → chat.send (mit Pfad) → aria-core
|
Datei: App → RVS → Bridge → /shared/uploads/ → aria-brain (mit Pfad)
|
||||||
|
|
||||||
aria-core → Antwort → Gateway → Diagnostic → RVS → App
|
aria-brain → Antwort → Bridge → RVS → App
|
||||||
→ Bridge → xtts_request (RVS) → f5tts-bridge
|
→ xtts_request (RVS) → f5tts-bridge
|
||||||
→ audio_pcm Stream → RVS → App AudioTrack
|
→ audio_pcm Stream → RVS → App AudioTrack
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -342,34 +308,26 @@ aria-core → Antwort → Gateway → Diagnostic → RVS → App
|
|||||||
|
|
||||||
## Diagnostic — Selbstcheck-UI und Einstellungen
|
## Diagnostic — Selbstcheck-UI und Einstellungen
|
||||||
|
|
||||||
Erreichbar unter `http://<VM-IP>:3001`. Teilt das Netzwerk mit aria-core.
|
Erreichbar unter `http://<VM-IP>:3001`. Teilt das Netzwerk mit der Bridge.
|
||||||
|
|
||||||
### Features
|
### Tabs
|
||||||
|
|
||||||
- **Status-Karten**: Gateway (Handshake), RVS (TLS-Fallback), Proxy (Auth)
|
- **Main**: Brain/RVS/Proxy-Status, Chat-Test, "ARIA denkt..."-Indikator, End-to-End-Trace, Container-Logs
|
||||||
- **Disk-Voll Banner**: Rotes Overlay wenn die VM-Disk knapp wird, mit copy-baren Cleanup-Befehlen (safe + aggressiv)
|
- **Gehirn**: Memory-Browser (Vector-DB), Suche + Filter, Edit/Add/Delete, Konversation-Status mit Destillat-Trigger, **Token/Call-Metrics mit Subscription-Quota-Tracking**, Bootstrap & Migration (3 Wiederherstellungs-Wege), Gehirn-Export/Import (tar.gz). Info-Buttons (ℹ) ueberall mit Modal-Erklaerung.
|
||||||
- **Chat-Test**: Nachrichten direkt an ARIA senden (Gateway oder via RVS), Vollbild-Modus
|
- **Skills**: Liste aller Skills mit Logs pro Run, Activate/Deactivate, Export/Import als tar.gz, "von ARIA"-Badge fuer selbst gebaute
|
||||||
- **"ARIA denkt..." Indikator**: Zeigt live was ARIA gerade tut (Denken, Tool, Schreiben)
|
- **Trigger**: passive Aufweck-Quellen. **Timer** (einmalig, ISO-Timestamp) + **Watcher** (recurring, mit Condition + Throttle). Liste aktiver Trigger + Logs pro Feuer-Event. Modal mit Type-Dropdown, Live-Anzeige aller verfuegbaren Condition-Variablen (`disk_free_gb`, `hour_of_day`, `current_lat/lon`, `last_user_message_ago_sec`, …) und Condition-Funktionen (`near(lat, lon, m)` fuer GPS-Geofencing). Sicherer Condition-Parser via Python `ast` (Whitelist, kein `eval`).
|
||||||
- **Abbrechen-Button**: Stoppt laufende Anfragen + doctor --fix
|
- **Dateien**: Browser fuer `/shared/uploads/` mit Multi-Select + "Alle markieren" + Bulk-Download (ZIP bei 2+) + Bulk-Delete. Live-Update der Chat-Bubbles beim Delete.
|
||||||
- **Session-Verwaltung**: Sessions auflisten, wechseln, erstellen, loeschen, als Markdown exportieren (⬇ Button)
|
- **Einstellungen**: Reparatur (Container-Restart fuer Brain/Bridge/Qdrant), Komplett-Reset, Betriebsmodi, Sprachausgabe + Voice-Cloning + F5-TTS-Tuning + Voice Export/Import, Whisper, Sprachmodell (brainModel), Onboarding-QR, App-Cleanup
|
||||||
- **Chat-History**: Wird beim Laden und Session-Wechsel angezeigt (read-only aus JSONL)
|
|
||||||
- **TTS-Diagnose Tab**: Stimmen testen, Status pruefen, Fehler anzeigen
|
### Was zusaetzlich noch drin steckt
|
||||||
- **Einstellungen**: TTS aktiv-Toggle, F5-TTS-Voice (gecloned), Betriebsmodi, Whisper-Modell (tiny…large-v3, Hot-Reload auf der Gamebox)
|
|
||||||
- **Voice-Status**: Beim Wechsel der globalen Stimme zeigt ein Status-Text "Lade…" → "bereit (X.Ys)" — getriggert ueber `voice_preload`/`voice_ready`
|
- **Disk-Voll Banner** mit copy-baren Cleanup-Befehlen (safe + aggressiv)
|
||||||
- **Voice Cloning**: Audio-Samples hochladen, Referenz-Text wird automatisch via Whisper transkribiert
|
- **Token/Call-Metrics**: pro Claude-Call ein Eintrag in `/data/metrics.jsonl` mit ts + Token-Schaetzung. Gehirn-Tab zeigt 1h/5h/24h/30d-Aggregat plus Progress-Bar gegen Plan-Limit (Pro / Max 5x / Max 20x / Custom). Warn-Schwelle 80%, kritisch 90%.
|
||||||
|
- **Voice Cloning**: Audio-Samples hochladen, Whisper transkribiert den Ref-Text automatisch
|
||||||
|
- **Voice Export/Import**: einzelne Stimmen als `.tar.gz` zwischen Gameboxen mitnehmen
|
||||||
|
- **Settings Export/Import**: `voice_config.json` + `highlight_triggers.json` als JSON-Bundle
|
||||||
- **Claude Login**: Browser-Terminal zum Einloggen in den Proxy
|
- **Claude Login**: Browser-Terminal zum Einloggen in den Proxy
|
||||||
- **Core Terminal**: Shell in aria-core (openclaw CLI)
|
- **SSH Terminal**: direkter SSH-Zugang zu aria-wohnung
|
||||||
- **Container-Logs**: Echtzeit-Logs aller Container (gefiltert nach Tab + Pipeline)
|
|
||||||
- **SSH Terminal**: Direkter SSH-Zugang zu aria-wohnung
|
|
||||||
- **Watchdog**: Erkennt stuck Runs (2min Warnung → 5min doctor --fix → 8min Container-Restart)
|
|
||||||
|
|
||||||
### Session-Verwaltung
|
|
||||||
|
|
||||||
Die in der Diagnostic gewaehlte Session gilt **global** — Bridge und App nutzen
|
|
||||||
dieselbe Session. Die aktive Session wird unter `/data/active-session` persistiert
|
|
||||||
und ueberlebt Container-Restarts.
|
|
||||||
|
|
||||||
API-Endpoint fuer andere Services: `GET http://localhost:3001/api/session`
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -378,10 +336,13 @@ API-Endpoint fuer andere Services: `GET http://localhost:3001/api/session`
|
|||||||
### Features
|
### Features
|
||||||
|
|
||||||
- Text-Chat mit ARIA
|
- Text-Chat mit ARIA
|
||||||
- **Sprachaufnahme**: Push-to-Talk (halten) oder Tap-to-Talk (tippen, Auto-Stop bei Stille)
|
- **Sprachaufnahme**: Tap-to-Talk (tippen startet, tippen stoppt, Auto-Stop bei Stille via VAD)
|
||||||
- **Gespraechsmodus** (Ohr-Button): Nach jeder ARIA-Antwort startet automatisch die Aufnahme — wie ein natuerliches Gespraech hin und her
|
- **Gespraechsmodus** (Ohr-Button): Nach jeder ARIA-Antwort startet automatisch die Aufnahme — wie ein natuerliches Gespraech hin und her
|
||||||
- **Wake-Word** (on-device, openWakeWord ONNX): "Hey Jarvis", "Alexa", "Hey Mycroft", "Hey Rhasspy" — Mikrofon hoert passiv mit, Konversation startet beim Schluesselwort. Komplett on-device via ONNX Runtime, kein API-Key, kein Cloud-Roundtrip, Audio verlaesst das Geraet nicht.
|
- **Wake-Word** (on-device, openWakeWord ONNX): "Hey Jarvis", "Alexa", "Hey Mycroft", "Hey Rhasspy" — Mikrofon hoert passiv mit, Konversation startet beim Schluesselwort. Komplett on-device via ONNX Runtime, kein API-Key, kein Cloud-Roundtrip, Audio verlaesst das Geraet nicht.
|
||||||
- **VAD (Voice Activity Detection)**: Konfigurierbare Stille-Toleranz (1.0–8.0s, Default 2.8s) bevor Auto-Stop greift. Max-Aufnahme 120s.
|
- **VAD (Voice Activity Detection)**: Adaptive Schwelle (Baseline aus ersten 500ms Mic-Pegel + 6dB Offset). Konfigurierbare Stille-Toleranz (1.0–8.0s, Default 2.8s) bevor Auto-Stop greift. Max-Aufnahme einstellbar (1–30 min, Default 5 min)
|
||||||
|
- **Barge-In**: Wenn du waehrend ARIAs Antwort eine neue Sprach-/Text-Nachricht reinschickst, wird sie unterbrochen + bekommt den Hint "das ist eine Korrektur"
|
||||||
|
- **Wake-Word waehrend TTS**: Du kannst "Computer" sagen waehrend ARIA noch redet — AcousticEchoCanceler verhindert dass ARIAs eigene Stimme das Wake-Word triggert
|
||||||
|
- **Anruf-Pause + Auto-Resume**: TTS verstummt bei klassischem Anruf oder VoIP-Call (WhatsApp/Signal/Discord). Nach dem Auflegen geht ARIA von der **genauen Stelle** weiter wo sie unterbrochen wurde — die App misst die Position vom Wiedergabe-Anfang und nutzt den WAV-Cache der Antwort
|
||||||
- **Speech Gate**: Aufnahme wird verworfen wenn keine Sprache erkannt
|
- **Speech Gate**: Aufnahme wird verworfen wenn keine Sprache erkannt
|
||||||
- **STT (Speech-to-Text)**: 16kHz mono → Bridge → Gamebox-Whisper (CUDA) → Text im Chat. Fast in Echtzeit.
|
- **STT (Speech-to-Text)**: 16kHz mono → Bridge → Gamebox-Whisper (CUDA) → Text im Chat. Fast in Echtzeit.
|
||||||
- **"ARIA denkt..." Indicator**: Zeigt live den Status vom Core (Denken, Tool, Schreiben) + Abbrechen-Button
|
- **"ARIA denkt..." Indicator**: Zeigt live den Status vom Core (Denken, Tool, Schreiben) + Abbrechen-Button
|
||||||
@@ -394,10 +355,15 @@ API-Endpoint fuer andere Services: `GET http://localhost:3001/api/session`
|
|||||||
- **Mehrere Anhaenge**: Bilder + Dateien sammeln, Text hinzufuegen, dann zusammen senden
|
- **Mehrere Anhaenge**: Bilder + Dateien sammeln, Text hinzufuegen, dann zusammen senden
|
||||||
- **Paste-Support**: Bilder aus Zwischenablage einfuegen (Diagnostic)
|
- **Paste-Support**: Bilder aus Zwischenablage einfuegen (Diagnostic)
|
||||||
- **Anhaenge**: Bridge speichert in Shared Volume, ARIA kann darauf zugreifen, Re-Download ueber RVS
|
- **Anhaenge**: Bridge speichert in Shared Volume, ARIA kann darauf zugreifen, Re-Download ueber RVS
|
||||||
- **Einstellungen**: TTS-aktiv, F5-TTS-Voice, Pre-Roll-Buffer, Stille-Toleranz, Speicherort, Auto-Download, GPS
|
- **Einstellungen**: TTS-aktiv, F5-TTS-Voice, Pre-Roll-Buffer, Stille-Toleranz, Speicherort, Auto-Download, GPS, Verbose-Logging
|
||||||
- **Auto-Update**: Prueft beim Start + per Button auf neue Version, Download + Installation ueber RVS (FileProvider)
|
- **Auto-Update**: Prueft beim Start + per Button auf neue Version, Download + Installation ueber RVS (FileProvider)
|
||||||
- GPS-Position (optional)
|
- GPS-Position (optional, mit Runtime-Permission-Request) — wird in jeden Chat/Audio-Payload mitgegeben und ist in Diagnostic als Debug-Block einblendbar
|
||||||
|
- **GPS-Tracking (kontinuierlich)**: Toggle in Settings → Standort. Wenn aktiv, pushed die App alle ~15s bzw. ab 30m Bewegung ein `location_update` an die Bridge — Voraussetzung damit Watcher mit `near(lat, lon, m)` (z.B. Blitzer-Warner, Ankunft-Erinnerungen) ueberhaupt feuern koennen. ARIA selbst kann das Tracking via `request_location_tracking`-Tool an-/ausschalten und tut das automatisch wenn sie einen GPS-Watcher anlegt
|
||||||
- QR-Code Scanner fuer Token-Pairing
|
- QR-Code Scanner fuer Token-Pairing
|
||||||
|
- **ARIA-Dateien empfangen**: Wenn ARIA eine PDF/Bild/Markdown/ZIP fuer dich erstellt (Marker `[FILE: /shared/uploads/aria_*]` in der Antwort), erscheint sie als eigene Anhang-Bubble. Tippen → wird via RVS geladen + mit Android-Intent-Picker geoeffnet (PDF-Viewer, Bildbetrachter, Standard-App). Inline-Bilder aus Markdown-``-Syntax werden direkt unter dem Text gerendert (PNG/JPG via Image, SVG via react-native-svg)
|
||||||
|
- **Vollbild mit Pinch-Zoom**: Bilder im Vollbild-Modal sind pinch-zoombar (1x..5x), 1-Finger-Pan wenn gezoomt, Doppel-Tap toggelt 1x↔2.5x — alles ohne externe Lib
|
||||||
|
- **Container-Restart-Buttons** (Settings → Reparatur): aria-bridge / aria-brain / aria-qdrant gezielt neu starten (jeweils ~5s Downtime). Geht ueber RVS → Bridge → Diagnostic → Docker-Socket-API.
|
||||||
|
- **Cache-Cleanup**: Beim App-Start werden orphane TTS-WAVs aus dem Cache geraeumt. Plus Settings-Buttons "TTS-Cache leeren", "Update-Cache leeren", "Anhang-Cache leeren"
|
||||||
|
|
||||||
### Wake-Word (openWakeWord, on-device)
|
### Wake-Word (openWakeWord, on-device)
|
||||||
|
|
||||||
@@ -415,7 +381,7 @@ Community-Modelle stammen aus [fwartner/home-assistant-wakewords-collection](htt
|
|||||||
**Bedienung:**
|
**Bedienung:**
|
||||||
- App → **Einstellungen** → **Wake-Word** → gewuenschtes Keyword waehlen → **Speichern + Aktivieren**
|
- App → **Einstellungen** → **Wake-Word** → gewuenschtes Keyword waehlen → **Speichern + Aktivieren**
|
||||||
- **Ohr-Button (👂)** in der Statusleiste tippen → Wake-Word ist scharf, App hoert passiv mit
|
- **Ohr-Button (👂)** in der Statusleiste tippen → Wake-Word ist scharf, App hoert passiv mit
|
||||||
- Wake-Word sagen → Symbol wechselt auf 🎙️, Konversation laeuft
|
- Wake-Word sagen → Symbol wechselt auf 🎙️, **Bereit-Sound** (Ding-Dong, optional in Settings) + Toast "🎤 sprich jetzt" sobald das Mikro wirklich offen ist
|
||||||
- Nach jeder ARIA-Antwort oeffnet sich das Mikro nochmal — Stille → zurueck zu 👂
|
- Nach jeder ARIA-Antwort oeffnet sich das Mikro nochmal — Stille → zurueck zu 👂
|
||||||
- Erneut tippen → Ohr aus (🔇)
|
- Erneut tippen → Ohr aus (🔇)
|
||||||
|
|
||||||
@@ -505,18 +471,44 @@ Der Update-Flow:
|
|||||||
|
|
||||||
```
|
```
|
||||||
App (Mikrofon) → AAC/MP4 Aufnahme → Base64 → RVS → Bridge
|
App (Mikrofon) → AAC/MP4 Aufnahme → Base64 → RVS → Bridge
|
||||||
Bridge: FFmpeg (16kHz PCM) → Whisper STT → Text → aria-core
|
Bridge: FFmpeg (16kHz PCM) → Whisper STT → Text → aria-brain
|
||||||
Bridge: STT-Ergebnis → RVS → App (Placeholder wird durch transkribierten Text ersetzt)
|
Bridge: STT-Ergebnis → RVS → App (Placeholder wird durch transkribierten Text ersetzt)
|
||||||
aria-core → Antwort → Bridge → XTTS (Gaming-PC) → PCM-Stream → RVS → App
|
aria-brain → Antwort → Bridge → F5-TTS (Gaming-PC) → PCM-Stream → RVS → App
|
||||||
App: AudioTrack MODE_STREAM (nahtlos), Cache als WAV pro Message
|
App: AudioTrack MODE_STREAM (nahtlos), Cache als WAV pro Message
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Audio-Verhalten in der App
|
||||||
|
|
||||||
|
| Phase | Andere App (Spotify) | ARIA-Mikro |
|
||||||
|
|------------------------------|----------------------|-------------------------|
|
||||||
|
| Idle / Ohr aus | spielt frei | aus |
|
||||||
|
| Wake-Word lauscht (armed) | spielt frei | passiv (openWakeWord) |
|
||||||
|
| User-Aufnahme laeuft | pausiert (EXCLUSIVE) | Recording |
|
||||||
|
| Aufnahme zu Ende | resumed | aus |
|
||||||
|
| ARIA denkt/schreibt (~20s) | spielt frei | aus |
|
||||||
|
| TTS startet | pausiert (DUCK) | aus (oder barge) |
|
||||||
|
| TTS spielt (auch GPU-Pausen) | bleibt pausiert | barge wenn Wake-Word |
|
||||||
|
| TTS zu Ende | nach 800ms resumed | (Conversation-Window) |
|
||||||
|
| Eingehender Anruf (auch VoIP)| — | Mikro pausiert |
|
||||||
|
| Anruf vorbei (Auto-Resume) | pausiert wieder | aus |
|
||||||
|
| Neue Frage waehrend Anruf | — | (Resume verworfen) |
|
||||||
|
|
||||||
|
Mechanismen: Underrun-Schutz im PcmStreamPlayer (Stille-Fill in Render-
|
||||||
|
Pausen), Conversation-Focus bei Wake-Word, Foreground-Service mit
|
||||||
|
mediaPlayback|microphone, Anruf-Erkennung ueber TelephonyManager +
|
||||||
|
AudioFocus-Loss-Listener mit Polling-Fallback (VoIP). Bei Anruf wird
|
||||||
|
die Wiedergabe-Position gemerkt — nach dem Auflegen spielt ARIA ab
|
||||||
|
der genauen Stelle weiter (oder verwirft das wenn der User waehrend
|
||||||
|
des Telefonats per Text eine neue Frage gestellt hat). PcmPlayback-
|
||||||
|
Finished-Event vom Native sorgt dafuer dass Spotify erst pausiert
|
||||||
|
bleibt bis ARIA wirklich verstummt ist.
|
||||||
|
|
||||||
### Datei-Pipeline (Bilder & Anhaenge)
|
### Datei-Pipeline (Bilder & Anhaenge)
|
||||||
|
|
||||||
```
|
```
|
||||||
App (Kamera/Dateimanager) → Base64 → RVS → Bridge
|
App (Kamera/Dateimanager) → Base64 → RVS → Bridge
|
||||||
Bridge: Speichert in /shared/uploads/ (Shared Volume, fuer aria-core sichtbar)
|
Bridge: Speichert in /shared/uploads/ (Shared Volume, fuer aria-brain sichtbar)
|
||||||
Bridge: chat.send → "Stefan hat ein Bild geschickt: foto.jpg — liegt unter /shared/uploads/..."
|
Bridge: aria-brain → "Stefan hat ein Bild geschickt: foto.jpg — liegt unter /shared/uploads/..."
|
||||||
ARIA: Kann Datei per Bash/Read-Tool oeffnen und analysieren
|
ARIA: Kann Datei per Bash/Read-Tool oeffnen und analysieren
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -546,35 +538,34 @@ ist in den App-Einstellungen konfigurierbar.
|
|||||||
|
|
||||||
## Datenverzeichnis — aria-data/
|
## Datenverzeichnis — aria-data/
|
||||||
|
|
||||||
Alles was ARIA weiss, kann und ist — liegt hier. Ein `tar` = vollstaendiges Backup.
|
|
||||||
|
|
||||||
```
|
```
|
||||||
aria-data/
|
aria-data/
|
||||||
├── brain/ ← ARIAs Gedaechtnis (OpenClaw Memory)
|
├── brain/ ← ARIAs Gehirn — Bind-Mount, GITIGNORED
|
||||||
│ ├── MEMORY.md ← Langzeitgedaechtnis
|
│ ├── qdrant/ ← Vector-DB Storage (Memories, Skills-Embeddings)
|
||||||
│ └── memory/ ← Tageslogbuecher
|
│ └── data/ ← Skills, Embedding-Modell-Cache
|
||||||
|
│ └── skills/<name>/ ← Pro Skill ein Ordner mit Manifest, Code, venv
|
||||||
│
|
│
|
||||||
├── skills/ ← ARIAs Faehigkeiten (selbst geschrieben!)
|
├── brain-import/ ← Quell-Dateien fuer den initialen Import in die DB
|
||||||
|
│ ├── AGENT.md ← Persoenlichkeit (wird Memory-Punkte vom Typ identity/rule)
|
||||||
|
│ ├── BOOTSTRAP.md
|
||||||
|
│ ├── TOOLING.md.example
|
||||||
|
│ └── USER.md.example
|
||||||
│
|
│
|
||||||
├── config/
|
├── config/
|
||||||
│ ├── BOOTSTRAP.md ← System-Prompt (Identitaet, Regeln, Tools)
|
|
||||||
│ ├── AGENT.md ← Persoenlichkeit & Arbeitsprinzipien
|
|
||||||
│ ├── USER.md ← Stefans Praeferenzen
|
|
||||||
│ ├── openclaw.env ← OpenClaw Environment
|
|
||||||
│ ├── aria.env ← Voice Bridge Config
|
|
||||||
│ └── diag-state/ ← Diagnostic persistenter State
|
│ └── diag-state/ ← Diagnostic persistenter State
|
||||||
│
|
│
|
||||||
│ (im Shared Volume /shared/config/):
|
└── ssh/ ← SSH Keys (Brain + Proxy teilen sich)
|
||||||
│ ├── voice_config.json ← TTS-Einstellungen (Stimme, Speed, Engine)
|
├── id_ed25519
|
||||||
│ ├── highlight_triggers.json ← Highlight-Trigger Woerter
|
├── id_ed25519.pub
|
||||||
│ └── chat_backup.jsonl ← Nachrichten-Backup (on-the-fly)
|
└── config ← Host aria-wohnung
|
||||||
│
|
|
||||||
└── ssh/ ← SSH Keys fuer VM-Zugriff
|
|
||||||
├── id_ed25519 ← Private Key (generiert von aria-setup.sh)
|
|
||||||
├── id_ed25519.pub ← Public Key (muss in VM authorized_keys!)
|
|
||||||
└── config ← SSH Config (Host aria-wohnung)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
`aria-data/brain/` (Vector-DB + Skills) ist gitignored — Backup laeuft ueber
|
||||||
|
den Gehirn-Export-Button in der Diagnostic, nicht ueber Git.
|
||||||
|
|
||||||
|
Settings im Shared Volume (`/shared/config/`): `voice_config.json`,
|
||||||
|
`highlight_triggers.json`, `runtime.json`, `chat_backup.jsonl`.
|
||||||
|
|
||||||
**Backup:**
|
**Backup:**
|
||||||
```bash
|
```bash
|
||||||
tar -czf aria-backup-$(date +%Y%m%d).tar.gz aria-data/
|
tar -czf aria-backup-$(date +%Y%m%d).tar.gz aria-data/
|
||||||
@@ -721,16 +712,15 @@ dem Cache wiederverwendet.
|
|||||||
|
|
||||||
## Docker Volumes
|
## Docker Volumes
|
||||||
|
|
||||||
| Volume | Pfad im Container | Zweck |
|
| Volume / Bind | Pfad im Container | Zweck |
|
||||||
|--------|-------------------|-------|
|
|---------------|-------------------|-------|
|
||||||
| `openclaw-config` | `/home/node/.openclaw` | OpenClaw Config, Sessions, Auth |
|
| `~/.claude` (bind) | `/root/.claude` (proxy) | Claude CLI Credentials |
|
||||||
| `claude-config` | `/home/node/.claude` | Claude Code Settings, Permissions |
|
| `./aria-data/ssh` (bind) | `/root/.ssh` (proxy, brain) | SSH-Keys fuer aria-wohnung |
|
||||||
| `~/.claude` (bind) | `/root/.claude` (Proxy) | Claude CLI Credentials |
|
| `./aria-data/brain/qdrant` (bind) | `/qdrant/storage` (qdrant) | Vector-DB Storage |
|
||||||
| `./aria-data/ssh` (bind) | `/root/.ssh`, `/home/node/.ssh` | SSH Keys |
|
| `./aria-data/brain/data` (bind) | `/data` (brain) | Skills + Embedding-Modell-Cache |
|
||||||
| `./aria-data/brain` (bind) | `/home/node/.openclaw/workspace/memory` | Gedaechtnis |
|
| `./aria-data/brain` (bind) | `/brain` (diagnostic) | Brain-Export/Import-Endpoints |
|
||||||
| `./aria-data/skills` (bind) | `/home/node/.openclaw/workspace/skills` | Skills |
|
| `aria-shared` | `/shared` (brain, bridge, proxy, diagnostic) | Datei-Austausch, Config, Uploads |
|
||||||
| `aria-shared` | `/shared` (Core + Bridge + Proxy + Diag) | Datei-Austausch, Config, Uploads |
|
| `./aria-data/config/diag-state` (bind) | `/data` (diagnostic) | Diagnostic persistenter State |
|
||||||
| `./aria-data/config/diag-state` (bind) | `/data` (Diagnostic) | Persistenter State (aktive Session) |
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -759,22 +749,21 @@ docker compose down
|
|||||||
|
|
||||||
# Einzelnen Container neu bauen
|
# Einzelnen Container neu bauen
|
||||||
docker compose up -d --build diagnostic
|
docker compose up -d --build diagnostic
|
||||||
docker compose up -d --build bridge
|
docker compose up -d --build bridge brain
|
||||||
|
|
||||||
# Logs
|
# Logs
|
||||||
docker compose logs -f # alle
|
docker compose logs -f # alle
|
||||||
docker compose logs -f aria # nur aria-core
|
docker compose logs -f brain # nur Agent + Memory
|
||||||
docker compose logs -f proxy # nur proxy
|
docker compose logs -f qdrant # nur Vector-DB
|
||||||
|
docker compose logs -f bridge # nur Voice-Bridge
|
||||||
|
docker compose logs -f proxy # nur Claude-Proxy
|
||||||
|
|
||||||
# Setup wiederholen (nach Config-Aenderungen)
|
# SSH-Test (Brain zu aria-wohnung)
|
||||||
./aria-setup.sh
|
docker exec aria-brain ssh aria-wohnung hostname
|
||||||
|
|
||||||
# SSH-Test
|
# Brain-API direkt testen
|
||||||
docker exec aria-core ssh aria-wohnung hostname
|
docker exec aria-brain curl localhost:8080/health
|
||||||
|
docker exec aria-brain curl localhost:8080/memory/stats
|
||||||
# Tool-Test
|
|
||||||
# Neue Session in Diagnostic anlegen, dann:
|
|
||||||
# "Wie wird das Wetter in Bremen?"
|
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -813,7 +802,7 @@ docker exec aria-core ssh aria-wohnung hostname
|
|||||||
- [x] SSH-Zugriff auf VM (aria-wohnung)
|
- [x] SSH-Zugriff auf VM (aria-wohnung)
|
||||||
- [x] Diagnostic Web-UI + Einstellungen
|
- [x] Diagnostic Web-UI + Einstellungen
|
||||||
- [x] Session-Verwaltung + Chat-History
|
- [x] Session-Verwaltung + Chat-History
|
||||||
- [x] Stimmen-Einstellungen (Ramona/Thorsten, Speed, Highlight-Trigger) — durch XTTS v2 Voice Cloning ersetzt
|
- [x] Stimmen-Einstellungen (frueher Piper Ramona/Thorsten, Highlight-Trigger) — durch XTTS, dann F5-TTS Voice Cloning ersetzt
|
||||||
- [x] Piper komplett entfernt — nur noch XTTS v2 als TTS (Gaming-PC)
|
- [x] Piper komplett entfernt — nur noch XTTS v2 als TTS (Gaming-PC)
|
||||||
- [x] Streaming TTS: PCM-Chunks direkt in AudioTrack, nahtlose Wiedergabe
|
- [x] Streaming TTS: PCM-Chunks direkt in AudioTrack, nahtlose Wiedergabe
|
||||||
- [x] TTS satzweise fuer lange Texte
|
- [x] TTS satzweise fuer lange Texte
|
||||||
@@ -840,18 +829,53 @@ docker exec aria-core ssh aria-wohnung hostname
|
|||||||
- [x] Whisper STT auf die Gamebox ausgelagert (CUDA float16, fast Echtzeit)
|
- [x] Whisper STT auf die Gamebox ausgelagert (CUDA float16, fast Echtzeit)
|
||||||
- [x] **F5-TTS ersetzt XTTS** — bessere Voice-Cloning-Qualitaet, Whisper-auto-transkribierter Referenz-Text
|
- [x] **F5-TTS ersetzt XTTS** — bessere Voice-Cloning-Qualitaet, Whisper-auto-transkribierter Referenz-Text
|
||||||
- [x] Audio-Pause statt Ducking (TRANSIENT statt MAY_DUCK) + release-Timing fix
|
- [x] Audio-Pause statt Ducking (TRANSIENT statt MAY_DUCK) + release-Timing fix
|
||||||
- [x] VAD-Stille-Toleranz und Max-Aufnahme einstellbar (1-8s, 120s)
|
- [x] VAD-Stille-Toleranz einstellbar (1-8s) + adaptive Mikro-Baseline + Max-Aufnahme einstellbar (1-30 min)
|
||||||
|
- [x] Barge-In: User kann ARIA waehrend Antwort unterbrechen, aria-core bekommt Kontext-Hint
|
||||||
|
- [x] Anruf-Pause + Auto-Resume: TTS verstummt bei Anruf, faehrt nach Auflegen ab der gemerkten Position fort (Date.now()-Tracking + WAV-Cache der Antwort)
|
||||||
|
- [x] PcmPlaybackFinished-Event: AudioFocus wird erst released wenn AudioTrack wirklich durch ist — kein Spotify-mid-TTS mehr
|
||||||
|
- [x] Edge-Case: neue Frage waehrend Telefonat verwirft pending Auto-Resume, neueste Antwort gewinnt
|
||||||
|
- [x] Settings-Sub-Screens: 8 Kategorien statt langer Liste
|
||||||
|
- [x] APK ABI-Split arm64-v8a: 35 MB statt 136 MB
|
||||||
|
- [x] Sprachnachrichten-Bubble: audioRequestId statt Substring-Match — keine vertauschten Bubbles mehr bei parallelen Aufnahmen
|
||||||
|
- [x] Bereit-Sound (Airplane Ding-Dong) wenn Mikro nach Wake-Word offen ist — akustische Bestaetigung, in Settings abschaltbar
|
||||||
|
- [x] Wake-Word parallel zu TTS mit AcousticEchoCanceler — "Computer" sagen waehrend ARIA spricht stoppt sie und oeffnet Mikro
|
||||||
|
- [x] GPS-Position mit Nachrichten mitsenden (Toggle in Settings) — ARIA nutzt sie nur bei standortbezogenen Fragen, im Chat sichtbar nur in ihrer Antwort
|
||||||
|
- [x] Sprachnachrichten ohne STT-Result werden nach Timeout automatisch entfernt (skaliert mit Aufnahmedauer)
|
||||||
|
- [x] Background Audio Service: TTS, Wake-Word-Lauschen + Aufnahme laufen auch bei minimierter App weiter (Foreground-Service mit mediaPlayback|microphone, dynamische Notification)
|
||||||
- [x] Disk-Voll Banner in Diagnostic mit copy-baren Cleanup-Befehlen
|
- [x] Disk-Voll Banner in Diagnostic mit copy-baren Cleanup-Befehlen
|
||||||
- [x] Wake-Word on-device via openWakeWord (ONNX Runtime, kein API-Key) + State-Icon
|
- [x] Wake-Word on-device via openWakeWord (ONNX Runtime, kein API-Key) + State-Icon
|
||||||
|
|
||||||
|
### Phase A — Refactor: OpenClaw raus, eigenes Brain rein
|
||||||
|
|
||||||
|
- [x] aria-brain Container-Skeleton (FastAPI, Qdrant, sentence-transformers)
|
||||||
|
- [x] aria-core (OpenClaw) komplett abgerissen — Tag `v0.1.2.0` als Archiv
|
||||||
|
- [x] Diagnostic: Gehirn-Tab (Memory Search/Filter, Add/Edit/Delete)
|
||||||
|
- [x] Diagnostic: Gehirn-Export/Import als tar.gz
|
||||||
|
- [x] Diagnostic: Datei-Manager (Liste, Suche, Download, Delete, Multi-Select + ZIP + Bulk-Delete)
|
||||||
|
- [x] Diagnostic: Komplett-Reset (Wipe All)
|
||||||
|
- [x] Diagnostic: Info-Buttons mit Modal-Erklaerungen (Status, Konversation, Memories, Bootstrap)
|
||||||
|
- [x] App: Datei-Manager als Modal in den Einstellungen (mit Multi-Select + ZIP-Download)
|
||||||
|
- [x] Voice Export/Import (einzelne Stimmen + F5/Whisper-Settings als Bundle)
|
||||||
|
|
||||||
|
### Phase B — Brain mit Memory + Loop + Skills
|
||||||
|
|
||||||
|
- [x] **Phase B Punkt 2:** Migration aus `aria-data/brain-import/` → atomare Memory-Punkte (Identity / Rule / Preference / Tool / Skill, idempotent ueber migration_key) + Bootstrap-Snapshot Export/Import (nur pinned)
|
||||||
|
- [x] **Phase B Punkt 3:** Brain Conversation-Loop (Single-Chat UI, Rolling Window 50 Turns, Schwelle 60 → automatisches Destillat, manueller Trigger)
|
||||||
|
- [x] **Phase B Punkt 4:** Skills-System (Python-only via local-venv, skill_create als Tool, dynamische run_<skill> Tools, Diagnostic Skills-Tab mit Logs/Toggle/Export/Import, skill_created Live-Notification in App+Diagnostic, harte Schwelle "pip → Skill")
|
||||||
|
- [x] **Phase B Punkt 5:** Triggers-System (passive Aufweck-Quellen — Timer + Watcher mit safe Condition-Parser, GPS-near(), Diagnostic Trigger-Tab, kontinuierliches GPS-Tracking in der App fuer Use-Cases wie Blitzer-Warner)
|
||||||
|
- [x] Sprachmodell-Setting wieder funktional (brainModel in runtime.json statt aria-core)
|
||||||
|
- [x] App-Chat-Sync: kompletter Server-Sync bei Reconnect (Server = Source of Truth) + chat_cleared Live-Update. Lokal-only Bubbles (Skill-Notifications, laufende Voice ohne STT) bleiben erhalten.
|
||||||
|
- [x] App: Chat-Suche mit Next/Prev Navigation statt Filter
|
||||||
|
- [x] Token/Call-Metrics + Subscription-Quota-Tracking (Pro / Max 5x / Max 20x / Custom)
|
||||||
|
- [x] Datei-Manager Multi-Select: Bulk-Download als ZIP + Bulk-Delete (Diagnostic + App)
|
||||||
|
|
||||||
### Phase 2 — ARIA wird produktiv
|
### Phase 2 — ARIA wird produktiv
|
||||||
|
|
||||||
- [ ] Skills bauen (Bildgenerierung, etc.)
|
- [ ] Erste Skills bauen lassen (yt-dlp, pdf-extract, etc. — durch normale Anfragen)
|
||||||
- [ ] Gitea-Integration
|
- [ ] Gitea-Integration
|
||||||
- [ ] VM einrichten (Desktop, Browser, Tools)
|
- [ ] VM einrichten (Desktop, Browser, Tools)
|
||||||
- [ ] Heartbeat (periodische Selbst-Checks)
|
- [ ] Heartbeat (periodische Selbst-Checks)
|
||||||
- [ ] Lokales LLM als Waechter (Triage vor Claude-Call)
|
- [ ] Lokales LLM als Waechter (Triage vor Claude-Call)
|
||||||
- [ ] Auto-Compacting / Memory-Verwaltung
|
|
||||||
|
|
||||||
### Phase 3 — Erweiterungen
|
### Phase 3 — Erweiterungen
|
||||||
|
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ import { createBottomTabNavigator } from '@react-navigation/bottom-tabs';
|
|||||||
import ChatScreen from './src/screens/ChatScreen';
|
import ChatScreen from './src/screens/ChatScreen';
|
||||||
import SettingsScreen from './src/screens/SettingsScreen';
|
import SettingsScreen from './src/screens/SettingsScreen';
|
||||||
import rvs from './src/services/rvs';
|
import rvs from './src/services/rvs';
|
||||||
|
import { initLogger } from './src/services/logger';
|
||||||
|
|
||||||
// --- Navigation ---
|
// --- Navigation ---
|
||||||
|
|
||||||
@@ -44,6 +45,10 @@ const TAB_ICONS: Record<string, { active: string; inactive: string }> = {
|
|||||||
const App: React.FC = () => {
|
const App: React.FC = () => {
|
||||||
// Beim Start: gespeicherte RVS-Konfiguration laden und verbinden
|
// Beim Start: gespeicherte RVS-Konfiguration laden und verbinden
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
|
// Verbose-Logging-Setting laden BEVOR andere Module loslegen.
|
||||||
|
// initLogger ist async aber blockt nichts — solange er noch laueft,
|
||||||
|
// loggen wir normal (Default an), danach respektiert console.log das Setting.
|
||||||
|
initLogger().catch(() => {});
|
||||||
const initConnection = async () => {
|
const initConnection = async () => {
|
||||||
const config = await rvs.loadConfig();
|
const config = await rvs.loadConfig();
|
||||||
if (config) {
|
if (config) {
|
||||||
|
|||||||
@@ -79,8 +79,8 @@ android {
|
|||||||
applicationId "com.ariacockpit"
|
applicationId "com.ariacockpit"
|
||||||
minSdkVersion rootProject.ext.minSdkVersion
|
minSdkVersion rootProject.ext.minSdkVersion
|
||||||
targetSdkVersion rootProject.ext.targetSdkVersion
|
targetSdkVersion rootProject.ext.targetSdkVersion
|
||||||
versionCode 702
|
versionCode 10206
|
||||||
versionName "0.0.7.2"
|
versionName "0.1.2.6"
|
||||||
// Fallback fuer Libraries mit Product Flavors
|
// Fallback fuer Libraries mit Product Flavors
|
||||||
missingDimensionStrategy 'react-native-camera', 'general'
|
missingDimensionStrategy 'react-native-camera', 'general'
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,6 +6,17 @@
|
|||||||
<uses-permission android:name="android.permission.REQUEST_INSTALL_PACKAGES" />
|
<uses-permission android:name="android.permission.REQUEST_INSTALL_PACKAGES" />
|
||||||
<!-- Anruf-State lesen damit TTS bei klingelndem Telefon pausiert -->
|
<!-- Anruf-State lesen damit TTS bei klingelndem Telefon pausiert -->
|
||||||
<uses-permission android:name="android.permission.READ_PHONE_STATE" />
|
<uses-permission android:name="android.permission.READ_PHONE_STATE" />
|
||||||
|
<!-- Optional: GPS-Position der Frage anhaengen (nur wenn User in Settings aktiviert) -->
|
||||||
|
<uses-permission android:name="android.permission.ACCESS_COARSE_LOCATION" />
|
||||||
|
<uses-permission android:name="android.permission.ACCESS_FINE_LOCATION" />
|
||||||
|
<!-- Foreground-Service damit TTS auch bei minimierter App weiterlaeuft.
|
||||||
|
FOREGROUND_SERVICE_MICROPHONE ist Pflicht ab Android 14 wenn der
|
||||||
|
Service waehrend des Backgrounds aufs Mikro zugreift (Wake-Word,
|
||||||
|
Aufnahme im Gespraechsmodus). -->
|
||||||
|
<uses-permission android:name="android.permission.FOREGROUND_SERVICE" />
|
||||||
|
<uses-permission android:name="android.permission.FOREGROUND_SERVICE_MEDIA_PLAYBACK" />
|
||||||
|
<uses-permission android:name="android.permission.FOREGROUND_SERVICE_MICROPHONE" />
|
||||||
|
<uses-permission android:name="android.permission.POST_NOTIFICATIONS" />
|
||||||
|
|
||||||
<application
|
<application
|
||||||
android:name=".MainApplication"
|
android:name=".MainApplication"
|
||||||
@@ -37,5 +48,10 @@
|
|||||||
android:name="android.support.FILE_PROVIDER_PATHS"
|
android:name="android.support.FILE_PROVIDER_PATHS"
|
||||||
android:resource="@xml/file_paths" />
|
android:resource="@xml/file_paths" />
|
||||||
</provider>
|
</provider>
|
||||||
|
|
||||||
|
<service
|
||||||
|
android:name=".AriaPlaybackService"
|
||||||
|
android:exported="false"
|
||||||
|
android:foregroundServiceType="mediaPlayback|microphone" />
|
||||||
</application>
|
</application>
|
||||||
</manifest>
|
</manifest>
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ import com.facebook.react.uimanager.ViewManager
|
|||||||
|
|
||||||
class ApkInstallerPackage : ReactPackage {
|
class ApkInstallerPackage : ReactPackage {
|
||||||
override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
|
override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
|
||||||
return listOf(ApkInstallerModule(reactContext))
|
return listOf(ApkInstallerModule(reactContext), FileOpenerModule(reactContext))
|
||||||
}
|
}
|
||||||
|
|
||||||
override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
|
override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
|
||||||
|
|||||||
@@ -0,0 +1,108 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import android.app.Notification
|
||||||
|
import android.app.NotificationChannel
|
||||||
|
import android.app.NotificationManager
|
||||||
|
import android.app.PendingIntent
|
||||||
|
import android.app.Service
|
||||||
|
import android.content.Intent
|
||||||
|
import android.os.Build
|
||||||
|
import android.os.IBinder
|
||||||
|
import android.util.Log
|
||||||
|
import androidx.core.app.NotificationCompat
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Foreground-Service der den App-Prozess waehrend TTS-Wiedergabe am Leben
|
||||||
|
* haelt — Android killt sonst den Prozess sobald die App im Hintergrund ist
|
||||||
|
* und ARIA verstummt mitten im Satz.
|
||||||
|
*
|
||||||
|
* Notification ist persistent (ongoing) waehrend der Service laeuft.
|
||||||
|
* Tap auf die Notification bringt MainActivity zurueck nach vorne.
|
||||||
|
*
|
||||||
|
* foregroundServiceType="mediaPlayback" ist Pflicht ab Android 14, sonst
|
||||||
|
* wirft startForeground() eine SecurityException.
|
||||||
|
*/
|
||||||
|
class AriaPlaybackService : Service() {
|
||||||
|
companion object {
|
||||||
|
private const val TAG = "AriaPlaybackService"
|
||||||
|
private const val CHANNEL_ID = "aria_playback"
|
||||||
|
private const val NOTIFICATION_ID = 1042
|
||||||
|
const val EXTRA_REASON = "reason" // "tts" | "wake" | "rec" | ""
|
||||||
|
}
|
||||||
|
|
||||||
|
private var currentReason: String = ""
|
||||||
|
|
||||||
|
override fun onCreate() {
|
||||||
|
super.onCreate()
|
||||||
|
ensureNotificationChannel()
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun onStartCommand(intent: Intent?, flags: Int, startId: Int): Int {
|
||||||
|
val reason = intent?.getStringExtra(EXTRA_REASON) ?: ""
|
||||||
|
currentReason = reason
|
||||||
|
Log.i(TAG, "Foreground-Service start/update (reason=$reason)")
|
||||||
|
try {
|
||||||
|
startForeground(NOTIFICATION_ID, buildNotification(reason))
|
||||||
|
} catch (e: Exception) {
|
||||||
|
Log.e(TAG, "startForeground fehlgeschlagen", e)
|
||||||
|
stopSelf()
|
||||||
|
}
|
||||||
|
// START_NOT_STICKY: wenn Android den Service killt, NICHT automatisch
|
||||||
|
// wieder starten — die App entscheidet wann der Service noetig ist.
|
||||||
|
return START_NOT_STICKY
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun onDestroy() {
|
||||||
|
Log.i(TAG, "Foreground-Service gestoppt")
|
||||||
|
super.onDestroy()
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun onBind(intent: Intent?): IBinder? = null
|
||||||
|
|
||||||
|
private fun ensureNotificationChannel() {
|
||||||
|
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
|
||||||
|
val nm = getSystemService(NotificationManager::class.java) ?: return
|
||||||
|
if (nm.getNotificationChannel(CHANNEL_ID) == null) {
|
||||||
|
val channel = NotificationChannel(
|
||||||
|
CHANNEL_ID,
|
||||||
|
"ARIA Audio-Wiedergabe",
|
||||||
|
NotificationManager.IMPORTANCE_LOW,
|
||||||
|
).apply {
|
||||||
|
description = "Notification waehrend ARIA spricht (haelt die App im Hintergrund am Leben)"
|
||||||
|
setShowBadge(false)
|
||||||
|
}
|
||||||
|
nm.createNotificationChannel(channel)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun buildNotification(reason: String): Notification {
|
||||||
|
val launchIntent = Intent(this, MainActivity::class.java).apply {
|
||||||
|
flags = Intent.FLAG_ACTIVITY_NEW_TASK or Intent.FLAG_ACTIVITY_CLEAR_TOP
|
||||||
|
}
|
||||||
|
val pendingFlags = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M)
|
||||||
|
PendingIntent.FLAG_IMMUTABLE or PendingIntent.FLAG_UPDATE_CURRENT
|
||||||
|
else
|
||||||
|
PendingIntent.FLAG_UPDATE_CURRENT
|
||||||
|
val pendingIntent = PendingIntent.getActivity(this, 0, launchIntent, pendingFlags)
|
||||||
|
|
||||||
|
val (title, body) = when (reason) {
|
||||||
|
"tts" -> "ARIA spricht" to "Antwort wird abgespielt — antippen oeffnet die App"
|
||||||
|
"rec" -> "ARIA hoert zu" to "Sprachaufnahme laeuft — antippen oeffnet die App"
|
||||||
|
"wake" -> "ARIA bereit" to "Wake-Word lauscht passiv — antippen oeffnet die App"
|
||||||
|
else -> "ARIA aktiv" to "Hintergrund-Modus — antippen oeffnet die App"
|
||||||
|
}
|
||||||
|
|
||||||
|
return NotificationCompat.Builder(this, CHANNEL_ID)
|
||||||
|
.setContentTitle(title)
|
||||||
|
.setContentText(body)
|
||||||
|
.setSmallIcon(R.mipmap.ic_launcher)
|
||||||
|
.setContentIntent(pendingIntent)
|
||||||
|
.setOngoing(true)
|
||||||
|
.setShowWhen(false)
|
||||||
|
.setPriority(NotificationCompat.PRIORITY_LOW)
|
||||||
|
.setCategory(NotificationCompat.CATEGORY_SERVICE)
|
||||||
|
.setVisibility(NotificationCompat.VISIBILITY_PUBLIC)
|
||||||
|
.build()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -5,26 +5,71 @@ import android.media.AudioAttributes
|
|||||||
import android.media.AudioFocusRequest
|
import android.media.AudioFocusRequest
|
||||||
import android.media.AudioManager
|
import android.media.AudioManager
|
||||||
import android.os.Build
|
import android.os.Build
|
||||||
|
import android.util.Log
|
||||||
|
import com.facebook.react.bridge.Arguments
|
||||||
import com.facebook.react.bridge.Promise
|
import com.facebook.react.bridge.Promise
|
||||||
import com.facebook.react.bridge.ReactApplicationContext
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||||
import com.facebook.react.bridge.ReactMethod
|
import com.facebook.react.bridge.ReactMethod
|
||||||
|
import com.facebook.react.modules.core.DeviceEventManagerModule
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Steuert Audio-Focus fuer Ducking/Muten anderer Apps.
|
* Steuert Audio-Focus fuer Ducking/Muten anderer Apps + emittiert Loss-Events
|
||||||
|
* an JS damit ARIA bei VoIP-Anrufen (WhatsApp/Signal/Discord/...) aufhoert
|
||||||
|
* zu sprechen — diese Anrufe gehen nicht ueber TelephonyManager, sondern
|
||||||
|
* requestn AudioFocus_GAIN_TRANSIENT_EXCLUSIVE was wir hier mitbekommen.
|
||||||
*
|
*
|
||||||
* - requestDuck() → andere Apps werden leiser (ARIA spricht TTS)
|
* - requestDuck() → andere Apps werden leiser (ARIA spricht TTS)
|
||||||
* - requestExclusive() → andere Apps werden pausiert (Mikrofon-Aufnahme)
|
* - requestExclusive() → andere Apps werden pausiert (Mikrofon-Aufnahme)
|
||||||
* - release() → Focus abgeben, andere Apps duerfen wieder
|
* - release() → Focus abgeben, andere Apps duerfen wieder
|
||||||
|
*
|
||||||
|
* Events:
|
||||||
|
* - "AudioFocusChanged" mit type:
|
||||||
|
* "loss" — endgueltiger Verlust (Anruf, andere App permanent)
|
||||||
|
* "loss_transient" — vorruebergehender Verlust (kurze Unterbrechung)
|
||||||
|
* "gain" — Fokus zurueck
|
||||||
*/
|
*/
|
||||||
class AudioFocusModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
|
class AudioFocusModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
|
||||||
override fun getName() = "AudioFocus"
|
override fun getName() = "AudioFocus"
|
||||||
|
|
||||||
|
companion object { private const val TAG = "AudioFocus" }
|
||||||
|
|
||||||
private var currentRequest: AudioFocusRequest? = null
|
private var currentRequest: AudioFocusRequest? = null
|
||||||
|
|
||||||
private fun audioManager(): AudioManager? =
|
private fun audioManager(): AudioManager? =
|
||||||
reactApplicationContext.getSystemService(Context.AUDIO_SERVICE) as? AudioManager
|
reactApplicationContext.getSystemService(Context.AUDIO_SERVICE) as? AudioManager
|
||||||
|
|
||||||
|
private fun emitFocusChange(type: String) {
|
||||||
|
try {
|
||||||
|
val params = Arguments.createMap().apply { putString("type", type) }
|
||||||
|
reactApplicationContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
|
||||||
|
.emit("AudioFocusChanged", params)
|
||||||
|
} catch (e: Exception) {
|
||||||
|
Log.w(TAG, "emit failed: ${e.message}")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private val focusListener = AudioManager.OnAudioFocusChangeListener { focusChange ->
|
||||||
|
when (focusChange) {
|
||||||
|
AudioManager.AUDIOFOCUS_LOSS -> {
|
||||||
|
Log.i(TAG, "AUDIOFOCUS_LOSS (z.B. Anruf, anderer Player permanent)")
|
||||||
|
emitFocusChange("loss")
|
||||||
|
}
|
||||||
|
AudioManager.AUDIOFOCUS_LOSS_TRANSIENT -> {
|
||||||
|
Log.i(TAG, "AUDIOFOCUS_LOSS_TRANSIENT (kurze Unterbrechung)")
|
||||||
|
emitFocusChange("loss_transient")
|
||||||
|
}
|
||||||
|
AudioManager.AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK -> {
|
||||||
|
// Notification-Sound o.ae. — wir ignorieren das, ARIA macht weiter
|
||||||
|
Log.d(TAG, "AUDIOFOCUS_LOSS_CAN_DUCK ignoriert")
|
||||||
|
}
|
||||||
|
AudioManager.AUDIOFOCUS_GAIN -> {
|
||||||
|
Log.i(TAG, "AUDIOFOCUS_GAIN")
|
||||||
|
emitFocusChange("gain")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private fun requestFocus(durationHint: Int, usage: Int, promise: Promise) {
|
private fun requestFocus(durationHint: Int, usage: Int, promise: Promise) {
|
||||||
val am = audioManager()
|
val am = audioManager()
|
||||||
if (am == null) {
|
if (am == null) {
|
||||||
@@ -41,13 +86,13 @@ class AudioFocusModule(reactContext: ReactApplicationContext) : ReactContextBase
|
|||||||
.build()
|
.build()
|
||||||
val req = AudioFocusRequest.Builder(durationHint)
|
val req = AudioFocusRequest.Builder(durationHint)
|
||||||
.setAudioAttributes(attrs)
|
.setAudioAttributes(attrs)
|
||||||
.setOnAudioFocusChangeListener { /* kein Callback noetig */ }
|
.setOnAudioFocusChangeListener(focusListener)
|
||||||
.build()
|
.build()
|
||||||
currentRequest = req
|
currentRequest = req
|
||||||
am.requestAudioFocus(req)
|
am.requestAudioFocus(req)
|
||||||
} else {
|
} else {
|
||||||
@Suppress("DEPRECATION")
|
@Suppress("DEPRECATION")
|
||||||
am.requestAudioFocus(null, AudioManager.STREAM_MUSIC, durationHint)
|
am.requestAudioFocus(focusListener, AudioManager.STREAM_MUSIC, durationHint)
|
||||||
}
|
}
|
||||||
|
|
||||||
promise.resolve(result == AudioManager.AUDIOFOCUS_REQUEST_GRANTED)
|
promise.resolve(result == AudioManager.AUDIOFOCUS_REQUEST_GRANTED)
|
||||||
@@ -86,14 +131,82 @@ class AudioFocusModule(reactContext: ReactApplicationContext) : ReactContextBase
|
|||||||
promise.resolve(true)
|
promise.resolve(true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Den USAGE_MEDIA-Focus-Stack im System aufmischen, damit Spotify/YouTube
|
||||||
|
* resumen wenn ein anderer Player (z.B. react-native-sound) seinen Focus
|
||||||
|
* nicht ordnungsgemaess released hat. Strategie: kurz selbst USAGE_MEDIA
|
||||||
|
* GAIN beanspruchen — das System invalidiert dabei den haengenden Stack-
|
||||||
|
* Eintrag des anderen Players — und sofort wieder abandonen. Spotify
|
||||||
|
* bekommt den Focus-Gain und resumed.
|
||||||
|
*
|
||||||
|
* Workaround fuer das react-native-sound-Bug: Sound.stop()/release()
|
||||||
|
* laesst den AudioFocusRequest haengen.
|
||||||
|
*/
|
||||||
|
@ReactMethod
|
||||||
|
fun kickReleaseMedia(promise: Promise) {
|
||||||
|
val am = audioManager()
|
||||||
|
if (am == null) {
|
||||||
|
promise.resolve(false)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Async laufen lassen — wir wollen einen request, Pause, dann abandon.
|
||||||
|
// Ohne Pause merkt das System (und damit Spotify) die kurze Owner-
|
||||||
|
// Wechsel oft gar nicht. 250ms reicht erfahrungsgemaess fuer den
|
||||||
|
// Focus-Stack-Refresh.
|
||||||
|
Thread {
|
||||||
|
try {
|
||||||
|
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
|
||||||
|
val attrs = AudioAttributes.Builder()
|
||||||
|
.setUsage(AudioAttributes.USAGE_MEDIA)
|
||||||
|
.setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
|
||||||
|
.build()
|
||||||
|
val kickListener = AudioManager.OnAudioFocusChangeListener { /* ignorieren */ }
|
||||||
|
val kickReq = AudioFocusRequest.Builder(AudioManager.AUDIOFOCUS_GAIN)
|
||||||
|
.setAudioAttributes(attrs)
|
||||||
|
.setOnAudioFocusChangeListener(kickListener)
|
||||||
|
.build()
|
||||||
|
am.requestAudioFocus(kickReq)
|
||||||
|
Thread.sleep(250)
|
||||||
|
am.abandonAudioFocusRequest(kickReq)
|
||||||
|
} else {
|
||||||
|
val kickListener = AudioManager.OnAudioFocusChangeListener { /* ignorieren */ }
|
||||||
|
@Suppress("DEPRECATION")
|
||||||
|
am.requestAudioFocus(kickListener, AudioManager.STREAM_MUSIC, AudioManager.AUDIOFOCUS_GAIN)
|
||||||
|
Thread.sleep(250)
|
||||||
|
@Suppress("DEPRECATION")
|
||||||
|
am.abandonAudioFocus(kickListener)
|
||||||
|
}
|
||||||
|
Log.i(TAG, "kickReleaseMedia: USAGE_MEDIA-Stack aufgemischt (250ms Pause)")
|
||||||
|
} catch (e: Exception) {
|
||||||
|
Log.w(TAG, "kickReleaseMedia failed: ${e.message}")
|
||||||
|
}
|
||||||
|
}.start()
|
||||||
|
promise.resolve(true)
|
||||||
|
}
|
||||||
|
|
||||||
private fun release() {
|
private fun release() {
|
||||||
val am = audioManager() ?: return
|
val am = audioManager() ?: return
|
||||||
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
|
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
|
||||||
currentRequest?.let { am.abandonAudioFocusRequest(it) }
|
currentRequest?.let { am.abandonAudioFocusRequest(it) }
|
||||||
} else {
|
} else {
|
||||||
@Suppress("DEPRECATION")
|
@Suppress("DEPRECATION")
|
||||||
am.abandonAudioFocus(null)
|
am.abandonAudioFocus(focusListener)
|
||||||
}
|
}
|
||||||
currentRequest = null
|
currentRequest = null
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Aktueller Audio-Mode: NORMAL=0, IN_CALL=2, IN_COMMUNICATION=3, CALL_SCREENING=4.
|
||||||
|
* IN_COMMUNICATION ist der typische VoIP-Anruf-Mode (WhatsApp, Signal, etc.) —
|
||||||
|
* kann gepollt werden um zu erkennen wann der Anruf vorbei ist (zurueck NORMAL). */
|
||||||
|
@ReactMethod
|
||||||
|
fun getMode(promise: Promise) {
|
||||||
|
val am = audioManager()
|
||||||
|
if (am == null) {
|
||||||
|
promise.resolve(0)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
promise.resolve(am.mode)
|
||||||
|
}
|
||||||
|
|
||||||
|
@ReactMethod fun addListener(eventName: String) {}
|
||||||
|
@ReactMethod fun removeListeners(count: Int) {}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,59 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import android.content.Intent
|
||||||
|
import android.os.Build
|
||||||
|
import android.util.Log
|
||||||
|
import com.facebook.react.bridge.Promise
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||||
|
import com.facebook.react.bridge.ReactMethod
|
||||||
|
|
||||||
|
/**
|
||||||
|
* RN-Bridge fuer den AriaPlaybackService.
|
||||||
|
*
|
||||||
|
* Wird vom JS waehrend einer TTS-Wiedergabe gestartet damit Android den
|
||||||
|
* App-Prozess nicht killt wenn die App im Hintergrund ist (= ARIA spricht
|
||||||
|
* weiter, auch wenn Stefan die App minimiert hat).
|
||||||
|
*
|
||||||
|
* Service stoppt entweder explizit per stop() oder wird von Android
|
||||||
|
* mitgekillt wenn der Prozess weg ist (was bei Foreground-Service nur
|
||||||
|
* passiert wenn der User die App force-stopped).
|
||||||
|
*/
|
||||||
|
class BackgroundAudioModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
|
||||||
|
override fun getName() = "BackgroundAudio"
|
||||||
|
|
||||||
|
companion object { private const val TAG = "BackgroundAudio" }
|
||||||
|
|
||||||
|
@ReactMethod
|
||||||
|
fun start(reason: String, promise: Promise) {
|
||||||
|
try {
|
||||||
|
val ctx = reactApplicationContext
|
||||||
|
val intent = Intent(ctx, AriaPlaybackService::class.java)
|
||||||
|
intent.putExtra(AriaPlaybackService.EXTRA_REASON, reason ?: "")
|
||||||
|
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
|
||||||
|
ctx.startForegroundService(intent)
|
||||||
|
} else {
|
||||||
|
ctx.startService(intent)
|
||||||
|
}
|
||||||
|
promise.resolve(true)
|
||||||
|
} catch (e: Exception) {
|
||||||
|
Log.w(TAG, "start fehlgeschlagen: ${e.message}")
|
||||||
|
promise.reject("START_FAILED", e.message ?: "Unbekannter Fehler", e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@ReactMethod
|
||||||
|
fun stop(promise: Promise) {
|
||||||
|
try {
|
||||||
|
val ctx = reactApplicationContext
|
||||||
|
ctx.stopService(Intent(ctx, AriaPlaybackService::class.java))
|
||||||
|
promise.resolve(true)
|
||||||
|
} catch (e: Exception) {
|
||||||
|
Log.w(TAG, "stop fehlgeschlagen: ${e.message}")
|
||||||
|
promise.reject("STOP_FAILED", e.message ?: "Unbekannter Fehler", e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@ReactMethod fun addListener(eventName: String) {}
|
||||||
|
@ReactMethod fun removeListeners(count: Int) {}
|
||||||
|
}
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import com.facebook.react.ReactPackage
|
||||||
|
import com.facebook.react.bridge.NativeModule
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.uimanager.ViewManager
|
||||||
|
|
||||||
|
class BackgroundAudioPackage : ReactPackage {
|
||||||
|
override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
|
||||||
|
return listOf(BackgroundAudioModule(reactContext))
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
|
||||||
|
return emptyList()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,55 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import android.content.Intent
|
||||||
|
import android.net.Uri
|
||||||
|
import android.os.Build
|
||||||
|
import androidx.core.content.FileProvider
|
||||||
|
import com.facebook.react.bridge.Promise
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||||
|
import com.facebook.react.bridge.ReactMethod
|
||||||
|
import java.io.File
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Oeffnet eine beliebige Datei (PDF, Bild, Office-Doc, ...) mit der vom User
|
||||||
|
* gewaehlten App via Android-Intent-Picker. Nutzt FileProvider damit auch
|
||||||
|
* Android 7+ (content:// statt file://) das URI lesen darf.
|
||||||
|
*
|
||||||
|
* MIME-Type wird vom Caller bestimmt — App-Auswahl ist davon abhaengig (PDF
|
||||||
|
* geht an PDF-Viewer, image/jpeg an Galerie, etc.).
|
||||||
|
*/
|
||||||
|
class FileOpenerModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
|
||||||
|
override fun getName() = "FileOpener"
|
||||||
|
|
||||||
|
@ReactMethod
|
||||||
|
fun open(filePath: String, mimeType: String, promise: Promise) {
|
||||||
|
try {
|
||||||
|
val cleanPath = filePath.removePrefix("file://")
|
||||||
|
val file = File(cleanPath)
|
||||||
|
if (!file.exists()) {
|
||||||
|
promise.reject("FILE_NOT_FOUND", "Datei nicht gefunden: $cleanPath")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
val context = reactApplicationContext
|
||||||
|
val uri: Uri = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
|
||||||
|
FileProvider.getUriForFile(context, "${context.packageName}.fileprovider", file)
|
||||||
|
} else {
|
||||||
|
Uri.fromFile(file)
|
||||||
|
}
|
||||||
|
val safeMime = if (mimeType.isBlank()) "application/octet-stream" else mimeType
|
||||||
|
val intent = Intent(Intent.ACTION_VIEW).apply {
|
||||||
|
setDataAndType(uri, safeMime)
|
||||||
|
addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
|
||||||
|
addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION)
|
||||||
|
}
|
||||||
|
// Chooser zeigt Android-Auswahl falls mehrere Apps das MIME oeffnen koennen.
|
||||||
|
val chooser = Intent.createChooser(intent, "Oeffnen mit").apply {
|
||||||
|
addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
|
||||||
|
}
|
||||||
|
context.startActivity(chooser)
|
||||||
|
promise.resolve(true)
|
||||||
|
} catch (e: Exception) {
|
||||||
|
promise.reject("OPEN_ERROR", e.message, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -23,6 +23,7 @@ class MainApplication : Application(), ReactApplication {
|
|||||||
add(PcmStreamPlayerPackage())
|
add(PcmStreamPlayerPackage())
|
||||||
add(OpenWakeWordPackage())
|
add(OpenWakeWordPackage())
|
||||||
add(PhoneCallPackage())
|
add(PhoneCallPackage())
|
||||||
|
add(BackgroundAudioPackage())
|
||||||
}
|
}
|
||||||
|
|
||||||
override fun getJSMainModuleName(): String = "index"
|
override fun getJSMainModuleName(): String = "index"
|
||||||
|
|||||||
@@ -8,6 +8,9 @@ import android.content.pm.PackageManager
|
|||||||
import android.media.AudioFormat
|
import android.media.AudioFormat
|
||||||
import android.media.AudioRecord
|
import android.media.AudioRecord
|
||||||
import android.media.MediaRecorder
|
import android.media.MediaRecorder
|
||||||
|
import android.media.audiofx.AcousticEchoCanceler
|
||||||
|
import android.media.audiofx.AutomaticGainControl
|
||||||
|
import android.media.audiofx.NoiseSuppressor
|
||||||
import android.util.Log
|
import android.util.Log
|
||||||
import androidx.core.content.ContextCompat
|
import androidx.core.content.ContextCompat
|
||||||
import com.facebook.react.bridge.Promise
|
import com.facebook.react.bridge.Promise
|
||||||
@@ -70,6 +73,13 @@ class OpenWakeWordModule(reactContext: ReactApplicationContext) : ReactContextBa
|
|||||||
private val running = AtomicBoolean(false)
|
private val running = AtomicBoolean(false)
|
||||||
private var captureThread: Thread? = null
|
private var captureThread: Thread? = null
|
||||||
|
|
||||||
|
// Audio-Effects: Echo-Cancellation (gegen ARIAs eigene TTS-Stimme die sonst
|
||||||
|
// das Wake-Word triggern wuerde) + Noise-Suppression. Per VOICE_COMMUNICATION
|
||||||
|
// Audio-Source schon vorhanden, aber explizites Aktivieren ist robuster.
|
||||||
|
private var aec: AcousticEchoCanceler? = null
|
||||||
|
private var ns: NoiseSuppressor? = null
|
||||||
|
private var agc: AutomaticGainControl? = null
|
||||||
|
|
||||||
// Inferenz-State
|
// Inferenz-State
|
||||||
private val melBuffer: ArrayList<FloatArray> = ArrayList(256) // Liste von 32-dim Frames
|
private val melBuffer: ArrayList<FloatArray> = ArrayList(256) // Liste von 32-dim Frames
|
||||||
private var melProcessedIdx: Int = 0
|
private var melProcessedIdx: Int = 0
|
||||||
@@ -146,8 +156,12 @@ class OpenWakeWordModule(reactContext: ReactApplicationContext) : ReactContextBa
|
|||||||
AudioFormat.ENCODING_PCM_16BIT,
|
AudioFormat.ENCODING_PCM_16BIT,
|
||||||
).coerceAtLeast(CHUNK_SAMPLES * 2 * 4)
|
).coerceAtLeast(CHUNK_SAMPLES * 2 * 4)
|
||||||
|
|
||||||
|
// VOICE_COMMUNICATION-Source: aktiviert auf den meisten Android-Geraeten
|
||||||
|
// automatisch Echo-Cancellation + Noise-Suppression. Wichtig damit
|
||||||
|
// ARIAs eigene Stimme nicht das Wake-Word triggert wenn parallel
|
||||||
|
// zur TTS-Wiedergabe gelauscht wird.
|
||||||
val record = AudioRecord(
|
val record = AudioRecord(
|
||||||
MediaRecorder.AudioSource.MIC,
|
MediaRecorder.AudioSource.VOICE_COMMUNICATION,
|
||||||
SAMPLE_RATE,
|
SAMPLE_RATE,
|
||||||
AudioFormat.CHANNEL_IN_MONO,
|
AudioFormat.CHANNEL_IN_MONO,
|
||||||
AudioFormat.ENCODING_PCM_16BIT,
|
AudioFormat.ENCODING_PCM_16BIT,
|
||||||
@@ -159,6 +173,27 @@ class OpenWakeWordModule(reactContext: ReactApplicationContext) : ReactContextBa
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
audioRecord = record
|
audioRecord = record
|
||||||
|
|
||||||
|
// Audio-Effects ZUSAETZLICH explizit aktivieren — manche Geraete
|
||||||
|
// benoetigen das, obwohl VOICE_COMMUNICATION es eigentlich schon
|
||||||
|
// mitbringt. Failure ist nicht kritisch (continue ohne Effects).
|
||||||
|
try {
|
||||||
|
if (AcousticEchoCanceler.isAvailable()) {
|
||||||
|
aec = AcousticEchoCanceler.create(record.audioSessionId)?.apply { enabled = true }
|
||||||
|
Log.i(TAG, "AEC aktiviert (enabled=${aec?.enabled})")
|
||||||
|
}
|
||||||
|
} catch (e: Exception) { Log.w(TAG, "AEC failed: ${e.message}") }
|
||||||
|
try {
|
||||||
|
if (NoiseSuppressor.isAvailable()) {
|
||||||
|
ns = NoiseSuppressor.create(record.audioSessionId)?.apply { enabled = true }
|
||||||
|
}
|
||||||
|
} catch (e: Exception) { Log.w(TAG, "NS failed: ${e.message}") }
|
||||||
|
try {
|
||||||
|
if (AutomaticGainControl.isAvailable()) {
|
||||||
|
agc = AutomaticGainControl.create(record.audioSessionId)?.apply { enabled = true }
|
||||||
|
}
|
||||||
|
} catch (e: Exception) { Log.w(TAG, "AGC failed: ${e.message}") }
|
||||||
|
|
||||||
resetInferenceState()
|
resetInferenceState()
|
||||||
running.set(true)
|
running.set(true)
|
||||||
record.startRecording()
|
record.startRecording()
|
||||||
@@ -179,6 +214,13 @@ class OpenWakeWordModule(reactContext: ReactApplicationContext) : ReactContextBa
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private fun releaseAudioEffects() {
|
||||||
|
try { aec?.release() } catch (_: Exception) {}
|
||||||
|
try { ns?.release() } catch (_: Exception) {}
|
||||||
|
try { agc?.release() } catch (_: Exception) {}
|
||||||
|
aec = null; ns = null; agc = null
|
||||||
|
}
|
||||||
|
|
||||||
@ReactMethod
|
@ReactMethod
|
||||||
fun stop(promise: Promise) {
|
fun stop(promise: Promise) {
|
||||||
running.set(false)
|
running.set(false)
|
||||||
@@ -189,6 +231,7 @@ class OpenWakeWordModule(reactContext: ReactApplicationContext) : ReactContextBa
|
|||||||
try { audioRecord?.stop() } catch (_: Exception) {}
|
try { audioRecord?.stop() } catch (_: Exception) {}
|
||||||
try { audioRecord?.release() } catch (_: Exception) {}
|
try { audioRecord?.release() } catch (_: Exception) {}
|
||||||
audioRecord = null
|
audioRecord = null
|
||||||
|
releaseAudioEffects()
|
||||||
Log.i(TAG, "Lauschen gestoppt")
|
Log.i(TAG, "Lauschen gestoppt")
|
||||||
promise.resolve(true)
|
promise.resolve(true)
|
||||||
}
|
}
|
||||||
@@ -201,6 +244,7 @@ class OpenWakeWordModule(reactContext: ReactApplicationContext) : ReactContextBa
|
|||||||
try { audioRecord?.stop() } catch (_: Exception) {}
|
try { audioRecord?.stop() } catch (_: Exception) {}
|
||||||
try { audioRecord?.release() } catch (_: Exception) {}
|
try { audioRecord?.release() } catch (_: Exception) {}
|
||||||
audioRecord = null
|
audioRecord = null
|
||||||
|
releaseAudioEffects()
|
||||||
disposeSessions()
|
disposeSessions()
|
||||||
promise.resolve(true)
|
promise.resolve(true)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,12 +4,15 @@ import android.media.AudioAttributes
|
|||||||
import android.media.AudioFormat
|
import android.media.AudioFormat
|
||||||
import android.media.AudioManager
|
import android.media.AudioManager
|
||||||
import android.media.AudioTrack
|
import android.media.AudioTrack
|
||||||
|
import android.os.Build
|
||||||
import android.util.Base64
|
import android.util.Base64
|
||||||
import android.util.Log
|
import android.util.Log
|
||||||
|
import com.facebook.react.bridge.Arguments
|
||||||
import com.facebook.react.bridge.Promise
|
import com.facebook.react.bridge.Promise
|
||||||
import com.facebook.react.bridge.ReactApplicationContext
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||||
import com.facebook.react.bridge.ReactMethod
|
import com.facebook.react.bridge.ReactMethod
|
||||||
|
import com.facebook.react.modules.core.DeviceEventManagerModule
|
||||||
import java.util.concurrent.LinkedBlockingQueue
|
import java.util.concurrent.LinkedBlockingQueue
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -76,9 +79,12 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
|
|||||||
val encoding = AudioFormat.ENCODING_PCM_16BIT
|
val encoding = AudioFormat.ENCODING_PCM_16BIT
|
||||||
val minBuf = AudioTrack.getMinBufferSize(sampleRate, channelConfig, encoding)
|
val minBuf = AudioTrack.getMinBufferSize(sampleRate, channelConfig, encoding)
|
||||||
val bytesPerSecond = sampleRate * channels * 2 // 16-bit = 2 bytes
|
val bytesPerSecond = sampleRate * channels * 2 // 16-bit = 2 bytes
|
||||||
// Buffer muss mindestens PREROLL + etwas Spielraum fassen.
|
|
||||||
val prerollTarget = (bytesPerSecond * prerollSec).toInt()
|
val prerollTarget = (bytesPerSecond * prerollSec).toInt()
|
||||||
val bufferSize = (minBuf * 32).coerceAtLeast(prerollTarget * 2)
|
// Buffer entkoppelt von Preroll — fester ~4s-Buffer. OnePlus A12
|
||||||
|
// mit USAGE_ASSISTANT laeuft AudioTrack erst ab ~3s gepufferter
|
||||||
|
// Daten an. Wir padden Kurztexte vor play() auf 3s (siehe Block
|
||||||
|
// nach mainLoop), Buffer braucht ~1s Headroom weil write() blockt.
|
||||||
|
val bufferSize = (bytesPerSecond * 4).coerceAtLeast(minBuf * 8)
|
||||||
prerollBytes = prerollTarget
|
prerollBytes = prerollTarget
|
||||||
bytesBuffered = 0
|
bytesBuffered = 0
|
||||||
playbackStarted = false
|
playbackStarted = false
|
||||||
@@ -102,7 +108,20 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
|
|||||||
.setTransferMode(AudioTrack.MODE_STREAM)
|
.setTransferMode(AudioTrack.MODE_STREAM)
|
||||||
.build()
|
.build()
|
||||||
|
|
||||||
// AudioTrack erstellen — play() wird erst aufgerufen wenn Pre-Roll erreicht.
|
// Start-Threshold runterdrehen: Default ist bufferSize/2 (= 2s bei 4s
|
||||||
|
// Buffer). AudioTrack startet sonst nicht bevor 2s im Puffer sind —
|
||||||
|
// bei kurzen TTS-Antworten (3 Worte ~ 1.4s) bleibt pos auf 0 stehen.
|
||||||
|
// 0.1s reicht damit AudioTrack sofort mit dem ersten Chunk anlaeuft.
|
||||||
|
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
|
||||||
|
try {
|
||||||
|
val startFrames = (sampleRate / 10).coerceAtLeast(1) // 100ms
|
||||||
|
newTrack.setStartThresholdInFrames(startFrames)
|
||||||
|
Log.i(TAG, "Start-Threshold gesetzt: ${startFrames} frames (~100ms)")
|
||||||
|
} catch (e: Exception) {
|
||||||
|
Log.w(TAG, "setStartThresholdInFrames failed: ${e.message}")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
track = newTrack
|
track = newTrack
|
||||||
queue.clear()
|
queue.clear()
|
||||||
writerShouldStop = false
|
writerShouldStop = false
|
||||||
@@ -137,10 +156,12 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
|
|||||||
Log.w(TAG, "play() sofort failed: ${e.message}")
|
Log.w(TAG, "play() sofort failed: ${e.message}")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Idle-Cutoff: wenn endRequested NICHT kam aber 30s nichts mehr
|
// Idle-Cutoff: wenn endRequested NICHT kam aber lange nichts mehr
|
||||||
// reinkommt, brechen wir ab (Bridge-Crash, verlorener final).
|
// reinkommt, brechen wir ab (Bridge-Crash, verlorener final).
|
||||||
|
// 120s damit lange F5-TTS-Render-Pausen zwischen Saetzen (z.B. bei
|
||||||
|
// Modell-Wechsel oder kalter GPU) nicht den Stream abreissen.
|
||||||
var idleMs = 0L
|
var idleMs = 0L
|
||||||
val maxIdleMs = 30_000L
|
val maxIdleMs = 120_000L
|
||||||
// Zielpufferfuellung — unter diesem Wasserstand fuettern wir
|
// Zielpufferfuellung — unter diesem Wasserstand fuettern wir
|
||||||
// Stille rein damit AudioTrack nicht underrunt waehrend die
|
// Stille rein damit AudioTrack nicht underrunt waehrend die
|
||||||
// Bridge den naechsten Satz rendert. Spotify/YouTube reagieren
|
// Bridge den naechsten Satz rendert. Spotify/YouTube reagieren
|
||||||
@@ -152,15 +173,11 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
|
|||||||
val data = queue.poll(50, java.util.concurrent.TimeUnit.MILLISECONDS)
|
val data = queue.poll(50, java.util.concurrent.TimeUnit.MILLISECONDS)
|
||||||
if (data == null) {
|
if (data == null) {
|
||||||
if (endRequested) {
|
if (endRequested) {
|
||||||
// Falls wir vor Pre-Roll enden (kurzer Text): trotzdem abspielen
|
// Falls play() noch gar nicht lief (Stream ohne data
|
||||||
|
// ueberhaupt — sehr seltene Edge-Case): jetzt anstossen
|
||||||
|
// damit das finally{}-Wait nicht endlos blockt.
|
||||||
if (!playbackStarted) {
|
if (!playbackStarted) {
|
||||||
try {
|
try { t.play(); playbackStarted = true } catch (_: Exception) {}
|
||||||
t.play()
|
|
||||||
playbackStarted = true
|
|
||||||
Log.i(TAG, "Playback gestartet VOR Pre-Roll (kurzer Text, ${bytesBuffered}B gepuffert)")
|
|
||||||
} catch (e: Exception) {
|
|
||||||
Log.w(TAG, "play() fallback failed: ${e.message}")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
break@mainLoop
|
break@mainLoop
|
||||||
}
|
}
|
||||||
@@ -192,12 +209,16 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
|
|||||||
}
|
}
|
||||||
idleMs = 0L
|
idleMs = 0L
|
||||||
|
|
||||||
// Pre-Roll Check: play() erst wenn genug gepuffert
|
// play() beim ALLERERSTEN data-chunk aufrufen — egal wie wenig
|
||||||
if (!playbackStarted && bytesBuffered + data.size >= prerollBytes) {
|
// Daten da sind. Sonst stallt AudioTrack auf OnePlus A12 wenn
|
||||||
|
// play() erst gerufen wird nachdem der Buffer komplett gefuellt
|
||||||
|
// ist. Pre-Roll als "Vorrat aufbauen" passiert dann waehrend
|
||||||
|
// der Track schon spielt — Underrun-Schutz fuettert ggf. Stille.
|
||||||
|
if (!playbackStarted) {
|
||||||
try {
|
try {
|
||||||
t.play()
|
t.play()
|
||||||
playbackStarted = true
|
playbackStarted = true
|
||||||
Log.i(TAG, "Playback gestartet nach Pre-Roll ${bytesBuffered + data.size} Bytes")
|
Log.i(TAG, "Playback gestartet beim 1. Chunk (${bytesBuffered}B leading + ${data.size}B data)")
|
||||||
} catch (e: Exception) {
|
} catch (e: Exception) {
|
||||||
Log.w(TAG, "play() failed: ${e.message}")
|
Log.w(TAG, "play() failed: ${e.message}")
|
||||||
}
|
}
|
||||||
@@ -233,12 +254,21 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
|
|||||||
val totalFrames = (bytesBuffered / streamBytesPerFrame).toInt()
|
val totalFrames = (bytesBuffered / streamBytesPerFrame).toInt()
|
||||||
var lastPos = -1
|
var lastPos = -1
|
||||||
var stalledCount = 0
|
var stalledCount = 0
|
||||||
|
var retried = false
|
||||||
while (!writerShouldStop) {
|
while (!writerShouldStop) {
|
||||||
val pos = t.playbackHeadPosition
|
val pos = t.playbackHeadPosition
|
||||||
if (pos >= totalFrames) break
|
if (pos >= totalFrames) break
|
||||||
// Safety: wenn Position 2s nicht mehr vorwaerts → AudioTrack hing
|
|
||||||
if (pos == lastPos) {
|
if (pos == lastPos) {
|
||||||
stalledCount++
|
stalledCount++
|
||||||
|
// Nach 500ms Stillstand: AudioTrack-Quirk auf manchen
|
||||||
|
// Geraeten (OnePlus A12) — play() nochmal anstossen.
|
||||||
|
if (stalledCount == 10 && pos == 0 && !retried) {
|
||||||
|
retried = true
|
||||||
|
Log.w(TAG, "playback nicht angefahren — retry play()")
|
||||||
|
try { t.play() } catch (e: Exception) {
|
||||||
|
Log.w(TAG, "retry play() failed: ${e.message}")
|
||||||
|
}
|
||||||
|
}
|
||||||
if (stalledCount > 40) {
|
if (stalledCount > 40) {
|
||||||
Log.w(TAG, "playback stalled at $pos/$totalFrames — give up")
|
Log.w(TAG, "playback stalled at $pos/$totalFrames — give up")
|
||||||
break
|
break
|
||||||
@@ -253,6 +283,17 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
|
|||||||
} catch (_: Exception) {}
|
} catch (_: Exception) {}
|
||||||
try { t.stop() } catch (_: Exception) {}
|
try { t.stop() } catch (_: Exception) {}
|
||||||
try { t.release() } catch (_: Exception) {}
|
try { t.release() } catch (_: Exception) {}
|
||||||
|
// RN-Event: AudioTrack ist wirklich durch (alle Samples gespielt).
|
||||||
|
// JS released erst JETZT den AudioFocus — sonst spielt Spotify
|
||||||
|
// beim end()-Cap waehrend ARIA noch redet (15s+ je nach Buffer).
|
||||||
|
try {
|
||||||
|
val params = Arguments.createMap()
|
||||||
|
reactApplicationContext
|
||||||
|
.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
|
||||||
|
.emit("PcmPlaybackFinished", params)
|
||||||
|
} catch (e: Exception) {
|
||||||
|
Log.w(TAG, "PlaybackFinished emit failed: ${e.message}")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}, "PcmStreamWriter").apply { start() }
|
}, "PcmStreamWriter").apply { start() }
|
||||||
|
|
||||||
@@ -309,6 +350,9 @@ class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContex
|
|||||||
promise.resolve(true)
|
promise.resolve(true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ReactMethod fun addListener(eventName: String) {}
|
||||||
|
@ReactMethod fun removeListeners(count: Int) {}
|
||||||
|
|
||||||
private fun stopInternal() {
|
private fun stopInternal() {
|
||||||
writerShouldStop = true
|
writerShouldStop = true
|
||||||
endRequested = true
|
endRequested = true
|
||||||
|
|||||||
Binary file not shown.
@@ -1,4 +1,8 @@
|
|||||||
<?xml version="1.0" encoding="utf-8"?>
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
<paths>
|
<paths>
|
||||||
<cache-path name="cache" path="." />
|
<cache-path name="cache" path="." />
|
||||||
|
<files-path name="files" path="." />
|
||||||
|
<external-path name="external" path="." />
|
||||||
|
<external-files-path name="external_files" path="." />
|
||||||
|
<external-cache-path name="external_cache" path="." />
|
||||||
</paths>
|
</paths>
|
||||||
|
|||||||
+18
-17
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "aria-cockpit",
|
"name": "aria-cockpit",
|
||||||
"version": "0.0.7.2",
|
"version": "0.1.2.6",
|
||||||
"private": true,
|
"private": true,
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"android": "react-native run-android",
|
"android": "react-native run-android",
|
||||||
@@ -10,31 +10,32 @@
|
|||||||
"build:apk": "cd android && ./gradlew assembleRelease"
|
"build:apk": "cd android && ./gradlew assembleRelease"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
|
"@react-native-async-storage/async-storage": "^1.21.0",
|
||||||
|
"@react-native-community/geolocation": "^3.2.1",
|
||||||
|
"@react-navigation/bottom-tabs": "^6.5.11",
|
||||||
|
"@react-navigation/native": "^6.1.9",
|
||||||
"react": "18.2.0",
|
"react": "18.2.0",
|
||||||
"react-native": "0.73.4",
|
"react-native": "0.73.4",
|
||||||
"@react-navigation/native": "^6.1.9",
|
"react-native-audio-recorder-player": "^3.6.7",
|
||||||
"@react-navigation/bottom-tabs": "^6.5.11",
|
"react-native-camera-kit": "^13.0.0",
|
||||||
"react-native-screens": "3.27.0",
|
|
||||||
"react-native-safe-area-context": "^4.8.2",
|
|
||||||
"react-native-document-picker": "^9.1.1",
|
"react-native-document-picker": "^9.1.1",
|
||||||
"react-native-sound": "^0.11.2",
|
"react-native-fs": "^2.20.0",
|
||||||
"@react-native-community/geolocation": "^3.2.1",
|
|
||||||
"react-native-image-picker": "^7.1.0",
|
"react-native-image-picker": "^7.1.0",
|
||||||
"react-native-permissions": "^4.1.4",
|
"react-native-permissions": "^4.1.4",
|
||||||
"react-native-camera-kit": "^13.0.0",
|
"react-native-safe-area-context": "^4.8.2",
|
||||||
"@react-native-async-storage/async-storage": "^1.21.0",
|
"react-native-screens": "3.27.0",
|
||||||
"react-native-fs": "^2.20.0",
|
"react-native-sound": "^0.11.2",
|
||||||
"react-native-audio-recorder-player": "^3.6.7"
|
"react-native-svg": "^14.1.0"
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"typescript": "^5.3.3",
|
"@react-native/eslint-config": "^0.73.2",
|
||||||
|
"@react-native/metro-config": "^0.73.5",
|
||||||
|
"@react-native/typescript-config": "^0.73.1",
|
||||||
|
"@types/jest": "^29.5.11",
|
||||||
"@types/react": "^18.2.48",
|
"@types/react": "^18.2.48",
|
||||||
"@types/react-native": "^0.73.0",
|
"@types/react-native": "^0.73.0",
|
||||||
"@react-native/eslint-config": "^0.73.2",
|
|
||||||
"@react-native/typescript-config": "^0.73.1",
|
|
||||||
"@react-native/metro-config": "^0.73.5",
|
|
||||||
"metro-react-native-babel-preset": "^0.77.0",
|
|
||||||
"jest": "^29.7.0",
|
"jest": "^29.7.0",
|
||||||
"@types/jest": "^29.5.11"
|
"metro-react-native-babel-preset": "^0.77.0",
|
||||||
|
"typescript": "^5.3.3"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
Binary file not shown.
@@ -1,104 +1,87 @@
|
|||||||
/**
|
/**
|
||||||
* MessageText — rendert Chat-Text mit Auto-Linkifizierung:
|
* MessageText — selektierbarer Chat-Text mit Android-Auto-Linkifizierung,
|
||||||
* - http(s)://... → tippbar, oeffnet im Browser
|
* plus Inline-Image-Rendering wenn der Text Bild-URLs enthaelt.
|
||||||
* - mailto: oder plain E-Mail → tippbar, oeffnet Mail-App
|
|
||||||
* - Telefonnummern → tippbar, oeffnet Android-Dialer
|
|
||||||
*
|
*
|
||||||
* Text ist durchgaengig markierbar/kopierbar (selectable).
|
* - Markdown-Syntax `` und plain `https://...image.png` werden
|
||||||
|
* erkannt — die URL bleibt im Text sichtbar (klickbar via Linkify),
|
||||||
|
* zusaetzlich wird das Bild als <Image> oder <SvgUri> drunter gerendert.
|
||||||
|
* - Wir nutzen Androids dataDetectorType="all" (System macht Phone/URL/Email
|
||||||
|
* automatisch klickbar) und ein einzelnes <Text selectable> ohne nested
|
||||||
|
* <Text> mit eigenem onPress — Nested Text mit onPress fing die Long-Press-
|
||||||
|
* Geste ab, damit war Markieren+Kopieren defekt.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import React from 'react';
|
import React, { useEffect, useState } from 'react';
|
||||||
import { Text, Linking, TextStyle, StyleProp } from 'react-native';
|
import { View, Text, Image, TextStyle, StyleProp } from 'react-native';
|
||||||
|
import { SvgUri } from 'react-native-svg';
|
||||||
// Regex kombiniert URL | Email | Telefonnummer.
|
|
||||||
// Gruppenreihenfolge ist wichtig fuer die Erkennung unten.
|
|
||||||
//
|
|
||||||
// URL: http://... oder https://... bis zum ersten Whitespace / Anfuehrungszeichen.
|
|
||||||
// Email: simpler Standard-Match (kein RFC-kompatibel aber gut genug).
|
|
||||||
// Telefon: internationale Form (+49..., 0049..., 0176...), darf Leerzeichen
|
|
||||||
// / Bindestriche / Schraegstriche / Klammern enthalten, mindestens 7
|
|
||||||
// Ziffern insgesamt. Vermeidet banale Zahlen (Uhrzeiten, Datum).
|
|
||||||
const LINK_REGEX = new RegExp(
|
|
||||||
'(https?:\\/\\/[^\\s<>"]+)' + // 1: URL
|
|
||||||
'|([A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,})' + // 2: Email
|
|
||||||
'|((?:\\+|00)\\d[\\d\\s()\\-\\/]{6,}\\d|0\\d{2,4}[\\s\\/\\-]?[\\d\\s\\-\\/]{5,}\\d)', // 3: Telefon
|
|
||||||
'g',
|
|
||||||
);
|
|
||||||
|
|
||||||
const LINK_STYLE = { color: '#0096FF', textDecorationLine: 'underline' } as TextStyle;
|
|
||||||
|
|
||||||
interface Segment {
|
|
||||||
text: string;
|
|
||||||
kind: 'text' | 'url' | 'email' | 'phone';
|
|
||||||
}
|
|
||||||
|
|
||||||
function tokenize(raw: string): Segment[] {
|
|
||||||
const out: Segment[] = [];
|
|
||||||
let lastEnd = 0;
|
|
||||||
LINK_REGEX.lastIndex = 0;
|
|
||||||
let m: RegExpExecArray | null;
|
|
||||||
while ((m = LINK_REGEX.exec(raw)) !== null) {
|
|
||||||
if (m.index > lastEnd) {
|
|
||||||
out.push({ text: raw.slice(lastEnd, m.index), kind: 'text' });
|
|
||||||
}
|
|
||||||
if (m[1]) out.push({ text: m[1], kind: 'url' });
|
|
||||||
else if (m[2]) out.push({ text: m[2], kind: 'email' });
|
|
||||||
else if (m[3]) out.push({ text: m[3], kind: 'phone' });
|
|
||||||
lastEnd = LINK_REGEX.lastIndex;
|
|
||||||
}
|
|
||||||
if (lastEnd < raw.length) out.push({ text: raw.slice(lastEnd), kind: 'text' });
|
|
||||||
return out;
|
|
||||||
}
|
|
||||||
|
|
||||||
function onPress(seg: Segment) {
|
|
||||||
try {
|
|
||||||
if (seg.kind === 'url') {
|
|
||||||
Linking.openURL(seg.text);
|
|
||||||
} else if (seg.kind === 'email') {
|
|
||||||
Linking.openURL(`mailto:${seg.text}`);
|
|
||||||
} else if (seg.kind === 'phone') {
|
|
||||||
// Android-Dialer erwartet tel:-Schema ohne Leerzeichen/Bindestriche
|
|
||||||
const clean = seg.text.replace(/[\s\-\/()]/g, '');
|
|
||||||
Linking.openURL(`tel:${clean}`);
|
|
||||||
}
|
|
||||||
} catch {}
|
|
||||||
}
|
|
||||||
|
|
||||||
interface Props {
|
interface Props {
|
||||||
text: string;
|
text: string;
|
||||||
style?: StyleProp<TextStyle>;
|
style?: StyleProp<TextStyle>;
|
||||||
}
|
}
|
||||||
|
|
||||||
const MessageText: React.FC<Props> = ({ text, style }) => {
|
// Bild-URL-Pattern: http(s)://... endend auf gaengige Bild-Endungen.
|
||||||
const segments = React.useMemo(() => tokenize(text), [text]);
|
const IMG_URL_RE = /https?:\/\/[^\s)<"']+\.(?:jpe?g|png|gif|webp|bmp|ico|svg)(?:\?[^\s)<"']*)?/gi;
|
||||||
|
|
||||||
|
function extractImageUrls(text: string): string[] {
|
||||||
|
const urls = new Set<string>();
|
||||||
|
const matches = text.match(IMG_URL_RE);
|
||||||
|
if (matches) matches.forEach(u => urls.add(u));
|
||||||
|
return Array.from(urls);
|
||||||
|
}
|
||||||
|
|
||||||
|
const SVG_RE = /\.svg(?:\?|$)/i;
|
||||||
|
|
||||||
|
/** Image mit dynamischer Aspect-Ratio aus echten Bilddimensionen.
|
||||||
|
* SVGs werden ueber react-native-svg gerendert (kein Image.getSize). */
|
||||||
|
const InlineImage: React.FC<{ uri: string }> = ({ uri }) => {
|
||||||
|
const isSvg = SVG_RE.test(uri);
|
||||||
|
const [aspectRatio, setAspectRatio] = useState<number>(1);
|
||||||
|
const [failed, setFailed] = useState(false);
|
||||||
|
useEffect(() => {
|
||||||
|
if (isSvg) return; // Image.getSize geht fuer SVG nicht
|
||||||
|
let cancelled = false;
|
||||||
|
Image.getSize(
|
||||||
|
uri,
|
||||||
|
(w, h) => { if (!cancelled && w > 0 && h > 0) setAspectRatio(Math.max(0.5, Math.min(2.5, w / h))); },
|
||||||
|
() => { if (!cancelled) setFailed(true); },
|
||||||
|
);
|
||||||
|
return () => { cancelled = true; };
|
||||||
|
}, [uri, isSvg]);
|
||||||
|
if (failed) return null;
|
||||||
|
if (isSvg) {
|
||||||
return (
|
return (
|
||||||
<Text
|
<View style={{ marginTop: 8, width: 260, height: 260, backgroundColor: '#0D0D1A', borderRadius: 8, alignItems: 'center', justifyContent: 'center' }}>
|
||||||
style={style}
|
<SvgUri uri={uri} width="100%" height="100%" onError={() => setFailed(true)} />
|
||||||
selectable
|
</View>
|
||||||
// dataDetectorType ist Android-only und macht Phone/URL/Email zusaetzlich
|
);
|
||||||
// ueber System-Detection klickbar — als Fallback falls unsere Regex-
|
|
||||||
// Tokens nicht passen.
|
|
||||||
dataDetectorType="all"
|
|
||||||
>
|
|
||||||
{segments.map((seg, i) => {
|
|
||||||
if (seg.kind === 'text') {
|
|
||||||
return <Text key={i} selectable>{seg.text}</Text>;
|
|
||||||
}
|
}
|
||||||
return (
|
return (
|
||||||
<Text
|
<Image
|
||||||
key={i}
|
source={{ uri }}
|
||||||
selectable
|
style={{ width: 260, aspectRatio, borderRadius: 8, marginTop: 8, backgroundColor: '#0D0D1A' }}
|
||||||
style={LINK_STYLE}
|
resizeMode="cover"
|
||||||
onPress={() => onPress(seg)}
|
onError={() => setFailed(true)}
|
||||||
// Long-Press soll an den Parent durch fuer Selection
|
/>
|
||||||
onLongPress={undefined}
|
);
|
||||||
suppressHighlighting={false}
|
};
|
||||||
>
|
|
||||||
{seg.text}
|
const MessageText: React.FC<Props> = ({ text, style }) => {
|
||||||
|
const imageUrls = extractImageUrls(text || '');
|
||||||
|
if (imageUrls.length === 0) {
|
||||||
|
return (
|
||||||
|
<Text style={style} selectable dataDetectorType="all">
|
||||||
|
{text}
|
||||||
</Text>
|
</Text>
|
||||||
);
|
);
|
||||||
})}
|
}
|
||||||
|
return (
|
||||||
|
<View>
|
||||||
|
<Text style={style} selectable dataDetectorType="all">
|
||||||
|
{text}
|
||||||
</Text>
|
</Text>
|
||||||
|
{imageUrls.map(u => <InlineImage key={u} uri={u} />)}
|
||||||
|
</View>
|
||||||
);
|
);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -44,7 +44,6 @@ const VoiceButton: React.FC<VoiceButtonProps> = ({
|
|||||||
const [meterDb, setMeterDb] = useState(-160);
|
const [meterDb, setMeterDb] = useState(-160);
|
||||||
const pulseAnim = useRef(new Animated.Value(1)).current;
|
const pulseAnim = useRef(new Animated.Value(1)).current;
|
||||||
const durationTimer = useRef<ReturnType<typeof setInterval> | null>(null);
|
const durationTimer = useRef<ReturnType<typeof setInterval> | null>(null);
|
||||||
const isLongPress = useRef(false);
|
|
||||||
|
|
||||||
// Puls-Animation starten/stoppen
|
// Puls-Animation starten/stoppen
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
@@ -117,31 +116,10 @@ const VoiceButton: React.FC<VoiceButtonProps> = ({
|
|||||||
if (disabled || isRecording) return;
|
if (disabled || isRecording) return;
|
||||||
const started = await audioService.startRecording(true); // autoStop = true
|
const started = await audioService.startRecording(true); // autoStop = true
|
||||||
if (started) {
|
if (started) {
|
||||||
isLongPress.current = false;
|
|
||||||
setIsRecording(true);
|
setIsRecording(true);
|
||||||
}
|
}
|
||||||
}, [disabled, isRecording]);
|
}, [disabled, isRecording]);
|
||||||
|
|
||||||
// Push-to-Talk: Lang druecken
|
|
||||||
const handlePressIn = async () => {
|
|
||||||
if (disabled || isRecording) return;
|
|
||||||
isLongPress.current = true;
|
|
||||||
const started = await audioService.startRecording(false); // kein autoStop
|
|
||||||
if (started) {
|
|
||||||
setIsRecording(true);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const handlePressOut = async () => {
|
|
||||||
if (!isRecording || !isLongPress.current) return;
|
|
||||||
isLongPress.current = false;
|
|
||||||
setIsRecording(false);
|
|
||||||
const result = await audioService.stopRecording();
|
|
||||||
if (result && result.durationMs > 300) {
|
|
||||||
onRecordingComplete(result);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Tap-to-Talk: Einmal tippen startet mit Auto-Stop.
|
// Tap-to-Talk: Einmal tippen startet mit Auto-Stop.
|
||||||
// Guard gegen Doppel-Tap während asyncer Start/Stop.
|
// Guard gegen Doppel-Tap während asyncer Start/Stop.
|
||||||
const tapBusy = useRef(false);
|
const tapBusy = useRef(false);
|
||||||
@@ -162,7 +140,6 @@ const VoiceButton: React.FC<VoiceButtonProps> = ({
|
|||||||
// Aufnahme mit Auto-Stop starten
|
// Aufnahme mit Auto-Stop starten
|
||||||
const started = await audioService.startRecording(true);
|
const started = await audioService.startRecording(true);
|
||||||
if (started) {
|
if (started) {
|
||||||
isLongPress.current = false;
|
|
||||||
setIsRecording(true);
|
setIsRecording(true);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -201,10 +178,6 @@ const VoiceButton: React.FC<VoiceButtonProps> = ({
|
|||||||
isRecording && styles.buttonOuterRecording,
|
isRecording && styles.buttonOuterRecording,
|
||||||
{ transform: [{ scale: pulseAnim }] },
|
{ transform: [{ scale: pulseAnim }] },
|
||||||
]}
|
]}
|
||||||
onStartShouldSetResponder={() => true}
|
|
||||||
onResponderGrant={handlePressIn}
|
|
||||||
onResponderRelease={handlePressOut}
|
|
||||||
onResponderTerminate={handlePressOut}
|
|
||||||
>
|
>
|
||||||
<TouchableOpacity
|
<TouchableOpacity
|
||||||
activeOpacity={0.8}
|
activeOpacity={0.8}
|
||||||
|
|||||||
@@ -0,0 +1,224 @@
|
|||||||
|
/**
|
||||||
|
* ZoomableImage — Pinch-to-Zoom + Pan fuers Vollbild-Modal.
|
||||||
|
*
|
||||||
|
* Reine RN-Implementation, ohne react-native-gesture-handler.
|
||||||
|
*
|
||||||
|
* - 2 Finger: Pinch (Zoom 1x..5x) + simultaner Pan via Focal-Punkt
|
||||||
|
* - 1 Finger: Pan wenn schon gezoomt (>1.02x)
|
||||||
|
* - Doppel-Tap (<300ms zw. zwei Single-Taps): Toggle 1x ↔ 2.5x
|
||||||
|
*
|
||||||
|
* Implementierungs-Hinweise zur alten Version (warum's nicht ging):
|
||||||
|
* - `gestureState.numberActiveTouches` ist nicht zuverlaessig direkt
|
||||||
|
* nach onPanResponderGrant. Wir lesen Finger-Anzahl jetzt
|
||||||
|
* ausschliesslich aus `e.nativeEvent.touches.length`.
|
||||||
|
* - Beim Wechsel von 2 → 1 Fingern bleib die Pinch-Referenz haengen.
|
||||||
|
* Jetzt: bei jedem Finger-Wechsel re-snapshotten wir die Geste.
|
||||||
|
* - Animated.Image bekommt jetzt pointerEvents="none" damit der View
|
||||||
|
* GARANTIERT die Touches abbekommt.
|
||||||
|
* - useNativeDriver ist bewusst AUS — sonst koennen wir setValue()
|
||||||
|
* nicht synchron mit dem Pan-Responder zusammen nutzen.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import React, { useMemo, useRef } from 'react';
|
||||||
|
import {
|
||||||
|
Animated,
|
||||||
|
PanResponder,
|
||||||
|
GestureResponderEvent,
|
||||||
|
ImageStyle,
|
||||||
|
StyleProp,
|
||||||
|
StyleSheet,
|
||||||
|
View,
|
||||||
|
} from 'react-native';
|
||||||
|
|
||||||
|
interface Props {
|
||||||
|
uri: string;
|
||||||
|
containerWidth: number;
|
||||||
|
containerHeight: number;
|
||||||
|
style?: StyleProp<ImageStyle>;
|
||||||
|
}
|
||||||
|
|
||||||
|
const MIN_SCALE = 1;
|
||||||
|
const MAX_SCALE = 5;
|
||||||
|
const DOUBLE_TAP_MS = 300;
|
||||||
|
const DOUBLE_TAP_DIST = 30; // Bewegung max. damit ein Tap als Tap gilt
|
||||||
|
const PAN_SLOP_AT_SCALE_1 = 4; // Mikro-Movement nicht als Pan werten
|
||||||
|
|
||||||
|
const ZoomableImage: React.FC<Props> = ({ uri, containerWidth, containerHeight, style }) => {
|
||||||
|
// Animated-Werte fuer die Render-Transformation
|
||||||
|
const scale = useRef(new Animated.Value(1)).current;
|
||||||
|
const tx = useRef(new Animated.Value(0)).current;
|
||||||
|
const ty = useRef(new Animated.Value(0)).current;
|
||||||
|
|
||||||
|
// Logische Zustaende — wir lesen Animated.Value nicht zurueck (waere async)
|
||||||
|
const view = useRef({ scale: 1, x: 0, y: 0 }).current;
|
||||||
|
|
||||||
|
// Geste-Snapshot: was war zu Beginn dieser Geste-Phase
|
||||||
|
const gesture = useRef({
|
||||||
|
fingers: 0, // aktuelle Finger-Anzahl
|
||||||
|
startScale: 1,
|
||||||
|
startX: 0,
|
||||||
|
startY: 0,
|
||||||
|
startDist: 0, // Pinch-Referenz-Distanz
|
||||||
|
startFocalX: 0,
|
||||||
|
startFocalY: 0,
|
||||||
|
movedSinceTouch: 0, // fuer Tap-Erkennung
|
||||||
|
touchStartedAt: 0,
|
||||||
|
touchStartX: 0,
|
||||||
|
touchStartY: 0,
|
||||||
|
}).current;
|
||||||
|
|
||||||
|
// Doppel-Tap
|
||||||
|
const lastTap = useRef({ at: 0, x: 0, y: 0 });
|
||||||
|
|
||||||
|
const clamp = (v: number, lo: number, hi: number) => Math.max(lo, Math.min(hi, v));
|
||||||
|
|
||||||
|
const applyClamped = (s: number, x: number, y: number) => {
|
||||||
|
const ns = clamp(s, MIN_SCALE, MAX_SCALE);
|
||||||
|
// Translation auf das verfuegbare Volumen begrenzen
|
||||||
|
const maxX = Math.max(0, (containerWidth * ns - containerWidth) / 2);
|
||||||
|
const maxY = Math.max(0, (containerHeight * ns - containerHeight) / 2);
|
||||||
|
const nx = clamp(x, -maxX, maxX);
|
||||||
|
const ny = clamp(y, -maxY, maxY);
|
||||||
|
view.scale = ns;
|
||||||
|
view.x = nx;
|
||||||
|
view.y = ny;
|
||||||
|
scale.setValue(ns);
|
||||||
|
tx.setValue(nx);
|
||||||
|
ty.setValue(ny);
|
||||||
|
};
|
||||||
|
|
||||||
|
const distance = (touches: any[]) =>
|
||||||
|
Math.hypot(touches[0].pageX - touches[1].pageX, touches[0].pageY - touches[1].pageY);
|
||||||
|
|
||||||
|
const focal = (touches: any[]) => ({
|
||||||
|
x: (touches[0].pageX + touches[1].pageX) / 2,
|
||||||
|
y: (touches[0].pageY + touches[1].pageY) / 2,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Snapshot vor jedem Phasenwechsel (1↔2 Finger) — verhindert Spruenge
|
||||||
|
const snapshot = (touches: any[]) => {
|
||||||
|
gesture.startScale = view.scale;
|
||||||
|
gesture.startX = view.x;
|
||||||
|
gesture.startY = view.y;
|
||||||
|
if (touches.length >= 2) {
|
||||||
|
gesture.startDist = distance(touches);
|
||||||
|
const f = focal(touches);
|
||||||
|
gesture.startFocalX = f.x;
|
||||||
|
gesture.startFocalY = f.y;
|
||||||
|
} else if (touches.length === 1) {
|
||||||
|
gesture.startDist = 0;
|
||||||
|
gesture.startFocalX = touches[0].pageX;
|
||||||
|
gesture.startFocalY = touches[0].pageY;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const responder = useMemo(
|
||||||
|
() =>
|
||||||
|
PanResponder.create({
|
||||||
|
onStartShouldSetPanResponder: () => true,
|
||||||
|
onStartShouldSetPanResponderCapture: () => true,
|
||||||
|
onMoveShouldSetPanResponder: () => true,
|
||||||
|
onMoveShouldSetPanResponderCapture: () => true,
|
||||||
|
|
||||||
|
onPanResponderGrant: (e: GestureResponderEvent) => {
|
||||||
|
const touches = e.nativeEvent.touches as any[];
|
||||||
|
gesture.fingers = touches.length;
|
||||||
|
gesture.movedSinceTouch = 0;
|
||||||
|
gesture.touchStartedAt = Date.now();
|
||||||
|
gesture.touchStartX = touches[0]?.pageX ?? 0;
|
||||||
|
gesture.touchStartY = touches[0]?.pageY ?? 0;
|
||||||
|
snapshot(touches);
|
||||||
|
},
|
||||||
|
|
||||||
|
onPanResponderMove: (e: GestureResponderEvent, _gs) => {
|
||||||
|
const touches = e.nativeEvent.touches as any[];
|
||||||
|
|
||||||
|
// Phasenwechsel? → Re-Snapshot, damit nicht gesprungen wird
|
||||||
|
if (touches.length !== gesture.fingers) {
|
||||||
|
gesture.fingers = touches.length;
|
||||||
|
snapshot(touches);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
gesture.movedSinceTouch += 1;
|
||||||
|
|
||||||
|
if (touches.length >= 2) {
|
||||||
|
// Pinch + Pan via Focal
|
||||||
|
const d = distance(touches);
|
||||||
|
if (gesture.startDist === 0) {
|
||||||
|
// Sicherheitsnetz falls Snapshot gemissed wurde
|
||||||
|
snapshot(touches);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const factor = d / gesture.startDist;
|
||||||
|
const f = focal(touches);
|
||||||
|
const newScale = clamp(gesture.startScale * factor, MIN_SCALE, MAX_SCALE);
|
||||||
|
// Focal-basierter Pan: zoomt um den Mittelpunkt der zwei Finger
|
||||||
|
const newX = gesture.startX + (f.x - gesture.startFocalX);
|
||||||
|
const newY = gesture.startY + (f.y - gesture.startFocalY);
|
||||||
|
applyClamped(newScale, newX, newY);
|
||||||
|
} else if (touches.length === 1 && view.scale > 1.02) {
|
||||||
|
const dx = touches[0].pageX - gesture.startFocalX;
|
||||||
|
const dy = touches[0].pageY - gesture.startFocalY;
|
||||||
|
if (Math.abs(dx) < PAN_SLOP_AT_SCALE_1 && Math.abs(dy) < PAN_SLOP_AT_SCALE_1) return;
|
||||||
|
applyClamped(view.scale, gesture.startX + dx, gesture.startY + dy);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
|
onPanResponderRelease: (e: GestureResponderEvent) => {
|
||||||
|
const elapsed = Date.now() - gesture.touchStartedAt;
|
||||||
|
const dx = (e.nativeEvent.changedTouches?.[0]?.pageX ?? gesture.touchStartX) - gesture.touchStartX;
|
||||||
|
const dy = (e.nativeEvent.changedTouches?.[0]?.pageY ?? gesture.touchStartY) - gesture.touchStartY;
|
||||||
|
const wasTap =
|
||||||
|
elapsed < 280 &&
|
||||||
|
Math.abs(dx) < DOUBLE_TAP_DIST &&
|
||||||
|
Math.abs(dy) < DOUBLE_TAP_DIST;
|
||||||
|
if (wasTap) {
|
||||||
|
const now = Date.now();
|
||||||
|
if (now - lastTap.current.at < DOUBLE_TAP_MS) {
|
||||||
|
// Doppel-Tap → Zoom-Toggle
|
||||||
|
if (view.scale > 1.1) {
|
||||||
|
applyClamped(1, 0, 0);
|
||||||
|
} else {
|
||||||
|
applyClamped(2.5, 0, 0);
|
||||||
|
}
|
||||||
|
lastTap.current = { at: 0, x: 0, y: 0 };
|
||||||
|
} else {
|
||||||
|
lastTap.current = { at: now, x: gesture.touchStartX, y: gesture.touchStartY };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
gesture.fingers = 0;
|
||||||
|
gesture.startDist = 0;
|
||||||
|
},
|
||||||
|
|
||||||
|
onPanResponderTerminate: () => {
|
||||||
|
gesture.fingers = 0;
|
||||||
|
gesture.startDist = 0;
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
[],
|
||||||
|
);
|
||||||
|
|
||||||
|
return (
|
||||||
|
<View
|
||||||
|
style={StyleSheet.absoluteFill}
|
||||||
|
collapsable={false}
|
||||||
|
{...responder.panHandlers}
|
||||||
|
>
|
||||||
|
<Animated.View pointerEvents="none" style={StyleSheet.absoluteFill}>
|
||||||
|
<Animated.Image
|
||||||
|
source={{ uri }}
|
||||||
|
style={[
|
||||||
|
style,
|
||||||
|
{
|
||||||
|
transform: [{ translateX: tx }, { translateY: ty }, { scale }],
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
resizeMode="contain"
|
||||||
|
/>
|
||||||
|
</Animated.View>
|
||||||
|
</View>
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
export default ZoomableImage;
|
||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
+512
-31
@@ -6,10 +6,11 @@
|
|||||||
* Nutzt react-native-audio-recorder-player fuer Aufnahme.
|
* Nutzt react-native-audio-recorder-player fuer Aufnahme.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import { Platform, PermissionsAndroid, NativeModules } from 'react-native';
|
import { Platform, PermissionsAndroid, NativeModules, ToastAndroid, NativeEventEmitter } from 'react-native';
|
||||||
import Sound from 'react-native-sound';
|
import Sound from 'react-native-sound';
|
||||||
import RNFS from 'react-native-fs';
|
import RNFS from 'react-native-fs';
|
||||||
import AsyncStorage from '@react-native-async-storage/async-storage';
|
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||||
|
import { acquireBackgroundAudio, releaseBackgroundAudio, stopBackgroundAudio } from './backgroundAudio';
|
||||||
import AudioRecorderPlayer, {
|
import AudioRecorderPlayer, {
|
||||||
AudioEncoderAndroidType,
|
AudioEncoderAndroidType,
|
||||||
AudioSourceAndroidType,
|
AudioSourceAndroidType,
|
||||||
@@ -40,6 +41,8 @@ const { AudioFocus, PcmStreamPlayer } = NativeModules as {
|
|||||||
requestDuck: () => Promise<boolean>;
|
requestDuck: () => Promise<boolean>;
|
||||||
requestExclusive: () => Promise<boolean>;
|
requestExclusive: () => Promise<boolean>;
|
||||||
release: () => Promise<boolean>;
|
release: () => Promise<boolean>;
|
||||||
|
kickReleaseMedia: () => Promise<boolean>;
|
||||||
|
getMode?: () => Promise<number>;
|
||||||
};
|
};
|
||||||
PcmStreamPlayer?: {
|
PcmStreamPlayer?: {
|
||||||
start: (sampleRate: number, channels: number, prerollSeconds: number) => Promise<boolean>;
|
start: (sampleRate: number, channels: number, prerollSeconds: number) => Promise<boolean>;
|
||||||
@@ -72,11 +75,41 @@ const AUDIO_SAMPLE_RATE = 16000;
|
|||||||
const AUDIO_CHANNELS = 1;
|
const AUDIO_CHANNELS = 1;
|
||||||
const AUDIO_ENCODING = 'audio/wav';
|
const AUDIO_ENCODING = 'audio/wav';
|
||||||
|
|
||||||
// VAD (Voice Activity Detection) — Stille-Erkennung
|
// VAD (Voice Activity Detection) — Stille-Erkennung.
|
||||||
const VAD_SILENCE_THRESHOLD_DB = -45; // dB unter dem als "Stille" gilt
|
// Fallback-Werte falls die adaptive Baseline-Messung fehlschlaegt (z.B. weil
|
||||||
const VAD_SPEECH_THRESHOLD_DB = -28; // dB ueber dem als "Sprache" gilt (Sprach-Gate) — hoeher = weniger Umgebungsgeraeusche
|
// das Mikro keine metering-Updates liefert). Adaptive Werte werden zur
|
||||||
|
// Laufzeit aus den ersten BASELINE_SAMPLES gemessen und auf baseline+offset
|
||||||
|
// gesetzt — funktioniert in lauten wie leisen Umgebungen.
|
||||||
|
const VAD_SILENCE_FALLBACK_DB = -38; // Fallback Stille-Schwelle
|
||||||
|
const VAD_SPEECH_FALLBACK_DB = -22; // Fallback Sprach-Schwelle
|
||||||
|
const VAD_SILENCE_OFFSET_DB = 6; // Sprache = Baseline + 6dB
|
||||||
|
const VAD_SPEECH_OFFSET_DB = 12; // sicheres Speech = Baseline + 12dB
|
||||||
|
const VAD_BASELINE_SAMPLES = 5; // 5 × 100ms = 500ms Baseline
|
||||||
const VAD_SPEECH_MIN_MS = 500; // ms Sprache bevor Aufnahme zaehlt — laenger = keine Huestler/Klopfer mehr
|
const VAD_SPEECH_MIN_MS = 500; // ms Sprache bevor Aufnahme zaehlt — laenger = keine Huestler/Klopfer mehr
|
||||||
|
|
||||||
|
// Override fuer die Stille-Schwelle — wenn gesetzt, wird die adaptive Baseline
|
||||||
|
// ignoriert. Nuetzlich wenn die adaptive Logik in spezifischen Umgebungen
|
||||||
|
// nicht zuverlaessig greift. Range -55..-15 dB. Speech-Schwelle wird auf
|
||||||
|
// override+10 dB gesetzt (Speech muss klar lauter als Stille sein).
|
||||||
|
export const VAD_SILENCE_DB_DEFAULT = -38; // wenn User Manuell-Modus waehlt
|
||||||
|
export const VAD_SILENCE_DB_MIN = -85; // extrem empfindlich, praktisch alles gilt als Sprache
|
||||||
|
export const VAD_SILENCE_DB_MAX = -15; // sehr unempfindlich, nur lautes Reden gilt
|
||||||
|
export const VAD_SILENCE_DB_OVERRIDE_KEY = 'aria_vad_silence_db_override';
|
||||||
|
|
||||||
|
/** Liefert den manuellen Override-Wert oder null wenn "automatisch". */
|
||||||
|
export async function loadVadSilenceDbOverride(): Promise<number | null> {
|
||||||
|
try {
|
||||||
|
const raw = await AsyncStorage.getItem(VAD_SILENCE_DB_OVERRIDE_KEY);
|
||||||
|
if (raw == null || raw === '') return null;
|
||||||
|
const n = parseFloat(raw);
|
||||||
|
if (!isFinite(n)) return null;
|
||||||
|
if (n < VAD_SILENCE_DB_MIN || n > VAD_SILENCE_DB_MAX) return null;
|
||||||
|
return n;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// VAD-Stille (in Sekunden) — wie lange Sprechpause toleriert wird, bevor
|
// VAD-Stille (in Sekunden) — wie lange Sprechpause toleriert wird, bevor
|
||||||
// die Aufnahme automatisch beendet wird. Einstellbar in den App-Settings.
|
// die Aufnahme automatisch beendet wird. Einstellbar in den App-Settings.
|
||||||
export const VAD_SILENCE_DEFAULT_SEC = 2.8;
|
export const VAD_SILENCE_DEFAULT_SEC = 2.8;
|
||||||
@@ -138,7 +171,24 @@ async function loadVadSilenceMs(): Promise<number> {
|
|||||||
|
|
||||||
// Max-Dauer einer Aufnahme (Notbremse gegen Runaway-Loops). Auf 2 Minuten
|
// Max-Dauer einer Aufnahme (Notbremse gegen Runaway-Loops). Auf 2 Minuten
|
||||||
// hochgezogen damit auch laengere Erklaerungen durchgehen.
|
// hochgezogen damit auch laengere Erklaerungen durchgehen.
|
||||||
const MAX_RECORDING_MS = 120000;
|
// Default 5 Minuten — konfigurierbar in den App-Settings (1-30 Minuten).
|
||||||
|
export const MAX_RECORDING_DEFAULT_SEC = 300;
|
||||||
|
export const MAX_RECORDING_MIN_SEC = 60;
|
||||||
|
export const MAX_RECORDING_MAX_SEC = 1800;
|
||||||
|
export const MAX_RECORDING_STORAGE_KEY = 'aria_max_recording_sec';
|
||||||
|
|
||||||
|
export async function loadMaxRecordingMs(): Promise<number> {
|
||||||
|
try {
|
||||||
|
const raw = await AsyncStorage.getItem(MAX_RECORDING_STORAGE_KEY);
|
||||||
|
if (raw != null) {
|
||||||
|
const n = parseFloat(raw);
|
||||||
|
if (isFinite(n) && n >= MAX_RECORDING_MIN_SEC && n <= MAX_RECORDING_MAX_SEC) {
|
||||||
|
return Math.round(n * 1000);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch {}
|
||||||
|
return MAX_RECORDING_DEFAULT_SEC * 1000;
|
||||||
|
}
|
||||||
|
|
||||||
// Pre-Roll: Wie lange Audio im AudioTrack-Buffer liegt bevor play() startet.
|
// Pre-Roll: Wie lange Audio im AudioTrack-Buffer liegt bevor play() startet.
|
||||||
// Einstellbar via Diagnostic/Settings (Key: aria_tts_preroll_sec).
|
// Einstellbar via Diagnostic/Settings (Key: aria_tts_preroll_sec).
|
||||||
@@ -212,10 +262,53 @@ class AudioService {
|
|||||||
// Latch damit der Silence-Callback pro Aufnahme genau einmal feuert
|
// Latch damit der Silence-Callback pro Aufnahme genau einmal feuert
|
||||||
private silenceFired: boolean = false;
|
private silenceFired: boolean = false;
|
||||||
private noSpeechTimer: ReturnType<typeof setTimeout> | null = null;
|
private noSpeechTimer: ReturnType<typeof setTimeout> | null = null;
|
||||||
|
// Adaptive Schwellen — werden in den ersten 500ms aus dem Mikro-Pegel
|
||||||
|
// gemessen. baseline = avg dB der ersten 5 Samples, dann:
|
||||||
|
// silence = baseline + VAD_SILENCE_OFFSET_DB (6dB ueber ambient)
|
||||||
|
// speech = baseline + VAD_SPEECH_OFFSET_DB (12dB ueber ambient = klares Reden)
|
||||||
|
// Funktioniert sowohl im stillen Buero als auch im lauten Cafe.
|
||||||
|
private vadBaselineSamples: number[] = [];
|
||||||
|
private vadAdaptiveSilenceDb: number = VAD_SILENCE_FALLBACK_DB;
|
||||||
|
private vadAdaptiveSpeechDb: number = VAD_SPEECH_FALLBACK_DB;
|
||||||
|
|
||||||
|
// Interruption-Tracking fuer Auto-Resume nach Anruf:
|
||||||
|
// - playbackStartTime: ms-Timestamp wenn AudioTrack tatsaechlich anfing
|
||||||
|
// abzuspielen (= _firePlaybackStarted)
|
||||||
|
// - currentPlaybackMsgId: welche Antwort lief gerade
|
||||||
|
// - pausedPosition / pausedMessageId: bei captureInterruption gemerkt
|
||||||
|
private playbackStartTime: number = 0;
|
||||||
|
private currentPlaybackMsgId: string = '';
|
||||||
|
private pausedPosition: number = 0; // Sekunden in der Audio-Datei
|
||||||
|
private pausedMessageId: string = '';
|
||||||
|
private resumeSound: Sound | null = null; // halten damit GC nicht zuschlaegt
|
||||||
|
// Leading-Silence wird im Native vor den Chunks geschrieben — beim
|
||||||
|
// Position-Berechnen vom playbackStarted abziehen
|
||||||
|
private readonly LEADING_SILENCE_SEC = 0.3;
|
||||||
|
|
||||||
constructor() {
|
constructor() {
|
||||||
this.recorder = new AudioRecorderPlayer();
|
this.recorder = new AudioRecorderPlayer();
|
||||||
this.recorder.setSubscriptionDuration(0.1); // 100ms Metering-Updates
|
this.recorder.setSubscriptionDuration(0.1); // 100ms Metering-Updates
|
||||||
|
// Native Event: AudioTrack hat alle Samples wirklich durchgespielt (nach
|
||||||
|
// dem finally{}-Block im Writer-Thread). ERST jetzt darf AudioFocus
|
||||||
|
// freigegeben werden — sonst spielt Spotify schon waehrend ARIA noch
|
||||||
|
// redet (PcmStreamPlayer.end() returnt mit 15s-Cap viel zu frueh).
|
||||||
|
if (PcmStreamPlayer) {
|
||||||
|
try {
|
||||||
|
const emitter = new NativeEventEmitter(NativeModules.PcmStreamPlayer as any);
|
||||||
|
emitter.addListener('PcmPlaybackFinished', () => {
|
||||||
|
console.log('[Audio] PcmPlaybackFinished — Focus jetzt freigeben');
|
||||||
|
this._releaseFocusDeferred();
|
||||||
|
});
|
||||||
|
} catch (err) {
|
||||||
|
console.warn('[Audio] PcmPlaybackFinished-Subscription fehlgeschlagen:', err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// App-Start: orphaned aria_tts_*.wav / aria_recording_*.mp4 aus dem Cache
|
||||||
|
// wegraeumen. Sammeln sich an wenn Sound mid-playback gestoppt wird (Anruf,
|
||||||
|
// Mute, Barge-In) — der completion-callback feuert dann nicht und die Datei
|
||||||
|
// bleibt liegen. 5min-Threshold damit gerade aktiv geschriebene Files sicher
|
||||||
|
// sind. cleanupOnStartup ist async, blockt den Constructor nicht.
|
||||||
|
this._cleanupStaleCacheFiles(5 * 60 * 1000).catch(() => {});
|
||||||
}
|
}
|
||||||
|
|
||||||
/** AudioFocus mit kleiner Verzoegerung freigeben — Spotify/YouTube
|
/** AudioFocus mit kleiner Verzoegerung freigeben — Spotify/YouTube
|
||||||
@@ -225,13 +318,19 @@ class AudioService {
|
|||||||
* unterdrueckt — der Focus bleibt fuer die ganze Konversation gehalten. */
|
* unterdrueckt — der Focus bleibt fuer die ganze Konversation gehalten. */
|
||||||
private _releaseFocusDeferred(): void {
|
private _releaseFocusDeferred(): void {
|
||||||
if (this._conversationFocusActive) {
|
if (this._conversationFocusActive) {
|
||||||
|
console.log('[Audio] _releaseFocusDeferred: Conversation aktiv → kein Release');
|
||||||
this._cancelDeferredFocusRelease();
|
this._cancelDeferredFocusRelease();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
this._cancelDeferredFocusRelease();
|
this._cancelDeferredFocusRelease();
|
||||||
|
console.log('[Audio] _releaseFocusDeferred: in %dms', this.FOCUS_RELEASE_DELAY_MS);
|
||||||
this.focusReleaseTimer = setTimeout(() => {
|
this.focusReleaseTimer = setTimeout(() => {
|
||||||
this.focusReleaseTimer = null;
|
this.focusReleaseTimer = null;
|
||||||
if (this._conversationFocusActive) return;
|
if (this._conversationFocusActive) {
|
||||||
|
console.log('[Audio] Focus-Release abgebrochen (Conversation jetzt aktiv)');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
console.log('[Audio] AudioFocus jetzt released');
|
||||||
AudioFocus?.release().catch(() => {});
|
AudioFocus?.release().catch(() => {});
|
||||||
}, this.FOCUS_RELEASE_DELAY_MS);
|
}, this.FOCUS_RELEASE_DELAY_MS);
|
||||||
}
|
}
|
||||||
@@ -262,14 +361,163 @@ class AudioService {
|
|||||||
this._releaseFocusDeferred();
|
this._releaseFocusDeferred();
|
||||||
}
|
}
|
||||||
|
|
||||||
/** TTS-Wiedergabe haart stoppen — z.B. wenn ein Anruf reinkommt.
|
/** TTS-Wiedergabe haart stoppen — z.B. fuer Barge-In. Buffer wird geleert,
|
||||||
* Released auch sofort den AudioFocus damit der Anruf-Klingelton hoerbar ist. */
|
* kein Auto-Resume. Released auch sofort den AudioFocus. */
|
||||||
haltAllPlayback(reason: string = ''): void {
|
haltAllPlayback(reason: string = ''): void {
|
||||||
console.log('[Audio] haltAllPlayback: %s', reason || '(no reason)');
|
console.log('[Audio] haltAllPlayback: %s', reason || '(no reason)');
|
||||||
this._conversationFocusActive = false;
|
this._conversationFocusActive = false;
|
||||||
this.stopPlayback();
|
this.stopPlayback();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Speziell fuer Anrufe: AudioTrack stoppen + Focus releasen, ABER pcm-
|
||||||
|
* Buffer + messageId behalten damit weitere Chunks der unterbrochenen
|
||||||
|
* Antwort weiter gesammelt werden. isFinal schreibt dann die WAV trotz
|
||||||
|
* Anruf — und resumeFromInterruption findet sie. */
|
||||||
|
pauseForCall(reason: string = ''): void {
|
||||||
|
console.log('[Audio] pauseForCall: %s', reason || '(no reason)');
|
||||||
|
this._conversationFocusActive = false;
|
||||||
|
this._pausedForCall = true;
|
||||||
|
// Queue + isPlaying ruecksetzen — sonst klemmt der naechste Play-Button
|
||||||
|
// (playAudio sieht isPlaying=true und ruft _playNext nicht mehr auf).
|
||||||
|
this.audioQueue = [];
|
||||||
|
this.isPlaying = false;
|
||||||
|
// Foreground-Service stoppen — Notification waere sonst irrefuehrend
|
||||||
|
stopBackgroundAudio().catch(() => {});
|
||||||
|
// SoundPool/RNSound (Resume-Sound, Play-Button) stoppen — nicht relevant fuer Auto-Resume
|
||||||
|
if (this.currentSound) {
|
||||||
|
try { this.currentSound.stop(); this.currentSound.release(); } catch {}
|
||||||
|
this.currentSound = null;
|
||||||
|
}
|
||||||
|
if (this.resumeSound) {
|
||||||
|
try { this.resumeSound.stop(); this.resumeSound.release(); } catch {}
|
||||||
|
this.resumeSound = null;
|
||||||
|
}
|
||||||
|
// AudioTrack hart stoppen damit nichts mehr aus dem Lautsprecher kommt.
|
||||||
|
// pcmStreamActive bleibt true, pcmBuffer/pcmMessageId BLEIBEN — damit
|
||||||
|
// weitere Chunks gesammelt werden und isFinal die WAV schreiben kann.
|
||||||
|
PcmStreamPlayer?.stop().catch(() => {});
|
||||||
|
this._cancelDeferredFocusRelease();
|
||||||
|
AudioFocus?.release().catch(() => {});
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Anruf vorbei → weitere Chunks duerfen wieder abgespielt werden.
|
||||||
|
* resumeFromInterruption uebernimmt die Wiedergabe ab gemerkter Position. */
|
||||||
|
endCallPause(): void {
|
||||||
|
if (!this._pausedForCall) return;
|
||||||
|
this._pausedForCall = false;
|
||||||
|
console.log('[Audio] endCallPause');
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Bei Anruf: aktuelle Wiedergabe-Position merken damit wir nach dem
|
||||||
|
* Auflegen von dort weitermachen koennen. Returnt Position in Sekunden
|
||||||
|
* oder 0 wenn nichts spielte.
|
||||||
|
*
|
||||||
|
* Idempotent: bei mehrfachem Aufruf (ringing → offhook) wird die Position
|
||||||
|
* vom ersten Mal NICHT ueberschrieben. playbackStartTime laeuft stumpf
|
||||||
|
* weiter obwohl das Audio gestoppt ist — der erste Halt ist der echte. */
|
||||||
|
captureInterruption(): number {
|
||||||
|
if (this.pausedMessageId) {
|
||||||
|
console.log('[Audio] captureInterruption: bereits erfasst (msgId=%s pos=%ss) — skip',
|
||||||
|
this.pausedMessageId, this.pausedPosition.toFixed(2));
|
||||||
|
return this.pausedPosition;
|
||||||
|
}
|
||||||
|
if (!this.playbackStartTime || !this.currentPlaybackMsgId) {
|
||||||
|
console.log('[Audio] captureInterruption: nichts spielte (startTime=%s, msgId=%s)',
|
||||||
|
this.playbackStartTime, this.currentPlaybackMsgId || '(leer)');
|
||||||
|
this.pausedPosition = 0;
|
||||||
|
this.pausedMessageId = '';
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
const elapsedMs = Date.now() - this.playbackStartTime;
|
||||||
|
const positionSec = Math.max(0, elapsedMs / 1000 - this.LEADING_SILENCE_SEC);
|
||||||
|
this.pausedPosition = positionSec;
|
||||||
|
this.pausedMessageId = this.currentPlaybackMsgId;
|
||||||
|
console.log('[Audio] captureInterruption: msgId=%s pos=%ss',
|
||||||
|
this.pausedMessageId, positionSec.toFixed(2));
|
||||||
|
return positionSec;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Nach Anruf-Ende: ab gemerkter Position weiterspielen. Wenn Cache noch
|
||||||
|
* nicht geschrieben (final kam waehrend Anruf vielleicht doch nicht),
|
||||||
|
* warten bis maxWaitMs und dann probieren. Returnt true wenn gestartet. */
|
||||||
|
async resumeFromInterruption(maxWaitMs: number = 30000): Promise<boolean> {
|
||||||
|
const msgId = this.pausedMessageId;
|
||||||
|
const position = this.pausedPosition;
|
||||||
|
if (!msgId) {
|
||||||
|
console.log('[Audio] resumeFromInterruption: kein gemerkter Stand — skip');
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
console.log('[Audio] resumeFromInterruption: starte fuer msgId=%s pos=%ss',
|
||||||
|
msgId, position.toFixed(2));
|
||||||
|
this.pausedMessageId = ''; // konsumieren
|
||||||
|
const cachePath = `${RNFS.DocumentDirectoryPath}/tts_cache/${msgId}.wav`;
|
||||||
|
const startTime = Date.now();
|
||||||
|
while (Date.now() - startTime < maxWaitMs) {
|
||||||
|
try {
|
||||||
|
if (await RNFS.exists(cachePath)) {
|
||||||
|
return await this._playFromPathAtPosition(cachePath, position);
|
||||||
|
}
|
||||||
|
} catch {}
|
||||||
|
await new Promise(r => setTimeout(r, 500));
|
||||||
|
}
|
||||||
|
console.warn('[Audio] resumeFromInterruption: WAV %s nicht binnen %dms verfuegbar',
|
||||||
|
msgId, maxWaitMs);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
private async _playFromPathAtPosition(path: string, positionSec: number): Promise<boolean> {
|
||||||
|
try {
|
||||||
|
// Bestehende laufende Wiedergabe abbrechen damit wir sauber starten
|
||||||
|
if (this.resumeSound) {
|
||||||
|
try { this.resumeSound.stop(); this.resumeSound.release(); } catch {}
|
||||||
|
this.resumeSound = null;
|
||||||
|
}
|
||||||
|
const sound = await new Promise<Sound>((resolve, reject) => {
|
||||||
|
const s = new Sound(path.replace(/^file:\/\//, ''), '', (err) =>
|
||||||
|
err ? reject(err) : resolve(s));
|
||||||
|
});
|
||||||
|
// Audio-Focus anfordern damit Spotify pausiert
|
||||||
|
this._cancelDeferredFocusRelease();
|
||||||
|
AudioFocus?.requestDuck().catch(() => {});
|
||||||
|
this._firePlaybackStarted();
|
||||||
|
this.isPlaying = true;
|
||||||
|
this.resumeSound = sound;
|
||||||
|
// Tracking auch fuer den Resume-Sound aktualisieren — sonst kann
|
||||||
|
// captureInterruption bei einem zweiten Anruf die Position nicht
|
||||||
|
// mehr ermitteln (playbackStartTime waere von der ersten Wiedergabe).
|
||||||
|
const msgIdMatch = path.match(/([^/\\]+)\.wav$/i);
|
||||||
|
if (msgIdMatch) this.currentPlaybackMsgId = msgIdMatch[1];
|
||||||
|
// Virtuelle Start-Zeit so setzen, dass captureInterruption (das den
|
||||||
|
// Leading-Silence-Offset wieder abzieht) die korrekte Position liefert.
|
||||||
|
this.playbackStartTime = Date.now() - (positionSec + this.LEADING_SILENCE_SEC) * 1000;
|
||||||
|
console.log('[Audio] Resume von Position %ss aus %s',
|
||||||
|
positionSec.toFixed(2), path);
|
||||||
|
sound.setCurrentTime(Math.max(0, positionSec));
|
||||||
|
sound.play((success) => {
|
||||||
|
if (!success) console.warn('[Audio] Resume-Wiedergabe fehlgeschlagen');
|
||||||
|
try { sound.release(); } catch {}
|
||||||
|
if (this.resumeSound === sound) this.resumeSound = null;
|
||||||
|
this.isPlaying = false;
|
||||||
|
this.playbackFinishedListeners.forEach(cb => {
|
||||||
|
try { cb(); } catch (e) { console.warn('[Audio] cb err:', e); }
|
||||||
|
});
|
||||||
|
this._releaseFocusDeferred();
|
||||||
|
});
|
||||||
|
return true;
|
||||||
|
} catch (err: any) {
|
||||||
|
console.warn('[Audio] _playFromPathAtPosition fehlgeschlagen:', err?.message || err);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** True wenn ARIA gerade was abspielt — egal ob WAV-Queue oder PCM-Stream.
|
||||||
|
* Nuetzlich fuer "Barge-In": wenn der User spricht waehrend ARIA spricht,
|
||||||
|
* soll die ARIA-Wiedergabe abgebrochen + die neue User-Message verarbeitet
|
||||||
|
* werden ("ach vergiss es, mach lieber X"). */
|
||||||
|
isPlayingAudio(): boolean {
|
||||||
|
return this.isPlaying || this.pcmStreamActive;
|
||||||
|
}
|
||||||
|
|
||||||
// --- Berechtigungen ---
|
// --- Berechtigungen ---
|
||||||
|
|
||||||
async requestMicrophonePermission(): Promise<boolean> {
|
async requestMicrophonePermission(): Promise<boolean> {
|
||||||
@@ -327,6 +575,12 @@ class AudioService {
|
|||||||
|
|
||||||
this.recordingPath = `${RNFS.CachesDirectoryPath}/aria_recording_${Date.now()}.mp4`;
|
this.recordingPath = `${RNFS.CachesDirectoryPath}/aria_recording_${Date.now()}.mp4`;
|
||||||
|
|
||||||
|
// Foreground-Service VOR dem AudioRecord starten — sonst blockt Android
|
||||||
|
// den Background-Mic-Zugriff (foregroundServiceType=microphone muss zum
|
||||||
|
// Zeitpunkt des startRecorder() schon aktiv sein, sonst greifen die
|
||||||
|
// Background-Mic-Restrictions ab Android 11+).
|
||||||
|
await acquireBackgroundAudio('rec');
|
||||||
|
|
||||||
// Aufnahme mit Metering starten
|
// Aufnahme mit Metering starten
|
||||||
await this.recorder.startRecorder(this.recordingPath, {
|
await this.recorder.startRecorder(this.recordingPath, {
|
||||||
AudioEncoderAndroid: AudioEncoderAndroidType.AAC,
|
AudioEncoderAndroid: AudioEncoderAndroidType.AAC,
|
||||||
@@ -341,8 +595,36 @@ class AudioService {
|
|||||||
const db = e.currentMetering ?? -160;
|
const db = e.currentMetering ?? -160;
|
||||||
this.meterListeners.forEach(cb => cb(db));
|
this.meterListeners.forEach(cb => cb(db));
|
||||||
|
|
||||||
|
// Adaptive Baseline: erste 5 Samples (~500ms) sammeln, dann Schwellen
|
||||||
|
// anpassen. -160 (kein Metering) ignorieren — sonst wird die Baseline
|
||||||
|
// sinnlos niedrig.
|
||||||
|
if (this.vadBaselineSamples.length < VAD_BASELINE_SAMPLES) {
|
||||||
|
if (db > -100) {
|
||||||
|
this.vadBaselineSamples.push(db);
|
||||||
|
if (this.vadBaselineSamples.length === VAD_BASELINE_SAMPLES) {
|
||||||
|
// Minimum statt Mittelwert: robust gegen Spike-Samples (z.B. wenn
|
||||||
|
// der User direkt nach Wake-Word sofort spricht oder das Wake-Word-
|
||||||
|
// Echo noch im Mikro ist). Min ist der ruhigste Moment.
|
||||||
|
const lowest = Math.min(...this.vadBaselineSamples);
|
||||||
|
const rawSilence = lowest + VAD_SILENCE_OFFSET_DB;
|
||||||
|
const rawSpeech = lowest + VAD_SPEECH_OFFSET_DB;
|
||||||
|
// Cap auf einen vernuenftigen Bereich:
|
||||||
|
// - Silence-Schwelle nicht ueber -28dB (sonst zaehlt Hintergrund-
|
||||||
|
// geraeusch dauerhaft als "Sprache" → VAD feuert nie)
|
||||||
|
// - Silence-Schwelle nicht unter -50dB (sonst zu strikt)
|
||||||
|
this.vadAdaptiveSilenceDb = Math.max(-50, Math.min(rawSilence, -28));
|
||||||
|
this.vadAdaptiveSpeechDb = Math.max(-40, Math.min(rawSpeech, -18));
|
||||||
|
const msg = `VAD: ambient=${lowest.toFixed(0)}dB stille>${this.vadAdaptiveSilenceDb.toFixed(0)}dB`;
|
||||||
|
console.log('[Audio] %s speech>%s (raw silence=%s speech=%s)',
|
||||||
|
msg, this.vadAdaptiveSpeechDb.toFixed(1),
|
||||||
|
rawSilence.toFixed(1), rawSpeech.toFixed(1));
|
||||||
|
try { ToastAndroid.show(msg, ToastAndroid.SHORT); } catch {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Sprach-Gate: Erkennen ob tatsaechlich gesprochen wird
|
// Sprach-Gate: Erkennen ob tatsaechlich gesprochen wird
|
||||||
if (db > VAD_SPEECH_THRESHOLD_DB) {
|
if (db > this.vadAdaptiveSpeechDb) {
|
||||||
if (!this.speechDetected && this.speechStartTime === 0) {
|
if (!this.speechDetected && this.speechStartTime === 0) {
|
||||||
this.speechStartTime = Date.now();
|
this.speechStartTime = Date.now();
|
||||||
}
|
}
|
||||||
@@ -357,7 +639,7 @@ class AudioService {
|
|||||||
|
|
||||||
// VAD: Stille erkennen (nur wenn Sprache erkannt wurde)
|
// VAD: Stille erkennen (nur wenn Sprache erkannt wurde)
|
||||||
if (this.vadEnabled) {
|
if (this.vadEnabled) {
|
||||||
if (db > VAD_SILENCE_THRESHOLD_DB) {
|
if (db > this.vadAdaptiveSilenceDb) {
|
||||||
this.lastSpeechTime = Date.now();
|
this.lastSpeechTime = Date.now();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -367,6 +649,23 @@ class AudioService {
|
|||||||
this.lastSpeechTime = Date.now();
|
this.lastSpeechTime = Date.now();
|
||||||
this.speechDetected = false;
|
this.speechDetected = false;
|
||||||
this.speechStartTime = 0;
|
this.speechStartTime = 0;
|
||||||
|
// VAD-Adaptive zurueckgesetzt: Baseline wird in den ersten 500ms neu
|
||||||
|
// gemessen. Bis dahin gelten die Fallback-Schwellen.
|
||||||
|
this.vadBaselineSamples = [];
|
||||||
|
this.vadAdaptiveSilenceDb = VAD_SILENCE_FALLBACK_DB;
|
||||||
|
this.vadAdaptiveSpeechDb = VAD_SPEECH_FALLBACK_DB;
|
||||||
|
|
||||||
|
// Manueller Override aus Settings — wenn gesetzt, wird die adaptive
|
||||||
|
// Baseline-Messung uebersteuert. User-Wahl gewinnt vor Auto-Magic.
|
||||||
|
const dbOverride = await loadVadSilenceDbOverride();
|
||||||
|
if (dbOverride != null) {
|
||||||
|
this.vadAdaptiveSilenceDb = dbOverride;
|
||||||
|
this.vadAdaptiveSpeechDb = dbOverride + 10; // Speech klar ueber Stille
|
||||||
|
this.vadBaselineSamples = new Array(VAD_BASELINE_SAMPLES).fill(0); // Baseline-Sammeln deaktivieren
|
||||||
|
const msg = `VAD: manuell stille>${dbOverride}dB`;
|
||||||
|
console.log('[Audio] %s', msg);
|
||||||
|
try { ToastAndroid.show(msg, ToastAndroid.SHORT); } catch {}
|
||||||
|
}
|
||||||
this.setState('recording');
|
this.setState('recording');
|
||||||
|
|
||||||
// Andere Apps waehrend der Aufnahme pausieren (Musik, Videos etc.)
|
// Andere Apps waehrend der Aufnahme pausieren (Musik, Videos etc.)
|
||||||
@@ -394,18 +693,19 @@ class AudioService {
|
|||||||
};
|
};
|
||||||
if (autoStop) {
|
if (autoStop) {
|
||||||
const vadSilenceMs = await loadVadSilenceMs();
|
const vadSilenceMs = await loadVadSilenceMs();
|
||||||
|
const maxRecordingMs = await loadMaxRecordingMs();
|
||||||
console.log('[Audio] startRecording: autoStop=true, VAD-Stille=%dms, MAX=%dms',
|
console.log('[Audio] startRecording: autoStop=true, VAD-Stille=%dms, MAX=%dms',
|
||||||
vadSilenceMs, MAX_RECORDING_MS);
|
vadSilenceMs, maxRecordingMs);
|
||||||
this.vadTimer = setInterval(() => {
|
this.vadTimer = setInterval(() => {
|
||||||
const silenceDuration = Date.now() - this.lastSpeechTime;
|
const silenceDuration = Date.now() - this.lastSpeechTime;
|
||||||
if (silenceDuration >= vadSilenceMs) {
|
if (silenceDuration >= vadSilenceMs) {
|
||||||
fireSilenceOnce(`VAD ${silenceDuration}ms Stille (Schwelle=${vadSilenceMs}ms)`);
|
fireSilenceOnce(`VAD ${silenceDuration}ms Stille (Schwelle=${vadSilenceMs}ms)`);
|
||||||
}
|
}
|
||||||
}, 200);
|
}, 200);
|
||||||
// Notbremse: Nach MAX_RECORDING_MS zwangsweise stoppen
|
// Notbremse: Nach maxRecordingMs zwangsweise stoppen
|
||||||
this.maxDurationTimer = setTimeout(() => {
|
this.maxDurationTimer = setTimeout(() => {
|
||||||
fireSilenceOnce(`Max-Dauer ${MAX_RECORDING_MS}ms`);
|
fireSilenceOnce(`Max-Dauer ${maxRecordingMs}ms`);
|
||||||
}, MAX_RECORDING_MS);
|
}, maxRecordingMs);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Conversation-Window: Wenn der User innerhalb noSpeechTimeoutMs nicht
|
// Conversation-Window: Wenn der User innerhalb noSpeechTimeoutMs nicht
|
||||||
@@ -494,8 +794,15 @@ class AudioService {
|
|||||||
/** Base64-kodiertes Audio in die Queue stellen und abspielen */
|
/** Base64-kodiertes Audio in die Queue stellen und abspielen */
|
||||||
async playAudio(base64Data: string): Promise<void> {
|
async playAudio(base64Data: string): Promise<void> {
|
||||||
if (!base64Data) return;
|
if (!base64Data) return;
|
||||||
|
// Mute-Flag respektieren — robust gegen Race-Conditions zwischen User-
|
||||||
|
// Klick auf Mute und einem TTS-Chunk der im selben Tick eintrifft.
|
||||||
|
if (this._muted) {
|
||||||
|
console.log('[Audio] playAudio: muted=true → skip');
|
||||||
|
return;
|
||||||
|
}
|
||||||
this.audioQueue.push(base64Data);
|
this.audioQueue.push(base64Data);
|
||||||
|
console.log('[Audio] playAudio: queued (queue=%d isPlaying=%s pausedForCall=%s)',
|
||||||
|
this.audioQueue.length, this.isPlaying, this._pausedForCall);
|
||||||
if (!this.isPlaying) {
|
if (!this.isPlaying) {
|
||||||
this._playNext();
|
this._playNext();
|
||||||
}
|
}
|
||||||
@@ -561,7 +868,16 @@ class AudioService {
|
|||||||
final?: boolean;
|
final?: boolean;
|
||||||
silent?: boolean;
|
silent?: boolean;
|
||||||
}): Promise<string> {
|
}): Promise<string> {
|
||||||
const silent = !!payload.silent;
|
// _stoppedMessageId: User hat diese Antwort mid-Wiedergabe gestoppt
|
||||||
|
// (Mute geklickt). Auch wenn Mute jetzt wieder aus ist, soll diese
|
||||||
|
// Antwort nicht weiterspielen. Erst eine neue messageId resetted das.
|
||||||
|
const incomingMsgId = payload.messageId || '';
|
||||||
|
const stoppedByUser = !!this._stoppedMessageId && incomingMsgId === this._stoppedMessageId;
|
||||||
|
// Globaler Mute-Flag uebersteuert das per-Call silent — verhindert
|
||||||
|
// Race-Conditions wenn der User zwischen Chunks den Mute-Knopf drueckt.
|
||||||
|
// _pausedForCall: AudioTrack ist gestoppt waehrend Anruf — Chunks weiter
|
||||||
|
// sammeln (fuer WAV-Cache), aber NICHT in den Player schicken.
|
||||||
|
const silent = !!payload.silent || this._muted || this._pausedForCall || stoppedByUser;
|
||||||
if (!silent && !PcmStreamPlayer) {
|
if (!silent && !PcmStreamPlayer) {
|
||||||
console.warn('[Audio] PcmStreamPlayer Native Module nicht verfuegbar');
|
console.warn('[Audio] PcmStreamPlayer Native Module nicht verfuegbar');
|
||||||
return '';
|
return '';
|
||||||
@@ -587,6 +903,28 @@ class AudioService {
|
|||||||
this.pcmBuffer = [];
|
this.pcmBuffer = [];
|
||||||
this.pcmBytesCollected = 0;
|
this.pcmBytesCollected = 0;
|
||||||
}
|
}
|
||||||
|
// Resume-Sound stoppen falls noch aktiv (User hat nach Anruf eine
|
||||||
|
// neue Frage gestellt — die alte interruptierte Antwort ist obsolet).
|
||||||
|
if (this.resumeSound) {
|
||||||
|
try { this.resumeSound.stop(); this.resumeSound.release(); } catch {}
|
||||||
|
this.resumeSound = null;
|
||||||
|
}
|
||||||
|
// Pending Auto-Resume verwerfen wenn die neue Antwort eine andere
|
||||||
|
// messageId hat. Sonst spielt nach 30s-Wartezeit der Resume die
|
||||||
|
// ueberholte Antwort ab.
|
||||||
|
if (this.pausedMessageId && this.pausedMessageId !== messageId) {
|
||||||
|
console.log('[Audio] Neue TTS-Antwort (msgId=%s) — Auto-Resume fuer %s verworfen',
|
||||||
|
messageId, this.pausedMessageId);
|
||||||
|
this.pausedMessageId = '';
|
||||||
|
this.pausedPosition = 0;
|
||||||
|
}
|
||||||
|
// Stop-Marker zuruecksetzen wenn neue messageId — neue Antwort darf
|
||||||
|
// wieder normal abspielen, egal ob Mute zwischendurch aktiv war.
|
||||||
|
if (this._stoppedMessageId && this._stoppedMessageId !== messageId) {
|
||||||
|
console.log('[Audio] Neue Antwort (msgId=%s) — Stop-Marker fuer %s zurueckgesetzt',
|
||||||
|
messageId, this._stoppedMessageId);
|
||||||
|
this._stoppedMessageId = '';
|
||||||
|
}
|
||||||
this.pcmStreamActive = true;
|
this.pcmStreamActive = true;
|
||||||
this.pcmMessageId = messageId;
|
this.pcmMessageId = messageId;
|
||||||
this.pcmSampleRate = sampleRate;
|
this.pcmSampleRate = sampleRate;
|
||||||
@@ -604,6 +942,7 @@ class AudioService {
|
|||||||
}
|
}
|
||||||
this._cancelDeferredFocusRelease();
|
this._cancelDeferredFocusRelease();
|
||||||
AudioFocus?.requestDuck().catch(() => {});
|
AudioFocus?.requestDuck().catch(() => {});
|
||||||
|
this._firePlaybackStarted();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -620,13 +959,16 @@ class AudioService {
|
|||||||
|
|
||||||
if (isFinal) {
|
if (isFinal) {
|
||||||
if (!silent) {
|
if (!silent) {
|
||||||
// end() resolved jetzt erst wenn der native Writer-Thread fertig
|
// end() signalisiert dem Writer "keine weiteren Chunks". Aber WIR
|
||||||
// ist (alle Samples ausgespielt) — danach AudioFocus verzoegert
|
// releasen den AudioFocus NICHT hier — der writer braucht u.U. noch
|
||||||
// freigeben, damit Spotify/YouTube nicht im Mikro-Gap zwischen zwei
|
// 30+ Sekunden bis der Buffer wirklich abgespielt ist. Den release
|
||||||
// ARIA-Antworten wieder hochdrehen. Wenn ein neuer Stream innerhalb
|
// triggert das native Event "PcmPlaybackFinished" wenn AudioTrack
|
||||||
// FOCUS_RELEASE_DELAY_MS startet, wird das Release abgebrochen.
|
// wirklich am Ende ist (siehe ensurePlaybackFinishedListener).
|
||||||
try { await PcmStreamPlayer!.end(); } catch {}
|
try { await PcmStreamPlayer!.end(); } catch {}
|
||||||
this._releaseFocusDeferred();
|
// playbackFinished-Listener informieren (UI-Logik)
|
||||||
|
this.playbackFinishedListeners.forEach(cb => {
|
||||||
|
try { cb(); } catch (e) { console.warn('[Audio] playbackFinished cb err:', e); }
|
||||||
|
});
|
||||||
}
|
}
|
||||||
this.pcmStreamActive = false;
|
this.pcmStreamActive = false;
|
||||||
|
|
||||||
@@ -700,7 +1042,10 @@ class AudioService {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Audio aus lokaler Datei (file:// Pfad) in die Queue und abspielen. */
|
/** Audio aus lokaler Datei (file:// Pfad) in die Queue und abspielen.
|
||||||
|
* Setzt zusaetzlich playbackStartTime + currentPlaybackMsgId damit ein
|
||||||
|
* Anruf waehrend dieses Playbacks korrekt erfasst wird (ohne dieses
|
||||||
|
* Tracking liefert captureInterruption nichts → kein Auto-Resume). */
|
||||||
async playFromPath(filePath: string): Promise<void> {
|
async playFromPath(filePath: string): Promise<void> {
|
||||||
if (!filePath) return;
|
if (!filePath) return;
|
||||||
try {
|
try {
|
||||||
@@ -709,6 +1054,14 @@ class AudioService {
|
|||||||
console.warn('[Audio] Cache-Datei existiert nicht mehr:', cleanPath);
|
console.warn('[Audio] Cache-Datei existiert nicht mehr:', cleanPath);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
// Dateiname ohne .wav als messageId nehmen (egal ob UUID oder andere ID)
|
||||||
|
const fileMatch = cleanPath.match(/([^/\\]+)\.wav$/i);
|
||||||
|
const msgId = fileMatch ? fileMatch[1] : '';
|
||||||
|
console.log('[Audio] playFromPath: cleanPath=%s → msgId=%s', cleanPath, msgId || '(leer)');
|
||||||
|
if (msgId) {
|
||||||
|
this.currentPlaybackMsgId = msgId;
|
||||||
|
this.playbackStartTime = Date.now() - this.LEADING_SILENCE_SEC * 1000;
|
||||||
|
}
|
||||||
const b64 = await RNFS.readFile(cleanPath, 'base64');
|
const b64 = await RNFS.readFile(cleanPath, 'base64');
|
||||||
this.playAudio(b64);
|
this.playAudio(b64);
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
@@ -718,6 +1071,7 @@ class AudioService {
|
|||||||
|
|
||||||
// Callback wenn alle Audio-Teile abgespielt sind
|
// Callback wenn alle Audio-Teile abgespielt sind
|
||||||
private playbackFinishedListeners: (() => void)[] = [];
|
private playbackFinishedListeners: (() => void)[] = [];
|
||||||
|
private playbackStartedListeners: (() => void)[] = [];
|
||||||
|
|
||||||
onPlaybackFinished(callback: () => void): () => void {
|
onPlaybackFinished(callback: () => void): () => void {
|
||||||
this.playbackFinishedListeners.push(callback);
|
this.playbackFinishedListeners.push(callback);
|
||||||
@@ -726,6 +1080,30 @@ class AudioService {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Callback wenn ARIAs TTS-Wiedergabe startet — fuer Wake-Word-parallel-
|
||||||
|
* Listening waehrend ARIA spricht (Barge-In via "Computer" sagen). */
|
||||||
|
onPlaybackStarted(callback: () => void): () => void {
|
||||||
|
this.playbackStartedListeners.push(callback);
|
||||||
|
return () => {
|
||||||
|
this.playbackStartedListeners = this.playbackStartedListeners.filter(cb => cb !== callback);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
private _firePlaybackStarted(): void {
|
||||||
|
// Tracking fuer Auto-Resume nach Anruf-Pause: NUR setzen wenn ein
|
||||||
|
// PCM-Stream laeuft (Live-TTS). Bei Play-Button / Resume-Sound hat der
|
||||||
|
// Caller (playFromPath / _playFromPathAtPosition) das Tracking schon
|
||||||
|
// korrekt mit der msgId aus dem Pfad gesetzt — sonst wuerden wir hier
|
||||||
|
// mit leerem pcmMessageId ueberschreiben.
|
||||||
|
if (this.pcmMessageId) {
|
||||||
|
this.playbackStartTime = Date.now();
|
||||||
|
this.currentPlaybackMsgId = this.pcmMessageId;
|
||||||
|
}
|
||||||
|
this.playbackStartedListeners.forEach(cb => {
|
||||||
|
try { cb(); } catch (e) { console.warn('[Audio] playbackStarted listener err:', e); }
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
/** Naechstes Audio aus der Queue abspielen */
|
/** Naechstes Audio aus der Queue abspielen */
|
||||||
private async _playNext(): Promise<void> {
|
private async _playNext(): Promise<void> {
|
||||||
if (this.audioQueue.length === 0) {
|
if (this.audioQueue.length === 0) {
|
||||||
@@ -738,10 +1116,11 @@ class AudioService {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Beim ersten Playback-Start: andere Apps ducken
|
// Beim ersten Playback-Start: andere Apps ducken + Listener informieren
|
||||||
if (!this.isPlaying) {
|
if (!this.isPlaying) {
|
||||||
this._cancelDeferredFocusRelease();
|
this._cancelDeferredFocusRelease();
|
||||||
AudioFocus?.requestDuck().catch(() => {});
|
AudioFocus?.requestDuck().catch(() => {});
|
||||||
|
this._firePlaybackStarted();
|
||||||
}
|
}
|
||||||
this.isPlaying = true;
|
this.isPlaying = true;
|
||||||
|
|
||||||
@@ -772,11 +1151,13 @@ class AudioService {
|
|||||||
}
|
}
|
||||||
|
|
||||||
this.currentSound = sound;
|
this.currentSound = sound;
|
||||||
|
console.log('[Audio] Sound.play startet (path=%s)', soundPath);
|
||||||
|
|
||||||
// Naechstes Audio schon vorbereiten waehrend dieses abspielt
|
// Naechstes Audio schon vorbereiten waehrend dieses abspielt
|
||||||
this._preloadNext();
|
this._preloadNext();
|
||||||
|
|
||||||
sound.play((success) => {
|
sound.play((success) => {
|
||||||
|
console.log('[Audio] Sound.play callback: success=%s queue=%d', success, this.audioQueue.length);
|
||||||
if (!success) console.warn('[Audio] Wiedergabe fehlgeschlagen');
|
if (!success) console.warn('[Audio] Wiedergabe fehlgeschlagen');
|
||||||
sound.release();
|
sound.release();
|
||||||
this.currentSound = null;
|
this.currentSound = null;
|
||||||
@@ -803,8 +1184,51 @@ class AudioService {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Mute: alle eingehenden TTS-Chunks/WAVs werden ignoriert bis wieder
|
||||||
|
* unmuted. Robuster als ein React-Ref weil hier kein Re-Render-Race ist
|
||||||
|
* — die Bridge kann einen Chunk im selben JS-Tick liefern in dem der
|
||||||
|
* User Mute geklickt hat. */
|
||||||
|
private _muted: boolean = false;
|
||||||
|
/** Anruf laeuft → Chunks werden nur in den Cache-Buffer gepusht, nicht
|
||||||
|
* abgespielt. Wird in pauseForCall gesetzt, in endCallPause/resumeFrom-
|
||||||
|
* Interruption zurueckgenommen. */
|
||||||
|
private _pausedForCall: boolean = false;
|
||||||
|
/** Wenn der User mid-Wiedergabe Mute drueckt: messageId der ABGEBROCHENEN
|
||||||
|
* Antwort merken. Folge-Chunks dieser msgId werden silent ignoriert, auch
|
||||||
|
* wenn der User Mute wieder ausschaltet — kein "Resume mid-Antwort". Eine
|
||||||
|
* NEUE messageId resetted das, dann spielt's wieder normal. */
|
||||||
|
private _stoppedMessageId: string = '';
|
||||||
|
setMuted(muted: boolean): void {
|
||||||
|
console.log('[Audio] setMuted: %s (currentSound=%s pcmStreamActive=%s)',
|
||||||
|
muted, this.currentSound ? 'aktiv' : 'null', this.pcmStreamActive);
|
||||||
|
this._muted = muted;
|
||||||
|
if (muted) {
|
||||||
|
// Aktuell laufende Antwort als "verworfen" markieren — nachfolgende
|
||||||
|
// chunks dieser msgId werden silent gehalten auch wenn der User Mute
|
||||||
|
// gleich wieder ausschaltet. Erst eine NEUE Antwort darf wieder reden.
|
||||||
|
const activeMsgId = this.pcmMessageId || this.currentPlaybackMsgId;
|
||||||
|
if (activeMsgId) {
|
||||||
|
this._stoppedMessageId = activeMsgId;
|
||||||
|
console.log('[Audio] Antwort %s als gestoppt markiert', activeMsgId);
|
||||||
|
}
|
||||||
|
this.stopPlayback();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
isMuted(): boolean { return this._muted; }
|
||||||
|
|
||||||
/** Laufende Wiedergabe stoppen + Queue leeren */
|
/** Laufende Wiedergabe stoppen + Queue leeren */
|
||||||
stopPlayback(): void {
|
stopPlayback(): void {
|
||||||
|
// Idempotent: wenn nichts mehr aktiv ist, NICHT noch einen Focus-Release/
|
||||||
|
// Kick-Cycle anstossen — Re-Renders triggern setMuted oft mehrfach hinter-
|
||||||
|
// einander, und jeder weitere Kick lässt Spotify nochmal kurz pausieren.
|
||||||
|
const hasAnything = !!(this.currentSound || this.resumeSound || this.preloadedSound
|
||||||
|
|| this.pcmStreamActive || this.audioQueue.length || this.isPlaying);
|
||||||
|
if (!hasAnything) return;
|
||||||
|
console.log('[Audio] stopPlayback: currentSound=%s queue=%d pcm=%s',
|
||||||
|
this.currentSound ? 'aktiv' : 'null', this.audioQueue.length, this.pcmStreamActive);
|
||||||
|
// Foreground-Service auch stoppen — sonst bleibt die Notification haengen
|
||||||
|
// wenn Wiedergabe abgebrochen wird (Anruf, Cancel, Barge-In).
|
||||||
|
stopBackgroundAudio().catch(() => {});
|
||||||
this.audioQueue = [];
|
this.audioQueue = [];
|
||||||
this.isPlaying = false;
|
this.isPlaying = false;
|
||||||
if (this.currentSound) {
|
if (this.currentSound) {
|
||||||
@@ -812,21 +1236,31 @@ class AudioService {
|
|||||||
this.currentSound.release();
|
this.currentSound.release();
|
||||||
this.currentSound = null;
|
this.currentSound = null;
|
||||||
}
|
}
|
||||||
|
if (this.resumeSound) {
|
||||||
|
this.resumeSound.stop();
|
||||||
|
this.resumeSound.release();
|
||||||
|
this.resumeSound = null;
|
||||||
|
}
|
||||||
if (this.preloadedSound) {
|
if (this.preloadedSound) {
|
||||||
this.preloadedSound.release();
|
this.preloadedSound.release();
|
||||||
this.preloadedSound = null;
|
this.preloadedSound = null;
|
||||||
if (this.preloadedPath) RNFS.unlink(this.preloadedPath).catch(() => {});
|
if (this.preloadedPath) RNFS.unlink(this.preloadedPath).catch(() => {});
|
||||||
this.preloadedPath = '';
|
this.preloadedPath = '';
|
||||||
}
|
}
|
||||||
// PCM-Stream ebenfalls hart stoppen (Cancel/Abbruch)
|
// PCM-Stream ebenfalls hart stoppen (Cancel/Abbruch).
|
||||||
if (this.pcmStreamActive) {
|
// pcmStreamActive wird beim isFinal-Chunk schon false gesetzt — der
|
||||||
|
// AudioTrack spielt aber noch sekundenlang aus seinem Buffer ab. Daher
|
||||||
|
// IMMER stop() aufrufen, ohne den Flag zu pruefen (ist idempotent).
|
||||||
PcmStreamPlayer?.stop().catch(() => {});
|
PcmStreamPlayer?.stop().catch(() => {});
|
||||||
this.pcmStreamActive = false;
|
this.pcmStreamActive = false;
|
||||||
this.pcmBuffer = [];
|
this.pcmBuffer = [];
|
||||||
this.pcmBytesCollected = 0;
|
this.pcmBytesCollected = 0;
|
||||||
this.pcmMessageId = '';
|
this.pcmMessageId = '';
|
||||||
}
|
// Audio-Focus sofort freigeben — User hat explizit abgebrochen.
|
||||||
// Audio-Focus sofort freigeben — User hat explizit abgebrochen
|
// Unser Focus war TRANSIENT, Spotify resumed darum automatisch beim
|
||||||
|
// Abandon. Den frueheren kickReleaseMedia haben wir entfernt: er
|
||||||
|
// requestete USAGE_MEDIA mit GAIN (permanent), was Spotify als
|
||||||
|
// "user-action stopp" interpretierte und Auto-Resume verhinderte.
|
||||||
this._cancelDeferredFocusRelease();
|
this._cancelDeferredFocusRelease();
|
||||||
AudioFocus?.release().catch(() => {});
|
AudioFocus?.release().catch(() => {});
|
||||||
}
|
}
|
||||||
@@ -868,19 +1302,29 @@ class AudioService {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Alte Aufnahme- und TTS-Files aus dem Cache loeschen (>30s alt). */
|
/** Alte Aufnahme- und TTS-Files aus dem Cache loeschen.
|
||||||
private async _cleanupStaleCacheFiles(): Promise<void> {
|
* Default 30s — verwendet beim Mikro-Start (kurze Lebensdauer reicht).
|
||||||
|
* App-Start nutzt 5min damit gerade aktive Files nicht erwischt werden. */
|
||||||
|
private async _cleanupStaleCacheFiles(maxAgeMs: number = 30000): Promise<void> {
|
||||||
try {
|
try {
|
||||||
const files = await RNFS.readDir(RNFS.CachesDirectoryPath);
|
const files = await RNFS.readDir(RNFS.CachesDirectoryPath);
|
||||||
const now = Date.now();
|
const now = Date.now();
|
||||||
|
let removed = 0;
|
||||||
|
let freedBytes = 0;
|
||||||
for (const f of files) {
|
for (const f of files) {
|
||||||
if (!f.isFile()) continue;
|
if (!f.isFile()) continue;
|
||||||
if (!f.name.startsWith('aria_recording_') && !f.name.startsWith('aria_tts_')) continue;
|
if (!f.name.startsWith('aria_recording_') && !f.name.startsWith('aria_tts_')) continue;
|
||||||
const age = now - (f.mtime ? f.mtime.getTime() : 0);
|
const age = now - (f.mtime ? f.mtime.getTime() : 0);
|
||||||
if (age > 30000) {
|
if (age > maxAgeMs) {
|
||||||
|
freedBytes += parseInt(f.size as any, 10) || 0;
|
||||||
await RNFS.unlink(f.path).catch(() => {});
|
await RNFS.unlink(f.path).catch(() => {});
|
||||||
|
removed += 1;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (removed > 0) {
|
||||||
|
console.log('[Audio] Cache-Cleanup: %d Files entfernt, %.1fMB freigegeben',
|
||||||
|
removed, freedBytes / 1024 / 1024);
|
||||||
|
}
|
||||||
} catch {
|
} catch {
|
||||||
// silent — cleanup ist best-effort
|
// silent — cleanup ist best-effort
|
||||||
}
|
}
|
||||||
@@ -907,6 +1351,43 @@ class AudioService {
|
|||||||
// silent
|
// silent
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Aktuelle Groesse des TTS-Caches. */
|
||||||
|
async getTtsCacheSize(): Promise<{ count: number; totalMB: number }> {
|
||||||
|
let count = 0;
|
||||||
|
let total = 0;
|
||||||
|
try {
|
||||||
|
const dir = `${RNFS.DocumentDirectoryPath}/tts_cache`;
|
||||||
|
if (await RNFS.exists(dir)) {
|
||||||
|
const files = await RNFS.readDir(dir);
|
||||||
|
for (const f of files) {
|
||||||
|
if (!f.isFile() || !f.name.endsWith('.wav')) continue;
|
||||||
|
count += 1;
|
||||||
|
total += parseInt(f.size as any, 10) || 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch {}
|
||||||
|
return { count, totalMB: total / 1024 / 1024 };
|
||||||
|
}
|
||||||
|
|
||||||
|
/** TTS-Cache komplett leeren (Settings-Button). */
|
||||||
|
async clearTtsCache(): Promise<{ removed: number; freedMB: number }> {
|
||||||
|
let removed = 0;
|
||||||
|
let freed = 0;
|
||||||
|
try {
|
||||||
|
const dir = `${RNFS.DocumentDirectoryPath}/tts_cache`;
|
||||||
|
if (!(await RNFS.exists(dir))) return { removed: 0, freedMB: 0 };
|
||||||
|
const files = await RNFS.readDir(dir);
|
||||||
|
for (const f of files) {
|
||||||
|
if (!f.isFile() || !f.name.endsWith('.wav')) continue;
|
||||||
|
const size = parseInt(f.size as any, 10) || 0;
|
||||||
|
await RNFS.unlink(f.path).catch(() => {});
|
||||||
|
removed += 1;
|
||||||
|
freed += size;
|
||||||
|
}
|
||||||
|
} catch {}
|
||||||
|
return { removed, freedMB: freed / 1024 / 1024 };
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Singleton
|
// Singleton
|
||||||
|
|||||||
@@ -0,0 +1,76 @@
|
|||||||
|
/**
|
||||||
|
* Background-Audio: ARIAs TTS, Mic-Aufnahme und Wake-Word-Lauschen sollen
|
||||||
|
* auch bei minimierter App weiterlaufen. Wir starten dafuer einen Foreground-
|
||||||
|
* Service mit foregroundServiceType=mediaPlayback|microphone, der eine
|
||||||
|
* persistente Notification zeigt waehrend irgendein Audio-Slot aktiv ist.
|
||||||
|
*
|
||||||
|
* Mehrere Komponenten koennen den Service unabhaengig "halten":
|
||||||
|
* - 'tts' : ARIA spricht
|
||||||
|
* - 'rec' : Aufnahme laeuft
|
||||||
|
* - 'wake' : Wake-Word lauscht passiv (Ohr aktiv)
|
||||||
|
*
|
||||||
|
* Solange mindestens ein Slot aktiv ist, laeuft der Service. Wenn alle
|
||||||
|
* Slots leer sind, wird er gestoppt. Der Notification-Text passt sich an
|
||||||
|
* den hoechstprioren Slot an (tts > rec > wake).
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { NativeModules } from 'react-native';
|
||||||
|
|
||||||
|
interface BackgroundAudioNative {
|
||||||
|
start(reason: string): Promise<boolean>;
|
||||||
|
stop(): Promise<boolean>;
|
||||||
|
}
|
||||||
|
|
||||||
|
const { BackgroundAudio } = NativeModules as { BackgroundAudio?: BackgroundAudioNative };
|
||||||
|
|
||||||
|
type Slot = 'tts' | 'rec' | 'wake';
|
||||||
|
|
||||||
|
const slots = new Set<Slot>();
|
||||||
|
|
||||||
|
// Prioritaet fuer den Notification-Text — hoechste zuerst.
|
||||||
|
const PRIORITY: Slot[] = ['tts', 'rec', 'wake'];
|
||||||
|
|
||||||
|
function topReason(): string {
|
||||||
|
for (const s of PRIORITY) {
|
||||||
|
if (slots.has(s)) return s;
|
||||||
|
}
|
||||||
|
return '';
|
||||||
|
}
|
||||||
|
|
||||||
|
async function applyState(): Promise<void> {
|
||||||
|
if (!BackgroundAudio) return;
|
||||||
|
if (slots.size === 0) {
|
||||||
|
try { await BackgroundAudio.stop(); } catch {}
|
||||||
|
console.log('[BackgroundAudio] Service gestoppt (keine Slots)');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const reason = topReason();
|
||||||
|
try {
|
||||||
|
await BackgroundAudio.start(reason);
|
||||||
|
console.log('[BackgroundAudio] Service aktiv (slot=%s, slots=%s)',
|
||||||
|
reason, [...slots].join('+'));
|
||||||
|
} catch (err: any) {
|
||||||
|
console.warn('[BackgroundAudio] start fehlgeschlagen:', err?.message || err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function acquireBackgroundAudio(slot: Slot): Promise<void> {
|
||||||
|
if (slots.has(slot)) return;
|
||||||
|
slots.add(slot);
|
||||||
|
await applyState();
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function releaseBackgroundAudio(slot: Slot): Promise<void> {
|
||||||
|
if (!slots.has(slot)) return;
|
||||||
|
slots.delete(slot);
|
||||||
|
await applyState();
|
||||||
|
}
|
||||||
|
|
||||||
|
export function backgroundAudioActive(): boolean {
|
||||||
|
return slots.size > 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Legacy API (nur tts-Slot) — fuer Aufruf-Sites die noch nichts vom Slot-
|
||||||
|
// system wissen. Mappt auf den 'tts'-Slot. ---
|
||||||
|
export const startBackgroundAudio = () => acquireBackgroundAudio('tts');
|
||||||
|
export const stopBackgroundAudio = () => releaseBackgroundAudio('tts');
|
||||||
@@ -0,0 +1,138 @@
|
|||||||
|
/**
|
||||||
|
* GPS-Tracking-Service.
|
||||||
|
*
|
||||||
|
* Wenn aktiv: pushed alle paar Sekunden die aktuelle Position als
|
||||||
|
* `location_update {lat, lon}` an den RVS-Server, damit Brain-Watcher
|
||||||
|
* mit `near()`-Conditions etwas zum Vergleichen haben.
|
||||||
|
*
|
||||||
|
* Default: AUS. Wird entweder vom User manuell in Settings angeschaltet
|
||||||
|
* oder von ARIA via location_tracking-RVS-Message (Brain-Tool
|
||||||
|
* `request_location_tracking`).
|
||||||
|
*
|
||||||
|
* Energie-Schutz: distanceFilter 30m, interval 15s. Echte Fahrt-Updates
|
||||||
|
* (Geschwindigkeit) kommen sauber durch, stationaer wird kaum gesendet.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||||
|
import { PermissionsAndroid, Platform, ToastAndroid } from 'react-native';
|
||||||
|
import Geolocation from '@react-native-community/geolocation';
|
||||||
|
import rvs from './rvs';
|
||||||
|
|
||||||
|
type Listener = (active: boolean) => void;
|
||||||
|
|
||||||
|
class GpsTrackingService {
|
||||||
|
private watchId: number | null = null;
|
||||||
|
private active = false;
|
||||||
|
private listeners: Set<Listener> = new Set();
|
||||||
|
// Defensive: nicht zu schnell oeffentlich togglen
|
||||||
|
private lastChangeAt = 0;
|
||||||
|
|
||||||
|
isActive(): boolean {
|
||||||
|
return this.active;
|
||||||
|
}
|
||||||
|
|
||||||
|
onChange(cb: Listener): () => void {
|
||||||
|
this.listeners.add(cb);
|
||||||
|
return () => { this.listeners.delete(cb); };
|
||||||
|
}
|
||||||
|
|
||||||
|
private notify() {
|
||||||
|
for (const cb of this.listeners) {
|
||||||
|
try { cb(this.active); } catch {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Beim App-Start: gespeicherten Zustand wiederherstellen (Default off). */
|
||||||
|
async restoreFromStorage(): Promise<void> {
|
||||||
|
try {
|
||||||
|
const v = await AsyncStorage.getItem('aria_gps_tracking');
|
||||||
|
if (v === 'true') {
|
||||||
|
console.log('[gps-track] Restore: war an, starte wieder');
|
||||||
|
this.start('Beim Start wiederhergestellt');
|
||||||
|
}
|
||||||
|
} catch {}
|
||||||
|
}
|
||||||
|
|
||||||
|
private async ensurePermission(): Promise<boolean> {
|
||||||
|
if (Platform.OS !== 'android') return true;
|
||||||
|
try {
|
||||||
|
const granted = await PermissionsAndroid.request(
|
||||||
|
PermissionsAndroid.PERMISSIONS.ACCESS_FINE_LOCATION,
|
||||||
|
{
|
||||||
|
title: 'GPS-Tracking',
|
||||||
|
message: 'ARIA braucht laufende Standort-Updates damit GPS-Watcher (Blitzer-Warner, near()) funktionieren.',
|
||||||
|
buttonPositive: 'Erlauben',
|
||||||
|
buttonNegative: 'Abbrechen',
|
||||||
|
},
|
||||||
|
);
|
||||||
|
return granted === PermissionsAndroid.RESULTS.GRANTED;
|
||||||
|
} catch (e) {
|
||||||
|
console.warn('[gps-track] Permission-Fehler:', e);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async start(reason: string = ''): Promise<boolean> {
|
||||||
|
if (this.active) return true;
|
||||||
|
const ok = await this.ensurePermission();
|
||||||
|
if (!ok) {
|
||||||
|
ToastAndroid.show('GPS-Tracking: Berechtigung abgelehnt', ToastAndroid.LONG);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
this.watchId = Geolocation.watchPosition(
|
||||||
|
(pos) => {
|
||||||
|
const lat = pos.coords.latitude;
|
||||||
|
const lon = pos.coords.longitude;
|
||||||
|
rvs.send('location_update' as any, { lat, lon });
|
||||||
|
},
|
||||||
|
(err) => {
|
||||||
|
console.warn('[gps-track] watchPosition error:', err?.code, err?.message);
|
||||||
|
},
|
||||||
|
{
|
||||||
|
enableHighAccuracy: true,
|
||||||
|
distanceFilter: 30, // erst senden wenn 30m gewandert
|
||||||
|
interval: 15000, // (Android) gewuenschte Frequenz
|
||||||
|
fastestInterval: 10000, // (Android) max Frequenz
|
||||||
|
} as any,
|
||||||
|
);
|
||||||
|
this.active = true;
|
||||||
|
this.lastChangeAt = Date.now();
|
||||||
|
this.notify();
|
||||||
|
AsyncStorage.setItem('aria_gps_tracking', 'true').catch(() => {});
|
||||||
|
ToastAndroid.show(
|
||||||
|
reason ? `GPS-Tracking aktiv (${reason})` : 'GPS-Tracking aktiv',
|
||||||
|
ToastAndroid.SHORT,
|
||||||
|
);
|
||||||
|
console.log('[gps-track] gestartet', reason ? `(${reason})` : '');
|
||||||
|
return true;
|
||||||
|
} catch (e: any) {
|
||||||
|
console.warn('[gps-track] start fehlgeschlagen:', e?.message);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
stop(reason: string = ''): void {
|
||||||
|
if (!this.active) return;
|
||||||
|
if (this.watchId !== null) {
|
||||||
|
try { Geolocation.clearWatch(this.watchId); } catch {}
|
||||||
|
this.watchId = null;
|
||||||
|
}
|
||||||
|
this.active = false;
|
||||||
|
this.lastChangeAt = Date.now();
|
||||||
|
this.notify();
|
||||||
|
AsyncStorage.setItem('aria_gps_tracking', 'false').catch(() => {});
|
||||||
|
ToastAndroid.show(
|
||||||
|
reason ? `GPS-Tracking aus (${reason})` : 'GPS-Tracking aus',
|
||||||
|
ToastAndroid.SHORT,
|
||||||
|
);
|
||||||
|
console.log('[gps-track] gestoppt', reason ? `(${reason})` : '');
|
||||||
|
}
|
||||||
|
|
||||||
|
async toggle(reason: string = ''): Promise<void> {
|
||||||
|
if (this.active) this.stop(reason);
|
||||||
|
else await this.start(reason);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export default new GpsTrackingService();
|
||||||
@@ -0,0 +1,41 @@
|
|||||||
|
/**
|
||||||
|
* Verbose-Logging-Toggle: console.log laesst sich global stummschalten.
|
||||||
|
* console.warn/console.error bleiben immer an — Fehler will man immer sehen.
|
||||||
|
*
|
||||||
|
* Default: an (true). Toggle ueber Settings → Protokoll → Verbose Logging.
|
||||||
|
* Beim Start wird der gespeicherte Wert geladen, vorher loggen wir normal.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||||
|
|
||||||
|
export const VERBOSE_LOGGING_KEY = 'aria_verbose_logging';
|
||||||
|
|
||||||
|
// Original-console.log retten, damit wir die Wrapper jederzeit wieder
|
||||||
|
// "scharf" stellen koennen (sonst waere ein Toggle-an nach -aus tot).
|
||||||
|
const originalLog = console.log.bind(console);
|
||||||
|
const noop = () => {};
|
||||||
|
|
||||||
|
let _verbose = true;
|
||||||
|
|
||||||
|
function applyState(): void {
|
||||||
|
console.log = _verbose ? originalLog : noop;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Wert aus AsyncStorage laden und anwenden. Beim App-Start aufrufen. */
|
||||||
|
export async function initLogger(): Promise<void> {
|
||||||
|
try {
|
||||||
|
const v = await AsyncStorage.getItem(VERBOSE_LOGGING_KEY);
|
||||||
|
_verbose = v !== 'false'; // default: true
|
||||||
|
} catch {}
|
||||||
|
applyState();
|
||||||
|
}
|
||||||
|
|
||||||
|
export function isVerboseLogging(): boolean {
|
||||||
|
return _verbose;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function setVerboseLogging(verbose: boolean): void {
|
||||||
|
_verbose = verbose;
|
||||||
|
applyState();
|
||||||
|
AsyncStorage.setItem(VERBOSE_LOGGING_KEY, String(verbose)).catch(() => {});
|
||||||
|
}
|
||||||
@@ -1,14 +1,19 @@
|
|||||||
/**
|
/**
|
||||||
* PhoneCall-Service — pausiert die TTS-Wiedergabe wenn das Telefon klingelt
|
* PhoneCall-Service — pausiert ARIA bei Telefonaten:
|
||||||
* oder ein Anruf laeuft. Native-Bindung an PhoneCallModule.kt.
|
|
||||||
*
|
*
|
||||||
* Bei "ringing" oder "offhook" wird audioService.haltAllPlayback() gerufen —
|
* 1. Klassischer Mobilfunk-Anruf via TelephonyManager (PhoneCallModule.kt)
|
||||||
* ARIA verstummt sofort. Nach dem Auflegen passiert nichts automatisch
|
* Status: idle / ringing / offhook
|
||||||
* (Audio kommt nicht zurueck), der User muesste die Antwort manuell
|
|
||||||
* nochmal anfordern (Play-Button auf der Nachricht).
|
|
||||||
*
|
*
|
||||||
* Permission READ_PHONE_STATE muss vom Nutzer einmalig erteilt werden —
|
* 2. VoIP-Anrufe (WhatsApp, Signal, Discord, Telegram, Teams, ...) via
|
||||||
* wenn nicht, failed start() leise und der Rest funktioniert wie bisher.
|
* AudioFocus-Loss-Event (AudioFocusModule.kt). Diese Apps requestn
|
||||||
|
* AUDIOFOCUS_GAIN_TRANSIENT_EXCLUSIVE wenn ein Anruf reinkommt — wir
|
||||||
|
* bekommen ein "loss" Event und reagieren genauso wie auf RINGING.
|
||||||
|
*
|
||||||
|
* In beiden Faellen wird audioService.haltAllPlayback() + wakeWordService.
|
||||||
|
* pauseForCall() gerufen. Bei call-end (idle / focus-gain) → resumeFromCall.
|
||||||
|
*
|
||||||
|
* Permission READ_PHONE_STATE ist nur fuer Pfad 1 noetig — Pfad 2 braucht
|
||||||
|
* keine extra Berechtigung weil unser eigener AudioFocus-Listener feuert.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import {
|
import {
|
||||||
@@ -19,6 +24,7 @@ import {
|
|||||||
ToastAndroid,
|
ToastAndroid,
|
||||||
} from 'react-native';
|
} from 'react-native';
|
||||||
import audioService from './audio';
|
import audioService from './audio';
|
||||||
|
import wakeWordService from './wakeword';
|
||||||
|
|
||||||
interface PhoneCallNative {
|
interface PhoneCallNative {
|
||||||
start(): Promise<boolean>;
|
start(): Promise<boolean>;
|
||||||
@@ -32,13 +38,30 @@ type PhoneState = 'idle' | 'ringing' | 'offhook';
|
|||||||
class PhoneCallService {
|
class PhoneCallService {
|
||||||
private started: boolean = false;
|
private started: boolean = false;
|
||||||
private subscription: { remove: () => void } | null = null;
|
private subscription: { remove: () => void } | null = null;
|
||||||
|
private focusSubscription: { remove: () => void } | null = null;
|
||||||
private lastState: PhoneState = 'idle';
|
private lastState: PhoneState = 'idle';
|
||||||
|
/** Damit Resume nach VoIP-Loss nicht doppelt feuert wenn auch
|
||||||
|
* TelephonyManager-IDLE-Event kommt. */
|
||||||
|
private interruptedByFocus: boolean = false;
|
||||||
|
|
||||||
async start(): Promise<boolean> {
|
async start(): Promise<boolean> {
|
||||||
if (this.started || !PhoneCall) return false;
|
if (this.started || Platform.OS !== 'android') return false;
|
||||||
if (Platform.OS !== 'android') return false;
|
|
||||||
|
|
||||||
// Runtime-Permission holen (nur einmal noetig)
|
// 1. AudioFocus-Listener IMMER registrieren — fangs VoIP-Calls (WhatsApp,
|
||||||
|
// Signal, Discord etc.) abdecken, brauchen keine Permission.
|
||||||
|
try {
|
||||||
|
const focusEmitter = new NativeEventEmitter(NativeModules.AudioFocus as any);
|
||||||
|
this.focusSubscription = focusEmitter.addListener(
|
||||||
|
'AudioFocusChanged',
|
||||||
|
(e: { type: 'loss' | 'loss_transient' | 'gain' }) => this._onFocusChanged(e.type),
|
||||||
|
);
|
||||||
|
console.log('[PhoneCall] AudioFocus-Listener aktiv (fuer VoIP-Calls)');
|
||||||
|
} catch (err: any) {
|
||||||
|
console.warn('[PhoneCall] AudioFocus-Subscription gescheitert', err?.message || err);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. TelephonyManager-Listener — fuer klassische Mobilfunk-Anrufe
|
||||||
|
if (PhoneCall) {
|
||||||
try {
|
try {
|
||||||
const granted = await PermissionsAndroid.request(
|
const granted = await PermissionsAndroid.request(
|
||||||
PermissionsAndroid.PERMISSIONS.READ_PHONE_STATE,
|
PermissionsAndroid.PERMISSIONS.READ_PHONE_STATE,
|
||||||
@@ -51,56 +74,147 @@ class PhoneCallService {
|
|||||||
buttonNegative: 'Spaeter',
|
buttonNegative: 'Spaeter',
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
if (granted !== PermissionsAndroid.RESULTS.GRANTED) {
|
if (granted === PermissionsAndroid.RESULTS.GRANTED) {
|
||||||
console.warn('[PhoneCall] READ_PHONE_STATE Permission abgelehnt');
|
const ok = await PhoneCall.start();
|
||||||
return false;
|
if (ok) {
|
||||||
|
const emitter = new NativeEventEmitter(NativeModules.PhoneCall as any);
|
||||||
|
this.subscription = emitter.addListener(
|
||||||
|
'PhoneCallStateChanged',
|
||||||
|
(e: { state: PhoneState }) => this._onStateChanged(e.state),
|
||||||
|
);
|
||||||
|
console.log('[PhoneCall] TelephonyManager-Listener aktiv');
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
console.warn('[PhoneCall] READ_PHONE_STATE abgelehnt — VoIP-Calls werden trotzdem ueber AudioFocus erkannt');
|
||||||
|
}
|
||||||
|
} catch (err: any) {
|
||||||
|
console.warn('[PhoneCall] TelephonyManager-Setup gescheitert:', err?.message || err);
|
||||||
}
|
}
|
||||||
} catch (err) {
|
|
||||||
console.warn('[PhoneCall] Permission-Anfrage gescheitert', err);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
|
||||||
const ok = await PhoneCall.start();
|
|
||||||
if (!ok) {
|
|
||||||
console.warn('[PhoneCall] Native start() lieferte false (Permission?)');
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
const emitter = new NativeEventEmitter(NativeModules.PhoneCall as any);
|
|
||||||
this.subscription = emitter.addListener('PhoneCallStateChanged', (e: { state: PhoneState }) => {
|
|
||||||
this._onStateChanged(e.state);
|
|
||||||
});
|
|
||||||
this.started = true;
|
this.started = true;
|
||||||
console.log('[PhoneCall] Listener aktiv');
|
|
||||||
return true;
|
return true;
|
||||||
} catch (err: any) {
|
|
||||||
console.warn('[PhoneCall] start gescheitert:', err?.message || err);
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
async stop(): Promise<void> {
|
async stop(): Promise<void> {
|
||||||
if (!this.started || !PhoneCall) return;
|
if (!this.started) return;
|
||||||
try {
|
try { this.subscription?.remove(); } catch {}
|
||||||
this.subscription?.remove();
|
try { this.focusSubscription?.remove(); } catch {}
|
||||||
this.subscription = null;
|
this.subscription = null;
|
||||||
await PhoneCall.stop();
|
this.focusSubscription = null;
|
||||||
} catch {}
|
if (PhoneCall) {
|
||||||
|
try { await PhoneCall.stop(); } catch {}
|
||||||
|
}
|
||||||
this.started = false;
|
this.started = false;
|
||||||
this.lastState = 'idle';
|
this.lastState = 'idle';
|
||||||
|
this.interruptedByFocus = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Handles TelephonyManager state transitions (idle / ringing / offhook). */
private _onStateChanged(state: PhoneState): void {
  if (state === this.lastState) return;
  const prev = this.lastState;
  console.log('[PhoneCall] State: %s → %s', prev, state);
  this.lastState = state;
  if (state === 'ringing' || state === 'offhook') {
    this._haltForCall(state === 'ringing' ? 'Anruf — ARIA pausiert' : 'Im Gespraech — ARIA pausiert');
  } else if (state === 'idle' && prev !== 'idle') {
    // If playback was already paused via AudioFocus loss, do NOT resume
    // twice — the focus-gain event triggers that resume.
    if (!this.interruptedByFocus) {
      this._resumeAfterCall('Anruf beendet — ARIA wieder aktiv');
    }
  }
}
||||||
|
|
||||||
|
/** AudioFocus-Loss = irgendeine andere App hat den Focus uebernommen.
|
||||||
|
* Das passiert bei VoIP-Anrufen (was wir wollen) ABER auch bei normalen
|
||||||
|
* Audio-Playern (anderer Player startet, Notification-Sound, sogar
|
||||||
|
* unsere eigenen Sound-Calls beim Play-Button). Daher checken wir den
|
||||||
|
* AudioMode — nur IN_CALL (2) oder IN_COMMUNICATION (3) zaehlt als Anruf. */
|
||||||
|
private async _onFocusChanged(type: 'loss' | 'loss_transient' | 'gain'): Promise<void> {
|
||||||
|
if (type === 'loss' || type === 'loss_transient') {
|
||||||
|
// Schon durch klassischen TelephonyManager pausiert? Dann nichts doppeln.
|
||||||
|
if (this.lastState === 'ringing' || this.lastState === 'offhook') return;
|
||||||
|
// Mode pruefen — nur echte Anrufe behandeln.
|
||||||
|
let mode = -1;
|
||||||
|
try { mode = await (NativeModules.AudioFocus as any)?.getMode?.(); } catch {}
|
||||||
|
if (mode !== 2 && mode !== 3) {
|
||||||
|
// NORMAL-Mode → kein Anruf (Stefan hat z.B. Play-Button gedrueckt
|
||||||
|
// oder Spotify hat sich neu reingedraengelt). Keine Toasts.
|
||||||
|
console.log('[PhoneCall] FOCUS_LOSS ignoriert (AudioMode=%d, kein Call)', mode);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
this.interruptedByFocus = true;
|
||||||
|
this._haltForCall('Anruf erkannt (VoIP) — ARIA pausiert');
|
||||||
|
// Pollen, weil GAIN nicht zuverlaessig kommt (wir releasen den Focus
|
||||||
|
// selbst beim halt → kein automatischer GAIN). AudioMode != IN_COMMUNICATION
|
||||||
|
// = Call vorbei.
|
||||||
|
this._startVoipResumePoll();
|
||||||
|
} else if (type === 'gain') {
|
||||||
|
if (this.interruptedByFocus) {
|
||||||
|
this.interruptedByFocus = false;
|
||||||
|
this._stopVoipResumePoll();
|
||||||
|
this._resumeAfterCall('Audio frei — ARIA wieder aktiv');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Polling-Fallback: alle 3s checken ob AudioMode wieder NORMAL ist. */
|
||||||
|
private voipPollTimer: ReturnType<typeof setInterval> | null = null;
|
||||||
|
private _startVoipResumePoll(): void {
|
||||||
|
if (this.voipPollTimer) return;
|
||||||
|
this.voipPollTimer = setInterval(async () => {
|
||||||
|
if (!this.interruptedByFocus) {
|
||||||
|
this._stopVoipResumePoll();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
const mode = await (NativeModules.AudioFocus as any)?.getMode?.();
|
||||||
|
// 0 = MODE_NORMAL — Call ist vorbei
|
||||||
|
if (typeof mode === 'number' && mode === 0) {
|
||||||
|
this.interruptedByFocus = false;
|
||||||
|
this._stopVoipResumePoll();
|
||||||
|
this._resumeAfterCall('Anruf beendet — ARIA wieder aktiv');
|
||||||
|
}
|
||||||
|
} catch {}
|
||||||
|
}, 3000);
|
||||||
|
}
|
||||||
|
private _stopVoipResumePoll(): void {
|
||||||
|
if (this.voipPollTimer) {
|
||||||
|
clearInterval(this.voipPollTimer);
|
||||||
|
this.voipPollTimer = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Silence ARIA for an incoming/ongoing call and tell the user via toast. */
private _haltForCall(toast: string): void {
  // Remember the playback position before killing the stream — for auto-resume.
  audioService.captureInterruption();
  // pauseForCall (instead of haltAllPlayback): pcmBuffer + messageId are
  // kept and further chunks keep being collected, so isFinal still writes
  // the WAV.
  audioService.pauseForCall(toast);
  wakeWordService.pauseForCall().catch(() => {});
  ToastAndroid.show(toast, ToastAndroid.SHORT);
}
|
||||||
|
|
||||||
|
private _resumeAfterCall(toast: string): void {
|
||||||
|
// Anruf-Pause aufheben — neue Chunks duerfen wieder direkt abgespielt
|
||||||
|
// werden (falls die Bridge mid-Anruf isFinal noch nicht geschickt hat).
|
||||||
|
audioService.endCallPause();
|
||||||
|
wakeWordService.resumeFromCall().catch(() => {});
|
||||||
|
ToastAndroid.show(toast, ToastAndroid.SHORT);
|
||||||
|
// 800ms warten bevor Auto-Resume — sonst kollidiert ARIA's neuer Focus-
|
||||||
|
// Request mit Spotify's Auto-Resume nach Anruf-Ende. System haengt nach
|
||||||
|
// dem Auflegen noch im IN_CALL-Mode-Uebergang, Spotify schaut auf Focus-
|
||||||
|
// Gain und wuerde sofort wieder LOSS sehen → bleibt pausiert.
|
||||||
|
// Mit Delay: Spotify resumed kurz, dann pausiert ARIA wieder ordnungs-
|
||||||
|
// gemaess. Wenn ARIA nichts pending hat, bleibt Spotify einfach an.
|
||||||
|
setTimeout(() => {
|
||||||
|
audioService.resumeFromInterruption(30000).then(ok => {
|
||||||
|
if (ok) {
|
||||||
|
console.log('[PhoneCall] Auto-Resume von gemerkter Position gestartet');
|
||||||
|
}
|
||||||
|
}).catch(() => {});
|
||||||
|
}, 800);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -50,29 +50,70 @@ class UpdateService {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Raeumt alte heruntergeladene APK-Dateien aus dem Cache auf. */
|
/** Sucht ueberall wo .apk-Dateien herumliegen koennten. */
|
||||||
private async cleanupOldApks(): Promise<void> {
|
private async _apkSearchDirs(): Promise<string[]> {
|
||||||
try {
|
const dirs = [RNFS.CachesDirectoryPath, RNFS.DocumentDirectoryPath];
|
||||||
const files = await RNFS.readDir(RNFS.CachesDirectoryPath);
|
if ((RNFS as any).ExternalCachesDirectoryPath) {
|
||||||
const apks = files.filter(f => /\.apk$/i.test(f.name));
|
dirs.push((RNFS as any).ExternalCachesDirectoryPath);
|
||||||
|
}
|
||||||
|
if (RNFS.ExternalDirectoryPath) {
|
||||||
|
dirs.push(RNFS.ExternalDirectoryPath);
|
||||||
|
}
|
||||||
|
return dirs;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Raeumt alte heruntergeladene APK-Dateien aus den App-Verzeichnissen auf.
|
||||||
|
* Public damit Settings den Button "Update-Cache leeren" benutzen kann. */
|
||||||
|
async cleanupOldApks(keepCurrentName?: string): Promise<{ removed: number; freedMB: number }> {
|
||||||
|
const dirs = await this._apkSearchDirs();
|
||||||
|
let removed = 0;
|
||||||
let freed = 0;
|
let freed = 0;
|
||||||
|
for (const dir of dirs) {
|
||||||
|
try {
|
||||||
|
if (!(await RNFS.exists(dir))) continue;
|
||||||
|
const files = await RNFS.readDir(dir);
|
||||||
|
const apks = files.filter(f => /\.apk$/i.test(f.name));
|
||||||
for (const f of apks) {
|
for (const f of apks) {
|
||||||
|
if (keepCurrentName && f.name === keepCurrentName) continue;
|
||||||
try {
|
try {
|
||||||
const size = parseInt(f.size as any, 10) || 0;
|
const size = parseInt(f.size as any, 10) || 0;
|
||||||
await RNFS.unlink(f.path);
|
await RNFS.unlink(f.path);
|
||||||
|
removed += 1;
|
||||||
freed += size;
|
freed += size;
|
||||||
console.log(`[Update] Alte APK geloescht: ${f.name} (${(size / 1024 / 1024).toFixed(1)}MB)`);
|
console.log(`[Update] APK geloescht: ${f.path} (${(size / 1024 / 1024).toFixed(1)}MB)`);
|
||||||
} catch (err: any) {
|
} catch (err: any) {
|
||||||
console.warn(`[Update] APK-Loeschen fehlgeschlagen: ${f.name} (${err?.message || err})`);
|
console.warn(`[Update] APK-Loeschen fehlgeschlagen: ${f.path} (${err?.message || err})`);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (apks.length > 0) {
|
|
||||||
console.log(`[Update] Cleanup fertig: ${apks.length} APKs entfernt, ${(freed / 1024 / 1024).toFixed(1)}MB freigegeben`);
|
|
||||||
}
|
|
||||||
} catch (err: any) {
|
} catch (err: any) {
|
||||||
console.warn(`[Update] Cleanup-Fehler: ${err?.message || err}`);
|
console.warn(`[Update] Cleanup-Fehler in ${dir}: ${err?.message || err}`);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
const freedMB = freed / 1024 / 1024;
|
||||||
|
if (removed > 0) {
|
||||||
|
console.log(`[Update] Cleanup fertig: ${removed} APK${removed === 1 ? '' : 's'} entfernt, ${freedMB.toFixed(1)}MB freigegeben`);
|
||||||
|
}
|
||||||
|
return { removed, freedMB };
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Aktuelle Groesse aller APK-Dateien in den App-Verzeichnissen (in MB). */
|
||||||
|
async getApkCacheSize(): Promise<{ count: number; totalMB: number }> {
|
||||||
|
const dirs = await this._apkSearchDirs();
|
||||||
|
let count = 0;
|
||||||
|
let total = 0;
|
||||||
|
for (const dir of dirs) {
|
||||||
|
try {
|
||||||
|
if (!(await RNFS.exists(dir))) continue;
|
||||||
|
const files = await RNFS.readDir(dir);
|
||||||
|
for (const f of files) {
|
||||||
|
if (!f.isFile() || !/\.apk$/i.test(f.name)) continue;
|
||||||
|
count += 1;
|
||||||
|
total += parseInt(f.size as any, 10) || 0;
|
||||||
|
}
|
||||||
|
} catch {}
|
||||||
|
}
|
||||||
|
return { count, totalMB: total / 1024 / 1024 };
|
||||||
|
}
|
||||||
|
|
||||||
/** Bei App-Start Update pruefen */
|
/** Bei App-Start Update pruefen */
|
||||||
checkForUpdate(): void {
|
checkForUpdate(): void {
|
||||||
|
|||||||
@@ -0,0 +1,71 @@
|
|||||||
|
/**
|
||||||
|
* Spielt einen kurzen "Bereit"-Sound (Airplane Ding-Dong) wenn das Mikrofon
|
||||||
|
* nach Wake-Word-Erkennung wirklich offen ist. Datei liegt in
|
||||||
|
* android/app/src/main/res/raw/wake_ready_sound.mp3 — wird ueber Android's
|
||||||
|
* Resource-System per react-native-sound abgespielt.
|
||||||
|
*
|
||||||
|
* Toggle: AsyncStorage-Key 'aria_wake_ready_sound_enabled' (default true).
|
||||||
|
*/
|
||||||
|
|
||||||
|
import Sound from 'react-native-sound';
|
||||||
|
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||||
|
|
||||||
|
export const WAKE_READY_SOUND_STORAGE_KEY = 'aria_wake_ready_sound_enabled';
|
||||||
|
|
||||||
|
Sound.setCategory('Playback', false);
|
||||||
|
|
||||||
|
let cachedSound: Sound | null = null;
|
||||||
|
let cachedFailed = false;
|
||||||
|
|
||||||
|
function getSound(): Promise<Sound | null> {
|
||||||
|
if (cachedFailed) return Promise.resolve(null);
|
||||||
|
if (cachedSound) return Promise.resolve(cachedSound);
|
||||||
|
return new Promise(resolve => {
|
||||||
|
const s = new Sound('wake_ready_sound', Sound.MAIN_BUNDLE, (err) => {
|
||||||
|
if (err) {
|
||||||
|
console.warn('[WakeReadySound] Konnte nicht geladen werden:', err);
|
||||||
|
cachedFailed = true;
|
||||||
|
resolve(null);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
cachedSound = s;
|
||||||
|
resolve(s);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/** True wenn der User den "Bereit"-Sound aktiviert hat. Default: true. */
|
||||||
|
export async function isWakeReadySoundEnabled(): Promise<boolean> {
|
||||||
|
try {
|
||||||
|
const raw = await AsyncStorage.getItem(WAKE_READY_SOUND_STORAGE_KEY);
|
||||||
|
if (raw === null) return true; // Default an
|
||||||
|
return raw === 'true';
|
||||||
|
} catch {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function setWakeReadySoundEnabled(enabled: boolean): Promise<void> {
|
||||||
|
try {
|
||||||
|
await AsyncStorage.setItem(WAKE_READY_SOUND_STORAGE_KEY, String(enabled));
|
||||||
|
} catch {}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Spielt den Bereit-Sound einmal ab — non-blocking. Wenn der User ihn
|
||||||
|
* in den Settings deaktiviert hat oder die Datei nicht ladbar ist,
|
||||||
|
* passiert einfach nichts. */
|
||||||
|
export async function playWakeReadySound(): Promise<void> {
|
||||||
|
if (!(await isWakeReadySoundEnabled())) return;
|
||||||
|
const s = await getSound();
|
||||||
|
if (!s) return;
|
||||||
|
try {
|
||||||
|
s.stop(() => {
|
||||||
|
s.setCurrentTime(0);
|
||||||
|
s.play((success) => {
|
||||||
|
if (!success) console.warn('[WakeReadySound] Wiedergabe fehlgeschlagen');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
} catch (e) {
|
||||||
|
console.warn('[WakeReadySound] play() Exception:', e);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -22,6 +22,7 @@
|
|||||||
|
|
||||||
import { NativeEventEmitter, NativeModules, ToastAndroid } from 'react-native';
|
import { NativeEventEmitter, NativeModules, ToastAndroid } from 'react-native';
|
||||||
import AsyncStorage from '@react-native-async-storage/async-storage';
|
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||||
|
import { acquireBackgroundAudio } from './backgroundAudio';
|
||||||
|
|
||||||
type WakeWordCallback = () => void;
|
type WakeWordCallback = () => void;
|
||||||
type StateCallback = (state: WakeWordState) => void;
|
type StateCallback = (state: WakeWordState) => void;
|
||||||
@@ -72,6 +73,19 @@ class WakeWordService {
|
|||||||
private state: WakeWordState = 'off';
|
private state: WakeWordState = 'off';
|
||||||
private wakeCallbacks: WakeWordCallback[] = [];
|
private wakeCallbacks: WakeWordCallback[] = [];
|
||||||
private stateCallbacks: StateCallback[] = [];
|
private stateCallbacks: StateCallback[] = [];
|
||||||
|
/** Barge-In-Callbacks: feuern wenn Wake-Word WAEHREND ARIA spricht erkannt
|
||||||
|
* wird. ChatScreen reagiert mit TTS-stop + neuer Aufnahme. */
|
||||||
|
private bargeCallbacks: WakeWordCallback[] = [];
|
||||||
|
/** True solange Wake-Word parallel zu TTS aktiv ist. */
|
||||||
|
private bargeListening: boolean = false;
|
||||||
|
/** Anruf-Pause: state wird gemerkt damit nach Auflegen wiederhergestellt wird. */
|
||||||
|
private callPaused: boolean = false;
|
||||||
|
private preCallState: WakeWordState = 'off';
|
||||||
|
/** Cooldown nach App-Resume: kurze Phase in der Wake-Word-Detections
|
||||||
|
* ignoriert werden. Beim Wechsel von Background nach Vordergrund gibt's
|
||||||
|
* oft einen Audio-Pegel-Spike (AudioFocus-Switch, AudioTrack re-route),
|
||||||
|
* der openWakeWord faelschlich triggern kann. */
|
||||||
|
private cooldownUntilMs: number = 0;
|
||||||
|
|
||||||
private keyword: WakeKeyword = DEFAULT_KEYWORD;
|
private keyword: WakeKeyword = DEFAULT_KEYWORD;
|
||||||
private nativeReady: boolean = false;
|
private nativeReady: boolean = false;
|
||||||
@@ -152,6 +166,10 @@ class WakeWordService {
|
|||||||
/** Ohr-Button gedrueckt — startet passives Lauschen oder direkt Konversation. */
|
/** Ohr-Button gedrueckt — startet passives Lauschen oder direkt Konversation. */
|
||||||
async start(): Promise<boolean> {
|
async start(): Promise<boolean> {
|
||||||
if (this.state !== 'off') return true;
|
if (this.state !== 'off') return true;
|
||||||
|
// Foreground-Service VOR dem Mic-Zugriff hochziehen damit Background-
|
||||||
|
// Lauschen funktioniert (Android braucht foregroundServiceType=microphone
|
||||||
|
// aktiv zum Zeitpunkt des AudioRecord.startRecording).
|
||||||
|
await acquireBackgroundAudio('wake');
|
||||||
if (this.nativeReady && OpenWakeWord) {
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
try {
|
try {
|
||||||
await OpenWakeWord.start();
|
await OpenWakeWord.start();
|
||||||
@@ -191,16 +209,42 @@ class WakeWordService {
|
|||||||
if (this.nativeReady && OpenWakeWord) {
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
try { await OpenWakeWord.stop(); } catch {}
|
try { await OpenWakeWord.stop(); } catch {}
|
||||||
}
|
}
|
||||||
|
this.bargeListening = false;
|
||||||
this.setState('off');
|
this.setState('off');
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Cooldown setzen — alle Wake-Word-Detections in den naechsten ms ignorieren.
|
||||||
|
* Wird beim App-Resume gerufen weil AppState-Wechsel Audio-Spikes erzeugen
|
||||||
|
* die openWakeWord faelschlich als Trigger interpretiert. */
|
||||||
|
setResumeCooldown(ms: number = 1500): void {
|
||||||
|
this.cooldownUntilMs = Date.now() + ms;
|
||||||
|
console.log('[WakeWord] Cooldown aktiv fuer %dms', ms);
|
||||||
|
}
|
||||||
|
|
||||||
/** Wake-Word getriggert: Native-Modul pausieren, Konversation starten. */
|
/** Wake-Word getriggert: Native-Modul pausieren, Konversation starten. */
|
||||||
private async onWakeDetected(): Promise<void> {
|
private async onWakeDetected(): Promise<void> {
|
||||||
console.log('[WakeWord] Wake-Word "%s" erkannt!', this.keyword);
|
const now = Date.now();
|
||||||
ToastAndroid.show(`Wake-Word "${KEYWORD_LABELS[this.keyword]}" erkannt — sprich jetzt`, ToastAndroid.SHORT);
|
if (now < this.cooldownUntilMs) {
|
||||||
|
const left = this.cooldownUntilMs - now;
|
||||||
|
console.log('[WakeWord] Trigger ignoriert (Cooldown noch %dms aktiv — wahrscheinlich App-Resume-Spike)', left);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
console.log('[WakeWord] Wake-Word "%s" erkannt! (state=%s, barge=%s)',
|
||||||
|
this.keyword, this.state, this.bargeListening);
|
||||||
if (this.nativeReady && OpenWakeWord) {
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
try { await OpenWakeWord.stop(); } catch {}
|
try { await OpenWakeWord.stop(); } catch {}
|
||||||
}
|
}
|
||||||
|
this.bargeListening = false;
|
||||||
|
// Wenn wir bereits in 'conversing' sind und der Trigger waehrend ARIAs TTS
|
||||||
|
// kam (Barge-In via Wake-Word), feuern wir einen separaten Callback damit
|
||||||
|
// ChatScreen das TTS abbrechen + neue Aufnahme starten kann. Sonst normal.
|
||||||
|
if (this.state === 'conversing') {
|
||||||
|
this.bargeCallbacks.forEach(cb => {
|
||||||
|
try { cb(); } catch (e) { console.warn('[WakeWord] barge cb err:', e); }
|
||||||
|
});
|
||||||
|
// Kein erneutes setState — wir bleiben in 'conversing'.
|
||||||
|
return;
|
||||||
|
}
|
||||||
this.setState('conversing');
|
this.setState('conversing');
|
||||||
setTimeout(() => {
|
setTimeout(() => {
|
||||||
if (this.state === 'conversing') {
|
if (this.state === 'conversing') {
|
||||||
@@ -209,6 +253,72 @@ class WakeWordService {
|
|||||||
}, 200);
|
}, 200);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Wake-Word PARALLEL zur TTS-Wiedergabe lauschen lassen — User kann
|
||||||
|
* "Computer" sagen waehrend ARIA noch redet, AcousticEchoCanceler im
|
||||||
|
* Native-Modul verhindert dass ARIAs eigene Stimme triggert.
|
||||||
|
* Voraussetzung: AudioRecorder muss frei sein (Recording aus). Wenn der
|
||||||
|
* AudioRecorder gerade laeuft, hat der Vorrang — Wake-Word geht nicht. */
|
||||||
|
async startBargeListening(): Promise<void> {
|
||||||
|
if (!this.nativeReady || !OpenWakeWord) return;
|
||||||
|
if (this.state !== 'conversing') return;
|
||||||
|
if (this.bargeListening) return;
|
||||||
|
try {
|
||||||
|
await OpenWakeWord.start();
|
||||||
|
this.bargeListening = true;
|
||||||
|
console.log('[WakeWord] Barge-Listening aktiv (parallel zu TTS)');
|
||||||
|
} catch (err) {
|
||||||
|
console.warn('[WakeWord] Barge-Listening start fehlgeschlagen:', err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Barge-Listening wieder aus — z.B. wenn der AudioRecorder fuer die
|
||||||
|
* naechste Aufnahme das Mikro braucht. */
|
||||||
|
async stopBargeListening(): Promise<void> {
|
||||||
|
if (!this.bargeListening) return;
|
||||||
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
|
try { await OpenWakeWord.stop(); } catch {}
|
||||||
|
}
|
||||||
|
this.bargeListening = false;
|
||||||
|
console.log('[WakeWord] Barge-Listening aus');
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Bei eingehendem Anruf: Wake-Word + Aufnahme stoppen, Pre-Call-State
|
||||||
|
* merken. Telefonie-App belegt das Mikro waehrend des Anrufs, plus ARIA
|
||||||
|
* soll nicht in laufende Telefonate reinhoeren. */
|
||||||
|
async pauseForCall(): Promise<void> {
|
||||||
|
if (this.callPaused) return;
|
||||||
|
this.preCallState = this.state;
|
||||||
|
if (this.state === 'off') {
|
||||||
|
this.callPaused = true; // merken dass wir pausiert wurden
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
this.callPaused = true;
|
||||||
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
|
try { await OpenWakeWord.stop(); } catch {}
|
||||||
|
}
|
||||||
|
this.bargeListening = false;
|
||||||
|
console.log('[WakeWord] Anruf — Wake-Word pausiert (war: %s)', this.preCallState);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Nach Auflegen: Pre-Call-State wiederherstellen. Aktive Konversation
|
||||||
|
* geht zu armed zurueck (User soll nicht in einen halben Dialog springen). */
|
||||||
|
async resumeFromCall(): Promise<void> {
|
||||||
|
if (!this.callPaused) return;
|
||||||
|
const restoreTo = this.preCallState;
|
||||||
|
this.callPaused = false;
|
||||||
|
this.preCallState = 'off';
|
||||||
|
console.log('[WakeWord] Anruf zu Ende — restore state=%s', restoreTo);
|
||||||
|
if (restoreTo === 'off') return;
|
||||||
|
// Aktive Konversation war wahrscheinlich durch haltAllPlayback eh abgebrochen,
|
||||||
|
// sicher zu armed degraden.
|
||||||
|
if (restoreTo === 'conversing') this.setState('armed');
|
||||||
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
|
try { await OpenWakeWord.start(); } catch (err) {
|
||||||
|
console.warn('[WakeWord] Restore-Start fehlgeschlagen:', err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/** Konversation beenden — User hat im Window nichts gesagt.
|
/** Konversation beenden — User hat im Window nichts gesagt.
|
||||||
* Mit Wake-Word: zurueck zu 'armed' (Listener wieder an).
|
* Mit Wake-Word: zurueck zu 'armed' (Listener wieder an).
|
||||||
* Ohne: zurueck zu 'off'.
|
* Ohne: zurueck zu 'off'.
|
||||||
@@ -268,6 +378,19 @@ class WakeWordService {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Subscribe auf Barge-In-Events: Wake-Word erkannt waehrend ARIA noch
|
||||||
|
* spricht. ChatScreen sollte dann TTS abbrechen + neue Aufnahme starten. */
|
||||||
|
onBargeIn(callback: WakeWordCallback): () => void {
|
||||||
|
this.bargeCallbacks.push(callback);
|
||||||
|
return () => {
|
||||||
|
this.bargeCallbacks = this.bargeCallbacks.filter(cb => cb !== callback);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
isBargeListening(): boolean {
|
||||||
|
return this.bargeListening;
|
||||||
|
}
|
||||||
|
|
||||||
onStateChange(callback: StateCallback): () => void {
|
onStateChange(callback: StateCallback): () => void {
|
||||||
this.stateCallbacks.push(callback);
|
this.stateCallbacks.push(callback);
|
||||||
return () => {
|
return () => {
|
||||||
|
|||||||
@@ -0,0 +1,35 @@
|
|||||||
|
# ════════════════════════════════════════════════════════════
|
||||||
|
# ARIA Brain — Agent + Memory Container
|
||||||
|
#
|
||||||
|
# FastAPI-Server mit Vector-DB-Memory (Qdrant).
|
||||||
|
# Spricht via HTTP/WebSocket mit Bridge und Diagnostic.
|
||||||
|
# LLM-Calls gehen ueber den Proxy (claude-max-api-proxy).
|
||||||
|
# ════════════════════════════════════════════════════════════
|
||||||
|
|
||||||
|
FROM python:3.12-slim
|
||||||
|
|
||||||
|
# System-Tools die Skills brauchen koennten (curl, jq, git, ssh-client,
|
||||||
|
# Build-Basics fuer venv-Compiles). Bewusst sparsam — alles weitere
|
||||||
|
# bringt der Skill selbst mit (siehe execution=local-bin).
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
curl \
|
||||||
|
jq \
|
||||||
|
git \
|
||||||
|
openssh-client \
|
||||||
|
ca-certificates \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
COPY requirements.txt .
|
||||||
|
RUN pip install --no-cache-dir -r requirements.txt
|
||||||
|
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Embedding-Model-Cache und Skills landen unter /data (Volume)
|
||||||
|
ENV SENTENCE_TRANSFORMERS_HOME=/data/_models
|
||||||
|
ENV ARIA_DATA_DIR=/data
|
||||||
|
|
||||||
|
EXPOSE 8080
|
||||||
|
|
||||||
|
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080"]
|
||||||
@@ -0,0 +1,557 @@
|
|||||||
|
"""
|
||||||
|
Conversation-Loop. Eine Anfrage von Stefan, eine Antwort von ARIA.
|
||||||
|
|
||||||
|
Pro Turn:
|
||||||
|
1. user-Turn an die laufende Conversation appenden
|
||||||
|
2. Hot Memory holen (alle pinned Punkte)
|
||||||
|
3. Cold Memory holen (Top-K semantisch zur user-Nachricht)
|
||||||
|
4. System-Prompt aus Hot+Cold bauen
|
||||||
|
5. Messages = [system, *window, user]
|
||||||
|
6. Claude via Proxy aufrufen
|
||||||
|
7. Assistant-Reply in Conversation appenden + zurueckgeben
|
||||||
|
|
||||||
|
Memory-Destillat laeuft asynchron NACH dem Reply, gesteuert vom
|
||||||
|
/chat-Endpoint ueber BackgroundTasks.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from conversation import Conversation, Turn
|
||||||
|
from memory import Embedder, VectorStore, MemoryPoint
|
||||||
|
from prompts import build_system_prompt
|
||||||
|
from proxy_client import ProxyClient, Message as ProxyMessage
|
||||||
|
import skills as skills_mod
|
||||||
|
import triggers as triggers_mod
|
||||||
|
import watcher as watcher_mod
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
# Meta-Tool: ARIA kann selbst neue Skills bauen
|
||||||
|
META_TOOLS = [
|
||||||
|
{
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": "skill_create",
|
||||||
|
"description": (
|
||||||
|
"Erstelle einen neuen Skill (wiederverwendbare Faehigkeit). "
|
||||||
|
"Skills sind IMMER Python — jeder Skill bekommt seine eigene venv "
|
||||||
|
"mit den pip_packages die er braucht.\n\n"
|
||||||
|
"HARTE REGEL — IMMER Skill anlegen wenn: die Loesung erfordert eine "
|
||||||
|
"pip-Library. Sonst muesste der Install bei jedem Container-Restart "
|
||||||
|
"neu laufen (Brain hat keinen persistenten State ausser /data/skills/).\n\n"
|
||||||
|
"Sonst NUR wenn ALLE Kriterien erfuellt sind:\n"
|
||||||
|
" 1) wiederkehrend (Aufgabe kommt realistisch nochmal),\n"
|
||||||
|
" 2) nicht-trivial (mehrere Schritte),\n"
|
||||||
|
" 3) parametrisierbar (nimmt Eingaben, gibt Ergebnis),\n"
|
||||||
|
" 4) wiederverwendbar als ganzes Paket.\n"
|
||||||
|
"NICHT fuer einzelne Shell-Befehle (date, hostname, ls etc.) und "
|
||||||
|
"nicht fuer Einmal-Faelle. Stefan kann Skill-Erstellung explizit "
|
||||||
|
"triggern (\"bau daraus einen Skill\").\n\n"
|
||||||
|
"Wenn etwas nur via apt-Paket geht — Stefan fragen ob es ins "
|
||||||
|
"Brain-Dockerfile soll, NICHT als Skill bauen."
|
||||||
|
),
|
||||||
|
"parameters": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"name": {"type": "string", "description": "kurz, kebab-case, a-z 0-9 - _"},
|
||||||
|
"description": {"type": "string", "description": "Was kann der Skill? 1 Satz."},
|
||||||
|
"entry_code": {
|
||||||
|
"type": "string",
|
||||||
|
"description": (
|
||||||
|
"Python-Code. Args lesen via os.environ['ARG_NAME']. "
|
||||||
|
"Resultat per print() (stdout) zurueck. Bei Fehler: "
|
||||||
|
"non-zero exit (sys.exit(1) o.ae.)."
|
||||||
|
),
|
||||||
|
},
|
||||||
|
"readme": {"type": "string", "description": "Markdown — was macht der Skill, Beispiel-Aufrufe"},
|
||||||
|
"pip_packages": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
"description": "pip-Pakete die in der venv installiert werden (z.B. requests, yt-dlp, pypdf)",
|
||||||
|
},
|
||||||
|
"args": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "object"},
|
||||||
|
"description": "Argumente-Schema [{name, type, required, description}]",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"required": ["name", "description", "entry_code"],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": "skill_list",
|
||||||
|
"description": "Zeigt alle Skills (inkl. deaktivierte). Sollte selten noetig sein — die Liste steht eh im System-Prompt.",
|
||||||
|
"parameters": {"type": "object", "properties": {}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": "trigger_timer",
|
||||||
|
"description": (
|
||||||
|
"Lege einen Timer-Trigger an — feuert EINMALIG zum angegebenen Zeitpunkt "
|
||||||
|
"und ruft dich selbst auf (Push-Nachricht an Stefan). "
|
||||||
|
"Use-Case: 'erinnere mich in 10min', 'sag mir um 14:30 Bescheid'."
|
||||||
|
),
|
||||||
|
"parameters": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"name": {"type": "string", "description": "kurzer kebab-case-Name, a-z 0-9 - _"},
|
||||||
|
"fires_at": {
|
||||||
|
"type": "string",
|
||||||
|
"description": (
|
||||||
|
"Absoluter ISO-Timestamp UTC, z.B. '2026-05-12T14:30:00Z'. "
|
||||||
|
"Berechne aus relativer Angabe ('in 10min') selbst — die "
|
||||||
|
"aktuelle Zeit findest du im System-Prompt nicht, also nutze "
|
||||||
|
"Bash: `date -u -d '+10 minutes' --iso-8601=seconds`."
|
||||||
|
),
|
||||||
|
},
|
||||||
|
"message": {"type": "string", "description": "Was soll bei der Erinnerung gesagt werden"},
|
||||||
|
},
|
||||||
|
"required": ["name", "fires_at", "message"],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": "trigger_watcher",
|
||||||
|
"description": (
|
||||||
|
"Lege einen Watcher-Trigger an — pollt alle paar Minuten eine Condition, "
|
||||||
|
"feuert wenn sie wahr wird (mit Throttle damit's nicht spammt). "
|
||||||
|
"Use-Case: 'sag bescheid wenn Disk unter 5GB', 'pingt mich wenn um 8 Uhr'. "
|
||||||
|
"Welche Variablen verfuegbar sind und ihre Bedeutung steht im System-Prompt."
|
||||||
|
),
|
||||||
|
"parameters": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"name": {"type": "string", "description": "kurzer Name"},
|
||||||
|
"condition": {
|
||||||
|
"type": "string",
|
||||||
|
"description": (
|
||||||
|
"Boolescher Ausdruck mit den erlaubten Variablen, z.B. "
|
||||||
|
"'disk_free_gb < 5', 'hour_of_day == 8 and day_of_week == \"mon\"'. "
|
||||||
|
"Operatoren: < > <= >= == != and or not"
|
||||||
|
),
|
||||||
|
},
|
||||||
|
"message": {"type": "string", "description": "Was soll bei Erfuellung gesagt werden"},
|
||||||
|
"check_interval_sec": {
|
||||||
|
"type": "integer",
|
||||||
|
"description": "Wie oft Condition pruefen (Default 300 = alle 5min, min 30)",
|
||||||
|
},
|
||||||
|
"throttle_sec": {
|
||||||
|
"type": "integer",
|
||||||
|
"description": "Mindestabstand zwischen 2 Feuerungen (Default 3600 = max 1x/h)",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"required": ["name", "condition", "message"],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": "trigger_cancel",
|
||||||
|
"description": "Loescht einen Trigger (Timer abbrechen oder Watcher entfernen).",
|
||||||
|
"parameters": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {"name": {"type": "string"}},
|
||||||
|
"required": ["name"],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": "trigger_list",
|
||||||
|
"description": "Zeigt alle Trigger (active + inaktiv). Selten noetig — Stefan sieht sie im Diagnostic.",
|
||||||
|
"parameters": {"type": "object", "properties": {}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": "request_location_tracking",
|
||||||
|
"description": (
|
||||||
|
"Bittet die App, das kontinuierliche GPS-Tracking zu aktivieren oder zu "
|
||||||
|
"deaktivieren. Default ist AUS (Akku-Schutz). Nutze das wenn du einen "
|
||||||
|
"GPS-basierten Watcher anlegst (z.B. `near(...)`), sonst hat die App "
|
||||||
|
"veraltete Position und der Watcher feuert nie. Auch wieder ausschalten "
|
||||||
|
"wenn der letzte GPS-Watcher geloescht wurde."
|
||||||
|
),
|
||||||
|
"parameters": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"on": {"type": "boolean", "description": "true = Tracking an, false = aus"},
|
||||||
|
"reason": {"type": "string", "description": "Kurzer Grund (wird in App-Notification angezeigt)"},
|
||||||
|
},
|
||||||
|
"required": ["on"],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def _skill_to_tool(s: dict) -> dict:
|
||||||
|
"""Mappt einen Skill auf ein OpenAI-Function-Tool."""
|
||||||
|
args = s.get("args") or []
|
||||||
|
props = {}
|
||||||
|
required = []
|
||||||
|
for a in args:
|
||||||
|
if not isinstance(a, dict):
|
||||||
|
continue
|
||||||
|
name = a.get("name") or ""
|
||||||
|
if not name:
|
||||||
|
continue
|
||||||
|
props[name] = {
|
||||||
|
"type": a.get("type", "string"),
|
||||||
|
"description": a.get("description", ""),
|
||||||
|
}
|
||||||
|
if a.get("required"):
|
||||||
|
required.append(name)
|
||||||
|
return {
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": f"run_{s['name']}",
|
||||||
|
"description": s.get("description", "(ohne Beschreibung)"),
|
||||||
|
"parameters": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": props,
|
||||||
|
"required": required,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class Agent:
|
||||||
|
def __init__(self, store: VectorStore, embedder: Embedder,
|
||||||
|
conversation: Conversation, proxy: ProxyClient,
|
||||||
|
cold_k: int = 5):
|
||||||
|
self.store = store
|
||||||
|
self.embedder = embedder
|
||||||
|
self.conversation = conversation
|
||||||
|
self.proxy = proxy
|
||||||
|
self.cold_k = cold_k
|
||||||
|
# Side-Channel-Events die im Turn entstehen (z.B. skill_create).
|
||||||
|
# Werden vom /chat-Endpoint in der Response mitgeschickt, damit
|
||||||
|
# Stefan in der App und Diagnostic eine sichtbare Bubble bekommt.
|
||||||
|
self._pending_events: list[dict] = []
|
||||||
|
|
||||||
|
def pop_events(self) -> list[dict]:
|
||||||
|
"""Holt die Events des letzten chat()-Calls und leert die Liste."""
|
||||||
|
events = self._pending_events
|
||||||
|
self._pending_events = []
|
||||||
|
return events
|
||||||
|
|
||||||
|
# ── Hauptpfad: ein User-Turn → Tool-Loop → finaler Reply ──
|
||||||
|
|
||||||
|
MAX_TOOL_ITERATIONS = 8 # Schutz vor Endlos-Loops
|
||||||
|
|
||||||
|
def chat(self, user_message: str, source: str = "") -> str:
|
||||||
|
user_message = (user_message or "").strip()
|
||||||
|
if not user_message:
|
||||||
|
raise ValueError("Leere Nachricht")
|
||||||
|
|
||||||
|
# Events vom letzten Turn weglassen
|
||||||
|
self._pending_events = []
|
||||||
|
|
||||||
|
# 1. User-Turn an die Konversation
|
||||||
|
self.conversation.add("user", user_message, source=source)
|
||||||
|
|
||||||
|
# 2. Hot Memory (alle pinned Punkte)
|
||||||
|
hot = self.store.list_pinned()
|
||||||
|
|
||||||
|
# 3. Cold Memory (Top-K semantic)
|
||||||
|
try:
|
||||||
|
qvec = self.embedder.embed(user_message)
|
||||||
|
cold = self.store.search(qvec, k=self.cold_k, exclude_pinned=True)
|
||||||
|
except Exception as exc:
|
||||||
|
logger.warning("Cold-Search fehlgeschlagen: %s", exc)
|
||||||
|
cold = []
|
||||||
|
|
||||||
|
# 4. Aktive Skills holen + Tool-Liste bauen
|
||||||
|
all_skills = skills_mod.list_skills(active_only=False)
|
||||||
|
active_skills = [s for s in all_skills if s.get("active", True)]
|
||||||
|
tools = list(META_TOOLS) + [_skill_to_tool(s) for s in active_skills]
|
||||||
|
|
||||||
|
# Trigger-Liste + Variablen-Info fuer den System-Prompt
|
||||||
|
all_triggers = triggers_mod.list_triggers(active_only=False)
|
||||||
|
condition_vars = watcher_mod.describe_variables()
|
||||||
|
condition_funcs = watcher_mod.describe_functions()
|
||||||
|
|
||||||
|
# 5. System-Prompt + Window-Messages
|
||||||
|
system_prompt = build_system_prompt(hot, cold, skills=all_skills,
|
||||||
|
triggers=all_triggers,
|
||||||
|
condition_vars=condition_vars,
|
||||||
|
condition_funcs=condition_funcs)
|
||||||
|
messages = [ProxyMessage(role="system", content=system_prompt)]
|
||||||
|
for t in self.conversation.window():
|
||||||
|
messages.append(ProxyMessage(role=t.role, content=t.content))
|
||||||
|
|
||||||
|
logger.info("chat: pinned=%d cold=%d skills=%d/%d window=%d prompt_chars=%d",
|
||||||
|
len(hot), len(cold), len(active_skills), len(all_skills),
|
||||||
|
len(self.conversation.window()), len(system_prompt))
|
||||||
|
|
||||||
|
# 6. Tool-Use-Loop
|
||||||
|
final_reply = ""
|
||||||
|
for iteration in range(self.MAX_TOOL_ITERATIONS):
|
||||||
|
result = self.proxy.chat_full(messages, tools=tools)
|
||||||
|
if result.tool_calls:
|
||||||
|
# Assistant-Turn mit tool_calls in messages anhaengen (nicht in Conversation!)
|
||||||
|
messages.append(ProxyMessage(
|
||||||
|
role="assistant",
|
||||||
|
content=result.content or None,
|
||||||
|
tool_calls=[{
|
||||||
|
"id": tc["id"], "type": "function",
|
||||||
|
"function": {"name": tc["name"], "arguments": json.dumps(tc["arguments"])},
|
||||||
|
} for tc in result.tool_calls],
|
||||||
|
))
|
||||||
|
# Tools ausfuehren + Ergebnis als role=tool zurueck
|
||||||
|
for tc in result.tool_calls:
|
||||||
|
tool_result = self._dispatch_tool(tc["name"], tc["arguments"])
|
||||||
|
messages.append(ProxyMessage(
|
||||||
|
role="tool",
|
||||||
|
tool_call_id=tc["id"],
|
||||||
|
name=tc["name"],
|
||||||
|
content=tool_result[:8000],
|
||||||
|
))
|
||||||
|
continue # next iteration mit Tool-Results
|
||||||
|
# Kein Tool-Call mehr → final reply
|
||||||
|
final_reply = (result.content or "").strip()
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
# Loop-Limit erreicht
|
||||||
|
final_reply = "[Tool-Loop-Limit erreicht — ARIA hat zu viele Tool-Calls gemacht ohne fertig zu werden]"
|
||||||
|
logger.warning("Tool-Loop hit MAX_TOOL_ITERATIONS=%d", self.MAX_TOOL_ITERATIONS)
|
||||||
|
|
||||||
|
if not final_reply:
|
||||||
|
raise RuntimeError("Leerer Reply vom Proxy")
|
||||||
|
|
||||||
|
# 7. Assistant-Turn (final reply) in die Conversation
|
||||||
|
self.conversation.add("assistant", final_reply)
|
||||||
|
return final_reply
|
||||||
|
|
||||||
|
# ── Tool-Dispatcher ───────────────────────────────────────
|
||||||
|
|
||||||
|
def _dispatch_tool(self, name: str, arguments: dict) -> str:
|
||||||
|
"""Fuehrt einen Tool-Call aus und gibt ein kurzes Text-Resultat zurueck.
|
||||||
|
Niemals werfen — Fehler werden als Text-Resultat reportet damit Claude
|
||||||
|
weitermachen kann."""
|
||||||
|
try:
|
||||||
|
if name == "skill_create":
|
||||||
|
# ARIA-Skills sind immer Python — execution ist nicht mehr im Schema
|
||||||
|
manifest = skills_mod.create_skill(
|
||||||
|
name=arguments["name"],
|
||||||
|
description=arguments["description"],
|
||||||
|
execution="local-venv",
|
||||||
|
entry_code=arguments["entry_code"],
|
||||||
|
readme=arguments.get("readme", ""),
|
||||||
|
args=arguments.get("args", []),
|
||||||
|
pip_packages=arguments.get("pip_packages", []),
|
||||||
|
author="aria",
|
||||||
|
)
|
||||||
|
# Side-Channel-Event: Stefan soll sehen wenn ARIA was anlegt
|
||||||
|
self._pending_events.append({
|
||||||
|
"type": "skill_created",
|
||||||
|
"skill": {
|
||||||
|
"name": manifest["name"],
|
||||||
|
"description": manifest.get("description", ""),
|
||||||
|
"execution": manifest.get("execution", ""),
|
||||||
|
"active": manifest.get("active", True),
|
||||||
|
"setup_error": manifest.get("setup_error"),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
return f"OK — Skill '{manifest['name']}' erstellt (active={manifest['active']})."
|
||||||
|
if name == "skill_list":
|
||||||
|
items = skills_mod.list_skills(active_only=False)
|
||||||
|
if not items:
|
||||||
|
return "(keine Skills vorhanden)"
|
||||||
|
return "\n".join(
|
||||||
|
f"- {s['name']} ({s['execution']}) {'aktiv' if s.get('active', True) else 'DEAKTIVIERT'}: {s.get('description', '')}"
|
||||||
|
for s in items
|
||||||
|
)
|
||||||
|
if name.startswith("run_"):
|
||||||
|
skill_name = name[len("run_"):]
|
||||||
|
res = skills_mod.run_skill(skill_name, args=arguments)
|
||||||
|
snippet = (res.get("stdout") or "")[:2000] or "(kein stdout)"
|
||||||
|
err = (res.get("stderr") or "")[:500]
|
||||||
|
marker = "OK" if res["ok"] else f"FEHLER (exit={res['exit_code']})"
|
||||||
|
out = f"{marker} · {res['duration_sec']}s\nstdout:\n{snippet}"
|
||||||
|
if err:
|
||||||
|
out += f"\nstderr:\n{err}"
|
||||||
|
return out
|
||||||
|
if name == "trigger_timer":
|
||||||
|
t = triggers_mod.create_timer(
|
||||||
|
name=arguments["name"],
|
||||||
|
fires_at_iso=arguments["fires_at"],
|
||||||
|
message=arguments["message"],
|
||||||
|
author="aria",
|
||||||
|
)
|
||||||
|
self._pending_events.append({
|
||||||
|
"type": "trigger_created",
|
||||||
|
"trigger": {"name": t["name"], "type": "timer",
|
||||||
|
"fires_at": t["fires_at"], "message": t["message"]},
|
||||||
|
})
|
||||||
|
return f"OK — Timer '{t['name']}' angelegt, feuert um {t['fires_at']}."
|
||||||
|
if name == "trigger_watcher":
|
||||||
|
t = triggers_mod.create_watcher(
|
||||||
|
name=arguments["name"],
|
||||||
|
condition=arguments["condition"],
|
||||||
|
message=arguments["message"],
|
||||||
|
check_interval_sec=int(arguments.get("check_interval_sec", 300)),
|
||||||
|
throttle_sec=int(arguments.get("throttle_sec", 3600)),
|
||||||
|
author="aria",
|
||||||
|
)
|
||||||
|
self._pending_events.append({
|
||||||
|
"type": "trigger_created",
|
||||||
|
"trigger": {"name": t["name"], "type": "watcher",
|
||||||
|
"condition": t["condition"], "message": t["message"]},
|
||||||
|
})
|
||||||
|
return f"OK — Watcher '{t['name']}' angelegt: feuert wenn '{t['condition']}'."
|
||||||
|
if name == "trigger_cancel":
|
||||||
|
try:
|
||||||
|
triggers_mod.delete(arguments["name"])
|
||||||
|
return f"OK — Trigger '{arguments['name']}' geloescht."
|
||||||
|
except ValueError as e:
|
||||||
|
return f"FEHLER: {e}"
|
||||||
|
if name == "request_location_tracking":
|
||||||
|
on = bool(arguments.get("on", False))
|
||||||
|
reason = (arguments.get("reason") or "").strip()
|
||||||
|
self._pending_events.append({
|
||||||
|
"type": "location_tracking",
|
||||||
|
"on": on,
|
||||||
|
"reason": reason,
|
||||||
|
})
|
||||||
|
return f"OK — Tracking-Request gesendet (on={on}). App wird in Kuerze umschalten."
|
||||||
|
if name == "trigger_list":
|
||||||
|
items = triggers_mod.list_triggers(active_only=False)
|
||||||
|
if not items:
|
||||||
|
return "(keine Trigger vorhanden)"
|
||||||
|
lines = []
|
||||||
|
for t in items:
|
||||||
|
state = "aktiv" if t.get("active", True) else "DEAKTIVIERT"
|
||||||
|
if t["type"] == "timer":
|
||||||
|
lines.append(f"- {t['name']} (timer, {state}): feuert {t.get('fires_at')} — \"{t.get('message','')[:50]}\"")
|
||||||
|
elif t["type"] == "watcher":
|
||||||
|
lines.append(f"- {t['name']} (watcher, {state}): cond=\"{t.get('condition')}\", throttle={t.get('throttle_sec')}s")
|
||||||
|
else:
|
||||||
|
lines.append(f"- {t['name']} ({t['type']}, {state})")
|
||||||
|
return "\n".join(lines)
|
||||||
|
return f"Unbekanntes Tool: {name}"
|
||||||
|
except Exception as exc:
|
||||||
|
logger.exception("Tool '%s' fehlgeschlagen", name)
|
||||||
|
return f"FEHLER: {exc}"
|
||||||
|
|
||||||
|
# ── Memory-Destillat (laeuft im Hintergrund) ──────────────
|
||||||
|
|
||||||
|
def distill_old_turns(self) -> dict:
|
||||||
|
"""Nimmt die N aeltesten Turns und destilliert sie zu fact-Memories.
|
||||||
|
|
||||||
|
Pattern: separater Claude-Call, lieferte 3-7 JSON-Facts, die als
|
||||||
|
type=fact, source=distilled gespeichert werden. Erfolgreiches
|
||||||
|
Schreiben → Turns aus dem Window entfernen.
|
||||||
|
"""
|
||||||
|
if not self.conversation.needs_distill():
|
||||||
|
return {"distilled": 0, "reason": "kein Bedarf"}
|
||||||
|
|
||||||
|
old_turns = self.conversation.take_oldest_for_distill()
|
||||||
|
if not old_turns:
|
||||||
|
return {"distilled": 0, "reason": "keine alten Turns"}
|
||||||
|
|
||||||
|
# Konversation als Klartext bauen
|
||||||
|
transcript = "\n".join(
|
||||||
|
f"[{t.role.upper()}] {t.content}" for t in old_turns
|
||||||
|
)[:30000] # Cap auf 30k Zeichen damit der Prompt nicht explodiert
|
||||||
|
|
||||||
|
system = (
|
||||||
|
"Du extrahierst aus einer Konversation zwischen Stefan und ARIA die "
|
||||||
|
"wichtigsten dauerhaft relevanten Fakten — keine Smalltalk-Details, "
|
||||||
|
"keine flüchtigen Zustände. Antworte AUSSCHLIESSLICH mit gültigem JSON "
|
||||||
|
"im Format: {\"facts\": [{\"title\": \"kurz, max 80 Zeichen\", "
|
||||||
|
"\"content\": \"1-3 Sätze, konkret und nützlich\"}]}. "
|
||||||
|
"Mindestens 0, höchstens 7 Facts. Wenn nichts wichtig genug ist: leeres Array."
|
||||||
|
)
|
||||||
|
user = (
|
||||||
|
"Hier ist der Konversations-Abschnitt:\n\n"
|
||||||
|
f"{transcript}\n\n"
|
||||||
|
"Extrahiere die wichtigsten Fakten als JSON."
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
raw = self.proxy.chat([
|
||||||
|
ProxyMessage(role="system", content=system),
|
||||||
|
ProxyMessage(role="user", content=user),
|
||||||
|
])
|
||||||
|
except Exception as exc:
|
||||||
|
logger.warning("Destillat-Call fehlgeschlagen: %s — Turns bleiben", exc)
|
||||||
|
return {"distilled": 0, "error": str(exc)}
|
||||||
|
|
||||||
|
facts = self._parse_facts(raw)
|
||||||
|
if facts is None:
|
||||||
|
logger.warning("Destillat lieferte unparsbares JSON: %r", raw[:200])
|
||||||
|
return {"distilled": 0, "error": "JSON parse failed", "raw": raw[:200]}
|
||||||
|
|
||||||
|
# Facts in die DB schreiben
|
||||||
|
created = 0
|
||||||
|
for f in facts:
|
||||||
|
content = (f.get("content") or "").strip()
|
||||||
|
if not content:
|
||||||
|
continue
|
||||||
|
title = (f.get("title") or "").strip()[:120] or "Fakt"
|
||||||
|
point = MemoryPoint(
|
||||||
|
id="",
|
||||||
|
type="fact",
|
||||||
|
title=title,
|
||||||
|
content=content,
|
||||||
|
pinned=False,
|
||||||
|
category="konversation",
|
||||||
|
source="distilled",
|
||||||
|
tags=[],
|
||||||
|
)
|
||||||
|
try:
|
||||||
|
vec = self.embedder.embed(content)
|
||||||
|
self.store.upsert(point, vec)
|
||||||
|
created += 1
|
||||||
|
except Exception as exc:
|
||||||
|
logger.warning("Fakt schreiben fehlgeschlagen: %s", exc)
|
||||||
|
|
||||||
|
# Erst nach erfolgreichem Schreiben aus dem Window entfernen
|
||||||
|
last_ts = old_turns[-1].ts
|
||||||
|
self.conversation.commit_distill(last_ts)
|
||||||
|
logger.info("Destillat: %d Facts geschrieben, %d Turns aus Window entfernt",
|
||||||
|
created, len(old_turns))
|
||||||
|
return {"distilled": created, "removed_turns": len(old_turns)}
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _parse_facts(raw: str) -> Optional[list]:
|
||||||
|
if not raw:
|
||||||
|
return None
|
||||||
|
# JSON robust extrahieren — Claude kann Code-Fences setzen
|
||||||
|
cleaned = raw.strip()
|
||||||
|
if cleaned.startswith("```"):
|
||||||
|
# ```json oder ``` rauswerfen
|
||||||
|
cleaned = cleaned.split("\n", 1)[1] if "\n" in cleaned else cleaned[3:]
|
||||||
|
if cleaned.endswith("```"):
|
||||||
|
cleaned = cleaned[: -3]
|
||||||
|
cleaned = cleaned.strip()
|
||||||
|
# Erstes { bis letztes }
|
||||||
|
start = cleaned.find("{")
|
||||||
|
end = cleaned.rfind("}")
|
||||||
|
if start == -1 or end == -1 or end < start:
|
||||||
|
return None
|
||||||
|
try:
|
||||||
|
obj = json.loads(cleaned[start: end + 1])
|
||||||
|
except Exception:
|
||||||
|
return None
|
||||||
|
facts = obj.get("facts") if isinstance(obj, dict) else None
|
||||||
|
if not isinstance(facts, list):
|
||||||
|
return None
|
||||||
|
return facts
|
||||||
@@ -0,0 +1,169 @@
|
|||||||
|
"""
|
||||||
|
Background-Loop fuer Triggers.
|
||||||
|
|
||||||
|
Laeuft alle TICK_SEC Sekunden in einem asyncio Task, geht ueber alle
|
||||||
|
active Triggers und entscheidet ob sie feuern muessen.
|
||||||
|
|
||||||
|
Feuern bedeutet:
|
||||||
|
1. Trigger-Manifest update (fire_count++, last_fired_at, ggf. deaktivieren)
|
||||||
|
2. Log-Eintrag schreiben
|
||||||
|
3. agent.chat() mit einem system-Praefix aufrufen (NICHT als 'user'!)
|
||||||
|
→ ARIA bekommt das wie eine Push-Nachricht und kann antworten
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
import triggers as triggers_mod
|
||||||
|
import watcher as watcher_mod
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
TICK_SEC = 30
|
||||||
|
|
||||||
|
|
||||||
|
def _now_iso() -> str:
|
||||||
|
return datetime.now(timezone.utc).isoformat()
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_iso(s: str) -> Optional[datetime]:
|
||||||
|
if not s:
|
||||||
|
return None
|
||||||
|
try:
|
||||||
|
return datetime.fromisoformat(s.replace("Z", "+00:00"))
|
||||||
|
except Exception:
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def _should_fire(trigger: dict, vars_: dict, now: datetime) -> bool:
    """Decide whether *trigger* is due to fire right now.

    ``vars_`` is the variable snapshot used by watcher conditions; ``now``
    must be timezone-aware UTC.  Caller contract: stamp ``last_checked_at``
    only AFTER this check — a freshly written stamp makes the interval gate
    below return False.
    """
    if not trigger.get("active", True):
        return False
    t = trigger.get("type", "")

    if t == "timer":
        fires_at = _parse_iso(trigger.get("fires_at", ""))
        if not fires_at:
            return False
        if fires_at.tzinfo is None:
            # Naive timestamps are interpreted as UTC.
            fires_at = fires_at.replace(tzinfo=timezone.utc)
        return now >= fires_at

    if t == "watcher":
        # Respect the check interval (otherwise we poll too eagerly).
        check_interval = int(trigger.get("check_interval_sec", 300))
        last_checked = _parse_iso(trigger.get("last_checked_at", ""))
        if last_checked:
            if last_checked.tzinfo is None:
                last_checked = last_checked.replace(tzinfo=timezone.utc)
            if (now - last_checked).total_seconds() < check_interval:
                return False
        # Throttle: only fire again once last_fired is long enough ago.
        last_fired = _parse_iso(trigger.get("last_fired_at", ""))
        throttle = int(trigger.get("throttle_sec", 3600))
        if last_fired:
            if last_fired.tzinfo is None:
                last_fired = last_fired.replace(tzinfo=timezone.utc)
            if (now - last_fired).total_seconds() < throttle:
                return False
        # Evaluate the condition expression; an empty condition never fires.
        cond = (trigger.get("condition") or "").strip()
        if not cond:
            return False
        try:
            return watcher_mod.evaluate(cond, vars_)
        except Exception as e:
            # A broken expression disables this firing but not the loop.
            logger.warning("Trigger %s: Condition '%s' fehlerhaft: %s",
                           trigger.get("name"), cond, e)
            return False

    if t == "cron":
        # TODO: later, if someone feels like writing a cron parser
        return False

    # Unknown trigger type — never fire.
    return False
|
||||||
|
|
||||||
|
|
||||||
|
async def _fire(trigger: dict, agent_factory) -> None:
    """Fire one trigger: update its manifest, log the event, then push a
    system-style message through ARIA via ``agent_factory().chat(...)``.

    ``agent.chat`` is a plain synchronous call, so it is moved off the event
    loop with ``asyncio.to_thread`` — otherwise this task would block the
    loop and stall every other trigger tick, defeating the reason it is
    spawned as its own task in the first place.
    """
    name = trigger.get("name", "?")
    message = trigger.get("message") or "(ohne Nachricht)"
    ttype = trigger.get("type", "?")

    # Update the manifest first (fire_count++, last_fired_at, maybe deactivate).
    try:
        triggers_mod.mark_fired(name)
    except Exception as e:
        logger.warning("mark_fired %s: %s", name, e)

    # Log entry
    triggers_mod.append_log(name, {"event": "fired", "type": ttype, "message": message})

    # System message to ARIA: framed as a hint, not as a user turn.
    prompt = (
        f"[Trigger ausgelöst: '{name}', Typ: {ttype}] "
        f"Geplante Nachricht: \"{message}\". "
        f"Sage Stefan jetzt diese Information, in deinem Stil. "
        f"Wenn der Trigger ein Watcher war (Bedingung wurde erfuellt), "
        f"erwaehne kurz worum es geht. Antworte direkt, keine Rueckfrage."
    )

    try:
        agent = agent_factory()
        # chat() blocks; run it in a worker thread so the loop stays responsive.
        reply = await asyncio.to_thread(agent.chat, prompt, source="trigger")
        logger.info("[trigger] %s gefeuert → ARIA-Reply: %s", name, reply[:80])
        triggers_mod.append_log(name, {"event": "reply", "text": reply[:500]})
    except Exception as e:
        logger.exception("Trigger %s feuern fehlgeschlagen: %s", name, e)
        triggers_mod.append_log(name, {"event": "error", "error": str(e)[:300]})
|
||||||
|
|
||||||
|
|
||||||
|
async def _tick(agent_factory) -> None:
    """One check pass: walk all active triggers and fire those that are due.

    Bug fix vs. the original: ``last_checked_at`` used to be stamped on every
    watcher BEFORE ``_should_fire`` ran, so the check-interval gate in
    ``_should_fire`` always saw a fresh timestamp and watchers could never
    fire.  The stamp now happens after the due-check for each trigger, which
    is the contract ``_should_fire`` expects.
    """
    try:
        all_triggers = triggers_mod.list_triggers(active_only=True)
    except Exception as e:
        logger.warning("triggers.list: %s", e)
        return
    if not all_triggers:
        return
    now = datetime.now(timezone.utc)
    # Collect variables once per tick (not per trigger — disk stat is costly).
    try:
        vars_ = watcher_mod.collect_variables()
    except Exception as e:
        logger.warning("collect_variables: %s", e)
        vars_ = {}

    for trigger in all_triggers:
        try:
            due = _should_fire(trigger, vars_, now)
        except Exception as e:
            logger.warning("Trigger-Check %s: %s", trigger.get("name"), e)
            continue
        # Stamp watchers AFTER the due-check so the interval gate in
        # _should_fire compares against the PREVIOUS check, not this one.
        if trigger.get("type") == "watcher":
            try:
                trigger["last_checked_at"] = _now_iso()
                triggers_mod.write(trigger["name"], trigger)
            except Exception:
                # Best-effort persistence; a failed stamp must not stop firing.
                pass
        if due:
            # Fire as its own task — if ARIA answers slowly, the next tick
            # must not be blocked.
            asyncio.create_task(_fire(trigger, agent_factory))
|
||||||
|
|
||||||
|
|
||||||
|
async def run_loop(agent_factory) -> None:
    """Endless loop — started and stopped by the main lifespan.

    Runs ``_tick`` every TICK_SEC seconds; a failing tick is logged and the
    loop keeps going.  Cancellation (from lifespan shutdown) propagates out
    of ``asyncio.sleep`` and ends the loop.
    """
    logger.info("Trigger-Loop gestartet (TICK_SEC=%d)", TICK_SEC)
    while True:
        try:
            await _tick(agent_factory)
        except Exception as e:
            logger.exception("Tick-Fehler: %s", e)
        await asyncio.sleep(TICK_SEC)
|
||||||
@@ -0,0 +1,130 @@
|
|||||||
|
"""
|
||||||
|
Conversation-State — ein einziger Rolling-Window-State fuer ARIAs
|
||||||
|
laufendes Gespraech mit Stefan.
|
||||||
|
|
||||||
|
Stefan-Entscheidung: KEINE Sessions, KEIN Multi-Thread. EIN Strang,
|
||||||
|
intern rollend. Was rausfaellt, wird ggf. destilliert und landet
|
||||||
|
als type=fact Memory in der Vector-DB.
|
||||||
|
|
||||||
|
Persistenz: append-only JSONL unter /data/conversation.jsonl.
Beim Restart wird die Datei eingelesen und anhand der Distill-Marker
auf die noch nicht destillierten Turns reduziert.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from dataclasses import dataclass, field, asdict
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
CONVERSATION_FILE = Path(os.environ.get("CONVERSATION_FILE", "/data/conversation.jsonl"))
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class Turn:
    """A single utterance in the rolling conversation window."""
    role: str  # "user" | "assistant"
    content: str
    # Timestamp at creation, ISO-8601 UTC; also used as the distill cut-off key.
    ts: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
    source: str = ""  # "app" / "diagnostic" / "stt" — optional
|
||||||
|
|
||||||
|
|
||||||
|
class Conversation:
    """In-memory rolling window with JSONL persistence.

    Turns are appended to CONVERSATION_FILE as they happen; ``op: distill``
    marker records tell ``_load`` which prefix of the history has already
    been distilled into facts and can be dropped on restart.
    """

    def __init__(self, max_window: int = 50, distill_threshold: int = 60,
                 distill_count: int = 30):
        # max_window: how many turns go into the LLM prompt.
        # distill_threshold: total turns above which distillation is due.
        # distill_count: how many oldest turns one distill pass consumes.
        self.max_window = max_window
        self.distill_threshold = distill_threshold
        self.distill_count = distill_count
        self.turns: List[Turn] = []
        self._load()

    def _load(self):
        """Rebuild ``self.turns`` from the JSONL file, honoring distill markers."""
        if not CONVERSATION_FILE.exists():
            return
        try:
            lines = CONVERSATION_FILE.read_text(encoding="utf-8").splitlines()
        except Exception as exc:
            logger.warning("Konversation laden fehlgeschlagen: %s", exc)
            return
        loaded: List[Turn] = []
        for line in lines:
            line = line.strip()
            if not line:
                continue
            try:
                obj = json.loads(line)
            except Exception:
                # Skip corrupt lines rather than losing the whole history.
                continue
            if obj.get("op") == "distill":
                # Marker: everything up to this ts has been distilled.
                drop_until_ts = obj.get("ts", "")
                if drop_until_ts:
                    loaded = [t for t in loaded if t.ts > drop_until_ts]
                continue
            role = obj.get("role")
            content = obj.get("content")
            if role in ("user", "assistant") and isinstance(content, str):
                loaded.append(Turn(role=role, content=content,
                                   ts=obj.get("ts", ""),
                                   source=obj.get("source", "")))
        self.turns = loaded
        logger.info("Konversation geladen: %d Turns aus %s", len(self.turns), CONVERSATION_FILE)

    def _append_to_file(self, record: dict):
        """Append one JSON record to the persistence file; failures only log."""
        try:
            CONVERSATION_FILE.parent.mkdir(parents=True, exist_ok=True)
            with CONVERSATION_FILE.open("a", encoding="utf-8") as f:
                f.write(json.dumps(record, ensure_ascii=False) + "\n")
        except Exception as exc:
            logger.warning("Konversation persist fehlgeschlagen: %s", exc)

    def add(self, role: str, content: str, source: str = "") -> Turn:
        """Append a turn to the window and persist it. Returns the new Turn."""
        t = Turn(role=role, content=content, source=source)
        self.turns.append(t)
        self._append_to_file({
            "ts": t.ts, "role": t.role, "content": t.content, "source": t.source,
        })
        return t

    def window(self) -> List[Turn]:
        """The last max_window turns — these go into the LLM prompt."""
        return self.turns[-self.max_window:]

    def needs_distill(self) -> bool:
        """True once the history outgrows distill_threshold."""
        return len(self.turns) > self.distill_threshold

    def take_oldest_for_distill(self) -> List[Turn]:
        """Return the N oldest turns — input for the distillation call.

        Does NOT remove them — commit_distill does that after a successful call."""
        return self.turns[: self.distill_count]

    def commit_distill(self, last_distilled_ts: str):
        """Write a distill marker and drop the distilled turns from memory."""
        self._append_to_file({"op": "distill", "ts": last_distilled_ts})
        self.turns = [t for t in self.turns if t.ts > last_distilled_ts]
        logger.info("Distill commit bei ts=%s — Window jetzt %d Turns", last_distilled_ts, len(self.turns))

    def reset(self):
        """Hard reset — use with care (diagnostic button). Deletes the file too."""
        try:
            if CONVERSATION_FILE.exists():
                CONVERSATION_FILE.unlink()
        except Exception:
            pass
        self.turns = []
        logger.warning("Konversation komplett zurueckgesetzt")

    def stats(self) -> dict:
        """Small status dict for the /conversation/stats endpoint."""
        return {
            "turns": len(self.turns),
            "max_window": self.max_window,
            "distill_threshold": self.distill_threshold,
            "needs_distill": self.needs_distill(),
        }
|
||||||
@@ -0,0 +1,652 @@
|
|||||||
|
"""
|
||||||
|
ARIA Brain — FastAPI-Einstieg.
|
||||||
|
|
||||||
|
Phase B Punkt 1: nur Skeleton.
|
||||||
|
- /health → Liveness
|
||||||
|
- /memory/list → alle Punkte (gefiltert)
|
||||||
|
- /memory/pinned → Hot Memory
|
||||||
|
- /memory/search?q=...&k=5 → semantische Suche
|
||||||
|
- /memory/save → neuen Punkt anlegen
|
||||||
|
- /memory/update/{id} → Punkt aendern (re-embed wenn content geaendert)
|
||||||
|
- /memory/delete/{id} → Punkt loeschen
|
||||||
|
- /memory/stats → Anzahl Punkte pro Type
|
||||||
|
|
||||||
|
/chat (Conversation-Loop) und /skills/* kommen in spaeteren Phasen.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
from contextlib import asynccontextmanager
|
||||||
|
|
||||||
|
from fastapi import FastAPI, HTTPException, BackgroundTasks, Request
|
||||||
|
from fastapi.responses import Response
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
from memory import Embedder, VectorStore, MemoryPoint
|
||||||
|
from conversation import Conversation
|
||||||
|
from proxy_client import ProxyClient
|
||||||
|
from agent import Agent
|
||||||
|
import skills as skills_mod
|
||||||
|
import metrics as metrics_mod
|
||||||
|
import triggers as triggers_mod
|
||||||
|
import watcher as watcher_mod
|
||||||
|
import background as background_mod
|
||||||
|
|
||||||
|
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s")
|
||||||
|
logger = logging.getLogger("aria-brain")
|
||||||
|
|
||||||
|
QDRANT_HOST = os.environ.get("QDRANT_HOST", "aria-qdrant")
|
||||||
|
QDRANT_PORT = int(os.environ.get("QDRANT_PORT", "6333"))
|
||||||
|
|
||||||
|
@asynccontextmanager
async def lifespan(app: FastAPI):
    """On brain start: launch the trigger background loop. On shutdown: stop it.

    Note: ``agent`` (the factory function, not an instance) is handed to the
    loop so the Agent is built lazily on first use.
    """
    task = asyncio.create_task(background_mod.run_loop(agent))
    logger.info("Lifespan: Trigger-Loop gestartet")
    try:
        yield
    finally:
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            # Expected on shutdown — the loop was cancelled deliberately.
            pass
        logger.info("Lifespan: Trigger-Loop gestoppt")
|
||||||
|
|
||||||
|
|
||||||
|
app = FastAPI(title="ARIA Brain", version="0.1.0", lifespan=lifespan)
|
||||||
|
|
||||||
|
_embedder: Optional[Embedder] = None
|
||||||
|
_store: Optional[VectorStore] = None
|
||||||
|
_conversation: Optional[Conversation] = None
|
||||||
|
_proxy: Optional[ProxyClient] = None
|
||||||
|
_agent: Optional[Agent] = None
|
||||||
|
|
||||||
|
|
||||||
|
def embedder() -> Embedder:
    """Lazily created process-wide Embedder singleton."""
    global _embedder
    if _embedder is None:
        _embedder = Embedder()
    return _embedder
|
||||||
|
|
||||||
|
|
||||||
|
def store() -> VectorStore:
    """Lazily created process-wide VectorStore singleton (Qdrant-backed)."""
    global _store
    if _store is None:
        _store = VectorStore(host=QDRANT_HOST, port=QDRANT_PORT)
    return _store
|
||||||
|
|
||||||
|
|
||||||
|
def conversation() -> Conversation:
    """Lazily created process-wide Conversation singleton."""
    global _conversation
    if _conversation is None:
        _conversation = Conversation()
    return _conversation
|
||||||
|
|
||||||
|
|
||||||
|
def proxy_client() -> ProxyClient:
    """Lazily created process-wide ProxyClient singleton."""
    global _proxy
    if _proxy is None:
        _proxy = ProxyClient()
    return _proxy
|
||||||
|
|
||||||
|
|
||||||
|
def agent() -> Agent:
    """Lazily created Agent singleton, wired to the other singletons."""
    global _agent
    if _agent is None:
        _agent = Agent(store(), embedder(), conversation(), proxy_client())
    return _agent
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Pydantic-Schemas ─────────────────────────────────────────────────
|
||||||
|
|
||||||
|
class MemoryIn(BaseModel):
    """Request body for creating a memory point via /memory/save."""
    type: str = Field(..., description="identity|rule|preference|tool|skill|fact|conversation|reminder")
    title: str
    content: str
    pinned: bool = False
    category: str = ""
    source: str = "manual"
    tags: List[str] = Field(default_factory=list)
    conversation_id: Optional[str] = None
|
||||||
|
|
||||||
|
|
||||||
|
class MemoryUpdate(BaseModel):
    """Partial update for /memory/update — only non-None fields are applied."""
    title: Optional[str] = None
    content: Optional[str] = None
    pinned: Optional[bool] = None
    category: Optional[str] = None
    tags: Optional[List[str]] = None
|
||||||
|
|
||||||
|
|
||||||
|
class MemoryOut(BaseModel):
    """Response shape for memory endpoints; mirrors MemoryPoint's fields."""
    id: str
    type: str
    title: str
    content: str
    pinned: bool
    category: str
    source: str
    tags: List[str]
    created_at: str
    updated_at: str
    conversation_id: Optional[str] = None
    # Only populated by /memory/search results.
    score: Optional[float] = None

    @classmethod
    def from_point(cls, p: MemoryPoint) -> "MemoryOut":
        """Build from a MemoryPoint — assumes its attributes match 1:1 (TODO confirm)."""
        return cls(**p.__dict__)
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Health ───────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@app.get("/health")
def health():
    """Liveness probe: reports the memory count, or 'degraded' when Qdrant
    cannot be reached."""
    qdrant = f"{QDRANT_HOST}:{QDRANT_PORT}"
    try:
        count = store().count()
    except Exception as exc:
        return {"status": "degraded", "error": str(exc), "qdrant": qdrant}
    return {"status": "ok", "memory_count": count, "qdrant": qdrant}
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Memory-Endpoints ─────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@app.get("/memory/stats")
def memory_stats():
    """Return total count, pinned count and a per-type histogram of all
    memory points."""
    # Local stdlib import keeps the file's top-of-file imports untouched.
    from collections import Counter

    points = store().list_all()
    # Counter replaces the original manual dict/loop bookkeeping.
    by_type = dict(Counter(p.type for p in points))
    pinned = sum(1 for p in points if p.pinned)
    return {"total": len(points), "pinned": pinned, "by_type": by_type}
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/memory/list", response_model=List[MemoryOut])
def memory_list(type: Optional[str] = None, limit: int = 200):
    """All memory points, optionally filtered by type, capped at *limit*."""
    s = store()
    if type:
        points = s.list_by_type(type, limit=limit)
    else:
        points = s.list_all(limit=limit)
    return [MemoryOut.from_point(p) for p in points]
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/memory/pinned", response_model=List[MemoryOut])
def memory_pinned():
    """All pinned (hot) memories."""
    return [MemoryOut.from_point(p) for p in store().list_pinned()]
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/memory/search", response_model=List[MemoryOut])
def memory_search(q: str, k: int = 5, type: Optional[str] = None, include_pinned: bool = False):
    """Semantic search: embed *q* and return the k nearest memory points.

    Pinned memories are excluded by default (they are always in the prompt
    anyway — presumably; confirm against the agent's prompt builder).
    """
    vec = embedder().embed(q)
    points = store().search(vec, k=k, type_filter=type, exclude_pinned=not include_pinned)
    return [MemoryOut.from_point(p) for p in points]
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/memory/save", response_model=MemoryOut)
def memory_save(body: MemoryIn):
    """Embed *body.content* and insert it as a new memory point."""
    s = store()
    vec = embedder().embed(body.content)
    point = MemoryPoint(
        id="",  # empty id — presumably the store assigns one; upsert returns it
        type=body.type,
        title=body.title,
        content=body.content,
        pinned=body.pinned,
        category=body.category,
        source=body.source,
        tags=body.tags,
        conversation_id=body.conversation_id,
    )
    pid = s.upsert(point, vec)
    # Re-read so the response carries store-generated fields (id, timestamps).
    saved = s.get(pid)
    return MemoryOut.from_point(saved)
|
||||||
|
|
||||||
|
|
||||||
|
@app.patch("/memory/update/{point_id}", response_model=MemoryOut)
def memory_update(point_id: str, body: MemoryUpdate):
    """Partially update a memory point; 404 if it does not exist.

    Re-embeds only when the content actually changed; otherwise just the
    payload is rewritten (vector untouched) with a fresh updated_at.
    Fixes vs. original: the inline ``__import__("datetime")`` hack is
    replaced by a proper local import, and the unused ``qm`` import is gone.
    """
    # Local stdlib import keeps the file's top-of-file imports untouched.
    from datetime import datetime, timezone

    s = store()
    existing = s.get(point_id)
    if not existing:
        raise HTTPException(404, f"Memory {point_id} nicht gefunden")

    content_changed = body.content is not None and body.content != existing.content
    if body.title is not None:
        existing.title = body.title
    if body.content is not None:
        existing.content = body.content
    if body.pinned is not None:
        existing.pinned = body.pinned
    if body.category is not None:
        existing.category = body.category
    if body.tags is not None:
        existing.tags = body.tags

    if content_changed:
        s.upsert(existing, embedder().embed(existing.content))
    else:
        # Leave the vector alone — only rewrite the payload.
        from memory.vector_store import COLLECTION
        s.client.set_payload(
            collection_name=COLLECTION,
            payload=existing.to_payload()
            | {"updated_at": datetime.now(timezone.utc).isoformat()},
            points=[point_id],
        )
    return MemoryOut.from_point(s.get(point_id))
|
||||||
|
|
||||||
|
|
||||||
|
@app.delete("/memory/delete/{point_id}")
def memory_delete(point_id: str):
    """Delete one memory point; 404 if it does not exist."""
    s = store()
    existing = s.get(point_id)
    if not existing:
        raise HTTPException(404, f"Memory {point_id} nicht gefunden")
    s.delete(point_id)
    return {"deleted": point_id}
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Migration aus brain-import/ ──────────────────────────────────────
|
||||||
|
|
||||||
|
IMPORT_DIR = os.environ.get("IMPORT_DIR", "/import")
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/memory/migrate")
def memory_migrate():
    """Read /import/*.md and write atomic memory points into the DB.

    Idempotent: on a re-run, points with the same migration_key are replaced."""
    from pathlib import Path
    from migration import run_migration
    s = store()
    e = embedder()
    result = run_migration(Path(IMPORT_DIR), s, e)
    return result
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/memory/import-files")
def memory_import_files():
    """List what is under /import/ — for the diagnostic UI."""
    from pathlib import Path
    d = Path(IMPORT_DIR)
    if not d.exists():
        return {"import_dir": str(d), "exists": False, "files": []}
    out = []
    for p in sorted(d.iterdir()):
        if p.is_file():
            try:
                out.append({"name": p.name, "size": p.stat().st_size})
            except Exception:
                # stat() can race with deletion; skip the file silently.
                pass
    return {"import_dir": str(d), "exists": True, "files": out}
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Bootstrap-Snapshot ───────────────────────────────────────────────
|
||||||
|
# "Bootstrap" = alle pinned Memories. Export/Import zum schnellen
|
||||||
|
# Wiederherstellen einer schlanken ARIA nach Wipe.
|
||||||
|
|
||||||
|
@app.get("/memory/export-bootstrap")
def memory_export_bootstrap():
    """Return all pinned memories as JSON — for a browser download.

    Fix vs. original: the inline ``__import__("datetime")`` hack is replaced
    by a proper local import.
    """
    # Local stdlib import keeps the file's top-of-file imports untouched.
    from datetime import datetime, timezone

    pinned = store().list_pinned()
    return {
        "version": 1,
        "exported_at": datetime.now(timezone.utc).isoformat(),
        "count": len(pinned),
        "memories": [
            {
                "type": p.type,
                "title": p.title,
                "content": p.content,
                "pinned": True,
                "category": p.category,
                "source": p.source,
                "tags": p.tags,
            }
            for p in pinned
        ],
    }
|
||||||
|
|
||||||
|
|
||||||
|
class BootstrapBundle(BaseModel):
    """Payload for /memory/import-bootstrap: a versioned list of memory dicts."""
    version: int = 1
    memories: List[dict]
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/memory/import-bootstrap")
def memory_import_bootstrap(body: BootstrapBundle):
    """Delete all pinned memories and import the ones from the bundle.

    Cold memory (unpinned) is left untouched.

    If the bundle has no memories: delete-only is NOT allowed — the caller
    should export first and then import.
    """
    if not body.memories:
        raise HTTPException(400, "Bundle hat keine memories — Abbruch zur Sicherheit")

    s = store()
    e = embedder()

    # Delete every currently pinned point.
    from qdrant_client.http import models as qm
    from memory.vector_store import COLLECTION
    s.client.delete(
        collection_name=COLLECTION,
        points_selector=qm.FilterSelector(filter=qm.Filter(must=[
            qm.FieldCondition(key="pinned", match=qm.MatchValue(value=True))
        ])),
    )

    # Insert the new points.
    created = 0
    for m in body.memories:
        content = (m.get("content") or "").strip()
        if not content:
            # Empty content cannot be embedded — skip.
            continue
        point = MemoryPoint(
            id="",
            type=m.get("type", "fact"),
            title=m.get("title", "(ohne Titel)"),
            content=content,
            pinned=True,
            category=m.get("category", ""),
            source=m.get("source", "bootstrap-import"),
            tags=list(m.get("tags", [])),
        )
        vec = e.embed(content)
        s.upsert(point, vec)
        created += 1

    return {"created": created, "deleted_previous_pinned": True}
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Conversation-Loop ──────────────────────────────────────────────
|
||||||
|
|
||||||
|
class ChatIn(BaseModel):
    """Request body for /chat."""
    message: str
    source: str = ""  # "app" / "diagnostic" / "stt" — optional
|
||||||
|
|
||||||
|
|
||||||
|
class ChatOut(BaseModel):
    """Response body for /chat."""
    reply: str
    turns: int  # current size of the conversation window
    distilling: bool  # True if a distill pass was queued in the background
    events: list = Field(default_factory=list)
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/chat", response_model=ChatOut)
def chat(body: ChatIn, background: BackgroundTasks):
    """Main path. The reply comes back synchronously; the memory distill
    pass runs in the background after the response has gone out.

    Errors: ValueError from the agent maps to 400, RuntimeError to 502.
    """
    a = agent()
    try:
        reply = a.chat(body.message, source=body.source)
    except ValueError as exc:
        raise HTTPException(400, str(exc))
    except RuntimeError as exc:
        logger.error("chat fehlgeschlagen: %s", exc)
        raise HTTPException(502, str(exc))

    needs_distill = a.conversation.needs_distill()
    if needs_distill:
        # Runs after the response is sent — keeps reply latency flat.
        background.add_task(a.distill_old_turns)
    return ChatOut(
        reply=reply,
        turns=len(a.conversation.turns),
        distilling=needs_distill,
        events=a.pop_events(),
    )
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/conversation/stats")
def conversation_stats():
    """Current rolling-window stats (turn count, thresholds)."""
    return conversation().stats()
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/conversation/reset")
def conversation_reset():
    """Hard reset — the rolling-window history is wiped completely.

    Distilled facts stay in the DB."""
    conversation().reset()
    return {"ok": True, "turns": 0}
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/conversation/distill")
def conversation_distill_now():
    """Manual distillation trigger — for tests, or before a deliberate reset."""
    return agent().distill_old_turns()
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Call-Metrics (Token / Quota-Monitoring) ────────────────────────
|
||||||
|
|
||||||
|
@app.get("/metrics/calls")
def metrics_calls():
    """Aggregates for 1h / 5h / 24h / 30d.

    Each window: {window_seconds, calls, tokens_in, tokens_out, by_model}."""
    return metrics_mod.stats()
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Triggers (passive Aufweck-Quellen) ─────────────────────────────
|
||||||
|
|
||||||
|
class TriggerTimerBody(BaseModel):
    """Request body for creating a one-shot timer trigger."""
    name: str
    fires_at: str  # ISO timestamp
    message: str
    author: str = "stefan"
|
||||||
|
|
||||||
|
|
||||||
|
class TriggerWatcherBody(BaseModel):
    """Request body for creating a condition-watching trigger."""
    name: str
    condition: str  # expression evaluated against watcher variables
    message: str
    check_interval_sec: int = 300  # minimum seconds between condition checks
    throttle_sec: int = 3600  # minimum seconds between firings
    author: str = "stefan"
|
||||||
|
|
||||||
|
|
||||||
|
class TriggerPatch(BaseModel):
    """Partial trigger update — only non-None fields are applied."""
    active: bool | None = None
    message: str | None = None
    condition: str | None = None
    throttle_sec: int | None = None
    check_interval_sec: int | None = None
    fires_at: str | None = None
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/triggers/list")
def triggers_list(active_only: bool = False):
    """All triggers, optionally only the active ones."""
    return {"triggers": triggers_mod.list_triggers(active_only=active_only)}
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/triggers/conditions")
def triggers_conditions():
    """Available variables + functions for watcher conditions
    (with current values)."""
    current = watcher_mod.collect_variables()
    # near() is a callable inside vars_ — filter it out for the UI.
    serializable = {k: v for k, v in current.items() if not callable(v)}
    return {
        "variables": watcher_mod.describe_variables(),
        "functions": watcher_mod.describe_functions(),
        "current": serializable,
    }
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/triggers/{name}")
def triggers_get(name: str):
    """One trigger by name; 404 if unknown."""
    t = triggers_mod.read(name)
    if t is None:
        raise HTTPException(404, f"Trigger '{name}' nicht gefunden")
    return t
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/triggers/{name}/logs")
def triggers_get_logs(name: str, limit: int = 50):
    """Most recent log entries for one trigger."""
    return {"logs": triggers_mod.list_logs(name, limit=limit)}
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/triggers/timer")
def triggers_create_timer(body: TriggerTimerBody):
    """Create a timer trigger; invalid input (ValueError) maps to 400."""
    try:
        return triggers_mod.create_timer(
            name=body.name, fires_at_iso=body.fires_at,
            message=body.message, author=body.author,
        )
    except ValueError as exc:
        raise HTTPException(400, str(exc))
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/triggers/watcher")
def triggers_create_watcher(body: TriggerWatcherBody):
    """Create a watcher trigger; invalid input (ValueError) maps to 400."""
    try:
        return triggers_mod.create_watcher(
            name=body.name, condition=body.condition,
            message=body.message,
            check_interval_sec=body.check_interval_sec,
            throttle_sec=body.throttle_sec,
            author=body.author,
        )
    except ValueError as exc:
        raise HTTPException(400, str(exc))
|
||||||
|
|
||||||
|
|
||||||
|
@app.patch("/triggers/{name}")
|
||||||
|
def triggers_patch(name: str, body: TriggerPatch):
|
||||||
|
patch = {k: v for k, v in body.model_dump().items() if v is not None}
|
||||||
|
try:
|
||||||
|
return triggers_mod.update(name, patch)
|
||||||
|
except ValueError as exc:
|
||||||
|
raise HTTPException(404, str(exc))
|
||||||
|
|
||||||
|
|
||||||
|
@app.delete("/triggers/{name}")
|
||||||
|
def triggers_delete(name: str):
|
||||||
|
try:
|
||||||
|
triggers_mod.delete(name)
|
||||||
|
except ValueError as exc:
|
||||||
|
raise HTTPException(404, str(exc))
|
||||||
|
return {"deleted": name}
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Skills ─────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
class SkillCreate(BaseModel):
    """Request body for creating a new skill (forwarded to skills_mod.create_skill)."""

    name: str
    description: str
    execution: str  # local-venv | local-bin | bash
    entry_code: str             # source code of the skill's entry point
    readme: str = ""
    args: list = Field(default_factory=list)          # argument definitions — schema defined by skills_mod
    requires: dict = Field(default_factory=dict)      # requirements mapping — schema defined by skills_mod
    pip_packages: list = Field(default_factory=list)  # pip packages to install for the skill
    author: str = "stefan"
|
||||||
|
|
||||||
|
|
||||||
|
class SkillRun(BaseModel):
    """Request body for executing a skill (forwarded to skills_mod.run_skill)."""

    name: str
    args: dict = Field(default_factory=dict)  # named arguments for the skill
    timeout_sec: int = 300                    # passed as timeout_sec to run_skill
|
||||||
|
|
||||||
|
|
||||||
|
class SkillPatch(BaseModel):
    """Partial skill update; ``None`` fields are stripped by the PATCH handler."""

    description: str | None = None
    active: bool | None = None
    args: list | None = None
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/skills/list")
|
||||||
|
def skills_list(active_only: bool = False):
|
||||||
|
return {"skills": skills_mod.list_skills(active_only=active_only)}
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/skills/{name}")
|
||||||
|
def skills_get(name: str):
|
||||||
|
m = skills_mod.read_manifest(name)
|
||||||
|
if m is None:
|
||||||
|
raise HTTPException(404, f"Skill '{name}' nicht gefunden")
|
||||||
|
readme = skills_mod.read_readme(name)
|
||||||
|
return {"manifest": m, "readme": readme}
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/skills/create")
|
||||||
|
def skills_create(body: SkillCreate):
|
||||||
|
try:
|
||||||
|
return skills_mod.create_skill(
|
||||||
|
name=body.name,
|
||||||
|
description=body.description,
|
||||||
|
execution=body.execution,
|
||||||
|
entry_code=body.entry_code,
|
||||||
|
readme=body.readme,
|
||||||
|
args=body.args,
|
||||||
|
requires=body.requires,
|
||||||
|
pip_packages=body.pip_packages,
|
||||||
|
author=body.author,
|
||||||
|
)
|
||||||
|
except ValueError as exc:
|
||||||
|
raise HTTPException(400, str(exc))
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/skills/run")
|
||||||
|
def skills_run(body: SkillRun):
|
||||||
|
try:
|
||||||
|
return skills_mod.run_skill(body.name, args=body.args, timeout_sec=body.timeout_sec)
|
||||||
|
except ValueError as exc:
|
||||||
|
raise HTTPException(400, str(exc))
|
||||||
|
|
||||||
|
|
||||||
|
@app.patch("/skills/{name}")
|
||||||
|
def skills_patch(name: str, body: SkillPatch):
|
||||||
|
patch = {k: v for k, v in body.model_dump().items() if v is not None}
|
||||||
|
try:
|
||||||
|
return skills_mod.update_skill(name, patch)
|
||||||
|
except ValueError as exc:
|
||||||
|
raise HTTPException(404, str(exc))
|
||||||
|
|
||||||
|
|
||||||
|
@app.delete("/skills/{name}")
|
||||||
|
def skills_delete(name: str):
|
||||||
|
try:
|
||||||
|
skills_mod.delete_skill(name)
|
||||||
|
except ValueError as exc:
|
||||||
|
raise HTTPException(404, str(exc))
|
||||||
|
return {"deleted": name}
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/skills/{name}/logs")
|
||||||
|
def skills_logs(name: str, limit: int = 50):
|
||||||
|
return {"logs": skills_mod.list_logs(name, limit=limit)}
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/skills/{name}/export")
|
||||||
|
def skills_export(name: str):
|
||||||
|
try:
|
||||||
|
data = skills_mod.export_skill(name)
|
||||||
|
except ValueError as exc:
|
||||||
|
raise HTTPException(404, str(exc))
|
||||||
|
return Response(
|
||||||
|
content=data,
|
||||||
|
media_type="application/gzip",
|
||||||
|
headers={"Content-Disposition": f'attachment; filename="skill-{name}.tar.gz"'},
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/skills/import")
|
||||||
|
async def skills_import(request: Request, overwrite: bool = False):
|
||||||
|
data = await request.body()
|
||||||
|
if not data:
|
||||||
|
raise HTTPException(400, "Leerer Body")
|
||||||
|
try:
|
||||||
|
manifest = skills_mod.import_skill(data, overwrite=overwrite)
|
||||||
|
except ValueError as exc:
|
||||||
|
raise HTTPException(400, str(exc))
|
||||||
|
return {"imported": manifest}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
from .embedder import Embedder
|
||||||
|
from .vector_store import VectorStore, MemoryPoint, MemoryType
|
||||||
|
|
||||||
|
__all__ = ["Embedder", "VectorStore", "MemoryPoint", "MemoryType"]
|
||||||
@@ -0,0 +1,42 @@
|
|||||||
|
"""
|
||||||
|
Lokaler Embedder fuer Memory-Texte.
|
||||||
|
|
||||||
|
Nutzt sentence-transformers (paraphrase-multilingual-MiniLM-L12-v2):
|
||||||
|
- Deutsch + Englisch
|
||||||
|
- 384-dimensionale Vektoren
|
||||||
|
- Laeuft auf CPU, ~30ms pro kurzer Text
|
||||||
|
- Modell wird beim ersten Aufruf in /data/_models gecached
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import List
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
MODEL_NAME = "paraphrase-multilingual-MiniLM-L12-v2"
|
||||||
|
VECTOR_DIM = 384
|
||||||
|
|
||||||
|
|
||||||
|
class Embedder:
    """Lazily loads a sentence-transformers model and produces normalized vectors."""

    def __init__(self, model_name: str = MODEL_NAME):
        self.model_name = model_name
        self._model = None  # loaded on first embed() call

    def _load(self):
        """Import and instantiate the model on first use (the import is expensive)."""
        if self._model is not None:
            return
        logger.info("Lade Embedding-Modell %s ...", self.model_name)
        from sentence_transformers import SentenceTransformer
        self._model = SentenceTransformer(self.model_name)
        logger.info("Embedding-Modell geladen.")

    def embed(self, text: str) -> List[float]:
        """Embed one text into a normalized vector (list of floats)."""
        self._load()
        vector = self._model.encode(text, convert_to_numpy=True, normalize_embeddings=True)
        return vector.tolist()

    def embed_batch(self, texts: List[str]) -> List[List[float]]:
        """Embed several texts in a single model call."""
        self._load()
        vectors = self._model.encode(texts, convert_to_numpy=True, normalize_embeddings=True)
        return vectors.tolist()
|
||||||
@@ -0,0 +1,209 @@
|
|||||||
|
"""
|
||||||
|
Vector-Store-Wrapper um Qdrant.
|
||||||
|
|
||||||
|
Eine Collection "aria_memory" haelt ALLE Memory-Punkte.
|
||||||
|
Trennung nach Type/Pinned-Status via Payload-Filter.
|
||||||
|
|
||||||
|
Punkt-Schema (Payload):
|
||||||
|
type — identity | rule | preference | tool | skill | fact | conversation | reminder
|
||||||
|
category — frei, fuer UI-Gruppierung
|
||||||
|
title — kurze Ueberschrift
|
||||||
|
content — eigentlicher Text (wird embedded)
|
||||||
|
pinned — bool, True = Hot Memory (immer in Prompt)
|
||||||
|
source — import | conversation | manual
|
||||||
|
tags — Liste von Strings
|
||||||
|
created_at, updated_at — ISO-Strings
|
||||||
|
conversation_id — optional, nur fuer type=conversation
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import uuid
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from enum import Enum
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
from qdrant_client import QdrantClient
|
||||||
|
from qdrant_client.http import models as qm
|
||||||
|
|
||||||
|
from .embedder import VECTOR_DIM
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
COLLECTION = "aria_memory"
|
||||||
|
|
||||||
|
|
||||||
|
class MemoryType(str, Enum):
    """Closed set of memory-point types stored in the "type" payload field."""

    IDENTITY = "identity"
    RULE = "rule"
    PREFERENCE = "preference"
    TOOL = "tool"
    SKILL = "skill"
    FACT = "fact"
    CONVERSATION = "conversation"
    REMINDER = "reminder"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class MemoryPoint:
    """One memory entry plus its mapping to/from a Qdrant payload."""

    id: str
    type: str
    title: str
    content: str
    pinned: bool = False
    category: str = ""
    source: str = "manual"
    tags: List[str] = field(default_factory=list)
    created_at: str = ""
    updated_at: str = ""
    conversation_id: Optional[str] = None
    score: Optional[float] = None  # only set on search results

    # fields serialized 1:1 into the payload (conversation_id is conditional)
    _PAYLOAD_KEYS = (
        "type", "title", "content", "pinned", "category",
        "source", "tags", "created_at", "updated_at",
    )

    def to_payload(self) -> dict:
        """Serialize to a Qdrant payload dict; conversation_id only when truthy."""
        payload = {key: getattr(self, key) for key in self._PAYLOAD_KEYS}
        if self.conversation_id:
            payload["conversation_id"] = self.conversation_id
        return payload

    @classmethod
    def from_qdrant(cls, point) -> "MemoryPoint":
        """Build a MemoryPoint from a Qdrant record or scored point."""
        data = point.payload or {}
        defaults = {
            "type": "fact",
            "title": "",
            "content": "",
            "pinned": False,
            "category": "",
            "source": "manual",
            "created_at": "",
            "updated_at": "",
        }
        kwargs = {key: data.get(key, default) for key, default in defaults.items()}
        return cls(
            id=str(point.id),
            tags=data.get("tags", []),
            conversation_id=data.get("conversation_id"),
            score=getattr(point, "score", None),
            **kwargs,
        )
|
||||||
|
|
||||||
|
|
||||||
|
def _now() -> str:
|
||||||
|
return datetime.now(timezone.utc).isoformat()
|
||||||
|
|
||||||
|
|
||||||
|
class VectorStore:
    """Thin wrapper around the single Qdrant collection holding all memory points."""

    def __init__(self, host: str, port: int = 6333):
        self.client = QdrantClient(host=host, port=port)
        self._ensure_collection()

    def _ensure_collection(self):
        """Create collection + payload indexes on first start (no-op afterwards)."""
        names = {c.name for c in self.client.get_collections().collections}
        if COLLECTION in names:
            return
        logger.info("Erstelle Collection %s ...", COLLECTION)
        self.client.create_collection(
            collection_name=COLLECTION,
            vectors_config=qm.VectorParams(size=VECTOR_DIM, distance=qm.Distance.COSINE),
        )
        # Indexes for the typical filter fields; "pinned" is the only boolean one.
        for name in ("type", "pinned", "category", "source", "migration_key"):
            schema = (qm.PayloadSchemaType.BOOL if name == "pinned"
                      else qm.PayloadSchemaType.KEYWORD)
            self.client.create_payload_index(
                collection_name=COLLECTION,
                field_name=name,
                field_schema=schema,
            )

    # ─── write operations ────────────────────────────────────────────

    def upsert(self, point: MemoryPoint, vector: List[float]) -> str:
        """Insert or overwrite one point; fills id/timestamps in place. Returns the id."""
        point.id = point.id or str(uuid.uuid4())
        point.created_at = point.created_at or _now()
        point.updated_at = _now()

        record = qm.PointStruct(id=point.id, vector=vector, payload=point.to_payload())
        self.client.upsert(collection_name=COLLECTION, points=[record])
        return point.id

    def delete(self, point_id: str):
        """Remove a single point by id."""
        selector = qm.PointIdsList(points=[point_id])
        self.client.delete(collection_name=COLLECTION, points_selector=selector)

    # ─── read operations ─────────────────────────────────────────────

    def get(self, point_id: str) -> Optional[MemoryPoint]:
        """Fetch one point by id, or None if it does not exist."""
        hits = self.client.retrieve(collection_name=COLLECTION, ids=[point_id], with_payload=True)
        return MemoryPoint.from_qdrant(hits[0]) if hits else None

    def list_pinned(self) -> List[MemoryPoint]:
        """All pinned points — hot memory."""
        cond = qm.FieldCondition(key="pinned", match=qm.MatchValue(value=True))
        return self._scroll(filter=qm.Filter(must=[cond]))

    def list_by_type(self, type_: str, limit: int = 100) -> List[MemoryPoint]:
        """Points of one memory type, capped at *limit*."""
        cond = qm.FieldCondition(key="type", match=qm.MatchValue(value=type_))
        return self._scroll(filter=qm.Filter(must=[cond]), limit=limit)

    def list_all(self, limit: int = 1000) -> List[MemoryPoint]:
        """Every point, capped at *limit*."""
        return self._scroll(filter=None, limit=limit)

    def _scroll(self, filter, limit: int = 1000) -> List[MemoryPoint]:
        """One scroll page (payloads only, no vectors), mapped to MemoryPoints."""
        records, _next_offset = self.client.scroll(
            collection_name=COLLECTION,
            scroll_filter=filter,
            limit=limit,
            with_payload=True,
            with_vectors=False,
        )
        return [MemoryPoint.from_qdrant(r) for r in records]

    def search(
        self,
        query_vector: List[float],
        k: int = 5,
        type_filter: Optional[str] = None,
        exclude_pinned: bool = True,
    ) -> List[MemoryPoint]:
        """Semantic search. Pinned points are excluded by default
        (they reach the prompt separately via list_pinned)."""
        must = []
        must_not = []
        if type_filter:
            must.append(qm.FieldCondition(key="type", match=qm.MatchValue(value=type_filter)))
        if exclude_pinned:
            must_not.append(qm.FieldCondition(key="pinned", match=qm.MatchValue(value=True)))

        query_filter = None
        if must or must_not:
            query_filter = qm.Filter(must=must or None, must_not=must_not or None)

        hits = self.client.search(
            collection_name=COLLECTION,
            query_vector=query_vector,
            query_filter=query_filter,
            limit=k,
            with_payload=True,
        )
        return [MemoryPoint.from_qdrant(h) for h in hits]

    def count(self) -> int:
        """Exact number of points in the collection."""
        return self.client.count(collection_name=COLLECTION, exact=True).count
|
||||||
@@ -0,0 +1,133 @@
|
|||||||
|
"""
|
||||||
|
Call-Metrics fuer den Proxy-Client.
|
||||||
|
|
||||||
|
Pro Claude-Call wird ein Eintrag in /data/metrics.jsonl angehaengt:
|
||||||
|
|
||||||
|
{"ts": <ms>, "model": "...", "in": <tokens_in_estimate>, "out": <tokens_out_estimate>}
|
||||||
|
|
||||||
|
Tokens-Schaetzung: characters / 4 (Anthropic-Default-Heuristik). Nicht exakt
|
||||||
|
aber gut genug fuer Quota-Monitoring. Wir summieren nicht in-memory weil
|
||||||
|
der Brain-Container neugestartet werden kann — alles auf Disk.
|
||||||
|
|
||||||
|
Auswertung via aggregate(window_seconds) — liefert {calls, tokens_in, tokens_out}
|
||||||
|
fuer die letzten N Sekunden. Lazy gelesen, keine grossen Datenmengen erwartet
|
||||||
|
(bei 1000 Calls/Tag ~70 KB pro Monat).
|
||||||
|
|
||||||
|
Auto-Rotate: bei > 50k Zeilen werden die aeltesten 25k weggeschnitten.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
METRICS_FILE = Path(os.environ.get("METRICS_FILE", "/data/metrics.jsonl"))
|
||||||
|
ROTATE_AT = 50_000
|
||||||
|
ROTATE_KEEP = 25_000
|
||||||
|
|
||||||
|
|
||||||
|
def _estimate_tokens(text: str) -> int:
|
||||||
|
"""Anthropic-Default: ~4 chars pro Token. Grob genug."""
|
||||||
|
if not text:
|
||||||
|
return 0
|
||||||
|
return max(1, len(text) // 4)
|
||||||
|
|
||||||
|
|
||||||
|
def _messages_tokens(messages: list) -> int:
    """Sum token estimates over chat messages (pydantic models or dicts).

    Dict messages with non-string content (e.g. structured block lists)
    contribute nothing.
    """
    total = 0
    for msg in messages:
        if hasattr(msg, "content"):
            # pydantic-style message object
            total += _estimate_tokens(msg.content or "")
        elif isinstance(msg, dict):
            text = msg.get("content") or ""
            if isinstance(text, str):
                total += _estimate_tokens(text)
    return total
|
||||||
|
|
||||||
|
|
||||||
|
def log_call(model: str, messages_in: list, reply_text: str = "") -> None:
    """Append one call metric to METRICS_FILE. Robust against errors (silent fail).

    Writes one JSON line: {"ts": <ms>, "model": ..., "in": ..., "out": ...}.
    Every 1000th call also runs the (IO-heavy) rotation check; the counter
    lives on the function object so no extra module state is needed.
    """
    try:
        tokens_in = _messages_tokens(messages_in)
        tokens_out = _estimate_tokens(reply_text)
        line = json.dumps({
            "ts": int(time.time() * 1000),
            "model": model,
            "in": tokens_in,
            "out": tokens_out,
        })
        METRICS_FILE.parent.mkdir(parents=True, exist_ok=True)
        with METRICS_FILE.open("a", encoding="utf-8") as f:
            f.write(line + "\n")
        # Gentle rotate without high IO cost — genuinely check once per 1000
        # calls. (The old heuristic keyed off the token sum, which fired on
        # every tiny call and possibly never on long ones.)
        log_call._calls = getattr(log_call, "_calls", 0) + 1
        if log_call._calls % 1000 == 0:
            _maybe_rotate()
    except Exception as exc:
        logger.warning("metrics.log_call: %s", exc)
|
||||||
|
|
||||||
|
|
||||||
|
def _maybe_rotate() -> None:
    """Trim the metrics file to ROTATE_KEEP lines once it exceeds ROTATE_AT.

    Best-effort: any IO error is logged and swallowed.
    """
    try:
        if not METRICS_FILE.exists():
            return
        with METRICS_FILE.open("r", encoding="utf-8") as f:
            all_lines = f.readlines()
        if len(all_lines) <= ROTATE_AT:
            return
        tail = all_lines[-ROTATE_KEEP:]
        METRICS_FILE.write_text("".join(tail), encoding="utf-8")
        logger.info("metrics rotated: %d → %d Zeilen", len(all_lines), len(tail))
    except Exception as exc:
        logger.warning("metrics rotate: %s", exc)
|
||||||
|
|
||||||
|
|
||||||
|
def aggregate(window_seconds: int) -> dict:
    """Aggregate the calls of the last *window_seconds* seconds.

    Returns window size, call count, token sums and a per-model call count.
    Unparseable lines (e.g. partially written) are skipped silently.
    """
    cutoff_ms = int(time.time() * 1000) - window_seconds * 1000
    calls = 0
    tokens_in = 0
    tokens_out = 0
    by_model: dict[str, int] = {}

    if METRICS_FILE.exists():
        try:
            for raw in METRICS_FILE.read_text(encoding="utf-8").splitlines():
                raw = raw.strip()
                if not raw:
                    continue
                try:
                    entry = json.loads(raw)
                except Exception:
                    continue  # tolerate corrupt lines
                if entry.get("ts", 0) < cutoff_ms:
                    continue  # outside the window
                calls += 1
                tokens_in += int(entry.get("in") or 0)
                tokens_out += int(entry.get("out") or 0)
                model = entry.get("model", "?")
                by_model[model] = by_model.get(model, 0) + 1
        except Exception as exc:
            logger.warning("metrics aggregate: %s", exc)

    return {
        "window_seconds": window_seconds,
        "calls": calls,
        "tokens_in": tokens_in,
        "tokens_out": tokens_out,
        "by_model": by_model,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def stats() -> dict:
    """Full snapshot over the standard monitoring windows (1h, 5h, 24h, 30d)."""
    windows = {
        "h1": 3600,
        "h5": 5 * 3600,
        "h24": 24 * 3600,
        "d30": 30 * 24 * 3600,
    }
    return {label: aggregate(seconds) for label, seconds in windows.items()}
|
||||||
@@ -0,0 +1,399 @@
|
|||||||
|
"""
|
||||||
|
Migration aus aria-data/brain-import/ → Vector-DB.
|
||||||
|
|
||||||
|
Parst die mitgelieferten Markdown-Dateien (AGENT.md, USER.md, TOOLING.md)
|
||||||
|
und zerlegt sie in atomare Memory-Punkte. Jeder Punkt bekommt:
|
||||||
|
|
||||||
|
source = "import"
|
||||||
|
migration_key = stabiler Identifier (z.B. "agent.md/rule-1") fuer Idempotenz
|
||||||
|
pinned = True
|
||||||
|
|
||||||
|
Beim Re-Run werden vorhandene Punkte mit gleicher migration_key entfernt
|
||||||
|
und neu geschrieben.
|
||||||
|
|
||||||
|
Mapping pro Datei:
|
||||||
|
|
||||||
|
AGENT.md
|
||||||
|
"Identitaet" → 1 Punkt type=identity
|
||||||
|
"Persoenlichkeit" (Intro) → 1 Punkt type=identity
|
||||||
|
"Kern-Eigenschaften" (Liste) → 1 Punkt pro Bullet type=identity
|
||||||
|
"Tool-Freigaben" → 1 Punkt type=tool
|
||||||
|
"Sicherheitsregeln" (Liste) → 1 Punkt pro Bullet type=rule
|
||||||
|
"Arbeitsprinzipien" (Liste) → 1 Punkt pro Bullet type=rule
|
||||||
|
"Dateien an Stefan zurueckgeben"→ 1 Punkt type=skill
|
||||||
|
"Stimme" → 1 Punkt type=tool
|
||||||
|
|
||||||
|
USER.md
|
||||||
|
"Allgemein" (Liste) → 1 Punkt pro Bullet type=preference
|
||||||
|
"Bestaetigung erforderlich" → 1 Punkt type=preference
|
||||||
|
"Autonomes Arbeiten OK fuer" → 1 Punkt type=preference
|
||||||
|
"Tools & Infrastruktur" → 1 Punkt type=preference
|
||||||
|
|
||||||
|
TOOLING.md
|
||||||
|
gesamter Inhalt → 1 Punkt type=tool, title="Tooling-Stack"
|
||||||
|
|
||||||
|
BOOTSTRAP.md ist eine Variante von AGENT.md — wird (vorerst) ignoriert
|
||||||
|
damit keine doppelten Punkte landen.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
from memory import Embedder, VectorStore, MemoryPoint
|
||||||
|
from memory.vector_store import COLLECTION
|
||||||
|
from qdrant_client.http import models as qm
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class _Block:
    """One markdown section: heading text plus the raw body below it."""

    title: str    # heading text without the leading #'s
    content: str  # raw lines under the heading (may carry a trailing newline)
|
||||||
|
|
||||||
|
|
||||||
|
def _split_h2(md: str) -> List[_Block]:
    """Split markdown into H2 blocks. Content before the first H2 is dropped."""
    result: List[_Block] = []
    block: Optional[_Block] = None
    for line in md.splitlines():
        heading = re.match(r"^##\s+(.+?)\s*$", line)
        if heading and not line.startswith("### "):
            if block:
                result.append(block)
            block = _Block(title=heading.group(1).strip(), content="")
        elif block is not None:
            block.content += line + "\n"
    if block:
        result.append(block)
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def _split_h3(content: str) -> List[_Block]:
    """Split an H2 block into H3 subsections, plus an '_intro' block for
    any text that precedes the first H3."""
    result: List[_Block] = []
    intro: List[str] = []
    section: Optional[_Block] = None
    for line in content.splitlines():
        heading = re.match(r"^###\s+(.+?)\s*$", line)
        if heading:
            # emit the intro exactly once, right before the first H3
            if section is None and intro:
                result.append(_Block(title="_intro", content="\n".join(intro).strip()))
            if section:
                result.append(section)
            section = _Block(title=heading.group(1).strip(), content="")
        elif section is None:
            intro.append(line)
        else:
            section.content += line + "\n"
    if section:
        result.append(section)
    elif intro:
        # no H3 at all — the whole content is the intro
        result.append(_Block(title="_intro", content="\n".join(intro).strip()))
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def _extract_bullets(content: str) -> List[tuple[str, str]]:
|
||||||
|
"""Findet "- **Title** — Body" oder "N. **Title** — Body" Bullets.
|
||||||
|
|
||||||
|
Returns: Liste von (title, full_bullet_text).
|
||||||
|
"""
|
||||||
|
bullets: List[tuple[str, str]] = []
|
||||||
|
current_lines: List[str] = []
|
||||||
|
current_title: Optional[str] = None
|
||||||
|
|
||||||
|
def flush():
|
||||||
|
if current_title and current_lines:
|
||||||
|
bullets.append((current_title, "\n".join(current_lines).strip()))
|
||||||
|
|
||||||
|
for line in content.splitlines():
|
||||||
|
m = re.match(r"^\s*(?:[-*]|\d+\.)\s+\*\*([^*]+?)\*\*\s*[—\-:]?\s*(.*)$", line)
|
||||||
|
if m:
|
||||||
|
flush()
|
||||||
|
current_title = m.group(1).strip()
|
||||||
|
current_lines = [line]
|
||||||
|
continue
|
||||||
|
# Folge-Zeilen mit Einrueckung gehoeren zum aktuellen Bullet
|
||||||
|
if current_title and (line.startswith(" ") or line.startswith("\t") or not line.strip()):
|
||||||
|
current_lines.append(line)
|
||||||
|
continue
|
||||||
|
if current_title and not re.match(r"^\s*(?:[-*]|\d+\.)\s+", line):
|
||||||
|
current_lines.append(line)
|
||||||
|
continue
|
||||||
|
# Neuer Bullet ohne **Title** Format
|
||||||
|
if re.match(r"^\s*(?:[-*]|\d+\.)\s+", line):
|
||||||
|
flush()
|
||||||
|
text = re.sub(r"^\s*(?:[-*]|\d+\.)\s+", "", line).strip()
|
||||||
|
short_title = (text[:60] + "…") if len(text) > 60 else text
|
||||||
|
bullets.append((short_title, line.strip()))
|
||||||
|
current_title = None
|
||||||
|
current_lines = []
|
||||||
|
flush()
|
||||||
|
return bullets
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Pro Datei eine Parser-Funktion ──────────────────────────────────
|
||||||
|
|
||||||
|
def _parse_agent_md(md: str, source_file: str) -> List[MemoryPoint]:
    """Parse AGENT.md (and BOOTSTRAP.md) H2 sections into MemoryPoints.

    First-match-wins elif chain over the section title — ORDER MATTERS:
    e.g. the "tool-freigaben" branch must come before the generic
    "freigaben" branch, which would otherwise swallow it. Unknown sections
    fall through to a generic unpinned fact.
    """
    points: List[MemoryPoint] = []
    h2_blocks = _split_h2(md)
    for h2 in h2_blocks:
        title = h2.title
        content = h2.content.strip()
        # skip empty sections entirely
        if not content:
            continue

        # identity section (both spellings) → single pinned identity point
        if title.lower() == "identitaet" or title.lower() == "identität":
            points.append(_mk(
                type_="identity", title="ARIA — Identitaet",
                content=f"## {title}\n\n{content}",
                category="persoenlichkeit",
                migration_key=f"{source_file}/identity",
            ))

        elif title.lower() == "persoenlichkeit" or title.lower() == "persönlichkeit":
            # separate the intro paragraph from the core-traits list
            sub = _split_h3(content)
            for s in sub:
                if s.title == "_intro" and s.content.strip():
                    points.append(_mk(
                        type_="identity", title="Persoenlichkeit — Grundsatz",
                        content=s.content.strip(),
                        category="persoenlichkeit",
                        migration_key=f"{source_file}/personality-intro",
                    ))
                elif s.title.lower().startswith("kern"):
                    # one point per trait bullet; idx keeps the migration key stable
                    for idx, (btitle, btext) in enumerate(_extract_bullets(s.content), 1):
                        points.append(_mk(
                            type_="identity", title=f"Eigenschaft: {btitle}",
                            content=btext, category="persoenlichkeit",
                            migration_key=f"{source_file}/personality-trait-{idx}",
                        ))

        elif "sicherheitsregel" in title.lower():
            # one rule point per security bullet
            for idx, (btitle, btext) in enumerate(_extract_bullets(content), 1):
                points.append(_mk(
                    type_="rule", title=f"Sicherheit: {btitle}",
                    content=btext, category="sicherheit",
                    migration_key=f"{source_file}/security-{idx}",
                ))

        elif "arbeitsprinzipien" in title.lower() or "arbeitsprinzip" in title.lower():
            # one rule point per working-principle bullet
            for idx, (btitle, btext) in enumerate(_extract_bullets(content), 1):
                points.append(_mk(
                    type_="rule", title=f"Prinzip: {btitle}",
                    content=btext, category="arbeitsweise",
                    migration_key=f"{source_file}/work-principle-{idx}",
                ))

        # must precede the generic "freigaben" branch below
        elif "tool-freigaben" in title.lower() or "tool freigaben" in title.lower():
            points.append(_mk(
                type_="tool", title="Tool-Freigaben — Vollzugriff",
                content=content, category="infrastruktur",
                migration_key=f"{source_file}/tool-access",
            ))

        elif "dateien an stefan" in title.lower() or "dateien zurueckgeben" in title.lower() or "dateien zur" in title.lower():
            points.append(_mk(
                type_="skill", title="Dateien an User zurueckgeben",
                content=content, category="ausgabe",
                migration_key=f"{source_file}/file-return-skill",
            ))

        elif title.lower() == "stimme":
            points.append(_mk(
                type_="tool", title="Stimme (F5-TTS)",
                content=content, category="infrastruktur",
                migration_key=f"{source_file}/voice",
            ))

        # permanent permissions (in BOOTSTRAP) — stored as rule
        elif "freigaben" in title.lower():
            points.append(_mk(
                type_="rule", title=title,
                content=content, category="freigaben",
                migration_key=f"{source_file}/permissions",
            ))

        else:
            # unknown block: store as generic fact, NOT pinned
            logger.info("Unbekannter H2-Block '%s' in %s — als fact (unpinned)", title, source_file)
            points.append(_mk(
                type_="fact", title=f"{source_file}: {title}",
                content=content, pinned=False,
                migration_key=f"{source_file}/section-{title.lower().replace(' ', '-')}",
            ))
    return points
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_user_md(md: str, source_file: str) -> List[MemoryPoint]:
    """Turn USER.md H2 sections into preference points.

    The "Allgemein" section yields one point per bullet; every other section
    becomes a single point. Template placeholder sections and bullets
    (example markers like <Tag>) are skipped.
    """
    points: List[MemoryPoint] = []
    for section in _split_h2(md):
        body = section.content.strip()
        if not body:
            continue
        # filter out template placeholders: example lines containing <Tag>
        if "<Beispiel-Tool>" in body or "<Username>" in section.title:
            continue
        if section.title.lower() == "allgemein":
            for idx, (btitle, btext) in enumerate(_extract_bullets(body), 1):
                if "<z.B." in btext or "<XYZ>" in btext:
                    continue  # skip template placeholder bullets
                points.append(_mk(
                    type_="preference", title=f"User: {btitle}",
                    content=btext, category="allgemein",
                    migration_key=f"{source_file}/general-{idx}",
                ))
        else:
            # slugify the title into a category / migration-key segment
            cat_key = re.sub(r"[^a-z0-9]+", "-", section.title.lower()).strip("-") or "allgemein"
            points.append(_mk(
                type_="preference", title=section.title,
                content=body, category=cat_key,
                migration_key=f"{source_file}/{cat_key}",
            ))
    return points
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_tooling_md(md: str, source_file: str) -> List[MemoryPoint]:
    """The whole TOOLING.md becomes a single pinned tool point."""
    text = md.strip()
    if not text:
        return []
    point = _mk(
        type_="tool", title="Tooling-Stack (VM)",
        content=text, category="infrastruktur",
        migration_key=f"{source_file}/tooling-full",
    )
    return [point]
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Helper ─────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
def _mk(
    type_: str,
    title: str,
    content: str,
    migration_key: str,
    pinned: bool = True,
    category: str = "",
) -> MemoryPoint:
    """Build an import-sourced MemoryPoint carrying an idempotency key.

    The id is left empty so the store assigns a fresh UUID on upsert.
    """
    p = MemoryPoint(
        id="",
        type=type_,
        title=title,
        content=content.strip(),
        pinned=pinned,
        category=category,
        source="import",
        tags=[],
    )
    # migration_key is addressed via a payload index — it must be appended to
    # the payload manually alongside to_payload().
    # NOTE(review): MemoryPoint.to_payload itself does not emit _migration_key;
    # confirm the migration writer attaches it to the payload before upserting,
    # otherwise the idempotent re-run (delete by migration_key) cannot work.
    setattr(p, "_migration_key", migration_key)
    return p
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Eintrittspunkt ─────────────────────────────────────────────────
|
||||||
|
|
||||||
|
def run_migration(
    import_dir: Path,
    store: VectorStore,
    embedder: Embedder,
) -> dict:
    """Read all known .md files from import_dir, parse them, write to the DB.

    Idempotent: existing points carrying the same migration_key are
    deleted first and re-written.

    Returns: {"created": int, "updated": int, "skipped": int, "files": [...]}
    """
    if not import_dir.exists():
        return {"created": 0, "updated": 0, "skipped": 0, "files": [], "error": f"{import_dir} nicht gefunden"}

    parsers = {
        "AGENT.md": _parse_agent_md,
        "BOOTSTRAP.md": _parse_agent_md,  # same parser; entries may overlap
        "USER.md": _parse_user_md,
        "USER.md.example": _parse_user_md,
        "TOOLING.md": _parse_tooling_md,
        "TOOLING.md.example": _parse_tooling_md,
    }

    # USER.md has priority over USER.md.example (same for TOOLING)
    file_priority = ["AGENT.md", "BOOTSTRAP.md", "USER.md", "USER.md.example",
                     "TOOLING.md", "TOOLING.md.example"]
    seen_kinds: set[str] = set()  # "USER" / "TOOLING" — only once

    points: List[MemoryPoint] = []
    processed_files: List[str] = []

    for fname in file_priority:
        fp = import_dir / fname
        if not fp.exists():
            continue
        kind = fname.split(".")[0]  # "AGENT", "BOOTSTRAP", "USER", "TOOLING"
        # USER.md.example only when USER.md is missing
        if kind in ("USER", "TOOLING") and kind in seen_kinds:
            continue
        seen_kinds.add(kind)
        parser = parsers.get(fname)
        if not parser:
            continue
        try:
            md = fp.read_text(encoding="utf-8")
            file_points = parser(md, fname)
            points.extend(file_points)
            processed_files.append(f"{fname} ({len(file_points)})")
            logger.info("Migration: %s → %d Punkte", fname, len(file_points))
        except Exception as exc:
            logger.exception("Migration: %s fehlgeschlagen", fname)
            processed_files.append(f"{fname} (FEHLER: {exc})")

    if not points:
        return {"created": 0, "updated": 0, "skipped": 0, "files": processed_files}

    # Delete old migration points carrying the same migration_key first
    migration_keys = [k for k in (getattr(p, "_migration_key", None) for p in points) if k]
    if migration_keys:
        store.client.delete(
            collection_name=COLLECTION,
            points_selector=qm.FilterSelector(filter=qm.Filter(must=[
                qm.FieldCondition(key="migration_key", match=qm.MatchAny(any=migration_keys))
            ])),
        )
        logger.info("Migration: %d alte Punkte mit gleicher migration_key entfernt", len(migration_keys))

    # Embed in batches
    texts = [p.content for p in points]
    vectors = embedder.embed_batch(texts)

    # Hoisted out of the loop — the original re-imported these per iteration
    from datetime import datetime, timezone
    import uuid as _uuid

    created = 0
    for p, vec in zip(points, vectors):
        payload = p.to_payload()
        mkey = getattr(p, "_migration_key", None)
        if mkey:
            payload["migration_key"] = mkey
        pid = str(_uuid.uuid4())
        now = datetime.now(timezone.utc).isoformat()
        payload["created_at"] = now
        payload["updated_at"] = now
        store.client.upsert(
            collection_name=COLLECTION,
            points=[qm.PointStruct(id=pid, vector=vec, payload=payload)],
        )
        created += 1

    return {
        "created": created,
        # Keep the shape promised in the docstring — this delete-and-recreate
        # strategy never updates or skips individual points.
        "updated": 0,
        "skipped": 0,
        "files": processed_files,
        "import_dir": str(import_dir),
    }
|
||||||
@@ -0,0 +1,190 @@
|
|||||||
|
"""
|
||||||
|
System-Prompt-Bau aus Memory-Punkten.
|
||||||
|
|
||||||
|
Strategie:
|
||||||
|
1. Alle pinned Punkte (Hot Memory) — gruppiert nach Type — in den
|
||||||
|
System-Prompt schreiben. IMMER drin.
|
||||||
|
2. Top-K semantisch aehnliche Punkte (Cold Memory) zur aktuellen
|
||||||
|
User-Nachricht — als "Moeglicherweise relevant" eingehaengt.
|
||||||
|
3. Aktive Skills als kompakte Liste (nur Name + Description) — damit
|
||||||
|
ARIA weiss was sie hat.
|
||||||
|
|
||||||
|
Phase B Punkt 1: nur Hot-Memory-Bau, Skills + Cold-Search kommen
|
||||||
|
mit dem Conversation-Loop in spaeteren Phasen.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from typing import List
|
||||||
|
|
||||||
|
from memory import MemoryPoint
|
||||||
|
|
||||||
|
# Maps a memory-point type to the markdown H2 heading used in the prompt;
# types without an entry fall back to a generic "## <type>" heading.
TYPE_HEADINGS = {
    "identity": "## Wer du bist",
    "rule": "## Sicherheitsregeln & Prinzipien",
    "preference": "## Benutzer-Praeferenzen",
    "tool": "## Tool-Freigaben",
    "skill": "## Deine Skills",
}
|
||||||
|
|
||||||
|
|
||||||
|
def build_hot_memory_section(pinned: List[MemoryPoint]) -> str:
    """Build the 'always in the prompt' block from pinned memory points.

    Points are grouped by type and rendered in a fixed order
    (identity → rule → preference → tool → skill); any remaining types
    follow in grouping order under a generic heading.
    """
    def _render(heading: str, items: List[MemoryPoint], parts: List[str]) -> None:
        # One H2 heading per type, then an H3 title + content per point.
        parts.append(heading)
        for p in items:
            parts.append(f"### {p.title}")
            parts.append(p.content.strip())
        parts.append("")

    grouped: dict[str, List[MemoryPoint]] = {}
    for p in pinned:
        grouped.setdefault(p.type, []).append(p)

    parts: List[str] = []
    # Rendering order: identity → rule → preference → tool → skill → rest
    order = ["identity", "rule", "preference", "tool", "skill"]
    for t in order:
        items = grouped.pop(t, [])
        if items:
            _render(TYPE_HEADINGS.get(t, f"## {t}"), items, parts)

    # Remaining types (in case something non-standard was pinned)
    for t, items in grouped.items():
        _render(f"## {t}", items, parts)

    return "\n".join(parts).strip()
|
||||||
|
|
||||||
|
|
||||||
|
def build_cold_memory_section(matches: List[MemoryPoint]) -> str:
|
||||||
|
"""Baue 'Moeglicherweise relevant'-Block aus Search-Treffern."""
|
||||||
|
if not matches:
|
||||||
|
return ""
|
||||||
|
lines = ["## Moeglicherweise relevant (aus Gedaechtnis)"]
|
||||||
|
for p in matches:
|
||||||
|
score = f" [score={p.score:.2f}]" if p.score is not None else ""
|
||||||
|
lines.append(f"- **{p.title}**{score}")
|
||||||
|
lines.append(f" {p.content.strip()}")
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
def build_skills_section(skills: List[dict]) -> str:
|
||||||
|
"""Listet alle Skills (aktiv + deaktiviert) damit ARIA weiss was es gibt
|
||||||
|
und keine doppelt baut. Plus klare Schwelle wann ein Skill sich lohnt."""
|
||||||
|
lines = ["## Deine Skills"]
|
||||||
|
if skills:
|
||||||
|
for s in skills:
|
||||||
|
active = s.get("active", True)
|
||||||
|
marker = "" if active else " [DEAKTIVIERT — kann nicht aufgerufen werden]"
|
||||||
|
lines.append(f"- **{s.get('name', '?')}**{marker} — {s.get('description', '(ohne Beschreibung)')}")
|
||||||
|
lines.append("")
|
||||||
|
lines.append("Wenn ein vorhandener Skill zur Aufgabe passt: nutze ihn via Tool-Call.")
|
||||||
|
else:
|
||||||
|
lines.append("(noch keine Skills vorhanden)")
|
||||||
|
|
||||||
|
lines.append("")
|
||||||
|
lines.append("### Wann lohnt sich ein neuer Skill?")
|
||||||
|
lines.append("")
|
||||||
|
lines.append("**Skills sind IMMER Python** — eigene venv pro Skill mit den noetigen "
|
||||||
|
"pip-Paketen. Kein apt im Skill, kein systemweiter Install. Python deckt "
|
||||||
|
"in der Regel alles ab (yt-dlp, requests, pypdf, pillow, openpyxl, "
|
||||||
|
"static-ffmpeg, beautifulsoup4, …). Falls etwas WIRKLICH nur via apt geht: "
|
||||||
|
"Stefan fragen ob es ins Brain-Dockerfile soll.")
|
||||||
|
lines.append("")
|
||||||
|
lines.append("**Harte Regel — IMMER Skill anlegen wenn:** die Loesung erfordert eine "
|
||||||
|
"pip-Library. Begruendung: Brain-Container hat keinen persistenten State "
|
||||||
|
"ausser /data/skills/. Ohne Skill wuerde der Install bei jedem "
|
||||||
|
"Container-Restart wiederholt.")
|
||||||
|
lines.append("")
|
||||||
|
lines.append("**Sonst — Skill nur wenn alle vier zutreffen:**")
|
||||||
|
lines.append("")
|
||||||
|
lines.append("1. **Wiederkehrend** — die Aufgabe wird realistisch nochmal gestellt. "
|
||||||
|
"Einmal-Faelle (\"wie spaet ist es jetzt\") kein Skill.")
|
||||||
|
lines.append("2. **Nicht-trivial** — mehrere Schritte. Ein einzelner Shell-Befehl "
|
||||||
|
"(`date`, `hostname`, `ls`) ist KEIN Skill — das macht Bash direkt.")
|
||||||
|
lines.append("3. **Parametrisierbar** — der Skill nimmt Eingaben (URL, Datei, Suchbegriff) "
|
||||||
|
"und gibt ein nuetzliches Ergebnis zurueck.")
|
||||||
|
lines.append("4. **Wiederverwendbar als ganzes** — Stefan wuerde es zukuenftig per Name "
|
||||||
|
"ansprechen (\"mach mir den YouTube zu MP3\") statt jedes Mal zu erklaeren.")
|
||||||
|
lines.append("")
|
||||||
|
lines.append("Wenn nichts installiert werden muss UND nicht alle vier zutreffen: einfach "
|
||||||
|
"die Aufgabe loesen ohne Skill anzulegen. Stefan kann jederzeit sagen "
|
||||||
|
"'bau daraus einen Skill'.")
|
||||||
|
return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
def build_triggers_section(
    triggers: List[dict],
    condition_vars: List[dict],
    condition_funcs: List[dict] | None = None,
) -> str:
    """Render triggers (passive wake-up sources) plus the available
    condition variables and functions for watcher expressions.

    Robustness: trigger dicts with a missing "type" or a None "message"
    no longer raise — malformed entries are simply skipped/blanked.
    """
    lines = ["## Trigger (passive Aufweck-Quellen)"]
    lines.append("")
    lines.append("Trigger sind ANDERS als Skills: das System ruft DICH wenn ein Event passiert. "
                 "Du legst sie an wenn Stefan sagt 'erinner mich an X' oder 'sag bescheid wenn Y'.")
    lines.append("")
    if triggers:
        lines.append("### Aktuelle Trigger")
        for t in triggers:
            active = t.get("active", True)
            mark = "" if active else " [INAKTIV]"
            # Tolerate message=None (dict may carry an explicit None value)
            msg = (t.get("message") or "")[:80]
            # Tolerate a missing "type" key — unknown types are skipped
            ttype = t.get("type")
            if ttype == "timer":
                lines.append(f"- **{t['name']}**{mark} (timer) feuert {t.get('fires_at')}: \"{msg}\"")
            elif ttype == "watcher":
                lines.append(f"- **{t['name']}**{mark} (watcher) cond=`{t.get('condition')}`: \"{msg}\"")
        lines.append("")
    lines.append("### Verfuegbare Condition-Variablen (fuer Watcher)")
    for v in condition_vars:
        lines.append(f"- `{v['name']}` ({v['type']}) — {v['desc']}")
    if condition_funcs:
        lines.append("")
        lines.append("### Verfuegbare Funktionen in Conditions")
        for fn in condition_funcs:
            lines.append(f"- `{fn['signature']}` — {fn['desc']}")
    lines.append("")
    lines.append("Operatoren in Conditions: `<` `>` `<=` `>=` `==` `!=` `and` `or` `not`. "
                 "Beispiele: `disk_free_gb < 5 and hour_of_day >= 8`, "
                 "`day_of_week == \"mon\"`, `near(53.123, 7.456, 500)`. "
                 "Funktionen nur mit Konstanten als Argumenten (keine Variablen, "
                 "keine geschachtelten Funktionen).")
    lines.append("")
    lines.append("### Wann welcher Typ?")
    lines.append("- **Timer** fuer einmalige Erinnerungen mit konkreter Zeit ('in 10min', 'um 14:30').")
    lines.append("- **Watcher** fuer 'wenn X passiert' (Disk voll, bestimmte Tageszeit, GPS-Naehe).")
    lines.append("- ARIA legt Trigger NUR auf Stefan-Wunsch an, nicht eigenmaechtig.")
    lines.append("")
    lines.append("### GPS-Watcher mit near()")
    lines.append(
        "Wenn du einen Watcher mit `near()` anlegst: die App sendet GPS-Position "
        "nur kontinuierlich wenn Tracking AN ist (Default: AUS, Akku-Schutz). "
        "Rufe dafuer `request_location_tracking(on=true, reason=\"...\")` auf "
        "bevor oder gleich nach dem trigger_watcher. Sonst hat current_lat/lon "
        "veraltete Werte und der Watcher feuert nie. "
        "Beim Loeschen des letzten GPS-Watchers (trigger_cancel) wieder "
        "`request_location_tracking(on=false)` aufrufen.")
    return "\n".join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
def build_system_prompt(
    pinned: List[MemoryPoint],
    cold: List[MemoryPoint] | None = None,
    skills: List[dict] | None = None,
    triggers: List[dict] | None = None,
    condition_vars: List[dict] | None = None,
    condition_funcs: List[dict] | None = None,
) -> str:
    """Assemble the full system prompt: hot memory first, then the
    optional skills, trigger and cold-memory sections, each separated
    by a blank line."""
    sections = [build_hot_memory_section(pinned)]
    if skills:
        sections.extend(("", build_skills_section(skills)))
    if condition_vars:
        sections.extend(("", build_triggers_section(triggers or [], condition_vars, condition_funcs)))
    if cold:
        sections.extend(("", build_cold_memory_section(cold)))
    return "\n".join(sections).strip()
|
||||||
@@ -0,0 +1,149 @@
|
|||||||
|
"""
|
||||||
|
Claude-Aufruf ueber den lokalen Proxy.
|
||||||
|
|
||||||
|
Der Proxy (claude-max-api-proxy) bietet eine OpenAI-kompatible API
|
||||||
|
unter http://proxy:3456/v1/chat/completions. Wir nutzen non-streaming
|
||||||
|
mit einem laengeren Timeout — Claude Code spawnt pro Anfrage einen
|
||||||
|
neuen CLI-Prozess (Cold-Start), das dauert.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
import httpx
|
||||||
|
from pydantic import BaseModel
|
||||||
|
|
||||||
|
import metrics
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Runtime config file; its "brainModel" entry overrides the env default.
RUNTIME_CONFIG_FILE = Path("/shared/config/runtime.json")
# Fallback model when runtime.json is absent or carries no brainModel.
ENV_MODEL = os.environ.get("BRAIN_MODEL", "claude-sonnet-4")
# Base URL of the OpenAI-compatible local proxy.
PROXY_URL = os.environ.get("PROXY_URL", "http://proxy:3456")
# Generous timeout — the proxy cold-starts a CLI process per request.
PROXY_TIMEOUT_SEC = float(os.environ.get("PROXY_TIMEOUT_SEC", "300"))
|
||||||
|
|
||||||
|
|
||||||
|
def _read_model_from_runtime() -> str:
    """Read brainModel from runtime.json; fall back to the BRAIN_MODEL env value.

    Any read/parse failure is logged as a warning and results in the fallback.
    """
    try:
        if RUNTIME_CONFIG_FILE.exists():
            cfg = json.loads(RUNTIME_CONFIG_FILE.read_text(encoding="utf-8"))
            model = (cfg.get("brainModel") or "").strip()
            if model:
                return model
    except Exception as exc:
        logger.warning("runtime.json lesen fehlgeschlagen: %s", exc)
    return ENV_MODEL
|
||||||
|
|
||||||
|
|
||||||
|
# Resolved once at import time; per-call overrides go through the model= parameter.
DEFAULT_MODEL = _read_model_from_runtime()
|
||||||
|
|
||||||
|
|
||||||
|
class Message(BaseModel):
    """One chat message in OpenAI chat-completions format."""
    role: str  # "system" | "user" | "assistant" | "tool"
    content: Optional[str] = None
    tool_calls: Optional[list] = None  # assistant-side tool-call requests (raw dicts)
    tool_call_id: Optional[str] = None  # presumably set on role="tool" replies — per OpenAI schema
    name: Optional[str] = None  # only for role=tool
|
||||||
|
|
||||||
|
|
||||||
|
class ProxyResult(BaseModel):
    """Normalized result of one chat-completions call."""
    content: str = ""
    tool_calls: list = []  # each: {"id", "name", "arguments" (dict)}
    finish_reason: str = ""
|
||||||
|
|
||||||
|
|
||||||
|
class ProxyClient:
    """Thin client for the OpenAI-compatible chat endpoint of the local proxy."""

    def __init__(self, base_url: str = PROXY_URL, model: str = DEFAULT_MODEL):
        self.base_url = base_url.rstrip("/")
        self.model = model
        # Persistent client connection — avoids a TCP handshake on every call
        self._client = httpx.Client(timeout=PROXY_TIMEOUT_SEC)

    def chat(self, messages: List[Message], model: Optional[str] = None) -> str:
        """Convenience: simple chat without tools. Returns only the reply string.

        Raises RuntimeError when the proxy returns empty content.
        """
        result = self.chat_full(messages, tools=None, model=model)
        if not result.content:
            raise RuntimeError("Proxy lieferte leeren content")
        return result.content

    def chat_full(
        self,
        messages: List[Message],
        tools: Optional[list] = None,
        model: Optional[str] = None,
    ) -> ProxyResult:
        """Full chat — may return tool calls (when tools are passed).

        tools format is OpenAI-style:
            [{"type":"function","function":{"name":..,"description":..,"parameters":{...}}}, ...]

        Raises RuntimeError on transport errors, non-200 responses,
        invalid JSON, or a response without choices.
        """
        url = f"{self.base_url}/v1/chat/completions"
        # Pydantic dump with exclude_none so role=tool works without tool_calls
        payload = {
            "model": model or self.model,
            "messages": [m.model_dump(exclude_none=True) for m in messages],
        }
        if tools:
            payload["tools"] = tools
        logger.info("Proxy → %s (%d Messages, %d tools, model=%s)",
                    url, len(messages), len(tools or []), payload["model"])
        try:
            r = self._client.post(url, json=payload)
        except httpx.RequestError as exc:
            raise RuntimeError(f"Proxy unreachable: {exc}") from exc
        if r.status_code != 200:
            raise RuntimeError(f"Proxy HTTP {r.status_code}: {r.text[:300]}")
        try:
            data = r.json()
        except Exception as exc:
            raise RuntimeError(f"Proxy invalid JSON: {exc}") from exc

        choices = data.get("choices") or []
        if not choices:
            raise RuntimeError(f"Proxy ohne choices: {str(data)[:300]}")

        msg = choices[0].get("message") or {}
        finish_reason = choices[0].get("finish_reason", "")

        content = msg.get("content") or ""
        if isinstance(content, list):
            # content may arrive as a list of typed parts — keep the text parts only
            content = "".join(
                part.get("text", "") for part in content if isinstance(part, dict) and part.get("type") == "text"
            )

        tool_calls = self._parse_tool_calls(msg.get("tool_calls") or [])

        # Append call metric — token estimate for quota monitoring
        metrics.log_call(payload["model"], messages, content or "")

        return ProxyResult(content=content or "", tool_calls=tool_calls, finish_reason=finish_reason)

    @staticmethod
    def _parse_tool_calls(raw: list) -> list:
        """Normalize raw tool_calls into [{"id", "name", "arguments"(dict)}, ...].

        Arguments usually arrive as a JSON string; unparseable payloads
        are preserved under "_raw" instead of being dropped. Uses the
        module-level json import (the original re-imported json per call).
        """
        calls: list = []
        for tc in raw:
            fn = tc.get("function") or {}
            args_raw = fn.get("arguments", "{}")
            if isinstance(args_raw, dict):
                args = args_raw
            else:
                try:
                    args = json.loads(args_raw)
                except Exception:
                    args = {"_raw": args_raw}
            calls.append({
                "id": tc.get("id", ""),
                "name": fn.get("name", ""),
                "arguments": args,
            })
        return calls

    def close(self):
        """Close the persistent HTTP connection; never raises."""
        try:
            self._client.close()
        except Exception:
            pass
|
||||||
@@ -0,0 +1,14 @@
|
|||||||
|
fastapi==0.115.0
|
||||||
|
uvicorn[standard]==0.32.0
|
||||||
|
pydantic==2.9.2
|
||||||
|
httpx==0.27.2
|
||||||
|
websockets==13.1
|
||||||
|
|
||||||
|
# Vector-DB
|
||||||
|
qdrant-client==1.12.1
|
||||||
|
|
||||||
|
# Embeddings (laeuft auf CPU, ~120MB Modell)
|
||||||
|
sentence-transformers==3.2.1
|
||||||
|
|
||||||
|
# Utility
|
||||||
|
python-multipart==0.0.12
|
||||||
@@ -0,0 +1,373 @@
|
|||||||
|
"""
|
||||||
|
Skill-Manager — Filesystem-Layer fuer ARIAs Faehigkeiten.
|
||||||
|
|
||||||
|
Layout:
|
||||||
|
/data/skills/<name>/
|
||||||
|
skill.json - Manifest
|
||||||
|
README.md - Beschreibung (vom Stil her: was, wann, wie aufrufen)
|
||||||
|
run.sh - Entry-Point (sh, python -m, was auch immer)
|
||||||
|
requirements.txt - optional, fuer local-venv
|
||||||
|
venv/ - automatisch erzeugt bei local-venv
|
||||||
|
bin/ - statische Binaries (fuer local-bin)
|
||||||
|
logs/ - <ts>.json Run-Logs (append-only pro Run)
|
||||||
|
|
||||||
|
Manifest (skill.json):
|
||||||
|
{
|
||||||
|
"name": "youtube2mp3",
|
||||||
|
"description": "Konvertiert YouTube-Video-URL zu MP3",
|
||||||
|
"execution": "local-venv" | "local-bin" | "bash",
|
||||||
|
"entry": "run.sh",
|
||||||
|
"args": [{"name": "url", "required": true}, ...],
|
||||||
|
"requires": {"pip": [...], "binaries": [...]},
|
||||||
|
"active": true,
|
||||||
|
"created_at": "ISO",
|
||||||
|
"updated_at": "ISO",
|
||||||
|
"last_used": null | "ISO",
|
||||||
|
"use_count": 0,
|
||||||
|
"version": "1.0",
|
||||||
|
"author": "aria" | "stefan"
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import shutil
|
||||||
|
import subprocess
|
||||||
|
import time
|
||||||
|
import uuid
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
# Root directory holding one subdirectory per skill; overridable via env.
SKILLS_DIR = Path(os.environ.get("SKILLS_DIR", "/data/skills"))
# Shared upload folder handed to skills via the SHARED_UPLOADS env var.
SHARED_UPLOADS = Path("/shared/uploads")

# Allowed values of the manifest "execution" field.
VALID_EXECUTIONS = {"local-venv", "local-bin", "bash"}
# Skill names: alphanumerics plus _ and -, 2-60 chars (also blocks path tricks).
NAME_RE = re.compile(r"^[a-zA-Z0-9_-]{2,60}$")
|
||||||
|
|
||||||
|
|
||||||
|
def _now() -> str:
|
||||||
|
return datetime.now(timezone.utc).isoformat()
|
||||||
|
|
||||||
|
|
||||||
|
def _safe_name(name: str) -> str:
    """Validate a skill name against NAME_RE; raise ValueError otherwise.

    Guards every filesystem path built from a name, so directory
    traversal through the name is impossible.
    """
    if isinstance(name, str) and NAME_RE.match(name):
        return name
    raise ValueError(f"Ungültiger Skill-Name: {name!r}")
|
||||||
|
|
||||||
|
|
||||||
|
def _skill_dir(name: str) -> Path:
    """Directory of the given skill below SKILLS_DIR (name validated first)."""
    safe = _safe_name(name)
    return SKILLS_DIR / safe
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Listing ────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
def list_skills(active_only: bool = False) -> list[dict]:
    """Return manifests of all skills, sorted by directory name.

    Directories without a readable skill.json are skipped; with
    active_only=True, deactivated skills are filtered out as well.
    """
    if not SKILLS_DIR.exists():
        return []
    manifests = (
        read_manifest(entry.name)
        for entry in sorted(SKILLS_DIR.iterdir())
        if entry.is_dir()
    )
    return [
        m for m in manifests
        if m is not None and (not active_only or m.get("active", True))
    ]
|
||||||
|
|
||||||
|
|
||||||
|
def read_manifest(name: str) -> Optional[dict]:
    """Load skill.json for a skill; None when missing or unreadable.

    Invalid names also yield None (with a warning) because _skill_dir
    raises inside the try block.
    """
    try:
        manifest_path = _skill_dir(name) / "skill.json"
        if manifest_path.exists():
            return json.loads(manifest_path.read_text(encoding="utf-8"))
        return None
    except Exception as exc:
        logger.warning("Manifest lesen %s: %s", name, exc)
        return None
|
||||||
|
|
||||||
|
|
||||||
|
def write_manifest(name: str, manifest: dict) -> None:
    """Persist a manifest to <skill>/skill.json, stamping updated_at.

    Note: mutates the passed dict (sets "updated_at") and creates the
    skill directory when it does not exist yet.
    """
    skill_path = _skill_dir(name)
    skill_path.mkdir(parents=True, exist_ok=True)
    manifest["updated_at"] = _now()
    target = skill_path / "skill.json"
    target.write_text(json.dumps(manifest, indent=2, ensure_ascii=False), encoding="utf-8")
|
||||||
|
|
||||||
|
|
||||||
|
def read_readme(name: str) -> str:
    """Return the skill's README.md content, or "" when absent."""
    readme = _skill_dir(name) / "README.md"
    if not readme.exists():
        return ""
    return readme.read_text(encoding="utf-8")
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Create / Update / Delete ────────────────────────────────────────
|
||||||
|
|
||||||
|
def create_skill(
    name: str,
    description: str,
    execution: str,
    entry_code: str,
    readme: str = "",
    args: Optional[list] = None,
    requires: Optional[dict] = None,
    pip_packages: Optional[list[str]] = None,
    author: str = "aria",
) -> dict:
    """Create a new skill on disk. Raises ValueError on invalid inputs.

    entry_code is written to run.py (local-venv) or run.sh (otherwise).
    For local-venv a venv is created immediately and pip_packages installed;
    if that fails, the skill stays on disk but is marked inactive.
    """
    name = _safe_name(name)
    if execution not in VALID_EXECUTIONS:
        raise ValueError(f"execution muss eines von {VALID_EXECUTIONS} sein")
    d = _skill_dir(name)
    if d.exists():
        raise ValueError(f"Skill '{name}' existiert bereits — erst loeschen oder updaten")

    d.mkdir(parents=True)
    (d / "logs").mkdir()

    # Entry file: run.sh or run.py depending on execution mode
    if execution == "local-venv":
        entry_path = d / "run.py"
        entry_path.write_text(entry_code, encoding="utf-8")
        entry_name = "run.py"
        # requirements.txt is kept so the venv can be rebuilt on import
        (d / "requirements.txt").write_text("\n".join(pip_packages or []) + "\n", encoding="utf-8")
    else:
        entry_path = d / "run.sh"
        # Prepend shebang + strict mode when the code has none
        content = entry_code if entry_code.startswith("#!") else "#!/usr/bin/env bash\nset -euo pipefail\n" + entry_code
        entry_path.write_text(content, encoding="utf-8")
        entry_path.chmod(0o755)
        entry_name = "run.sh"

    # README (fallback: generated from name + description)
    (d / "README.md").write_text(readme or f"# {name}\n\n{description}\n", encoding="utf-8")

    manifest = {
        "name": name,
        "description": description,
        "execution": execution,
        "entry": entry_name,
        "args": args or [],
        "requires": requires or {},
        "active": True,
        "created_at": _now(),
        "updated_at": _now(),
        "last_used": None,
        "use_count": 0,
        "version": "1.0",
        "author": author,
    }
    write_manifest(name, manifest)

    # Build the venv for local-venv skills
    if execution == "local-venv":
        try:
            _setup_venv(d, pip_packages or [])
        except Exception as exc:
            # venv setup failed → skill stays on disk, but deactivated
            manifest["active"] = False
            manifest["setup_error"] = str(exc)[:500]
            write_manifest(name, manifest)
            logger.warning("Skill %s: venv-Setup fehlgeschlagen → deaktiviert: %s", name, exc)

    logger.info("Skill erstellt: %s (%s)", name, execution)
    return manifest
|
||||||
|
|
||||||
|
|
||||||
|
def _setup_venv(skill_dir: Path, pip_packages: list[str]) -> None:
    """Create <skill>/venv and install pip_packages into it.

    Propagates CalledProcessError / TimeoutExpired from subprocess on failure.
    """
    venv_dir = skill_dir / "venv"
    logger.info("venv erstellen: %s", venv_dir)
    subprocess.run(["python", "-m", "venv", str(venv_dir)], check=True, timeout=120)
    if pip_packages:
        pip_bin = venv_dir / "bin" / "pip"
        subprocess.run([str(pip_bin), "install", "--no-cache-dir", *pip_packages], check=True, timeout=600)
|
||||||
|
|
||||||
|
|
||||||
|
def update_skill(name: str, patch: dict) -> dict:
    """Apply a whitelisted patch to a skill's manifest and persist it.

    Unknown keys in the patch are ignored. Raises ValueError when the
    skill does not exist.
    """
    manifest = read_manifest(name)
    if manifest is None:
        raise ValueError(f"Skill '{name}' nicht gefunden")
    allowed = {"description", "args", "requires", "active", "version", "entry"}
    manifest.update({k: v for k, v in patch.items() if k in allowed})
    write_manifest(name, manifest)
    return manifest
|
||||||
|
|
||||||
|
|
||||||
|
def delete_skill(name: str) -> None:
    """Remove a skill directory entirely; raise ValueError when missing."""
    skill_path = _skill_dir(name)
    if not skill_path.exists():
        raise ValueError(f"Skill '{name}' nicht gefunden")
    shutil.rmtree(skill_path)
    logger.info("Skill geloescht: %s", name)
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Run ────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
def run_skill(name: str, args: Optional[dict] = None, timeout_sec: int = 300) -> dict:
    """Execute a skill. Args are passed as env vars
    (prefix ARG_, e.g. ARG_URL for args["url"]).

    Returns: {ok, exit_code, stdout, stderr, duration_sec, log_path}
    Raises ValueError when the skill is missing or deactivated.
    """
    manifest = read_manifest(name)
    if manifest is None:
        raise ValueError(f"Skill '{name}' nicht gefunden")
    if not manifest.get("active", True):
        raise ValueError(f"Skill '{name}' ist deaktiviert")

    d = _skill_dir(name)
    entry = manifest.get("entry", "run.sh")
    exec_mode = manifest.get("execution", "bash")

    env = os.environ.copy()
    # Skill args as env vars; keys must be identifier-like, others are dropped
    for k, v in (args or {}).items():
        if not re.match(r"^[a-zA-Z][a-zA-Z0-9_]*$", k):
            continue
        env[f"ARG_{k.upper()}"] = str(v)
    env["SKILL_DIR"] = str(d)
    env["SHARED_UPLOADS"] = str(SHARED_UPLOADS)

    # Build the command per execution mode
    if exec_mode == "local-venv":
        python = d / "venv" / "bin" / "python"
        cmd = [str(python), str(d / entry)]
    elif exec_mode == "local-bin":
        # The skill ships its own bin/ — prepend it to PATH
        env["PATH"] = f"{d / 'bin'}:{env.get('PATH', '')}"
        cmd = [str(d / entry)]
    else:  # bash
        cmd = [str(d / entry)]

    log_id = f"{int(time.time())}-{uuid.uuid4().hex[:8]}"
    log_path = d / "logs" / f"{log_id}.json"

    t0 = time.time()
    try:
        proc = subprocess.run(
            cmd, env=env, cwd=str(d),
            capture_output=True, text=True, timeout=timeout_sec,
        )
        out_text = proc.stdout
        err_text = proc.stderr
        exit_code = proc.returncode
        timed_out = False
    except subprocess.TimeoutExpired as exc:
        # exit_code -1 marks a timeout; partial output is preserved
        out_text = exc.stdout or ""
        err_text = (exc.stderr or "") + f"\n[TIMEOUT {timeout_sec}s]"
        exit_code = -1
        timed_out = True
    duration = time.time() - t0

    # Write the run log (output truncated so logs stay bounded)
    record = {
        "ts": _now(),
        "args": args or {},
        "exit_code": exit_code,
        "duration_sec": round(duration, 2),
        "stdout": (out_text or "")[:8000],
        "stderr": (err_text or "")[:8000],
        "timed_out": timed_out,
    }
    try:
        log_path.write_text(json.dumps(record, indent=2, ensure_ascii=False), encoding="utf-8")
    except Exception:
        pass  # logging is best-effort; never fail the run over it

    # Update usage stats
    manifest["last_used"] = _now()
    manifest["use_count"] = int(manifest.get("use_count", 0)) + 1
    write_manifest(name, manifest)

    record["ok"] = exit_code == 0
    record["log_path"] = str(log_path)
    return record
|
||||||
|
|
||||||
|
|
||||||
|
def list_logs(name: str, limit: int = 50) -> list[dict]:
    """Return up to `limit` run-log records for a skill, newest first.

    Unreadable log files are silently skipped; each record gets its
    file stem attached as "log_id".
    """
    log_dir = _skill_dir(name) / "logs"
    if not log_dir.exists():
        return []
    records: list[dict] = []
    for log_file in sorted(log_dir.glob("*.json"), reverse=True)[:limit]:
        try:
            record = json.loads(log_file.read_text(encoding="utf-8"))
            record["log_id"] = log_file.stem
            records.append(record)
        except Exception:
            continue
    return records
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Export / Import ────────────────────────────────────────────────
|
||||||
|
|
||||||
|
def export_skill(name: str) -> bytes:
    """Pack a skill as tar.gz and return the raw bytes.

    venv and logs are excluded (they are rebuilt on import)."""
    import io
    import tarfile
    skill_path = _skill_dir(name)
    if not skill_path.exists():
        raise ValueError(f"Skill '{name}' nicht gefunden")
    excluded = ("venv", "logs", "__pycache__")
    buf = io.BytesIO()
    with tarfile.open(fileobj=buf, mode="w:gz") as tar:
        for child in skill_path.iterdir():
            if child.name in excluded:
                continue
            tar.add(child, arcname=f"{name}/{child.name}")
    return buf.getvalue()
|
||||||
|
|
||||||
|
|
||||||
|
def import_skill(tar_bytes: bytes, overwrite: bool = False) -> dict:
    """Import a skill from tar.gz bytes and return its manifest.

    Rejects archives whose members would escape SKILLS_DIR (path
    traversal) or that contain symlink/hardlink members.  When the
    manifest declares execution == "local-venv" the venv is rebuilt;
    on failure the skill is deactivated and the error recorded.

    Raises:
        ValueError: empty archive, unsafe member path, or existing
            skill without overwrite=True.
    """
    import io
    import tarfile
    SKILLS_DIR.mkdir(parents=True, exist_ok=True)
    with tarfile.open(fileobj=io.BytesIO(tar_bytes), mode="r:gz") as tar:
        # The archive's root directory name is the skill name.
        members = tar.getmembers()
        if not members:
            raise ValueError("Leeres Archiv")
        root = members[0].name.split("/", 1)[0]
        name = _safe_name(root)
        d = _skill_dir(name)
        if d.exists():
            if not overwrite:
                raise ValueError(f"Skill '{name}' existiert bereits — overwrite=true setzen")
            shutil.rmtree(d)
        # Path-traversal check.  A plain str.startswith() prefix test would
        # also accept sibling directories like "<skills_dir>_evil"; comparing
        # resolved paths with Path.relative_to() is exact.  Symlink/hardlink
        # members are rejected outright — extractall() would follow them.
        base = SKILLS_DIR.resolve()
        for m in members:
            if m.issym() or m.islnk():
                raise ValueError(f"Unsicherer Pfad im Archiv: {m.name}")
            target = (base / m.name).resolve()
            try:
                target.relative_to(base)
            except ValueError:
                raise ValueError(f"Unsicherer Pfad im Archiv: {m.name}") from None
        tar.extractall(SKILLS_DIR)
    # Recreate the logs directory (it is excluded from exports).
    (d / "logs").mkdir(exist_ok=True)
    # Rebuild the venv if the skill executes in a local venv.
    manifest = read_manifest(name) or {}
    if manifest.get("execution") == "local-venv":
        req_file = d / "requirements.txt"
        pip_packages: list[str] = []
        if req_file.exists():
            pip_packages = [l.strip() for l in req_file.read_text().splitlines() if l.strip() and not l.startswith("#")]
        try:
            _setup_venv(d, pip_packages)
        except Exception as exc:
            # Best-effort: keep the import, but mark the skill inactive.
            logger.warning("Skill-Import %s: venv-Setup fehlgeschlagen: %s", name, exc)
            manifest["active"] = False
            manifest["setup_error"] = str(exc)[:500]
            write_manifest(name, manifest)
    return manifest
|
||||||
@@ -0,0 +1,229 @@
|
|||||||
|
"""
|
||||||
|
Triggers — passive Aufweck-Quellen fuer ARIA.
|
||||||
|
|
||||||
|
Skills sind aktiv (ARIA ruft sie). Triggers sind passiv — das System ruft
|
||||||
|
ARIA wenn ein Event passiert. Drei Typen:
|
||||||
|
|
||||||
|
timer Einmalig zu einem festen Zeitpunkt
|
||||||
|
watcher Recurring: Condition pruefen, bei True → feuern (mit Throttle)
|
||||||
|
cron Cron-Expression (vorerst nicht implementiert, Platzhalter)
|
||||||
|
|
||||||
|
Layout:
|
||||||
|
/data/triggers/<name>.json Manifest pro Trigger
|
||||||
|
/data/triggers/logs/<name>.jsonl Append-only Log pro Feuerung
|
||||||
|
|
||||||
|
Polling-Kosten: Brain-internes Background-Polling (kein LLM-Call).
|
||||||
|
ARIA wird nur aufgeweckt wenn ein Trigger tatsaechlich feuert.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import shutil
|
||||||
|
import time
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
TRIGGERS_DIR = Path(os.environ.get("TRIGGERS_DIR", "/data/triggers"))
|
||||||
|
LOGS_DIR = TRIGGERS_DIR / "logs"
|
||||||
|
NAME_RE = re.compile(r"^[a-zA-Z0-9_-]{2,60}$")
|
||||||
|
VALID_TYPES = {"timer", "watcher", "cron"}
|
||||||
|
|
||||||
|
|
||||||
|
def _now_iso() -> str:
|
||||||
|
return datetime.now(timezone.utc).isoformat()
|
||||||
|
|
||||||
|
|
||||||
|
def _safe_name(name: str) -> str:
    """Validate a trigger name against NAME_RE and return it unchanged.

    Raises ValueError for non-strings or names outside the pattern.
    """
    if isinstance(name, str) and NAME_RE.match(name):
        return name
    raise ValueError(f"Ungueltiger Trigger-Name: {name!r}")
|
||||||
|
|
||||||
|
|
||||||
|
def _path(name: str) -> Path:
    """Manifest file path for a (validated) trigger name."""
    safe = _safe_name(name)
    return TRIGGERS_DIR / (safe + ".json")
|
||||||
|
|
||||||
|
|
||||||
|
def _ensure_dirs():
    """Create the trigger and log directories if they do not exist yet."""
    for directory in (TRIGGERS_DIR, LOGS_DIR):
        directory.mkdir(parents=True, exist_ok=True)
|
||||||
|
|
||||||
|
|
||||||
|
# ─── CRUD ───────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
def list_triggers(active_only: bool = False) -> list[dict]:
    """All trigger manifests sorted by filename; optionally only active ones.

    Unreadable manifests are logged and skipped.
    """
    if not TRIGGERS_DIR.exists():
        return []
    result: list[dict] = []
    for manifest_file in sorted(TRIGGERS_DIR.glob("*.json")):
        try:
            trigger = json.loads(manifest_file.read_text(encoding="utf-8"))
        except Exception as e:
            logger.warning("Trigger lesen %s: %s", manifest_file, e)
            continue
        if active_only and not trigger.get("active", True):
            continue
        result.append(trigger)
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def read(name: str) -> Optional[dict]:
    """Read one trigger manifest; None when missing or unreadable."""
    manifest_file = _path(name)
    if not manifest_file.exists():
        return None
    try:
        return json.loads(manifest_file.read_text(encoding="utf-8"))
    except Exception as e:
        logger.warning("Trigger %s lesen: %s", name, e)
        return None
|
||||||
|
|
||||||
|
|
||||||
|
def write(name: str, data: dict) -> None:
    """Persist a trigger manifest atomically (tmp file + rename)."""
    _ensure_dirs()
    data["updated_at"] = _now_iso()
    target = _path(name)
    tmp_file = target.with_suffix(".tmp")
    payload = json.dumps(data, indent=2, ensure_ascii=False)
    tmp_file.write_text(payload, encoding="utf-8")
    # Atomic on POSIX: readers never observe a half-written manifest.
    tmp_file.replace(target)
|
||||||
|
|
||||||
|
|
||||||
|
def delete(name: str) -> None:
    """Remove a trigger manifest together with its log file."""
    manifest_file = _path(name)
    if not manifest_file.exists():
        raise ValueError(f"Trigger '{name}' nicht gefunden")
    manifest_file.unlink()
    # Remove the per-trigger log as well.
    log_file = LOGS_DIR / f"{_safe_name(name)}.jsonl"
    if log_file.exists():
        log_file.unlink()
|
||||||
|
|
||||||
|
|
||||||
|
def update(name: str, patch: dict) -> dict:
    """Apply a whitelisted partial update to a trigger and persist it.

    Keys outside the whitelist are silently ignored.
    """
    data = read(name)
    if data is None:
        raise ValueError(f"Trigger '{name}' nicht gefunden")
    allowed = {"active", "message", "condition", "throttle_sec",
               "check_interval_sec", "fires_at"}
    for key, value in patch.items():
        if key not in allowed:
            continue
        data[key] = value
    write(name, data)
    return data
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Create-Helpers (typ-spezifisch) ────────────────────────────────
|
||||||
|
|
||||||
|
def create_timer(
    name: str,
    fires_at_iso: str,
    message: str,
    author: str = "aria",
) -> dict:
    """Create a one-shot timer trigger.

    Args:
        name: Trigger name (validated against NAME_RE).
        fires_at_iso: ISO-8601 timestamp at which the trigger fires.
        message: Text delivered to ARIA when the trigger fires.
        author: Creator of the trigger.

    Returns:
        The persisted trigger manifest.

    Raises:
        ValueError: invalid name, duplicate trigger, or unparsable timestamp.
    """
    _safe_name(name)
    if _path(name).exists():
        raise ValueError(f"Trigger '{name}' existiert schon")
    # Validate the timestamp; "Z" is normalised for fromisoformat on <3.11.
    try:
        datetime.fromisoformat(fires_at_iso.replace("Z", "+00:00"))
    except Exception as exc:
        # Chain the original parse error instead of discarding it (B904).
        raise ValueError(f"fires_at_iso ungueltig: {fires_at_iso}") from exc
    data = {
        "name": name,
        "type": "timer",
        "active": True,
        "author": author,
        "created_at": _now_iso(),
        "fires_at": fires_at_iso,
        "message": message,
        "fire_count": 0,
        "last_fired_at": None,
    }
    write(name, data)
    logger.info("Trigger angelegt: %s (timer, fires_at=%s)", name, fires_at_iso)
    return data
|
||||||
|
|
||||||
|
|
||||||
|
def create_watcher(
    name: str,
    condition: str,
    message: str,
    check_interval_sec: int = 300,
    throttle_sec: int = 3600,
    author: str = "aria",
) -> dict:
    """Create a recurring watcher trigger.

    Validates the name and the condition expression, clamps the polling
    interval and throttle, then persists and returns the manifest.
    """
    _safe_name(name)
    if _path(name).exists():
        raise ValueError(f"Trigger '{name}' existiert schon")
    # Validate the condition up front — raises on syntax errors.
    from watcher import parse_condition
    parse_condition(condition)
    # Clamp: never poll more often than every 30s, no negative throttle.
    check_interval_sec = max(30, check_interval_sec)
    throttle_sec = max(0, throttle_sec)
    data = {
        "name": name,
        "type": "watcher",
        "active": True,
        "author": author,
        "created_at": _now_iso(),
        "condition": condition,
        "check_interval_sec": int(check_interval_sec),
        "throttle_sec": int(throttle_sec),
        "message": message,
        "fire_count": 0,
        "last_fired_at": None,
        "last_checked_at": None,
    }
    write(name, data)
    logger.info("Trigger angelegt: %s (watcher, cond='%s')", name, condition)
    return data
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Feuern + Log ───────────────────────────────────────────────────
|
||||||
|
|
||||||
|
def mark_fired(name: str) -> dict:
    """Record a firing: bump fire_count and last_fired_at.

    Timer triggers are one-shot and auto-deactivate after firing.
    """
    trigger = read(name)
    if trigger is None:
        raise ValueError(f"Trigger '{name}' nicht gefunden")
    fired_before = int(trigger.get("fire_count", 0))
    trigger["fire_count"] = fired_before + 1
    trigger["last_fired_at"] = _now_iso()
    if trigger.get("type") == "timer":
        # One-shot semantics for timers.
        trigger["active"] = False
    write(name, trigger)
    return trigger
|
||||||
|
|
||||||
|
|
||||||
|
def append_log(name: str, entry: dict) -> None:
    """Append one timestamped JSONL record to the trigger's log file.

    Best-effort: failures are logged, never raised.
    """
    _ensure_dirs()
    log_file = LOGS_DIR / f"{_safe_name(name)}.jsonl"
    record = {"ts": _now_iso(), **entry}
    try:
        line = json.dumps(record, ensure_ascii=False)
        with log_file.open("a", encoding="utf-8") as fh:
            fh.write(line + "\n")
    except Exception as e:
        logger.warning("Trigger-Log append %s: %s", name, e)
|
||||||
|
|
||||||
|
|
||||||
|
def list_logs(name: str, limit: int = 50) -> list[dict]:
    """Last *limit* log records of a trigger (tail of the JSONL file).

    Unparsable lines are skipped; any read failure yields [].
    """
    log_file = LOGS_DIR / f"{_safe_name(name)}.jsonl"
    if not log_file.exists():
        return []
    try:
        tail = log_file.read_text(encoding="utf-8").splitlines()[-limit:]
    except Exception:
        return []
    records: list[dict] = []
    for raw in tail:
        try:
            records.append(json.loads(raw))
        except Exception:
            continue
    return records
|
||||||
@@ -0,0 +1,310 @@
|
|||||||
|
"""
|
||||||
|
Built-in Condition-Variablen + sicherer Mini-Parser fuer Watcher-Triggers.
|
||||||
|
|
||||||
|
Erlaubte Variablen + die EINE Funktion `near(lat, lon, radius_m)` kommen
|
||||||
|
aus diesem Modul. Condition-Ausdruck ist ein sicheres Subset von Python
|
||||||
|
(kein eval, kein exec): nur Vergleiche, Boolean-Operatoren, Whitelisted
|
||||||
|
Funktionen, Variablen aus describe_variables(), Konstanten (Zahl/Bool/Str).
|
||||||
|
|
||||||
|
Beispiele:
|
||||||
|
disk_free_gb < 5
|
||||||
|
hour_of_day == 8 and day_of_week == "mon"
|
||||||
|
is_weekend and minute_of_hour == 0
|
||||||
|
near(53.123, 7.456, 500)
|
||||||
|
current_lat and location_age_sec < 120
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import ast
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import math
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import time
|
||||||
|
from datetime import datetime
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
STATE_DIR = Path("/shared/state")
|
||||||
|
|
||||||
|
|
||||||
|
# ─── State-Helfer (gemeinsam mit Bridge: /shared/state/*.json) ──────
|
||||||
|
|
||||||
|
def _read_state(name: str) -> dict | None:
    """Load /shared/state/<name>.json; None when absent or unparsable."""
    state_file = STATE_DIR / f"{name}.json"
    if not state_file.exists():
        return None
    try:
        return json.loads(state_file.read_text(encoding="utf-8"))
    except Exception:
        return None
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Variablen-Quellen ──────────────────────────────────────────────
|
||||||
|
|
||||||
|
def _disk_stats() -> tuple[float, float]:
|
||||||
|
"""Returns (free_gb, free_pct). Schaut /shared (geteiltes Volume) — sonst /."""
|
||||||
|
target = "/shared" if os.path.exists("/shared") else "/"
|
||||||
|
try:
|
||||||
|
st = shutil.disk_usage(target)
|
||||||
|
free_gb = st.free / (1024 ** 3)
|
||||||
|
free_pct = 100.0 * st.free / st.total if st.total else 0.0
|
||||||
|
return free_gb, free_pct
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning("disk_usage: %s", e)
|
||||||
|
return 0.0, 0.0
|
||||||
|
|
||||||
|
|
||||||
|
def _uptime_sec() -> int:
|
||||||
|
try:
|
||||||
|
with open("/proc/uptime", "r") as f:
|
||||||
|
return int(float(f.read().split()[0]))
|
||||||
|
except Exception:
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def _ram_free_mb() -> int:
|
||||||
|
"""Container-RAM: MemAvailable aus /proc/meminfo (kB → MB)."""
|
||||||
|
try:
|
||||||
|
with open("/proc/meminfo", "r") as f:
|
||||||
|
for line in f:
|
||||||
|
if line.startswith("MemAvailable:"):
|
||||||
|
return int(line.split()[1]) // 1024
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
return 0
|
||||||
|
|
||||||
|
|
||||||
|
def _cpu_load_1min() -> float:
|
||||||
|
"""load avg ueber 1 Minute (linux). Vorsicht: das ist die HOST-load,
|
||||||
|
nicht container-spezifisch."""
|
||||||
|
try:
|
||||||
|
with open("/proc/loadavg", "r") as f:
|
||||||
|
return float(f.read().split()[0])
|
||||||
|
except Exception:
|
||||||
|
return 0.0
|
||||||
|
|
||||||
|
|
||||||
|
_DAYS = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
|
||||||
|
|
||||||
|
|
||||||
|
def _gps_state() -> dict[str, Any]:
    """Last known position from /shared/state/location.json.

    Returns current_lat/current_lon (floats or None) and
    location_age_sec (-1 when no timestamp is known).
    """
    state = _read_state("location") or {}
    raw_lat = state.get("lat")
    raw_lon = state.get("lon")
    ts_unix = state.get("ts_unix")
    if isinstance(ts_unix, (int, float)):
        age_sec = int(int(time.time()) - ts_unix)
    else:
        age_sec = -1

    def _as_float(value):
        return float(value) if isinstance(value, (int, float)) else None

    return {
        "current_lat": _as_float(raw_lat),
        "current_lon": _as_float(raw_lon),
        "location_age_sec": age_sec,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def _user_activity_age() -> int:
    """Seconds since the last user interaction (chat or voice); -1 if never."""
    state = _read_state("activity") or {}
    last_ts = state.get("last_user_ts")
    if isinstance(last_ts, (int, float)):
        return int(time.time() - last_ts)
    return -1
|
||||||
|
|
||||||
|
|
||||||
|
def collect_variables() -> dict[str, Any]:
    """Return a snapshot of all built-in condition variables plus the near() helper.

    The returned dict is passed as the locals mapping to the condition
    evaluator, so every key must match a name from describe_variables()
    (plus the whitelisted "near" function).
    """
    free_gb, free_pct = _disk_stats()
    # Local time on purpose: hour_of_day is documented as local time
    # in describe_variables().
    now = datetime.now()
    gps = _gps_state()

    # Memory counts from the vector DB (lazy import to avoid a circular import)
    memory_count = 0
    pinned_count = 0
    try:
        from main import store  # type: ignore
        s = store()
        memory_count = s.count()
        try:
            pinned_count = len(s.list_pinned())
        except Exception:
            pass
    except Exception:
        # Best-effort: counts stay 0 when the store is unavailable.
        pass

    vars_: dict[str, Any] = {
        # Disk + system
        "disk_free_gb": round(free_gb, 2),
        "disk_free_pct": round(free_pct, 1),
        "ram_free_mb": _ram_free_mb(),
        "cpu_load_1min": round(_cpu_load_1min(), 2),
        "uptime_sec": _uptime_sec(),

        # Time
        "hour_of_day": now.hour,
        "minute_of_hour": now.minute,
        "day_of_month": now.day,
        "month": now.month,
        "year": now.year,
        "day_of_week": _DAYS[now.weekday()],
        "is_weekend": now.weekday() >= 5,
        "unix_timestamp": int(time.time()),

        # GPS
        "current_lat": gps["current_lat"],
        "current_lon": gps["current_lon"],
        "location_age_sec": gps["location_age_sec"],

        # Activity
        "last_user_message_ago_sec": _user_activity_age(),

        # Memory
        "memory_count": memory_count,
        "pinned_count": pinned_count,

        # rvs_connected: the brain cannot reliably determine this yet
        # (the bridge would have to write its own heartbeat state — later)
        "rvs_connected": False,
    }

    # Function helper — recognised by the parser as an ast.Call named "near".
    # Closure over the GPS values so eval() needs no extra variables.
    def _near(lat: float, lon: float, radius_m: float) -> bool:
        """Haversine distance: True when the current position is < radius_m from the point."""
        cur_lat = vars_.get("current_lat")
        cur_lon = vars_.get("current_lon")
        if cur_lat is None or cur_lon is None:
            # Unknown position → never "near" (defensive default)
            return False
        try:
            R = 6371000.0
            phi1 = math.radians(float(cur_lat))
            phi2 = math.radians(float(lat))
            dphi = math.radians(float(lat) - float(cur_lat))
            dlam = math.radians(float(lon) - float(cur_lon))
            a = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlam / 2) ** 2
            distance = 2 * R * math.asin(math.sqrt(a))
            return distance < float(radius_m)
        except Exception:
            return False

    vars_["near"] = _near
    return vars_
|
||||||
|
|
||||||
|
|
||||||
|
def describe_variables() -> list[dict]:
    """Catalog of all built-in condition variables — for system prompt and UI."""
    catalog = [
        # Disk / system
        {"name": "disk_free_gb", "type": "number", "desc": "freier Plattenplatz in GB (auf /shared)"},
        {"name": "disk_free_pct", "type": "number", "desc": "freier Plattenplatz in Prozent"},
        {"name": "ram_free_mb", "type": "number", "desc": "freier RAM im Brain-Container (MB)"},
        {"name": "cpu_load_1min", "type": "number", "desc": "Load-Avg 1min (Host)"},
        {"name": "uptime_sec", "type": "number", "desc": "Sekunden seit Brain-Start"},
        # Time
        {"name": "hour_of_day", "type": "number", "desc": "0..23, lokale Zeit"},
        {"name": "minute_of_hour", "type": "number", "desc": "0..59"},
        {"name": "day_of_month", "type": "number", "desc": "1..31"},
        {"name": "month", "type": "number", "desc": "1..12"},
        {"name": "year", "type": "number", "desc": "z.B. 2026"},
        {"name": "day_of_week", "type": "string", "desc": "mon|tue|wed|thu|fri|sat|sun"},
        {"name": "is_weekend", "type": "bool", "desc": "True wenn Samstag oder Sonntag"},
        {"name": "unix_timestamp", "type": "number", "desc": "Sekunden seit Epoche (UTC)"},
        # GPS
        {"name": "current_lat", "type": "number", "desc": "letzte bekannte Breitengrad (oder None)"},
        {"name": "current_lon", "type": "number", "desc": "letzte bekannte Laengengrad (oder None)"},
        {"name": "location_age_sec", "type": "number", "desc": "Sekunden seit letzter Position (-1 = nie)"},
        # Activity
        {"name": "last_user_message_ago_sec", "type": "number",
         "desc": "Sekunden seit letztem User-Input (-1 = nie)"},
        # Memory
        {"name": "memory_count", "type": "number", "desc": "Anzahl Memories total"},
        {"name": "pinned_count", "type": "number", "desc": "Anzahl pinned (Hot Memory)"},
        {"name": "rvs_connected", "type": "bool", "desc": "RVS-Verbindung (z.Zt. immer False)"},
    ]
    return catalog
|
||||||
|
|
||||||
|
|
||||||
|
def describe_functions() -> list[dict]:
    """Whitelisted functions available inside watcher conditions."""
    near_spec = {
        "name": "near",
        "signature": "near(lat, lon, radius_m)",
        "desc": "True wenn die aktuelle GPS-Position innerhalb von radius_m Metern "
                "vom Punkt (lat, lon) liegt. Haversine. Bei unbekannter Position: False.",
    }
    return [near_spec]
|
||||||
|
|
||||||
|
|
||||||
|
_ALLOWED_FUNCTIONS = {f["name"] for f in describe_functions()}
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Sicherer Condition-Parser ──────────────────────────────────────
|
||||||
|
|
||||||
|
_ALLOWED_NODES = (
|
||||||
|
ast.Expression, ast.BoolOp, ast.UnaryOp, ast.Compare,
|
||||||
|
ast.Name, ast.Constant, ast.Load,
|
||||||
|
ast.And, ast.Or, ast.Not,
|
||||||
|
ast.Eq, ast.NotEq, ast.Lt, ast.LtE, ast.Gt, ast.GtE,
|
||||||
|
ast.Call,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_condition(expr: str) -> ast.Expression:
    """Parse a condition expression and validate it against the safe subset.

    Allowed constructs: comparisons, boolean operators, whitelisted
    function calls, known variables and plain constants.

    Raises:
        ValueError: empty/oversized input, syntax errors, or any
            construct outside the safe subset.
    """
    expr = (expr or "").strip()
    if not expr:
        raise ValueError("Leere Condition")
    if len(expr) > 500:
        raise ValueError("Condition zu lang (>500 Zeichen)")
    try:
        tree = ast.parse(expr, mode="eval")
    except SyntaxError as e:
        # Chain the SyntaxError so position information stays visible.
        raise ValueError(f"Condition Syntax-Fehler: {e}") from e
    allowed_names = {v["name"] for v in describe_variables()}
    for node in ast.walk(tree):
        if not isinstance(node, _ALLOWED_NODES):
            raise ValueError(f"Verbotener Ausdruck: {type(node).__name__}")
        if isinstance(node, ast.Call):
            # Only a direct function name — no attribute access (foo.bar()).
            if not isinstance(node.func, ast.Name):
                raise ValueError("Funktionsaufruf nur ueber direkten Namen erlaubt")
            if node.func.id not in _ALLOWED_FUNCTIONS:
                raise ValueError(f"Verbotene Funktion: {node.func.id}")
            # Arguments must be constants or single names
            # (UnaryOp covers signed numeric literals).
            for a in node.args:
                if not isinstance(a, (ast.Constant, ast.Name, ast.UnaryOp)):
                    raise ValueError(f"Argument-Typ in {node.func.id}() nicht erlaubt")
            if node.keywords:
                raise ValueError("Keyword-Argumente in Funktionen nicht erlaubt")
        if isinstance(node, ast.Name):
            # NOTE(review): on Python 3.8+ True/False parse as ast.Constant,
            # never ast.Name — the membership below is defensive only.
            if (node.id not in allowed_names
                    and node.id not in _ALLOWED_FUNCTIONS
                    and node.id not in ("True", "False")):
                raise ValueError(f"Unbekannte Variable: {node.id}")
        if isinstance(node, ast.Constant):
            if not isinstance(node.value, (int, float, str, bool)) and node.value is not None:
                raise ValueError(f"Verbotener Konstant-Typ: {type(node.value).__name__}")
    return tree
|
||||||
|
|
||||||
|
|
||||||
|
def evaluate(expr: str, variables: dict[str, Any] | None = None) -> bool:
    """Evaluate a condition against the current variable snapshot.

    Defensive: any runtime error during evaluation yields False.
    """
    tree = parse_condition(expr)
    snapshot = collect_variables() if variables is None else variables
    compiled = compile(tree, "<condition>", "eval")
    # Empty globals → no builtins reachable; locals carry the variables
    # plus the whitelisted near() helper.
    try:
        outcome = eval(compiled, {"__builtins__": {}}, snapshot)
    except Exception as e:
        logger.warning("Condition '%s' eval-Fehler: %s", expr, e)
        return False
    return bool(outcome)
|
||||||
@@ -52,15 +52,61 @@ Fuer Web-Anfragen: **WebFetch** oder **Bash mit curl**. Niemals sagen "ich habe
|
|||||||
4. **Regelmaessig committen** — mit sinnvollen Commit-Messages.
|
4. **Regelmaessig committen** — mit sinnvollen Commit-Messages.
|
||||||
5. **Tageslog fuehren** — was wurde getan, was ist offen.
|
5. **Tageslog fuehren** — was wurde getan, was ist offen.
|
||||||
|
|
||||||
|
## Dateien an Stefan zurueckgeben — KRITISCH
|
||||||
|
|
||||||
|
**Das ist die EINZIGE Methode wie Stefan an Dateien rankommt. Ohne
|
||||||
|
diese Schritte sieht und bekommt er die Datei NICHT.**
|
||||||
|
|
||||||
|
### Regel 1 — Speicher-Ort
|
||||||
|
|
||||||
|
Dateien fuer Stefan AUSSCHLIESSLICH unter `/shared/uploads/` speichern.
|
||||||
|
|
||||||
|
NIEMALS in:
|
||||||
|
- `/home/node/.openclaw/workspace/...` (das ist NUR dein Arbeitsverzeichnis,
|
||||||
|
Stefan hat keinen Zugriff darauf)
|
||||||
|
- `/tmp/...`, `/root/...`, oder sonst irgendwo
|
||||||
|
|
||||||
|
Dateinamen mit `aria_`-Prefix damit Cleanup-Scripts sie zuordnen koennen:
|
||||||
|
|
||||||
|
```
|
||||||
|
/shared/uploads/aria_<beschreibender_name>.<ext>
|
||||||
|
```
|
||||||
|
|
||||||
|
Beispiele: `aria_termin_zusage.pdf`, `aria_einkaufsliste.md`,
|
||||||
|
`aria_logs_2026-05-10.zip`.
|
||||||
|
|
||||||
|
### Regel 2 — Marker im Antworttext
|
||||||
|
|
||||||
|
Am Ende deiner Antwort EINMALIG den Marker setzen:
|
||||||
|
|
||||||
|
```
|
||||||
|
[FILE: /shared/uploads/aria_<name>.<ext>]
|
||||||
|
```
|
||||||
|
|
||||||
|
OHNE diesen Marker erscheint die Datei NICHT in der App / Diagnostic.
|
||||||
|
|
||||||
|
Mehrere Dateien: mehrere `[FILE: ...]`-Marker am Ende, jeder in
|
||||||
|
eigener Zeile.
|
||||||
|
|
||||||
|
### Beispiel — kompletter Workflow
|
||||||
|
|
||||||
|
User: "Schreib mir ein Lasagne-Rezept als md-Datei"
|
||||||
|
|
||||||
|
1. Du schreibst die Datei: `Write` Tool mit Pfad `/shared/uploads/aria_lasagne.md`
|
||||||
|
2. Antwort an Stefan:
|
||||||
|
|
||||||
|
```
|
||||||
|
Hier dein Lasagne-Rezept — Ragu am Vortag, echter Parmesan,
|
||||||
|
Ruhezeit nicht skippen. Beim Schichten Bechamel auf jede Lage.
|
||||||
|
|
||||||
|
[FILE: /shared/uploads/aria_lasagne.md]
|
||||||
|
```
|
||||||
|
|
||||||
|
Der Marker wird automatisch aus dem sichtbaren Text entfernt und
|
||||||
|
als Anhang-Bubble angezeigt. Stefan tippt drauf → oeffnet die Datei.
|
||||||
|
|
||||||
## Stimme
|
## Stimme
|
||||||
|
|
||||||
| Stimme | Modell | Wann |
|
TTS laeuft ueber F5-TTS (Voice Cloning, Gaming-PC). Stefan kann eigene
|
||||||
|--------|--------|------|
|
Stimmen aus Audio-Samples klonen (Diagnostic → Stimmen → Stimme klonen)
|
||||||
| **Ramona** (weiblich) | `de_DE-ramona-low` | Alltag, Antworten, Gespraeche (Standard) |
|
und in App + Diagnostic auswaehlen.
|
||||||
| **Thorsten** (maennlich, tief) | `de_DE-thorsten-high` | Epische Momente, Alarme, besondere Ereignisse |
|
|
||||||
|
|
||||||
**Thorsten spricht bei:**
|
|
||||||
- Build erfolgreich deployed
|
|
||||||
- Ticket geloest / Aufgabe abgeschlossen
|
|
||||||
- Kritischer Alarm (Server down, Sicherheitswarnung)
|
|
||||||
- Wenn Stefan sagt "So soll es sein"
|
|
||||||
@@ -78,12 +78,101 @@ Wenn ein Tool nicht klappt, probiere die Alternative. Niemals sagen "ich habe ke
|
|||||||
- Destruktive Operationen (Dateien loeschen, Datenbanken droppen)
|
- Destruktive Operationen (Dateien loeschen, Datenbanken droppen)
|
||||||
- Push auf main
|
- Push auf main
|
||||||
|
|
||||||
|
## Dateien an Stefan zurueckgeben — KRITISCH
|
||||||
|
|
||||||
|
**Das ist die EINZIGE Methode wie Stefan an Dateien rankommt. Ohne diese
|
||||||
|
Schritte sieht und bekommt er die Datei NICHT.**
|
||||||
|
|
||||||
|
### Regel 1 — Speicher-Ort
|
||||||
|
|
||||||
|
Dateien fuer Stefan AUSSCHLIESSLICH unter `/shared/uploads/` speichern.
|
||||||
|
|
||||||
|
NIEMALS in:
|
||||||
|
- `/home/node/.openclaw/workspace/...` (NUR dein Arbeitsverzeichnis,
|
||||||
|
Stefan hat keinen Zugriff)
|
||||||
|
- `/tmp/...`, `/root/...`, oder sonst irgendwo
|
||||||
|
|
||||||
|
Dateinamen mit `aria_`-Prefix:
|
||||||
|
|
||||||
|
```
|
||||||
|
/shared/uploads/aria_<beschreibender_name>.<ext>
|
||||||
|
```
|
||||||
|
|
||||||
|
Beispiele: `aria_termin_zusage.pdf`, `aria_einkaufsliste.md`,
|
||||||
|
`aria_logs_2026-05-10.zip`.
|
||||||
|
|
||||||
|
### Regel 2 — Marker im Antworttext
|
||||||
|
|
||||||
|
Am Ende deiner Antwort EINMALIG den Marker setzen:
|
||||||
|
|
||||||
|
```
|
||||||
|
[FILE: /shared/uploads/aria_<name>.<ext>]
|
||||||
|
```
|
||||||
|
|
||||||
|
OHNE diesen Marker erscheint die Datei NICHT in der App / Diagnostic.
|
||||||
|
|
||||||
|
Mehrere Dateien: mehrere `[FILE: ...]`-Marker am Ende, jeder in
|
||||||
|
eigener Zeile.
|
||||||
|
|
||||||
|
**WICHTIG — Datei MUSS existieren bevor du den Marker setzt.**
|
||||||
|
Marker fuer nicht-existente Pfade werden silent gefiltert + Stefan
|
||||||
|
bekommt einen Hinweis dass du eine Datei versprochen aber nicht
|
||||||
|
erstellt hast. Wenn du z.B. eine MIDI-Datei nicht generieren kannst,
|
||||||
|
sag das offen statt nur den Marker zu setzen. Verifiziere zur Not
|
||||||
|
mit `Bash` + `ls -la /shared/uploads/aria_<name>.<ext>` dass die
|
||||||
|
Datei wirklich da ist.
|
||||||
|
|
||||||
|
### Beispiel — kompletter Workflow
|
||||||
|
|
||||||
|
User: "Schreib mir ein Lasagne-Rezept als md-Datei"
|
||||||
|
|
||||||
|
1. Du schreibst: `Write` Tool mit Pfad `/shared/uploads/aria_lasagne.md`
|
||||||
|
2. Antwort an Stefan:
|
||||||
|
|
||||||
|
```
|
||||||
|
Hier dein Lasagne-Rezept — Ragu am Vortag, echter Parmesan,
|
||||||
|
Ruhezeit nicht skippen. Beim Schichten Bechamel auf jede Lage.
|
||||||
|
|
||||||
|
[FILE: /shared/uploads/aria_lasagne.md]
|
||||||
|
```
|
||||||
|
|
||||||
|
Der Marker wird automatisch aus dem sichtbaren Text entfernt und
|
||||||
|
als Anhang-Bubble angezeigt. Stefan tippt drauf → oeffnet die Datei
|
||||||
|
im jeweiligen Standard-Programm.
|
||||||
|
|
||||||
|
### Externe Bilder/Dateien — IMMER runterladen, nicht nur verlinken
|
||||||
|
|
||||||
|
Wenn Stefan ein Bild oder eine Datei aus dem Netz haben will (Wikipedia,
|
||||||
|
Wiki Commons, ein Beispiel-PDF, etc.):
|
||||||
|
|
||||||
|
NICHT NUR die URL in die Antwort schreiben — das Bild ist dann nur
|
||||||
|
solange sichtbar wie der externe Server lebt.
|
||||||
|
|
||||||
|
STATTDESSEN:
|
||||||
|
1. Mit `Bash` + curl/wget herunterladen nach `/shared/uploads/aria_<name>.<ext>`
|
||||||
|
2. Mit `[FILE: ...]`-Marker als Anhang ausspielen
|
||||||
|
|
||||||
|
Beispiel — User: "Zeig mir ein Bild von Micky Maus"
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -sL "https://upload.wikimedia.org/wikipedia/commons/7/7f/Mickey_Mouse.svg" \
|
||||||
|
-o /shared/uploads/aria_mickey_mouse.svg
|
||||||
|
```
|
||||||
|
|
||||||
|
Antwort:
|
||||||
|
```
|
||||||
|
Hier Micky Maus — offizielles SVG von Wikimedia Commons (Public Domain).
|
||||||
|
|
||||||
|
[FILE: /shared/uploads/aria_mickey_mouse.svg]
|
||||||
|
```
|
||||||
|
|
||||||
|
So bleibt das Bild permanent im Chat-Verlauf, auch wenn die Wiki-URL
|
||||||
|
spaeter offline geht oder umgezogen wird.
|
||||||
|
|
||||||
## Stimme
|
## Stimme
|
||||||
|
|
||||||
| Stimme | Modell | Wann |
|
TTS laeuft ueber F5-TTS auf der Gamebox (Voice Cloning). Stefan kann
|
||||||
|--------|--------|------|
|
eigene Stimmen aus Audio-Samples klonen und in App/Diagnostic auswaehlen.
|
||||||
| **Ramona** (weiblich) | `de_DE-ramona-low` | Alltag, Antworten, Gespraeche (Standard) |
|
|
||||||
| **Thorsten** (maennlich, tief) | `de_DE-thorsten-high` | Epische Momente, Alarme, besondere Ereignisse |
|
|
||||||
|
|
||||||
## Gedaechtnis (Memory)
|
## Gedaechtnis (Memory)
|
||||||
|
|
||||||
@@ -147,4 +236,4 @@ Danach den Eintrag in `memory/MEMORY.md` (Index) verlinken.
|
|||||||
### Netzwerk
|
### Netzwerk
|
||||||
- **aria-net:** Internes Docker-Netz (proxy, aria-core)
|
- **aria-net:** Internes Docker-Netz (proxy, aria-core)
|
||||||
- **RVS:** Rendezvous-Server im Rechenzentrum — Relay fuer die Android-App
|
- **RVS:** Rendezvous-Server im Rechenzentrum — Relay fuer die Android-App
|
||||||
- **Bridge:** Voice Bridge (Whisper STT + Piper TTS) — teilt Netzwerk mit aria-core
|
- **Bridge:** Voice Bridge (orchestriert STT/TTS via Gamebox-Bridges) — teilt Netzwerk mit aria-core
|
||||||
@@ -1,10 +1,10 @@
|
|||||||
# Stefan — Benutzer-Praeferenzen
|
# <Username> — Benutzer-Praeferenzen
|
||||||
|
|
||||||
## Allgemein
|
## Allgemein
|
||||||
|
|
||||||
- **Sprache:** Deutsch
|
- **Sprache:** <z.B. Deutsch>
|
||||||
- **Kommunikation:** Direkt, kein Bullshit, Humor willkommen
|
- **Kommunikation:** <z.B. Direkt, kein Bullshit, Humor willkommen>
|
||||||
- **Rolle:** Chef, Auftraggeber, Entwickler bei HackerSoft Oldenburg
|
- **Rolle:** <z.B. Chef, Auftraggeber, Entwickler bei XYZ>
|
||||||
|
|
||||||
## Bestaetigung erforderlich fuer
|
## Bestaetigung erforderlich fuer
|
||||||
|
|
||||||
@@ -12,7 +12,6 @@
|
|||||||
- Push auf main
|
- Push auf main
|
||||||
- Aenderungen an Kundensystemen
|
- Aenderungen an Kundensystemen
|
||||||
- Server-Befehle die nicht rueckgaengig gemacht werden koennen
|
- Server-Befehle die nicht rueckgaengig gemacht werden koennen
|
||||||
- Windows neu installieren (erst Daten sichern!)
|
|
||||||
|
|
||||||
## Autonomes Arbeiten OK fuer
|
## Autonomes Arbeiten OK fuer
|
||||||
|
|
||||||
@@ -28,8 +27,10 @@
|
|||||||
|
|
||||||
| Tool | Zweck |
|
| Tool | Zweck |
|
||||||
|------|-------|
|
|------|-------|
|
||||||
| **Proxmox** | VM-Infrastruktur (ARIAs Zuhause) |
|
| **<Beispiel-Tool>** | <Zweck> |
|
||||||
| **Gitea** | Code-Hosting (gitea.hackersoft.de) |
|
|
||||||
| **OpenCRM** | Kundenverwaltung |
|
<!--
|
||||||
| **STARFACE** | Telefonie |
|
Diese Datei ist eine Vorlage. Lokal als USER.md kopieren und mit
|
||||||
| **RustDesk** | Remote IT-Support bei Kunden |
|
eigenen Praeferenzen + Tool-Stack fuellen. USER.md selbst ist via
|
||||||
|
.gitignore vom Repo ausgeschlossen.
|
||||||
|
-->
|
||||||
@@ -1,14 +0,0 @@
|
|||||||
# Bridge → aria-core (OpenClaw Gateway)
|
|
||||||
# Bridge teilt Netzwerk mit aria-core (network_mode: service:aria)
|
|
||||||
# → localhost ist aria-core
|
|
||||||
ARIA_CORE_WS=ws://127.0.0.1:18789
|
|
||||||
|
|
||||||
# Wake-Word
|
|
||||||
WAKE_WORD=aria
|
|
||||||
|
|
||||||
# Whisper STT — wird zur Laufzeit in der Diagnostic (Sektion "Whisper") umgeschaltet
|
|
||||||
# und in /shared/config/voice_config.json gespeichert. Der Wert hier ist nur der
|
|
||||||
# Initial-Default beim ersten Start.
|
|
||||||
# Optionen: tiny | base | small | medium | large-v3
|
|
||||||
WHISPER_MODEL=medium
|
|
||||||
WHISPER_LANGUAGE=de
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
{
|
|
||||||
"version": 1,
|
|
||||||
"profiles": {
|
|
||||||
"openai-proxy": {
|
|
||||||
"provider": "openai",
|
|
||||||
"default": true,
|
|
||||||
"apiKey": "not-needed",
|
|
||||||
"baseUrl": "http://proxy:3456/v1"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
# OpenClaw (aria-core) Konfiguration
|
|
||||||
# Diese Datei wird als /workspace/.env in den Container gemountet
|
|
||||||
#
|
|
||||||
# WICHTIG: ANTHROPIC_API_KEY und ANTHROPIC_BASE_URL absichtlich NICHT gesetzt!
|
|
||||||
# OpenClaw wuerde sonst die echte Anthropic API direkt anrufen (401 weil kein API Key).
|
|
||||||
# Stattdessen nur den OpenAI-kompatiblen Proxy nutzen.
|
|
||||||
@@ -1,137 +0,0 @@
|
|||||||
# OpenClaw Tool-Permissions — Stand 2026-03-15
|
|
||||||
|
|
||||||
## Das Problem (GELÖST)
|
|
||||||
|
|
||||||
ARIA hat ZWEI Tool-Systeme gleichzeitig: Claude Code Tools UND OpenClaw-native Tools.
|
|
||||||
Das Model hat aber nur Zugriff auf **Claude Code Tools** (über den Proxy), nicht auf OpenClaw-native Tools.
|
|
||||||
|
|
||||||
### Root Cause: DREI Probleme gleichzeitig
|
|
||||||
|
|
||||||
```
|
|
||||||
OpenClaw (aria-core) → API Request → claude-max-api-proxy (aria-proxy) → Claude Code CLI (--print Mode)
|
|
||||||
↓
|
|
||||||
Tools: WebFetch, Bash, etc. (Claude Code)
|
|
||||||
NICHT: web_fetch, exec (OpenClaw-nativ)
|
|
||||||
```
|
|
||||||
|
|
||||||
**Problem 1: Proxy benutzt `--print` Modus**
|
|
||||||
- `claude-max-api-proxy` ruft Claude Code CLI mit `--print --output-format stream-json` auf
|
|
||||||
- Der Prompt wird als einziger String übergeben, keine Tool-Definitionen von OpenClaw
|
|
||||||
- Das Model sieht NUR Claude Code's eingebaute Tools (WebFetch, Bash, etc.)
|
|
||||||
- OpenClaw-native Tools (web_fetch, exec) existieren NUR auf Gateway-Ebene, kommen nie beim Model an
|
|
||||||
|
|
||||||
**Problem 2: BOOTSTRAP.md hat die falschen Tools angewiesen**
|
|
||||||
- BOOTSTRAP.md sagte: "NIEMALS WebFetch benutzen, stattdessen web_fetch"
|
|
||||||
- Aber web_fetch existiert nicht im Claude Code CLI Kontext
|
|
||||||
- Und WebFetch war das einzige Tool das funktioniert hätte
|
|
||||||
- → Model hatte keine Tools die es benutzen "durfte"
|
|
||||||
|
|
||||||
**Problem 3: settings.json im Proxy war leer**
|
|
||||||
- `/root/.claude/settings.json` enthielt nur `{}` (keine Permissions)
|
|
||||||
- Claude Code CLI im headless-Modus kann keine Tool-Genehmigungen erteilen
|
|
||||||
- → Selbst wenn das Model WebFetch benutzen wollte, war es nicht vorab genehmigt
|
|
||||||
|
|
||||||
## Die Lösung
|
|
||||||
|
|
||||||
### Fix 1: BOOTSTRAP.md + AGENT.md umgeschrieben
|
|
||||||
|
|
||||||
**Vorher (FALSCH):**
|
|
||||||
- "NIEMALS WebFetch benutzen — hat Permission-Probleme"
|
|
||||||
- "Benutze web_fetch (OpenClaw-nativ)"
|
|
||||||
|
|
||||||
**Nachher (KORREKT):**
|
|
||||||
- "WebFetch — URLs abrufen, Webseiten lesen, APIs aufrufen, Wetter abfragen"
|
|
||||||
- "Bash — Shell-Befehle ausfuehren (curl, ssh, docker, etc.)"
|
|
||||||
- "Niemals sagen 'ich habe keinen Zugriff' — du hast Zugriff auf alles"
|
|
||||||
|
|
||||||
### Fix 2: `CLAUDE_CODE_BUBBLEWRAP=1` + `--dangerously-skip-permissions`
|
|
||||||
|
|
||||||
**Der Schlüssel-Fix.** Zwei Zeilen in `docker-compose.yml`:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# 1. sed-Patch: --dangerously-skip-permissions in manager.js einfügen
|
|
||||||
sed -i 's/"--no-session-persistence",/"--no-session-persistence","--dangerously-skip-permissions",/' $$DIST/subprocess/manager.js &&
|
|
||||||
|
|
||||||
# 2. Environment-Variable: Root-Check umgehen
|
|
||||||
environment:
|
|
||||||
- CLAUDE_CODE_BUBBLEWRAP=1
|
|
||||||
```
|
|
||||||
|
|
||||||
**Warum beides nötig:**
|
|
||||||
- `--dangerously-skip-permissions` umgeht alle Tool-Permission-Checks in Claude Code CLI
|
|
||||||
- Aber: Claude Code CLI blockiert dieses Flag wenn es als root läuft
|
|
||||||
- `CLAUDE_CODE_BUBBLEWRAP=1` überspringt den Root-Check (gefunden im minifizierten `cli.js`)
|
|
||||||
- Proxy-Container (`node:22-alpine`) läuft als root → ohne BUBBLEWRAP geht's nicht
|
|
||||||
|
|
||||||
**Resultierende CLI-Argumente:**
|
|
||||||
```
|
|
||||||
claude --print --output-format stream-json --verbose --include-partial-messages \
|
|
||||||
--model opus --no-session-persistence --dangerously-skip-permissions "prompt"
|
|
||||||
```
|
|
||||||
|
|
||||||
## Wie der Proxy intern funktioniert
|
|
||||||
|
|
||||||
```
|
|
||||||
openai-to-cli.js: OpenAI Messages → einzelner Prompt-String
|
|
||||||
system → <system>...</system>
|
|
||||||
user → direkt
|
|
||||||
assistant → <previous_response>...</previous_response>
|
|
||||||
|
|
||||||
subprocess/manager.js: Spawnt `claude --print ... --dangerously-skip-permissions "{prompt}"`
|
|
||||||
|
|
||||||
cli-to-openai.js: Claude CLI JSON-Stream → OpenAI Chat Completion Chunks
|
|
||||||
```
|
|
||||||
|
|
||||||
Der Proxy leitet KEINE Tool-Definitionen von OpenClaw weiter.
|
|
||||||
Tool-Calls passieren INTERN in der Claude Code CLI und sind für OpenClaw transparent.
|
|
||||||
|
|
||||||
## Permission-Architektur
|
|
||||||
|
|
||||||
**Granulare Tool-Kontrolle ist NICHT möglich.** Es ist Alles-oder-Nichts:
|
|
||||||
- `--dangerously-skip-permissions` AN → ARIA kann alle Claude Code Tools benutzen
|
|
||||||
- `--dangerously-skip-permissions` AUS → ARIA kann keine Tools benutzen
|
|
||||||
|
|
||||||
OpenClaw's eigene Permissions (`tools.allow/deny` in `openclaw.json`) haben **keinen Effekt** auf die
|
|
||||||
Claude Code Tools — die laufen komplett auf Proxy-Seite.
|
|
||||||
|
|
||||||
## Was NICHT funktioniert hat (17 Versuche)
|
|
||||||
|
|
||||||
1. **settings.json in aria-core** — OpenClaw benutzt NICHT Claude Code's settings.json
|
|
||||||
2. **tools.allow mit PascalCase** (WebFetch, Grep) — OpenClaw kennt diese Namen nicht
|
|
||||||
3. **tools.allow mit snake_case** (web_fetch) — Nur exec, read, write, edit erkannt
|
|
||||||
4. **tools.allow mit Wildcard** `["*"]` — Hat nicht geholfen
|
|
||||||
5. **tools.allow leer + tools.profile: "full"** — Nur ohne andere Fehler
|
|
||||||
6. **System-Prompt Anweisung allein** — Reicht nicht wenn Tools blockiert sind
|
|
||||||
7. **exec-approvals Wildcard allein** — Reicht nicht bei Config-Validation-Error
|
|
||||||
8. **`openclaw config unset tools.exec.ask`** — CLI kennt den Pfad nicht
|
|
||||||
9. **BOOTSTRAP.md mit OpenClaw-Tool-Namen** — Tools existieren nur auf Gateway-Ebene
|
|
||||||
10. **settings.json im Proxy ohne BOOTSTRAP.md Fix** — BOOTSTRAP.md verbot die Tools
|
|
||||||
11. **tools.byProvider.proxy.profile full** — Kein Effekt
|
|
||||||
12. **settings.json + BOOTSTRAP.md ohne --dangerously-skip-permissions** — `--print` ignoriert settings.json
|
|
||||||
13. **Manuelles `docker exec sed`** — Wird bei jedem Restart überschrieben
|
|
||||||
14. **`--dangerously-skip-permissions` ohne BUBBLEWRAP** — Root-Check blockiert
|
|
||||||
15. **`--allowedTools`** — Variadisches Argument frisst den Prompt
|
|
||||||
16. **`--permission-mode bypassPermissions`** — Gleicher Root-Check
|
|
||||||
17. **Non-Root User (`su node`)** — Auth-Pfad-Probleme, Credentials unerreichbar
|
|
||||||
|
|
||||||
## Wichtige Pfade
|
|
||||||
|
|
||||||
### aria-core (OpenClaw)
|
|
||||||
- `/home/node/.openclaw/openclaw.json` — OpenClaw Haupt-Config
|
|
||||||
- `/home/node/.openclaw/exec-approvals.json` — Exec Approvals
|
|
||||||
- `/tmp/openclaw/openclaw-YYYY-MM-DD.log` — Tages-Log
|
|
||||||
|
|
||||||
### aria-proxy (Claude Code CLI)
|
|
||||||
- `/root/.claude/.credentials.json` — Auth Credentials (NICHT in /root/.config/claude/)
|
|
||||||
- `/usr/local/lib/node_modules/claude-max-api-proxy/dist/` — Proxy Source
|
|
||||||
- `/usr/local/lib/node_modules/@anthropic-ai/claude-code/cli.js` — Claude Code CLI (enthält Root-Check)
|
|
||||||
|
|
||||||
## OpenClaw CLI Referenz
|
|
||||||
|
|
||||||
```bash
|
|
||||||
openclaw config get/set/unset <path> # Config verwalten
|
|
||||||
openclaw approvals get # Exec-Approvals anzeigen
|
|
||||||
openclaw approvals allowlist add # Exec-Pattern freigeben
|
|
||||||
openclaw doctor [--fix] # Health Check
|
|
||||||
openclaw gateway status # Gateway-Status
|
|
||||||
```
|
|
||||||
+24
-113
@@ -1,104 +1,23 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# ════════════════════════════════════════════════
|
# ════════════════════════════════════════════════
|
||||||
# ARIA — Ersteinrichtung nach docker compose up
|
# ARIA — Ersteinrichtung nach docker compose up
|
||||||
# Einmalig ausfuehren, danach persistiert alles.
|
#
|
||||||
|
# OpenClaw (aria-core) ist abgerissen — das Setup macht jetzt
|
||||||
|
# nur noch den SSH-Key fuer den Zugriff auf die VM (aria-wohnung).
|
||||||
|
# Brain + Proxy teilen sich denselben Key, beide haben aria-data/ssh
|
||||||
|
# als Volume gemountet.
|
||||||
# ════════════════════════════════════════════════
|
# ════════════════════════════════════════════════
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
echo "=== ARIA Setup ==="
|
|
||||||
echo ""
|
|
||||||
|
|
||||||
# Warten bis aria-core laeuft
|
|
||||||
echo "[1/7] Warte auf aria-core..."
|
|
||||||
until docker inspect -f '{{.State.Running}}' aria-core 2>/dev/null | grep -q true; do
|
|
||||||
sleep 2
|
|
||||||
echo " ... warte..."
|
|
||||||
done
|
|
||||||
echo " aria-core laeuft."
|
|
||||||
|
|
||||||
# Permissions fixen — Docker-Volumes gehoeren root, OpenClaw laeuft als node
|
|
||||||
echo ""
|
|
||||||
echo "[2/7] Fixe Permissions auf /home/node/.openclaw und /home/node/.claude..."
|
|
||||||
docker exec -u root aria-core chown -R node:node /home/node/.openclaw
|
|
||||||
docker exec -u root aria-core chown -R node:node /home/node/.claude 2>/dev/null || true
|
|
||||||
docker exec -u root aria-core chmod 700 /home/node/.openclaw
|
|
||||||
echo " Permissions OK."
|
|
||||||
|
|
||||||
# OpenClaw Config schreiben — Custom Provider fuer claude-max-api-proxy
|
|
||||||
echo ""
|
|
||||||
echo "[3/7] Schreibe openclaw.json (Proxy-Provider + Model + Tools)..."
|
|
||||||
docker exec aria-core sh -c 'cat > /home/node/.openclaw/openclaw.json << '"'"'INNEREOF'"'"'
|
|
||||||
{
|
|
||||||
"meta": {
|
|
||||||
"lastTouchedVersion": "2026.3.8"
|
|
||||||
},
|
|
||||||
"gateway": {
|
|
||||||
"mode": "local"
|
|
||||||
},
|
|
||||||
"agents": {
|
|
||||||
"defaults": {
|
|
||||||
"model": {
|
|
||||||
"primary": "proxy/claude-sonnet-4"
|
|
||||||
},
|
|
||||||
"compaction": {
|
|
||||||
"mode": "safeguard"
|
|
||||||
},
|
|
||||||
"timeoutSeconds": 900,
|
|
||||||
"maxConcurrent": 4,
|
|
||||||
"subagents": {
|
|
||||||
"maxConcurrent": 8
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"models": {
|
|
||||||
"providers": {
|
|
||||||
"proxy": {
|
|
||||||
"api": "openai-completions",
|
|
||||||
"baseUrl": "http://proxy:3456/v1",
|
|
||||||
"apiKey": "not-needed",
|
|
||||||
"models": [
|
|
||||||
{ "id": "claude-sonnet-4", "name": "claude-sonnet-4" },
|
|
||||||
{ "id": "claude-opus-4", "name": "claude-opus-4" }
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"tools": {
|
|
||||||
"profile": "full",
|
|
||||||
"web": {
|
|
||||||
"fetch": {
|
|
||||||
"enabled": true
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"exec": {
|
|
||||||
"host": "gateway"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"messages": {
|
|
||||||
"ackReactionScope": "all"
|
|
||||||
},
|
|
||||||
"commands": {
|
|
||||||
"native": "auto",
|
|
||||||
"nativeSkills": "auto",
|
|
||||||
"restart": true,
|
|
||||||
"ownerDisplay": "raw"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
INNEREOF'
|
|
||||||
echo " Config geschrieben."
|
|
||||||
|
|
||||||
# Exec-Approvals Wildcard — erlaubt Tool-Ausfuehrung im headless-Modus
|
|
||||||
echo ""
|
|
||||||
echo "[4/7] Setze exec-approvals Wildcard..."
|
|
||||||
docker exec aria-core openclaw approvals allowlist add --agent "*" "*" 2>/dev/null || true
|
|
||||||
echo " Approvals gesetzt."
|
|
||||||
|
|
||||||
# SSH-Key generieren fuer VM-Zugriff
|
|
||||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||||
SSH_DIR="$SCRIPT_DIR/aria-data/ssh"
|
SSH_DIR="$SCRIPT_DIR/aria-data/ssh"
|
||||||
echo ""
|
|
||||||
echo "[5/7] SSH-Key fuer VM-Zugriff..."
|
echo "=== ARIA Setup ==="
|
||||||
|
|
||||||
|
mkdir -p "$SSH_DIR"
|
||||||
|
|
||||||
if [ ! -f "$SSH_DIR/id_ed25519" ]; then
|
if [ ! -f "$SSH_DIR/id_ed25519" ]; then
|
||||||
|
echo "Generiere SSH-Key fuer aria-wohnung..."
|
||||||
ssh-keygen -t ed25519 -f "$SSH_DIR/id_ed25519" -N "" -C "aria@aria-wohnung"
|
ssh-keygen -t ed25519 -f "$SSH_DIR/id_ed25519" -N "" -C "aria@aria-wohnung"
|
||||||
cat > "$SSH_DIR/config" << 'SSHEOF'
|
cat > "$SSH_DIR/config" << 'SSHEOF'
|
||||||
Host aria-wohnung
|
Host aria-wohnung
|
||||||
@@ -108,34 +27,26 @@ Host aria-wohnung
|
|||||||
StrictHostKeyChecking accept-new
|
StrictHostKeyChecking accept-new
|
||||||
SSHEOF
|
SSHEOF
|
||||||
chmod 600 "$SSH_DIR/id_ed25519"
|
chmod 600 "$SSH_DIR/id_ed25519"
|
||||||
chmod 644 "$SSH_DIR/id_ed25519.pub"
|
chmod 644 "$SSH_DIR/id_ed25519.pub" "$SSH_DIR/config"
|
||||||
chmod 644 "$SSH_DIR/config"
|
|
||||||
echo " Key generiert."
|
# Public Key direkt in /root/.ssh/authorized_keys eintragen
|
||||||
# Public Key direkt in root's authorized_keys eintragen (Script laeuft als root auf der VM)
|
# (Script laeuft als root auf der VM aria-wohnung)
|
||||||
|
if [ -w /root/.ssh ] || [ -w /root ]; then
|
||||||
mkdir -p /root/.ssh
|
mkdir -p /root/.ssh
|
||||||
chmod 700 /root/.ssh
|
chmod 700 /root/.ssh
|
||||||
cat "$SSH_DIR/id_ed25519.pub" >> /root/.ssh/authorized_keys
|
cat "$SSH_DIR/id_ed25519.pub" >> /root/.ssh/authorized_keys
|
||||||
chmod 600 /root/.ssh/authorized_keys
|
chmod 600 /root/.ssh/authorized_keys
|
||||||
echo " Public Key in /root/.ssh/authorized_keys eingetragen."
|
echo " Public Key in /root/.ssh/authorized_keys eingetragen."
|
||||||
else
|
else
|
||||||
echo " Key existiert bereits."
|
echo " Hinweis: konnte /root/.ssh/authorized_keys nicht schreiben."
|
||||||
|
echo " Pubkey manuell eintragen:"
|
||||||
|
cat "$SSH_DIR/id_ed25519.pub"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "SSH-Key existiert bereits — uebersprungen."
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Permissions im Container fixen
|
|
||||||
echo ""
|
|
||||||
echo "[6/7] Fixe SSH-Permissions..."
|
|
||||||
docker exec -u root aria-core chown -R node:node /home/node/.ssh 2>/dev/null || true
|
|
||||||
|
|
||||||
# Neustart damit Gateway die Config laedt
|
|
||||||
echo ""
|
|
||||||
echo "[7/7] Starte aria-core neu..."
|
|
||||||
docker restart aria-core
|
|
||||||
|
|
||||||
echo ""
|
|
||||||
echo "=== Setup fertig ==="
|
echo "=== Setup fertig ==="
|
||||||
echo ""
|
echo ""
|
||||||
echo "Teste mit: docker logs aria-core --tail 20"
|
echo "Naechster Schritt: docker compose up -d"
|
||||||
echo "Erwartete Zeile: 'agent model: proxy/claude-sonnet-4'"
|
echo "Test: docker exec aria-brain ssh aria-wohnung hostname"
|
||||||
echo ""
|
|
||||||
echo "SSH-Test: docker exec aria-core ssh aria-wohnung hostname"
|
|
||||||
echo "Tool-Test: Neue Session anlegen, dann 'Wie wird das Wetter in Bremen?' fragen"
|
|
||||||
|
|||||||
+640
-74
@@ -1,17 +1,13 @@
|
|||||||
"""
|
"""
|
||||||
ARIA Voice Bridge — Hauptmodul.
|
ARIA Voice Bridge — Hauptmodul.
|
||||||
|
|
||||||
Verbindet die Android App (via RVS) mit ARIA-Core und bietet
|
Verbindet die Android App (via RVS) mit ARIA-Core. Spracheingabe laeuft
|
||||||
lokale Spracheingabe (Wake-Word + Whisper STT) und Sprachausgabe (Piper TTS).
|
ueber die whisper-bridge (Gamebox, faster-whisper auf CUDA), Sprachausgabe
|
||||||
|
ueber die f5tts-bridge (Voice Cloning, satzweises PCM-Streaming).
|
||||||
|
|
||||||
Nachrichtenfluss:
|
Nachrichtenfluss:
|
||||||
App → RVS → Bridge → aria-core
|
App → RVS → Bridge → aria-core
|
||||||
aria-core → Bridge → RVS → App
|
aria-core → Bridge → f5tts-bridge → PCM → RVS → App
|
||||||
→ Lautsprecher (TTS)
|
|
||||||
|
|
||||||
Stimmen:
|
|
||||||
- Ramona (de_DE-ramona-low) — Alltag, Gespraeche
|
|
||||||
- Thorsten (de_DE-thorsten-high) — epische Momente, Alarme
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
@@ -20,9 +16,12 @@ import asyncio
|
|||||||
import base64
|
import base64
|
||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
|
import mimetypes
|
||||||
import os
|
import os
|
||||||
|
import re
|
||||||
import signal
|
import signal
|
||||||
import ssl
|
import ssl
|
||||||
|
import time
|
||||||
import sys
|
import sys
|
||||||
import tempfile
|
import tempfile
|
||||||
import uuid
|
import uuid
|
||||||
@@ -50,7 +49,6 @@ logger = logging.getLogger("aria-bridge")
|
|||||||
|
|
||||||
# ── Konfiguration ───────────────────────────────────────────
|
# ── Konfiguration ───────────────────────────────────────────
|
||||||
|
|
||||||
CONFIG_PATH = Path("/config/aria.env")
|
|
||||||
VOICES_DIR = Path("/voices")
|
VOICES_DIR = Path("/voices")
|
||||||
CORE_WS_URL = os.getenv("ARIA_CORE_WS", "ws://127.0.0.1:18789")
|
CORE_WS_URL = os.getenv("ARIA_CORE_WS", "ws://127.0.0.1:18789")
|
||||||
CORE_AUTH_TOKEN = os.getenv("ARIA_AUTH_TOKEN", "") # OpenClaw Gateway Token
|
CORE_AUTH_TOKEN = os.getenv("ARIA_AUTH_TOKEN", "") # OpenClaw Gateway Token
|
||||||
@@ -70,38 +68,22 @@ BLOCK_SIZE = 1280 # 80ms bei 16kHz — gut fuer Wake-Word-Erkennung
|
|||||||
RECORD_SECONDS = 8 # Max. Aufnahmedauer nach Wake-Word
|
RECORD_SECONDS = 8 # Max. Aufnahmedauer nach Wake-Word
|
||||||
|
|
||||||
def load_config() -> dict[str, str]:
|
def load_config() -> dict[str, str]:
|
||||||
"""Laedt Konfiguration.
|
"""Laedt Konfiguration ausschliesslich aus /shared/config/runtime.json
|
||||||
|
(zentral gepflegt ueber Diagnostic UI). Tokens + RVS-Settings kommen
|
||||||
Reihenfolge (hoechste Prioritaet zuletzt):
|
via ENV (siehe docker-compose).
|
||||||
1. /config/aria.env (bind-mount)
|
|
||||||
2. /shared/config/runtime.json (zentral gepflegt ueber Diagnostic UI)
|
|
||||||
|
|
||||||
Werte aus runtime.json ueberschreiben die env-Datei.
|
|
||||||
"""
|
"""
|
||||||
config: dict[str, str] = {}
|
config: dict[str, str] = {}
|
||||||
if CONFIG_PATH.exists():
|
|
||||||
for line in CONFIG_PATH.read_text().splitlines():
|
|
||||||
line = line.strip()
|
|
||||||
if not line or line.startswith("#"):
|
|
||||||
continue
|
|
||||||
if "=" in line:
|
|
||||||
key, _, value = line.partition("=")
|
|
||||||
config[key.strip()] = value.strip()
|
|
||||||
logger.info("Konfiguration geladen aus %s", CONFIG_PATH)
|
|
||||||
else:
|
|
||||||
logger.warning("Keine Konfiguration gefunden: %s", CONFIG_PATH)
|
|
||||||
|
|
||||||
# Runtime-Overrides aus zentralem Shared-Volume (Diagnostic UI)
|
|
||||||
runtime_path = Path("/shared/config/runtime.json")
|
runtime_path = Path("/shared/config/runtime.json")
|
||||||
if runtime_path.exists():
|
if runtime_path.exists():
|
||||||
try:
|
try:
|
||||||
runtime = json.loads(runtime_path.read_text())
|
runtime = json.loads(runtime_path.read_text())
|
||||||
overrides = {k: str(v) for k, v in runtime.items() if v not in (None, "")}
|
config = {k: str(v) for k, v in runtime.items() if v not in (None, "")}
|
||||||
if overrides:
|
if config:
|
||||||
config.update(overrides)
|
logger.info("Runtime-Config geladen: %s", sorted(config.keys()))
|
||||||
logger.info("Runtime-Overrides geladen: %s", sorted(overrides.keys()))
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warning("runtime.json konnte nicht gelesen werden: %s", e)
|
logger.warning("runtime.json konnte nicht gelesen werden: %s", e)
|
||||||
|
else:
|
||||||
|
logger.info("Keine runtime.json — Diagnostic schreibt sie beim ersten Konfigurieren")
|
||||||
return config
|
return config
|
||||||
|
|
||||||
|
|
||||||
@@ -493,7 +475,7 @@ class ARIABridge:
|
|||||||
self.current_mode = self._load_persisted_mode()
|
self.current_mode = self._load_persisted_mode()
|
||||||
self.running = False
|
self.running = False
|
||||||
|
|
||||||
# Komponenten (TTS: immer XTTS remote, Piper wurde entfernt)
|
# Komponenten (TTS: F5-TTS remote auf der Gamebox, lokales TTS wurde entfernt)
|
||||||
self.tts_enabled = True
|
self.tts_enabled = True
|
||||||
self.xtts_voice = ""
|
self.xtts_voice = ""
|
||||||
self._f5tts_config: dict = {}
|
self._f5tts_config: dict = {}
|
||||||
@@ -551,6 +533,13 @@ class ARIABridge:
|
|||||||
# Beeinflusst das Timeout fuer stt_request — bei "loading" warten wir laenger,
|
# Beeinflusst das Timeout fuer stt_request — bei "loading" warten wir laenger,
|
||||||
# weil das Modell beim ersten Request noch ~1-2 Min runtergeladen werden kann.
|
# weil das Modell beim ersten Request noch ~1-2 Min runtergeladen werden kann.
|
||||||
self._remote_stt_ready: bool = False
|
self._remote_stt_ready: bool = False
|
||||||
|
# User-Message-Counter fuer Auto-Compact. Bei zu langer Konversation
|
||||||
|
# sprengt die argv-Liste beim Claude-Subprocess-Spawn (E2BIG). Bei
|
||||||
|
# COMPACT_AFTER erreicht → Sessions reset + Container restart.
|
||||||
|
# Counter ueberlebt Bridge-Restart nicht (frischer Zaehler beim Start ok).
|
||||||
|
# _user_message_count + _compact_after entfallen — Auto-Compact war
|
||||||
|
# aria-core-spezifisch (E2BIG-Schutz). Der neue Brain-Loop kennt
|
||||||
|
# diese Begrenzung nicht.
|
||||||
# Pending Files: wenn die App ein Bild + Text gleichzeitig schickt, kommen
|
# Pending Files: wenn die App ein Bild + Text gleichzeitig schickt, kommen
|
||||||
# zwei separate RVS-Events ('file' und 'chat') — wir buffern die Files
|
# zwei separate RVS-Events ('file' und 'chat') — wir buffern die Files
|
||||||
# kurz und mergen sie mit dem nachfolgenden Chat-Text zu einer einzigen
|
# kurz und mergen sie mit dem nachfolgenden Chat-Text zu einer einzigen
|
||||||
@@ -594,7 +583,7 @@ class ARIABridge:
|
|||||||
logger.info("RVS: %s (Token: %s...)", self.rvs_url, self.rvs_token[:8])
|
logger.info("RVS: %s (Token: %s...)", self.rvs_url, self.rvs_token[:8])
|
||||||
else:
|
else:
|
||||||
logger.warning("RVS nicht konfiguriert — App-Verbindung deaktiviert")
|
logger.warning("RVS nicht konfiguriert — App-Verbindung deaktiviert")
|
||||||
logger.warning(" Setze RVS_HOST, RVS_PORT, RVS_TOKEN in /config/aria.env")
|
logger.warning(" Setze RVS_HOST, RVS_PORT, RVS_TOKEN in der .env auf der VM")
|
||||||
logger.info("Modus: %s %s", self.current_mode.config.emoji, self.current_mode.config.name)
|
logger.info("Modus: %s %s", self.current_mode.config.emoji, self.current_mode.config.name)
|
||||||
|
|
||||||
# ── aria-core Verbindung (OpenClaw Gateway Protokoll) ───
|
# ── aria-core Verbindung (OpenClaw Gateway Protokoll) ───
|
||||||
@@ -681,7 +670,10 @@ class ARIABridge:
|
|||||||
while self.running:
|
while self.running:
|
||||||
try:
|
try:
|
||||||
logger.info("[core] Verbinde: %s", self.ws_url)
|
logger.info("[core] Verbinde: %s", self.ws_url)
|
||||||
async with websockets.connect(self.ws_url) as ws:
|
# max_size=50MB damit grosse Bilder/Voice-Uploads durchgehen.
|
||||||
|
# Python-websockets Default ist nur 1 MiB → 5MB JPEG sprengt
|
||||||
|
# das Limit, Connection wird silent gedroppt.
|
||||||
|
async with websockets.connect(self.ws_url, max_size=50 * 1024 * 1024) as ws:
|
||||||
# OpenClaw Handshake durchfuehren
|
# OpenClaw Handshake durchfuehren
|
||||||
if not await self._openclaw_handshake(ws):
|
if not await self._openclaw_handshake(ws):
|
||||||
logger.error("[core] Handshake fehlgeschlagen — Reconnect")
|
logger.error("[core] Handshake fehlgeschlagen — Reconnect")
|
||||||
@@ -787,13 +779,29 @@ class ARIABridge:
|
|||||||
await self._emit_activity("idle", "")
|
await self._emit_activity("idle", "")
|
||||||
if not text:
|
if not text:
|
||||||
logger.warning("[core] chat final ohne Text: %s", json.dumps(payload)[:200])
|
logger.warning("[core] chat final ohne Text: %s", json.dumps(payload)[:200])
|
||||||
|
# App+Diagnostic informieren statt stumm — sonst wartet die
|
||||||
|
# UI ewig auf eine Antwort die nicht kommt. Passiert z.B.
|
||||||
|
# wenn Claude-Vision das Bild ablehnt (leere Antwort)
|
||||||
|
# oder die Antwort nur aus Tool-Calls ohne Final-Text bestand.
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "chat",
|
||||||
|
"payload": {
|
||||||
|
"text": "[Hinweis] Antwort ohne Text — moeglicherweise Bild zu gross fuer Vision-API oder reine Tool-Ausfuehrung.",
|
||||||
|
"sender": "aria",
|
||||||
|
},
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
|
})
|
||||||
return
|
return
|
||||||
logger.info("[core] Antwort: '%s'", text[:80])
|
logger.info("[core] Antwort: '%s'", text[:80])
|
||||||
await self._process_core_response(text, payload)
|
await self._process_core_response(text, payload)
|
||||||
return
|
return
|
||||||
|
|
||||||
if state == "error":
|
if state == "error":
|
||||||
error = payload.get("error", "Unbekannt")
|
# OpenClaw nutzt errorMessage statt error bei state=error.
|
||||||
|
error = (payload.get("error")
|
||||||
|
or payload.get("errorMessage")
|
||||||
|
or payload.get("message")
|
||||||
|
or "Unbekannt")
|
||||||
logger.error("[core] Chat-Fehler: %s", error)
|
logger.error("[core] Chat-Fehler: %s", error)
|
||||||
self._last_chat_final_at = asyncio.get_event_loop().time()
|
self._last_chat_final_at = asyncio.get_event_loop().time()
|
||||||
await self._emit_activity("idle", "")
|
await self._emit_activity("idle", "")
|
||||||
@@ -829,7 +837,12 @@ class ARIABridge:
|
|||||||
return
|
return
|
||||||
|
|
||||||
if event_name == "chat:error":
|
if event_name == "chat:error":
|
||||||
error = payload.get("error", payload.get("message", "Unbekannt"))
|
# OpenClaw legt den echten Text manchmal in errorMessage ab
|
||||||
|
# (state=error). Vorher wurde nur error/message gechecked → "Unbekannt".
|
||||||
|
error = (payload.get("error")
|
||||||
|
or payload.get("errorMessage")
|
||||||
|
or payload.get("message")
|
||||||
|
or "Unbekannt")
|
||||||
logger.error("[core] Chat-Fehler (legacy): %s", error)
|
logger.error("[core] Chat-Fehler (legacy): %s", error)
|
||||||
await self._send_to_rvs({
|
await self._send_to_rvs({
|
||||||
"type": "chat",
|
"type": "chat",
|
||||||
@@ -862,6 +875,139 @@ class ARIABridge:
|
|||||||
pass
|
pass
|
||||||
return payload.get("text", "")
|
return payload.get("text", "")
|
||||||
|
|
||||||
|
# File-Marker-Pattern: `[FILE: /pfad/zur/datei.ext]` (Pfad kann Spaces
|
||||||
|
# enthalten, Endung beliebig). Mehrfach im Text moeglich.
|
||||||
|
_FILE_MARKER_RE = re.compile(r"\[FILE:\s*(/shared/uploads/[^\]]+?)\s*\]", re.IGNORECASE)
|
||||||
|
|
||||||
|
def _extract_file_markers(self, text: str) -> tuple[str, list[dict], list[str]]:
|
||||||
|
"""Sucht [FILE: /shared/uploads/...]-Marker.
|
||||||
|
Returns (cleaned_text, valid_files, missing_paths)."""
|
||||||
|
files: list[dict] = []
|
||||||
|
missing: list[str] = []
|
||||||
|
for m in self._FILE_MARKER_RE.finditer(text):
|
||||||
|
path = m.group(1).strip()
|
||||||
|
if not path.startswith("/shared/uploads/"):
|
||||||
|
logger.warning("[core] FILE-Marker mit unerlaubtem Pfad ignoriert: %s", path)
|
||||||
|
continue
|
||||||
|
if not os.path.isfile(path):
|
||||||
|
logger.warning("[core] FILE-Marker zeigt auf nicht existente Datei: %s", path)
|
||||||
|
missing.append(path)
|
||||||
|
continue
|
||||||
|
name = os.path.basename(path)
|
||||||
|
mime, _ = mimetypes.guess_type(path)
|
||||||
|
size = os.path.getsize(path)
|
||||||
|
files.append({
|
||||||
|
"serverPath": path,
|
||||||
|
"name": name,
|
||||||
|
"mimeType": mime or "application/octet-stream",
|
||||||
|
"size": size,
|
||||||
|
})
|
||||||
|
cleaned = self._FILE_MARKER_RE.sub("", text).strip()
|
||||||
|
# Zwei aufeinanderfolgende Leerzeilen → eine
|
||||||
|
cleaned = re.sub(r"\n{3,}", "\n\n", cleaned)
|
||||||
|
return cleaned, files, missing
|
||||||
|
|
||||||
|
async def _broadcast_aria_file(self, file_info: dict) -> None:
|
||||||
|
"""ARIA hat eine Datei fuer den User erstellt — App+Diagnostic informieren."""
|
||||||
|
logger.info("[rvs] ARIA-Datei rausgeben: %s (%s, %dKB)",
|
||||||
|
file_info["name"], file_info["mimeType"], file_info["size"] // 1024)
|
||||||
|
try:
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "file_from_aria",
|
||||||
|
"payload": file_info,
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
|
})
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning("[rvs] file_from_aria broadcast fehlgeschlagen: %s", e)
|
||||||
|
|
||||||
|
def _persist_state(self, key: str, data: dict) -> None:
|
||||||
|
"""Atomic-Write in /shared/state/<key>.json — fuer Brain-Watcher.
|
||||||
|
Wird genutzt fuer location + activity-Tracking."""
|
||||||
|
try:
|
||||||
|
import time as _time
|
||||||
|
data = dict(data)
|
||||||
|
data["ts_unix"] = int(_time.time())
|
||||||
|
Path("/shared/state").mkdir(parents=True, exist_ok=True)
|
||||||
|
target = Path(f"/shared/state/{key}.json")
|
||||||
|
tmp = target.with_suffix(".tmp")
|
||||||
|
tmp.write_text(json.dumps(data), encoding="utf-8")
|
||||||
|
tmp.replace(target)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning("[state] %s schreiben fehlgeschlagen: %s", key, e)
|
||||||
|
|
||||||
|
def _persist_location(self, location: Optional[dict]) -> None:
|
||||||
|
"""Speichert die letzte bekannte GPS-Position fuer Watcher.
|
||||||
|
Erwartet {lat, lon} oder {lat, lng}. Nicht-Dicts und fehlende
|
||||||
|
Koordinaten werden ignoriert."""
|
||||||
|
if not isinstance(location, dict):
|
||||||
|
return
|
||||||
|
try:
|
||||||
|
lat = location.get("lat")
|
||||||
|
lon = location.get("lon") or location.get("lng")
|
||||||
|
if lat is None or lon is None:
|
||||||
|
return
|
||||||
|
self._persist_state("location", {
|
||||||
|
"lat": float(lat),
|
||||||
|
"lon": float(lon),
|
||||||
|
})
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _persist_user_activity(self) -> None:
|
||||||
|
"""Markiert dass der User gerade etwas gemacht hat (Chat/Voice).
|
||||||
|
Watcher: last_user_message_ago_sec basiert darauf."""
|
||||||
|
self._persist_state("activity", {"last_user_ts": int(time.time())})
|
||||||
|
|
||||||
|
def _append_chat_backup(self, entry: dict) -> None:
|
||||||
|
"""Schreibt eine Zeile in /shared/config/chat_backup.jsonl.
|
||||||
|
Wird von Diagnostic + App als History-Quelle gelesen.
|
||||||
|
entry braucht mindestens {role, text}; ts wird ergaenzt."""
|
||||||
|
try:
|
||||||
|
line = {"ts": int(asyncio.get_event_loop().time() * 1000)}
|
||||||
|
line.update(entry)
|
||||||
|
Path("/shared/config").mkdir(parents=True, exist_ok=True)
|
||||||
|
with open("/shared/config/chat_backup.jsonl", "a", encoding="utf-8") as f:
|
||||||
|
f.write(json.dumps(line, ensure_ascii=False) + "\n")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning("[backup] chat_backup-Write fehlgeschlagen: %s", e)
|
||||||
|
|
||||||
|
def _read_chat_backup_since(self, since_ms: int, limit: int = 100) -> list[dict]:
|
||||||
|
"""Liest chat_backup.jsonl, gibt Eintraege > since_ms zurueck, max limit neueste.
|
||||||
|
File-deleted-Marker werden honoriert: vor einem file_deleted-Marker liegende
|
||||||
|
Eintraege mit gleichem Pfad werden als deleted markiert."""
|
||||||
|
path = Path("/shared/config/chat_backup.jsonl")
|
||||||
|
if not path.exists():
|
||||||
|
return []
|
||||||
|
try:
|
||||||
|
lines = path.read_text(encoding="utf-8").splitlines()
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning("[backup] Lesen fehlgeschlagen: %s", e)
|
||||||
|
return []
|
||||||
|
out: list[dict] = []
|
||||||
|
for raw in lines:
|
||||||
|
raw = raw.strip()
|
||||||
|
if not raw:
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
obj = json.loads(raw)
|
||||||
|
except Exception:
|
||||||
|
continue
|
||||||
|
ts = obj.get("ts") or 0
|
||||||
|
if ts <= since_ms:
|
||||||
|
continue
|
||||||
|
# file_deleted-Marker: nicht als Chat ausliefern, aber an die App schicken
|
||||||
|
# damit sie ihre Bubbles updaten kann (separater Pfad existiert ja schon)
|
||||||
|
if obj.get("type") == "file_deleted":
|
||||||
|
continue
|
||||||
|
role = obj.get("role")
|
||||||
|
if role not in ("user", "assistant"):
|
||||||
|
continue
|
||||||
|
out.append(obj)
|
||||||
|
# Auf "limit" neueste cappen
|
||||||
|
if len(out) > limit:
|
||||||
|
out = out[-limit:]
|
||||||
|
return out
|
||||||
|
|
||||||
async def _process_core_response(self, text: str, payload: dict) -> None:
|
async def _process_core_response(self, text: str, payload: dict) -> None:
|
||||||
"""Verarbeitet eine fertige Antwort von aria-core.
|
"""Verarbeitet eine fertige Antwort von aria-core.
|
||||||
|
|
||||||
@@ -876,6 +1022,34 @@ class ARIABridge:
|
|||||||
logger.info("[core] NO_REPLY empfangen — Antwort still verworfen")
|
logger.info("[core] NO_REPLY empfangen — Antwort still verworfen")
|
||||||
return
|
return
|
||||||
|
|
||||||
|
# Antwort in chat_backup.jsonl loggen (cleaned text, ohne File-Marker)
|
||||||
|
# — passiert weiter unten nach extract_file_markers
|
||||||
|
|
||||||
|
# File-Marker `[FILE: /shared/uploads/aria_xyz.pdf]` extrahieren —
|
||||||
|
# ARIA legt damit Dateien fuer den User bereit (Bilder, PDFs, etc.).
|
||||||
|
# Der Marker wird aus dem Antworttext entfernt (TTS soll ihn nicht
|
||||||
|
# vorlesen) und parallel als file_from_aria-Event geschickt.
|
||||||
|
text, aria_files, missing_files = self._extract_file_markers(text)
|
||||||
|
for f in aria_files:
|
||||||
|
await self._broadcast_aria_file(f)
|
||||||
|
# Bei fehlenden Files: User informieren (sonst sieht er nur stille
|
||||||
|
# Verluste — ARIA hat den Marker hingeschrieben aber das File nicht
|
||||||
|
# tatsaechlich angelegt).
|
||||||
|
if missing_files:
|
||||||
|
missing_list = "\n".join(f" • {os.path.basename(p)}" for p in missing_files)
|
||||||
|
text = (text + "\n\n[Hinweis] Folgende Dateien hat ARIA zwar erwaehnt "
|
||||||
|
f"aber nicht erstellt:\n{missing_list}\n"
|
||||||
|
"Bitte ARIA bitten, sie wirklich zu schreiben.").strip()
|
||||||
|
|
||||||
|
# Antwort in chat_backup.jsonl loggen (gecleanter Text, ohne File-Marker)
|
||||||
|
# File-Marker werden separat als file_from_aria-Events ausgeliefert.
|
||||||
|
self._append_chat_backup({
|
||||||
|
"role": "assistant",
|
||||||
|
"text": text,
|
||||||
|
"files": [{"serverPath": f["serverPath"], "name": f["name"],
|
||||||
|
"mimeType": f["mimeType"], "size": f["size"]} for f in aria_files],
|
||||||
|
})
|
||||||
|
|
||||||
metadata = payload.get("metadata", {})
|
metadata = payload.get("metadata", {})
|
||||||
is_critical = metadata.get("critical", False)
|
is_critical = metadata.get("critical", False)
|
||||||
requested_voice = metadata.get("voice")
|
requested_voice = metadata.get("voice")
|
||||||
@@ -951,6 +1125,12 @@ class ARIABridge:
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error("[core] XTTS-Request fehlgeschlagen: %s — kein Audio", e)
|
logger.error("[core] XTTS-Request fehlgeschlagen: %s — kein Audio", e)
|
||||||
|
|
||||||
|
# ARIA ist fertig — App's "ARIA denkt..." Indicator zurueck auf idle.
|
||||||
|
# _last_chat_final_at bewusst NICHT setzen: die 3s-Cooldown war fuer
|
||||||
|
# trailing OpenClaw-Activity-Events; bei Voice-Chat wuerde sie die
|
||||||
|
# naechste thinking-Welle unterdruecken.
|
||||||
|
await self._emit_activity("idle", "")
|
||||||
|
|
||||||
# ── Mode Persistence (global, nicht pro Geraet) ──────
|
# ── Mode Persistence (global, nicht pro Geraet) ──────
|
||||||
_MODE_FILE = "/shared/config/mode.json"
|
_MODE_FILE = "/shared/config/mode.json"
|
||||||
|
|
||||||
@@ -1028,6 +1208,31 @@ class ARIABridge:
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.debug("[session] Diagnostic nicht erreichbar (%s) — nutze '%s'", e, self._session_key)
|
logger.debug("[session] Diagnostic nicht erreichbar (%s) — nutze '%s'", e, self._session_key)
|
||||||
|
|
||||||
|
def _build_core_text(self, text: str, interrupted: bool = False,
|
||||||
|
location: Optional[dict] = None) -> str:
|
||||||
|
"""Baut den Text fuer aria-core mit allen relevanten Hints (Barge-In,
|
||||||
|
GPS-Position). Hints sind in eckigen Klammern, der eigentliche User-
|
||||||
|
Text folgt unverandert."""
|
||||||
|
parts: list[str] = []
|
||||||
|
if interrupted:
|
||||||
|
parts.append(
|
||||||
|
"[Hinweis: Stefan hat dich gerade unterbrochen waehrend du noch "
|
||||||
|
"gesprochen oder gearbeitet hast. Folgendes ist eine Korrektur, "
|
||||||
|
"Ergaenzung oder ein Themenwechsel zu deiner letzten Antwort.]"
|
||||||
|
)
|
||||||
|
if location and isinstance(location, dict):
|
||||||
|
lat = location.get("lat")
|
||||||
|
lon = location.get("lon") or location.get("lng")
|
||||||
|
if lat is not None and lon is not None:
|
||||||
|
parts.append(
|
||||||
|
f"[Stefans aktuelle GPS-Position: {float(lat):.6f}, {float(lon):.6f}. "
|
||||||
|
f"Nutze die nur wenn die Frage sich auf seinen Standort bezieht. "
|
||||||
|
f"Erwaehne sie nicht von dir aus, ausser er fragt explizit danach.]"
|
||||||
|
)
|
||||||
|
if parts:
|
||||||
|
return " ".join(parts) + " " + text
|
||||||
|
return text
|
||||||
|
|
||||||
def _build_pending_files_message(self, user_text: str) -> str:
|
def _build_pending_files_message(self, user_text: str) -> str:
|
||||||
"""Baut eine Anweisung an aria-core aus den gepufferten Files + optionalem
|
"""Baut eine Anweisung an aria-core aus den gepufferten Files + optionalem
|
||||||
User-Text. user_text leer → 'warte auf Anweisung'-Variante."""
|
User-Text. user_text leer → 'warte auf Anweisung'-Variante."""
|
||||||
@@ -1074,31 +1279,118 @@ class ARIABridge:
|
|||||||
return True
|
return True
|
||||||
|
|
||||||
async def send_to_core(self, text: str, source: str = "bridge") -> None:
|
async def send_to_core(self, text: str, source: str = "bridge") -> None:
|
||||||
"""Sendet Text an aria-core (OpenClaw chat.send Protokoll)."""
|
"""Sendet Text an aria-brain (HTTP /chat) und broadcastet die Antwort.
|
||||||
if self.ws_core is None:
|
|
||||||
logger.error("[core] Nicht verbunden — Nachricht verworfen: '%s'", text[:60])
|
Nicht-Streaming: wir warten bis Brain fertig ist, dann pushen wir
|
||||||
|
die komplette Reply via RVS an alle Clients (App + Diagnostic).
|
||||||
|
TTS wird vom Bridge-Code separat angestossen (gleiche Logik wie
|
||||||
|
vorher mit aria-core).
|
||||||
|
"""
|
||||||
|
brain_url = os.environ.get("BRAIN_URL", "http://aria-brain:8080")
|
||||||
|
url = f"{brain_url}/chat"
|
||||||
|
payload = json.dumps({"message": text, "source": source}).encode("utf-8")
|
||||||
|
logger.info("[brain] chat ← %s '%s'", source, text[:80])
|
||||||
|
|
||||||
|
# User-Nachricht in chat_backup.jsonl loggen — wird beim App-Reconnect
|
||||||
|
# / Diagnostic-Reload als History-Quelle gelesen.
|
||||||
|
self._append_chat_backup({"role": "user", "text": text, "source": source})
|
||||||
|
|
||||||
|
# agent_activity → thinking. _emit_activity statt direktem _send_to_rvs
|
||||||
|
# damit der State-Cache fuer die spaetere idle-Dedup richtig steht.
|
||||||
|
await self._emit_activity("thinking", "")
|
||||||
|
|
||||||
|
def _do_call():
|
||||||
|
try:
|
||||||
|
req = urllib.request.Request(
|
||||||
|
url, data=payload, method="POST",
|
||||||
|
headers={"Content-Type": "application/json"},
|
||||||
|
)
|
||||||
|
# Cold-Start kann lange dauern, 5min Timeout
|
||||||
|
with urllib.request.urlopen(req, timeout=300) as resp:
|
||||||
|
return resp.status, resp.read().decode("utf-8", errors="ignore")
|
||||||
|
except Exception as exc:
|
||||||
|
return None, str(exc)
|
||||||
|
|
||||||
|
status, body = await asyncio.get_event_loop().run_in_executor(None, _do_call)
|
||||||
|
if status != 200:
|
||||||
|
logger.error("[brain] /chat fehlgeschlagen: status=%s body=%s", status, body[:200])
|
||||||
|
await self._emit_activity("idle", "")
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "chat",
|
||||||
|
"payload": {
|
||||||
|
"text": f"[Brain-Fehler] {body[:200] or 'unbekannt'}",
|
||||||
|
"sender": "aria",
|
||||||
|
},
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
|
})
|
||||||
return
|
return
|
||||||
|
|
||||||
# Aktive Session vom Diagnostic holen
|
try:
|
||||||
self._fetch_active_session()
|
data = json.loads(body)
|
||||||
|
except Exception:
|
||||||
|
logger.error("[brain] /chat lieferte ungueltiges JSON: %s", body[:200])
|
||||||
|
await self._emit_activity("idle", "")
|
||||||
|
return
|
||||||
|
|
||||||
req_id = self._next_req_id()
|
reply = (data.get("reply") or "").strip()
|
||||||
message = json.dumps({
|
if not reply:
|
||||||
"type": "req",
|
logger.warning("[brain] /chat: leerer Reply")
|
||||||
"id": req_id,
|
await self._emit_activity("idle", "")
|
||||||
"method": "chat.send",
|
return
|
||||||
"params": {
|
|
||||||
"sessionKey": self._session_key,
|
# Side-Channel-Events VOR der Chat-Bubble broadcasten (z.B. skill_created)
|
||||||
"message": text,
|
# damit sie in der UI vor der Reply auftauchen
|
||||||
"idempotencyKey": str(uuid.uuid4()),
|
for event in data.get("events", []) or []:
|
||||||
|
etype = event.get("type")
|
||||||
|
if etype == "skill_created":
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "skill_created",
|
||||||
|
"payload": event.get("skill", {}),
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
|
})
|
||||||
|
logger.info("[brain] ARIA hat einen Skill erstellt: %s",
|
||||||
|
event.get("skill", {}).get("name"))
|
||||||
|
elif etype == "trigger_created":
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "trigger_created",
|
||||||
|
"payload": event.get("trigger", {}),
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
|
})
|
||||||
|
logger.info("[brain] ARIA hat einen Trigger angelegt: %s",
|
||||||
|
event.get("trigger", {}).get("name"))
|
||||||
|
elif etype == "location_tracking":
|
||||||
|
# ARIA bittet die App das GPS-Tracking ein-/auszuschalten
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "location_tracking",
|
||||||
|
"payload": {
|
||||||
|
"on": bool(event.get("on")),
|
||||||
|
"reason": event.get("reason") or "",
|
||||||
},
|
},
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
|
})
|
||||||
|
logger.info("[brain] location_tracking Request: on=%s (%s)",
|
||||||
|
event.get("on"), event.get("reason", ""))
|
||||||
|
|
||||||
|
# _process_core_response uebernimmt alles weitere:
|
||||||
|
# File-Marker extrahieren + broadcasten, NO_REPLY-Check, Chat-
|
||||||
|
# Broadcast an RVS, TTS, agent_activity idle. Wir geben das
|
||||||
|
# raw payload mit dem reply rein damit Mode/voice-Metadata
|
||||||
|
# passend behandelt wird (hier minimal, weil Brain noch keine
|
||||||
|
# metadata mitschickt).
|
||||||
|
try:
|
||||||
|
await self._process_core_response(reply, {})
|
||||||
|
except Exception:
|
||||||
|
logger.exception("[brain] _process_core_response Fehler")
|
||||||
|
await self._emit_activity("idle", "")
|
||||||
|
# Originaler Fallback-Send (toter Code, _emit_activity uebernimmt jetzt)
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "agent_activity",
|
||||||
|
"payload": {"activity": "idle"},
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
})
|
})
|
||||||
|
|
||||||
try:
|
if data.get("distilling"):
|
||||||
await self.ws_core.send(message)
|
logger.info("[brain] Destillat laeuft im Hintergrund")
|
||||||
logger.info("[core] chat.send (%s, id=%s): '%s'", source, req_id, text[:80])
|
|
||||||
except Exception:
|
|
||||||
logger.exception("[core] Sendefehler")
|
|
||||||
|
|
||||||
# ── RVS Verbindung (App-Relay) ──────────────────────────
|
# ── RVS Verbindung (App-Relay) ──────────────────────────
|
||||||
|
|
||||||
@@ -1120,7 +1412,8 @@ class ARIABridge:
|
|||||||
try:
|
try:
|
||||||
url = f"{current_url}?token={self.rvs_token}"
|
url = f"{current_url}?token={self.rvs_token}"
|
||||||
logger.info("[rvs] Verbinde: %s", current_url)
|
logger.info("[rvs] Verbinde: %s", current_url)
|
||||||
async with websockets.connect(url) as ws:
|
# max_size=50MB (siehe core-Connect oben — gleicher Grund).
|
||||||
|
async with websockets.connect(url, max_size=50 * 1024 * 1024) as ws:
|
||||||
self.ws_rvs = ws
|
self.ws_rvs = ws
|
||||||
retry_delay = 2
|
retry_delay = 2
|
||||||
logger.info("[rvs] Verbunden — warte auf App-Nachrichten")
|
logger.info("[rvs] Verbunden — warte auf App-Nachrichten")
|
||||||
@@ -1235,6 +1528,11 @@ class ARIABridge:
|
|||||||
except (TypeError, ValueError):
|
except (TypeError, ValueError):
|
||||||
self._next_speed_override = None
|
self._next_speed_override = None
|
||||||
if text:
|
if text:
|
||||||
|
interrupted = bool(payload.get("interrupted", False))
|
||||||
|
location = payload.get("location") or None
|
||||||
|
# State persist fuer Brain-Watcher (current_lat, ..., last_user_ts)
|
||||||
|
self._persist_location(location)
|
||||||
|
self._persist_user_activity()
|
||||||
# Wenn Files gerade gepuffert sind (Bild + Text gleichzeitig
|
# Wenn Files gerade gepuffert sind (Bild + Text gleichzeitig
|
||||||
# gesendet), mergen wir sie zu einer einzigen Anfrage statt
|
# gesendet), mergen wir sie zu einer einzigen Anfrage statt
|
||||||
# zwei separater send_to_core-Calls.
|
# zwei separater send_to_core-Calls.
|
||||||
@@ -1242,8 +1540,12 @@ class ARIABridge:
|
|||||||
if merged:
|
if merged:
|
||||||
logger.info("[rvs] App-Chat (mit Anhaengen): '%s'", text[:80])
|
logger.info("[rvs] App-Chat (mit Anhaengen): '%s'", text[:80])
|
||||||
else:
|
else:
|
||||||
logger.info("[rvs] App-Chat: '%s'", text[:80])
|
core_text = self._build_core_text(text, interrupted, location)
|
||||||
await self.send_to_core(text, source="app")
|
logger.info("[rvs] App-Chat%s%s: '%s'",
|
||||||
|
" [BARGE-IN]" if interrupted else "",
|
||||||
|
" [GPS]" if location else "",
|
||||||
|
text[:80])
|
||||||
|
await self.send_to_core(core_text, source="app" + (" [barge-in]" if interrupted else ""))
|
||||||
return
|
return
|
||||||
|
|
||||||
if msg_type == "cancel_request":
|
if msg_type == "cancel_request":
|
||||||
@@ -1434,6 +1736,31 @@ class ARIABridge:
|
|||||||
size_kb = len(file_b64) // 1365
|
size_kb = len(file_b64) // 1365
|
||||||
logger.info("[rvs] Datei gespeichert: %s (%dKB)", file_path, size_kb)
|
logger.info("[rvs] Datei gespeichert: %s (%dKB)", file_path, size_kb)
|
||||||
|
|
||||||
|
# Pixel-Bilder fuer Claude-Vision shrinken wenn > 2 MB. SVG/PDF/ZIP
|
||||||
|
# bleiben unangetastet (Vision laeuft eh nur auf Raster-Formaten).
|
||||||
|
CLAUDE_VISION_FORMATS = ("image/jpeg", "image/jpg", "image/png", "image/webp", "image/gif")
|
||||||
|
if file_type.lower() in CLAUDE_VISION_FORMATS:
|
||||||
|
file_size_bytes = os.path.getsize(file_path)
|
||||||
|
if file_size_bytes > 2 * 1024 * 1024:
|
||||||
|
try:
|
||||||
|
from PIL import Image
|
||||||
|
with Image.open(file_path) as img:
|
||||||
|
orig_w, orig_h = img.size
|
||||||
|
# Anthropic-Empfehlung: max 1568px lange Seite. RGB-Konvertierung
|
||||||
|
# falls RGBA/Palette (JPEG braucht RGB).
|
||||||
|
img.thumbnail((1568, 1568), Image.Resampling.LANCZOS)
|
||||||
|
if img.mode in ("RGBA", "P"):
|
||||||
|
img = img.convert("RGB")
|
||||||
|
img.save(file_path, "JPEG", quality=85, optimize=True)
|
||||||
|
new_size_bytes = os.path.getsize(file_path)
|
||||||
|
logger.info("[rvs] Bild verkleinert: %dx%d → %dx%d, %.1fMB → %.1fMB",
|
||||||
|
orig_w, orig_h, img.size[0], img.size[1],
|
||||||
|
file_size_bytes / 1024 / 1024,
|
||||||
|
new_size_bytes / 1024 / 1024)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning("[rvs] Bild-Resize fehlgeschlagen (%s) — Original wird genutzt: %s",
|
||||||
|
file_name, e)
|
||||||
|
|
||||||
# In Pending-Queue + Flush-Timer (anti-spam Buffering)
|
# In Pending-Queue + Flush-Timer (anti-spam Buffering)
|
||||||
self._pending_files.append((file_path, file_name, file_type, size_kb, int(width or 0), int(height or 0)))
|
self._pending_files.append((file_path, file_name, file_type, size_kb, int(width or 0), int(height or 0)))
|
||||||
if self._pending_files_flush_task and not self._pending_files_flush_task.done():
|
if self._pending_files_flush_task and not self._pending_files_flush_task.done():
|
||||||
@@ -1451,6 +1778,200 @@ class ARIABridge:
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warning("[rvs] file_saved konnte nicht an App gesendet werden: %s", e)
|
logger.warning("[rvs] file_saved konnte nicht an App gesendet werden: %s", e)
|
||||||
|
|
||||||
|
elif msg_type == "chat_history_request":
|
||||||
|
# App holt verpasste Nachrichten beim Reconnect.
|
||||||
|
# payload: {since: <ts_ms>}, default 0 = alles
|
||||||
|
since = int(payload.get("since") or 0)
|
||||||
|
limit = int(payload.get("limit") or 100)
|
||||||
|
logger.info("[rvs] chat_history_request since=%d limit=%d", since, limit)
|
||||||
|
messages = self._read_chat_backup_since(since, limit=limit)
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "chat_history_response",
|
||||||
|
"payload": {"messages": messages, "since": since},
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
|
})
|
||||||
|
return
|
||||||
|
|
||||||
|
elif msg_type == "file_list_request":
|
||||||
|
# App fragt die Liste aller /shared/uploads/-Dateien an.
|
||||||
|
logger.info("[rvs] file_list_request von App")
|
||||||
|
try:
|
||||||
|
req = urllib.request.Request(
|
||||||
|
"http://localhost:3001/api/files-list",
|
||||||
|
method="GET",
|
||||||
|
)
|
||||||
|
def _do_list():
|
||||||
|
try:
|
||||||
|
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||||
|
return json.loads(resp.read().decode("utf-8", errors="ignore"))
|
||||||
|
except Exception as e:
|
||||||
|
return {"ok": False, "error": str(e)}
|
||||||
|
d = await asyncio.get_event_loop().run_in_executor(None, _do_list)
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "file_list_response",
|
||||||
|
"payload": d,
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
|
})
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning("[rvs] file_list_request: %s", e)
|
||||||
|
return
|
||||||
|
|
||||||
|
elif msg_type == "file_delete_batch_request":
|
||||||
|
# App will mehrere Dateien auf einmal loeschen.
|
||||||
|
paths = payload.get("paths") or []
|
||||||
|
req_id = payload.get("requestId", "")
|
||||||
|
logger.warning("[rvs] file_delete_batch_request: %d Pfade", len(paths))
|
||||||
|
try:
|
||||||
|
body_bytes = json.dumps({"paths": paths}).encode("utf-8")
|
||||||
|
req = urllib.request.Request(
|
||||||
|
"http://localhost:3001/api/files-delete-batch",
|
||||||
|
data=body_bytes, method="POST",
|
||||||
|
headers={"Content-Type": "application/json"},
|
||||||
|
)
|
||||||
|
def _do_delete():
|
||||||
|
try:
|
||||||
|
with urllib.request.urlopen(req, timeout=30) as resp:
|
||||||
|
return resp.status, resp.read().decode("utf-8", errors="ignore")
|
||||||
|
except Exception as e:
|
||||||
|
return None, str(e)
|
||||||
|
status, body = await asyncio.get_event_loop().run_in_executor(None, _do_delete)
|
||||||
|
logger.info("[rvs] file_delete_batch result: status=%s", status)
|
||||||
|
# Server broadcastet file_deleted pro Pfad — App kriegt das via persistente RVS.
|
||||||
|
# Wir bestaetigen zusaetzlich mit Counts.
|
||||||
|
try: d = json.loads(body or "{}")
|
||||||
|
except: d = {}
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "file_delete_batch_response",
|
||||||
|
"payload": {
|
||||||
|
"requestId": req_id,
|
||||||
|
"deleted": len(d.get("deleted", [])),
|
||||||
|
"errors": d.get("errors", []),
|
||||||
|
},
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
|
})
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning("[rvs] file_delete_batch_request: %s", e)
|
||||||
|
return
|
||||||
|
|
||||||
|
elif msg_type == "file_zip_request":
|
||||||
|
# App will mehrere Dateien als ZIP. Bridge holt ZIP von Diagnostic
|
||||||
|
# via HTTP, kodiert base64 und schickt zurueck. Cap auf 30 MB
|
||||||
|
# ZIP-Groesse damit RVS nicht erstickt.
|
||||||
|
paths = payload.get("paths") or []
|
||||||
|
req_id = payload.get("requestId", "")
|
||||||
|
logger.warning("[rvs] file_zip_request: %d Pfade (req=%s)", len(paths), req_id)
|
||||||
|
|
||||||
|
def _do_zip():
|
||||||
|
try:
|
||||||
|
body_bytes = json.dumps({"paths": paths}).encode("utf-8")
|
||||||
|
req = urllib.request.Request(
|
||||||
|
"http://localhost:3001/api/files-download-zip",
|
||||||
|
data=body_bytes, method="POST",
|
||||||
|
headers={"Content-Type": "application/json"},
|
||||||
|
)
|
||||||
|
with urllib.request.urlopen(req, timeout=120) as resp:
|
||||||
|
if resp.status != 200:
|
||||||
|
return None, f"HTTP {resp.status}"
|
||||||
|
data = resp.read()
|
||||||
|
if len(data) > 30 * 1024 * 1024:
|
||||||
|
return None, f"ZIP zu gross ({len(data) // (1024*1024)} MB > 30 MB)"
|
||||||
|
return data, None
|
||||||
|
except Exception as e:
|
||||||
|
return None, str(e)
|
||||||
|
|
||||||
|
data, err = await asyncio.get_event_loop().run_in_executor(None, _do_zip)
|
||||||
|
if err or data is None:
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "file_zip_response",
|
||||||
|
"payload": {"requestId": req_id, "ok": False, "error": err or "leer"},
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
|
})
|
||||||
|
return
|
||||||
|
import base64 as _b64
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "file_zip_response",
|
||||||
|
"payload": {
|
||||||
|
"requestId": req_id, "ok": True,
|
||||||
|
"size": len(data),
|
||||||
|
"data": _b64.b64encode(data).decode("ascii"),
|
||||||
|
},
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
|
})
|
||||||
|
return
|
||||||
|
|
||||||
|
elif msg_type == "file_delete_request":
|
||||||
|
# App will eine Datei loeschen — leite an Diagnostic.
|
||||||
|
p = payload.get("path", "")
|
||||||
|
logger.warning("[rvs] file_delete_request von App: %s", p)
|
||||||
|
try:
|
||||||
|
body_bytes = json.dumps({"path": p}).encode("utf-8")
|
||||||
|
req = urllib.request.Request(
|
||||||
|
"http://localhost:3001/api/files-delete",
|
||||||
|
data=body_bytes,
|
||||||
|
method="POST",
|
||||||
|
headers={"Content-Type": "application/json"},
|
||||||
|
)
|
||||||
|
def _do_delete():
|
||||||
|
try:
|
||||||
|
with urllib.request.urlopen(req, timeout=10) as resp:
|
||||||
|
return resp.status, resp.read().decode("utf-8", errors="ignore")
|
||||||
|
except Exception as e:
|
||||||
|
return None, str(e)
|
||||||
|
status, body = await asyncio.get_event_loop().run_in_executor(None, _do_delete)
|
||||||
|
logger.info("[rvs] file_delete_request %s: status=%s", p, status)
|
||||||
|
# Diagnostic broadcastet file_deleted via sendToRVS_raw — kommt
|
||||||
|
# ueber den persistenten WS-Path zur App. Wir bestaetigen
|
||||||
|
# zusaetzlich, damit der Caller sicher ist dass es durch ist.
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning("[rvs] file_delete_request: %s", e)
|
||||||
|
return
|
||||||
|
|
||||||
|
elif msg_type == "location_update":
|
||||||
|
# Live-GPS-Update von der App (nicht an Chat gekoppelt). Wird in
|
||||||
|
# /shared/state/location.json geschrieben, damit Watcher-Trigger
|
||||||
|
# near()-Conditions auswerten koennen.
|
||||||
|
lat = payload.get("lat")
|
||||||
|
lon = payload.get("lon") or payload.get("lng")
|
||||||
|
if lat is not None and lon is not None:
|
||||||
|
self._persist_location({"lat": lat, "lon": lon})
|
||||||
|
logger.debug("[gps] location_update: %.5f, %.5f", float(lat), float(lon))
|
||||||
|
return
|
||||||
|
|
||||||
|
elif msg_type == "container_restart":
|
||||||
|
# App-Button "Container neu" — leitet generisch an Diagnostic
|
||||||
|
# weiter. Whitelist ist im Diagnostic-Server.
|
||||||
|
name = payload.get("name", "")
|
||||||
|
logger.warning("[rvs] container_restart Request von App: %s", name)
|
||||||
|
try:
|
||||||
|
body_bytes = json.dumps({"name": name}).encode("utf-8")
|
||||||
|
req = urllib.request.Request(
|
||||||
|
"http://localhost:3001/api/container-restart",
|
||||||
|
data=body_bytes,
|
||||||
|
method="POST",
|
||||||
|
headers={"Content-Type": "application/json"},
|
||||||
|
)
|
||||||
|
def _do_restart():
|
||||||
|
try:
|
||||||
|
with urllib.request.urlopen(req, timeout=45) as resp:
|
||||||
|
return resp.status, resp.read().decode("utf-8", errors="ignore")
|
||||||
|
except Exception as e:
|
||||||
|
return None, str(e)
|
||||||
|
status, body = await asyncio.get_event_loop().run_in_executor(None, _do_restart)
|
||||||
|
logger.info("[rvs] container_restart %s Result: status=%s", name, status)
|
||||||
|
ok = status == 200
|
||||||
|
await self._send_to_rvs({
|
||||||
|
"type": "chat",
|
||||||
|
"payload": {
|
||||||
|
"text": f"[Container] {name} neu gestartet." if ok
|
||||||
|
else f"[Container] Restart {name} fehlgeschlagen: {body[:200]}",
|
||||||
|
"sender": "aria",
|
||||||
|
},
|
||||||
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
|
})
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning("[rvs] container_restart Weiterleitung fehlgeschlagen: %s", e)
|
||||||
|
return
|
||||||
|
|
||||||
elif msg_type == "file_request":
|
elif msg_type == "file_request":
|
||||||
# App fordert eine Datei an (Re-Download nach Cache-Leerung)
|
# App fordert eine Datei an (Re-Download nach Cache-Leerung)
|
||||||
server_path = payload.get("serverPath", "")
|
server_path = payload.get("serverPath", "")
|
||||||
@@ -1468,6 +1989,7 @@ class ARIABridge:
|
|||||||
return
|
return
|
||||||
with open(server_path, "rb") as f:
|
with open(server_path, "rb") as f:
|
||||||
file_b64 = base64.b64encode(f.read()).decode("ascii")
|
file_b64 = base64.b64encode(f.read()).decode("ascii")
|
||||||
|
mime, _ = mimetypes.guess_type(server_path)
|
||||||
logger.info("[rvs] Re-Download: %s (%dKB)", server_path, len(file_b64) // 1365)
|
logger.info("[rvs] Re-Download: %s (%dKB)", server_path, len(file_b64) // 1365)
|
||||||
await self._send_to_rvs({
|
await self._send_to_rvs({
|
||||||
"type": "file_response",
|
"type": "file_response",
|
||||||
@@ -1476,6 +1998,7 @@ class ARIABridge:
|
|||||||
"serverPath": server_path,
|
"serverPath": server_path,
|
||||||
"base64": file_b64,
|
"base64": file_b64,
|
||||||
"name": os.path.basename(server_path),
|
"name": os.path.basename(server_path),
|
||||||
|
"mimeType": mime or "application/octet-stream",
|
||||||
},
|
},
|
||||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
})
|
})
|
||||||
@@ -1500,9 +2023,19 @@ class ARIABridge:
|
|||||||
self._next_speed_override = speed if 0.1 <= speed <= 5.0 else None
|
self._next_speed_override = speed if 0.1 <= speed <= 5.0 else None
|
||||||
except (TypeError, ValueError):
|
except (TypeError, ValueError):
|
||||||
self._next_speed_override = None
|
self._next_speed_override = None
|
||||||
logger.info("[rvs] Audio empfangen: %s, %dms, %dKB",
|
interrupted = bool(payload.get("interrupted", False))
|
||||||
mime_type, duration_ms, len(audio_b64) // 1365)
|
audio_request_id = payload.get("audioRequestId", "") or ""
|
||||||
asyncio.create_task(self._process_app_audio(audio_b64, mime_type))
|
location = payload.get("location") or None
|
||||||
|
# State persist fuer Brain-Watcher (current_lat etc.)
|
||||||
|
self._persist_location(location)
|
||||||
|
self._persist_user_activity()
|
||||||
|
logger.info("[rvs] Audio empfangen: %s, %dms, %dKB%s%s%s",
|
||||||
|
mime_type, duration_ms, len(audio_b64) // 1365,
|
||||||
|
" [BARGE-IN]" if interrupted else "",
|
||||||
|
" [GPS]" if location else "",
|
||||||
|
f" reqId={audio_request_id[:16]}" if audio_request_id else "")
|
||||||
|
asyncio.create_task(self._process_app_audio(
|
||||||
|
audio_b64, mime_type, interrupted, audio_request_id, location))
|
||||||
|
|
||||||
elif msg_type == "stt_response":
|
elif msg_type == "stt_response":
|
||||||
# Antwort der whisper-bridge auf unseren stt_request
|
# Antwort der whisper-bridge auf unseren stt_request
|
||||||
@@ -1558,8 +2091,23 @@ class ARIABridge:
|
|||||||
_STT_REMOTE_TIMEOUT_READY_S = 45.0
|
_STT_REMOTE_TIMEOUT_READY_S = 45.0
|
||||||
_STT_REMOTE_TIMEOUT_LOADING_S = 300.0
|
_STT_REMOTE_TIMEOUT_LOADING_S = 300.0
|
||||||
|
|
||||||
async def _process_app_audio(self, audio_b64: str, mime_type: str) -> None:
|
async def _process_app_audio(self, audio_b64: str, mime_type: str,
|
||||||
"""App-Audio → STT → aria-core. Primaer via whisper-bridge (RVS), Fallback lokal."""
|
interrupted: bool = False,
|
||||||
|
audio_request_id: str = "",
|
||||||
|
location: Optional[dict] = None) -> None:
|
||||||
|
"""App-Audio → STT → aria-core. Primaer via whisper-bridge (RVS), Fallback lokal.
|
||||||
|
|
||||||
|
interrupted=True wenn der User waehrend ARIA noch sprach/dachte aufgenommen hat
|
||||||
|
(Barge-In). Wird als Hinweis-Praefix an aria-core mitgegeben damit ARIA die
|
||||||
|
Korrektur/Unterbrechung in den Kontext einordnen kann statt als reine
|
||||||
|
Folgefrage zu behandeln.
|
||||||
|
|
||||||
|
audio_request_id: Korrelations-ID die die App im audio-Event mitschickt — wird
|
||||||
|
unveraendert ans STT-Result zurueckgegeben damit die App die EXAKT richtige
|
||||||
|
'wird verarbeitet'-Bubble ersetzen kann (auch bei mehreren parallelen Aufnahmen).
|
||||||
|
|
||||||
|
location: Optional GPS-Position {lat, lon} — wird als Hinweis-Praefix mitgegeben
|
||||||
|
damit ARIA bei standortbezogenen Fragen sie nutzen kann."""
|
||||||
# Erst Remote versuchen
|
# Erst Remote versuchen
|
||||||
text = await self._stt_remote(audio_b64, mime_type)
|
text = await self._stt_remote(audio_b64, mime_type)
|
||||||
if text is None:
|
if text is None:
|
||||||
@@ -1571,21 +2119,38 @@ class ARIABridge:
|
|||||||
|
|
||||||
if text.strip():
|
if text.strip():
|
||||||
logger.info("[rvs] STT Ergebnis: '%s'", text[:80])
|
logger.info("[rvs] STT Ergebnis: '%s'", text[:80])
|
||||||
# ERST an aria-core senden (wichtigster Schritt)
|
|
||||||
await self.send_to_core(text, source="app-voice")
|
# Reihenfolge wichtig: STT-Text ZUERST broadcasten damit die App
|
||||||
# STT-Text an RVS senden (fuer Anzeige in App + Diagnostic)
|
# die Voice-Bubble sofort mit dem erkannten Text aktualisieren
|
||||||
# sender="stt" damit Bridge es ignoriert (kein Loop)
|
# kann — send_to_core blockt danach synchron auf Brain (kann
|
||||||
|
# dauern), wuerde sonst die Anzeige verzoegern.
|
||||||
try:
|
try:
|
||||||
await self._send_to_rvs({
|
stt_payload = {
|
||||||
"type": "chat",
|
|
||||||
"payload": {
|
|
||||||
"text": text,
|
"text": text,
|
||||||
"sender": "stt",
|
"sender": "stt",
|
||||||
},
|
}
|
||||||
|
if audio_request_id:
|
||||||
|
stt_payload["audioRequestId"] = audio_request_id
|
||||||
|
# GPS aus dem Original-Audio-Payload mitgeben — Diagnostic
|
||||||
|
# zeigt sie sonst nicht an (App sendet location nur einmal,
|
||||||
|
# die im audio-Payload). Reine Anzeige-Information.
|
||||||
|
if location:
|
||||||
|
stt_payload["location"] = location
|
||||||
|
ok = await self._send_to_rvs({
|
||||||
|
"type": "chat",
|
||||||
|
"payload": stt_payload,
|
||||||
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
"timestamp": int(asyncio.get_event_loop().time() * 1000),
|
||||||
})
|
})
|
||||||
|
if ok:
|
||||||
|
logger.info("[rvs] STT-Text an RVS broadcastet (sender=stt)")
|
||||||
|
else:
|
||||||
|
logger.warning("[rvs] STT-Text NICHT broadcastet — _send_to_rvs lieferte False")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.warning("[rvs] STT-Text konnte nicht an RVS gesendet werden: %s", e)
|
logger.warning("[rvs] STT-Text konnte nicht an RVS gesendet werden: %s", e)
|
||||||
|
|
||||||
|
# Dann an Brain — der blockt synchron bis ARIA fertig ist.
|
||||||
|
core_text = self._build_core_text(text, interrupted, location)
|
||||||
|
await self.send_to_core(core_text, source="app-voice" + (" [barge-in]" if interrupted else ""))
|
||||||
else:
|
else:
|
||||||
logger.info("[rvs] Keine Sprache erkannt — ignoriert")
|
logger.info("[rvs] Keine Sprache erkannt — ignoriert")
|
||||||
|
|
||||||
@@ -1837,7 +2402,8 @@ class ARIABridge:
|
|||||||
self.running = True
|
self.running = True
|
||||||
|
|
||||||
tasks = [
|
tasks = [
|
||||||
asyncio.create_task(self.connect_to_core()),
|
# connect_to_core entfaellt — Bridge ruft jetzt aria-brain ueber
|
||||||
|
# HTTP (siehe send_to_core). Keine persistente WS-Verbindung mehr.
|
||||||
asyncio.create_task(self.connect_to_rvs()),
|
asyncio.create_task(self.connect_to_rvs()),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|||||||
@@ -16,3 +16,6 @@ sounddevice
|
|||||||
|
|
||||||
# Wake-Word Erkennung
|
# Wake-Word Erkennung
|
||||||
openwakeword
|
openwakeword
|
||||||
|
|
||||||
|
# Bild-Resizing (zu grosse Pixel-Bilder shrinken bevor Claude-Vision sie sieht — 5MB-Limit)
|
||||||
|
Pillow
|
||||||
|
|||||||
@@ -1,5 +1,7 @@
|
|||||||
FROM node:22-alpine
|
FROM node:22-alpine
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
# zip fuer Multi-Datei-Downloads (Brain-Export nutzt tar.gz, Datei-Manager zip)
|
||||||
|
RUN apk add --no-cache zip
|
||||||
COPY package.json ./
|
COPY package.json ./
|
||||||
RUN npm install --production
|
RUN npm install --production
|
||||||
COPY . .
|
COPY . .
|
||||||
|
|||||||
+2031
-529
File diff suppressed because it is too large
Load Diff
+623
-738
File diff suppressed because it is too large
Load Diff
+44
-37
@@ -28,38 +28,40 @@ services:
|
|||||||
networks:
|
networks:
|
||||||
- aria-net
|
- aria-net
|
||||||
|
|
||||||
# ─── OpenClaw (ARIA Gehirn) ─────────────────────────────
|
# ─── Qdrant (Vector-DB fuer ARIAs Gedaechtnis) ────────
|
||||||
aria:
|
# Storage liegt im Repo-Bind-Mount aria-data/brain/qdrant.
|
||||||
image: ghcr.io/openclaw/openclaw:latest
|
# Damit Backup/Export/Import komplett ueber das Filesystem gehen.
|
||||||
container_name: aria-core
|
qdrant:
|
||||||
hostname: aria-wohnung
|
image: qdrant/qdrant:latest
|
||||||
privileged: true # ARIAs Wohnung — sie hat die Schlüssel
|
container_name: aria-qdrant
|
||||||
|
volumes:
|
||||||
|
- ./aria-data/brain/qdrant:/qdrant/storage
|
||||||
|
restart: unless-stopped
|
||||||
|
networks:
|
||||||
|
- aria-net
|
||||||
|
|
||||||
|
# ─── ARIA Brain (Agent + Memory) ─────────────────────────
|
||||||
|
# Loest das alte aria-core (OpenClaw) ab. Vector-DB-basiertes
|
||||||
|
# Memory, eigener Agent-Loop, SSH zur aria-wohnung-VM.
|
||||||
|
brain:
|
||||||
|
build: ./aria-brain
|
||||||
|
container_name: aria-brain
|
||||||
|
hostname: aria-wohnung-brain # damit ssh known_hosts stabil bleibt
|
||||||
extra_hosts:
|
extra_hosts:
|
||||||
- "host.docker.internal:host-gateway" # Zugriff auf die VM via SSH
|
- "host.docker.internal:host-gateway" # Zugriff auf die VM via SSH
|
||||||
depends_on:
|
depends_on:
|
||||||
|
- qdrant
|
||||||
- proxy
|
- proxy
|
||||||
ports:
|
|
||||||
- "3001:3001" # Diagnostic Web-UI (laeuft im shared network)
|
|
||||||
environment:
|
environment:
|
||||||
- CANVAS_HOST=127.0.0.1
|
- QDRANT_HOST=aria-qdrant
|
||||||
- OPENCLAW_GATEWAY_TOKEN=${ARIA_AUTH_TOKEN}
|
- QDRANT_PORT=6333
|
||||||
- DEFAULT_MODEL=proxy/claude-sonnet-4
|
- PROXY_URL=http://proxy:3456
|
||||||
- RATE_LIMIT_PER_USER=30
|
- ARIA_AUTH_TOKEN=${ARIA_AUTH_TOKEN:-}
|
||||||
- DISPLAY=:0
|
|
||||||
volumes:
|
volumes:
|
||||||
- openclaw-config:/home/node/.openclaw # OpenClaw Config (persistiert Model + Auth)
|
- ./aria-data/brain/data:/data # Memory-Cache + Skills + Models (bind-mount fuer Export)
|
||||||
- ./aria-data/brain:/home/node/.openclaw/workspace/memory
|
- ./aria-data/brain-import:/import:ro # Quell-MDs fuer den initialen Memory-Import (read-only)
|
||||||
- ./aria-data/skills:/home/node/.openclaw/workspace/skills
|
- ./aria-data/ssh:/root/.ssh # SSH-Keys fuer aria-wohnung (geteilt mit Proxy)
|
||||||
- ./aria-data/config/AGENT.md:/home/node/.openclaw/workspace/AGENT.md
|
- aria-shared:/shared # gleicher Austausch-Speicher wie Bridge
|
||||||
- ./aria-data/config/USER.md:/home/node/.openclaw/workspace/USER.md
|
|
||||||
- ./aria-data/config/BOOTSTRAP.md:/home/node/.openclaw/workspace/BOOTSTRAP.md
|
|
||||||
- ./aria-data/config/BOOTSTRAP.md:/home/node/.openclaw/workspace/CLAUDE.md
|
|
||||||
- ./aria-data/config/openclaw.env:/home/node/.openclaw/workspace/.env
|
|
||||||
- claude-config:/home/node/.claude # Claude Code Settings (Permissions)
|
|
||||||
- ./aria-data/ssh:/home/node/.ssh # SSH Keys fuer VM-Zugriff
|
|
||||||
- /tmp/.X11-unix:/tmp/.X11-unix
|
|
||||||
- /var/run/docker.sock:/var/run/docker.sock # VM von innen verwalten
|
|
||||||
- aria-shared:/shared # Shared Volume fuer Datei-Austausch (Bridge <> Core)
|
|
||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
networks:
|
networks:
|
||||||
- aria-net
|
- aria-net
|
||||||
@@ -69,11 +71,13 @@ services:
|
|||||||
build: ./bridge
|
build: ./bridge
|
||||||
container_name: aria-bridge
|
container_name: aria-bridge
|
||||||
depends_on:
|
depends_on:
|
||||||
- aria
|
- brain
|
||||||
network_mode: "service:aria" # Teilt Netzwerk mit aria-core → localhost:18789
|
networks:
|
||||||
|
- aria-net
|
||||||
|
ports:
|
||||||
|
- "3001:3001" # Diagnostic Web-UI (Diagnostic teilt Netzwerk mit Bridge)
|
||||||
volumes:
|
volumes:
|
||||||
- ./aria-data/config/aria.env:/config/aria.env
|
- aria-shared:/shared # Shared Volume fuer Datei-Austausch
|
||||||
- aria-shared:/shared # Shared Volume fuer Datei-Austausch (Bridge <> Core)
|
|
||||||
# Audio-Zugriff
|
# Audio-Zugriff
|
||||||
- /run/user/1000/pulse:/run/user/1000/pulse
|
- /run/user/1000/pulse:/run/user/1000/pulse
|
||||||
- /dev/snd:/dev/snd
|
- /dev/snd:/dev/snd
|
||||||
@@ -81,6 +85,7 @@ services:
|
|||||||
- /dev/snd
|
- /dev/snd
|
||||||
environment:
|
environment:
|
||||||
- PULSE_SERVER=unix:/run/user/1000/pulse/native
|
- PULSE_SERVER=unix:/run/user/1000/pulse/native
|
||||||
|
- BRAIN_URL=http://aria-brain:8080
|
||||||
- ARIA_AUTH_TOKEN=${ARIA_AUTH_TOKEN:-}
|
- ARIA_AUTH_TOKEN=${ARIA_AUTH_TOKEN:-}
|
||||||
- RVS_HOST=${RVS_HOST:-}
|
- RVS_HOST=${RVS_HOST:-}
|
||||||
- RVS_PORT=${RVS_PORT:-443}
|
- RVS_PORT=${RVS_PORT:-443}
|
||||||
@@ -90,19 +95,23 @@ services:
|
|||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
|
||||||
# ─── Diagnostic (Selbstcheck-UI und Einstellungen) ────
|
# ─── Diagnostic (Selbstcheck-UI und Einstellungen) ────
|
||||||
|
# Teilt Netzwerk mit Bridge, damit der Diagnostic-Server die
|
||||||
|
# Bridge auf localhost erreichen kann.
|
||||||
diagnostic:
|
diagnostic:
|
||||||
build: ./diagnostic
|
build: ./diagnostic
|
||||||
container_name: aria-diagnostic
|
container_name: aria-diagnostic
|
||||||
depends_on:
|
depends_on:
|
||||||
- aria
|
- bridge
|
||||||
network_mode: "service:aria" # Teilt Netzwerk mit aria-core → localhost:18789
|
network_mode: "service:bridge"
|
||||||
volumes:
|
volumes:
|
||||||
- /var/run/docker.sock:/var/run/docker.sock:ro
|
- /var/run/docker.sock:/var/run/docker.sock # Container Restart + Brain-Export/Import
|
||||||
- ./aria-data/config/diag-state:/data # Persistenter State (aktive Session etc.)
|
- ./aria-data/config/diag-state:/data # Persistenter State (aktive Session etc.)
|
||||||
- aria-shared:/shared # Shared Volume (Uploads + Config)
|
- aria-shared:/shared # Shared Volume (Uploads + Config + Voices)
|
||||||
|
- ./aria-data/brain:/brain # Brain-Export/Import (tar.gz aus Bind-Mount)
|
||||||
environment:
|
environment:
|
||||||
- ARIA_AUTH_TOKEN=${ARIA_AUTH_TOKEN:-}
|
- ARIA_AUTH_TOKEN=${ARIA_AUTH_TOKEN:-}
|
||||||
- PROXY_URL=http://proxy:3456
|
- PROXY_URL=http://proxy:3456
|
||||||
|
- BRAIN_URL=http://aria-brain:8080
|
||||||
- RVS_HOST=${RVS_HOST:-}
|
- RVS_HOST=${RVS_HOST:-}
|
||||||
- RVS_PORT=${RVS_PORT:-443}
|
- RVS_PORT=${RVS_PORT:-443}
|
||||||
- RVS_TLS=${RVS_TLS:-true}
|
- RVS_TLS=${RVS_TLS:-true}
|
||||||
@@ -111,9 +120,7 @@ services:
|
|||||||
restart: unless-stopped
|
restart: unless-stopped
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
openclaw-config: # Persistiert ~/.openclaw (Model, Auth, Sessions)
|
aria-shared: # Datei-Austausch zwischen Bridge / Brain / Diagnostic
|
||||||
claude-config: # Persistiert ~/.claude (Permissions, Settings)
|
|
||||||
aria-shared: # Datei-Austausch zwischen Bridge und Core
|
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
aria-net:
|
aria-net:
|
||||||
|
|||||||
@@ -0,0 +1,31 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# ════════════════════════════════════════════════════════════
|
||||||
|
# ARIA — Setup-Script
|
||||||
|
#
|
||||||
|
# Aktuell nur noch der .env-Bootstrap (Tokens + RVS). Alle weiteren
|
||||||
|
# Settings landen ueber die Diagnostic in /shared/config/runtime.json
|
||||||
|
# (persistent in der "Datenbank").
|
||||||
|
#
|
||||||
|
# Im Phase-A-Cleanup-Status: System-Prompt-Files liegen unter
|
||||||
|
# aria-data/brain-import/ und werden vom neuen Agent-Framework
|
||||||
|
# spaeter importiert. OpenClaw laeuft noch ohne Persoenlichkeit.
|
||||||
|
# ════════════════════════════════════════════════════════════
|
||||||
|
|
||||||
|
set -e
|
||||||
|
cd "$(dirname "$0")"
|
||||||
|
|
||||||
|
if [ ! -f .env ]; then
|
||||||
|
if [ -f .env.example ]; then
|
||||||
|
cp .env.example .env
|
||||||
|
echo "✓ .env erstellt aus .env.example — Tokens jetzt eintragen!"
|
||||||
|
else
|
||||||
|
echo "⚠ Keine .env.example gefunden — manuell anlegen."
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo ".env existiert bereits — uebersprungen."
|
||||||
|
fi
|
||||||
|
|
||||||
|
# ── Brain-Verzeichnisse anlegen (Bind-Mounts fuer aria-brain + aria-qdrant)
|
||||||
|
# Inhalt ist gitignored — wird ueber Diagnostic-Export/Import gesichert.
|
||||||
|
mkdir -p aria-data/brain/data aria-data/brain/qdrant
|
||||||
|
echo "✓ aria-data/brain/{data,qdrant} bereit"
|
||||||
@@ -1,7 +1,131 @@
|
|||||||
# ARIA Issues & Features
|
# ARIA Issues & Features
|
||||||
|
|
||||||
|
## Audio-Verhalten in der App
|
||||||
|
|
||||||
|
So sollte die App in den verschiedenen Phasen mit fremden Audio-Apps
|
||||||
|
(Spotify, YouTube, Podcasts etc.) und dem eigenen Mikro umgehen.
|
||||||
|
Wenn was anders ist, ist's ein Bug.
|
||||||
|
|
||||||
|
| Phase | Andere App (Spotify) | ARIA-Mikro | Hintergrund-Service |
|
||||||
|
|------------------------------|----------------------|---------------------|---------------------|
|
||||||
|
| Idle / Ohr aus | spielt frei | aus | aus |
|
||||||
|
| Wake-Word lauscht (armed) | spielt frei | passiv (openWakeWord) | aktiv ('wake') |
|
||||||
|
| User-Aufnahme laeuft | pausiert (EXCLUSIVE) | Recording | aktiv ('rec') |
|
||||||
|
| Aufnahme zu Ende | resumed | aus | (rec released) |
|
||||||
|
| ARIA denkt/schreibt (~20s) | spielt frei | aus | (kein Slot) |
|
||||||
|
| TTS startet | pausiert (DUCK) | aus (oder barge) | aktiv ('tts') |
|
||||||
|
| TTS spielt (auch GPU-Pausen) | bleibt pausiert | barge wenn Wake-Word| aktiv |
|
||||||
|
| TTS zu Ende | nach 800ms resumed | (Conversation-Window)| (tts released) |
|
||||||
|
| Eingehender Anruf (auch VoIP)| — | Mikro pausiert | aus |
|
||||||
|
| Anruf vorbei | — | Mikro wieder armed | aktiv ('wake') |
|
||||||
|
| Anruf vorbei (Auto-Resume) | pausiert wieder | aus | aktiv ('tts') |
|
||||||
|
| Neue Frage waehrend Anruf | — | Mikro pausiert | (rec waehrend Anruf nicht) |
|
||||||
|
| Anruf vorbei nach neuer Frage | (siehe TTS-Phasen) | (siehe TTS-Phasen) | (tts gewinnt, alter Resume verworfen) |
|
||||||
|
|
||||||
|
Wichtige Mechanismen:
|
||||||
|
- **Underrun-Schutz** im PcmStreamPlayer fuettert Stille rein wenn die
|
||||||
|
Bridge in Render-Pausen liefert — Spotify bleibt durchgehend pausiert,
|
||||||
|
auch zwischen den Saetzen einer langen Antwort.
|
||||||
|
- **Conversation-Focus** (nur bei Wake-Word 'conversing') haelt den
|
||||||
|
AudioFocus dauerhaft. Bei reinem Tap-to-Talk oder Text-Chat greift's
|
||||||
|
nicht — Spotify darf in der Denk-Phase ruhig weiterspielen.
|
||||||
|
- **Foreground-Service** (mediaPlayback|microphone) haelt App-Prozess
|
||||||
|
am Leben damit TTS/Mikro/Wake-Word auch bei minimierter App weiter-
|
||||||
|
laufen. Notification zeigt aktuellen Status ("ARIA spricht/hoert
|
||||||
|
zu/bereit").
|
||||||
|
- **Anruf-Erkennung** ueber TelephonyManager (klassisch) + AudioFocus-
|
||||||
|
Loss-Listener mit Polling-Fallback (VoIP wie WhatsApp/Signal/Discord).
|
||||||
|
- **Auto-Resume nach Anruf**: beim Halt wird die Wiedergabe-Position
|
||||||
|
gemerkt (Date.now() - playbackStart - leadingSilence). Nach Auflegen
|
||||||
|
wartet die App bis zu 30s auf den WAV-Cache und spielt dann ab der
|
||||||
|
gemerkten Position weiter. Wenn das Telefonat länger als die Antwort
|
||||||
|
dauerte, ist der Cache schon fertig — instant Resume.
|
||||||
|
- **Neue Frage waehrend Anruf** (Text-Chat geht trotz Telefonat): die
|
||||||
|
neue Antwort ueberschreibt den pending Resume. _handlePcmChunkImpl
|
||||||
|
stoppt einen ggf. laufenden resumeSound und setzt pausedMessageId
|
||||||
|
zurueck wenn die neue Stream-messageId abweicht. Die letzte Antwort
|
||||||
|
gewinnt immer.
|
||||||
|
- **Audio-Ausgabe trotz aktivem Telefonat**: ARIA antwortet auch waehrend
|
||||||
|
eines Telefonats per Lautsprecher (Telefon-Audio geht ueber separaten
|
||||||
|
Stream zur Gegenseite). haltAllPlayback wird nur beim STATE-WECHSEL
|
||||||
|
ringing/offhook gerufen — wenn der Anruf schon laeuft (offhook→offhook),
|
||||||
|
triggert eine neue Frage keinen Halt mehr.
|
||||||
|
|
||||||
## Erledigt
|
## Erledigt
|
||||||
|
|
||||||
|
### Bugs / Fixes
|
||||||
|
|
||||||
|
- [x] **"ARIA denkt..." haengt nach Brain-Antwort** (App + Diagnostic): `send_to_core` schickte `thinking` direkt via `_send_to_rvs`, hat aber `_last_activity_state` nicht gepflegt — der spaetere `_emit_activity("idle")` wurde dedupliziert und verschluckt. Fix: durchgehend `_emit_activity` fuer beide Zustaende
|
||||||
|
- [x] **Such-Scroll in App-Chat springt jetzt zur Treffer-Bubble**: `scrollToIndex` wurde zu frueh gerufen + `viewPosition: 0.4` schoss vorbei. Fix: `requestAnimationFrame` + `viewPosition: 0.5` + `onScrollToIndexFailed`-Fallback mit averageItemLength-Schaetzung + 250ms-Retry
|
||||||
|
- [x] **STT-Bubble bekommt den Text jetzt sofort** (nicht erst mit ARIAs Antwort): `_process_app_audio` rief erst `send_to_core` (blockt synchron) und DANN STT-Broadcast. Fix: Reihenfolge getauscht — STT raus, dann Core-Call
|
||||||
|
- [x] **ARIA-Antworten landen wieder in der Diagnostic**: `if (sender === 'aria') return;` im `rvs_chat`-Handler war OpenClaw-Leiche und filterte die neuen Brain-Antworten weg. Fix: aria → received-Bubble
|
||||||
|
- [x] **Brain-Card im Main-Tab zeigt jetzt Live-Status**: `updateState` ueberschrieb die Card mit altem `state.gateway`-Text aus OpenClaw-Zeiten. Fix: `updateState` laesst Brain-Card unangetastet, `loadBrainStatus` synchronisiert beide Cards (Main + Gehirn-Tab) alle 15s
|
||||||
|
- [x] **App-Chat-Sync zeigte veralteten Stand**: `since:lastSync` war diff-only — wenn Server geleert war, blieb die App-History stehen. Fix: `since:0, limit:200` komplett-Replace (Server = Source of Truth). Lokal-only Bubbles (Skill-Notifications, laufende Voice ohne STT) bleiben erhalten
|
||||||
|
- [x] **Konversation-Reset leert jetzt beides**: vorher leerte der Button nur das Brain-Memory, `chat_backup.jsonl` blieb. Fix: ein Button feuert `Promise.all` auf `/api/brain/conversation/reset` + `/api/chat-history-clear`, plus `chat_cleared`-Broadcast via RVS damit App + Diagnostic sich live leeren
|
||||||
|
- [x] **JS-Crashes beim Diagnostic-Laden behoben**: Ghost-IDs aus OpenClaw-Zeiten (`gw-dot`, `openclaw-config`, `btn-core-term`, `core-auth`, `perms-status`, `rc-compact-after`) wurden null-referenziert. Fix: null-safe oder Code raus
|
||||||
|
- [x] Diagnostic: "ARIA denkt..." bleibt nicht mehr stehen
|
||||||
|
- [x] App: "ARIA denkt..." Indicator + Abbrechen-Button (Bridge spiegelt agent_activity via RVS)
|
||||||
|
- [x] Textnachrichten werden von ARIA beantwortet (Bridge chat handler fix)
|
||||||
|
- [x] Voice-Auswahl funktioniert wieder: speaker_wav als Basename statt Pfad fuer daswer123 local-Mode
|
||||||
|
- [x] Diagnostic-Voice-Wechsel resettet alle App-lokalen Voice-Overrides via type "config"
|
||||||
|
- [x] Streaming TTS Stop-Race: Writer wartet auf playbackHeadPosition vor stop()/release() — keine abgeschnittenen Saetze mehr
|
||||||
|
- [x] App: Audioausgabe hoert nicht mehr mitten im Satz auf (playbackHeadPosition wait + Stop-Race fix)
|
||||||
|
- [x] AudioFocus.release wartet auf echten Playback-Ende — kein Volume-Hochfahren mehr mid-Antwort
|
||||||
|
- [x] App Mute-/Auto-Playback-Bug: Closure-Bug geloest (ttsCanPlayRef live-gespiegelt, nicht mehr stale)
|
||||||
|
- [x] App Zombie-Recording: Ohr-aus kill laufende Aufnahme damit der Aufnahme-Button weiter funktioniert
|
||||||
|
- [x] Whisper transkribiert Voice-Uploads nicht mehr mit hardcoded "small" — aktuelles Modell wird behalten, kein unnoetiger Modell-Swap
|
||||||
|
- [x] RVS/WebSocket maxPayload 50MB: voice_upload mit WAV als base64 sprengt kein Frame-Limit mehr
|
||||||
|
- [x] Wake-Word Embedding rank-4 Fix (Pipeline-Bug der das Triggern verhinderte) + Frame-Count aus Modell-Metadaten lesen
|
||||||
|
- [x] PCM-Underrun-Schutz: Stille-Fill in Render-Pausen verhindert Spotify-Auto-Resume nach 10s Stillstand
|
||||||
|
- [x] Conversation-Focus-Lifecycle: AudioFocus haengt am Wake-Word-State 'conversing' statt an einzelnen Streams — Spotify bleibt durchgehend gepaust, auch zwischen mehreren Antworten
|
||||||
|
- [x] Voice-Override behaelt Stimme ueber alle TTS-Calls einer Antwort (vorher: nach erstem TTS-Call zurueck auf Default)
|
||||||
|
- [x] Sprachnachricht-Bubble defensiv: STT-Result fuegt neue Bubble hinzu wenn Placeholder fehlt (Race-Schutz)
|
||||||
|
- [x] Bild + Text als EINE Anfrage: Bridge buffert files 800ms, merged mit folgendem chat-Text zu einem send_to_core (statt zwei getrennten ARIA-Antworten)
|
||||||
|
- [x] Diagnostic→App: persistente RVS-Connection statt frische pro Send (Race-Probleme mit Zombie-WS geloest)
|
||||||
|
- [x] Textauswahl in Bubbles wieder funktional (nested Text+onPress raus, dataDetectorType="all" macht Links automatisch klickbar)
|
||||||
|
- [x] **Placeholder-Race bei parallelen Sprachnachrichten geloest**: jede Aufnahme bekommt eine eindeutige audioRequestId, Bridge gibt sie ans STT-Result zurueck — App matcht jetzt punktgenau die richtige Bubble statt per Substring
|
||||||
|
- [x] Mikro-Offen-Toast "🎤 sprich jetzt" erscheint erst wenn audioService.startRecording wirklich erfolgreich war (statt ~400ms vorher beim Wake-Word-Detect)
|
||||||
|
- [x] Sprachnachrichten ohne STT-Result werden nach 60s+Aufnahmedauer automatisch entfernt (sicher genug fuer 5-30min-Aufnahmen, schnell genug fuer leere Wake-Word-Echos)
|
||||||
|
- [x] VAD adaptive Baseline robuster: minimum statt avg + Cap auf -50dB bis -28dB (Stille) / -40dB bis -18dB (Speech) — keine "tote" VAD-Konfiguration mehr bei lauter Umgebung oder Wake-Word-Echo
|
||||||
|
- [x] Push-to-Talk raus, nur noch Tap-to-Talk (verhinderte Touch-Race-Probleme)
|
||||||
|
- [x] Manueller Mikro-Stop beendet Wake-Word-Konversation: Tap auf Mikro-Knopf waehrend conversing → audio raus + zurueck zu armed (= Wake-Word lauscht wieder, kein Auto-Mikro nach ARIAs Antwort). VAD-Auto-Stop bleibt bei Multi-Turn
|
||||||
|
- [x] **Wake-Word pausiert bei Anruf**: phoneCall ruft pauseForCall (openWakeWord.stop) bei RINGING/OFFHOOK, resumeFromCall bei IDLE. Pre-Call-State wird gemerkt — armed bleibt armed, conversing degraded zu armed (User soll nicht in halbem Dialog landen)
|
||||||
|
- [x] **App-Resume-Cooldown**: Wechsel von Background → Foreground triggert keinen falschen Wake-Word-Trigger mehr. AppState-Listener setzt 1.5s Cooldown in dem onWakeDetected-Events ignoriert werden (Audio-Pegel-Spike beim AudioFocus-Switch sonst als Wake-Word interpretiert)
|
||||||
|
- [x] Background-Mikro robust: acquireBackgroundAudio('rec'/'wake') wird jetzt VOR AudioRecord.startRecording gerufen — Foreground-Service mit foregroundServiceType=microphone muss aktiv sein bevor das Mikro greift, sonst blockiert Android ab 11+ den Background-Zugriff
|
||||||
|
- [x] **Stille-Pegel manuell setzbar** (Settings → Spracheingabe): Override-Wert in dB von -55 bis -15, default "automatisch". Info-Button mit Modal erklaert die Skala (niedriger = sensibler, hoeher = robuster gegen Hintergrundlaerm). Bei manuell gesetztem Wert wird die adaptive Baseline ignoriert
|
||||||
|
- [x] **Kurze TTS-Texte (1-3 Worte) spielen jetzt ab** — auf OnePlus A12 stallte AudioTrack mit `pos=0` weil der Default-Start-Threshold `bufferSize/2` (= 2s) bei kurzen Streams nie ueberschritten wurde. Fix: `setStartThresholdInFrames(100ms)` direkt nach dem Track-Build (API 31+). Buffer auf 4s entkoppelt von Pre-Roll, `play()` wird beim allerersten data-chunk gerufen
|
||||||
|
- [x] **Mute-Button stoppt jetzt auch laufenden PCM-Stream** — `pcmStreamActive` wurde beim isFinal-Chunk schon false gesetzt, der AudioTrack spielte aber noch sekundenlang aus seinem Buffer. `stopPlayback()` uebersprang darum `PcmStreamPlayer.stop()`. Fix: stop() immer rufen (ist idempotent), kein Flag-Check mehr
|
||||||
|
- [x] **GPS-Permission im Manifest + Runtime-Request** beim Settings-Toggle — vorher fehlten ACCESS_COARSE_LOCATION / ACCESS_FINE_LOCATION komplett. `Geolocation.getCurrentPosition` schlug lautlos fehl, App sendete nie ein location-Feld
|
||||||
|
- [x] **GPS-Position auch im STT-Payload an Diagnostic** — die App sendet location einmal im audio-Payload. Die Bridge nutzte sie zwar (ging in aria-core's Kontext rein), reichte sie aber nicht im STT-broadcast an Diagnostic durch. Diagnostic zeigte darum bei Spracheingaben nie den GPS-Block, obwohl der "GPS einblenden"-Toggle aktiv war
|
||||||
|
- [x] **Auto-Resume nach Anruf — pcmBuffer bleibt erhalten**: `haltAllPlayback` leerte den pcmBuffer mid-Anruf, isFinal schrieb dann eine leere WAV. Neue `pauseForCall`-Methode statt `haltAllPlayback`: AudioTrack stoppt + Focus released, `pcmBuffer` und `pcmMessageId` bleiben — chunks werden weiter gesammelt damit isFinal die WAV schreibt und resumeFromInterruption sie findet. Plus `captureInterruption` idempotent gemacht (ringing → offhook ueberschreibt nicht)
|
||||||
|
- [x] **Replay-Resume nach Anruf**: `_firePlaybackStarted` ueberschrieb `currentPlaybackMsgId` mit leerem pcmMessageId — captureInterruption hatte nichts zu merken. Plus Regex `[0-9a-f-]+\.wav` matchte nicht alle Dateinamen. Plus `_playFromPathAtPosition` aktualisiert jetzt das Tracking damit ein zweiter Anruf in derselben Antwort auch funktioniert
|
||||||
|
- [x] **`pauseForCall` setzt `isPlaying` zurueck**: vorher haengten weitere Play-Button-Klicks nach Anruf, weil `playAudio` bei `isPlaying=true` den `_playNext`-Pfad ueberspringt
|
||||||
|
- [x] **Play-Button rendert neu wenn Cache-Datei weg ist**: vorher checkte der Button nur `if (item.audioPath)` — auf eine geloeschte Cache-Datei zeigte das aber stillschweigend ins Leere. Jetzt RNFS.exists-Check mit Fallback auf `tts_request` an die Bridge → F5-TTS rendert neu, WAV wandert zurueck in den Cache
|
||||||
|
- [x] **Bridge WebSocket max_size 50 MB**: Python `websockets.connect` hat 1 MiB Default — Stefan's 4MB JPEG (5.78 MB Base64) sprengte das, Bridge-Connection wurde silent gedroppt. f5tts/whisper-bridges hatten max_size schon, nur aria_bridge war vergessen
|
||||||
|
- [x] **Bridge resized Bilder >2 MB serverseitig auf 1568px**: Claude-Vision-API hat ~5 MB Base64-Limit. Galerie-Bilder via `react-native-image-picker` sind clientseitig schon klein, Buroklammer/DocumentPicker reichte das rohe File durch — Claude lieferte leere Antwort. Pillow im Bridge-Container, nur fuer JPEG/PNG/WebP/GIF (PDFs/ZIPs/SVGs unangetastet)
|
||||||
|
- [x] **Bridge `chat:error` liest auch `errorMessage`**: OpenClaw legt bei state=error den Text dort statt in `error` ab → Bridge meldete generisches "[Fehler] Unbekannt", echter Fehler nur in Container-Logs. Plus: `chat:final` ohne text wird jetzt mit Hinweis-Bubble an die App gemeldet (statt stumm), z.B. wenn Vision das Bild silent ablehnt
|
||||||
|
- [x] **Cache-Cleanup beim App-Start** — orphane `aria_tts_*.wav` Files (>5 min) im CachesDirectoryPath werden weggeraeumt, sammeln sich sonst an wenn Sound mid-playback gestoppt wird (Anruf, Mute, Barge-In) und der completion-Callback nicht feuert. Plus neuer Settings-Button "TTS-Cache leeren" mit Live-Groessenanzeige
|
||||||
|
- [x] **Verbose-Logging-Toggle in Settings → Protokoll**: `console.log` global stummschaltbar (warn/error bleiben aktiv) — spart adb-logcat-Speicher wenn alles laeuft
|
||||||
|
- [x] **800 ms-Delay vor Anruf-Auto-Resume**: ARIA's neuer Focus-Request kollidierte sonst mit Spotify's Auto-Resume nach Anruf-Ende. System haengt noch im IN_CALL→NORMAL-Mode-Uebergang, Spotify sieht Loss → Loss und bleibt pausiert. Mit Delay schafft Spotify den Resume-Schritt, dann pausiert ARIA wieder ordnungsgemaess
|
||||||
|
- [x] **Mute-Button = Stop fuer aktuelle Antwort**: vorher startete eine NEUE PCM-Chunk-Sequenz nach Mute-aus die alte Antwort weiter wo sie war (funktionierte 2x, dann nicht mehr weil isFinal schon kam). Jetzt mit `_stoppedMessageId`-Tracking: bei Mute wird die aktive msgId gemerkt, alle weiteren chunks dieser msgId bleiben silent — auch wenn Mute zurueckgenommen wird. Reset bei neuer msgId, neue Antworten spielen normal
|
||||||
|
- [x] **Spotify resumed nach Mute-Stop**: `stopPlayback` released seinen TRANSIENT-Focus (USAGE_ASSISTANT) sauber → Spotify bekommt GAIN-Event und resumed automatisch. Ein zwischenzeitlich eingebauter `kickReleaseMedia` (USAGE_MEDIA + GAIN) verhinderte das Auto-Resume sogar (Spotify interpretierte es als "user-action stopp") — wieder rausgenommen
|
||||||
|
- [x] **ARIA kann Dateien an User zurueckgeben** (PDFs, Bilder, Office-Docs, Markdown, ZIPs, ...): ARIA setzt am Antwort-Ende `[FILE: /shared/uploads/aria_<name>.<ext>]` Marker, Bridge parsed sie raus (TTS liest's nicht vor) und sendet `file_from_aria`-Event ueber RVS. App zeigt Anhang-Bubble + Klick oeffnet via Android-Intent-Picker (`FileOpenerModule`, FileProvider), Diagnostic zeigt Bubble + PDFs/Bilder neuer Tab, andere als Download. Mehrere Marker = mehrere Bubbles, nicht-existente Marker werden mit Hinweis an User gemeldet (statt silent gedroppt)
|
||||||
|
- [x] **External Bilder/Dateien werden serverseitig persistiert**: ARIA laed externe URLs (Wikipedia, Wiki Commons) mit curl runter und gibt sie via `[FILE: ...]`-Marker zurueck — bleibt permanent im Chat auch wenn die Online-Quelle stirbt. System-Prompt instruiert sie das Pattern zu nutzen
|
||||||
|
- [x] **ARIA-Datei-Bubbles ueberleben Browser-Refresh**: Diagnostic-Server parsed beim `load_chat_history` die Marker aus dem OpenClaw-Session-File und schickt `aria_file`-Eintraege mit, sodass die Anhang-Bubbles nach F5 wiederhergestellt werden. Plus: `/shared/uploads/`-Bildpfade werden im History-Render auch als Inline-Image gerendert (vorher nur in live-Bubbles)
|
||||||
|
- [x] **"ARIA reparieren"-Button** in App + Diagnostic: triggert `openclaw doctor --fix` ueber RVS → Bridge → Diagnostic HTTP-API. Fix fuer stuck Runs ohne SSH
|
||||||
|
- [x] **"ARIA hart neu starten"-Button**: docker compose-Restart ueber Docker-Socket-API im Diagnostic-Server. Mit Confirmation in der App, fuer Faelle wo doctor nicht reicht (alive aber haengender Run)
|
||||||
|
- [x] **Auto-Compact nach N Messages**: bei zu langer Session wirft Linux beim Subprocess-spawn E2BIG (Argument list too long, ~128KB-2MB Limit). Bridge zaehlt User-Messages; bei `COMPACT_AFTER_MESSAGES` (env, default 140) werden Sessions geleert + Container neu gestartet, User bekommt Hinweis-Bubble. Plus manueller "🧹 Konversation komprimieren"-Button in App-Settings und Diagnostic
|
||||||
|
- [x] **`[FILE: ...]`-Marker-Filter ueberall in Diagnostic**: Filter direkt in `addChat` damit er fuer alle Code-Pfade greift (chat_final, proxy_result, History-Load, ...) — vorher rutschten Marker als Text durch wenn sie nicht ueber chat_final kamen
|
||||||
|
- [x] **Mehrere `[FILE: ...]`-Marker in einer Antwort**: Bridge zerlegt sauber in mehrere file_from_aria-Events, ARIA muss nicht selbst zwei Antworten posten. Bei nicht-existenten Files erscheint ein User-Hinweis statt silent skip
|
||||||
|
- [x] **Inline-Bilder in Chat-Nachrichten** (App): ``- und plain-`https://image.png`-URLs werden als Image-Vorschau unter dem Text gerendert. Mit `react-native-svg` auch SVG-URLs inline
|
||||||
|
- [x] **SVG-Anhaenge** werden korrekt gerendert: ChatImage-Komponente erkennt `.svg`-Endung und nutzt SvgUri statt Image (RN-Image kann SVG nicht). Vollbild-Modal genauso, mit `preserveAspectRatio="xMidYMid meet"` damit SVGs nicht gestreckt werden
|
||||||
|
- [x] **Pinch-Zoom + Pan im Vollbild-Modal** (App): neue `ZoomableImage`-Komponente, reine RN-Implementation mit PanResponder+Animated, ohne externe Lib. 2-Finger-Pinch 1x..5x, 1-Finger-Pan wenn gezoomt, Doppel-Tap toggelt 1x↔2.5x. Plus ✕-Close-Button damit Tap-to-Close nicht mit Pan-Gesten kollidiert
|
||||||
|
- [x] **ARIA-Abkuerzung ausgeschrieben**: in App → Einstellungen → Ueber und Diagnostic → Einstellungen ist jetzt erklaert: "ARIA — Autonomous Reasoning & Intelligence Assistant"
|
||||||
|
- [x] **`init.sh`** legt fehlende Config-Dateien aus *.example-Vorlagen an — frischer Clone laeuft ohne Anleitung an
|
||||||
|
- [x] **`USER.md` privat**: aus dem Repo genommen (enthielt interne Tool-Liste mit Gitea-URL etc.). Vorlage als `USER.md.example` checked-in, lokales File via `.gitignore` ausgeschlossen
|
||||||
|
|
||||||
|
### App Features
|
||||||
|
|
||||||
- [x] Bildupload funktioniert (Shared Volume /shared/uploads/)
|
- [x] Bildupload funktioniert (Shared Volume /shared/uploads/)
|
||||||
- [x] Sprachnachrichten werden als Text angezeigt (STT → Chat-Bubble)
|
- [x] Sprachnachrichten werden als Text angezeigt (STT → Chat-Bubble)
|
||||||
- [x] Cache leeren + Auto-Download von Anhaengen
|
- [x] Cache leeren + Auto-Download von Anhaengen
|
||||||
@@ -11,13 +135,9 @@
|
|||||||
- [x] Ohr-Button → Gespraechsmodus (Auto-Aufnahme nach ARIA-Antwort)
|
- [x] Ohr-Button → Gespraechsmodus (Auto-Aufnahme nach ARIA-Antwort)
|
||||||
- [x] Play-Button in ARIA-Nachrichten fuer Sprachwiedergabe
|
- [x] Play-Button in ARIA-Nachrichten fuer Sprachwiedergabe
|
||||||
- [x] Chat-Suche in der App (Lupe in Statusleiste)
|
- [x] Chat-Suche in der App (Lupe in Statusleiste)
|
||||||
- [x] Watchdog mit Container-Restart (2min Warnung → 5min doctor --fix → 8min Restart)
|
|
||||||
- [x] Abbrechen-Button im Diagnostic Chat
|
- [x] Abbrechen-Button im Diagnostic Chat
|
||||||
- [x] Nachrichten Backup on-the-fly (/shared/config/chat_backup.jsonl)
|
|
||||||
- [x] Grosse Nachrichten satzweise aufteilen fuer TTS
|
|
||||||
- [x] RVS Nachrichten vom Smartphone gehen durch
|
|
||||||
- [x] Stimmen-Einstellungen (Ramona/Thorsten, Speed pro Stimme — durch XTTS/F5-TTS ersetzt)
|
- [x] Stimmen-Einstellungen (Ramona/Thorsten, Speed pro Stimme — durch XTTS/F5-TTS ersetzt)
|
||||||
- [x] Highlight-Trigger konfigurierbar in Diagnostic
|
- [x] Highlight-Trigger konfigurierbar in Diagnostic (spaeter komplett entfernt — war Piper-Relikt)
|
||||||
- [x] XTTS v2 Integration (Gaming-PC, GPU, Voice Cloning) — durch F5-TTS ersetzt
|
- [x] XTTS v2 Integration (Gaming-PC, GPU, Voice Cloning) — durch F5-TTS ersetzt
|
||||||
- [x] XTTS Voice Cloning (Audio-Samples hochladen, eigene Stimme)
|
- [x] XTTS Voice Cloning (Audio-Samples hochladen, eigene Stimme)
|
||||||
- [x] TTS Engine waehlbar (Piper/XTTS) — Piper raus, XTTS raus, jetzt nur F5-TTS
|
- [x] TTS Engine waehlbar (Piper/XTTS) — Piper raus, XTTS raus, jetzt nur F5-TTS
|
||||||
@@ -25,23 +145,19 @@
|
|||||||
- [x] Auto-Update: APK-Installation via FileProvider
|
- [x] Auto-Update: APK-Installation via FileProvider
|
||||||
- [x] Auto-Update: "Auf Updates pruefen" Button in App-Einstellungen
|
- [x] Auto-Update: "Auf Updates pruefen" Button in App-Einstellungen
|
||||||
- [x] Audio-Queue (sequentielle Wiedergabe, kein Ueberlappen)
|
- [x] Audio-Queue (sequentielle Wiedergabe, kein Ueberlappen)
|
||||||
- [x] Textnachrichten werden von ARIA beantwortet (Bridge chat handler fix)
|
|
||||||
- [x] Mehrere Anhaenge + Text vor dem Senden (Pending-Vorschau)
|
- [x] Mehrere Anhaenge + Text vor dem Senden (Pending-Vorschau)
|
||||||
- [x] Paste-Support fuer Bilder in Diagnostic Chat
|
- [x] Paste-Support fuer Bilder in Diagnostic Chat
|
||||||
- [x] Markdown-Bereinigung fuer TTS (fett, kursiv, code, links, etc.)
|
- [x] Markdown-Bereinigung fuer TTS (fett, kursiv, code, links, etc.)
|
||||||
- [x] SSH Volume read-write fuer Proxy (kein -F Workaround mehr)
|
|
||||||
- [x] Diagnostic: Sessions als Markdown exportieren (Download-Button)
|
- [x] Diagnostic: Sessions als Markdown exportieren (Download-Button)
|
||||||
- [x] Speech Gate: Aufnahme wird verworfen wenn keine Sprache erkannt
|
- [x] Speech Gate: Aufnahme wird verworfen wenn keine Sprache erkannt
|
||||||
- [x] Session-Persistenz: Gewaehlte Session bleibt ueber Container-Restarts erhalten
|
- [x] Session-Persistenz: Gewaehlte Session bleibt ueber Container-Restarts erhalten
|
||||||
- [x] Diagnostic: "ARIA denkt..." bleibt nicht mehr stehen
|
|
||||||
- [x] App: "ARIA denkt..." Indicator + Abbrechen-Button (Bridge spiegelt agent_activity via RVS)
|
|
||||||
- [x] Whisper STT: Model-Auswahl in Diagnostic (tiny/base/small/medium/large-v3), Hot-Reload
|
- [x] Whisper STT: Model-Auswahl in Diagnostic (tiny/base/small/medium/large-v3), Hot-Reload
|
||||||
- [x] App: Audio-Aufnahme explizit 16kHz mono (spart Resample, optimal fuer Whisper)
|
- [x] App: Audio-Aufnahme explizit 16kHz mono (spart Resample, optimal fuer Whisper)
|
||||||
- [x] Streaming TTS: PCM-Stream → AudioTrack MODE_STREAM, keine WAV-Gaps
|
- [x] Streaming TTS: PCM-Stream → AudioTrack MODE_STREAM, keine WAV-Gaps
|
||||||
- [x] Piper komplett entfernt
|
- [x] Piper komplett entfernt
|
||||||
- [x] Gespraechsmodus: Speech-Gate strenger (-28dB / 500ms)
|
- [x] Gespraechsmodus: Speech-Gate strenger (-28dB / 500ms)
|
||||||
- [x] Diagnostic: Archivierte Session-Versionen (.reset.*) angezeigt + exportierbar
|
- [x] Diagnostic: Archivierte Session-Versionen (.reset.*) angezeigt + exportierbar
|
||||||
- [x] tools/export-jsonl-to-md.js: CLI-Konverter fuer Session-JSONL zu Markdown
|
- [x] tools/export-jsonl-to-md.js: CLI-Konverter fuer Session-JSONL zu Markdown (mit OpenClaw raus)
|
||||||
- [x] NO_REPLY-Filter in Bridge + Diagnostic
|
- [x] NO_REPLY-Filter in Bridge + Diagnostic
|
||||||
- [x] Audio-Ducking + Exklusiv-Focus (Kotlin AudioFocusModule)
|
- [x] Audio-Ducking + Exklusiv-Focus (Kotlin AudioFocusModule)
|
||||||
- [x] TTS-Cleanup serverseitig: Code-Bloecke raus, Einheiten ausgeschrieben, Abkuerzungen buchstabiert, URLs zu "ein Link"
|
- [x] TTS-Cleanup serverseitig: Code-Bloecke raus, Einheiten ausgeschrieben, Abkuerzungen buchstabiert, URLs zu "ein Link"
|
||||||
@@ -51,14 +167,11 @@
|
|||||||
- [x] Disk-Voll Banner in Diagnostic: rotes Overlay + copy-baren Cleanup-Befehlen (safe + aggressiv)
|
- [x] Disk-Voll Banner in Diagnostic: rotes Overlay + copy-baren Cleanup-Befehlen (safe + aggressiv)
|
||||||
- [x] cleanup.sh: kombinierter Docker-Aufraeum-Befehl (safe / --full)
|
- [x] cleanup.sh: kombinierter Docker-Aufraeum-Befehl (safe / --full)
|
||||||
- [x] Streaming TTS Pre-Roll: AudioTrack play() startet erst wenn 2.5s gepuffert sind
|
- [x] Streaming TTS Pre-Roll: AudioTrack play() startet erst wenn 2.5s gepuffert sind
|
||||||
- [x] Streaming TTS Stop-Race: Writer wartet auf playbackHeadPosition vor stop()/release() — keine abgeschnittenen Saetze mehr
|
|
||||||
- [x] Leading-Silence (200ms) am Stream-Anfang — AudioTrack faehrt sauber an
|
- [x] Leading-Silence (200ms) am Stream-Anfang — AudioTrack faehrt sauber an
|
||||||
- [x] Pre-Roll-Buffer einstellbar in App-Settings (1.0-6.0s, Default 3.5s)
|
- [x] Pre-Roll-Buffer einstellbar in App-Settings (1.0-6.0s, Default 3.5s)
|
||||||
- [x] Fade-In auf erstem PCM-Chunk (120ms) — versteckt XTTS/F5-TTS Warmup-Glitches
|
- [x] Fade-In auf erstem PCM-Chunk (120ms) — versteckt XTTS/F5-TTS Warmup-Glitches
|
||||||
- [x] Decimal-zu-Worte fuer TTS (0.1 → null komma eins, mit IP-Schutz-Lookahead)
|
- [x] Decimal-zu-Worte fuer TTS (0.1 → null komma eins, mit IP-Schutz-Lookahead)
|
||||||
- [x] Generic Acronym-Buchstabieren (XTTS → X T T S, USB → U S B, ueber expliziter Liste)
|
- [x] Generic Acronym-Buchstabieren (XTTS → X T T S, USB → U S B, ueber expliziter Liste)
|
||||||
- [x] Voice-Auswahl funktioniert wieder: speaker_wav als Basename statt Pfad fuer daswer123 local-Mode
|
|
||||||
- [x] Diagnostic-Voice-Wechsel resettet alle App-lokalen Voice-Overrides via type "config"
|
|
||||||
- [x] voice_preload/voice_ready: Stille Mini-Render bei Voice-Wechsel + Toast/Status "bereit"
|
- [x] voice_preload/voice_ready: Stille Mini-Render bei Voice-Wechsel + Toast/Status "bereit"
|
||||||
- [x] Whisper STT auf die Gamebox ausgelagert (faster-whisper CUDA, float16) — neuer aria-whisper-bridge Container
|
- [x] Whisper STT auf die Gamebox ausgelagert (faster-whisper CUDA, float16) — neuer aria-whisper-bridge Container
|
||||||
- [x] aria-bridge: STT primaer remote (Gamebox), Fallback lokal nach 45s Timeout
|
- [x] aria-bridge: STT primaer remote (Gamebox), Fallback lokal nach 45s Timeout
|
||||||
@@ -66,43 +179,126 @@
|
|||||||
- [x] **F5-TTS ersetzt XTTS komplett** — neuer aria-f5tts-bridge Container, Voice Cloning, satzweises Streaming
|
- [x] **F5-TTS ersetzt XTTS komplett** — neuer aria-f5tts-bridge Container, Voice Cloning, satzweises Streaming
|
||||||
- [x] Voice-Upload mit Whisper-Auto-Transkription — User muss keinen Referenz-Text eintippen
|
- [x] Voice-Upload mit Whisper-Auto-Transkription — User muss keinen Referenz-Text eintippen
|
||||||
- [x] Audio-Pause statt Ducking: Spotify/YouTube pausieren komplett waehrend TTS (TRANSIENT statt MAY_DUCK)
|
- [x] Audio-Pause statt Ducking: Spotify/YouTube pausieren komplett waehrend TTS (TRANSIENT statt MAY_DUCK)
|
||||||
- [x] AudioFocus.release wartet auf echten Playback-Ende — kein Volume-Hochfahren mehr mid-Antwort
|
|
||||||
- [x] VAD-Stille einstellbar in App-Settings (1.0-8.0s, Default 2.8s)
|
- [x] VAD-Stille einstellbar in App-Settings (1.0-8.0s, Default 2.8s)
|
||||||
- [x] MAX_RECORDING auf 120s — laengere Erklaerungen moeglich
|
- [x] MAX_RECORDING auf 120s — laengere Erklaerungen moeglich
|
||||||
- [x] App: Audioausgabe hoert nicht mehr mitten im Satz auf (playbackHeadPosition wait + Stop-Race fix)
|
|
||||||
- [x] F5-TTS: Referenz-WAV-Preprocessing — Loudness-Normalisierung -16 LUFS + Silence-Trim + 10s Clip fuer konsistente Cloning-Quali
|
- [x] F5-TTS: Referenz-WAV-Preprocessing — Loudness-Normalisierung -16 LUFS + Silence-Trim + 10s Clip fuer konsistente Cloning-Quali
|
||||||
- [x] F5-TTS: deutsches Fine-Tune (aihpi/F5-TTS-German, Vocos-Variante) via hf:// Pfad in Diagnostic konfigurierbar
|
- [x] F5-TTS: deutsches Fine-Tune (aihpi/F5-TTS-German, Vocos-Variante) via hf:// Pfad in Diagnostic konfigurierbar
|
||||||
- [x] Whisper transkribiert Voice-Uploads nicht mehr mit hardcoded "small" — aktuelles Modell wird behalten, kein unnoetiger Modell-Swap
|
|
||||||
- [x] RVS/WebSocket maxPayload 50MB: voice_upload mit WAV als base64 sprengt kein Frame-Limit mehr
|
|
||||||
- [x] Dynamischer STT-Timeout in aria-bridge: 300s waehrend whisper-bridge 'loading', 45s wenn 'ready'
|
- [x] Dynamischer STT-Timeout in aria-bridge: 300s waehrend whisper-bridge 'loading', 45s wenn 'ready'
|
||||||
- [x] service_status Broadcasts: f5tts/whisper melden Lade-Status, Banner in Diagnostic (unten rechts) + App (oben)
|
- [x] service_status Broadcasts: f5tts/whisper melden Lade-Status, Banner in Diagnostic (unten rechts) + App (oben)
|
||||||
- [x] config_request Pattern: Bridges fragen beim Connect die aktuelle Voice-Config an, aria-bridge antwortet
|
- [x] config_request Pattern: Bridges fragen beim Connect die aktuelle Voice-Config an, aria-bridge antwortet
|
||||||
- [x] F5-TTS Tuning via Diagnostic (Modell-ID, Checkpoint, cfg_strength, nfe_step) statt ENV-Vars — Hot-Reload bei Modell-Wechsel
|
- [x] F5-TTS Tuning via Diagnostic (Modell-ID, Checkpoint, cfg_strength, nfe_step) statt ENV-Vars — Hot-Reload bei Modell-Wechsel
|
||||||
- [x] Conversation-Window: Gespraechsmodus endet nach X Sekunden Stille (1.0-20.0s, Default 8s, einstellbar in Settings)
|
- [x] Conversation-Window: Gespraechsmodus endet nach X Sekunden Stille (1.0-20.0s, Default 8s, einstellbar in Settings)
|
||||||
- [x] Porcupine Wake-Word-Integration in der App (Built-In Keywords + Custom spaeter, per Geraet einstellbar)
|
- [x] Porcupine Wake-Word-Integration in der App (durch openWakeWord ersetzt)
|
||||||
- [x] HF-Cache als Bind-Mount statt Docker Volume — kein .vhdx-Bloat auf Docker Desktop / Windows
|
- [x] HF-Cache als Bind-Mount statt Docker Volume — kein .vhdx-Bloat auf Docker Desktop / Windows
|
||||||
- [x] cleanup-windows.ps1 / .bat: VHDX-Cleanup via diskpart (ohne Hyper-V) mit Self-Elevation
|
- [x] cleanup-windows.ps1 / .bat: VHDX-Cleanup via diskpart (ohne Hyper-V) mit Self-Elevation
|
||||||
- [x] App Mute-/Auto-Playback-Bug: Closure-Bug geloest (ttsCanPlayRef live-gespiegelt, nicht mehr stale)
|
|
||||||
- [x] App Zombie-Recording: Ohr-aus kill laufende Aufnahme damit der Aufnahme-Button weiter funktioniert
|
|
||||||
- [x] App Text-Rendering: Nachrichten selektierbar + Autolink fuer URLs/E-Mails/Telefonnummern (Browser/Mail/Dialer)
|
- [x] App Text-Rendering: Nachrichten selektierbar + Autolink fuer URLs/E-Mails/Telefonnummern (Browser/Mail/Dialer)
|
||||||
- [x] TTS-Wiedergabegeschwindigkeit pro Geraet einstellbar (Settings → 0.5-2.0x in 0.1-Schritten, Default 1.0)
|
- [x] TTS-Wiedergabegeschwindigkeit pro Geraet einstellbar (Settings → 0.5-2.0x in 0.1-Schritten, Default 1.0)
|
||||||
- [x] Diagnostic: Voice-Preview-Modal (Play-Icon vor Delete-X, Textfeld mit Default, WAV im Browser abspielen)
|
- [x] Diagnostic: Voice-Preview-Modal (Play-Icon vor Delete-X, Textfeld mit Default, WAV im Browser abspielen)
|
||||||
|
- [x] **Wake-Word komplett on-device via openWakeWord (ONNX Runtime)** — Porcupine raus, kein API-Key/keine Lizenzgebuehren mehr. Mitgelieferte Keywords: hey_jarvis, computer, alexa, hey_mycroft, hey_rhasspy
|
||||||
|
- [x] APK ABI-Split auf arm64-v8a — von ~136 MB auf ~35 MB, Auto-Update-Downloads aufs Phone deutlich kleiner
|
||||||
|
- [x] PhoneStateListener: TTS pausiert bei eingehendem Anruf (READ_PHONE_STATE Permission)
|
||||||
|
- [x] **VoIP-Anrufe** (WhatsApp/Signal/Discord/Teams) erkannt via AudioFocus-Loss-Listener + getMode-Polling-Fallback (alle 3s)
|
||||||
|
- [x] **Auto-Resume nach Anruf**: ARIAs unterbrochene Antwort spielt nach dem Auflegen ab der gemerkten Position weiter (Date.now()-Tracking + WAV-Cache, 30s-Wartezeit auf final-Marker bei kurzem Telefonat)
|
||||||
|
- [x] **Neue Frage waehrend Telefonat** ueberschreibt pending Auto-Resume — letzte Antwort gewinnt, alter resumeSound wird gestoppt
|
||||||
|
- [x] **Audio-Ausgabe waehrend aktivem Telefonat** funktioniert (haltAllPlayback nur bei state-Wechsel idle→ringing/offhook, nicht bei offhook→offhook)
|
||||||
|
- [x] **PcmPlaybackFinished-Event** im Native: AudioFocus wird erst released wenn AudioTrack wirklich durch ist (vorher: end()-Cap nach 0.5s → Spotify spielte 32s parallel zu ARIA)
|
||||||
|
- [x] **APK-Cache-Cleanup robuster**: durchsucht jetzt CachesDirectoryPath + DocumentDirectoryPath + ExternalCachesDirectoryPath + ExternalDirectoryPath statt nur Caches. Plus manueller Button "Update-Cache leeren" in Settings → Speicher mit Live-Anzeige der aktuellen Groesse
|
||||||
|
- [x] Diagnostic-Chat: bubblige Formatierung, mehrzeiliges Eingabefeld (textarea, Enter sendet, Shift+Enter neue Zeile)
|
||||||
|
- [x] Adaptive VAD-Schwelle: Baseline aus den ersten 500ms Mic-Pegel, Stille = baseline+6dB / Sprache = baseline+12dB
|
||||||
|
- [x] Max-Aufnahmedauer konfigurierbar in Settings (1-30 min, Default 5 min) — laengere Diktate moeglich
|
||||||
|
- [x] Barge-In: User kann ARIA waehrend Antwort/Tool-Use unterbrechen, alte Aktivitaet wird abgebrochen, Bridge gibt aria-core einen Kontext-Hint dass es eine Korrektur ist
|
||||||
|
- [x] Settings-Sub-Screens: 8 Kategorien (Verbindung, Allgemein, Spracheingabe, Wake-Word, Sprachausgabe, Speicher, Protokoll, Ueber) statt langer Liste
|
||||||
|
- [x] **Bereit-Sound (Airplane Ding-Dong) wenn Mikro nach Wake-Word offen** — akustische Bestaetigung statt nur Toast. Toggle in Settings → Wake-Word, default aktiv
|
||||||
|
- [x] **Wake-Word parallel zu TTS** mit AcousticEchoCanceler: User sagt "Computer" waehrend ARIA spricht → TTS verstummt sofort, neue Aufnahme startet
|
||||||
|
- [x] **GPS-Position mitsenden**: Toggle in Settings → Allgemein → Standort, persistiert in AsyncStorage. Wenn aktiv wird lat/lon mit jeder chat/audio-Message mitgegeben. Bridge prefixed den Text fuer aria-core mit GPS-Hint (mit Anweisung dass die Position nur bei Bedarf erwaehnt wird)
|
||||||
|
- [x] **Background Audio Service**: TTS, Wake-Word-Lauschen UND Aufnahme laufen auch bei minimierter App weiter. Foreground-Service mit foregroundServiceType=mediaPlayback|microphone, persistente Notification mit dynamischem Text ("ARIA spricht" / "ARIA hoert zu" / "ARIA bereit")
|
||||||
|
|
||||||
|
### Infrastruktur
|
||||||
|
|
||||||
|
- [x] Watchdog mit Container-Restart (2min Warnung → 5min doctor --fix → 8min Restart)
|
||||||
|
- [x] Nachrichten Backup on-the-fly (/shared/config/chat_backup.jsonl)
|
||||||
|
- [x] RVS Nachrichten vom Smartphone gehen durch
|
||||||
|
- [x] SSH Volume read-write fuer Proxy (kein -F Workaround mehr)
|
||||||
|
|
||||||
|
## Brain — Phase B (komplett)
|
||||||
|
|
||||||
|
Der grosse Refactor weg von OpenClaw zu eigener Brain-Architektur — alle 4 Punkte
|
||||||
|
durch. ARIA hat jetzt eigenes Gedaechtnis (Vector-DB), eigenen Loop, eigene
|
||||||
|
Skills mit Tool-Use.
|
||||||
|
|
||||||
|
### Infrastruktur
|
||||||
|
|
||||||
|
- [x] aria-brain Container (FastAPI + Qdrant + sentence-transformers, MiniLM multilingual)
|
||||||
|
- [x] aria-core (OpenClaw) abgerissen — Tag `v0.1.2.0` als Archiv
|
||||||
|
- [x] docker-compose komplett umgebaut: brain + qdrant + bridge + diagnostic + proxy
|
||||||
|
- [x] Voice-Bridge: aria-core-Logik raus (doctor_fix, aria_restart, compact_after) → durch Brain-HTTP-Call ersetzt
|
||||||
|
- [x] Sprachmodell-Setting in runtime.json (brainModel) — Diagnostic kann Modell live wechseln, Brain-Restart noetig
|
||||||
|
|
||||||
|
### Memory / Vector-DB
|
||||||
|
|
||||||
|
- [x] Memory CRUD via Diagnostic-Gehirn-Tab (Add/Edit/Delete + Suche + Type/Pinned-Filter)
|
||||||
|
- [x] **Migration aus brain-import/** (Phase B Punkt 2) — Parser fuer AGENT.md/USER.md/TOOLING.md, atomare Punkte mit migration_key (idempotent)
|
||||||
|
- [x] **Bootstrap-Snapshot** (Phase B Punkt 2) — Export/Import nur pinned Memories als JSON
|
||||||
|
- [x] **Komplettes Gehirn** Export/Import als tar.gz (Memories + Skills + Qdrant)
|
||||||
|
|
||||||
|
### Conversation-Loop (Phase B Punkt 3)
|
||||||
|
|
||||||
|
- [x] Single-Chat UI + Rolling Window (50 Turns)
|
||||||
|
- [x] Memory-Destillat: bei >60 Turns automatisch 30 aelteste → fact-Memories via Claude-Call
|
||||||
|
- [x] Hot Memory (pinned) + Cold Memory (Top-5 semantisch) im System-Prompt
|
||||||
|
- [x] Manueller Destillat-Trigger + Konversation-Reset (Brain + Diagnostic chat_backup gleichzeitig)
|
||||||
|
- [x] Bridge schreibt chat_backup.jsonl bei jedem Turn (User + ARIA + ARIA-Files)
|
||||||
|
- [x] App-Chat-Sync: kompletter Server-Sync bei Reconnect (Server = Source of Truth). Wenn Server leer → App leert auch. Lokal-only Bubbles (Skill-Notifications, laufende Voice ohne STT) bleiben erhalten. Plus chat_cleared Live-Update wenn Diagnostic die History wiped.
|
||||||
|
|
||||||
|
### Skills-System (Phase B Punkt 4)
|
||||||
|
|
||||||
|
- [x] Python-only Skills (local-venv pro Skill, eigene pip-Pakete)
|
||||||
|
- [x] Tool-Use im Brain: skill_create als Meta-Tool, dynamische run_<skill> pro aktivem Skill
|
||||||
|
- [x] Harte Schwelle dokumentiert: pip-Install → IMMER Skill (Brain hat keinen Persistenz ausser /data/skills/)
|
||||||
|
- [x] Diagnostic Skills-Tab: Liste, README, Logs pro Run, Activate/Deactivate/Delete, Export/Import als tar.gz
|
||||||
|
- [x] skill_created Live-Notification: gelbe Bubble in App + Diagnostic sobald ARIA selbst einen Skill anlegt
|
||||||
|
|
||||||
|
### Triggers-System (Phase B Punkt 5)
|
||||||
|
|
||||||
|
- [x] **Filesystem-Layer** unter `/data/triggers/<name>.json` + `logs/<name>.jsonl` pro Trigger
|
||||||
|
- [x] **Timer** (one-shot, ISO-Timestamp) — "erinner mich in 10 Minuten an X" → ARIA legt via `trigger_timer`-Tool an, Background-Loop feuert zum Stichzeitpunkt einmal
|
||||||
|
- [x] **Watcher** (recurring) — feuert wenn `condition` true wird, mit Throttle (min_seconds_between_fires) gegen Spam. Checks alle 30s
|
||||||
|
- [x] **Sicherer Condition-Parser** via Python `ast`-Module (Whitelist statt `eval`): nur `<` `>` `<=` `>=` `==` `!=` `and` `or` `not`, Konstanten + Variablennamen aus Whitelist
|
||||||
|
- [x] **Built-in Variablen**: `disk_free_gb`, `disk_free_pct`, `ram_free_mb`, `cpu_load_1min`, `uptime_sec`, `hour_of_day`, `minute_of_hour`, `day_of_month`, `month`, `year`, `day_of_week`, `is_weekend`, `unix_timestamp`, `current_lat`, `current_lon`, `location_age_sec`, `last_user_message_ago_sec`, `memory_count`, `pinned_count`, `rvs_connected`
|
||||||
|
- [x] **near(lat, lon, radius_m) Funktion** im Parser (Haversine) — GPS-Geofencing fuer Blitzer-Warner / Ankunft-Erinnerungen
|
||||||
|
- [x] **Background-Loop** im Brain-Container (Lifespan async task): laeuft alle 30s, prueft alle aktiven Trigger, ruft bei Match `agent.chat(prompt, source="trigger")` mit System-Praefix → ARIA reagiert wie auf eine Frage von Stefan, kann TTS sprechen / Skills starten / weitere Trigger anlegen
|
||||||
|
- [x] **Diagnostic Trigger-Tab**: Liste aktiver Trigger mit Logs, Anlegen-Modal mit Type-Dropdown, Live-Anzeige aller verfuegbaren Variablen + Funktionen, Beispiele
|
||||||
|
- [x] **App Live-Notification**: `trigger_created`-Bubble (gelb) sobald ARIA selbst einen Trigger anlegt — User sieht sofort dass die Bitte angekommen ist
|
||||||
|
- [x] **GPS-Tracking via App** (`@react-native-community/geolocation` watchPosition, distanceFilter 30m, interval 15s) — Singleton-Service in `gpsTracking.ts`, Toggle in Settings → Standort, persistiert AsyncStorage, Restore beim App-Start
|
||||||
|
- [x] **`request_location_tracking`-Tool**: ARIA kann das Tracking via `location_tracking`-Event an-/ausschalten — Bridge forwarded an App, App startet/stoppt watchPosition. ARIA tut das automatisch wenn sie einen Watcher mit `near()` anlegt
|
||||||
|
- [x] **`location_update`-Forwarding**: App schickt alle 15s/30m ein `location_update {lat,lon}`, Bridge persistiert in `/shared/state/location.json`, Watcher liest beim Check
|
||||||
|
- [x] **Activity-Persistenz**: `/shared/state/activity.json` traegt User-Message-Zeitstempel, damit `last_user_message_ago_sec` als Variable verfuegbar ist
|
||||||
|
- [x] **`trigger_cancel`** + **`trigger_list`** als Tools — ARIA kann eigene Trigger verwalten
|
||||||
|
- [x] **Triggers-Block im System-Prompt**: aktive Trigger + verfuegbare Variablen + Funktionen werden bei jedem Chat-Turn injiziert, dazu Hinweis dass GPS-Watcher `request_location_tracking` mit-aufrufen sollen
|
||||||
|
|
||||||
|
### Diagnostic / App Features (drumherum)
|
||||||
|
|
||||||
|
- [x] Datei-Manager (Diagnostic + App-Modal): /shared/uploads/ verwalten, Multi-Select + Select-All + Bulk-Download als ZIP + Bulk-Delete
|
||||||
|
- [x] Wipe-All-Button (Memory + Stimmen + Settings)
|
||||||
|
- [x] Voice Export/Import pro Stimme (Diagnostic + XTTS-Bridge auf Gamebox)
|
||||||
|
- [x] F5/Whisper-Settings als JSON-Bundle Export/Import
|
||||||
|
- [x] App Chat-Suche umgebaut: Highlight + Next/Prev statt Filter
|
||||||
|
- [x] App Pinch-Zoom in Bildern rewriten (Multi-Touch-Race-Bugs)
|
||||||
|
- [x] Info-Buttons mit Modal-Erklaerungen im Gehirn-Tab
|
||||||
|
- [x] Token/Call-Metrics + Subscription-Quota-Tracking: pro Claude-Call ein Log-Eintrag mit Token-Schaetzung (chars/4). Gehirn-Tab zeigt 1h/5h/24h/30d-Aggregat + Progress-Bar gegen Plan-Limit (Pro=45/5h, Max 5x=225/5h, Max 20x=900/5h, Custom). Warn-Schwelle 80%, kritisch 90%.
|
||||||
|
|
||||||
## Offen
|
## Offen
|
||||||
|
|
||||||
### Bugs
|
|
||||||
- [ ] App: Wake-Word "jarvis" triggert nicht zuverlaessig (Porcupine-Debugging via ADB-Logcat ausstehend)
|
|
||||||
- [ ] App: Stuerzt beim Lauschen ab, eventuell bei Nebengeraeuschen (Porcupine + Mic-Race, errorCallback haelt's jetzt zurueck — Dauertest ausstehend)
|
|
||||||
|
|
||||||
### App Features
|
### App Features
|
||||||
- [ ] Chat-History zuverlaessiger laden (AsyncStorage Race Condition)
|
- [ ] Chat-History zuverlaessiger laden (AsyncStorage Race Condition)
|
||||||
- [ ] Background Audio Service (TTS auch bei minimierter App)
|
- [ ] Custom-Wake-Word-Upload via Diagnostic (eigene .onnx-Files ohne App-Rebuild)
|
||||||
|
|
||||||
### Architektur
|
### Architektur
|
||||||
- [ ] Bilder: Claude Vision direkt nutzen (aktuell nur Dateipfad an ARIA)
|
- [ ] Bilder: Claude Vision direkt nutzen (aktuell nur Dateipfad an ARIA)
|
||||||
- [ ] Auto-Compacting und Memory/Brain Verwaltung (SQLite?)
|
|
||||||
- [ ] Diagnostic: System-Info Tab (Container-Status, Disk, RAM, CPU)
|
- [ ] Diagnostic: System-Info Tab (Container-Status, Disk, RAM, CPU)
|
||||||
- [ ] RVS Zombie-Connections endgueltig loesen
|
- [ ] RVS Zombie-Connections endgueltig loesen
|
||||||
- [ ] Alle .env-Variablen ueber Diagnostic konfigurierbar machen (Fallback .env bleibt fuer initialen Bootstrap)
|
|
||||||
- [ ] Gamebox: kleine Web-Oberflaeche fuer Credentials/Server-Config oder zentral aus Diagnostic per RVS push
|
- [ ] Gamebox: kleine Web-Oberflaeche fuer Credentials/Server-Config oder zentral aus Diagnostic per RVS push
|
||||||
- [ ] Root-Cause OpenClaw Session-Reset: Herausfinden warum Sessions beim ersten chat.send nach Container-Restart verworfen werden
|
- [ ] Erste Skills bauen lassen (yt-dlp, pdf-extract, image-resize, etc.) — durch normale Anfragen, ARIA legt sie selbst an
|
||||||
|
- [ ] Tool-Use-Verifikation: Live-Test ob claude-max-api-proxy `tools` und `tool_calls` sauber durchreicht
|
||||||
|
- [ ] Heartbeat (periodische Selbst-Checks)
|
||||||
|
- [ ] Lokales LLM als Waechter (Triage vor Claude-Call)
|
||||||
|
|||||||
@@ -18,6 +18,18 @@ const ALLOWED_TYPES = new Set([
|
|||||||
"update_check", "update_available", "update_download", "update_data",
|
"update_check", "update_available", "update_download", "update_data",
|
||||||
"agent_activity", "cancel_request",
|
"agent_activity", "cancel_request",
|
||||||
"audio_pcm",
|
"audio_pcm",
|
||||||
|
"file_from_aria",
|
||||||
|
"container_restart",
|
||||||
|
"file_list_request", "file_list_response",
|
||||||
|
"file_delete_request", "file_deleted",
|
||||||
|
"xtts_export_voice", "xtts_voice_exported",
|
||||||
|
"xtts_import_voice", "xtts_voice_imported",
|
||||||
|
"skill_created",
|
||||||
|
"trigger_created",
|
||||||
|
"location_update", "location_tracking",
|
||||||
|
"chat_history_request", "chat_history_response", "chat_cleared",
|
||||||
|
"file_delete_batch_request", "file_delete_batch_response",
|
||||||
|
"file_zip_request", "file_zip_response",
|
||||||
"xtts_delete_voice",
|
"xtts_delete_voice",
|
||||||
"voice_preload", "voice_ready",
|
"voice_preload", "voice_ready",
|
||||||
"stt_request", "stt_response",
|
"stt_request", "stt_response",
|
||||||
|
|||||||
@@ -1,74 +0,0 @@
|
|||||||
#!/usr/bin/env node
|
|
||||||
/**
|
|
||||||
* Exportiert ein OpenClaw Session-JSONL (auch .reset.*) als Markdown.
|
|
||||||
*
|
|
||||||
* Nutzung:
|
|
||||||
* node export-jsonl-to-md.js <input.jsonl> [output.md]
|
|
||||||
*
|
|
||||||
* Oder direkt aus dem aria-core Container:
|
|
||||||
* docker exec aria-core cat /home/node/.openclaw/agents/main/sessions/<ID>.jsonl.reset.<TS> \
|
|
||||||
* | node export-jsonl-to-md.js - > output.md
|
|
||||||
*/
|
|
||||||
|
|
||||||
const fs = require("fs");
|
|
||||||
|
|
||||||
const inputArg = process.argv[2];
|
|
||||||
const outputArg = process.argv[3];
|
|
||||||
|
|
||||||
if (!inputArg) {
|
|
||||||
console.error("Usage: export-jsonl-to-md.js <input.jsonl|-> [output.md]");
|
|
||||||
process.exit(1);
|
|
||||||
}
|
|
||||||
|
|
||||||
const raw = inputArg === "-" ? fs.readFileSync(0, "utf-8") : fs.readFileSync(inputArg, "utf-8");
|
|
||||||
const lines = raw.split("\n").filter(l => l.trim());
|
|
||||||
|
|
||||||
const blocks = [];
|
|
||||||
for (const line of lines) {
|
|
||||||
let obj;
|
|
||||||
try { obj = JSON.parse(line); } catch { continue; }
|
|
||||||
if (obj.type !== "message" || !obj.message) continue;
|
|
||||||
const role = obj.message.role;
|
|
||||||
if (role !== "user" && role !== "assistant") continue;
|
|
||||||
|
|
||||||
let text = "";
|
|
||||||
const content = obj.message.content;
|
|
||||||
if (typeof content === "string") text = content;
|
|
||||||
else if (Array.isArray(content)) text = content.filter(c => c.type === "text").map(c => c.text || "").join("\n");
|
|
||||||
if (!text) continue;
|
|
||||||
|
|
||||||
if (role === "user") {
|
|
||||||
text = text.replace(/^Sender \(untrusted metadata\):[\s\S]*?```[\s\S]*?```\s*\n*/m, "").trim();
|
|
||||||
text = text.replace(/^\[.*?\]\s*/, "").trim();
|
|
||||||
} else {
|
|
||||||
text = text.replace(/^\[\[reply_to_\w+\]\]\s*/g, "").trim();
|
|
||||||
}
|
|
||||||
if (!text) continue;
|
|
||||||
|
|
||||||
const ts = obj.message.timestamp || obj.timestamp || 0;
|
|
||||||
const when = ts ? new Date(ts).toISOString().replace("T", " ").slice(0, 19) : "";
|
|
||||||
const heading = role === "user" ? "## 🧑 User" : "## 🤖 ARIA";
|
|
||||||
blocks.push(`${heading}${when ? ` — ${when}` : ""}\n\n${text}`);
|
|
||||||
}
|
|
||||||
|
|
||||||
const exportedAt = new Date().toISOString().replace("T", " ").slice(0, 19);
|
|
||||||
const title = inputArg === "-" ? "Session" : inputArg.split("/").pop().replace(/\.jsonl.*/, "");
|
|
||||||
const md = [
|
|
||||||
`# Session: ${title}`,
|
|
||||||
``,
|
|
||||||
`Exportiert: ${exportedAt} `,
|
|
||||||
`Quelle: ${inputArg === "-" ? "stdin" : inputArg}`,
|
|
||||||
`Nachrichten: ${blocks.length}`,
|
|
||||||
``,
|
|
||||||
`---`,
|
|
||||||
``,
|
|
||||||
blocks.join("\n\n---\n\n"),
|
|
||||||
``,
|
|
||||||
].join("\n");
|
|
||||||
|
|
||||||
if (outputArg) {
|
|
||||||
fs.writeFileSync(outputArg, md);
|
|
||||||
console.error(`OK: ${blocks.length} Nachrichten → ${outputArg}`);
|
|
||||||
} else {
|
|
||||||
process.stdout.write(md);
|
|
||||||
}
|
|
||||||
@@ -661,6 +661,76 @@ async def handle_delete_voice(ws, payload: dict) -> None:
|
|||||||
logger.exception("handle_delete_voice Fehler")
|
logger.exception("handle_delete_voice Fehler")
|
||||||
|
|
||||||
|
|
||||||
|
async def handle_export_voice(ws, payload: dict) -> None:
|
||||||
|
"""Packt eine Stimme (.wav + .txt) als tar.gz und sendet sie base64 zurueck."""
|
||||||
|
name = (payload.get("name") or "").strip()
|
||||||
|
if not name:
|
||||||
|
await _send(ws, "xtts_voice_exported", {"ok": False, "error": "name fehlt"})
|
||||||
|
return
|
||||||
|
try:
|
||||||
|
wav, txt = voice_paths(name)
|
||||||
|
if not wav.exists():
|
||||||
|
await _send(ws, "xtts_voice_exported", {"ok": False, "name": name, "error": "Stimme nicht gefunden"})
|
||||||
|
return
|
||||||
|
import io, tarfile
|
||||||
|
buf = io.BytesIO()
|
||||||
|
with tarfile.open(fileobj=buf, mode="w:gz") as tar:
|
||||||
|
tar.add(wav, arcname=wav.name)
|
||||||
|
if txt.exists():
|
||||||
|
tar.add(txt, arcname=txt.name)
|
||||||
|
data = base64.b64encode(buf.getvalue()).decode("ascii")
|
||||||
|
logger.info("Voice exportiert: %s (%d KB tar.gz)", name, len(buf.getvalue()) // 1024)
|
||||||
|
await _send(ws, "xtts_voice_exported", {"ok": True, "name": name, "data": data})
|
||||||
|
except Exception as e:
|
||||||
|
logger.exception("handle_export_voice Fehler")
|
||||||
|
await _send(ws, "xtts_voice_exported", {"ok": False, "name": name, "error": str(e)[:200]})
|
||||||
|
|
||||||
|
|
||||||
|
async def handle_import_voice(ws, payload: dict) -> None:
|
||||||
|
"""Empfaengt eine tar.gz mit <name>.wav (+ optional <name>.txt) und legt
|
||||||
|
sie in VOICES_DIR ab. Ueberschreibt bestehende Stimme gleichen Namens."""
|
||||||
|
name = (payload.get("name") or "").strip()
|
||||||
|
data_b64 = payload.get("data") or ""
|
||||||
|
if not name or not data_b64:
|
||||||
|
await _send(ws, "xtts_voice_imported", {"ok": False, "error": "name/data fehlt"})
|
||||||
|
return
|
||||||
|
try:
|
||||||
|
import io, tarfile
|
||||||
|
VOICES_DIR.mkdir(parents=True, exist_ok=True)
|
||||||
|
safe = sanitize_voice_name(name)
|
||||||
|
data = base64.b64decode(data_b64)
|
||||||
|
extracted_wav = False
|
||||||
|
with tarfile.open(fileobj=io.BytesIO(data), mode="r:gz") as tar:
|
||||||
|
for member in tar.getmembers():
|
||||||
|
if not member.isfile():
|
||||||
|
continue
|
||||||
|
base = Path(member.name).name # Path-Traversal verhindern
|
||||||
|
if base.lower().endswith(".wav"):
|
||||||
|
target = VOICES_DIR / f"{safe}.wav"
|
||||||
|
f = tar.extractfile(member)
|
||||||
|
if f is None:
|
||||||
|
continue
|
||||||
|
with open(target, "wb") as out:
|
||||||
|
out.write(f.read())
|
||||||
|
extracted_wav = True
|
||||||
|
elif base.lower().endswith(".txt"):
|
||||||
|
target = VOICES_DIR / f"{safe}.txt"
|
||||||
|
f = tar.extractfile(member)
|
||||||
|
if f is None:
|
||||||
|
continue
|
||||||
|
with open(target, "wb") as out:
|
||||||
|
out.write(f.read())
|
||||||
|
if not extracted_wav:
|
||||||
|
await _send(ws, "xtts_voice_imported", {"ok": False, "name": name, "error": "Kein .wav im Archiv"})
|
||||||
|
return
|
||||||
|
logger.info("Voice importiert: %s", name)
|
||||||
|
await _send(ws, "xtts_voice_imported", {"ok": True, "name": name})
|
||||||
|
await handle_list_voices(ws)
|
||||||
|
except Exception as e:
|
||||||
|
logger.exception("handle_import_voice Fehler")
|
||||||
|
await _send(ws, "xtts_voice_imported", {"ok": False, "name": name, "error": str(e)[:200]})
|
||||||
|
|
||||||
|
|
||||||
# Letzte diagnostisch-gesetzte Voice (verhindert Endlos-Preload bei jedem config)
|
# Letzte diagnostisch-gesetzte Voice (verhindert Endlos-Preload bei jedem config)
|
||||||
_last_diag_voice = ""
|
_last_diag_voice = ""
|
||||||
|
|
||||||
@@ -781,6 +851,10 @@ async def run_loop(runner: F5Runner) -> None:
|
|||||||
asyncio.create_task(handle_list_voices(ws))
|
asyncio.create_task(handle_list_voices(ws))
|
||||||
elif mtype == "xtts_delete_voice":
|
elif mtype == "xtts_delete_voice":
|
||||||
asyncio.create_task(handle_delete_voice(ws, payload))
|
asyncio.create_task(handle_delete_voice(ws, payload))
|
||||||
|
elif mtype == "xtts_export_voice":
|
||||||
|
asyncio.create_task(handle_export_voice(ws, payload))
|
||||||
|
elif mtype == "xtts_import_voice":
|
||||||
|
asyncio.create_task(handle_import_voice(ws, payload))
|
||||||
elif mtype == "voice_preload":
|
elif mtype == "voice_preload":
|
||||||
asyncio.create_task(handle_voice_preload(ws, payload, runner))
|
asyncio.create_task(handle_voice_preload(ws, payload, runner))
|
||||||
elif mtype == "stt_response":
|
elif mtype == "stt_response":
|
||||||
|
|||||||
Reference in New Issue
Block a user