Compare commits
428 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| 415706036b | |||
| e2dd47255e | |||
| 3497aa23f8 | |||
| 8491fb2af7 | |||
| f61864282e | |||
| b2f7d6dda2 | |||
| eeedcc4781 | |||
| 5cf8cab5bd | |||
| 3ae9e19524 | |||
| 0ec4b00879 | |||
| b6b4b1b4d9 | |||
| 950a9d009c | |||
| 693542ef19 | |||
| d12f356ebe | |||
| 7b55d6a91f | |||
| aa077f60e6 | |||
| 094bd6e4f1 | |||
| 5b0b5eeac6 | |||
| 96a1f90ac3 | |||
| bfa06d78a7 | |||
| d16dcd34cc | |||
| dc2f4eb6d2 | |||
| 0f9a029269 | |||
| 70d1500096 | |||
| d0cb7acd10 | |||
| 0b58feee1e | |||
| 8be34e7284 | |||
| b56cef6298 | |||
| 0d203af8fb | |||
| 0468d0e603 | |||
| 7cfc2ba058 | |||
| da795d14f5 | |||
| d60c7e9110 | |||
| 83c99a5e65 | |||
| e438bb11ff | |||
| 8b4f75bf91 | |||
| d7e7386954 | |||
| 2100c64b91 | |||
| 74ebf59c6f | |||
| 53b49eacad | |||
| 0f11d23c75 | |||
| 311030bdaa | |||
| 1e05c66baa | |||
| 4082a6bf2a | |||
| 3485642b3e | |||
| 1240ae3829 | |||
| 2dd4d38dce | |||
| 7f862ce1f4 | |||
| 528fe97b59 | |||
| 3483d1bfce | |||
| 158423c155 | |||
| 087e91dca1 | |||
| 2de4cbc00f | |||
| 03fc465057 | |||
| b696b47feb | |||
| 6aae565541 | |||
| 214bd218a0 | |||
| 2afeee29ee | |||
| c8dee4c416 | |||
| f49f3c3b08 | |||
| c4bbb06710 | |||
| 4411cc4fff | |||
| 24a91887ef | |||
| 4e62b2919f | |||
| fa774156fe | |||
| 3b19f05c5b | |||
| fc3ecaacca | |||
| 08857093b5 | |||
| 62018b3e51 | |||
| 89e3a195a3 | |||
| f023ba0ac5 | |||
| a0570ef8f7 | |||
| facde1fef7 | |||
| 38106a2096 | |||
| a476afb311 | |||
| db4c7b9b72 | |||
| 3bc490b485 | |||
| dd6d70c46e | |||
| b1eaf42fef | |||
| fb9e5dcd10 | |||
| f95e71463f | |||
| 1088bff43d | |||
| cad68db2a2 | |||
| 50b10c8ac0 | |||
| a8b586ec92 | |||
| 632e1e4fa1 | |||
| 7e12816ebd | |||
| 8f64f8fb30 | |||
| b3ff3991c4 | |||
| a4ea387c98 | |||
| 68fbf74a23 | |||
| b857f778e9 | |||
| 31aa82b68c | |||
| de8eeb69e2 | |||
| f5970ce700 | |||
| ef1a4436ca | |||
| 981779cd9e | |||
| 3dcd2ae0b4 | |||
| 2750b867a3 | |||
| f6424add6c | |||
| 2dfd21d1d0 | |||
| 9d9ddc730b | |||
| 77ccee8331 | |||
| 175dcdf225 | |||
| 1549e9cd4f | |||
| 910e74b497 | |||
| 160c5c34b6 | |||
| a6638c0108 | |||
| 43c21d3ddc | |||
| b73c6c346e | |||
| b91ddc5bdf | |||
| 7d08c06720 | |||
| f066a2a555 | |||
| b55b0e7c42 | |||
| 70f806ef80 | |||
| 0773d9496d | |||
| 1a4857ed62 | |||
| 962d814318 | |||
| 9276a92c83 | |||
| d16896c4b4 | |||
| 20050d4077 | |||
| 79760d1b2e | |||
| 13f1103604 | |||
| 73b7a76ea8 | |||
| 17f3d8870e | |||
| 4feaacc7e4 | |||
| af7b2674f3 | |||
| 97442198ec | |||
| e3e841f2ab | |||
| 33185de42b | |||
| dbe547d4ea | |||
| 1a982c0d45 | |||
| dfba5ceb1f | |||
| 1a6f633836 | |||
| 7f7db100af | |||
| d646e9d58e | |||
| bef59ba134 | |||
| dbebfd44ff | |||
| 4d0b9e0d78 | |||
| 0c43a18402 | |||
| 5bdcc3c65b | |||
| 52795530f9 | |||
| 2eb0b4df90 | |||
| 0c18090351 | |||
| d6b54d3247 | |||
| ead28cf09a | |||
| f682aad4ff | |||
| e0c1a4bcd5 | |||
| a648dad96d | |||
| da5579038e | |||
| 4ba48940b9 | |||
| 568ef9ed10 | |||
| 7682a0ce58 | |||
| 3ca834e633 | |||
| 55ef207454 | |||
| 6651f5937d | |||
| e9e7dd804f | |||
| ec9530f17f | |||
| 97cb7be313 | |||
| 77e927ffcd | |||
| a9a87f12df | |||
| 2a56ac0290 | |||
| edc65ce645 | |||
| d7efaf93b3 | |||
| 31ff20c846 | |||
| 406f4cb3cc | |||
| fa0667088a | |||
| f55329706e | |||
| 6c7fd1d0e3 | |||
| 9d8db111ac | |||
| 482cb6ace3 | |||
| 69c1c49a7d | |||
| b1ccf29295 | |||
| 4cd9faece2 | |||
| fec8aa977b | |||
| 20123de827 | |||
| 8761d1a1b7 | |||
| abc5b971f4 | |||
| b588dd7e3b | |||
| 309df9d851 | |||
| f2e643d1fb | |||
| 6ac374621c | |||
| efbd306597 | |||
| 4454613a98 | |||
| 55cfb752a2 | |||
| a4d3449e3a | |||
| 44d2c6b4fe | |||
| 0309c95aa5 | |||
| 2aa2cc70c9 | |||
| 9d0776c819 | |||
| f031fa159e | |||
| be373466a3 | |||
| bbf9aed3ba | |||
| 745b4a07c0 | |||
| 23ca815cb2 | |||
| cc3fac8142 | |||
| cd89e36ec2 | |||
| f5b4285d15 | |||
| 248e7c9ae4 | |||
| 7058cc8d8d | |||
| 7919489543 | |||
| feac7f2479 | |||
| b80b813703 | |||
| e7bb6c37cb | |||
| d146ca92c4 | |||
| fd95af2c40 | |||
| 9e12e0001c | |||
| 1d34143be5 | |||
| 0fc11e33c8 | |||
| dae603541b | |||
| 87b4cd305c | |||
| 190352820c | |||
| 2264f4e3bc | |||
| 58fd8721e3 | |||
| 4f494daffb | |||
| 958c8d6fc6 | |||
| 5ba89c7191 | |||
| b373f915b5 | |||
| 7748834a0f | |||
| 8b52f4c92b | |||
| dc20570f6d | |||
| 744a27cfd1 | |||
| 37c5f6c368 | |||
| a361015ff4 | |||
| d83b555209 | |||
| a029267d9d | |||
| 8ba6a71a49 | |||
| 2f625572fc | |||
| ac56916eb0 | |||
| ae08a5051c | |||
| d372cd638e | |||
| 60c5cb7e59 | |||
| 607a4c9ff8 | |||
| 4ea16cfa8f | |||
| 6ce9880bc0 | |||
| 187ffad7ee | |||
| 467f95424e | |||
| c1a5518fb7 | |||
| 22fa4b3ccf | |||
| 1b8a51aad0 | |||
| 578ade3544 | |||
| ed2f1bb5ee | |||
| 0a04972455 | |||
| 2a4379eb64 | |||
| e64df23bb7 | |||
| 576ae925dd | |||
| e170991222 | |||
| a1343ee18f | |||
| b2d3c935d8 | |||
| 49089eee4b | |||
| e544992c9f | |||
| 97a1a3089a | |||
| 64f18e97a0 | |||
| 9cbea27455 | |||
| c8881f9e4d | |||
| 028e3b2240 | |||
| c042f27106 | |||
| 4ceadf8be5 | |||
| ddd30b3059 | |||
| 6c8ba5fe2d | |||
| 32ddac002f | |||
| bbbe69d928 | |||
| 23c39d5bba | |||
| 5328dc8595 | |||
| 0c03b4f161 | |||
| 31fe70bab5 | |||
| 39251b3d32 | |||
| 0623de32a0 | |||
| cd5e6e7ee6 | |||
| ee3e0a0af6 | |||
| 0783b1b99d | |||
| 5492c7a46f | |||
| 4cbe184faa | |||
| 647a1cb726 | |||
| 73263b69a6 | |||
| c62ceafdc2 | |||
| 9b5a35cb4a | |||
| 5ac1a0a522 | |||
| a28b46a809 | |||
| 59c8d36a3d | |||
| 79ba7b8487 | |||
| ba62cec78c | |||
| f15b3f583f | |||
| 402bddc18a | |||
| 350069d371 | |||
| 019c078393 | |||
| d411df4074 | |||
| 763e0d79ab | |||
| 47fe4ad655 | |||
| 99cb83202e | |||
| fc2438be2d | |||
| 40e48b046b | |||
| f801d99748 | |||
| 6ab6196739 | |||
| eb12281dfc | |||
| 1fb1fdef9e | |||
| 593d26e0ff | |||
| 394abb58be | |||
| fc3bee6d05 | |||
| b203503fd8 | |||
| 8b0a72dc9b | |||
| 23add7a107 | |||
| caf84196fb | |||
| 099b9651a6 | |||
| 76d72a1eef | |||
| 87deede078 | |||
| 6fec8588c1 | |||
| aafdbcd57a | |||
| 08da28f475 | |||
| 8c1014d281 | |||
| 271fc4edf6 | |||
| cd390a4115 | |||
| a65ed579d2 | |||
| 2ad1f57382 | |||
| 58e3cfd3e6 | |||
| 7de4ee8f5b | |||
| 213edac3a7 | |||
| acc13aef6b | |||
| 4bbc6f7787 | |||
| 20f2ea1829 | |||
| 2d23f0668b | |||
| d6030a06b7 | |||
| 0df76e2af6 | |||
| f80fe1df93 | |||
| cff421bc53 | |||
| bca925d385 | |||
| 9abde89805 | |||
| ea4f639fcb | |||
| 64cd5f7d52 | |||
| 843ebe1d8f | |||
| 764619f076 | |||
| e3a0cfb55a | |||
| 2929749314 | |||
| 51b9512f4e | |||
| ffcfa44eef | |||
| 6363da97b1 | |||
| 07ed2cdcf6 | |||
| 5ad68b7dfc | |||
| 8a6ee018ea | |||
| b42590ff95 | |||
| 056b579c47 | |||
| 576e612cd0 | |||
| c2faa06a15 | |||
| d3ed3556eb | |||
| d960d125c0 | |||
| 89d5d7ec0a | |||
| ea0c13936b | |||
| 773c976822 | |||
| cd05ed2379 | |||
| 054e4057d8 | |||
| 3943e79bb1 | |||
| 87f4317c15 | |||
| 50aa793910 | |||
| 5efc9865a8 | |||
| 949c573c49 | |||
| f7f450a09d | |||
| 81f7c38383 | |||
| 2c785cb37a | |||
| 57e65b061c | |||
| aa54765b03 | |||
| 8929bc99bb | |||
| 0428c06612 | |||
| a7eb3cf433 | |||
| e4e0e793a8 | |||
| b3d3b8b6bc | |||
| 06bc456221 | |||
| 3461f45207 | |||
| a17d4acc13 | |||
| 62fd9193a1 | |||
| 2329645df4 | |||
| 8a435ddf6c | |||
| 25b754ba31 | |||
| b734593bf2 | |||
| 16847ce6f7 | |||
| 6300829317 | |||
| a1e1ee31bd | |||
| 7ed70b876d | |||
| 3ca85da906 | |||
| d6a89168ef | |||
| cb33a20694 | |||
| a242693751 | |||
| 81ca3cc7a7 | |||
| 1a32098c9e | |||
| fa4c32270b | |||
| 9c43b875f4 | |||
| 63560e290b | |||
| 1ab8a6a2fe | |||
| a2c0196e05 | |||
| 680f7a64e2 | |||
| 4893616a5a | |||
| 04e8c0245d | |||
| 10cefaf1cd | |||
| adbb1fe80a | |||
| 79c50aedcc | |||
| eb72b35e23 | |||
| bbd02d46a6 | |||
| 3d3c8ce973 | |||
| 562f929056 | |||
| ff03d8ce62 | |||
| 8281131432 | |||
| 8a6bd4e0e7 | |||
| 1b4df0565a | |||
| eb3692ef81 | |||
| 46a9ac9f84 | |||
| a012ec65ef | |||
| b86c4a0d1a | |||
| 11de9a01b9 | |||
| 80dec2daf9 | |||
| da591bb53c | |||
| 7545c9c823 | |||
| ecc3d59a8f | |||
| b8862f025b | |||
| db20a07b27 | |||
| 8dadd5c9fe | |||
| b7cecb2a8b | |||
| 6c7b631cb7 | |||
| 892c6403eb | |||
| f6834f49d4 | |||
| 75752eefc0 | |||
| fbdd4274ac | |||
| 867b03aa1e | |||
| 457b469c96 | |||
| 94691f12ab | |||
| 5c8d11824e | |||
| db053c2dbd | |||
| 8c1dac86d5 | |||
| 8fb95b884f | |||
| f1f297b3a7 |
+37
-7
@@ -1,20 +1,50 @@
|
|||||||
# ARIA Environment Configuration
|
# ════════════════════════════════════════════════
|
||||||
# Copy to .env and fill in values
|
# ARIA — Umgebungsvariablen
|
||||||
|
# Kopieren nach .env und Werte eintragen
|
||||||
|
# ════════════════════════════════════════════════
|
||||||
|
|
||||||
# Auth token for ARIA Core (generate a long random string)
|
# ── ARIA Auth Token ──────────────────────────────
|
||||||
# openssl rand -hex 32
|
# Authentifizierung fuer den OpenClaw Gateway (aria-core).
|
||||||
|
# Wird von Diagnostic, Bridge und App genutzt um sich am Gateway anzumelden.
|
||||||
|
# Alle Services die mit aria-core kommunizieren brauchen diesen Token.
|
||||||
|
# Generieren: openssl rand -hex 32
|
||||||
ARIA_AUTH_TOKEN=change-me-to-a-long-random-string
|
ARIA_AUTH_TOKEN=change-me-to-a-long-random-string
|
||||||
|
|
||||||
# RVS — Rendezvous-Server (Bridge + App verbinden sich hierüber)
|
# ── RVS — Rendezvous-Server ─────────────────────
|
||||||
|
# Der RVS ist ein WebSocket-Relay im Rechenzentrum.
|
||||||
|
# App, Bridge, Diagnostic und XTTS-Bridge verbinden sich hierueber.
|
||||||
|
# Alle muessen den gleichen Host, Port und Token nutzen.
|
||||||
|
|
||||||
|
# Hostname des RVS-Servers (z.B. rvs.example.de oder mobil.hacker-net.de)
|
||||||
RVS_HOST=rvs.example.de
|
RVS_HOST=rvs.example.de
|
||||||
|
|
||||||
|
# Port auf dem der RVS laeuft (muss mit rvs/docker-compose.yml uebereinstimmen)
|
||||||
RVS_PORT=443
|
RVS_PORT=443
|
||||||
|
|
||||||
|
# TLS (wss://) verwenden? true = verschluesselt, false = unverschluesselt (ws://)
|
||||||
RVS_TLS=true
|
RVS_TLS=true
|
||||||
|
|
||||||
# Bei TLS-Fehler automatisch auf ws:// (ohne TLS) fallback?
|
# Bei TLS-Fehler automatisch auf ws:// (ohne TLS) fallback?
|
||||||
# true = Fallback erlaubt, false = nur mit TLS verbinden
|
# Nuetzlich wenn kein TLS-Zertifikat vorhanden (z.B. Entwicklung)
|
||||||
RVS_TLS_FALLBACK=true
|
RVS_TLS_FALLBACK=true
|
||||||
|
|
||||||
|
# Pairing-Token: Wer den gleichen Token hat, landet im gleichen RVS-Room.
|
||||||
|
# Wird von generate-token.sh automatisch generiert und hier eingetragen.
|
||||||
|
# Die Android App bekommt den Token per QR-Code beim Pairing.
|
||||||
|
# WICHTIG: Muss auf ARIA-VM, Gaming-PC (xtts/.env) und App identisch sein!
|
||||||
|
# Generieren: ./generate-token.sh (traegt den Token automatisch ein)
|
||||||
RVS_TOKEN=
|
RVS_TOKEN=
|
||||||
|
|
||||||
# Gitea (for release.sh — Kennwort wird interaktiv abgefragt)
|
# ── Gitea — Release-Verwaltung ───────────────────
|
||||||
|
# Wird von release.sh genutzt um APKs auf Gitea zu veroeffentlichen.
|
||||||
|
# Kennwort wird beim Release interaktiv abgefragt (nicht in .env!).
|
||||||
GITEA_URL=https://git.hacker-net.de
|
GITEA_URL=https://git.hacker-net.de
|
||||||
GITEA_REPO=Hacker-Software/ARIA-AGENT
|
GITEA_REPO=Hacker-Software/ARIA-AGENT
|
||||||
GITEA_USER=duffyduck
|
GITEA_USER=duffyduck
|
||||||
|
|
||||||
|
# ── Auto-Update — APK auf RVS-Server kopieren ───
|
||||||
|
# SSH-Ziel fuer scp: release.sh kopiert die APK dorthin.
|
||||||
|
# Der RVS-Server stellt sie dann per WebSocket an die App bereit.
|
||||||
|
# Format: user@host (z.B. root@aria-rvs oder root@rvs.example.de)
|
||||||
|
# Leer lassen = Auto-Update ueberspringen, APK manuell auf RVS kopieren.
|
||||||
|
RVS_UPDATE_HOST=
|
||||||
|
|||||||
+16
-8
@@ -9,15 +9,19 @@
|
|||||||
.env.*
|
.env.*
|
||||||
!.env.example
|
!.env.example
|
||||||
!.env.*.example
|
!.env.*.example
|
||||||
aria-data/config/*.env
|
|
||||||
!aria-data/config/*.env.example
|
|
||||||
!aria-data/config/openclaw.env
|
|
||||||
|
|
||||||
# ── ARIAs Gedächtnis (nur per tar gesichert) ────
|
# Privater User-Profile-Snippet (Tool-Stack, interne URLs) —
|
||||||
aria-data/brain/
|
# liegt jetzt in brain-import/ (frueher aria-data/config/USER.md).
|
||||||
|
# USER.md.example ist Repo-Inhalt, USER.md lokal selbst anlegen.
|
||||||
|
aria-data/brain-import/USER.md
|
||||||
|
|
||||||
# ── Stimmen (große Binärdateien) ─────────────────
|
# ── ARIAs Gedächtnis (Vector-DB, Skills, Models) ──
|
||||||
aria-data/voices/
|
# Backup via Diagnostic → Gehirn-Export (tar.gz), nicht via Git.
|
||||||
|
aria-data/brain/data/
|
||||||
|
aria-data/brain/qdrant/
|
||||||
|
|
||||||
|
# Diagnostic-State (aktive Session etc.)
|
||||||
|
aria-data/config/diag-state/
|
||||||
|
|
||||||
# ── Node / npm ──────────────────────────────────
|
# ── Node / npm ──────────────────────────────────
|
||||||
node_modules/
|
node_modules/
|
||||||
@@ -29,9 +33,14 @@ yarn-error.log*
|
|||||||
android/build/
|
android/build/
|
||||||
android/.gradle/
|
android/.gradle/
|
||||||
android/app/build/
|
android/app/build/
|
||||||
|
android/android/.gradle/
|
||||||
|
android/android/app/build/
|
||||||
|
android/android/local.properties
|
||||||
android/local.properties
|
android/local.properties
|
||||||
|
android/package-lock.json
|
||||||
*.apk
|
*.apk
|
||||||
*.aab
|
*.aab
|
||||||
|
rvs/updates/*.apk
|
||||||
|
|
||||||
# ── Tauri / Desktop Build ───────────────────────
|
# ── Tauri / Desktop Build ───────────────────────
|
||||||
desktop/src-tauri/target/
|
desktop/src-tauri/target/
|
||||||
@@ -41,7 +50,6 @@ desktop/dist/
|
|||||||
__pycache__/
|
__pycache__/
|
||||||
*.pyc
|
*.pyc
|
||||||
*.pyo
|
*.pyo
|
||||||
bridge/__pycache__/
|
|
||||||
|
|
||||||
# ── macOS ────────────────────────────────────────
|
# ── macOS ────────────────────────────────────────
|
||||||
.DS_Store
|
.DS_Store
|
||||||
|
|||||||
Binary file not shown.
@@ -29,11 +29,26 @@ ARIA hat zwei Rollen:
|
|||||||
┌─────────────────────────────────────────────────────────┐
|
┌─────────────────────────────────────────────────────────┐
|
||||||
│ RVS — Rendezvous-Server │
|
│ RVS — Rendezvous-Server │
|
||||||
│ Node.js WebSocket Relay (Docker, Rechenzentrum) │
|
│ Node.js WebSocket Relay (Docker, Rechenzentrum) │
|
||||||
│ Reiner Relay — kennt keine Tokens, leitet durch │
|
│ Relay + Auto-Update (APK-Verteilung) │
|
||||||
│ rvs/docker-compose.yml │
|
│ rvs/docker-compose.yml │
|
||||||
└───────────────────────┬─────────────────────────────────┘
|
└───────────┬───────────────────────────┬─────────────────┘
|
||||||
│ WebSocket Tunnel
|
│ WebSocket Tunnel │ WebSocket Tunnel
|
||||||
▼
|
▼ ▼
|
||||||
|
┌─────────────────────────────────┐
|
||||||
|
│ Gamebox (Windows + WSL2) │
|
||||||
|
│ RTX 3060, Docker Desktop │
|
||||||
|
│ ┌──────────────────────────┐ │
|
||||||
|
│ │ aria-f5tts-bridge │ │
|
||||||
|
│ │ F5-TTS Voice Cloning │ │
|
||||||
|
│ │ PCM-Streaming an die App │ │
|
||||||
|
│ ├──────────────────────────┤ │
|
||||||
|
│ │ aria-whisper-bridge │ │
|
||||||
|
│ │ Faster-Whisper CUDA │ │
|
||||||
|
│ │ STT in fast-Echtzeit │ │
|
||||||
|
│ └──────────────────────────┘ │
|
||||||
|
│ Beide teilen ./voices Volume │
|
||||||
|
│ xtts/docker-compose.yml │
|
||||||
|
└─────────────────────────────────┘
|
||||||
┌─────────────────────────────────────────────────────────┐
|
┌─────────────────────────────────────────────────────────┐
|
||||||
│ ARIA-VM (Proxmox, Debian 13) — ARIAs Wohnung │
|
│ ARIA-VM (Proxmox, Debian 13) — ARIAs Wohnung │
|
||||||
│ Basissystem + Docker. Rest richtet ARIA selbst ein. │
|
│ Basissystem + Docker. Rest richtet ARIA selbst ein. │
|
||||||
@@ -42,37 +57,49 @@ ARIA hat zwei Rollen:
|
|||||||
│ ┌─────────────────────────────────────────────────┐ │
|
│ ┌─────────────────────────────────────────────────┐ │
|
||||||
│ │ [proxy] claude-max-api-proxy Container │ │
|
│ │ [proxy] claude-max-api-proxy Container │ │
|
||||||
│ │ Claude Max Sub → lokale API │ │
|
│ │ Claude Max Sub → lokale API │ │
|
||||||
│ │ Port 3456, mit sed-Patches fuer │ │
|
|
||||||
│ │ Tool-Permissions + Host-Binding │ │
|
|
||||||
│ │ │ │
|
│ │ │ │
|
||||||
│ │ [aria] OpenClaw Container (aria-core) │ │
|
│ │ [qdrant] Vector-DB fuer ARIAs Gedaechtnis │ │
|
||||||
│ │ Gateway, Sessions, Memory, Skills │ │
|
│ │ Bind-Mount: aria-data/brain/qdrant/ │ │
|
||||||
│ │ Liest BOOTSTRAP.md + AGENT.md │ │
|
│ │ │ │
|
||||||
|
│ │ [brain] ARIA Agent + Memory Container │ │
|
||||||
|
│ │ FastAPI auf Port 8080 │ │
|
||||||
|
│ │ Eigener Agent-Loop, Skills, │ │
|
||||||
|
│ │ Vector-Memory, SSH-Zugriff zur VM │ │
|
||||||
|
│ │ Bind-Mount: aria-data/brain/data/ │ │
|
||||||
│ │ │ │
|
│ │ │ │
|
||||||
│ │ [bridge] ARIA Voice Bridge Container │ │
|
│ │ [bridge] ARIA Voice Bridge Container │ │
|
||||||
│ │ Whisper STT · Piper TTS · Wake-Word │ │
|
│ │ Wake-Word, STT, TTS-Forwarding │ │
|
||||||
│ │ Ramona (weiblich) + Thorsten (tief) │ │
|
│ │ Spricht mit Brain via HTTP/8080 │ │
|
||||||
│ │ Bruecke: App <> RVS <> Bridge <> ARIA │ │
|
|
||||||
│ │ │ │
|
│ │ │ │
|
||||||
│ │ [diagnostic] Selbstcheck-UI + Einstellungen │ │
|
│ │ [diagnostic] Selbstcheck-UI + Einstellungen │ │
|
||||||
│ │ Gateway + RVS + Proxy Status │ │
|
│ │ Port 3001 (im Netzwerk der Bridge) │ │
|
||||||
│ │ Chat, Sessions, Login, Logs │ │
|
│ │ Chat, Gehirn, Dateien, Logs │ │
|
||||||
│ └──────────────────┬──────────────────────────────┘ │
|
│ └──────────────────┬──────────────────────────────┘ │
|
||||||
│ │ Volume Mount │
|
│ │ Volume Mount │
|
||||||
│ ▼ │
|
│ ▼ │
|
||||||
│ ┌─────────────────────────────────────────────────┐ │
|
│ ┌─────────────────────────────────────────────────┐ │
|
||||||
│ │ ./aria-data/ — Ein tar = vollstaendiges Backup │ │
|
│ │ ./aria-data/ — Konfiguration + SSH-Keys │ │
|
||||||
|
│ │ ./aria-data/brain/ — Vector-DB + Skills (gitignored)│
|
||||||
|
│ │ Backup via Diagnostic → "Gehirn-Export" (tar.gz) │ │
|
||||||
│ └─────────────────────────────────────────────────┘ │
|
│ └─────────────────────────────────────────────────┘ │
|
||||||
└─────────────────────────────────────────────────────────┘
|
└─────────────────────────────────────────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
**Drei separate Deployments:**
|
> OpenClaw (frueher `aria-core`) ist abgerissen — ARIA laeuft jetzt mit eigenem Agent-Framework im
|
||||||
|
> `aria-brain` Container. Eigene Tools, Skills, Vector-Memory statt Sessions. Letzter OpenClaw-Stand
|
||||||
|
> ist als Git-Tag `v0.1.2.0` archiviert.
|
||||||
|
|
||||||
|
**Vier separate Deployments:**
|
||||||
|
|
||||||
| Was | Wo | Wie |
|
| Was | Wo | Wie |
|
||||||
|-----|----|-----|
|
|-----|----|-----|
|
||||||
| RVS | Rechenzentrum | `cd rvs && docker compose up -d` |
|
| RVS | Rechenzentrum | `cd rvs && docker compose up -d` |
|
||||||
| ARIA Core | Debian 13 VM | `docker compose up -d && ./aria-setup.sh` |
|
| ARIA Brain/Bridge/Diagnostic | Debian 13 VM | `./init.sh && ./aria-setup.sh && docker compose up -d` |
|
||||||
| Android App | Stefans Handy | APK installieren, QR-Code scannen |
|
| Gamebox-Stack (F5-TTS + Whisper) | Gamebox (GPU) | `cd xtts && docker compose up -d` |
|
||||||
|
| Android App | Stefans Handy | APK installieren (Auto-Update via RVS) |
|
||||||
|
|
||||||
|
> Der Gamebox-Stack ist optional: ohne ihn faellt STT auf lokales Whisper (CPU,
|
||||||
|
> langsamer) zurueck; TTS bleibt aus (ARIA antwortet dann nur als Text).
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -93,18 +120,34 @@ apt install -y docker.io docker-compose-plugin git curl jq
|
|||||||
git clone git@gitea.hackersoft.de:aria/aria.git ~/ARIA-AGENT
|
git clone git@gitea.hackersoft.de:aria/aria.git ~/ARIA-AGENT
|
||||||
cd ~/ARIA-AGENT
|
cd ~/ARIA-AGENT
|
||||||
cp .env.example .env
|
cp .env.example .env
|
||||||
|
bash init.sh # legt USER.md aus Vorlage an (idempotent, schadet nicht)
|
||||||
```
|
```
|
||||||
|
|
||||||
`.env` Datei editieren:
|
`.env` Datei editieren (Details siehe `.env.example`):
|
||||||
```bash
|
```bash
|
||||||
|
# Auth-Token: Alle ARIA-Services nutzen ihn fuer interne Auth
|
||||||
ARIA_AUTH_TOKEN= # openssl rand -hex 32
|
ARIA_AUTH_TOKEN= # openssl rand -hex 32
|
||||||
|
|
||||||
|
# RVS-Verbindung: Hostname + Port deines Rendezvous-Servers
|
||||||
RVS_HOST= # z.B. rvs.hackersoft.de
|
RVS_HOST= # z.B. rvs.hackersoft.de
|
||||||
RVS_PORT=443
|
RVS_PORT=443
|
||||||
RVS_TLS=true
|
RVS_TLS=true
|
||||||
RVS_TLS_FALLBACK=true
|
RVS_TLS_FALLBACK=true
|
||||||
RVS_TOKEN= # wird von generate-token.sh automatisch gesetzt
|
|
||||||
|
# Pairing-Token: Verbindet App, Bridge, Diagnostic und Gamebox im gleichen RVS-Room
|
||||||
|
# MUSS auf allen Geraeten identisch sein (ARIA-VM, Gaming-PC, App)
|
||||||
|
RVS_TOKEN= # ./generate-token.sh
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Alle anderen Einstellungen (Stimmen, Modi, Wake-Word, F5-TTS-Tuning) leben in
|
||||||
|
`/shared/config/runtime.json` und werden ueber die Diagnostic-UI gepflegt — nicht
|
||||||
|
in der `.env`. Komplett-Reset jederzeit moeglich via "🗑 ALLES löschen" im
|
||||||
|
Diagnostic-Einstellungen-Tab.
|
||||||
|
|
||||||
|
**Zwei Tokens, zwei Zwecke:**
|
||||||
|
- **ARIA_AUTH_TOKEN**: Interner Auth-Token zwischen ARIAs Containern.
|
||||||
|
- **RVS_TOKEN**: Pairing-Token fuer den Rendezvous-Server. Alle Geraete mit dem gleichen Token landen im gleichen "Room" und koennen kommunizieren. Die App bekommt diesen Token per QR-Code.
|
||||||
|
|
||||||
### 2. Claude CLI einloggen (Proxy-Auth)
|
### 2. Claude CLI einloggen (Proxy-Auth)
|
||||||
|
|
||||||
Der Proxy-Container nutzt deine Claude Max Subscription. Die Credentials muessen
|
Der Proxy-Container nutzt deine Claude Max Subscription. Die Credentials muessen
|
||||||
@@ -120,52 +163,24 @@ claude login
|
|||||||
**Wichtig:** Der Ordner `~/.claude/` (nicht `~/.config/claude/`!) wird als Volume
|
**Wichtig:** Der Ordner `~/.claude/` (nicht `~/.config/claude/`!) wird als Volume
|
||||||
in den Proxy gemountet. Die Credentials ueberleben Container-Restarts.
|
in den Proxy gemountet. Die Credentials ueberleben Container-Restarts.
|
||||||
|
|
||||||
### 3. Stimmen herunterladen
|
### 3. SSH-Key fuer aria-wohnung generieren + RVS-Token + Container
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
./get-voices.sh
|
# SSH-Key fuer den Zugriff von ARIA auf die VM (aria-wohnung)
|
||||||
# Laedt Ramona + Thorsten (Piper TTS) nach aria-data/voices/
|
./aria-setup.sh
|
||||||
# Ca. 100MB, dauert ein paar Minuten
|
|
||||||
```
|
|
||||||
|
|
||||||
### 4. Voice Bridge konfigurieren
|
# RVS-Token generieren — schreibt RVS_TOKEN in .env, zeigt QR-Code
|
||||||
|
|
||||||
```bash
|
|
||||||
cp aria-data/config/aria.env.example aria-data/config/aria.env
|
|
||||||
# Bei Bedarf anpassen (Whisper-Modell, Sprache, Stimmen-Pfade)
|
|
||||||
```
|
|
||||||
|
|
||||||
### 5. RVS-Token generieren & Container starten
|
|
||||||
|
|
||||||
```bash
|
|
||||||
# Token generieren — schreibt RVS_TOKEN in .env, zeigt QR-Code
|
|
||||||
./generate-token.sh
|
./generate-token.sh
|
||||||
|
|
||||||
# Alle Container starten
|
# Alle Container starten
|
||||||
docker compose up -d
|
docker compose up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
### 6. ARIA Setup ausfuehren (einmalig!)
|
`aria-setup.sh` generiert den ed25519-Key in `aria-data/ssh/` und traegt den
|
||||||
|
Public-Key in `/root/.ssh/authorized_keys` ein (Script laeuft als root auf der VM
|
||||||
|
aria-wohnung). Brain + Proxy nutzen den gleichen Key.
|
||||||
|
|
||||||
```bash
|
### 4. App verbinden
|
||||||
./aria-setup.sh
|
|
||||||
```
|
|
||||||
|
|
||||||
Dieses Script ist **essentiell** — es macht:
|
|
||||||
1. Wartet bis aria-core laeuft
|
|
||||||
2. Fixt Volume-Permissions (Docker → node User)
|
|
||||||
3. Schreibt `openclaw.json` (Proxy-Provider, Model-Config, Timeout 900s)
|
|
||||||
4. Setzt exec-approvals Wildcard (Tool-Ausfuehrung im headless-Modus)
|
|
||||||
5. Generiert SSH-Key fuer VM-Zugriff (`aria-data/ssh/`)
|
|
||||||
6. Fixt SSH-Permissions im Container
|
|
||||||
7. Startet aria-core neu
|
|
||||||
|
|
||||||
**SSH-Key auf der VM eintragen** (wird vom Script angezeigt):
|
|
||||||
```bash
|
|
||||||
cat ~/ARIA-AGENT/aria-data/ssh/id_ed25519.pub >> /root/.ssh/authorized_keys
|
|
||||||
```
|
|
||||||
|
|
||||||
### 7. App verbinden
|
|
||||||
|
|
||||||
App oeffnen → QR-Code scannen → "ARIA, hoerst du mich?"
|
App oeffnen → QR-Code scannen → "ARIA, hoerst du mich?"
|
||||||
|
|
||||||
@@ -173,20 +188,20 @@ Der QR-Code enthaelt: Host, Port, Token, TLS-Flag — einmal scannen, nie wieder
|
|||||||
|
|
||||||
Bestehendes Token nochmal als QR anzeigen: `./generate-token.sh show`
|
Bestehendes Token nochmal als QR anzeigen: `./generate-token.sh show`
|
||||||
|
|
||||||
### 8. Diagnostic pruefen
|
### 5. Diagnostic pruefen
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Im Browser:
|
# Im Browser:
|
||||||
http://<VM-IP>:3001
|
http://<VM-IP>:3001
|
||||||
```
|
```
|
||||||
|
|
||||||
Die Diagnostic-UI zeigt:
|
Die Diagnostic-UI hat fünf Top-Tabs:
|
||||||
- Gateway-Verbindung (gruener Punkt = OK)
|
|
||||||
- RVS-Verbindung
|
- **Main** — Live-Chat-Test, Status (Brain / RVS / Proxy), End-to-End-Trace
|
||||||
- Proxy-Status + Claude Login
|
- **Gehirn** — Memory-Verwaltung (Vector-DB), Token/Call-Metrics (Subscription-Quota), Bootstrap & Migration, Komplett-Gehirn Export/Import
|
||||||
- Chat-Test (direkt an ARIA schreiben)
|
- **Skills** — Liste mit Logs, Run, Activate/Deactivate, Export/Import als tar.gz
|
||||||
- Session-Verwaltung
|
- **Dateien** — alle Dateien aus `/shared/uploads/` mit Multi-Select, Bulk-Download (ZIP) + Bulk-Delete
|
||||||
- Container-Logs
|
- **Einstellungen** — Reparatur (Container-Restart), Wipe, Sprachausgabe, Whisper, Sprachmodell, Runtime-Config, App-Onboarding (QR), Komplett-Reset
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -194,7 +209,7 @@ Die Diagnostic-UI zeigt:
|
|||||||
|
|
||||||
Der Proxy ist das Herzsttueck: Er macht aus der Claude Max Subscription eine lokale API.
|
Der Proxy ist das Herzsttueck: Er macht aus der Claude Max Subscription eine lokale API.
|
||||||
|
|
||||||
**Ablauf:** `OpenClaw (aria-core) → HTTP → claude-max-api-proxy → Claude Code CLI (--print) → Anthropic API`
|
**Ablauf:** `aria-brain → HTTP → claude-max-api-proxy → Claude Code CLI (--print) → Anthropic API`
|
||||||
|
|
||||||
Der Proxy-Container (`node:22-alpine`) installiert bei jedem Start:
|
Der Proxy-Container (`node:22-alpine`) installiert bei jedem Start:
|
||||||
- `@anthropic-ai/claude-code` — Claude Code CLI
|
- `@anthropic-ai/claude-code` — Claude Code CLI
|
||||||
@@ -215,73 +230,67 @@ Danach werden per `sed` vier Patches angewendet:
|
|||||||
|
|
||||||
## Konfigurationsdateien
|
## Konfigurationsdateien
|
||||||
|
|
||||||
### aria-data/config/
|
### aria-data/
|
||||||
|
|
||||||
| Datei | Zweck | Gemountet als |
|
| Pfad | Zweck |
|
||||||
|-------|-------|---------------|
|
|------|-------|
|
||||||
| `BOOTSTRAP.md` | ARIAs System-Prompt: Identitaet, Sicherheitsregeln, Tool-Freigaben, Infrastruktur | `BOOTSTRAP.md` + `CLAUDE.md` im Workspace |
|
| `.env` | Tokens (ARIA_AUTH_TOKEN, RVS_TOKEN, RVS_HOST) — minimal, alles andere lebt in der DB |
|
||||||
| `AGENT.md` | ARIAs Persoenlichkeit, Tool-Freigaben, Arbeitsprinzipien | `AGENT.md` im Workspace |
|
| `aria-data/ssh/` | SSH-Key fuer den Zugriff auf aria-wohnung (Brain + Proxy teilen den Key) |
|
||||||
| `USER.md` | Stefans Praeferenzen, Kommunikationsstil | `USER.md` im Workspace |
|
| `aria-data/brain/qdrant/` | Vector-DB-Storage (Bind-Mount, gitignored) |
|
||||||
| `openclaw.env` | OpenClaw Container-Environment | `.env` im Workspace |
|
| `aria-data/brain/data/` | Skills, Embedding-Modell-Cache (Bind-Mount, gitignored) |
|
||||||
| `aria.env` | Voice Bridge Konfiguration (Whisper, Stimmen) | `/config/aria.env` in Bridge |
|
| `aria-data/brain-import/` | `AGENT.md`, `USER.md.example`, `TOOLING.md.example` — Quelle fuer den initialen Memory-Import in die Vector-DB |
|
||||||
|
| `aria-data/config/diag-state/` | Diagnostic State (z.B. zuletzt aktive Session) |
|
||||||
|
|
||||||
**BOOTSTRAP.md** ist die wichtigste Datei — sie definiert:
|
### /shared/config/ (im aria-shared Volume)
|
||||||
- Wer ARIA ist (Name, Rolle, Persoenlichkeit)
|
|
||||||
- Sicherheitsregeln (kein ClawHub, Prompt Injection abwehren)
|
|
||||||
- Tool-Freigaben (alle Claude Code Tools: WebFetch, Bash, etc.)
|
|
||||||
- SSH-Zugriff auf aria-wohnung (VM)
|
|
||||||
- Stimmen-Auswahl (Ramona vs Thorsten)
|
|
||||||
- Gedaechtnis-System
|
|
||||||
|
|
||||||
### openclaw.json (via aria-setup.sh)
|
| Datei | Zweck |
|
||||||
|
|-------|-------|
|
||||||
|
| `voice_config.json` | TTS-Engine, geclonte Stimme, Whisper-Modell, F5-TTS-Tuning |
|
||||||
|
| `runtime.json` | Token + RVS-Override + Whisper-Sprache (durch Diagnostic gepflegt) |
|
||||||
|
| `highlight_triggers.json` | Highlight-Trigger-Woerter |
|
||||||
|
| `chat_backup.jsonl` | Append-only Chat-Log (Quelle fuer die Chat-History in Diagnostic) |
|
||||||
|
|
||||||
Wird von `aria-setup.sh` in den Container geschrieben:
|
`voice_config.json` + `highlight_triggers.json` lassen sich via Diagnostic →
|
||||||
```json
|
"Sprachausgabe" als Bundle exportieren/importieren.
|
||||||
{
|
|
||||||
"agents": {
|
|
||||||
"defaults": {
|
|
||||||
"model": { "primary": "proxy/claude-sonnet-4" },
|
|
||||||
"timeoutSeconds": 900,
|
|
||||||
"maxConcurrent": 4
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"models": {
|
|
||||||
"providers": {
|
|
||||||
"proxy": {
|
|
||||||
"api": "openai-completions",
|
|
||||||
"baseUrl": "http://proxy:3456/v1",
|
|
||||||
"apiKey": "not-needed"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"tools": { "profile": "full" },
|
|
||||||
"messages": { "ackReactionScope": "all" }
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
**timeoutSeconds: 900** (15 Min) — notwendig weil jede Anfrage einen neuen
|
### Backup / Reset
|
||||||
`claude --print` Prozess spawnt (Cold Start). Bei Tool-Nutzung (WebFetch, Bash)
|
|
||||||
braucht ARIA mehrere API-Roundtrips.
|
- **Gehirn-Backup**: Diagnostic → Gehirn → "⬇ Export herunterladen" — komplettes Brain (Memories + Skills + Qdrant-DB) als `.tar.gz`
|
||||||
|
- **Stimmen-Backup**: pro Stimme ein `.tar.gz` (Diagnostic → Sprachausgabe → ⬇ pro Stimme); Import via Upload-Button
|
||||||
|
- **Komplett-Reset**: Diagnostic → Einstellungen → "🗑 ALLES löschen" — Memory + Stimmen + Settings weg; `.env` + SSH-Keys bleiben
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Voice Bridge
|
## Voice Bridge
|
||||||
|
|
||||||
Die Bridge verbindet die Android App mit ARIA und bietet lokale Sprachverarbeitung.
|
Die Bridge verbindet die Android App mit ARIA und orchestriert die GPU-Services
|
||||||
|
auf der Gamebox.
|
||||||
|
|
||||||
**Nachrichtenfluss:**
|
**Nachrichtenfluss:**
|
||||||
```
|
```
|
||||||
App → RVS → Bridge → aria-core
|
Text: App → RVS → Bridge → aria-brain (HTTP)
|
||||||
aria-core → Bridge → RVS → App
|
Audio: App → RVS → Bridge → stt_request (RVS) → whisper-bridge (Gamebox)
|
||||||
→ Lautsprecher (TTS)
|
→ stt_response → Bridge → aria-brain
|
||||||
|
Fallback bei Timeout: lokales faster-whisper (CPU)
|
||||||
|
Datei: App → RVS → Bridge → /shared/uploads/ → aria-brain (mit Pfad)
|
||||||
|
|
||||||
|
aria-brain → Antwort → Bridge → RVS → App
|
||||||
|
→ xtts_request (RVS) → f5tts-bridge
|
||||||
|
→ audio_pcm Stream → RVS → App AudioTrack
|
||||||
```
|
```
|
||||||
|
|
||||||
### Features
|
### Features
|
||||||
|
|
||||||
- **STT**: faster-whisper (lokal, offline, 16kHz mono)
|
- **STT primaer remote**: aria-bridge sendet `stt_request` an die Gamebox-Whisper
|
||||||
- **TTS**: Piper (Ramona + Thorsten, offline)
|
(faster-whisper CUDA, fast Echtzeit). 45s Timeout, dann Fallback auf lokales
|
||||||
- **Wake-Word**: openwakeword (lokales Mikrofon auf der VM)
|
CPU-Whisper. Modell-Wahl in Diagnostic, Hot-Swap via config-Broadcast.
|
||||||
- **App-Audio**: Base64 Audio von App → FFmpeg → Whisper STT → Text an aria-core
|
- **TTS via F5-TTS**: aria-f5tts-bridge auf der Gamebox. Voice Cloning mit
|
||||||
|
Referenz-Audio + automatisch transkribiertem Referenz-Text.
|
||||||
|
- **Text-Cleanup**: `<voice>...</voice>` Tag bevorzugt; Markdown, Code,
|
||||||
|
Einheiten und URLs werden TTS-gerecht aufbereitet. Dezimalzahlen werden
|
||||||
|
ausgeschrieben (`0,1` → "null komma eins"). Acronyme bis 5 Buchstaben werden
|
||||||
|
buchstabiert (`USB` → "U S B", `XTTS` → "X T T S").
|
||||||
|
- **Wake-Word**: openwakeword (lokales Mikrofon auf der VM, optional)
|
||||||
- **Modi**: Normal, Nicht stoeren, Fluestern, Hangar, Gaming
|
- **Modi**: Normal, Nicht stoeren, Fluestern, Hangar, Gaming
|
||||||
|
|
||||||
### Betriebsmodi
|
### Betriebsmodi
|
||||||
@@ -294,37 +303,29 @@ aria-core → Bridge → RVS → App
|
|||||||
| Hangar | `"ARIA, ich arbeite"` | Nur wichtige Meldungen |
|
| Hangar | `"ARIA, ich arbeite"` | Nur wichtige Meldungen |
|
||||||
| Gaming | `"ARIA, Gaming-Modus"` | Nur auf direkte Fragen antworten |
|
| Gaming | `"ARIA, Gaming-Modus"` | Nur auf direkte Fragen antworten |
|
||||||
|
|
||||||
### Stimmen
|
|
||||||
|
|
||||||
| Stimme | Modell | Wann |
|
|
||||||
|--------|--------|------|
|
|
||||||
| **Ramona** (weiblich) | `de_DE-ramona-low` | Alltag, Antworten, Gespraeche |
|
|
||||||
| **Thorsten** (maennlich, tief) | `de_DE-thorsten-high` | Epische Momente, Alarme |
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Diagnostic — Selbstcheck-UI und Einstellungen
|
## Diagnostic — Selbstcheck-UI und Einstellungen
|
||||||
|
|
||||||
Erreichbar unter `http://<VM-IP>:3001`. Teilt das Netzwerk mit aria-core.
|
Erreichbar unter `http://<VM-IP>:3001`. Teilt das Netzwerk mit der Bridge.
|
||||||
|
|
||||||
### Features
|
### Tabs
|
||||||
|
|
||||||
- **Status-Karten**: Gateway (Handshake), RVS (TLS-Fallback), Proxy (Auth)
|
- **Main**: Brain/RVS/Proxy-Status, Chat-Test, "ARIA denkt..."-Indikator, End-to-End-Trace, Container-Logs
|
||||||
- **Chat-Test**: Nachrichten direkt an ARIA senden (Gateway oder via RVS)
|
- **Gehirn**: Memory-Browser (Vector-DB), Suche + Filter, Edit/Add/Delete, Konversation-Status mit Destillat-Trigger, **Token/Call-Metrics mit Subscription-Quota-Tracking**, Bootstrap & Migration (3 Wiederherstellungs-Wege), Gehirn-Export/Import (tar.gz). Info-Buttons (ℹ) ueberall mit Modal-Erklaerung.
|
||||||
- **Session-Verwaltung**: Sessions auflisten, wechseln, erstellen, loeschen
|
- **Skills**: Liste aller Skills mit Logs pro Run, Activate/Deactivate, Export/Import als tar.gz, "von ARIA"-Badge fuer selbst gebaute
|
||||||
- **Chat-History**: Wird beim Laden und Session-Wechsel angezeigt (read-only aus JSONL)
|
- **Dateien**: Browser fuer `/shared/uploads/` mit Multi-Select + "Alle markieren" + Bulk-Download (ZIP bei 2+) + Bulk-Delete. Live-Update der Chat-Bubbles beim Delete.
|
||||||
|
- **Einstellungen**: Reparatur (Container-Restart fuer Brain/Bridge/Qdrant), Komplett-Reset, Betriebsmodi, Sprachausgabe + Voice-Cloning + F5-TTS-Tuning + Voice Export/Import, Whisper, Sprachmodell (brainModel), Onboarding-QR, App-Cleanup
|
||||||
|
|
||||||
|
### Was zusaetzlich noch drin steckt
|
||||||
|
|
||||||
|
- **Disk-Voll Banner** mit copy-baren Cleanup-Befehlen (safe + aggressiv)
|
||||||
|
- **Token/Call-Metrics**: pro Claude-Call ein Eintrag in `/data/metrics.jsonl` mit ts + Token-Schaetzung. Gehirn-Tab zeigt 1h/5h/24h/30d-Aggregat plus Progress-Bar gegen Plan-Limit (Pro / Max 5x / Max 20x / Custom). Warn-Schwelle 80%, kritisch 90%.
|
||||||
|
- **Voice Cloning**: Audio-Samples hochladen, Whisper transkribiert den Ref-Text automatisch
|
||||||
|
- **Voice Export/Import**: einzelne Stimmen als `.tar.gz` zwischen Gameboxen mitnehmen
|
||||||
|
- **Settings Export/Import**: `voice_config.json` + `highlight_triggers.json` als JSON-Bundle
|
||||||
- **Claude Login**: Browser-Terminal zum Einloggen in den Proxy
|
- **Claude Login**: Browser-Terminal zum Einloggen in den Proxy
|
||||||
- **Core Terminal**: Shell in aria-core (openclaw CLI)
|
- **SSH Terminal**: direkter SSH-Zugang zu aria-wohnung
|
||||||
- **Container-Logs**: Echtzeit-Logs aller Container (gefiltert nach Tab)
|
|
||||||
- **SSH Terminal**: Direkter SSH-Zugang zu aria-wohnung
|
|
||||||
|
|
||||||
### Session-Verwaltung
|
|
||||||
|
|
||||||
Die in der Diagnostic gewaehlte Session gilt **global** — Bridge und App nutzen
|
|
||||||
dieselbe Session. Die aktive Session wird unter `/data/active-session` persistiert
|
|
||||||
und ueberlebt Container-Restarts.
|
|
||||||
|
|
||||||
API-Endpoint fuer andere Services: `GET http://localhost:3001/api/session`
|
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -333,13 +334,72 @@ API-Endpoint fuer andere Services: `GET http://localhost:3001/api/session`
|
|||||||
### Features
|
### Features
|
||||||
|
|
||||||
- Text-Chat mit ARIA
|
- Text-Chat mit ARIA
|
||||||
- **Sprachaufnahme**: Push-to-Talk (halten) oder Tap-to-Talk (tippen, Auto-Stop bei Stille)
|
- **Sprachaufnahme**: Tap-to-Talk (tippen startet, tippen stoppt, Auto-Stop bei Stille via VAD)
|
||||||
- **VAD (Voice Activity Detection)**: Erkennt 1.8s Stille und stoppt automatisch
|
- **Gespraechsmodus** (Ohr-Button): Nach jeder ARIA-Antwort startet automatisch die Aufnahme — wie ein natuerliches Gespraech hin und her
|
||||||
- **Wake Word**: Toggle-Button aktiviert kontinuierliches Mikrofon-Monitoring
|
- **Wake-Word** (on-device, openWakeWord ONNX): "Hey Jarvis", "Alexa", "Hey Mycroft", "Hey Rhasspy" — Mikrofon hoert passiv mit, Konversation startet beim Schluesselwort. Komplett on-device via ONNX Runtime, kein API-Key, kein Cloud-Roundtrip, Audio verlaesst das Geraet nicht.
|
||||||
- **TTS-Wiedergabe**: ARIA antwortet per Lautsprecher (Ramona/Thorsten)
|
- **VAD (Voice Activity Detection)**: Adaptive Schwelle (Baseline aus ersten 500ms Mic-Pegel + 6dB Offset). Konfigurierbare Stille-Toleranz (1.0–8.0s, Default 2.8s) bevor Auto-Stop greift. Max-Aufnahme einstellbar (1–30 min, Default 5 min)
|
||||||
- Datei- und Kamera-Upload
|
- **Barge-In**: Wenn du waehrend ARIAs Antwort eine neue Sprach-/Text-Nachricht reinschickst, wird sie unterbrochen + bekommt den Hint "das ist eine Korrektur"
|
||||||
- GPS-Position (optional)
|
- **Wake-Word waehrend TTS**: Du kannst "Computer" sagen waehrend ARIA noch redet — AcousticEchoCanceler verhindert dass ARIAs eigene Stimme das Wake-Word triggert
|
||||||
|
- **Anruf-Pause + Auto-Resume**: TTS verstummt bei klassischem Anruf oder VoIP-Call (WhatsApp/Signal/Discord). Nach dem Auflegen geht ARIA von der **genauen Stelle** weiter wo sie unterbrochen wurde — die App misst die Position vom Wiedergabe-Anfang und nutzt den WAV-Cache der Antwort
|
||||||
|
- **Speech Gate**: Aufnahme wird verworfen wenn keine Sprache erkannt
|
||||||
|
- **STT (Speech-to-Text)**: 16kHz mono → Bridge → Gamebox-Whisper (CUDA) → Text im Chat. Fast in Echtzeit.
|
||||||
|
- **"ARIA denkt..." Indicator**: Zeigt live den Status vom Core (Denken, Tool, Schreiben) + Abbrechen-Button
|
||||||
|
- **TTS-Wiedergabe**: F5-TTS PCM-Streaming direkt in AudioTrack mit konfigurierbarem Pre-Roll-Buffer (1.0–6.0s, Default 3.5s) gegen Gaps bei Render-Pausen
|
||||||
|
- **Audio-Pause**: Andere Apps (Spotify, YouTube etc.) pausieren komplett waehrend ARIA spricht und kommen erst wieder nach echtem Wiedergabe-Ende
|
||||||
|
- **Lokale Voice-Wahl**: Pro Geraet eigene Stimme moeglich (in Settings). Diagnostic-Wechsel ueberschreibt alle App-Wahlen.
|
||||||
|
- **Voice-Ready Toast**: Beim Wechsel zeigt die App "Stimme X bereit (X.Ys)" sobald der Preload durch ist
|
||||||
|
- **Play-Button**: Jede ARIA-Nachricht kann nochmal vorgelesen werden (aus Cache wenn vorhanden, sonst neu rendern)
|
||||||
|
- **Chat-Suche**: Lupe in der Statusleiste filtert Nachrichten live
|
||||||
|
- **Mehrere Anhaenge**: Bilder + Dateien sammeln, Text hinzufuegen, dann zusammen senden
|
||||||
|
- **Paste-Support**: Bilder aus Zwischenablage einfuegen (Diagnostic)
|
||||||
|
- **Anhaenge**: Bridge speichert in Shared Volume, ARIA kann darauf zugreifen, Re-Download ueber RVS
|
||||||
|
- **Einstellungen**: TTS-aktiv, F5-TTS-Voice, Pre-Roll-Buffer, Stille-Toleranz, Speicherort, Auto-Download, GPS, Verbose-Logging
|
||||||
|
- **Auto-Update**: Prueft beim Start + per Button auf neue Version, Download + Installation ueber RVS (FileProvider)
|
||||||
|
- GPS-Position (optional, mit Runtime-Permission-Request) — wird in jeden Chat/Audio-Payload mitgegeben und ist in Diagnostic als Debug-Block einblendbar
|
||||||
- QR-Code Scanner fuer Token-Pairing
|
- QR-Code Scanner fuer Token-Pairing
|
||||||
|
- **ARIA-Dateien empfangen**: Wenn ARIA eine PDF/Bild/Markdown/ZIP fuer dich erstellt (Marker `[FILE: /shared/uploads/aria_*]` in der Antwort), erscheint sie als eigene Anhang-Bubble. Tippen → wird via RVS geladen + mit Android-Intent-Picker geoeffnet (PDF-Viewer, Bildbetrachter, Standard-App). Inline-Bilder aus Markdown-``-Syntax werden direkt unter dem Text gerendert (PNG/JPG via Image, SVG via react-native-svg)
|
||||||
|
- **Vollbild mit Pinch-Zoom**: Bilder im Vollbild-Modal sind pinch-zoombar (1x..5x), 1-Finger-Pan wenn gezoomt, Doppel-Tap toggelt 1x↔2.5x — alles ohne externe Lib
|
||||||
|
- **Container-Restart-Buttons** (Settings → Reparatur): aria-bridge / aria-brain / aria-qdrant gezielt neu starten (jeweils ~5s Downtime). Geht ueber RVS → Bridge → Diagnostic → Docker-Socket-API.
|
||||||
|
- **Cache-Cleanup**: Beim App-Start werden orphane TTS-WAVs aus dem Cache geraeumt. Plus Settings-Buttons "TTS-Cache leeren", "Update-Cache leeren", "Anhang-Cache leeren"
|
||||||
|
|
||||||
|
### Wake-Word (openWakeWord, on-device)
|
||||||
|
|
||||||
|
Wake-Word-Erkennung laeuft komplett **on-device** ueber [openWakeWord](https://github.com/dscripka/openWakeWord)
|
||||||
|
mit ONNX Runtime — kein API-Key, kein Cloud-Roundtrip, kein Cent Lizenzgebuehren,
|
||||||
|
und das Audio verlaesst das Geraet nie.
|
||||||
|
|
||||||
|
**Mitgelieferte Wake-Words** (ONNX-Dateien in `android/android/app/src/main/assets/openwakeword/`):
|
||||||
|
- `Hey Jarvis` (Default, openWakeWord-Original)
|
||||||
|
- `Computer` (Star-Trek-Style, Community-Modell)
|
||||||
|
- `Alexa`, `Hey Mycroft`, `Hey Rhasspy` (openWakeWord-Originale)
|
||||||
|
|
||||||
|
Community-Modelle stammen aus [fwartner/home-assistant-wakewords-collection](https://github.com/fwartner/home-assistant-wakewords-collection).
|
||||||
|
|
||||||
|
**Bedienung:**
|
||||||
|
- App → **Einstellungen** → **Wake-Word** → gewuenschtes Keyword waehlen → **Speichern + Aktivieren**
|
||||||
|
- **Ohr-Button (👂)** in der Statusleiste tippen → Wake-Word ist scharf, App hoert passiv mit
|
||||||
|
- Wake-Word sagen → Symbol wechselt auf 🎙️, **Bereit-Sound** (Ding-Dong, optional in Settings) + Toast "🎤 sprich jetzt" sobald das Mikro wirklich offen ist
|
||||||
|
- Nach jeder ARIA-Antwort oeffnet sich das Mikro nochmal — Stille → zurueck zu 👂
|
||||||
|
- Erneut tippen → Ohr aus (🔇)
|
||||||
|
|
||||||
|
**Eigene Wake-Words trainieren** (gratis, ~30 Min):
|
||||||
|
|
||||||
|
1. openWakeWord Trainings-Notebook auf Colab oeffnen (Link im
|
||||||
|
[openWakeWord Repo](https://github.com/dscripka/openWakeWord) unter "Training Custom Models")
|
||||||
|
2. Wake-Word-Phrase eingeben (z.B. "ARIA", "Hey Stefan"), Notebook ausfuehren —
|
||||||
|
das Notebook generiert synthetische Trainings-Beispiele und trainiert das Modell.
|
||||||
|
3. Resultierende `.onnx`-Datei runterladen
|
||||||
|
4. Datei in `android/android/app/src/main/assets/openwakeword/` ablegen
|
||||||
|
5. In `android/src/services/wakeword.ts` den Dateinamen (ohne `.onnx`) zur
|
||||||
|
`WAKE_KEYWORDS`-Liste hinzufuegen
|
||||||
|
6. APK neu bauen
|
||||||
|
|
||||||
|
*(Diagnostic-Upload fuer Custom-`.onnx` ohne Rebuild kommt spaeter.)*
|
||||||
|
|
||||||
|
**Tuning** (in [wakeword.ts](android/src/services/wakeword.ts)):
|
||||||
|
- `DEFAULT_THRESHOLD = 0.5` — Score-Schwelle (raise auf 0.6–0.7 bei False-Positives)
|
||||||
|
- `DEFAULT_PATIENCE = 2` — wie viele Frames ueber Threshold noetig
|
||||||
|
- `DEFAULT_DEBOUNCE_MS = 1500` — Mindestabstand zwischen zwei Triggern
|
||||||
|
|
||||||
### Ersteinrichtung (Dev-Maschine, einmalig)
|
### Ersteinrichtung (Dev-Maschine, einmalig)
|
||||||
|
|
||||||
@@ -361,47 +421,148 @@ cd android
|
|||||||
# APK liegt unter android/app/build/outputs/apk/release/
|
# APK liegt unter android/app/build/outputs/apk/release/
|
||||||
```
|
```
|
||||||
|
|
||||||
### Audio-Pipeline
|
### Release auf Gitea veroeffentlichen
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./release.sh 1.2.0
|
||||||
|
```
|
||||||
|
|
||||||
|
Das Script macht alles in einem Schritt:
|
||||||
|
1. Setzt Versionsnummern (package.json, build.gradle, SettingsScreen)
|
||||||
|
2. Fragt Gitea-Kennwort ab (wird nirgends gespeichert)
|
||||||
|
3. Baut die Release-APK
|
||||||
|
4. Git Commit + Tag + Push
|
||||||
|
5. Erstellt Gitea Release + laedt APK hoch
|
||||||
|
6. Kopiert APK auf RVS-Server (Auto-Update, optional)
|
||||||
|
|
||||||
|
Voraussetzung in `.env`:
|
||||||
|
```bash
|
||||||
|
GITEA_URL=https://gitea.hackersoft.de
|
||||||
|
GITEA_REPO=stefan/aria-agent
|
||||||
|
GITEA_USER=stefan
|
||||||
|
RVS_UPDATE_HOST=root@aria-rvs # Optional: fuer Auto-Update
|
||||||
|
```
|
||||||
|
|
||||||
|
### Docker-Cleanup
|
||||||
|
|
||||||
|
Das Bridge-Image zieht grosse ML-Deps (faster-whisper, ctranslate2, onnxruntime,
|
||||||
|
openwakeword) — bei jedem Rebuild waechst der Docker-Build-Cache. Wenn
|
||||||
|
die VM voll laeuft:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
./cleanup.sh # sicher: Build-Cache + ungenutzte Images
|
||||||
|
./cleanup.sh --full # aggressiv: zusaetzlich ungenutzte Volumes (mit Rueckfrage)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Auto-Update
|
||||||
|
|
||||||
|
Die App prueft beim Start ob eine neuere Version auf dem RVS liegt.
|
||||||
|
Der Update-Flow:
|
||||||
|
1. `./release.sh 0.0.3.0` → APK wird auf RVS kopiert (via scp)
|
||||||
|
2. Alternativ: `git pull` auf dem RVS-Server → APK in `rvs/updates/`
|
||||||
|
3. App sendet `update_check` mit aktueller Version
|
||||||
|
4. RVS vergleicht → sendet `update_available`
|
||||||
|
5. App zeigt Dialog → Download ueber WebSocket → Installation
|
||||||
|
|
||||||
|
### Audio-Pipeline (Spracheingabe)
|
||||||
|
|
||||||
```
|
```
|
||||||
App (Mikrofon) → AAC/MP4 Aufnahme → Base64 → RVS → Bridge
|
App (Mikrofon) → AAC/MP4 Aufnahme → Base64 → RVS → Bridge
|
||||||
Bridge: FFmpeg (16kHz PCM) → Whisper STT → Text → aria-core
|
Bridge: FFmpeg (16kHz PCM) → Whisper STT → Text → aria-brain
|
||||||
aria-core → Antwort → Bridge → Piper TTS (WAV) → Base64 → RVS → App
|
Bridge: STT-Ergebnis → RVS → App (Placeholder wird durch transkribierten Text ersetzt)
|
||||||
App: Base64 → WAV → Lautsprecher
|
aria-brain → Antwort → Bridge → F5-TTS (Gaming-PC) → PCM-Stream → RVS → App
|
||||||
|
App: AudioTrack MODE_STREAM (nahtlos), Cache als WAV pro Message
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Audio-Verhalten in der App
|
||||||
|
|
||||||
|
| Phase | Andere App (Spotify) | ARIA-Mikro |
|
||||||
|
|------------------------------|----------------------|-------------------------|
|
||||||
|
| Idle / Ohr aus | spielt frei | aus |
|
||||||
|
| Wake-Word lauscht (armed) | spielt frei | passiv (openWakeWord) |
|
||||||
|
| User-Aufnahme laeuft | pausiert (EXCLUSIVE) | Recording |
|
||||||
|
| Aufnahme zu Ende | resumed | aus |
|
||||||
|
| ARIA denkt/schreibt (~20s) | spielt frei | aus |
|
||||||
|
| TTS startet | pausiert (DUCK) | aus (oder barge) |
|
||||||
|
| TTS spielt (auch GPU-Pausen) | bleibt pausiert | barge wenn Wake-Word |
|
||||||
|
| TTS zu Ende | nach 800ms resumed | (Conversation-Window) |
|
||||||
|
| Eingehender Anruf (auch VoIP)| — | Mikro pausiert |
|
||||||
|
| Anruf vorbei (Auto-Resume) | pausiert wieder | aus |
|
||||||
|
| Neue Frage waehrend Anruf | — | (Resume verworfen) |
|
||||||
|
|
||||||
|
Mechanismen: Underrun-Schutz im PcmStreamPlayer (Stille-Fill in Render-
|
||||||
|
Pausen), Conversation-Focus bei Wake-Word, Foreground-Service mit
|
||||||
|
mediaPlayback|microphone, Anruf-Erkennung ueber TelephonyManager +
|
||||||
|
AudioFocus-Loss-Listener mit Polling-Fallback (VoIP). Bei Anruf wird
|
||||||
|
die Wiedergabe-Position gemerkt — nach dem Auflegen spielt ARIA ab
|
||||||
|
der genauen Stelle weiter (oder verwirft das wenn der User waehrend
|
||||||
|
des Telefonats per Text eine neue Frage gestellt hat). PcmPlayback-
|
||||||
|
Finished-Event vom Native sorgt dafuer dass Spotify erst pausiert
|
||||||
|
bleibt bis ARIA wirklich verstummt ist.
|
||||||
|
|
||||||
|
### Datei-Pipeline (Bilder & Anhaenge)
|
||||||
|
|
||||||
|
```
|
||||||
|
App (Kamera/Dateimanager) → Base64 → RVS → Bridge
|
||||||
|
Bridge: Speichert in /shared/uploads/ (Shared Volume, fuer aria-brain sichtbar)
|
||||||
|
Bridge: aria-brain → "Stefan hat ein Bild geschickt: foto.jpg — liegt unter /shared/uploads/..."
|
||||||
|
ARIA: Kann Datei per Bash/Read-Tool oeffnen und analysieren
|
||||||
|
```
|
||||||
|
|
||||||
|
**Unterstuetzte Formate:** Bilder (JPG, PNG), Dokumente (PDF, DOCX, TXT), beliebige Dateien.
|
||||||
|
Bilder werden in der App inline angezeigt, andere Dateien als Icon + Dateiname.
|
||||||
|
|
||||||
|
**Re-Download:** Wird der lokale Cache in der App geleert (Einstellungen → Anhang-Speicher → Cache leeren),
|
||||||
|
werden fehlende Anhaenge automatisch ueber RVS vom Server neu geladen. Der Speicherort
|
||||||
|
ist in den App-Einstellungen konfigurierbar.
|
||||||
|
|
||||||
|
> **Tipp Speicherplatz:** Das Docker Volume `aria-shared` liegt standardmaessig auf ARIAs VM-Disk.
|
||||||
|
> Bei vielen Uploads kann das den Speicher der VM belasten (dort laufen auch alle Container).
|
||||||
|
> Empfehlung: Das Volume auf ein Netzwerk-Filesystem mounten (CephFS, NFS, GlusterFS):
|
||||||
|
> ```yaml
|
||||||
|
> # docker-compose.yml
|
||||||
|
> volumes:
|
||||||
|
> aria-shared:
|
||||||
|
> driver: local
|
||||||
|
> driver_opts:
|
||||||
|
> type: nfs
|
||||||
|
> o: addr=nas.local,rw
|
||||||
|
> device: ":/exports/aria-uploads"
|
||||||
|
> ```
|
||||||
|
> So bleibt ARIAs VM-Disk sauber und die Uploads liegen auf dediziertem Storage.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## Datenverzeichnis — aria-data/
|
## Datenverzeichnis — aria-data/
|
||||||
|
|
||||||
Alles was ARIA weiss, kann und ist — liegt hier. Ein `tar` = vollstaendiges Backup.
|
|
||||||
|
|
||||||
```
|
```
|
||||||
aria-data/
|
aria-data/
|
||||||
├── brain/ ← ARIAs Gedaechtnis (OpenClaw Memory)
|
├── brain/ ← ARIAs Gehirn — Bind-Mount, GITIGNORED
|
||||||
│ ├── MEMORY.md ← Langzeitgedaechtnis
|
│ ├── qdrant/ ← Vector-DB Storage (Memories, Skills-Embeddings)
|
||||||
│ └── memory/ ← Tageslogbuecher
|
│ └── data/ ← Skills, Embedding-Modell-Cache
|
||||||
|
│ └── skills/<name>/ ← Pro Skill ein Ordner mit Manifest, Code, venv
|
||||||
│
|
│
|
||||||
├── skills/ ← ARIAs Faehigkeiten (selbst geschrieben!)
|
├── brain-import/ ← Quell-Dateien fuer den initialen Import in die DB
|
||||||
│
|
│ ├── AGENT.md ← Persoenlichkeit (wird Memory-Punkte vom Typ identity/rule)
|
||||||
├── voices/ ← Piper TTS Stimmen (offline)
|
│ ├── BOOTSTRAP.md
|
||||||
│ ├── de_DE-ramona-low.onnx
|
│ ├── TOOLING.md.example
|
||||||
│ └── de_DE-thorsten-high.onnx
|
│ └── USER.md.example
|
||||||
│
|
│
|
||||||
├── config/
|
├── config/
|
||||||
│ ├── BOOTSTRAP.md ← System-Prompt (Identitaet, Regeln, Tools)
|
|
||||||
│ ├── AGENT.md ← Persoenlichkeit & Arbeitsprinzipien
|
|
||||||
│ ├── USER.md ← Stefans Praeferenzen
|
|
||||||
│ ├── openclaw.env ← OpenClaw Environment
|
|
||||||
│ ├── aria.env ← Voice Bridge Config
|
|
||||||
│ └── diag-state/ ← Diagnostic persistenter State
|
│ └── diag-state/ ← Diagnostic persistenter State
|
||||||
│
|
│
|
||||||
└── ssh/ ← SSH Keys fuer VM-Zugriff
|
└── ssh/ ← SSH Keys (Brain + Proxy teilen sich)
|
||||||
├── id_ed25519 ← Private Key (generiert von aria-setup.sh)
|
├── id_ed25519
|
||||||
├── id_ed25519.pub ← Public Key (muss in VM authorized_keys!)
|
├── id_ed25519.pub
|
||||||
└── config ← SSH Config (Host aria-wohnung)
|
└── config ← Host aria-wohnung
|
||||||
```
|
```
|
||||||
|
|
||||||
|
`aria-data/brain/` (Vector-DB + Skills) ist gitignored — Backup laeuft ueber
|
||||||
|
den Gehirn-Export-Button in der Diagnostic, nicht ueber Git.
|
||||||
|
|
||||||
|
Settings im Shared Volume (`/shared/config/`): `voice_config.json`,
|
||||||
|
`highlight_triggers.json`, `runtime.json`, `chat_backup.jsonl`.
|
||||||
|
|
||||||
**Backup:**
|
**Backup:**
|
||||||
```bash
|
```bash
|
||||||
tar -czf aria-backup-$(date +%Y%m%d).tar.gz aria-data/
|
tar -czf aria-backup-$(date +%Y%m%d).tar.gz aria-data/
|
||||||
@@ -411,7 +572,7 @@ tar -czf aria-backup-$(date +%Y%m%d).tar.gz aria-data/
|
|||||||
|
|
||||||
## RVS — Rendezvous-Server
|
## RVS — Rendezvous-Server
|
||||||
|
|
||||||
Laeuft im Rechenzentrum. Reiner Relay — kennt keine Tokens, speichert nichts.
|
Laeuft im Rechenzentrum. WebSocket Relay + Auto-Update Server.
|
||||||
Wer sich mit dem gleichen Token verbindet, landet im gleichen Room.
|
Wer sich mit dem gleichen Token verbindet, landet im gleichen Room.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -419,20 +580,144 @@ cd rvs
|
|||||||
docker compose up -d
|
docker compose up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
|
**Features:**
|
||||||
|
- WebSocket Relay (alle Message-Types: chat, audio, file, config, xtts, update, etc.)
|
||||||
|
- Auto-Update: APK-Verteilung an Apps ueber WebSocket
|
||||||
|
- Heartbeat + tote Verbindungen aufraeumen
|
||||||
|
|
||||||
|
**Auto-Update APK bereitstellen:**
|
||||||
|
```bash
|
||||||
|
# APK in updates/ legen (manuell oder via release.sh)
|
||||||
|
cp ARIA-v0.0.3.0.apk ~/ARIA-AGENT/rvs/updates/
|
||||||
|
# RVS erkennt die Version aus dem Dateinamen
|
||||||
|
```
|
||||||
|
|
||||||
**Multi-Instanz:** Mehrere ARIA-VMs koennen denselben RVS nutzen — jede mit eigenem Token.
|
**Multi-Instanz:** Mehrere ARIA-VMs koennen denselben RVS nutzen — jede mit eigenem Token.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Gamebox-Stack — F5-TTS + Whisper (GPU-Services)
|
||||||
|
|
||||||
|
Laeuft auf einem separaten Rechner mit NVIDIA GPU (z.B. Gaming-PC mit RTX 3060).
|
||||||
|
Verbindet sich ueber RVS mit der ARIA-Infrastruktur — kein VPN noetig, funktioniert
|
||||||
|
ueber verschiedene Netze hinweg.
|
||||||
|
|
||||||
|
### Architektur
|
||||||
|
|
||||||
|
```
|
||||||
|
Gamebox (Windows, RTX 3060, Docker Desktop + WSL2)
|
||||||
|
├── aria-f5tts-bridge F5-TTS Voice Cloning + RVS-Relay
|
||||||
|
│ Hoert auf xtts_request, streamt audio_pcm
|
||||||
|
├── aria-whisper-bridge faster-whisper auf CUDA (float16)
|
||||||
|
│ Hoert auf stt_request, antwortet mit stt_response
|
||||||
|
└── ./voices/ Geteilt zwischen beiden:
|
||||||
|
{name}.wav — Referenz-Audio (~6-10s)
|
||||||
|
{name}.txt — Referenz-Text (auto via Whisper)
|
||||||
|
|
||||||
|
↕ RVS (Rechenzentrum, WebSocket Relay)
|
||||||
|
|
||||||
|
ARIA-VM
|
||||||
|
└── aria-bridge: STT primaer remote (45s Timeout, dann lokaler CPU-Fallback)
|
||||||
|
TTS via xtts_request → audio_pcm Stream
|
||||||
|
```
|
||||||
|
|
||||||
|
### Voraussetzungen
|
||||||
|
|
||||||
|
- Docker Desktop mit WSL2 (Windows) oder Docker mit NVIDIA Runtime (Linux)
|
||||||
|
- NVIDIA Container Toolkit
|
||||||
|
- GPU mit mindestens 6GB VRAM (Whisper-large + F5-TTS gemeinsam)
|
||||||
|
- **Gleicher RVS_TOKEN wie auf der ARIA-VM!**
|
||||||
|
|
||||||
|
### Setup
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd xtts
|
||||||
|
cp .env.example .env
|
||||||
|
# .env mit RVS-Verbindungsdaten fuellen (gleicher Token wie ARIA-VM!)
|
||||||
|
docker compose up -d
|
||||||
|
# Erster Start laedt die Modelle (Whisper ~1-3GB je nach Groesse, F5-TTS ~1GB)
|
||||||
|
```
|
||||||
|
|
||||||
|
Die Modelle werden in den Volumes `f5tts-models` und `whisper-models` gecacht
|
||||||
|
und muessen nur einmal geladen werden.
|
||||||
|
|
||||||
|
### Features
|
||||||
|
|
||||||
|
**F5-TTS (Sprachausgabe):**
|
||||||
|
- Hochqualitatives Voice Cloning auf Basis von 6-10s Referenz-Audio
|
||||||
|
- Renderzeit ~0.3x Realtime auf RTX 3060 (RTF ≈ 0.3)
|
||||||
|
- Satzweises Streaming, fade-in auf erstem Chunk gegen Warmup-Glitches
|
||||||
|
- Sequentielle Queue gegen GPU-OOM bei parallelen Requests
|
||||||
|
|
||||||
|
**Whisper (Spracherkennung):**
|
||||||
|
- faster-whisper mit CUDA + float16 — fast Echtzeit-Transkription
|
||||||
|
- Modelle: tiny / base / small / medium / large-v3 (Hot-Swap via Diagnostic)
|
||||||
|
- Wird zusaetzlich von der f5tts-bridge intern genutzt um den Referenz-Text
|
||||||
|
beim Voice-Upload automatisch zu erzeugen
|
||||||
|
|
||||||
|
### TTS-Config
|
||||||
|
|
||||||
|
In der Diagnostic unter Einstellungen → Sprachausgabe:
|
||||||
|
- **TTS aktiv**: Global An/Aus
|
||||||
|
- **F5-TTS Stimme**: Default oder gecloned (Maia etc.)
|
||||||
|
|
||||||
|
> F5-TTS ist die einzige Engine — wenn die Gamebox offline ist, bleibt ARIA stumm.
|
||||||
|
> Chat-Antworten kommen weiter an (nur kein Audio).
|
||||||
|
|
||||||
|
### Stimme klonen
|
||||||
|
|
||||||
|
1. App oder Diagnostic → "Stimme klonen" → Audio-Dateien hochladen
|
||||||
|
(WAV/MP3, 1-10 Dateien, ~6-10s gesamt)
|
||||||
|
2. Name vergeben → "Stimme erstellen"
|
||||||
|
3. f5tts-bridge speichert das WAV, schickt einen `stt_request` an die
|
||||||
|
whisper-bridge, legt die Transkription als `.txt` daneben ab und meldet
|
||||||
|
`xtts_voice_saved` zurueck. Der Toast in der App zeigt "Stimme bereit".
|
||||||
|
4. Stimme auswaehlen → ein Voice-Preload (stiller Mini-Render) waermt die
|
||||||
|
Latents auf, "voice_ready" Toast bestaetigt es.
|
||||||
|
|
||||||
|
> **Tipp:** Fuer beste Ergebnisse: saubere Aufnahme, eine Stimme, kein Hintergrund,
|
||||||
|
> 10-30 Sekunden Gesamtlaenge. Mehrere kurze Dateien werden zusammengefuegt.
|
||||||
|
|
||||||
|
### Deutsches Fine-Tune (bessere Qualitaet auf Deutsch)
|
||||||
|
|
||||||
|
Das Default-Modell `F5TTS_v1_Base` ist primaer auf Englisch + Chinesisch trainiert
|
||||||
|
und liefert auf Deutsch merklich schwaechere Voice-Cloning-Qualitaet als XTTS es
|
||||||
|
tat. Community-Fine-Tune von [aihpi](https://huggingface.co/aihpi/F5-TTS-German)
|
||||||
|
auf dem Emilia-Dataset + Common Voice 19.0 funktioniert deutlich besser.
|
||||||
|
|
||||||
|
**Konfiguration ueber Diagnostic → "F5-TTS Modell-Tuning (advanced)":**
|
||||||
|
|
||||||
|
| Feld | Wert |
|
||||||
|
|------|------|
|
||||||
|
| Modell-Architektur | `F5TTS_Base` *(nicht v1_Base! Fine-Tune basiert auf der alten Architektur)* |
|
||||||
|
| Custom Checkpoint | `hf://aihpi/F5-TTS-German/F5TTS_Base/model_365000.safetensors` |
|
||||||
|
| Custom Vocab | `hf://aihpi/F5-TTS-German/vocab.txt` |
|
||||||
|
| cfg_strength | `2.0` |
|
||||||
|
| nfe_step | `32` |
|
||||||
|
|
||||||
|
→ "Anwenden" klicken. Die `hf://`-Pfade werden einmalig automatisch runter-
|
||||||
|
geladen (~3-5GB, landet im `xtts/hf-cache/`) und bei Container-Restart aus
|
||||||
|
dem Cache wiederverwendet.
|
||||||
|
|
||||||
|
> **Warnung zur BigVGAN-Variante** (`F5TTS_Base_bigvgan/model_295000.safetensors`):
|
||||||
|
> funktioniert AKTUELL NICHT mit dieser Bridge. Die f5-tts Library laedt
|
||||||
|
> per Default den Vocos-Vocoder, die BigVGAN-Weights sind damit inkompatibel
|
||||||
|
> → Modell produziert NaN, App bleibt stumm. Nur die **Vocos-Variante
|
||||||
|
> (F5TTS_Base/model_365000.safetensors)** nutzen.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Docker Volumes
|
## Docker Volumes
|
||||||
|
|
||||||
| Volume | Pfad im Container | Zweck |
|
| Volume / Bind | Pfad im Container | Zweck |
|
||||||
|--------|-------------------|-------|
|
|---------------|-------------------|-------|
|
||||||
| `openclaw-config` | `/home/node/.openclaw` | OpenClaw Config, Sessions, Auth |
|
| `~/.claude` (bind) | `/root/.claude` (proxy) | Claude CLI Credentials |
|
||||||
| `claude-config` | `/home/node/.claude` | Claude Code Settings, Permissions |
|
| `./aria-data/ssh` (bind) | `/root/.ssh` (proxy, brain) | SSH-Keys fuer aria-wohnung |
|
||||||
| `~/.claude` (bind) | `/root/.claude` (Proxy) | Claude CLI Credentials |
|
| `./aria-data/brain/qdrant` (bind) | `/qdrant/storage` (qdrant) | Vector-DB Storage |
|
||||||
| `./aria-data/ssh` (bind) | `/root/.ssh`, `/home/node/.ssh` | SSH Keys |
|
| `./aria-data/brain/data` (bind) | `/data` (brain) | Skills + Embedding-Modell-Cache |
|
||||||
| `./aria-data/brain` (bind) | `/home/node/.openclaw/workspace/memory` | Gedaechtnis |
|
| `./aria-data/brain` (bind) | `/brain` (diagnostic) | Brain-Export/Import-Endpoints |
|
||||||
| `./aria-data/skills` (bind) | `/home/node/.openclaw/workspace/skills` | Skills |
|
| `aria-shared` | `/shared` (brain, bridge, proxy, diagnostic) | Datei-Austausch, Config, Uploads |
|
||||||
|
| `./aria-data/config/diag-state` (bind) | `/data` (diagnostic) | Diagnostic persistenter State |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -461,22 +746,21 @@ docker compose down
|
|||||||
|
|
||||||
# Einzelnen Container neu bauen
|
# Einzelnen Container neu bauen
|
||||||
docker compose up -d --build diagnostic
|
docker compose up -d --build diagnostic
|
||||||
docker compose up -d --build bridge
|
docker compose up -d --build bridge brain
|
||||||
|
|
||||||
# Logs
|
# Logs
|
||||||
docker compose logs -f # alle
|
docker compose logs -f # alle
|
||||||
docker compose logs -f aria # nur aria-core
|
docker compose logs -f brain # nur Agent + Memory
|
||||||
docker compose logs -f proxy # nur proxy
|
docker compose logs -f qdrant # nur Vector-DB
|
||||||
|
docker compose logs -f bridge # nur Voice-Bridge
|
||||||
|
docker compose logs -f proxy # nur Claude-Proxy
|
||||||
|
|
||||||
# Setup wiederholen (nach Config-Aenderungen)
|
# SSH-Test (Brain zu aria-wohnung)
|
||||||
./aria-setup.sh
|
docker exec aria-brain ssh aria-wohnung hostname
|
||||||
|
|
||||||
# SSH-Test
|
# Brain-API direkt testen
|
||||||
docker exec aria-core ssh aria-wohnung hostname
|
docker exec aria-brain curl localhost:8080/health
|
||||||
|
docker exec aria-brain curl localhost:8080/memory/stats
|
||||||
# Tool-Test
|
|
||||||
# Neue Session in Diagnostic anlegen, dann:
|
|
||||||
# "Wie wird das Wetter in Bremen?"
|
|
||||||
```
|
```
|
||||||
|
|
||||||
---
|
---
|
||||||
@@ -486,9 +770,18 @@ docker exec aria-core ssh aria-wohnung hostname
|
|||||||
- **Proxy Cold Start**: Jede Nachricht spawnt einen neuen `claude --print` Prozess.
|
- **Proxy Cold Start**: Jede Nachricht spawnt einen neuen `claude --print` Prozess.
|
||||||
Dadurch ist ARIA langsamer als die direkte Claude CLI. Timeout ist auf 900s (15 Min).
|
Dadurch ist ARIA langsamer als die direkte Claude CLI. Timeout ist auf 900s (15 Min).
|
||||||
- **Kein Streaming zur App**: Die App zeigt erst die fertige Antwort, keine Streaming-Tokens.
|
- **Kein Streaming zur App**: Die App zeigt erst die fertige Antwort, keine Streaming-Tokens.
|
||||||
- **Wake Word nur auf VM**: Die Bridge hoert auf "ARIA" ueber das lokale Mikrofon der VM.
|
- **Wake-Word in der App nur eingebaute Keywords**: `Hey Jarvis`, `Alexa`, `Hey Mycroft`,
|
||||||
In der App gibt es Energy-basierte Erkennung (Phase 1).
|
`Hey Rhasspy` funktionieren sofort, eigene Wake-Words muessen aktuell noch als
|
||||||
|
`.onnx`-Datei ins App-Bundle gelegt + zur Liste in `wakeword.ts` hinzugefuegt werden.
|
||||||
|
Die Diagnostic-Upload-UI ist Phase 2.
|
||||||
- **Audio-Format**: App nimmt AAC/MP4 auf, Bridge konvertiert via FFmpeg zu 16kHz PCM.
|
- **Audio-Format**: App nimmt AAC/MP4 auf, Bridge konvertiert via FFmpeg zu 16kHz PCM.
|
||||||
|
- **RVS Zombie-Connections**: WebSocket-Verbindungen sterben gelegentlich ohne Fehlermeldung.
|
||||||
|
Bridge hat Ping-Check (5s), Diagnostic nutzt frische Verbindungen pro Request.
|
||||||
|
- **Bildanalyse eingeschraenkt**: Bilder werden in `/shared/uploads/` gespeichert. ARIA kann
|
||||||
|
sie per Bash/Read-Tool oeffnen, aber Claude Vision (direkte Bildanalyse) ist ueber den
|
||||||
|
Proxy-Pfad (`claude --print`) noch nicht moeglich. ARIA sieht den Dateipfad, nicht das Bild.
|
||||||
|
- **Dateigroesse**: Grosse Dateien (>5MB) koennen WebSocket-Limits ueberschreiten.
|
||||||
|
Bilder werden in der App auf max 1920x1920px @ 80% Qualitaet komprimiert.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -504,20 +797,86 @@ docker exec aria-core ssh aria-wohnung hostname
|
|||||||
- [x] Android App (Chat + Sprache + Uploads)
|
- [x] Android App (Chat + Sprache + Uploads)
|
||||||
- [x] Tool-Permissions (alle Tools freigeschaltet)
|
- [x] Tool-Permissions (alle Tools freigeschaltet)
|
||||||
- [x] SSH-Zugriff auf VM (aria-wohnung)
|
- [x] SSH-Zugriff auf VM (aria-wohnung)
|
||||||
- [x] Diagnostic Web-UI
|
- [x] Diagnostic Web-UI + Einstellungen
|
||||||
- [x] Session-Verwaltung + Chat-History
|
- [x] Session-Verwaltung + Chat-History
|
||||||
|
- [x] Stimmen-Einstellungen (frueher Piper Ramona/Thorsten, Highlight-Trigger) — durch XTTS, dann F5-TTS Voice Cloning ersetzt
|
||||||
|
- [x] Piper komplett entfernt — nur noch XTTS v2 als TTS (Gaming-PC)
|
||||||
|
- [x] Streaming TTS: PCM-Chunks direkt in AudioTrack, nahtlose Wiedergabe
|
||||||
|
- [x] TTS satzweise fuer lange Texte
|
||||||
|
- [x] Datei-/Bild-Upload mit Shared Volume
|
||||||
|
- [x] Watchdog (stuck Run Erkennung + Auto-Fix + Container-Restart)
|
||||||
|
- [x] Auto-Update System (APK via RVS)
|
||||||
|
- [x] Chat-Suche, Play-Button, Abbrechen-Button
|
||||||
|
- [x] XTTS v2 Integration (GPU, Voice Cloning, remote ueber RVS)
|
||||||
|
- [x] Gespraechsmodus (Ohr-Button, automatische Aufnahme nach ARIA-Antwort)
|
||||||
|
- [x] Mehrere Anhaenge + Text vor dem Senden + Paste-Support
|
||||||
|
- [x] Markdown-Bereinigung fuer TTS
|
||||||
|
- [x] Auto-Update mit FileProvider + Update-Check Button
|
||||||
|
- [x] Inverted FlatList (zuverlaessiges Scroll-to-Bottom)
|
||||||
|
- [x] Speech Gate (VAD verwirft Aufnahme ohne erkannte Sprache)
|
||||||
|
- [x] Session-Persistenz ueber Container-Restarts (sessionFromFile + atomic write)
|
||||||
|
- [x] Session-Export als Markdown-Datei (Download-Button pro Session)
|
||||||
|
- [x] "ARIA denkt..."-Indicator + Abbrechen-Button in App (via Bridge → RVS)
|
||||||
|
- [x] Whisper-Modell waehlbar in Diagnostic (tiny…large-v3, Hot-Reload)
|
||||||
|
- [x] App-Aufnahme explizit 16kHz mono (optimal fuer Whisper, kein Resample)
|
||||||
|
- [x] Streaming TTS Pre-Roll-Buffer + Wartezeit auf playbackHeadPosition (kein Cutoff mid-Satz mehr)
|
||||||
|
- [x] Pre-Roll-Buffer einstellbar in App-Settings
|
||||||
|
- [x] Decimal-zu-Worte fuer TTS + generisches Acronym-Buchstabieren
|
||||||
|
- [x] voice_preload/voice_ready: visueller Status-Indikator beim Stimmen-Wechsel
|
||||||
|
- [x] Whisper STT auf die Gamebox ausgelagert (CUDA float16, fast Echtzeit)
|
||||||
|
- [x] **F5-TTS ersetzt XTTS** — bessere Voice-Cloning-Qualitaet, Whisper-auto-transkribierter Referenz-Text
|
||||||
|
- [x] Audio-Pause statt Ducking (TRANSIENT statt MAY_DUCK) + release-Timing fix
|
||||||
|
- [x] VAD-Stille-Toleranz einstellbar (1-8s) + adaptive Mikro-Baseline + Max-Aufnahme einstellbar (1-30 min)
|
||||||
|
- [x] Barge-In: User kann ARIA waehrend Antwort unterbrechen, aria-core bekommt Kontext-Hint
|
||||||
|
- [x] Anruf-Pause + Auto-Resume: TTS verstummt bei Anruf, faehrt nach Auflegen ab der gemerkten Position fort (Date.now()-Tracking + WAV-Cache der Antwort)
|
||||||
|
- [x] PcmPlaybackFinished-Event: AudioFocus wird erst released wenn AudioTrack wirklich durch ist — kein Spotify-mid-TTS mehr
|
||||||
|
- [x] Edge-Case: neue Frage waehrend Telefonat verwirft pending Auto-Resume, neueste Antwort gewinnt
|
||||||
|
- [x] Settings-Sub-Screens: 8 Kategorien statt langer Liste
|
||||||
|
- [x] APK ABI-Split arm64-v8a: 35 MB statt 136 MB
|
||||||
|
- [x] Sprachnachrichten-Bubble: audioRequestId statt Substring-Match — keine vertauschten Bubbles mehr bei parallelen Aufnahmen
|
||||||
|
- [x] Bereit-Sound (Airplane Ding-Dong) wenn Mikro nach Wake-Word offen ist — akustische Bestaetigung, in Settings abschaltbar
|
||||||
|
- [x] Wake-Word parallel zu TTS mit AcousticEchoCanceler — "Computer" sagen waehrend ARIA spricht stoppt sie und oeffnet Mikro
|
||||||
|
- [x] GPS-Position mit Nachrichten mitsenden (Toggle in Settings) — ARIA nutzt sie nur bei standortbezogenen Fragen, im Chat sichtbar nur in ihrer Antwort
|
||||||
|
- [x] Sprachnachrichten ohne STT-Result werden nach Timeout automatisch entfernt (skaliert mit Aufnahmedauer)
|
||||||
|
- [x] Background Audio Service: TTS, Wake-Word-Lauschen + Aufnahme laufen auch bei minimierter App weiter (Foreground-Service mit mediaPlayback|microphone, dynamische Notification)
|
||||||
|
- [x] Disk-Voll Banner in Diagnostic mit copy-baren Cleanup-Befehlen
|
||||||
|
- [x] Wake-Word on-device via openWakeWord (ONNX Runtime, kein API-Key) + State-Icon
|
||||||
|
|
||||||
|
### Phase A — Refactor: OpenClaw raus, eigenes Brain rein
|
||||||
|
|
||||||
|
- [x] aria-brain Container-Skeleton (FastAPI, Qdrant, sentence-transformers)
|
||||||
|
- [x] aria-core (OpenClaw) komplett abgerissen — Tag `v0.1.2.0` als Archiv
|
||||||
|
- [x] Diagnostic: Gehirn-Tab (Memory Search/Filter, Add/Edit/Delete)
|
||||||
|
- [x] Diagnostic: Gehirn-Export/Import als tar.gz
|
||||||
|
- [x] Diagnostic: Datei-Manager (Liste, Suche, Download, Delete, Multi-Select + ZIP + Bulk-Delete)
|
||||||
|
- [x] Diagnostic: Komplett-Reset (Wipe All)
|
||||||
|
- [x] Diagnostic: Info-Buttons mit Modal-Erklaerungen (Status, Konversation, Memories, Bootstrap)
|
||||||
|
- [x] App: Datei-Manager als Modal in den Einstellungen (mit Multi-Select + ZIP-Download)
|
||||||
|
- [x] Voice Export/Import (einzelne Stimmen + F5/Whisper-Settings als Bundle)
|
||||||
|
|
||||||
|
### Phase B — Brain mit Memory + Loop + Skills
|
||||||
|
|
||||||
|
- [x] **Phase B Punkt 2:** Migration aus `aria-data/brain-import/` → atomare Memory-Punkte (Identity / Rule / Preference / Tool / Skill, idempotent ueber migration_key) + Bootstrap-Snapshot Export/Import (nur pinned)
|
||||||
|
- [x] **Phase B Punkt 3:** Brain Conversation-Loop (Single-Chat UI, Rolling Window 50 Turns, Schwelle 60 → automatisches Destillat, manueller Trigger)
|
||||||
|
- [x] **Phase B Punkt 4:** Skills-System (Python-only via local-venv, skill_create als Tool, dynamische run_<skill> Tools, Diagnostic Skills-Tab mit Logs/Toggle/Export/Import, skill_created Live-Notification in App+Diagnostic, harte Schwelle "pip → Skill")
|
||||||
|
- [x] Sprachmodell-Setting wieder funktional (brainModel in runtime.json statt aria-core)
|
||||||
|
- [x] App-Chat-Sync: kompletter Server-Sync bei Reconnect (Server = Source of Truth) + chat_cleared Live-Update. Lokal-only Bubbles (Skill-Notifications, laufende Voice ohne STT) bleiben erhalten.
|
||||||
|
- [x] App: Chat-Suche mit Next/Prev Navigation statt Filter
|
||||||
|
- [x] Token/Call-Metrics + Subscription-Quota-Tracking (Pro / Max 5x / Max 20x / Custom)
|
||||||
|
- [x] Datei-Manager Multi-Select: Bulk-Download als ZIP + Bulk-Delete (Diagnostic + App)
|
||||||
|
|
||||||
### Phase 2 — ARIA wird produktiv
|
### Phase 2 — ARIA wird produktiv
|
||||||
|
|
||||||
- [ ] Skills bauen (Bildgenerierung, etc.)
|
- [ ] Erste Skills bauen lassen (yt-dlp, pdf-extract, etc. — durch normale Anfragen)
|
||||||
- [ ] Gitea-Integration
|
- [ ] Gitea-Integration
|
||||||
- [ ] VM einrichten (Desktop, Browser, Tools)
|
- [ ] VM einrichten (Desktop, Browser, Tools)
|
||||||
- [ ] Heartbeat (periodische Selbst-Checks)
|
- [ ] Heartbeat (periodische Selbst-Checks)
|
||||||
- [ ] Lokales LLM als Wächter (Triage vor Claude-Call)
|
- [ ] Lokales LLM als Waechter (Triage vor Claude-Call)
|
||||||
|
|
||||||
### Phase 3 — Erweiterungen
|
### Phase 3 — Erweiterungen
|
||||||
|
|
||||||
- [ ] STARFACE Telefonie-Skill
|
- [ ] STARFACE Telefonie-Skill
|
||||||
- [ ] Desktop Client (Tauri)
|
- [ ] Desktop Client (Tauri)
|
||||||
- [ ] bKVM Remote IT-Support
|
- [ ] bKVM Remote IT-Support
|
||||||
- [ ] Porcupine Wake Word (on-device "ARIA" in der App)
|
- [ ] Custom-`.onnx`-Upload fuer Wake-Word ueber Diagnostic (ohne App-Rebuild)
|
||||||
|
- [ ] Claude Vision direkt (Bildanalyse ohne Dateipfad-Umweg)
|
||||||
|
|||||||
@@ -13,6 +13,7 @@ import { createBottomTabNavigator } from '@react-navigation/bottom-tabs';
|
|||||||
import ChatScreen from './src/screens/ChatScreen';
|
import ChatScreen from './src/screens/ChatScreen';
|
||||||
import SettingsScreen from './src/screens/SettingsScreen';
|
import SettingsScreen from './src/screens/SettingsScreen';
|
||||||
import rvs from './src/services/rvs';
|
import rvs from './src/services/rvs';
|
||||||
|
import { initLogger } from './src/services/logger';
|
||||||
|
|
||||||
// --- Navigation ---
|
// --- Navigation ---
|
||||||
|
|
||||||
@@ -44,6 +45,10 @@ const TAB_ICONS: Record<string, { active: string; inactive: string }> = {
|
|||||||
const App: React.FC = () => {
|
const App: React.FC = () => {
|
||||||
// Beim Start: gespeicherte RVS-Konfiguration laden und verbinden
|
// Beim Start: gespeicherte RVS-Konfiguration laden und verbinden
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
|
// Verbose-Logging-Setting laden BEVOR andere Module loslegen.
|
||||||
|
// initLogger ist async aber blockt nichts — solange er noch laueft,
|
||||||
|
// loggen wir normal (Default an), danach respektiert console.log das Setting.
|
||||||
|
initLogger().catch(() => {});
|
||||||
const initConnection = async () => {
|
const initConnection = async () => {
|
||||||
const config = await rvs.loadConfig();
|
const config = await rvs.loadConfig();
|
||||||
if (config) {
|
if (config) {
|
||||||
|
|||||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
BIN
Binary file not shown.
-245
@@ -1,245 +0,0 @@
|
|||||||
package org.gradle.accessors.dm;
|
|
||||||
|
|
||||||
import org.gradle.api.NonNullApi;
|
|
||||||
import org.gradle.api.artifacts.MinimalExternalModuleDependency;
|
|
||||||
import org.gradle.plugin.use.PluginDependency;
|
|
||||||
import org.gradle.api.artifacts.ExternalModuleDependencyBundle;
|
|
||||||
import org.gradle.api.artifacts.MutableVersionConstraint;
|
|
||||||
import org.gradle.api.provider.Provider;
|
|
||||||
import org.gradle.api.model.ObjectFactory;
|
|
||||||
import org.gradle.api.provider.ProviderFactory;
|
|
||||||
import org.gradle.api.internal.catalog.AbstractExternalDependencyFactory;
|
|
||||||
import org.gradle.api.internal.catalog.DefaultVersionCatalog;
|
|
||||||
import java.util.Map;
|
|
||||||
import org.gradle.api.internal.attributes.ImmutableAttributesFactory;
|
|
||||||
import org.gradle.api.internal.artifacts.dsl.CapabilityNotationParser;
|
|
||||||
import javax.inject.Inject;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* A catalog of dependencies accessible via the `libs` extension.
|
|
||||||
*/
|
|
||||||
@NonNullApi
|
|
||||||
public class LibrariesForLibs extends AbstractExternalDependencyFactory {
|
|
||||||
|
|
||||||
private final AbstractExternalDependencyFactory owner = this;
|
|
||||||
private final AndroidLibraryAccessors laccForAndroidLibraryAccessors = new AndroidLibraryAccessors(owner);
|
|
||||||
private final KotlinLibraryAccessors laccForKotlinLibraryAccessors = new KotlinLibraryAccessors(owner);
|
|
||||||
private final VersionAccessors vaccForVersionAccessors = new VersionAccessors(providers, config);
|
|
||||||
private final BundleAccessors baccForBundleAccessors = new BundleAccessors(objects, providers, config, attributesFactory, capabilityNotationParser);
|
|
||||||
private final PluginAccessors paccForPluginAccessors = new PluginAccessors(providers, config);
|
|
||||||
|
|
||||||
@Inject
|
|
||||||
public LibrariesForLibs(DefaultVersionCatalog config, ProviderFactory providers, ObjectFactory objects, ImmutableAttributesFactory attributesFactory, CapabilityNotationParser capabilityNotationParser) {
|
|
||||||
super(config, providers, objects, attributesFactory, capabilityNotationParser);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a dependency provider for gson (com.google.code.gson:gson)
|
|
||||||
* This dependency was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<MinimalExternalModuleDependency> getGson() {
|
|
||||||
return create("gson");
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a dependency provider for guava (com.google.guava:guava)
|
|
||||||
* This dependency was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<MinimalExternalModuleDependency> getGuava() {
|
|
||||||
return create("guava");
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a dependency provider for javapoet (com.squareup:javapoet)
|
|
||||||
* This dependency was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<MinimalExternalModuleDependency> getJavapoet() {
|
|
||||||
return create("javapoet");
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a dependency provider for junit (junit:junit)
|
|
||||||
* This dependency was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<MinimalExternalModuleDependency> getJunit() {
|
|
||||||
return create("junit");
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of libraries at android
|
|
||||||
*/
|
|
||||||
public AndroidLibraryAccessors getAndroid() {
|
|
||||||
return laccForAndroidLibraryAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of libraries at kotlin
|
|
||||||
*/
|
|
||||||
public KotlinLibraryAccessors getKotlin() {
|
|
||||||
return laccForKotlinLibraryAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of versions at versions
|
|
||||||
*/
|
|
||||||
public VersionAccessors getVersions() {
|
|
||||||
return vaccForVersionAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of bundles at bundles
|
|
||||||
*/
|
|
||||||
public BundleAccessors getBundles() {
|
|
||||||
return baccForBundleAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of plugins at plugins
|
|
||||||
*/
|
|
||||||
public PluginAccessors getPlugins() {
|
|
||||||
return paccForPluginAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class AndroidLibraryAccessors extends SubDependencyFactory {
|
|
||||||
private final AndroidGradleLibraryAccessors laccForAndroidGradleLibraryAccessors = new AndroidGradleLibraryAccessors(owner);
|
|
||||||
|
|
||||||
public AndroidLibraryAccessors(AbstractExternalDependencyFactory owner) { super(owner); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of libraries at android.gradle
|
|
||||||
*/
|
|
||||||
public AndroidGradleLibraryAccessors getGradle() {
|
|
||||||
return laccForAndroidGradleLibraryAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class AndroidGradleLibraryAccessors extends SubDependencyFactory {
|
|
||||||
|
|
||||||
public AndroidGradleLibraryAccessors(AbstractExternalDependencyFactory owner) { super(owner); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a dependency provider for plugin (com.android.tools.build:gradle)
|
|
||||||
* This dependency was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<MinimalExternalModuleDependency> getPlugin() {
|
|
||||||
return create("android.gradle.plugin");
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class KotlinLibraryAccessors extends SubDependencyFactory {
|
|
||||||
private final KotlinGradleLibraryAccessors laccForKotlinGradleLibraryAccessors = new KotlinGradleLibraryAccessors(owner);
|
|
||||||
|
|
||||||
public KotlinLibraryAccessors(AbstractExternalDependencyFactory owner) { super(owner); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of libraries at kotlin.gradle
|
|
||||||
*/
|
|
||||||
public KotlinGradleLibraryAccessors getGradle() {
|
|
||||||
return laccForKotlinGradleLibraryAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class KotlinGradleLibraryAccessors extends SubDependencyFactory {
|
|
||||||
|
|
||||||
public KotlinGradleLibraryAccessors(AbstractExternalDependencyFactory owner) { super(owner); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a dependency provider for plugin (org.jetbrains.kotlin:kotlin-gradle-plugin)
|
|
||||||
* This dependency was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<MinimalExternalModuleDependency> getPlugin() {
|
|
||||||
return create("kotlin.gradle.plugin");
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class VersionAccessors extends VersionFactory {
|
|
||||||
|
|
||||||
public VersionAccessors(ProviderFactory providers, DefaultVersionCatalog config) { super(providers, config); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the version associated to this alias: agp (8.1.1)
|
|
||||||
* If the version is a rich version and that its not expressible as a
|
|
||||||
* single version string, then an empty string is returned.
|
|
||||||
* This version was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<String> getAgp() { return getVersion("agp"); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the version associated to this alias: gson (2.8.9)
|
|
||||||
* If the version is a rich version and that its not expressible as a
|
|
||||||
* single version string, then an empty string is returned.
|
|
||||||
* This version was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<String> getGson() { return getVersion("gson"); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the version associated to this alias: guava (31.0.1-jre)
|
|
||||||
* If the version is a rich version and that its not expressible as a
|
|
||||||
* single version string, then an empty string is returned.
|
|
||||||
* This version was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<String> getGuava() { return getVersion("guava"); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the version associated to this alias: javapoet (1.13.0)
|
|
||||||
* If the version is a rich version and that its not expressible as a
|
|
||||||
* single version string, then an empty string is returned.
|
|
||||||
* This version was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<String> getJavapoet() { return getVersion("javapoet"); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the version associated to this alias: junit (4.13.2)
|
|
||||||
* If the version is a rich version and that its not expressible as a
|
|
||||||
* single version string, then an empty string is returned.
|
|
||||||
* This version was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<String> getJunit() { return getVersion("junit"); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the version associated to this alias: kotlin (1.8.0)
|
|
||||||
* If the version is a rich version and that its not expressible as a
|
|
||||||
* single version string, then an empty string is returned.
|
|
||||||
* This version was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<String> getKotlin() { return getVersion("kotlin"); }
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class BundleAccessors extends BundleFactory {
|
|
||||||
|
|
||||||
public BundleAccessors(ObjectFactory objects, ProviderFactory providers, DefaultVersionCatalog config, ImmutableAttributesFactory attributesFactory, CapabilityNotationParser capabilityNotationParser) { super(objects, providers, config, attributesFactory, capabilityNotationParser); }
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class PluginAccessors extends PluginFactory {
|
|
||||||
private final KotlinPluginAccessors paccForKotlinPluginAccessors = new KotlinPluginAccessors(providers, config);
|
|
||||||
|
|
||||||
public PluginAccessors(ProviderFactory providers, DefaultVersionCatalog config) { super(providers, config); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of plugins at plugins.kotlin
|
|
||||||
*/
|
|
||||||
public KotlinPluginAccessors getKotlin() {
|
|
||||||
return paccForKotlinPluginAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class KotlinPluginAccessors extends PluginFactory {
|
|
||||||
|
|
||||||
public KotlinPluginAccessors(ProviderFactory providers, DefaultVersionCatalog config) { super(providers, config); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a plugin provider for kotlin.jvm to the plugin id 'org.jetbrains.kotlin.jvm'
|
|
||||||
* This plugin was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<PluginDependency> getJvm() { return createPlugin("kotlin.jvm"); }
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
-298
@@ -1,298 +0,0 @@
|
|||||||
package org.gradle.accessors.dm;
|
|
||||||
|
|
||||||
import org.gradle.api.NonNullApi;
|
|
||||||
import org.gradle.api.artifacts.MinimalExternalModuleDependency;
|
|
||||||
import org.gradle.plugin.use.PluginDependency;
|
|
||||||
import org.gradle.api.artifacts.ExternalModuleDependencyBundle;
|
|
||||||
import org.gradle.api.artifacts.MutableVersionConstraint;
|
|
||||||
import org.gradle.api.provider.Provider;
|
|
||||||
import org.gradle.api.model.ObjectFactory;
|
|
||||||
import org.gradle.api.provider.ProviderFactory;
|
|
||||||
import org.gradle.api.internal.catalog.AbstractExternalDependencyFactory;
|
|
||||||
import org.gradle.api.internal.catalog.DefaultVersionCatalog;
|
|
||||||
import java.util.Map;
|
|
||||||
import org.gradle.api.internal.attributes.ImmutableAttributesFactory;
|
|
||||||
import org.gradle.api.internal.artifacts.dsl.CapabilityNotationParser;
|
|
||||||
import javax.inject.Inject;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* A catalog of dependencies accessible via the `libs` extension.
|
|
||||||
*/
|
|
||||||
@NonNullApi
|
|
||||||
public class LibrariesForLibsInPluginsBlock extends AbstractExternalDependencyFactory {
|
|
||||||
|
|
||||||
private final AbstractExternalDependencyFactory owner = this;
|
|
||||||
private final AndroidLibraryAccessors laccForAndroidLibraryAccessors = new AndroidLibraryAccessors(owner);
|
|
||||||
private final KotlinLibraryAccessors laccForKotlinLibraryAccessors = new KotlinLibraryAccessors(owner);
|
|
||||||
private final VersionAccessors vaccForVersionAccessors = new VersionAccessors(providers, config);
|
|
||||||
private final BundleAccessors baccForBundleAccessors = new BundleAccessors(objects, providers, config, attributesFactory, capabilityNotationParser);
|
|
||||||
private final PluginAccessors paccForPluginAccessors = new PluginAccessors(providers, config);
|
|
||||||
|
|
||||||
@Inject
|
|
||||||
public LibrariesForLibsInPluginsBlock(DefaultVersionCatalog config, ProviderFactory providers, ObjectFactory objects, ImmutableAttributesFactory attributesFactory, CapabilityNotationParser capabilityNotationParser) {
|
|
||||||
super(config, providers, objects, attributesFactory, capabilityNotationParser);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a dependency provider for gson (com.google.code.gson:gson)
|
|
||||||
* This dependency was declared in catalog libs.versions.toml
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public Provider<MinimalExternalModuleDependency> getGson() {
|
|
||||||
org.gradle.internal.deprecation.DeprecationLogger.deprecateBehaviour("Accessing libraries or bundles from version catalogs in the plugins block.").withAdvice("Only use versions or plugins from catalogs in the plugins block.").willBeRemovedInGradle9().withUpgradeGuideSection(8, "kotlin_dsl_deprecated_catalogs_plugins_block").nagUser();
|
|
||||||
return create("gson");
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a dependency provider for guava (com.google.guava:guava)
|
|
||||||
* This dependency was declared in catalog libs.versions.toml
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public Provider<MinimalExternalModuleDependency> getGuava() {
|
|
||||||
org.gradle.internal.deprecation.DeprecationLogger.deprecateBehaviour("Accessing libraries or bundles from version catalogs in the plugins block.").withAdvice("Only use versions or plugins from catalogs in the plugins block.").willBeRemovedInGradle9().withUpgradeGuideSection(8, "kotlin_dsl_deprecated_catalogs_plugins_block").nagUser();
|
|
||||||
return create("guava");
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a dependency provider for javapoet (com.squareup:javapoet)
|
|
||||||
* This dependency was declared in catalog libs.versions.toml
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public Provider<MinimalExternalModuleDependency> getJavapoet() {
|
|
||||||
org.gradle.internal.deprecation.DeprecationLogger.deprecateBehaviour("Accessing libraries or bundles from version catalogs in the plugins block.").withAdvice("Only use versions or plugins from catalogs in the plugins block.").willBeRemovedInGradle9().withUpgradeGuideSection(8, "kotlin_dsl_deprecated_catalogs_plugins_block").nagUser();
|
|
||||||
return create("javapoet");
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a dependency provider for junit (junit:junit)
|
|
||||||
* This dependency was declared in catalog libs.versions.toml
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public Provider<MinimalExternalModuleDependency> getJunit() {
|
|
||||||
org.gradle.internal.deprecation.DeprecationLogger.deprecateBehaviour("Accessing libraries or bundles from version catalogs in the plugins block.").withAdvice("Only use versions or plugins from catalogs in the plugins block.").willBeRemovedInGradle9().withUpgradeGuideSection(8, "kotlin_dsl_deprecated_catalogs_plugins_block").nagUser();
|
|
||||||
return create("junit");
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of libraries at android
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public AndroidLibraryAccessors getAndroid() {
|
|
||||||
org.gradle.internal.deprecation.DeprecationLogger.deprecateBehaviour("Accessing libraries or bundles from version catalogs in the plugins block.").withAdvice("Only use versions or plugins from catalogs in the plugins block.").willBeRemovedInGradle9().withUpgradeGuideSection(8, "kotlin_dsl_deprecated_catalogs_plugins_block").nagUser();
|
|
||||||
return laccForAndroidLibraryAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of libraries at kotlin
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public KotlinLibraryAccessors getKotlin() {
|
|
||||||
org.gradle.internal.deprecation.DeprecationLogger.deprecateBehaviour("Accessing libraries or bundles from version catalogs in the plugins block.").withAdvice("Only use versions or plugins from catalogs in the plugins block.").willBeRemovedInGradle9().withUpgradeGuideSection(8, "kotlin_dsl_deprecated_catalogs_plugins_block").nagUser();
|
|
||||||
return laccForKotlinLibraryAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of versions at versions
|
|
||||||
*/
|
|
||||||
public VersionAccessors getVersions() {
|
|
||||||
return vaccForVersionAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of bundles at bundles
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public BundleAccessors getBundles() {
|
|
||||||
org.gradle.internal.deprecation.DeprecationLogger.deprecateBehaviour("Accessing libraries or bundles from version catalogs in the plugins block.").withAdvice("Only use versions or plugins from catalogs in the plugins block.").willBeRemovedInGradle9().withUpgradeGuideSection(8, "kotlin_dsl_deprecated_catalogs_plugins_block").nagUser();
|
|
||||||
return baccForBundleAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of plugins at plugins
|
|
||||||
*/
|
|
||||||
public PluginAccessors getPlugins() {
|
|
||||||
return paccForPluginAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public static class AndroidLibraryAccessors extends SubDependencyFactory {
|
|
||||||
private final AndroidGradleLibraryAccessors laccForAndroidGradleLibraryAccessors = new AndroidGradleLibraryAccessors(owner);
|
|
||||||
|
|
||||||
public AndroidLibraryAccessors(AbstractExternalDependencyFactory owner) { super(owner); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of libraries at android.gradle
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public AndroidGradleLibraryAccessors getGradle() {
|
|
||||||
org.gradle.internal.deprecation.DeprecationLogger.deprecateBehaviour("Accessing libraries or bundles from version catalogs in the plugins block.").withAdvice("Only use versions or plugins from catalogs in the plugins block.").willBeRemovedInGradle9().withUpgradeGuideSection(8, "kotlin_dsl_deprecated_catalogs_plugins_block").nagUser();
|
|
||||||
return laccForAndroidGradleLibraryAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public static class AndroidGradleLibraryAccessors extends SubDependencyFactory {
|
|
||||||
|
|
||||||
public AndroidGradleLibraryAccessors(AbstractExternalDependencyFactory owner) { super(owner); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a dependency provider for plugin (com.android.tools.build:gradle)
|
|
||||||
* This dependency was declared in catalog libs.versions.toml
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public Provider<MinimalExternalModuleDependency> getPlugin() {
|
|
||||||
org.gradle.internal.deprecation.DeprecationLogger.deprecateBehaviour("Accessing libraries or bundles from version catalogs in the plugins block.").withAdvice("Only use versions or plugins from catalogs in the plugins block.").willBeRemovedInGradle9().withUpgradeGuideSection(8, "kotlin_dsl_deprecated_catalogs_plugins_block").nagUser();
|
|
||||||
return create("android.gradle.plugin");
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public static class KotlinLibraryAccessors extends SubDependencyFactory {
|
|
||||||
private final KotlinGradleLibraryAccessors laccForKotlinGradleLibraryAccessors = new KotlinGradleLibraryAccessors(owner);
|
|
||||||
|
|
||||||
public KotlinLibraryAccessors(AbstractExternalDependencyFactory owner) { super(owner); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of libraries at kotlin.gradle
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public KotlinGradleLibraryAccessors getGradle() {
|
|
||||||
org.gradle.internal.deprecation.DeprecationLogger.deprecateBehaviour("Accessing libraries or bundles from version catalogs in the plugins block.").withAdvice("Only use versions or plugins from catalogs in the plugins block.").willBeRemovedInGradle9().withUpgradeGuideSection(8, "kotlin_dsl_deprecated_catalogs_plugins_block").nagUser();
|
|
||||||
return laccForKotlinGradleLibraryAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public static class KotlinGradleLibraryAccessors extends SubDependencyFactory {
|
|
||||||
|
|
||||||
public KotlinGradleLibraryAccessors(AbstractExternalDependencyFactory owner) { super(owner); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a dependency provider for plugin (org.jetbrains.kotlin:kotlin-gradle-plugin)
|
|
||||||
* This dependency was declared in catalog libs.versions.toml
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public Provider<MinimalExternalModuleDependency> getPlugin() {
|
|
||||||
org.gradle.internal.deprecation.DeprecationLogger.deprecateBehaviour("Accessing libraries or bundles from version catalogs in the plugins block.").withAdvice("Only use versions or plugins from catalogs in the plugins block.").willBeRemovedInGradle9().withUpgradeGuideSection(8, "kotlin_dsl_deprecated_catalogs_plugins_block").nagUser();
|
|
||||||
return create("kotlin.gradle.plugin");
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class VersionAccessors extends VersionFactory {
|
|
||||||
|
|
||||||
public VersionAccessors(ProviderFactory providers, DefaultVersionCatalog config) { super(providers, config); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the version associated to this alias: agp (8.1.1)
|
|
||||||
* If the version is a rich version and that its not expressible as a
|
|
||||||
* single version string, then an empty string is returned.
|
|
||||||
* This version was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<String> getAgp() { return getVersion("agp"); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the version associated to this alias: gson (2.8.9)
|
|
||||||
* If the version is a rich version and that its not expressible as a
|
|
||||||
* single version string, then an empty string is returned.
|
|
||||||
* This version was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<String> getGson() { return getVersion("gson"); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the version associated to this alias: guava (31.0.1-jre)
|
|
||||||
* If the version is a rich version and that its not expressible as a
|
|
||||||
* single version string, then an empty string is returned.
|
|
||||||
* This version was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<String> getGuava() { return getVersion("guava"); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the version associated to this alias: javapoet (1.13.0)
|
|
||||||
* If the version is a rich version and that its not expressible as a
|
|
||||||
* single version string, then an empty string is returned.
|
|
||||||
* This version was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<String> getJavapoet() { return getVersion("javapoet"); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the version associated to this alias: junit (4.13.2)
|
|
||||||
* If the version is a rich version and that its not expressible as a
|
|
||||||
* single version string, then an empty string is returned.
|
|
||||||
* This version was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<String> getJunit() { return getVersion("junit"); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the version associated to this alias: kotlin (1.8.0)
|
|
||||||
* If the version is a rich version and that its not expressible as a
|
|
||||||
* single version string, then an empty string is returned.
|
|
||||||
* This version was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<String> getKotlin() { return getVersion("kotlin"); }
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @deprecated Will be removed in Gradle 9.0.
|
|
||||||
*/
|
|
||||||
@Deprecated
|
|
||||||
public static class BundleAccessors extends BundleFactory {
|
|
||||||
|
|
||||||
public BundleAccessors(ObjectFactory objects, ProviderFactory providers, DefaultVersionCatalog config, ImmutableAttributesFactory attributesFactory, CapabilityNotationParser capabilityNotationParser) { super(objects, providers, config, attributesFactory, capabilityNotationParser); }
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class PluginAccessors extends PluginFactory {
|
|
||||||
private final KotlinPluginAccessors paccForKotlinPluginAccessors = new KotlinPluginAccessors(providers, config);
|
|
||||||
|
|
||||||
public PluginAccessors(ProviderFactory providers, DefaultVersionCatalog config) { super(providers, config); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Returns the group of plugins at plugins.kotlin
|
|
||||||
*/
|
|
||||||
public KotlinPluginAccessors getKotlin() {
|
|
||||||
return paccForKotlinPluginAccessors;
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class KotlinPluginAccessors extends PluginFactory {
|
|
||||||
|
|
||||||
public KotlinPluginAccessors(ProviderFactory providers, DefaultVersionCatalog config) { super(providers, config); }
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a plugin provider for kotlin.jvm to the plugin id 'org.jetbrains.kotlin.jvm'
|
|
||||||
* This plugin was declared in catalog libs.versions.toml
|
|
||||||
*/
|
|
||||||
public Provider<PluginDependency> getJvm() { return createPlugin("kotlin.jvm"); }
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,2 +0,0 @@
|
|||||||
#Sun Mar 29 11:32:18 CEST 2026
|
|
||||||
gradle.version=8.3
|
|
||||||
Binary file not shown.
Binary file not shown.
@@ -79,8 +79,8 @@ android {
|
|||||||
applicationId "com.ariacockpit"
|
applicationId "com.ariacockpit"
|
||||||
minSdkVersion rootProject.ext.minSdkVersion
|
minSdkVersion rootProject.ext.minSdkVersion
|
||||||
targetSdkVersion rootProject.ext.targetSdkVersion
|
targetSdkVersion rootProject.ext.targetSdkVersion
|
||||||
versionCode 1
|
versionCode 10204
|
||||||
versionName "1.0"
|
versionName "0.1.2.4"
|
||||||
// Fallback fuer Libraries mit Product Flavors
|
// Fallback fuer Libraries mit Product Flavors
|
||||||
missingDimensionStrategy 'react-native-camera', 'general'
|
missingDimensionStrategy 'react-native-camera', 'general'
|
||||||
}
|
}
|
||||||
@@ -104,6 +104,19 @@ android {
|
|||||||
proguardFiles getDefaultProguardFile("proguard-android.txt"), "proguard-rules.pro"
|
proguardFiles getDefaultProguardFile("proguard-android.txt"), "proguard-rules.pro"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ABI-Split: nur arm64-v8a (jedes Android-Phone seit ~2017). Bringt die
|
||||||
|
// APK von ~136 MB auf ~35 MB — relevant weil ONNX Runtime + die anderen
|
||||||
|
// Native-Libs sonst pro Architektur dazukommen. Wer 32-bit oder Emulator
|
||||||
|
// braucht, kann hier "armeabi-v7a", "x86_64" etc. ergaenzen.
|
||||||
|
splits {
|
||||||
|
abi {
|
||||||
|
enable true
|
||||||
|
reset()
|
||||||
|
include "arm64-v8a"
|
||||||
|
universalApk false
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
dependencies {
|
dependencies {
|
||||||
@@ -111,6 +124,9 @@ dependencies {
|
|||||||
implementation("com.facebook.react:react-android")
|
implementation("com.facebook.react:react-android")
|
||||||
implementation("com.facebook.react:flipper-integration")
|
implementation("com.facebook.react:flipper-integration")
|
||||||
|
|
||||||
|
// ONNX Runtime fuer on-device Wake-Word (openWakeWord ONNX-Modelle in assets/openwakeword/)
|
||||||
|
implementation("com.microsoft.onnxruntime:onnxruntime-android:1.17.1")
|
||||||
|
|
||||||
if (hermesEnabled.toBoolean()) {
|
if (hermesEnabled.toBoolean()) {
|
||||||
implementation("com.facebook.react:hermes-android")
|
implementation("com.facebook.react:hermes-android")
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
-97
@@ -1,97 +0,0 @@
|
|||||||
|
|
||||||
package com.facebook.react;
|
|
||||||
|
|
||||||
import android.app.Application;
|
|
||||||
import android.content.Context;
|
|
||||||
import android.content.res.Resources;
|
|
||||||
|
|
||||||
import com.facebook.react.ReactPackage;
|
|
||||||
import com.facebook.react.shell.MainPackageConfig;
|
|
||||||
import com.facebook.react.shell.MainReactPackage;
|
|
||||||
import java.util.Arrays;
|
|
||||||
import java.util.ArrayList;
|
|
||||||
|
|
||||||
// react-native-screens
|
|
||||||
import com.swmansion.rnscreens.RNScreensPackage;
|
|
||||||
// react-native-safe-area-context
|
|
||||||
import com.th3rdwave.safeareacontext.SafeAreaContextPackage;
|
|
||||||
// react-native-document-picker
|
|
||||||
import com.reactnativedocumentpicker.RNDocumentPickerPackage;
|
|
||||||
// react-native-sound
|
|
||||||
import com.zmxv.RNSound.RNSoundPackage;
|
|
||||||
// @react-native-community/geolocation
|
|
||||||
import com.reactnativecommunity.geolocation.GeolocationPackage;
|
|
||||||
// react-native-image-picker
|
|
||||||
import com.imagepicker.ImagePickerPackage;
|
|
||||||
// react-native-permissions
|
|
||||||
import com.zoontek.rnpermissions.RNPermissionsPackage;
|
|
||||||
// react-native-camera-kit
|
|
||||||
import com.rncamerakit.RNCameraKitPackage;
|
|
||||||
// @react-native-async-storage/async-storage
|
|
||||||
import com.reactnativecommunity.asyncstorage.AsyncStoragePackage;
|
|
||||||
// react-native-fs
|
|
||||||
import com.rnfs.RNFSPackage;
|
|
||||||
// react-native-audio-recorder-player
|
|
||||||
import com.dooboolab.audiorecorderplayer.RNAudioRecorderPlayerPackage;
|
|
||||||
// react-native-live-audio-stream
|
|
||||||
import com.imxiqi.rnliveaudiostream.RNLiveAudioStreamPackage;
|
|
||||||
|
|
||||||
public class PackageList {
|
|
||||||
private Application application;
|
|
||||||
private ReactNativeHost reactNativeHost;
|
|
||||||
private MainPackageConfig mConfig;
|
|
||||||
|
|
||||||
public PackageList(ReactNativeHost reactNativeHost) {
|
|
||||||
this(reactNativeHost, null);
|
|
||||||
}
|
|
||||||
|
|
||||||
public PackageList(Application application) {
|
|
||||||
this(application, null);
|
|
||||||
}
|
|
||||||
|
|
||||||
public PackageList(ReactNativeHost reactNativeHost, MainPackageConfig config) {
|
|
||||||
this.reactNativeHost = reactNativeHost;
|
|
||||||
mConfig = config;
|
|
||||||
}
|
|
||||||
|
|
||||||
public PackageList(Application application, MainPackageConfig config) {
|
|
||||||
this.reactNativeHost = null;
|
|
||||||
this.application = application;
|
|
||||||
mConfig = config;
|
|
||||||
}
|
|
||||||
|
|
||||||
private ReactNativeHost getReactNativeHost() {
|
|
||||||
return this.reactNativeHost;
|
|
||||||
}
|
|
||||||
|
|
||||||
private Resources getResources() {
|
|
||||||
return this.getApplication().getResources();
|
|
||||||
}
|
|
||||||
|
|
||||||
private Application getApplication() {
|
|
||||||
if (this.reactNativeHost == null) return this.application;
|
|
||||||
return this.reactNativeHost.getApplication();
|
|
||||||
}
|
|
||||||
|
|
||||||
private Context getApplicationContext() {
|
|
||||||
return this.getApplication().getApplicationContext();
|
|
||||||
}
|
|
||||||
|
|
||||||
public ArrayList<ReactPackage> getPackages() {
|
|
||||||
return new ArrayList<>(Arrays.<ReactPackage>asList(
|
|
||||||
new MainReactPackage(mConfig),
|
|
||||||
new RNScreensPackage(),
|
|
||||||
new SafeAreaContextPackage(),
|
|
||||||
new RNDocumentPickerPackage(),
|
|
||||||
new RNSoundPackage(),
|
|
||||||
new GeolocationPackage(),
|
|
||||||
new ImagePickerPackage(),
|
|
||||||
new RNPermissionsPackage(),
|
|
||||||
new RNCameraKitPackage(),
|
|
||||||
new AsyncStoragePackage(),
|
|
||||||
new RNFSPackage(),
|
|
||||||
new RNAudioRecorderPlayerPackage(),
|
|
||||||
new RNLiveAudioStreamPackage()
|
|
||||||
));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
-16
@@ -1,16 +0,0 @@
|
|||||||
/**
|
|
||||||
* Automatically generated file. DO NOT MODIFY
|
|
||||||
*/
|
|
||||||
package com.ariacockpit;
|
|
||||||
|
|
||||||
public final class BuildConfig {
|
|
||||||
public static final boolean DEBUG = false;
|
|
||||||
public static final String APPLICATION_ID = "com.ariacockpit";
|
|
||||||
public static final String BUILD_TYPE = "release";
|
|
||||||
public static final int VERSION_CODE = 1;
|
|
||||||
public static final String VERSION_NAME = "1.0";
|
|
||||||
// Field from default config.
|
|
||||||
public static final boolean IS_HERMES_ENABLED = true;
|
|
||||||
// Field from default config.
|
|
||||||
public static final boolean IS_NEW_ARCHITECTURE_ENABLED = false;
|
|
||||||
}
|
|
||||||
@@ -1,24 +0,0 @@
|
|||||||
{
|
|
||||||
"schemaVersion": "1.1.0",
|
|
||||||
"buildSystem": "Gradle",
|
|
||||||
"buildSystemVersion": "8.3",
|
|
||||||
"buildPlugin": "org.jetbrains.kotlin.gradle.plugin.KotlinAndroidPluginWrapper",
|
|
||||||
"buildPluginVersion": "1.8.0",
|
|
||||||
"projectSettings": {
|
|
||||||
"isHmppEnabled": true,
|
|
||||||
"isCompatibilityMetadataVariantEnabled": false,
|
|
||||||
"isKPMEnabled": false
|
|
||||||
},
|
|
||||||
"projectTargets": [
|
|
||||||
{
|
|
||||||
"target": "org.jetbrains.kotlin.gradle.plugin.mpp.KotlinAndroidTarget",
|
|
||||||
"platformType": "androidJvm",
|
|
||||||
"extras": {
|
|
||||||
"android": {
|
|
||||||
"sourceCompatibility": "17",
|
|
||||||
"targetCompatibility": "17"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
@@ -3,6 +3,20 @@
|
|||||||
<uses-permission android:name="android.permission.INTERNET" />
|
<uses-permission android:name="android.permission.INTERNET" />
|
||||||
<uses-permission android:name="android.permission.CAMERA" />
|
<uses-permission android:name="android.permission.CAMERA" />
|
||||||
<uses-permission android:name="android.permission.RECORD_AUDIO" />
|
<uses-permission android:name="android.permission.RECORD_AUDIO" />
|
||||||
|
<uses-permission android:name="android.permission.REQUEST_INSTALL_PACKAGES" />
|
||||||
|
<!-- Anruf-State lesen damit TTS bei klingelndem Telefon pausiert -->
|
||||||
|
<uses-permission android:name="android.permission.READ_PHONE_STATE" />
|
||||||
|
<!-- Optional: GPS-Position der Frage anhaengen (nur wenn User in Settings aktiviert) -->
|
||||||
|
<uses-permission android:name="android.permission.ACCESS_COARSE_LOCATION" />
|
||||||
|
<uses-permission android:name="android.permission.ACCESS_FINE_LOCATION" />
|
||||||
|
<!-- Foreground-Service damit TTS auch bei minimierter App weiterlaeuft.
|
||||||
|
FOREGROUND_SERVICE_MICROPHONE ist Pflicht ab Android 14 wenn der
|
||||||
|
Service waehrend des Backgrounds aufs Mikro zugreift (Wake-Word,
|
||||||
|
Aufnahme im Gespraechsmodus). -->
|
||||||
|
<uses-permission android:name="android.permission.FOREGROUND_SERVICE" />
|
||||||
|
<uses-permission android:name="android.permission.FOREGROUND_SERVICE_MEDIA_PLAYBACK" />
|
||||||
|
<uses-permission android:name="android.permission.FOREGROUND_SERVICE_MICROPHONE" />
|
||||||
|
<uses-permission android:name="android.permission.POST_NOTIFICATIONS" />
|
||||||
|
|
||||||
<application
|
<application
|
||||||
android:name=".MainApplication"
|
android:name=".MainApplication"
|
||||||
@@ -24,5 +38,20 @@
|
|||||||
<category android:name="android.intent.category.LAUNCHER" />
|
<category android:name="android.intent.category.LAUNCHER" />
|
||||||
</intent-filter>
|
</intent-filter>
|
||||||
</activity>
|
</activity>
|
||||||
|
|
||||||
|
<provider
|
||||||
|
android:name="androidx.core.content.FileProvider"
|
||||||
|
android:authorities="${applicationId}.fileprovider"
|
||||||
|
android:exported="false"
|
||||||
|
android:grantUriPermissions="true">
|
||||||
|
<meta-data
|
||||||
|
android:name="android.support.FILE_PROVIDER_PATHS"
|
||||||
|
android:resource="@xml/file_paths" />
|
||||||
|
</provider>
|
||||||
|
|
||||||
|
<service
|
||||||
|
android:name=".AriaPlaybackService"
|
||||||
|
android:exported="false"
|
||||||
|
android:foregroundServiceType="mediaPlayback|microphone" />
|
||||||
</application>
|
</application>
|
||||||
</manifest>
|
</manifest>
|
||||||
|
|||||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,44 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import android.content.Intent
|
||||||
|
import android.net.Uri
|
||||||
|
import android.os.Build
|
||||||
|
import androidx.core.content.FileProvider
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||||
|
import com.facebook.react.bridge.ReactMethod
|
||||||
|
import com.facebook.react.bridge.Promise
|
||||||
|
import java.io.File
|
||||||
|
|
||||||
|
class ApkInstallerModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
|
||||||
|
override fun getName() = "ApkInstaller"
|
||||||
|
|
||||||
|
@ReactMethod
|
||||||
|
fun install(filePath: String, promise: Promise) {
|
||||||
|
try {
|
||||||
|
val file = File(filePath)
|
||||||
|
if (!file.exists()) {
|
||||||
|
promise.reject("FILE_NOT_FOUND", "APK nicht gefunden: $filePath")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
val context = reactApplicationContext
|
||||||
|
val uri: Uri = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
|
||||||
|
FileProvider.getUriForFile(context, "${context.packageName}.fileprovider", file)
|
||||||
|
} else {
|
||||||
|
Uri.fromFile(file)
|
||||||
|
}
|
||||||
|
|
||||||
|
val intent = Intent(Intent.ACTION_VIEW).apply {
|
||||||
|
setDataAndType(uri, "application/vnd.android.package-archive")
|
||||||
|
addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
|
||||||
|
addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION)
|
||||||
|
}
|
||||||
|
|
||||||
|
context.startActivity(intent)
|
||||||
|
promise.resolve(true)
|
||||||
|
} catch (e: Exception) {
|
||||||
|
promise.reject("INSTALL_ERROR", e.message, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import com.facebook.react.ReactPackage
|
||||||
|
import com.facebook.react.bridge.NativeModule
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.uimanager.ViewManager
|
||||||
|
|
||||||
|
class ApkInstallerPackage : ReactPackage {
|
||||||
|
override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> {
|
||||||
|
return listOf(ApkInstallerModule(reactContext), FileOpenerModule(reactContext))
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> {
|
||||||
|
return emptyList()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,108 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import android.app.Notification
|
||||||
|
import android.app.NotificationChannel
|
||||||
|
import android.app.NotificationManager
|
||||||
|
import android.app.PendingIntent
|
||||||
|
import android.app.Service
|
||||||
|
import android.content.Intent
|
||||||
|
import android.os.Build
|
||||||
|
import android.os.IBinder
|
||||||
|
import android.util.Log
|
||||||
|
import androidx.core.app.NotificationCompat
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Foreground-Service der den App-Prozess waehrend TTS-Wiedergabe am Leben
|
||||||
|
* haelt — Android killt sonst den Prozess sobald die App im Hintergrund ist
|
||||||
|
* und ARIA verstummt mitten im Satz.
|
||||||
|
*
|
||||||
|
* Notification ist persistent (ongoing) waehrend der Service laeuft.
|
||||||
|
* Tap auf die Notification bringt MainActivity zurueck nach vorne.
|
||||||
|
*
|
||||||
|
* foregroundServiceType="mediaPlayback" ist Pflicht ab Android 14, sonst
|
||||||
|
* wirft startForeground() eine SecurityException.
|
||||||
|
*/
|
||||||
|
class AriaPlaybackService : Service() {
|
||||||
|
companion object {
|
||||||
|
private const val TAG = "AriaPlaybackService"
|
||||||
|
private const val CHANNEL_ID = "aria_playback"
|
||||||
|
private const val NOTIFICATION_ID = 1042
|
||||||
|
const val EXTRA_REASON = "reason" // "tts" | "wake" | "rec" | ""
|
||||||
|
}
|
||||||
|
|
||||||
|
private var currentReason: String = ""
|
||||||
|
|
||||||
|
override fun onCreate() {
|
||||||
|
super.onCreate()
|
||||||
|
ensureNotificationChannel()
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun onStartCommand(intent: Intent?, flags: Int, startId: Int): Int {
|
||||||
|
val reason = intent?.getStringExtra(EXTRA_REASON) ?: ""
|
||||||
|
currentReason = reason
|
||||||
|
Log.i(TAG, "Foreground-Service start/update (reason=$reason)")
|
||||||
|
try {
|
||||||
|
startForeground(NOTIFICATION_ID, buildNotification(reason))
|
||||||
|
} catch (e: Exception) {
|
||||||
|
Log.e(TAG, "startForeground fehlgeschlagen", e)
|
||||||
|
stopSelf()
|
||||||
|
}
|
||||||
|
// START_NOT_STICKY: wenn Android den Service killt, NICHT automatisch
|
||||||
|
// wieder starten — die App entscheidet wann der Service noetig ist.
|
||||||
|
return START_NOT_STICKY
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun onDestroy() {
|
||||||
|
Log.i(TAG, "Foreground-Service gestoppt")
|
||||||
|
super.onDestroy()
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun onBind(intent: Intent?): IBinder? = null
|
||||||
|
|
||||||
|
private fun ensureNotificationChannel() {
|
||||||
|
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
|
||||||
|
val nm = getSystemService(NotificationManager::class.java) ?: return
|
||||||
|
if (nm.getNotificationChannel(CHANNEL_ID) == null) {
|
||||||
|
val channel = NotificationChannel(
|
||||||
|
CHANNEL_ID,
|
||||||
|
"ARIA Audio-Wiedergabe",
|
||||||
|
NotificationManager.IMPORTANCE_LOW,
|
||||||
|
).apply {
|
||||||
|
description = "Notification waehrend ARIA spricht (haelt die App im Hintergrund am Leben)"
|
||||||
|
setShowBadge(false)
|
||||||
|
}
|
||||||
|
nm.createNotificationChannel(channel)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun buildNotification(reason: String): Notification {
|
||||||
|
val launchIntent = Intent(this, MainActivity::class.java).apply {
|
||||||
|
flags = Intent.FLAG_ACTIVITY_NEW_TASK or Intent.FLAG_ACTIVITY_CLEAR_TOP
|
||||||
|
}
|
||||||
|
val pendingFlags = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M)
|
||||||
|
PendingIntent.FLAG_IMMUTABLE or PendingIntent.FLAG_UPDATE_CURRENT
|
||||||
|
else
|
||||||
|
PendingIntent.FLAG_UPDATE_CURRENT
|
||||||
|
val pendingIntent = PendingIntent.getActivity(this, 0, launchIntent, pendingFlags)
|
||||||
|
|
||||||
|
val (title, body) = when (reason) {
|
||||||
|
"tts" -> "ARIA spricht" to "Antwort wird abgespielt — antippen oeffnet die App"
|
||||||
|
"rec" -> "ARIA hoert zu" to "Sprachaufnahme laeuft — antippen oeffnet die App"
|
||||||
|
"wake" -> "ARIA bereit" to "Wake-Word lauscht passiv — antippen oeffnet die App"
|
||||||
|
else -> "ARIA aktiv" to "Hintergrund-Modus — antippen oeffnet die App"
|
||||||
|
}
|
||||||
|
|
||||||
|
return NotificationCompat.Builder(this, CHANNEL_ID)
|
||||||
|
.setContentTitle(title)
|
||||||
|
.setContentText(body)
|
||||||
|
.setSmallIcon(R.mipmap.ic_launcher)
|
||||||
|
.setContentIntent(pendingIntent)
|
||||||
|
.setOngoing(true)
|
||||||
|
.setShowWhen(false)
|
||||||
|
.setPriority(NotificationCompat.PRIORITY_LOW)
|
||||||
|
.setCategory(NotificationCompat.CATEGORY_SERVICE)
|
||||||
|
.setVisibility(NotificationCompat.VISIBILITY_PUBLIC)
|
||||||
|
.build()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,212 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import android.content.Context
|
||||||
|
import android.media.AudioAttributes
|
||||||
|
import android.media.AudioFocusRequest
|
||||||
|
import android.media.AudioManager
|
||||||
|
import android.os.Build
|
||||||
|
import android.util.Log
|
||||||
|
import com.facebook.react.bridge.Arguments
|
||||||
|
import com.facebook.react.bridge.Promise
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||||
|
import com.facebook.react.bridge.ReactMethod
|
||||||
|
import com.facebook.react.modules.core.DeviceEventManagerModule
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Steuert Audio-Focus fuer Ducking/Muten anderer Apps + emittiert Loss-Events
|
||||||
|
* an JS damit ARIA bei VoIP-Anrufen (WhatsApp/Signal/Discord/...) aufhoert
|
||||||
|
* zu sprechen — diese Anrufe gehen nicht ueber TelephonyManager, sondern
|
||||||
|
* requestn AudioFocus_GAIN_TRANSIENT_EXCLUSIVE was wir hier mitbekommen.
|
||||||
|
*
|
||||||
|
* - requestDuck() → andere Apps werden leiser (ARIA spricht TTS)
|
||||||
|
* - requestExclusive() → andere Apps werden pausiert (Mikrofon-Aufnahme)
|
||||||
|
* - release() → Focus abgeben, andere Apps duerfen wieder
|
||||||
|
*
|
||||||
|
* Events:
|
||||||
|
* - "AudioFocusChanged" mit type:
|
||||||
|
* "loss" — endgueltiger Verlust (Anruf, andere App permanent)
|
||||||
|
* "loss_transient" — vorruebergehender Verlust (kurze Unterbrechung)
|
||||||
|
* "gain" — Fokus zurueck
|
||||||
|
*/
|
||||||
|
class AudioFocusModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
|
||||||
|
override fun getName() = "AudioFocus"
|
||||||
|
|
||||||
|
companion object { private const val TAG = "AudioFocus" }
|
||||||
|
|
||||||
|
private var currentRequest: AudioFocusRequest? = null
|
||||||
|
|
||||||
|
private fun audioManager(): AudioManager? =
|
||||||
|
reactApplicationContext.getSystemService(Context.AUDIO_SERVICE) as? AudioManager
|
||||||
|
|
||||||
|
private fun emitFocusChange(type: String) {
|
||||||
|
try {
|
||||||
|
val params = Arguments.createMap().apply { putString("type", type) }
|
||||||
|
reactApplicationContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
|
||||||
|
.emit("AudioFocusChanged", params)
|
||||||
|
} catch (e: Exception) {
|
||||||
|
Log.w(TAG, "emit failed: ${e.message}")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private val focusListener = AudioManager.OnAudioFocusChangeListener { focusChange ->
|
||||||
|
when (focusChange) {
|
||||||
|
AudioManager.AUDIOFOCUS_LOSS -> {
|
||||||
|
Log.i(TAG, "AUDIOFOCUS_LOSS (z.B. Anruf, anderer Player permanent)")
|
||||||
|
emitFocusChange("loss")
|
||||||
|
}
|
||||||
|
AudioManager.AUDIOFOCUS_LOSS_TRANSIENT -> {
|
||||||
|
Log.i(TAG, "AUDIOFOCUS_LOSS_TRANSIENT (kurze Unterbrechung)")
|
||||||
|
emitFocusChange("loss_transient")
|
||||||
|
}
|
||||||
|
AudioManager.AUDIOFOCUS_LOSS_TRANSIENT_CAN_DUCK -> {
|
||||||
|
// Notification-Sound o.ae. — wir ignorieren das, ARIA macht weiter
|
||||||
|
Log.d(TAG, "AUDIOFOCUS_LOSS_CAN_DUCK ignoriert")
|
||||||
|
}
|
||||||
|
AudioManager.AUDIOFOCUS_GAIN -> {
|
||||||
|
Log.i(TAG, "AUDIOFOCUS_GAIN")
|
||||||
|
emitFocusChange("gain")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun requestFocus(durationHint: Int, usage: Int, promise: Promise) {
|
||||||
|
val am = audioManager()
|
||||||
|
if (am == null) {
|
||||||
|
promise.reject("NO_AUDIO_MANAGER", "AudioManager nicht verfuegbar")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
release()
|
||||||
|
|
||||||
|
val result: Int = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
|
||||||
|
val attrs = AudioAttributes.Builder()
|
||||||
|
.setUsage(usage)
|
||||||
|
.setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
|
||||||
|
.build()
|
||||||
|
val req = AudioFocusRequest.Builder(durationHint)
|
||||||
|
.setAudioAttributes(attrs)
|
||||||
|
.setOnAudioFocusChangeListener(focusListener)
|
||||||
|
.build()
|
||||||
|
currentRequest = req
|
||||||
|
am.requestAudioFocus(req)
|
||||||
|
} else {
|
||||||
|
@Suppress("DEPRECATION")
|
||||||
|
am.requestAudioFocus(focusListener, AudioManager.STREAM_MUSIC, durationHint)
|
||||||
|
}
|
||||||
|
|
||||||
|
promise.resolve(result == AudioManager.AUDIOFOCUS_REQUEST_GRANTED)
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Andere Apps werden pausiert (TTS spricht).
|
||||||
|
*
|
||||||
|
* TRANSIENT (statt TRANSIENT_MAY_DUCK): Spotify/YouTube pausieren komplett
|
||||||
|
* statt nur leiser zu werden. Verhindert auch das "kommt-wieder-hoch"-
|
||||||
|
* Problem mit MAY_DUCK, wo das System nach kurzer Zeit den Duck-Effekt
|
||||||
|
* wieder aufgehoben hat obwohl wir den Fokus noch hielten.
|
||||||
|
*/
|
||||||
|
@ReactMethod
|
||||||
|
fun requestDuck(promise: Promise) {
|
||||||
|
requestFocus(
|
||||||
|
AudioManager.AUDIOFOCUS_GAIN_TRANSIENT,
|
||||||
|
AudioAttributes.USAGE_ASSISTANT,
|
||||||
|
promise,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Andere Apps werden pausiert (Mikrofon-Aufnahme / Gespraech). */
|
||||||
|
@ReactMethod
|
||||||
|
fun requestExclusive(promise: Promise) {
|
||||||
|
requestFocus(
|
||||||
|
AudioManager.AUDIOFOCUS_GAIN_TRANSIENT_EXCLUSIVE,
|
||||||
|
AudioAttributes.USAGE_VOICE_COMMUNICATION,
|
||||||
|
promise,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Focus abgeben — andere Apps duerfen wieder volle Lautstaerke. */
|
||||||
|
@ReactMethod
|
||||||
|
fun release(promise: Promise) {
|
||||||
|
release()
|
||||||
|
promise.resolve(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Den USAGE_MEDIA-Focus-Stack im System aufmischen, damit Spotify/YouTube
|
||||||
|
* resumen wenn ein anderer Player (z.B. react-native-sound) seinen Focus
|
||||||
|
* nicht ordnungsgemaess released hat. Strategie: kurz selbst USAGE_MEDIA
|
||||||
|
* GAIN beanspruchen — das System invalidiert dabei den haengenden Stack-
|
||||||
|
* Eintrag des anderen Players — und sofort wieder abandonen. Spotify
|
||||||
|
* bekommt den Focus-Gain und resumed.
|
||||||
|
*
|
||||||
|
* Workaround fuer das react-native-sound-Bug: Sound.stop()/release()
|
||||||
|
* laesst den AudioFocusRequest haengen.
|
||||||
|
*/
|
||||||
|
@ReactMethod
|
||||||
|
fun kickReleaseMedia(promise: Promise) {
|
||||||
|
val am = audioManager()
|
||||||
|
if (am == null) {
|
||||||
|
promise.resolve(false)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Async laufen lassen — wir wollen einen request, Pause, dann abandon.
|
||||||
|
// Ohne Pause merkt das System (und damit Spotify) die kurze Owner-
|
||||||
|
// Wechsel oft gar nicht. 250ms reicht erfahrungsgemaess fuer den
|
||||||
|
// Focus-Stack-Refresh.
|
||||||
|
Thread {
|
||||||
|
try {
|
||||||
|
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
|
||||||
|
val attrs = AudioAttributes.Builder()
|
||||||
|
.setUsage(AudioAttributes.USAGE_MEDIA)
|
||||||
|
.setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
|
||||||
|
.build()
|
||||||
|
val kickListener = AudioManager.OnAudioFocusChangeListener { /* ignorieren */ }
|
||||||
|
val kickReq = AudioFocusRequest.Builder(AudioManager.AUDIOFOCUS_GAIN)
|
||||||
|
.setAudioAttributes(attrs)
|
||||||
|
.setOnAudioFocusChangeListener(kickListener)
|
||||||
|
.build()
|
||||||
|
am.requestAudioFocus(kickReq)
|
||||||
|
Thread.sleep(250)
|
||||||
|
am.abandonAudioFocusRequest(kickReq)
|
||||||
|
} else {
|
||||||
|
val kickListener = AudioManager.OnAudioFocusChangeListener { /* ignorieren */ }
|
||||||
|
@Suppress("DEPRECATION")
|
||||||
|
am.requestAudioFocus(kickListener, AudioManager.STREAM_MUSIC, AudioManager.AUDIOFOCUS_GAIN)
|
||||||
|
Thread.sleep(250)
|
||||||
|
@Suppress("DEPRECATION")
|
||||||
|
am.abandonAudioFocus(kickListener)
|
||||||
|
}
|
||||||
|
Log.i(TAG, "kickReleaseMedia: USAGE_MEDIA-Stack aufgemischt (250ms Pause)")
|
||||||
|
} catch (e: Exception) {
|
||||||
|
Log.w(TAG, "kickReleaseMedia failed: ${e.message}")
|
||||||
|
}
|
||||||
|
}.start()
|
||||||
|
promise.resolve(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
private fun release() {
|
||||||
|
val am = audioManager() ?: return
|
||||||
|
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
|
||||||
|
currentRequest?.let { am.abandonAudioFocusRequest(it) }
|
||||||
|
} else {
|
||||||
|
@Suppress("DEPRECATION")
|
||||||
|
am.abandonAudioFocus(focusListener)
|
||||||
|
}
|
||||||
|
currentRequest = null
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
 * Resolves the current audio mode: NORMAL=0, IN_CALL=2, IN_COMMUNICATION=3,
 * CALL_SCREENING=4. IN_COMMUNICATION is the typical VoIP-call mode (WhatsApp,
 * Signal, etc.) — poll this to detect when the call is over (back to NORMAL).
 * Resolves 0 when no AudioManager is available.
 */
@ReactMethod
fun getMode(promise: Promise) {
    promise.resolve(audioManager()?.mode ?: 0)
}
|
||||||
|
|
||||||
|
// Required no-op hooks for the React Native event-emitter contract —
// NativeEventEmitter warns in debug builds when they are missing.
@ReactMethod fun addListener(eventName: String) {}
@ReactMethod fun removeListeners(count: Int) {}
|
||||||
|
}
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import com.facebook.react.ReactPackage
|
||||||
|
import com.facebook.react.bridge.NativeModule
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.uimanager.ViewManager
|
||||||
|
|
||||||
|
/** Registers the AudioFocus native module with React Native; contributes no views. */
class AudioFocusPackage : ReactPackage {
    override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> =
        listOf(AudioFocusModule(reactContext))

    override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> =
        emptyList()
}
|
||||||
@@ -0,0 +1,59 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import android.content.Intent
|
||||||
|
import android.os.Build
|
||||||
|
import android.util.Log
|
||||||
|
import com.facebook.react.bridge.Promise
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||||
|
import com.facebook.react.bridge.ReactMethod
|
||||||
|
|
||||||
|
/**
 * RN bridge for the AriaPlaybackService.
 *
 * Started from JS while a TTS playback is running so that Android does not
 * kill the app process while the app is in the background (i.e. ARIA keeps
 * speaking even when the user has minimised the app).
 *
 * The service stops either explicitly via stop() or together with the
 * process (which, for a foreground service, only happens when the user
 * force-stops the app).
 */
class BackgroundAudioModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
    override fun getName() = "BackgroundAudio"

    companion object { private const val TAG = "BackgroundAudio" }

    /**
     * Starts AriaPlaybackService as a foreground service (API 26+) or a plain
     * service on older devices. [reason] is forwarded as an intent extra.
     * Resolves true on success, rejects with START_FAILED otherwise.
     */
    @ReactMethod
    fun start(reason: String, promise: Promise) {
        try {
            val ctx = reactApplicationContext
            val intent = Intent(ctx, AriaPlaybackService::class.java)
            // `reason` is non-nullable by signature — the previous `reason ?: ""`
            // elvis was dead code (always returned the left operand).
            intent.putExtra(AriaPlaybackService.EXTRA_REASON, reason)
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
                ctx.startForegroundService(intent)
            } else {
                ctx.startService(intent)
            }
            promise.resolve(true)
        } catch (e: Exception) {
            Log.w(TAG, "start fehlgeschlagen: ${e.message}")
            promise.reject("START_FAILED", e.message ?: "Unbekannter Fehler", e)
        }
    }

    /**
     * Stops the playback service. Resolves true on success, rejects with
     * STOP_FAILED otherwise.
     */
    @ReactMethod
    fun stop(promise: Promise) {
        try {
            val ctx = reactApplicationContext
            ctx.stopService(Intent(ctx, AriaPlaybackService::class.java))
            promise.resolve(true)
        } catch (e: Exception) {
            Log.w(TAG, "stop fehlgeschlagen: ${e.message}")
            promise.reject("STOP_FAILED", e.message ?: "Unbekannter Fehler", e)
        }
    }

    // Required no-op hooks for the RN event-emitter contract.
    @ReactMethod fun addListener(eventName: String) {}
    @ReactMethod fun removeListeners(count: Int) {}
}
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import com.facebook.react.ReactPackage
|
||||||
|
import com.facebook.react.bridge.NativeModule
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.uimanager.ViewManager
|
||||||
|
|
||||||
|
/** Registers the BackgroundAudio native module with React Native; contributes no views. */
class BackgroundAudioPackage : ReactPackage {
    override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> =
        listOf(BackgroundAudioModule(reactContext))

    override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> =
        emptyList()
}
|
||||||
@@ -0,0 +1,55 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import android.content.Intent
|
||||||
|
import android.net.Uri
|
||||||
|
import android.os.Build
|
||||||
|
import androidx.core.content.FileProvider
|
||||||
|
import com.facebook.react.bridge.Promise
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||||
|
import com.facebook.react.bridge.ReactMethod
|
||||||
|
import java.io.File
|
||||||
|
|
||||||
|
/**
 * Opens an arbitrary file (PDF, image, office doc, ...) with the app the user
 * picks via the Android intent chooser. Uses FileProvider so that Android 7+
 * (content:// instead of file://) is allowed to read the URI.
 *
 * The MIME type is supplied by the caller and determines the app selection
 * (PDF goes to PDF viewers, image/jpeg to galleries, etc.).
 */
class FileOpenerModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
    override fun getName() = "FileOpener"

    /**
     * Opens [filePath] (with or without a leading "file://") using [mimeType].
     * Rejects with FILE_NOT_FOUND when the file does not exist and with
     * OPEN_ERROR on any other failure; resolves true once the chooser is shown.
     */
    @ReactMethod
    fun open(filePath: String, mimeType: String, promise: Promise) {
        try {
            val cleanPath = filePath.removePrefix("file://")
            val file = File(cleanPath)
            if (!file.exists()) {
                promise.reject("FILE_NOT_FOUND", "Datei nicht gefunden: $cleanPath")
                return
            }
            val context = reactApplicationContext
            // Android 7+ forbids handing file:// URIs to other apps — use a
            // content:// URI from the app's FileProvider instead.
            val uri: Uri = if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.N) {
                FileProvider.getUriForFile(context, "${context.packageName}.fileprovider", file)
            } else {
                Uri.fromFile(file)
            }
            // A blank MIME type would make the intent unresolvable; fall back
            // to the generic binary type.
            val safeMime = if (mimeType.isBlank()) "application/octet-stream" else mimeType
            val intent = Intent(Intent.ACTION_VIEW).apply {
                setDataAndType(uri, safeMime)
                addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
                addFlags(Intent.FLAG_GRANT_READ_URI_PERMISSION)
            }
            // The chooser shows the Android picker when several apps can open the MIME type.
            val chooser = Intent.createChooser(intent, "Oeffnen mit").apply {
                addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
            }
            context.startActivity(chooser)
            promise.resolve(true)
        } catch (e: Exception) {
            // e.message may be null — keep the reject payload non-null for JS,
            // consistent with the other native modules in this package.
            promise.reject("OPEN_ERROR", e.message ?: "Unbekannter Fehler", e)
        }
    }
}
|
||||||
@@ -18,8 +18,12 @@ class MainApplication : Application(), ReactApplication {
|
|||||||
object : DefaultReactNativeHost(this) {
|
object : DefaultReactNativeHost(this) {
|
||||||
override fun getPackages(): List<ReactPackage> =
|
override fun getPackages(): List<ReactPackage> =
|
||||||
PackageList(this).packages.apply {
|
PackageList(this).packages.apply {
|
||||||
// Packages that cannot be autolinked yet can be added manually here, for example:
|
add(ApkInstallerPackage())
|
||||||
// add(MyReactNativePackage())
|
add(AudioFocusPackage())
|
||||||
|
add(PcmStreamPlayerPackage())
|
||||||
|
add(OpenWakeWordPackage())
|
||||||
|
add(PhoneCallPackage())
|
||||||
|
add(BackgroundAudioPackage())
|
||||||
}
|
}
|
||||||
|
|
||||||
override fun getJSMainModuleName(): String = "index"
|
override fun getJSMainModuleName(): String = "index"
|
||||||
|
|||||||
@@ -0,0 +1,413 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import ai.onnxruntime.OnnxTensor
|
||||||
|
import ai.onnxruntime.OrtEnvironment
|
||||||
|
import ai.onnxruntime.OrtSession
|
||||||
|
import android.Manifest
|
||||||
|
import android.content.pm.PackageManager
|
||||||
|
import android.media.AudioFormat
|
||||||
|
import android.media.AudioRecord
|
||||||
|
import android.media.MediaRecorder
|
||||||
|
import android.media.audiofx.AcousticEchoCanceler
|
||||||
|
import android.media.audiofx.AutomaticGainControl
|
||||||
|
import android.media.audiofx.NoiseSuppressor
|
||||||
|
import android.util.Log
|
||||||
|
import androidx.core.content.ContextCompat
|
||||||
|
import com.facebook.react.bridge.Promise
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||||
|
import com.facebook.react.bridge.ReactMethod
|
||||||
|
import com.facebook.react.modules.core.DeviceEventManagerModule
|
||||||
|
import java.nio.FloatBuffer
|
||||||
|
import java.util.concurrent.atomic.AtomicBoolean
|
||||||
|
|
||||||
|
/**
 * On-device wake-word detection via openWakeWord (https://github.com/dscripka/openWakeWord).
 *
 * Three-stage ONNX pipeline:
 * 1. Audio (16kHz mono int16, 1280-sample chunks) → melspectrogram → 32-mel frames
 * 2. 76 mel frames, sliding window (stride 8) → speech embedding → 96-dim vector
 * 3. Last 16 embeddings (~1.28s context) → wake-word classifier → sigmoid score
 *
 * Models live in assets/openwakeword/ (mel + embedding shared, plus one
 * dedicated .onnx per keyword). A detection fires after `patience` consecutive
 * frames above `threshold`, and repeats are suppressed for `debounceMs`.
 *
 * Emits "WakeWordDetected" as an RN event when a trigger was recognised.
 */
class OpenWakeWordModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
    override fun getName() = "OpenWakeWord"

    companion object {
        private const val TAG = "OpenWakeWord"
        private const val SAMPLE_RATE = 16000
        private const val CHUNK_SAMPLES = 1280 // 80ms @ 16kHz
        private const val MEL_FRAMES_PER_EMBEDDING = 76 // embedding window
        private const val EMBEDDING_STRIDE = 8 // slide by 8 mel frames
        private const val EMBEDDING_DIM = 96
        private const val MEL_BINS = 32
        private const val DEFAULT_WW_INPUT_FRAMES = 16 // fallback when model metadata is missing
    }

    // Shared ONNX Runtime environment plus one session per pipeline stage.
    private val env: OrtEnvironment = OrtEnvironment.getEnvironment()
    private var melSession: OrtSession? = null
    private var embSession: OrtSession? = null
    private var wwSession: OrtSession? = null

    // Input tensor names, overwritten in init() with the names the loaded models declare.
    private var melInputName: String = "input"
    private var embInputName: String = "input_1"
    private var wwInputName: String = "input"
    // Number of embedding frames the wake-word classifier expects per inference —
    // hey_jarvis uses 16, other community models may differ (e.g. 28).
    // Read from the model metadata during init().
    private var wwInputFrames: Int = DEFAULT_WW_INPUT_FRAMES

    // Configuration (set in init()).
    private var threshold: Float = 0.5f
    private var patience: Int = 2
    private var debounceMs: Long = 1500
    private var modelName: String = "hey_jarvis"

    // Audio capture thread.
    private var audioRecord: AudioRecord? = null
    private val running = AtomicBoolean(false)
    private var captureThread: Thread? = null

    // Audio effects: echo cancellation (against ARIA's own TTS voice, which
    // would otherwise trigger the wake word) + noise suppression. Usually
    // already active via the VOICE_COMMUNICATION audio source, but enabling
    // them explicitly is more robust.
    private var aec: AcousticEchoCanceler? = null
    private var ns: NoiseSuppressor? = null
    private var agc: AutomaticGainControl? = null

    // Inference state.
    private val melBuffer: ArrayList<FloatArray> = ArrayList(256) // list of 32-dim mel frames
    private var melProcessedIdx: Int = 0
    private val embBuffer: ArrayDeque<FloatArray> = ArrayDeque(32) // ring buffer of recent embeddings
    private var consecutiveAboveThreshold: Int = 0
    private var lastDetectionMs: Long = 0L

    /**
     * Initialises the ONNX sessions for a given wake word.
     * modelName: file name without suffix (e.g. "hey_jarvis", "alexa", "hey_mycroft", "hey_rhasspy").
     * Rejects with INIT_FAILED (and tears the sessions down again) on any error.
     */
    @ReactMethod
    fun init(modelName: String, threshold: Double, patience: Int, debounceMs: Int, promise: Promise) {
        try {
            disposeSessions()
            this.modelName = modelName
            this.threshold = threshold.toFloat()
            this.patience = patience.coerceAtLeast(1)
            this.debounceMs = debounceMs.toLong()

            val ctx = reactApplicationContext
            val melBytes = ctx.assets.open("openwakeword/melspectrogram.onnx").use { it.readBytes() }
            val embBytes = ctx.assets.open("openwakeword/embedding_model.onnx").use { it.readBytes() }
            val wwBytes = ctx.assets.open("openwakeword/$modelName.onnx").use { it.readBytes() }

            val opts = OrtSession.SessionOptions()
            melSession = env.createSession(melBytes, opts)
            embSession = env.createSession(embBytes, opts)
            wwSession = env.createSession(wwBytes, opts)

            melInputName = melSession!!.inputNames.first()
            embInputName = embSession!!.inputNames.first()
            wwInputName = wwSession!!.inputNames.first()

            // Read the classifier's input frame count from the model — varies per keyword.
            // Expected shape: (1, N, 96), where N comes from the model metadata.
            val wwInputInfo = wwSession!!.inputInfo[wwInputName]
            val wwShape = (wwInputInfo?.info as? ai.onnxruntime.TensorInfo)?.shape
            wwInputFrames = wwShape?.getOrNull(1)?.toInt()?.takeIf { it > 0 } ?: DEFAULT_WW_INPUT_FRAMES

            Log.i(TAG, "Init OK: model=$modelName wwFrames=$wwInputFrames threshold=$threshold patience=$patience " +
                "debounce=${debounceMs}ms (inputs: mel=$melInputName emb=$embInputName ww=$wwInputName)")
            promise.resolve(true)
        } catch (e: Exception) {
            Log.e(TAG, "Init fehlgeschlagen: ${e.message}", e)
            disposeSessions()
            promise.reject("INIT_FAILED", e.message ?: "Unbekannter Fehler", e)
        }
    }

    /**
     * Opens the microphone and starts the capture/inference loop.
     * Requires init() first and the RECORD_AUDIO permission; idempotent when
     * already running.
     */
    @ReactMethod
    fun start(promise: Promise) {
        if (running.get()) {
            promise.resolve(true)
            return
        }
        if (melSession == null || embSession == null || wwSession == null) {
            promise.reject("NOT_INITIALIZED", "init() muss vor start() aufgerufen werden")
            return
        }
        // Check the permission — app code usually requests it beforehand, but
        // we insist on it here so AudioRecord does not fail silently.
        val perm = ContextCompat.checkSelfPermission(reactApplicationContext, Manifest.permission.RECORD_AUDIO)
        if (perm != PackageManager.PERMISSION_GRANTED) {
            promise.reject("NO_MIC_PERMISSION", "RECORD_AUDIO Permission fehlt")
            return
        }

        try {
            val minBuf = AudioRecord.getMinBufferSize(
                SAMPLE_RATE,
                AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT,
            ).coerceAtLeast(CHUNK_SAMPLES * 2 * 4)

            // VOICE_COMMUNICATION source: on most Android devices this enables
            // echo cancellation + noise suppression automatically. Important so
            // that ARIA's own voice does not trigger the wake word while we
            // listen in parallel to TTS playback.
            val record = AudioRecord(
                MediaRecorder.AudioSource.VOICE_COMMUNICATION,
                SAMPLE_RATE,
                AudioFormat.CHANNEL_IN_MONO,
                AudioFormat.ENCODING_PCM_16BIT,
                minBuf,
            )
            if (record.state != AudioRecord.STATE_INITIALIZED) {
                record.release()
                promise.reject("AUDIO_INIT", "AudioRecord nicht initialisiert (Mikro belegt?)")
                return
            }
            audioRecord = record

            // ADDITIONALLY enable the audio effects explicitly — some devices
            // need this even though VOICE_COMMUNICATION should already provide
            // them. Failure is not critical (continue without effects).
            try {
                if (AcousticEchoCanceler.isAvailable()) {
                    aec = AcousticEchoCanceler.create(record.audioSessionId)?.apply { enabled = true }
                    Log.i(TAG, "AEC aktiviert (enabled=${aec?.enabled})")
                }
            } catch (e: Exception) { Log.w(TAG, "AEC failed: ${e.message}") }
            try {
                if (NoiseSuppressor.isAvailable()) {
                    ns = NoiseSuppressor.create(record.audioSessionId)?.apply { enabled = true }
                }
            } catch (e: Exception) { Log.w(TAG, "NS failed: ${e.message}") }
            try {
                if (AutomaticGainControl.isAvailable()) {
                    agc = AutomaticGainControl.create(record.audioSessionId)?.apply { enabled = true }
                }
            } catch (e: Exception) { Log.w(TAG, "AGC failed: ${e.message}") }

            resetInferenceState()
            running.set(true)
            record.startRecording()

            captureThread = Thread({ captureLoop() }, "OpenWakeWordCapture").apply {
                isDaemon = true
                start()
            }

            Log.i(TAG, "Lauschen gestartet (model=$modelName)")
            promise.resolve(true)
        } catch (e: Exception) {
            Log.e(TAG, "start fehlgeschlagen", e)
            running.set(false)
            audioRecord?.release()
            audioRecord = null
            promise.reject("START_FAILED", e.message ?: "Unbekannter Fehler", e)
        }
    }

    /** Best-effort release of AEC/NS/AGC effect handles; never throws. */
    private fun releaseAudioEffects() {
        try { aec?.release() } catch (_: Exception) {}
        try { ns?.release() } catch (_: Exception) {}
        try { agc?.release() } catch (_: Exception) {}
        aec = null; ns = null; agc = null
    }

    /**
     * Stops the capture loop and releases the microphone. ONNX sessions stay
     * loaded so start() can be called again without re-running init().
     */
    @ReactMethod
    fun stop(promise: Promise) {
        running.set(false)
        try {
            captureThread?.join(1500)
        } catch (_: InterruptedException) {}
        captureThread = null
        try { audioRecord?.stop() } catch (_: Exception) {}
        try { audioRecord?.release() } catch (_: Exception) {}
        audioRecord = null
        releaseAudioEffects()
        Log.i(TAG, "Lauschen gestoppt")
        promise.resolve(true)
    }

    /** Full teardown: stops capture, releases the mic AND closes the ONNX sessions. */
    @ReactMethod
    fun dispose(promise: Promise) {
        running.set(false)
        try { captureThread?.join(1000) } catch (_: InterruptedException) {}
        captureThread = null
        try { audioRecord?.stop() } catch (_: Exception) {}
        try { audioRecord?.release() } catch (_: Exception) {}
        audioRecord = null
        releaseAudioEffects()
        disposeSessions()
        promise.resolve(true)
    }

    @ReactMethod
    fun isAvailable(promise: Promise) {
        // Wake word is always available (no API key, everything on-device).
        promise.resolve(true)
    }

    // RN event subscriptions — RN convention, otherwise a warning in debug builds.
    @ReactMethod fun addListener(eventName: String) {}
    @ReactMethod fun removeListeners(count: Int) {}

    /** Closes all three ONNX sessions, swallowing close errors. */
    private fun disposeSessions() {
        try { melSession?.close() } catch (_: Exception) {}
        try { embSession?.close() } catch (_: Exception) {}
        try { wwSession?.close() } catch (_: Exception) {}
        melSession = null
        embSession = null
        wwSession = null
    }

    /** Clears all buffers and counters so a fresh start() begins with clean state. */
    private fun resetInferenceState() {
        melBuffer.clear()
        melProcessedIdx = 0
        embBuffer.clear()
        consecutiveAboveThreshold = 0
        lastDetectionMs = 0L
    }

    /** Sends the "WakeWordDetected" event (with the model name) to JS; failures are logged only. */
    private fun emitDetected() {
        val params = com.facebook.react.bridge.Arguments.createMap().apply {
            putString("model", modelName)
        }
        try {
            reactApplicationContext
                .getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
                .emit("WakeWordDetected", params)
        } catch (e: Exception) {
            Log.w(TAG, "emit fehlgeschlagen: ${e.message}")
        }
    }

    /**
     * Blocking read loop on the capture thread: fills 1280-sample chunks from
     * AudioRecord and hands each to processChunk() until running is cleared
     * or the read fails.
     */
    private fun captureLoop() {
        val buf = ShortArray(CHUNK_SAMPLES)
        val record = audioRecord ?: return
        Log.i(TAG, "Capture-Loop gestartet")
        while (running.get()) {
            // Inner loop: AudioRecord.read may return fewer samples than
            // requested, so keep reading until the chunk is full.
            var read = 0
            while (read < CHUNK_SAMPLES && running.get()) {
                val n = record.read(buf, read, CHUNK_SAMPLES - read)
                if (n <= 0) {
                    Log.w(TAG, "AudioRecord.read returned $n — Loop ende")
                    running.set(false)
                    return
                }
                read += n
            }
            if (!running.get()) break
            try {
                processChunk(buf)
            } catch (e: Exception) {
                // Keep listening — one bad chunk should not kill the loop.
                Log.w(TAG, "processChunk: ${e.message}")
            }
        }
        Log.i(TAG, "Capture-Loop beendet")
    }

    /** Runs one 1280-sample int16 audio chunk through the three-stage pipeline. */
    private fun processChunk(audio: ShortArray) {
        // 1) Audio → mel (output shape (1, 1, frames, 32))
        val floats = FloatArray(audio.size) { audio[it].toFloat() }
        val melTensor = OnnxTensor.createTensor(
            env,
            FloatBuffer.wrap(floats),
            longArrayOf(1L, audio.size.toLong()),
        )
        val melResult = melSession!!.run(mapOf(melInputName to melTensor))
        val melOut = melResult.get(0).value
        melTensor.close()
        @Suppress("UNCHECKED_CAST")
        val mel4 = melOut as Array<Array<Array<FloatArray>>>
        val frames = mel4[0][0]
        // openWakeWord applies `mel/10 + 2` before feeding the embedding model
        for (frame in frames) {
            val scaled = FloatArray(frame.size) { frame[it] / 10f + 2f }
            melBuffer.add(scaled)
        }
        melResult.close()

        // 2) Sliding window: process every complete 76-frame window
        while (melBuffer.size >= melProcessedIdx + MEL_FRAMES_PER_EMBEDDING) {
            // Flatten the window into one (76 * 32) float array for the tensor.
            val flat = FloatArray(MEL_FRAMES_PER_EMBEDDING * MEL_BINS)
            var pos = 0
            for (i in 0 until MEL_FRAMES_PER_EMBEDDING) {
                val src = melBuffer[melProcessedIdx + i]
                System.arraycopy(src, 0, flat, pos, MEL_BINS)
                pos += MEL_BINS
            }
            val embIn = OnnxTensor.createTensor(
                env,
                FloatBuffer.wrap(flat),
                longArrayOf(1L, MEL_FRAMES_PER_EMBEDDING.toLong(), MEL_BINS.toLong(), 1L),
            )
            val embRes = embSession!!.run(mapOf(embInputName to embIn))
            val embOut = embRes.get(0).value
            embIn.close()
            // Expected output shape: (1, 1, 1, 96) — rank 4, NOT (1, 96).
            // The Google embedding pipeline keeps the extra dimensions.
            @Suppress("UNCHECKED_CAST")
            val embArr = embOut as Array<Array<Array<FloatArray>>>
            embBuffer.addLast(embArr[0][0][0].copyOf())
            while (embBuffer.size > wwInputFrames) embBuffer.removeFirst()
            embRes.close()

            melProcessedIdx += EMBEDDING_STRIDE
        }
        // Trim the mel buffer — prevents unbounded memory growth
        if (melProcessedIdx > MEL_FRAMES_PER_EMBEDDING) {
            val keepFrom = melProcessedIdx - MEL_FRAMES_PER_EMBEDDING
            val newList = ArrayList<FloatArray>(melBuffer.size - keepFrom)
            for (i in keepFrom until melBuffer.size) newList.add(melBuffer[i])
            melBuffer.clear()
            melBuffer.addAll(newList)
            melProcessedIdx = MEL_FRAMES_PER_EMBEDDING
        }

        // 3) Classification — once we have wwInputFrames embeddings
        if (embBuffer.size < wwInputFrames) return
        val flatEmb = FloatArray(wwInputFrames * EMBEDDING_DIM)
        var p = 0
        // Take the last wwInputFrames embeddings (embBuffer is capped at wwInputFrames)
        for (e in embBuffer) {
            System.arraycopy(e, 0, flatEmb, p, EMBEDDING_DIM)
            p += EMBEDDING_DIM
        }
        val wwIn = OnnxTensor.createTensor(
            env,
            FloatBuffer.wrap(flatEmb),
            longArrayOf(1L, wwInputFrames.toLong(), EMBEDDING_DIM.toLong()),
        )
        val wwRes = wwSession!!.run(mapOf(wwInputName to wwIn))
        val wwOut = wwRes.get(0).value
        wwIn.close()
        // Expected output shape: (1, 1) → Array<FloatArray>
        @Suppress("UNCHECKED_CAST")
        val score = (wwOut as Array<FloatArray>)[0][0]
        wwRes.close()

        // Debounced patience gate: only fire after `patience` consecutive
        // above-threshold scores, and at most once per `debounceMs`.
        if (score >= threshold) {
            consecutiveAboveThreshold++
            if (consecutiveAboveThreshold >= patience) {
                val now = System.currentTimeMillis()
                if (now - lastDetectionMs >= debounceMs) {
                    lastDetectionMs = now
                    consecutiveAboveThreshold = 0
                    Log.i(TAG, "Wake-Word erkannt! score=$score model=$modelName")
                    emitDetected()
                }
            }
        } else {
            consecutiveAboveThreshold = 0
        }
    }
}
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import com.facebook.react.ReactPackage
|
||||||
|
import com.facebook.react.bridge.NativeModule
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.uimanager.ViewManager
|
||||||
|
|
||||||
|
/** Registers the OpenWakeWord native module with React Native; contributes no views. */
class OpenWakeWordPackage : ReactPackage {
    override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> =
        listOf(OpenWakeWordModule(reactContext))

    override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> =
        emptyList()
}
|
||||||
@@ -0,0 +1,374 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import android.media.AudioAttributes
|
||||||
|
import android.media.AudioFormat
|
||||||
|
import android.media.AudioManager
|
||||||
|
import android.media.AudioTrack
|
||||||
|
import android.os.Build
|
||||||
|
import android.util.Base64
|
||||||
|
import android.util.Log
|
||||||
|
import com.facebook.react.bridge.Arguments
|
||||||
|
import com.facebook.react.bridge.Promise
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||||
|
import com.facebook.react.bridge.ReactMethod
|
||||||
|
import com.facebook.react.modules.core.DeviceEventManagerModule
|
||||||
|
import java.util.concurrent.LinkedBlockingQueue
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Streamt PCM-s16le Audio direkt via AudioTrack MODE_STREAM mit Pre-Roll.
|
||||||
|
*
|
||||||
|
* Pre-Roll: AudioTrack wird zwar direkt gebaut und gefuttert, aber play()
|
||||||
|
* wird erst aufgerufen wenn PREROLL_SECONDS Audio im Buffer ist. So hat
|
||||||
|
* der Stream Zeit einen Vorrat aufzubauen — wenn XTTS mit RTF>1 rendert
|
||||||
|
* (langsamer als Echtzeit), laeuft der Buffer trotzdem nicht leer.
|
||||||
|
*
|
||||||
|
* Flow:
|
||||||
|
* JS: start(sampleRate, channels) → öffnet AudioTrack (noch nicht play())
|
||||||
|
* JS: writeChunk(base64) → dekodiert, queued, Writer schreibt
|
||||||
|
* Writer: spielt los sobald PREROLL erreicht ist
|
||||||
|
* JS: end() → wartet bis Queue leer, schließt
|
||||||
|
* JS: stop() → Hart stoppen (Cancel)
|
||||||
|
*/
|
||||||
|
class PcmStreamPlayerModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
|
||||||
|
companion object {
|
||||||
|
private const val TAG = "PcmStreamPlayer"
|
||||||
|
// Fallback wenn JS keinen Wert uebergibt.
|
||||||
|
private const val DEFAULT_PREROLL_SECONDS = 3.5
|
||||||
|
// 0.0 = sofortige Wiedergabe — play() direkt beim ersten Chunk.
|
||||||
|
// Macht Sinn fuer F5-TTS weil Render so schnell ist dass ein Puffer
|
||||||
|
// unnoetig ist und bei kurzen Saetzen sogar stoeren kann.
|
||||||
|
private const val MIN_PREROLL_SECONDS = 0.0
|
||||||
|
private const val MAX_PREROLL_SECONDS = 10.0
|
||||||
|
// Stille am Stream-Anfang, damit AudioTrack sauber anfaehrt und die
|
||||||
|
// ersten Samples nicht abgeschnitten werden (XTTS-Warmup + play()-Latenz).
|
||||||
|
private const val LEADING_SILENCE_SECONDS = 0.3
|
||||||
|
// Stille am Ende — puffert das Hardware-Flushen damit die letzten
|
||||||
|
// echten Samples garantiert ausgespielt werden bevor stop() kommt.
|
||||||
|
private const val TRAILING_SILENCE_SECONDS = 0.3
|
||||||
|
}
|
||||||
|
|
||||||
|
override fun getName() = "PcmStreamPlayer"
|
||||||
|
|
||||||
|
private var track: AudioTrack? = null
|
||||||
|
private val queue = LinkedBlockingQueue<ByteArray>()
|
||||||
|
private var writerThread: Thread? = null
|
||||||
|
@Volatile private var writerShouldStop = false
|
||||||
|
@Volatile private var endRequested = false
|
||||||
|
@Volatile private var prerollBytes: Int = 0
|
||||||
|
@Volatile private var playbackStarted = false
|
||||||
|
@Volatile private var bytesBuffered: Long = 0
|
||||||
|
@Volatile private var streamBytesPerFrame: Int = 2 // mono s16le default
|
||||||
|
|
||||||
|
// ── Lifecycle ──
|
||||||
|
|
||||||
|
    /**
     * Opens a new PCM stream session and spawns the writer thread.
     *
     * @param sampleRate     PCM sample rate in Hz.
     * @param channels       channel count; 2 selects stereo, anything else mono.
     * @param prerollSeconds target buffer fill before playback; NaN/Inf fall back
     *                       to the default, 0.0 means play immediately.
     * @param promise        resolves true once the track and writer thread are up;
     *                       rejects with "START_FAILED" on any setup error.
     */
    @ReactMethod
    fun start(sampleRate: Int, channels: Int, prerollSeconds: Double, promise: Promise) {
        try {
            // Tear down any previous session first.
            stopInternal()

            // Only NaN/Inf fall back to the default. 0.0 is valid (= immediate playback).
            val prerollSec = if (prerollSeconds.isFinite() && prerollSeconds >= 0.0) {
                prerollSeconds.coerceIn(MIN_PREROLL_SECONDS, MAX_PREROLL_SECONDS)
            } else {
                DEFAULT_PREROLL_SECONDS
            }

            val channelConfig = if (channels == 2) AudioFormat.CHANNEL_OUT_STEREO else AudioFormat.CHANNEL_OUT_MONO
            val encoding = AudioFormat.ENCODING_PCM_16BIT
            val minBuf = AudioTrack.getMinBufferSize(sampleRate, channelConfig, encoding)
            val bytesPerSecond = sampleRate * channels * 2 // 16-bit = 2 bytes
            val prerollTarget = (bytesPerSecond * prerollSec).toInt()
            // Buffer size decoupled from preroll — fixed ~4s buffer. A OnePlus on
            // Android 12 with USAGE_ASSISTANT only spins the AudioTrack up once
            // ~3s of data are buffered. Short texts are padded to 3s before
            // play() (see the block after mainLoop); the buffer needs ~1s
            // headroom because write() blocks.
            val bufferSize = (bytesPerSecond * 4).coerceAtLeast(minBuf * 8)
            prerollBytes = prerollTarget
            bytesBuffered = 0
            playbackStarted = false
            streamBytesPerFrame = channels * 2 // s16 = 2 bytes per sample

            val newTrack = AudioTrack.Builder()
                .setAudioAttributes(
                    AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_ASSISTANT)
                        .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
                        .build(),
                )
                .setAudioFormat(
                    AudioFormat.Builder()
                        .setSampleRate(sampleRate)
                        .setChannelMask(channelConfig)
                        .setEncoding(encoding)
                        .build(),
                )
                .setBufferSizeInBytes(bufferSize)
                .setTransferMode(AudioTrack.MODE_STREAM)
                .build()

            // Lower the start threshold: the default is bufferSize/2 (= 2s with
            // a 4s buffer), so the AudioTrack would not start before 2s are
            // buffered — with short TTS replies (3 words ~ 1.4s) the playback
            // position would stay stuck at 0. 0.1s is enough for the track to
            // start right away with the first chunk.
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
                try {
                    val startFrames = (sampleRate / 10).coerceAtLeast(1) // 100ms
                    newTrack.setStartThresholdInFrames(startFrames)
                    Log.i(TAG, "Start-Threshold gesetzt: ${startFrames} frames (~100ms)")
                } catch (e: Exception) {
                    Log.w(TAG, "setStartThresholdInFrames failed: ${e.message}")
                }
            }

            track = newTrack
            queue.clear()
            writerShouldStop = false
            endRequested = false

            writerThread = Thread({
                val t = track ?: return@Thread
                try {
                    // Leading silence into the buffer — gives the AudioTrack
                    // time to spin up. Mask keeps the byte count even (s16).
                    val leadingBytes = ((sampleRate * channels * 2) * LEADING_SILENCE_SECONDS).toInt() and 0x7FFFFFFE
                    if (leadingBytes > 0) {
                        val silence = ByteArray(leadingBytes)
                        var silOff = 0
                        while (silOff < silence.size && !writerShouldStop) {
                            val w = t.write(silence, silOff, silence.size - silOff)
                            if (w <= 0) break
                            silOff += w
                        }
                        bytesBuffered += silence.size
                    }
                    // With preroll=0: call play() IMMEDIATELY after the leading
                    // silence, not only when the first real chunk arrives.
                    // Android's AudioTrack keeps the play state and waits for
                    // new samples, so no words get swallowed when the first
                    // chunk only arrives after play()'s startup latency.
                    if (prerollBytes == 0 && !playbackStarted) {
                        try {
                            t.play()
                            playbackStarted = true
                            Log.i(TAG, "Playback sofort gestartet (preroll=0, ${bytesBuffered}B silence)")
                        } catch (e: Exception) {
                            Log.w(TAG, "play() sofort failed: ${e.message}")
                        }
                    }
                    // Idle cutoff: if endRequested never arrives but no data
                    // comes in for a long time, abort (bridge crash, lost
                    // final). 120s so long F5-TTS render pauses between
                    // sentences (e.g. model switch or cold GPU) do not tear
                    // the stream down.
                    var idleMs = 0L
                    val maxIdleMs = 120_000L
                    // Target buffer fill — below this watermark we feed silence
                    // so the AudioTrack does not underrun while the bridge
                    // renders the next sentence. Spotify/YouTube otherwise
                    // resume on their own after ~10s of silence.
                    val underrunGuardFrames = sampleRate / 10 // ~100ms
                    val silenceFillFrames = sampleRate / 20 // ~50ms per refill

                    mainLoop@ while (!writerShouldStop) {
                        val data = queue.poll(50, java.util.concurrent.TimeUnit.MILLISECONDS)
                        if (data == null) {
                            if (endRequested) {
                                // If play() never ran at all (stream without any
                                // data — very rare edge case): kick it now so
                                // the finally{} wait does not block forever.
                                if (!playbackStarted) {
                                    try { t.play(); playbackStarted = true } catch (_: Exception) {}
                                }
                                break@mainLoop
                            }
                            // Underrun guard: feed silence when the AudioTrack
                            // buffer is about to run dry. Spotify otherwise
                            // resumes on its own after ~10s of pause, even
                            // though we still hold the audio focus.
                            if (playbackStarted) {
                                val framesWritten = bytesBuffered / streamBytesPerFrame
                                val framesPlayed = t.playbackHeadPosition.toLong()
                                val framesInBuffer = framesWritten - framesPlayed
                                if (framesInBuffer < underrunGuardFrames) {
                                    val fillBytes = silenceFillFrames * streamBytesPerFrame
                                    val silence = ByteArray(fillBytes)
                                    var silOff = 0
                                    while (silOff < silence.size && !writerShouldStop) {
                                        val w = t.write(silence, silOff, silence.size - silOff)
                                        if (w <= 0) break
                                        silOff += w
                                    }
                                    bytesBuffered += silence.size
                                }
                            }
                            idleMs += 50L
                            if (idleMs >= maxIdleMs) {
                                Log.w(TAG, "Idle-Cutoff: ${maxIdleMs}ms keine Daten — Stream wird beendet")
                                break@mainLoop
                            }
                            continue@mainLoop
                        }
                        idleMs = 0L

                        // Call play() on the VERY FIRST data chunk — no matter
                        // how little data there is. Otherwise the AudioTrack
                        // stalls on OnePlus/Android 12 when play() is only
                        // called after the buffer has been fully filled.
                        // Building up the pre-roll then happens while the track
                        // is already playing — the underrun guard feeds silence
                        // if needed.
                        if (!playbackStarted) {
                            try {
                                t.play()
                                playbackStarted = true
                                Log.i(TAG, "Playback gestartet beim 1. Chunk (${bytesBuffered}B leading + ${data.size}B data)")
                            } catch (e: Exception) {
                                Log.w(TAG, "play() failed: ${e.message}")
                            }
                        }

                        var offset = 0
                        while (offset < data.size && !writerShouldStop) {
                            val written = t.write(data, offset, data.size - offset)
                            if (written <= 0) break
                            offset += written
                        }
                        bytesBuffered += data.size
                    }
                    // Trailing silence so the last real samples are guaranteed
                    // to make it through the hardware buffering before stop()
                    // cuts them off.
                    val trailingBytes = ((sampleRate * channels * 2) * TRAILING_SILENCE_SECONDS).toInt() and 0x7FFFFFFE
                    if (trailingBytes > 0 && !writerShouldStop) {
                        val silence = ByteArray(trailingBytes)
                        var silOff = 0
                        while (silOff < silence.size && !writerShouldStop) {
                            val w = t.write(silence, silOff, silence.size - silOff)
                            if (w <= 0) break
                            silOff += w
                        }
                        bytesBuffered += silence.size
                    }
                } catch (e: Exception) {
                    Log.w(TAG, "Writer-Thread Fehler: ${e.message}")
                } finally {
                    // Wait until all written samples have actually been played,
                    // otherwise t.release() cuts off the last seconds.
                    try {
                        val totalFrames = (bytesBuffered / streamBytesPerFrame).toInt()
                        var lastPos = -1
                        var stalledCount = 0
                        var retried = false
                        while (!writerShouldStop) {
                            val pos = t.playbackHeadPosition
                            if (pos >= totalFrames) break
                            if (pos == lastPos) {
                                stalledCount++
                                // After 500ms standstill: AudioTrack quirk on
                                // some devices (OnePlus/Android 12) — kick
                                // play() once more.
                                if (stalledCount == 10 && pos == 0 && !retried) {
                                    retried = true
                                    Log.w(TAG, "playback nicht angefahren — retry play()")
                                    try { t.play() } catch (e: Exception) {
                                        Log.w(TAG, "retry play() failed: ${e.message}")
                                    }
                                }
                                if (stalledCount > 40) {
                                    Log.w(TAG, "playback stalled at $pos/$totalFrames — give up")
                                    break
                                }
                            } else {
                                stalledCount = 0
                                lastPos = pos
                            }
                            Thread.sleep(50)
                        }
                        Log.i(TAG, "Playback fertig: frames=$totalFrames pos=${t.playbackHeadPosition}")
                    } catch (_: Exception) {}
                    try { t.stop() } catch (_: Exception) {}
                    try { t.release() } catch (_: Exception) {}
                    // RN event: the AudioTrack is really done (all samples
                    // played). JS releases the AudioFocus only NOW — otherwise
                    // e.g. Spotify plays during the end() cap while the
                    // assistant is still talking (15s+ depending on buffer).
                    try {
                        val params = Arguments.createMap()
                        reactApplicationContext
                            .getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
                            .emit("PcmPlaybackFinished", params)
                    } catch (e: Exception) {
                        Log.w(TAG, "PlaybackFinished emit failed: ${e.message}")
                    }
                }
            }, "PcmStreamWriter").apply { start() }

            Log.i(TAG, "Stream gestartet: ${sampleRate}Hz ch=$channels buf=${bufferSize}B preroll=${prerollBytes}B (${prerollSec}s)")
            promise.resolve(true)
        } catch (e: Exception) {
            Log.e(TAG, "start fehlgeschlagen", e)
            promise.reject("START_FAILED", e.message, e)
        }
    }
|
||||||
|
|
||||||
|
@ReactMethod
|
||||||
|
fun writeChunk(base64Pcm: String, promise: Promise) {
|
||||||
|
try {
|
||||||
|
if (base64Pcm.isEmpty()) {
|
||||||
|
promise.resolve(true)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
val bytes = Base64.decode(base64Pcm, Base64.DEFAULT)
|
||||||
|
queue.put(bytes)
|
||||||
|
promise.resolve(true)
|
||||||
|
} catch (e: Exception) {
|
||||||
|
promise.reject("WRITE_FAILED", e.message, e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Signalisiert: keine weiteren Chunks. Writer spielt aus, dann stoppt.
|
||||||
|
* Das Promise resolved erst wenn der Writer-Thread fertig ist —
|
||||||
|
* wichtig damit der Aufrufer den AudioFocus erst NACH dem letzten
|
||||||
|
* abgespielten Sample wieder freigibt (sonst dreht Spotify hoch
|
||||||
|
* waehrend das Pre-Roll noch ausspielt).
|
||||||
|
*/
|
||||||
|
@ReactMethod
|
||||||
|
fun end(promise: Promise) {
|
||||||
|
endRequested = true
|
||||||
|
val t = writerThread
|
||||||
|
if (t == null || !t.isAlive) {
|
||||||
|
promise.resolve(true)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// Im Hintergrund auf den Writer warten — kein Threading-Block fuer JS-Bridge
|
||||||
|
Thread({
|
||||||
|
try {
|
||||||
|
t.join(15_000) // hartes Cap, falls Writer haengt
|
||||||
|
} catch (_: InterruptedException) {}
|
||||||
|
promise.resolve(true)
|
||||||
|
}, "PcmStreamEndWaiter").start()
|
||||||
|
}
|
||||||
|
|
||||||
|
    /** Hard stop (cancel) — discards the queue and releases the track immediately. */
    @ReactMethod
    fun stop(promise: Promise) {
        stopInternal()
        promise.resolve(true)
    }
|
||||||
|
|
||||||
|
    // Required no-op stubs for NativeEventEmitter (RN warns if they are missing).
    @ReactMethod fun addListener(eventName: String) {}
    @ReactMethod fun removeListeners(count: Int) {}
|
||||||
|
|
||||||
|
private fun stopInternal() {
|
||||||
|
writerShouldStop = true
|
||||||
|
endRequested = true
|
||||||
|
queue.clear()
|
||||||
|
writerThread?.interrupt()
|
||||||
|
writerThread = null
|
||||||
|
val t = track
|
||||||
|
if (t != null) {
|
||||||
|
try { t.stop() } catch (_: Exception) {}
|
||||||
|
try { t.release() } catch (_: Exception) {}
|
||||||
|
}
|
||||||
|
track = null
|
||||||
|
}
|
||||||
|
|
||||||
|
    /** Releases the audio session before the React host goes away. */
    override fun onCatalystInstanceDestroy() {
        stopInternal()
        super.onCatalystInstanceDestroy()
    }
|
||||||
|
}
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import com.facebook.react.ReactPackage
|
||||||
|
import com.facebook.react.bridge.NativeModule
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.uimanager.ViewManager
|
||||||
|
|
||||||
|
/** Registers the PcmStreamPlayer native module; contributes no view managers. */
class PcmStreamPlayerPackage : ReactPackage {
    override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> =
        listOf(PcmStreamPlayerModule(reactContext))

    override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> =
        emptyList()
}
|
||||||
@@ -0,0 +1,126 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import android.Manifest
|
||||||
|
import android.content.Context
|
||||||
|
import android.content.pm.PackageManager
|
||||||
|
import android.os.Build
|
||||||
|
import android.telephony.PhoneStateListener
|
||||||
|
import android.telephony.TelephonyCallback
|
||||||
|
import android.telephony.TelephonyManager
|
||||||
|
import android.util.Log
|
||||||
|
import androidx.core.content.ContextCompat
|
||||||
|
import com.facebook.react.bridge.Arguments
|
||||||
|
import com.facebook.react.bridge.Promise
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.bridge.ReactContextBaseJavaModule
|
||||||
|
import com.facebook.react.bridge.ReactMethod
|
||||||
|
import com.facebook.react.modules.core.DeviceEventManagerModule
|
||||||
|
|
||||||
|
/**
 * Listens for call-state changes — when the phone rings or a call is active,
 * the module emits a "PhoneCallStateChanged" event to JS.
 *
 * The JS side then stops TTS playback so the assistant does not keep talking
 * into a conversation. Without the READ_PHONE_STATE permission start() fails
 * silently (resolves false) — the rest of the app works as before.
 *
 * State strings: "idle" | "ringing" | "offhook"
 */
class PhoneCallModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) {
    override fun getName() = "PhoneCall"

    companion object { private const val TAG = "PhoneCall" }

    // Manager we registered with; null while not listening.
    private var telephonyManager: TelephonyManager? = null
    // Pre-API-31 listener (deprecated PhoneStateListener path).
    private var legacyListener: PhoneStateListener? = null
    private var modernCallback: Any? = null // TelephonyCallback from API 31
    // Last state seen; used to suppress duplicate events.
    private var lastState: Int = TelephonyManager.CALL_STATE_IDLE

    /**
     * Registers the call-state listener. Resolves false (without rejecting)
     * when the permission is missing or no TelephonyManager is available.
     */
    @ReactMethod
    fun start(promise: Promise) {
        try {
            val perm = ContextCompat.checkSelfPermission(reactApplicationContext, Manifest.permission.READ_PHONE_STATE)
            if (perm != PackageManager.PERMISSION_GRANTED) {
                Log.w(TAG, "READ_PHONE_STATE Permission fehlt — Anruf-Erkennung inaktiv")
                promise.resolve(false)
                return
            }
            val tm = reactApplicationContext.getSystemService(Context.TELEPHONY_SERVICE) as? TelephonyManager
            if (tm == null) {
                Log.w(TAG, "TelephonyManager nicht verfuegbar")
                promise.resolve(false)
                return
            }
            telephonyManager = tm

            // API 31+ uses TelephonyCallback; older releases the deprecated
            // PhoneStateListener. Both funnel into handleStateChange().
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
                val cb = object : TelephonyCallback(), TelephonyCallback.CallStateListener {
                    override fun onCallStateChanged(state: Int) {
                        handleStateChange(state)
                    }
                }
                tm.registerTelephonyCallback(reactApplicationContext.mainExecutor, cb)
                modernCallback = cb
            } else {
                @Suppress("DEPRECATION")
                val l = object : PhoneStateListener() {
                    override fun onCallStateChanged(state: Int, phoneNumber: String?) {
                        handleStateChange(state)
                    }
                }
                @Suppress("DEPRECATION")
                tm.listen(l, PhoneStateListener.LISTEN_CALL_STATE)
                legacyListener = l
            }
            Log.i(TAG, "PhoneCall-Listener aktiv")
            promise.resolve(true)
        } catch (e: Exception) {
            Log.e(TAG, "start fehlgeschlagen", e)
            promise.reject("START_FAILED", e.message ?: "Unbekannter Fehler", e)
        }
    }

    /** Unregisters whichever listener variant is active and resets state. */
    @ReactMethod
    fun stop(promise: Promise) {
        try {
            val tm = telephonyManager
            if (tm != null) {
                if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) {
                    (modernCallback as? TelephonyCallback)?.let { tm.unregisterTelephonyCallback(it) }
                    modernCallback = null
                } else {
                    @Suppress("DEPRECATION")
                    legacyListener?.let { tm.listen(it, PhoneStateListener.LISTEN_NONE) }
                    legacyListener = null
                }
            }
            telephonyManager = null
            lastState = TelephonyManager.CALL_STATE_IDLE
            promise.resolve(true)
        } catch (e: Exception) {
            promise.reject("STOP_FAILED", e.message ?: "")
        }
    }

    /**
     * Maps a CALL_STATE_* value to its event string and emits it to JS.
     * Duplicate states and unknown values are dropped.
     */
    private fun handleStateChange(state: Int) {
        if (state == lastState) return
        lastState = state
        val name = when (state) {
            TelephonyManager.CALL_STATE_RINGING -> "ringing"
            TelephonyManager.CALL_STATE_OFFHOOK -> "offhook"
            TelephonyManager.CALL_STATE_IDLE -> "idle"
            else -> return
        }
        Log.i(TAG, "Telefon-State: $name")
        val params = Arguments.createMap().apply { putString("state", name) }
        try {
            reactApplicationContext.getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
                .emit("PhoneCallStateChanged", params)
        } catch (e: Exception) {
            Log.w(TAG, "Event-emit fehlgeschlagen: ${e.message}")
        }
    }

    // Required no-op stubs for NativeEventEmitter.
    @ReactMethod fun addListener(eventName: String) {}
    @ReactMethod fun removeListeners(count: Int) {}
}
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
package com.ariacockpit
|
||||||
|
|
||||||
|
import com.facebook.react.ReactPackage
|
||||||
|
import com.facebook.react.bridge.NativeModule
|
||||||
|
import com.facebook.react.bridge.ReactApplicationContext
|
||||||
|
import com.facebook.react.uimanager.ViewManager
|
||||||
|
|
||||||
|
/** Registers the PhoneCall native module; contributes no view managers. */
class PhoneCallPackage : ReactPackage {
    override fun createNativeModules(reactContext: ReactApplicationContext): List<NativeModule> =
        listOf(PhoneCallModule(reactContext))

    override fun createViewManagers(reactContext: ReactApplicationContext): List<ViewManager<*, *>> =
        emptyList()
}
|
||||||
Binary file not shown.
@@ -0,0 +1,8 @@
|
|||||||
|
<?xml version="1.0" encoding="utf-8"?>
|
||||||
|
<paths>
|
||||||
|
<cache-path name="cache" path="." />
|
||||||
|
<files-path name="files" path="." />
|
||||||
|
<external-path name="external" path="." />
|
||||||
|
<external-files-path name="external_files" path="." />
|
||||||
|
<external-cache-path name="external_cache" path="." />
|
||||||
|
</paths>
|
||||||
@@ -1,7 +1,9 @@
|
|||||||
buildscript {
|
buildscript {
|
||||||
ext {
|
ext {
|
||||||
buildToolsVersion = "34.0.0"
|
buildToolsVersion = "34.0.0"
|
||||||
minSdkVersion = 23
|
// 24 = Android 7.0 (Nougat). Verlangt von Porcupine (Picovoice).
|
||||||
|
// Realistisch eh das Minimum: alles unter 7.0 hat <1% Marktanteil.
|
||||||
|
minSdkVersion = 24
|
||||||
compileSdkVersion = 34
|
compileSdkVersion = 34
|
||||||
targetSdkVersion = 34
|
targetSdkVersion = 34
|
||||||
ndkVersion = "25.1.8937393"
|
ndkVersion = "25.1.8937393"
|
||||||
|
|||||||
+15
-2
@@ -167,10 +167,23 @@ export CI=true
|
|||||||
|
|
||||||
if [ "$MODE" = "debug" ]; then
|
if [ "$MODE" = "debug" ]; then
|
||||||
./gradlew assembleDebug
|
./gradlew assembleDebug
|
||||||
APK_PATH="app/build/outputs/apk/debug/app-debug.apk"
|
OUT_DIR="app/build/outputs/apk/debug"
|
||||||
else
|
else
|
||||||
./gradlew assembleRelease
|
./gradlew assembleRelease
|
||||||
APK_PATH="app/build/outputs/apk/release/app-release.apk"
|
OUT_DIR="app/build/outputs/apk/release"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Mit ABI-Splits heisst die APK z.B. app-arm64-v8a-release.apk statt
|
||||||
|
# app-release.apk. arm64-v8a-Variante zuerst probieren (das ist unser
|
||||||
|
# Standard), Universal-APK als Fallback falls Splits deaktiviert sind.
|
||||||
|
if [ -f "$OUT_DIR/app-arm64-v8a-${MODE}.apk" ]; then
|
||||||
|
APK_PATH="$OUT_DIR/app-arm64-v8a-${MODE}.apk"
|
||||||
|
elif [ -f "$OUT_DIR/app-${MODE}.apk" ]; then
|
||||||
|
APK_PATH="$OUT_DIR/app-${MODE}.apk"
|
||||||
|
else
|
||||||
|
echo -e "${RED}Keine passende APK in $OUT_DIR gefunden${NC}"
|
||||||
|
cd ..
|
||||||
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
cd ..
|
cd ..
|
||||||
|
|||||||
Generated
-12840
File diff suppressed because it is too large
Load Diff
+18
-18
@@ -1,6 +1,6 @@
|
|||||||
{
|
{
|
||||||
"name": "aria-cockpit",
|
"name": "aria-cockpit",
|
||||||
"version": "0.1.0",
|
"version": "0.1.2.4",
|
||||||
"private": true,
|
"private": true,
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"android": "react-native run-android",
|
"android": "react-native run-android",
|
||||||
@@ -10,32 +10,32 @@
|
|||||||
"build:apk": "cd android && ./gradlew assembleRelease"
|
"build:apk": "cd android && ./gradlew assembleRelease"
|
||||||
},
|
},
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
|
"@react-native-async-storage/async-storage": "^1.21.0",
|
||||||
|
"@react-native-community/geolocation": "^3.2.1",
|
||||||
|
"@react-navigation/bottom-tabs": "^6.5.11",
|
||||||
|
"@react-navigation/native": "^6.1.9",
|
||||||
"react": "18.2.0",
|
"react": "18.2.0",
|
||||||
"react-native": "0.73.4",
|
"react-native": "0.73.4",
|
||||||
"@react-navigation/native": "^6.1.9",
|
"react-native-audio-recorder-player": "^3.6.7",
|
||||||
"@react-navigation/bottom-tabs": "^6.5.11",
|
"react-native-camera-kit": "^13.0.0",
|
||||||
"react-native-screens": "3.27.0",
|
|
||||||
"react-native-safe-area-context": "^4.8.2",
|
|
||||||
"react-native-document-picker": "^9.1.1",
|
"react-native-document-picker": "^9.1.1",
|
||||||
"react-native-sound": "^0.11.2",
|
"react-native-fs": "^2.20.0",
|
||||||
"@react-native-community/geolocation": "^3.2.1",
|
|
||||||
"react-native-image-picker": "^7.1.0",
|
"react-native-image-picker": "^7.1.0",
|
||||||
"react-native-permissions": "^4.1.4",
|
"react-native-permissions": "^4.1.4",
|
||||||
"react-native-camera-kit": "^13.0.0",
|
"react-native-safe-area-context": "^4.8.2",
|
||||||
"@react-native-async-storage/async-storage": "^1.21.0",
|
"react-native-screens": "3.27.0",
|
||||||
"react-native-fs": "^2.20.0",
|
"react-native-sound": "^0.11.2",
|
||||||
"react-native-audio-recorder-player": "^3.6.7",
|
"react-native-svg": "^14.1.0"
|
||||||
"react-native-live-audio-stream": "^1.1.1"
|
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"typescript": "^5.3.3",
|
"@react-native/eslint-config": "^0.73.2",
|
||||||
|
"@react-native/metro-config": "^0.73.5",
|
||||||
|
"@react-native/typescript-config": "^0.73.1",
|
||||||
|
"@types/jest": "^29.5.11",
|
||||||
"@types/react": "^18.2.48",
|
"@types/react": "^18.2.48",
|
||||||
"@types/react-native": "^0.73.0",
|
"@types/react-native": "^0.73.0",
|
||||||
"@react-native/eslint-config": "^0.73.2",
|
|
||||||
"@react-native/typescript-config": "^0.73.1",
|
|
||||||
"@react-native/metro-config": "^0.73.5",
|
|
||||||
"metro-react-native-babel-preset": "^0.77.0",
|
|
||||||
"jest": "^29.7.0",
|
"jest": "^29.7.0",
|
||||||
"@types/jest": "^29.5.11"
|
"metro-react-native-babel-preset": "^0.77.0",
|
||||||
|
"typescript": "^5.3.3"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
Binary file not shown.
@@ -17,6 +17,7 @@ import {
|
|||||||
import DocumentPicker, {
|
import DocumentPicker, {
|
||||||
DocumentPickerResponse,
|
DocumentPickerResponse,
|
||||||
} from 'react-native-document-picker';
|
} from 'react-native-document-picker';
|
||||||
|
import RNFS from 'react-native-fs';
|
||||||
|
|
||||||
// --- Typen ---
|
// --- Typen ---
|
||||||
|
|
||||||
@@ -74,15 +75,17 @@ const FileUpload: React.FC<FileUploadProps> = ({ onFileSelected, onCancel }) =>
|
|||||||
|
|
||||||
setLoading(true);
|
setLoading(true);
|
||||||
try {
|
try {
|
||||||
// In Produktion: Datei lesen und zu Base64 konvertieren
|
// Datei lesen und zu Base64 konvertieren
|
||||||
// const base64 = await RNFS.readFile(selectedFile.fileCopyUri || selectedFile.uri, 'base64');
|
const filePath = selectedFile.fileCopyUri || selectedFile.uri;
|
||||||
const base64Placeholder = '';
|
// URI-Schema entfernen fuer RNFS (file:// → absoluter Pfad)
|
||||||
|
const cleanPath = filePath.replace('file://', '');
|
||||||
|
const base64 = await RNFS.readFile(cleanPath, 'base64');
|
||||||
|
|
||||||
const fileData: FileData = {
|
const fileData: FileData = {
|
||||||
name: selectedFile.name || 'unbenannt',
|
name: selectedFile.name || 'unbenannt',
|
||||||
type: selectedFile.type || 'application/octet-stream',
|
type: selectedFile.type || 'application/octet-stream',
|
||||||
size: selectedFile.size || 0,
|
size: selectedFile.size || 0,
|
||||||
base64: base64Placeholder,
|
base64,
|
||||||
uri: selectedFile.uri,
|
uri: selectedFile.uri,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,88 @@
|
|||||||
|
/**
|
||||||
|
* MessageText — selektierbarer Chat-Text mit Android-Auto-Linkifizierung,
|
||||||
|
* plus Inline-Image-Rendering wenn der Text Bild-URLs enthaelt.
|
||||||
|
*
|
||||||
|
 * - Markdown-Bild-Syntax `![alt](url)` und plain `https://...image.png` werden
|
||||||
|
* erkannt — die URL bleibt im Text sichtbar (klickbar via Linkify),
|
||||||
|
* zusaetzlich wird das Bild als <Image> oder <SvgUri> drunter gerendert.
|
||||||
|
* - Wir nutzen Androids dataDetectorType="all" (System macht Phone/URL/Email
|
||||||
|
* automatisch klickbar) und ein einzelnes <Text selectable> ohne nested
|
||||||
|
* <Text> mit eigenem onPress — Nested Text mit onPress fing die Long-Press-
|
||||||
|
* Geste ab, damit war Markieren+Kopieren defekt.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import React, { useEffect, useState } from 'react';
|
||||||
|
import { View, Text, Image, TextStyle, StyleProp } from 'react-native';
|
||||||
|
import { SvgUri } from 'react-native-svg';
|
||||||
|
|
||||||
|
interface Props {
|
||||||
|
text: string;
|
||||||
|
style?: StyleProp<TextStyle>;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bild-URL-Pattern: http(s)://... endend auf gaengige Bild-Endungen.
|
||||||
|
const IMG_URL_RE = /https?:\/\/[^\s)<"']+\.(?:jpe?g|png|gif|webp|bmp|ico|svg)(?:\?[^\s)<"']*)?/gi;
|
||||||
|
|
||||||
|
function extractImageUrls(text: string): string[] {
|
||||||
|
const urls = new Set<string>();
|
||||||
|
const matches = text.match(IMG_URL_RE);
|
||||||
|
if (matches) matches.forEach(u => urls.add(u));
|
||||||
|
return Array.from(urls);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Matches a .svg suffix, optionally followed by a query string.
const SVG_RE = /\.svg(?:\?|$)/i;

/** Image with a dynamic aspect ratio taken from the real image dimensions.
 * SVGs are rendered via react-native-svg (Image.getSize does not work for SVG). */
const InlineImage: React.FC<{ uri: string }> = ({ uri }) => {
  const isSvg = SVG_RE.test(uri);
  // Defaults to square until Image.getSize reports the real dimensions.
  const [aspectRatio, setAspectRatio] = useState<number>(1);
  // Once loading fails we render nothing; the URL stays visible in the text.
  const [failed, setFailed] = useState(false);
  useEffect(() => {
    if (isSvg) return; // Image.getSize does not work for SVG
    // Guard against setState after unmount or after `uri` changed.
    let cancelled = false;
    Image.getSize(
      uri,
      // Clamp the ratio to [0.5, 2.5] so extreme shapes stay readable.
      (w, h) => { if (!cancelled && w > 0 && h > 0) setAspectRatio(Math.max(0.5, Math.min(2.5, w / h))); },
      () => { if (!cancelled) setFailed(true); },
    );
    return () => { cancelled = true; };
  }, [uri, isSvg]);
  if (failed) return null;
  if (isSvg) {
    return (
      <View style={{ marginTop: 8, width: 260, height: 260, backgroundColor: '#0D0D1A', borderRadius: 8, alignItems: 'center', justifyContent: 'center' }}>
        <SvgUri uri={uri} width="100%" height="100%" onError={() => setFailed(true)} />
      </View>
    );
  }
  return (
    <Image
      source={{ uri }}
      style={{ width: 260, aspectRatio, borderRadius: 8, marginTop: 8, backgroundColor: '#0D0D1A' }}
      resizeMode="cover"
      onError={() => setFailed(true)}
    />
  );
};
|
||||||
|
|
||||||
|
const MessageText: React.FC<Props> = ({ text, style }) => {
|
||||||
|
const imageUrls = extractImageUrls(text || '');
|
||||||
|
if (imageUrls.length === 0) {
|
||||||
|
return (
|
||||||
|
<Text style={style} selectable dataDetectorType="all">
|
||||||
|
{text}
|
||||||
|
</Text>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return (
|
||||||
|
<View>
|
||||||
|
<Text style={style} selectable dataDetectorType="all">
|
||||||
|
{text}
|
||||||
|
</Text>
|
||||||
|
{imageUrls.map(u => <InlineImage key={u} uri={u} />)}
|
||||||
|
</View>
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
export default MessageText;
|
||||||
@@ -44,7 +44,6 @@ const VoiceButton: React.FC<VoiceButtonProps> = ({
|
|||||||
const [meterDb, setMeterDb] = useState(-160);
|
const [meterDb, setMeterDb] = useState(-160);
|
||||||
const pulseAnim = useRef(new Animated.Value(1)).current;
|
const pulseAnim = useRef(new Animated.Value(1)).current;
|
||||||
const durationTimer = useRef<ReturnType<typeof setInterval> | null>(null);
|
const durationTimer = useRef<ReturnType<typeof setInterval> | null>(null);
|
||||||
const isLongPress = useRef(false);
|
|
||||||
|
|
||||||
// Puls-Animation starten/stoppen
|
// Puls-Animation starten/stoppen
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
@@ -93,67 +92,63 @@ const VoiceButton: React.FC<VoiceButtonProps> = ({
|
|||||||
}
|
}
|
||||||
}, [isRecording]);
|
}, [isRecording]);
|
||||||
|
|
||||||
// VAD Silence Callback — Auto-Stop
|
// VAD Silence Callback — Auto-Stop.
|
||||||
|
// WICHTIG: NICHT auf isRecording prüfen (Closure ist stale) — stattdessen
|
||||||
|
// audioService selber fragen. Empty deps → Listener wird EINMAL registriert.
|
||||||
|
// audioService garantiert jetzt dass der Callback pro Aufnahme nur einmal
|
||||||
|
// feuert (silenceFired-Latch).
|
||||||
|
const onCompleteRef = useRef(onRecordingComplete);
|
||||||
|
useEffect(() => { onCompleteRef.current = onRecordingComplete; }, [onRecordingComplete]);
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
const unsubSilence = audioService.onSilenceDetected(async () => {
|
const unsubSilence = audioService.onSilenceDetected(async () => {
|
||||||
if (!isRecording) return;
|
if (audioService.getRecordingState() !== 'recording') return;
|
||||||
setIsRecording(false);
|
|
||||||
const result = await audioService.stopRecording();
|
const result = await audioService.stopRecording();
|
||||||
|
setIsRecording(false);
|
||||||
if (result && result.durationMs > 500) {
|
if (result && result.durationMs > 500) {
|
||||||
onRecordingComplete(result);
|
onCompleteRef.current(result);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
return unsubSilence;
|
return unsubSilence;
|
||||||
}, [isRecording, onRecordingComplete]);
|
}, []);
|
||||||
|
|
||||||
// Auto-Start fuer Wake Word (extern getriggert)
|
// Auto-Start fuer Wake Word (extern getriggert)
|
||||||
const startAutoRecording = useCallback(async () => {
|
const startAutoRecording = useCallback(async () => {
|
||||||
if (disabled || isRecording) return;
|
if (disabled || isRecording) return;
|
||||||
const started = await audioService.startRecording(true); // autoStop = true
|
const started = await audioService.startRecording(true); // autoStop = true
|
||||||
if (started) {
|
if (started) {
|
||||||
isLongPress.current = false;
|
|
||||||
setIsRecording(true);
|
setIsRecording(true);
|
||||||
}
|
}
|
||||||
}, [disabled, isRecording]);
|
}, [disabled, isRecording]);
|
||||||
|
|
||||||
// Push-to-Talk: Lang druecken
|
// Tap-to-Talk: Einmal tippen startet mit Auto-Stop.
|
||||||
const handlePressIn = async () => {
|
// Guard gegen Doppel-Tap während asyncer Start/Stop.
|
||||||
if (disabled || isRecording) return;
|
const tapBusy = useRef(false);
|
||||||
isLongPress.current = true;
|
|
||||||
const started = await audioService.startRecording(false); // kein autoStop
|
|
||||||
if (started) {
|
|
||||||
setIsRecording(true);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const handlePressOut = async () => {
|
|
||||||
if (!isRecording || !isLongPress.current) return;
|
|
||||||
isLongPress.current = false;
|
|
||||||
setIsRecording(false);
|
|
||||||
const result = await audioService.stopRecording();
|
|
||||||
if (result && result.durationMs > 300) {
|
|
||||||
onRecordingComplete(result);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Tap-to-Talk: Einmal tippen startet mit Auto-Stop
|
|
||||||
const handleTap = async () => {
|
const handleTap = async () => {
|
||||||
if (disabled) return;
|
if (disabled || tapBusy.current) return;
|
||||||
if (isRecording) {
|
tapBusy.current = true;
|
||||||
|
try {
|
||||||
|
// Fragen WIR den Service, nicht den React-State (Closure kann stale sein)
|
||||||
|
const svcState = audioService.getRecordingState();
|
||||||
|
if (svcState === 'recording') {
|
||||||
// Aufnahme manuell stoppen
|
// Aufnahme manuell stoppen
|
||||||
setIsRecording(false);
|
|
||||||
const result = await audioService.stopRecording();
|
const result = await audioService.stopRecording();
|
||||||
|
setIsRecording(false);
|
||||||
if (result && result.durationMs > 300) {
|
if (result && result.durationMs > 300) {
|
||||||
onRecordingComplete(result);
|
onRecordingComplete(result);
|
||||||
}
|
}
|
||||||
} else {
|
} else if (svcState === 'idle') {
|
||||||
// Aufnahme mit Auto-Stop starten
|
// Aufnahme mit Auto-Stop starten
|
||||||
const started = await audioService.startRecording(true);
|
const started = await audioService.startRecording(true);
|
||||||
if (started) {
|
if (started) {
|
||||||
isLongPress.current = false;
|
|
||||||
setIsRecording(true);
|
setIsRecording(true);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
// svcState === 'processing': Stopp in progress — nichts tun, User
|
||||||
|
// muss nochmal tippen wenn fertig. Aber wir blockieren mit tapBusy
|
||||||
|
// kurz damit der User's UI-Feedback synchron bleibt.
|
||||||
|
} finally {
|
||||||
|
tapBusy.current = false;
|
||||||
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
// Expose startAutoRecording via ref fuer Wake Word
|
// Expose startAutoRecording via ref fuer Wake Word
|
||||||
@@ -183,10 +178,6 @@ const VoiceButton: React.FC<VoiceButtonProps> = ({
|
|||||||
isRecording && styles.buttonOuterRecording,
|
isRecording && styles.buttonOuterRecording,
|
||||||
{ transform: [{ scale: pulseAnim }] },
|
{ transform: [{ scale: pulseAnim }] },
|
||||||
]}
|
]}
|
||||||
onStartShouldSetResponder={() => true}
|
|
||||||
onResponderGrant={handlePressIn}
|
|
||||||
onResponderRelease={handlePressOut}
|
|
||||||
onResponderTerminate={handlePressOut}
|
|
||||||
>
|
>
|
||||||
<TouchableOpacity
|
<TouchableOpacity
|
||||||
activeOpacity={0.8}
|
activeOpacity={0.8}
|
||||||
|
|||||||
@@ -0,0 +1,362 @@
|
|||||||
|
/**
|
||||||
|
* VoiceCloneModal — Eigene Stimme aufnehmen und an XTTS uploaden.
|
||||||
|
*
|
||||||
|
* Flow:
|
||||||
|
* - Modal zeigt Vorlesetext (>30s Lesedauer) + Aufnahme-Button
|
||||||
|
* - Bei Aufnahme: max 30s, Fortschrittsbalken, Countdown
|
||||||
|
* - Bei Stop: Name abfragen, dann als voice_upload ueber RVS schicken
|
||||||
|
* - XTTS-Bridge speichert /voices/<name>.wav, antwortet mit xtts_voice_saved
|
||||||
|
*/
|
||||||
|
|
||||||
|
import React, { useCallback, useEffect, useRef, useState } from 'react';
|
||||||
|
import {
|
||||||
|
Modal,
|
||||||
|
View,
|
||||||
|
Text,
|
||||||
|
TouchableOpacity,
|
||||||
|
StyleSheet,
|
||||||
|
Alert,
|
||||||
|
ScrollView,
|
||||||
|
ActivityIndicator,
|
||||||
|
TextInput,
|
||||||
|
} from 'react-native';
|
||||||
|
import audioService from '../services/audio';
|
||||||
|
import rvs from '../services/rvs';
|
||||||
|
|
||||||
|
interface Props {
|
||||||
|
visible: boolean;
|
||||||
|
onClose: () => void;
|
||||||
|
}
|
||||||
|
|
||||||
|
const SAMPLE_TEXT = `Das ist meine eigene Stimme fuer ARIA. Ich lese jetzt einen laengeren Absatz laut vor, damit das Voice-Cloning eine gute Grundlage hat. Guten Tag, ich heisse Stefan und baue gerade mit grosser Begeisterung an meinem persoenlichen KI-Assistenten. Wir automatisieren Infrastruktur, managen Sessions und spielen mit Sprachsynthese. Die letzten Jahre habe ich viel gelernt, vor allem dass Geduld genauso wichtig ist wie Neugier. Hoert sich das jetzt an wie ich selbst? Wenn alles klappt, spricht ARIA bald mit dieser Stimme.`;
|
||||||
|
|
||||||
|
const MAX_DURATION_MS = 30000;
|
||||||
|
const TARGET_DURATION_MS = 15000;
|
||||||
|
|
||||||
|
const VoiceCloneModal: React.FC<Props> = ({ visible, onClose }) => {
|
||||||
|
const [recording, setRecording] = useState(false);
|
||||||
|
const [durationMs, setDurationMs] = useState(0);
|
||||||
|
const [voiceName, setVoiceName] = useState('');
|
||||||
|
const [processing, setProcessing] = useState(false);
|
||||||
|
const [recordingPath, setRecordingPath] = useState('');
|
||||||
|
const timerRef = useRef<ReturnType<typeof setInterval> | null>(null);
|
||||||
|
const startTimeRef = useRef<number>(0);
|
||||||
|
|
||||||
|
// Zustand zuruecksetzen wenn Modal schliesst/oeffnet
|
||||||
|
useEffect(() => {
|
||||||
|
if (!visible) {
|
||||||
|
setRecording(false);
|
||||||
|
setDurationMs(0);
|
||||||
|
setVoiceName('');
|
||||||
|
setProcessing(false);
|
||||||
|
setRecordingPath('');
|
||||||
|
if (timerRef.current) clearInterval(timerRef.current);
|
||||||
|
}
|
||||||
|
}, [visible]);
|
||||||
|
|
||||||
|
// Cleanup bei Unmount
|
||||||
|
useEffect(() => {
|
||||||
|
return () => {
|
||||||
|
if (timerRef.current) clearInterval(timerRef.current);
|
||||||
|
if (recording) audioService.stopRecording().catch(() => {});
|
||||||
|
};
|
||||||
|
}, [recording]);
|
||||||
|
|
||||||
|
const startRecording = useCallback(async () => {
|
||||||
|
// Frische Aufnahme
|
||||||
|
setDurationMs(0);
|
||||||
|
setRecordingPath('');
|
||||||
|
const ok = await audioService.startRecording(false);
|
||||||
|
if (!ok) {
|
||||||
|
Alert.alert('Fehler', 'Aufnahme konnte nicht gestartet werden (Mikrofon-Berechtigung?)');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
setRecording(true);
|
||||||
|
startTimeRef.current = Date.now();
|
||||||
|
timerRef.current = setInterval(async () => {
|
||||||
|
const elapsed = Date.now() - startTimeRef.current;
|
||||||
|
setDurationMs(elapsed);
|
||||||
|
if (elapsed >= MAX_DURATION_MS) {
|
||||||
|
await stopRecording();
|
||||||
|
}
|
||||||
|
}, 100);
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
const stopRecording = useCallback(async () => {
|
||||||
|
if (timerRef.current) {
|
||||||
|
clearInterval(timerRef.current);
|
||||||
|
timerRef.current = null;
|
||||||
|
}
|
||||||
|
if (!recording) return;
|
||||||
|
const result = await audioService.stopRecording();
|
||||||
|
setRecording(false);
|
||||||
|
if (!result) {
|
||||||
|
Alert.alert('Keine Sprache erkannt', 'Versuch es bitte nochmal — sprich bis der Timer mindestens 10 Sekunden anzeigt.');
|
||||||
|
setDurationMs(0);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// Temp-Datei wurde schon geloescht (stopRecording cleaned up).
|
||||||
|
// Wir brauchen aber base64 aus result direkt fuers Upload.
|
||||||
|
// result.base64 ist bereits da.
|
||||||
|
setRecordingPath(result.base64);
|
||||||
|
}, [recording]);
|
||||||
|
|
||||||
|
const uploadVoice = useCallback(async () => {
|
||||||
|
const name = voiceName.trim();
|
||||||
|
if (!name) {
|
||||||
|
Alert.alert('Name fehlt', 'Bitte gib der Stimme einen Namen (nur Buchstaben, Zahlen, _ und -).');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (!/^[a-zA-Z0-9_-]+$/.test(name)) {
|
||||||
|
Alert.alert('Ungueltiger Name', 'Nur Buchstaben, Zahlen, _ und - erlaubt.');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (!recordingPath) {
|
||||||
|
Alert.alert('Keine Aufnahme', 'Bitte zuerst aufnehmen.');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
setProcessing(true);
|
||||||
|
try {
|
||||||
|
// voice_upload erwartet samples als Array mit base64 (aus Diagnostic-Format kopiert)
|
||||||
|
rvs.send('voice_upload' as any, {
|
||||||
|
name,
|
||||||
|
samples: [{ base64: recordingPath }],
|
||||||
|
});
|
||||||
|
Alert.alert('Hochgeladen', `Stimme "${name}" wird vom XTTS-Server verarbeitet. Nach ein paar Sekunden in der Liste verfuegbar.`);
|
||||||
|
onClose();
|
||||||
|
} catch (err: any) {
|
||||||
|
Alert.alert('Fehler', err.message);
|
||||||
|
} finally {
|
||||||
|
setProcessing(false);
|
||||||
|
}
|
||||||
|
}, [voiceName, recordingPath, onClose]);
|
||||||
|
|
||||||
|
const progress = Math.min(durationMs / MAX_DURATION_MS, 1);
|
||||||
|
const sec = Math.floor(durationMs / 1000);
|
||||||
|
const enoughRecorded = durationMs >= TARGET_DURATION_MS;
|
||||||
|
|
||||||
|
return (
|
||||||
|
<Modal visible={visible} animationType="slide" onRequestClose={onClose}>
|
||||||
|
<View style={styles.container}>
|
||||||
|
<View style={styles.header}>
|
||||||
|
<Text style={styles.title}>Eigene Stimme aufnehmen</Text>
|
||||||
|
<TouchableOpacity onPress={onClose}>
|
||||||
|
<Text style={styles.closeX}>{'\u2715'}</Text>
|
||||||
|
</TouchableOpacity>
|
||||||
|
</View>
|
||||||
|
|
||||||
|
<ScrollView style={styles.content} contentContainerStyle={{padding: 16}}>
|
||||||
|
<Text style={styles.hint}>
|
||||||
|
Lies den Text laut und deutlich vor. Maximal 30 Sekunden. Je mehr du sprichst
|
||||||
|
(ziel: bis zum Ende des Textes, ca. 20-30s), desto besser wird die geklonte
|
||||||
|
Stimme.
|
||||||
|
</Text>
|
||||||
|
|
||||||
|
<View style={styles.sampleTextBox}>
|
||||||
|
<Text style={styles.sampleText}>{SAMPLE_TEXT}</Text>
|
||||||
|
</View>
|
||||||
|
|
||||||
|
{/* Timer + Fortschritt */}
|
||||||
|
<View style={{marginTop: 20, alignItems: 'center'}}>
|
||||||
|
<Text style={[styles.timer, recording && styles.timerActive]}>
|
||||||
|
{sec.toString().padStart(2, '0')} / 30 s
|
||||||
|
</Text>
|
||||||
|
<View style={styles.progressBar}>
|
||||||
|
<View style={[styles.progressFill, {width: `${progress * 100}%`, backgroundColor: recording ? '#FF3B30' : '#0096FF'}]} />
|
||||||
|
</View>
|
||||||
|
</View>
|
||||||
|
|
||||||
|
{/* Aufnahme-Button */}
|
||||||
|
{!recordingPath && (
|
||||||
|
<TouchableOpacity
|
||||||
|
style={[styles.recordBtn, recording && styles.recordBtnActive]}
|
||||||
|
onPress={recording ? stopRecording : startRecording}
|
||||||
|
>
|
||||||
|
<Text style={styles.recordIcon}>{recording ? '\u25A0' : '\u25CF'}</Text>
|
||||||
|
<Text style={styles.recordLabel}>{recording ? 'Stop' : 'Aufnahme starten'}</Text>
|
||||||
|
</TouchableOpacity>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Nach Aufnahme: Name + Upload */}
|
||||||
|
{recordingPath && (
|
||||||
|
<View style={{marginTop: 20}}>
|
||||||
|
<Text style={styles.hint}>
|
||||||
|
Aufnahme ({sec}s) fertig. Vergib einen Namen und lade hoch.
|
||||||
|
</Text>
|
||||||
|
<TextInput
|
||||||
|
style={styles.nameInput}
|
||||||
|
value={voiceName}
|
||||||
|
onChangeText={setVoiceName}
|
||||||
|
placeholder="z.B. stefan"
|
||||||
|
placeholderTextColor="#555570"
|
||||||
|
autoCapitalize="none"
|
||||||
|
autoCorrect={false}
|
||||||
|
/>
|
||||||
|
<View style={{flexDirection: 'row', gap: 8, marginTop: 12}}>
|
||||||
|
<TouchableOpacity
|
||||||
|
style={[styles.secondaryBtn, {flex: 1}]}
|
||||||
|
onPress={() => { setRecordingPath(''); setDurationMs(0); }}
|
||||||
|
>
|
||||||
|
<Text style={styles.secondaryBtnText}>Nochmal aufnehmen</Text>
|
||||||
|
</TouchableOpacity>
|
||||||
|
<TouchableOpacity
|
||||||
|
style={[styles.primaryBtn, {flex: 1}]}
|
||||||
|
onPress={uploadVoice}
|
||||||
|
disabled={processing}
|
||||||
|
>
|
||||||
|
{processing
|
||||||
|
? <ActivityIndicator color="#fff" />
|
||||||
|
: <Text style={styles.primaryBtnText}>Hochladen</Text>
|
||||||
|
}
|
||||||
|
</TouchableOpacity>
|
||||||
|
</View>
|
||||||
|
</View>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{recording && !enoughRecorded && (
|
||||||
|
<Text style={[styles.hint, {marginTop: 12, color: '#FFD60A', textAlign: 'center'}]}>
|
||||||
|
Bitte weiter lesen — mindestens 15 Sekunden
|
||||||
|
</Text>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{recording && enoughRecorded && (
|
||||||
|
<Text style={[styles.hint, {marginTop: 12, color: '#34C759', textAlign: 'center'}]}>
|
||||||
|
Genug Audio fuer eine gute Clonung. Du kannst stoppen.
|
||||||
|
</Text>
|
||||||
|
)}
|
||||||
|
</ScrollView>
|
||||||
|
</View>
|
||||||
|
</Modal>
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
const styles = StyleSheet.create({
|
||||||
|
container: {
|
||||||
|
flex: 1,
|
||||||
|
backgroundColor: '#0D0D1A',
|
||||||
|
},
|
||||||
|
header: {
|
||||||
|
flexDirection: 'row',
|
||||||
|
alignItems: 'center',
|
||||||
|
justifyContent: 'space-between',
|
||||||
|
paddingHorizontal: 16,
|
||||||
|
paddingTop: 48,
|
||||||
|
paddingBottom: 16,
|
||||||
|
borderBottomWidth: 1,
|
||||||
|
borderBottomColor: '#1E1E2E',
|
||||||
|
},
|
||||||
|
title: {
|
||||||
|
color: '#FFFFFF',
|
||||||
|
fontSize: 18,
|
||||||
|
fontWeight: '700',
|
||||||
|
},
|
||||||
|
closeX: {
|
||||||
|
color: '#8888AA',
|
||||||
|
fontSize: 24,
|
||||||
|
paddingHorizontal: 8,
|
||||||
|
},
|
||||||
|
content: {
|
||||||
|
flex: 1,
|
||||||
|
},
|
||||||
|
hint: {
|
||||||
|
color: '#8888AA',
|
||||||
|
fontSize: 13,
|
||||||
|
lineHeight: 20,
|
||||||
|
},
|
||||||
|
sampleTextBox: {
|
||||||
|
marginTop: 12,
|
||||||
|
padding: 14,
|
||||||
|
backgroundColor: '#12122A',
|
||||||
|
borderRadius: 10,
|
||||||
|
borderWidth: 1,
|
||||||
|
borderColor: '#1E1E2E',
|
||||||
|
},
|
||||||
|
sampleText: {
|
||||||
|
color: '#E0E0F0',
|
||||||
|
fontSize: 15,
|
||||||
|
lineHeight: 24,
|
||||||
|
},
|
||||||
|
timer: {
|
||||||
|
color: '#666680',
|
||||||
|
fontSize: 42,
|
||||||
|
fontWeight: '700',
|
||||||
|
fontVariant: ['tabular-nums'],
|
||||||
|
},
|
||||||
|
timerActive: {
|
||||||
|
color: '#FF3B30',
|
||||||
|
},
|
||||||
|
progressBar: {
|
||||||
|
marginTop: 8,
|
||||||
|
width: '100%',
|
||||||
|
height: 8,
|
||||||
|
backgroundColor: '#1E1E2E',
|
||||||
|
borderRadius: 4,
|
||||||
|
overflow: 'hidden',
|
||||||
|
},
|
||||||
|
progressFill: {
|
||||||
|
height: '100%',
|
||||||
|
},
|
||||||
|
recordBtn: {
|
||||||
|
marginTop: 24,
|
||||||
|
flexDirection: 'row',
|
||||||
|
alignItems: 'center',
|
||||||
|
justifyContent: 'center',
|
||||||
|
gap: 12,
|
||||||
|
backgroundColor: '#1E1E2E',
|
||||||
|
borderRadius: 12,
|
||||||
|
padding: 18,
|
||||||
|
borderWidth: 2,
|
||||||
|
borderColor: '#34C759',
|
||||||
|
},
|
||||||
|
recordBtnActive: {
|
||||||
|
borderColor: '#FF3B30',
|
||||||
|
backgroundColor: 'rgba(255,59,48,0.15)',
|
||||||
|
},
|
||||||
|
recordIcon: {
|
||||||
|
color: '#FF3B30',
|
||||||
|
fontSize: 24,
|
||||||
|
fontWeight: '700',
|
||||||
|
},
|
||||||
|
recordLabel: {
|
||||||
|
color: '#FFFFFF',
|
||||||
|
fontSize: 17,
|
||||||
|
fontWeight: '600',
|
||||||
|
},
|
||||||
|
nameInput: {
|
||||||
|
marginTop: 10,
|
||||||
|
backgroundColor: '#1E1E2E',
|
||||||
|
borderRadius: 8,
|
||||||
|
paddingHorizontal: 14,
|
||||||
|
paddingVertical: 12,
|
||||||
|
color: '#FFFFFF',
|
||||||
|
fontSize: 15,
|
||||||
|
borderWidth: 1,
|
||||||
|
borderColor: '#2A2A3E',
|
||||||
|
},
|
||||||
|
primaryBtn: {
|
||||||
|
backgroundColor: '#0096FF',
|
||||||
|
borderRadius: 10,
|
||||||
|
padding: 14,
|
||||||
|
alignItems: 'center',
|
||||||
|
},
|
||||||
|
primaryBtnText: {
|
||||||
|
color: '#FFFFFF',
|
||||||
|
fontSize: 15,
|
||||||
|
fontWeight: '700',
|
||||||
|
},
|
||||||
|
secondaryBtn: {
|
||||||
|
backgroundColor: '#1E1E2E',
|
||||||
|
borderRadius: 10,
|
||||||
|
padding: 14,
|
||||||
|
alignItems: 'center',
|
||||||
|
borderWidth: 1,
|
||||||
|
borderColor: '#2A2A3E',
|
||||||
|
},
|
||||||
|
secondaryBtnText: {
|
||||||
|
color: '#8888AA',
|
||||||
|
fontSize: 14,
|
||||||
|
fontWeight: '600',
|
||||||
|
},
|
||||||
|
});
|
||||||
|
|
||||||
|
export default VoiceCloneModal;
|
||||||
@@ -0,0 +1,224 @@
|
|||||||
|
/**
|
||||||
|
* ZoomableImage — Pinch-to-Zoom + Pan fuers Vollbild-Modal.
|
||||||
|
*
|
||||||
|
* Reine RN-Implementation, ohne react-native-gesture-handler.
|
||||||
|
*
|
||||||
|
* - 2 Finger: Pinch (Zoom 1x..5x) + simultaner Pan via Focal-Punkt
|
||||||
|
* - 1 Finger: Pan wenn schon gezoomt (>1.02x)
|
||||||
|
* - Doppel-Tap (<300ms zw. zwei Single-Taps): Toggle 1x ↔ 2.5x
|
||||||
|
*
|
||||||
|
* Implementierungs-Hinweise zur alten Version (warum's nicht ging):
|
||||||
|
* - `gestureState.numberActiveTouches` ist nicht zuverlaessig direkt
|
||||||
|
* nach onPanResponderGrant. Wir lesen Finger-Anzahl jetzt
|
||||||
|
* ausschliesslich aus `e.nativeEvent.touches.length`.
|
||||||
|
* - Beim Wechsel von 2 → 1 Fingern bleib die Pinch-Referenz haengen.
|
||||||
|
* Jetzt: bei jedem Finger-Wechsel re-snapshotten wir die Geste.
|
||||||
|
* - Animated.Image bekommt jetzt pointerEvents="none" damit der View
|
||||||
|
* GARANTIERT die Touches abbekommt.
|
||||||
|
* - useNativeDriver ist bewusst AUS — sonst koennen wir setValue()
|
||||||
|
* nicht synchron mit dem Pan-Responder zusammen nutzen.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import React, { useMemo, useRef } from 'react';
|
||||||
|
import {
|
||||||
|
Animated,
|
||||||
|
PanResponder,
|
||||||
|
GestureResponderEvent,
|
||||||
|
ImageStyle,
|
||||||
|
StyleProp,
|
||||||
|
StyleSheet,
|
||||||
|
View,
|
||||||
|
} from 'react-native';
|
||||||
|
|
||||||
|
interface Props {
|
||||||
|
uri: string;
|
||||||
|
containerWidth: number;
|
||||||
|
containerHeight: number;
|
||||||
|
style?: StyleProp<ImageStyle>;
|
||||||
|
}
|
||||||
|
|
||||||
|
const MIN_SCALE = 1;
|
||||||
|
const MAX_SCALE = 5;
|
||||||
|
const DOUBLE_TAP_MS = 300;
|
||||||
|
const DOUBLE_TAP_DIST = 30; // Bewegung max. damit ein Tap als Tap gilt
|
||||||
|
const PAN_SLOP_AT_SCALE_1 = 4; // Mikro-Movement nicht als Pan werten
|
||||||
|
|
||||||
|
const ZoomableImage: React.FC<Props> = ({ uri, containerWidth, containerHeight, style }) => {
|
||||||
|
// Animated-Werte fuer die Render-Transformation
|
||||||
|
const scale = useRef(new Animated.Value(1)).current;
|
||||||
|
const tx = useRef(new Animated.Value(0)).current;
|
||||||
|
const ty = useRef(new Animated.Value(0)).current;
|
||||||
|
|
||||||
|
// Logische Zustaende — wir lesen Animated.Value nicht zurueck (waere async)
|
||||||
|
const view = useRef({ scale: 1, x: 0, y: 0 }).current;
|
||||||
|
|
||||||
|
// Geste-Snapshot: was war zu Beginn dieser Geste-Phase
|
||||||
|
const gesture = useRef({
|
||||||
|
fingers: 0, // aktuelle Finger-Anzahl
|
||||||
|
startScale: 1,
|
||||||
|
startX: 0,
|
||||||
|
startY: 0,
|
||||||
|
startDist: 0, // Pinch-Referenz-Distanz
|
||||||
|
startFocalX: 0,
|
||||||
|
startFocalY: 0,
|
||||||
|
movedSinceTouch: 0, // fuer Tap-Erkennung
|
||||||
|
touchStartedAt: 0,
|
||||||
|
touchStartX: 0,
|
||||||
|
touchStartY: 0,
|
||||||
|
}).current;
|
||||||
|
|
||||||
|
// Doppel-Tap
|
||||||
|
const lastTap = useRef({ at: 0, x: 0, y: 0 });
|
||||||
|
|
||||||
|
const clamp = (v: number, lo: number, hi: number) => Math.max(lo, Math.min(hi, v));
|
||||||
|
|
||||||
|
const applyClamped = (s: number, x: number, y: number) => {
|
||||||
|
const ns = clamp(s, MIN_SCALE, MAX_SCALE);
|
||||||
|
// Translation auf das verfuegbare Volumen begrenzen
|
||||||
|
const maxX = Math.max(0, (containerWidth * ns - containerWidth) / 2);
|
||||||
|
const maxY = Math.max(0, (containerHeight * ns - containerHeight) / 2);
|
||||||
|
const nx = clamp(x, -maxX, maxX);
|
||||||
|
const ny = clamp(y, -maxY, maxY);
|
||||||
|
view.scale = ns;
|
||||||
|
view.x = nx;
|
||||||
|
view.y = ny;
|
||||||
|
scale.setValue(ns);
|
||||||
|
tx.setValue(nx);
|
||||||
|
ty.setValue(ny);
|
||||||
|
};
|
||||||
|
|
||||||
|
const distance = (touches: any[]) =>
|
||||||
|
Math.hypot(touches[0].pageX - touches[1].pageX, touches[0].pageY - touches[1].pageY);
|
||||||
|
|
||||||
|
const focal = (touches: any[]) => ({
|
||||||
|
x: (touches[0].pageX + touches[1].pageX) / 2,
|
||||||
|
y: (touches[0].pageY + touches[1].pageY) / 2,
|
||||||
|
});
|
||||||
|
|
||||||
|
// Snapshot vor jedem Phasenwechsel (1↔2 Finger) — verhindert Spruenge
|
||||||
|
const snapshot = (touches: any[]) => {
|
||||||
|
gesture.startScale = view.scale;
|
||||||
|
gesture.startX = view.x;
|
||||||
|
gesture.startY = view.y;
|
||||||
|
if (touches.length >= 2) {
|
||||||
|
gesture.startDist = distance(touches);
|
||||||
|
const f = focal(touches);
|
||||||
|
gesture.startFocalX = f.x;
|
||||||
|
gesture.startFocalY = f.y;
|
||||||
|
} else if (touches.length === 1) {
|
||||||
|
gesture.startDist = 0;
|
||||||
|
gesture.startFocalX = touches[0].pageX;
|
||||||
|
gesture.startFocalY = touches[0].pageY;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const responder = useMemo(
|
||||||
|
() =>
|
||||||
|
PanResponder.create({
|
||||||
|
onStartShouldSetPanResponder: () => true,
|
||||||
|
onStartShouldSetPanResponderCapture: () => true,
|
||||||
|
onMoveShouldSetPanResponder: () => true,
|
||||||
|
onMoveShouldSetPanResponderCapture: () => true,
|
||||||
|
|
||||||
|
onPanResponderGrant: (e: GestureResponderEvent) => {
|
||||||
|
const touches = e.nativeEvent.touches as any[];
|
||||||
|
gesture.fingers = touches.length;
|
||||||
|
gesture.movedSinceTouch = 0;
|
||||||
|
gesture.touchStartedAt = Date.now();
|
||||||
|
gesture.touchStartX = touches[0]?.pageX ?? 0;
|
||||||
|
gesture.touchStartY = touches[0]?.pageY ?? 0;
|
||||||
|
snapshot(touches);
|
||||||
|
},
|
||||||
|
|
||||||
|
onPanResponderMove: (e: GestureResponderEvent, _gs) => {
|
||||||
|
const touches = e.nativeEvent.touches as any[];
|
||||||
|
|
||||||
|
// Phasenwechsel? → Re-Snapshot, damit nicht gesprungen wird
|
||||||
|
if (touches.length !== gesture.fingers) {
|
||||||
|
gesture.fingers = touches.length;
|
||||||
|
snapshot(touches);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
gesture.movedSinceTouch += 1;
|
||||||
|
|
||||||
|
if (touches.length >= 2) {
|
||||||
|
// Pinch + Pan via Focal
|
||||||
|
const d = distance(touches);
|
||||||
|
if (gesture.startDist === 0) {
|
||||||
|
// Sicherheitsnetz falls Snapshot gemissed wurde
|
||||||
|
snapshot(touches);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const factor = d / gesture.startDist;
|
||||||
|
const f = focal(touches);
|
||||||
|
const newScale = clamp(gesture.startScale * factor, MIN_SCALE, MAX_SCALE);
|
||||||
|
// Focal-basierter Pan: zoomt um den Mittelpunkt der zwei Finger
|
||||||
|
const newX = gesture.startX + (f.x - gesture.startFocalX);
|
||||||
|
const newY = gesture.startY + (f.y - gesture.startFocalY);
|
||||||
|
applyClamped(newScale, newX, newY);
|
||||||
|
} else if (touches.length === 1 && view.scale > 1.02) {
|
||||||
|
const dx = touches[0].pageX - gesture.startFocalX;
|
||||||
|
const dy = touches[0].pageY - gesture.startFocalY;
|
||||||
|
if (Math.abs(dx) < PAN_SLOP_AT_SCALE_1 && Math.abs(dy) < PAN_SLOP_AT_SCALE_1) return;
|
||||||
|
applyClamped(view.scale, gesture.startX + dx, gesture.startY + dy);
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
|
onPanResponderRelease: (e: GestureResponderEvent) => {
|
||||||
|
const elapsed = Date.now() - gesture.touchStartedAt;
|
||||||
|
const dx = (e.nativeEvent.changedTouches?.[0]?.pageX ?? gesture.touchStartX) - gesture.touchStartX;
|
||||||
|
const dy = (e.nativeEvent.changedTouches?.[0]?.pageY ?? gesture.touchStartY) - gesture.touchStartY;
|
||||||
|
const wasTap =
|
||||||
|
elapsed < 280 &&
|
||||||
|
Math.abs(dx) < DOUBLE_TAP_DIST &&
|
||||||
|
Math.abs(dy) < DOUBLE_TAP_DIST;
|
||||||
|
if (wasTap) {
|
||||||
|
const now = Date.now();
|
||||||
|
if (now - lastTap.current.at < DOUBLE_TAP_MS) {
|
||||||
|
// Doppel-Tap → Zoom-Toggle
|
||||||
|
if (view.scale > 1.1) {
|
||||||
|
applyClamped(1, 0, 0);
|
||||||
|
} else {
|
||||||
|
applyClamped(2.5, 0, 0);
|
||||||
|
}
|
||||||
|
lastTap.current = { at: 0, x: 0, y: 0 };
|
||||||
|
} else {
|
||||||
|
lastTap.current = { at: now, x: gesture.touchStartX, y: gesture.touchStartY };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
gesture.fingers = 0;
|
||||||
|
gesture.startDist = 0;
|
||||||
|
},
|
||||||
|
|
||||||
|
onPanResponderTerminate: () => {
|
||||||
|
gesture.fingers = 0;
|
||||||
|
gesture.startDist = 0;
|
||||||
|
},
|
||||||
|
}),
|
||||||
|
[],
|
||||||
|
);
|
||||||
|
|
||||||
|
return (
|
||||||
|
<View
|
||||||
|
style={StyleSheet.absoluteFill}
|
||||||
|
collapsable={false}
|
||||||
|
{...responder.panHandlers}
|
||||||
|
>
|
||||||
|
<Animated.View pointerEvents="none" style={StyleSheet.absoluteFill}>
|
||||||
|
<Animated.Image
|
||||||
|
source={{ uri }}
|
||||||
|
style={[
|
||||||
|
style,
|
||||||
|
{
|
||||||
|
transform: [{ translateX: tx }, { translateY: ty }, { scale }],
|
||||||
|
},
|
||||||
|
]}
|
||||||
|
resizeMode="contain"
|
||||||
|
/>
|
||||||
|
</Animated.View>
|
||||||
|
</View>
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
export default ZoomableImage;
|
||||||
+1403
-99
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
+1152
-41
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,76 @@
|
|||||||
|
/**
|
||||||
|
* Background-Audio: ARIAs TTS, Mic-Aufnahme und Wake-Word-Lauschen sollen
|
||||||
|
* auch bei minimierter App weiterlaufen. Wir starten dafuer einen Foreground-
|
||||||
|
* Service mit foregroundServiceType=mediaPlayback|microphone, der eine
|
||||||
|
* persistente Notification zeigt waehrend irgendein Audio-Slot aktiv ist.
|
||||||
|
*
|
||||||
|
* Mehrere Komponenten koennen den Service unabhaengig "halten":
|
||||||
|
* - 'tts' : ARIA spricht
|
||||||
|
* - 'rec' : Aufnahme laeuft
|
||||||
|
* - 'wake' : Wake-Word lauscht passiv (Ohr aktiv)
|
||||||
|
*
|
||||||
|
* Solange mindestens ein Slot aktiv ist, laeuft der Service. Wenn alle
|
||||||
|
* Slots leer sind, wird er gestoppt. Der Notification-Text passt sich an
|
||||||
|
* den hoechstprioren Slot an (tts > rec > wake).
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { NativeModules } from 'react-native';
|
||||||
|
|
||||||
|
interface BackgroundAudioNative {
|
||||||
|
start(reason: string): Promise<boolean>;
|
||||||
|
stop(): Promise<boolean>;
|
||||||
|
}
|
||||||
|
|
||||||
|
const { BackgroundAudio } = NativeModules as { BackgroundAudio?: BackgroundAudioNative };
|
||||||
|
|
||||||
|
type Slot = 'tts' | 'rec' | 'wake';
|
||||||
|
|
||||||
|
const slots = new Set<Slot>();
|
||||||
|
|
||||||
|
// Prioritaet fuer den Notification-Text — hoechste zuerst.
|
||||||
|
const PRIORITY: Slot[] = ['tts', 'rec', 'wake'];
|
||||||
|
|
||||||
|
function topReason(): string {
|
||||||
|
for (const s of PRIORITY) {
|
||||||
|
if (slots.has(s)) return s;
|
||||||
|
}
|
||||||
|
return '';
|
||||||
|
}
|
||||||
|
|
||||||
|
async function applyState(): Promise<void> {
|
||||||
|
if (!BackgroundAudio) return;
|
||||||
|
if (slots.size === 0) {
|
||||||
|
try { await BackgroundAudio.stop(); } catch {}
|
||||||
|
console.log('[BackgroundAudio] Service gestoppt (keine Slots)');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const reason = topReason();
|
||||||
|
try {
|
||||||
|
await BackgroundAudio.start(reason);
|
||||||
|
console.log('[BackgroundAudio] Service aktiv (slot=%s, slots=%s)',
|
||||||
|
reason, [...slots].join('+'));
|
||||||
|
} catch (err: any) {
|
||||||
|
console.warn('[BackgroundAudio] start fehlgeschlagen:', err?.message || err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function acquireBackgroundAudio(slot: Slot): Promise<void> {
|
||||||
|
if (slots.has(slot)) return;
|
||||||
|
slots.add(slot);
|
||||||
|
await applyState();
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function releaseBackgroundAudio(slot: Slot): Promise<void> {
|
||||||
|
if (!slots.has(slot)) return;
|
||||||
|
slots.delete(slot);
|
||||||
|
await applyState();
|
||||||
|
}
|
||||||
|
|
||||||
|
export function backgroundAudioActive(): boolean {
|
||||||
|
return slots.size > 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Legacy API (nur tts-Slot) — fuer Aufruf-Sites die noch nichts vom Slot-
|
||||||
|
// system wissen. Mappt auf den 'tts'-Slot. ---
|
||||||
|
export const startBackgroundAudio = () => acquireBackgroundAudio('tts');
|
||||||
|
export const stopBackgroundAudio = () => releaseBackgroundAudio('tts');
|
||||||
@@ -0,0 +1,41 @@
|
|||||||
|
/**
|
||||||
|
* Verbose-Logging-Toggle: console.log laesst sich global stummschalten.
|
||||||
|
* console.warn/console.error bleiben immer an — Fehler will man immer sehen.
|
||||||
|
*
|
||||||
|
* Default: an (true). Toggle ueber Settings → Protokoll → Verbose Logging.
|
||||||
|
* Beim Start wird der gespeicherte Wert geladen, vorher loggen wir normal.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||||
|
|
||||||
|
export const VERBOSE_LOGGING_KEY = 'aria_verbose_logging';
|
||||||
|
|
||||||
|
// Original-console.log retten, damit wir die Wrapper jederzeit wieder
|
||||||
|
// "scharf" stellen koennen (sonst waere ein Toggle-an nach -aus tot).
|
||||||
|
const originalLog = console.log.bind(console);
|
||||||
|
const noop = () => {};
|
||||||
|
|
||||||
|
let _verbose = true;
|
||||||
|
|
||||||
|
function applyState(): void {
|
||||||
|
console.log = _verbose ? originalLog : noop;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Wert aus AsyncStorage laden und anwenden. Beim App-Start aufrufen. */
|
||||||
|
export async function initLogger(): Promise<void> {
|
||||||
|
try {
|
||||||
|
const v = await AsyncStorage.getItem(VERBOSE_LOGGING_KEY);
|
||||||
|
_verbose = v !== 'false'; // default: true
|
||||||
|
} catch {}
|
||||||
|
applyState();
|
||||||
|
}
|
||||||
|
|
||||||
|
export function isVerboseLogging(): boolean {
|
||||||
|
return _verbose;
|
||||||
|
}
|
||||||
|
|
||||||
|
export function setVerboseLogging(verbose: boolean): void {
|
||||||
|
_verbose = verbose;
|
||||||
|
applyState();
|
||||||
|
AsyncStorage.setItem(VERBOSE_LOGGING_KEY, String(verbose)).catch(() => {});
|
||||||
|
}
|
||||||
@@ -0,0 +1,222 @@
|
|||||||
|
/**
|
||||||
|
* PhoneCall-Service — pausiert ARIA bei Telefonaten:
|
||||||
|
*
|
||||||
|
* 1. Klassischer Mobilfunk-Anruf via TelephonyManager (PhoneCallModule.kt)
|
||||||
|
* Status: idle / ringing / offhook
|
||||||
|
*
|
||||||
|
* 2. VoIP-Anrufe (WhatsApp, Signal, Discord, Telegram, Teams, ...) via
|
||||||
|
* AudioFocus-Loss-Event (AudioFocusModule.kt). Diese Apps requestn
|
||||||
|
* AUDIOFOCUS_GAIN_TRANSIENT_EXCLUSIVE wenn ein Anruf reinkommt — wir
|
||||||
|
* bekommen ein "loss" Event und reagieren genauso wie auf RINGING.
|
||||||
|
*
|
||||||
|
* In beiden Faellen wird audioService.haltAllPlayback() + wakeWordService.
|
||||||
|
* pauseForCall() gerufen. Bei call-end (idle / focus-gain) → resumeFromCall.
|
||||||
|
*
|
||||||
|
* Permission READ_PHONE_STATE ist nur fuer Pfad 1 noetig — Pfad 2 braucht
|
||||||
|
* keine extra Berechtigung weil unser eigener AudioFocus-Listener feuert.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import {
|
||||||
|
NativeEventEmitter,
|
||||||
|
NativeModules,
|
||||||
|
PermissionsAndroid,
|
||||||
|
Platform,
|
||||||
|
ToastAndroid,
|
||||||
|
} from 'react-native';
|
||||||
|
import audioService from './audio';
|
||||||
|
import wakeWordService from './wakeword';
|
||||||
|
|
||||||
|
/** Surface of the native PhoneCall module (PhoneCallModule.kt). */
interface PhoneCallNative {
  start(): Promise<boolean>;
  stop(): Promise<boolean>;
}
|
||||||
|
|
||||||
|
// Native telephony module; may be undefined on builds/platforms without it.
const { PhoneCall } = NativeModules as { PhoneCall?: PhoneCallNative };

// Call states emitted by the native module via 'PhoneCallStateChanged'.
type PhoneState = 'idle' | 'ringing' | 'offhook';
|
||||||
|
|
||||||
|
/**
 * Pauses ARIA audio while a phone call is active and resumes it afterwards.
 * Two detection paths feed the same halt/resume logic: classic telephony
 * events (PhoneCall native module) and audio-focus loss events (AudioFocus
 * native module) which cover VoIP apps.
 */
class PhoneCallService {
  // True once start() completed; guards against double start/stop.
  private started: boolean = false;
  // Subscription for 'PhoneCallStateChanged' events (telephony path).
  private subscription: { remove: () => void } | null = null;
  // Subscription for 'AudioFocusChanged' events (VoIP path).
  private focusSubscription: { remove: () => void } | null = null;
  // Last state from the telephony path; used to de-duplicate transitions.
  private lastState: PhoneState = 'idle';
  /** Prevents resume from firing twice when a VoIP focus-loss pause is
   * also followed by a TelephonyManager IDLE event. */
  private interruptedByFocus: boolean = false;

  /**
   * Registers both call-detection paths. Android-only. Returns false when
   * already started or not on Android; true otherwise — each of the two
   * paths is best-effort and may individually fail without aborting start.
   */
  async start(): Promise<boolean> {
    if (this.started || Platform.OS !== 'android') return false;

    // 1. ALWAYS register the AudioFocus listener — covers VoIP calls
    //    (WhatsApp, Signal, Discord, ...) and needs no runtime permission.
    try {
      const focusEmitter = new NativeEventEmitter(NativeModules.AudioFocus as any);
      this.focusSubscription = focusEmitter.addListener(
        'AudioFocusChanged',
        (e: { type: 'loss' | 'loss_transient' | 'gain' }) => this._onFocusChanged(e.type),
      );
      console.log('[PhoneCall] AudioFocus-Listener aktiv (fuer VoIP-Calls)');
    } catch (err: any) {
      console.warn('[PhoneCall] AudioFocus-Subscription gescheitert', err?.message || err);
    }

    // 2. TelephonyManager listener — for classic cellular calls; requires
    //    the READ_PHONE_STATE runtime permission.
    if (PhoneCall) {
      try {
        const granted = await PermissionsAndroid.request(
          PermissionsAndroid.PERMISSIONS.READ_PHONE_STATE,
          {
            title: 'ARIA Cockpit — Anruf-Erkennung',
            message: 'Damit ARIA bei einem eingehenden Anruf nicht weiterredet, '
              + 'darf die App den Anruf-Status sehen (Klingeln/Aktiv/Aufgelegt). '
              + 'Es werden keine Anrufdaten gelesen oder gespeichert.',
            buttonPositive: 'Erlauben',
            buttonNegative: 'Spaeter',
          },
        );
        if (granted === PermissionsAndroid.RESULTS.GRANTED) {
          const ok = await PhoneCall.start();
          if (ok) {
            const emitter = new NativeEventEmitter(NativeModules.PhoneCall as any);
            this.subscription = emitter.addListener(
              'PhoneCallStateChanged',
              (e: { state: PhoneState }) => this._onStateChanged(e.state),
            );
            console.log('[PhoneCall] TelephonyManager-Listener aktiv');
          }
        } else {
          // Permission denied — VoIP detection via AudioFocus still works.
          console.warn('[PhoneCall] READ_PHONE_STATE abgelehnt — VoIP-Calls werden trotzdem ueber AudioFocus erkannt');
        }
      } catch (err: any) {
        console.warn('[PhoneCall] TelephonyManager-Setup gescheitert:', err?.message || err);
      }
    }

    this.started = true;
    return true;
  }

  /** Removes both listeners, stops the native module, and resets all state. */
  async stop(): Promise<void> {
    if (!this.started) return;
    try { this.subscription?.remove(); } catch {}
    try { this.focusSubscription?.remove(); } catch {}
    this.subscription = null;
    this.focusSubscription = null;
    if (PhoneCall) {
      try { await PhoneCall.stop(); } catch {}
    }
    this.started = false;
    this.lastState = 'idle';
    this.interruptedByFocus = false;
  }

  /** Telephony path: reacts to idle/ringing/offhook transitions. */
  private _onStateChanged(state: PhoneState): void {
    if (state === this.lastState) return;
    const prev = this.lastState;
    console.log('[PhoneCall] State: %s → %s', prev, state);
    this.lastState = state;
    if (state === 'ringing' || state === 'offhook') {
      this._haltForCall(state === 'ringing' ? 'Anruf — ARIA pausiert' : 'Im Gespraech — ARIA pausiert');
    } else if (state === 'idle' && prev !== 'idle') {
      // If playback was already paused by an AudioFocus loss, do NOT resume
      // twice here — the focus-gain event triggers the resume instead.
      if (!this.interruptedByFocus) {
        this._resumeAfterCall('Anruf beendet — ARIA wieder aktiv');
      }
    }
  }

  /** AudioFocus loss = some other app took the focus. That happens for VoIP
   * calls (what we want) BUT also for ordinary audio players (another player
   * starting, a notification sound, even our own sound calls from the play
   * button). Therefore the AudioMode is checked — only IN_CALL (2) or
   * IN_COMMUNICATION (3) counts as a call. */
  private async _onFocusChanged(type: 'loss' | 'loss_transient' | 'gain'): Promise<void> {
    if (type === 'loss' || type === 'loss_transient') {
      // Already paused via the classic TelephonyManager path? Don't double up.
      if (this.lastState === 'ringing' || this.lastState === 'offhook') return;
      // Check the audio mode — only treat genuine calls as interruptions.
      let mode = -1;
      try { mode = await (NativeModules.AudioFocus as any)?.getMode?.(); } catch {}
      if (mode !== 2 && mode !== 3) {
        // NORMAL mode → not a call (e.g. the play button was pressed, or
        // another player grabbed the focus). No toasts in this case.
        console.log('[PhoneCall] FOCUS_LOSS ignoriert (AudioMode=%d, kein Call)', mode);
        return;
      }
      this.interruptedByFocus = true;
      this._haltForCall('Anruf erkannt (VoIP) — ARIA pausiert');
      // Poll, because GAIN does not arrive reliably (we release the focus
      // ourselves on halt → no automatic GAIN). AudioMode leaving
      // IN_COMMUNICATION = call over.
      this._startVoipResumePoll();
    } else if (type === 'gain') {
      if (this.interruptedByFocus) {
        this.interruptedByFocus = false;
        this._stopVoipResumePoll();
        this._resumeAfterCall('Audio frei — ARIA wieder aktiv');
      }
    }
  }

  /** Polling fallback: every 3s, check whether AudioMode is NORMAL again. */
  private voipPollTimer: ReturnType<typeof setInterval> | null = null;

  /** Starts the 3s resume poll (no-op when already running). */
  private _startVoipResumePoll(): void {
    if (this.voipPollTimer) return;
    this.voipPollTimer = setInterval(async () => {
      if (!this.interruptedByFocus) {
        this._stopVoipResumePoll();
        return;
      }
      try {
        const mode = await (NativeModules.AudioFocus as any)?.getMode?.();
        // 0 = MODE_NORMAL — the call is over
        if (typeof mode === 'number' && mode === 0) {
          this.interruptedByFocus = false;
          this._stopVoipResumePoll();
          this._resumeAfterCall('Anruf beendet — ARIA wieder aktiv');
        }
      } catch {}
    }, 3000);
  }

  /** Clears the resume-poll timer, if any. */
  private _stopVoipResumePoll(): void {
    if (this.voipPollTimer) {
      clearInterval(this.voipPollTimer);
      this.voipPollTimer = null;
    }
  }

  /** Common halt path for both detection mechanisms. Shows `toast`. */
  private _haltForCall(toast: string): void {
    // Remember the playback position before killing the stream — for auto-resume.
    audioService.captureInterruption();
    // pauseForCall (instead of haltAllPlayback): pcmBuffer + messageId are
    // kept and further chunks keep being collected so isFinal writes the WAV.
    audioService.pauseForCall(toast);
    wakeWordService.pauseForCall().catch(() => {});
    ToastAndroid.show(toast, ToastAndroid.SHORT);
  }

  /** Common resume path; schedules a delayed auto-resume of playback. */
  private _resumeAfterCall(toast: string): void {
    // Lift the call pause — new chunks may be played directly again (in case
    // the bridge has not sent isFinal mid-call yet).
    audioService.endCallPause();
    wakeWordService.resumeFromCall().catch(() => {});
    ToastAndroid.show(toast, ToastAndroid.SHORT);
    // Wait 800ms before auto-resume — otherwise ARIA's new focus request
    // collides with e.g. Spotify's own auto-resume after the call ends. The
    // system is still transitioning out of IN_CALL mode right after hang-up;
    // the other player watches for focus gain, would immediately see LOSS
    // again and stay paused. With the delay: it resumes briefly, then ARIA
    // pauses it again properly. If ARIA has nothing pending, it stays on.
    setTimeout(() => {
      audioService.resumeFromInterruption(30000).then(ok => {
        if (ok) {
          console.log('[PhoneCall] Auto-Resume von gemerkter Position gestartet');
        }
      }).catch(() => {});
    }, 800);
  }
}
|
||||||
|
|
||||||
|
// App-wide singleton instance.
const phoneCallService = new PhoneCallService();
export default phoneCallService;
|
||||||
@@ -12,7 +12,7 @@ import AsyncStorage from '@react-native-async-storage/async-storage';
|
|||||||
|
|
||||||
export type ConnectionState = 'connecting' | 'connected' | 'disconnected';
|
export type ConnectionState = 'connecting' | 'connected' | 'disconnected';
|
||||||
|
|
||||||
export type MessageType = 'chat' | 'audio' | 'file' | 'location' | 'mode' | 'log' | 'event';
|
export type MessageType = 'chat' | 'audio' | 'file' | 'location' | 'mode' | 'log' | 'event' | 'update_available' | string;
|
||||||
|
|
||||||
export interface RVSMessage {
|
export interface RVSMessage {
|
||||||
type: MessageType;
|
type: MessageType;
|
||||||
|
|||||||
@@ -0,0 +1,232 @@
|
|||||||
|
/**
|
||||||
|
* Auto-Update Service — prueft und installiert App-Updates via RVS
|
||||||
|
*
|
||||||
|
* Flow:
|
||||||
|
* 1. App sendet "update_check" mit aktueller Version an RVS
|
||||||
|
* 2. RVS vergleicht → sendet "update_available" mit Download-URL
|
||||||
|
* 3. App zeigt Benachrichtigung → User bestaetigt → Download + Install
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { Alert, Linking, Platform, NativeModules } from 'react-native';
|
||||||
|
import RNFS from 'react-native-fs';
|
||||||
|
import rvs, { RVSMessage } from './rvs';
|
||||||
|
|
||||||
|
// Version from package.json (embedded at build time)
const packageJson = require('../../package.json');
const APP_VERSION = packageJson.version || '0.0.0.0';

// Invoked when the server reports a version newer than the running one.
type UpdateCallback = (info: UpdateInfo) => void;

/** Payload of an 'update_available' message from RVS. */
export interface UpdateInfo {
  version: string;     // remote version string, e.g. '1.2.3.4'
  downloadUrl: string; // reported by the server; not used by the WS download path
  size: number;        // APK size in bytes
}
|
||||||
|
|
||||||
|
/**
 * Checks for, downloads, and installs app updates over the RVS WebSocket.
 * Flow: send 'update_check' → server replies 'update_available' → user
 * confirms → request 'update_download' → receive base64 APK in
 * 'update_data' → write to cache → trigger the native installer.
 */
class UpdateService {
  // Callbacks registered via onUpdateAvailable().
  private listeners: UpdateCallback[] = [];
  // Debounce flags for check and download (one of each at a time).
  private checking = false;
  private downloading = false;

  constructor() {
    // On startup, sweep stale APKs from the cache — if this app is running,
    // previously downloaded APKs were either installed already or left
    // incomplete. Otherwise each update leaks 20-30MB on the device.
    this.cleanupOldApks().catch(() => {});

    // Listen for update_available messages.
    rvs.onMessage((msg: RVSMessage) => {
      if (msg.type === 'update_available' as any) {
        const info: UpdateInfo = {
          version: (msg.payload.version as string) || '',
          downloadUrl: (msg.payload.downloadUrl as string) || '',
          size: (msg.payload.size as number) || 0,
        };
        if (info.version && this.isNewer(info.version)) {
          console.log(`[Update] Neue Version verfuegbar: ${info.version} (aktuell: ${APP_VERSION})`);
          this.listeners.forEach(cb => cb(info));
        }
      }
    });
  }

  /** Every directory where downloaded .apk files could be lying around. */
  private async _apkSearchDirs(): Promise<string[]> {
    const dirs = [RNFS.CachesDirectoryPath, RNFS.DocumentDirectoryPath];
    // Optional paths — not present on every platform/RNFS version.
    if ((RNFS as any).ExternalCachesDirectoryPath) {
      dirs.push((RNFS as any).ExternalCachesDirectoryPath);
    }
    if (RNFS.ExternalDirectoryPath) {
      dirs.push(RNFS.ExternalDirectoryPath);
    }
    return dirs;
  }

  /**
   * Deletes old downloaded APK files from the app directories.
   * Public so Settings can wire it to a "clear update cache" button.
   *
   * @param keepCurrentName Optional file name to spare (e.g. the APK
   *        currently being written).
   * @returns Number of files removed and megabytes freed.
   */
  async cleanupOldApks(keepCurrentName?: string): Promise<{ removed: number; freedMB: number }> {
    const dirs = await this._apkSearchDirs();
    let removed = 0;
    let freed = 0; // bytes
    for (const dir of dirs) {
      try {
        if (!(await RNFS.exists(dir))) continue;
        const files = await RNFS.readDir(dir);
        const apks = files.filter(f => /\.apk$/i.test(f.name));
        for (const f of apks) {
          if (keepCurrentName && f.name === keepCurrentName) continue;
          try {
            // RNFS reports size as a string on some versions — normalize.
            const size = parseInt(f.size as any, 10) || 0;
            await RNFS.unlink(f.path);
            removed += 1;
            freed += size;
            console.log(`[Update] APK geloescht: ${f.path} (${(size / 1024 / 1024).toFixed(1)}MB)`);
          } catch (err: any) {
            // Per-file failure must not abort the sweep of the rest.
            console.warn(`[Update] APK-Loeschen fehlgeschlagen: ${f.path} (${err?.message || err})`);
          }
        }
      } catch (err: any) {
        console.warn(`[Update] Cleanup-Fehler in ${dir}: ${err?.message || err}`);
      }
    }
    const freedMB = freed / 1024 / 1024;
    if (removed > 0) {
      console.log(`[Update] Cleanup fertig: ${removed} APK${removed === 1 ? '' : 's'} entfernt, ${freedMB.toFixed(1)}MB freigegeben`);
    }
    return { removed, freedMB };
  }

  /** Current total size of all APK files in the app directories (in MB). */
  async getApkCacheSize(): Promise<{ count: number; totalMB: number }> {
    const dirs = await this._apkSearchDirs();
    let count = 0;
    let total = 0; // bytes
    for (const dir of dirs) {
      try {
        if (!(await RNFS.exists(dir))) continue;
        const files = await RNFS.readDir(dir);
        for (const f of files) {
          if (!f.isFile() || !/\.apk$/i.test(f.name)) continue;
          count += 1;
          total += parseInt(f.size as any, 10) || 0;
        }
      } catch {}
    }
    return { count, totalMB: total / 1024 / 1024 };
  }

  /** Ask the server whether an update exists. Call on app start. */
  checkForUpdate(): void {
    if (this.checking) return;
    this.checking = true;

    console.log(`[Update] Pruefe auf Updates (aktuell: ${APP_VERSION})`);
    rvs.send('update_check' as any, { version: APP_VERSION });

    // Re-arm after 10s regardless of whether a reply arrived.
    setTimeout(() => { this.checking = false; }, 10000);
  }

  /** Register a callback; returns an unsubscribe function. */
  onUpdateAvailable(callback: UpdateCallback): () => void {
    this.listeners.push(callback);
    return () => {
      this.listeners = this.listeners.filter(cb => cb !== callback);
    };
  }

  /** Show the confirm-update dialog for `info`. */
  promptUpdate(info: UpdateInfo): void {
    const sizeMB = (info.size / 1024 / 1024).toFixed(1);
    Alert.alert(
      'ARIA Update verfuegbar',
      `Version ${info.version} (${sizeMB} MB)\n\nAktuell: ${APP_VERSION}\n\nJetzt herunterladen und installieren?`,
      [
        { text: 'Spaeter', style: 'cancel' },
        {
          text: 'Installieren',
          onPress: () => this.downloadAndInstall(info),
        },
      ],
    );
  }

  /**
   * Download the APK over the WebSocket and hand it to the installer.
   * Errors (timeout, server error, write failure) surface as an Alert.
   */
  async downloadAndInstall(info: UpdateInfo): Promise<void> {
    if (this.downloading) return;
    this.downloading = true;

    try {
      console.log(`[Update] Fordere APK v${info.version} an...`);
      Alert.alert('Download gestartet', `Version ${info.version} wird ueber RVS heruntergeladen...`);

      // Request the APK over the WebSocket.
      rvs.send('update_download' as any, {});

      // Wait (once) for the matching update_data reply, with a 60s timeout.
      const apkData = await new Promise<{base64: string, fileName: string}>((resolve, reject) => {
        const timeout = setTimeout(() => reject(new Error('Download-Timeout (60s)')), 60000);
        const unsub = rvs.onMessage((msg: RVSMessage) => {
          if ((msg.type as string) === 'update_data') {
            clearTimeout(timeout);
            unsub();
            if (msg.payload.error) {
              reject(new Error(msg.payload.error as string));
            } else {
              resolve({
                base64: msg.payload.base64 as string,
                fileName: msg.payload.fileName as string || `ARIA-${info.version}.apk`,
              });
            }
          }
        });
      });

      // Sweep old APKs before writing — in case several updates are
      // pulled within one session.
      await this.cleanupOldApks();

      // Persist the base64 payload as an APK file.
      const destPath = `${RNFS.CachesDirectoryPath}/${apkData.fileName}`;
      await RNFS.writeFile(destPath, apkData.base64, 'base64');
      const fileSize = await RNFS.stat(destPath);
      console.log(`[Update] APK gespeichert: ${destPath} (${(parseInt(fileSize.size) / 1024 / 1024).toFixed(1)}MB)`);

      // Install via the native ApkInstaller module (FileProvider + Intent).
      if (Platform.OS === 'android') {
        try {
          const { ApkInstaller } = NativeModules;
          await ApkInstaller.install(destPath);
        } catch (installErr: any) {
          // Installer failed — tell the user where the file is so they can
          // install it manually.
          Alert.alert(
            'APK heruntergeladen',
            `Version ${info.version} gespeichert.\n\nBitte manuell installieren:\nDateimanager → ${apkData.fileName} antippen.\n\n(${installErr.message})`,
          );
        }
      }
    } catch (err: any) {
      console.error(`[Update] Fehler: ${err.message}`);
      Alert.alert('Update fehlgeschlagen', err.message);
    } finally {
      this.downloading = false;
    }
  }

  /** Dotted-numeric version compare: true iff `remote` > APP_VERSION.
   *  Missing components are treated as 0. */
  private isNewer(remote: string): boolean {
    const r = remote.split('.').map(Number);
    const l = APP_VERSION.split('.').map(Number);
    for (let i = 0; i < Math.max(r.length, l.length); i++) {
      const diff = (r[i] || 0) - (l[i] || 0);
      if (diff > 0) return true;
      if (diff < 0) return false;
    }
    return false;
  }

  /** The version string this build was compiled with. */
  getCurrentVersion(): string {
    return APP_VERSION;
  }
}
|
||||||
|
|
||||||
|
// App-wide singleton instance.
const updateService = new UpdateService();
export default updateService;
|
||||||
@@ -0,0 +1,71 @@
|
|||||||
|
/**
|
||||||
|
* Spielt einen kurzen "Bereit"-Sound (Airplane Ding-Dong) wenn das Mikrofon
|
||||||
|
* nach Wake-Word-Erkennung wirklich offen ist. Datei liegt in
|
||||||
|
* android/app/src/main/res/raw/wake_ready_sound.mp3 — wird ueber Android's
|
||||||
|
* Resource-System per react-native-sound abgespielt.
|
||||||
|
*
|
||||||
|
* Toggle: AsyncStorage-Key 'aria_wake_ready_sound_enabled' (default true).
|
||||||
|
*/
|
||||||
|
|
||||||
|
import Sound from 'react-native-sound';
|
||||||
|
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||||
|
|
||||||
|
// AsyncStorage key for the user toggle; unset means enabled.
export const WAKE_READY_SOUND_STORAGE_KEY = 'aria_wake_ready_sound_enabled';

// NOTE(review): second arg (mixWithOthers=false) — presumably iOS-only; confirm.
Sound.setCategory('Playback', false);

// Sound instance cached after the first successful load.
let cachedSound: Sound | null = null;
// Latched on load failure so getSound() never retries (and warns only once).
let cachedFailed = false;
|
||||||
|
|
||||||
|
function getSound(): Promise<Sound | null> {
|
||||||
|
if (cachedFailed) return Promise.resolve(null);
|
||||||
|
if (cachedSound) return Promise.resolve(cachedSound);
|
||||||
|
return new Promise(resolve => {
|
||||||
|
const s = new Sound('wake_ready_sound', Sound.MAIN_BUNDLE, (err) => {
|
||||||
|
if (err) {
|
||||||
|
console.warn('[WakeReadySound] Konnte nicht geladen werden:', err);
|
||||||
|
cachedFailed = true;
|
||||||
|
resolve(null);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
cachedSound = s;
|
||||||
|
resolve(s);
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/** True wenn der User den "Bereit"-Sound aktiviert hat. Default: true. */
|
||||||
|
export async function isWakeReadySoundEnabled(): Promise<boolean> {
|
||||||
|
try {
|
||||||
|
const raw = await AsyncStorage.getItem(WAKE_READY_SOUND_STORAGE_KEY);
|
||||||
|
if (raw === null) return true; // Default an
|
||||||
|
return raw === 'true';
|
||||||
|
} catch {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function setWakeReadySoundEnabled(enabled: boolean): Promise<void> {
|
||||||
|
try {
|
||||||
|
await AsyncStorage.setItem(WAKE_READY_SOUND_STORAGE_KEY, String(enabled));
|
||||||
|
} catch {}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Spielt den Bereit-Sound einmal ab — non-blocking. Wenn der User ihn
|
||||||
|
* in den Settings deaktiviert hat oder die Datei nicht ladbar ist,
|
||||||
|
* passiert einfach nichts. */
|
||||||
|
export async function playWakeReadySound(): Promise<void> {
|
||||||
|
if (!(await isWakeReadySoundEnabled())) return;
|
||||||
|
const s = await getSound();
|
||||||
|
if (!s) return;
|
||||||
|
try {
|
||||||
|
s.stop(() => {
|
||||||
|
s.setCurrentTime(0);
|
||||||
|
s.play((success) => {
|
||||||
|
if (!success) console.warn('[WakeReadySound] Wiedergabe fehlgeschlagen');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
} catch (e) {
|
||||||
|
console.warn('[WakeReadySound] play() Exception:', e);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,96 +1,372 @@
|
|||||||
/**
|
/**
|
||||||
* Wake Word Service — "ARIA" Erkennung
|
* Gespraechsmodus / Wake Word Service
|
||||||
*
|
*
|
||||||
* Nutzt react-native-live-audio-stream fuer kontinuierliches Mikrofon-Monitoring.
|
* Wake-Word-Engine: openWakeWord (https://github.com/dscripka/openWakeWord),
|
||||||
* Erkennt Sprache per Energie-Schwellwert und sendet kurze Audio-Clips
|
* komplett on-device via ONNX Runtime in Native-Kotlin (siehe
|
||||||
* zur serverseitigen Wake-Word-Pruefung (openwakeword in der Bridge).
|
* OpenWakeWordModule.kt + assets/openwakeword/). Kein API-Key, kein Cloud-
|
||||||
|
* Roundtrip, kein Cent Lizenzgebuehren.
|
||||||
*
|
*
|
||||||
* Architektur:
|
* Drei Zustaende:
|
||||||
* App (Mikrofon) → Energie-Erkennung → Audio-Buffer
|
* off — Ohr aus, nichts laeuft
|
||||||
* → RVS "wake_check" → Bridge → openwakeword → Bestaetigung
|
* armed — Ohr aktiv, openWakeWord hoert passiv auf das Wake-Word.
|
||||||
* → App startet Aufnahme
|
* Das Mikro ist von OpenWakeWord belegt; AudioRecorder ist aus.
|
||||||
|
* conversing — Wake-Word getriggert (oder Ohr-Tap manuell):
|
||||||
|
* aktive Konversation. OpenWakeWord pausiert (gibt Mikro frei),
|
||||||
|
* AudioRecorder uebernimmt fuer die Aufnahme.
|
||||||
|
* Nach jeder ARIA-Antwort oeffnet das Mikro fuer X Sekunden
|
||||||
|
* (Conversation-Window). Stille im Fenster → zurueck zu armed.
|
||||||
*
|
*
|
||||||
* Aktuell (Phase 1): Einfacher Tap-to-Talk + Auto-Stop.
|
* Faellt das Native-Modul aus (alte App-Version, ONNX-Init-Fehler), geht
|
||||||
* Spaeter (Phase 2): Porcupine on-device "ARIA" Keyword.
|
* 'start' direkt in 'conversing' (klassischer Direkt-Aufnahme-Modus).
|
||||||
*/
|
*/
|
||||||
|
|
||||||
import LiveAudioStream from 'react-native-live-audio-stream';
|
import { NativeEventEmitter, NativeModules, ToastAndroid } from 'react-native';
|
||||||
|
import AsyncStorage from '@react-native-async-storage/async-storage';
|
||||||
|
import { acquireBackgroundAudio } from './backgroundAudio';
|
||||||
|
|
||||||
type WakeWordCallback = () => void;
|
type WakeWordCallback = () => void;
|
||||||
type StateCallback = (state: WakeWordState) => void;
|
type StateCallback = (state: WakeWordState) => void;
|
||||||
|
|
||||||
export type WakeWordState = 'off' | 'listening' | 'detected';
|
export type WakeWordState = 'off' | 'armed' | 'conversing';
|
||||||
|
|
||||||
|
export const WAKE_KEYWORD_STORAGE = 'aria_wake_keyword';
|
||||||
|
|
||||||
|
/** Verfuegbare Wake-Words — entsprechen den .onnx Dateien in
|
||||||
|
* android/app/src/main/assets/openwakeword/. Custom-Keywords (eigenes
|
||||||
|
* Training via openwakeword Notebook) muessen aktuell als Asset eingebaut
|
||||||
|
* werden — Diagnostic-Upload ist Phase 2. */
|
||||||
|
export const WAKE_KEYWORDS = [
|
||||||
|
'hey_jarvis',
|
||||||
|
'computer',
|
||||||
|
'alexa',
|
||||||
|
'hey_mycroft',
|
||||||
|
'hey_rhasspy',
|
||||||
|
] as const;
|
||||||
|
export type WakeKeyword = typeof WAKE_KEYWORDS[number];
|
||||||
|
export const DEFAULT_KEYWORD: WakeKeyword = 'hey_jarvis';
|
||||||
|
|
||||||
|
/** Hilfs-Mapping fuer die Anzeige im UI. */
|
||||||
|
export const KEYWORD_LABELS: Record<WakeKeyword, string> = {
|
||||||
|
hey_jarvis: 'Hey Jarvis',
|
||||||
|
computer: 'Computer',
|
||||||
|
alexa: 'Alexa',
|
||||||
|
hey_mycroft: 'Hey Mycroft',
|
||||||
|
hey_rhasspy: 'Hey Rhasspy',
|
||||||
|
};
|
||||||
|
|
||||||
|
// Detection-Tuning — kann in Settings spaeter konfigurierbar werden.
|
||||||
|
const DEFAULT_THRESHOLD = 0.5;
|
||||||
|
const DEFAULT_PATIENCE = 2;
|
||||||
|
const DEFAULT_DEBOUNCE_MS = 1500;
|
||||||
|
|
||||||
|
interface OpenWakeWordModule {
|
||||||
|
init(modelName: string, threshold: number, patience: number, debounceMs: number): Promise<boolean>;
|
||||||
|
start(): Promise<boolean>;
|
||||||
|
stop(): Promise<boolean>;
|
||||||
|
dispose(): Promise<boolean>;
|
||||||
|
isAvailable(): Promise<boolean>;
|
||||||
|
}
|
||||||
|
|
||||||
|
const { OpenWakeWord } = NativeModules as { OpenWakeWord?: OpenWakeWordModule };
|
||||||
|
|
||||||
class WakeWordService {
|
class WakeWordService {
|
||||||
private state: WakeWordState = 'off';
|
private state: WakeWordState = 'off';
|
||||||
private wakeCallbacks: WakeWordCallback[] = [];
|
private wakeCallbacks: WakeWordCallback[] = [];
|
||||||
private stateCallbacks: StateCallback[] = [];
|
private stateCallbacks: StateCallback[] = [];
|
||||||
private isInitialized = false;
|
/** Barge-In-Callbacks: feuern wenn Wake-Word WAEHREND ARIA spricht erkannt
|
||||||
|
* wird. ChatScreen reagiert mit TTS-stop + neuer Aufnahme. */
|
||||||
|
private bargeCallbacks: WakeWordCallback[] = [];
|
||||||
|
/** True solange Wake-Word parallel zu TTS aktiv ist. */
|
||||||
|
private bargeListening: boolean = false;
|
||||||
|
/** Anruf-Pause: state wird gemerkt damit nach Auflegen wiederhergestellt wird. */
|
||||||
|
private callPaused: boolean = false;
|
||||||
|
private preCallState: WakeWordState = 'off';
|
||||||
|
/** Cooldown nach App-Resume: kurze Phase in der Wake-Word-Detections
|
||||||
|
* ignoriert werden. Beim Wechsel von Background nach Vordergrund gibt's
|
||||||
|
* oft einen Audio-Pegel-Spike (AudioFocus-Switch, AudioTrack re-route),
|
||||||
|
* der openWakeWord faelschlich triggern kann. */
|
||||||
|
private cooldownUntilMs: number = 0;
|
||||||
|
|
||||||
/** Wake Word Erkennung starten */
|
private keyword: WakeKeyword = DEFAULT_KEYWORD;
|
||||||
async start(): Promise<boolean> {
|
private nativeReady: boolean = false;
|
||||||
if (this.state === 'listening') return true;
|
private initInProgress: Promise<boolean> | null = null;
|
||||||
|
private eventSub: { remove: () => void } | null = null;
|
||||||
|
|
||||||
|
/** Beim App-Start aufrufen — laedt Settings, baut Native-Modul. */
|
||||||
|
async loadFromStorage(): Promise<void> {
|
||||||
try {
|
try {
|
||||||
if (!this.isInitialized) {
|
const w = await AsyncStorage.getItem(WAKE_KEYWORD_STORAGE);
|
||||||
LiveAudioStream.init({
|
const wt = (w || DEFAULT_KEYWORD).trim() as WakeKeyword;
|
||||||
sampleRate: 16000,
|
this.keyword = (WAKE_KEYWORDS as readonly string[]).includes(wt) ? wt : DEFAULT_KEYWORD;
|
||||||
channels: 1,
|
await this.initNative();
|
||||||
bitsPerSample: 16,
|
|
||||||
audioSource: 6, // VOICE_RECOGNITION
|
|
||||||
bufferSize: 4096,
|
|
||||||
});
|
|
||||||
this.isInitialized = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Audio-Stream starten und auf Energie pruefen
|
|
||||||
LiveAudioStream.start();
|
|
||||||
|
|
||||||
LiveAudioStream.on('data', (base64Chunk: string) => {
|
|
||||||
if (this.state !== 'listening') return;
|
|
||||||
|
|
||||||
// Base64 → Int16 Array → RMS berechnen
|
|
||||||
const raw = this._base64ToInt16(base64Chunk);
|
|
||||||
const rms = this._calculateRMS(raw);
|
|
||||||
|
|
||||||
// Schwellwert: wenn laut genug → Wake Word erkannt
|
|
||||||
// Phase 1: Einfache Energie-Erkennung (jemand spricht)
|
|
||||||
// Phase 2: Porcupine "ARIA" Keyword
|
|
||||||
if (rms > 2000) {
|
|
||||||
this.setState('detected');
|
|
||||||
this.wakeCallbacks.forEach(cb => cb());
|
|
||||||
// Nach Detection kurz pausieren, Aufnahme uebernimmt das Mikrofon
|
|
||||||
this.stop();
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
this.setState('listening');
|
|
||||||
console.log('[WakeWord] Listening gestartet');
|
|
||||||
return true;
|
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
console.error('[WakeWord] Start fehlgeschlagen:', err);
|
console.warn('[WakeWord] loadFromStorage', err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Settings-Wechsel: anderes Wake-Word. Re-Init des Native-Moduls. */
|
||||||
|
async configure(keyword: string): Promise<boolean> {
|
||||||
|
const next: WakeKeyword = (WAKE_KEYWORDS as readonly string[]).includes(keyword)
|
||||||
|
? (keyword as WakeKeyword)
|
||||||
|
: DEFAULT_KEYWORD;
|
||||||
|
this.keyword = next;
|
||||||
|
await AsyncStorage.setItem(WAKE_KEYWORD_STORAGE, next);
|
||||||
|
|
||||||
|
// Laufende Instanz stoppen + neu initialisieren
|
||||||
|
await this.disposeNative();
|
||||||
|
const ok = await this.initNative();
|
||||||
|
if (!ok) {
|
||||||
|
ToastAndroid.show(
|
||||||
|
`Wake-Word "${KEYWORD_LABELS[next]}" konnte nicht initialisiert werden — Logs pruefen`,
|
||||||
|
ToastAndroid.LONG,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return ok;
|
||||||
|
}
|
||||||
|
|
||||||
|
private async initNative(): Promise<boolean> {
|
||||||
|
if (!OpenWakeWord) {
|
||||||
|
console.warn('[WakeWord] OpenWakeWord Native-Modul nicht verfuegbar — Direkt-Aufnahme-Fallback aktiv');
|
||||||
|
this.nativeReady = false;
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
}
|
if (this.initInProgress) return this.initInProgress;
|
||||||
|
this.initInProgress = (async () => {
|
||||||
/** Wake Word Erkennung stoppen */
|
|
||||||
stop(): void {
|
|
||||||
if (this.state === 'off') return;
|
|
||||||
try {
|
try {
|
||||||
LiveAudioStream.stop();
|
await OpenWakeWord.init(this.keyword, DEFAULT_THRESHOLD, DEFAULT_PATIENCE, DEFAULT_DEBOUNCE_MS);
|
||||||
} catch {}
|
// Subscribe nur einmal
|
||||||
this.setState('off');
|
if (!this.eventSub) {
|
||||||
console.log('[WakeWord] Gestoppt');
|
const emitter = new NativeEventEmitter(NativeModules.OpenWakeWord);
|
||||||
|
this.eventSub = emitter.addListener('WakeWordDetected', () => {
|
||||||
|
console.log('[WakeWord] Native Detection-Event empfangen');
|
||||||
|
this.onWakeDetected().catch(err =>
|
||||||
|
console.warn('[WakeWord] onWakeDetected crashed:', err));
|
||||||
|
});
|
||||||
|
}
|
||||||
|
this.nativeReady = true;
|
||||||
|
console.log('[WakeWord] Init OK (model=%s)', this.keyword);
|
||||||
|
return true;
|
||||||
|
} catch (err: any) {
|
||||||
|
console.warn('[WakeWord] Init fehlgeschlagen:', err?.message || err);
|
||||||
|
this.nativeReady = false;
|
||||||
|
return false;
|
||||||
|
} finally {
|
||||||
|
this.initInProgress = null;
|
||||||
|
}
|
||||||
|
})();
|
||||||
|
return this.initInProgress;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Nach Aufnahme erneut starten */
|
private async disposeNative(): Promise<void> {
|
||||||
async resume(): Promise<void> {
|
if (!OpenWakeWord) return;
|
||||||
// Kurze Pause damit Aufnahme das Mikrofon freigeben kann
|
try { await OpenWakeWord.dispose(); } catch {}
|
||||||
|
this.nativeReady = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Ohr-Button gedrueckt — startet passives Lauschen oder direkt Konversation. */
|
||||||
|
async start(): Promise<boolean> {
|
||||||
|
if (this.state !== 'off') return true;
|
||||||
|
// Foreground-Service VOR dem Mic-Zugriff hochziehen damit Background-
|
||||||
|
// Lauschen funktioniert (Android braucht foregroundServiceType=microphone
|
||||||
|
// aktiv zum Zeitpunkt des AudioRecord.startRecording).
|
||||||
|
await acquireBackgroundAudio('wake');
|
||||||
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
|
try {
|
||||||
|
await OpenWakeWord.start();
|
||||||
|
console.log('[WakeWord] armed — warte auf "%s"', this.keyword);
|
||||||
|
ToastAndroid.show(`Lausche auf "${KEYWORD_LABELS[this.keyword]}"`, ToastAndroid.SHORT);
|
||||||
|
this.setState('armed');
|
||||||
|
return true;
|
||||||
|
} catch (err: any) {
|
||||||
|
console.warn('[WakeWord] start fehlgeschlagen — Fallback Direkt-Aufnahme:',
|
||||||
|
err?.message || err);
|
||||||
|
ToastAndroid.show(
|
||||||
|
`Wake-Word-Start failed: ${err?.message || err}`,
|
||||||
|
ToastAndroid.LONG,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
console.warn('[WakeWord] Native-Modul nicht bereit — Direkt-Aufnahme-Fallback');
|
||||||
|
ToastAndroid.show(
|
||||||
|
'Wake-Word nicht aktiv — direkte Aufnahme startet (Mikro hoert mit)',
|
||||||
|
ToastAndroid.LONG,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
// Fallback: direkt in Konversation
|
||||||
|
console.log('[WakeWord] Direkt-Aufnahme startet (kein Wake-Word)');
|
||||||
|
this.setState('conversing');
|
||||||
setTimeout(() => {
|
setTimeout(() => {
|
||||||
if (this.state === 'off') {
|
if (this.state === 'conversing') {
|
||||||
this.start();
|
this.wakeCallbacks.forEach(cb => cb());
|
||||||
}
|
}
|
||||||
}, 500);
|
}, 500);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Komplett ausschalten (Ohr abschalten) */
|
||||||
|
async stop(): Promise<void> {
|
||||||
|
console.log('[WakeWord] Ohr deaktiviert');
|
||||||
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
|
try { await OpenWakeWord.stop(); } catch {}
|
||||||
|
}
|
||||||
|
this.bargeListening = false;
|
||||||
|
this.setState('off');
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Cooldown setzen — alle Wake-Word-Detections in den naechsten ms ignorieren.
|
||||||
|
* Wird beim App-Resume gerufen weil AppState-Wechsel Audio-Spikes erzeugen
|
||||||
|
* die openWakeWord faelschlich als Trigger interpretiert. */
|
||||||
|
setResumeCooldown(ms: number = 1500): void {
|
||||||
|
this.cooldownUntilMs = Date.now() + ms;
|
||||||
|
console.log('[WakeWord] Cooldown aktiv fuer %dms', ms);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Wake-Word getriggert: Native-Modul pausieren, Konversation starten. */
|
||||||
|
private async onWakeDetected(): Promise<void> {
|
||||||
|
const now = Date.now();
|
||||||
|
if (now < this.cooldownUntilMs) {
|
||||||
|
const left = this.cooldownUntilMs - now;
|
||||||
|
console.log('[WakeWord] Trigger ignoriert (Cooldown noch %dms aktiv — wahrscheinlich App-Resume-Spike)', left);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
console.log('[WakeWord] Wake-Word "%s" erkannt! (state=%s, barge=%s)',
|
||||||
|
this.keyword, this.state, this.bargeListening);
|
||||||
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
|
try { await OpenWakeWord.stop(); } catch {}
|
||||||
|
}
|
||||||
|
this.bargeListening = false;
|
||||||
|
// Wenn wir bereits in 'conversing' sind und der Trigger waehrend ARIAs TTS
|
||||||
|
// kam (Barge-In via Wake-Word), feuern wir einen separaten Callback damit
|
||||||
|
// ChatScreen das TTS abbrechen + neue Aufnahme starten kann. Sonst normal.
|
||||||
|
if (this.state === 'conversing') {
|
||||||
|
this.bargeCallbacks.forEach(cb => {
|
||||||
|
try { cb(); } catch (e) { console.warn('[WakeWord] barge cb err:', e); }
|
||||||
|
});
|
||||||
|
// Kein erneutes setState — wir bleiben in 'conversing'.
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
this.setState('conversing');
|
||||||
|
setTimeout(() => {
|
||||||
|
if (this.state === 'conversing') {
|
||||||
|
this.wakeCallbacks.forEach(cb => cb());
|
||||||
|
}
|
||||||
|
}, 200);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Wake-Word PARALLEL zur TTS-Wiedergabe lauschen lassen — User kann
|
||||||
|
* "Computer" sagen waehrend ARIA noch redet, AcousticEchoCanceler im
|
||||||
|
* Native-Modul verhindert dass ARIAs eigene Stimme triggert.
|
||||||
|
* Voraussetzung: AudioRecorder muss frei sein (Recording aus). Wenn der
|
||||||
|
* AudioRecorder gerade laeuft, hat der Vorrang — Wake-Word geht nicht. */
|
||||||
|
async startBargeListening(): Promise<void> {
|
||||||
|
if (!this.nativeReady || !OpenWakeWord) return;
|
||||||
|
if (this.state !== 'conversing') return;
|
||||||
|
if (this.bargeListening) return;
|
||||||
|
try {
|
||||||
|
await OpenWakeWord.start();
|
||||||
|
this.bargeListening = true;
|
||||||
|
console.log('[WakeWord] Barge-Listening aktiv (parallel zu TTS)');
|
||||||
|
} catch (err) {
|
||||||
|
console.warn('[WakeWord] Barge-Listening start fehlgeschlagen:', err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Barge-Listening wieder aus — z.B. wenn der AudioRecorder fuer die
|
||||||
|
* naechste Aufnahme das Mikro braucht. */
|
||||||
|
async stopBargeListening(): Promise<void> {
|
||||||
|
if (!this.bargeListening) return;
|
||||||
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
|
try { await OpenWakeWord.stop(); } catch {}
|
||||||
|
}
|
||||||
|
this.bargeListening = false;
|
||||||
|
console.log('[WakeWord] Barge-Listening aus');
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Bei eingehendem Anruf: Wake-Word + Aufnahme stoppen, Pre-Call-State
|
||||||
|
* merken. Telefonie-App belegt das Mikro waehrend des Anrufs, plus ARIA
|
||||||
|
* soll nicht in laufende Telefonate reinhoeren. */
|
||||||
|
async pauseForCall(): Promise<void> {
|
||||||
|
if (this.callPaused) return;
|
||||||
|
this.preCallState = this.state;
|
||||||
|
if (this.state === 'off') {
|
||||||
|
this.callPaused = true; // merken dass wir pausiert wurden
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
this.callPaused = true;
|
||||||
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
|
try { await OpenWakeWord.stop(); } catch {}
|
||||||
|
}
|
||||||
|
this.bargeListening = false;
|
||||||
|
console.log('[WakeWord] Anruf — Wake-Word pausiert (war: %s)', this.preCallState);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Nach Auflegen: Pre-Call-State wiederherstellen. Aktive Konversation
|
||||||
|
* geht zu armed zurueck (User soll nicht in einen halben Dialog springen). */
|
||||||
|
async resumeFromCall(): Promise<void> {
|
||||||
|
if (!this.callPaused) return;
|
||||||
|
const restoreTo = this.preCallState;
|
||||||
|
this.callPaused = false;
|
||||||
|
this.preCallState = 'off';
|
||||||
|
console.log('[WakeWord] Anruf zu Ende — restore state=%s', restoreTo);
|
||||||
|
if (restoreTo === 'off') return;
|
||||||
|
// Aktive Konversation war wahrscheinlich durch haltAllPlayback eh abgebrochen,
|
||||||
|
// sicher zu armed degraden.
|
||||||
|
if (restoreTo === 'conversing') this.setState('armed');
|
||||||
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
|
try { await OpenWakeWord.start(); } catch (err) {
|
||||||
|
console.warn('[WakeWord] Restore-Start fehlgeschlagen:', err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Konversation beenden — User hat im Window nichts gesagt.
|
||||||
|
* Mit Wake-Word: zurueck zu 'armed' (Listener wieder an).
|
||||||
|
* Ohne: zurueck zu 'off'.
|
||||||
|
*/
|
||||||
|
async endConversation(): Promise<void> {
|
||||||
|
if (this.state !== 'conversing') return;
|
||||||
|
if (this.nativeReady && OpenWakeWord) {
|
||||||
|
try {
|
||||||
|
await OpenWakeWord.start();
|
||||||
|
console.log('[WakeWord] Konversation zu Ende — zurueck zu armed');
|
||||||
|
ToastAndroid.show(`Lausche wieder auf "${KEYWORD_LABELS[this.keyword]}"`, ToastAndroid.SHORT);
|
||||||
|
this.setState('armed');
|
||||||
|
return;
|
||||||
|
} catch (err) {
|
||||||
|
console.warn('[WakeWord] re-arm fehlgeschlagen:', err);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
console.log('[WakeWord] Konversation zu Ende — Ohr aus');
|
||||||
|
ToastAndroid.show('Mikro aus', ToastAndroid.SHORT);
|
||||||
|
this.setState('off');
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Nach ARIA-Antwort (TTS fertig): naechste Aufnahme im Conversation-Window starten */
|
||||||
|
async resume(): Promise<void> {
|
||||||
|
if (this.state !== 'conversing') return;
|
||||||
|
// Kurze Pause damit TTS-Audio nicht ins Mikrofon geht
|
||||||
|
await new Promise(resolve => setTimeout(resolve, 800));
|
||||||
|
if (this.state === 'conversing') {
|
||||||
|
console.log('[WakeWord] TTS fertig — naechste Aufnahme im Conversation-Window');
|
||||||
|
this.wakeCallbacks.forEach(cb => cb());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** True solange das Ohr aktiv ist (armed ODER conversing). */
|
||||||
|
isActive(): boolean {
|
||||||
|
return this.state !== 'off';
|
||||||
|
}
|
||||||
|
|
||||||
|
isConversing(): boolean {
|
||||||
|
return this.state === 'conversing';
|
||||||
|
}
|
||||||
|
|
||||||
|
hasWakeWord(): boolean {
|
||||||
|
return this.nativeReady;
|
||||||
|
}
|
||||||
|
|
||||||
|
getKeyword(): WakeKeyword {
|
||||||
|
return this.keyword;
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Callbacks ---
|
// --- Callbacks ---
|
||||||
@@ -102,6 +378,19 @@ class WakeWordService {
|
|||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Subscribe auf Barge-In-Events: Wake-Word erkannt waehrend ARIA noch
|
||||||
|
* spricht. ChatScreen sollte dann TTS abbrechen + neue Aufnahme starten. */
|
||||||
|
onBargeIn(callback: WakeWordCallback): () => void {
|
||||||
|
this.bargeCallbacks.push(callback);
|
||||||
|
return () => {
|
||||||
|
this.bargeCallbacks = this.bargeCallbacks.filter(cb => cb !== callback);
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
isBargeListening(): boolean {
|
||||||
|
return this.bargeListening;
|
||||||
|
}
|
||||||
|
|
||||||
onStateChange(callback: StateCallback): () => void {
|
onStateChange(callback: StateCallback): () => void {
|
||||||
this.stateCallbacks.push(callback);
|
this.stateCallbacks.push(callback);
|
||||||
return () => {
|
return () => {
|
||||||
@@ -113,32 +402,12 @@ class WakeWordService {
|
|||||||
return this.state;
|
return this.state;
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Hilfsfunktionen ---
|
|
||||||
|
|
||||||
private setState(state: WakeWordState): void {
|
private setState(state: WakeWordState): void {
|
||||||
if (this.state !== state) {
|
if (this.state !== state) {
|
||||||
this.state = state;
|
this.state = state;
|
||||||
this.stateCallbacks.forEach(cb => cb(state));
|
this.stateCallbacks.forEach(cb => cb(state));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private _base64ToInt16(base64: string): Int16Array {
|
|
||||||
const binary = atob(base64);
|
|
||||||
const bytes = new Uint8Array(binary.length);
|
|
||||||
for (let i = 0; i < binary.length; i++) {
|
|
||||||
bytes[i] = binary.charCodeAt(i);
|
|
||||||
}
|
|
||||||
return new Int16Array(bytes.buffer);
|
|
||||||
}
|
|
||||||
|
|
||||||
private _calculateRMS(samples: Int16Array): number {
|
|
||||||
if (samples.length === 0) return 0;
|
|
||||||
let sum = 0;
|
|
||||||
for (let i = 0; i < samples.length; i++) {
|
|
||||||
sum += samples[i] * samples[i];
|
|
||||||
}
|
|
||||||
return Math.sqrt(sum / samples.length);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const wakeWordService = new WakeWordService();
|
const wakeWordService = new WakeWordService();
|
||||||
|
|||||||
@@ -0,0 +1,35 @@
|
|||||||
|
# ════════════════════════════════════════════════════════════
|
||||||
|
# ARIA Brain — Agent + Memory Container
|
||||||
|
#
|
||||||
|
# FastAPI-Server mit Vector-DB-Memory (Qdrant).
|
||||||
|
# Spricht via HTTP/WebSocket mit Bridge und Diagnostic.
|
||||||
|
# LLM-Calls gehen ueber den Proxy (claude-max-api-proxy).
|
||||||
|
# ════════════════════════════════════════════════════════════
|
||||||
|
|
||||||
|
FROM python:3.12-slim
|
||||||
|
|
||||||
|
# System-Tools die Skills brauchen koennten (curl, jq, git, ssh-client,
|
||||||
|
# Build-Basics fuer venv-Compiles). Bewusst sparsam — alles weitere
|
||||||
|
# bringt der Skill selbst mit (siehe execution=local-bin).
|
||||||
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
|
curl \
|
||||||
|
jq \
|
||||||
|
git \
|
||||||
|
openssh-client \
|
||||||
|
ca-certificates \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
COPY requirements.txt .
|
||||||
|
RUN pip install --no-cache-dir -r requirements.txt
|
||||||
|
|
||||||
|
COPY . .
|
||||||
|
|
||||||
|
# Embedding-Model-Cache und Skills landen unter /data (Volume)
|
||||||
|
ENV SENTENCE_TRANSFORMERS_HOME=/data/_models
|
||||||
|
ENV ARIA_DATA_DIR=/data
|
||||||
|
|
||||||
|
EXPOSE 8080
|
||||||
|
|
||||||
|
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080"]
|
||||||
@@ -0,0 +1,385 @@
|
|||||||
|
"""
|
||||||
|
Conversation-Loop. Eine Anfrage von Stefan, eine Antwort von ARIA.
|
||||||
|
|
||||||
|
Pro Turn:
|
||||||
|
1. user-Turn an die laufende Conversation appenden
|
||||||
|
2. Hot Memory holen (alle pinned Punkte)
|
||||||
|
3. Cold Memory holen (Top-K semantisch zur user-Nachricht)
|
||||||
|
4. System-Prompt aus Hot+Cold bauen
|
||||||
|
5. Messages = [system, *window, user]
|
||||||
|
6. Claude via Proxy aufrufen
|
||||||
|
7. Assistant-Reply in Conversation appenden + zurueckgeben
|
||||||
|
|
||||||
|
Memory-Destillat laeuft asynchron NACH dem Reply, gesteuert vom
|
||||||
|
/chat-Endpoint ueber BackgroundTasks.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
|
from conversation import Conversation, Turn
|
||||||
|
from memory import Embedder, VectorStore, MemoryPoint
|
||||||
|
from prompts import build_system_prompt
|
||||||
|
from proxy_client import ProxyClient, Message as ProxyMessage
|
||||||
|
import skills as skills_mod
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
# Meta-Tool: ARIA kann selbst neue Skills bauen
|
||||||
|
META_TOOLS = [
|
||||||
|
{
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": "skill_create",
|
||||||
|
"description": (
|
||||||
|
"Erstelle einen neuen Skill (wiederverwendbare Faehigkeit). "
|
||||||
|
"Skills sind IMMER Python — jeder Skill bekommt seine eigene venv "
|
||||||
|
"mit den pip_packages die er braucht.\n\n"
|
||||||
|
"HARTE REGEL — IMMER Skill anlegen wenn: die Loesung erfordert eine "
|
||||||
|
"pip-Library. Sonst muesste der Install bei jedem Container-Restart "
|
||||||
|
"neu laufen (Brain hat keinen persistenten State ausser /data/skills/).\n\n"
|
||||||
|
"Sonst NUR wenn ALLE Kriterien erfuellt sind:\n"
|
||||||
|
" 1) wiederkehrend (Aufgabe kommt realistisch nochmal),\n"
|
||||||
|
" 2) nicht-trivial (mehrere Schritte),\n"
|
||||||
|
" 3) parametrisierbar (nimmt Eingaben, gibt Ergebnis),\n"
|
||||||
|
" 4) wiederverwendbar als ganzes Paket.\n"
|
||||||
|
"NICHT fuer einzelne Shell-Befehle (date, hostname, ls etc.) und "
|
||||||
|
"nicht fuer Einmal-Faelle. Stefan kann Skill-Erstellung explizit "
|
||||||
|
"triggern (\"bau daraus einen Skill\").\n\n"
|
||||||
|
"Wenn etwas nur via apt-Paket geht — Stefan fragen ob es ins "
|
||||||
|
"Brain-Dockerfile soll, NICHT als Skill bauen."
|
||||||
|
),
|
||||||
|
"parameters": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"name": {"type": "string", "description": "kurz, kebab-case, a-z 0-9 - _"},
|
||||||
|
"description": {"type": "string", "description": "Was kann der Skill? 1 Satz."},
|
||||||
|
"entry_code": {
|
||||||
|
"type": "string",
|
||||||
|
"description": (
|
||||||
|
"Python-Code. Args lesen via os.environ['ARG_NAME']. "
|
||||||
|
"Resultat per print() (stdout) zurueck. Bei Fehler: "
|
||||||
|
"non-zero exit (sys.exit(1) o.ae.)."
|
||||||
|
),
|
||||||
|
},
|
||||||
|
"readme": {"type": "string", "description": "Markdown — was macht der Skill, Beispiel-Aufrufe"},
|
||||||
|
"pip_packages": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "string"},
|
||||||
|
"description": "pip-Pakete die in der venv installiert werden (z.B. requests, yt-dlp, pypdf)",
|
||||||
|
},
|
||||||
|
"args": {
|
||||||
|
"type": "array",
|
||||||
|
"items": {"type": "object"},
|
||||||
|
"description": "Argumente-Schema [{name, type, required, description}]",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"required": ["name", "description", "entry_code"],
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": "skill_list",
|
||||||
|
"description": "Zeigt alle Skills (inkl. deaktivierte). Sollte selten noetig sein — die Liste steht eh im System-Prompt.",
|
||||||
|
"parameters": {"type": "object", "properties": {}},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def _skill_to_tool(s: dict) -> dict:
|
||||||
|
"""Mappt einen Skill auf ein OpenAI-Function-Tool."""
|
||||||
|
args = s.get("args") or []
|
||||||
|
props = {}
|
||||||
|
required = []
|
||||||
|
for a in args:
|
||||||
|
if not isinstance(a, dict):
|
||||||
|
continue
|
||||||
|
name = a.get("name") or ""
|
||||||
|
if not name:
|
||||||
|
continue
|
||||||
|
props[name] = {
|
||||||
|
"type": a.get("type", "string"),
|
||||||
|
"description": a.get("description", ""),
|
||||||
|
}
|
||||||
|
if a.get("required"):
|
||||||
|
required.append(name)
|
||||||
|
return {
|
||||||
|
"type": "function",
|
||||||
|
"function": {
|
||||||
|
"name": f"run_{s['name']}",
|
||||||
|
"description": s.get("description", "(ohne Beschreibung)"),
|
||||||
|
"parameters": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": props,
|
||||||
|
"required": required,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class Agent:
|
||||||
|
def __init__(self, store: VectorStore, embedder: Embedder,
|
||||||
|
conversation: Conversation, proxy: ProxyClient,
|
||||||
|
cold_k: int = 5):
|
||||||
|
self.store = store
|
||||||
|
self.embedder = embedder
|
||||||
|
self.conversation = conversation
|
||||||
|
self.proxy = proxy
|
||||||
|
self.cold_k = cold_k
|
||||||
|
# Side-Channel-Events die im Turn entstehen (z.B. skill_create).
|
||||||
|
# Werden vom /chat-Endpoint in der Response mitgeschickt, damit
|
||||||
|
# Stefan in der App und Diagnostic eine sichtbare Bubble bekommt.
|
||||||
|
self._pending_events: list[dict] = []
|
||||||
|
|
||||||
|
def pop_events(self) -> list[dict]:
|
||||||
|
"""Holt die Events des letzten chat()-Calls und leert die Liste."""
|
||||||
|
events = self._pending_events
|
||||||
|
self._pending_events = []
|
||||||
|
return events
|
||||||
|
|
||||||
|
# ── Hauptpfad: ein User-Turn → Tool-Loop → finaler Reply ──
|
||||||
|
|
||||||
|
MAX_TOOL_ITERATIONS = 8 # Schutz vor Endlos-Loops
|
||||||
|
|
||||||
|
def chat(self, user_message: str, source: str = "") -> str:
|
||||||
|
user_message = (user_message or "").strip()
|
||||||
|
if not user_message:
|
||||||
|
raise ValueError("Leere Nachricht")
|
||||||
|
|
||||||
|
# Events vom letzten Turn weglassen
|
||||||
|
self._pending_events = []
|
||||||
|
|
||||||
|
# 1. User-Turn an die Konversation
|
||||||
|
self.conversation.add("user", user_message, source=source)
|
||||||
|
|
||||||
|
# 2. Hot Memory (alle pinned Punkte)
|
||||||
|
hot = self.store.list_pinned()
|
||||||
|
|
||||||
|
# 3. Cold Memory (Top-K semantic)
|
||||||
|
try:
|
||||||
|
qvec = self.embedder.embed(user_message)
|
||||||
|
cold = self.store.search(qvec, k=self.cold_k, exclude_pinned=True)
|
||||||
|
except Exception as exc:
|
||||||
|
logger.warning("Cold-Search fehlgeschlagen: %s", exc)
|
||||||
|
cold = []
|
||||||
|
|
||||||
|
# 4. Aktive Skills holen + Tool-Liste bauen
|
||||||
|
all_skills = skills_mod.list_skills(active_only=False)
|
||||||
|
active_skills = [s for s in all_skills if s.get("active", True)]
|
||||||
|
tools = list(META_TOOLS) + [_skill_to_tool(s) for s in active_skills]
|
||||||
|
|
||||||
|
# 5. System-Prompt + Window-Messages
|
||||||
|
system_prompt = build_system_prompt(hot, cold, skills=all_skills)
|
||||||
|
messages = [ProxyMessage(role="system", content=system_prompt)]
|
||||||
|
for t in self.conversation.window():
|
||||||
|
messages.append(ProxyMessage(role=t.role, content=t.content))
|
||||||
|
|
||||||
|
logger.info("chat: pinned=%d cold=%d skills=%d/%d window=%d prompt_chars=%d",
|
||||||
|
len(hot), len(cold), len(active_skills), len(all_skills),
|
||||||
|
len(self.conversation.window()), len(system_prompt))
|
||||||
|
|
||||||
|
# 6. Tool-Use-Loop
|
||||||
|
final_reply = ""
|
||||||
|
for iteration in range(self.MAX_TOOL_ITERATIONS):
|
||||||
|
result = self.proxy.chat_full(messages, tools=tools)
|
||||||
|
if result.tool_calls:
|
||||||
|
# Assistant-Turn mit tool_calls in messages anhaengen (nicht in Conversation!)
|
||||||
|
messages.append(ProxyMessage(
|
||||||
|
role="assistant",
|
||||||
|
content=result.content or None,
|
||||||
|
tool_calls=[{
|
||||||
|
"id": tc["id"], "type": "function",
|
||||||
|
"function": {"name": tc["name"], "arguments": json.dumps(tc["arguments"])},
|
||||||
|
} for tc in result.tool_calls],
|
||||||
|
))
|
||||||
|
# Tools ausfuehren + Ergebnis als role=tool zurueck
|
||||||
|
for tc in result.tool_calls:
|
||||||
|
tool_result = self._dispatch_tool(tc["name"], tc["arguments"])
|
||||||
|
messages.append(ProxyMessage(
|
||||||
|
role="tool",
|
||||||
|
tool_call_id=tc["id"],
|
||||||
|
name=tc["name"],
|
||||||
|
content=tool_result[:8000],
|
||||||
|
))
|
||||||
|
continue # next iteration mit Tool-Results
|
||||||
|
# Kein Tool-Call mehr → final reply
|
||||||
|
final_reply = (result.content or "").strip()
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
# Loop-Limit erreicht
|
||||||
|
final_reply = "[Tool-Loop-Limit erreicht — ARIA hat zu viele Tool-Calls gemacht ohne fertig zu werden]"
|
||||||
|
logger.warning("Tool-Loop hit MAX_TOOL_ITERATIONS=%d", self.MAX_TOOL_ITERATIONS)
|
||||||
|
|
||||||
|
if not final_reply:
|
||||||
|
raise RuntimeError("Leerer Reply vom Proxy")
|
||||||
|
|
||||||
|
# 7. Assistant-Turn (final reply) in die Conversation
|
||||||
|
self.conversation.add("assistant", final_reply)
|
||||||
|
return final_reply
|
||||||
|
|
||||||
|
# ── Tool-Dispatcher ───────────────────────────────────────
|
||||||
|
|
||||||
|
def _dispatch_tool(self, name: str, arguments: dict) -> str:
|
||||||
|
"""Fuehrt einen Tool-Call aus und gibt ein kurzes Text-Resultat zurueck.
|
||||||
|
Niemals werfen — Fehler werden als Text-Resultat reportet damit Claude
|
||||||
|
weitermachen kann."""
|
||||||
|
try:
|
||||||
|
if name == "skill_create":
|
||||||
|
# ARIA-Skills sind immer Python — execution ist nicht mehr im Schema
|
||||||
|
manifest = skills_mod.create_skill(
|
||||||
|
name=arguments["name"],
|
||||||
|
description=arguments["description"],
|
||||||
|
execution="local-venv",
|
||||||
|
entry_code=arguments["entry_code"],
|
||||||
|
readme=arguments.get("readme", ""),
|
||||||
|
args=arguments.get("args", []),
|
||||||
|
pip_packages=arguments.get("pip_packages", []),
|
||||||
|
author="aria",
|
||||||
|
)
|
||||||
|
# Side-Channel-Event: Stefan soll sehen wenn ARIA was anlegt
|
||||||
|
self._pending_events.append({
|
||||||
|
"type": "skill_created",
|
||||||
|
"skill": {
|
||||||
|
"name": manifest["name"],
|
||||||
|
"description": manifest.get("description", ""),
|
||||||
|
"execution": manifest.get("execution", ""),
|
||||||
|
"active": manifest.get("active", True),
|
||||||
|
"setup_error": manifest.get("setup_error"),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
return f"OK — Skill '{manifest['name']}' erstellt (active={manifest['active']})."
|
||||||
|
if name == "skill_list":
|
||||||
|
items = skills_mod.list_skills(active_only=False)
|
||||||
|
if not items:
|
||||||
|
return "(keine Skills vorhanden)"
|
||||||
|
return "\n".join(
|
||||||
|
f"- {s['name']} ({s['execution']}) {'aktiv' if s.get('active', True) else 'DEAKTIVIERT'}: {s.get('description', '')}"
|
||||||
|
for s in items
|
||||||
|
)
|
||||||
|
if name.startswith("run_"):
|
||||||
|
skill_name = name[len("run_"):]
|
||||||
|
res = skills_mod.run_skill(skill_name, args=arguments)
|
||||||
|
snippet = (res.get("stdout") or "")[:2000] or "(kein stdout)"
|
||||||
|
err = (res.get("stderr") or "")[:500]
|
||||||
|
marker = "OK" if res["ok"] else f"FEHLER (exit={res['exit_code']})"
|
||||||
|
out = f"{marker} · {res['duration_sec']}s\nstdout:\n{snippet}"
|
||||||
|
if err:
|
||||||
|
out += f"\nstderr:\n{err}"
|
||||||
|
return out
|
||||||
|
return f"Unbekanntes Tool: {name}"
|
||||||
|
except Exception as exc:
|
||||||
|
logger.exception("Tool '%s' fehlgeschlagen", name)
|
||||||
|
return f"FEHLER: {exc}"
|
||||||
|
|
||||||
|
# ── Memory-Destillat (laeuft im Hintergrund) ──────────────
|
||||||
|
|
||||||
|
def distill_old_turns(self) -> dict:
|
||||||
|
"""Nimmt die N aeltesten Turns und destilliert sie zu fact-Memories.
|
||||||
|
|
||||||
|
Pattern: separater Claude-Call, lieferte 3-7 JSON-Facts, die als
|
||||||
|
type=fact, source=distilled gespeichert werden. Erfolgreiches
|
||||||
|
Schreiben → Turns aus dem Window entfernen.
|
||||||
|
"""
|
||||||
|
if not self.conversation.needs_distill():
|
||||||
|
return {"distilled": 0, "reason": "kein Bedarf"}
|
||||||
|
|
||||||
|
old_turns = self.conversation.take_oldest_for_distill()
|
||||||
|
if not old_turns:
|
||||||
|
return {"distilled": 0, "reason": "keine alten Turns"}
|
||||||
|
|
||||||
|
# Konversation als Klartext bauen
|
||||||
|
transcript = "\n".join(
|
||||||
|
f"[{t.role.upper()}] {t.content}" for t in old_turns
|
||||||
|
)[:30000] # Cap auf 30k Zeichen damit der Prompt nicht explodiert
|
||||||
|
|
||||||
|
system = (
|
||||||
|
"Du extrahierst aus einer Konversation zwischen Stefan und ARIA die "
|
||||||
|
"wichtigsten dauerhaft relevanten Fakten — keine Smalltalk-Details, "
|
||||||
|
"keine flüchtigen Zustände. Antworte AUSSCHLIESSLICH mit gültigem JSON "
|
||||||
|
"im Format: {\"facts\": [{\"title\": \"kurz, max 80 Zeichen\", "
|
||||||
|
"\"content\": \"1-3 Sätze, konkret und nützlich\"}]}. "
|
||||||
|
"Mindestens 0, höchstens 7 Facts. Wenn nichts wichtig genug ist: leeres Array."
|
||||||
|
)
|
||||||
|
user = (
|
||||||
|
"Hier ist der Konversations-Abschnitt:\n\n"
|
||||||
|
f"{transcript}\n\n"
|
||||||
|
"Extrahiere die wichtigsten Fakten als JSON."
|
||||||
|
)
|
||||||
|
|
||||||
|
try:
|
||||||
|
raw = self.proxy.chat([
|
||||||
|
ProxyMessage(role="system", content=system),
|
||||||
|
ProxyMessage(role="user", content=user),
|
||||||
|
])
|
||||||
|
except Exception as exc:
|
||||||
|
logger.warning("Destillat-Call fehlgeschlagen: %s — Turns bleiben", exc)
|
||||||
|
return {"distilled": 0, "error": str(exc)}
|
||||||
|
|
||||||
|
facts = self._parse_facts(raw)
|
||||||
|
if facts is None:
|
||||||
|
logger.warning("Destillat lieferte unparsbares JSON: %r", raw[:200])
|
||||||
|
return {"distilled": 0, "error": "JSON parse failed", "raw": raw[:200]}
|
||||||
|
|
||||||
|
# Facts in die DB schreiben
|
||||||
|
created = 0
|
||||||
|
for f in facts:
|
||||||
|
content = (f.get("content") or "").strip()
|
||||||
|
if not content:
|
||||||
|
continue
|
||||||
|
title = (f.get("title") or "").strip()[:120] or "Fakt"
|
||||||
|
point = MemoryPoint(
|
||||||
|
id="",
|
||||||
|
type="fact",
|
||||||
|
title=title,
|
||||||
|
content=content,
|
||||||
|
pinned=False,
|
||||||
|
category="konversation",
|
||||||
|
source="distilled",
|
||||||
|
tags=[],
|
||||||
|
)
|
||||||
|
try:
|
||||||
|
vec = self.embedder.embed(content)
|
||||||
|
self.store.upsert(point, vec)
|
||||||
|
created += 1
|
||||||
|
except Exception as exc:
|
||||||
|
logger.warning("Fakt schreiben fehlgeschlagen: %s", exc)
|
||||||
|
|
||||||
|
# Erst nach erfolgreichem Schreiben aus dem Window entfernen
|
||||||
|
last_ts = old_turns[-1].ts
|
||||||
|
self.conversation.commit_distill(last_ts)
|
||||||
|
logger.info("Destillat: %d Facts geschrieben, %d Turns aus Window entfernt",
|
||||||
|
created, len(old_turns))
|
||||||
|
return {"distilled": created, "removed_turns": len(old_turns)}
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _parse_facts(raw: str) -> Optional[list]:
|
||||||
|
if not raw:
|
||||||
|
return None
|
||||||
|
# JSON robust extrahieren — Claude kann Code-Fences setzen
|
||||||
|
cleaned = raw.strip()
|
||||||
|
if cleaned.startswith("```"):
|
||||||
|
# ```json oder ``` rauswerfen
|
||||||
|
cleaned = cleaned.split("\n", 1)[1] if "\n" in cleaned else cleaned[3:]
|
||||||
|
if cleaned.endswith("```"):
|
||||||
|
cleaned = cleaned[: -3]
|
||||||
|
cleaned = cleaned.strip()
|
||||||
|
# Erstes { bis letztes }
|
||||||
|
start = cleaned.find("{")
|
||||||
|
end = cleaned.rfind("}")
|
||||||
|
if start == -1 or end == -1 or end < start:
|
||||||
|
return None
|
||||||
|
try:
|
||||||
|
obj = json.loads(cleaned[start: end + 1])
|
||||||
|
except Exception:
|
||||||
|
return None
|
||||||
|
facts = obj.get("facts") if isinstance(obj, dict) else None
|
||||||
|
if not isinstance(facts, list):
|
||||||
|
return None
|
||||||
|
return facts
|
||||||
@@ -0,0 +1,130 @@
|
|||||||
|
"""
|
||||||
|
Conversation-State — ein einziger Rolling-Window-State fuer ARIAs
|
||||||
|
laufendes Gespraech mit Stefan.
|
||||||
|
|
||||||
|
Stefan-Entscheidung: KEINE Sessions, KEIN Multi-Thread. EIN Strang,
|
||||||
|
intern rollend. Was rausfaellt, wird ggf. destilliert und landet
|
||||||
|
als type=fact Memory in der Vector-DB.
|
||||||
|
|
||||||
|
Persistenz: append-only JSONL unter /data/conversation.jsonl.
|
||||||
|
Bei Restart wird die letzte N gelesen (komplett vermeidet Memory-
|
||||||
|
Overhead bei sehr langen Verlaeufen).
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from dataclasses import dataclass, field, asdict
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
CONVERSATION_FILE = Path(os.environ.get("CONVERSATION_FILE", "/data/conversation.jsonl"))
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class Turn:
|
||||||
|
role: str # "user" | "assistant"
|
||||||
|
content: str
|
||||||
|
ts: str = field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
|
||||||
|
source: str = "" # "app" / "diagnostic" / "stt" — optional
|
||||||
|
|
||||||
|
|
||||||
|
class Conversation:
    """In-memory rolling window of Turns with append-only JSONL persistence.

    There is intentionally only ONE conversation thread (no sessions,
    no multi-thread). Turns that fall out of the window are distilled
    elsewhere into type=fact memories; this class only manages the
    window and its JSONL log.
    """

    def __init__(self, max_window: int = 50, distill_threshold: int = 60,
                 distill_count: int = 30):
        # max_window: number of most-recent turns exposed to the LLM prompt.
        self.max_window = max_window
        # distill_threshold: total turn count above which a distill pass is due.
        self.distill_threshold = distill_threshold
        # distill_count: how many of the oldest turns one distill pass consumes.
        self.distill_count = distill_count
        self.turns: List[Turn] = []
        self._load()

    def _load(self):
        """Rebuild the in-memory window from the JSONL file, if present."""
        if not CONVERSATION_FILE.exists():
            return
        try:
            lines = CONVERSATION_FILE.read_text(encoding="utf-8").splitlines()
        except Exception as exc:
            logger.warning("Konversation laden fehlgeschlagen: %s", exc)
            return
        loaded: List[Turn] = []
        for line in lines:
            line = line.strip()
            if not line:
                continue
            try:
                obj = json.loads(line)
            except Exception:
                # Skip corrupt lines instead of failing the whole load.
                continue
            if obj.get("op") == "distill":
                # Marker: everything up to this timestamp has been distilled.
                drop_until_ts = obj.get("ts", "")
                if drop_until_ts:
                    loaded = [t for t in loaded if t.ts > drop_until_ts]
                continue
            role = obj.get("role")
            content = obj.get("content")
            if role in ("user", "assistant") and isinstance(content, str):
                loaded.append(Turn(role=role, content=content,
                                   ts=obj.get("ts", ""),
                                   source=obj.get("source", "")))
        self.turns = loaded
        logger.info("Konversation geladen: %d Turns aus %s", len(self.turns), CONVERSATION_FILE)

    def _append_to_file(self, record: dict):
        # Best effort: persistence failures are logged but never crash a chat.
        try:
            CONVERSATION_FILE.parent.mkdir(parents=True, exist_ok=True)
            with CONVERSATION_FILE.open("a", encoding="utf-8") as f:
                f.write(json.dumps(record, ensure_ascii=False) + "\n")
        except Exception as exc:
            logger.warning("Konversation persist fehlgeschlagen: %s", exc)

    def add(self, role: str, content: str, source: str = "") -> Turn:
        """Append a turn to the window and persist it; returns the new Turn."""
        t = Turn(role=role, content=content, source=source)
        self.turns.append(t)
        self._append_to_file({
            "ts": t.ts, "role": t.role, "content": t.content, "source": t.source,
        })
        return t

    def window(self) -> List[Turn]:
        """The last max_window turns — these go into the LLM prompt."""
        return self.turns[-self.max_window:]

    def needs_distill(self) -> bool:
        # True once the window has grown past the distill threshold.
        return len(self.turns) > self.distill_threshold

    def take_oldest_for_distill(self) -> List[Turn]:
        """Return the N oldest turns for the distillation call.

        Does NOT remove them — commit_distill does that after a successful
        call, so a failed distill loses nothing."""
        return self.turns[: self.distill_count]

    def commit_distill(self, last_distilled_ts: str):
        """Write a distill marker to disk and drop distilled turns from memory."""
        self._append_to_file({"op": "distill", "ts": last_distilled_ts})
        self.turns = [t for t in self.turns if t.ts > last_distilled_ts]
        logger.info("Distill commit bei ts=%s — Window jetzt %d Turns", last_distilled_ts, len(self.turns))

    def reset(self):
        """Hard reset — use with care (diagnostic button)."""
        try:
            if CONVERSATION_FILE.exists():
                CONVERSATION_FILE.unlink()
        except Exception:
            # Best effort: a failed unlink still clears the in-memory window.
            pass
        self.turns = []
        logger.warning("Konversation komplett zurueckgesetzt")

    def stats(self) -> dict:
        """Small status dict for the diagnostics endpoint."""
        return {
            "turns": len(self.turns),
            "max_window": self.max_window,
            "distill_threshold": self.distill_threshold,
            "needs_distill": self.needs_distill(),
        }
|
||||||
@@ -0,0 +1,528 @@
|
|||||||
|
"""
|
||||||
|
ARIA Brain — FastAPI-Einstieg.
|
||||||
|
|
||||||
|
Phase B Punkt 1: nur Skeleton.
|
||||||
|
- /health → Liveness
|
||||||
|
- /memory/list → alle Punkte (gefiltert)
|
||||||
|
- /memory/pinned → Hot Memory
|
||||||
|
- /memory/search?q=...&k=5 → semantische Suche
|
||||||
|
- /memory/save → neuen Punkt anlegen
|
||||||
|
- /memory/update/{id} → Punkt aendern (re-embed wenn content geaendert)
|
||||||
|
- /memory/delete/{id} → Punkt loeschen
|
||||||
|
- /memory/stats → Anzahl Punkte pro Type
|
||||||
|
|
||||||
|
/chat (Conversation-Loop) und /skills/* kommen in spaeteren Phasen.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
from fastapi import FastAPI, HTTPException, BackgroundTasks, Request
|
||||||
|
from fastapi.responses import Response
|
||||||
|
from pydantic import BaseModel, Field
|
||||||
|
|
||||||
|
from memory import Embedder, VectorStore, MemoryPoint
|
||||||
|
from conversation import Conversation
|
||||||
|
from proxy_client import ProxyClient
|
||||||
|
from agent import Agent
|
||||||
|
import skills as skills_mod
|
||||||
|
import metrics as metrics_mod
|
||||||
|
|
||||||
|
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(name)s: %(message)s")
logger = logging.getLogger("aria-brain")

# Qdrant connection target — defaults match the compose service name/port.
QDRANT_HOST = os.environ.get("QDRANT_HOST", "aria-qdrant")
QDRANT_PORT = int(os.environ.get("QDRANT_PORT", "6333"))

app = FastAPI(title="ARIA Brain", version="0.1.0")

# Lazily created process-wide singletons (see accessor functions below).
_embedder: Optional[Embedder] = None
_store: Optional[VectorStore] = None
_conversation: Optional[Conversation] = None
_proxy: Optional[ProxyClient] = None
_agent: Optional[Agent] = None
|
||||||
|
|
||||||
|
|
||||||
|
def embedder() -> Embedder:
    """Return the process-wide Embedder, creating it on first use."""
    global _embedder
    if _embedder is not None:
        return _embedder
    _embedder = Embedder()
    return _embedder
|
||||||
|
|
||||||
|
|
||||||
|
def store() -> VectorStore:
    """Return the process-wide VectorStore, connecting on first use."""
    global _store
    if _store is not None:
        return _store
    _store = VectorStore(host=QDRANT_HOST, port=QDRANT_PORT)
    return _store
|
||||||
|
|
||||||
|
|
||||||
|
def conversation() -> Conversation:
    """Return the single Conversation instance, loading it on first use."""
    global _conversation
    if _conversation is not None:
        return _conversation
    _conversation = Conversation()
    return _conversation
|
||||||
|
|
||||||
|
|
||||||
|
def proxy_client() -> ProxyClient:
    """Return the process-wide ProxyClient, creating it on first use."""
    global _proxy
    if _proxy is not None:
        return _proxy
    _proxy = ProxyClient()
    return _proxy
|
||||||
|
|
||||||
|
|
||||||
|
def agent() -> Agent:
    """Return the singleton Agent, wiring up all its collaborators on first use."""
    global _agent
    if _agent is not None:
        return _agent
    _agent = Agent(store(), embedder(), conversation(), proxy_client())
    return _agent
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Pydantic-Schemas ─────────────────────────────────────────────────
|
||||||
|
|
||||||
|
class MemoryIn(BaseModel):
    """Request body for creating a new memory point."""
    type: str = Field(..., description="identity|rule|preference|tool|skill|fact|conversation|reminder")
    title: str
    content: str
    pinned: bool = False
    category: str = ""  # free-form, used for UI grouping
    source: str = "manual"
    tags: List[str] = Field(default_factory=list)
    conversation_id: Optional[str] = None  # only used for type=conversation
|
||||||
|
|
||||||
|
|
||||||
|
class MemoryUpdate(BaseModel):
    """Partial update for a memory point — only non-None fields are applied."""
    title: Optional[str] = None
    content: Optional[str] = None
    pinned: Optional[bool] = None
    category: Optional[str] = None
    tags: Optional[List[str]] = None
|
||||||
|
|
||||||
|
|
||||||
|
class MemoryOut(BaseModel):
    """API representation of a stored memory point."""
    id: str
    type: str
    title: str
    content: str
    pinned: bool
    category: str
    source: str
    tags: List[str]
    created_at: str
    updated_at: str
    conversation_id: Optional[str] = None
    score: Optional[float] = None  # similarity score; only set on search hits

    @classmethod
    def from_point(cls, p: MemoryPoint) -> "MemoryOut":
        """Build a MemoryOut from a MemoryPoint dataclass instance."""
        return cls(**p.__dict__)
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Health ───────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@app.get("/health")
def health():
    """Liveness probe; reports degraded status when Qdrant is unreachable."""
    target = f"{QDRANT_HOST}:{QDRANT_PORT}"
    try:
        return {"status": "ok", "memory_count": store().count(), "qdrant": target}
    except Exception as exc:
        return {"status": "degraded", "error": str(exc), "qdrant": target}
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Memory-Endpoints ─────────────────────────────────────────────────
|
||||||
|
|
||||||
|
@app.get("/memory/stats")
def memory_stats():
    """Point counts: total, pinned, and a per-type breakdown."""
    points = store().list_all()
    by_type: dict = {}
    pinned_count = 0
    for point in points:
        by_type[point.type] = by_type.get(point.type, 0) + 1
        if point.pinned:
            pinned_count += 1
    return {"total": len(points), "pinned": pinned_count, "by_type": by_type}
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/memory/list", response_model=List[MemoryOut])
def memory_list(type: Optional[str] = None, limit: int = 200):
    """List memory points, optionally filtered by type."""
    s = store()
    if type:
        points = s.list_by_type(type, limit=limit)
    else:
        points = s.list_all(limit=limit)
    return [MemoryOut.from_point(p) for p in points]
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/memory/pinned", response_model=List[MemoryOut])
def memory_pinned():
    """All pinned points — the hot memory that always enters the prompt."""
    pinned_points = store().list_pinned()
    return [MemoryOut.from_point(p) for p in pinned_points]
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/memory/search", response_model=List[MemoryOut])
def memory_search(q: str, k: int = 5, type: Optional[str] = None, include_pinned: bool = False):
    """Semantic search over memory; pinned points are excluded by default."""
    query_vec = embedder().embed(q)
    hits = store().search(query_vec, k=k, type_filter=type, exclude_pinned=not include_pinned)
    return [MemoryOut.from_point(hit) for hit in hits]
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/memory/save", response_model=MemoryOut)
def memory_save(body: MemoryIn):
    """Embed the content and store a brand-new memory point."""
    s = store()
    vector = embedder().embed(body.content)
    new_point = MemoryPoint(
        id="",  # empty id → the store assigns a fresh UUID
        type=body.type,
        title=body.title,
        content=body.content,
        pinned=body.pinned,
        category=body.category,
        source=body.source,
        tags=body.tags,
        conversation_id=body.conversation_id,
    )
    new_id = s.upsert(new_point, vector)
    return MemoryOut.from_point(s.get(new_id))
|
||||||
|
|
||||||
|
|
||||||
|
@app.patch("/memory/update/{point_id}", response_model=MemoryOut)
def memory_update(point_id: str, body: MemoryUpdate):
    """Apply a partial update; re-embeds only when the content changed.

    Raises 404 when the point does not exist.
    Fixes: removed the unused `qm` import and replaced the obfuscated
    `__import__("datetime")` chain with a normal local import."""
    from datetime import datetime, timezone

    s = store()
    existing = s.get(point_id)
    if not existing:
        raise HTTPException(404, f"Memory {point_id} nicht gefunden")

    content_changed = body.content is not None and body.content != existing.content
    if body.title is not None:
        existing.title = body.title
    if body.content is not None:
        existing.content = body.content
    if body.pinned is not None:
        existing.pinned = body.pinned
    if body.category is not None:
        existing.category = body.category
    if body.tags is not None:
        existing.tags = body.tags

    if content_changed:
        # Content changed → the embedding must be recomputed.
        s.upsert(existing, embedder().embed(existing.content))
    else:
        # Vector unchanged — rewrite only the payload (plus fresh updated_at).
        from memory.vector_store import COLLECTION
        s.client.set_payload(
            collection_name=COLLECTION,
            payload=existing.to_payload() | {"updated_at": datetime.now(timezone.utc).isoformat()},
            points=[point_id],
        )
    return MemoryOut.from_point(s.get(point_id))
|
||||||
|
|
||||||
|
|
||||||
|
@app.delete("/memory/delete/{point_id}")
def memory_delete(point_id: str):
    """Delete a memory point; 404 when the id is unknown."""
    s = store()
    if s.get(point_id) is None:
        raise HTTPException(404, f"Memory {point_id} nicht gefunden")
    s.delete(point_id)
    return {"deleted": point_id}
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Migration aus brain-import/ ──────────────────────────────────────
|
||||||
|
|
||||||
|
# Directory scanned by /memory/migrate and /memory/import-files.
IMPORT_DIR = os.environ.get("IMPORT_DIR", "/import")
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/memory/migrate")
def memory_migrate():
    """Read /import/*.md and write atomic memory points into the DB.

    Idempotent: re-running replaces points sharing the same migration_key."""
    from pathlib import Path
    from migration import run_migration
    return run_migration(Path(IMPORT_DIR), store(), embedder())
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/memory/import-files")
def memory_import_files():
    """List the contents of /import/ — for the diagnostic UI."""
    from pathlib import Path
    import_dir = Path(IMPORT_DIR)
    if not import_dir.exists():
        return {"import_dir": str(import_dir), "exists": False, "files": []}
    files = []
    for entry in sorted(import_dir.iterdir()):
        if not entry.is_file():
            continue
        try:
            files.append({"name": entry.name, "size": entry.stat().st_size})
        except Exception:
            # Entry vanished between listing and stat — skip it.
            pass
    return {"import_dir": str(import_dir), "exists": True, "files": files}
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Bootstrap-Snapshot ───────────────────────────────────────────────
|
||||||
|
# "Bootstrap" = alle pinned Memories. Export/Import zum schnellen
|
||||||
|
# Wiederherstellen einer schlanken ARIA nach Wipe.
|
||||||
|
|
||||||
|
@app.get("/memory/export-bootstrap")
def memory_export_bootstrap():
    """Return all pinned memories as JSON — for a browser download.

    Fix: the obfuscated `__import__("datetime")` calls are replaced by a
    normal local import; behavior is unchanged."""
    from datetime import datetime, timezone

    pinned = store().list_pinned()
    return {
        "version": 1,
        "exported_at": datetime.now(timezone.utc).isoformat(),
        "count": len(pinned),
        "memories": [
            {
                "type": p.type,
                "title": p.title,
                "content": p.content,
                "pinned": True,
                "category": p.category,
                "source": p.source,
                "tags": p.tags,
            }
            for p in pinned
        ],
    }
|
||||||
|
|
||||||
|
|
||||||
|
class BootstrapBundle(BaseModel):
    """Uploaded bootstrap snapshot: a list of memory dicts to import as pinned."""
    version: int = 1
    memories: List[dict]
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/memory/import-bootstrap")
def memory_import_bootstrap(body: BootstrapBundle):
    """Replace all pinned memories with the ones from the bundle.

    Cold (unpinned) memory is left untouched. An empty bundle is rejected:
    wipe-without-replacement is not allowed — export first, then import.
    """
    if not body.memories:
        raise HTTPException(400, "Bundle hat keine memories — Abbruch zur Sicherheit")

    s = store()
    e = embedder()

    # Drop every currently pinned point before re-importing.
    from qdrant_client.http import models as qm
    from memory.vector_store import COLLECTION
    pinned_filter = qm.Filter(must=[
        qm.FieldCondition(key="pinned", match=qm.MatchValue(value=True))
    ])
    s.client.delete(
        collection_name=COLLECTION,
        points_selector=qm.FilterSelector(filter=pinned_filter),
    )

    # Feed the bundle's entries back in as pinned points.
    created = 0
    for entry in body.memories:
        text = (entry.get("content") or "").strip()
        if not text:
            continue
        new_point = MemoryPoint(
            id="",
            type=entry.get("type", "fact"),
            title=entry.get("title", "(ohne Titel)"),
            content=text,
            pinned=True,
            category=entry.get("category", ""),
            source=entry.get("source", "bootstrap-import"),
            tags=list(entry.get("tags", [])),
        )
        s.upsert(new_point, e.embed(text))
        created += 1

    return {"created": created, "deleted_previous_pinned": True}
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Conversation-Loop ──────────────────────────────────────────────
|
||||||
|
|
||||||
|
class ChatIn(BaseModel):
    """Incoming chat message."""
    message: str
    source: str = ""  # "app" / "diagnostic" / "stt" — optional
|
||||||
|
|
||||||
|
|
||||||
|
class ChatOut(BaseModel):
    """Chat response plus rolling-window/distill status."""
    reply: str
    turns: int  # current number of turns in the rolling window
    distilling: bool  # True when a background distill pass was scheduled
    events: list = Field(default_factory=list)
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/chat", response_model=ChatOut)
def chat(body: ChatIn, background: BackgroundTasks):
    """Main chat path. The reply is produced synchronously; memory
    distillation runs in the background after the response went out.

    Raises 400 on invalid input (ValueError from the agent) and 502
    when the upstream call fails (RuntimeError).
    Fix: exceptions are now chained (`raise ... from exc`) so tracebacks
    keep the original cause."""
    a = agent()
    try:
        reply = a.chat(body.message, source=body.source)
    except ValueError as exc:
        raise HTTPException(400, str(exc)) from exc
    except RuntimeError as exc:
        logger.error("chat fehlgeschlagen: %s", exc)
        raise HTTPException(502, str(exc)) from exc

    needs_distill = a.conversation.needs_distill()
    if needs_distill:
        # Schedule AFTER the response is sent; FastAPI runs it post-return.
        background.add_task(a.distill_old_turns)
    return ChatOut(
        reply=reply,
        turns=len(a.conversation.turns),
        distilling=needs_distill,
        events=a.pop_events(),
    )
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/conversation/stats")
def conversation_stats():
    """Rolling-window status: turn count and distill thresholds."""
    current = conversation()
    return current.stats()
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/conversation/reset")
def conversation_reset():
    """Hard reset — empties the rolling window completely.

    Facts that were already distilled stay in the DB."""
    conversation().reset()
    return {"ok": True, "turns": 0}
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/conversation/distill")
def conversation_distill_now():
    """Manually trigger distillation — for tests or before a deliberate reset."""
    result = agent().distill_old_turns()
    return result
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Call-Metrics (Token / Quota-Monitoring) ────────────────────────
|
||||||
|
|
||||||
|
@app.get("/metrics/calls")
def metrics_calls():
    """Aggregates for the 1h / 5h / 24h / 30d windows.

    Each window: {window_seconds, calls, tokens_in, tokens_out, by_model}."""
    return metrics_mod.stats()
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Skills ─────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
class SkillCreate(BaseModel):
    """Request body for creating a new skill."""
    name: str
    description: str
    execution: str  # local-venv | local-bin | bash
    entry_code: str
    readme: str = ""
    args: list = Field(default_factory=list)
    requires: dict = Field(default_factory=dict)
    pip_packages: list = Field(default_factory=list)
    author: str = "stefan"
|
||||||
|
|
||||||
|
|
||||||
|
class SkillRun(BaseModel):
    """Request body for executing a skill."""
    name: str
    args: dict = Field(default_factory=dict)
    timeout_sec: int = 300  # execution timeout passed through to the runner
|
||||||
|
|
||||||
|
|
||||||
|
class SkillPatch(BaseModel):
    """Partial skill update — only non-None fields are applied."""
    description: str | None = None
    active: bool | None = None
    args: list | None = None
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/skills/list")
def skills_list(active_only: bool = False):
    """List all known skills, optionally only the active ones."""
    all_skills = skills_mod.list_skills(active_only=active_only)
    return {"skills": all_skills}
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/skills/{name}")
def skills_get(name: str):
    """Return one skill's manifest and README; 404 when unknown."""
    manifest = skills_mod.read_manifest(name)
    if manifest is None:
        raise HTTPException(404, f"Skill '{name}' nicht gefunden")
    return {"manifest": manifest, "readme": skills_mod.read_readme(name)}
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/skills/create")
def skills_create(body: SkillCreate):
    """Create a new skill on disk; 400 on validation errors.

    Fix: exception is chained (`from exc`) so the original cause survives."""
    try:
        return skills_mod.create_skill(
            name=body.name,
            description=body.description,
            execution=body.execution,
            entry_code=body.entry_code,
            readme=body.readme,
            args=body.args,
            requires=body.requires,
            pip_packages=body.pip_packages,
            author=body.author,
        )
    except ValueError as exc:
        raise HTTPException(400, str(exc)) from exc
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/skills/run")
def skills_run(body: SkillRun):
    """Execute a skill with the given args; 400 on validation errors.

    Fix: exception is chained (`from exc`) so the original cause survives."""
    try:
        return skills_mod.run_skill(body.name, args=body.args, timeout_sec=body.timeout_sec)
    except ValueError as exc:
        raise HTTPException(400, str(exc)) from exc
|
||||||
|
|
||||||
|
|
||||||
|
@app.patch("/skills/{name}")
def skills_patch(name: str, body: SkillPatch):
    """Apply a partial update to a skill; 404 when the skill is unknown.

    Fix: exception is chained (`from exc`) so the original cause survives."""
    # Drop unset fields — only explicit values are applied.
    patch = {k: v for k, v in body.model_dump().items() if v is not None}
    try:
        return skills_mod.update_skill(name, patch)
    except ValueError as exc:
        raise HTTPException(404, str(exc)) from exc
|
||||||
|
|
||||||
|
|
||||||
|
@app.delete("/skills/{name}")
def skills_delete(name: str):
    """Delete a skill; 404 when the skill is unknown.

    Fix: exception is chained (`from exc`) so the original cause survives."""
    try:
        skills_mod.delete_skill(name)
    except ValueError as exc:
        raise HTTPException(404, str(exc)) from exc
    return {"deleted": name}
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/skills/{name}/logs")
def skills_logs(name: str, limit: int = 50):
    """Return the most recent run logs of a skill."""
    recent = skills_mod.list_logs(name, limit=limit)
    return {"logs": recent}
|
||||||
|
|
||||||
|
|
||||||
|
@app.get("/skills/{name}/export")
def skills_export(name: str):
    """Export a skill as a tar.gz download; 404 when the skill is unknown.

    Fix: exception is chained (`from exc`) so the original cause survives."""
    try:
        data = skills_mod.export_skill(name)
    except ValueError as exc:
        raise HTTPException(404, str(exc)) from exc
    return Response(
        content=data,
        media_type="application/gzip",
        headers={"Content-Disposition": f'attachment; filename="skill-{name}.tar.gz"'},
    )
|
||||||
|
|
||||||
|
|
||||||
|
@app.post("/skills/import")
async def skills_import(request: Request, overwrite: bool = False):
    """Import a skill archive from the raw request body; 400 on bad input.

    Fix: exception is chained (`from exc`) so the original cause survives."""
    data = await request.body()
    if not data:
        raise HTTPException(400, "Leerer Body")
    try:
        manifest = skills_mod.import_skill(data, overwrite=overwrite)
    except ValueError as exc:
        raise HTTPException(400, str(exc)) from exc
    return {"imported": manifest}
|
||||||
@@ -0,0 +1,4 @@
|
|||||||
|
from .embedder import Embedder
|
||||||
|
from .vector_store import VectorStore, MemoryPoint, MemoryType
|
||||||
|
|
||||||
|
__all__ = ["Embedder", "VectorStore", "MemoryPoint", "MemoryType"]
|
||||||
@@ -0,0 +1,42 @@
|
|||||||
|
"""
|
||||||
|
Lokaler Embedder fuer Memory-Texte.
|
||||||
|
|
||||||
|
Nutzt sentence-transformers (paraphrase-multilingual-MiniLM-L12-v2):
|
||||||
|
- Deutsch + Englisch
|
||||||
|
- 384-dimensionale Vektoren
|
||||||
|
- Laeuft auf CPU, ~30ms pro kurzer Text
|
||||||
|
- Modell wird beim ersten Aufruf in /data/_models gecached
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import List
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)

# Multilingual sentence-transformers model (German + English), 384-dim output.
MODEL_NAME = "paraphrase-multilingual-MiniLM-L12-v2"
VECTOR_DIM = 384
|
||||||
|
|
||||||
|
|
||||||
|
class Embedder:
    """Thin lazy wrapper around a SentenceTransformer model.

    The model is loaded on the first embed call rather than at
    construction time, so importing this module stays cheap."""

    def __init__(self, model_name: str = MODEL_NAME):
        self.model_name = model_name
        self._model = None

    def _load(self):
        # Import lazily: sentence_transformers is heavy to import.
        if self._model is not None:
            return
        logger.info("Lade Embedding-Modell %s ...", self.model_name)
        from sentence_transformers import SentenceTransformer
        self._model = SentenceTransformer(self.model_name)
        logger.info("Embedding-Modell geladen.")

    def embed(self, text: str) -> List[float]:
        """Embed a single text into a normalized list of floats."""
        self._load()
        return self._model.encode(
            text, convert_to_numpy=True, normalize_embeddings=True
        ).tolist()

    def embed_batch(self, texts: List[str]) -> List[List[float]]:
        """Embed many texts at once; one float list per input text."""
        self._load()
        return self._model.encode(
            texts, convert_to_numpy=True, normalize_embeddings=True
        ).tolist()
|
||||||
@@ -0,0 +1,209 @@
|
|||||||
|
"""
|
||||||
|
Vector-Store-Wrapper um Qdrant.
|
||||||
|
|
||||||
|
Eine Collection "aria_memory" haelt ALLE Memory-Punkte.
|
||||||
|
Trennung nach Type/Pinned-Status via Payload-Filter.
|
||||||
|
|
||||||
|
Punkt-Schema (Payload):
|
||||||
|
type — identity | rule | preference | tool | skill | fact | conversation | reminder
|
||||||
|
category — frei, fuer UI-Gruppierung
|
||||||
|
title — kurze Ueberschrift
|
||||||
|
content — eigentlicher Text (wird embedded)
|
||||||
|
pinned — bool, True = Hot Memory (immer in Prompt)
|
||||||
|
source — import | conversation | manual
|
||||||
|
tags — Liste von Strings
|
||||||
|
created_at, updated_at — ISO-Strings
|
||||||
|
conversation_id — optional, nur fuer type=conversation
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import uuid
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from enum import Enum
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
from qdrant_client import QdrantClient
|
||||||
|
from qdrant_client.http import models as qm
|
||||||
|
|
||||||
|
from .embedder import VECTOR_DIM
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)

# Single Qdrant collection holding ALL memory points; filtering is by payload.
COLLECTION = "aria_memory"
|
||||||
|
|
||||||
|
|
||||||
|
class MemoryType(str, Enum):
    """Closed set of memory point types (stored in the 'type' payload field)."""
    IDENTITY = "identity"
    RULE = "rule"
    PREFERENCE = "preference"
    TOOL = "tool"
    SKILL = "skill"
    FACT = "fact"
    CONVERSATION = "conversation"
    REMINDER = "reminder"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class MemoryPoint:
    """One memory entry as stored in Qdrant (payload fields + optional score)."""
    id: str
    type: str
    title: str
    content: str
    pinned: bool = False
    category: str = ""
    source: str = "manual"
    tags: List[str] = field(default_factory=list)
    created_at: str = ""
    updated_at: str = ""
    conversation_id: Optional[str] = None
    score: Optional[float] = None  # only set on search results

    def to_payload(self) -> dict:
        """Serialize to the Qdrant payload dict (the vector is not included)."""
        payload = {
            "type": self.type,
            "title": self.title,
            "content": self.content,
            "pinned": self.pinned,
            "category": self.category,
            "source": self.source,
            "tags": self.tags,
            "created_at": self.created_at,
            "updated_at": self.updated_at,
        }
        # conversation_id is omitted entirely when empty/None.
        if self.conversation_id:
            payload["conversation_id"] = self.conversation_id
        return payload

    @classmethod
    def from_qdrant(cls, point) -> "MemoryPoint":
        """Build a MemoryPoint from a Qdrant point (scroll row or search hit)."""
        data = point.payload or {}
        return cls(
            id=str(point.id),
            type=data.get("type", "fact"),
            title=data.get("title", ""),
            content=data.get("content", ""),
            pinned=data.get("pinned", False),
            category=data.get("category", ""),
            source=data.get("source", "manual"),
            tags=data.get("tags", []),
            created_at=data.get("created_at", ""),
            updated_at=data.get("updated_at", ""),
            conversation_id=data.get("conversation_id"),
            score=getattr(point, "score", None),
        )
|
||||||
|
|
||||||
|
|
||||||
|
def _now() -> str:
|
||||||
|
return datetime.now(timezone.utc).isoformat()
|
||||||
|
|
||||||
|
|
||||||
|
class VectorStore:
|
||||||
|
def __init__(self, host: str, port: int = 6333):
|
||||||
|
self.client = QdrantClient(host=host, port=port)
|
||||||
|
self._ensure_collection()
|
||||||
|
|
||||||
|
def _ensure_collection(self):
|
||||||
|
existing = [c.name for c in self.client.get_collections().collections]
|
||||||
|
if COLLECTION not in existing:
|
||||||
|
logger.info("Erstelle Collection %s ...", COLLECTION)
|
||||||
|
self.client.create_collection(
|
||||||
|
collection_name=COLLECTION,
|
||||||
|
vectors_config=qm.VectorParams(size=VECTOR_DIM, distance=qm.Distance.COSINE),
|
||||||
|
)
|
||||||
|
# Indexe fuer typische Filter-Felder
|
||||||
|
for field_name in ("type", "pinned", "category", "source", "migration_key"):
|
||||||
|
self.client.create_payload_index(
|
||||||
|
collection_name=COLLECTION,
|
||||||
|
field_name=field_name,
|
||||||
|
field_schema=qm.PayloadSchemaType.KEYWORD if field_name != "pinned"
|
||||||
|
else qm.PayloadSchemaType.BOOL,
|
||||||
|
)
|
||||||
|
|
||||||
|
# ─── Schreib-Operationen ─────────────────────────────────────────
|
||||||
|
|
||||||
|
def upsert(self, point: MemoryPoint, vector: List[float]) -> str:
|
||||||
|
if not point.id:
|
||||||
|
point.id = str(uuid.uuid4())
|
||||||
|
if not point.created_at:
|
||||||
|
point.created_at = _now()
|
||||||
|
point.updated_at = _now()
|
||||||
|
|
||||||
|
self.client.upsert(
|
||||||
|
collection_name=COLLECTION,
|
||||||
|
points=[qm.PointStruct(id=point.id, vector=vector, payload=point.to_payload())],
|
||||||
|
)
|
||||||
|
return point.id
|
||||||
|
|
||||||
|
def delete(self, point_id: str):
|
||||||
|
self.client.delete(
|
||||||
|
collection_name=COLLECTION,
|
||||||
|
points_selector=qm.PointIdsList(points=[point_id]),
|
||||||
|
)
|
||||||
|
|
||||||
|
# ─── Lese-Operationen ────────────────────────────────────────────
|
||||||
|
|
||||||
|
def get(self, point_id: str) -> Optional[MemoryPoint]:
|
||||||
|
result = self.client.retrieve(collection_name=COLLECTION, ids=[point_id], with_payload=True)
|
||||||
|
if not result:
|
||||||
|
return None
|
||||||
|
return MemoryPoint.from_qdrant(result[0])
|
||||||
|
|
||||||
|
def list_pinned(self) -> List[MemoryPoint]:
|
||||||
|
"""Alle pinned Punkte — Hot Memory."""
|
||||||
|
return self._scroll(filter=qm.Filter(must=[
|
||||||
|
qm.FieldCondition(key="pinned", match=qm.MatchValue(value=True))
|
||||||
|
]))
|
||||||
|
|
||||||
|
def list_by_type(self, type_: str, limit: int = 100) -> List[MemoryPoint]:
|
||||||
|
return self._scroll(
|
||||||
|
filter=qm.Filter(must=[
|
||||||
|
qm.FieldCondition(key="type", match=qm.MatchValue(value=type_))
|
||||||
|
]),
|
||||||
|
limit=limit,
|
||||||
|
)
|
||||||
|
|
||||||
|
def list_all(self, limit: int = 1000) -> List[MemoryPoint]:
|
||||||
|
return self._scroll(filter=None, limit=limit)
|
||||||
|
|
||||||
|
def _scroll(self, filter, limit: int = 1000) -> List[MemoryPoint]:
|
||||||
|
points, _ = self.client.scroll(
|
||||||
|
collection_name=COLLECTION,
|
||||||
|
scroll_filter=filter,
|
||||||
|
limit=limit,
|
||||||
|
with_payload=True,
|
||||||
|
with_vectors=False,
|
||||||
|
)
|
||||||
|
return [MemoryPoint.from_qdrant(p) for p in points]
|
||||||
|
|
||||||
|
def search(
    self,
    query_vector: List[float],
    k: int = 5,
    type_filter: Optional[str] = None,
    exclude_pinned: bool = True,
) -> List[MemoryPoint]:
    """Semantic search over the collection.

    By default pinned points are excluded — they are injected into the
    prompt separately via ``list_pinned``.
    """
    must_conditions = []
    must_not_conditions = []
    if type_filter:
        must_conditions.append(
            qm.FieldCondition(key="type", match=qm.MatchValue(value=type_filter))
        )
    if exclude_pinned:
        must_not_conditions.append(
            qm.FieldCondition(key="pinned", match=qm.MatchValue(value=True))
        )

    # Only pass a filter object when at least one condition was requested.
    query_filter = None
    if must_conditions or must_not_conditions:
        query_filter = qm.Filter(
            must=must_conditions or None,
            must_not=must_not_conditions or None,
        )

    hits = self.client.search(
        collection_name=COLLECTION,
        query_vector=query_vector,
        query_filter=query_filter,
        limit=k,
        with_payload=True,
    )
    return [MemoryPoint.from_qdrant(hit) for hit in hits]
|
||||||
|
|
||||||
|
def count(self) -> int:
    """Exact number of points currently stored in the collection."""
    result = self.client.count(collection_name=COLLECTION, exact=True)
    return result.count
|
||||||
@@ -0,0 +1,133 @@
|
|||||||
|
"""
|
||||||
|
Call-Metrics fuer den Proxy-Client.
|
||||||
|
|
||||||
|
Pro Claude-Call wird ein Eintrag in /data/metrics.jsonl angehaengt:
|
||||||
|
|
||||||
|
{"ts": <ms>, "model": "...", "in": <tokens_in_estimate>, "out": <tokens_out_estimate>}
|
||||||
|
|
||||||
|
Tokens-Schaetzung: characters / 4 (Anthropic-Default-Heuristik). Nicht exakt
|
||||||
|
aber gut genug fuer Quota-Monitoring. Wir summieren nicht in-memory weil
|
||||||
|
der Brain-Container neugestartet werden kann — alles auf Disk.
|
||||||
|
|
||||||
|
Auswertung via aggregate(window_seconds) — liefert {calls, tokens_in, tokens_out}
|
||||||
|
fuer die letzten N Sekunden. Lazy gelesen, keine grossen Datenmengen erwartet
|
||||||
|
(bei 1000 Calls/Tag ~70 KB pro Monat).
|
||||||
|
|
||||||
|
Auto-Rotate: bei > 50k Zeilen werden die aeltesten 25k weggeschnitten.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import time
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)

# Where call metrics are appended (JSONL, one object per call).
# Overridable via the METRICS_FILE environment variable for tests/dev setups.
METRICS_FILE = Path(os.environ.get("METRICS_FILE", "/data/metrics.jsonl"))
# Rotate once the file exceeds ROTATE_AT lines, keeping only the newest
# ROTATE_KEEP lines (see _maybe_rotate).
ROTATE_AT = 50_000
ROTATE_KEEP = 25_000
|
||||||
|
|
||||||
|
|
||||||
|
def _estimate_tokens(text: str) -> int:
|
||||||
|
"""Anthropic-Default: ~4 chars pro Token. Grob genug."""
|
||||||
|
if not text:
|
||||||
|
return 0
|
||||||
|
return max(1, len(text) // 4)
|
||||||
|
|
||||||
|
|
||||||
|
def _messages_tokens(messages: list) -> int:
    """Sum the estimated token counts over a list of chat messages.

    Accepts Pydantic-style objects (anything with a ``content`` attribute)
    as well as plain dicts; dict content that is not a string (e.g. a list
    of content blocks) is ignored.
    """
    total = 0
    for msg in messages:
        if hasattr(msg, "content"):
            total += _estimate_tokens(msg.content or "")
        elif isinstance(msg, dict):
            content = msg.get("content") or ""
            if isinstance(content, str):
                total += _estimate_tokens(content)
    return total
|
||||||
|
|
||||||
|
|
||||||
|
def log_call(model: str, messages_in: list, reply_text: str = "") -> None:
    """Append one call-metric line to METRICS_FILE.

    Best-effort: any failure is logged and swallowed so that metrics can
    never break the actual proxy call path.
    """
    try:
        tokens_in = _messages_tokens(messages_in)
        tokens_out = _estimate_tokens(reply_text)
        line = json.dumps({
            "ts": int(time.time() * 1000),  # epoch milliseconds
            "model": model,
            "in": tokens_in,
            "out": tokens_out,
        })
        METRICS_FILE.parent.mkdir(parents=True, exist_ok=True)
        with METRICS_FILE.open("a", encoding="utf-8") as f:
            f.write(line + "\n")
        # Cheap rotation check on every 1000th call of this process.
        # (The previous heuristic keyed on token counts: tiny calls with
        # tokens_in + tokens_out < 4 re-read the whole file on EVERY call,
        # while some message sizes never triggered a check at all.)
        log_call._calls = getattr(log_call, "_calls", 0) + 1
        if log_call._calls % 1000 == 0:
            _maybe_rotate()
    except Exception as exc:
        logger.warning("metrics.log_call: %s", exc)
|
||||||
|
|
||||||
|
|
||||||
|
def _maybe_rotate() -> None:
    """Trim METRICS_FILE to the newest ROTATE_KEEP lines once it exceeds ROTATE_AT.

    Best-effort: failures are only logged.
    """
    try:
        if not METRICS_FILE.exists():
            return
        with METRICS_FILE.open("r", encoding="utf-8") as f:
            all_lines = f.readlines()
        if len(all_lines) > ROTATE_AT:
            kept = all_lines[-ROTATE_KEEP:]
            METRICS_FILE.write_text("".join(kept), encoding="utf-8")
            logger.info("metrics rotated: %d → %d Zeilen", len(all_lines), len(kept))
    except Exception as exc:
        logger.warning("metrics rotate: %s", exc)
|
||||||
|
|
||||||
|
|
||||||
|
def aggregate(window_seconds: int) -> dict:
    """Aggregate all calls from the last *window_seconds* seconds.

    Returns a dict with keys ``window_seconds``, ``calls``, ``tokens_in``,
    ``tokens_out`` and ``by_model`` (call count per model name).
    """
    cutoff_ms = int(time.time() * 1000) - window_seconds * 1000
    calls = 0
    tokens_in = 0
    tokens_out = 0
    by_model: dict[str, int] = {}
    if METRICS_FILE.exists():
        try:
            for raw_line in METRICS_FILE.read_text(encoding="utf-8").splitlines():
                raw_line = raw_line.strip()
                if not raw_line:
                    continue
                try:
                    entry = json.loads(raw_line)
                except Exception:
                    continue  # skip corrupt lines silently
                if entry.get("ts", 0) < cutoff_ms:
                    continue  # older than the requested window
                calls += 1
                tokens_in += int(entry.get("in") or 0)
                tokens_out += int(entry.get("out") or 0)
                model_name = entry.get("model", "?")
                by_model[model_name] = by_model.get(model_name, 0) + 1
        except Exception as exc:
            logger.warning("metrics aggregate: %s", exc)
    return {
        "window_seconds": window_seconds,
        "calls": calls,
        "tokens_in": tokens_in,
        "tokens_out": tokens_out,
        "by_model": by_model,
    }
|
||||||
|
|
||||||
|
|
||||||
|
def stats() -> dict:
    """Full snapshot over the most important aggregation windows."""
    windows = {
        "h1": 3600,
        "h5": 5 * 3600,
        "h24": 24 * 3600,
        "d30": 30 * 24 * 3600,
    }
    return {label: aggregate(seconds) for label, seconds in windows.items()}
|
||||||
@@ -0,0 +1,399 @@
|
|||||||
|
"""
|
||||||
|
Migration aus aria-data/brain-import/ → Vector-DB.
|
||||||
|
|
||||||
|
Parst die mitgelieferten Markdown-Dateien (AGENT.md, USER.md, TOOLING.md)
|
||||||
|
und zerlegt sie in atomare Memory-Punkte. Jeder Punkt bekommt:
|
||||||
|
|
||||||
|
source = "import"
|
||||||
|
migration_key = stabiler Identifier (z.B. "agent.md/rule-1") fuer Idempotenz
|
||||||
|
pinned = True
|
||||||
|
|
||||||
|
Beim Re-Run werden vorhandene Punkte mit gleicher migration_key entfernt
|
||||||
|
und neu geschrieben.
|
||||||
|
|
||||||
|
Mapping pro Datei:
|
||||||
|
|
||||||
|
AGENT.md
|
||||||
|
"Identitaet" → 1 Punkt type=identity
|
||||||
|
"Persoenlichkeit" (Intro) → 1 Punkt type=identity
|
||||||
|
"Kern-Eigenschaften" (Liste) → 1 Punkt pro Bullet type=identity
|
||||||
|
"Tool-Freigaben" → 1 Punkt type=tool
|
||||||
|
"Sicherheitsregeln" (Liste) → 1 Punkt pro Bullet type=rule
|
||||||
|
"Arbeitsprinzipien" (Liste) → 1 Punkt pro Bullet type=rule
|
||||||
|
"Dateien an Stefan zurueckgeben"→ 1 Punkt type=skill
|
||||||
|
"Stimme" → 1 Punkt type=tool
|
||||||
|
|
||||||
|
USER.md
|
||||||
|
"Allgemein" (Liste) → 1 Punkt pro Bullet type=preference
|
||||||
|
"Bestaetigung erforderlich" → 1 Punkt type=preference
|
||||||
|
"Autonomes Arbeiten OK fuer" → 1 Punkt type=preference
|
||||||
|
"Tools & Infrastruktur" → 1 Punkt type=preference
|
||||||
|
|
||||||
|
TOOLING.md
|
||||||
|
gesamter Inhalt → 1 Punkt type=tool, title="Tooling-Stack"
|
||||||
|
|
||||||
|
BOOTSTRAP.md ist eine Variante von AGENT.md — wird (vorerst) ignoriert
|
||||||
|
damit keine doppelten Punkte landen.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Optional
|
||||||
|
|
||||||
|
from memory import Embedder, VectorStore, MemoryPoint
|
||||||
|
from memory.vector_store import COLLECTION
|
||||||
|
from qdrant_client.http import models as qm
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)  # module-level logger, standard convention
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class _Block:
    """One markdown section: heading text plus the raw body below it."""
    # Heading text without the leading "##"/"###" marker.
    title: str
    # Raw section body, newline-joined; may be empty.
    content: str
|
||||||
|
|
||||||
|
|
||||||
|
def _split_h2(md: str) -> List[_Block]:
    """Split markdown into its H2 sections; text before the first H2 is dropped."""
    heading_re = re.compile(r"^##\s+(.+?)\s*$")
    sections: List[_Block] = []
    active: Optional[_Block] = None
    for line in md.splitlines():
        match = heading_re.match(line)
        if match and not line.startswith("### "):  # defensive: never treat an H3 as H2
            if active:
                sections.append(active)
            active = _Block(title=match.group(1).strip(), content="")
        elif active is not None:
            active.content += line + "\n"
    if active:
        sections.append(active)
    return sections
|
||||||
|
|
||||||
|
|
||||||
|
def _split_h3(content: str) -> List[_Block]:
    """Split an H2 body into its H3 subsections.

    Text preceding the first H3 is emitted as a block titled "_intro".
    """
    h3_re = re.compile(r"^###\s+(.+?)\s*$")
    result: List[_Block] = []
    intro: List[str] = []
    section: Optional[_Block] = None
    for line in content.splitlines():
        match = h3_re.match(line)
        if match:
            # Emit the intro exactly once, right before the first H3.
            if section is None and intro:
                result.append(_Block(title="_intro", content="\n".join(intro).strip()))
            if section:
                result.append(section)
            section = _Block(title=match.group(1).strip(), content="")
        elif section is None:
            intro.append(line)
        else:
            section.content += line + "\n"
    if section:
        result.append(section)
    elif intro:
        # No H3 at all: the whole body is the intro.
        result.append(_Block(title="_intro", content="\n".join(intro).strip()))
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def _extract_bullets(content: str) -> List[tuple[str, str]]:
|
||||||
|
"""Findet "- **Title** — Body" oder "N. **Title** — Body" Bullets.
|
||||||
|
|
||||||
|
Returns: Liste von (title, full_bullet_text).
|
||||||
|
"""
|
||||||
|
bullets: List[tuple[str, str]] = []
|
||||||
|
current_lines: List[str] = []
|
||||||
|
current_title: Optional[str] = None
|
||||||
|
|
||||||
|
def flush():
|
||||||
|
if current_title and current_lines:
|
||||||
|
bullets.append((current_title, "\n".join(current_lines).strip()))
|
||||||
|
|
||||||
|
for line in content.splitlines():
|
||||||
|
m = re.match(r"^\s*(?:[-*]|\d+\.)\s+\*\*([^*]+?)\*\*\s*[—\-:]?\s*(.*)$", line)
|
||||||
|
if m:
|
||||||
|
flush()
|
||||||
|
current_title = m.group(1).strip()
|
||||||
|
current_lines = [line]
|
||||||
|
continue
|
||||||
|
# Folge-Zeilen mit Einrueckung gehoeren zum aktuellen Bullet
|
||||||
|
if current_title and (line.startswith(" ") or line.startswith("\t") or not line.strip()):
|
||||||
|
current_lines.append(line)
|
||||||
|
continue
|
||||||
|
if current_title and not re.match(r"^\s*(?:[-*]|\d+\.)\s+", line):
|
||||||
|
current_lines.append(line)
|
||||||
|
continue
|
||||||
|
# Neuer Bullet ohne **Title** Format
|
||||||
|
if re.match(r"^\s*(?:[-*]|\d+\.)\s+", line):
|
||||||
|
flush()
|
||||||
|
text = re.sub(r"^\s*(?:[-*]|\d+\.)\s+", "", line).strip()
|
||||||
|
short_title = (text[:60] + "…") if len(text) > 60 else text
|
||||||
|
bullets.append((short_title, line.strip()))
|
||||||
|
current_title = None
|
||||||
|
current_lines = []
|
||||||
|
flush()
|
||||||
|
return bullets
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Pro Datei eine Parser-Funktion ──────────────────────────────────
|
||||||
|
|
||||||
|
def _parse_agent_md(md: str, source_file: str) -> List[MemoryPoint]:
    """Parse AGENT.md / BOOTSTRAP.md into pinned memory points.

    H2 section titles drive the mapping (identity, rules, tools, skills);
    unknown sections fall back to unpinned "fact" points. See the module
    docstring for the full mapping table.
    """
    points: List[MemoryPoint] = []
    h2_blocks = _split_h2(md)
    for h2 in h2_blocks:
        title = h2.title
        content = h2.content.strip()
        if not content:
            continue

        if title.lower() == "identitaet" or title.lower() == "identität":
            points.append(_mk(
                type_="identity", title="ARIA — Identitaet",
                content=f"## {title}\n\n{content}",
                category="persoenlichkeit",
                migration_key=f"{source_file}/identity",
            ))

        elif title.lower() == "persoenlichkeit" or title.lower() == "persönlichkeit":
            # Separate the intro paragraph from the "Kern-Eigenschaften" list.
            sub = _split_h3(content)
            for s in sub:
                if s.title == "_intro" and s.content.strip():
                    points.append(_mk(
                        type_="identity", title="Persoenlichkeit — Grundsatz",
                        content=s.content.strip(),
                        category="persoenlichkeit",
                        migration_key=f"{source_file}/personality-intro",
                    ))
                elif s.title.lower().startswith("kern"):
                    # One point per core-trait bullet.
                    for idx, (btitle, btext) in enumerate(_extract_bullets(s.content), 1):
                        points.append(_mk(
                            type_="identity", title=f"Eigenschaft: {btitle}",
                            content=btext, category="persoenlichkeit",
                            migration_key=f"{source_file}/personality-trait-{idx}",
                        ))

        elif "sicherheitsregel" in title.lower():
            # One rule point per security bullet.
            for idx, (btitle, btext) in enumerate(_extract_bullets(content), 1):
                points.append(_mk(
                    type_="rule", title=f"Sicherheit: {btitle}",
                    content=btext, category="sicherheit",
                    migration_key=f"{source_file}/security-{idx}",
                ))

        elif "arbeitsprinzipien" in title.lower() or "arbeitsprinzip" in title.lower():
            # One rule point per working-principle bullet.
            for idx, (btitle, btext) in enumerate(_extract_bullets(content), 1):
                points.append(_mk(
                    type_="rule", title=f"Prinzip: {btitle}",
                    content=btext, category="arbeitsweise",
                    migration_key=f"{source_file}/work-principle-{idx}",
                ))

        elif "tool-freigaben" in title.lower() or "tool freigaben" in title.lower():
            points.append(_mk(
                type_="tool", title="Tool-Freigaben — Vollzugriff",
                content=content, category="infrastruktur",
                migration_key=f"{source_file}/tool-access",
            ))

        elif "dateien an stefan" in title.lower() or "dateien zurueckgeben" in title.lower() or "dateien zur" in title.lower():
            points.append(_mk(
                type_="skill", title="Dateien an User zurueckgeben",
                content=content, category="ausgabe",
                migration_key=f"{source_file}/file-return-skill",
            ))

        elif title.lower() == "stimme":
            points.append(_mk(
                type_="tool", title="Stimme (F5-TTS)",
                content=content, category="infrastruktur",
                migration_key=f"{source_file}/voice",
            ))

        # Permanent approvals (found in BOOTSTRAP) — stored as a rule.
        elif "freigaben" in title.lower():
            points.append(_mk(
                type_="rule", title=title,
                content=content, category="freigaben",
                migration_key=f"{source_file}/permissions",
            ))

        else:
            # Unknown section: store as a generic fact, NOT pinned.
            logger.info("Unbekannter H2-Block '%s' in %s — als fact (unpinned)", title, source_file)
            points.append(_mk(
                type_="fact", title=f"{source_file}: {title}",
                content=content, pinned=False,
                migration_key=f"{source_file}/section-{title.lower().replace(' ', '-')}",
            ))
    return points
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_user_md(md: str, source_file: str) -> List[MemoryPoint]:
    """Turn USER.md H2 sections into pinned "preference" points.

    Template placeholders (example tools, "<Username>") are skipped so an
    unedited USER.md.example does not pollute memory. The "Allgemein"
    section is exploded into one point per bullet; every other section
    becomes a single point keyed by a slug of its title.
    """
    result: List[MemoryPoint] = []
    for section in _split_h2(md):
        heading = section.title
        body = section.content.strip()
        if not body:
            continue
        # Filter out template placeholder sections.
        if "<Beispiel-Tool>" in body or "<Username>" in heading:
            continue
        if heading.lower() == "allgemein":
            for idx, (bullet_title, bullet_text) in enumerate(_extract_bullets(body), 1):
                # Skip example bullets left over from the template.
                if "<z.B." in bullet_text or "<XYZ>" in bullet_text:
                    continue
                result.append(_mk(
                    type_="preference", title=f"User: {bullet_title}",
                    content=bullet_text, category="allgemein",
                    migration_key=f"{source_file}/general-{idx}",
                ))
        else:
            slug = re.sub(r"[^a-z0-9]+", "-", heading.lower()).strip("-") or "allgemein"
            result.append(_mk(
                type_="preference", title=heading,
                content=body, category=slug,
                migration_key=f"{source_file}/{slug}",
            ))
    return result
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_tooling_md(md: str, source_file: str) -> List[MemoryPoint]:
    """Store TOOLING.md as one pinned "tool" point covering the whole file."""
    body = md.strip()
    if not body:
        return []
    point = _mk(
        type_="tool", title="Tooling-Stack (VM)",
        content=body, category="infrastruktur",
        migration_key=f"{source_file}/tooling-full",
    )
    return [point]
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Helper ─────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
def _mk(
    type_: str,
    title: str,
    content: str,
    migration_key: str,
    pinned: bool = True,
    category: str = "",
) -> MemoryPoint:
    """Build an import-sourced MemoryPoint; id and timestamps are assigned later."""
    point = MemoryPoint(
        id="",
        type=type_,
        title=title,
        content=content.strip(),
        pinned=pinned,
        category=category,
        source="import",
        tags=[],
    )
    # The migration key is addressed via a payload index — run_migration
    # copies it into the payload manually; stash it on the instance here.
    point._migration_key = migration_key
    return point
|
||||||
|
|
||||||
|
|
||||||
|
# ─── Eintrittspunkt ─────────────────────────────────────────────────
|
||||||
|
|
||||||
|
def run_migration(
    import_dir: Path,
    store: VectorStore,
    embedder: Embedder,
) -> dict:
    """Read all known .md files from *import_dir*, parse them, write to the DB.

    Idempotent: existing points with the same migration_key are deleted and
    re-written on every run.

    Returns: {"created": int, "updated": int, "skipped": int, "files": [...]}
    """
    # Hoisted out of the per-point loop below, where the original re-imported
    # datetime/uuid on every iteration.
    import uuid
    from datetime import datetime, timezone

    if not import_dir.exists():
        return {"created": 0, "updated": 0, "skipped": 0, "files": [], "error": f"{import_dir} nicht gefunden"}

    parsers = {
        "AGENT.md": _parse_agent_md,
        "BOOTSTRAP.md": _parse_agent_md,  # same parser; entries may overlap
        "USER.md": _parse_user_md,
        "USER.md.example": _parse_user_md,
        "TOOLING.md": _parse_tooling_md,
        "TOOLING.md.example": _parse_tooling_md,
    }

    # USER.md takes precedence over USER.md.example (likewise for TOOLING).
    file_priority = ["AGENT.md", "BOOTSTRAP.md", "USER.md", "USER.md.example",
                     "TOOLING.md", "TOOLING.md.example"]
    seen_kinds: set[str] = set()  # "USER" / "TOOLING" — processed once each

    points: List[MemoryPoint] = []
    processed_files: List[str] = []

    for fname in file_priority:
        fp = import_dir / fname
        if not fp.exists():
            continue
        kind = fname.split(".")[0]  # "AGENT", "BOOTSTRAP", "USER", "TOOLING"
        # .example variants only when the real file is missing
        if kind in ("USER", "TOOLING") and kind in seen_kinds:
            continue
        seen_kinds.add(kind)
        parser = parsers.get(fname)
        if not parser:
            continue
        try:
            md = fp.read_text(encoding="utf-8")
            file_points = parser(md, fname)
            points.extend(file_points)
            processed_files.append(f"{fname} ({len(file_points)})")
            logger.info("Migration: %s → %d Punkte", fname, len(file_points))
        except Exception as exc:
            logger.exception("Migration: %s fehlgeschlagen", fname)
            processed_files.append(f"{fname} (FEHLER: {exc})")

    if not points:
        return {"created": 0, "updated": 0, "skipped": 0, "files": processed_files}

    # First delete stale points carrying any migration_key we are about to write.
    migration_keys = [k for k in (getattr(p, "_migration_key", None) for p in points) if k]
    if migration_keys:
        store.client.delete(
            collection_name=COLLECTION,
            points_selector=qm.FilterSelector(filter=qm.Filter(must=[
                qm.FieldCondition(key="migration_key", match=qm.MatchAny(any=migration_keys))
            ])),
        )
        logger.info("Migration: %d alte Punkte mit gleicher migration_key entfernt", len(migration_keys))

    # Embed all point contents in one batch.
    vectors = embedder.embed_batch([p.content for p in points])

    # One timestamp for the whole run, one batched upsert instead of one
    # network round-trip per point (the original upserted point-by-point).
    now = datetime.now(timezone.utc).isoformat()
    structs = []
    for p, vec in zip(points, vectors):
        payload = p.to_payload()
        mkey = getattr(p, "_migration_key", None)
        if mkey:
            payload["migration_key"] = mkey
        payload["created_at"] = now
        payload["updated_at"] = now
        structs.append(qm.PointStruct(id=str(uuid.uuid4()), vector=vec, payload=payload))

    store.client.upsert(collection_name=COLLECTION, points=structs)
    created = len(structs)

    # "updated"/"skipped" are always 0 here (migration rewrites everything),
    # but the keys are included for consistency with the docstring and the
    # early-return paths above.
    return {
        "created": created,
        "updated": 0,
        "skipped": 0,
        "files": processed_files,
        "import_dir": str(import_dir),
    }
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user