feat: App TTS-Einstellungen vereinfacht + Mund-Button fuer lokales Muten

SettingsScreen:
- Piper-Reste entfernt (defaultVoice, highlightVoice, Speed-Slider,
  Highlight-Trigger-Info)
- Nur noch EIN Toggle 'Sprachausgabe auf diesem Geraet' — geraetelokal,
  persistent in aria_tts_enabled (AsyncStorage)
- Keine Config-Propagation mehr via RVS (das waere ja global gewesen)
- Hinweis dass Stimme + Voice-Cloning zentral in der Diagnose sind

ChatScreen: Mund-Button (👄 / 🤐)
- Neben Ohr-Button im Eingabebereich, NUR sichtbar wenn TTS im Setting
  grundsaetzlich aktiv ist
- Tap toggelt Mute: 👄 an / 🤐 rot gemutet
- Persistent in aria_tts_muted (AsyncStorage)
- Stoppt bei Muten sofort laufende Wiedergabe (stopPlayback)
- Settings-Toggle wird alle 2s gepollt, damit Aenderungen greifen
  (einfache Loesung ohne globalen State-Context)

Audio-Handling respektiert lokalen Zustand
- Incoming audio/audio_pcm: nur abspielen wenn ttsDeviceEnabled && !ttsMuted
- Cache wird TROTZDEM immer geschrieben — Play-Button funktioniert
  spaeter aus Cache, auch waehrend Mute
- audioService.handlePcmChunk akzeptiert silent-Flag: ueberspringt AudioTrack,
  baut aber weiterhin den WAV-Cache pro messageId

Jedes Android-Geraet mit der App hat seinen eigenen Mute-Zustand.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
duffyduck 2026-04-19 22:33:36 +02:00
parent f801d99748
commit 40e48b046b
3 changed files with 78 additions and 154 deletions

View File

@ -107,6 +107,9 @@ const ChatScreen: React.FC = () => {
const [searchVisible, setSearchVisible] = useState(false);
const [pendingAttachments, setPendingAttachments] = useState<{file: any, isPhoto: boolean}[]>([]);
const [agentActivity, setAgentActivity] = useState<{activity: string, tool: string}>({activity: 'idle', tool: ''});
// Gerätelokale TTS-Config: globaler Toggle (aus Settings) + temporäres Muten (Mund-Button)
const [ttsDeviceEnabled, setTtsDeviceEnabled] = useState(true);
const [ttsMuted, setTtsMuted] = useState(false);
const flatListRef = useRef<FlatList>(null);
const messageIdCounter = useRef(0);
@ -117,6 +120,30 @@ const ChatScreen: React.FC = () => {
return `msg_${Date.now()}_${messageIdCounter.current}`;
};
// Load the device-local TTS settings on mount, then re-read them every 2s so
// a toggle flipped in SettingsScreen takes effect here without a restart.
// NOTE(review): despite the original comment, there is no screen-focus
// listener — changes are only picked up by the 2s poll below; verify whether
// a navigation focus listener was intended.
useEffect(() => {
const loadTtsSettings = async () => {
const enabled = await AsyncStorage.getItem('aria_tts_enabled');
setTtsDeviceEnabled(enabled !== 'false'); // default true (unset or any value but 'false')
const muted = await AsyncStorage.getItem('aria_tts_muted');
setTtsMuted(muted === 'true'); // default false (only explicit 'true' mutes)
};
loadTtsSettings();
// Simple 2s poll instead of a global state context / event emitter.
const interval = setInterval(loadTtsSettings, 2000);
return () => clearInterval(interval); // stop polling on unmount
}, []);
// Toggle the device-local TTS mute flag (the mouth button in the input bar).
// The new value is persisted to AsyncStorage so it survives app restarts.
const toggleMute = useCallback(() => {
setTtsMuted(prev => {
const next = !prev;
// Persist the device-local mute state.
AsyncStorage.setItem('aria_tts_muted', String(next));
// When muting, immediately stop any TTS playback already in progress.
// NOTE(review): side effects inside a state updater may run twice under
// React StrictMode — consider hoisting setItem/stopPlayback out of it.
if (next) audioService.stopPlayback();
return next;
});
}, []);
// Chat-Verlauf aus AsyncStorage laden
const isInitialLoad = useRef(true);
useEffect(() => {
@ -258,12 +285,13 @@ const ChatScreen: React.FC = () => {
});
}
// TTS-Audio abspielen wenn vorhanden
// TTS-Audio abspielen wenn vorhanden — respektiert geraetelokalen Mute/Disable
const canPlay = ttsDeviceEnabled && !ttsMuted;
if (message.type === 'audio' && message.payload.base64) {
const b64 = message.payload.base64 as string;
const refId = (message.payload.messageId as string) || '';
audioService.playAudio(b64);
// Wenn messageId mitgeliefert wurde: Audio in Cache speichern + Pfad in Message eintragen
if (canPlay) audioService.playAudio(b64);
// Cache IMMER schreiben — Play-Button soll auch bei Mute spaeter funktionieren
if (refId) {
audioService.cacheAudio(b64, refId).then(audioPath => {
if (!audioPath) return;
@ -274,12 +302,11 @@ const ChatScreen: React.FC = () => {
}
}
// XTTS PCM-Stream: direkt an AudioTrack, bei final WAV-Cache schreiben
// XTTS PCM-Stream: Cache IMMER bauen, Playback nur wenn nicht gemutet
if (message.type === ('audio_pcm' as any)) {
const p = message.payload as any;
const p = { ...(message.payload as any), silent: !canPlay };
const refId = (p.messageId as string) || '';
audioService.handlePcmChunk(p).then((audioPath: any) => {
// Wenn final + Cache-Pfad zurueckkam, Message aktualisieren
if (p.final && audioPath && refId) {
setMessages(prev => prev.map(m =>
m.messageId === refId ? { ...m, audioPath } : m
@ -825,6 +852,17 @@ const ChatScreen: React.FC = () => {
disabled={connectionState !== 'connected'}
wakeWordActive={wakeWordActive}
/>
{/* Mund-Button: TTS auf diesem Geraet muten/aufheben.
Nur sichtbar wenn TTS in den Settings grundsaetzlich aktiv ist. */}
{ttsDeviceEnabled && (
<TouchableOpacity
style={[styles.wakeWordBtn, ttsMuted && styles.mouthBtnMuted]}
onPress={toggleMute}
accessibilityLabel={ttsMuted ? 'Sprachausgabe einschalten' : 'Sprachausgabe stumm schalten'}
>
<Text style={styles.wakeWordIcon}>{ttsMuted ? '🤐' : '👄'}</Text>
</TouchableOpacity>
)}
<TouchableOpacity
style={[styles.wakeWordBtn, wakeWordActive && styles.wakeWordBtnActive]}
onPress={toggleWakeWord}
@ -1042,6 +1080,9 @@ const styles = StyleSheet.create({
wakeWordBtnActive: {
backgroundColor: 'rgba(52, 199, 89, 0.3)',
},
mouthBtnMuted: {
backgroundColor: 'rgba(255, 59, 48, 0.25)',
},
wakeWordIcon: {
fontSize: 16,
},

View File

@ -72,10 +72,6 @@ const SettingsScreen: React.FC = () => {
const [autoDownload, setAutoDownload] = useState(true);
const [storageSize, setStorageSize] = useState('...');
const [ttsEnabled, setTtsEnabled] = useState(true);
const [defaultVoice, setDefaultVoice] = useState('ramona');
const [highlightVoice, setHighlightVoice] = useState('thorsten');
const [speedRamona, setSpeedRamona] = useState(1.0);
const [speedThorsten, setSpeedThorsten] = useState(1.0);
const [editingPath, setEditingPath] = useState(false);
const [tempPath, setTempPath] = useState('');
@ -99,18 +95,6 @@ const SettingsScreen: React.FC = () => {
AsyncStorage.getItem('aria_tts_enabled').then(saved => {
if (saved !== null) setTtsEnabled(saved === 'true');
});
AsyncStorage.getItem('aria_default_voice').then(saved => {
if (saved) setDefaultVoice(saved);
});
AsyncStorage.getItem('aria_highlight_voice').then(saved => {
if (saved) setHighlightVoice(saved);
});
AsyncStorage.getItem('aria_speed_ramona').then(saved => {
if (saved) setSpeedRamona(parseFloat(saved));
});
AsyncStorage.getItem('aria_speed_thorsten').then(saved => {
if (saved) setSpeedThorsten(parseFloat(saved));
});
}, []);
// Speichergroesse berechnen
@ -462,131 +446,28 @@ const SettingsScreen: React.FC = () => {
</View>
</View>
{/* === Sprachausgabe === */}
{/* === Sprachausgabe (geraetelokal) === */}
<Text style={styles.sectionTitle}>Sprachausgabe</Text>
<View style={styles.card}>
{/* TTS An/Aus */}
<View style={styles.toggleRow}>
<View style={styles.toggleInfo}>
<Text style={styles.toggleLabel}>Sprachausgabe</Text>
<Text style={styles.toggleHint}>ARIA antwortet per Sprache (TTS)</Text>
<Text style={styles.toggleLabel}>Sprachausgabe auf diesem Geraet</Text>
<Text style={styles.toggleHint}>
Nur lokal andere Geraete sind unabhaengig.
Wenn aus, erscheint im Chat auch kein Mund-Button.
Stimme und Voice-Cloning werden zentral in der Diagnose eingestellt.
</Text>
</View>
<Switch
value={ttsEnabled}
onValueChange={(val) => {
setTtsEnabled(val);
AsyncStorage.setItem('aria_tts_enabled', String(val));
rvs.send('config' as any, { ttsEnabled: val });
}}
trackColor={{ false: '#2A2A3E', true: '#0096FF' }}
thumbColor={ttsEnabled ? '#FFFFFF' : '#666680'}
/>
</View>
{/* Standard-Stimme */}
<View style={{marginTop: 16}}>
<Text style={styles.toggleLabel}>Standard-Stimme</Text>
<Text style={styles.toggleHint}>Fuer normale Antworten und Gespraeche</Text>
<View style={{flexDirection: 'row', gap: 8, marginTop: 8}}>
<TouchableOpacity
style={[styles.voiceBtn, defaultVoice === 'ramona' && styles.voiceBtnActive]}
onPress={() => { setDefaultVoice('ramona'); AsyncStorage.setItem('aria_default_voice', 'ramona'); rvs.send('config' as any, { defaultVoice: 'ramona' }); }}
>
<Text style={styles.voiceBtnIcon}>{'\uD83D\uDE4E\u200D\u2640\uFE0F'}</Text>
<Text style={[styles.voiceBtnText, defaultVoice === 'ramona' && styles.voiceBtnTextActive]}>Ramona</Text>
<Text style={styles.voiceBtnHint}>Weiblich, warm</Text>
</TouchableOpacity>
<TouchableOpacity
style={[styles.voiceBtn, defaultVoice === 'thorsten' && styles.voiceBtnActive]}
onPress={() => { setDefaultVoice('thorsten'); AsyncStorage.setItem('aria_default_voice', 'thorsten'); rvs.send('config' as any, { defaultVoice: 'thorsten' }); }}
>
<Text style={styles.voiceBtnIcon}>{'\uD83E\uDDD4'}</Text>
<Text style={[styles.voiceBtnText, defaultVoice === 'thorsten' && styles.voiceBtnTextActive]}>Thorsten</Text>
<Text style={styles.voiceBtnHint}>Maennlich, tief</Text>
</TouchableOpacity>
</View>
</View>
{/* Highlight-Stimme */}
<View style={{marginTop: 16}}>
<Text style={styles.toggleLabel}>Highlight-Stimme</Text>
<Text style={styles.toggleHint}>Fuer besondere Ereignisse (Deploy, Alarm, Erfolg)</Text>
<View style={{flexDirection: 'row', gap: 8, marginTop: 8}}>
<TouchableOpacity
style={[styles.voiceBtn, highlightVoice === 'thorsten' && styles.voiceBtnActive]}
onPress={() => { setHighlightVoice('thorsten'); AsyncStorage.setItem('aria_highlight_voice', 'thorsten'); rvs.send('config' as any, { highlightVoice: 'thorsten' }); }}
>
<Text style={styles.voiceBtnIcon}>{'\uD83E\uDDD4'}</Text>
<Text style={[styles.voiceBtnText, highlightVoice === 'thorsten' && styles.voiceBtnTextActive]}>Thorsten</Text>
</TouchableOpacity>
<TouchableOpacity
style={[styles.voiceBtn, highlightVoice === 'ramona' && styles.voiceBtnActive]}
onPress={() => { setHighlightVoice('ramona'); AsyncStorage.setItem('aria_highlight_voice', 'ramona'); rvs.send('config' as any, { highlightVoice: 'ramona' }); }}
>
<Text style={styles.voiceBtnIcon}>{'\uD83D\uDE4E\u200D\u2640\uFE0F'}</Text>
<Text style={[styles.voiceBtnText, highlightVoice === 'ramona' && styles.voiceBtnTextActive]}>Ramona</Text>
</TouchableOpacity>
</View>
</View>
{/* Sprechgeschwindigkeit Ramona */}
<View style={{marginTop: 16}}>
<Text style={styles.toggleLabel}>Ramona Speed: {speedRamona.toFixed(1)}x</Text>
<View style={{flexDirection: 'row', justifyContent: 'space-around', marginTop: 8}}>
{[0.5, 0.75, 1.0, 1.25, 1.5, 2.0].map(speed => (
<TouchableOpacity
key={speed}
onPress={() => {
setSpeedRamona(speed);
AsyncStorage.setItem('aria_speed_ramona', String(speed));
rvs.send('config' as any, { speedRamona: speed });
}}
style={{
paddingHorizontal: 10, paddingVertical: 6, borderRadius: 6,
backgroundColor: speedRamona === speed ? '#0096FF' : '#1E1E2E',
}}
>
<Text style={{color: speedRamona === speed ? '#fff' : '#8888AA', fontSize: 12, fontWeight: '600'}}>
{speed}x
</Text>
</TouchableOpacity>
))}
</View>
</View>
{/* Sprechgeschwindigkeit Thorsten */}
<View style={{marginTop: 16}}>
<Text style={styles.toggleLabel}>Thorsten Speed: {speedThorsten.toFixed(1)}x</Text>
<View style={{flexDirection: 'row', justifyContent: 'space-around', marginTop: 8}}>
{[0.5, 0.75, 1.0, 1.25, 1.5, 2.0].map(speed => (
<TouchableOpacity
key={speed}
onPress={() => {
setSpeedThorsten(speed);
AsyncStorage.setItem('aria_speed_thorsten', String(speed));
rvs.send('config' as any, { speedThorsten: speed });
}}
style={{
paddingHorizontal: 10, paddingVertical: 6, borderRadius: 6,
backgroundColor: speedThorsten === speed ? '#0096FF' : '#1E1E2E',
}}
>
<Text style={{color: speedThorsten === speed ? '#fff' : '#8888AA', fontSize: 12, fontWeight: '600'}}>
{speed}x
</Text>
</TouchableOpacity>
))}
</View>
</View>
{/* Highlight-Trigger Info */}
<View style={{marginTop: 16, padding: 10, backgroundColor: '#1E1E2E', borderRadius: 8}}>
<Text style={styles.toggleLabel}>{'\u26A1'} Highlight-Trigger</Text>
<Text style={[styles.toggleHint, {marginTop: 4}]}>
Die Highlight-Stimme wird automatisch bei diesen Woertern verwendet:{'\n'}
deploy, erfolgreich, alarm, so soll es sein, kritisch, server down, sicherheitswarnung, ticket geloest, aufgabe abgeschlossen
</Text>
</View>
</View>
{/* === Speicher === */}

View File

@ -335,7 +335,8 @@ class AudioService {
}
}
/** Einen PCM-Chunk aus einer audio_pcm Nachricht empfangen und spielen/cachen.
/** Einen PCM-Chunk aus einer audio_pcm Nachricht empfangen.
* silent=true nur cachen, nicht abspielen (z.B. wenn TTS geraetelokal gemutet).
* Gibt bei final=true den Cache-Pfad zurueck (file://) oder '' wenn nicht gecached. */
async handlePcmChunk(payload: {
base64: string;
@ -344,8 +345,10 @@ class AudioService {
messageId?: string;
chunk?: number;
final?: boolean;
silent?: boolean;
}): Promise<string> {
if (!PcmStreamPlayer) {
const silent = !!payload.silent;
if (!silent && !PcmStreamPlayer) {
console.warn('[Audio] PcmStreamPlayer Native Module nicht verfuegbar');
return '';
}
@ -358,10 +361,8 @@ class AudioService {
// Neuer Stream? (messageId Wechsel oder nicht aktiv)
if (!this.pcmStreamActive || this.pcmMessageId !== messageId) {
// Vorherigen Stream clean beenden (falls da)
if (this.pcmStreamActive) {
try { await PcmStreamPlayer.stop(); } catch {}
// Altes Buffer verwerfen (wurde nicht final — neue Message kam dazwischen)
if (this.pcmStreamActive && !silent) {
try { await PcmStreamPlayer!.stop(); } catch {}
this.pcmBuffer = [];
this.pcmBytesCollected = 0;
}
@ -371,35 +372,36 @@ class AudioService {
this.pcmChannels = channels;
this.pcmBuffer = [];
this.pcmBytesCollected = 0;
try {
await PcmStreamPlayer.start(sampleRate, channels);
} catch (err) {
console.error('[Audio] PcmStreamPlayer.start fehlgeschlagen:', err);
this.pcmStreamActive = false;
return '';
if (!silent) {
try {
await PcmStreamPlayer!.start(sampleRate, channels);
} catch (err) {
console.error('[Audio] PcmStreamPlayer.start fehlgeschlagen:', err);
this.pcmStreamActive = false;
return '';
}
AudioFocus?.requestDuck().catch(() => {});
}
// Audio-Focus: andere Apps ducken
AudioFocus?.requestDuck().catch(() => {});
}
// Chunk abspielen + cachen
// Chunk — immer cachen, nur bei !silent auch abspielen
if (base64) {
try { await PcmStreamPlayer.writeChunk(base64); } catch (err) { console.warn('[Audio] writeChunk', err); }
// Buffer fuer Cache sammeln (wenn noch nicht zu gross)
if (!silent) {
try { await PcmStreamPlayer!.writeChunk(base64); } catch (err) { console.warn('[Audio] writeChunk', err); }
}
if (messageId && this.pcmBytesCollected < this.PCM_MAX_CACHE_BYTES) {
this.pcmBuffer.push(base64);
// 4 base64-chars ≈ 3 bytes — grobe Schaetzung
this.pcmBytesCollected += Math.floor(base64.length * 0.75);
}
}
if (isFinal) {
// Stream sauber beenden (spielt noch bis Puffer leer ist)
try { await PcmStreamPlayer.end(); } catch {}
if (!silent) {
try { await PcmStreamPlayer!.end(); } catch {}
AudioFocus?.release().catch(() => {});
}
this.pcmStreamActive = false;
AudioFocus?.release().catch(() => {});
// Aus gesammelten PCM-Chunks eine WAV-Datei fuer Replay bauen
if (messageId && this.pcmBuffer.length > 0) {
const audioPath = await this._savePcmBufferAsWav(messageId);
this.pcmBuffer = [];