feat: Conversation-Window — Gespraech endet nach Stille statt Endlos-Loop
Der Gespraechsmodus war bisher ein Endless-Loop: Das Mikro hat sich nach
jeder ARIA-Antwort wieder geoeffnet und lief bis MAX_RECORDING_MS; danach
wurde die Aufnahme vom Speech-Gate verworfen und neu gestartet. Das Ohr
blieb ewig an.
Neue Logik:
audio.ts: startRecording(autoStop, noSpeechTimeoutMs?) — wenn der User
innerhalb des Timeouts nicht anfaengt zu sprechen, wird Stille
gemeldet → stopRecording → Speech-Gate verwirft → result=null.
wakeword.ts: drei States off/armed/conversing. start() geht direkt in
'conversing' (kein Wake-Word verfuegbar; Stub fuer spaetere Porcupine-
Integration). endConversation() bei No-Speech.
ChatScreen: Die Aufnahme bekommt das Konversations-Fenster aus AsyncStorage
durchgereicht. Bei null-Result wird endConversation() aufgerufen und der
UI-State synchron gehalten.
Settings: neuer +/- Block "Konversations-Fenster" 3-20s (Default 8).
Mit dem Stub ist die Architektur bereit fuer Porcupine: dann geht
endConversation auf 'armed' statt 'off' und der Wake-Word-Detector
laeuft passiv weiter.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
578ade3544
commit
1b8a51aad0
|
|
@ -29,7 +29,7 @@ import updateService from '../services/updater';
|
||||||
import VoiceButton from '../components/VoiceButton';
|
import VoiceButton from '../components/VoiceButton';
|
||||||
import FileUpload, { FileData } from '../components/FileUpload';
|
import FileUpload, { FileData } from '../components/FileUpload';
|
||||||
import CameraUpload, { PhotoData } from '../components/CameraUpload';
|
import CameraUpload, { PhotoData } from '../components/CameraUpload';
|
||||||
import { RecordingResult } from '../services/audio';
|
import { RecordingResult, loadConvWindowMs } from '../services/audio';
|
||||||
import Geolocation from '@react-native-community/geolocation';
|
import Geolocation from '@react-native-community/geolocation';
|
||||||
|
|
||||||
// --- Typen ---
|
// --- Typen ---
|
||||||
|
|
@ -385,10 +385,11 @@ const ChatScreen: React.FC = () => {
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
const unsubWake = wakeWordService.onWakeWord(async () => {
|
const unsubWake = wakeWordService.onWakeWord(async () => {
|
||||||
console.log('[Chat] Gespraechsmodus — starte Auto-Aufnahme');
|
console.log('[Chat] Gespraechsmodus — starte Auto-Aufnahme');
|
||||||
// Aufnahme mit Auto-Stop (VAD) starten
|
// Conversation-Window: User hat X Sekunden um anzufangen, sonst Konversation aus
|
||||||
const started = await audioService.startRecording(true);
|
const windowMs = await loadConvWindowMs();
|
||||||
|
const started = await audioService.startRecording(true, windowMs);
|
||||||
if (!started) {
|
if (!started) {
|
||||||
// Mikrofon nicht verfuegbar, Wake Word wieder aktivieren
|
// Mikrofon nicht verfuegbar, naechsten Versuch
|
||||||
wakeWordService.resume();
|
wakeWordService.resume();
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
@ -397,7 +398,7 @@ const ChatScreen: React.FC = () => {
|
||||||
const unsubSilence = audioService.onSilenceDetected(async () => {
|
const unsubSilence = audioService.onSilenceDetected(async () => {
|
||||||
const result = await audioService.stopRecording();
|
const result = await audioService.stopRecording();
|
||||||
if (result && result.durationMs > 500) {
|
if (result && result.durationMs > 500) {
|
||||||
// Sprachnachricht senden (gleiche Logik wie handleVoiceRecording)
|
// User hat im Fenster gesprochen → Sprachnachricht senden
|
||||||
const location = await getCurrentLocation();
|
const location = await getCurrentLocation();
|
||||||
const userMsg: ChatMessage = {
|
const userMsg: ChatMessage = {
|
||||||
id: nextId(),
|
id: nextId(),
|
||||||
|
|
@ -414,9 +415,14 @@ const ChatScreen: React.FC = () => {
|
||||||
voice: localXttsVoiceRef.current,
|
voice: localXttsVoiceRef.current,
|
||||||
...(location && { location }),
|
...(location && { location }),
|
||||||
});
|
});
|
||||||
|
// resume() wird durch onPlaybackFinished nach ARIAs Antwort getriggert.
|
||||||
|
} else {
|
||||||
|
// Kein Speech im Window → Konversation beenden (Ohr geht aus oder
|
||||||
|
// bleibt armed wenn Wake Word verfuegbar)
|
||||||
|
wakeWordService.endConversation();
|
||||||
|
// UI-State synchron halten
|
||||||
|
if (!wakeWordService.isActive()) setWakeWordActive(false);
|
||||||
}
|
}
|
||||||
// Wake Word wieder aktivieren
|
|
||||||
if (wakeWordActive) wakeWordService.resume();
|
|
||||||
});
|
});
|
||||||
|
|
||||||
return () => {
|
return () => {
|
||||||
|
|
|
||||||
|
|
@ -31,6 +31,10 @@ import {
|
||||||
VAD_SILENCE_MIN_SEC,
|
VAD_SILENCE_MIN_SEC,
|
||||||
VAD_SILENCE_MAX_SEC,
|
VAD_SILENCE_MAX_SEC,
|
||||||
VAD_SILENCE_STORAGE_KEY,
|
VAD_SILENCE_STORAGE_KEY,
|
||||||
|
CONV_WINDOW_DEFAULT_SEC,
|
||||||
|
CONV_WINDOW_MIN_SEC,
|
||||||
|
CONV_WINDOW_MAX_SEC,
|
||||||
|
CONV_WINDOW_STORAGE_KEY,
|
||||||
} from '../services/audio';
|
} from '../services/audio';
|
||||||
import ModeSelector from '../components/ModeSelector';
|
import ModeSelector from '../components/ModeSelector';
|
||||||
import QRScanner from '../components/QRScanner';
|
import QRScanner from '../components/QRScanner';
|
||||||
|
|
@ -87,6 +91,7 @@ const SettingsScreen: React.FC = () => {
|
||||||
const [ttsEnabled, setTtsEnabled] = useState(true);
|
const [ttsEnabled, setTtsEnabled] = useState(true);
|
||||||
const [ttsPrerollSec, setTtsPrerollSec] = useState<number>(TTS_PREROLL_DEFAULT_SEC);
|
const [ttsPrerollSec, setTtsPrerollSec] = useState<number>(TTS_PREROLL_DEFAULT_SEC);
|
||||||
const [vadSilenceSec, setVadSilenceSec] = useState<number>(VAD_SILENCE_DEFAULT_SEC);
|
const [vadSilenceSec, setVadSilenceSec] = useState<number>(VAD_SILENCE_DEFAULT_SEC);
|
||||||
|
const [convWindowSec, setConvWindowSec] = useState<number>(CONV_WINDOW_DEFAULT_SEC);
|
||||||
const [editingPath, setEditingPath] = useState(false);
|
const [editingPath, setEditingPath] = useState(false);
|
||||||
const [xttsVoice, setXttsVoice] = useState('');
|
const [xttsVoice, setXttsVoice] = useState('');
|
||||||
const [loadingVoice, setLoadingVoice] = useState<string | null>(null);
|
const [loadingVoice, setLoadingVoice] = useState<string | null>(null);
|
||||||
|
|
@ -130,6 +135,14 @@ const SettingsScreen: React.FC = () => {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
AsyncStorage.getItem(CONV_WINDOW_STORAGE_KEY).then(saved => {
|
||||||
|
if (saved != null) {
|
||||||
|
const n = parseFloat(saved);
|
||||||
|
if (isFinite(n) && n >= CONV_WINDOW_MIN_SEC && n <= CONV_WINDOW_MAX_SEC) {
|
||||||
|
setConvWindowSec(n);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
AsyncStorage.getItem('aria_xtts_voice').then(saved => {
|
AsyncStorage.getItem('aria_xtts_voice').then(saved => {
|
||||||
if (saved) setXttsVoice(saved);
|
if (saved) setXttsVoice(saved);
|
||||||
});
|
});
|
||||||
|
|
@ -603,6 +616,39 @@ const SettingsScreen: React.FC = () => {
|
||||||
<Text style={styles.prerollButtonText}>+0.5</Text>
|
<Text style={styles.prerollButtonText}>+0.5</Text>
|
||||||
</TouchableOpacity>
|
</TouchableOpacity>
|
||||||
</View>
|
</View>
|
||||||
|
|
||||||
|
<Text style={[styles.toggleLabel, {marginTop: 24}]}>Konversations-Fenster</Text>
|
||||||
|
<Text style={styles.toggleHint}>
|
||||||
|
Im Gespraechsmodus (Ohr-Button): nach ARIA's Antwort hast du so lange
|
||||||
|
Zeit, weiter zu sprechen, bevor die Konversation automatisch beendet wird.
|
||||||
|
Sprichst du nichts → Mikrofon zu.
|
||||||
|
Default: {CONV_WINDOW_DEFAULT_SEC.toFixed(1)}s.
|
||||||
|
</Text>
|
||||||
|
<View style={styles.prerollRow}>
|
||||||
|
<TouchableOpacity
|
||||||
|
style={styles.prerollButton}
|
||||||
|
onPress={() => {
|
||||||
|
const next = Math.max(CONV_WINDOW_MIN_SEC, Math.round((convWindowSec - 1) * 10) / 10);
|
||||||
|
setConvWindowSec(next);
|
||||||
|
AsyncStorage.setItem(CONV_WINDOW_STORAGE_KEY, String(next));
|
||||||
|
}}
|
||||||
|
disabled={convWindowSec <= CONV_WINDOW_MIN_SEC}
|
||||||
|
>
|
||||||
|
<Text style={styles.prerollButtonText}>−1</Text>
|
||||||
|
</TouchableOpacity>
|
||||||
|
<Text style={styles.prerollValue}>{convWindowSec.toFixed(0)} s</Text>
|
||||||
|
<TouchableOpacity
|
||||||
|
style={styles.prerollButton}
|
||||||
|
onPress={() => {
|
||||||
|
const next = Math.min(CONV_WINDOW_MAX_SEC, Math.round((convWindowSec + 1) * 10) / 10);
|
||||||
|
setConvWindowSec(next);
|
||||||
|
AsyncStorage.setItem(CONV_WINDOW_STORAGE_KEY, String(next));
|
||||||
|
}}
|
||||||
|
disabled={convWindowSec >= CONV_WINDOW_MAX_SEC}
|
||||||
|
>
|
||||||
|
<Text style={styles.prerollButtonText}>+1</Text>
|
||||||
|
</TouchableOpacity>
|
||||||
|
</View>
|
||||||
</View>
|
</View>
|
||||||
|
|
||||||
{/* === Sprachausgabe (geraetelokal) === */}
|
{/* === Sprachausgabe (geraetelokal) === */}
|
||||||
|
|
|
||||||
|
|
@ -84,6 +84,27 @@ export const VAD_SILENCE_MIN_SEC = 1.0;
|
||||||
export const VAD_SILENCE_MAX_SEC = 8.0;
|
export const VAD_SILENCE_MAX_SEC = 8.0;
|
||||||
export const VAD_SILENCE_STORAGE_KEY = 'aria_vad_silence_sec';
|
export const VAD_SILENCE_STORAGE_KEY = 'aria_vad_silence_sec';
|
||||||
|
|
||||||
|
// Konversations-Fenster (in Sekunden) — nach ARIA's Antwort hat der User so
|
||||||
|
// lange Zeit, im Gespraechsmodus weiter zu sprechen, ohne dass die Konversation
|
||||||
|
// beendet wird. Sprichst du im Fenster nichts → Konversation aus.
|
||||||
|
export const CONV_WINDOW_DEFAULT_SEC = 8.0;
|
||||||
|
export const CONV_WINDOW_MIN_SEC = 3.0;
|
||||||
|
export const CONV_WINDOW_MAX_SEC = 20.0;
|
||||||
|
export const CONV_WINDOW_STORAGE_KEY = 'aria_conv_window_sec';
|
||||||
|
|
||||||
|
export async function loadConvWindowMs(): Promise<number> {
|
||||||
|
try {
|
||||||
|
const raw = await AsyncStorage.getItem(CONV_WINDOW_STORAGE_KEY);
|
||||||
|
if (raw != null) {
|
||||||
|
const n = parseFloat(raw);
|
||||||
|
if (isFinite(n) && n >= CONV_WINDOW_MIN_SEC && n <= CONV_WINDOW_MAX_SEC) {
|
||||||
|
return Math.round(n * 1000);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} catch {}
|
||||||
|
return Math.round(CONV_WINDOW_DEFAULT_SEC * 1000);
|
||||||
|
}
|
||||||
|
|
||||||
async function loadVadSilenceMs(): Promise<number> {
|
async function loadVadSilenceMs(): Promise<number> {
|
||||||
try {
|
try {
|
||||||
const raw = await AsyncStorage.getItem(VAD_SILENCE_STORAGE_KEY);
|
const raw = await AsyncStorage.getItem(VAD_SILENCE_STORAGE_KEY);
|
||||||
|
|
@ -157,6 +178,7 @@ class AudioService {
|
||||||
private lastSpeechTime: number = 0;
|
private lastSpeechTime: number = 0;
|
||||||
private vadTimer: ReturnType<typeof setInterval> | null = null;
|
private vadTimer: ReturnType<typeof setInterval> | null = null;
|
||||||
private maxDurationTimer: ReturnType<typeof setTimeout> | null = null;
|
private maxDurationTimer: ReturnType<typeof setTimeout> | null = null;
|
||||||
|
private noSpeechTimer: ReturnType<typeof setTimeout> | null = null;
|
||||||
|
|
||||||
constructor() {
|
constructor() {
|
||||||
this.recorder = new AudioRecorderPlayer();
|
this.recorder = new AudioRecorderPlayer();
|
||||||
|
|
@ -189,8 +211,16 @@ class AudioService {
|
||||||
|
|
||||||
// --- Aufnahme ---
|
// --- Aufnahme ---
|
||||||
|
|
||||||
/** Mikrofon-Aufnahme starten */
|
/** Mikrofon-Aufnahme starten.
|
||||||
async startRecording(autoStop: boolean = false): Promise<boolean> {
|
*
|
||||||
|
* @param autoStop VAD aktivieren — Auto-Stop bei Stille
|
||||||
|
* @param noSpeechTimeoutMs Wenn der User innerhalb dieser Zeit nichts sagt,
|
||||||
|
* wird Stille gemeldet (Recording wird verworfen).
|
||||||
|
* Fuer Conversation-Window: nach ARIA's Antwort
|
||||||
|
* hast du nur N Sekunden um anzufangen, sonst
|
||||||
|
* Gespraech zu Ende.
|
||||||
|
*/
|
||||||
|
async startRecording(autoStop: boolean = false, noSpeechTimeoutMs: number = 0): Promise<boolean> {
|
||||||
if (this.recordingState !== 'idle') {
|
if (this.recordingState !== 'idle') {
|
||||||
console.warn('[Audio] Aufnahme laeuft bereits');
|
console.warn('[Audio] Aufnahme laeuft bereits');
|
||||||
return false;
|
return false;
|
||||||
|
|
@ -276,6 +306,18 @@ class AudioService {
|
||||||
}, MAX_RECORDING_MS);
|
}, MAX_RECORDING_MS);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Conversation-Window: Wenn der User innerhalb noSpeechTimeoutMs nicht
|
||||||
|
// anfaengt zu sprechen → Aufnahme abbrechen (Speech-Gate verwirft sie),
|
||||||
|
// ChatScreen erkennt das und beendet die Konversation.
|
||||||
|
if (noSpeechTimeoutMs > 0) {
|
||||||
|
this.noSpeechTimer = setTimeout(() => {
|
||||||
|
if (!this.speechDetected && this.recordingState === 'recording') {
|
||||||
|
console.log(`[Audio] Conversation-Window ${noSpeechTimeoutMs}ms ohne Sprache — Stop`);
|
||||||
|
this.silenceListeners.forEach(cb => cb());
|
||||||
|
}
|
||||||
|
}, noSpeechTimeoutMs);
|
||||||
|
}
|
||||||
|
|
||||||
console.log('[Audio] Aufnahme gestartet (autoStop: %s)', autoStop);
|
console.log('[Audio] Aufnahme gestartet (autoStop: %s)', autoStop);
|
||||||
return true;
|
return true;
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
|
|
@ -302,6 +344,10 @@ class AudioService {
|
||||||
clearTimeout(this.maxDurationTimer);
|
clearTimeout(this.maxDurationTimer);
|
||||||
this.maxDurationTimer = null;
|
this.maxDurationTimer = null;
|
||||||
}
|
}
|
||||||
|
if (this.noSpeechTimer) {
|
||||||
|
clearTimeout(this.noSpeechTimer);
|
||||||
|
this.noSpeechTimer = null;
|
||||||
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
await this.recorder.stopRecorder();
|
await this.recorder.stopRecorder();
|
||||||
|
|
|
||||||
|
|
@ -1,56 +1,92 @@
|
||||||
/**
|
/**
|
||||||
* Gespraechsmodus — "Ohr-Button"
|
* Gespraechsmodus / Wake Word Service
|
||||||
*
|
*
|
||||||
* Wenn aktiv: Nach jeder ARIA-Antwort (TTS fertig) startet automatisch die Aufnahme.
|
* Drei Zustaende:
|
||||||
* Wie ein Walkie-Talkie / natuerliches Gespraech:
|
* off — Ohr aus, nichts laeuft
|
||||||
* ARIA spricht → Aufnahme startet → User spricht → VAD stoppt → ARIA antwortet → ...
|
* armed — Ohr aktiv, wartet auf Wake Word ("ARIA"). Mikro IST AUS.
|
||||||
|
* (Sobald Porcupine integriert ist, hoert hier der Wake-Word-
|
||||||
|
* Detektor passiv mit. Aktuell ist das gleichbedeutend mit "off"
|
||||||
|
* bis der User wieder tippt — Stub fuer spaeter.)
|
||||||
|
* conversing — Wake Word getriggert / Ohr-Tap ohne Wake Word: aktive Konvers-
|
||||||
|
* ation mit ARIA. Mikro oeffnet nach jeder ARIA-Antwort fuer X
|
||||||
|
* Sekunden (Conversation-Window). Spricht der User nichts in dem
|
||||||
|
* Fenster → zurueck auf armed (kein erneuter Tap noetig sobald
|
||||||
|
* Porcupine drin ist).
|
||||||
*
|
*
|
||||||
* Phase 2 (geplant): Porcupine "ARIA" Wake Word fuer passives Lauschen.
|
* Aktuell ohne Porcupine: armed wird nur als Lifecycle-State gefuehrt; bei
|
||||||
|
* Conversation-Ende geht's direkt auf 'off' damit User klares Feedback bekommt.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
type WakeWordCallback = () => void;
|
type WakeWordCallback = () => void;
|
||||||
type StateCallback = (state: WakeWordState) => void;
|
type StateCallback = (state: WakeWordState) => void;
|
||||||
|
|
||||||
export type WakeWordState = 'off' | 'listening' | 'detected';
|
export type WakeWordState = 'off' | 'armed' | 'conversing';
|
||||||
|
|
||||||
class WakeWordService {
|
class WakeWordService {
|
||||||
private state: WakeWordState = 'off';
|
private state: WakeWordState = 'off';
|
||||||
private wakeCallbacks: WakeWordCallback[] = [];
|
private wakeCallbacks: WakeWordCallback[] = [];
|
||||||
private stateCallbacks: StateCallback[] = [];
|
private stateCallbacks: StateCallback[] = [];
|
||||||
|
private wakeWordSupported: boolean = false; // wird gesetzt wenn Porcupine spaeter integriert ist
|
||||||
|
|
||||||
/** Gespraechsmodus starten */
|
/** Ohr-Button gedrueckt — startet Konversation (oder armed wenn Wake-Word verfuegbar) */
|
||||||
async start(): Promise<boolean> {
|
async start(): Promise<boolean> {
|
||||||
if (this.state === 'listening') return true;
|
if (this.state !== 'off') return true;
|
||||||
console.log('[WakeWord] Gespraechsmodus aktiviert — starte sofort Aufnahme');
|
if (this.wakeWordSupported) {
|
||||||
this.setState('listening');
|
// Spaeter: Porcupine starten und auf "ARIA" warten
|
||||||
// Sofort erste Aufnahme starten
|
console.log('[WakeWord] armed — warte auf Wake Word');
|
||||||
setTimeout(() => {
|
this.setState('armed');
|
||||||
if (this.state === 'listening') {
|
} else {
|
||||||
this.wakeCallbacks.forEach(cb => cb());
|
// Heute: direkt in die Konversation
|
||||||
}
|
console.log('[WakeWord] Konversation startet sofort (kein Wake-Word)');
|
||||||
}, 500);
|
this.setState('conversing');
|
||||||
|
setTimeout(() => {
|
||||||
|
if (this.state === 'conversing') {
|
||||||
|
this.wakeCallbacks.forEach(cb => cb());
|
||||||
|
}
|
||||||
|
}, 500);
|
||||||
|
}
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Gespraechsmodus stoppen */
|
/** Komplett ausschalten (Ohr abschalten) */
|
||||||
stop(): void {
|
stop(): void {
|
||||||
console.log('[WakeWord] Gespraechsmodus deaktiviert');
|
console.log('[WakeWord] Ohr deaktiviert');
|
||||||
this.setState('off');
|
this.setState('off');
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Nach ARIA-Antwort (TTS fertig): Aufnahme automatisch starten */
|
/** Konversation beenden — User hat im Window nichts gesagt.
|
||||||
|
* Mit Porcupine: zurueck zu 'armed'. Ohne: zurueck zu 'off'.
|
||||||
|
*/
|
||||||
|
endConversation(): void {
|
||||||
|
if (this.state !== 'conversing') return;
|
||||||
|
if (this.wakeWordSupported) {
|
||||||
|
console.log('[WakeWord] Konversation zu Ende — zurueck zu armed (warte auf Wake Word)');
|
||||||
|
this.setState('armed');
|
||||||
|
} else {
|
||||||
|
console.log('[WakeWord] Konversation zu Ende — Ohr aus (kein Wake Word verfuegbar)');
|
||||||
|
this.setState('off');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Nach ARIA-Antwort (TTS fertig): naechste Aufnahme im Conversation-Window starten */
|
||||||
async resume(): Promise<void> {
|
async resume(): Promise<void> {
|
||||||
if (this.state !== 'listening') return;
|
if (this.state !== 'conversing') return;
|
||||||
// Kurze Pause damit TTS-Audio nicht ins Mikrofon geht
|
// Kurze Pause damit TTS-Audio nicht ins Mikrofon geht
|
||||||
await new Promise(resolve => setTimeout(resolve, 800));
|
await new Promise(resolve => setTimeout(resolve, 800));
|
||||||
if (this.state === 'listening') {
|
if (this.state === 'conversing') {
|
||||||
console.log('[WakeWord] TTS fertig — starte automatisch Aufnahme');
|
console.log('[WakeWord] TTS fertig — naechste Aufnahme im Conversation-Window');
|
||||||
this.wakeCallbacks.forEach(cb => cb());
|
this.wakeCallbacks.forEach(cb => cb());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** True solange das Ohr aktiv ist (armed ODER conversing). */
|
||||||
isActive(): boolean {
|
isActive(): boolean {
|
||||||
return this.state === 'listening';
|
return this.state !== 'off';
|
||||||
|
}
|
||||||
|
|
||||||
|
/** True wenn gerade aktiv aufgenommen / mit ARIA gesprochen wird. */
|
||||||
|
isConversing(): boolean {
|
||||||
|
return this.state === 'conversing';
|
||||||
}
|
}
|
||||||
|
|
||||||
// --- Callbacks ---
|
// --- Callbacks ---
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue