diff --git a/diagnostic/index.html b/diagnostic/index.html
index 33577f7..690770d 100644
--- a/diagnostic/index.html
+++ b/diagnostic/index.html
@@ -127,6 +127,15 @@
+
+
@@ -914,6 +923,11 @@
return;
}
+ if (msg.type === 'service_status') {
+ updateServiceStatus(msg.payload || {});
+ return;
+ }
+
if (msg.type === 'voice_ready') {
const v = msg.payload?.voice || '';
const err = msg.payload?.error;
@@ -1452,6 +1466,68 @@
'Glob': '\uD83D\uDCC1 Dateien suchen',
'Agent': '\uD83E\uDD16 Sub-Agent',
};
+ // ── Service-Status Banner (Gamebox: F5-TTS / Whisper Lade-Status) ──
+ // Aggregiert die Status-Infos der Bridges. Wenn irgendwas am Laden
+ // ist, zeigt das Banner unten rechts. Sobald alles auf 'ready' ist,
+ // bleibt's einen Moment und wird dann vom User weggeklickt (oder
+ // nach 8s automatisch).
+ const _serviceState = {}; // { f5tts: {state, model, ...}, whisper: {...} }
+ let _serviceFadeTimer = null;
+ function updateServiceStatus(p) {
+ const svc = p.service || '?';
+ _serviceState[svc] = p;
+
+ const banner = document.getElementById('service-status-banner');
+ const list = document.getElementById('service-status-list');
+ const icon = document.getElementById('service-status-icon');
+ const closeBtn = document.getElementById('service-status-close');
+
+ // Liste neu aufbauen
+ list.innerHTML = '';
+ let anyLoading = false, anyError = false;
+ const labels = { f5tts: 'F5-TTS', whisper: 'Whisper STT' };
+ for (const [s, info] of Object.entries(_serviceState)) {
+ const row = document.createElement('div');
+ row.style.cssText = 'display:flex;align-items:center;gap:6px;';
+ let dot = '⚫', color = '#666680', text = '';
+ if (info.state === 'loading') {
+ dot = '⏳'; color = '#FFD60A'; anyLoading = true;
+ text = `${labels[s] || s}: laedt${info.model ? ' ' + info.model : ''}...`;
+ } else if (info.state === 'ready') {
+ dot = '✅'; color = '#34C759';
+ const sec = info.loadSeconds ? ` (${info.loadSeconds.toFixed(1)}s)` : '';
+ text = `${labels[s] || s}: bereit${info.model ? ' ' + info.model : ''}${sec}`;
+ } else if (info.state === 'error') {
+ dot = '❌'; color = '#FF3B30'; anyError = true;
+ text = `${labels[s] || s}: Fehler ${info.error || ''}`;
+ } else {
+ text = `${labels[s] || s}: ${info.state}`;
+ }
+ row.innerHTML = `${dot}${text}`;
+ list.appendChild(row);
+ }
+
+ // Icon spiegelt Gesamt-Status
+ if (anyError) icon.innerHTML = '❌';
+ else if (anyLoading) icon.innerHTML = '⏳';
+ else icon.innerHTML = '✅';
+
+ banner.style.display = 'block';
+
+ // Wenn alles ready (kein Loading, kein Error): X-Button anzeigen
+ // + nach 8s automatisch wegfaden
+ if (!anyLoading && !anyError) {
+ closeBtn.style.display = 'block';
+ clearTimeout(_serviceFadeTimer);
+ _serviceFadeTimer = setTimeout(() => {
+ banner.style.display = 'none';
+ }, 8000);
+ } else {
+ closeBtn.style.display = 'none';
+ clearTimeout(_serviceFadeTimer);
+ }
+ }
+
function updateThinkingIndicator(msg) {
const indicators = [
document.getElementById('thinking-indicator'),
diff --git a/diagnostic/server.js b/diagnostic/server.js
index a780680..ffd6b17 100644
--- a/diagnostic/server.js
+++ b/diagnostic/server.js
@@ -637,6 +637,22 @@ function connectRVS(forcePlain) {
log("info", "rvs", `Voice "${v || "default"}" geladen${ms ? ` in ${(ms/1000).toFixed(1)}s` : ""}`);
}
broadcast({ type: "voice_ready", payload: msg.payload });
+ } else if (msg.type === "service_status") {
+ // Gamebox-Bridges (f5tts/whisper) melden ihren Lade-Status —
+ // an Browser durchreichen fuer das Banner unten rechts
+ const svc = msg.payload?.service || "?";
+ const state = msg.payload?.state || "?";
+ const model = msg.payload?.model || "";
+ const sec = msg.payload?.loadSeconds;
+ const err = msg.payload?.error;
+ if (err) {
+ log("warn", "rvs", `service_status ${svc}: ${err}`);
+ } else if (state === "ready" && sec) {
+ log("info", "rvs", `service_status ${svc} ready (${model}, ${sec.toFixed(1)}s)`);
+ } else {
+ log("info", "rvs", `service_status ${svc} ${state}${model ? ` (${model})` : ""}`);
+ }
+ broadcast({ type: "service_status", payload: msg.payload });
} else {
log("debug", "rvs", `Nachricht: ${JSON.stringify(msg).slice(0, 150)}`);
}
diff --git a/rvs/server.js b/rvs/server.js
index a72d146..255ca7b 100644
--- a/rvs/server.js
+++ b/rvs/server.js
@@ -21,6 +21,7 @@ const ALLOWED_TYPES = new Set([
"xtts_delete_voice",
"voice_preload", "voice_ready",
"stt_request", "stt_response",
+ "service_status",
]);
// Token-Raum: token -> { clients: Set }
diff --git a/xtts/.gitignore b/xtts/.gitignore
index 8678e47..fc4c60d 100644
--- a/xtts/.gitignore
+++ b/xtts/.gitignore
@@ -1,7 +1,3 @@
-# HuggingFace Model-Cache (geteilt zwischen f5tts + whisper bridge,
-# wird via Bind-Mount in die Container reingehaengt)
-hf-cache/
-
# Voice-Samples (lokal, gehoert nicht ins Repo)
voices/
diff --git a/xtts/docker-compose.yml b/xtts/docker-compose.yml
index bea7587..7f31f92 100644
--- a/xtts/docker-compose.yml
+++ b/xtts/docker-compose.yml
@@ -31,11 +31,11 @@ services:
capabilities: [gpu]
volumes:
- ./voices:/voices # WAV + TXT Referenz
- - ./hf-cache:/root/.cache/huggingface # HF-Cache als Bind-Mount.
- # Direkt sichtbar im xtts/hf-cache/,
- # einfach zu loeschen, kein Docker-
- # Desktop .vhdx Bloat.
- # Wird mit whisper-bridge geteilt.
+ # KEIN HF-Cache-Mount mehr —
+ # Modell wird beim Start neu
+ # gezogen. Diagnostic zeigt
+ # "TTS laedt..." Banner bis
+ # service_status: ready kommt.
environment:
# Bootstrap-only — alle anderen F5-TTS-Settings (Modell, cfg_strength,
# nfe_step, Custom-Checkpoint) kommen ueber Diagnostic via RVS-config.
@@ -77,6 +77,6 @@ services:
- WHISPER_DEVICE=${WHISPER_DEVICE:-cuda}
- WHISPER_COMPUTE_TYPE=${WHISPER_COMPUTE_TYPE:-float16}
- WHISPER_LANGUAGE=${WHISPER_LANGUAGE:-de}
- volumes:
- - ./hf-cache:/root/.cache/huggingface # gleicher Cache wie f5tts-bridge
+ # KEIN HF-Cache-Mount — Whisper-Modell wird beim Start neu gezogen.
+ # Wechsel via Diagnostic triggert ebenso Re-Download.
restart: unless-stopped
diff --git a/xtts/f5tts/bridge.py b/xtts/f5tts/bridge.py
index 7ae4cee..ffd8fe0 100644
--- a/xtts/f5tts/bridge.py
+++ b/xtts/f5tts/bridge.py
@@ -110,20 +110,26 @@ class F5Runner:
self.vocab_file: str = DEFAULT_F5TTS_VOCAB_FILE
self.cfg_strength: float = DEFAULT_F5TTS_CFG_STRENGTH
self.nfe_step: int = DEFAULT_F5TTS_NFE_STEP
+ # Last load-time fuer service_status Broadcast
+ self.last_load_seconds: float = 0.0
+ self._load_started_at: float = 0.0
def _load_blocking(self) -> None:
cls = _get_f5tts_cls()
logger.info("Lade F5-TTS '%s' (device=%s, ckpt=%s)...",
self.model_id, F5TTS_DEVICE, self.ckpt_file or "default")
- t0 = time.time()
+ self._load_started_at = time.time()
kwargs = {"model": self.model_id, "device": F5TTS_DEVICE}
if self.ckpt_file:
kwargs["ckpt_file"] = self.ckpt_file
if self.vocab_file:
kwargs["vocab_file"] = self.vocab_file
self.model = cls(**kwargs)
+ elapsed = time.time() - self._load_started_at
logger.info("F5-TTS geladen in %.1fs (cfg_strength=%.1f, nfe=%d)",
- time.time() - t0, self.cfg_strength, self.nfe_step)
+ elapsed, self.cfg_strength, self.nfe_step)
+ # Wird von outside (run_loop) gelesen um service_status auf 'ready' zu setzen
+ self.last_load_seconds = elapsed
async def ensure_loaded(self) -> None:
async with self._lock:
@@ -580,10 +586,15 @@ async def handle_voice_preload(ws, payload: dict, runner: F5Runner) -> None:
# ── Haupt-Loop ──────────────────────────────────────────────
-async def run_loop(runner: F5Runner) -> None:
- # Preload im Hintergrund starten damit der Startup nicht blockiert
- asyncio.create_task(runner.ensure_loaded())
+async def _broadcast_status(ws, state: str, **extra) -> None:
+    """Send a service_status message for the F5-TTS service over the RVS socket.
+
+    state: 'loading' | 'ready' | 'error'.
+    extra: merged into the payload (e.g. model=..., loadSeconds=..., error=...).
+    """
+    payload = {"service": "f5tts", "state": state}
+    payload.update(extra)
+    await _send(ws, "service_status", payload)
+
+async def run_loop(runner: F5Runner) -> None:
use_tls = RVS_TLS
retry_s = 2
tls_fallback_tried = False
@@ -601,6 +612,25 @@ async def run_loop(runner: F5Runner) -> None:
retry_s = 2
tls_fallback_tried = False
+ # Status-Broadcast: erst loading, dann ready nach erfolgreichem Load.
+ # Modell wird hier (nicht ausserhalb der Schleife) gestartet damit
+ # der Loading-Status auch wirklich uebertragen werden kann.
+ async def _load_with_status():
+ if runner.model is not None:
+ await _broadcast_status(ws, "ready",
+ model=runner.model_id,
+ loadSeconds=runner.last_load_seconds)
+ return
+ await _broadcast_status(ws, "loading", model=runner.model_id)
+ try:
+ await runner.ensure_loaded()
+ await _broadcast_status(ws, "ready",
+ model=runner.model_id,
+ loadSeconds=runner.last_load_seconds)
+ except Exception as e:
+ await _broadcast_status(ws, "error", error=str(e)[:200])
+ asyncio.create_task(_load_with_status())
+
# TTS-Worker fuer diese Verbindung starten
worker = asyncio.create_task(_tts_worker(ws, runner))
@@ -640,7 +670,26 @@ async def run_loop(runner: F5Runner) -> None:
fut.set_result(payload.get("text") or "")
elif mtype == "config":
# F5-TTS-Settings aktualisieren (Modell, cfg_strength, nfe)
- asyncio.create_task(runner.update_config(payload))
+ async def _update_with_status(p):
+ # Schaut ob ein Modell-Wechsel ansteht — falls ja:
+ # erst loading-Status, dann update, dann ready.
+ old_model = (runner.model_id, runner.ckpt_file, runner.vocab_file)
+ new_model_id = (p.get("f5ttsModel") or runner.model_id,
+ p.get("f5ttsCkptFile", runner.ckpt_file) or "",
+ p.get("f5ttsVocabFile", runner.vocab_file) or "")
+ will_reload = old_model != new_model_id
+ if will_reload:
+ await _broadcast_status(ws, "loading", model=new_model_id[0])
+ try:
+ await runner.update_config(p)
+ if will_reload:
+ await _broadcast_status(ws, "ready",
+ model=runner.model_id,
+ loadSeconds=runner.last_load_seconds)
+ except Exception as e:
+ if will_reload:
+ await _broadcast_status(ws, "error", error=str(e)[:200])
+ asyncio.create_task(_update_with_status(payload))
# Voice-Preload bei Wechsel
v = (payload.get("xttsVoice") or "").strip()
if v and v != _last_diag_voice:
diff --git a/xtts/whisper/bridge.py b/xtts/whisper/bridge.py
index 69128b1..9210062 100644
--- a/xtts/whisper/bridge.py
+++ b/xtts/whisper/bridge.py
@@ -184,13 +184,15 @@ async def handle_stt_request(ws, payload: dict, runner: WhisperRunner) -> None:
})
-async def run_loop(runner: WhisperRunner) -> None:
- # Modell vorab laden damit erste Anfrage flott ist
- try:
- await runner.ensure_loaded(WHISPER_MODEL)
- except Exception as e:
- logger.error("Preload fehlgeschlagen: %s — Fortsetzung, wird bei erstem Request nachgeladen", e)
+async def _broadcast_status(ws, state: str, **extra) -> None:
+    """Send a service_status message for the Whisper service over the RVS socket.
+
+    state: 'loading' | 'ready' | 'error'.
+    extra: merged into the payload (e.g. model=..., loadSeconds=..., error=...).
+    """
+    payload = {"service": "whisper", "state": state}
+    payload.update(extra)
+    await _send(ws, "service_status", payload)
+
+async def run_loop(runner: WhisperRunner) -> None:
use_tls = RVS_TLS
retry_s = 2
tls_fallback_tried = False
@@ -205,6 +207,24 @@ async def run_loop(runner: WhisperRunner) -> None:
logger.info("RVS verbunden")
retry_s = 2
tls_fallback_tried = False
+
+ # Modell laden, dabei loading→ready broadcasten
+ async def _load_with_status():
+ if runner.model is not None:
+ await _broadcast_status(ws, "ready", model=runner.model_size)
+ return
+ await _broadcast_status(ws, "loading", model=WHISPER_MODEL)
+ try:
+ t0 = time.time()
+ await runner.ensure_loaded(WHISPER_MODEL)
+ elapsed = time.time() - t0
+ await _broadcast_status(ws, "ready",
+ model=runner.model_size,
+ loadSeconds=elapsed)
+ except Exception as e:
+ await _broadcast_status(ws, "error", error=str(e)[:200])
+ asyncio.create_task(_load_with_status())
+
async for raw in ws:
try:
msg = json.loads(raw)
@@ -222,8 +242,19 @@ async def run_loop(runner: WhisperRunner) -> None:
elif mtype == "config":
new_model = payload.get("whisperModel")
if new_model and new_model != runner.model_size:
- logger.info("Config-Broadcast: Whisper-Modell → %s", new_model)
- asyncio.create_task(runner.ensure_loaded(new_model))
+ logger.info("Config-Broadcast: Whisper-Modell -> %s", new_model)
+ async def _swap_with_status(target):
+ await _broadcast_status(ws, "loading", model=target)
+ try:
+ t0 = time.time()
+ await runner.ensure_loaded(target)
+ elapsed = time.time() - t0
+ await _broadcast_status(ws, "ready",
+ model=runner.model_size,
+ loadSeconds=elapsed)
+ except Exception as e:
+ await _broadcast_status(ws, "error", error=str(e)[:200])
+ asyncio.create_task(_swap_with_status(new_model))
else:
# Alle anderen Nachrichten debug-loggen — hilft beim Diagnostizieren,
# ob stt_request ueberhaupt durch den RVS kommt