feat(flux): Bildgenerierung via FLUX.1-dev — flux-bridge auf Gamebox
Eigener Compose-Stack im /flux Verzeichnis (kann auf separater Maschine laufen). aria-bridge routet flux_request via RVS, ARIA referenziert das fertige PNG im Reply mit [FILE: ...]-Marker. Brain-Tool flux_generate mit Caps fuer steps/dimension. Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -18,6 +18,9 @@ from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import urllib.error
|
||||
import urllib.request
|
||||
from typing import Optional
|
||||
|
||||
from conversation import Conversation, Turn
|
||||
@@ -28,6 +31,12 @@ import skills as skills_mod
|
||||
import triggers as triggers_mod
|
||||
import watcher as watcher_mod
|
||||
|
||||
BRIDGE_URL = os.environ.get("BRIDGE_URL", "http://aria-bridge:8090")
|
||||
# FLUX-Render kann bis ~90s dauern, beim ersten Render nach Container-Start
|
||||
# laedt die flux-bridge zudem ~24 GB Modell von HF (~5-10 min). Brain wartet
|
||||
# synchron — Stefan kuendigt es vorher an wenn er weiss dass es feuert.
|
||||
FLUX_HTTP_TIMEOUT_SEC = 1200
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@@ -215,6 +224,47 @@ META_TOOLS = [
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "flux_generate",
|
||||
"description": (
|
||||
"Generiere ein Bild aus einem Text-Prompt via FLUX.1-dev auf der Gamebox-"
|
||||
"GPU. Brauchbar fuer 'mal mir ein X', 'wie sieht ein Y aus?', "
|
||||
"Mockups, Konzept-Skizzen. Render dauert 20-90s — Stefan kuendigt "
|
||||
"es an wenn er weiss dass es laeuft.\n\n"
|
||||
"**Schreibe deine Antwort wie immer auf Deutsch**, und referenziere das "
|
||||
"fertige Bild MIT dem `[FILE: ...]`-Marker, GENAU im Pfad-Format das das "
|
||||
"Tool zurueckgibt. Beispiel:\n"
|
||||
" 'Hier dein Aquarell:\\n[FILE: /shared/uploads/aria_generated_1234.png]'\n\n"
|
||||
"Der Marker wird beim App-Renderer ausgeblendet und das Bild stattdessen "
|
||||
"inline als Anhang gezeigt.\n\n"
|
||||
"**Prompt-Sprache: bevorzugt Englisch.** FLUX versteht zwar Deutsch, "
|
||||
"liefert aber mit englischen Prompts deutlich konsistentere Ergebnisse. "
|
||||
"Uebersetze Stefans deutsche Beschreibung selbststaendig.\n\n"
|
||||
"Caps:\n"
|
||||
"- `width`/`height`: 256-1536, wird auf Vielfache von 64 gesnappt (Default 1024)\n"
|
||||
"- `steps`: 1-50 (Default 28 fuer FLUX.1-dev, 4 fuer schnell)\n"
|
||||
"- `guidance_scale`: 0.0-20.0 (Default 3.5)\n"
|
||||
"- `seed`: optional, gleicher seed + gleicher prompt → gleiches Bild"
|
||||
),
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"prompt": {
|
||||
"type": "string",
|
||||
"description": "Englischer Bild-Prompt. So konkret wie moeglich (Motiv, Stil, Licht, Kamera).",
|
||||
},
|
||||
"width": {"type": "integer", "description": "Breite in px (Default 1024, max 1536)"},
|
||||
"height": {"type": "integer", "description": "Hoehe in px (Default 1024, max 1536)"},
|
||||
"steps": {"type": "integer", "description": "Inference-Steps (Default 28, max 50). Mehr = besser+langsamer."},
|
||||
"guidance_scale": {"type": "number", "description": "Wie strikt am Prompt kleben (Default 3.5)"},
|
||||
"seed": {"type": "integer", "description": "Reproduzierbarkeits-Seed (optional)"},
|
||||
},
|
||||
"required": ["prompt"],
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
@@ -607,6 +657,58 @@ class Agent:
|
||||
else:
|
||||
lines.append(f"- {t['name']} ({t['type']}, {state})")
|
||||
return "\n".join(lines)
|
||||
if name == "flux_generate":
|
||||
prompt = (arguments.get("prompt") or "").strip()
|
||||
if not prompt:
|
||||
return "FEHLER: prompt ist Pflicht."
|
||||
req: dict = {"prompt": prompt}
|
||||
for key in ("width", "height", "steps", "seed"):
|
||||
if key in arguments and arguments[key] is not None:
|
||||
try:
|
||||
req[key] = int(arguments[key])
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
if arguments.get("guidance_scale") is not None:
|
||||
try:
|
||||
req["guidance_scale"] = float(arguments["guidance_scale"])
|
||||
except (TypeError, ValueError):
|
||||
pass
|
||||
try:
|
||||
body = json.dumps(req).encode("utf-8")
|
||||
http_req = urllib.request.Request(
|
||||
f"{BRIDGE_URL}/internal/flux-generate", data=body, method="POST",
|
||||
headers={"Content-Type": "application/json"},
|
||||
)
|
||||
with urllib.request.urlopen(http_req, timeout=FLUX_HTTP_TIMEOUT_SEC) as resp:
|
||||
raw = resp.read()
|
||||
result = json.loads(raw.decode("utf-8", "ignore"))
|
||||
except urllib.error.HTTPError as exc:
|
||||
try:
|
||||
err_body = exc.read().decode("utf-8", "ignore")
|
||||
err_data = json.loads(err_body)
|
||||
err = err_data.get("error") or err_body
|
||||
except Exception:
|
||||
err = str(exc)
|
||||
return f"FEHLER (flux-bridge): {err}"
|
||||
except Exception as exc:
|
||||
logger.exception("flux_generate HTTP-Call fehlgeschlagen")
|
||||
return f"FEHLER: flux-bridge nicht erreichbar ({exc})"
|
||||
|
||||
if not result.get("ok"):
|
||||
return f"FEHLER (flux-bridge): {result.get('error', 'unbekannt')}"
|
||||
# Kompakte Rueckmeldung: Pfad + Render-Stats. Brain bettet den
|
||||
# Pfad in ihre Antwort als [FILE: ...]-Marker ein (siehe Tool-Beschreibung).
|
||||
return (
|
||||
f"OK — Bild generiert.\n"
|
||||
f"path: {result['path']}\n"
|
||||
f"size: {result.get('width','?')}x{result.get('height','?')} "
|
||||
f"({result.get('sizeBytes',0)//1024} KB)\n"
|
||||
f"steps={result.get('steps','?')} guidance={result.get('guidance','?')} "
|
||||
f"seed={result.get('seed','?')} model={result.get('model','?')}\n"
|
||||
f"renderSeconds={result.get('renderSeconds','?')}\n\n"
|
||||
f"WICHTIG: Schreibe in deiner Antwort an Stefan den Pfad EXAKT als "
|
||||
f"Marker: [FILE: {result['path']}] — dann zeigt die App das Bild inline."
|
||||
)
|
||||
if name == "memory_search":
|
||||
query = (arguments.get("query") or "").strip()
|
||||
if not query:
|
||||
|
||||
+170
-1
@@ -541,6 +541,12 @@ class ARIABridge:
|
||||
# Beeinflusst das Timeout fuer stt_request — bei "loading" warten wir laenger,
|
||||
# weil das Modell beim ersten Request noch ~1-2 Min runtergeladen werden kann.
|
||||
self._remote_stt_ready: bool = False
|
||||
# FLUX-Render-Requests die aktuell auf Antwort der flux-bridge (Gamebox) warten.
|
||||
# requestId → Future mit dem flux_response-Payload (oder None bei Fehler).
|
||||
self._pending_flux: dict[str, asyncio.Future] = {}
|
||||
# flux-bridge service_status: True wenn ready. Render-Timeouts werden
|
||||
# bei 'loading' deutlich grosszuegiger gesetzt (Modell-Download ~24 GB).
|
||||
self._remote_flux_ready: bool = False
|
||||
# User-Message-Counter fuer Auto-Compact. Bei zu langer Konversation
|
||||
# sprengt die argv-Liste beim Claude-Subprocess-Spawn (E2BIG). Bei
|
||||
# COMPACT_AFTER erreicht → Sessions reset + Container restart.
|
||||
@@ -2309,8 +2315,36 @@ class ARIABridge:
|
||||
future.set_result(text)
|
||||
return
|
||||
|
||||
elif msg_type == "flux_response":
|
||||
# Antwort der flux-bridge auf unseren flux_request. Erste Nachricht
|
||||
# mit state='rendering' ist nur Progress-Ping — die echte Antwort
|
||||
# kommt mit state='done' (oder error).
|
||||
request_id = payload.get("requestId", "")
|
||||
future = self._pending_flux.get(request_id)
|
||||
if future is None or future.done():
|
||||
return
|
||||
error = payload.get("error", "")
|
||||
if error:
|
||||
logger.warning("[rvs] flux_response Fehler: %s", error)
|
||||
future.set_result({"error": error})
|
||||
return
|
||||
state = payload.get("state", "")
|
||||
if state == "rendering":
|
||||
# Nur Progress-Info, future bleibt offen
|
||||
logger.info("[rvs] flux: rendering %dx%d steps=%d ...",
|
||||
payload.get("width", 0), payload.get("height", 0),
|
||||
payload.get("steps", 0))
|
||||
return
|
||||
# state == "done" oder fehlt → final
|
||||
logger.info("[rvs] flux fertig: %dx%d, %.1fs, %d KB",
|
||||
payload.get("width", 0), payload.get("height", 0),
|
||||
payload.get("renderSeconds", 0),
|
||||
(payload.get("sizeBytes", 0)) // 1024)
|
||||
future.set_result(payload)
|
||||
return
|
||||
|
||||
elif msg_type == "service_status":
|
||||
# Gamebox-Bridges (whisper / f5tts) melden ihren Lade-Status.
|
||||
# Gamebox-Bridges (whisper / f5tts / flux) melden ihren Lade-Status.
|
||||
# Wir nutzen das fuer den dynamischen STT-Timeout: solange whisper
|
||||
# im 'loading' steckt, geben wir der Bridge mehr Zeit (Modell-Download
|
||||
# kann 1-2 Min dauern), statt nach 45s lokal zu fallbacken.
|
||||
@@ -2321,6 +2355,11 @@ class ARIABridge:
|
||||
self._remote_stt_ready = (state == "ready")
|
||||
if self._remote_stt_ready != was_ready:
|
||||
logger.info("[rvs] whisper-bridge -> %s", state)
|
||||
elif svc == "flux":
|
||||
was_ready = self._remote_flux_ready
|
||||
self._remote_flux_ready = (state == "ready")
|
||||
if self._remote_flux_ready != was_ready:
|
||||
logger.info("[rvs] flux-bridge -> %s", state)
|
||||
return
|
||||
|
||||
elif msg_type == "config_request":
|
||||
@@ -2505,6 +2544,101 @@ class ARIABridge:
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
# ── Flux-Roundtrip: Brain → Bridge → RVS → flux-bridge → zurueck ──
|
||||
# FLUX-Render auf der 3060 dauert je nach Aufloesung/Steps 20-90 s.
|
||||
# Beim 1. Render frisch nach Container-Start muss zudem das ~24 GB
|
||||
# Modell von HF geladen werden — daher der grosse Loading-Timeout.
|
||||
_FLUX_TIMEOUT_READY_S = 240.0 # 4 min nach erstem Render
|
||||
_FLUX_TIMEOUT_LOADING_S = 900.0 # 15 min beim allerersten Mal (Modell-Download)
|
||||
|
||||
async def _flux_generate(self, prompt: str, width: int, height: int,
|
||||
steps: Optional[int], guidance: Optional[float],
|
||||
seed: Optional[int]) -> dict:
|
||||
"""Schickt einen flux_request an die flux-bridge, wartet auf das fertige
|
||||
PNG, speichert es nach /shared/uploads/aria_generated_<ts>.png.
|
||||
|
||||
Rueckgabe:
|
||||
{ok: True, path, sizeBytes, width, height, steps, guidance, seed, model, renderSeconds}
|
||||
{ok: False, error}
|
||||
"""
|
||||
if self.ws_rvs is None:
|
||||
return {"ok": False, "error": "RVS-Verbindung nicht aktiv"}
|
||||
|
||||
request_id = str(uuid.uuid4())
|
||||
loop = asyncio.get_event_loop()
|
||||
future: asyncio.Future = loop.create_future()
|
||||
self._pending_flux[request_id] = future
|
||||
|
||||
try:
|
||||
req_payload: dict = {"requestId": request_id, "prompt": prompt,
|
||||
"width": width, "height": height}
|
||||
if steps is not None:
|
||||
req_payload["steps"] = steps
|
||||
if guidance is not None:
|
||||
req_payload["guidance_scale"] = guidance
|
||||
if seed is not None:
|
||||
req_payload["seed"] = seed
|
||||
|
||||
logger.info("[rvs] flux_request → flux-bridge (id=%s, %dx%d, steps=%s, prompt=%r)",
|
||||
request_id[:8], width, height, steps, prompt[:60])
|
||||
ok = await self._send_to_rvs({
|
||||
"type": "flux_request",
|
||||
"payload": req_payload,
|
||||
"timestamp": int(time.time() * 1000),
|
||||
})
|
||||
if not ok:
|
||||
return {"ok": False, "error": "flux_request konnte nicht gesendet werden"}
|
||||
|
||||
timeout_s = (self._FLUX_TIMEOUT_READY_S
|
||||
if self._remote_flux_ready
|
||||
else self._FLUX_TIMEOUT_LOADING_S)
|
||||
result = await asyncio.wait_for(future, timeout=timeout_s)
|
||||
|
||||
if not isinstance(result, dict) or result.get("error"):
|
||||
err = (result or {}).get("error") if isinstance(result, dict) else "leeres Resultat"
|
||||
return {"ok": False, "error": err or "flux-bridge Fehler"}
|
||||
|
||||
b64 = result.get("base64") or ""
|
||||
if not b64:
|
||||
return {"ok": False, "error": "flux_response ohne Bilddaten"}
|
||||
|
||||
try:
|
||||
png_bytes = base64.b64decode(b64)
|
||||
except Exception as e:
|
||||
return {"ok": False, "error": f"PNG-Decode fehlgeschlagen: {e}"}
|
||||
|
||||
SHARED_DIR = "/shared/uploads"
|
||||
os.makedirs(SHARED_DIR, exist_ok=True)
|
||||
ts_ms = int(time.time() * 1000)
|
||||
file_name = f"aria_generated_{ts_ms}.png"
|
||||
path = os.path.join(SHARED_DIR, file_name)
|
||||
try:
|
||||
with open(path, "wb") as f:
|
||||
f.write(png_bytes)
|
||||
except Exception as e:
|
||||
return {"ok": False, "error": f"Speichern fehlgeschlagen: {e}"}
|
||||
|
||||
logger.info("[rvs] flux PNG gespeichert: %s (%d KB)", path, len(png_bytes) // 1024)
|
||||
return {
|
||||
"ok": True,
|
||||
"path": path,
|
||||
"sizeBytes": len(png_bytes),
|
||||
"width": result.get("width", width),
|
||||
"height": result.get("height", height),
|
||||
"steps": result.get("steps"),
|
||||
"guidance": result.get("guidance"),
|
||||
"seed": result.get("seed"),
|
||||
"model": result.get("model", ""),
|
||||
"renderSeconds": result.get("renderSeconds", 0),
|
||||
}
|
||||
except asyncio.TimeoutError:
|
||||
return {"ok": False, "error": f"Render-Timeout ({int(timeout_s)}s) — flux-bridge offline?"}
|
||||
except Exception as e:
|
||||
logger.exception("[rvs] _flux_generate Fehler")
|
||||
return {"ok": False, "error": str(e)[:200]}
|
||||
finally:
|
||||
self._pending_flux.pop(request_id, None)
|
||||
|
||||
async def _send_to_rvs(self, message: dict) -> bool:
|
||||
"""Sendet eine Nachricht an die App (via RVS) mit Verbindungs-Check.
|
||||
|
||||
@@ -2735,6 +2869,41 @@ class ARIABridge:
|
||||
# selbst wenn derselbe Name zweimal in Folge kommt.
|
||||
asyncio.create_task(self._emit_activity("tool", tool, force=True))
|
||||
await _send_response(writer, 200, {"ok": True})
|
||||
elif method == "POST" and path == "/internal/flux-generate":
|
||||
# Vom Brain (flux_generate-Tool) gefeuert. Wir routen den
|
||||
# Render-Request via RVS an die flux-bridge (Gamebox),
|
||||
# warten synchron auf die PNG-Antwort, speichern sie nach
|
||||
# /shared/uploads/ und melden Pfad + Render-Stats zurueck.
|
||||
# Brain referenziert das Bild dann mit [FILE:]-Marker in
|
||||
# seiner Antwort, die Bridge broadcastet daraufhin
|
||||
# automatisch ein file_from_aria-Event an App+Diagnostic.
|
||||
try:
|
||||
data = json.loads(body.decode("utf-8", "ignore"))
|
||||
except Exception as exc:
|
||||
await _send_response(writer, 400, {"error": f"bad json: {exc}"})
|
||||
return
|
||||
prompt = (data.get("prompt") or "").strip()
|
||||
if not prompt:
|
||||
await _send_response(writer, 400, {"error": "prompt erforderlich"})
|
||||
return
|
||||
try:
|
||||
width = int(data.get("width") or 1024)
|
||||
height = int(data.get("height") or 1024)
|
||||
except (TypeError, ValueError):
|
||||
width, height = 1024, 1024
|
||||
steps_raw = data.get("steps")
|
||||
guidance_raw = data.get("guidance_scale")
|
||||
seed_raw = data.get("seed")
|
||||
steps = int(steps_raw) if isinstance(steps_raw, (int, float)) else None
|
||||
guidance = float(guidance_raw) if isinstance(guidance_raw, (int, float)) else None
|
||||
seed = int(seed_raw) if isinstance(seed_raw, (int, float)) else None
|
||||
|
||||
result = await self._flux_generate(
|
||||
prompt=prompt, width=width, height=height,
|
||||
steps=steps, guidance=guidance, seed=seed,
|
||||
)
|
||||
status = 200 if result.get("ok") else 502
|
||||
await _send_response(writer, status, result)
|
||||
elif method == "POST" and path == "/internal/delete-chat-message":
|
||||
try:
|
||||
data = json.loads(body.decode("utf-8", "ignore"))
|
||||
|
||||
@@ -2123,7 +2123,7 @@
|
||||
// Liste neu aufbauen
|
||||
list.innerHTML = '';
|
||||
let anyLoading = false, anyError = false;
|
||||
const labels = { f5tts: 'F5-TTS', whisper: 'Whisper STT' };
|
||||
const labels = { f5tts: 'F5-TTS', whisper: 'Whisper STT', flux: 'FLUX Image-Gen' };
|
||||
for (const [s, info] of Object.entries(_serviceState)) {
|
||||
const row = document.createElement('div');
|
||||
row.style.cssText = 'display:flex;align-items:center;gap:6px;';
|
||||
|
||||
@@ -0,0 +1,180 @@
|
||||
# FLUX.1-dev Bildgenerierung — Architektur & Stand
|
||||
|
||||
Ergaenzung des ARIA-Agent-Stacks um native Text-to-Image-Generierung via
|
||||
FLUX.1-dev auf der Gamebox. Folgt dem **gleichen Pattern wie f5tts / whisper**:
|
||||
ein eigener Container auf dem Gaming-PC, der sich selbst per WebSocket zum
|
||||
RVS verbindet und auf seinen Request-Typ lauscht.
|
||||
|
||||
## Pipeline
|
||||
|
||||
```
|
||||
Stefan / App
|
||||
│ Chat-Nachricht ("mal mir einen Sonnenuntergang ueberm Hangar")
|
||||
▼
|
||||
aria-bridge ── send_to_core ──▶ aria-brain
|
||||
│ chooses tool: flux_generate(prompt=..., width=..., ...)
|
||||
│ POST /internal/flux-generate
|
||||
▼
|
||||
aria-bridge (VM)
|
||||
│ pushes {type: "flux_request",
|
||||
│ payload: {requestId, prompt, ...}}
|
||||
│ via RVS-Broadcast
|
||||
▼
|
||||
RVS
|
||||
│ fanout
|
||||
▼
|
||||
flux-bridge (Gamebox)
|
||||
│ FluxPipeline.from_pretrained(...)
|
||||
│ pipeline(prompt, width, height, steps, guidance).images[0]
|
||||
│ PIL → PNG → base64
|
||||
│ {type: "flux_response", payload: {state:"done",
|
||||
│ requestId, base64, mimeType, ...}}
|
||||
▼
|
||||
RVS
|
||||
│
|
||||
▼
|
||||
aria-bridge (VM)
|
||||
│ _pending_flux[requestId].set_result(payload)
|
||||
│ base64-decode → /shared/uploads/aria_generated_<ts>.png
|
||||
│ HTTP 200 zurueck an Brain mit {path, sizeBytes, ...}
|
||||
▼
|
||||
aria-brain
|
||||
│ Tool-Result + Hint: "schreib [FILE: {path}] in deine Antwort"
|
||||
│ Final-Reply: "Hier dein Bild:\n[FILE: /shared/uploads/aria_generated_<ts>.png]"
|
||||
▼
|
||||
aria-bridge
|
||||
│ _FILE_MARKER_RE → file_from_aria-Event
|
||||
│ Marker bleibt im Chat-Text fuer Hist; App rendert das Bild inline
|
||||
▼
|
||||
App + Diagnostic
|
||||
```
|
||||
|
||||
## Komponenten
|
||||
|
||||
### 1. `flux/bridge.py` (neu) — flux-bridge Container
|
||||
|
||||
- `FluxPipeline` (diffusers) mit `enable_model_cpu_offload()` als Default,
|
||||
damit FLUX.1-dev (~24 GB on disk, ~12 B params) auf einer RTX 3060
|
||||
(12 GB VRAM) ueberhaupt laeuft.
|
||||
- Lazy-Load: Modell wird beim ersten `flux_request` (oder im Initial-Load)
|
||||
geladen, `service_status: "flux", state: "loading" | "ready" | "error"`
|
||||
wird via RVS broadcastet → Diagnostic-Badge zeigt's an.
|
||||
- Single-Worker-Queue (`_flux_queue`) — GPU darf nicht parallel rendern,
|
||||
sonst OOM oder Crash.
|
||||
- Progress-Ping: `flux_response {state: "rendering"}` direkt nach
|
||||
Queue-Pickup, damit die aria-bridge weiss "Auftrag angekommen", auch
|
||||
wenn der eigentliche Render 60s braucht.
|
||||
- Caps:
|
||||
- `width`/`height`: 256 .. `FLUX_MAX_DIM` (Default 1536), gesnappt auf
|
||||
Vielfache von 64.
|
||||
- `steps`: 1 .. `FLUX_MAX_STEPS` (Default 50).
|
||||
- `guidance_scale`: 0.0 .. 20.0.
|
||||
- `prompt`: max 2000 chars.
|
||||
- Env-Switches:
|
||||
- `FLUX_MODEL` — Default `black-forest-labs/FLUX.1-dev` (non-commercial).
|
||||
Alt: `FLUX.1-schnell` (Apache-2.0, 4 Steps, deutlich schneller).
|
||||
- `FLUX_OFFLOAD` — `model` (default), `sequential` (sparsamer, langsamer)
|
||||
oder `none` (alles auf GPU; nur fuer >=24 GB VRAM-Karten).
|
||||
- `FLUX_DTYPE` — `bfloat16` (default) oder `float16`.
|
||||
- `HF_TOKEN` — FLUX.1-dev braucht HuggingFace-Login.
|
||||
|
||||
### 2. `flux/docker-compose.yml` — eigener Stack
|
||||
|
||||
Bewusst NICHT mit in `xtts/docker-compose.yml` gepackt: FLUX kann auch
|
||||
separat laufen (z.B. spaeter auf einer 4090, waehrend die 3060 weiter
|
||||
TTS+STT bedient). Eigener Compose, eigene `.env.example`, eigenes
|
||||
`hf-cache/`-Volume.
|
||||
|
||||
- GPU-Reservation analog zu f5tts/whisper.
|
||||
- Volume `./hf-cache:/root/.cache/huggingface` — wenn flux auf der
|
||||
gleichen Maschine wie xtts laeuft kann man `../xtts/hf-cache`
|
||||
symlinken, dann ist der Modell-Cache geteilt.
|
||||
- Restart `unless-stopped`.
|
||||
|
||||
### 3. `rvs/server.js` — Allowlist erweitert
|
||||
|
||||
Neue Typen: `flux_request`, `flux_response` (auch wenn das Initial-Load-
|
||||
broadcast `service_status` bereits zugelassen war).
|
||||
|
||||
### 4. `bridge/aria_bridge.py`
|
||||
|
||||
- `self._pending_flux: dict[str, asyncio.Future]` — request_id → future.
|
||||
- `self._remote_flux_ready: bool` — wird von `service_status` Updates
|
||||
gefuellt; steuert den HTTP-Timeout (240 s wenn ready, 900 s waehrend
|
||||
des allerersten Modell-Downloads).
|
||||
- `flux_response`-Handler: Progress-Ping (`state == "rendering"`) bleibt
|
||||
no-op auf der Future; `state == "done"` setzt die Future, Error setzt
|
||||
`{"error": ...}`.
|
||||
- `_flux_generate(prompt, width, height, steps, guidance, seed)` — Helper:
|
||||
1. UUID + Future
|
||||
2. `flux_request` broadcasten
|
||||
3. `asyncio.wait_for(future, timeout=...)`
|
||||
4. base64 → `/shared/uploads/aria_generated_<ts>.png`
|
||||
5. dict mit `{ok, path, sizeBytes, width, height, steps, guidance, seed, model, renderSeconds}`
|
||||
- HTTP-Endpoint `POST /internal/flux-generate` im internen Listener
|
||||
(Port 8090). Validiert prompt + clamps, ruft `_flux_generate`, gibt
|
||||
Result als JSON zurueck.
|
||||
|
||||
### 5. `aria-brain/agent.py` — META-Tool `flux_generate`
|
||||
|
||||
```jsonc
|
||||
{
|
||||
"name": "flux_generate",
|
||||
"parameters": {
|
||||
"prompt": "string (englischer Prompt — FLUX liefert auf EN besser)",
|
||||
"width": "integer (256..1536, default 1024)",
|
||||
"height": "integer (256..1536, default 1024)",
|
||||
"steps": "integer (1..50, default 28)",
|
||||
"guidance_scale": "number (default 3.5)",
|
||||
"seed": "integer (optional)"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Dispatcher:
|
||||
- POSTet `{prompt, width, height, steps, guidance_scale, seed}` an
|
||||
`http://aria-bridge:8090/internal/flux-generate` (urllib, 1200 s Timeout
|
||||
— der erste Render kann den 24 GB Modell-Download triggern).
|
||||
- Bei `ok=true` gibt das Tool den **Pfad** + Render-Stats zurueck und
|
||||
weist Claude explizit an: *"Schreibe `[FILE: <path>]` in deine
|
||||
Antwort an Stefan, dann zeigt die App das Bild inline."*
|
||||
- Brain ueberlegt sich den Begleittext selber und packt den Marker an
|
||||
passende Stelle.
|
||||
|
||||
### 6. `diagnostic/index.html` — Status-Badge
|
||||
|
||||
Label `flux: 'FLUX Image-Gen'` zum bestehenden `updateServiceStatus()`-Switch
|
||||
hinzugefuegt — kein neuer Code, gleicher Banner-Mechanismus wie F5-TTS /
|
||||
Whisper.
|
||||
|
||||
## File-Lifecycle
|
||||
|
||||
Generierte Bilder leben unter `/shared/uploads/aria_generated_<ts>.png`
|
||||
(gleicher Folder wie User-Uploads). Damit:
|
||||
- `[FILE: ...]`-Marker funktioniert (Bridge erlaubt nur Pfade unter
|
||||
`/shared/uploads/`).
|
||||
- File-Manager-Endpoints in Diagnostic (Liste/Loeschen/Zip) sehen sie
|
||||
ohne Sonderbehandlung.
|
||||
- Memory-Anhaenge: ARIA kann ein generiertes Bild im selben Turn an
|
||||
einen Memory-Eintrag haengen (`memory_save(attach_paths=[path])`).
|
||||
|
||||
## Bekannte Stolpersteine
|
||||
|
||||
- **HF-Login**: FLUX.1-dev ist gated. Vor erstem Start `HF_TOKEN` im
|
||||
`.env` setzen oder im Container `huggingface-cli login` machen, sonst
|
||||
403 beim ersten Download.
|
||||
- **Erster Render dauert lang**: 24 GB Modell laden + CUDA-Warmup → 5-10
|
||||
min realistisch. Brain-HTTP-Timeout ist 1200 s, RVS-Future-Timeout
|
||||
900 s (loading-Modus). Stefan sollte beim ersten "Mal mir was"-Request
|
||||
ein bisschen Geduld haben — danach sind Renders ~30-90 s.
|
||||
- **Lizenz**: FLUX.1-dev ist *non-commercial* (FLUX.1 Dev Non-Commercial
|
||||
License). Fuer kommerzielle Nutzung muss man auf `FLUX.1-schnell`
|
||||
(Apache-2.0) oder `FLUX.1-pro` (API only) wechseln. Stefan kann das
|
||||
ueber `FLUX_MODEL` in der `.env` umstellen.
|
||||
- **VRAM**: 12 GB (3060) reichen NUR mit `enable_model_cpu_offload`. Bei
|
||||
Out-of-Memory in den Logs auf `FLUX_OFFLOAD=sequential` switchen
|
||||
(deutlich langsamer, aber peak-VRAM ~6 GB).
|
||||
- **Parallele Calls**: Single-Worker-Queue in der flux-bridge — ein
|
||||
zweiter `flux_generate`-Tool-Call von Brain wartet, bis der erste fertig
|
||||
ist. In der Praxis kein Problem, weil Stefan eh nicht zwei Bilder
|
||||
gleichzeitig macht.
|
||||
@@ -0,0 +1,36 @@
|
||||
# ════════════════════════════════════════════════
|
||||
# ARIA FLUX-Bridge — Konfiguration
|
||||
# Kopieren nach .env und anpassen
|
||||
# ════════════════════════════════════════════════
|
||||
|
||||
# RVS Verbindung (gleiche Daten wie auf der ARIA-VM / xtts/.env)
|
||||
RVS_HOST=mobil.hacker-net.de
|
||||
RVS_PORT=444
|
||||
RVS_TLS=true
|
||||
RVS_TLS_FALLBACK=true
|
||||
RVS_TOKEN=dein_token_hier
|
||||
|
||||
# HuggingFace-Token — FLUX.1-dev ist gated (auf
|
||||
# https://huggingface.co/black-forest-labs/FLUX.1-dev "Agree" klicken,
|
||||
# dann unter https://huggingface.co/settings/tokens ein "Read"-Token
|
||||
# erzeugen). Fuer FLUX.1-schnell nicht noetig.
|
||||
HF_TOKEN=
|
||||
|
||||
# Modell:
|
||||
# black-forest-labs/FLUX.1-dev (Default, ~24 GB, non-commercial)
|
||||
# black-forest-labs/FLUX.1-schnell (4 Steps, Apache-2.0, schneller)
|
||||
FLUX_MODEL=black-forest-labs/FLUX.1-dev
|
||||
|
||||
# Offloading-Strategie (VRAM-Steuerung):
|
||||
# model — Default. Komponentenweise CPU-Offload, gut fuer 12 GB Karten.
|
||||
# sequential — sparsamer (Peak ~6 GB), aber 2-3x langsamer.
|
||||
# none — alles auf GPU. Nur fuer >= 24 GB VRAM-Karten.
|
||||
FLUX_OFFLOAD=model
|
||||
|
||||
# Float-Type. bfloat16 ist FLUX-native; auf alten Karten ohne BF16-Support
|
||||
# auf float16 wechseln.
|
||||
FLUX_DTYPE=bfloat16
|
||||
|
||||
# Hard-Caps gegen versehentlich teure Renders
|
||||
FLUX_MAX_STEPS=50
|
||||
FLUX_MAX_DIM=1536
|
||||
@@ -0,0 +1,5 @@
|
||||
# HuggingFace Model-Cache (FLUX.1-dev ~24 GB on disk)
|
||||
hf-cache/
|
||||
|
||||
# Docker .env
|
||||
.env
|
||||
@@ -0,0 +1,23 @@
|
||||
FROM nvidia/cuda:12.2.2-cudnn8-runtime-ubuntu22.04
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV PYTHONUNBUFFERED=1
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
python3 python3-pip git \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# PyTorch CUDA-Wheels zuerst, damit diffusers nicht CPU-Torch zieht.
|
||||
# Versionsmatrix wie bei f5tts gehalten (cu121, Torch 2.3.1) — gleicher
|
||||
# Treiber-Footprint, gleicher HF-Cache-Pfad.
|
||||
RUN pip3 install --no-cache-dir torch==2.3.1 \
|
||||
--index-url https://download.pytorch.org/whl/cu121
|
||||
|
||||
COPY requirements.txt .
|
||||
RUN pip3 install --no-cache-dir -r requirements.txt
|
||||
|
||||
COPY bridge.py .
|
||||
|
||||
CMD ["python3", "bridge.py"]
|
||||
+394
@@ -0,0 +1,394 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
ARIA FLUX-Bridge — laeuft auf der Gamebox (RTX 3060).
|
||||
|
||||
Empfaengt flux_request via RVS → FLUX.1-dev/-schnell auf GPU → sendet
|
||||
flux_response mit base64-PNG zurueck an die aria-bridge. Diese speichert
|
||||
die Datei nach /shared/uploads/ und ARIA referenziert sie mit
|
||||
[FILE: ...]-Marker in ihrer Antwort.
|
||||
|
||||
12 GB VRAM auf der 3060 reichen fuer FLUX.1-dev nur mit
|
||||
`enable_model_cpu_offload()` — sonst OOM. Setze FLUX_OFFLOAD=sequential
|
||||
fuer Maximal-Sparsamkeit (langsamer) oder FLUX_OFFLOAD=none wenn die
|
||||
GPU genug VRAM hat (z.B. spaeter 4090).
|
||||
|
||||
Env:
|
||||
RVS_HOST, RVS_PORT, RVS_TLS, RVS_TLS_FALLBACK, RVS_TOKEN
|
||||
FLUX_MODEL Default: black-forest-labs/FLUX.1-dev
|
||||
Alt: black-forest-labs/FLUX.1-schnell (4-Step, Apache-2.0)
|
||||
FLUX_DEVICE Default: cuda
|
||||
FLUX_DTYPE Default: bfloat16 (alt: float16)
|
||||
FLUX_OFFLOAD Default: model (alt: sequential | none)
|
||||
FLUX_MAX_STEPS Default: 50
|
||||
FLUX_MAX_DIM Default: 1536
|
||||
"""
|
||||
import asyncio
|
||||
import base64
|
||||
import io
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
from typing import Optional
|
||||
|
||||
import websockets
|
||||
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format="%(asctime)s [%(levelname)s] %(message)s",
|
||||
datefmt="%H:%M:%S",
|
||||
)
|
||||
logger = logging.getLogger("flux-bridge")
|
||||
# HuggingFace/Torch download-Logs daempfen
|
||||
logging.getLogger("httpx").setLevel(logging.WARNING)
|
||||
logging.getLogger("urllib3").setLevel(logging.WARNING)
|
||||
|
||||
RVS_HOST = os.getenv("RVS_HOST", "").strip()
|
||||
RVS_PORT = int(os.getenv("RVS_PORT", "443"))
|
||||
RVS_TLS = os.getenv("RVS_TLS", "true").lower() == "true"
|
||||
RVS_TLS_FALLBACK = os.getenv("RVS_TLS_FALLBACK", "true").lower() == "true"
|
||||
RVS_TOKEN = os.getenv("RVS_TOKEN", "").strip()
|
||||
|
||||
FLUX_MODEL = os.getenv("FLUX_MODEL", "black-forest-labs/FLUX.1-dev").strip()
|
||||
FLUX_DEVICE = os.getenv("FLUX_DEVICE", "cuda").strip()
|
||||
FLUX_DTYPE = os.getenv("FLUX_DTYPE", "bfloat16").strip().lower()
|
||||
FLUX_OFFLOAD = os.getenv("FLUX_OFFLOAD", "model").strip().lower()
|
||||
FLUX_MAX_STEPS = int(os.getenv("FLUX_MAX_STEPS", "50"))
|
||||
FLUX_MAX_DIM = int(os.getenv("FLUX_MAX_DIM", "1536"))
|
||||
|
||||
# FLUX-dev native: guidance=3.5, steps=28. FLUX-schnell: guidance=0.0, steps=4.
|
||||
DEFAULT_STEPS_DEV = 28
|
||||
DEFAULT_STEPS_SCHNELL = 4
|
||||
DEFAULT_GUIDANCE_DEV = 3.5
|
||||
DEFAULT_GUIDANCE_SCHNELL = 0.0
|
||||
|
||||
|
||||
def _is_schnell(model_id: str) -> bool:
|
||||
return "schnell" in model_id.lower()
|
||||
|
||||
|
||||
def _torch_dtype():
|
||||
"""Lazy-resolve damit Torch erst beim Modell-Laden importiert wird."""
|
||||
import torch
|
||||
return {"bfloat16": torch.bfloat16, "float16": torch.float16, "float32": torch.float32}\
|
||||
.get(FLUX_DTYPE, torch.bfloat16)
|
||||
|
||||
|
||||
def _snap_dim(v: int, default: int = 1024) -> int:
|
||||
"""FLUX braucht Multiples von 16 (sicher: 64). Clamp + Snap."""
|
||||
try:
|
||||
n = int(v)
|
||||
except (TypeError, ValueError):
|
||||
n = default
|
||||
n = max(256, min(FLUX_MAX_DIM, n))
|
||||
# Auf naechstes Vielfaches von 64 abrunden
|
||||
n = (n // 64) * 64
|
||||
return max(256, n)
|
||||
|
||||
|
||||
class FluxRunner:
    """Holds the FLUX pipeline; synthesis runs in an executor (blocking).

    The GPU is the bottleneck — renders are serialized via a queue in the
    caller; here only the lazy model load is guarded by a single lock.
    One render on an RTX 3060 takes roughly 20-90 s depending on
    steps/resolution.
    """

    def __init__(self) -> None:
        # Lazily loaded diffusers FluxPipeline; None until ensure_loaded().
        self.pipe = None
        # Guards the one-time model load against concurrent callers.
        self._lock = asyncio.Lock()
        self.model_id: str = FLUX_MODEL
        # Wall-clock duration of the last model load, for status reporting.
        self.last_load_seconds: float = 0.0

    def _load_blocking(self) -> None:
        """Load the pipeline synchronously (runs inside an executor thread)."""
        # Imported lazily so importing this module does not require torch.
        import torch
        from diffusers import FluxPipeline

        logger.info("Lade FLUX '%s' (dtype=%s, offload=%s)...",
                    self.model_id, FLUX_DTYPE, FLUX_OFFLOAD)
        t0 = time.time()
        pipe = FluxPipeline.from_pretrained(self.model_id, torch_dtype=_torch_dtype())

        if FLUX_OFFLOAD == "sequential":
            pipe.enable_sequential_cpu_offload()
        elif FLUX_OFFLOAD == "none":
            pipe.to(FLUX_DEVICE)
        else:  # "model" — default, sweet spot for 12 GB cards
            pipe.enable_model_cpu_offload()

        # VAE tiling saves VRAM on large images (>1024); best-effort because
        # not every diffusers version exposes it.
        try:
            pipe.vae.enable_tiling()
        except Exception:
            pass

        self.pipe = pipe
        self.last_load_seconds = time.time() - t0
        logger.info("FLUX geladen in %.1fs", self.last_load_seconds)
        # Free CUDA allocations left over from the load.
        try:
            torch.cuda.empty_cache()
        except Exception:
            pass

    async def ensure_loaded(self) -> None:
        """Load the model exactly once; later calls return immediately."""
        async with self._lock:
            if self.pipe is not None:
                return
            # FIX: get_running_loop() instead of the deprecated
            # get_event_loop() — we are inside a coroutine, so a running
            # loop is guaranteed and this never implicitly creates one.
            loop = asyncio.get_running_loop()
            await loop.run_in_executor(None, self._load_blocking)

    def _generate_blocking(self, prompt: str, width: int, height: int,
                           steps: int, guidance: float, seed: Optional[int]) -> bytes:
        """Render one image synchronously and return it as PNG bytes.

        A negative seed counts as "no seed" (non-deterministic render).
        """
        import torch
        gen = None
        if seed is not None and seed >= 0:
            gen = torch.Generator(device=FLUX_DEVICE).manual_seed(int(seed))

        logger.info("Render: %dx%d, steps=%d, guidance=%.2f, seed=%s, prompt=%r",
                    width, height, steps, guidance, seed, prompt[:80])
        out = self.pipe(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=steps,
            guidance_scale=guidance,
            generator=gen,
        )
        image = out.images[0]
        buf = io.BytesIO()
        image.save(buf, format="PNG", optimize=True)
        png_bytes = buf.getvalue()
        # Hand VRAM back for the next render.
        try:
            torch.cuda.empty_cache()
        except Exception:
            pass
        return png_bytes

    async def generate(self, prompt: str, width: int, height: int,
                       steps: int, guidance: float, seed: Optional[int]) -> bytes:
        """Async wrapper: ensure the model is loaded, then render in an executor."""
        await self.ensure_loaded()
        # Same fix as in ensure_loaded(): use the running loop explicitly.
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(
            None, self._generate_blocking, prompt, width, height, steps, guidance, seed,
        )
|
||||
|
||||
|
||||
# ── Helpers ─────────────────────────────────────────────────
|
||||
|
||||
|
||||
async def _send(ws, mtype: str, payload: dict) -> None:
|
||||
try:
|
||||
await ws.send(json.dumps({
|
||||
"type": mtype,
|
||||
"payload": payload,
|
||||
"timestamp": int(time.time() * 1000),
|
||||
}))
|
||||
except Exception as e:
|
||||
logger.warning("Send fehlgeschlagen (%s): %s", mtype, e)
|
||||
|
||||
|
||||
async def _broadcast_status(ws, state: str, **extra) -> None:
    """Broadcast a service_status message for the flux module.

    state: 'loading' | 'ready' | 'error'. Extra keyword args are merged
    into the payload (e.g. model=..., loadSeconds=...).
    """
    await _send(ws, "service_status", {"service": "flux", "state": state, **extra})
|
||||
|
||||
|
||||
# ── Flux-Request Queue ──────────────────────────────────────

# One GPU, one render at a time. Parallel requests would OOM otherwise,
# so incoming flux_request payloads are queued and drained by _flux_worker.
_flux_queue: "asyncio.Queue[tuple]" = asyncio.Queue()
|
||||
|
||||
|
||||
def _resolve_request(payload: dict, runner: FluxRunner) -> tuple[str, int, int, int, float, Optional[int]]:
    """Extract and sanitize the fields of a flux_request payload.

    Prompts are capped at 2000 chars, dimensions snapped to the model grid,
    steps clamped to [1, FLUX_MAX_STEPS], guidance validated against
    [0, 20]; malformed values fall back to model-specific defaults.

    Raises:
        ValueError: if the prompt is missing or empty.
    """
    prompt = (payload.get("prompt") or "").strip()
    if not prompt:
        raise ValueError("prompt fehlt")
    prompt = prompt[:2000]

    width = _snap_dim(payload.get("width", 1024))
    height = _snap_dim(payload.get("height", 1024))

    # schnell renders with far fewer steps and no meaningful guidance.
    if _is_schnell(runner.model_id):
        default_steps, default_guidance = DEFAULT_STEPS_SCHNELL, DEFAULT_GUIDANCE_SCHNELL
    else:
        default_steps, default_guidance = DEFAULT_STEPS_DEV, DEFAULT_GUIDANCE_DEV

    try:
        steps = int(payload.get("steps", default_steps))
    except (TypeError, ValueError):
        steps = default_steps
    steps = max(1, min(FLUX_MAX_STEPS, steps))

    try:
        guidance = float(payload.get("guidance_scale", default_guidance))
    except (TypeError, ValueError):
        guidance = default_guidance
    # Negated range check also catches NaN, which fails every comparison.
    if not (0.0 <= guidance <= 20.0):
        guidance = default_guidance

    seed = payload.get("seed")
    try:
        seed = int(seed) if seed is not None else None
    except (TypeError, ValueError):
        seed = None

    return prompt, width, height, steps, guidance, seed
|
||||
|
||||
|
||||
async def _flux_worker(ws, runner: FluxRunner) -> None:
    """Drain the request queue forever — one GPU, one image at a time."""
    while True:
        payload = await _flux_queue.get()
        rid = payload.get("requestId") or str(uuid.uuid4())
        try:
            await _do_render(ws, runner, payload, rid)
        except Exception:
            # Render errors are reported per-request; the worker keeps running.
            logger.exception("Flux-Worker Fehler")
            failure = {
                "requestId": rid,
                "error": "internal error",
            }
            await _send(ws, "flux_response", failure)
        finally:
            _flux_queue.task_done()
|
||||
|
||||
|
||||
async def _do_render(ws, runner: FluxRunner, payload: dict, request_id: str) -> None:
    """Validate one request, render it, and push flux_response messages.

    Sends a 'rendering' progress message first (renders routinely exceed
    30 s), then either an error or the finished PNG as base64.
    """
    started = time.time()
    try:
        prompt, width, height, steps, guidance, seed = _resolve_request(payload, runner)
    except ValueError as e:
        logger.warning("flux_request invalid: %s", e)
        await _send(ws, "flux_response", {"requestId": request_id, "error": str(e)})
        return

    # Progress ping: the user should see that something is happening.
    progress = {
        "requestId": request_id,
        "state": "rendering",
        "width": width, "height": height, "steps": steps,
    }
    await _send(ws, "flux_response", progress)

    try:
        png = await runner.generate(prompt, width, height, steps, guidance, seed)
    except Exception as e:
        logger.exception("FLUX Render-Fehler")
        await _send(ws, "flux_response", {"requestId": request_id, "error": str(e)[:200]})
        return

    elapsed = time.time() - started
    encoded = base64.b64encode(png).decode("ascii")
    logger.info("Render fertig: %dx%d, %d KB PNG, %.1fs", width, height, len(png) // 1024, elapsed)

    result = {
        "requestId": request_id,
        "state": "done",
        "base64": encoded,
        "mimeType": "image/png",
        "width": width,
        "height": height,
        "steps": steps,
        "guidance": guidance,
        "seed": seed,
        "model": runner.model_id,
        "renderSeconds": round(elapsed, 2),
        "sizeBytes": len(png),
    }
    await _send(ws, "flux_response", result)
|
||||
|
||||
|
||||
# ── Haupt-Loop ──────────────────────────────────────────────
|
||||
|
||||
|
||||
async def run_loop(runner: FluxRunner) -> None:
    """Connect to the RVS websocket and serve flux_request messages forever.

    Reconnects with exponential backoff (2 s .. 30 s). If TLS fails on the
    first attempt and RVS_TLS_FALLBACK is set, retries once over plain ws://.
    """
    use_tls = RVS_TLS
    retry_s = 2
    tls_fallback_tried = False

    while True:
        scheme = "wss" if use_tls else "ws"
        url = f"{scheme}://{RVS_HOST}:{RVS_PORT}/ws?token={RVS_TOKEN}"
        # Never log the raw token.
        masked = url.replace(RVS_TOKEN, "***") if RVS_TOKEN else url

        try:
            logger.info("Verbinde zu RVS: %s", masked)
            # max_size 100 MB so a 4 MP PNG (~5-10 MB -> ~13 MB base64)
            # fits comfortably. Consistent with the RVS limit (100 MB).
            async with websockets.connect(url, ping_interval=20, ping_timeout=10,
                                          max_size=100 * 1024 * 1024) as ws:
                logger.info("RVS verbunden")
                retry_s = 2
                tls_fallback_tried = False

                async def _load_with_status():
                    # Broadcast loading/ready around the (possibly very long)
                    # model load so clients can show progress; failures are
                    # reported as service_status 'error' instead of crashing
                    # the receive loop.
                    try:
                        if runner.pipe is not None:
                            logger.info("Initial: broadcaste ready (Pipeline schon im RAM: %s)",
                                        runner.model_id)
                            await _broadcast_status(ws, "ready",
                                                    model=runner.model_id,
                                                    loadSeconds=runner.last_load_seconds)
                        else:
                            logger.info("Initial: broadcaste loading + lade '%s'", runner.model_id)
                            await _broadcast_status(ws, "loading", model=runner.model_id)
                            await runner.ensure_loaded()
                            await _broadcast_status(ws, "ready",
                                                    model=runner.model_id,
                                                    loadSeconds=runner.last_load_seconds)
                    except Exception as e:
                        logger.exception("Initial-Load crashed: %s", e)
                        try:
                            await _broadcast_status(ws, "error", error=str(e)[:200])
                        except Exception:
                            pass
                # Run the load in the background so the receive loop starts
                # handling messages immediately.
                asyncio.create_task(_load_with_status())

                worker = asyncio.create_task(_flux_worker(ws, runner))

                try:
                    async for raw in ws:
                        try:
                            msg = json.loads(raw)
                        except Exception:
                            # Ignore non-JSON frames.
                            continue
                        mtype = msg.get("type", "")
                        payload = msg.get("payload", {}) or {}

                        if mtype == "flux_request":
                            await _flux_queue.put(payload)
                finally:
                    # Tear the worker down cleanly when the connection drops.
                    worker.cancel()
                    try:
                        await worker
                    except asyncio.CancelledError:
                        pass
        except Exception as e:
            logger.warning("Verbindung verloren: %s", e)
            if use_tls and RVS_TLS_FALLBACK and not tls_fallback_tried:
                logger.info("TLS fehlgeschlagen — Fallback auf ws://")
                use_tls = False
                tls_fallback_tried = True
                continue
            # Exponential backoff, capped at 30 s.
            await asyncio.sleep(min(retry_s, 30))
            retry_s = min(retry_s * 2, 30)
|
||||
|
||||
|
||||
async def main() -> None:
    """Entry point: validate the required config, then run the RVS loop."""
    if not RVS_HOST:
        logger.error("RVS_HOST nicht gesetzt — Abbruch")
        sys.exit(1)
    await run_loop(FluxRunner())
|
||||
|
||||
|
||||
if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        # Ctrl-C is a normal shutdown, not an error.
        sys.exit(0)
|
||||
@@ -0,0 +1,60 @@
|
||||
# ════════════════════════════════════════════════
# ARIA FLUX bridge — text-to-image (GPU)
# Separate stack because FLUX can run on a different
# machine than f5tts/whisper (e.g. a 4090 separate
# from the gaming PC). Connects itself via WebSocket
# to the RVS and listens for flux_request.
# ════════════════════════════════════════════════
#
# Prerequisites:
#  - NVIDIA GPU with >= 12 GB VRAM (a 3060 is enough with
#    enable_model_cpu_offload). Below 12 GB: set
#    FLUX_OFFLOAD=sequential, otherwise OOM.
#  - Docker with the NVIDIA Container Toolkit
#  - HuggingFace token in .env (FLUX.1-dev is gated)
#  - .env with RVS connection data (same as xtts!)
#
# Start: docker compose up -d
# ════════════════════════════════════════════════

services:

  # ─── FLUX.1-dev image generation (GPU) ─────────
  # Receives flux_request via RVS, renders a PNG with FLUX.1-dev (12B params)
  # and broadcasts flux_response with a base64 PNG back. aria-bridge stores
  # the file under /shared/uploads/ and ARIA references it via [FILE:] marker.
  #
  # Model choice via FLUX_MODEL:
  #   - black-forest-labs/FLUX.1-dev      (default, 28 steps, non-commercial)
  #   - black-forest-labs/FLUX.1-schnell  (4 steps, Apache-2.0, faster)
  # A HuggingFace token is required for FLUX.1-dev — run `huggingface-cli login`
  # first or set HF_TOKEN in .env, otherwise the first download fails with 403.
  flux-bridge:
    build: .
    container_name: aria-flux-bridge
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
    environment:
      - RVS_HOST=${RVS_HOST}
      - RVS_PORT=${RVS_PORT:-443}
      - RVS_TLS=${RVS_TLS:-true}
      - RVS_TLS_FALLBACK=${RVS_TLS_FALLBACK:-true}
      - RVS_TOKEN=${RVS_TOKEN}
      - FLUX_MODEL=${FLUX_MODEL:-black-forest-labs/FLUX.1-dev}
      - FLUX_DEVICE=${FLUX_DEVICE:-cuda}
      - FLUX_DTYPE=${FLUX_DTYPE:-bfloat16}
      - FLUX_OFFLOAD=${FLUX_OFFLOAD:-model}
      - FLUX_MAX_STEPS=${FLUX_MAX_STEPS:-50}
      - FLUX_MAX_DIM=${FLUX_MAX_DIM:-1536}
      - HF_TOKEN=${HF_TOKEN:-}  # FLUX.1-dev needs a login token
    volumes:
      # Bind mount. FLUX.1-dev is ~24 GB on disk! If flux runs on the same
      # machine as xtts: symlink ../xtts/hf-cache to share the cache.
      - ./hf-cache:/root/.cache/huggingface
    restart: unless-stopped
|
||||
@@ -0,0 +1,9 @@
|
||||
diffusers>=0.30.0
|
||||
transformers>=4.43.0
|
||||
accelerate>=0.33.0
|
||||
sentencepiece>=0.2.0
|
||||
protobuf>=4.25.0
|
||||
pillow>=10.0.0
|
||||
huggingface_hub>=0.24.0
|
||||
websockets>=12.0
|
||||
numpy>=1.24
|
||||
@@ -39,6 +39,7 @@ const ALLOWED_TYPES = new Set([
|
||||
"stt_request", "stt_response",
|
||||
"service_status",
|
||||
"config_request",
|
||||
"flux_request", "flux_response",
|
||||
]);
|
||||
|
||||
// Token-Raum: token -> { clients: Set<ws> }
|
||||
|
||||
@@ -2,6 +2,9 @@
|
||||
# ARIA Gamebox Stack — GPU F5-TTS + Whisper STT
|
||||
# Laeuft auf dem Gaming-PC (RTX 3060)
|
||||
# Verbindet sich zum RVS fuer TTS/STT-Requests
|
||||
#
|
||||
# FLUX-Bildgenerierung liegt im /flux Verzeichnis im Repo-Root —
|
||||
# eigener Compose-Stack, kann auch auf einer anderen Maschine laufen.
|
||||
# ════════════════════════════════════════════════
|
||||
#
|
||||
# Voraussetzungen:
|
||||
|
||||
Reference in New Issue
Block a user