feat: Echtzeit-Sync via SSE + Journal-basierter 3-Wege-Vergleich

Desktop-Client komplett ueberarbeitet nach Nextcloud-Vorbild:
- Persistentes SQLite-Journal (journal.rs) speichert letzten bekannten
  Stand pro Datei - ueberlebt Client-Neustarts (Hauptbug behoben).
- Engine.rs neu: 3-Wege-Vergleich Local <-> Journal <-> Server mit
  sauberer Konflikt-Kopie (inkl. Username + Zeitstempel).
- Loesch-Propagation: Lokal geloeschte Dateien landen im Server-
  Papierkorb des Owners (auch bei Freigaben). Auf dem Server
  geloeschte Dateien werden lokal entfernt.
- Lock-Flow repariert: frischer Token bei jedem Call, Fehler-Feedback.

Echtzeit-Sync:
- Backend: SSE-Endpoint /api/sync/events mit In-Memory-Broadcaster.
  Events bei Create/Update/Delete/Lock/Unlock, Zustellung an Owner
  plus alle User mit Share-Permission.
- Client: persistente SSE-Verbindung mit Auto-Reconnect. Events
  triggern sofortigen Sync (<100ms). 30s-Polling bleibt als
  Fallback fuer Netzwerk-Aussetzer.

Weitere Fixes:
- /api/sync/tree filtert is_trashed=False (Papierkorb wird nicht
  mehr an Clients gesynct).
- Web-GUI: Lock/Unlock-Buttons pro Datei, Admin darf fremde Locks
  zwangsweise loesen. Rename/Delete disabled bei fremdem Lock.
- Lock-Check im Backend bei PUT/DELETE (423 Locked Response).
- Background-Sync nur noch einmal pro Prozess gestartet, liest
  sync_paths pro Iteration neu - add/remove wirkt sofort, kein
  Client-Neustart mehr noetig.
- Watcher werden pro Sync-Pfad individuell verwaltet.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Stefan Hacker
2026-04-12 09:50:44 +02:00
parent e65d330d1d
commit 50385faa02
11 changed files with 849 additions and 448 deletions
+61 -1
View File
@@ -16,6 +16,21 @@ from app.api.auth import token_required
from app.extensions import db, bcrypt
from app.models.file import File, FilePermission, ShareLink
from app.models.file_lock import FileLock
from app.services.events import broadcaster, notify_file_change
def _share_recipients(file_obj):
    """Collect the ids of every user (owner excluded) that should be told
    about changes to *file_obj*.

    A user qualifies when they hold a direct FilePermission on the file
    itself or on any folder along its ancestor chain.
    """
    recipients = set()
    node = file_obj
    while node is not None:
        perms = FilePermission.query.filter_by(file_id=node.id).all()
        recipients.update(perm.user_id for perm in perms)
        node = node.parent
    # The owner is notified separately by the caller; never list them here.
    recipients.discard(file_obj.owner_id)
    return list(recipients)
def _user_upload_dir(user_id):
@@ -137,6 +152,8 @@ def create_folder():
)
db.session.add(folder)
db.session.commit()
notify_file_change(folder.owner_id, folder.id, 'created',
shared_with=_share_recipients(folder))
return jsonify(folder.to_dict()), 201
@@ -228,6 +245,8 @@ def upload_file():
existing.checksum = checksum
existing.updated_at = datetime.now(timezone.utc)
db.session.commit()
notify_file_change(existing.owner_id, existing.id, 'updated',
shared_with=_share_recipients(existing))
return jsonify(existing.to_dict()), 200
file_obj = File(
@@ -242,6 +261,8 @@ def upload_file():
)
db.session.add(file_obj)
db.session.commit()
notify_file_change(file_obj.owner_id, file_obj.id, 'created',
shared_with=_share_recipients(file_obj))
return jsonify(file_obj.to_dict()), 201
@@ -306,6 +327,11 @@ def update_file(file_id):
if err:
return err
# Lock-Check: fremder Lock blockiert Aenderungen (admin kann durch)
lock = FileLock.get_lock(file_id)
if lock and lock.locked_by != user.id and user.role != 'admin':
return jsonify({'error': f'Datei ist von {lock.user.username} ausgecheckt'}), 423
data = request.get_json()
if 'name' in data:
name = data['name'].strip()
@@ -331,6 +357,8 @@ def update_file(file_id):
f.updated_at = datetime.now(timezone.utc)
db.session.commit()
notify_file_change(f.owner_id, f.id, 'updated',
shared_with=_share_recipients(f))
return jsonify(f.to_dict()), 200
@@ -346,9 +374,18 @@ def delete_file(file_id):
if not f or f.owner_id != user.id:
return jsonify({'error': 'Zugriff verweigert'}), 403
# Lock-Check
lock = FileLock.get_lock(file_id)
if lock and lock.locked_by != user.id and user.role != 'admin':
return jsonify({'error': f'Datei ist von {lock.user.username} ausgecheckt'}), 423
# Capture recipients BEFORE we detach the file from its parent tree
recipients = _share_recipients(f)
owner_id = f.owner_id
# Soft-delete: move to trash
_trash_recursive(f)
db.session.commit()
notify_file_change(owner_id, f.id, 'deleted', shared_with=recipients)
return jsonify({'message': 'In Papierkorb verschoben'}), 200
@@ -1014,6 +1051,8 @@ def lock_file(file_id):
)
db.session.add(lock)
db.session.commit()
notify_file_change(f.owner_id, f.id, 'locked',
shared_with=_share_recipients(f))
return jsonify(lock.to_dict()), 200
@@ -1031,6 +1070,10 @@ def unlock_file(file_id):
db.session.delete(lock)
db.session.commit()
f = db.session.get(File, file_id)
if f:
notify_file_change(f.owner_id, f.id, 'unlocked',
shared_with=_share_recipients(f))
return jsonify({'message': 'Datei entsperrt'}), 200
@@ -1088,7 +1131,7 @@ def sync_tree():
user = request.current_user
def _build_tree(parent_id):
files = File.query.filter_by(owner_id=user.id, parent_id=parent_id)\
files = File.query.filter_by(owner_id=user.id, parent_id=parent_id, is_trashed=False)\
.order_by(File.is_folder.desc(), File.name).all()
result = []
for f in files:
@@ -1112,6 +1155,23 @@ def sync_tree():
return jsonify({'tree': _build_tree(None)}), 200
@api_bp.route('/sync/events', methods=['GET'])
@token_required
def sync_events():
    """Server-Sent Events stream: real-time file change notifications.

    Holds the HTTP response open and relays events published for the
    authenticated user by the in-memory broadcaster.
    """
    # Resolve the id eagerly: the generator keeps running after the
    # request context is gone, so request.current_user must not be
    # touched from inside it.
    uid = request.current_user.id

    response = Response(broadcaster.stream(uid), mimetype='text/event-stream')
    for header, value in (
        ('Cache-Control', 'no-cache'),
        ('X-Accel-Buffering', 'no'),   # disable nginx buffering
        ('Connection', 'keep-alive'),
    ):
        response.headers[header] = value
    return response
@api_bp.route('/sync/changes', methods=['GET'])
@token_required
def sync_changes():
+81
View File
@@ -0,0 +1,81 @@
"""In-memory event broadcaster for SSE clients.
Each logged-in user can have multiple connected clients (desktop, web,
mobile). Every client gets its own queue. Mutating file operations push
an event into the queues of every affected user.
"""
from __future__ import annotations
import json
import queue
import threading
import time
from typing import Iterable
class EventBroadcaster:
    """Fan-out hub that delivers change events to per-client SSE queues.

    The subscriber table maps a user id to the queues of all of that
    user's connected clients (desktop, web, mobile).  Every access to the
    table is guarded by a lock so one broadcaster instance can be shared
    across request threads.  Publishing never blocks: a full client queue
    simply drops the event.
    """

    def __init__(self) -> None:
        self._lock = threading.Lock()
        # user_id -> list of per-client queues
        self._subs: dict[int, list[queue.Queue]] = {}

    def subscribe(self, user_id: int) -> queue.Queue:
        """Register a new client for *user_id* and return its private queue."""
        client_q: queue.Queue = queue.Queue(maxsize=256)
        with self._lock:
            self._subs.setdefault(user_id, []).append(client_q)
        return client_q

    def unsubscribe(self, user_id: int, q: queue.Queue) -> None:
        """Remove *q* from *user_id*'s subscribers (no-op when absent)."""
        with self._lock:
            queues = self._subs.get(user_id)
            if not queues:
                return
            if q in queues:
                queues.remove(q)
            if not queues:
                # Last client gone - forget the user entirely.
                self._subs.pop(user_id, None)

    def publish(self, user_ids: Iterable[int], event: dict) -> None:
        """Push *event* (stamped with a 'ts' timestamp) to each user's queues."""
        payload = dict(event)
        payload.setdefault('ts', time.time())
        with self._lock:
            for uid in set(user_ids):
                for client_q in self._subs.get(uid, []):
                    try:
                        client_q.put_nowait(payload)
                    except queue.Full:
                        # Slow client - drop the event rather than block.
                        pass

    def stream(self, user_id: int):
        """Generator yielding SSE-formatted strings for one client."""
        client_q = self.subscribe(user_id)
        try:
            # Initial hello so the client knows it's connected.
            hello = json.dumps({'user_id': user_id})
            yield f"event: hello\ndata: {hello}\n\n"
            while True:
                try:
                    event = client_q.get(timeout=20.0)
                except queue.Empty:
                    # Heartbeat / keepalive comment - also keeps proxies happy.
                    yield ": keepalive\n\n"
                    continue
                kind = event.get('type', 'change')
                yield f"event: {kind}\ndata: {json.dumps(event)}\n\n"
        finally:
            self.unsubscribe(user_id, client_q)
# Process-wide singleton shared by all request handlers.
broadcaster = EventBroadcaster()


def notify_file_change(owner_id: int, file_id: int | None, change: str,
                       shared_with: Iterable[int] = ()) -> None:
    """Emit a file change event to the owner plus any users with share access.

    *change* is one of 'created' | 'updated' | 'deleted' | 'locked' | 'unlocked'.
    """
    event = {'type': 'file', 'change': change, 'file_id': file_id}
    broadcaster.publish([owner_id, *shared_with], event)