feat: real-time sync via SSE + journal-based 3-way comparison

Desktop client reworked from the ground up, following the Nextcloud model:
- Persistent SQLite journal (journal.rs) stores the last known state per
  file and survives client restarts (main bug fixed).
- New engine.rs: 3-way comparison Local <-> Journal <-> Server with a
  clean conflict copy (including username + timestamp); see the decision
  sketch after this list.
- Deletion propagation: files deleted locally go to the owner's server-side
  trash (also for shares); files deleted on the server are removed locally.
- Lock flow fixed: fresh token on every call, error feedback.
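
As a rough sketch, the per-file decision boils down to the comparison below
(the enum and function names are illustrative, not the actual engine.rs code):

```rust
/// Illustrative only: the decision at the heart of the 3-way comparison.
/// `journal` is the checksum recorded at the last successful sync.
enum SyncAction { UpToDate, Download, Upload, Conflict }

fn decide(local: &str, server: &str, journal: Option<&str>) -> SyncAction {
    if local == server {
        return SyncAction::UpToDate;               // nothing to do
    }
    match journal {
        // No history (e.g. fresh journal): play it safe and keep both copies.
        None => SyncAction::Conflict,
        Some(j) => match (local != j, server != j) {
            (true, false) => SyncAction::Upload,   // only we changed it
            (false, true) => SyncAction::Download, // only the server changed it
            _             => SyncAction::Conflict, // both sides changed
        },
    }
}
```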

Real-time sync:
- Backend: SSE endpoint /api/sync/events with an in-memory broadcaster.
  Events on create/update/delete/lock/unlock, delivered to the owner plus
  every user with share permission.
- Client: persistent SSE connection with auto-reconnect. Events trigger an
  immediate sync (<100 ms); the 30 s polling stays in place as a fallback
  for network outages. A sketch of the client loop follows this list.
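
Minimal sketch of the client-side SSE loop, assuming reqwest with the
"stream" feature and a tokio channel that wakes the sync task; the auth
header format and helper names are assumptions, not the shipped code:

```rust
use futures_util::StreamExt;

async fn sse_loop(client: reqwest::Client, base: String, token: String,
                  trigger_sync: tokio::sync::mpsc::Sender<()>) {
    loop {
        let req = client
            .get(format!("{}/api/sync/events", base))
            .header("Authorization", format!("Bearer {}", token))
            .send()
            .await;

        if let Ok(resp) = req {
            let mut stream = resp.bytes_stream();
            // Any "data:" line from the server means something changed
            // (create/update/delete/lock/unlock) -> kick off a sync run.
            while let Some(Ok(chunk)) = stream.next().await {
                if String::from_utf8_lossy(&chunk).contains("data:") {
                    let _ = trigger_sync.send(()).await;
                }
            }
        }
        // Connection dropped or failed: back off, then reconnect.
        // The 30 s polling loop keeps running independently as a fallback.
        tokio::time::sleep(std::time::Duration::from_secs(5)).await;
    }
}
```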

Further fixes:
- /api/sync/tree now filters is_trashed=False (trashed files are no longer
  synced to clients).
- Web GUI: lock/unlock buttons per file; admins may force-release other
  users' locks. Rename/delete are disabled while someone else holds the lock.
- Lock check in the backend on PUT/DELETE (423 Locked response).
- Background sync is started only once per process and re-reads sync_paths
  on every iteration, so add/remove takes effect immediately without a
  client restart (loop sketch below).
- Watchers are now managed individually per sync path.
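
Sketched shape of that background loop (load_sync_paths and the channel
wiring are placeholders, not the actual client code):

```rust
use crate::sync::engine::{SyncEngine, SyncPath};

async fn background_sync(mut engine: SyncEngine,
                         mut sse_trigger: tokio::sync::mpsc::Receiver<()>) {
    loop {
        // Pick up config changes made through the GUI since the last pass.
        engine.sync_paths = load_sync_paths();

        if let Ok(lines) = engine.sync_all().await {
            for l in lines { println!("{}", l); }
        }

        // Wait for either an SSE event (near-instant sync) or the
        // 30 s polling fallback, whichever fires first.
        tokio::select! {
            _ = sse_trigger.recv() => {}
            _ = tokio::time::sleep(std::time::Duration::from_secs(30)) => {}
        }
    }
}

fn load_sync_paths() -> Vec<SyncPath> {
    // Hypothetical helper: in the real client this comes from the persisted
    // config; returning an empty list just keeps the sketch self-contained.
    Vec::new()
}
```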

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Stefan Hacker
2026-04-12 09:50:44 +02:00
parent e65d330d1d
commit 50385faa02
11 changed files with 849 additions and 448 deletions
+14
@@ -241,6 +241,20 @@ impl MiniCloudApi {
Ok(())
}
pub async fn delete_file(&self, file_id: i64) -> Result<(), String> {
let url = format!("{}/api/files/{}", self.server_url, file_id);
let resp = self.client.delete(&url)
.header("Authorization", self.auth_header())
.send()
.await
.map_err(|e| format!("Delete Fehler: {}", e))?;
if !resp.status().is_success() {
let text = resp.text().await.unwrap_or_default();
return Err(format!("Delete fehlgeschlagen: {}", text));
}
Ok(())
}
pub async fn heartbeat(&self, file_id: i64) -> Result<(), String> {
let url = format!("{}/api/files/{}/heartbeat", self.server_url, file_id);
self.client.post(&url)
+374 -417
@@ -1,27 +1,28 @@
use crate::sync::api::{FileEntry, MiniCloudApi};
use crate::sync::journal::{Journal, JournalEntry};
use sha2::{Digest, Sha256};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
/// A configured sync path: maps a server folder to a local folder
/// A configured sync path: maps a server folder to a local folder.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncPath {
pub id: String, // unique ID
pub server_path: String, // e.g. "/" (root) or "/Projekte/2026"
pub server_folder_id: Option<i64>, // server folder ID (None = root)
pub local_dir: String, // local directory path
pub mode: SyncMode, // virtual or full
pub id: String,
pub server_path: String,
pub server_folder_id: Option<i64>,
pub local_dir: String,
pub mode: SyncMode,
pub enabled: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum SyncMode {
Virtual, // .cloud placeholder files, download on demand
Full, // full sync, all files downloaded
Virtual,
Full,
}
/// Cloud placeholder file content (small JSON inside .cloud files)
/// `.cloud` placeholder content (JSON payload of the 0-byte-ish placeholder).
#[derive(Debug, Serialize, Deserialize)]
struct CloudPlaceholder {
id: i64,
@@ -35,484 +36,440 @@ struct CloudPlaceholder {
pub struct SyncEngine {
pub api: MiniCloudApi,
pub sync_paths: Vec<SyncPath>,
last_sync: Option<String>,
/// Checksums from last sync - used to detect who changed a file
/// Key: file path (relative), Value: server checksum at last sync
known_checksums: HashMap<String, String>,
pub journal: Arc<Journal>,
pub username: String,
}
impl SyncEngine {
pub fn new(api: MiniCloudApi) -> Self {
Self { api, sync_paths: Vec::new(), last_sync: None, known_checksums: HashMap::new() }
pub fn new(api: MiniCloudApi, journal: Arc<Journal>, username: String) -> Self {
Self { api, sync_paths: Vec::new(), journal, username }
}
/// Sync all configured paths
/// Sync every configured path.
pub async fn sync_all(&mut self) -> Result<Vec<String>, String> {
let mut all_logs = Vec::new();
let mut log = Vec::new();
let tree = self.api.get_sync_tree().await?;
let sync_paths = self.sync_paths.clone();
for sp in &sync_paths {
if !sp.enabled { continue; }
let local_dir = PathBuf::from(&sp.local_dir);
std::fs::create_dir_all(&local_dir).ok();
// Find the server subtree for this sync path
let subtree = if sp.server_folder_id.is_some() {
find_subtree(&tree, sp.server_folder_id.unwrap())
} else {
Some(tree.clone())
let subtree = match sp.server_folder_id {
Some(id) => find_subtree(&tree, id).unwrap_or_default(),
None => tree.clone(),
};
if let Some(entries) = subtree {
let mut log = Vec::new();
match sp.mode {
SyncMode::Virtual => {
self.sync_virtual(&entries, &local_dir, &sp.server_path, &mut log).await;
// Also upload new local files (not on server yet)
self.sync_upload_new(&entries, &local_dir, sp.server_folder_id, &mut log).await;
}
SyncMode::Full => {
self.sync_full_download(&entries, &local_dir, &mut log).await;
self.sync_full_upload(&entries, &local_dir, sp.server_folder_id, &mut log).await;
}
}
all_logs.extend(log);
}
}
// Phase 1: propagate deletions based on journal history.
self.detect_deletions(sp, &subtree, &local_dir, &mut log).await;
self.last_sync = Some(chrono::Utc::now().to_rfc3339());
Ok(all_logs)
// Phase 2: normal sync (downloads, uploads, conflicts).
self.sync_dir(&subtree, &local_dir, "", sp.server_folder_id, sp, &mut log).await;
}
Ok(log)
}
/// Virtual sync: create .cloud placeholder files
async fn sync_virtual(&mut self, entries: &[FileEntry], local_dir: &Path,
server_path: &str, log: &mut Vec<String>) {
for entry in entries {
let local_path = local_dir.join(&entry.name);
/// Walks the journal for this sync path and reconciles existence:
/// - file was in journal and is gone locally but still on server -> delete on server
/// - file was in journal and is gone on server but still local -> delete locally
/// - file is gone on both sides -> clean journal entry
async fn detect_deletions(
&self,
sp: &SyncPath,
subtree: &[FileEntry],
local_root: &Path,
log: &mut Vec<String>,
) {
use std::collections::HashMap;
let mut server_files: HashMap<String, i64> = HashMap::new();
collect_server_files(subtree, "", &mut server_files);
for je in self.journal.list_for_sync(&sp.id) {
let local_real = local_root.join(&je.relative_path);
let local_cloud = {
let parent = local_real.parent().map(|p| p.to_path_buf());
let fname = local_real.file_name().map(|n| n.to_string_lossy().to_string());
match (parent, fname) {
(Some(p), Some(n)) => p.join(format!("{}.cloud", n)),
_ => PathBuf::new(),
}
};
let local_exists = local_real.exists() || local_cloud.exists();
let server_id = server_files.get(&je.relative_path).copied();
match (local_exists, server_id) {
(true, Some(_)) => { /* present on both sides - normal sync handles it */ }
(false, None) => {
let _ = self.journal.delete(&sp.id, &je.relative_path);
}
(false, Some(id)) => {
match self.api.delete_file(id).await {
Ok(_) => {
log.push(format!("Server-Papierkorb: {}", je.relative_path));
let _ = self.journal.delete(&sp.id, &je.relative_path);
}
Err(e) => log.push(format!("Server-Delete-Fehler {}: {}", je.relative_path, e)),
}
}
(true, None) => {
std::fs::remove_file(&local_real).ok();
std::fs::remove_file(&local_cloud).ok();
let _ = self.journal.delete(&sp.id, &je.relative_path);
log.push(format!("Lokal geloescht: {}", je.relative_path));
}
}
}
}
/// Recursively sync a single directory level.
/// `rel_prefix` is the journal-relative path prefix (e.g. "", or "sub/dir/").
async fn sync_dir(
&mut self,
server_entries: &[FileEntry],
local_dir: &Path,
rel_prefix: &str,
parent_id: Option<i64>,
sp: &SyncPath,
log: &mut Vec<String>,
) {
use std::collections::HashMap;
let server_by_name: HashMap<String, &FileEntry> = server_entries
.iter().map(|e| (e.name.clone(), e)).collect();
// --- Pass 1: iterate server entries, reconcile each against local/journal ---
for entry in server_entries {
let rel = if rel_prefix.is_empty() {
entry.name.clone()
} else {
format!("{}/{}", rel_prefix, entry.name)
};
if entry.is_folder {
std::fs::create_dir_all(&local_path).ok();
let sub_local = local_dir.join(&entry.name);
std::fs::create_dir_all(&sub_local).ok();
if let Some(children) = &entry.children {
let sub_path = format!("{}/{}", server_path.trim_end_matches('/'), entry.name);
Box::pin(self.sync_virtual(children, &local_path, &sub_path, log)).await;
Box::pin(self.sync_dir(children, &sub_local, &rel, Some(entry.id), sp, log)).await;
}
} else {
// Check if real file exists (manually downloaded or offline-marked)
if local_path.exists() {
let local_hash = compute_file_hash(&local_path);
let server_hash = entry.checksum.as_deref().unwrap_or("");
let file_key = format!("{}/{}", server_path, entry.name);
if local_hash != server_hash {
if entry.locked.unwrap_or(false) {
log.push(format!("Zurueckgehalten (gesperrt): {}", entry.name));
continue;
}
// Check if WE changed the file locally
let last_known = self.known_checksums.get(&file_key);
let local_changed = match last_known {
Some(known) => local_hash != *known, // local differs from last sync
None => false, // first sync, don't assume local changed
};
let server_changed = match last_known {
Some(known) => server_hash != known, // server differs from last sync
None => true, // first sync, trust server
};
if server_changed && !local_changed {
// Only server changed -> download
match self.api.download_file(entry.id, &local_path).await {
Ok(_) => log.push(format!("Server->Lokal: {}", entry.name)),
Err(e) => log.push(format!("Download-Fehler {}: {}", entry.name, e)),
}
} else if local_changed && !server_changed {
// Only local changed -> upload
match self.api.upload_file(&local_path, None).await {
Ok(_) => log.push(format!("Lokal->Server: {}", entry.name)),
Err(e) => log.push(format!("Upload-Fehler {}: {}", entry.name, e)),
}
} else {
// Both changed -> conflict! Download server, keep local as conflict copy
let conflict_name = format!("{} (Konflikt).{}",
local_path.file_stem().unwrap().to_string_lossy(),
local_path.extension().map(|e| e.to_string_lossy().to_string()).unwrap_or_default());
let conflict_path = local_path.parent().unwrap().join(&conflict_name);
std::fs::rename(&local_path, &conflict_path).ok();
match self.api.download_file(entry.id, &local_path).await {
Ok(_) => log.push(format!("KONFLIKT: {} (lokale Kopie: {})", entry.name, conflict_name)),
Err(e) => log.push(format!("Download-Fehler {}: {}", entry.name, e)),
}
}
}
// Track current server checksum
self.known_checksums.insert(file_key, server_hash.to_string());
continue;
}
// Create or update .cloud placeholder
let cloud_path = local_dir.join(format!("{}.cloud", entry.name));
let needs_update = if cloud_path.exists() {
// Check if server version changed
if let Ok(content) = std::fs::read_to_string(&cloud_path) {
if let Ok(old) = serde_json::from_str::<CloudPlaceholder>(&content) {
old.checksum != entry.checksum.as_deref().unwrap_or("")
} else { true }
} else { true }
} else { true };
if needs_update {
let placeholder = CloudPlaceholder {
id: entry.id,
name: entry.name.clone(),
size: entry.size.unwrap_or(0),
checksum: entry.checksum.clone().unwrap_or_default(),
updated_at: entry.updated_at.clone().unwrap_or_default(),
server_path: format!("{}/{}", server_path.trim_end_matches('/'), entry.name),
};
if let Ok(json) = serde_json::to_string_pretty(&placeholder) {
std::fs::write(&cloud_path, json).ok();
if !cloud_path.exists() {
log.push(format!("Platzhalter: {}.cloud", entry.name));
}
}
}
}
}
// Remove .cloud files for deleted server files
if let Ok(dir_entries) = std::fs::read_dir(local_dir) {
for entry in dir_entries.flatten() {
let name = entry.file_name().to_string_lossy().to_string();
if name.ends_with(".cloud") {
let real_name = name.trim_end_matches(".cloud");
let exists_on_server = entries.iter().any(|e| e.name == real_name);
if !exists_on_server {
std::fs::remove_file(entry.path()).ok();
log.push(format!("Entfernt: {}", name));
}
}
}
}
}
/// Upload new local files that don't exist on server yet (for both Virtual + Full mode)
async fn sync_upload_new(&mut self, server_entries: &[FileEntry], local_dir: &Path,
parent_id: Option<i64>, log: &mut Vec<String>) {
let server_names: std::collections::HashSet<String> = server_entries.iter()
.map(|e| e.name.clone()).collect();
let entries = match std::fs::read_dir(local_dir) {
Ok(e) => e,
Err(_) => return,
};
for entry in entries.flatten() {
let name = entry.file_name().to_string_lossy().to_string();
let path = entry.path();
// Skip hidden, temp, .cloud files
if name.starts_with('.') || name.starts_with('~')
|| name.ends_with(".tmp") || name.ends_with(".cloud") {
continue;
}
if path.is_dir() {
// New folder: create on server + recurse
if !server_names.contains(&name) {
match self.api.create_folder(&name, parent_id).await {
Ok(folder) => {
log.push(format!("Ordner erstellt: {}", name));
Box::pin(self.sync_upload_new(&[], &path, Some(folder.id), log)).await;
}
Err(e) => log.push(format!("Ordner-Fehler {}: {}", name, e)),
self.reconcile_file(entry, local_dir, &rel, parent_id, sp, log).await;
}
// --- Pass 2: iterate local entries, upload new local files/folders ---
let dir_iter = match std::fs::read_dir(local_dir) {
Ok(d) => d,
Err(_) => return,
};
for e in dir_iter.flatten() {
let name = e.file_name().to_string_lossy().to_string();
if should_skip_name(&name) { continue; }
let path = e.path();
let is_dir = path.is_dir();
// `.cloud` placeholders are stored locally under "foo.txt.cloud"
// but represent the server-side "foo.txt".
let real_name = name.trim_end_matches(".cloud").to_string();
let is_placeholder = name.ends_with(".cloud") && !is_dir;
// Already covered by server pass?
if server_by_name.contains_key(&real_name) { continue; }
if is_placeholder { continue; } // orphan placeholder - handled below
let rel = if rel_prefix.is_empty() {
real_name.clone()
} else {
format!("{}/{}", rel_prefix, real_name)
};
if is_dir {
match self.api.create_folder(&real_name, parent_id).await {
Ok(folder) => {
log.push(format!("Ordner erstellt: {}", rel));
self.upload_local_tree(&path, Some(folder.id), &rel, sp, log).await;
}
} else {
// Existing folder: recurse into it
let sub = server_entries.iter().find(|e| e.name == name);
let children = sub.and_then(|e| e.children.as_ref())
.map(|c| c.as_slice()).unwrap_or(&[]);
let sub_id = sub.map(|e| e.id);
Box::pin(self.sync_upload_new(children, &path, sub_id, log)).await;
Err(e) => log.push(format!("Ordner-Fehler {}: {}", rel, e)),
}
} else {
// New file: upload
if !server_names.contains(&name) {
match self.api.upload_file(&path, parent_id).await {
Ok(_) => log.push(format!("Hochgeladen: {}", name)),
Err(e) => log.push(format!("Upload-Fehler {}: {}", name, e)),
}
} else {
if let Some(se) = server_entries.iter().find(|e| e.name == name) {
if se.locked.unwrap_or(false) {
log.push(format!("Zurueckgehalten (gesperrt): {}", name));
continue;
}
let local_hash = compute_file_hash(&path);
let server_hash = se.checksum.as_deref().unwrap_or("");
if local_hash != server_hash {
let file_key = name.clone();
let last_known = self.known_checksums.get(&file_key);
let local_changed = match last_known {
Some(known) => local_hash != *known,
None => false,
};
let server_changed = match last_known {
Some(known) => server_hash != known,
None => true,
};
if server_changed && !local_changed {
match self.api.download_file(se.id, &path).await {
Ok(_) => log.push(format!("Server->Lokal: {}", name)),
Err(e) => log.push(format!("Download-Fehler {}: {}", name, e)),
}
} else if local_changed && !server_changed {
match self.api.upload_file(&path, parent_id).await {
Ok(_) => log.push(format!("Lokal->Server: {}", name)),
Err(e) => log.push(format!("Upload-Fehler {}: {}", name, e)),
}
} else {
// Both changed -> server wins, local becomes conflict copy
let ext = path.extension().map(|e| e.to_string_lossy().to_string()).unwrap_or_default();
let stem = path.file_stem().unwrap().to_string_lossy();
let conflict_path = path.parent().unwrap().join(format!("{} (Konflikt).{}", stem, ext));
std::fs::rename(&path, &conflict_path).ok();
match self.api.download_file(se.id, &path).await {
Ok(_) => log.push(format!("KONFLIKT: {} -> {}", name, conflict_path.file_name().unwrap().to_string_lossy())),
Err(e) => log.push(format!("Download-Fehler {}: {}", name, e)),
}
}
}
self.known_checksums.insert(name, server_hash.to_string());
match self.api.upload_file(&path, parent_id).await {
Ok(fe) => {
log.push(format!("Hochgeladen: {}", rel));
let checksum = fe.checksum.unwrap_or_default();
let size = fe.size.unwrap_or(0);
let _ = self.journal.upsert(&JournalEntry {
sync_path_id: sp.id.clone(),
relative_path: rel.clone(),
file_id: Some(fe.id),
synced_checksum: checksum,
synced_size: size,
synced_mtime: fe.updated_at.unwrap_or_default(),
local_state: "offline".to_string(),
});
}
Err(e) => log.push(format!("Upload-Fehler {}: {}", rel, e)),
}
}
}
}
/// Full sync: download all files from server
async fn sync_full_download(&self, entries: &[FileEntry], local_dir: &Path,
log: &mut Vec<String>) {
for entry in entries {
let local_path = local_dir.join(&entry.name);
if entry.is_folder {
std::fs::create_dir_all(&local_path).ok();
if let Some(children) = &entry.children {
Box::pin(self.sync_full_download(children, &local_path, log)).await;
}
} else {
if entry.locked.unwrap_or(false) { continue; }
let needs_download = if local_path.exists() {
let local_hash = compute_file_hash(&local_path);
local_hash != entry.checksum.as_deref().unwrap_or("")
// --- Pass 3: clean up orphan .cloud placeholders for files gone from server ---
if let Ok(dir_iter) = std::fs::read_dir(local_dir) {
for e in dir_iter.flatten() {
let name = e.file_name().to_string_lossy().to_string();
if !name.ends_with(".cloud") || e.path().is_dir() { continue; }
let real_name = name.trim_end_matches(".cloud");
if server_by_name.contains_key(real_name) { continue; }
std::fs::remove_file(e.path()).ok();
let rel = if rel_prefix.is_empty() {
real_name.to_string()
} else {
true
format!("{}/{}", rel_prefix, real_name)
};
// Remove stale .cloud placeholder
let cloud_path = local_dir.join(format!("{}.cloud", entry.name));
if cloud_path.exists() {
std::fs::remove_file(&cloud_path).ok();
}
if needs_download {
match self.api.download_file(entry.id, &local_path).await {
Ok(_) => log.push(format!("Heruntergeladen: {}", entry.name)),
Err(e) => log.push(format!("Fehler {}: {}", entry.name, e)),
}
}
let _ = self.journal.delete(&sp.id, &rel);
log.push(format!("Entfernt (Server): {}", name));
}
}
}
/// Full sync: upload new/changed local files
async fn sync_full_upload(&mut self, server_entries: &[FileEntry], local_dir: &Path,
parent_id: Option<i64>, log: &mut Vec<String>) {
let server_names: HashMap<String, &FileEntry> = server_entries.iter()
.map(|e| (e.name.clone(), e))
.collect();
/// Core 3-way reconciliation for a single server file.
async fn reconcile_file(
&self,
entry: &FileEntry,
local_dir: &Path,
rel: &str,
parent_id: Option<i64>,
sp: &SyncPath,
log: &mut Vec<String>,
) {
let real_path = local_dir.join(&entry.name);
let cloud_path = local_dir.join(format!("{}.cloud", entry.name));
let journal_entry = self.journal.get(&sp.id, rel);
let server_hash = entry.checksum.clone().unwrap_or_default();
let server_size = entry.size.unwrap_or(0);
let server_mtime = entry.updated_at.clone().unwrap_or_default();
let entries = match std::fs::read_dir(local_dir) {
Ok(e) => e,
Err(_) => return,
};
// Case A: real file exists locally = offline state
if real_path.exists() && !real_path.is_dir() {
// Avoid race: if placeholder still around, remove it
if cloud_path.exists() { std::fs::remove_file(&cloud_path).ok(); }
for entry in entries.flatten() {
let name = entry.file_name().to_string_lossy().to_string();
let path = entry.path();
let local_hash = compute_file_hash(&real_path);
// Skip hidden, temp, .cloud files
if name.starts_with('.') || name.starts_with('~') || name.ends_with(".tmp")
|| name.ends_with(".cloud") {
continue;
if local_hash == server_hash {
// In sync - just (re)record journal
self.journal_offline(sp, rel, entry, &server_hash, server_size, &server_mtime);
return;
}
if path.is_dir() {
if let Some(se) = server_names.get(&name) {
if let Some(children) = &se.children {
Box::pin(self.sync_full_upload(children, &path, Some(se.id), log)).await;
// Hashes differ. Locked by someone else? Hold back.
if entry.locked.unwrap_or(false) {
let by = entry.locked_by.clone().unwrap_or_default();
if by != self.username {
log.push(format!("Zurueckgehalten (gesperrt von {}): {}", by, rel));
return;
}
}
let (local_changed, server_changed) = match &journal_entry {
Some(j) => (local_hash != j.synced_checksum, server_hash != j.synced_checksum),
None => (true, true), // unknown history - treat as conflict to be safe
};
if local_changed && !server_changed {
// Upload
match self.api.upload_file(&real_path, parent_id).await {
Ok(fe) => {
log.push(format!("Lokal->Server: {}", rel));
let new_hash = fe.checksum.unwrap_or(local_hash.clone());
self.journal_offline(sp, rel, entry, &new_hash,
fe.size.unwrap_or(server_size),
&fe.updated_at.unwrap_or(server_mtime.clone()));
}
} else {
match self.api.create_folder(&name, parent_id).await {
Ok(folder) => {
log.push(format!("Ordner erstellt: {}", name));
Box::pin(self.sync_full_upload(&[], &path, Some(folder.id), log)).await;
}
Err(e) => log.push(format!("Ordner-Fehler {}: {}", name, e)),
Err(e) => log.push(format!("Upload-Fehler {}: {}", rel, e)),
}
} else if server_changed && !local_changed {
// Download
match self.api.download_file(entry.id, &real_path).await {
Ok(_) => {
log.push(format!("Server->Lokal: {}", rel));
self.journal_offline(sp, rel, entry, &server_hash, server_size, &server_mtime);
}
Err(e) => log.push(format!("Download-Fehler {}: {}", rel, e)),
}
} else {
if let Some(se) = server_names.get(&name) {
if se.locked.unwrap_or(false) {
log.push(format!("Zurueckgehalten (gesperrt): {}", name));
continue;
// Both changed OR no journal -> conflict copy
let conflict_path = make_conflict_path(&real_path, &self.username);
std::fs::rename(&real_path, &conflict_path).ok();
match self.api.download_file(entry.id, &real_path).await {
Ok(_) => {
log.push(format!("KONFLIKT: {} (lokal: {})", rel,
conflict_path.file_name().unwrap().to_string_lossy()));
self.journal_offline(sp, rel, entry, &server_hash, server_size, &server_mtime);
}
let local_hash = compute_file_hash(&path);
let server_hash = se.checksum.as_deref().unwrap_or("");
if local_hash != server_hash {
let last_known = self.known_checksums.get(&name);
let local_changed = match last_known {
Some(known) => local_hash != *known,
None => false,
};
let server_changed = match last_known {
Some(known) => server_hash != known,
None => true,
};
Err(e) => {
// Restore original
std::fs::rename(&conflict_path, &real_path).ok();
log.push(format!("Download-Fehler {}: {}", rel, e));
}
}
}
return;
}
if server_changed && !local_changed {
match self.api.download_file(se.id, &path).await {
Ok(_) => log.push(format!("Server->Lokal: {}", name)),
Err(e) => log.push(format!("Download-Fehler {}: {}", name, e)),
}
} else if local_changed && !server_changed {
match self.api.upload_file(&path, parent_id).await {
Ok(_) => log.push(format!("Lokal->Server: {}", name)),
Err(e) => log.push(format!("Upload-Fehler {}: {}", name, e)),
}
} else {
let ext = path.extension().map(|e| e.to_string_lossy().to_string()).unwrap_or_default();
let stem = path.file_stem().unwrap().to_string_lossy();
let conflict_path = path.parent().unwrap().join(format!("{} (Konflikt).{}", stem, ext));
std::fs::rename(&path, &conflict_path).ok();
match self.api.download_file(se.id, &path).await {
Ok(_) => log.push(format!("KONFLIKT: {} -> {}", name, conflict_path.file_name().unwrap().to_string_lossy())),
Err(e) => log.push(format!("Download-Fehler {}: {}", name, e)),
}
}
}
self.known_checksums.insert(name, server_hash.to_string());
// Case B: local has a .cloud placeholder (or neither) = virtual state
// Virtual placeholders never have local edits, just keep them fresh.
let needs_write = match std::fs::read_to_string(&cloud_path) {
Ok(content) => match serde_json::from_str::<CloudPlaceholder>(&content) {
Ok(old) => old.checksum != server_hash || old.id != entry.id,
Err(_) => true,
},
Err(_) => true,
};
if needs_write {
let placeholder = CloudPlaceholder {
id: entry.id,
name: entry.name.clone(),
size: server_size,
checksum: server_hash.clone(),
updated_at: server_mtime.clone(),
server_path: rel.to_string(),
};
if let Ok(json) = serde_json::to_string_pretty(&placeholder) {
if !cloud_path.exists() {
log.push(format!("Platzhalter: {}.cloud", entry.name));
} else {
// New file, not on server
match self.api.upload_file(&path, parent_id).await {
Ok(_) => log.push(format!("Hochgeladen: {}", name)),
Err(e) => log.push(format!("Upload-Fehler {}: {}", name, e)),
}
log.push(format!("Platzhalter aktualisiert: {}.cloud", entry.name));
}
std::fs::write(&cloud_path, json).ok();
}
}
self.journal.upsert(&JournalEntry {
sync_path_id: sp.id.clone(),
relative_path: rel.to_string(),
file_id: Some(entry.id),
synced_checksum: server_hash,
synced_size: server_size,
synced_mtime: server_mtime,
local_state: "virtual".to_string(),
}).ok();
// If Full mode and no real file yet, download now
if sp.mode == SyncMode::Full && !real_path.exists() {
if let Err(e) = self.api.download_file(entry.id, &real_path).await {
log.push(format!("Full-Download-Fehler {}: {}", rel, e));
} else {
std::fs::remove_file(&cloud_path).ok();
log.push(format!("Heruntergeladen: {}", rel));
// Update journal to offline
if let Some(mut j) = self.journal.get(&sp.id, rel) {
j.local_state = "offline".to_string();
let _ = self.journal.upsert(&j);
}
}
}
}
/// Open a .cloud placeholder file: download the real file, rename, return path
#[allow(dead_code)]
pub async fn open_cloud_file(&self, cloud_path: &Path) -> Result<PathBuf, String> {
let content = std::fs::read_to_string(cloud_path)
.map_err(|e| format!("Platzhalter lesen: {}", e))?;
let placeholder: CloudPlaceholder = serde_json::from_str(&content)
.map_err(|e| format!("Platzhalter ungueltig: {}", e))?;
let _real_path = cloud_path.with_extension("");
// Remove .cloud extension to get real filename
let real_path = cloud_path.parent().unwrap().join(&placeholder.name);
// Download
self.api.download_file(placeholder.id, &real_path).await?;
// Remove placeholder
std::fs::remove_file(cloud_path).ok();
// Lock on server
let _ = self.api.lock_file(placeholder.id, "Desktop Sync Client").await;
Ok(real_path)
fn journal_offline(
&self, sp: &SyncPath, rel: &str, entry: &FileEntry,
hash: &str, size: i64, mtime: &str,
) {
let _ = self.journal.upsert(&JournalEntry {
sync_path_id: sp.id.clone(),
relative_path: rel.to_string(),
file_id: Some(entry.id),
synced_checksum: hash.to_string(),
synced_size: size,
synced_mtime: mtime.to_string(),
local_state: "offline".to_string(),
});
}
/// Close a previously opened file: sync back, recreate .cloud, unlock
#[allow(dead_code)]
pub async fn close_cloud_file(&self, real_path: &Path, file_id: i64) -> Result<(), String> {
// Upload changes
// We need the parent_id - for now upload to the same location
// The server handles overwrite by filename
let _ = self.api.upload_file(real_path, None).await;
// Unlock
let _ = self.api.unlock_file(file_id).await;
// Delete local copy and recreate placeholder
let cloud_path = real_path.parent().unwrap()
.join(format!("{}.cloud", real_path.file_name().unwrap().to_string_lossy()));
let size = std::fs::metadata(real_path).map(|m| m.len() as i64).unwrap_or(0);
let checksum = compute_file_hash(real_path);
let placeholder = CloudPlaceholder {
id: file_id,
name: real_path.file_name().unwrap().to_string_lossy().to_string(),
size,
checksum,
updated_at: chrono::Utc::now().to_rfc3339(),
server_path: String::new(),
};
if let Ok(json) = serde_json::to_string_pretty(&placeholder) {
std::fs::write(&cloud_path, json).ok();
/// Walk a freshly-created local tree and upload every file (used after
/// creating a new folder on the server).
async fn upload_local_tree(
&self, dir: &Path, parent_id: Option<i64>, rel_prefix: &str,
sp: &SyncPath, log: &mut Vec<String>,
) {
let iter = match std::fs::read_dir(dir) { Ok(d) => d, Err(_) => return };
for e in iter.flatten() {
let name = e.file_name().to_string_lossy().to_string();
if should_skip_name(&name) { continue; }
let path = e.path();
let rel = format!("{}/{}", rel_prefix, name);
if path.is_dir() {
match self.api.create_folder(&name, parent_id).await {
Ok(folder) => {
log.push(format!("Ordner erstellt: {}", rel));
Box::pin(self.upload_local_tree(&path, Some(folder.id), &rel, sp, log)).await;
}
Err(e) => log.push(format!("Ordner-Fehler {}: {}", rel, e)),
}
} else {
match self.api.upload_file(&path, parent_id).await {
Ok(fe) => {
log.push(format!("Hochgeladen: {}", rel));
self.journal_offline(sp, &rel, &fe,
&fe.checksum.clone().unwrap_or_default(),
fe.size.unwrap_or(0),
&fe.updated_at.clone().unwrap_or_default());
}
Err(e) => log.push(format!("Upload-Fehler {}: {}", rel, e)),
}
}
}
}
}
std::fs::remove_file(real_path).ok();
Ok(())
fn should_skip_name(name: &str) -> bool {
name.starts_with('.') || name.starts_with('~') || name.ends_with(".tmp")
}
fn make_conflict_path(original: &Path, username: &str) -> PathBuf {
let stem = original.file_stem().map(|s| s.to_string_lossy().to_string()).unwrap_or_default();
let ext = original.extension().map(|e| e.to_string_lossy().to_string());
let ts = chrono::Local::now().format("%Y-%m-%d %H%M%S").to_string();
let name = match ext {
Some(e) if !e.is_empty() => format!("{} (Konflikt {} {}).{}", stem, username, ts, e),
_ => format!("{} (Konflikt {} {})", stem, username, ts),
};
original.parent().map(|p| p.join(&name)).unwrap_or_else(|| PathBuf::from(&name))
}
fn collect_server_files(
entries: &[FileEntry],
prefix: &str,
out: &mut std::collections::HashMap<String, i64>,
) {
for e in entries {
let rel = if prefix.is_empty() {
e.name.clone()
} else {
format!("{}/{}", prefix, e.name)
};
if e.is_folder {
if let Some(children) = &e.children {
collect_server_files(children, &rel, out);
}
} else {
out.insert(rel, e.id);
}
}
}
fn find_subtree(tree: &[FileEntry], folder_id: i64) -> Option<Vec<FileEntry>> {
for entry in tree {
if entry.id == folder_id {
return entry.children.clone();
}
if entry.id == folder_id { return entry.children.clone(); }
if let Some(children) = &entry.children {
if let Some(result) = find_subtree(children, folder_id) {
return Some(result);
}
if let Some(r) = find_subtree(children, folder_id) { return Some(r); }
}
}
None
}
/// Parse a server timestamp (may or may not have timezone)
fn parse_server_time(s: &str) -> Option<std::time::SystemTime> {
// Try with timezone first (RFC3339)
if let Ok(dt) = chrono::DateTime::parse_from_rfc3339(s) {
return Some(std::time::SystemTime::from(dt));
}
// Try without timezone (naive, assume UTC)
if let Ok(dt) = chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S%.f") {
let utc = dt.and_utc();
return Some(std::time::SystemTime::from(utc));
}
if let Ok(dt) = chrono::NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S") {
let utc = dt.and_utc();
return Some(std::time::SystemTime::from(utc));
}
None
}
pub fn compute_file_hash(path: &Path) -> String {
let data = match std::fs::read(path) {
Ok(d) => d,
@@ -0,0 +1,120 @@
use rusqlite::{params, Connection};
use std::path::PathBuf;
use std::sync::Mutex;
/// One row of the sync journal. Represents the "last known synced state"
/// for a single file within a sync path. The server and local checksum
/// matched this value at the last successful sync.
#[derive(Debug, Clone)]
pub struct JournalEntry {
pub sync_path_id: String,
pub relative_path: String,
pub file_id: Option<i64>,
pub synced_checksum: String,
pub synced_size: i64,
pub synced_mtime: String,
pub local_state: String, // "virtual" or "offline"
}
pub struct Journal {
conn: Mutex<Connection>,
}
impl Journal {
pub fn open() -> Result<Self, String> {
let dir = dirs::config_dir()
.or_else(|| dirs::home_dir().map(|h| h.join(".config")))
.unwrap_or_else(|| PathBuf::from("."))
.join("MiniCloud Sync");
std::fs::create_dir_all(&dir).ok();
let path = dir.join("journal.db");
let conn = Connection::open(&path).map_err(|e| format!("Journal open: {}", e))?;
conn.execute_batch(
r#"
CREATE TABLE IF NOT EXISTS sync_journal (
sync_path_id TEXT NOT NULL,
relative_path TEXT NOT NULL,
file_id INTEGER,
synced_checksum TEXT NOT NULL DEFAULT '',
synced_size INTEGER NOT NULL DEFAULT 0,
synced_mtime TEXT NOT NULL DEFAULT '',
local_state TEXT NOT NULL DEFAULT 'virtual',
PRIMARY KEY (sync_path_id, relative_path)
);
"#,
).map_err(|e| format!("Journal schema: {}", e))?;
Ok(Self { conn: Mutex::new(conn) })
}
pub fn get(&self, sync_path_id: &str, rel: &str) -> Option<JournalEntry> {
let conn = self.conn.lock().unwrap();
conn.query_row(
"SELECT file_id, synced_checksum, synced_size, synced_mtime, local_state
FROM sync_journal WHERE sync_path_id = ?1 AND relative_path = ?2",
params![sync_path_id, rel],
|row| Ok(JournalEntry {
sync_path_id: sync_path_id.to_string(),
relative_path: rel.to_string(),
file_id: row.get(0)?,
synced_checksum: row.get(1)?,
synced_size: row.get(2)?,
synced_mtime: row.get(3)?,
local_state: row.get(4)?,
}),
).ok()
}
pub fn upsert(&self, e: &JournalEntry) -> Result<(), String> {
let conn = self.conn.lock().unwrap();
conn.execute(
"INSERT INTO sync_journal
(sync_path_id, relative_path, file_id, synced_checksum, synced_size, synced_mtime, local_state)
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)
ON CONFLICT(sync_path_id, relative_path) DO UPDATE SET
file_id = excluded.file_id,
synced_checksum = excluded.synced_checksum,
synced_size = excluded.synced_size,
synced_mtime = excluded.synced_mtime,
local_state = excluded.local_state",
params![e.sync_path_id, e.relative_path, e.file_id, e.synced_checksum,
e.synced_size, e.synced_mtime, e.local_state],
).map_err(|e| format!("Journal upsert: {}", e))?;
Ok(())
}
pub fn delete(&self, sync_path_id: &str, rel: &str) -> Result<(), String> {
let conn = self.conn.lock().unwrap();
conn.execute(
"DELETE FROM sync_journal WHERE sync_path_id = ?1 AND relative_path = ?2",
params![sync_path_id, rel],
).map_err(|e| format!("Journal delete: {}", e))?;
Ok(())
}
pub fn list_for_sync(&self, sync_path_id: &str) -> Vec<JournalEntry> {
let conn = self.conn.lock().unwrap();
let mut stmt = match conn.prepare(
"SELECT relative_path, file_id, synced_checksum, synced_size, synced_mtime, local_state
FROM sync_journal WHERE sync_path_id = ?1") {
Ok(s) => s,
Err(_) => return Vec::new(),
};
let rows = stmt.query_map(params![sync_path_id], |row| {
Ok(JournalEntry {
sync_path_id: sync_path_id.to_string(),
relative_path: row.get(0)?,
file_id: row.get(1)?,
synced_checksum: row.get(2)?,
synced_size: row.get(3)?,
synced_mtime: row.get(4)?,
local_state: row.get(5)?,
})
});
match rows {
Ok(it) => it.filter_map(|r| r.ok()).collect(),
Err(_) => Vec::new(),
}
}
}
@@ -1,4 +1,5 @@
pub mod api;
pub mod config;
pub mod engine;
pub mod journal;
pub mod watcher;
@@ -5,6 +5,7 @@ use std::sync::mpsc;
pub struct FileWatcher {
_watcher: RecommendedWatcher,
pub receiver: mpsc::Receiver<FileChange>,
pub path: PathBuf,
}
#[derive(Debug, Clone)]
@@ -53,6 +54,6 @@ impl FileWatcher {
watcher.watch(watch_dir.as_ref(), RecursiveMode::Recursive)
.map_err(|e| format!("Watch-Fehler: {}", e))?;
Ok(Self { _watcher: watcher, receiver: rx })
Ok(Self { _watcher: watcher, receiver: rx, path: watch_dir.clone() })
}
}