23563622f8
Die lokale Dateiliste im Client zeigt jetzt pro Datei ein 🔒-Badge mit Nutzername wenn ausgecheckt (wie Server-Ansicht + Web-GUI). browse_sync_folder zieht den Server-Tree bei jedem Aufruf und korreliert via Journal-Lookup (oder .cloud-Metadaten) die lokale Datei mit dem File-Lock-Status. Rechtsklick-Menue reagiert jetzt auf den Lock-Status: - Frei -> "Auschecken (sperren)" - Eigener/fremder -> "Entsperren (einchecken)" Neuer Tauri-Command lock_file_cmd fuer reines Sperren ohne Oeffnen. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1023 lines
37 KiB
Rust
1023 lines
37 KiB
Rust
mod sync;
|
|
|
|
use std::path::PathBuf;
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
|
use std::sync::{Arc, Mutex};
|
|
use std::time::Duration;
|
|
use tauri::{
|
|
menu::{Menu, MenuItem},
|
|
tray::TrayIconBuilder,
|
|
AppHandle, Emitter, Manager, State,
|
|
};
|
|
|
|
use sync::api::MiniCloudApi;
|
|
use sync::config::AppConfig;
|
|
use sync::engine::{SyncEngine, SyncMode, SyncPath};
|
|
use sync::journal::Journal;
|
|
use sync::watcher::{FileWatcher, ChangeKind};
|
|
|
|
/// Global application state shared by all Tauri commands and the background
/// threads (the threads fetch it via `app.state::<AppState>()`).
struct AppState {
    /// Authenticated API client; `None` until `login`/`auto_login` succeeds.
    api: Mutex<Option<MiniCloudApi>>,
    /// Sync engine created by `start_sync`; temporarily taken by `run_sync_now`.
    sync_engine: Mutex<Option<SyncEngine>>,
    /// Username of the currently logged-in user, if any.
    username: Mutex<Option<String>>,
    /// One filesystem watcher per watched local sync directory.
    watchers: Mutex<Vec<FileWatcher>>,
    locked_files: Mutex<Vec<i64>>, // file IDs we have locked on server
    /// True while a sync pass is running (shared with the background sync thread).
    sync_running: Arc<Mutex<bool>>,
    /// Configured server<->local folder mappings.
    sync_paths: Mutex<Vec<SyncPath>>,
    /// Persistent journal mapping (sync_path_id, relative path) to server file ids.
    journal: Arc<Journal>,
    /// Set once the background threads are spawned; they live for the whole
    /// process, so they must be started exactly once.
    background_started: AtomicBool,
}
|
|
|
|
// --- Auth ---
|
|
|
|
#[tauri::command]
|
|
async fn login(
|
|
state: State<'_, AppState>,
|
|
server_url: String,
|
|
username: String,
|
|
password: String,
|
|
) -> Result<serde_json::Value, String> {
|
|
let mut api = MiniCloudApi::new(&server_url);
|
|
let result = api.login(&username, &password).await?;
|
|
|
|
*state.api.lock().unwrap() = Some(api);
|
|
*state.username.lock().unwrap() = Some(username.clone());
|
|
|
|
// Save credentials to config file
|
|
let mut config = AppConfig::load();
|
|
config.server_url = server_url;
|
|
config.username = username;
|
|
config.save_password(&password);
|
|
let _ = config.save();
|
|
|
|
Ok(serde_json::json!({
|
|
"username": result.user.username,
|
|
"role": result.user.role,
|
|
}))
|
|
}
|
|
|
|
// --- Saved Config ---
|
|
|
|
#[tauri::command]
|
|
fn load_saved_config() -> Result<serde_json::Value, String> {
|
|
let config = AppConfig::load();
|
|
let has_credentials = !config.server_url.is_empty()
|
|
&& !config.username.is_empty()
|
|
&& config.get_password().is_some();
|
|
|
|
Ok(serde_json::json!({
|
|
"server_url": config.server_url,
|
|
"username": config.username,
|
|
"sync_paths": config.sync_paths,
|
|
"has_credentials": has_credentials,
|
|
"has_config": !config.server_url.is_empty(),
|
|
}))
|
|
}
|
|
|
|
#[tauri::command]
|
|
async fn auto_login(state: State<'_, AppState>) -> Result<serde_json::Value, String> {
|
|
let config = AppConfig::load();
|
|
if config.server_url.is_empty() || config.username.is_empty() {
|
|
return Err("Keine gespeicherten Zugangsdaten".to_string());
|
|
}
|
|
|
|
let password = config.get_password()
|
|
.ok_or("Passwort nicht gespeichert")?;
|
|
|
|
let mut api = MiniCloudApi::new(&config.server_url);
|
|
let result = api.login(&config.username, &password).await?;
|
|
|
|
*state.api.lock().unwrap() = Some(api);
|
|
*state.username.lock().unwrap() = Some(config.username.clone());
|
|
*state.sync_paths.lock().unwrap() = config.sync_paths;
|
|
|
|
Ok(serde_json::json!({
|
|
"username": result.user.username,
|
|
"role": result.user.role,
|
|
}))
|
|
}
|
|
|
|
#[tauri::command]
|
|
fn set_start_minimized(minimized: bool) -> Result<String, String> {
|
|
let mut config = AppConfig::load();
|
|
config.start_minimized = minimized;
|
|
config.save()?;
|
|
Ok(format!("Minimiert starten: {}", minimized))
|
|
}
|
|
|
|
#[tauri::command]
|
|
fn get_start_minimized() -> bool {
|
|
AppConfig::load().start_minimized
|
|
}
|
|
|
|
// --- Sync Paths ---
|
|
|
|
#[tauri::command]
|
|
fn add_sync_path(
|
|
state: State<'_, AppState>,
|
|
server_path: String,
|
|
server_folder_id: Option<i64>,
|
|
local_dir: String,
|
|
mode: String, // "virtual" or "full"
|
|
) -> Result<serde_json::Value, String> {
|
|
let local = PathBuf::from(&local_dir);
|
|
std::fs::create_dir_all(&local).map_err(|e| format!("Ordner erstellen: {}", e))?;
|
|
|
|
let sync_mode = if mode == "full" { SyncMode::Full } else { SyncMode::Virtual };
|
|
let id = format!("{}_{}", server_folder_id.unwrap_or(0), local_dir.replace(['/', '\\'], "_"));
|
|
|
|
let sp = SyncPath {
|
|
id: id.clone(),
|
|
server_path,
|
|
server_folder_id,
|
|
local_dir,
|
|
mode: sync_mode,
|
|
enabled: true,
|
|
};
|
|
|
|
state.sync_paths.lock().unwrap().push(sp.clone());
|
|
|
|
// Also attach a filesystem watcher for this path so background sync picks it up
|
|
if let Ok(w) = FileWatcher::new(&local) {
|
|
state.watchers.lock().unwrap().push(w);
|
|
}
|
|
|
|
// Save to config
|
|
let mut config = AppConfig::load();
|
|
config.sync_paths = state.sync_paths.lock().unwrap().clone();
|
|
let _ = config.save();
|
|
|
|
Ok(serde_json::to_value(sp).map_err(|e| e.to_string())?)
|
|
}
|
|
|
|
#[tauri::command]
|
|
fn remove_sync_path(state: State<'_, AppState>, id: String) -> Result<String, String> {
|
|
// Capture the local_dir of the removed path so we can drop its watcher too
|
|
let removed_dir = {
|
|
let paths = state.sync_paths.lock().unwrap();
|
|
paths.iter().find(|p| p.id == id).map(|p| p.local_dir.clone())
|
|
};
|
|
state.sync_paths.lock().unwrap().retain(|p| p.id != id);
|
|
|
|
if let Some(dir) = removed_dir {
|
|
let target = PathBuf::from(&dir);
|
|
state.watchers.lock().unwrap().retain(|w| w.path != target);
|
|
}
|
|
|
|
let mut config = AppConfig::load();
|
|
config.sync_paths = state.sync_paths.lock().unwrap().clone();
|
|
let _ = config.save();
|
|
|
|
Ok("Sync-Pfad entfernt".to_string())
|
|
}
|
|
|
|
#[tauri::command]
|
|
fn get_sync_paths(state: State<'_, AppState>) -> Result<serde_json::Value, String> {
|
|
let paths = state.sync_paths.lock().unwrap().clone();
|
|
Ok(serde_json::to_value(paths).map_err(|e| e.to_string())?)
|
|
}
|
|
|
|
#[tauri::command]
|
|
fn toggle_sync_mode(state: State<'_, AppState>, id: String) -> Result<String, String> {
|
|
let mut paths = state.sync_paths.lock().unwrap();
|
|
if let Some(p) = paths.iter_mut().find(|p| p.id == id) {
|
|
p.mode = match p.mode {
|
|
SyncMode::Virtual => SyncMode::Full,
|
|
SyncMode::Full => SyncMode::Virtual,
|
|
};
|
|
Ok(format!("Modus: {:?}", p.mode))
|
|
} else {
|
|
Err("Pfad nicht gefunden".to_string())
|
|
}
|
|
}
|
|
|
|
// --- Sync ---
|
|
|
|
/// Run an initial sync over all configured paths and, on the first call in
/// this process, spawn the long-lived background threads (sync loop,
/// heartbeat/token refresh, SSE listener, watcher poller).
///
/// Returns the log lines produced by the initial sync pass.
#[tauri::command]
async fn start_sync(app: AppHandle, state: State<'_, AppState>) -> Result<Vec<String>, String> {
    // Clone out of the mutexes so no guard is held across an await point.
    let api = state.api.lock().unwrap().clone().ok_or("Nicht eingeloggt")?;
    let paths = state.sync_paths.lock().unwrap().clone();

    if paths.is_empty() {
        return Err("Keine Sync-Pfade konfiguriert".to_string());
    }

    let username = state.username.lock().unwrap().clone().unwrap_or_default();
    let journal = state.journal.clone();
    let mut engine = SyncEngine::new(api.clone(), journal, username);
    engine.sync_paths = paths.clone();
    let log = engine.sync_all().await?;

    // Keep the engine in state so run_sync_now can reuse it.
    *state.sync_engine.lock().unwrap() = Some(engine);

    // Ensure a watcher exists for every sync path (skip paths already watched)
    {
        let mut guard = state.watchers.lock().unwrap();
        for sp in &paths {
            let target = PathBuf::from(&sp.local_dir);
            if guard.iter().any(|w| w.path == target) { continue; }
            if let Ok(w) = FileWatcher::new(&target) {
                guard.push(w);
            }
        }
    }

    // Start background threads only once per process lifetime.
    // They re-read sync_paths from state each iteration, so adding/removing
    // paths later takes effect without respawning threads.
    // (swap returns the previous value: only the caller that flips
    // false -> true spawns the threads.)
    if !state.background_started.swap(true, Ordering::SeqCst) {
        let username = state.username.lock().unwrap().clone().unwrap_or_default();
        let journal = state.journal.clone();
        start_background_sync(app, state.sync_running.clone(), api, journal, username);
    }

    Ok(log)
}
|
|
|
|
/// Trigger a sync pass on the engine created by `start_sync`.
///
/// The engine is taken out of state so `sync_all().await` runs without a
/// mutex guard held across the await; it is restored afterwards whether the
/// sync succeeded or failed. NOTE(review): a second call arriving during the
/// await window sees "Sync nicht gestartet" because the slot is empty.
#[tauri::command]
async fn run_sync_now(state: State<'_, AppState>) -> Result<Vec<String>, String> {
    let mut engine = {
        let mut guard = state.sync_engine.lock().unwrap();
        guard.take().ok_or("Sync nicht gestartet")?
    };
    // Sync engine's API token with current state (refresh_token may have updated it)
    if let Some(ref api) = *state.api.lock().unwrap() {
        engine.api.access_token = api.access_token.clone();
    }
    // Refresh sync_paths from state: user may have added/removed paths
    engine.sync_paths = state.sync_paths.lock().unwrap().clone();
    let result = engine.sync_all().await;
    // Put the engine back regardless of the sync outcome.
    *state.sync_engine.lock().unwrap() = Some(engine);
    result
}
|
|
|
|
// --- File Operations ---
|
|
|
|
/// Materialize a `.cloud` placeholder: download the real file next to it,
/// remove the placeholder, lock (check out) the file on the server and open
/// it with the system's default application.
///
/// Returns the path of the downloaded real file.
#[tauri::command]
async fn open_cloud_file(state: State<'_, AppState>, cloud_path: String) -> Result<String, String> {
    let api = state.api.lock().unwrap().clone()
        .ok_or("Nicht eingeloggt - bitte zuerst anmelden")?;

    let path = PathBuf::from(&cloud_path);
    if !path.exists() {
        return Err(format!("Datei nicht gefunden: {}", cloud_path));
    }

    // Read placeholder JSON
    let content = std::fs::read_to_string(&path)
        .map_err(|e| format!("Platzhalter lesen: {}", e))?;
    let placeholder: serde_json::Value = serde_json::from_str(&content)
        .map_err(|e| format!("Platzhalter ungueltig: {}", e))?;
    let file_id = placeholder.get("id").and_then(|v| v.as_i64())
        .ok_or("Keine Datei-ID im Platzhalter")?;

    // Get real filename: from JSON "name" field, or strip .cloud from filename
    let file_name = placeholder.get("name")
        .and_then(|v| v.as_str())
        .map(|s| s.to_string())
        .unwrap_or_else(|| {
            // path.exists() was checked above, so file_name() is present.
            let name = path.file_name().unwrap().to_string_lossy().to_string();
            name.strip_suffix(".cloud").unwrap_or(&name).to_string()
        });

    let real_path = path.parent().unwrap().join(&file_name);
    eprintln!("[OpenCloud] {} -> {} (ID: {})", cloud_path, real_path.display(), file_id);

    // Download the actual file
    api.download_file(file_id, &real_path).await
        .map_err(|e| format!("Download fehlgeschlagen: {}", e))?;

    // Verify file was downloaded
    if !real_path.exists() {
        return Err(format!("Download fehlgeschlagen - Datei nicht vorhanden: {}", real_path.display()));
    }
    eprintln!("[OpenCloud] Downloaded {} bytes", std::fs::metadata(&real_path).map(|m| m.len()).unwrap_or(0));

    // Remove .cloud placeholder - file stays as real file
    // Changes will be synced automatically by the file watcher
    // User can "unmark offline" or "unlock" via right-click
    // NOTE(review): the placeholder is removed BEFORE the server lock is
    // acquired — if the lock below fails, the file is already materialized
    // locally even though this command returns an error.
    std::fs::remove_file(&path).ok();

    // Lock on server (fresh token) - prevents others from editing
    let fresh_api = state.api.lock().unwrap().clone().ok_or("Nicht eingeloggt")?;
    match fresh_api.lock_file(file_id, "Desktop Sync Client").await {
        Ok(_) => {
            eprintln!("[OpenCloud] Locked on server");
            // Remembered so the heartbeat thread keeps the lock alive.
            state.locked_files.lock().unwrap().push(file_id);
        }
        Err(e) => {
            eprintln!("[OpenCloud] Lock failed: {}", e);
            return Err(format!("Datei heruntergeladen, aber Sperre fehlgeschlagen: {}", e));
        }
    }

    // Open with default application for this file type
    eprintln!("[OpenCloud] Opening with default app: {}", real_path.display());
    open::that(&real_path)
        .map_err(|e| format!("Oeffnen fehlgeschlagen: {} - {}", real_path.display(), e))?;

    Ok(real_path.to_string_lossy().to_string())
}
|
|
|
|
/// Open a real (already-downloaded) file: lock it on the server, then open
/// it with the default application. Used for files that are already offline-
/// available so they still get checked out.
///
/// The server file id is resolved by matching `real_path` against the
/// configured sync paths and looking the relative path up in the journal.
#[tauri::command]
async fn open_offline_file(state: State<'_, AppState>, real_path: String) -> Result<String, String> {
    let path = PathBuf::from(&real_path);
    if !path.exists() {
        return Err(format!("Datei nicht gefunden: {}", real_path));
    }

    // Resolve file_id by matching this path against the configured sync paths
    // and looking the relative path up in the journal.
    let (sync_path_id, rel_path) = {
        let paths = state.sync_paths.lock().unwrap().clone();
        let mut best: Option<(String, String)> = None;
        for sp in &paths {
            let base = PathBuf::from(&sp.local_dir);
            if let Ok(rel) = path.strip_prefix(&base) {
                // Journal keys use forward slashes, normalize Windows paths.
                let rel_str = rel.to_string_lossy().replace('\\', "/");
                best = Some((sp.id.clone(), rel_str));
                break;
            }
        }
        best.ok_or("Datei gehoert zu keinem konfigurierten Sync-Pfad")?
    };

    let journal = state.journal.clone();
    let entry = journal.get(&sync_path_id, &rel_path)
        .ok_or("Datei nicht im Sync-Journal - erst einmal synchronisieren")?;
    let file_id = entry.file_id.ok_or("Keine Server-ID im Journal")?;

    let api = state.api.lock().unwrap().clone().ok_or("Nicht eingeloggt")?;
    match api.lock_file(file_id, "Desktop Sync Client").await {
        Ok(_) => {
            eprintln!("[OpenOffline] Locked {} on server", rel_path);
            // Record the lock (once) so the heartbeat thread keeps it alive.
            let mut locked = state.locked_files.lock().unwrap();
            if !locked.contains(&file_id) { locked.push(file_id); }
        }
        Err(e) => return Err(format!("Sperre fehlgeschlagen: {}", e)),
    }

    open::that(&path)
        .map_err(|e| format!("Oeffnen fehlgeschlagen: {}", e))?;
    Ok(real_path)
}
|
|
|
|
#[tauri::command]
|
|
async fn unlock_file_cmd(state: State<'_, AppState>, file_id: i64) -> Result<String, String> {
|
|
let api = state.api.lock().unwrap().clone().ok_or("Nicht eingeloggt")?;
|
|
api.unlock_file(file_id).await?;
|
|
state.locked_files.lock().unwrap().retain(|&id| id != file_id);
|
|
Ok("Datei entsperrt".to_string())
|
|
}
|
|
|
|
#[tauri::command]
|
|
async fn lock_file_cmd(state: State<'_, AppState>, file_id: i64) -> Result<String, String> {
|
|
let api = state.api.lock().unwrap().clone().ok_or("Nicht eingeloggt")?;
|
|
api.lock_file(file_id, "Desktop Sync Client").await?;
|
|
let mut locked = state.locked_files.lock().unwrap();
|
|
if !locked.contains(&file_id) { locked.push(file_id); }
|
|
Ok("Datei ausgecheckt".to_string())
|
|
}
|
|
|
|
#[tauri::command]
|
|
async fn get_file_tree(state: State<'_, AppState>) -> Result<serde_json::Value, String> {
|
|
let api = state.api.lock().unwrap().clone().ok_or("Nicht eingeloggt")?;
|
|
let tree = api.get_sync_tree().await?;
|
|
Ok(serde_json::to_value(tree).map_err(|e| e.to_string())?)
|
|
}
|
|
|
|
#[tauri::command]
|
|
async fn get_status(state: State<'_, AppState>) -> Result<serde_json::Value, String> {
|
|
let logged_in = state.api.lock().unwrap().is_some();
|
|
let username = state.username.lock().unwrap().clone();
|
|
let syncing = *state.sync_running.lock().unwrap();
|
|
let paths = state.sync_paths.lock().unwrap().len();
|
|
let locked = state.locked_files.lock().unwrap().len();
|
|
|
|
Ok(serde_json::json!({
|
|
"logged_in": logged_in,
|
|
"username": username,
|
|
"syncing": syncing,
|
|
"sync_paths": paths,
|
|
"locked_files": locked,
|
|
}))
|
|
}
|
|
|
|
|
|
// --- Local File Browser ---
|
|
|
|
/// One row in the local file browser: on-disk info combined with server
/// lock status (drives the 🔒 badge in the UI).
#[derive(serde::Serialize)]
struct LocalFileEntry {
    name: String, // display name (".cloud" suffix stripped for placeholders)
    path: String, // absolute local path
    is_folder: bool,
    is_cloud: bool, // .cloud placeholder
    is_offline: bool, // real file (offline available)
    size: i64, // on-disk size in bytes
    cloud_size: Option<i64>, // original size from .cloud metadata
    file_id: Option<i64>, // server id from placeholder JSON or journal lookup
    locked: bool, // checked out on the server
    locked_by: Option<String>, // user holding the lock, if any
}
|
|
|
|
fn collect_locks(
|
|
entries: &[sync::api::FileEntry],
|
|
out: &mut std::collections::HashMap<i64, (bool, Option<String>)>,
|
|
) {
|
|
for e in entries {
|
|
if e.locked.unwrap_or(false) {
|
|
out.insert(e.id, (true, e.locked_by.clone()));
|
|
}
|
|
if let Some(children) = &e.children {
|
|
collect_locks(children, out);
|
|
}
|
|
}
|
|
}
|
|
|
|
#[tauri::command]
|
|
async fn browse_sync_folder(state: State<'_, AppState>, sub_path: Option<String>) -> Result<Vec<LocalFileEntry>, String> {
|
|
let (paths, api_opt, journal) = {
|
|
let p = state.sync_paths.lock().unwrap().clone();
|
|
let a = state.api.lock().unwrap().clone();
|
|
(p, a, state.journal.clone())
|
|
};
|
|
|
|
if paths.is_empty() {
|
|
return Err("Keine Sync-Pfade konfiguriert".to_string());
|
|
}
|
|
|
|
// If sub_path given, use it directly; otherwise use first sync path
|
|
let base_dir = if let Some(ref sp) = sub_path {
|
|
PathBuf::from(sp)
|
|
} else {
|
|
PathBuf::from(&paths[0].local_dir)
|
|
};
|
|
|
|
if !base_dir.exists() {
|
|
return Ok(Vec::new());
|
|
}
|
|
|
|
// Figure out which sync path this base_dir belongs to so we can compute
|
|
// relative paths for the journal lookup.
|
|
let sync_path = paths.iter().find(|sp| {
|
|
base_dir.starts_with(&sp.local_dir) || PathBuf::from(&sp.local_dir) == base_dir
|
|
}).cloned();
|
|
|
|
// Fetch server tree once so we know which files are locked. If the
|
|
// server is unreachable we simply show no lock badges.
|
|
let locks: std::collections::HashMap<i64, (bool, Option<String>)> = if let Some(api) = api_opt {
|
|
match api.get_sync_tree().await {
|
|
Ok(tree) => {
|
|
let mut map = std::collections::HashMap::new();
|
|
collect_locks(&tree, &mut map);
|
|
map
|
|
}
|
|
Err(_) => std::collections::HashMap::new(),
|
|
}
|
|
} else {
|
|
std::collections::HashMap::new()
|
|
};
|
|
|
|
let mut entries = Vec::new();
|
|
let dir = std::fs::read_dir(&base_dir).map_err(|e| e.to_string())?;
|
|
|
|
for entry in dir.flatten() {
|
|
let name = entry.file_name().to_string_lossy().to_string();
|
|
let path = entry.path();
|
|
|
|
if name.starts_with('.') || name.starts_with('~') { continue; }
|
|
|
|
let is_folder = path.is_dir();
|
|
let is_cloud = name.ends_with(".cloud");
|
|
let size = std::fs::metadata(&path).map(|m| m.len() as i64).unwrap_or(0);
|
|
|
|
let mut cloud_size = None;
|
|
let mut display_name = name.clone();
|
|
let mut file_id: Option<i64> = None;
|
|
|
|
if is_cloud {
|
|
display_name = name.trim_end_matches(".cloud").to_string();
|
|
if let Ok(content) = std::fs::read_to_string(&path) {
|
|
if let Ok(json) = serde_json::from_str::<serde_json::Value>(&content) {
|
|
cloud_size = json.get("size").and_then(|v| v.as_i64());
|
|
file_id = json.get("id").and_then(|v| v.as_i64());
|
|
}
|
|
}
|
|
}
|
|
|
|
// For offline files / folders: look up file_id via journal
|
|
if file_id.is_none() && !is_folder {
|
|
if let Some(sp) = &sync_path {
|
|
if let Ok(rel) = path.strip_prefix(&sp.local_dir) {
|
|
let rel_str = rel.to_string_lossy().replace('\\', "/");
|
|
if let Some(je) = journal.get(&sp.id, &rel_str) {
|
|
file_id = je.file_id;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
let (locked, locked_by) = file_id
|
|
.and_then(|id| locks.get(&id))
|
|
.cloned()
|
|
.map(|(b, by)| (b, by))
|
|
.unwrap_or((false, None));
|
|
|
|
let is_offline = !is_cloud && !is_folder;
|
|
|
|
entries.push(LocalFileEntry {
|
|
name: display_name,
|
|
path: path.to_string_lossy().to_string(),
|
|
is_folder,
|
|
is_cloud,
|
|
is_offline,
|
|
size,
|
|
cloud_size,
|
|
file_id,
|
|
locked,
|
|
locked_by,
|
|
});
|
|
}
|
|
|
|
entries.sort_by(|a, b| {
|
|
b.is_folder.cmp(&a.is_folder).then(a.name.to_lowercase().cmp(&b.name.to_lowercase()))
|
|
});
|
|
|
|
Ok(entries)
|
|
}
|
|
|
|
// --- Offline-Markierung ---
|
|
|
|
#[tauri::command]
|
|
async fn mark_offline(state: State<'_, AppState>, cloud_path: String) -> Result<String, String> {
|
|
// Read .cloud placeholder, download real file, keep it local
|
|
let path = PathBuf::from(&cloud_path);
|
|
let content = std::fs::read_to_string(&path).map_err(|e| e.to_string())?;
|
|
let placeholder: serde_json::Value = serde_json::from_str(&content).map_err(|e| e.to_string())?;
|
|
let file_id = placeholder.get("id").and_then(|v| v.as_i64()).ok_or("Keine ID")?;
|
|
let file_name = placeholder.get("name").and_then(|v| v.as_str()).unwrap_or("file");
|
|
|
|
let api = state.api.lock().unwrap().clone().ok_or("Nicht eingeloggt")?;
|
|
let real_path = path.parent().unwrap().join(file_name);
|
|
|
|
// Download
|
|
api.download_file(file_id, &real_path).await?;
|
|
|
|
// Remove .cloud placeholder (real file stays permanently)
|
|
std::fs::remove_file(&path).ok();
|
|
|
|
Ok(format!("{} ist jetzt offline verfuegbar", file_name))
|
|
}
|
|
|
|
#[tauri::command]
|
|
fn unmark_offline(cloud_path: String) -> Result<String, String> {
|
|
// Convert real file back to .cloud placeholder
|
|
let path = PathBuf::from(&cloud_path);
|
|
if !path.exists() { return Err("Datei nicht gefunden".to_string()); }
|
|
|
|
let name = path.file_name().unwrap().to_string_lossy().to_string();
|
|
let size = std::fs::metadata(&path).map(|m| m.len() as i64).unwrap_or(0);
|
|
let checksum = sync::engine::compute_file_hash(&path);
|
|
|
|
let placeholder = serde_json::json!({
|
|
"id": 0, // will be updated on next sync
|
|
"name": name,
|
|
"size": size,
|
|
"checksum": checksum,
|
|
"updated_at": chrono::Utc::now().to_rfc3339(),
|
|
"server_path": "",
|
|
});
|
|
|
|
let cloud_path = path.parent().unwrap().join(format!("{}.cloud", name));
|
|
std::fs::write(&cloud_path, serde_json::to_string_pretty(&placeholder).unwrap()).ok();
|
|
std::fs::remove_file(&path).ok();
|
|
|
|
Ok(format!("{} ist nicht mehr offline", name))
|
|
}
|
|
|
|
// --- Background Threads ---
|
|
|
|
/// Spawn the four long-lived background threads. Called exactly once per
/// process (guarded by `AppState::background_started` in `start_sync`):
///
/// 1. Sync loop — runs `sync_all` when the watcher flag is set, or every
///    30s as a fallback.
/// 2. Heartbeat/token thread — refreshes server locks every 60s and the
///    access token every 10 min.
/// 3. SSE listener — long-polls the server event stream and sets the
///    watcher flag on any real event.
/// 4. Watcher poller — drains filesystem change events and sets the flag
///    after a 3s debounce.
fn start_background_sync(
    app: AppHandle,
    sync_running: Arc<Mutex<bool>>,
    api: MiniCloudApi,
    journal: Arc<Journal>,
    username: String,
) {
    // Shared flag: watcher sets true when changes detected, sync thread checks it
    let watcher_triggered = Arc::new(Mutex::new(false));

    // Main sync thread: syncs on watcher trigger OR every 60s as fallback
    let app_sync = app.clone();
    let api_sync = api.clone();
    let trigger_sync = watcher_triggered.clone();

    let journal_sync = journal.clone();
    let username_sync = username.clone();
    std::thread::spawn(move || {
        // Dedicated runtime: sync_all is async but this thread is plain OS thread.
        let rt = tokio::runtime::Runtime::new().unwrap();
        let mut engine = SyncEngine::new(api_sync, journal_sync, username_sync);
        let mut idle_counter = 0u32;

        loop {
            // Check every 2 seconds if watcher triggered
            std::thread::sleep(Duration::from_secs(2));
            idle_counter += 2;

            let should_sync = {
                let mut triggered = trigger_sync.lock().unwrap();
                if *triggered {
                    *triggered = false;
                    true
                } else {
                    // Fallback: sync every 30 seconds even without changes
                    idle_counter >= 30
                }
            };

            if !should_sync { continue; }
            idle_counter = 0;

            // Re-read sync_paths from state every iteration so add/remove
            // takes effect without restarting the thread.
            let paths_now = {
                let state = app_sync.state::<AppState>();
                let p = state.sync_paths.lock().unwrap().clone();
                p
            };
            if paths_now.is_empty() {
                // Nothing to sync - idle quietly.
                continue;
            }
            engine.sync_paths = paths_now;

            // Run sync
            *sync_running.lock().unwrap() = true;
            let _ = app_sync.emit("sync-status", "syncing");

            // Refresh engine's API token from state (token may have been refreshed)
            let fresh_token: Option<String> = {
                let state = app_sync.state::<AppState>();
                let t = state.api.lock().unwrap().as_ref().map(|a| a.access_token.clone());
                t
            };
            if let Some(t) = fresh_token {
                engine.api.access_token = t;
            }

            match rt.block_on(engine.sync_all()) {
                Ok(log) => {
                    if !log.is_empty() {
                        let _ = app_sync.emit("sync-log", log);
                    }
                }
                Err(e) => { let _ = app_sync.emit("sync-error", e); }
            }

            *sync_running.lock().unwrap() = false;
            let _ = app_sync.emit("sync-status", "synced");
        }
    });

    // Token refresh (every 10 min) + Heartbeat for locks (every 60s)
    let app_hb = app.clone();
    let api_hb = api.clone();
    std::thread::spawn(move || {
        let rt = tokio::runtime::Runtime::new().unwrap();
        // Own client copy; refresh_token mutates it, state client is updated below.
        let mut api_mut = api_hb.clone();
        let mut tick = 0u32;

        loop {
            std::thread::sleep(Duration::from_secs(10));
            tick += 10;
            let state = app_hb.state::<AppState>();

            // Heartbeat every 60 seconds for locked files
            if tick % 60 == 0 {
                let locked = state.locked_files.lock().unwrap().clone();
                for file_id in &locked {
                    // Best effort: a missed heartbeat is not fatal here.
                    let _ = rt.block_on(api_mut.heartbeat(*file_id));
                }
            }

            // Token refresh every 10 minutes
            if tick >= 600 {
                tick = 0;
                if let Ok(new_token) = rt.block_on(api_mut.refresh_token()) {
                    // Propagate the new token to the shared client in state.
                    if let Some(ref mut api) = *state.api.lock().unwrap() {
                        api.access_token = new_token;
                    }
                    eprintln!("[Auth] Token refreshed");
                }
            }
        }
    });

    // Server-Sent Events: real-time change notifications from server
    let app_sse = app.clone();
    let trigger_sse = watcher_triggered.clone();
    std::thread::spawn(move || {
        let rt = tokio::runtime::Runtime::new().unwrap();
        loop {
            // Re-read URL + token each (re)connect so refreshed tokens are used.
            let (server_url, token) = {
                let state = app_sse.state::<AppState>();
                let guard = state.api.lock().unwrap();
                match guard.as_ref() {
                    Some(a) => (a.server_url.clone(), a.access_token.clone()),
                    None => { drop(guard); std::thread::sleep(Duration::from_secs(3)); continue; }
                }
            };
            if token.is_empty() {
                std::thread::sleep(Duration::from_secs(3));
                continue;
            }

            let url = format!("{}/api/sync/events?token={}", server_url, token);
            let trigger = trigger_sse.clone();
            let app_cb = app_sse.clone();

            let result: Result<(), String> = rt.block_on(async move {
                // Only the connect has a timeout; the stream itself is long-lived.
                let client = reqwest::Client::builder()
                    .connect_timeout(Duration::from_secs(10))
                    .build()
                    .map_err(|e| e.to_string())?;
                let mut resp = client.get(&url).send().await.map_err(|e| e.to_string())?;
                if !resp.status().is_success() {
                    return Err(format!("SSE status {}", resp.status()));
                }
                eprintln!("[SSE] Connected");
                let _ = app_cb.emit("sse-status", "connected");

                // Accumulate chunks and split on the SSE event delimiter "\n\n".
                let mut buffer = String::new();
                while let Some(chunk) = resp.chunk().await.map_err(|e| e.to_string())? {
                    buffer.push_str(&String::from_utf8_lossy(&chunk));
                    while let Some(pos) = buffer.find("\n\n") {
                        let raw = buffer[..pos].to_string();
                        buffer.drain(..pos + 2);
                        let lines: Vec<&str> = raw.lines().collect();
                        // Skip keepalive/comment lines (start with ':')
                        if lines.iter().all(|l| l.starts_with(':') || l.is_empty()) {
                            continue;
                        }
                        let mut event_name = String::from("message");
                        for l in &lines {
                            if let Some(v) = l.strip_prefix("event: ") { event_name = v.to_string(); }
                        }
                        if event_name == "hello" { continue; }
                        // Any real event -> trigger sync
                        *trigger.lock().unwrap() = true;
                        let _ = app_cb.emit("sse-event", event_name);
                    }
                }
                Ok(())
            });

            if let Err(e) = result {
                eprintln!("[SSE] Disconnected: {}", e);
                let _ = app_sse.emit("sse-status", format!("reconnecting: {}", e));
            }
            // Back off briefly before reconnecting.
            std::thread::sleep(Duration::from_secs(3));
        }
    });

    // File watcher: detects changes and triggers immediate sync
    let app_w = app.clone();
    let trigger_w = watcher_triggered.clone();
    std::thread::spawn(move || {
        // Debounce: wait 3 seconds after last change before triggering sync
        // (initialized in the past so the very first change starts a fresh window)
        let mut last_change = std::time::Instant::now() - Duration::from_secs(100);
        let mut pending = false;

        loop {
            std::thread::sleep(Duration::from_millis(500));

            let state = app_w.state::<AppState>();
            let watchers = state.watchers.lock().unwrap();
            let mut _had_changes = false;

            for watcher in watchers.iter() {
                // Drain all queued events without blocking.
                while let Ok(change) = watcher.receiver.try_recv() {
                    let name = change.path.file_name()
                        .and_then(|n| n.to_str()).unwrap_or("?");

                    // Skip .cloud files from triggering sync
                    if name.ends_with(".cloud") { continue; }

                    let msg = match change.kind {
                        ChangeKind::Created => format!("Neu: {}", name),
                        ChangeKind::Modified => format!("Geaendert: {}", name),
                        ChangeKind::Deleted => format!("Geloescht: {}", name),
                    };
                    let _ = app_w.emit("file-change", msg);
                    _had_changes = true;
                    last_change = std::time::Instant::now();
                    pending = true;
                }
            }

            // Debounce: trigger sync 3 seconds after last change
            if pending && last_change.elapsed() >= Duration::from_secs(3) {
                *trigger_w.lock().unwrap() = true;
                pending = false;
                let _ = app_w.emit("file-change", "→ Sync ausgeloest".to_string());
            }
        }
    });
}
|
|
|
|
// --- App Setup ---
|
|
|
|
/// Check if another instance is running. If yes, pass the .cloud file to it and exit.
/// Single instance per user. On terminal servers each user gets their own
/// instance because the lock file is in %APPDATA% (user-specific).
fn handle_single_instance() {
    let config_dir = dirs::config_dir()
        .unwrap_or_else(|| PathBuf::from("."))
        .join("MiniCloud Sync");
    std::fs::create_dir_all(&config_dir).ok();

    let lock_file = config_dir.join("instance.lock");
    let args: Vec<String> = std::env::args().collect();

    // Check if another instance of THIS USER is running
    let other_running = if lock_file.exists() {
        if let Ok(pid_str) = std::fs::read_to_string(&lock_file) {
            let pid = pid_str.trim().parse::<u32>().unwrap_or(0);
            if pid > 0 && pid != std::process::id() {
                // Check if that process is still alive
                #[cfg(target_os = "windows")]
                {
                    std::process::Command::new("tasklist")
                        .args(["/FI", &format!("PID eq {}", pid), "/NH"])
                        .output()
                        .map(|o| String::from_utf8_lossy(&o.stdout).contains(&pid.to_string()))
                        .unwrap_or(false)
                }
                #[cfg(not(target_os = "windows"))]
                {
                    // NOTE(review): liveness via /proc only works on Linux;
                    // /proc does not exist on macOS, so a stale lock there
                    // always reads as "not running" — confirm whether macOS
                    // is a supported target.
                    std::path::Path::new(&format!("/proc/{}", pid)).exists()
                }
            } else { false }
        } else { false }
    } else { false };

    // If .cloud file argument and another instance runs -> delegate and exit
    if args.len() > 1 && args[1].ends_with(".cloud") && other_running {
        // The running instance polls this file (see the watcher in run()'s setup).
        let request_file = config_dir.join("open_request.txt");
        std::fs::write(&request_file, &args[1]).ok();
        eprintln!("[SingleInstance] Delegated {} to running instance (PID in lock)", args[1]);
        std::process::exit(0);
    }

    // If no .cloud argument but another instance runs -> just bring it to front and exit
    if other_running && args.len() <= 1 {
        eprintln!("[SingleInstance] Already running, exiting");
        std::process::exit(0);
    }

    // We are the main instance - write our PID
    std::fs::write(&lock_file, std::process::id().to_string()).ok();
}
|
|
|
|
|
|
|
|
/// Application entry point: enforce single-instance, build the Tauri app
/// (plugins, shared state, tray icon + menu, window close-to-tray), register
/// all command handlers and run the event loop.
#[cfg_attr(mobile, tauri::mobile_entry_point)]
pub fn run() {
    handle_single_instance();

    tauri::Builder::default()
        .plugin(tauri_plugin_opener::init())
        .plugin(tauri_plugin_notification::init())
        .plugin(tauri_plugin_dialog::init())
        .manage(AppState {
            api: Mutex::new(None),
            sync_engine: Mutex::new(None),
            username: Mutex::new(None),
            watchers: Mutex::new(Vec::new()),
            sync_running: Arc::new(Mutex::new(false)),
            locked_files: Mutex::new(Vec::new()),
            sync_paths: Mutex::new(Vec::new()),
            journal: Arc::new(Journal::open().expect("Journal konnte nicht geoeffnet werden")),
            background_started: AtomicBool::new(false),
        })
        .on_window_event(|window, event| {
            // Close button = minimize to tray instead of quit
            if let tauri::WindowEvent::CloseRequested { api, .. } = event {
                api.prevent_close();
                let _ = window.hide();
            }
        })
        .setup(|app| {
            let quit = MenuItem::with_id(app, "quit", "Beenden", true, None::<&str>)?;
            let show = MenuItem::with_id(app, "show", "Oeffnen", true, None::<&str>)?;
            let sync_now = MenuItem::with_id(app, "sync", "Jetzt synchronisieren", true, None::<&str>)?;
            let menu = Menu::with_items(app, &[&show, &sync_now, &quit])?;

            // Use window icon for tray (set via tauri.conf.json)
            let icon = app.default_window_icon().cloned();

            // Watch for open requests from other instances
            // (handle_single_instance writes open_request.txt and exits;
            // this thread polls for it once per second).
            let app_req = app.handle().clone();
            std::thread::spawn(move || {
                let config_dir = dirs::config_dir()
                    .unwrap_or_else(|| PathBuf::from("."))
                    .join("MiniCloud Sync");
                let request_file = config_dir.join("open_request.txt");
                loop {
                    std::thread::sleep(Duration::from_secs(1));
                    if request_file.exists() {
                        if let Ok(path) = std::fs::read_to_string(&request_file) {
                            let path = path.trim().to_string();
                            if !path.is_empty() {
                                let _ = app_req.emit("open-cloud-file", path);
                                // Don't show window - user opens it via tray when needed
                            }
                        }
                        std::fs::remove_file(&request_file).ok();
                    }
                }
            });

            // Start minimized if configured
            let config = AppConfig::load();
            if config.start_minimized {
                if let Some(w) = app.get_webview_window("main") {
                    let _ = w.hide();
                }
            }

            // Handle .cloud file opened via file association (double-click)
            let args: Vec<String> = std::env::args().collect();
            if args.len() > 1 {
                let file_arg = &args[1];
                if file_arg.ends_with(".cloud") {
                    let cloud_path = file_arg.to_string();
                    let app_handle = app.handle().clone();
                    // Open the .cloud file after app is ready
                    // (fixed 2s delay gives the frontend time to register its listener)
                    std::thread::spawn(move || {
                        std::thread::sleep(Duration::from_secs(2));
                        let _ = app_handle.emit("open-cloud-file", cloud_path);
                    });
                }
            }

            let mut tray = TrayIconBuilder::new();
            if let Some(ic) = icon { tray = tray.icon(ic); }
            tray
                .tooltip("Mini-Cloud Sync")
                .menu(&menu)
                .on_menu_event(|app, event| {
                    match event.id.as_ref() {
                        "quit" => std::process::exit(0),
                        "show" => {
                            if let Some(w) = app.get_webview_window("main") {
                                let _ = w.show();
                                let _ = w.set_focus();
                            }
                        }
                        "sync" => { let _ = app.emit("trigger-sync", ()); }
                        _ => {}
                    }
                })
                .on_tray_icon_event(|tray, event| {
                    // Double-click on tray icon = show window
                    if let tauri::tray::TrayIconEvent::DoubleClick { .. } = event {
                        if let Some(w) = tray.app_handle().get_webview_window("main") {
                            let _ = w.show();
                            let _ = w.set_focus();
                        }
                    }
                })
                .build(app)?;

            Ok(())
        })
        .invoke_handler(tauri::generate_handler![
            load_saved_config,
            auto_login,
            login,
            set_start_minimized,
            get_start_minimized,
            add_sync_path,
            remove_sync_path,
            get_sync_paths,
            toggle_sync_mode,
            start_sync,
            run_sync_now,
            open_cloud_file,
            open_offline_file,
            get_file_tree,
            get_status,
            unlock_file_cmd,
            lock_file_cmd,
            browse_sync_folder,
            mark_offline,
            unmark_offline,
        ])
        .run(tauri::generate_context!())
        .expect("error while running tauri application");
}
|