# --- Reconstructed from patch: backend/app/api/backup.py (new file) ---
# Patch context (from the original commit, preserved here as comments):
#   Commit: "feat: Backup & Restore mit Chunked Upload fuer grosse Dateien"
#   * Backup: streaming ZIP with the SQLite DB (via the sqlite3 backup API),
#     all uploaded files, and metadata.json; downloadable from the admin panel.
#   * Restore: direct upload for small backups, chunked upload (10 MB pieces)
#     for large ones; DB merge via INSERT OR REPLACE on common columns.
#   * app/api/__init__.py additionally imports this module so the routes
#     register on the shared blueprint:
#       from app.api import auth, users, files, calendar, contacts, email, \
#           office, passwords, backup  # noqa: E402, F401

import io
import json
import os
import shutil
import sqlite3
import tempfile
import uuid
import zipfile
from datetime import datetime, timezone
from pathlib import Path

from flask import request, jsonify, current_app, Response

from app.api import api_bp
from app.api.auth import admin_required
from app.extensions import db

# Active chunked uploads (upload_id -> metadata), kept in process memory.
# NOTE(review): entries for abandoned uploads are never purged, and this state
# does not survive restarts or multi-worker deployments -- confirm the app
# runs single-process, or move this bookkeeping to disk/DB.
_active_uploads = {}


# --- Backup ---

@api_bp.route('/admin/backup', methods=['POST'])
@admin_required
def create_backup():
    """Create a full backup and stream it to the client as a ZIP download.

    Archive layout:
      - metadata.json      version, timestamp and simple stats
      - database.sqlite3   consistent copy of the SQLite DB (sqlite3 backup API)
      - files/             every uploaded user file under UPLOAD_PATH
    """
    db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']
    # Assumes a sqlite:/// URI -- TODO confirm the app never configures
    # another backend, otherwise this produces a bogus path.
    db_path = db_uri.replace('sqlite:///', '')
    upload_path = Path(current_app.config['UPLOAD_PATH'])

    # Imported lazily to avoid import cycles at module load time.
    from app.models.user import User
    from app.models.file import File
    user_count = User.query.count()
    file_count = File.query.filter_by(is_folder=False).count()

    metadata = {
        'version': '1.0',
        'created_at': datetime.now(timezone.utc).isoformat(),
        'user_count': user_count,
        'file_count': file_count,
        'description': 'Mini-Cloud Full Backup',
    }

    def generate_zip():
        """Build the ZIP in a temp file, then stream it in 1 MB chunks."""
        # Build on disk (not in memory) so arbitrarily large backups work.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.zip') as tmp:
            tmp_path = tmp.name

        try:
            with zipfile.ZipFile(tmp_path, 'w', zipfile.ZIP_DEFLATED,
                                 allowZip64=True) as zf:
                # 1. Metadata
                zf.writestr('metadata.json', json.dumps(metadata, indent=2))

                # 2. SQLite database -- the sqlite3 backup API yields a
                #    consistent snapshot even if the app writes concurrently.
                db_backup_path = tmp_path + '.db'
                try:
                    # Nested try/finally so each connection is closed even if
                    # the other connect() or backup() raises.
                    source = sqlite3.connect(db_path)
                    try:
                        dest = sqlite3.connect(db_backup_path)
                        try:
                            source.backup(dest)
                        finally:
                            dest.close()
                    finally:
                        source.close()
                    zf.write(db_backup_path, 'database.sqlite3')
                finally:
                    if os.path.exists(db_backup_path):
                        os.unlink(db_backup_path)

                # 3. User files
                if upload_path.exists():
                    for file_path in upload_path.rglob('*'):
                        if file_path.is_file():
                            arcname = 'files/' + str(file_path.relative_to(upload_path))
                            zf.write(str(file_path), arcname)

            # Stream the finished archive in 1 MB chunks.
            with open(tmp_path, 'rb') as f:
                while chunk := f.read(1024 * 1024):
                    yield chunk
        finally:
            if os.path.exists(tmp_path):
                os.unlink(tmp_path)

    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    filename = f'minicloud_backup_{timestamp}.zip'

    return Response(
        generate_zip(),
        mimetype='application/zip',
        headers={
            # BUGFIX: the original sent a literal placeholder instead of the
            # generated file name computed above.
            'Content-Disposition': f'attachment; filename="{filename}"',
            # Disable nginx proxy buffering so the download streams.
            'X-Accel-Buffering': 'no',
        },
    )


# --- Chunked Restore Upload ---

@api_bp.route('/admin/restore/init', methods=['POST'])
@admin_required
def restore_init():
    """Initialize a chunked restore upload.

    Returns an upload_id to use for subsequent chunk uploads, plus the
    recommended chunk size.
    """
    data = request.get_json() or {}
    total_size = data.get('total_size', 0)
    total_chunks = data.get('total_chunks', 0)
    filename = data.get('filename', 'backup.zip')

    upload_id = str(uuid.uuid4())
    upload_dir = Path(tempfile.gettempdir()) / f'minicloud_restore_{upload_id}'
    upload_dir.mkdir(parents=True, exist_ok=True)

    _active_uploads[upload_id] = {
        'dir': str(upload_dir),
        'total_size': total_size,
        'total_chunks': total_chunks,
        'received_chunks': set(),
        'filename': filename,
        'created_at': datetime.now(timezone.utc).isoformat(),
    }

    return jsonify({
        'upload_id': upload_id,
        'chunk_size': 10 * 1024 * 1024,  # 10 MB recommended chunk size
    }), 200


@api_bp.route('/admin/restore/chunk', methods=['POST'])
@admin_required
def restore_chunk():
    """Upload a single chunk of the backup file."""
    upload_id = request.form.get('upload_id', '')
    # BUGFIX: a non-numeric chunk_number used to raise ValueError -> HTTP 500;
    # reject it explicitly instead.
    try:
        chunk_number = int(request.form.get('chunk_number', 0))
    except (TypeError, ValueError):
        return jsonify({'error': 'Ungueltige Chunk-Nummer'}), 400

    if upload_id not in _active_uploads:
        return jsonify({'error': 'Upload-ID unbekannt. Bitte neu starten.'}), 404

    if 'chunk' not in request.files:
        return jsonify({'error': 'Kein Chunk gesendet'}), 400

    upload_info = _active_uploads[upload_id]
    upload_dir = Path(upload_info['dir'])

    chunk_file = request.files['chunk']
    # Zero-padded name so chunks assemble in lexical = numeric order.
    chunk_path = upload_dir / f'chunk_{chunk_number:06d}'
    chunk_file.save(str(chunk_path))

    upload_info['received_chunks'].add(chunk_number)

    return jsonify({
        'chunk_number': chunk_number,
        'received': len(upload_info['received_chunks']),
        'total': upload_info['total_chunks'],
    }), 200


@api_bp.route('/admin/restore/finalize', methods=['POST'])
@admin_required
def restore_finalize():
    """Assemble the uploaded chunks into a ZIP and perform the restore."""
    data = request.get_json() or {}
    upload_id = data.get('upload_id', '')

    if upload_id not in _active_uploads:
        return jsonify({'error': 'Upload-ID unbekannt'}), 404

    upload_info = _active_uploads[upload_id]
    upload_dir = Path(upload_info['dir'])

    try:
        # BUGFIX: the original scanned chunk files sequentially and stopped at
        # the first gap, silently truncating the archive if a middle chunk
        # never arrived.  Verify completeness up front.
        expected = upload_info['total_chunks']
        if expected and len(upload_info['received_chunks']) < expected:
            return jsonify({'error': 'Es fehlen Chunks. Bitte neu starten.'}), 400

        # Assemble chunks into one ZIP file.
        zip_path = upload_dir / 'backup.zip'
        with open(str(zip_path), 'wb') as outfile:
            chunk_num = 0
            while True:
                chunk_path = upload_dir / f'chunk_{chunk_num:06d}'
                if not chunk_path.exists():
                    break
                with open(str(chunk_path), 'rb') as cf:
                    shutil.copyfileobj(cf, outfile)
                chunk_num += 1

        if chunk_num == 0:
            return jsonify({'error': 'Keine Chunks gefunden'}), 400

        # Verify it's a valid ZIP before touching the live data.
        if not zipfile.is_zipfile(str(zip_path)):
            return jsonify({'error': 'Ungueltige ZIP-Datei'}), 400

        result = _perform_restore(str(zip_path))
        return jsonify(result), 200

    except Exception as e:
        return jsonify({'error': f'Restore fehlgeschlagen: {str(e)}'}), 500

    finally:
        # Always drop the staging directory and the in-memory bookkeeping,
        # on success and on every error path above.
        shutil.rmtree(str(upload_dir), ignore_errors=True)
        _active_uploads.pop(upload_id, None)


@api_bp.route('/admin/restore/direct', methods=['POST'])
@admin_required
def restore_direct():
    """Direct (non-chunked) restore from a small backup file.

    The frontend routes backups up to 100 MB here and uses the chunked
    endpoints above for anything larger.
    """
    if 'file' not in request.files:
        return jsonify({'error': 'Keine Datei gesendet'}), 400

    backup_file = request.files['file']

    with tempfile.NamedTemporaryFile(delete=False, suffix='.zip') as tmp:
        backup_file.save(tmp.name)
        tmp_path = tmp.name

    try:
        if not zipfile.is_zipfile(tmp_path):
            return jsonify({'error': 'Ungueltige ZIP-Datei'}), 400

        result = _perform_restore(tmp_path)
        return jsonify(result), 200

    except Exception as e:
        return jsonify({'error': f'Restore fehlgeschlagen: {str(e)}'}), 500
    finally:
        os.unlink(tmp_path)


def _perform_restore(zip_path):
    """Perform the actual restore from a validated ZIP file.

    Strategy for the DB merge:
      - Open the backup SQLite file as a separate connection.
      - For each table present in BOTH databases, copy all rows using
        INSERT OR REPLACE restricted to the columns both schemas share.
      - This preserves any new tables/columns in the current schema while
        backup rows overwrite live rows with the same primary key.

    Returns a stats dict (``success``, ``message``, per-table row counts,
    restored file count, and metadata from the backup if present).
    """
    upload_path = Path(current_app.config['UPLOAD_PATH'])
    stats = {'users': 0, 'files_db': 0, 'files_disk': 0, 'tables': []}

    with tempfile.TemporaryDirectory() as extract_dir:
        extract_path = Path(extract_dir)

        # Extract the archive.  Admin-only endpoint; zipfile.extract also
        # sanitizes absolute paths and '..' components in member names.
        with zipfile.ZipFile(zip_path, 'r') as zf:
            zf.extractall(str(extract_path))

        # Read metadata (optional in the archive).
        metadata_path = extract_path / 'metadata.json'
        metadata = {}
        if metadata_path.exists():
            metadata = json.loads(metadata_path.read_text())
            stats['backup_date'] = metadata.get('created_at', 'Unbekannt')
            stats['backup_users'] = metadata.get('user_count', '?')
            stats['backup_files'] = metadata.get('file_count', '?')

        # Restore database via column-intersection merge.
        backup_db_path = extract_path / 'database.sqlite3'
        if backup_db_path.exists():
            live_db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']
            live_db_path = live_db_uri.replace('sqlite:///', '')

            backup_conn = sqlite3.connect(str(backup_db_path))
            backup_conn.row_factory = sqlite3.Row
            live_conn = sqlite3.connect(live_db_path)

            try:
                backup_tables = [row[0] for row in
                                 backup_conn.execute(
                                     "SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'"
                                 ).fetchall()]

                live_tables = [row[0] for row in
                               live_conn.execute(
                                   "SELECT name FROM sqlite_master WHERE type='table' AND name NOT LIKE 'sqlite_%'"
                               ).fetchall()]

                for table in backup_tables:
                    # Never import migration bookkeeping or tables the live
                    # schema no longer has.
                    if table == 'alembic_version':
                        continue
                    if table not in live_tables:
                        continue

                    # PRAGMA table_info row layout: (cid, name, type, ...).
                    live_cols = [col[1] for col in
                                 live_conn.execute(f'PRAGMA table_info("{table}")').fetchall()]
                    backup_cols = [col[1] for col in
                                   backup_conn.execute(f'PRAGMA table_info("{table}")').fetchall()]

                    # Only copy columns that exist in both schemas.
                    common_cols = [c for c in backup_cols if c in live_cols]
                    if not common_cols:
                        continue

                    cols_str = ', '.join(f'"{c}"' for c in common_cols)
                    placeholders = ', '.join('?' for _ in common_cols)

                    rows = backup_conn.execute(
                        f'SELECT {cols_str} FROM "{table}"'
                    ).fetchall()

                    row_count = 0
                    for row in rows:
                        try:
                            live_conn.execute(
                                f'INSERT OR REPLACE INTO "{table}" ({cols_str}) VALUES ({placeholders})',
                                tuple(row)
                            )
                            row_count += 1
                        except Exception:
                            # Deliberate best-effort: a row violating a new
                            # constraint must not abort the whole restore.
                            continue

                    if row_count > 0:
                        stats['tables'].append({'name': table, 'rows': row_count})

                live_conn.commit()
            finally:
                backup_conn.close()
                live_conn.close()

        # Restore uploaded files into UPLOAD_PATH, preserving relative paths.
        backup_files_dir = extract_path / 'files'
        if backup_files_dir.exists():
            upload_path.mkdir(parents=True, exist_ok=True)
            file_count = 0
            for src_file in backup_files_dir.rglob('*'):
                if src_file.is_file():
                    rel_path = src_file.relative_to(backup_files_dir)
                    dest = upload_path / rel_path
                    dest.parent.mkdir(parents=True, exist_ok=True)
                    shutil.copy2(str(src_file), str(dest))
                    file_count += 1
            stats['files_disk'] = file_count

    stats['success'] = True
    stats['message'] = 'Restore erfolgreich abgeschlossen'
    return stats

# --- End of backend/app/api/backup.py; the patch continues with the
# --- frontend changes to frontend/src/views/AdminView.vue below.
+

Backup & Restore

+ +
+ +
+

Backup erstellen

+

Erstellt eine ZIP-Datei mit der kompletten Datenbank und allen hochgeladenen Dateien.

+
+ + +
+

Restore

+ +
+ Anleitung:
+ 1. Neue Mini-Cloud-Instanz aufsetzen (Docker oder manuell)
+ 2. Admin-Benutzer registrieren
+ 3. Wichtig: In der .env muessen SECRET_KEY und JWT_SECRET_KEY identisch zur alten Instanz sein, sonst koennen verschluesselte Daten (E-Mail-Passwoerter, Passwort-Manager) nicht entschluesselt werden!
+ 4. Backup-ZIP hier hochladen
+ 5. Alle Benutzer, Dateien, Kalender, Kontakte und Einstellungen werden wiederhergestellt
+
+ + + Die SECRET_KEY und JWT_SECRET_KEY in der .env muessen mit dem Backup uebereinstimmen! + + +
+ + +
+ +
+

Datei: {{ restoreFile.name }} ({{ formatSize(restoreFile.size) }})

+
+ +
+

Restore laeuft...

+ +

{{ restoreStatus }}

+
+ +
+ + {{ restoreResult.message }} + +
+ Wiederhergestellte Tabellen: +
    +
  • {{ t.name }}: {{ t.rows }} Eintraege
  • +
+

Dateien auf Festplatte: {{ restoreResult.files_disk }}

+

Backup vom: {{ restoreResult.backup_date }}

+
+
+
+
+
+
@@ -257,6 +321,7 @@ import InputSwitch from 'primevue/inputswitch' import Message from 'primevue/message' import TabView from 'primevue/tabview' import TabPanel from 'primevue/tabpanel' +import ProgressBar from 'primevue/progressbar' const toast = useToast() const auth = useAuthStore() @@ -278,6 +343,16 @@ const smtpForm = ref({ const smtpPasswordSet = ref(false) const smtpTesting = ref(false) +// Backup & Restore +const backupLoading = ref(false) +const restoreFileInput = ref(null) +const restoreFile = ref(null) +const restoreInProgress = ref(false) +const restoreProgress = ref(0) +const restoreStatus = ref('') +const restoreResult = ref(null) +const CHUNK_SIZE = 10 * 1024 * 1024 // 10 MB + const showUserDialog = ref(false) const editingUser = ref(null) const userForm = ref({ username: '', email: '', password: '', role: 'user', storage_quota_mb: 5120, is_active: true }) @@ -343,6 +418,124 @@ async function saveSettings() { } } +// --- Backup & Restore --- +function formatSize(bytes) { + if (!bytes) return '0 B' + const units = ['B', 'KB', 'MB', 'GB', 'TB'] + let i = 0; let size = bytes + while (size >= 1024 && i < units.length - 1) { size /= 1024; i++ } + return `${size.toFixed(i > 0 ? 1 : 0)} ${units[i]}` +} + +async function createBackup() { + backupLoading.value = true + try { + const response = await apiClient.post('/admin/backup', {}, { responseType: 'blob' }) + const url = URL.createObjectURL(response.data) + const a = document.createElement('a') + a.href = url + const disposition = response.headers['content-disposition'] || '' + const match = disposition.match(/filename="?(.+?)"?$/) + a.download = match ? 
match[1] : `minicloud_backup_${new Date().toISOString().slice(0,10)}.zip` + a.click() + URL.revokeObjectURL(url) + toast.add({ severity: 'success', summary: 'Backup heruntergeladen', life: 3000 }) + } catch (err) { + toast.add({ severity: 'error', summary: 'Backup fehlgeschlagen', detail: err.response?.data?.error, life: 5000 }) + } finally { + backupLoading.value = false + } +} + +function onRestoreFileSelected(event) { + restoreFile.value = event.target.files[0] || null + restoreResult.value = null +} + +async function startRestore() { + if (!restoreFile.value) return + + restoreInProgress.value = true + restoreProgress.value = 0 + restoreStatus.value = 'Starte Upload...' + restoreResult.value = null + + const file = restoreFile.value + const totalChunks = Math.ceil(file.size / CHUNK_SIZE) + + try { + if (file.size <= 100 * 1024 * 1024) { + // Small file: direct upload + restoreStatus.value = 'Lade Datei hoch...' + restoreProgress.value = 50 + const formData = new FormData() + formData.append('file', file) + const res = await apiClient.post('/admin/restore/direct', formData, { + headers: { 'Content-Type': 'multipart/form-data' }, + timeout: 600000, + }) + restoreProgress.value = 100 + restoreResult.value = res.data + } else { + // Large file: chunked upload + // 1. Init + restoreStatus.value = 'Initialisiere Upload...' + const initRes = await apiClient.post('/admin/restore/init', { + total_size: file.size, + total_chunks: totalChunks, + filename: file.name, + }) + const uploadId = initRes.data.upload_id + + // 2. 
Upload chunks + for (let i = 0; i < totalChunks; i++) { + const start = i * CHUNK_SIZE + const end = Math.min(start + CHUNK_SIZE, file.size) + const chunk = file.slice(start, end) + + const formData = new FormData() + formData.append('upload_id', uploadId) + formData.append('chunk_number', i.toString()) + formData.append('chunk', chunk) + + await apiClient.post('/admin/restore/chunk', formData, { + headers: { 'Content-Type': 'multipart/form-data' }, + timeout: 120000, + }) + + restoreProgress.value = Math.round(((i + 1) / totalChunks) * 80) + restoreStatus.value = `Chunk ${i + 1} / ${totalChunks} hochgeladen (${formatSize(end)} / ${formatSize(file.size)})` + } + + // 3. Finalize + restoreStatus.value = 'Stelle Daten wieder her...' + restoreProgress.value = 85 + const finalRes = await apiClient.post('/admin/restore/finalize', { + upload_id: uploadId, + }, { timeout: 600000 }) + + restoreProgress.value = 100 + restoreResult.value = finalRes.data + } + + restoreStatus.value = 'Fertig!' + if (restoreResult.value?.success) { + toast.add({ severity: 'success', summary: 'Restore erfolgreich', life: 5000 }) + await loadUsers() + } + } catch (err) { + restoreResult.value = { + success: false, + message: err.response?.data?.error || 'Restore fehlgeschlagen: ' + String(err), + } + toast.add({ severity: 'error', summary: 'Restore fehlgeschlagen', life: 5000 }) + } finally { + restoreInProgress.value = false + restoreFile.value = null + if (restoreFileInput.value) restoreFileInput.value.value = '' + } +} + // --- Invite links --- async function createInvite() { inviteLoading.value = true @@ -567,4 +760,21 @@ onMounted(() => { .acc-actions { display: flex; } .empty-hint-small { padding: 1rem; color: var(--p-text-muted-color); font-size: 0.875rem; text-align: center; } .section-title { margin: 1rem 0 0.5rem; font-size: 0.95rem; font-weight: 600; } +.backup-grid { display: grid; grid-template-columns: 1fr 1fr; gap: 1.5rem; } +@media (max-width: 900px) { .backup-grid { 
grid-template-columns: 1fr; } } +.backup-card { border: 1px solid var(--p-surface-200); border-radius: 8px; padding: 1.25rem; } +.backup-card h4 { margin: 0 0 0.75rem; display: flex; align-items: center; gap: 0.5rem; } +.backup-card p { font-size: 0.875rem; color: var(--p-text-muted-color); margin: 0 0 1rem; } +.restore-instructions { background: var(--p-surface-50); border-radius: 6px; padding: 1rem; margin-bottom: 1rem; font-size: 0.85rem; } +.restore-instructions ol { margin: 0.5rem 0 0; padding-left: 1.25rem; } +.restore-instructions li { margin-bottom: 0.375rem; line-height: 1.4; } +.restore-instructions code { background: var(--p-surface-200); padding: 0.125rem 0.375rem; border-radius: 3px; font-size: 0.8rem; } +.restore-info { margin-top: 1rem; } +.restore-info p { margin-bottom: 0.75rem; } +.restore-progress { margin-top: 1rem; } +.restore-progress p { margin: 0.5rem 0; } +.progress-text { font-size: 0.825rem; color: var(--p-text-muted-color); } +.restore-result { margin-top: 1rem; } +.result-details { font-size: 0.85rem; margin-top: 0.5rem; } +.result-details ul { margin: 0.25rem 0; padding-left: 1.25rem; }