From 0770259d3d8c348a4ae5dbfb8aa4fb920efc52bf Mon Sep 17 00:00:00 2001
From: Stefan Hacker
Date: Thu, 16 Apr 2026 11:00:51 +0200
Subject: [PATCH] Add file upload portal with per-customer links and WebDAV admin access

- Customer upload via token link (no login), optional password + expiry,
  drag & drop for files and folders with preserved structure
- Admin portal with setup wizard, role-based users (admin/staff),
  per-customer WebDAV access rules (read/write), session auth
- WebDAV container (Debian apache2) with htpasswd + access.conf
  auto-generated from the SQLite DB and reloaded via inotifywait
- Configurable public base URL and janitor cron interval in admin UI;
  janitor reconciles the uploads table with the filesystem

Co-Authored-By: Claude Opus 4.6 (1M context)
---
 .env.example            |   3 +
 Dockerfile              |  27 +++
 README.md               |  73 +++++++
 docker-compose.yml      |  32 +++
 package.json            |  17 ++
 public/admin/index.html | 452 ++++++++++++++++++++++++++++++++++++++++
 public/upload.html      | 191 +++++++++++++++++
 src/auth.js             | 134 ++++++++++++
 src/db.js               |  65 ++++++
 src/janitor.js          |  90 ++++++++
 src/server.js           | 436 ++++++++++++++++++++++++++++++++++++++
 src/settings.js         |  37 ++++
 src/webdav-config.js    |  84 ++++++++
 webdav/Dockerfile       |  28 +++
 webdav/entrypoint.sh    |  32 +++
 webdav/webdav.conf      |  32 +++
 16 files changed, 1733 insertions(+)
 create mode 100644 .env.example
 create mode 100644 Dockerfile
 create mode 100644 README.md
 create mode 100644 docker-compose.yml
 create mode 100644 package.json
 create mode 100644 public/admin/index.html
 create mode 100644 public/upload.html
 create mode 100644 src/auth.js
 create mode 100644 src/db.js
 create mode 100644 src/janitor.js
 create mode 100644 src/server.js
 create mode 100644 src/settings.js
 create mode 100644 src/webdav-config.js
 create mode 100644 webdav/Dockerfile
 create mode 100644 webdav/entrypoint.sh
 create mode 100644 webdav/webdav.conf

diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..8401e69
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,3 @@
+# Host ports on which the services are reachable.
+APP_PORT=3500
+WEBDAV_PORT=1900
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..e6a04a3
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,27 @@
+FROM node:20-bookworm-slim
+
+WORKDIR /app
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    python3 make g++ \
+    && rm -rf /var/lib/apt/lists/*
+
+COPY package.json ./
+RUN npm install --omit=dev
+
+COPY src ./src
+COPY public ./public
+
+# Prepare mount points with UID 1000 ownership so named volumes inherit it.
+RUN mkdir -p /data/db /data/uploads /webdav-config \
+    && chown -R 1000:1000 /data /webdav-config /app
+
+ENV NODE_ENV=production \
+    PORT=3000 \
+    UPLOAD_ROOT=/data/uploads \
+    DB_PATH=/data/db/app.db \
+    WEBDAV_CONFIG_DIR=/webdav-config
+
+EXPOSE 3000
+
+CMD ["node", "src/server.js"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..36e6f5b
--- /dev/null
+++ b/README.md
@@ -0,0 +1,73 @@
+# Simple File Upload
+
+Customers upload files and folders through an individual link (no login).
+Admins and staff manage customers in the admin portal and access the files via WebDAV.
+
+## Start
+
+```bash
+cp .env.example .env          # optional: adjust ports
+docker compose up -d --build
+```
+
+- Admin portal: `http://HOST:3500/` (redirects to `/admin/`)
+- WebDAV: `webdav://HOST:1900/` (Basic Auth, same users as in the admin portal)
+
+**On first start** a **setup wizard** opens, where you create the first admin.
+
+## Roles
+
+- **Admin**: creates customers and further users (admins / staff), manages upload links and per-customer access. Has full WebDAV access to everything.
+- **Staff**: sees only the customers in the admin portal that an admin has granted them access to. Via WebDAV they reach their assigned customer folders (`read` or `write`).
+- **Customer**: no login; uploads via an individual token link (optionally with password + expiry date). Never sees a file listing.
+
+## Configuration
+
+### `.env` (ports)
+
+| Variable      | Default | Purpose                             |
+|---------------|---------|-------------------------------------|
+| `APP_PORT`    | `3500`  | Host port for admin portal + upload |
+| `WEBDAV_PORT` | `1900`  | Host port for WebDAV                |
+
+### Admin GUI → Settings
+
+- **Public base URL**: used in generated upload links. Leave empty to derive it from the request.
+- **Cron interval (minutes)**: periodic DB/FS reconciliation (removes orphaned DB rows for files deleted via WebDAV and registers files uploaded directly via WebDAV).
+
+### Volumes
+
+- `./data/db/` → SQLite file, next to `docker-compose.yml`
+- `./data/uploads/` → one subfolder per customer (slug)
+- Named volume `webdav-config` → dynamically generated Apache config
+
+Both containers run as UID `1000:1000`. If existing data is owned by root:
+
+```bash
+sudo chown -R 1000:1000 data/
+```
+
+## How the WebDAV ACLs work
+
+On every user or customer change, the app container regenerates:
+
+- `/webdav-config/htpasswd`: all users (bcrypt hashes taken straight from the DB)
+- `/webdav-config/access.conf`: one `<Location>` block per customer folder:
+  - identical read and write users → a single `Require user …`
+  - different sets → `<Limit>` + `<LimitExcept>` for a clean separation
+
+The WebDAV container (Debian Apache) watches the directory via `inotifywait` and calls `apachectl graceful`; changes take effect within about 2 seconds.
+
+## Customer upload
+
+- File button, folder button, or drag & drop (files **and** folders; the structure is preserved). A scripted example follows this list.
+- Optional password prompt, optional expiry date.
+- The customer sees no file listing, only their own upload feedback.
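+
+The same endpoint can be scripted. A sketch; token, password, host, and file name are placeholders:
+
+```bash
+TOKEN=abc123   # placeholder: taken from the customer's upload link /u/<token>
+# 'path' must precede the 'file' part: the server reads it when choosing the
+# target directory. The password header is only needed for protected links.
+curl -H "x-upload-password: secret" \
+  -F "path=reports/2026/q1.pdf" \
+  -F "file=@q1.pdf" \
+  "http://HOST:3500/u/$TOKEN/upload"
+```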
+
+## WebDAV access
+
+- macOS Finder: `Go → Connect to Server → http://HOST:1900/`
+- Windows: Map network drive → `http://HOST:1900/`
+- Linux / KDE Dolphin: `webdav://USER@HOST:1900/`
+- Write access covers `PUT`, `DELETE`, `MKCOL`, `MOVE`, `COPY`, `PROPPATCH`, `LOCK`, `UNLOCK`. A quick command-line check follows this list.
+- In Dolphin, **Shift+Del** deletes directly (bypassing the WebDAV trash, which does not exist).
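+
+To verify a user's access from the command line (sketch; user `anna`, customer folder `acme`, and the password are placeholders):
+
+```bash
+# Read access: list a customer folder via PROPFIND (expect 207 Multi-Status).
+curl -u anna:secret -X PROPFIND -H "Depth: 1" "http://HOST:1900/acme/"
+
+# Write access: PUT a file; a user with read-only access gets 403 here.
+curl -u anna:secret -T notes.txt "http://HOST:1900/acme/notes.txt"
+```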
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..a18b4d4
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,32 @@
+services:
+  app:
+    build: .
+    container_name: simple-file-upload
+    restart: unless-stopped
+    user: "1000:1000"
+    ports:
+      - "${APP_PORT:-3500}:3000"   # web (upload + admin)
+    environment:
+      DB_PATH: /data/db/app.db
+      UPLOAD_ROOT: /data/uploads
+      WEBDAV_CONFIG_DIR: /webdav-config
+      # PUBLIC_BASE_URL can be set via the admin Settings tab instead.
+    volumes:
+      - ./data/db:/data/db                # SQLite DB lives here, next to docker-compose.yml
+      - ./data/uploads:/data/uploads      # customer uploads
+      - webdav-config:/webdav-config      # dynamically generated htpasswd + access.conf
+
+  webdav:
+    build: ./webdav
+    container_name: simple-file-upload-webdav
+    restart: unless-stopped
+    ports:
+      - "${WEBDAV_PORT:-1900}:80"   # WebDAV; login with the DB users
+    environment:
+      WEBDAV_CONFIG_DIR: /webdav-config
+    volumes:
+      - ./data/uploads:/data/uploads
+      - webdav-config:/webdav-config
+
+volumes:
+  webdav-config:
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..c0392e0
--- /dev/null
+++ b/package.json
@@ -0,0 +1,17 @@
+{
+  "name": "simple-file-upload",
+  "version": "1.0.0",
+  "description": "Customer file upload portal with admin and WebDAV",
+  "main": "src/server.js",
+  "scripts": {
+    "start": "node src/server.js"
+  },
+  "dependencies": {
+    "bcrypt": "^5.1.1",
+    "better-sqlite3": "^11.3.0",
+    "express": "^4.21.0",
+    "express-basic-auth": "^1.2.1",
+    "multer": "^1.4.5-lts.1",
+    "nanoid": "^3.3.7"
+  }
+}
diff --git a/public/admin/index.html b/public/admin/index.html
new file mode 100644
index 0000000..1f97555
--- /dev/null
+++ b/public/admin/index.html
@@ -0,0 +1,452 @@
+[452-line single-page admin UI ("Adminportal"): setup wizard, login form, customer / user / settings tabs. Markup not preserved in this extraction.]
diff --git a/public/upload.html b/public/upload.html
new file mode 100644
index 0000000..a1f6a18
--- /dev/null
+++ b/public/upload.html
@@ -0,0 +1,191 @@

+[191-line customer upload page ("Datei-Upload"): drag & drop for files and folders, password prompt ("Dieser Link ist passwortgeschützt."), per-file upload feedback. Markup not preserved in this extraction.]
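Both pages drive the JSON API defined in `src/server.js` further below. For headless provisioning, the first admin can also be created without the wizard; a sketch with placeholder host and credentials:

```bash
# First-run only: creates the initial admin and stores the session cookie.
curl -c /tmp/sfu.jar -H 'Content-Type: application/json' \
  -d '{"username":"admin","password":"secret1"}' \
  http://HOST:3500/admin/api/setup

# Verify: status should report setup_required=false and authenticated=true.
curl -b /tmp/sfu.jar http://HOST:3500/admin/api/status
```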
+ + + + + + diff --git a/src/auth.js b/src/auth.js new file mode 100644 index 0000000..f86901f --- /dev/null +++ b/src/auth.js @@ -0,0 +1,134 @@ +const bcrypt = require('bcrypt'); +const { nanoid } = require('nanoid'); +const db = require('./db'); + +const SESSION_TTL_MS = 30 * 24 * 3600 * 1000; // 30 days +const COOKIE_NAME = 'sfu_session'; +const USERNAME_RE = /^[a-z0-9._-]{2,32}$/i; + +function parseCookies(req) { + const raw = req.headers.cookie || ''; + const out = {}; + raw.split(';').forEach(part => { + const idx = part.indexOf('='); + if (idx < 0) return; + const k = part.slice(0, idx).trim(); + const v = part.slice(idx + 1).trim(); + if (k) { + try { out[k] = decodeURIComponent(v); } catch { out[k] = v; } + } + }); + return out; +} + +function hasAnyUser() { + return !!db.prepare('SELECT 1 FROM users LIMIT 1').get(); +} + +function validateUsername(u) { + return typeof u === 'string' && USERNAME_RE.test(u); +} + +function validatePassword(p) { + return typeof p === 'string' && p.length >= 6; +} + +async function createUser(username, password, role = 'staff') { + if (!validateUsername(username)) throw new Error('invalid username'); + if (!validatePassword(password)) throw new Error('password too short'); + if (role !== 'admin' && role !== 'staff') throw new Error('invalid role'); + const hash = await bcrypt.hash(password, 10); + const info = db.prepare( + 'INSERT INTO users (username, password_hash, role, created_at) VALUES (?, ?, ?, ?)' + ).run(username.toLowerCase(), hash, role, Date.now()); + return info.lastInsertRowid; +} + +async function setUserPassword(id, password) { + if (!validatePassword(password)) throw new Error('password too short'); + const hash = await bcrypt.hash(password, 10); + db.prepare('UPDATE users SET password_hash = ? WHERE id = ?').run(hash, id); +} + +async function verifyCredentials(username, password) { + const u = db.prepare('SELECT * FROM users WHERE username = ?').get((username || '').toLowerCase()); + if (!u) return null; + const ok = await bcrypt.compare(password || '', u.password_hash); + return ok ? u : null; +} + +function createSession(user_id) { + const token = nanoid(32); + const now = Date.now(); + db.prepare( + 'INSERT INTO sessions (token, user_id, created_at, expires_at) VALUES (?, ?, ?, ?)' + ).run(token, user_id, now, now + SESSION_TTL_MS); + return { token, expires_at: now + SESSION_TTL_MS }; +} + +function getSessionUser(req) { + const token = parseCookies(req)[COOKIE_NAME]; + if (!token) return null; + const row = db.prepare(` + SELECT u.* FROM sessions s + JOIN users u ON u.id = s.user_id + WHERE s.token = ? AND s.expires_at > ? 
+ `).get(token, Date.now()); + return row || null; +} + +function deleteSession(token) { + if (token) db.prepare('DELETE FROM sessions WHERE token = ?').run(token); +} + +function setSessionCookie(res, token) { + res.cookie(COOKIE_NAME, token, { + httpOnly: true, + sameSite: 'lax', + maxAge: SESSION_TTL_MS, + path: '/', + }); +} + +function clearSessionCookie(res) { + res.clearCookie(COOKIE_NAME, { path: '/' }); +} + +function requireAuth(req, res, next) { + const u = getSessionUser(req); + if (!u) return res.status(401).json({ error: 'unauthorized' }); + req.user = u; + next(); +} + +function requireAdmin(req, res, next) { + const u = getSessionUser(req); + if (!u) return res.status(401).json({ error: 'unauthorized' }); + if (u.role !== 'admin') return res.status(403).json({ error: 'admin only' }); + req.user = u; + next(); +} + +function cleanupExpiredSessions() { + db.prepare('DELETE FROM sessions WHERE expires_at < ?').run(Date.now()); +} + +module.exports = { + COOKIE_NAME, + SESSION_TTL_MS, + parseCookies, + hasAnyUser, + validateUsername, + validatePassword, + createUser, + setUserPassword, + verifyCredentials, + createSession, + getSessionUser, + deleteSession, + setSessionCookie, + clearSessionCookie, + requireAuth, + requireAdmin, + cleanupExpiredSessions, +}; diff --git a/src/db.js b/src/db.js new file mode 100644 index 0000000..8b9178b --- /dev/null +++ b/src/db.js @@ -0,0 +1,65 @@ +const Database = require('better-sqlite3'); +const path = require('path'); +const fs = require('fs'); + +const DB_PATH = process.env.DB_PATH || '/data/db/app.db'; + +fs.mkdirSync(path.dirname(DB_PATH), { recursive: true }); + +const db = new Database(DB_PATH); +db.pragma('journal_mode = WAL'); +db.pragma('foreign_keys = ON'); + +db.exec(` + CREATE TABLE IF NOT EXISTS customers ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL, + slug TEXT NOT NULL UNIQUE, + token TEXT NOT NULL UNIQUE, + password_hash TEXT, + expires_at INTEGER, + created_at INTEGER NOT NULL + ); + + CREATE TABLE IF NOT EXISTS uploads ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + customer_id INTEGER NOT NULL, + filename TEXT NOT NULL, + relative_path TEXT NOT NULL, + size INTEGER NOT NULL, + uploaded_at INTEGER NOT NULL, + FOREIGN KEY(customer_id) REFERENCES customers(id) ON DELETE CASCADE + ); + + CREATE TABLE IF NOT EXISTS users ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + username TEXT NOT NULL UNIQUE, + password_hash TEXT NOT NULL, + role TEXT NOT NULL DEFAULT 'staff', + created_at INTEGER NOT NULL + ); + + CREATE TABLE IF NOT EXISTS sessions ( + token TEXT PRIMARY KEY, + user_id INTEGER NOT NULL, + created_at INTEGER NOT NULL, + expires_at INTEGER NOT NULL, + FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE CASCADE + ); + + CREATE TABLE IF NOT EXISTS customer_access ( + customer_id INTEGER NOT NULL, + user_id INTEGER NOT NULL, + access TEXT NOT NULL DEFAULT 'read', + PRIMARY KEY(customer_id, user_id), + FOREIGN KEY(customer_id) REFERENCES customers(id) ON DELETE CASCADE, + FOREIGN KEY(user_id) REFERENCES users(id) ON DELETE CASCADE + ); + + CREATE TABLE IF NOT EXISTS settings ( + key TEXT PRIMARY KEY, + value TEXT + ); +`); + +module.exports = db; diff --git a/src/janitor.js b/src/janitor.js new file mode 100644 index 0000000..5c0ce85 --- /dev/null +++ b/src/janitor.js @@ -0,0 +1,90 @@ +const fs = require('fs'); +const path = require('path'); +const db = require('./db'); + +const UPLOAD_ROOT = process.env.UPLOAD_ROOT || '/data/uploads'; +const DEFAULT_INTERVAL_MS = 30 * 60 * 1000; // 30 minutes + +/** + * Reconcile 
the uploads table against the filesystem: + * - remove DB rows whose file was deleted via WebDAV (or any other means) + * - insert DB rows for files that appear on disk without a corresponding row + * (e.g. files uploaded directly via WebDAV by a staff user) + */ +function runOnce() { + const customers = db.prepare('SELECT id, slug FROM customers').all(); + let removed = 0; + let added = 0; + + const delStmt = db.prepare('DELETE FROM uploads WHERE id = ?'); + const insStmt = db.prepare(` + INSERT INTO uploads (customer_id, filename, relative_path, size, uploaded_at) + VALUES (?, ?, ?, ?, ?) + `); + const findStmt = db.prepare( + 'SELECT id FROM uploads WHERE customer_id = ? AND relative_path = ?' + ); + + const tx = db.transaction(() => { + for (const c of customers) { + const base = path.join(UPLOAD_ROOT, c.slug); + if (!fs.existsSync(base)) continue; + + // 1) Remove DB rows for missing files + const rows = db.prepare('SELECT id, relative_path FROM uploads WHERE customer_id = ?').all(c.id); + for (const r of rows) { + const abs = path.join(base, r.relative_path); + if (!fs.existsSync(abs)) { + delStmt.run(r.id); + removed++; + } + } + + // 2) Add DB rows for files on disk without entry + walk(base, base, (abs, rel) => { + if (findStmt.get(c.id, rel)) return; + let st; + try { st = fs.statSync(abs); } catch { return; } + insStmt.run(c.id, path.basename(rel), rel, st.size, st.mtimeMs); + added++; + }); + } + }); + + tx(); + return { removed, added }; +} + +function walk(base, dir, cb) { + let entries; + try { entries = fs.readdirSync(dir, { withFileTypes: true }); } catch { return; } + for (const e of entries) { + const abs = path.join(dir, e.name); + if (e.isDirectory()) { + walk(base, abs, cb); + } else if (e.isFile()) { + const rel = path.relative(base, abs).split(path.sep).join('/'); + cb(abs, rel); + } + } +} + +let currentTimer = null; + +function tick() { + try { + const { removed, added } = runOnce(); + if (removed || added) console.log(`[janitor] synced uploads: +${added} -${removed}`); + } catch (e) { console.error('[janitor] error:', e.message); } +} + +function start(intervalMs = DEFAULT_INTERVAL_MS) { + if (currentTimer) clearInterval(currentTimer); + setTimeout(tick, 10_000); + currentTimer = setInterval(tick, intervalMs); + return currentTimer; +} + +function restart(intervalMs) { return start(intervalMs); } + +module.exports = { start, restart, runOnce }; diff --git a/src/server.js b/src/server.js new file mode 100644 index 0000000..55fcd6d --- /dev/null +++ b/src/server.js @@ -0,0 +1,436 @@ +const express = require('express'); +const path = require('path'); +const fs = require('fs'); +const multer = require('multer'); +const bcrypt = require('bcrypt'); +const { nanoid } = require('nanoid'); +const db = require('./db'); +const auth = require('./auth'); +const webdavConfig = require('./webdav-config'); +const settings = require('./settings'); +const janitor = require('./janitor'); + +const PORT = parseInt(process.env.PORT || '3000', 10); +const UPLOAD_ROOT = process.env.UPLOAD_ROOT || '/data/uploads'; + +settings.seedFromEnv(); + +fs.mkdirSync(UPLOAD_ROOT, { recursive: true }); + +const app = express(); +app.use(express.json()); +app.use(express.urlencoded({ extended: true })); + +// ---------- Helpers ---------- +function slugify(name) { + return name + .toLowerCase() + .normalize('NFKD') + .replace(/[\u0300-\u036f]/g, '') + .replace(/[^a-z0-9]+/g, '-') + .replace(/^-+|-+$/g, '') + .slice(0, 60) || 'customer'; +} + +function ensureUniqueSlug(base) { + let slug = base; + let 
i = 1; + while (db.prepare('SELECT 1 FROM customers WHERE slug = ?').get(slug)) { + slug = `${base}-${++i}`; + } + return slug; +} + +function customerDir(slug) { + const dir = path.join(UPLOAD_ROOT, slug); + fs.mkdirSync(dir, { recursive: true }); + return dir; +} + +function safeJoin(base, target) { + const resolved = path.resolve(base, target); + const baseResolved = path.resolve(base); + if (resolved !== baseResolved && !resolved.startsWith(baseResolved + path.sep)) { + throw new Error('Path traversal detected'); + } + return resolved; +} + +function sanitizeRelPath(p) { + if (!p) return ''; + const cleaned = String(p).replace(/\\/g, '/').replace(/^\/+/, ''); + return cleaned.split('/').filter(s => s && s !== '.' && s !== '..').join('/'); +} + +function getCustomerByToken(token) { + return db.prepare('SELECT * FROM customers WHERE token = ?').get(token); +} + +function isExpired(customer) { + return customer.expires_at && customer.expires_at < Date.now(); +} + +function canAccessCustomer(user, customer, needWrite = false) { + if (user.role === 'admin') return true; + const row = db.prepare( + 'SELECT access FROM customer_access WHERE customer_id = ? AND user_id = ?' + ).get(customer.id, user.id); + if (!row) return false; + if (needWrite) return row.access === 'write'; + return true; +} + +// ---------- Setup & Auth API ---------- +const publicApi = express.Router(); + +publicApi.get('/status', (req, res) => { + const u = auth.getSessionUser(req); + res.json({ + setup_required: !auth.hasAnyUser(), + authenticated: !!u, + user: u ? { id: u.id, username: u.username, role: u.role } : null, + }); +}); + +publicApi.post('/setup', async (req, res) => { + if (auth.hasAnyUser()) return res.status(409).json({ error: 'already configured' }); + const { username, password } = req.body || {}; + if (!auth.validateUsername(username)) return res.status(400).json({ error: 'invalid username' }); + if (!auth.validatePassword(password)) return res.status(400).json({ error: 'password too short (min 6)' }); + try { + const id = await auth.createUser(username, password, 'admin'); + const s = auth.createSession(id); + auth.setSessionCookie(res, s.token); + webdavConfig.sync(); + res.json({ ok: true }); + } catch (e) { + res.status(400).json({ error: e.message }); + } +}); + +publicApi.post('/login', async (req, res) => { + const { username, password } = req.body || {}; + const u = await auth.verifyCredentials(username, password); + if (!u) return res.status(401).json({ error: 'invalid credentials' }); + const s = auth.createSession(u.id); + auth.setSessionCookie(res, s.token); + res.json({ ok: true, user: { id: u.id, username: u.username, role: u.role } }); +}); + +publicApi.post('/logout', (req, res) => { + const token = auth.parseCookies(req)[auth.COOKIE_NAME]; + auth.deleteSession(token); + auth.clearSessionCookie(res); + res.json({ ok: true }); +}); + +app.use('/admin/api', publicApi); + +// ---------- Admin static (public page, gated by JS) ---------- +app.use('/admin', express.static(path.join(__dirname, '..', 'public', 'admin'))); + +// ---------- Authenticated API ---------- +const api = express.Router(); + +// --- Users (admin only) --- +api.get('/users', auth.requireAdmin, (req, res) => { + const rows = db.prepare('SELECT id, username, role, created_at FROM users ORDER BY username').all(); + res.json(rows); +}); + +api.post('/users', auth.requireAdmin, async (req, res) => { + const { username, password, role } = req.body || {}; + try { + const id = await auth.createUser(username, password, role || 
'staff'); + webdavConfig.sync(); + res.json({ id }); + } catch (e) { + if (String(e.message).includes('UNIQUE')) return res.status(409).json({ error: 'username exists' }); + res.status(400).json({ error: e.message }); + } +}); + +api.patch('/users/:id', auth.requireAdmin, async (req, res) => { + const id = parseInt(req.params.id, 10); + const u = db.prepare('SELECT * FROM users WHERE id = ?').get(id); + if (!u) return res.status(404).json({ error: 'not found' }); + const { password, role } = req.body || {}; + if (role && role !== 'admin' && role !== 'staff') return res.status(400).json({ error: 'invalid role' }); + // Don't let admin demote themselves to the last non-admin + if (role && role !== 'admin' && u.id === req.user.id) { + const otherAdmins = db.prepare("SELECT COUNT(*) AS n FROM users WHERE role = 'admin' AND id != ?").get(id).n; + if (!otherAdmins) return res.status(400).json({ error: 'cannot demote last admin' }); + } + try { + if (password) await auth.setUserPassword(id, password); + if (role) db.prepare('UPDATE users SET role = ? WHERE id = ?').run(role, id); + webdavConfig.sync(); + res.json({ ok: true }); + } catch (e) { + res.status(400).json({ error: e.message }); + } +}); + +api.delete('/users/:id', auth.requireAdmin, (req, res) => { + const id = parseInt(req.params.id, 10); + const u = db.prepare('SELECT * FROM users WHERE id = ?').get(id); + if (!u) return res.status(404).json({ error: 'not found' }); + if (u.role === 'admin') { + const otherAdmins = db.prepare("SELECT COUNT(*) AS n FROM users WHERE role = 'admin' AND id != ?").get(id).n; + if (!otherAdmins) return res.status(400).json({ error: 'cannot delete last admin' }); + } + db.prepare('DELETE FROM users WHERE id = ?').run(id); + webdavConfig.sync(); + res.json({ ok: true }); +}); + +// --- Settings (admin only) --- +api.get('/settings', auth.requireAdmin, (req, res) => { + res.json({ + public_base_url: settings.get('public_base_url', ''), + janitor_interval_minutes: parseInt(settings.get('janitor_interval_minutes', '30'), 10), + }); +}); + +api.put('/settings', auth.requireAdmin, (req, res) => { + const { public_base_url, janitor_interval_minutes } = req.body || {}; + if (public_base_url !== undefined) { + const v = String(public_base_url || '').trim().replace(/\/+$/, ''); + settings.set('public_base_url', v); + } + if (janitor_interval_minutes !== undefined) { + const n = Math.max(1, parseInt(janitor_interval_minutes, 10) || 30); + settings.set('janitor_interval_minutes', String(n)); + janitor.restart(n * 60 * 1000); + } + res.json({ ok: true }); +}); + +api.post('/janitor/run', auth.requireAdmin, (req, res) => { + try { + const r = janitor.runOnce(); + res.json({ ok: true, ...r }); + } catch (e) { + res.status(500).json({ error: e.message }); + } +}); + +// --- Customers --- +api.get('/customers', auth.requireAuth, (req, res) => { + const isAdmin = req.user.role === 'admin'; + const baseUrl = settings.getPublicBaseUrl(req); + const rows = isAdmin + ? db.prepare(` + SELECT c.*, + (SELECT COUNT(*) FROM uploads u WHERE u.customer_id = c.id) AS upload_count, + (SELECT COALESCE(SUM(size),0) FROM uploads u WHERE u.customer_id = c.id) AS total_size + FROM customers c ORDER BY c.created_at DESC + `).all() + : db.prepare(` + SELECT c.*, ca.access AS my_access, + (SELECT COUNT(*) FROM uploads u WHERE u.customer_id = c.id) AS upload_count, + (SELECT COALESCE(SUM(size),0) FROM uploads u WHERE u.customer_id = c.id) AS total_size + FROM customers c + JOIN customer_access ca ON ca.customer_id = c.id + WHERE ca.user_id = ? 
+ ORDER BY c.created_at DESC + `).all(req.user.id); + + res.json(rows.map(r => ({ + id: r.id, + name: r.name, + slug: r.slug, + token: isAdmin ? r.token : undefined, + has_password: !!r.password_hash, + expires_at: r.expires_at, + created_at: r.created_at, + upload_count: r.upload_count, + total_size: r.total_size, + my_access: isAdmin ? 'admin' : r.my_access, + upload_url: isAdmin ? `${baseUrl}/u/${r.token}` : undefined, + }))); +}); + +api.post('/customers', auth.requireAdmin, async (req, res) => { + const { name, password, expires_at } = req.body || {}; + if (!name || !String(name).trim()) return res.status(400).json({ error: 'name required' }); + const base = slugify(name); + const slug = ensureUniqueSlug(base); + const token = nanoid(24); + const password_hash = password ? await bcrypt.hash(password, 10) : null; + const exp = expires_at ? parseInt(expires_at, 10) : null; + const created_at = Date.now(); + const info = db.prepare(` + INSERT INTO customers (name, slug, token, password_hash, expires_at, created_at) + VALUES (?, ?, ?, ?, ?, ?) + `).run(String(name).trim(), slug, token, password_hash, exp, created_at); + customerDir(slug); + webdavConfig.sync(); + res.json({ + id: info.lastInsertRowid, + slug, + token, + upload_url: `${settings.getPublicBaseUrl(req)}/u/${token}`, + }); +}); + +api.patch('/customers/:id', auth.requireAdmin, async (req, res) => { + const id = parseInt(req.params.id, 10); + const c = db.prepare('SELECT * FROM customers WHERE id = ?').get(id); + if (!c) return res.status(404).json({ error: 'not found' }); + const { password, expires_at, clear_password } = req.body || {}; + let pwHash = c.password_hash; + if (clear_password) pwHash = null; + else if (password) pwHash = await bcrypt.hash(password, 10); + let exp = c.expires_at; + if (expires_at === null) exp = null; + else if (expires_at) exp = parseInt(expires_at, 10); + db.prepare('UPDATE customers SET password_hash = ?, expires_at = ? WHERE id = ?').run(pwHash, exp, id); + res.json({ ok: true }); +}); + +api.post('/customers/:id/regenerate-token', auth.requireAdmin, (req, res) => { + const id = parseInt(req.params.id, 10); + const token = nanoid(24); + const r = db.prepare('UPDATE customers SET token = ? WHERE id = ?').run(token, id); + if (!r.changes) return res.status(404).json({ error: 'not found' }); + res.json({ token, upload_url: `${settings.getPublicBaseUrl(req)}/u/${token}` }); +}); + +api.delete('/customers/:id', auth.requireAdmin, (req, res) => { + const c = db.prepare('SELECT * FROM customers WHERE id = ?').get(req.params.id); + if (!c) return res.status(404).json({ error: 'not found' }); + db.prepare('DELETE FROM customers WHERE id = ?').run(c.id); + webdavConfig.sync(); + // Files are kept on disk; admin can remove via WebDAV. + res.json({ ok: true }); +}); + +// --- Customer access assignments (admin only) --- +api.get('/customers/:id/access', auth.requireAdmin, (req, res) => { + const id = parseInt(req.params.id, 10); + const rows = db.prepare(` + SELECT u.id AS user_id, u.username, u.role, ca.access + FROM users u + LEFT JOIN customer_access ca ON ca.user_id = u.id AND ca.customer_id = ? + WHERE u.role = 'staff' + ORDER BY u.username + `).all(id); + res.json(rows); +}); + +api.put('/customers/:id/access', auth.requireAdmin, (req, res) => { + const id = parseInt(req.params.id, 10); + const c = db.prepare('SELECT 1 FROM customers WHERE id = ?').get(id); + if (!c) return res.status(404).json({ error: 'not found' }); + const entries = Array.isArray(req.body && req.body.access) ? 
req.body.access : []; + const tx = db.transaction((items) => { + db.prepare('DELETE FROM customer_access WHERE customer_id = ?').run(id); + const stmt = db.prepare( + 'INSERT INTO customer_access (customer_id, user_id, access) VALUES (?, ?, ?)' + ); + for (const e of items) { + if (!e || !e.user_id) continue; + const acc = e.access === 'write' ? 'write' : 'read'; + stmt.run(id, parseInt(e.user_id, 10), acc); + } + }); + tx(entries); + webdavConfig.sync(); + res.json({ ok: true }); +}); + +app.use('/admin/api', api); + +// ---------- Customer Upload Portal ---------- +app.get('/u/:token', (req, res) => { + const c = getCustomerByToken(req.params.token); + if (!c) return res.status(404).send('Link nicht gefunden.'); + if (isExpired(c)) return res.status(410).send('Link ist abgelaufen.'); + res.sendFile(path.join(__dirname, '..', 'public', 'upload.html')); +}); + +app.post('/u/:token/auth', async (req, res) => { + const c = getCustomerByToken(req.params.token); + if (!c || isExpired(c)) return res.status(404).json({ error: 'invalid' }); + if (!c.password_hash) return res.json({ ok: true }); + const ok = await bcrypt.compare(req.body.password || '', c.password_hash); + res.json({ ok }); +}); + +app.get('/u/:token/info', (req, res) => { + const c = getCustomerByToken(req.params.token); + if (!c || isExpired(c)) return res.status(404).json({ error: 'invalid' }); + res.json({ + name: c.name, + has_password: !!c.password_hash, + expires_at: c.expires_at, + }); +}); + +const upload = multer({ + storage: multer.diskStorage({ + destination: (req, file, cb) => { + try { + const c = req._customer; + const base = customerDir(c.slug); + const rel = sanitizeRelPath(req.body.path || file.originalname || ''); + const dir = rel.includes('/') ? rel.split('/').slice(0, -1).join('/') : ''; + const target = safeJoin(base, dir || '.'); + fs.mkdirSync(target, { recursive: true }); + cb(null, target); + } catch (e) { + cb(e); + } + }, + filename: (req, file, cb) => { + const rel = sanitizeRelPath(req.body.path || file.originalname || 'file'); + cb(null, path.basename(rel) || 'file'); + }, + }), + limits: { fileSize: 10 * 1024 * 1024 * 1024 }, +}); + +function uploadAuth(req, res, next) { + const c = getCustomerByToken(req.params.token); + if (!c || isExpired(c)) return res.status(404).json({ error: 'invalid' }); + if (c.password_hash) { + const provided = req.headers['x-upload-password'] || ''; + bcrypt.compare(provided, c.password_hash).then(ok => { + if (!ok) return res.status(401).json({ error: 'auth required' }); + req._customer = c; + next(); + }).catch(next); + } else { + req._customer = c; + next(); + } +} + +app.post('/u/:token/upload', uploadAuth, upload.single('file'), (req, res) => { + const c = req._customer; + const f = req.file; + if (!f) return res.status(400).json({ error: 'no file' }); + const base = customerDir(c.slug); + const rel = path.relative(base, path.join(f.destination, f.filename)); + db.prepare(` + INSERT INTO uploads (customer_id, filename, relative_path, size, uploaded_at) + VALUES (?, ?, ?, ?, ?) + `).run(c.id, f.filename, rel, f.size, Date.now()); + res.json({ ok: true, file: { name: f.filename, path: rel, size: f.size } }); +}); + +// ---------- Root ---------- +app.get('/', (req, res) => res.redirect('/admin/')); + +// Initial WebDAV config write so Apache has valid files present. +webdavConfig.sync(); +setInterval(() => auth.cleanupExpiredSessions(), 60 * 60 * 1000); + +// Janitor: reconcile uploads table with filesystem (handles WebDAV-side deletes/adds). 
+const janitorIntervalMin = parseInt(settings.get('janitor_interval_minutes', '30'), 10);
+janitor.start(janitorIntervalMin * 60 * 1000);
+
+app.listen(PORT, () => {
+  console.log(`HTTP on :${PORT}`);
+});
diff --git a/src/settings.js b/src/settings.js
new file mode 100644
index 0000000..2b08976
--- /dev/null
+++ b/src/settings.js
@@ -0,0 +1,37 @@
+const db = require('./db');
+
+function get(key, fallback = null) {
+  const row = db.prepare('SELECT value FROM settings WHERE key = ?').get(key);
+  return row ? row.value : fallback;
+}
+
+function set(key, value) {
+  db.prepare(`
+    INSERT INTO settings (key, value) VALUES (?, ?)
+    ON CONFLICT(key) DO UPDATE SET value = excluded.value
+  `).run(key, value == null ? null : String(value));
+}
+
+function getAll() {
+  return db.prepare('SELECT key, value FROM settings').all();
+}
+
+function getPublicBaseUrl(req) {
+  const stored = get('public_base_url', '').trim();
+  if (stored) return stored.replace(/\/+$/, '');
+  if (req) {
+    const proto = (req.headers['x-forwarded-proto'] || req.protocol || 'http').split(',')[0].trim();
+    const host = req.headers['x-forwarded-host'] || req.get('host');
+    if (host) return `${proto}://${host}`;
+  }
+  return '';
+}
+
+// Seed from env on first start so existing compose setups keep working.
+function seedFromEnv() {
+  if (!get('public_base_url') && process.env.PUBLIC_BASE_URL) {
+    set('public_base_url', process.env.PUBLIC_BASE_URL);
+  }
+}
+
+module.exports = { get, set, getAll, getPublicBaseUrl, seedFromEnv };
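When no public base URL is stored, `getPublicBaseUrl()` falls back to the `X-Forwarded-Proto` / `X-Forwarded-Host` (or `Host`) request headers, so a TLS-terminating reverse proxy only needs to pass those along. A minimal sketch for Apache httpd with `mod_proxy_http` and `mod_headers` (hypothetical hostname; not part of this patch):

```apache
<VirtualHost *:443>
  ServerName upload.example.com      # hypothetical
  # SSL directives omitted
  ProxyPreserveHost On               # keeps Host, so req.get('host') matches
  RequestHeader set X-Forwarded-Proto "https"
  # mod_proxy_http adds X-Forwarded-Host on its own
  ProxyPass        / http://127.0.0.1:3500/
  ProxyPassReverse / http://127.0.0.1:3500/
</VirtualHost>
```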
diff --git a/src/webdav-config.js b/src/webdav-config.js
new file mode 100644
index 0000000..79dc790
--- /dev/null
+++ b/src/webdav-config.js
@@ -0,0 +1,84 @@
+const fs = require('fs');
+const path = require('path');
+const db = require('./db');
+
+const WEBDAV_CONFIG_DIR = process.env.WEBDAV_CONFIG_DIR || '/webdav-config';
+
+function ensureDir() {
+  fs.mkdirSync(WEBDAV_CONFIG_DIR, { recursive: true });
+}
+
+function atomicWrite(filepath, content) {
+  const tmp = filepath + '.tmp';
+  fs.writeFileSync(tmp, content);
+  fs.renameSync(tmp, filepath);
+}
+
+// Apache mod_authn_file accepts bcrypt hashes ($2a$/$2b$/$2y$),
+// which is exactly what bcrypt npm produces. No re-hashing needed.
+function buildHtpasswd() {
+  const users = db.prepare('SELECT username, password_hash FROM users ORDER BY username').all();
+  return users.map(u => `${u.username}:${u.password_hash}`).join('\n') + (users.length ? '\n' : '');
+}
+
+function buildAccessConf() {
+  const admins = db.prepare("SELECT username FROM users WHERE role = 'admin' ORDER BY username")
+    .all().map(r => r.username);
+  const customers = db.prepare('SELECT * FROM customers ORDER BY slug').all();
+
+  let out = '# Auto-generated by app — do not edit.\n\n';
+  // Root listing auth is already enforced by <Directory "/data/uploads"> in webdav.conf.
+  // Adding a broad <Location /> here would shadow /icons/ and break autoindex graphics.
+
+  for (const c of customers) {
+    const assigns = db.prepare(`
+      SELECT u.username, ca.access FROM customer_access ca
+      JOIN users u ON u.id = ca.user_id
+      WHERE ca.customer_id = ?
+      ORDER BY u.username
+    `).all(c.id);
+
+    const staffRead = assigns.map(a => a.username);
+    const staffWrite = assigns.filter(a => a.access === 'write').map(a => a.username);
+
+    const readUsers = [...new Set([...admins, ...staffRead])];
+    const writeUsers = [...new Set([...admins, ...staffWrite])];
+
+    const locPath = `/${c.slug}/`;
+    out += `<Location "${locPath}">\n`;
+    const sameSet = readUsers.length === writeUsers.length &&
+      readUsers.every(u => writeUsers.includes(u));
+    if (sameSet) {
+      // Same users for read and write — one Require covers it all.
+      if (readUsers.length) out += `  Require user ${readUsers.join(' ')}\n`;
+      else out += `  Require all denied\n`;
+    } else {
+      // Split explicitly by method so Apache's default RequireAny (OR)
+      // doesn't let readers inherit write access from a broader outer Require.
+      out += `  <Limit GET HEAD OPTIONS PROPFIND REPORT>\n`;
+      if (readUsers.length) out += `    Require user ${readUsers.join(' ')}\n`;
+      else out += `    Require all denied\n`;
+      out += `  </Limit>\n`;
+      out += `  <LimitExcept GET HEAD OPTIONS PROPFIND REPORT>\n`;
+      if (writeUsers.length) out += `    Require user ${writeUsers.join(' ')}\n`;
+      else out += `    Require all denied\n`;
+      out += `  </LimitExcept>\n`;
+    }
+    out += `</Location>\n\n`;
+  }
+  return out;
+}
+
+function sync() {
+  try {
+    ensureDir();
+    atomicWrite(path.join(WEBDAV_CONFIG_DIR, 'htpasswd'), buildHtpasswd());
+    atomicWrite(path.join(WEBDAV_CONFIG_DIR, 'access.conf'), buildAccessConf());
+    // reload trigger (watched by apache entrypoint)
+    atomicWrite(path.join(WEBDAV_CONFIG_DIR, 'reload.trigger'), String(Date.now()));
+  } catch (e) {
+    console.error('webdav-config sync failed:', e.message);
+  }
+}
+
+module.exports = { sync, WEBDAV_CONFIG_DIR };
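For illustration, a customer `acme` with staff users `anna` (read) and `ben` (write) plus one admin account `admin` (all names hypothetical) would yield a block along these lines in the generated `access.conf`:

```apache
<Location "/acme/">
  <Limit GET HEAD OPTIONS PROPFIND REPORT>
    Require user admin anna ben
  </Limit>
  <LimitExcept GET HEAD OPTIONS PROPFIND REPORT>
    Require user admin ben
  </LimitExcept>
</Location>
```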
diff --git a/webdav/Dockerfile b/webdav/Dockerfile
new file mode 100644
index 0000000..cea8c8c
--- /dev/null
+++ b/webdav/Dockerfile
@@ -0,0 +1,28 @@
+FROM debian:bookworm-slim
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    apache2 apache2-utils inotify-tools \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN a2enmod dav dav_fs auth_basic authn_file authz_user authz_core \
+    setenvif mime alias autoindex dir \
+    && a2dissite 000-default
+
+# Create a user with UID 1000 so file ownership matches the app container.
+RUN groupadd -g 1000 webdav \
+    && useradd -u 1000 -g 1000 -s /usr/sbin/nologin -M webdav \
+    && sed -i \
+      -e 's|^export APACHE_RUN_USER=.*|export APACHE_RUN_USER=webdav|' \
+      -e 's|^export APACHE_RUN_GROUP=.*|export APACHE_RUN_GROUP=webdav|' \
+      /etc/apache2/envvars
+
+COPY webdav.conf /etc/apache2/conf-enabled/webdav.conf
+
+COPY entrypoint.sh /entrypoint.sh
+RUN chmod +x /entrypoint.sh
+
+EXPOSE 80
+ENTRYPOINT ["/entrypoint.sh"]
+CMD ["apachectl", "-D", "FOREGROUND"]
diff --git a/webdav/entrypoint.sh b/webdav/entrypoint.sh
new file mode 100644
index 0000000..f3e33a5
--- /dev/null
+++ b/webdav/entrypoint.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+set -e
+
+CONFIG_DIR="${WEBDAV_CONFIG_DIR:-/webdav-config}"
+
+mkdir -p /var/lib/dav /data/uploads "$CONFIG_DIR"
+chown -R 1000:1000 /var/lib/dav /data/uploads "$CONFIG_DIR" 2>/dev/null || true
+
+# Ensure the referenced files exist so Apache starts even before first sync.
+[ -f "$CONFIG_DIR/htpasswd" ] || : > "$CONFIG_DIR/htpasswd"
+[ -f "$CONFIG_DIR/access.conf" ] || : > "$CONFIG_DIR/access.conf"
+chown 1000:1000 "$CONFIG_DIR/htpasswd" "$CONFIG_DIR/access.conf" 2>/dev/null || true
+
+# Graceful-reload watcher: triggered when htpasswd / access.conf are rewritten.
+(
+  sleep 2
+  while :; do
+    if command -v inotifywait >/dev/null 2>&1; then
+      inotifywait -q -e close_write,moved_to,create,delete -- "$CONFIG_DIR" >/dev/null 2>&1 || sleep 2
+    else
+      sleep 5
+    fi
+    sleep 1
+    echo "[webdav] config changed -> apachectl graceful"
+    apachectl graceful 2>/dev/null || true
+  done
+) &
+
+# Apache2 needs these env vars when started via apachectl
+. /etc/apache2/envvars
+
+exec "$@"
diff --git a/webdav/webdav.conf b/webdav/webdav.conf
new file mode 100644
index 0000000..66fd9cf
--- /dev/null
+++ b/webdav/webdav.conf
@@ -0,0 +1,32 @@
+DavLockDB "/var/lib/dav/DavLock"
+
+<VirtualHost *:80>
+  DocumentRoot "/data/uploads"
+
+  # Autoindex icons (explicit, no auth)
+  Alias /icons/ "/usr/share/apache2/icons/"
+  <Directory "/usr/share/apache2/icons/">
+    Options FollowSymLinks
+    AllowOverride None
+    Require all granted
+  </Directory>
+
+  <Directory "/data/uploads">
+    DAV On
+    AllowOverride None
+    Options Indexes
+    AuthType Basic
+    AuthName "WebDAV"
+    AuthBasicProvider file
+    AuthUserFile /webdav-config/htpasswd
+    Require valid-user
+  </Directory>
+
+  LimitXMLRequestBody 0
+
+  # Per-customer ACLs (regenerated by the app container)
+  Include /webdav-config/access.conf
+
+  ErrorLog /dev/stderr
+  CustomLog /dev/stdout combined
+</VirtualHost>
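Besides the configurable interval, a reconciliation run can be triggered on demand through the admin API, reusing a session cookie such as the one from the setup example earlier (sketch; host and credentials are placeholders):

```bash
# Log in (or reuse an existing cookie jar), then trigger one janitor run.
curl -c /tmp/sfu.jar -H 'Content-Type: application/json' \
  -d '{"username":"admin","password":"secret1"}' \
  http://HOST:3500/admin/api/login

# Response: {"ok":true,"removed":<n>,"added":<n>}
curl -b /tmp/sfu.jar -X POST http://HOST:3500/admin/api/janitor/run
```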