added message bundling instead of all at once, added separate memory at every filter

This commit is contained in:
2026-03-19 16:05:25 +01:00
parent d148248682
commit 076733fb53
5 changed files with 179 additions and 103 deletions
+28 -26
View File
@@ -41,7 +41,6 @@ def export_backup(db: Session | None = None) -> str:
"poll_interval_seconds": acc.poll_interval_seconds,
"enabled": acc.enabled,
"filter_rules": [],
"processed_mails": [],
}
for rule in sorted(acc.filter_rules, key=lambda r: r.priority):
@@ -67,23 +66,25 @@ def export_backup(db: Session | None = None) -> str:
}
for action in rule.actions
],
"processed_mails": [],
}
account_data["filter_rules"].append(rule_data)
# Verarbeitete Mails exportieren
processed = (
db.query(ProcessedMail)
.filter(ProcessedMail.account_id == acc.id)
.all()
)
for pm in processed:
account_data["processed_mails"].append({
"folder": pm.folder,
"mail_uid": pm.mail_uid,
"mail_subject": pm.mail_subject,
"mail_from": pm.mail_from,
"processed_at": pm.processed_at.isoformat() if pm.processed_at else None,
})
# Verarbeitete Mails pro Regel
processed = (
db.query(ProcessedMail)
.filter(ProcessedMail.rule_id == rule.id)
.all()
)
for pm in processed:
rule_data["processed_mails"].append({
"folder": pm.folder,
"mail_uid": pm.mail_uid,
"mail_subject": pm.mail_subject,
"mail_from": pm.mail_from,
"processed_at": pm.processed_at.isoformat() if pm.processed_at else None,
})
account_data["filter_rules"].append(rule_data)
data["accounts"].append(account_data)
@@ -192,16 +193,17 @@ def import_backup(json_content: str, db: Session | None = None) -> dict:
stats["rules_created"] += 1
# Verarbeitete Mails wiederherstellen
for pm_data in acc_data.get("processed_mails", []):
db.add(ProcessedMail(
account_id=account.id,
folder=pm_data["folder"],
mail_uid=pm_data["mail_uid"],
mail_subject=pm_data.get("mail_subject"),
mail_from=pm_data.get("mail_from"),
))
stats["processed_restored"] += 1
# Verarbeitete Mails pro Regel wiederherstellen
for pm_data in rule_data.get("processed_mails", []):
db.add(ProcessedMail(
account_id=account.id,
rule_id=rule.id,
folder=pm_data["folder"],
mail_uid=pm_data["mail_uid"],
mail_subject=pm_data.get("mail_subject"),
mail_from=pm_data.get("mail_from"),
))
stats["processed_restored"] += 1
db.commit()
logger.info("Backup-Import abgeschlossen: %s", stats)
+101 -63
View File
@@ -8,7 +8,7 @@ from sqlalchemy.orm import Session
from app.database import SessionLocal
from app.models.db_models import Account, FilterRule, LogLevel, ProcessedMail
from app.services.encryption import decrypt
from app.services.filter_engine import apply_rules
from app.services.filter_engine import evaluate_conditions, execute_action
from app.services.imap_client import IMAPClient
from app.services.log_service import cleanup_old_logs, write_log
@@ -73,13 +73,18 @@ def _poll_account_sync(account_id: int) -> None:
total_actions = 0
total_errors = 0
with client:
for folder in source_folders:
folder_rules = [r for r in rules if r.source_folder == folder]
# Mail-Cache: einmal geladene Mails pro Ordner/UID wiederverwenden
mail_cache: dict[tuple[str, str], "MailMessage"] = {}
# UIDs pro Ordner cachen
folder_uids: dict[str, list[str]] = {}
# Alle UIDs im Ordner holen
with client:
# Phase 1: UIDs pro Ordner laden
for folder in source_folders:
try:
all_uids = client.get_all_uids(folder, search="ALL")
folder_uids[folder] = all_uids
total_mails += len(all_uids)
except Exception as e:
write_log(
message=f"Fehler beim Abrufen von Ordner '{folder}'",
@@ -90,104 +95,122 @@ def _poll_account_sync(account_id: int) -> None:
details=str(e),
db=db,
)
# Phase 2: Pro Regel die unverarbeiteten Mails prüfen
for rule in rules:
folder = rule.source_folder
if folder not in folder_uids:
continue
total_mails += len(all_uids)
all_uids = folder_uids[folder]
# Bereits verarbeitete UIDs aus DB laden
# Bereits verarbeitete UIDs für DIESE Regel
processed_uids = set(
row[0] for row in db.query(ProcessedMail.mail_uid)
.filter(
ProcessedMail.account_id == account.id,
ProcessedMail.rule_id == rule.id,
ProcessedMail.folder == folder,
)
.all()
)
# Neue (unverarbeitete) UIDs ermitteln
new_uids = [uid for uid in all_uids if uid not in processed_uids]
total_new += len(new_uids)
if not new_uids:
write_log(
message=f"Keine neuen Mails in '{folder}' ({len(all_uids)} gesamt, alle bereits verarbeitet)",
message=f"Regel '{rule.name}': keine neuen Mails in '{folder}' ({len(all_uids)} gesamt, alle bereits geprüft)",
level=LogLevel.INFO,
account_id=account.id,
account_name=account.name,
rule_name=rule.name,
folder=folder,
db=db,
)
continue
# Batch-Limit: maximal 500 Mails pro Regel pro Poll
BATCH_LIMIT = 500
batch_uids = new_uids[:BATCH_LIMIT]
remaining = len(new_uids) - len(batch_uids)
total_new += len(batch_uids)
msg = f"Regel '{rule.name}': {len(batch_uids)} Mail(s) in '{folder}' prüfen ({len(processed_uids)} bereits geprüft)"
if remaining > 0:
msg += f"{remaining} weitere beim nächsten Poll"
write_log(
message=f"{len(new_uids)} neue Mail(s) in '{folder}' ({len(all_uids)} gesamt, {len(processed_uids)} bereits verarbeitet)",
message=msg,
level=LogLevel.INFO,
account_id=account.id,
account_name=account.name,
rule_name=rule.name,
folder=folder,
db=db,
)
new_uids = batch_uids
# Neue Mails abrufen und verarbeiten
for uid in new_uids:
try:
mail = client.fetch_mail(uid)
except Exception as e:
write_log(
message=f"Fehler beim Abrufen von Mail {uid}",
level=LogLevel.ERROR,
account_id=account.id,
account_name=account.name,
mail_uid=uid,
folder=folder,
details=str(e),
db=db,
)
continue
if not mail:
continue
results, eval_details = apply_rules(client, mail, folder_rules, smtp_config)
# Eval-Details für Log aufbereiten
eval_summary = []
for ev in eval_details:
status = "TREFFER" if ev["matched"] else "kein Treffer"
checks = " | ".join(ev["details"])
eval_summary.append(f"Regel '{ev['rule']}': {status} [{checks}]")
if not results:
write_log(
message=f"Keine Regel trifft zu",
level=LogLevel.INFO,
account_id=account.id,
account_name=account.name,
mail_uid=mail.uid,
mail_subject=mail.subject,
mail_from=mail.from_addr,
folder=folder,
details="\n".join(eval_summary),
db=db,
)
# Mail aus Cache oder vom Server laden
cache_key = (folder, uid)
if cache_key in mail_cache:
mail = mail_cache[cache_key]
else:
try:
# Ordner muss ausgewählt sein
if not hasattr(client, '_current_folder') or client._current_folder != folder:
client.conn.select(folder)
client._current_folder = folder
mail = client.fetch_mail(uid)
except Exception as e:
write_log(
message=f"Fehler beim Abrufen von Mail {uid}",
level=LogLevel.ERROR,
account_id=account.id,
account_name=account.name,
mail_uid=uid,
folder=folder,
details=str(e),
db=db,
)
# Trotzdem als verarbeitet markieren damit wir nicht endlos retrien
db.add(ProcessedMail(
account_id=account.id, rule_id=rule.id,
folder=folder, mail_uid=uid,
))
continue
if not mail:
db.add(ProcessedMail(
account_id=account.id, rule_id=rule.id,
folder=folder, mail_uid=uid,
))
continue
mail_cache[cache_key] = mail
# Regel gegen Mail prüfen
matched, details = evaluate_conditions(mail, rule.conditions)
detail_str = " | ".join(details)
if matched:
total_matched += 1
for r in results:
action_label = r["action"]
param = r.get("parameter", "")
# Aktionen ausführen
for action in rule.actions:
success = execute_action(client, mail, action, smtp_config)
action_label = action.action_type.value
param = action.parameter or ""
if param:
action_label += f"{param}"
if r["success"]:
if success:
total_actions += 1
write_log(
message=f"Aktion ausgeführt: {action_label}",
level=LogLevel.SUCCESS,
account_id=account.id,
account_name=account.name,
rule_name=r["rule"],
action_type=r["action"],
mail_uid=r["mail_uid"],
rule_name=rule.name,
action_type=action.action_type.value,
mail_uid=mail.uid,
mail_subject=mail.subject,
mail_from=mail.from_addr,
folder=folder,
@@ -201,19 +224,34 @@ def _poll_account_sync(account_id: int) -> None:
level=LogLevel.ERROR,
account_id=account.id,
account_name=account.name,
rule_name=r["rule"],
action_type=r["action"],
mail_uid=r["mail_uid"],
rule_name=rule.name,
action_type=action.action_type.value,
mail_uid=mail.uid,
mail_subject=mail.subject,
mail_from=mail.from_addr,
folder=folder,
details=param,
db=db,
)
else:
write_log(
message=f"Keine Übereinstimmung",
level=LogLevel.INFO,
account_id=account.id,
account_name=account.name,
rule_name=rule.name,
mail_uid=mail.uid,
mail_subject=mail.subject,
mail_from=mail.from_addr,
folder=folder,
details=detail_str,
db=db,
)
# Mail als verarbeitet markieren
# Mail für DIESE Regel als verarbeitet markieren
db.add(ProcessedMail(
account_id=account.id,
rule_id=rule.id,
folder=folder,
mail_uid=mail.uid,
mail_subject=mail.subject[:500] if mail.subject else None,