MyFSIO v0.4.1 Release

Reviewed-on: #34
This commit was merged in pull request #34.
2026-03-25 04:29:28 +00:00
15 changed files with 1109 additions and 222 deletions


@@ -293,6 +293,7 @@ def create_app(
multipart_max_age_days=app.config.get("GC_MULTIPART_MAX_AGE_DAYS", 7),
lock_file_max_age_hours=app.config.get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0),
dry_run=app.config.get("GC_DRY_RUN", False),
io_throttle_ms=app.config.get("GC_IO_THROTTLE_MS", 10),
)
gc_collector.start()
@@ -304,6 +305,7 @@ def create_app(
batch_size=app.config.get("INTEGRITY_BATCH_SIZE", 1000),
auto_heal=app.config.get("INTEGRITY_AUTO_HEAL", False),
dry_run=app.config.get("INTEGRITY_DRY_RUN", False),
io_throttle_ms=app.config.get("INTEGRITY_IO_THROTTLE_MS", 10),
)
integrity_checker.start()
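Both throttle knobs are plain integers in milliseconds and default to 10. A minimal sketch of overriding them at startup (the env-var names come from the diffs below; the values here are invented for illustration):

    import os

    # Hypothetical overrides, read by AppConfig.from_env() (see the app/config hunks below):
    os.environ["GC_IO_THROTTLE_MS"] = "25"        # slow GC down, e.g. on spinning disks
    os.environ["INTEGRITY_IO_THROTTLE_MS"] = "0"  # no sleep between integrity checks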


@@ -907,15 +907,11 @@ def gc_run_now():
     if not gc:
         return _json_error("InvalidRequest", "GC is not enabled", 400)
     payload = request.get_json(silent=True) or {}
-    original_dry_run = gc.dry_run
-    if "dry_run" in payload:
-        gc.dry_run = bool(payload["dry_run"])
-    try:
-        result = gc.run_now()
-    finally:
-        gc.dry_run = original_dry_run
+    started = gc.run_async(dry_run=payload.get("dry_run"))
     logger.info("GC manual run by %s", principal.access_key)
-    return jsonify(result.to_dict())
+    if not started:
+        return _json_error("Conflict", "GC is already in progress", 409)
+    return jsonify({"status": "started"})
@admin_api_bp.route("/gc/history", methods=["GET"])
@@ -961,12 +957,14 @@ def integrity_run_now():
     payload = request.get_json(silent=True) or {}
     override_dry_run = payload.get("dry_run")
     override_auto_heal = payload.get("auto_heal")
-    result = checker.run_now(
+    started = checker.run_async(
         auto_heal=override_auto_heal if override_auto_heal is not None else None,
         dry_run=override_dry_run if override_dry_run is not None else None,
    )
     logger.info("Integrity manual run by %s", principal.access_key)
-    return jsonify(result.to_dict())
+    if not started:
+        return _json_error("Conflict", "A scan is already in progress", 409)
+    return jsonify({"status": "started"})
@admin_api_bp.route("/integrity/history", methods=["GET"])


@@ -157,11 +157,13 @@ class AppConfig:
gc_multipart_max_age_days: int
gc_lock_file_max_age_hours: float
gc_dry_run: bool
gc_io_throttle_ms: int
integrity_enabled: bool
integrity_interval_hours: float
integrity_batch_size: int
integrity_auto_heal: bool
integrity_dry_run: bool
integrity_io_throttle_ms: int
@classmethod
def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
@@ -338,11 +340,13 @@ class AppConfig:
gc_multipart_max_age_days = int(_get("GC_MULTIPART_MAX_AGE_DAYS", 7))
gc_lock_file_max_age_hours = float(_get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0))
gc_dry_run = str(_get("GC_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
gc_io_throttle_ms = int(_get("GC_IO_THROTTLE_MS", 10))
integrity_enabled = str(_get("INTEGRITY_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
integrity_interval_hours = float(_get("INTEGRITY_INTERVAL_HOURS", 24.0))
integrity_batch_size = int(_get("INTEGRITY_BATCH_SIZE", 1000))
integrity_auto_heal = str(_get("INTEGRITY_AUTO_HEAL", "0")).lower() in {"1", "true", "yes", "on"}
integrity_dry_run = str(_get("INTEGRITY_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
integrity_io_throttle_ms = int(_get("INTEGRITY_IO_THROTTLE_MS", 10))
return cls(storage_root=storage_root,
max_upload_size=max_upload_size,
@@ -438,11 +442,13 @@ class AppConfig:
gc_multipart_max_age_days=gc_multipart_max_age_days,
gc_lock_file_max_age_hours=gc_lock_file_max_age_hours,
gc_dry_run=gc_dry_run,
gc_io_throttle_ms=gc_io_throttle_ms,
integrity_enabled=integrity_enabled,
integrity_interval_hours=integrity_interval_hours,
integrity_batch_size=integrity_batch_size,
integrity_auto_heal=integrity_auto_heal,
-                   integrity_dry_run=integrity_dry_run)
+                   integrity_dry_run=integrity_dry_run,
+                   integrity_io_throttle_ms=integrity_io_throttle_ms)
def validate_and_report(self) -> list[str]:
"""Validate configuration and return a list of warnings/issues.
@@ -663,9 +669,11 @@ class AppConfig:
"GC_MULTIPART_MAX_AGE_DAYS": self.gc_multipart_max_age_days,
"GC_LOCK_FILE_MAX_AGE_HOURS": self.gc_lock_file_max_age_hours,
"GC_DRY_RUN": self.gc_dry_run,
"GC_IO_THROTTLE_MS": self.gc_io_throttle_ms,
"INTEGRITY_ENABLED": self.integrity_enabled,
"INTEGRITY_INTERVAL_HOURS": self.integrity_interval_hours,
"INTEGRITY_BATCH_SIZE": self.integrity_batch_size,
"INTEGRITY_AUTO_HEAL": self.integrity_auto_heal,
"INTEGRITY_DRY_RUN": self.integrity_dry_run,
"INTEGRITY_IO_THROTTLE_MS": self.integrity_io_throttle_ms,
}


@@ -175,13 +175,21 @@ def handle_app_error(error: AppError) -> Response:
 def handle_rate_limit_exceeded(e: RateLimitExceeded) -> Response:
     g.s3_error_code = "SlowDown"
     if request.path.startswith("/ui") or request.path.startswith("/buckets"):
+        wants_json = (
+            request.is_json or
+            request.headers.get("X-Requested-With") == "XMLHttpRequest" or
+            "application/json" in request.accept_mimetypes.values()
+        )
+        if wants_json:
+            return jsonify({"success": False, "error": {"code": "SlowDown", "message": "Please reduce your request rate."}}), 429
     error = Element("Error")
     SubElement(error, "Code").text = "SlowDown"
     SubElement(error, "Message").text = "Please reduce your request rate."
     SubElement(error, "Resource").text = request.path
+    SubElement(error, "RequestId").text = getattr(g, "request_id", "")
     xml_bytes = tostring(error, encoding="utf-8")
-    return Response(xml_bytes, status=429, mimetype="application/xml")
+    return Response(xml_bytes, status="429 Too Many Requests", mimetype="application/xml")
def register_error_handlers(app):
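For non-AJAX requests the handler still serializes the S3-style XML error, now with the request id attached. A representative 429 body (resource path and id invented):

    <Error>
      <Code>SlowDown</Code>
      <Message>Please reduce your request rate.</Message>
      <Resource>/mybucket/key.txt</Resource>
      <RequestId>d41d8cd98f00</RequestId>
    </Error>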

app/gc.py

@@ -162,6 +162,7 @@ class GarbageCollector:
lock_file_max_age_hours: float = 1.0,
dry_run: bool = False,
max_history: int = 50,
io_throttle_ms: int = 10,
) -> None:
self.storage_root = Path(storage_root)
self.interval_seconds = interval_hours * 3600.0
@@ -172,6 +173,9 @@ class GarbageCollector:
self._timer: Optional[threading.Timer] = None
self._shutdown = False
self._lock = threading.Lock()
self._scanning = False
self._scan_start_time: Optional[float] = None
self._io_throttle = max(0, io_throttle_ms) / 1000.0
self.history_store = GCHistoryStore(storage_root, max_records=max_history)
def start(self) -> None:
@@ -212,49 +216,81 @@
         finally:
             self._schedule_next()
-    def run_now(self) -> GCResult:
-        start = time.time()
-        result = GCResult()
+    def run_now(self, dry_run: Optional[bool] = None) -> GCResult:
+        if not self._lock.acquire(blocking=False):
+            raise RuntimeError("GC is already in progress")
-        self._clean_temp_files(result)
-        self._clean_orphaned_multipart(result)
-        self._clean_stale_locks(result)
-        self._clean_orphaned_metadata(result)
-        self._clean_orphaned_versions(result)
-        self._clean_empty_dirs(result)
+        effective_dry_run = dry_run if dry_run is not None else self.dry_run
-        result.execution_time_seconds = time.time() - start
+        try:
+            self._scanning = True
+            self._scan_start_time = time.time()
-        if result.has_work or result.errors:
-            logger.info(
-                "GC completed in %.2fs: temp=%d (%.1f MB), multipart=%d (%.1f MB), "
-                "locks=%d, meta=%d, versions=%d (%.1f MB), dirs=%d, errors=%d%s",
-                result.execution_time_seconds,
-                result.temp_files_deleted,
-                result.temp_bytes_freed / (1024 * 1024),
-                result.multipart_uploads_deleted,
-                result.multipart_bytes_freed / (1024 * 1024),
-                result.lock_files_deleted,
-                result.orphaned_metadata_deleted,
-                result.orphaned_versions_deleted,
-                result.orphaned_version_bytes_freed / (1024 * 1024),
-                result.empty_dirs_removed,
-                len(result.errors),
-                " (dry run)" if self.dry_run else "",
+            start = self._scan_start_time
+            result = GCResult()
+            original_dry_run = self.dry_run
+            self.dry_run = effective_dry_run
+            try:
+                self._clean_temp_files(result)
+                self._clean_orphaned_multipart(result)
+                self._clean_stale_locks(result)
+                self._clean_orphaned_metadata(result)
+                self._clean_orphaned_versions(result)
+                self._clean_empty_dirs(result)
+            finally:
+                self.dry_run = original_dry_run
+            result.execution_time_seconds = time.time() - start
+            if result.has_work or result.errors:
+                logger.info(
+                    "GC completed in %.2fs: temp=%d (%.1f MB), multipart=%d (%.1f MB), "
+                    "locks=%d, meta=%d, versions=%d (%.1f MB), dirs=%d, errors=%d%s",
+                    result.execution_time_seconds,
+                    result.temp_files_deleted,
+                    result.temp_bytes_freed / (1024 * 1024),
+                    result.multipart_uploads_deleted,
+                    result.multipart_bytes_freed / (1024 * 1024),
+                    result.lock_files_deleted,
+                    result.orphaned_metadata_deleted,
+                    result.orphaned_versions_deleted,
+                    result.orphaned_version_bytes_freed / (1024 * 1024),
+                    result.empty_dirs_removed,
+                    len(result.errors),
+                    " (dry run)" if effective_dry_run else "",
+                )
+            record = GCExecutionRecord(
+                timestamp=time.time(),
+                result=result.to_dict(),
+                dry_run=effective_dry_run,
+            )
+            self.history_store.add(record)
-        record = GCExecutionRecord(
-            timestamp=time.time(),
-            result=result.to_dict(),
-            dry_run=self.dry_run,
-        )
-        self.history_store.add(record)
+            return result
+        finally:
+            self._scanning = False
+            self._scan_start_time = None
+            self._lock.release()
-        return result
+    def run_async(self, dry_run: Optional[bool] = None) -> bool:
+        if self._scanning:
+            return False
+        t = threading.Thread(target=self.run_now, args=(dry_run,), daemon=True)
+        t.start()
+        return True
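Taken together: run_now() is now safe against concurrent callers (the non-blocking lock turns a second direct call into a RuntimeError), while run_async() is the polite entry point the HTTP handlers use. A minimal sketch of the contract, assuming the constructor arguments not shown above also have defaults:

    gc = GarbageCollector(storage_root="/srv/myfsio")  # other kwargs left at their defaults
    if not gc.run_async(dry_run=True):
        print("GC already in progress")  # mirrors the 409 returned over HTTP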
def _system_path(self) -> Path:
return self.storage_root / self.SYSTEM_ROOT
def _throttle(self) -> bool:
if self._shutdown:
return True
if self._io_throttle > 0:
time.sleep(self._io_throttle)
return self._shutdown
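The throttle doubles as a shutdown probe: it sleeps once per filesystem entry and reports whether the collector is stopping so loops can bail out early. At the default io_throttle_ms=10, that bounds each scan loop to roughly:

    max_entries_per_second = 1000 / 10  # 100.0 entries/second with the default 10 ms sleep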
def _list_bucket_names(self) -> List[str]:
names = []
try:
@@ -271,6 +307,8 @@ class GarbageCollector:
return
try:
for entry in tmp_dir.iterdir():
if self._throttle():
return
if not entry.is_file():
continue
age = _file_age_hours(entry)
@@ -292,6 +330,8 @@ class GarbageCollector:
bucket_names = self._list_bucket_names()
for bucket_name in bucket_names:
if self._shutdown:
return
for multipart_root in (
self._system_path() / self.SYSTEM_MULTIPART_DIR / bucket_name,
self.storage_root / bucket_name / ".multipart",
@@ -300,6 +340,8 @@ class GarbageCollector:
continue
try:
for upload_dir in multipart_root.iterdir():
if self._throttle():
return
if not upload_dir.is_dir():
continue
self._maybe_clean_upload(upload_dir, cutoff_hours, result)
@@ -329,6 +371,8 @@ class GarbageCollector:
try:
for bucket_dir in buckets_root.iterdir():
if self._shutdown:
return
if not bucket_dir.is_dir():
continue
locks_dir = bucket_dir / "locks"
@@ -336,6 +380,8 @@ class GarbageCollector:
continue
try:
for lock_file in locks_dir.iterdir():
if self._throttle():
return
if not lock_file.is_file() or not lock_file.name.endswith(".lock"):
continue
age = _file_age_hours(lock_file)
@@ -356,6 +402,8 @@ class GarbageCollector:
bucket_names = self._list_bucket_names()
for bucket_name in bucket_names:
if self._shutdown:
return
legacy_meta = self.storage_root / bucket_name / ".meta"
if legacy_meta.exists():
self._clean_legacy_metadata(bucket_name, legacy_meta, result)
@@ -368,6 +416,8 @@ class GarbageCollector:
bucket_path = self.storage_root / bucket_name
try:
for meta_file in meta_root.rglob("*.meta.json"):
if self._throttle():
return
if not meta_file.is_file():
continue
try:
@@ -387,6 +437,8 @@ class GarbageCollector:
bucket_path = self.storage_root / bucket_name
try:
for index_file in meta_root.rglob("_index.json"):
if self._throttle():
return
if not index_file.is_file():
continue
try:
@@ -430,6 +482,8 @@ class GarbageCollector:
bucket_names = self._list_bucket_names()
for bucket_name in bucket_names:
if self._shutdown:
return
bucket_path = self.storage_root / bucket_name
for versions_root in (
self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR,
@@ -439,6 +493,8 @@ class GarbageCollector:
continue
try:
for key_dir in versions_root.iterdir():
if self._throttle():
return
if not key_dir.is_dir():
continue
self._clean_versions_for_key(bucket_path, versions_root, key_dir, result)
@@ -489,6 +545,8 @@ class GarbageCollector:
self._remove_empty_dirs_recursive(root, root, result)
def _remove_empty_dirs_recursive(self, path: Path, stop_at: Path, result: GCResult) -> bool:
if self._shutdown:
return False
if not path.is_dir():
return False
@@ -499,6 +557,8 @@ class GarbageCollector:
all_empty = True
for child in children:
if self._throttle():
return False
if child.is_dir():
if not self._remove_empty_dirs_recursive(child, stop_at, result):
all_empty = False
@@ -520,12 +580,17 @@
         return [r.to_dict() for r in records]
     def get_status(self) -> dict:
-        return {
+        status: Dict[str, Any] = {
             "enabled": not self._shutdown or self._timer is not None,
             "running": self._timer is not None and not self._shutdown,
+            "scanning": self._scanning,
             "interval_hours": self.interval_seconds / 3600.0,
             "temp_file_max_age_hours": self.temp_file_max_age_hours,
             "multipart_max_age_days": self.multipart_max_age_days,
             "lock_file_max_age_hours": self.lock_file_max_age_hours,
             "dry_run": self.dry_run,
+            "io_throttle_ms": round(self._io_throttle * 1000),
         }
+        if self._scanning and self._scan_start_time:
+            status["scan_elapsed_seconds"] = time.time() - self._scan_start_time
+        return status
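An illustrative status payload mid-scan (the shape comes from get_status above; the concrete values are invented):

    {
        "enabled": True,
        "running": True,
        "scanning": True,
        "interval_hours": 24.0,
        "temp_file_max_age_hours": 24.0,
        "multipart_max_age_days": 7,
        "lock_file_max_age_hours": 1.0,
        "dry_run": False,
        "io_throttle_ms": 10,
        "scan_elapsed_seconds": 12.4,
    }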


@@ -162,6 +162,76 @@ class IntegrityHistoryStore:
return self.load()[offset : offset + limit]
class IntegrityCursorStore:
def __init__(self, storage_root: Path) -> None:
self.storage_root = storage_root
self._lock = threading.Lock()
def _get_path(self) -> Path:
return self.storage_root / ".myfsio.sys" / "config" / "integrity_cursor.json"
def load(self) -> Dict[str, Any]:
path = self._get_path()
if not path.exists():
return {"buckets": {}}
try:
with open(path, "r", encoding="utf-8") as f:
data = json.load(f)
if not isinstance(data.get("buckets"), dict):
return {"buckets": {}}
return data
except (OSError, ValueError, KeyError):
return {"buckets": {}}
def save(self, data: Dict[str, Any]) -> None:
path = self._get_path()
path.parent.mkdir(parents=True, exist_ok=True)
try:
with open(path, "w", encoding="utf-8") as f:
json.dump(data, f, indent=2)
except OSError as e:
logger.error("Failed to save integrity cursor: %s", e)
def update_bucket(self, bucket_name: str, timestamp: float) -> None:
with self._lock:
data = self.load()
data["buckets"][bucket_name] = {"last_scanned": timestamp}
self.save(data)
def clean_stale(self, existing_buckets: List[str]) -> None:
with self._lock:
data = self.load()
existing_set = set(existing_buckets)
stale_keys = [k for k in data["buckets"] if k not in existing_set]
if stale_keys:
for k in stale_keys:
del data["buckets"][k]
self.save(data)
def get_bucket_order(self, bucket_names: List[str]) -> List[str]:
data = self.load()
buckets_info = data.get("buckets", {})
def sort_key(name: str) -> float:
entry = buckets_info.get(name)
if entry is None:
return 0.0
return entry.get("last_scanned", 0.0)
return sorted(bucket_names, key=sort_key)
def get_info(self) -> Dict[str, Any]:
data = self.load()
buckets = data.get("buckets", {})
return {
"tracked_buckets": len(buckets),
"buckets": {
name: info.get("last_scanned")
for name, info in buckets.items()
},
}
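The cursor file makes scans resumable across runs: get_bucket_order() sorts ascending by last_scanned, so never-scanned buckets (which sort as 0.0) and the least recently scanned ones go first. The on-disk shape at .myfsio.sys/config/integrity_cursor.json, with invented bucket names and timestamps:

    {
        "buckets": {
            "photos": {"last_scanned": 1742876968.0},
            "logs": {"last_scanned": 1742790568.0}
        }
    }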
MAX_ISSUES = 500
@@ -180,6 +250,7 @@ class IntegrityChecker:
auto_heal: bool = False,
dry_run: bool = False,
max_history: int = 50,
io_throttle_ms: int = 10,
) -> None:
self.storage_root = Path(storage_root)
self.interval_seconds = interval_hours * 3600.0
@@ -189,7 +260,11 @@ class IntegrityChecker:
self._timer: Optional[threading.Timer] = None
self._shutdown = False
self._lock = threading.Lock()
self._scanning = False
self._scan_start_time: Optional[float] = None
self._io_throttle = max(0, io_throttle_ms) / 1000.0
self.history_store = IntegrityHistoryStore(storage_root, max_records=max_history)
self.cursor_store = IntegrityCursorStore(self.storage_root)
def start(self) -> None:
if self._timer is not None:
@@ -229,52 +304,73 @@
             self._schedule_next()
     def run_now(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> IntegrityResult:
+        effective_auto_heal = auto_heal if auto_heal is not None else self.auto_heal
+        effective_dry_run = dry_run if dry_run is not None else self.dry_run
+        if not self._lock.acquire(blocking=False):
+            raise RuntimeError("Integrity scan is already in progress")
-        start = time.time()
-        result = IntegrityResult()
+        try:
+            self._scanning = True
+            self._scan_start_time = time.time()
-        bucket_names = self._list_bucket_names()
-        effective_auto_heal = auto_heal if auto_heal is not None else self.auto_heal
-        effective_dry_run = dry_run if dry_run is not None else self.dry_run
-        for bucket_name in bucket_names:
-            if result.objects_scanned >= self.batch_size:
-                break
-            result.buckets_scanned += 1
-            self._check_corrupted_objects(bucket_name, result, effective_auto_heal, effective_dry_run)
-            self._check_orphaned_objects(bucket_name, result, effective_auto_heal, effective_dry_run)
-            self._check_phantom_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
-            self._check_stale_versions(bucket_name, result, effective_auto_heal, effective_dry_run)
-            self._check_etag_cache(bucket_name, result, effective_auto_heal, effective_dry_run)
-            self._check_legacy_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
+            start = self._scan_start_time
+            result = IntegrityResult()
-        result.execution_time_seconds = time.time() - start
+            bucket_names = self._list_bucket_names()
+            self.cursor_store.clean_stale(bucket_names)
+            ordered_buckets = self.cursor_store.get_bucket_order(bucket_names)
-        if result.has_issues or result.errors:
-            logger.info(
-                "Integrity check completed in %.2fs: corrupted=%d, orphaned=%d, phantom=%d, "
-                "stale_versions=%d, etag_cache=%d, legacy_drift=%d, healed=%d, errors=%d%s",
-                result.execution_time_seconds,
-                result.corrupted_objects,
-                result.orphaned_objects,
-                result.phantom_metadata,
-                result.stale_versions,
-                result.etag_cache_inconsistencies,
-                result.legacy_metadata_drifts,
-                result.issues_healed,
-                len(result.errors),
-                " (dry run)" if effective_dry_run else "",
+            for bucket_name in ordered_buckets:
+                if self._batch_exhausted(result):
+                    break
+                result.buckets_scanned += 1
+                self._check_corrupted_objects(bucket_name, result, effective_auto_heal, effective_dry_run)
+                self._check_orphaned_objects(bucket_name, result, effective_auto_heal, effective_dry_run)
+                self._check_phantom_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
+                self._check_stale_versions(bucket_name, result, effective_auto_heal, effective_dry_run)
+                self._check_etag_cache(bucket_name, result, effective_auto_heal, effective_dry_run)
+                self._check_legacy_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
+                self.cursor_store.update_bucket(bucket_name, time.time())
+            result.execution_time_seconds = time.time() - start
+            if result.has_issues or result.errors:
+                logger.info(
+                    "Integrity check completed in %.2fs: corrupted=%d, orphaned=%d, phantom=%d, "
+                    "stale_versions=%d, etag_cache=%d, legacy_drift=%d, healed=%d, errors=%d%s",
+                    result.execution_time_seconds,
+                    result.corrupted_objects,
+                    result.orphaned_objects,
+                    result.phantom_metadata,
+                    result.stale_versions,
+                    result.etag_cache_inconsistencies,
+                    result.legacy_metadata_drifts,
+                    result.issues_healed,
+                    len(result.errors),
+                    " (dry run)" if effective_dry_run else "",
+                )
+            record = IntegrityExecutionRecord(
+                timestamp=time.time(),
+                result=result.to_dict(),
+                dry_run=effective_dry_run,
+                auto_heal=effective_auto_heal,
+            )
+            self.history_store.add(record)
-        record = IntegrityExecutionRecord(
-            timestamp=time.time(),
-            result=result.to_dict(),
-            dry_run=effective_dry_run,
-            auto_heal=effective_auto_heal,
-        )
-        self.history_store.add(record)
+            return result
+        finally:
+            self._scanning = False
+            self._scan_start_time = None
+            self._lock.release()
-        return result
+    def run_async(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> bool:
+        if self._scanning:
+            return False
+        t = threading.Thread(target=self.run_now, args=(auto_heal, dry_run), daemon=True)
+        t.start()
+        return True
def _system_path(self) -> Path:
return self.storage_root / self.SYSTEM_ROOT
@@ -289,6 +385,16 @@ class IntegrityChecker:
pass
return names
def _throttle(self) -> bool:
if self._shutdown:
return True
if self._io_throttle > 0:
time.sleep(self._io_throttle)
return self._shutdown
def _batch_exhausted(self, result: IntegrityResult) -> bool:
return self._shutdown or result.objects_scanned >= self.batch_size
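Because each run stops after batch_size objects and the cursor rotates bucket order between runs, full coverage arrives over several runs rather than one long pass. A back-of-envelope example using the default batch size from the config diff above:

    objects_total = 5000
    runs_to_cover = -(-objects_total // 1000)  # 5 daily runs at the default batch_size=1000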
def _add_issue(self, result: IntegrityResult, issue: IntegrityIssue) -> None:
if len(result.issues) < MAX_ISSUES:
result.issues.append(issue)
@@ -304,7 +410,9 @@ class IntegrityChecker:
try:
for index_file in meta_root.rglob("_index.json"):
-                if result.objects_scanned >= self.batch_size:
+                if self._throttle():
+                    return
+                if self._batch_exhausted(result):
                     return
if not index_file.is_file():
continue
@@ -314,7 +422,9 @@ class IntegrityChecker:
continue
for key_name, entry in list(index_data.items()):
-                    if result.objects_scanned >= self.batch_size:
+                    if self._throttle():
+                        return
+                    if self._batch_exhausted(result):
                         return
rel_dir = index_file.parent.relative_to(meta_root)
@@ -374,7 +484,9 @@ class IntegrityChecker:
try:
for entry in bucket_path.rglob("*"):
-                if result.objects_scanned >= self.batch_size:
+                if self._throttle():
+                    return
+                if self._batch_exhausted(result):
                     return
if not entry.is_file():
continue
@@ -385,6 +497,7 @@ class IntegrityChecker:
if rel.parts and rel.parts[0] in self.INTERNAL_FOLDERS:
continue
result.objects_scanned += 1
full_key = rel.as_posix()
key_name = rel.name
parent = rel.parent
@@ -449,6 +562,10 @@ class IntegrityChecker:
try:
for index_file in meta_root.rglob("_index.json"):
if self._throttle():
return
if self._batch_exhausted(result):
return
if not index_file.is_file():
continue
try:
@@ -458,6 +575,9 @@ class IntegrityChecker:
keys_to_remove = []
for key_name in list(index_data.keys()):
if self._batch_exhausted(result):
break
result.objects_scanned += 1
rel_dir = index_file.parent.relative_to(meta_root)
if rel_dir == Path("."):
full_key = key_name
@@ -503,6 +623,10 @@ class IntegrityChecker:
try:
for key_dir in versions_root.rglob("*"):
if self._throttle():
return
if self._batch_exhausted(result):
return
if not key_dir.is_dir():
continue
@@ -510,6 +634,9 @@ class IntegrityChecker:
json_files = {f.stem: f for f in key_dir.glob("*.json")}
for stem, bin_file in bin_files.items():
if self._batch_exhausted(result):
return
result.objects_scanned += 1
if stem not in json_files:
result.stale_versions += 1
issue = IntegrityIssue(
@@ -529,6 +656,9 @@ class IntegrityChecker:
self._add_issue(result, issue)
for stem, json_file in json_files.items():
if self._batch_exhausted(result):
return
result.objects_scanned += 1
if stem not in bin_files:
result.stale_versions += 1
issue = IntegrityIssue(
@@ -569,6 +699,9 @@ class IntegrityChecker:
found_mismatch = False
for full_key, cached_etag in etag_cache.items():
if self._batch_exhausted(result):
break
result.objects_scanned += 1
key_path = Path(full_key)
key_name = key_path.name
parent = key_path.parent
@@ -626,9 +759,14 @@ class IntegrityChecker:
try:
for meta_file in legacy_meta_root.rglob("*.meta.json"):
if self._throttle():
return
if self._batch_exhausted(result):
return
if not meta_file.is_file():
continue
result.objects_scanned += 1
try:
rel = meta_file.relative_to(legacy_meta_root)
except ValueError:
@@ -728,11 +866,17 @@ class IntegrityChecker:
return [r.to_dict() for r in records]
def get_status(self) -> dict:
-        return {
+        status: Dict[str, Any] = {
             "enabled": not self._shutdown or self._timer is not None,
             "running": self._timer is not None and not self._shutdown,
+            "scanning": self._scanning,
             "interval_hours": self.interval_seconds / 3600.0,
             "batch_size": self.batch_size,
             "auto_heal": self.auto_heal,
             "dry_run": self.dry_run,
+            "io_throttle_ms": round(self._io_throttle * 1000),
         }
+        if self._scanning and self._scan_start_time is not None:
+            status["scan_elapsed_seconds"] = round(time.time() - self._scan_start_time, 1)
+        status["cursor"] = self.cursor_store.get_info()
+        return status

app/ui.py

@@ -1063,6 +1063,27 @@ def bulk_delete_objects(bucket_name: str):
return _respond(False, f"A maximum of {MAX_KEYS} objects can be deleted per request", status_code=400)
unique_keys = list(dict.fromkeys(cleaned))
folder_prefixes = [k for k in unique_keys if k.endswith("/")]
if folder_prefixes:
try:
client = get_session_s3_client()
for prefix in folder_prefixes:
unique_keys.remove(prefix)
paginator = client.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
for obj in page.get("Contents", []):
if obj["Key"] not in unique_keys:
unique_keys.append(obj["Key"])
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
if isinstance(exc, ClientError):
err, status = handle_client_error(exc)
return _respond(False, err["error"], status_code=status)
return _respond(False, "S3 API server is unreachable", status_code=502)
if not unique_keys:
return _respond(False, "No objects found under the selected folders", status_code=400)
try:
_authorize_ui(principal, bucket_name, "delete")
except IamError as exc:
@@ -1093,13 +1114,17 @@ def bulk_delete_objects(bucket_name: str):
else:
try:
client = get_session_s3_client()
-            objects_to_delete = [{"Key": k} for k in unique_keys]
-            resp = client.delete_objects(
-                Bucket=bucket_name,
-                Delete={"Objects": objects_to_delete, "Quiet": False},
-            )
-            deleted = [d["Key"] for d in resp.get("Deleted", [])]
-            errors = [{"key": e["Key"], "error": e.get("Message", e.get("Code", "Unknown error"))} for e in resp.get("Errors", [])]
+            deleted = []
+            errors = []
+            for i in range(0, len(unique_keys), 1000):
+                batch = unique_keys[i:i + 1000]
+                objects_to_delete = [{"Key": k} for k in batch]
+                resp = client.delete_objects(
+                    Bucket=bucket_name,
+                    Delete={"Objects": objects_to_delete, "Quiet": False},
+                )
+                deleted.extend(d["Key"] for d in resp.get("Deleted", []))
+                errors.extend({"key": e["Key"], "error": e.get("Message", e.get("Code", "Unknown error"))} for e in resp.get("Errors", []))
for key in deleted:
_replication_manager().trigger_replication(bucket_name, key, action="delete")
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
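The 1,000-key batches match the S3 DeleteObjects limit of 1,000 objects per request, so large folder selections are split across calls rather than rejected. Batch maths for an invented selection of 2,500 keys:

    batch_sizes = [min(1000, 2500 - i) for i in range(0, 2500, 1000)]  # [1000, 1000, 500]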
@@ -4126,7 +4151,7 @@ def system_dashboard():
r = rec.get("result", {})
total_freed = r.get("temp_bytes_freed", 0) + r.get("multipart_bytes_freed", 0) + r.get("orphaned_version_bytes_freed", 0)
rec["bytes_freed_display"] = _format_bytes(total_freed)
rec["timestamp_display"] = datetime.fromtimestamp(rec["timestamp"], tz=dt_timezone.utc).strftime("%Y-%m-%d %H:%M UTC")
rec["timestamp_display"] = _format_datetime_display(datetime.fromtimestamp(rec["timestamp"], tz=dt_timezone.utc))
gc_history_records.append(rec)
checker = current_app.extensions.get("integrity")
@@ -4135,7 +4160,7 @@ def system_dashboard():
if checker:
raw = checker.get_history(limit=10, offset=0)
for rec in raw:
rec["timestamp_display"] = datetime.fromtimestamp(rec["timestamp"], tz=dt_timezone.utc).strftime("%Y-%m-%d %H:%M UTC")
rec["timestamp_display"] = _format_datetime_display(datetime.fromtimestamp(rec["timestamp"], tz=dt_timezone.utc))
integrity_history_records.append(rec)
features = [
@@ -4163,6 +4188,7 @@ def system_dashboard():
gc_history=gc_history_records,
integrity_status=integrity_status,
integrity_history=integrity_history_records,
display_timezone=current_app.config.get("DISPLAY_TIMEZONE", "UTC"),
)
@@ -4179,14 +4205,43 @@ def system_gc_run():
return jsonify({"error": "GC is not enabled"}), 400
payload = request.get_json(silent=True) or {}
-    original_dry_run = gc.dry_run
-    if "dry_run" in payload:
-        gc.dry_run = bool(payload["dry_run"])
+    started = gc.run_async(dry_run=payload.get("dry_run"))
+    if not started:
+        return jsonify({"error": "GC is already in progress"}), 409
+    return jsonify({"status": "started"})
+@ui_bp.get("/system/gc/status")
+def system_gc_status():
+    principal = _current_principal()
     try:
-        result = gc.run_now()
-    finally:
-        gc.dry_run = original_dry_run
-    return jsonify(result.to_dict())
+        _iam().authorize(principal, None, "iam:*")
+    except IamError:
+        return jsonify({"error": "Access denied"}), 403
+    gc = current_app.extensions.get("gc")
+    if not gc:
+        return jsonify({"error": "GC is not enabled"}), 400
+    return jsonify(gc.get_status())
+@ui_bp.get("/system/gc/history")
+def system_gc_history():
+    principal = _current_principal()
+    try:
+        _iam().authorize(principal, None, "iam:*")
+    except IamError:
+        return jsonify({"error": "Access denied"}), 403
+    gc = current_app.extensions.get("gc")
+    if not gc:
+        return jsonify({"executions": []})
+    limit = min(int(request.args.get("limit", 10)), 200)
+    offset = int(request.args.get("offset", 0))
+    records = gc.get_history(limit=limit, offset=offset)
+    return jsonify({"executions": records})
@ui_bp.post("/system/integrity/run")
@@ -4202,11 +4257,46 @@ def system_integrity_run():
return jsonify({"error": "Integrity checker is not enabled"}), 400
payload = request.get_json(silent=True) or {}
-    result = checker.run_now(
+    started = checker.run_async(
         auto_heal=payload.get("auto_heal"),
         dry_run=payload.get("dry_run"),
     )
-    return jsonify(result.to_dict())
+    if not started:
+        return jsonify({"error": "A scan is already in progress"}), 409
+    return jsonify({"status": "started"})
+@ui_bp.get("/system/integrity/status")
+def system_integrity_status():
+    principal = _current_principal()
+    try:
+        _iam().authorize(principal, None, "iam:*")
+    except IamError:
+        return jsonify({"error": "Access denied"}), 403
+    checker = current_app.extensions.get("integrity")
+    if not checker:
+        return jsonify({"error": "Integrity checker is not enabled"}), 400
+    return jsonify(checker.get_status())
+@ui_bp.get("/system/integrity/history")
+def system_integrity_history():
+    principal = _current_principal()
+    try:
+        _iam().authorize(principal, None, "iam:*")
+    except IamError:
+        return jsonify({"error": "Access denied"}), 403
+    checker = current_app.extensions.get("integrity")
+    if not checker:
+        return jsonify({"executions": []})
+    limit = min(int(request.args.get("limit", 10)), 200)
+    offset = int(request.args.get("offset", 0))
+    records = checker.get_history(limit=limit, offset=offset)
+    return jsonify({"executions": records})
@ui_bp.app_errorhandler(404)


@@ -1,6 +1,6 @@
from __future__ import annotations
APP_VERSION = "0.4.0"
APP_VERSION = "0.4.1"
def get_version() -> str:


@@ -2655,7 +2655,7 @@ pre code {
}
.objects-table-container {
-  max-height: none;
+  max-height: 60vh;
}
.preview-card {


@@ -98,6 +98,9 @@
const previewMetadata = document.getElementById('preview-metadata');
const previewMetadataList = document.getElementById('preview-metadata-list');
const previewPlaceholder = document.getElementById('preview-placeholder');
const previewPlaceholderDefault = previewPlaceholder ? previewPlaceholder.innerHTML : '';
const previewErrorAlert = document.getElementById('preview-error-alert');
const previewDetailsMeta = document.getElementById('preview-details-meta');
const previewImage = document.getElementById('preview-image');
const previewVideo = document.getElementById('preview-video');
const previewAudio = document.getElementById('preview-audio');
@@ -866,6 +869,11 @@
const checkbox = row.querySelector('[data-folder-select]');
checkbox?.addEventListener('change', (e) => {
e.stopPropagation();
if (checkbox.checked) {
selectedRows.set(folderPath, { key: folderPath, isFolder: true });
} else {
selectedRows.delete(folderPath);
}
const folderObjects = allObjects.filter(obj => obj.key.startsWith(folderPath));
folderObjects.forEach(obj => {
if (checkbox.checked) {
@@ -1350,8 +1358,11 @@
}
if (selectAllCheckbox) {
const filesInView = visibleItems.filter(item => item.type === 'file');
-      const total = filesInView.length;
-      const visibleSelectedCount = filesInView.filter(item => selectedRows.has(item.data.key)).length;
+      const foldersInView = visibleItems.filter(item => item.type === 'folder');
+      const total = filesInView.length + foldersInView.length;
+      const fileSelectedCount = filesInView.filter(item => selectedRows.has(item.data.key)).length;
+      const folderSelectedCount = foldersInView.filter(item => selectedRows.has(item.path)).length;
+      const visibleSelectedCount = fileSelectedCount + folderSelectedCount;
selectAllCheckbox.disabled = total === 0;
selectAllCheckbox.checked = visibleSelectedCount > 0 && visibleSelectedCount === total && total > 0;
selectAllCheckbox.indeterminate = visibleSelectedCount > 0 && visibleSelectedCount < total;
@@ -1373,8 +1384,12 @@
const keys = Array.from(selectedRows.keys());
bulkDeleteList.innerHTML = '';
if (bulkDeleteCount) {
-      const label = keys.length === 1 ? 'object' : 'objects';
-      bulkDeleteCount.textContent = `${keys.length} ${label} selected`;
+      const folderCount = keys.filter(k => k.endsWith('/')).length;
+      const objectCount = keys.length - folderCount;
+      const parts = [];
+      if (folderCount) parts.push(`${folderCount} folder${folderCount !== 1 ? 's' : ''}`);
+      if (objectCount) parts.push(`${objectCount} object${objectCount !== 1 ? 's' : ''}`);
+      bulkDeleteCount.textContent = `${parts.join(' and ')} selected`;
}
if (!keys.length) {
const empty = document.createElement('li');
@@ -1513,7 +1528,7 @@
};
const response = await fetch(endpoint, {
method: 'POST',
-        headers: { 'Content-Type': 'application/json' },
+        headers: { 'Content-Type': 'application/json', 'X-CSRFToken': window.getCsrfToken ? window.getCsrfToken() : '' },
body: JSON.stringify(payload),
});
const data = await response.json();
@@ -1957,6 +1972,10 @@
[previewImage, previewVideo, previewAudio, previewIframe].forEach((el) => {
if (!el) return;
el.classList.add('d-none');
if (el.tagName === 'IMG') {
el.removeAttribute('src');
el.onload = null;
}
if (el.tagName === 'VIDEO' || el.tagName === 'AUDIO') {
el.pause();
el.removeAttribute('src');
@@ -1969,9 +1988,38 @@
previewText.classList.add('d-none');
previewText.textContent = '';
}
previewPlaceholder.innerHTML = previewPlaceholderDefault;
previewPlaceholder.classList.remove('d-none');
};
let previewFailed = false;
const handlePreviewError = () => {
previewFailed = true;
if (downloadButton) {
downloadButton.classList.add('disabled');
downloadButton.removeAttribute('href');
}
if (presignButton) presignButton.disabled = true;
if (generatePresignButton) generatePresignButton.disabled = true;
if (previewDetailsMeta) previewDetailsMeta.classList.add('d-none');
if (previewMetadata) previewMetadata.classList.add('d-none');
const tagsPanel = document.getElementById('preview-tags');
if (tagsPanel) tagsPanel.classList.add('d-none');
const versionPanel = document.getElementById('version-panel');
if (versionPanel) versionPanel.classList.add('d-none');
if (previewErrorAlert) {
previewErrorAlert.textContent = 'Unable to load object \u2014 it may have been deleted, or the server returned an error.';
previewErrorAlert.classList.remove('d-none');
}
};
const clearPreviewError = () => {
previewFailed = false;
if (previewErrorAlert) previewErrorAlert.classList.add('d-none');
if (previewDetailsMeta) previewDetailsMeta.classList.remove('d-none');
};
async function fetchMetadata(metadataUrl) {
if (!metadataUrl) return null;
try {
@@ -1993,6 +2041,7 @@
previewPanel.classList.remove('d-none');
activeRow = row;
renderMetadata(null);
clearPreviewError();
previewKey.textContent = row.dataset.key;
previewSize.textContent = formatBytes(Number(row.dataset.size));
@@ -2016,18 +2065,71 @@
const previewUrl = row.dataset.previewUrl;
const lower = row.dataset.key.toLowerCase();
if (previewUrl && lower.match(/\.(png|jpg|jpeg|gif|webp|svg|ico|bmp)$/)) {
-      previewImage.src = previewUrl;
-      previewImage.classList.remove('d-none');
-      previewPlaceholder.classList.add('d-none');
+      previewPlaceholder.innerHTML = '<div class="spinner-border spinner-border-sm text-secondary" role="status"></div><div class="small mt-2">Loading preview\u2026</div>';
+      const currentRow = row;
+      fetch(previewUrl)
+        .then((r) => {
+          if (activeRow !== currentRow) return;
+          if (!r.ok) {
+            previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
+            handlePreviewError();
+            return;
+          }
+          return r.blob();
+        })
+        .then((blob) => {
+          if (!blob || activeRow !== currentRow) return;
+          const url = URL.createObjectURL(blob);
+          previewImage.onload = () => {
+            if (activeRow !== currentRow) { URL.revokeObjectURL(url); return; }
+            previewImage.classList.remove('d-none');
+            previewPlaceholder.classList.add('d-none');
+          };
+          previewImage.onerror = () => {
+            if (activeRow !== currentRow) { URL.revokeObjectURL(url); return; }
+            URL.revokeObjectURL(url);
+            previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
+          };
+          previewImage.src = url;
+        })
+        .catch(() => {
+          if (activeRow !== currentRow) return;
+          previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
+          handlePreviewError();
+        });
     } else if (previewUrl && lower.match(/\.(mp4|webm|ogv|mov|avi|mkv)$/)) {
+      const currentRow = row;
+      previewVideo.onerror = () => {
+        if (activeRow !== currentRow) return;
+        previewVideo.classList.add('d-none');
+        previewPlaceholder.classList.remove('d-none');
+        previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
+        handlePreviewError();
+      };
       previewVideo.src = previewUrl;
       previewVideo.classList.remove('d-none');
       previewPlaceholder.classList.add('d-none');
     } else if (previewUrl && lower.match(/\.(mp3|wav|flac|ogg|aac|m4a|wma)$/)) {
+      const currentRow = row;
+      previewAudio.onerror = () => {
+        if (activeRow !== currentRow) return;
+        previewAudio.classList.add('d-none');
+        previewPlaceholder.classList.remove('d-none');
+        previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
+        handlePreviewError();
+      };
       previewAudio.src = previewUrl;
       previewAudio.classList.remove('d-none');
       previewPlaceholder.classList.add('d-none');
     } else if (previewUrl && lower.match(/\.(pdf)$/)) {
+      const currentRow = row;
+      previewIframe.onerror = () => {
+        if (activeRow !== currentRow) return;
+        previewIframe.classList.add('d-none');
+        previewPlaceholder.classList.remove('d-none');
+        previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
+        handlePreviewError();
+      };
       previewIframe.src = previewUrl;
       previewIframe.style.minHeight = '500px';
       previewIframe.classList.remove('d-none');
@@ -2052,14 +2154,17 @@
})
.catch(() => {
if (activeRow !== currentRow) return;
-          previewText.textContent = 'Failed to load preview';
+          previewText.classList.add('d-none');
+          previewPlaceholder.classList.remove('d-none');
+          previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
+          handlePreviewError();
});
}
const metadataUrl = row.dataset.metadataUrl;
if (metadataUrl) {
const metadata = await fetchMetadata(metadataUrl);
-      if (activeRow === row) {
+      if (activeRow === row && !previewFailed) {
renderMetadata(metadata);
}
}
@@ -3157,6 +3262,15 @@
}
});
const foldersInView = visibleItems.filter(item => item.type === 'folder');
foldersInView.forEach(item => {
if (shouldSelect) {
selectedRows.set(item.path, { key: item.path, isFolder: true });
} else {
selectedRows.delete(item.path);
}
});
document.querySelectorAll('[data-folder-select]').forEach(cb => {
cb.checked = shouldSelect;
});
@@ -3957,6 +4071,10 @@
const loadObjectTags = async (row) => {
if (!row || !previewTagsPanel) return;
if (previewFailed) {
previewTagsPanel.classList.add('d-none');
return;
}
const tagsUrl = row.dataset.tagsUrl;
if (!tagsUrl) {
previewTagsPanel.classList.add('d-none');

View File

@@ -3,6 +3,8 @@ window.BucketDetailUpload = (function() {
const MULTIPART_THRESHOLD = 8 * 1024 * 1024;
const CHUNK_SIZE = 8 * 1024 * 1024;
const MAX_PART_RETRIES = 3;
const RETRY_BASE_DELAY_MS = 1000;
let state = {
isUploading: false,
@@ -204,6 +206,67 @@ window.BucketDetailUpload = (function() {
}
}
function uploadPartXHR(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts) {
return new Promise((resolve, reject) => {
const xhr = new XMLHttpRequest();
xhr.open('PUT', url, true);
xhr.setRequestHeader('X-CSRFToken', csrfToken || '');
xhr.upload.addEventListener('progress', (e) => {
if (e.lengthComputable) {
updateProgressItem(progressItem, {
status: `Part ${partNumber}/${totalParts}`,
loaded: baseBytes + e.loaded,
total: fileSize
});
}
});
xhr.addEventListener('load', () => {
if (xhr.status >= 200 && xhr.status < 300) {
try {
resolve(JSON.parse(xhr.responseText));
} catch {
reject(new Error(`Part ${partNumber}: invalid response`));
}
} else {
try {
const data = JSON.parse(xhr.responseText);
reject(new Error(data.error || `Part ${partNumber} failed (${xhr.status})`));
} catch {
reject(new Error(`Part ${partNumber} failed (${xhr.status})`));
}
}
});
xhr.addEventListener('error', () => reject(new Error(`Part ${partNumber}: network error`)));
xhr.addEventListener('abort', () => reject(new Error(`Part ${partNumber}: aborted`)));
xhr.send(chunk);
});
}
async function uploadPartWithRetry(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts) {
let lastError;
for (let attempt = 0; attempt <= MAX_PART_RETRIES; attempt++) {
try {
return await uploadPartXHR(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts);
} catch (err) {
lastError = err;
if (attempt < MAX_PART_RETRIES) {
const delay = RETRY_BASE_DELAY_MS * Math.pow(2, attempt);
updateProgressItem(progressItem, {
status: `Part ${partNumber}/${totalParts} retry ${attempt + 1}/${MAX_PART_RETRIES}...`,
loaded: baseBytes,
total: fileSize
});
await new Promise(r => setTimeout(r, delay));
}
}
}
throw lastError;
}
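With RETRY_BASE_DELAY_MS = 1000 and MAX_PART_RETRIES = 3, a failing part is retried after exponentially growing pauses before the upload gives up; the schedule implied by the constants above works out to:

    retry_delays_ms = [1000 * 2 ** attempt for attempt in range(3)]  # [1000, 2000, 4000]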
async function uploadMultipart(file, objectKey, metadata, progressItem, urls) {
const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;
@@ -233,26 +296,14 @@ window.BucketDetailUpload = (function() {
const end = Math.min(start + CHUNK_SIZE, file.size);
const chunk = file.slice(start, end);
-      updateProgressItem(progressItem, {
-        status: `Part ${partNumber}/${totalParts}`,
-        loaded: uploadedBytes,
-        total: file.size
-      });
+      const partData = await uploadPartWithRetry(
+        `${partUrl}?partNumber=${partNumber}`,
+        chunk, csrfToken, uploadedBytes, file.size,
+        progressItem, partNumber, totalParts
+      );
-      const partResp = await fetch(`${partUrl}?partNumber=${partNumber}`, {
-        method: 'PUT',
-        headers: { 'X-CSRFToken': csrfToken || '' },
-        body: chunk
-      });
-      if (!partResp.ok) {
-        const err = await partResp.json().catch(() => ({}));
-        throw new Error(err.error || `Part ${partNumber} failed`);
-      }
-      const partData = await partResp.json();
       parts.push({ part_number: partNumber, etag: partData.etag });
-      uploadedBytes += chunk.size;
+      uploadedBytes += (end - start);
updateProgressItem(progressItem, {
loaded: uploadedBytes,


@@ -257,7 +257,8 @@
Share Link
</button>
</div>
<div class="p-3 rounded mb-3" style="background: var(--myfsio-preview-bg);">
<div id="preview-error-alert" class="alert alert-warning d-none py-2 px-3 mb-3 small" role="alert"></div>
<div id="preview-details-meta" class="p-3 rounded mb-3" style="background: var(--myfsio-preview-bg);">
<dl class="row small mb-0">
<dt class="col-5 text-muted fw-normal">Last modified</dt>
<dd class="col-7 mb-2 fw-medium" id="preview-modified"></dd>
@@ -2057,7 +2058,7 @@
<div class="col-12">
<label class="form-label fw-medium">Select files</label>
<input class="form-control" type="file" name="object" id="uploadFileInput" multiple required />
<div class="form-text">Select one or more files from your device. Files ≥ 8&nbsp;MB automatically switch to multipart uploads.</div>
<div class="form-text">Select one or more files from your device. Files ≥ 8&nbsp;MB use multipart uploads with automatic retry.</div>
</div>
<div class="col-12">
<div class="upload-dropzone text-center" data-dropzone>


@@ -122,6 +122,13 @@
</button>
</div>
<div id="gcScanningBanner" class="mb-3 {% if not gc_status.scanning %}d-none{% endif %}">
<div class="alert alert-info mb-0 small d-flex align-items-center gap-2">
<div class="spinner-border spinner-border-sm text-info" role="status"></div>
<span>GC in progress<span id="gcScanElapsed"></span></span>
</div>
</div>
<div id="gcResult" class="mb-3 d-none">
<div class="alert mb-0 small" id="gcResultAlert">
<div class="d-flex justify-content-between align-items-start">
@@ -148,6 +155,7 @@
</div>
</div>
<div id="gcHistoryContainer">
{% if gc_history %}
<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
@@ -193,6 +201,7 @@
<p class="text-muted small mb-0">No executions recorded yet.</p>
</div>
{% endif %}
</div>
{% else %}
<div class="text-center py-4">
@@ -233,21 +242,28 @@
<div class="card-body px-4 pb-4">
{% if integrity_status.enabled %}
<div class="d-flex gap-2 flex-wrap mb-3">
<button class="btn btn-primary btn-sm d-inline-flex align-items-center" id="integrityRunBtn" onclick="runIntegrity(false, false)">
<button class="btn btn-primary btn-sm d-inline-flex align-items-center" id="integrityRunBtn" onclick="runIntegrity(false, false)" {% if integrity_status.scanning %}disabled{% endif %}>
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 flex-shrink-0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
</svg>
Scan Now
</button>
<button class="btn btn-outline-warning btn-sm" id="integrityHealBtn" onclick="runIntegrity(false, true)">
<button class="btn btn-outline-warning btn-sm" id="integrityHealBtn" onclick="runIntegrity(false, true)" {% if integrity_status.scanning %}disabled{% endif %}>
Scan &amp; Heal
</button>
<button class="btn btn-outline-secondary btn-sm" id="integrityDryRunBtn" onclick="runIntegrity(true, false)">
<button class="btn btn-outline-secondary btn-sm" id="integrityDryRunBtn" onclick="runIntegrity(true, false)" {% if integrity_status.scanning %}disabled{% endif %}>
Dry Run
</button>
</div>
<div id="integrityScanningBanner" class="mb-3 {% if not integrity_status.scanning %}d-none{% endif %}">
<div class="alert alert-info mb-0 small d-flex align-items-center gap-2">
<div class="spinner-border spinner-border-sm text-info" role="status"></div>
<span>Scan in progress<span id="integrityScanElapsed"></span></span>
</div>
</div>
<div id="integrityResult" class="mb-3 d-none">
<div class="alert mb-0 small" id="integrityResultAlert">
<div class="d-flex justify-content-between align-items-start">
@@ -273,6 +289,7 @@
</div>
</div>
<div id="integrityHistoryContainer">
{% if integrity_history %}
<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
@@ -326,6 +343,7 @@
<p class="text-muted small mb-0">No scans recorded yet.</p>
</div>
{% endif %}
</div>
{% else %}
<div class="text-center py-4">
@@ -369,9 +387,195 @@
return (i === 0 ? b : b.toFixed(1)) + ' ' + units[i];
}
var _displayTimezone = {{ display_timezone|tojson }};
function formatTimestamp(ts) {
var d = new Date(ts * 1000);
try {
var opts = {year: 'numeric', month: 'short', day: '2-digit', hour: '2-digit', minute: '2-digit', hour12: false, timeZone: _displayTimezone, timeZoneName: 'short'};
return d.toLocaleString('en-US', opts);
} catch (e) {
var pad = function (n) { return n < 10 ? '0' + n : '' + n; };
return d.getUTCFullYear() + '-' + pad(d.getUTCMonth() + 1) + '-' + pad(d.getUTCDate()) +
' ' + pad(d.getUTCHours()) + ':' + pad(d.getUTCMinutes()) + ' UTC';
}
}
var _gcHistoryIcon = '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">' +
'<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>' +
'<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>' +
'<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/></svg>';
function _gcRefreshHistory() {
fetch('{{ url_for("ui.system_gc_history") }}?limit=10', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
var container = document.getElementById('gcHistoryContainer');
if (!container) return;
var execs = hist.executions || [];
if (execs.length === 0) {
container.innerHTML = '<div class="text-center py-2"><p class="text-muted small mb-0">No executions recorded yet.</p></div>';
return;
}
var html = '<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">' +
_gcHistoryIcon + ' Recent Executions</h6>' +
'<div class="table-responsive"><table class="table table-sm small mb-0">' +
'<thead class="table-light"><tr><th>Time</th><th class="text-center">Cleaned</th>' +
'<th class="text-center">Freed</th><th class="text-center">Mode</th></tr></thead><tbody>';
execs.forEach(function (exec) {
var r = exec.result || {};
var cleaned = (r.temp_files_deleted || 0) + (r.multipart_uploads_deleted || 0) +
(r.lock_files_deleted || 0) + (r.orphaned_metadata_deleted || 0) +
(r.orphaned_versions_deleted || 0) + (r.empty_dirs_removed || 0);
var freed = (r.temp_bytes_freed || 0) + (r.multipart_bytes_freed || 0) +
(r.orphaned_version_bytes_freed || 0);
var mode = exec.dry_run
? '<span class="badge bg-warning bg-opacity-10 text-warning">Dry run</span>'
: '<span class="badge bg-primary bg-opacity-10 text-primary">Live</span>';
html += '<tr><td class="text-nowrap">' + formatTimestamp(exec.timestamp) + '</td>' +
'<td class="text-center">' + cleaned + '</td>' +
'<td class="text-center">' + formatBytes(freed) + '</td>' +
'<td class="text-center">' + mode + '</td></tr>';
});
html += '</tbody></table></div>';
container.innerHTML = html;
})
.catch(function () {});
}
function _integrityRefreshHistory() {
fetch('{{ url_for("ui.system_integrity_history") }}?limit=10', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
var container = document.getElementById('integrityHistoryContainer');
if (!container) return;
var execs = hist.executions || [];
if (execs.length === 0) {
container.innerHTML = '<div class="text-center py-2"><p class="text-muted small mb-0">No scans recorded yet.</p></div>';
return;
}
var html = '<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">' +
_gcHistoryIcon + ' Recent Scans</h6>' +
'<div class="table-responsive"><table class="table table-sm small mb-0">' +
'<thead class="table-light"><tr><th>Time</th><th class="text-center">Scanned</th>' +
'<th class="text-center">Issues</th><th class="text-center">Healed</th>' +
'<th class="text-center">Mode</th></tr></thead><tbody>';
execs.forEach(function (exec) {
var r = exec.result || {};
var issues = (r.corrupted_objects || 0) + (r.orphaned_objects || 0) +
(r.phantom_metadata || 0) + (r.stale_versions || 0) +
(r.etag_cache_inconsistencies || 0) + (r.legacy_metadata_drifts || 0);
var issueHtml = issues > 0
? '<span class="text-danger fw-medium">' + issues + '</span>'
: '<span class="text-success">0</span>';
var mode = exec.dry_run
? '<span class="badge bg-warning bg-opacity-10 text-warning">Dry</span>'
: (exec.auto_heal
? '<span class="badge bg-success bg-opacity-10 text-success">Heal</span>'
: '<span class="badge bg-primary bg-opacity-10 text-primary">Scan</span>');
html += '<tr><td class="text-nowrap">' + formatTimestamp(exec.timestamp) + '</td>' +
'<td class="text-center">' + (r.objects_scanned || 0) + '</td>' +
'<td class="text-center">' + issueHtml + '</td>' +
'<td class="text-center">' + (r.issues_healed || 0) + '</td>' +
'<td class="text-center">' + mode + '</td></tr>';
});
html += '</tbody></table></div>';
container.innerHTML = html;
})
.catch(function () {});
}
var _gcPollTimer = null;
var _gcLastDryRun = false;
function _gcSetScanning(scanning) {
var banner = document.getElementById('gcScanningBanner');
var btns = ['gcRunBtn', 'gcDryRunBtn'];
if (scanning) {
banner.classList.remove('d-none');
btns.forEach(function (id) {
var el = document.getElementById(id);
if (el) el.disabled = true;
});
} else {
banner.classList.add('d-none');
document.getElementById('gcScanElapsed').textContent = '';
btns.forEach(function (id) {
var el = document.getElementById(id);
if (el) el.disabled = false;
});
}
}
function _gcShowResult(data, dryRun) {
var container = document.getElementById('gcResult');
var alert = document.getElementById('gcResultAlert');
var title = document.getElementById('gcResultTitle');
var body = document.getElementById('gcResultBody');
container.classList.remove('d-none');
var totalItems = (data.temp_files_deleted || 0) + (data.multipart_uploads_deleted || 0) +
(data.lock_files_deleted || 0) + (data.orphaned_metadata_deleted || 0) +
(data.orphaned_versions_deleted || 0) + (data.empty_dirs_removed || 0);
var totalFreed = (data.temp_bytes_freed || 0) + (data.multipart_bytes_freed || 0) +
(data.orphaned_version_bytes_freed || 0);
alert.className = totalItems > 0 ? 'alert alert-success mb-0 small' : 'alert alert-info mb-0 small';
title.textContent = (dryRun ? '[Dry Run] ' : '') + 'Completed in ' + (data.execution_time_seconds || 0).toFixed(2) + 's';
var lines = [];
if (data.temp_files_deleted) lines.push('Temp files: ' + data.temp_files_deleted + ' (' + formatBytes(data.temp_bytes_freed) + ')');
if (data.multipart_uploads_deleted) lines.push('Multipart uploads: ' + data.multipart_uploads_deleted + ' (' + formatBytes(data.multipart_bytes_freed) + ')');
if (data.lock_files_deleted) lines.push('Lock files: ' + data.lock_files_deleted);
if (data.orphaned_metadata_deleted) lines.push('Orphaned metadata: ' + data.orphaned_metadata_deleted);
if (data.orphaned_versions_deleted) lines.push('Orphaned versions: ' + data.orphaned_versions_deleted + ' (' + formatBytes(data.orphaned_version_bytes_freed) + ')');
if (data.empty_dirs_removed) lines.push('Empty directories: ' + data.empty_dirs_removed);
if (totalItems === 0) lines.push('Nothing to clean up.');
if (totalFreed > 0) lines.push('Total freed: ' + formatBytes(totalFreed));
if (data.errors && data.errors.length > 0) lines.push('Errors: ' + data.errors.join(', '));
body.innerHTML = lines.join('<br>');
}
function _gcPoll() {
fetch('{{ url_for("ui.system_gc_status") }}', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (status) {
if (status.scanning) {
var elapsed = status.scan_elapsed_seconds || 0;
document.getElementById('gcScanElapsed').textContent = ' (' + elapsed.toFixed(0) + 's)';
_gcPollTimer = setTimeout(_gcPoll, 2000);
} else {
_gcSetScanning(false);
_gcRefreshHistory();
fetch('{{ url_for("ui.system_gc_history") }}?limit=1', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
if (hist.executions && hist.executions.length > 0) {
var latest = hist.executions[0];
_gcShowResult(latest.result, latest.dry_run);
}
})
.catch(function () {});
}
})
.catch(function () {
_gcPollTimer = setTimeout(_gcPoll, 3000);
});
}
window.runGC = function (dryRun) {
setLoading(dryRun ? 'gcDryRunBtn' : 'gcRunBtn', true);
setLoading(dryRun ? 'gcRunBtn' : 'gcDryRunBtn', true, true);
_gcLastDryRun = dryRun;
document.getElementById('gcResult').classList.add('d-none');
_gcSetScanning(true);
fetch('{{ url_for("ui.system_gc_run") }}', {
method: 'POST',
@@ -380,42 +584,22 @@
})
.then(function (r) { return r.json(); })
.then(function (data) {
var container = document.getElementById('gcResult');
var alert = document.getElementById('gcResultAlert');
var title = document.getElementById('gcResultTitle');
var body = document.getElementById('gcResultBody');
container.classList.remove('d-none');
if (data.error) {
_gcSetScanning(false);
var container = document.getElementById('gcResult');
var alert = document.getElementById('gcResultAlert');
var title = document.getElementById('gcResultTitle');
var body = document.getElementById('gcResultBody');
container.classList.remove('d-none');
alert.className = 'alert alert-danger mb-0 small';
title.textContent = 'Error';
body.textContent = data.error;
return;
}
_gcPollTimer = setTimeout(_gcPoll, 2000);
})
.catch(function (err) {
_gcSetScanning(false);
var container = document.getElementById('gcResult');
var alert = document.getElementById('gcResultAlert');
var title = document.getElementById('gcResultTitle');
@@ -424,18 +608,104 @@
alert.className = 'alert alert-danger mb-0 small';
title.textContent = 'Error';
body.textContent = err.message;
})
.finally(function () {
setLoading('gcRunBtn', false);
setLoading('gcDryRunBtn', false);
});
};
{% if gc_status.scanning %}
_gcSetScanning(true);
_gcPollTimer = setTimeout(_gcPoll, 2000);
{% endif %}
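// Integrity scan controls mirror the GC flow: start async, poll status,
// render the latest history entry when the scan completes.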
var _integrityPollTimer = null;
var _integrityLastMode = {dryRun: false, autoHeal: false};
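// Show or hide the scanning banner and toggle the action buttons to match.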
function _integritySetScanning(scanning) {
var banner = document.getElementById('integrityScanningBanner');
var btns = ['integrityRunBtn', 'integrityHealBtn', 'integrityDryRunBtn'];
if (scanning) {
banner.classList.remove('d-none');
btns.forEach(function (id) {
var el = document.getElementById(id);
if (el) el.disabled = true;
});
} else {
banner.classList.add('d-none');
document.getElementById('integrityScanElapsed').textContent = '';
btns.forEach(function (id) {
var el = document.getElementById(id);
if (el) el.disabled = false;
});
}
}
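// Summarize an integrity result: scan totals, per-issue counts, healed items,
// and any errors reported by the run.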
function _integrityShowResult(data, dryRun, autoHeal) {
var container = document.getElementById('integrityResult');
var alert = document.getElementById('integrityResultAlert');
var title = document.getElementById('integrityResultTitle');
var body = document.getElementById('integrityResultBody');
container.classList.remove('d-none');
var totalIssues = (data.corrupted_objects || 0) + (data.orphaned_objects || 0) +
(data.phantom_metadata || 0) + (data.stale_versions || 0) +
(data.etag_cache_inconsistencies || 0) + (data.legacy_metadata_drifts || 0);
var prefix = dryRun ? '[Dry Run] ' : (autoHeal ? '[Heal] ' : '');
alert.className = totalIssues > 0 ? 'alert alert-warning mb-0 small' : 'alert alert-success mb-0 small';
title.textContent = prefix + 'Completed in ' + (data.execution_time_seconds || 0).toFixed(2) + 's';
var lines = [];
lines.push('Scanned: ' + (data.objects_scanned || 0) + ' objects in ' + (data.buckets_scanned || 0) + ' buckets');
if (totalIssues === 0) {
lines.push('No issues found.');
} else {
if (data.corrupted_objects) lines.push('Corrupted objects: ' + data.corrupted_objects);
if (data.orphaned_objects) lines.push('Orphaned objects: ' + data.orphaned_objects);
if (data.phantom_metadata) lines.push('Phantom metadata: ' + data.phantom_metadata);
if (data.stale_versions) lines.push('Stale versions: ' + data.stale_versions);
if (data.etag_cache_inconsistencies) lines.push('ETag inconsistencies: ' + data.etag_cache_inconsistencies);
if (data.legacy_metadata_drifts) lines.push('Legacy metadata drifts: ' + data.legacy_metadata_drifts);
if (data.issues_healed) lines.push('Issues healed: ' + data.issues_healed);
}
if (data.errors && data.errors.length > 0) lines.push('Errors: ' + data.errors.join(', '));
body.innerHTML = lines.join('<br>');
}
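// Poll the integrity status endpoint until the scan completes, then render
// the latest history entry.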
function _integrityPoll() {
fetch('{{ url_for("ui.system_integrity_status") }}', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (status) {
if (status.scanning) {
var elapsed = status.scan_elapsed_seconds || 0;
document.getElementById('integrityScanElapsed').textContent = ' (' + elapsed.toFixed(0) + 's)';
_integrityPollTimer = setTimeout(_integrityPoll, 2000);
} else {
_integritySetScanning(false);
_integrityRefreshHistory();
fetch('{{ url_for("ui.system_integrity_history") }}?limit=1', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
if (hist.executions && hist.executions.length > 0) {
var latest = hist.executions[0];
_integrityShowResult(latest.result, latest.dry_run, latest.auto_heal);
}
})
.catch(function () {});
}
})
.catch(function () {
_integrityPollTimer = setTimeout(_integrityPoll, 3000);
});
}
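// Kick off an async integrity scan; on success, polling takes over.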
window.runIntegrity = function (dryRun, autoHeal) {
_integrityLastMode = {dryRun: dryRun, autoHeal: autoHeal};
document.getElementById('integrityResult').classList.add('d-none');
_integritySetScanning(true);
fetch('{{ url_for("ui.system_integrity_run") }}', {
method: 'POST',
@@ -444,45 +714,22 @@
})
.then(function (r) { return r.json(); })
.then(function (data) {
if (data.error) {
_integritySetScanning(false);
var container = document.getElementById('integrityResult');
var alert = document.getElementById('integrityResultAlert');
var title = document.getElementById('integrityResultTitle');
var body = document.getElementById('integrityResultBody');
container.classList.remove('d-none');
alert.className = 'alert alert-danger mb-0 small';
title.textContent = 'Error';
body.textContent = data.error;
return;
}
_integrityPollTimer = setTimeout(_integrityPoll, 2000);
})
.catch(function (err) {
_integritySetScanning(false);
var container = document.getElementById('integrityResult');
var alert = document.getElementById('integrityResultAlert');
var title = document.getElementById('integrityResultTitle');
@@ -491,13 +738,13 @@
alert.className = 'alert alert-danger mb-0 small';
title.textContent = 'Error';
body.textContent = err.message;
})
.finally(function () {
setLoading('integrityRunBtn', false);
setLoading('integrityHealBtn', false);
setLoading('integrityDryRunBtn', false);
});
};
{% if integrity_status.scanning %}
_integritySetScanning(true);
_integrityPollTimer = setTimeout(_integrityPoll, 2000);
{% endif %}
})();
</script>
{% endblock %}

View File

@@ -317,7 +317,7 @@ class TestAdminAPI:
)
assert resp.status_code == 200
data = resp.get_json()
assert "temp_files_deleted" in data
assert data["status"] == "started"
def test_gc_dry_run(self, gc_app):
client = gc_app.test_client()
@@ -329,11 +329,17 @@ class TestAdminAPI:
)
assert resp.status_code == 200
data = resp.get_json()
assert "temp_files_deleted" in data
assert data["status"] == "started"
def test_gc_history(self, gc_app):
import time
client = gc_app.test_client()
client.post("/admin/gc/run", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"})
for _ in range(50):
time.sleep(0.1)
status = client.get("/admin/gc/status", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"}).get_json()
if not status.get("scanning"):
break
resp = client.get("/admin/gc/history", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"})
assert resp.status_code == 200
data = resp.get_json()

View File

@@ -2,13 +2,25 @@ import hashlib
import json
import os
import sys
import time
from pathlib import Path
import pytest
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
from app.integrity import IntegrityChecker, IntegrityCursorStore, IntegrityResult
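# The run endpoints now return {"status": "started"} immediately, so tests
# poll the status endpoint until the background scan finishes.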
def _wait_scan_done(client, headers, timeout=10):
deadline = time.time() + timeout
while time.time() < deadline:
resp = client.get("/admin/integrity/status", headers=headers)
data = resp.get_json()
if not data.get("scanning"):
return
time.sleep(0.1)
raise TimeoutError("scan did not complete")
def _md5(data: bytes) -> str:
@@ -106,7 +118,7 @@ class TestCorruptedObjects:
result = checker.run_now()
assert result.corrupted_objects == 0
assert result.objects_scanned >= 1
def test_corrupted_nested_key(self, storage_root, checker):
_setup_bucket(storage_root, "mybucket", {"sub/dir/file.txt": b"nested content"})
@@ -413,8 +425,13 @@ class TestAdminAPI:
resp = client.post("/admin/integrity/run", headers=AUTH_HEADERS, json={})
assert resp.status_code == 200
data = resp.get_json()
assert "corrupted_objects" in data
assert "objects_scanned" in data
assert data["status"] == "started"
_wait_scan_done(client, AUTH_HEADERS)
resp = client.get("/admin/integrity/history?limit=1", headers=AUTH_HEADERS)
hist = resp.get_json()
assert len(hist["executions"]) >= 1
assert "corrupted_objects" in hist["executions"][0]["result"]
assert "objects_scanned" in hist["executions"][0]["result"]
def test_run_with_overrides(self, integrity_app):
client = integrity_app.test_client()
@@ -424,10 +441,12 @@ class TestAdminAPI:
json={"dry_run": True, "auto_heal": True},
)
assert resp.status_code == 200
_wait_scan_done(client, AUTH_HEADERS)
def test_history_endpoint(self, integrity_app):
client = integrity_app.test_client()
client.post("/admin/integrity/run", headers=AUTH_HEADERS, json={})
_wait_scan_done(client, AUTH_HEADERS)
resp = client.get("/admin/integrity/history", headers=AUTH_HEADERS)
assert resp.status_code == 200
data = resp.get_json()
@@ -484,7 +503,7 @@ class TestMultipleBuckets:
result = checker.run_now()
assert result.buckets_scanned == 2
assert result.objects_scanned >= 2
assert result.corrupted_objects == 0
@@ -497,3 +516,133 @@ class TestGetStatus:
assert "batch_size" in status
assert "auto_heal" in status
assert "dry_run" in status
def test_status_includes_cursor(self, storage_root, checker):
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
checker.run_now()
status = checker.get_status()
assert "cursor" in status
assert status["cursor"]["tracked_buckets"] == 1
assert "mybucket" in status["cursor"]["buckets"]
class TestUnifiedBatchCounter:
def test_orphaned_objects_count_toward_batch(self, storage_root):
_setup_bucket(storage_root, "mybucket", {})
for i in range(10):
(storage_root / "mybucket" / f"orphan{i}.txt").write_bytes(f"data{i}".encode())
checker = IntegrityChecker(storage_root=storage_root, batch_size=3)
result = checker.run_now()
assert result.objects_scanned <= 3
def test_phantom_metadata_counts_toward_batch(self, storage_root):
objects = {f"file{i}.txt": f"data{i}".encode() for i in range(10)}
_setup_bucket(storage_root, "mybucket", objects)
for i in range(10):
(storage_root / "mybucket" / f"file{i}.txt").unlink()
checker = IntegrityChecker(storage_root=storage_root, batch_size=5)
result = checker.run_now()
assert result.objects_scanned <= 5
def test_all_check_types_contribute(self, storage_root):
_setup_bucket(storage_root, "mybucket", {"valid.txt": b"hello"})
(storage_root / "mybucket" / "orphan.txt").write_bytes(b"orphan")
checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
result = checker.run_now()
assert result.objects_scanned > 2
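# Cursor rotation: each run scans the least-recently-scanned buckets first,
# persists per-bucket timestamps, and drops entries for deleted buckets.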
class TestCursorRotation:
def test_oldest_bucket_scanned_first(self, storage_root):
_setup_bucket(storage_root, "bucket-a", {"a.txt": b"aaa"})
_setup_bucket(storage_root, "bucket-b", {"b.txt": b"bbb"})
_setup_bucket(storage_root, "bucket-c", {"c.txt": b"ccc"})
checker = IntegrityChecker(storage_root=storage_root, batch_size=5)
checker.cursor_store.update_bucket("bucket-a", 1000.0)
checker.cursor_store.update_bucket("bucket-b", 3000.0)
checker.cursor_store.update_bucket("bucket-c", 2000.0)
ordered = checker.cursor_store.get_bucket_order(["bucket-a", "bucket-b", "bucket-c"])
assert ordered[0] == "bucket-a"
assert ordered[1] == "bucket-c"
assert ordered[2] == "bucket-b"
def test_never_scanned_buckets_first(self, storage_root):
_setup_bucket(storage_root, "bucket-old", {"a.txt": b"aaa"})
_setup_bucket(storage_root, "bucket-new", {"b.txt": b"bbb"})
checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
checker.cursor_store.update_bucket("bucket-old", time.time())
ordered = checker.cursor_store.get_bucket_order(["bucket-old", "bucket-new"])
assert ordered[0] == "bucket-new"
def test_rotation_covers_all_buckets(self, storage_root):
for name in ["bucket-a", "bucket-b", "bucket-c"]:
_setup_bucket(storage_root, name, {f"{name}.txt": name.encode()})
checker = IntegrityChecker(storage_root=storage_root, batch_size=4)
result1 = checker.run_now()
assert result1.buckets_scanned >= 1
# Two more passes rotate through the remaining buckets.
checker.run_now()
checker.run_now()
cursor_info = checker.cursor_store.get_info()
assert cursor_info["tracked_buckets"] == 3
def test_cursor_persistence(self, storage_root):
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
checker1 = IntegrityChecker(storage_root=storage_root, batch_size=1000)
checker1.run_now()
cursor1 = checker1.cursor_store.get_info()
assert cursor1["tracked_buckets"] == 1
assert "mybucket" in cursor1["buckets"]
checker2 = IntegrityChecker(storage_root=storage_root, batch_size=1000)
cursor2 = checker2.cursor_store.get_info()
assert cursor2["tracked_buckets"] == 1
assert "mybucket" in cursor2["buckets"]
def test_stale_cursor_cleanup(self, storage_root):
_setup_bucket(storage_root, "bucket-a", {"a.txt": b"aaa"})
_setup_bucket(storage_root, "bucket-b", {"b.txt": b"bbb"})
checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
checker.run_now()
import shutil
shutil.rmtree(storage_root / "bucket-b")
meta_b = storage_root / ".myfsio.sys" / "buckets" / "bucket-b"
if meta_b.exists():
shutil.rmtree(meta_b)
checker.run_now()
cursor_info = checker.cursor_store.get_info()
assert "bucket-b" not in cursor_info["buckets"]
assert "bucket-a" in cursor_info["buckets"]
def test_cursor_updates_after_scan(self, storage_root):
_setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
before = time.time()
checker.run_now()
after = time.time()
cursor_info = checker.cursor_store.get_info()
ts = cursor_info["buckets"]["mybucket"]
assert before <= ts <= after
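# A minimal sketch of the oldest-first ordering the rotation tests above rely
# on. IntegrityCursorStore's implementation is not part of this diff; this
# hypothetical stand-in only illustrates the contract the tests assert:
# never-scanned buckets order first, then buckets by oldest last-scan time.
def _sketch_get_bucket_order(last_scanned: dict, buckets: list) -> list:
    # Buckets with no recorded scan sort before any real timestamp.
    return sorted(buckets, key=lambda b: last_scanned.get(b, float("-inf")))
# Example: _sketch_get_bucket_order({"a": 1000.0, "b": 3000.0}, ["a", "b", "c"])
# returns ["c", "a", "b"], matching test_never_scanned_buckets_first and
# test_oldest_bucket_scanned_first.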