40 Commits

Author SHA1 Message Date
f0c95ac0a9 MyFSIO v0.4.1 Release
Reviewed-on: #34
2026-03-25 04:29:28 +00:00
0e392e18b4 Hide ghost details in object panel when preview fails to load 2026-03-24 15:15:03 +08:00
8996f1ce06 Fix folder selection not showing delete button in bucket browser 2026-03-24 12:10:38 +08:00
f60dbaf9c9 Respect DISPLAY_TIMEZONE in GC and integrity scanner history tables 2026-03-23 18:36:13 +08:00
1a5a7aa9e1 Auto-refresh Recent Scans/Executions tables after GC and integrity scan completion 2026-03-23 18:31:13 +08:00
326367ae4c Fix integrity scanner batch limit and add cursor-based rotation 2026-03-23 17:46:27 +08:00
a7f9b0a22f Convert GC to async with polling to prevent proxy timeouts 2026-03-23 17:14:04 +08:00
0e525713b1 Fix missing CSRF token on presigned URL request 2026-03-23 16:48:25 +08:00
f43fad02fb Replace fetch with XHR for multipart upload progress and add retry logic 2026-03-23 16:27:28 +08:00
eff3e378f3 Fix mobile infinite scroll on object list and ghost preview on fast object swap 2026-03-23 11:55:46 +08:00
5e32cef792 Add I/O throttling to GC and integrity scanner to prevent HDD starvation 2026-03-23 11:36:38 +08:00
9898167f8d Make integrity scan async with progress indicator in UI 2026-03-22 14:17:43 +08:00
8ff4797041 MyFSIO v0.4.0 Release
Reviewed-on: #33
2026-03-22 05:06:47 +00:00
4a553555d3 Clean up debug code 2026-03-22 11:38:29 +08:00
7a3202c996 Possible fix for the issue 2026-03-22 11:27:52 +08:00
bd20ca86ab Further debugging on s3 api issues on Granian 2026-03-22 11:22:24 +08:00
532cf95d59 Debug s3 api issues on Granian 2026-03-22 11:14:32 +08:00
366f8ce60d the middleware now also triggers when Content-Length is '0' but X-Amz-Decoded-Content-Length or aws-chunked headers indicate a body should be present 2026-03-22 00:24:04 +08:00
7612cb054a further fixes 2026-03-22 00:16:30 +08:00
966d524dca Fix 0-byte uploads caused by Granian stripping Expect header and missing CONTENT_LENGTH for chunked transfers 2026-03-22 00:04:55 +08:00
e84f1f1851 Fix SigV4 SignatureDoesNotMatch when Expect header is stripped by WSGI server 2026-03-21 23:48:19 +08:00
a059f0502d Fix 0-byte uploads caused by Granian default buffer size; Add SERVER_MAX_BUFFER_SIZE config 2026-03-21 22:57:48 +08:00
afd7173ba0 Fix buttons all showing Running state when only one action is triggered 2026-03-21 14:51:43 +08:00
c807bb2388 Update install/uninstall scripts for encrypted IAM config 2026-03-20 17:51:00 +08:00
aa4f9f5566 Bypass boto3 proxy for object streaming, read directly from storage layer; Add streaming object iterator to eliminate O(n²) directory rescanning on large buckets; Add iter_objects_shallow delegation to EncryptedObjectStorage 2026-03-20 17:35:10 +08:00
14786151e5 Fix selected object losing highlight on scroll in virtual list 2026-03-20 12:10:26 +08:00
a496862902 Fix stale object count on dashboard after deleting all objects in bucket 2026-03-17 23:25:30 +08:00
df4f27ca2e Fix IAM policy editor injecting prefix on existing policies without one 2026-03-15 16:04:35 +08:00
d72e0a347e Overhaul IAM: granular actions, multi-key users, prefix-scoped policies 2026-03-14 23:50:44 +08:00
6ed4b7d8ea Add System page: server info, feature flags, GC and integrity scanner UI 2026-03-14 20:27:57 +08:00
31ebbea680 Fix Docker healthcheck failure: Granian cannot run inside daemon process 2026-03-14 18:31:12 +08:00
d878134ebf Switch from Waitress to Granian (Rust/hyper WSGI server) for improved concurrency 2026-03-14 18:17:39 +08:00
50fb5aa387 MyFSIO v0.3.9 Release
Reviewed-on: #32
2026-03-14 09:44:14 +00:00
55568d6892 Fix video seekbar in static website hosting by adding HTTP Range request support 2026-03-10 22:21:55 +08:00
a4ae81c77c Add integrity scanner: background detection and healing of corrupted objects, orphaned files, phantom metadata, stale versions, etag cache inconsistencies, and legacy metadata drift 2026-03-10 22:14:39 +08:00
9da7104887 Redesign tags UI: split pills, grid editor with column headers, ghost delete buttons 2026-03-10 17:48:17 +08:00
cc161bf362 MyFSIO v0.3.8 Release
Reviewed-on: #31
2026-03-10 08:31:27 +00:00
de5377e5ac Add garbage collection: background cleanup of orphaned temp files, multipart uploads, lock files, metadata, versions, and empty directories 2026-03-09 17:34:21 +08:00
80b77b64eb Fix bucket dashboard missing created date and incorrect object count badge in folder view 2026-03-09 15:27:08 +08:00
6c912a3d71 Add conditional GET/HEAD headers: If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since 2026-03-09 15:09:15 +08:00
32 changed files with 5643 additions and 349 deletions

View File

@@ -18,6 +18,8 @@ from flask_cors import CORS
from flask_wtf.csrf import CSRFError
from werkzeug.middleware.proxy_fix import ProxyFix
import io
from .access_logging import AccessLoggingService
from .operation_metrics import OperationMetricsCollector, classify_endpoint
from .compression import GzipMiddleware
@@ -29,6 +31,8 @@ from .encryption import EncryptionManager
from .extensions import limiter, csrf
from .iam import IamService
from .kms import KMSManager
from .gc import GarbageCollector
from .integrity import IntegrityChecker
from .lifecycle import LifecycleManager
from .notifications import NotificationService
from .object_lock import ObjectLockService
@@ -42,6 +46,64 @@ from .website_domains import WebsiteDomainStore
_request_counter = itertools.count(1)
class _ChunkedTransferMiddleware:
    """WSGI shim that repairs PUT/POST bodies mangled by the WSGI server.

    Some servers (the surrounding commits name Granian) strip the
    ``Transfer-Encoding`` header or report ``Content-Length`` as ``0`` for
    chunked / aws-chunked uploads.  When that happens this middleware buffers
    the whole body into memory, re-exposes it as a seekable ``BytesIO`` and
    writes an accurate ``CONTENT_LENGTH`` so downstream signature validation
    and storage writes see the real payload.

    NOTE(review): buffering reads the entire request body into memory, so a
    very large chunked upload is bounded only by the server's buffer limits —
    presumably acceptable given SERVER_MAX_BUFFER_SIZE; confirm.
    """

    def __init__(self, app):
        # `app` is the next WSGI callable in the chain.
        self.app = app

    def __call__(self, environ, start_response):
        # Only body-carrying methods need fixing; everything else passes through.
        if environ.get("REQUEST_METHOD") not in ("PUT", "POST"):
            return self.app(environ, start_response)
        transfer_encoding = environ.get("HTTP_TRANSFER_ENCODING", "")
        content_length = environ.get("CONTENT_LENGTH")
        if "chunked" in transfer_encoding.lower():
            if content_length:
                # Length is already known: dropping the TE header is enough.
                del environ["HTTP_TRANSFER_ENCODING"]
            else:
                # No usable length: materialize the stream and measure it.
                raw = environ.get("wsgi.input")
                if raw:
                    try:
                        if hasattr(raw, "seek"):
                            raw.seek(0)
                        body = raw.read()
                    except Exception:
                        # Best-effort: treat an unreadable stream as empty.
                        body = b""
                    if body:
                        environ["wsgi.input"] = io.BytesIO(body)
                        environ["CONTENT_LENGTH"] = str(len(body))
                        del environ["HTTP_TRANSFER_ENCODING"]
        # Second pass: CONTENT_LENGTH missing/zero but other headers
        # (streaming SigV4 sha256, decoded length, aws-chunked encoding)
        # indicate a body should be present — buffer and re-measure.
        content_length = environ.get("CONTENT_LENGTH")
        if not content_length or content_length == "0":
            sha256 = environ.get("HTTP_X_AMZ_CONTENT_SHA256", "")
            decoded_len = environ.get("HTTP_X_AMZ_DECODED_CONTENT_LENGTH", "")
            content_encoding = environ.get("HTTP_CONTENT_ENCODING", "")
            if ("STREAMING" in sha256.upper() or decoded_len
                    or "aws-chunked" in content_encoding.lower()):
                raw = environ.get("wsgi.input")
                if raw:
                    try:
                        if hasattr(raw, "seek"):
                            raw.seek(0)
                        body = raw.read()
                    except Exception:
                        body = b""
                    if body:
                        environ["wsgi.input"] = io.BytesIO(body)
                        environ["CONTENT_LENGTH"] = str(len(body))
        # Rewind the (possibly replaced) stream so the app reads from byte 0.
        raw = environ.get("wsgi.input")
        if raw and hasattr(raw, "seek"):
            try:
                raw.seek(0)
            except Exception:
                pass
        return self.app(environ, start_response)
def _migrate_config_file(active_path: Path, legacy_paths: List[Path]) -> Path:
"""Migrate config file from legacy locations to the active path.
@@ -105,10 +167,11 @@ def create_app(
)
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=num_proxies, x_proto=num_proxies, x_host=num_proxies, x_prefix=num_proxies)
# Enable gzip compression for responses (10-20x smaller JSON payloads)
if app.config.get("ENABLE_GZIP", True):
app.wsgi_app = GzipMiddleware(app.wsgi_app, compression_level=6)
app.wsgi_app = _ChunkedTransferMiddleware(app.wsgi_app)
_configure_cors(app)
_configure_logging(app)
@@ -221,6 +284,31 @@ def create_app(
)
lifecycle_manager.start()
gc_collector = None
if app.config.get("GC_ENABLED", False):
gc_collector = GarbageCollector(
storage_root=storage_root,
interval_hours=app.config.get("GC_INTERVAL_HOURS", 6.0),
temp_file_max_age_hours=app.config.get("GC_TEMP_FILE_MAX_AGE_HOURS", 24.0),
multipart_max_age_days=app.config.get("GC_MULTIPART_MAX_AGE_DAYS", 7),
lock_file_max_age_hours=app.config.get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0),
dry_run=app.config.get("GC_DRY_RUN", False),
io_throttle_ms=app.config.get("GC_IO_THROTTLE_MS", 10),
)
gc_collector.start()
integrity_checker = None
if app.config.get("INTEGRITY_ENABLED", False):
integrity_checker = IntegrityChecker(
storage_root=storage_root,
interval_hours=app.config.get("INTEGRITY_INTERVAL_HOURS", 24.0),
batch_size=app.config.get("INTEGRITY_BATCH_SIZE", 1000),
auto_heal=app.config.get("INTEGRITY_AUTO_HEAL", False),
dry_run=app.config.get("INTEGRITY_DRY_RUN", False),
io_throttle_ms=app.config.get("INTEGRITY_IO_THROTTLE_MS", 10),
)
integrity_checker.start()
app.extensions["object_storage"] = storage
app.extensions["iam"] = iam
app.extensions["bucket_policies"] = bucket_policies
@@ -232,6 +320,8 @@ def create_app(
app.extensions["kms"] = kms_manager
app.extensions["acl"] = acl_service
app.extensions["lifecycle"] = lifecycle_manager
app.extensions["gc"] = gc_collector
app.extensions["integrity"] = integrity_checker
app.extensions["object_lock"] = object_lock_service
app.extensions["notifications"] = notification_service
app.extensions["access_logging"] = access_logging_service
@@ -535,30 +625,57 @@ def _configure_logging(app: Flask) -> None:
is_encrypted = "x-amz-server-side-encryption" in metadata
except (StorageError, OSError):
pass
if request.method == "HEAD":
response = Response(status=200)
if is_encrypted and hasattr(storage, "get_object_data"):
try:
data, _ = storage.get_object_data(bucket, object_key)
response.headers["Content-Length"] = len(data)
file_size = len(data)
except (StorageError, OSError):
return _website_error_response(500, "Internal Server Error")
else:
data = None
try:
stat = obj_path.stat()
response.headers["Content-Length"] = stat.st_size
file_size = stat.st_size
except OSError:
return _website_error_response(500, "Internal Server Error")
if request.method == "HEAD":
response = Response(status=200)
response.headers["Content-Length"] = file_size
response.headers["Content-Type"] = content_type
response.headers["Accept-Ranges"] = "bytes"
return response
if is_encrypted and hasattr(storage, "get_object_data"):
try:
data, _ = storage.get_object_data(bucket, object_key)
from .s3_api import _parse_range_header
range_header = request.headers.get("Range")
if range_header:
ranges = _parse_range_header(range_header, file_size)
if ranges is None:
return Response(status=416, headers={"Content-Range": f"bytes */{file_size}"})
start, end = ranges[0]
length = end - start + 1
if data is not None:
partial_data = data[start:end + 1]
response = Response(partial_data, status=206, mimetype=content_type)
else:
def _stream_range(file_path, start_pos, length_to_read):
with file_path.open("rb") as f:
f.seek(start_pos)
remaining = length_to_read
while remaining > 0:
chunk = f.read(min(262144, remaining))
if not chunk:
break
remaining -= len(chunk)
yield chunk
response = Response(_stream_range(obj_path, start, length), status=206, mimetype=content_type, direct_passthrough=True)
response.headers["Content-Range"] = f"bytes {start}-{end}/{file_size}"
response.headers["Content-Length"] = length
response.headers["Accept-Ranges"] = "bytes"
return response
if data is not None:
response = Response(data, mimetype=content_type)
response.headers["Content-Length"] = len(data)
response.headers["Content-Length"] = file_size
response.headers["Accept-Ranges"] = "bytes"
return response
except (StorageError, OSError):
return _website_error_response(500, "Internal Server Error")
def _stream(file_path):
with file_path.open("rb") as f:
while True:
@@ -566,13 +683,10 @@ def _configure_logging(app: Flask) -> None:
if not chunk:
break
yield chunk
try:
stat = obj_path.stat()
response = Response(_stream(obj_path), mimetype=content_type, direct_passthrough=True)
response.headers["Content-Length"] = stat.st_size
response.headers["Content-Length"] = file_size
response.headers["Accept-Ranges"] = "bytes"
return response
except OSError:
return _website_error_response(500, "Internal Server Error")
def _serve_website_error(storage, bucket, error_doc_key, status_code):
if not error_doc_key:
@@ -627,6 +741,7 @@ def _configure_logging(app: Flask) -> None:
},
)
response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"
response.headers["Server"] = "MyFSIO"
operation_metrics = app.extensions.get("operation_metrics")
if operation_metrics:

View File

@@ -14,6 +14,8 @@ from flask import Blueprint, Response, current_app, jsonify, request
from .connections import ConnectionStore
from .extensions import limiter
from .gc import GarbageCollector
from .integrity import IntegrityChecker
from .iam import IamError, Principal
from .replication import ReplicationManager
from .site_registry import PeerSite, SiteInfo, SiteRegistry
@@ -684,6 +686,107 @@ def _storage():
return current_app.extensions["object_storage"]
def _require_iam_action(action: str):
    """Return ``(principal, None)`` when the caller may perform *action*,
    otherwise ``(None, error_response)`` suitable for returning from a view."""
    caller, err = _require_principal()
    if err:
        return None, err
    try:
        _iam().authorize(caller, None, action)
    except IamError:
        return None, _json_error("AccessDenied", f"Requires {action} permission", 403)
    return caller, None
@admin_api_bp.route("/iam/users", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_list_users():
principal, error = _require_iam_action("iam:list_users")
if error:
return error
return jsonify({"users": _iam().list_users()})
@admin_api_bp.route("/iam/users/<identifier>", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_get_user(identifier):
principal, error = _require_iam_action("iam:get_user")
if error:
return error
try:
user_id = _iam().resolve_user_id(identifier)
return jsonify(_iam().get_user_by_id(user_id))
except IamError as exc:
return _json_error("NotFound", str(exc), 404)
@admin_api_bp.route("/iam/users/<identifier>/policies", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_get_user_policies(identifier):
principal, error = _require_iam_action("iam:get_policy")
if error:
return error
try:
return jsonify({"policies": _iam().get_user_policies(identifier)})
except IamError as exc:
return _json_error("NotFound", str(exc), 404)
@admin_api_bp.route("/iam/users/<identifier>/keys", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_create_access_key(identifier):
principal, error = _require_iam_action("iam:create_key")
if error:
return error
try:
result = _iam().create_access_key(identifier)
logger.info("Access key created for %s by %s", identifier, principal.access_key)
return jsonify(result), 201
except IamError as exc:
return _json_error("InvalidRequest", str(exc), 400)
@admin_api_bp.route("/iam/users/<identifier>/keys/<access_key>", methods=["DELETE"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_delete_access_key(identifier, access_key):
principal, error = _require_iam_action("iam:delete_key")
if error:
return error
try:
_iam().delete_access_key(access_key)
logger.info("Access key %s deleted by %s", access_key, principal.access_key)
return "", 204
except IamError as exc:
return _json_error("InvalidRequest", str(exc), 400)
@admin_api_bp.route("/iam/users/<identifier>/disable", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_disable_user(identifier):
principal, error = _require_iam_action("iam:disable_user")
if error:
return error
try:
_iam().disable_user(identifier)
logger.info("User %s disabled by %s", identifier, principal.access_key)
return jsonify({"status": "disabled"})
except IamError as exc:
return _json_error("InvalidRequest", str(exc), 400)
@admin_api_bp.route("/iam/users/<identifier>/enable", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def iam_enable_user(identifier):
principal, error = _require_iam_action("iam:disable_user")
if error:
return error
try:
_iam().enable_user(identifier)
logger.info("User %s enabled by %s", identifier, principal.access_key)
return jsonify({"status": "enabled"})
except IamError as exc:
return _json_error("InvalidRequest", str(exc), 400)
@admin_api_bp.route("/website-domains", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def list_website_domains():
@@ -776,3 +879,106 @@ def delete_website_domain(domain: str):
return _json_error("NotFound", f"No mapping found for domain '{domain}'", 404)
logger.info("Website domain mapping deleted: %s", domain)
return Response(status=204)
def _gc() -> Optional[GarbageCollector]:
    """Fetch the GarbageCollector from app extensions; None when GC is disabled."""
    extensions = current_app.extensions
    return extensions.get("gc")
@admin_api_bp.route("/gc/status", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def gc_status():
principal, error = _require_admin()
if error:
return error
gc = _gc()
if not gc:
return jsonify({"enabled": False, "message": "GC is not enabled. Set GC_ENABLED=true to enable."})
return jsonify(gc.get_status())
@admin_api_bp.route("/gc/run", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def gc_run_now():
principal, error = _require_admin()
if error:
return error
gc = _gc()
if not gc:
return _json_error("InvalidRequest", "GC is not enabled", 400)
payload = request.get_json(silent=True) or {}
started = gc.run_async(dry_run=payload.get("dry_run"))
logger.info("GC manual run by %s", principal.access_key)
if not started:
return _json_error("Conflict", "GC is already in progress", 409)
return jsonify({"status": "started"})
@admin_api_bp.route("/gc/history", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def gc_history():
principal, error = _require_admin()
if error:
return error
gc = _gc()
if not gc:
return jsonify({"executions": []})
limit = min(int(request.args.get("limit", 50)), 200)
offset = int(request.args.get("offset", 0))
records = gc.get_history(limit=limit, offset=offset)
return jsonify({"executions": records})
def _integrity() -> Optional[IntegrityChecker]:
    """Fetch the IntegrityChecker from app extensions; None when disabled."""
    extensions = current_app.extensions
    return extensions.get("integrity")
@admin_api_bp.route("/integrity/status", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def integrity_status():
principal, error = _require_admin()
if error:
return error
checker = _integrity()
if not checker:
return jsonify({"enabled": False, "message": "Integrity checker is not enabled. Set INTEGRITY_ENABLED=true to enable."})
return jsonify(checker.get_status())
@admin_api_bp.route("/integrity/run", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def integrity_run_now():
principal, error = _require_admin()
if error:
return error
checker = _integrity()
if not checker:
return _json_error("InvalidRequest", "Integrity checker is not enabled", 400)
payload = request.get_json(silent=True) or {}
override_dry_run = payload.get("dry_run")
override_auto_heal = payload.get("auto_heal")
started = checker.run_async(
auto_heal=override_auto_heal if override_auto_heal is not None else None,
dry_run=override_dry_run if override_dry_run is not None else None,
)
logger.info("Integrity manual run by %s", principal.access_key)
if not started:
return _json_error("Conflict", "A scan is already in progress", 409)
return jsonify({"status": "started"})
@admin_api_bp.route("/integrity/history", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def integrity_history():
principal, error = _require_admin()
if error:
return error
checker = _integrity()
if not checker:
return jsonify({"executions": []})
limit = min(int(request.args.get("limit", 50)), 200)
offset = int(request.args.get("offset", 0))
records = checker.get_history(limit=limit, offset=offset)
return jsonify({"executions": records})

View File

@@ -25,7 +25,7 @@ def _calculate_auto_connection_limit() -> int:
def _calculate_auto_backlog(connection_limit: int) -> int:
return max(64, min(connection_limit * 2, 4096))
return max(128, min(connection_limit * 2, 4096))
def _validate_rate_limit(value: str) -> str:
@@ -115,6 +115,7 @@ class AppConfig:
server_connection_limit: int
server_backlog: int
server_channel_timeout: int
server_max_buffer_size: int
server_threads_auto: bool
server_connection_limit_auto: bool
server_backlog_auto: bool
@@ -150,6 +151,19 @@ class AppConfig:
allowed_redirect_hosts: list[str]
allow_internal_endpoints: bool
website_hosting_enabled: bool
gc_enabled: bool
gc_interval_hours: float
gc_temp_file_max_age_hours: float
gc_multipart_max_age_days: int
gc_lock_file_max_age_hours: float
gc_dry_run: bool
gc_io_throttle_ms: int
integrity_enabled: bool
integrity_interval_hours: float
integrity_batch_size: int
integrity_auto_heal: bool
integrity_dry_run: bool
integrity_io_throttle_ms: int
@classmethod
def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
@@ -282,6 +296,7 @@ class AppConfig:
server_backlog_auto = False
server_channel_timeout = int(_get("SERVER_CHANNEL_TIMEOUT", 120))
server_max_buffer_size = int(_get("SERVER_MAX_BUFFER_SIZE", 1024 * 1024 * 128))
site_sync_enabled = str(_get("SITE_SYNC_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
site_sync_interval_seconds = int(_get("SITE_SYNC_INTERVAL_SECONDS", 60))
site_sync_batch_size = int(_get("SITE_SYNC_BATCH_SIZE", 100))
@@ -319,6 +334,19 @@ class AppConfig:
allowed_redirect_hosts = [h.strip() for h in str(allowed_redirect_hosts_raw).split(",") if h.strip()]
allow_internal_endpoints = str(_get("ALLOW_INTERNAL_ENDPOINTS", "0")).lower() in {"1", "true", "yes", "on"}
website_hosting_enabled = str(_get("WEBSITE_HOSTING_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
gc_enabled = str(_get("GC_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
gc_interval_hours = float(_get("GC_INTERVAL_HOURS", 6.0))
gc_temp_file_max_age_hours = float(_get("GC_TEMP_FILE_MAX_AGE_HOURS", 24.0))
gc_multipart_max_age_days = int(_get("GC_MULTIPART_MAX_AGE_DAYS", 7))
gc_lock_file_max_age_hours = float(_get("GC_LOCK_FILE_MAX_AGE_HOURS", 1.0))
gc_dry_run = str(_get("GC_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
gc_io_throttle_ms = int(_get("GC_IO_THROTTLE_MS", 10))
integrity_enabled = str(_get("INTEGRITY_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
integrity_interval_hours = float(_get("INTEGRITY_INTERVAL_HOURS", 24.0))
integrity_batch_size = int(_get("INTEGRITY_BATCH_SIZE", 1000))
integrity_auto_heal = str(_get("INTEGRITY_AUTO_HEAL", "0")).lower() in {"1", "true", "yes", "on"}
integrity_dry_run = str(_get("INTEGRITY_DRY_RUN", "0")).lower() in {"1", "true", "yes", "on"}
integrity_io_throttle_ms = int(_get("INTEGRITY_IO_THROTTLE_MS", 10))
return cls(storage_root=storage_root,
max_upload_size=max_upload_size,
@@ -372,6 +400,7 @@ class AppConfig:
server_connection_limit=server_connection_limit,
server_backlog=server_backlog,
server_channel_timeout=server_channel_timeout,
server_max_buffer_size=server_max_buffer_size,
server_threads_auto=server_threads_auto,
server_connection_limit_auto=server_connection_limit_auto,
server_backlog_auto=server_backlog_auto,
@@ -406,7 +435,20 @@ class AppConfig:
num_trusted_proxies=num_trusted_proxies,
allowed_redirect_hosts=allowed_redirect_hosts,
allow_internal_endpoints=allow_internal_endpoints,
website_hosting_enabled=website_hosting_enabled)
website_hosting_enabled=website_hosting_enabled,
gc_enabled=gc_enabled,
gc_interval_hours=gc_interval_hours,
gc_temp_file_max_age_hours=gc_temp_file_max_age_hours,
gc_multipart_max_age_days=gc_multipart_max_age_days,
gc_lock_file_max_age_hours=gc_lock_file_max_age_hours,
gc_dry_run=gc_dry_run,
gc_io_throttle_ms=gc_io_throttle_ms,
integrity_enabled=integrity_enabled,
integrity_interval_hours=integrity_interval_hours,
integrity_batch_size=integrity_batch_size,
integrity_auto_heal=integrity_auto_heal,
integrity_dry_run=integrity_dry_run,
integrity_io_throttle_ms=integrity_io_throttle_ms)
def validate_and_report(self) -> list[str]:
"""Validate configuration and return a list of warnings/issues.
@@ -471,10 +513,12 @@ class AppConfig:
issues.append(f"CRITICAL: SERVER_THREADS={self.server_threads} is outside valid range (1-64). Server cannot start.")
if not (10 <= self.server_connection_limit <= 1000):
issues.append(f"CRITICAL: SERVER_CONNECTION_LIMIT={self.server_connection_limit} is outside valid range (10-1000). Server cannot start.")
if not (64 <= self.server_backlog <= 4096):
issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (64-4096). Server cannot start.")
if not (128 <= self.server_backlog <= 4096):
issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (128-4096). Server cannot start.")
if not (10 <= self.server_channel_timeout <= 300):
issues.append(f"CRITICAL: SERVER_CHANNEL_TIMEOUT={self.server_channel_timeout} is outside valid range (10-300). Server cannot start.")
if self.server_max_buffer_size < 1024 * 1024:
issues.append(f"WARNING: SERVER_MAX_BUFFER_SIZE={self.server_max_buffer_size} is less than 1MB. Large uploads will fail.")
if sys.platform != "win32":
try:
@@ -520,6 +564,7 @@ class AppConfig:
print(f" CONNECTION_LIMIT: {self.server_connection_limit}{_auto(self.server_connection_limit_auto)}")
print(f" BACKLOG: {self.server_backlog}{_auto(self.server_backlog_auto)}")
print(f" CHANNEL_TIMEOUT: {self.server_channel_timeout}s")
print(f" MAX_BUFFER_SIZE: {self.server_max_buffer_size // (1024 * 1024)}MB")
print("=" * 60)
issues = self.validate_and_report()
@@ -585,6 +630,7 @@ class AppConfig:
"SERVER_CONNECTION_LIMIT": self.server_connection_limit,
"SERVER_BACKLOG": self.server_backlog,
"SERVER_CHANNEL_TIMEOUT": self.server_channel_timeout,
"SERVER_MAX_BUFFER_SIZE": self.server_max_buffer_size,
"SITE_SYNC_ENABLED": self.site_sync_enabled,
"SITE_SYNC_INTERVAL_SECONDS": self.site_sync_interval_seconds,
"SITE_SYNC_BATCH_SIZE": self.site_sync_batch_size,
@@ -617,4 +663,17 @@ class AppConfig:
"ALLOWED_REDIRECT_HOSTS": self.allowed_redirect_hosts,
"ALLOW_INTERNAL_ENDPOINTS": self.allow_internal_endpoints,
"WEBSITE_HOSTING_ENABLED": self.website_hosting_enabled,
"GC_ENABLED": self.gc_enabled,
"GC_INTERVAL_HOURS": self.gc_interval_hours,
"GC_TEMP_FILE_MAX_AGE_HOURS": self.gc_temp_file_max_age_hours,
"GC_MULTIPART_MAX_AGE_DAYS": self.gc_multipart_max_age_days,
"GC_LOCK_FILE_MAX_AGE_HOURS": self.gc_lock_file_max_age_hours,
"GC_DRY_RUN": self.gc_dry_run,
"GC_IO_THROTTLE_MS": self.gc_io_throttle_ms,
"INTEGRITY_ENABLED": self.integrity_enabled,
"INTEGRITY_INTERVAL_HOURS": self.integrity_interval_hours,
"INTEGRITY_BATCH_SIZE": self.integrity_batch_size,
"INTEGRITY_AUTO_HEAL": self.integrity_auto_heal,
"INTEGRITY_DRY_RUN": self.integrity_dry_run,
"INTEGRITY_IO_THROTTLE_MS": self.integrity_io_throttle_ms,
}

View File

@@ -193,6 +193,9 @@ class EncryptedObjectStorage:
    def list_objects_shallow(self, bucket_name: str, **kwargs):
        """Delegate a shallow (single-level) object listing to the wrapped backend."""
        return self.storage.list_objects_shallow(bucket_name, **kwargs)
    def iter_objects_shallow(self, bucket_name: str, **kwargs):
        """Delegate streaming shallow iteration to the wrapped backend.

        Lets callers stream large buckets without materializing the full
        listing; listing data comes straight from the underlying storage.
        """
        return self.storage.iter_objects_shallow(bucket_name, **kwargs)
    def search_objects(self, bucket_name: str, query: str, **kwargs):
        """Delegate object-name search for *query* to the wrapped backend."""
        return self.storage.search_objects(bucket_name, query, **kwargs)

View File

@@ -175,13 +175,21 @@ def handle_app_error(error: AppError) -> Response:
def handle_rate_limit_exceeded(e: RateLimitExceeded) -> Response:
g.s3_error_code = "SlowDown"
if request.path.startswith("/ui") or request.path.startswith("/buckets"):
wants_json = (
request.is_json or
request.headers.get("X-Requested-With") == "XMLHttpRequest" or
"application/json" in request.accept_mimetypes.values()
)
if wants_json:
return jsonify({"success": False, "error": {"code": "SlowDown", "message": "Please reduce your request rate."}}), 429
error = Element("Error")
SubElement(error, "Code").text = "SlowDown"
SubElement(error, "Message").text = "Please reduce your request rate."
SubElement(error, "Resource").text = request.path
SubElement(error, "RequestId").text = getattr(g, "request_id", "")
xml_bytes = tostring(error, encoding="utf-8")
return Response(xml_bytes, status=429, mimetype="application/xml")
return Response(xml_bytes, status="429 Too Many Requests", mimetype="application/xml")
def register_error_handlers(app):

596
app/gc.py Normal file
View File

@@ -0,0 +1,596 @@
from __future__ import annotations
import json
import logging
import os
import shutil
import threading
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
@dataclass
class GCResult:
    """Aggregated counters for a single garbage-collection pass."""

    temp_files_deleted: int = 0
    temp_bytes_freed: int = 0
    multipart_uploads_deleted: int = 0
    multipart_bytes_freed: int = 0
    lock_files_deleted: int = 0
    orphaned_metadata_deleted: int = 0
    orphaned_versions_deleted: int = 0
    orphaned_version_bytes_freed: int = 0
    empty_dirs_removed: int = 0
    errors: List[str] = field(default_factory=list)
    execution_time_seconds: float = 0.0

    def to_dict(self) -> dict:
        """JSON-serializable snapshot of every counter."""
        return {
            "temp_files_deleted": self.temp_files_deleted,
            "temp_bytes_freed": self.temp_bytes_freed,
            "multipart_uploads_deleted": self.multipart_uploads_deleted,
            "multipart_bytes_freed": self.multipart_bytes_freed,
            "lock_files_deleted": self.lock_files_deleted,
            "orphaned_metadata_deleted": self.orphaned_metadata_deleted,
            "orphaned_versions_deleted": self.orphaned_versions_deleted,
            "orphaned_version_bytes_freed": self.orphaned_version_bytes_freed,
            "empty_dirs_removed": self.empty_dirs_removed,
            "errors": self.errors,
            "execution_time_seconds": self.execution_time_seconds,
        }

    @property
    def total_bytes_freed(self) -> int:
        """Sum of all byte counters across cleanup categories."""
        return self.temp_bytes_freed + self.multipart_bytes_freed + self.orphaned_version_bytes_freed

    @property
    def has_work(self) -> bool:
        """True when the pass deleted or removed anything at all."""
        deletion_counters = (
            self.temp_files_deleted,
            self.multipart_uploads_deleted,
            self.lock_files_deleted,
            self.orphaned_metadata_deleted,
            self.orphaned_versions_deleted,
            self.empty_dirs_removed,
        )
        return any(count > 0 for count in deletion_counters)
@dataclass
class GCExecutionRecord:
    """One persisted entry in the GC run history."""

    timestamp: float  # epoch seconds when the run finished
    result: dict      # GCResult.to_dict() payload
    dry_run: bool     # whether the run only reported, without deleting

    def to_dict(self) -> dict:
        """Serialize for JSON storage."""
        return {"timestamp": self.timestamp, "result": self.result, "dry_run": self.dry_run}

    @classmethod
    def from_dict(cls, data: dict) -> GCExecutionRecord:
        """Rebuild a record from its stored form; missing dry_run defaults to False."""
        return cls(
            timestamp=data["timestamp"],
            result=data["result"],
            dry_run=data.get("dry_run", False),
        )
class GCHistoryStore:
    """JSON-file-backed store of the most recent GC execution records.

    Records live under ``<storage_root>/.myfsio.sys/config/gc_history.json``,
    newest first, truncated to ``max_records`` on save.
    """

    def __init__(self, storage_root: Path, max_records: int = 50) -> None:
        self.storage_root = storage_root
        self.max_records = max_records
        # Serializes read-modify-write cycles (add) and reads (get_history).
        self._lock = threading.Lock()

    def _get_path(self) -> Path:
        """Location of the history file inside the system config directory."""
        return self.storage_root / ".myfsio.sys" / "config" / "gc_history.json"

    def load(self) -> List[GCExecutionRecord]:
        """Read all stored records; returns [] on any read/parse failure."""
        path = self._get_path()
        if not path.exists():
            return []
        try:
            with open(path, "r", encoding="utf-8") as f:
                data = json.load(f)
            return [GCExecutionRecord.from_dict(d) for d in data.get("executions", [])]
        except (OSError, ValueError, KeyError) as e:
            logger.error("Failed to load GC history: %s", e)
            return []

    def save(self, records: List[GCExecutionRecord]) -> None:
        """Persist up to max_records records atomically (write temp, then rename).

        Writing through a temp file + os.replace prevents a crash mid-write
        from leaving a truncated/corrupt history file behind.
        """
        path = self._get_path()
        path.parent.mkdir(parents=True, exist_ok=True)
        data = {"executions": [r.to_dict() for r in records[: self.max_records]]}
        tmp_path = path.with_name(path.name + ".tmp")
        try:
            with open(tmp_path, "w", encoding="utf-8") as f:
                json.dump(data, f, indent=2)
            os.replace(tmp_path, path)  # atomic on both POSIX and Windows
        except OSError as e:
            logger.error("Failed to save GC history: %s", e)

    def add(self, record: GCExecutionRecord) -> None:
        """Prepend *record* (newest-first) and persist."""
        with self._lock:
            records = self.load()
            records.insert(0, record)
            self.save(records)

    def get_history(self, limit: int = 50, offset: int = 0) -> List[GCExecutionRecord]:
        """Return a page of records; locked so it can't race a concurrent add."""
        with self._lock:
            return self.load()[offset : offset + limit]
def _dir_size(path: Path) -> int:
total = 0
try:
for f in path.rglob("*"):
if f.is_file():
try:
total += f.stat().st_size
except OSError:
pass
except OSError:
pass
return total
def _file_age_hours(path: Path) -> float:
try:
mtime = path.stat().st_mtime
return (time.time() - mtime) / 3600.0
except OSError:
return 0.0
class GarbageCollector:
SYSTEM_ROOT = ".myfsio.sys"
SYSTEM_TMP_DIR = "tmp"
SYSTEM_MULTIPART_DIR = "multipart"
SYSTEM_BUCKETS_DIR = "buckets"
BUCKET_META_DIR = "meta"
BUCKET_VERSIONS_DIR = "versions"
INTERNAL_FOLDERS = {".meta", ".versions", ".multipart"}
    def __init__(
        self,
        storage_root: Path,
        interval_hours: float = 6.0,
        temp_file_max_age_hours: float = 24.0,
        multipart_max_age_days: int = 7,
        lock_file_max_age_hours: float = 1.0,
        dry_run: bool = False,
        max_history: int = 50,
        io_throttle_ms: int = 10,
    ) -> None:
        """Configure the collector.

        Args:
            storage_root: root directory of the object store to clean.
            interval_hours: delay between automatic background cycles.
            temp_file_max_age_hours: temp files older than this are deleted.
            multipart_max_age_days: abandoned multipart uploads older than this are deleted.
            lock_file_max_age_hours: stale lock files older than this are deleted.
            dry_run: when True, report what would be deleted without deleting.
            max_history: number of execution records kept in the history store.
            io_throttle_ms: pause between I/O operations (HDD-starvation guard;
                clamped to >= 0 and stored in seconds).
        """
        self.storage_root = Path(storage_root)
        self.interval_seconds = interval_hours * 3600.0
        self.temp_file_max_age_hours = temp_file_max_age_hours
        self.multipart_max_age_days = multipart_max_age_days
        self.lock_file_max_age_hours = lock_file_max_age_hours
        self.dry_run = dry_run
        # Timer for the next scheduled cycle; None when not started.
        self._timer: Optional[threading.Timer] = None
        self._shutdown = False
        # Non-blocking lock: ensures only one GC pass runs at a time.
        self._lock = threading.Lock()
        self._scanning = False
        self._scan_start_time: Optional[float] = None
        self._io_throttle = max(0, io_throttle_ms) / 1000.0
        self.history_store = GCHistoryStore(storage_root, max_records=max_history)
def start(self) -> None:
if self._timer is not None:
return
self._shutdown = False
self._schedule_next()
logger.info(
"GC started: interval=%.1fh, temp_max_age=%.1fh, multipart_max_age=%dd, lock_max_age=%.1fh, dry_run=%s",
self.interval_seconds / 3600.0,
self.temp_file_max_age_hours,
self.multipart_max_age_days,
self.lock_file_max_age_hours,
self.dry_run,
)
def stop(self) -> None:
self._shutdown = True
if self._timer:
self._timer.cancel()
self._timer = None
logger.info("GC stopped")
def _schedule_next(self) -> None:
if self._shutdown:
return
self._timer = threading.Timer(self.interval_seconds, self._run_cycle)
self._timer.daemon = True
self._timer.start()
def _run_cycle(self) -> None:
if self._shutdown:
return
try:
self.run_now()
except Exception as e:
logger.error("GC cycle failed: %s", e)
finally:
self._schedule_next()
def run_now(self, dry_run: Optional[bool] = None) -> GCResult:
if not self._lock.acquire(blocking=False):
raise RuntimeError("GC is already in progress")
effective_dry_run = dry_run if dry_run is not None else self.dry_run
try:
self._scanning = True
self._scan_start_time = time.time()
start = self._scan_start_time
result = GCResult()
original_dry_run = self.dry_run
self.dry_run = effective_dry_run
try:
self._clean_temp_files(result)
self._clean_orphaned_multipart(result)
self._clean_stale_locks(result)
self._clean_orphaned_metadata(result)
self._clean_orphaned_versions(result)
self._clean_empty_dirs(result)
finally:
self.dry_run = original_dry_run
result.execution_time_seconds = time.time() - start
if result.has_work or result.errors:
logger.info(
"GC completed in %.2fs: temp=%d (%.1f MB), multipart=%d (%.1f MB), "
"locks=%d, meta=%d, versions=%d (%.1f MB), dirs=%d, errors=%d%s",
result.execution_time_seconds,
result.temp_files_deleted,
result.temp_bytes_freed / (1024 * 1024),
result.multipart_uploads_deleted,
result.multipart_bytes_freed / (1024 * 1024),
result.lock_files_deleted,
result.orphaned_metadata_deleted,
result.orphaned_versions_deleted,
result.orphaned_version_bytes_freed / (1024 * 1024),
result.empty_dirs_removed,
len(result.errors),
" (dry run)" if effective_dry_run else "",
)
record = GCExecutionRecord(
timestamp=time.time(),
result=result.to_dict(),
dry_run=effective_dry_run,
)
self.history_store.add(record)
return result
finally:
self._scanning = False
self._scan_start_time = None
self._lock.release()
def run_async(self, dry_run: Optional[bool] = None) -> bool:
if self._scanning:
return False
t = threading.Thread(target=self.run_now, args=(dry_run,), daemon=True)
t.start()
return True
def _system_path(self) -> Path:
return self.storage_root / self.SYSTEM_ROOT
def _throttle(self) -> bool:
if self._shutdown:
return True
if self._io_throttle > 0:
time.sleep(self._io_throttle)
return self._shutdown
def _list_bucket_names(self) -> List[str]:
names = []
try:
for entry in self.storage_root.iterdir():
if entry.is_dir() and entry.name != self.SYSTEM_ROOT:
names.append(entry.name)
except OSError:
pass
return names
def _clean_temp_files(self, result: GCResult) -> None:
tmp_dir = self._system_path() / self.SYSTEM_TMP_DIR
if not tmp_dir.exists():
return
try:
for entry in tmp_dir.iterdir():
if self._throttle():
return
if not entry.is_file():
continue
age = _file_age_hours(entry)
if age < self.temp_file_max_age_hours:
continue
try:
size = entry.stat().st_size
if not self.dry_run:
entry.unlink()
result.temp_files_deleted += 1
result.temp_bytes_freed += size
except OSError as e:
result.errors.append(f"temp file {entry.name}: {e}")
except OSError as e:
result.errors.append(f"scan tmp dir: {e}")
def _clean_orphaned_multipart(self, result: GCResult) -> None:
cutoff_hours = self.multipart_max_age_days * 24.0
bucket_names = self._list_bucket_names()
for bucket_name in bucket_names:
if self._shutdown:
return
for multipart_root in (
self._system_path() / self.SYSTEM_MULTIPART_DIR / bucket_name,
self.storage_root / bucket_name / ".multipart",
):
if not multipart_root.exists():
continue
try:
for upload_dir in multipart_root.iterdir():
if self._throttle():
return
if not upload_dir.is_dir():
continue
self._maybe_clean_upload(upload_dir, cutoff_hours, result)
except OSError as e:
result.errors.append(f"scan multipart {bucket_name}: {e}")
def _maybe_clean_upload(self, upload_dir: Path, cutoff_hours: float, result: GCResult) -> None:
manifest_path = upload_dir / "manifest.json"
age = _file_age_hours(manifest_path) if manifest_path.exists() else _file_age_hours(upload_dir)
if age < cutoff_hours:
return
dir_bytes = _dir_size(upload_dir)
try:
if not self.dry_run:
shutil.rmtree(upload_dir, ignore_errors=True)
result.multipart_uploads_deleted += 1
result.multipart_bytes_freed += dir_bytes
except OSError as e:
result.errors.append(f"multipart {upload_dir.name}: {e}")
def _clean_stale_locks(self, result: GCResult) -> None:
buckets_root = self._system_path() / self.SYSTEM_BUCKETS_DIR
if not buckets_root.exists():
return
try:
for bucket_dir in buckets_root.iterdir():
if self._shutdown:
return
if not bucket_dir.is_dir():
continue
locks_dir = bucket_dir / "locks"
if not locks_dir.exists():
continue
try:
for lock_file in locks_dir.iterdir():
if self._throttle():
return
if not lock_file.is_file() or not lock_file.name.endswith(".lock"):
continue
age = _file_age_hours(lock_file)
if age < self.lock_file_max_age_hours:
continue
try:
if not self.dry_run:
lock_file.unlink(missing_ok=True)
result.lock_files_deleted += 1
except OSError as e:
result.errors.append(f"lock {lock_file.name}: {e}")
except OSError as e:
result.errors.append(f"scan locks {bucket_dir.name}: {e}")
except OSError as e:
result.errors.append(f"scan buckets for locks: {e}")
def _clean_orphaned_metadata(self, result: GCResult) -> None:
bucket_names = self._list_bucket_names()
for bucket_name in bucket_names:
if self._shutdown:
return
legacy_meta = self.storage_root / bucket_name / ".meta"
if legacy_meta.exists():
self._clean_legacy_metadata(bucket_name, legacy_meta, result)
new_meta = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
if new_meta.exists():
self._clean_index_metadata(bucket_name, new_meta, result)
def _clean_legacy_metadata(self, bucket_name: str, meta_root: Path, result: GCResult) -> None:
bucket_path = self.storage_root / bucket_name
try:
for meta_file in meta_root.rglob("*.meta.json"):
if self._throttle():
return
if not meta_file.is_file():
continue
try:
rel = meta_file.relative_to(meta_root)
object_key = rel.as_posix().removesuffix(".meta.json")
object_path = bucket_path / object_key
if not object_path.exists():
if not self.dry_run:
meta_file.unlink(missing_ok=True)
result.orphaned_metadata_deleted += 1
except (OSError, ValueError) as e:
result.errors.append(f"legacy meta {bucket_name}/{meta_file.name}: {e}")
except OSError as e:
result.errors.append(f"scan legacy meta {bucket_name}: {e}")
def _clean_index_metadata(self, bucket_name: str, meta_root: Path, result: GCResult) -> None:
bucket_path = self.storage_root / bucket_name
try:
for index_file in meta_root.rglob("_index.json"):
if self._throttle():
return
if not index_file.is_file():
continue
try:
with open(index_file, "r", encoding="utf-8") as f:
index_data = json.load(f)
except (OSError, json.JSONDecodeError):
continue
keys_to_remove = []
for key in index_data:
rel_dir = index_file.parent.relative_to(meta_root)
if rel_dir == Path("."):
full_key = key
else:
full_key = rel_dir.as_posix() + "/" + key
object_path = bucket_path / full_key
if not object_path.exists():
keys_to_remove.append(key)
if keys_to_remove:
if not self.dry_run:
for k in keys_to_remove:
index_data.pop(k, None)
if index_data:
try:
with open(index_file, "w", encoding="utf-8") as f:
json.dump(index_data, f)
except OSError as e:
result.errors.append(f"write index {bucket_name}: {e}")
continue
else:
try:
index_file.unlink(missing_ok=True)
except OSError:
pass
result.orphaned_metadata_deleted += len(keys_to_remove)
except OSError as e:
result.errors.append(f"scan index meta {bucket_name}: {e}")
def _clean_orphaned_versions(self, result: GCResult) -> None:
bucket_names = self._list_bucket_names()
for bucket_name in bucket_names:
if self._shutdown:
return
bucket_path = self.storage_root / bucket_name
for versions_root in (
self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR,
self.storage_root / bucket_name / ".versions",
):
if not versions_root.exists():
continue
try:
for key_dir in versions_root.iterdir():
if self._throttle():
return
if not key_dir.is_dir():
continue
self._clean_versions_for_key(bucket_path, versions_root, key_dir, result)
except OSError as e:
result.errors.append(f"scan versions {bucket_name}: {e}")
def _clean_versions_for_key(
self, bucket_path: Path, versions_root: Path, key_dir: Path, result: GCResult
) -> None:
try:
rel = key_dir.relative_to(versions_root)
except ValueError:
return
object_path = bucket_path / rel
if object_path.exists():
return
version_files = list(key_dir.glob("*.bin")) + list(key_dir.glob("*.json"))
if not version_files:
return
for vf in version_files:
try:
size = vf.stat().st_size if vf.suffix == ".bin" else 0
if not self.dry_run:
vf.unlink(missing_ok=True)
if vf.suffix == ".bin":
result.orphaned_version_bytes_freed += size
result.orphaned_versions_deleted += 1
except OSError as e:
result.errors.append(f"version file {vf.name}: {e}")
def _clean_empty_dirs(self, result: GCResult) -> None:
targets = [
self._system_path() / self.SYSTEM_TMP_DIR,
self._system_path() / self.SYSTEM_MULTIPART_DIR,
self._system_path() / self.SYSTEM_BUCKETS_DIR,
]
for bucket_name in self._list_bucket_names():
targets.append(self.storage_root / bucket_name / ".meta")
targets.append(self.storage_root / bucket_name / ".versions")
targets.append(self.storage_root / bucket_name / ".multipart")
for root in targets:
if not root.exists():
continue
self._remove_empty_dirs_recursive(root, root, result)
def _remove_empty_dirs_recursive(self, path: Path, stop_at: Path, result: GCResult) -> bool:
if self._shutdown:
return False
if not path.is_dir():
return False
try:
children = list(path.iterdir())
except OSError:
return False
all_empty = True
for child in children:
if self._throttle():
return False
if child.is_dir():
if not self._remove_empty_dirs_recursive(child, stop_at, result):
all_empty = False
else:
all_empty = False
if all_empty and path != stop_at:
try:
if not self.dry_run:
path.rmdir()
result.empty_dirs_removed += 1
return True
except OSError:
return False
return all_empty
def get_history(self, limit: int = 50, offset: int = 0) -> List[dict]:
records = self.history_store.get_history(limit, offset)
return [r.to_dict() for r in records]
def get_status(self) -> dict:
status: Dict[str, Any] = {
"enabled": not self._shutdown or self._timer is not None,
"running": self._timer is not None and not self._shutdown,
"scanning": self._scanning,
"interval_hours": self.interval_seconds / 3600.0,
"temp_file_max_age_hours": self.temp_file_max_age_hours,
"multipart_max_age_days": self.multipart_max_age_days,
"lock_file_max_age_hours": self.lock_file_max_age_hours,
"dry_run": self.dry_run,
"io_throttle_ms": round(self._io_throttle * 1000),
}
if self._scanning and self._scan_start_time:
status["scan_elapsed_seconds"] = time.time() - self._scan_start_time
return status

View File

@@ -10,7 +10,7 @@ import secrets
import threading
import time
from collections import deque
from dataclasses import dataclass
from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from pathlib import Path
from typing import Any, Deque, Dict, Iterable, List, Optional, Sequence, Set, Tuple
@@ -22,16 +22,37 @@ class IamError(RuntimeError):
"""Raised when authentication or authorization fails."""
S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication", "lifecycle", "cors"}
S3_ACTIONS = {
"list", "read", "write", "delete", "share", "policy",
"replication", "lifecycle", "cors",
"create_bucket", "delete_bucket",
"versioning", "tagging", "encryption", "quota",
"object_lock", "notification", "logging", "website",
}
IAM_ACTIONS = {
"iam:list_users",
"iam:create_user",
"iam:delete_user",
"iam:rotate_key",
"iam:update_policy",
"iam:create_key",
"iam:delete_key",
"iam:get_user",
"iam:get_policy",
"iam:disable_user",
}
ALLOWED_ACTIONS = (S3_ACTIONS | IAM_ACTIONS) | {"iam:*"}
_V1_IMPLIED_ACTIONS = {
"write": {"create_bucket"},
"delete": {"delete_bucket"},
"policy": {
"versioning", "tagging", "encryption", "quota",
"object_lock", "notification", "logging", "website",
"cors", "lifecycle", "replication", "share",
},
}
ACTION_ALIASES = {
"list": "list",
"s3:listbucket": "list",
@@ -45,14 +66,11 @@ ACTION_ALIASES = {
"s3:getobjecttagging": "read",
"s3:getobjectversiontagging": "read",
"s3:getobjectacl": "read",
"s3:getbucketversioning": "read",
"s3:headobject": "read",
"s3:headbucket": "read",
"write": "write",
"s3:putobject": "write",
"s3:createbucket": "write",
"s3:putobjecttagging": "write",
"s3:putbucketversioning": "write",
"s3:createmultipartupload": "write",
"s3:uploadpart": "write",
"s3:completemultipartupload": "write",
@@ -61,8 +79,11 @@ ACTION_ALIASES = {
"delete": "delete",
"s3:deleteobject": "delete",
"s3:deleteobjectversion": "delete",
"s3:deletebucket": "delete",
"s3:deleteobjecttagging": "delete",
"create_bucket": "create_bucket",
"s3:createbucket": "create_bucket",
"delete_bucket": "delete_bucket",
"s3:deletebucket": "delete_bucket",
"share": "share",
"s3:putobjectacl": "share",
"s3:putbucketacl": "share",
@@ -88,11 +109,50 @@ ACTION_ALIASES = {
"s3:getbucketcors": "cors",
"s3:putbucketcors": "cors",
"s3:deletebucketcors": "cors",
"versioning": "versioning",
"s3:getbucketversioning": "versioning",
"s3:putbucketversioning": "versioning",
"tagging": "tagging",
"s3:getbuckettagging": "tagging",
"s3:putbuckettagging": "tagging",
"s3:deletebuckettagging": "tagging",
"encryption": "encryption",
"s3:getencryptionconfiguration": "encryption",
"s3:putencryptionconfiguration": "encryption",
"s3:deleteencryptionconfiguration": "encryption",
"quota": "quota",
"s3:getbucketquota": "quota",
"s3:putbucketquota": "quota",
"s3:deletebucketquota": "quota",
"object_lock": "object_lock",
"s3:getobjectlockconfiguration": "object_lock",
"s3:putobjectlockconfiguration": "object_lock",
"s3:putobjectretention": "object_lock",
"s3:getobjectretention": "object_lock",
"s3:putobjectlegalhold": "object_lock",
"s3:getobjectlegalhold": "object_lock",
"notification": "notification",
"s3:getbucketnotificationconfiguration": "notification",
"s3:putbucketnotificationconfiguration": "notification",
"s3:deletebucketnotificationconfiguration": "notification",
"logging": "logging",
"s3:getbucketlogging": "logging",
"s3:putbucketlogging": "logging",
"s3:deletebucketlogging": "logging",
"website": "website",
"s3:getbucketwebsite": "website",
"s3:putbucketwebsite": "website",
"s3:deletebucketwebsite": "website",
"iam:listusers": "iam:list_users",
"iam:createuser": "iam:create_user",
"iam:deleteuser": "iam:delete_user",
"iam:rotateaccesskey": "iam:rotate_key",
"iam:putuserpolicy": "iam:update_policy",
"iam:createaccesskey": "iam:create_key",
"iam:deleteaccesskey": "iam:delete_key",
"iam:getuser": "iam:get_user",
"iam:getpolicy": "iam:get_policy",
"iam:disableuser": "iam:disable_user",
"iam:*": "iam:*",
}
@@ -101,6 +161,7 @@ ACTION_ALIASES = {
class Policy:
bucket: str
actions: Set[str]
prefix: str = "*"
@dataclass
@@ -117,6 +178,16 @@ def _derive_fernet_key(secret: str) -> bytes:
_IAM_ENCRYPTED_PREFIX = b"MYFSIO_IAM_ENC:"
_CONFIG_VERSION = 2
def _expand_v1_actions(actions: Set[str]) -> Set[str]:
expanded = set(actions)
for action, implied in _V1_IMPLIED_ACTIONS.items():
if action in expanded:
expanded.update(implied)
return expanded
class IamService:
"""Loads IAM configuration, manages users, and evaluates policies."""
@@ -131,7 +202,10 @@ class IamService:
self.config_path.parent.mkdir(parents=True, exist_ok=True)
if not self.config_path.exists():
self._write_default()
self._users: Dict[str, Dict[str, Any]] = {}
self._user_records: Dict[str, Dict[str, Any]] = {}
self._key_index: Dict[str, str] = {}
self._key_secrets: Dict[str, str] = {}
self._key_status: Dict[str, str] = {}
self._raw_config: Dict[str, Any] = {}
self._failed_attempts: Dict[str, Deque[datetime]] = {}
self._last_load_time = 0.0
@@ -146,7 +220,6 @@ class IamService:
self._load_lockout_state()
def _maybe_reload(self) -> None:
"""Reload configuration if the file has changed on disk."""
now = time.time()
if now - self._last_stat_check < self._stat_check_interval:
return
@@ -183,11 +256,20 @@ class IamService:
raise IamError(
f"Access temporarily locked. Try again in {seconds} seconds."
)
record = self._users.get(access_key)
stored_secret = record["secret_key"] if record else secrets.token_urlsafe(24)
if not record or not hmac.compare_digest(stored_secret, secret_key):
user_id = self._key_index.get(access_key)
stored_secret = self._key_secrets.get(access_key, secrets.token_urlsafe(24))
if not user_id or not hmac.compare_digest(stored_secret, secret_key):
self._record_failed_attempt(access_key)
raise IamError("Invalid credentials")
key_status = self._key_status.get(access_key, "active")
if key_status != "active":
raise IamError("Access key is inactive")
record = self._user_records.get(user_id)
if not record:
self._record_failed_attempt(access_key)
raise IamError("Invalid credentials")
if not record.get("enabled", True):
raise IamError("User account is disabled")
self._check_expiry(access_key, record)
self._clear_failed_attempts(access_key)
return self._build_principal(access_key, record)
@@ -215,7 +297,6 @@ class IamService:
return self.config_path.parent / "lockout_state.json"
def _load_lockout_state(self) -> None:
"""Load lockout state from disk."""
try:
if self._lockout_file().exists():
data = json.loads(self._lockout_file().read_text(encoding="utf-8"))
@@ -235,7 +316,6 @@ class IamService:
pass
def _save_lockout_state(self) -> None:
"""Persist lockout state to disk."""
data: Dict[str, Any] = {"failed_attempts": {}}
for key, attempts in self._failed_attempts.items():
data["failed_attempts"][key] = [ts.isoformat() for ts in attempts]
@@ -270,10 +350,9 @@ class IamService:
return int(max(0, self.auth_lockout_window.total_seconds() - elapsed))
def create_session_token(self, access_key: str, duration_seconds: int = 3600) -> str:
"""Create a temporary session token for an access key."""
self._maybe_reload()
record = self._users.get(access_key)
if not record:
user_id = self._key_index.get(access_key)
if not user_id or user_id not in self._user_records:
raise IamError("Unknown access key")
self._cleanup_expired_sessions()
token = secrets.token_urlsafe(32)
@@ -285,7 +364,6 @@ class IamService:
return token
def validate_session_token(self, access_key: str, session_token: str) -> bool:
"""Validate a session token for an access key (thread-safe, constant-time)."""
dummy_key = secrets.token_urlsafe(16)
dummy_token = secrets.token_urlsafe(32)
with self._session_lock:
@@ -304,7 +382,6 @@ class IamService:
return True
def _cleanup_expired_sessions(self) -> None:
"""Remove expired session tokens."""
now = time.time()
expired = [token for token, data in self._sessions.items() if now > data["expires_at"]]
for token in expired:
@@ -316,13 +393,18 @@ class IamService:
if cached:
principal, cached_time = cached
if now - cached_time < self._cache_ttl:
record = self._users.get(access_key)
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
return principal
self._maybe_reload()
record = self._users.get(access_key)
user_id = self._key_index.get(access_key)
if not user_id:
raise IamError("Unknown access key")
record = self._user_records.get(user_id)
if not record:
raise IamError("Unknown access key")
self._check_expiry(access_key, record)
@@ -332,22 +414,26 @@ class IamService:
def secret_for_key(self, access_key: str) -> str:
self._maybe_reload()
record = self._users.get(access_key)
if not record:
secret = self._key_secrets.get(access_key)
if not secret:
raise IamError("Unknown access key")
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
return record["secret_key"]
return secret
def authorize(self, principal: Principal, bucket_name: str | None, action: str) -> None:
def authorize(self, principal: Principal, bucket_name: str | None, action: str, *, object_key: str | None = None) -> None:
action = self._normalize_action(action)
if action not in ALLOWED_ACTIONS:
raise IamError(f"Unknown action '{action}'")
bucket_name = bucket_name or "*"
normalized = bucket_name.lower() if bucket_name != "*" else bucket_name
if not self._is_allowed(principal, normalized, action):
if not self._is_allowed(principal, normalized, action, object_key=object_key):
raise IamError(f"Access denied for action '{action}' on bucket '{bucket_name}'")
def check_permissions(self, principal: Principal, bucket_name: str | None, actions: Iterable[str]) -> Dict[str, bool]:
def check_permissions(self, principal: Principal, bucket_name: str | None, actions: Iterable[str], *, object_key: str | None = None) -> Dict[str, bool]:
self._maybe_reload()
bucket_name = (bucket_name or "*").lower() if bucket_name != "*" else (bucket_name or "*")
normalized_actions = {a: self._normalize_action(a) for a in actions}
@@ -356,37 +442,53 @@ class IamService:
if canonical not in ALLOWED_ACTIONS:
results[original] = False
else:
results[original] = self._is_allowed(principal, bucket_name, canonical)
results[original] = self._is_allowed(principal, bucket_name, canonical, object_key=object_key)
return results
def buckets_for_principal(self, principal: Principal, buckets: Iterable[str]) -> List[str]:
return [bucket for bucket in buckets if self._is_allowed(principal, bucket, "list")]
def _is_allowed(self, principal: Principal, bucket_name: str, action: str) -> bool:
def _is_allowed(self, principal: Principal, bucket_name: str, action: str, *, object_key: str | None = None) -> bool:
bucket_name = bucket_name.lower()
for policy in principal.policies:
if policy.bucket not in {"*", bucket_name}:
continue
if "*" in policy.actions or action in policy.actions:
return True
if "iam:*" in policy.actions and action.startswith("iam:"):
action_match = "*" in policy.actions or action in policy.actions
if not action_match and "iam:*" in policy.actions and action.startswith("iam:"):
action_match = True
if not action_match:
continue
if object_key is not None and policy.prefix != "*":
prefix = policy.prefix.rstrip("*")
if not object_key.startswith(prefix):
continue
return True
return False
def list_users(self) -> List[Dict[str, Any]]:
listing: List[Dict[str, Any]] = []
for access_key, record in self._users.items():
listing.append(
{
"access_key": access_key,
for user_id, record in self._user_records.items():
access_keys = []
for key_info in record.get("access_keys", []):
access_keys.append({
"access_key": key_info["access_key"],
"status": key_info.get("status", "active"),
"created_at": key_info.get("created_at"),
})
user_entry: Dict[str, Any] = {
"user_id": user_id,
"display_name": record["display_name"],
"enabled": record.get("enabled", True),
"expires_at": record.get("expires_at"),
"access_keys": access_keys,
"policies": [
{"bucket": policy.bucket, "actions": sorted(policy.actions)}
{**{"bucket": policy.bucket, "actions": sorted(policy.actions)}, **({"prefix": policy.prefix} if policy.prefix != "*" else {})}
for policy in record["policies"]
],
}
)
if access_keys:
user_entry["access_key"] = access_keys[0]["access_key"]
listing.append(user_entry)
return listing
def create_user(
@@ -397,20 +499,33 @@ class IamService:
access_key: str | None = None,
secret_key: str | None = None,
expires_at: str | None = None,
user_id: str | None = None,
) -> Dict[str, str]:
access_key = (access_key or self._generate_access_key()).strip()
if not access_key:
raise IamError("Access key cannot be empty")
if access_key in self._users:
if access_key in self._key_index:
raise IamError("Access key already exists")
if expires_at:
self._validate_expires_at(expires_at)
secret_key = secret_key or self._generate_secret_key()
sanitized_policies = self._prepare_policy_payload(policies)
user_id = user_id or self._generate_user_id()
if user_id in self._user_records:
raise IamError("User ID already exists")
now_iso = datetime.now(timezone.utc).isoformat()
record: Dict[str, Any] = {
"user_id": user_id,
"display_name": display_name or access_key,
"enabled": True,
"access_keys": [
{
"access_key": access_key,
"secret_key": secret_key,
"display_name": display_name or access_key,
"status": "active",
"created_at": now_iso,
}
],
"policies": sanitized_policies,
}
if expires_at:
@@ -418,12 +533,108 @@ class IamService:
self._raw_config.setdefault("users", []).append(record)
self._save()
self._load()
return {"access_key": access_key, "secret_key": secret_key}
return {"user_id": user_id, "access_key": access_key, "secret_key": secret_key}
def create_access_key(self, identifier: str) -> Dict[str, str]:
user_raw, _ = self._resolve_raw_user(identifier)
new_access_key = self._generate_access_key()
new_secret_key = self._generate_secret_key()
now_iso = datetime.now(timezone.utc).isoformat()
key_entry = {
"access_key": new_access_key,
"secret_key": new_secret_key,
"status": "active",
"created_at": now_iso,
}
user_raw.setdefault("access_keys", []).append(key_entry)
self._save()
self._load()
return {"access_key": new_access_key, "secret_key": new_secret_key}
def delete_access_key(self, access_key: str) -> None:
user_raw, _ = self._resolve_raw_user(access_key)
keys = user_raw.get("access_keys", [])
if len(keys) <= 1:
raise IamError("Cannot delete the only access key for a user")
remaining = [k for k in keys if k["access_key"] != access_key]
if len(remaining) == len(keys):
raise IamError("Access key not found")
user_raw["access_keys"] = remaining
self._save()
self._principal_cache.pop(access_key, None)
self._secret_key_cache.pop(access_key, None)
from .s3_api import clear_signing_key_cache
clear_signing_key_cache()
self._load()
def disable_user(self, identifier: str) -> None:
user_raw, _ = self._resolve_raw_user(identifier)
user_raw["enabled"] = False
self._save()
for key_info in user_raw.get("access_keys", []):
ak = key_info["access_key"]
self._principal_cache.pop(ak, None)
self._secret_key_cache.pop(ak, None)
from .s3_api import clear_signing_key_cache
clear_signing_key_cache()
self._load()
def enable_user(self, identifier: str) -> None:
user_raw, _ = self._resolve_raw_user(identifier)
user_raw["enabled"] = True
self._save()
self._load()
def get_user_by_id(self, user_id: str) -> Dict[str, Any]:
record = self._user_records.get(user_id)
if not record:
raise IamError("User not found")
access_keys = []
for key_info in record.get("access_keys", []):
access_keys.append({
"access_key": key_info["access_key"],
"status": key_info.get("status", "active"),
"created_at": key_info.get("created_at"),
})
return {
"user_id": user_id,
"display_name": record["display_name"],
"enabled": record.get("enabled", True),
"expires_at": record.get("expires_at"),
"access_keys": access_keys,
"policies": [
{"bucket": p.bucket, "actions": sorted(p.actions), "prefix": p.prefix}
for p in record["policies"]
],
}
def get_user_policies(self, identifier: str) -> List[Dict[str, Any]]:
_, user_id = self._resolve_raw_user(identifier)
record = self._user_records.get(user_id)
if not record:
raise IamError("User not found")
return [
{**{"bucket": p.bucket, "actions": sorted(p.actions)}, **({"prefix": p.prefix} if p.prefix != "*" else {})}
for p in record["policies"]
]
def resolve_user_id(self, identifier: str) -> str:
if identifier in self._user_records:
return identifier
user_id = self._key_index.get(identifier)
if user_id:
return user_id
raise IamError("User not found")
def rotate_secret(self, access_key: str) -> str:
user = self._get_raw_user(access_key)
user_raw, _ = self._resolve_raw_user(access_key)
new_secret = self._generate_secret_key()
user["secret_key"] = new_secret
for key_info in user_raw.get("access_keys", []):
if key_info["access_key"] == access_key:
key_info["secret_key"] = new_secret
break
else:
raise IamError("Access key not found")
self._save()
self._principal_cache.pop(access_key, None)
self._secret_key_cache.pop(access_key, None)
@@ -433,8 +644,8 @@ class IamService:
return new_secret
def update_user(self, access_key: str, display_name: str) -> None:
user = self._get_raw_user(access_key)
user["display_name"] = display_name
user_raw, _ = self._resolve_raw_user(access_key)
user_raw["display_name"] = display_name
self._save()
self._load()
@@ -442,32 +653,43 @@ class IamService:
users = self._raw_config.get("users", [])
if len(users) <= 1:
raise IamError("Cannot delete the only user")
remaining = [user for user in users if user["access_key"] != access_key]
if len(remaining) == len(users):
_, target_user_id = self._resolve_raw_user(access_key)
target_user_raw = None
remaining = []
for u in users:
if u.get("user_id") == target_user_id:
target_user_raw = u
else:
remaining.append(u)
if target_user_raw is None:
raise IamError("User not found")
self._raw_config["users"] = remaining
self._save()
self._principal_cache.pop(access_key, None)
self._secret_key_cache.pop(access_key, None)
for key_info in target_user_raw.get("access_keys", []):
ak = key_info["access_key"]
self._principal_cache.pop(ak, None)
self._secret_key_cache.pop(ak, None)
from .s3_api import clear_signing_key_cache
clear_signing_key_cache()
self._load()
def update_user_expiry(self, access_key: str, expires_at: str | None) -> None:
user = self._get_raw_user(access_key)
user_raw, _ = self._resolve_raw_user(access_key)
if expires_at:
self._validate_expires_at(expires_at)
user["expires_at"] = expires_at
user_raw["expires_at"] = expires_at
else:
user.pop("expires_at", None)
user_raw.pop("expires_at", None)
self._save()
self._principal_cache.pop(access_key, None)
self._secret_key_cache.pop(access_key, None)
for key_info in user_raw.get("access_keys", []):
ak = key_info["access_key"]
self._principal_cache.pop(ak, None)
self._secret_key_cache.pop(ak, None)
self._load()
def update_user_policies(self, access_key: str, policies: Sequence[Dict[str, Any]]) -> None:
user = self._get_raw_user(access_key)
user["policies"] = self._prepare_policy_payload(policies)
user_raw, _ = self._resolve_raw_user(access_key)
user_raw["policies"] = self._prepare_policy_payload(policies)
self._save()
self._load()
@@ -482,6 +704,52 @@ class IamService:
raise IamError("Cannot decrypt IAM config. SECRET_KEY may have changed. Use 'python run.py reset-cred' to reset credentials.")
return raw_bytes.decode("utf-8")
def _is_v2_config(self, raw: Dict[str, Any]) -> bool:
return raw.get("version", 1) >= _CONFIG_VERSION
def _migrate_v1_to_v2(self, raw: Dict[str, Any]) -> Dict[str, Any]:
migrated_users = []
now_iso = datetime.now(timezone.utc).isoformat()
for user in raw.get("users", []):
old_policies = user.get("policies", [])
expanded_policies = []
for p in old_policies:
raw_actions = p.get("actions", [])
if isinstance(raw_actions, str):
raw_actions = [raw_actions]
action_set: Set[str] = set()
for a in raw_actions:
canonical = self._normalize_action(a)
if canonical == "*":
action_set = set(ALLOWED_ACTIONS)
break
if canonical:
action_set.add(canonical)
action_set = _expand_v1_actions(action_set)
expanded_policies.append({
"bucket": p.get("bucket", "*"),
"actions": sorted(action_set),
"prefix": p.get("prefix", "*"),
})
migrated_user: Dict[str, Any] = {
"user_id": user["access_key"],
"display_name": user.get("display_name", user["access_key"]),
"enabled": True,
"access_keys": [
{
"access_key": user["access_key"],
"secret_key": user["secret_key"],
"status": "active",
"created_at": now_iso,
}
],
"policies": expanded_policies,
}
if user.get("expires_at"):
migrated_user["expires_at"] = user["expires_at"]
migrated_users.append(migrated_user)
return {"version": _CONFIG_VERSION, "users": migrated_users}
def _load(self) -> None:
try:
self._last_load_time = self.config_path.stat().st_mtime
@@ -500,35 +768,67 @@ class IamService:
raise IamError(f"Failed to load IAM config: {e}")
was_plaintext = not raw_bytes.startswith(_IAM_ENCRYPTED_PREFIX)
was_v1 = not self._is_v2_config(raw)
if was_v1:
raw = self._migrate_v1_to_v2(raw)
user_records: Dict[str, Dict[str, Any]] = {}
key_index: Dict[str, str] = {}
key_secrets: Dict[str, str] = {}
key_status_map: Dict[str, str] = {}
users: Dict[str, Dict[str, Any]] = {}
for user in raw.get("users", []):
user_id = user["user_id"]
policies = self._build_policy_objects(user.get("policies", []))
user_record: Dict[str, Any] = {
"secret_key": user["secret_key"],
"display_name": user.get("display_name", user["access_key"]),
access_keys_raw = user.get("access_keys", [])
access_keys_info = []
for key_entry in access_keys_raw:
ak = key_entry["access_key"]
sk = key_entry["secret_key"]
status = key_entry.get("status", "active")
key_index[ak] = user_id
key_secrets[ak] = sk
key_status_map[ak] = status
access_keys_info.append({
"access_key": ak,
"secret_key": sk,
"status": status,
"created_at": key_entry.get("created_at"),
})
record: Dict[str, Any] = {
"display_name": user.get("display_name", user_id),
"enabled": user.get("enabled", True),
"policies": policies,
"access_keys": access_keys_info,
}
if user.get("expires_at"):
user_record["expires_at"] = user["expires_at"]
users[user["access_key"]] = user_record
if not users:
raise IamError("IAM configuration contains no users")
self._users = users
raw_users: List[Dict[str, Any]] = []
for entry in raw.get("users", []):
raw_entry: Dict[str, Any] = {
"access_key": entry["access_key"],
"secret_key": entry["secret_key"],
"display_name": entry.get("display_name", entry["access_key"]),
"policies": entry.get("policies", []),
}
if entry.get("expires_at"):
raw_entry["expires_at"] = entry["expires_at"]
raw_users.append(raw_entry)
self._raw_config = {"users": raw_users}
record["expires_at"] = user["expires_at"]
user_records[user_id] = record
if was_plaintext and self._fernet:
if not user_records:
raise IamError("IAM configuration contains no users")
self._user_records = user_records
self._key_index = key_index
self._key_secrets = key_secrets
self._key_status = key_status_map
raw_users: List[Dict[str, Any]] = []
for user in raw.get("users", []):
raw_entry: Dict[str, Any] = {
"user_id": user["user_id"],
"display_name": user.get("display_name", user["user_id"]),
"enabled": user.get("enabled", True),
"access_keys": user.get("access_keys", []),
"policies": user.get("policies", []),
}
if user.get("expires_at"):
raw_entry["expires_at"] = user["expires_at"]
raw_users.append(raw_entry)
self._raw_config = {"version": _CONFIG_VERSION, "users": raw_users}
if was_v1 or (was_plaintext and self._fernet):
self._save()
def _save(self) -> None:
@@ -547,19 +847,30 @@ class IamService:
def config_summary(self) -> Dict[str, Any]:
return {
"path": str(self.config_path),
"user_count": len(self._users),
"user_count": len(self._user_records),
"allowed_actions": sorted(ALLOWED_ACTIONS),
}
def export_config(self, mask_secrets: bool = True) -> Dict[str, Any]:
payload: Dict[str, Any] = {"users": []}
payload: Dict[str, Any] = {"version": _CONFIG_VERSION, "users": []}
for user in self._raw_config.get("users", []):
access_keys = []
for key_info in user.get("access_keys", []):
access_keys.append({
"access_key": key_info["access_key"],
"secret_key": "\u2022\u2022\u2022\u2022\u2022\u2022\u2022\u2022\u2022\u2022" if mask_secrets else key_info["secret_key"],
"status": key_info.get("status", "active"),
"created_at": key_info.get("created_at"),
})
record: Dict[str, Any] = {
"access_key": user["access_key"],
"secret_key": "••••••••••" if mask_secrets else user["secret_key"],
"user_id": user["user_id"],
"display_name": user["display_name"],
"enabled": user.get("enabled", True),
"access_keys": access_keys,
"policies": user["policies"],
}
if access_keys:
record["access_key"] = access_keys[0]["access_key"]
if user.get("expires_at"):
record["expires_at"] = user["expires_at"]
payload["users"].append(record)
@@ -569,6 +880,7 @@ class IamService:
entries: List[Policy] = []
for policy in policies:
bucket = str(policy.get("bucket", "*")).lower()
prefix = str(policy.get("prefix", "*"))
raw_actions = policy.get("actions", [])
if isinstance(raw_actions, str):
raw_actions = [raw_actions]
@@ -581,7 +893,7 @@ class IamService:
if canonical:
action_set.add(canonical)
if action_set:
entries.append(Policy(bucket=bucket, actions=action_set))
entries.append(Policy(bucket=bucket, actions=action_set, prefix=prefix))
return entries
def _prepare_policy_payload(self, policies: Optional[Sequence[Dict[str, Any]]]) -> List[Dict[str, Any]]:
@@ -589,12 +901,14 @@ class IamService:
policies = (
{
"bucket": "*",
"actions": ["list", "read", "write", "delete", "share", "policy"],
"actions": ["list", "read", "write", "delete", "share", "policy",
"create_bucket", "delete_bucket"],
},
)
sanitized: List[Dict[str, Any]] = []
for policy in policies:
bucket = str(policy.get("bucket", "*")).lower()
prefix = str(policy.get("prefix", "*"))
raw_actions = policy.get("actions", [])
if isinstance(raw_actions, str):
raw_actions = [raw_actions]
@@ -608,7 +922,10 @@ class IamService:
action_set.add(canonical)
if not action_set:
continue
sanitized.append({"bucket": bucket, "actions": sorted(action_set)})
entry: Dict[str, Any] = {"bucket": bucket, "actions": sorted(action_set)}
if prefix != "*":
entry["prefix"] = prefix
sanitized.append(entry)
if not sanitized:
raise IamError("At least one policy with valid actions is required")
return sanitized
@@ -633,12 +950,23 @@ class IamService:
access_key = os.environ.get("ADMIN_ACCESS_KEY", "").strip() or secrets.token_hex(12)
secret_key = os.environ.get("ADMIN_SECRET_KEY", "").strip() or secrets.token_urlsafe(32)
custom_keys = bool(os.environ.get("ADMIN_ACCESS_KEY", "").strip())
user_id = self._generate_user_id()
now_iso = datetime.now(timezone.utc).isoformat()
default = {
"version": _CONFIG_VERSION,
"users": [
{
"user_id": user_id,
"display_name": "Local Admin",
"enabled": True,
"access_keys": [
{
"access_key": access_key,
"secret_key": secret_key,
"display_name": "Local Admin",
"status": "active",
"created_at": now_iso,
}
],
"policies": [
{"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
],
@@ -660,6 +988,7 @@ class IamService:
else:
print(f"Access Key: {access_key}")
print(f"Secret Key: {secret_key}")
print(f"User ID: {user_id}")
print(f"{'='*60}")
if self._fernet:
print("IAM config is encrypted at rest.")
@@ -682,30 +1011,46 @@ class IamService:
def _generate_secret_key(self) -> str:
return secrets.token_urlsafe(24)
def _get_raw_user(self, access_key: str) -> Dict[str, Any]:
def _generate_user_id(self) -> str:
return f"u-{secrets.token_hex(8)}"
def _resolve_raw_user(self, identifier: str) -> Tuple[Dict[str, Any], str]:
for user in self._raw_config.get("users", []):
if user["access_key"] == access_key:
return user
if user.get("user_id") == identifier:
return user, identifier
for user in self._raw_config.get("users", []):
for key_info in user.get("access_keys", []):
if key_info["access_key"] == identifier:
return user, user["user_id"]
raise IamError("User not found")
def _get_raw_user(self, access_key: str) -> Dict[str, Any]:
user, _ = self._resolve_raw_user(access_key)
return user
def get_secret_key(self, access_key: str) -> str | None:
now = time.time()
cached = self._secret_key_cache.get(access_key)
if cached:
secret_key, cached_time = cached
if now - cached_time < self._cache_ttl:
record = self._users.get(access_key)
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
return secret_key
self._maybe_reload()
record = self._users.get(access_key)
secret = self._key_secrets.get(access_key)
if secret:
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
secret_key = record["secret_key"]
self._secret_key_cache[access_key] = (secret_key, now)
return secret_key
self._secret_key_cache[access_key] = (secret, now)
return secret
return None
def get_principal(self, access_key: str) -> Principal | None:
@@ -714,13 +1059,17 @@ class IamService:
if cached:
principal, cached_time = cached
if now - cached_time < self._cache_ttl:
record = self._users.get(access_key)
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
return principal
self._maybe_reload()
record = self._users.get(access_key)
user_id = self._key_index.get(access_key)
if user_id:
record = self._user_records.get(user_id)
if record:
self._check_expiry(access_key, record)
principal = self._build_principal(access_key, record)

882
app/integrity.py Normal file
View File

@@ -0,0 +1,882 @@
from __future__ import annotations
import hashlib
import json
import logging
import os
import threading
import time
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional
try:
import myfsio_core as _rc
_HAS_RUST = True
except ImportError:
_HAS_RUST = False
logger = logging.getLogger(__name__)
def _compute_etag(path: Path) -> str:
if _HAS_RUST:
return _rc.md5_file(str(path))
checksum = hashlib.md5()
with path.open("rb") as handle:
for chunk in iter(lambda: handle.read(8192), b""):
checksum.update(chunk)
return checksum.hexdigest()
@dataclass
class IntegrityIssue:
    """A single finding produced by an integrity scan.

    ``healed`` / ``heal_action`` are filled in only when auto-heal actually
    modified the disk to resolve the finding.
    """

    issue_type: str
    bucket: str
    key: str
    detail: str
    healed: bool = False
    heal_action: str = ""

    def to_dict(self) -> dict:
        """Serialize the issue for JSON history storage."""
        return dict(
            issue_type=self.issue_type,
            bucket=self.bucket,
            key=self.key,
            detail=self.detail,
            healed=self.healed,
            heal_action=self.heal_action,
        )
@dataclass
class IntegrityResult:
    """Aggregate outcome of one integrity scan cycle.

    Holds one counter per issue category, the (capped) list of individual
    findings, non-fatal errors, and scan-progress bookkeeping.
    """

    corrupted_objects: int = 0
    orphaned_objects: int = 0
    phantom_metadata: int = 0
    stale_versions: int = 0
    etag_cache_inconsistencies: int = 0
    legacy_metadata_drifts: int = 0
    issues_healed: int = 0
    issues: List[IntegrityIssue] = field(default_factory=list)
    errors: List[str] = field(default_factory=list)
    objects_scanned: int = 0
    buckets_scanned: int = 0
    execution_time_seconds: float = 0.0

    def to_dict(self) -> dict:
        """Serialize the full result for JSON history storage."""
        return dict(
            corrupted_objects=self.corrupted_objects,
            orphaned_objects=self.orphaned_objects,
            phantom_metadata=self.phantom_metadata,
            stale_versions=self.stale_versions,
            etag_cache_inconsistencies=self.etag_cache_inconsistencies,
            legacy_metadata_drifts=self.legacy_metadata_drifts,
            issues_healed=self.issues_healed,
            issues=[issue.to_dict() for issue in self.issues],
            errors=self.errors,
            objects_scanned=self.objects_scanned,
            buckets_scanned=self.buckets_scanned,
            execution_time_seconds=self.execution_time_seconds,
        )

    @property
    def total_issues(self) -> int:
        """Sum of all category counters (not the capped ``issues`` list)."""
        counters = (
            self.corrupted_objects,
            self.orphaned_objects,
            self.phantom_metadata,
            self.stale_versions,
            self.etag_cache_inconsistencies,
            self.legacy_metadata_drifts,
        )
        return sum(counters)

    @property
    def has_issues(self) -> bool:
        """True when any category counter is non-zero."""
        return self.total_issues != 0
@dataclass
class IntegrityExecutionRecord:
    """One completed scan execution, as persisted in the history file."""

    timestamp: float
    result: dict
    dry_run: bool
    auto_heal: bool

    def to_dict(self) -> dict:
        """Serialize for JSON history storage."""
        return dict(
            timestamp=self.timestamp,
            result=self.result,
            dry_run=self.dry_run,
            auto_heal=self.auto_heal,
        )

    @classmethod
    def from_dict(cls, data: dict) -> IntegrityExecutionRecord:
        """Rebuild a record from its serialized form.

        ``dry_run``/``auto_heal`` default to False so records written by
        older versions still load.
        """
        return cls(
            timestamp=data["timestamp"],
            result=data["result"],
            dry_run=data.get("dry_run", False),
            auto_heal=data.get("auto_heal", False),
        )
class IntegrityHistoryStore:
    """Persists recent integrity-scan executions as JSON under the system area.

    Reads are best-effort: a missing or unreadable history file simply yields
    an empty list. Writes truncate to ``max_records`` newest-first entries.
    """

    def __init__(self, storage_root: Path, max_records: int = 50) -> None:
        self.storage_root = storage_root
        self.max_records = max_records
        # Serializes read-modify-write cycles in add().
        self._lock = threading.Lock()

    def _get_path(self) -> Path:
        # History lives next to the other system config files.
        return self.storage_root / ".myfsio.sys" / "config" / "integrity_history.json"

    def load(self) -> List[IntegrityExecutionRecord]:
        """Return all stored records; corrupt or unreadable files yield []."""
        history_file = self._get_path()
        if not history_file.exists():
            return []
        try:
            with open(history_file, "r", encoding="utf-8") as fh:
                payload = json.load(fh)
            return [IntegrityExecutionRecord.from_dict(item) for item in payload.get("executions", [])]
        except (OSError, ValueError, KeyError) as exc:
            logger.error("Failed to load integrity history: %s", exc)
            return []

    def save(self, records: List[IntegrityExecutionRecord]) -> None:
        """Write *records* (assumed newest-first), capped at max_records."""
        history_file = self._get_path()
        history_file.parent.mkdir(parents=True, exist_ok=True)
        payload = {"executions": [rec.to_dict() for rec in records[: self.max_records]]}
        try:
            with open(history_file, "w", encoding="utf-8") as fh:
                json.dump(payload, fh, indent=2)
        except OSError as exc:
            logger.error("Failed to save integrity history: %s", exc)

    def add(self, record: IntegrityExecutionRecord) -> None:
        """Prepend *record* to the stored history atomically w.r.t. other add() calls."""
        with self._lock:
            existing = self.load()
            existing.insert(0, record)
            self.save(existing)

    def get_history(self, limit: int = 50, offset: int = 0) -> List[IntegrityExecutionRecord]:
        """Return a page of records, newest first."""
        return self.load()[offset : offset + limit]
class IntegrityCursorStore:
    """Tracks when each bucket was last scanned so scans rotate fairly.

    Buckets never scanned sort first (timestamp 0.0), giving every bucket a
    turn even when batch limits stop a cycle early.
    """

    def __init__(self, storage_root: Path) -> None:
        self.storage_root = storage_root
        # Serializes read-modify-write cycles on the cursor file.
        self._lock = threading.Lock()

    def _get_path(self) -> Path:
        return self.storage_root / ".myfsio.sys" / "config" / "integrity_cursor.json"

    def load(self) -> Dict[str, Any]:
        """Return the cursor state; any problem yields a fresh empty state."""
        cursor_file = self._get_path()
        if not cursor_file.exists():
            return {"buckets": {}}
        try:
            with open(cursor_file, "r", encoding="utf-8") as fh:
                state = json.load(fh)
        except (OSError, ValueError, KeyError):
            return {"buckets": {}}
        if not isinstance(state.get("buckets"), dict):
            return {"buckets": {}}
        return state

    def save(self, data: Dict[str, Any]) -> None:
        """Persist the cursor state; failures are logged, never raised."""
        cursor_file = self._get_path()
        cursor_file.parent.mkdir(parents=True, exist_ok=True)
        try:
            with open(cursor_file, "w", encoding="utf-8") as fh:
                json.dump(data, fh, indent=2)
        except OSError as exc:
            logger.error("Failed to save integrity cursor: %s", exc)

    def update_bucket(self, bucket_name: str, timestamp: float) -> None:
        """Record that *bucket_name* finished scanning at *timestamp*."""
        with self._lock:
            state = self.load()
            state["buckets"][bucket_name] = {"last_scanned": timestamp}
            self.save(state)

    def clean_stale(self, existing_buckets: List[str]) -> None:
        """Drop cursor entries for buckets that no longer exist on disk."""
        with self._lock:
            state = self.load()
            keep = set(existing_buckets)
            removed_any = False
            for name in list(state["buckets"]):
                if name not in keep:
                    del state["buckets"][name]
                    removed_any = True
            if removed_any:
                self.save(state)

    def get_bucket_order(self, bucket_names: List[str]) -> List[str]:
        """Return *bucket_names* sorted least-recently-scanned first."""
        tracked = self.load().get("buckets", {})

        def last_scanned(name: str) -> float:
            info = tracked.get(name)
            # Unknown buckets sort to the front so they get scanned soonest.
            return info.get("last_scanned", 0.0) if info is not None else 0.0

        return sorted(bucket_names, key=last_scanned)

    def get_info(self) -> Dict[str, Any]:
        """Summarize cursor state for status/monitoring endpoints."""
        tracked = self.load().get("buckets", {})
        return {
            "tracked_buckets": len(tracked),
            "buckets": {name: info.get("last_scanned") for name, info in tracked.items()},
        }
# Cap on individual IntegrityIssue entries kept per scan result, so history
# records stay bounded even when a bucket has mass corruption (counters in
# IntegrityResult keep counting past this cap).
MAX_ISSUES = 500
class IntegrityChecker:
    """Background scanner that validates consistency between object data files
    and the metadata indexes under the system area, optionally healing issues.

    Each cycle runs six checks per bucket (corrupted objects, orphaned files,
    phantom metadata, stale version artifacts, etag-cache drift, legacy
    metadata drift), visiting buckets in least-recently-scanned order via
    IntegrityCursorStore and stopping once ``batch_size`` objects have been
    examined. File operations are throttled to avoid starving disk I/O.
    """

    # Layout of the system area under the storage root.
    SYSTEM_ROOT = ".myfsio.sys"
    SYSTEM_BUCKETS_DIR = "buckets"
    BUCKET_META_DIR = "meta"
    BUCKET_VERSIONS_DIR = "versions"
    # Folders inside a bucket that hold internal state, never user objects.
    INTERNAL_FOLDERS = {".meta", ".versions", ".multipart"}

    def __init__(
        self,
        storage_root: Path,
        interval_hours: float = 24.0,
        batch_size: int = 1000,
        auto_heal: bool = False,
        dry_run: bool = False,
        max_history: int = 50,
        io_throttle_ms: int = 10,
    ) -> None:
        """Configure the checker.

        Args:
            storage_root: Root directory holding bucket folders and the system area.
            interval_hours: Delay between scheduled scan cycles.
            batch_size: Maximum objects examined per cycle; the cursor rotation
                resumes with the least-recently-scanned buckets next cycle.
            auto_heal: Default heal behavior (overridable per run_now call).
            dry_run: Default dry-run behavior; when True nothing is modified.
            max_history: Number of execution records retained in history.
            io_throttle_ms: Sleep between file operations; 0 disables throttling.
        """
        self.storage_root = Path(storage_root)
        self.interval_seconds = interval_hours * 3600.0
        self.batch_size = batch_size
        self.auto_heal = auto_heal
        self.dry_run = dry_run
        self._timer: Optional[threading.Timer] = None
        self._shutdown = False
        # Held for the duration of a scan; acquire(blocking=False) in run_now
        # rejects overlapping scans instead of queueing them.
        self._lock = threading.Lock()
        self._scanning = False
        self._scan_start_time: Optional[float] = None
        self._io_throttle = max(0, io_throttle_ms) / 1000.0
        self.history_store = IntegrityHistoryStore(storage_root, max_records=max_history)
        self.cursor_store = IntegrityCursorStore(self.storage_root)

    def start(self) -> None:
        """Begin periodic scanning; no-op if a timer is already scheduled."""
        if self._timer is not None:
            return
        self._shutdown = False
        self._schedule_next()
        logger.info(
            "Integrity checker started: interval=%.1fh, batch_size=%d, auto_heal=%s, dry_run=%s",
            self.interval_seconds / 3600.0,
            self.batch_size,
            self.auto_heal,
            self.dry_run,
        )

    def stop(self) -> None:
        """Cancel the pending timer and prevent further cycles."""
        self._shutdown = True
        if self._timer:
            self._timer.cancel()
            self._timer = None
        logger.info("Integrity checker stopped")

    def _schedule_next(self) -> None:
        """Arm a daemon timer for the next cycle, unless shutting down."""
        if self._shutdown:
            return
        self._timer = threading.Timer(self.interval_seconds, self._run_cycle)
        self._timer.daemon = True
        self._timer.start()

    def _run_cycle(self) -> None:
        """Timer callback: run one scan, then reschedule even on failure."""
        if self._shutdown:
            return
        try:
            self.run_now()
        except Exception as e:
            logger.error("Integrity check cycle failed: %s", e)
        finally:
            self._schedule_next()

    def run_now(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> IntegrityResult:
        """Run one full scan cycle synchronously and record it in history.

        Args:
            auto_heal: Override the instance default for this run (None = default).
            dry_run: Override the instance default for this run (None = default).

        Returns:
            The populated IntegrityResult for this cycle.

        Raises:
            RuntimeError: If another scan is already in progress.
        """
        if not self._lock.acquire(blocking=False):
            raise RuntimeError("Integrity scan is already in progress")
        try:
            self._scanning = True
            self._scan_start_time = time.time()
            effective_auto_heal = auto_heal if auto_heal is not None else self.auto_heal
            effective_dry_run = dry_run if dry_run is not None else self.dry_run
            start = self._scan_start_time
            result = IntegrityResult()
            bucket_names = self._list_bucket_names()
            # Drop cursor entries for deleted buckets, then visit the
            # least-recently-scanned buckets first.
            self.cursor_store.clean_stale(bucket_names)
            ordered_buckets = self.cursor_store.get_bucket_order(bucket_names)
            for bucket_name in ordered_buckets:
                if self._batch_exhausted(result):
                    break
                result.buckets_scanned += 1
                self._check_corrupted_objects(bucket_name, result, effective_auto_heal, effective_dry_run)
                self._check_orphaned_objects(bucket_name, result, effective_auto_heal, effective_dry_run)
                self._check_phantom_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
                self._check_stale_versions(bucket_name, result, effective_auto_heal, effective_dry_run)
                self._check_etag_cache(bucket_name, result, effective_auto_heal, effective_dry_run)
                self._check_legacy_metadata(bucket_name, result, effective_auto_heal, effective_dry_run)
                # Mark the bucket scanned only after all six checks complete.
                self.cursor_store.update_bucket(bucket_name, time.time())
            result.execution_time_seconds = time.time() - start
            if result.has_issues or result.errors:
                logger.info(
                    "Integrity check completed in %.2fs: corrupted=%d, orphaned=%d, phantom=%d, "
                    "stale_versions=%d, etag_cache=%d, legacy_drift=%d, healed=%d, errors=%d%s",
                    result.execution_time_seconds,
                    result.corrupted_objects,
                    result.orphaned_objects,
                    result.phantom_metadata,
                    result.stale_versions,
                    result.etag_cache_inconsistencies,
                    result.legacy_metadata_drifts,
                    result.issues_healed,
                    len(result.errors),
                    " (dry run)" if effective_dry_run else "",
                )
            record = IntegrityExecutionRecord(
                timestamp=time.time(),
                result=result.to_dict(),
                dry_run=effective_dry_run,
                auto_heal=effective_auto_heal,
            )
            self.history_store.add(record)
            return result
        finally:
            self._scanning = False
            self._scan_start_time = None
            self._lock.release()

    def run_async(self, auto_heal: Optional[bool] = None, dry_run: Optional[bool] = None) -> bool:
        """Start run_now on a daemon thread; returns False if already scanning."""
        if self._scanning:
            return False
        t = threading.Thread(target=self.run_now, args=(auto_heal, dry_run), daemon=True)
        t.start()
        return True

    def _system_path(self) -> Path:
        """Root of the system (non-user-data) area."""
        return self.storage_root / self.SYSTEM_ROOT

    def _list_bucket_names(self) -> List[str]:
        """List bucket directory names under the storage root (system area excluded)."""
        names = []
        try:
            for entry in self.storage_root.iterdir():
                if entry.is_dir() and entry.name != self.SYSTEM_ROOT:
                    names.append(entry.name)
        except OSError:
            pass
        return names

    def _throttle(self) -> bool:
        """Sleep the configured throttle; returns True when scanning should abort."""
        if self._shutdown:
            return True
        if self._io_throttle > 0:
            time.sleep(self._io_throttle)
        return self._shutdown

    def _batch_exhausted(self, result: IntegrityResult) -> bool:
        """True when this cycle should stop (shutdown or batch limit reached)."""
        return self._shutdown or result.objects_scanned >= self.batch_size

    def _add_issue(self, result: IntegrityResult, issue: IntegrityIssue) -> None:
        """Append an issue, capped at MAX_ISSUES (counters still increment)."""
        if len(result.issues) < MAX_ISSUES:
            result.issues.append(issue)

    def _check_corrupted_objects(
        self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
    ) -> None:
        """Recompute each object's etag and compare with the stored index etag.

        Healing rewrites the index entry with the actual etag/size/mtime
        (i.e. the file on disk is treated as authoritative).
        """
        bucket_path = self.storage_root / bucket_name
        meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
        if not meta_root.exists():
            return
        try:
            for index_file in meta_root.rglob("_index.json"):
                if self._throttle():
                    return
                if self._batch_exhausted(result):
                    return
                if not index_file.is_file():
                    continue
                try:
                    index_data = json.loads(index_file.read_text(encoding="utf-8"))
                except (OSError, json.JSONDecodeError):
                    continue
                # list() so healing can mutate index_data during iteration.
                for key_name, entry in list(index_data.items()):
                    if self._throttle():
                        return
                    if self._batch_exhausted(result):
                        return
                    # Reconstruct the full object key from the index's location.
                    rel_dir = index_file.parent.relative_to(meta_root)
                    if rel_dir == Path("."):
                        full_key = key_name
                    else:
                        full_key = rel_dir.as_posix() + "/" + key_name
                    object_path = bucket_path / full_key
                    if not object_path.exists():
                        # Missing files are the phantom-metadata check's job.
                        continue
                    result.objects_scanned += 1
                    meta = entry.get("metadata", {}) if isinstance(entry, dict) else {}
                    stored_etag = meta.get("__etag__")
                    if not stored_etag:
                        continue
                    try:
                        actual_etag = _compute_etag(object_path)
                    except OSError:
                        continue
                    if actual_etag != stored_etag:
                        result.corrupted_objects += 1
                        issue = IntegrityIssue(
                            issue_type="corrupted_object",
                            bucket=bucket_name,
                            key=full_key,
                            detail=f"stored_etag={stored_etag} actual_etag={actual_etag}",
                        )
                        if auto_heal and not dry_run:
                            try:
                                stat = object_path.stat()
                                meta["__etag__"] = actual_etag
                                meta["__size__"] = str(stat.st_size)
                                meta["__last_modified__"] = str(stat.st_mtime)
                                index_data[key_name] = {"metadata": meta}
                                self._atomic_write_index(index_file, index_data)
                                issue.healed = True
                                issue.heal_action = "updated etag in index"
                                result.issues_healed += 1
                            except OSError as e:
                                result.errors.append(f"heal corrupted {bucket_name}/{full_key}: {e}")
                        self._add_issue(result, issue)
        except OSError as e:
            result.errors.append(f"check corrupted {bucket_name}: {e}")

    def _check_orphaned_objects(
        self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
    ) -> None:
        """Find data files that have no index entry; heal by creating one."""
        bucket_path = self.storage_root / bucket_name
        meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
        try:
            for entry in bucket_path.rglob("*"):
                if self._throttle():
                    return
                if self._batch_exhausted(result):
                    return
                if not entry.is_file():
                    continue
                try:
                    rel = entry.relative_to(bucket_path)
                except ValueError:
                    continue
                # Skip internal bookkeeping folders inside the bucket.
                if rel.parts and rel.parts[0] in self.INTERNAL_FOLDERS:
                    continue
                result.objects_scanned += 1
                full_key = rel.as_posix()
                key_name = rel.name
                parent = rel.parent
                # Each directory level has its own _index.json in the meta tree.
                if parent == Path("."):
                    index_path = meta_root / "_index.json"
                else:
                    index_path = meta_root / parent / "_index.json"
                has_entry = False
                if index_path.exists():
                    try:
                        index_data = json.loads(index_path.read_text(encoding="utf-8"))
                        has_entry = key_name in index_data
                    except (OSError, json.JSONDecodeError):
                        pass
                if not has_entry:
                    result.orphaned_objects += 1
                    issue = IntegrityIssue(
                        issue_type="orphaned_object",
                        bucket=bucket_name,
                        key=full_key,
                        detail="file exists without metadata entry",
                    )
                    if auto_heal and not dry_run:
                        try:
                            etag = _compute_etag(entry)
                            stat = entry.stat()
                            meta = {
                                "__etag__": etag,
                                "__size__": str(stat.st_size),
                                "__last_modified__": str(stat.st_mtime),
                            }
                            # Re-read the index just before writing to avoid
                            # clobbering entries added since the check above.
                            index_data = {}
                            if index_path.exists():
                                try:
                                    index_data = json.loads(index_path.read_text(encoding="utf-8"))
                                except (OSError, json.JSONDecodeError):
                                    pass
                            index_data[key_name] = {"metadata": meta}
                            self._atomic_write_index(index_path, index_data)
                            issue.healed = True
                            issue.heal_action = "created metadata entry"
                            result.issues_healed += 1
                        except OSError as e:
                            result.errors.append(f"heal orphaned {bucket_name}/{full_key}: {e}")
                    self._add_issue(result, issue)
        except OSError as e:
            result.errors.append(f"check orphaned {bucket_name}: {e}")

    def _check_phantom_metadata(
        self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
    ) -> None:
        """Find index entries whose data file is gone; heal by removing them."""
        bucket_path = self.storage_root / bucket_name
        meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
        if not meta_root.exists():
            return
        try:
            for index_file in meta_root.rglob("_index.json"):
                if self._throttle():
                    return
                if self._batch_exhausted(result):
                    return
                if not index_file.is_file():
                    continue
                try:
                    index_data = json.loads(index_file.read_text(encoding="utf-8"))
                except (OSError, json.JSONDecodeError):
                    continue
                # Removals are batched per index file and written once below.
                keys_to_remove = []
                for key_name in list(index_data.keys()):
                    if self._batch_exhausted(result):
                        break
                    result.objects_scanned += 1
                    rel_dir = index_file.parent.relative_to(meta_root)
                    if rel_dir == Path("."):
                        full_key = key_name
                    else:
                        full_key = rel_dir.as_posix() + "/" + key_name
                    object_path = bucket_path / full_key
                    if not object_path.exists():
                        result.phantom_metadata += 1
                        issue = IntegrityIssue(
                            issue_type="phantom_metadata",
                            bucket=bucket_name,
                            key=full_key,
                            detail="metadata entry without file on disk",
                        )
                        if auto_heal and not dry_run:
                            keys_to_remove.append(key_name)
                            issue.healed = True
                            issue.heal_action = "removed stale index entry"
                            result.issues_healed += 1
                        self._add_issue(result, issue)
                if keys_to_remove and auto_heal and not dry_run:
                    try:
                        for k in keys_to_remove:
                            index_data.pop(k, None)
                        if index_data:
                            self._atomic_write_index(index_file, index_data)
                        else:
                            # Entire index emptied: delete the file instead.
                            index_file.unlink(missing_ok=True)
                    except OSError as e:
                        result.errors.append(f"heal phantom {bucket_name}: {e}")
        except OSError as e:
            result.errors.append(f"check phantom {bucket_name}: {e}")

    def _check_stale_versions(
        self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
    ) -> None:
        """Find version .bin/.json pairs where one half is missing; heal by
        deleting the orphaned half."""
        versions_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_VERSIONS_DIR
        if not versions_root.exists():
            return
        try:
            for key_dir in versions_root.rglob("*"):
                if self._throttle():
                    return
                if self._batch_exhausted(result):
                    return
                if not key_dir.is_dir():
                    continue
                # Versions pair a data file (<id>.bin) with a manifest (<id>.json).
                bin_files = {f.stem: f for f in key_dir.glob("*.bin")}
                json_files = {f.stem: f for f in key_dir.glob("*.json")}
                for stem, bin_file in bin_files.items():
                    if self._batch_exhausted(result):
                        return
                    result.objects_scanned += 1
                    if stem not in json_files:
                        result.stale_versions += 1
                        issue = IntegrityIssue(
                            issue_type="stale_version",
                            bucket=bucket_name,
                            key=f"{key_dir.relative_to(versions_root).as_posix()}/{bin_file.name}",
                            detail="version data without manifest",
                        )
                        if auto_heal and not dry_run:
                            try:
                                bin_file.unlink(missing_ok=True)
                                issue.healed = True
                                issue.heal_action = "removed orphaned version data"
                                result.issues_healed += 1
                            except OSError as e:
                                result.errors.append(f"heal stale version {bin_file}: {e}")
                        self._add_issue(result, issue)
                for stem, json_file in json_files.items():
                    if self._batch_exhausted(result):
                        return
                    result.objects_scanned += 1
                    if stem not in bin_files:
                        result.stale_versions += 1
                        issue = IntegrityIssue(
                            issue_type="stale_version",
                            bucket=bucket_name,
                            key=f"{key_dir.relative_to(versions_root).as_posix()}/{json_file.name}",
                            detail="version manifest without data",
                        )
                        if auto_heal and not dry_run:
                            try:
                                json_file.unlink(missing_ok=True)
                                issue.healed = True
                                issue.heal_action = "removed orphaned version manifest"
                                result.issues_healed += 1
                            except OSError as e:
                                result.errors.append(f"heal stale version {json_file}: {e}")
                        self._add_issue(result, issue)
        except OSError as e:
            result.errors.append(f"check stale versions {bucket_name}: {e}")

    def _check_etag_cache(
        self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
    ) -> None:
        """Compare the bucket's etag_index.json cache against index etags.

        Healing deletes the whole cache file (it is presumably rebuilt on
        demand elsewhere — the rebuild path is not visible in this module).
        """
        etag_index_path = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / "etag_index.json"
        if not etag_index_path.exists():
            return
        meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
        if not meta_root.exists():
            return
        try:
            etag_cache = json.loads(etag_index_path.read_text(encoding="utf-8"))
        except (OSError, json.JSONDecodeError):
            return
        found_mismatch = False
        for full_key, cached_etag in etag_cache.items():
            if self._batch_exhausted(result):
                break
            result.objects_scanned += 1
            key_path = Path(full_key)
            key_name = key_path.name
            parent = key_path.parent
            if parent == Path("."):
                index_path = meta_root / "_index.json"
            else:
                index_path = meta_root / parent / "_index.json"
            if not index_path.exists():
                continue
            try:
                index_data = json.loads(index_path.read_text(encoding="utf-8"))
            except (OSError, json.JSONDecodeError):
                continue
            entry = index_data.get(key_name)
            if not entry:
                continue
            meta = entry.get("metadata", {}) if isinstance(entry, dict) else {}
            stored_etag = meta.get("__etag__")
            if stored_etag and cached_etag != stored_etag:
                result.etag_cache_inconsistencies += 1
                found_mismatch = True
                issue = IntegrityIssue(
                    issue_type="etag_cache_inconsistency",
                    bucket=bucket_name,
                    key=full_key,
                    detail=f"cached_etag={cached_etag} index_etag={stored_etag}",
                )
                self._add_issue(result, issue)
        if found_mismatch and auto_heal and not dry_run:
            try:
                etag_index_path.unlink(missing_ok=True)
                # Retroactively mark this bucket's cache issues as healed,
                # since deleting the cache file resolves all of them at once.
                for issue in result.issues:
                    if issue.issue_type == "etag_cache_inconsistency" and issue.bucket == bucket_name and not issue.healed:
                        issue.healed = True
                        issue.heal_action = "deleted etag_index.json"
                        result.issues_healed += 1
            except OSError as e:
                result.errors.append(f"heal etag cache {bucket_name}: {e}")

    def _check_legacy_metadata(
        self, bucket_name: str, result: IntegrityResult, auto_heal: bool, dry_run: bool
    ) -> None:
        """Find leftover per-object .meta.json files from the legacy layout.

        Unmigrated entries are healed by migrating into the index; entries
        that drift from the index are healed by deleting the legacy file
        (the index is authoritative).
        """
        legacy_meta_root = self.storage_root / bucket_name / ".meta"
        if not legacy_meta_root.exists():
            return
        meta_root = self._system_path() / self.SYSTEM_BUCKETS_DIR / bucket_name / self.BUCKET_META_DIR
        try:
            for meta_file in legacy_meta_root.rglob("*.meta.json"):
                if self._throttle():
                    return
                if self._batch_exhausted(result):
                    return
                if not meta_file.is_file():
                    continue
                result.objects_scanned += 1
                try:
                    rel = meta_file.relative_to(legacy_meta_root)
                except ValueError:
                    continue
                # Legacy files are named <object key>.meta.json.
                full_key = rel.as_posix().removesuffix(".meta.json")
                key_path = Path(full_key)
                key_name = key_path.name
                parent = key_path.parent
                if parent == Path("."):
                    index_path = meta_root / "_index.json"
                else:
                    index_path = meta_root / parent / "_index.json"
                try:
                    legacy_data = json.loads(meta_file.read_text(encoding="utf-8"))
                except (OSError, json.JSONDecodeError):
                    continue
                index_entry = None
                if index_path.exists():
                    try:
                        index_data = json.loads(index_path.read_text(encoding="utf-8"))
                        index_entry = index_data.get(key_name)
                    except (OSError, json.JSONDecodeError):
                        pass
                if index_entry is None:
                    # Never migrated: move the legacy metadata into the index.
                    result.legacy_metadata_drifts += 1
                    issue = IntegrityIssue(
                        issue_type="legacy_metadata_drift",
                        bucket=bucket_name,
                        key=full_key,
                        detail="unmigrated legacy .meta.json",
                    )
                    if auto_heal and not dry_run:
                        try:
                            index_data = {}
                            if index_path.exists():
                                try:
                                    index_data = json.loads(index_path.read_text(encoding="utf-8"))
                                except (OSError, json.JSONDecodeError):
                                    pass
                            index_data[key_name] = {"metadata": legacy_data}
                            self._atomic_write_index(index_path, index_data)
                            meta_file.unlink(missing_ok=True)
                            issue.healed = True
                            issue.heal_action = "migrated to index and deleted legacy file"
                            result.issues_healed += 1
                        except OSError as e:
                            result.errors.append(f"heal legacy {bucket_name}/{full_key}: {e}")
                    self._add_issue(result, issue)
                else:
                    index_meta = index_entry.get("metadata", {}) if isinstance(index_entry, dict) else {}
                    if legacy_data != index_meta:
                        # Both exist but disagree: drop the legacy copy.
                        result.legacy_metadata_drifts += 1
                        issue = IntegrityIssue(
                            issue_type="legacy_metadata_drift",
                            bucket=bucket_name,
                            key=full_key,
                            detail="legacy .meta.json differs from index entry",
                        )
                        if auto_heal and not dry_run:
                            try:
                                meta_file.unlink(missing_ok=True)
                                issue.healed = True
                                issue.heal_action = "deleted legacy file (index is authoritative)"
                                result.issues_healed += 1
                            except OSError as e:
                                result.errors.append(f"heal legacy drift {bucket_name}/{full_key}: {e}")
                        self._add_issue(result, issue)
        except OSError as e:
            result.errors.append(f"check legacy meta {bucket_name}: {e}")

    @staticmethod
    def _atomic_write_index(index_path: Path, data: Dict[str, Any]) -> None:
        """Write *data* as JSON via a temp file + os.replace so readers never
        observe a partially-written index; the temp file is cleaned up on error."""
        index_path.parent.mkdir(parents=True, exist_ok=True)
        tmp_path = index_path.with_suffix(".tmp")
        try:
            with open(tmp_path, "w", encoding="utf-8") as f:
                json.dump(data, f)
            os.replace(str(tmp_path), str(index_path))
        except BaseException:
            try:
                tmp_path.unlink(missing_ok=True)
            except OSError:
                pass
            raise

    def get_history(self, limit: int = 50, offset: int = 0) -> List[dict]:
        """Return a page of past execution records as plain dicts."""
        records = self.history_store.get_history(limit, offset)
        return [r.to_dict() for r in records]

    def get_status(self) -> dict:
        """Return current scheduler/scan state for the status endpoint."""
        status: Dict[str, Any] = {
            # NOTE(review): before start() is ever called, _shutdown is False
            # and _timer is None, so this reports enabled=True while
            # running=False — confirm that is the intended distinction.
            "enabled": not self._shutdown or self._timer is not None,
            "running": self._timer is not None and not self._shutdown,
            "scanning": self._scanning,
            "interval_hours": self.interval_seconds / 3600.0,
            "batch_size": self.batch_size,
            "auto_heal": self.auto_heal,
            "dry_run": self.dry_run,
            "io_throttle_ms": round(self._io_throttle * 1000),
        }
        if self._scanning and self._scan_start_time is not None:
            status["scan_elapsed_seconds"] = round(time.time() - self._scan_start_time, 1)
        status["cursor"] = self.cursor_store.get_info()
        return status

View File

@@ -301,7 +301,12 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
if _HAS_RUST:
query_params = list(req.args.items(multi=True))
header_values = [(h, req.headers.get(h) or "") for h in signed_headers_str.split(";")]
header_values = []
for h in signed_headers_str.split(";"):
val = req.headers.get(h) or ""
if h.lower() == "expect" and val == "":
val = "100-continue"
header_values.append((h, val))
if not _rc.verify_sigv4_signature(
req.method, canonical_uri, query_params, signed_headers_str,
header_values, payload_hash, amz_date, date_stamp, region,
@@ -390,7 +395,12 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
if _HAS_RUST:
query_params = [(k, v) for k, v in req.args.items(multi=True) if k != "X-Amz-Signature"]
header_values = [(h, req.headers.get(h) or "") for h in signed_headers_str.split(";")]
header_values = []
for h in signed_headers_str.split(";"):
val = req.headers.get(h) or ""
if h.lower() == "expect" and val == "":
val = "100-continue"
header_values.append((h, val))
if not _rc.verify_sigv4_signature(
req.method, canonical_uri, query_params, signed_headers_str,
header_values, "UNSIGNED-PAYLOAD", amz_date, date_stamp, region,
@@ -488,7 +498,7 @@ def _authorize_action(principal: Principal | None, bucket_name: str | None, acti
iam_error: IamError | None = None
if principal is not None:
try:
_iam().authorize(principal, bucket_name, action)
_iam().authorize(principal, bucket_name, action, object_key=object_key)
iam_allowed = True
except IamError as exc:
iam_error = exc
@@ -1019,6 +1029,58 @@ def _method_not_allowed(allowed: list[str]) -> Response:
return response
def _check_conditional_headers(etag: str, last_modified: float | None) -> Response | None:
    """Evaluate HTTP conditional request headers for the current object.

    Precedence follows RFC 7232: the ETag-based headers (If-Match,
    If-None-Match) take priority over the date-based ones
    (If-Unmodified-Since, If-Modified-Since), which are only consulted
    when their ETag counterpart is absent.

    Returns a 412 (Precondition Failed) or 304 (Not Modified) Response
    when a precondition dictates it, or None to let the request proceed.

    NOTE(review): a matching If-None-Match always yields 304 here; RFC 7232
    prescribes 412 for methods other than GET/HEAD — confirm call sites
    only use this for GET/HEAD.
    """
    from email.utils import parsedate_to_datetime
    if_match = request.headers.get("If-Match")
    if if_match:
        if if_match.strip() != "*":
            # Compare against every listed ETag, ignoring surrounding quotes.
            match_etags = [e.strip().strip('"') for e in if_match.split(",")]
            if etag not in match_etags:
                return Response(status=412)
    # If-Unmodified-Since applies only when If-Match was not supplied.
    if_unmodified = request.headers.get("If-Unmodified-Since")
    if not if_match and if_unmodified and last_modified is not None:
        try:
            dt = parsedate_to_datetime(if_unmodified)
            obj_dt = datetime.fromtimestamp(last_modified, timezone.utc)
            if obj_dt > dt:
                return Response(status=412)
        except (TypeError, ValueError):
            # Unparseable date header: ignore this precondition.
            pass
    if_none_match = request.headers.get("If-None-Match")
    if if_none_match:
        if if_none_match.strip() == "*":
            # "*" matches any existing representation -> not modified.
            resp = Response(status=304)
            resp.headers["ETag"] = f'"{etag}"'
            if last_modified is not None:
                resp.headers["Last-Modified"] = http_date(last_modified)
            return resp
        none_match_etags = [e.strip().strip('"') for e in if_none_match.split(",")]
        if etag in none_match_etags:
            resp = Response(status=304)
            resp.headers["ETag"] = f'"{etag}"'
            if last_modified is not None:
                resp.headers["Last-Modified"] = http_date(last_modified)
            return resp
    # If-Modified-Since applies only when If-None-Match was not supplied.
    if_modified = request.headers.get("If-Modified-Since")
    if not if_none_match and if_modified and last_modified is not None:
        try:
            dt = parsedate_to_datetime(if_modified)
            obj_dt = datetime.fromtimestamp(last_modified, timezone.utc)
            if obj_dt <= dt:
                resp = Response(status=304)
                resp.headers["ETag"] = f'"{etag}"'
                resp.headers["Last-Modified"] = http_date(last_modified)
                return resp
        except (TypeError, ValueError):
            # Unparseable date header: ignore this precondition.
            pass
    return None
def _apply_object_headers(
response: Response,
*,
@@ -1083,7 +1145,7 @@ def _bucket_versioning_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
_authorize_action(principal, bucket_name, "versioning")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1130,7 +1192,7 @@ def _bucket_tagging_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
_authorize_action(principal, bucket_name, "tagging")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1295,7 +1357,7 @@ def _bucket_cors_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
_authorize_action(principal, bucket_name, "cors")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1348,7 +1410,7 @@ def _bucket_encryption_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
_authorize_action(principal, bucket_name, "encryption")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1423,7 +1485,7 @@ def _bucket_acl_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
_authorize_action(principal, bucket_name, "share")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -1671,7 +1733,7 @@ def _bucket_lifecycle_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
_authorize_action(principal, bucket_name, "lifecycle")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -1835,7 +1897,7 @@ def _bucket_quota_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
_authorize_action(principal, bucket_name, "quota")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -1912,7 +1974,7 @@ def _bucket_object_lock_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
_authorize_action(principal, bucket_name, "object_lock")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -1958,7 +2020,7 @@ def _bucket_notification_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
_authorize_action(principal, bucket_name, "notification")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -2054,7 +2116,7 @@ def _bucket_logging_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
_authorize_action(principal, bucket_name, "logging")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -2196,7 +2258,7 @@ def _object_retention_handler(bucket_name: str, object_key: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "write" if request.method == "PUT" else "read", object_key=object_key)
_authorize_action(principal, bucket_name, "object_lock", object_key=object_key)
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -2272,7 +2334,7 @@ def _object_legal_hold_handler(bucket_name: str, object_key: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "write" if request.method == "PUT" else "read", object_key=object_key)
_authorize_action(principal, bucket_name, "object_lock", object_key=object_key)
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
@@ -2605,7 +2667,7 @@ def bucket_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "write")
_authorize_action(principal, bucket_name, "create_bucket")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
try:
@@ -2622,7 +2684,7 @@ def bucket_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "delete")
_authorize_action(principal, bucket_name, "delete_bucket")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
try:
@@ -2898,6 +2960,23 @@ def object_handler(bucket_name: str, object_key: str):
is_encrypted = "x-amz-server-side-encryption" in metadata
cond_etag = metadata.get("__etag__")
if not cond_etag and not is_encrypted:
try:
cond_etag = storage._compute_etag(path)
except OSError:
cond_etag = None
if cond_etag:
cond_mtime = float(metadata["__last_modified__"]) if "__last_modified__" in metadata else None
if cond_mtime is None:
try:
cond_mtime = path.stat().st_mtime
except OSError:
pass
cond_resp = _check_conditional_headers(cond_etag, cond_mtime)
if cond_resp:
return cond_resp
if request.method == "GET":
range_header = request.headers.get("Range")
@@ -3160,7 +3239,7 @@ def _bucket_replication_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
_authorize_action(principal, bucket_name, "replication")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -3243,7 +3322,7 @@ def _bucket_website_handler(bucket_name: str) -> Response:
if error:
return error
try:
_authorize_action(principal, bucket_name, "policy")
_authorize_action(principal, bucket_name, "website")
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
@@ -3367,6 +3446,16 @@ def head_object(bucket_name: str, object_key: str) -> Response:
metadata = _storage().get_object_metadata(bucket_name, object_key)
etag = metadata.get("__etag__") or _storage()._compute_etag(path)
head_mtime = float(metadata["__last_modified__"]) if "__last_modified__" in metadata else None
if head_mtime is None:
try:
head_mtime = path.stat().st_mtime
except OSError:
pass
cond_resp = _check_conditional_headers(etag, head_mtime)
if cond_resp:
return cond_resp
cached_size = metadata.get("__size__")
cached_mtime = metadata.get("__last_modified__")
if cached_size is not None and cached_mtime is not None:

View File

@@ -406,6 +406,10 @@ class ObjectStorage:
self._stats_serial[bucket_id] = self._stats_serial.get(bucket_id, 0) + 1
self._stats_mem_time[bucket_id] = time.monotonic()
self._stats_dirty.add(bucket_id)
needs_immediate = data["objects"] == 0 and objects_delta < 0
if needs_immediate:
self._flush_stats()
else:
self._schedule_stats_flush()
def _schedule_stats_flush(self) -> None:
@@ -710,6 +714,73 @@ class ObjectStorage:
next_continuation_token=next_token,
)
def iter_objects_shallow(
    self,
    bucket_name: str,
    *,
    prefix: str = "",
    delimiter: str = "/",
) -> Generator[tuple[str, ObjectMeta | str], None, None]:
    """Lazily yield the direct children of *prefix* in a bucket, one level deep.

    For each subdirectory yields ``("folder", "<prefix><name><delimiter>")``;
    for each regular file yields ``("object", ObjectMeta)``. Only a single
    directory is scanned via ``os.scandir``, which is far cheaper than a
    full recursive listing for delimiter-style browsing.

    Raises:
        BucketNotFoundError: if the bucket directory does not exist.

    Notes:
        - Traversal attempts ("..", resolved paths escaping the bucket) and
          I/O errors end or skip iteration silently instead of raising.
        - Keys are built as ``prefix + name``; assumes callers pass a prefix
          that already ends with the delimiter — TODO confirm at call sites.
    """
    bucket_path = self._bucket_path(bucket_name)
    if not bucket_path.exists():
        raise BucketNotFoundError("Bucket does not exist")
    bucket_id = bucket_path.name
    target_dir = bucket_path
    if prefix:
        # Strip the trailing delimiter and reject ".." path components.
        safe_prefix_path = Path(prefix.rstrip("/"))
        if ".." in safe_prefix_path.parts:
            return
        target_dir = bucket_path / safe_prefix_path
    try:
        # Verify the resolved target stays inside the bucket root
        # (also guards against symlink escapes, not just literal "..").
        resolved = target_dir.resolve()
        bucket_resolved = bucket_path.resolve()
        if not str(resolved).startswith(str(bucket_resolved) + os.sep) and resolved != bucket_resolved:
            return
    except (OSError, ValueError):
        return
    if not target_dir.exists() or not target_dir.is_dir():
        return
    # Optional key -> etag cache; missing/corrupt cache just means slower
    # per-object metadata reads below.
    etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
    meta_cache: Dict[str, str] = {}
    if etag_index_path.exists():
        try:
            with open(etag_index_path, 'r', encoding='utf-8') as f:
                meta_cache = json.load(f)
        except (OSError, json.JSONDecodeError):
            pass
    try:
        with os.scandir(str(target_dir)) as it:
            for entry in it:
                name = entry.name
                # Skip internal bookkeeping folders (.meta, .versions, ...).
                if name in self.INTERNAL_FOLDERS:
                    continue
                if entry.is_dir(follow_symlinks=False):
                    yield ("folder", prefix + name + delimiter)
                elif entry.is_file(follow_symlinks=False):
                    key = prefix + name
                    try:
                        st = entry.stat()
                        etag = meta_cache.get(key)
                        if etag is None:
                            # Cache miss: fall back to the per-object metadata read.
                            safe_key = PurePosixPath(key)
                            meta = self._read_metadata(bucket_id, Path(safe_key))
                            etag = meta.get("__etag__") if meta else None
                        yield ("object", ObjectMeta(
                            key=key,
                            size=st.st_size,
                            last_modified=datetime.fromtimestamp(st.st_mtime, timezone.utc),
                            etag=etag,
                            metadata=None,
                        ))
                    except OSError:
                        # Entry vanished or is unreadable mid-scan: skip it.
                        pass
    except OSError:
        return
def _shallow_via_full_scan(
self,
bucket_name: str,

282
app/ui.py
View File

@@ -618,20 +618,77 @@ def stream_bucket_objects(bucket_name: str):
prefix = request.args.get("prefix") or None
delimiter = request.args.get("delimiter") or None
storage = _storage()
try:
client = get_session_s3_client()
except (PermissionError, RuntimeError) as exc:
return jsonify({"error": str(exc)}), 403
versioning_enabled = get_versioning_via_s3(client, bucket_name)
versioning_enabled = storage.is_versioning_enabled(bucket_name)
except StorageError:
versioning_enabled = False
url_templates = build_url_templates(bucket_name)
display_tz = current_app.config.get("DISPLAY_TIMEZONE", "UTC")
def generate():
yield json.dumps({
"type": "meta",
"versioning_enabled": versioning_enabled,
"url_templates": url_templates,
}) + "\n"
yield json.dumps({"type": "count", "total_count": 0}) + "\n"
running_count = 0
try:
if delimiter:
for item_type, item in storage.iter_objects_shallow(
bucket_name, prefix=prefix or "", delimiter=delimiter,
):
if item_type == "folder":
yield json.dumps({"type": "folder", "prefix": item}) + "\n"
else:
last_mod = item.last_modified
yield json.dumps({
"type": "object",
"key": item.key,
"size": item.size,
"last_modified": last_mod.isoformat(),
"last_modified_display": _format_datetime_display(last_mod, display_tz),
"last_modified_iso": _format_datetime_iso(last_mod, display_tz),
"etag": item.etag or "",
}) + "\n"
running_count += 1
if running_count % 1000 == 0:
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
else:
continuation_token = None
while True:
result = storage.list_objects(
bucket_name,
max_keys=1000,
continuation_token=continuation_token,
prefix=prefix,
)
for obj in result.objects:
last_mod = obj.last_modified
yield json.dumps({
"type": "object",
"key": obj.key,
"size": obj.size,
"last_modified": last_mod.isoformat(),
"last_modified_display": _format_datetime_display(last_mod, display_tz),
"last_modified_iso": _format_datetime_iso(last_mod, display_tz),
"etag": obj.etag or "",
}) + "\n"
running_count += len(result.objects)
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
if not result.is_truncated:
break
continuation_token = result.next_continuation_token
except StorageError as exc:
yield json.dumps({"type": "error", "error": str(exc)}) + "\n"
return
yield json.dumps({"type": "count", "total_count": running_count}) + "\n"
yield json.dumps({"type": "done"}) + "\n"
return Response(
stream_objects_ndjson(
client, bucket_name, prefix, url_templates, display_tz, versioning_enabled,
delimiter=delimiter,
),
generate(),
mimetype='application/x-ndjson',
headers={
'Cache-Control': 'no-cache',
@@ -1006,6 +1063,27 @@ def bulk_delete_objects(bucket_name: str):
return _respond(False, f"A maximum of {MAX_KEYS} objects can be deleted per request", status_code=400)
unique_keys = list(dict.fromkeys(cleaned))
folder_prefixes = [k for k in unique_keys if k.endswith("/")]
if folder_prefixes:
try:
client = get_session_s3_client()
for prefix in folder_prefixes:
unique_keys.remove(prefix)
paginator = client.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket=bucket_name, Prefix=prefix):
for obj in page.get("Contents", []):
if obj["Key"] not in unique_keys:
unique_keys.append(obj["Key"])
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
if isinstance(exc, ClientError):
err, status = handle_client_error(exc)
return _respond(False, err["error"], status_code=status)
return _respond(False, "S3 API server is unreachable", status_code=502)
if not unique_keys:
return _respond(False, "No objects found under the selected folders", status_code=400)
try:
_authorize_ui(principal, bucket_name, "delete")
except IamError as exc:
@@ -1036,13 +1114,17 @@ def bulk_delete_objects(bucket_name: str):
else:
try:
client = get_session_s3_client()
objects_to_delete = [{"Key": k} for k in unique_keys]
deleted = []
errors = []
for i in range(0, len(unique_keys), 1000):
batch = unique_keys[i:i + 1000]
objects_to_delete = [{"Key": k} for k in batch]
resp = client.delete_objects(
Bucket=bucket_name,
Delete={"Objects": objects_to_delete, "Quiet": False},
)
deleted = [d["Key"] for d in resp.get("Deleted", [])]
errors = [{"key": e["Key"], "error": e.get("Message", e.get("Code", "Unknown error"))} for e in resp.get("Errors", [])]
deleted.extend(d["Key"] for d in resp.get("Deleted", []))
errors.extend({"key": e["Key"], "error": e.get("Message", e.get("Code", "Unknown error"))} for e in resp.get("Errors", []))
for key in deleted:
_replication_manager().trigger_replication(bucket_name, key, action="delete")
except (ClientError, EndpointConnectionError, ConnectionClosedError) as exc:
@@ -4041,6 +4123,182 @@ def get_peer_sync_stats(site_id: str):
return jsonify(stats)
@ui_bp.get("/system")
def system_dashboard():
    """Render the admin System page.

    Shows version/platform info, optional Rust-core availability, feature
    flags, and the GC / integrity-scanner status plus their ten most recent
    run-history records. Requires the admin wildcard permission (``iam:*``);
    non-admins are redirected to the bucket overview with a flash message.
    """
    principal = _current_principal()
    try:
        _iam().authorize(principal, None, "iam:*")
    except IamError:
        flash("Access denied: System page requires admin permissions", "danger")
        return redirect(url_for("ui.buckets_overview"))
    import platform as _platform
    import sys
    from app.version import APP_VERSION
    try:
        # Detect the optional Rust acceleration module; only its presence matters.
        import myfsio_core as _rc
        has_rust = True
    except ImportError:
        has_rust = False
    gc = current_app.extensions.get("gc")
    gc_status = gc.get_status() if gc else {"enabled": False}
    gc_history_records = []
    if gc:
        raw = gc.get_history(limit=10, offset=0)
        for rec in raw:
            r = rec.get("result", {})
            # Aggregate freed bytes across all GC categories for display.
            total_freed = r.get("temp_bytes_freed", 0) + r.get("multipart_bytes_freed", 0) + r.get("orphaned_version_bytes_freed", 0)
            rec["bytes_freed_display"] = _format_bytes(total_freed)
            # NOTE(review): no explicit display-timezone argument is passed here —
            # confirm _format_datetime_display applies DISPLAY_TIMEZONE internally.
            rec["timestamp_display"] = _format_datetime_display(datetime.fromtimestamp(rec["timestamp"], tz=dt_timezone.utc))
            gc_history_records.append(rec)
    checker = current_app.extensions.get("integrity")
    integrity_status = checker.get_status() if checker else {"enabled": False}
    integrity_history_records = []
    if checker:
        raw = checker.get_history(limit=10, offset=0)
        for rec in raw:
            rec["timestamp_display"] = _format_datetime_display(datetime.fromtimestamp(rec["timestamp"], tz=dt_timezone.utc))
            integrity_history_records.append(rec)
    # Feature-flag summary rendered as on/off badges on the page.
    features = [
        {"label": "Encryption (SSE-S3)", "enabled": current_app.config.get("ENCRYPTION_ENABLED", False)},
        {"label": "KMS", "enabled": current_app.config.get("KMS_ENABLED", False)},
        {"label": "Versioning Lifecycle", "enabled": current_app.config.get("LIFECYCLE_ENABLED", False)},
        {"label": "Metrics History", "enabled": current_app.config.get("METRICS_HISTORY_ENABLED", False)},
        {"label": "Operation Metrics", "enabled": current_app.config.get("OPERATION_METRICS_ENABLED", False)},
        {"label": "Site Sync", "enabled": current_app.config.get("SITE_SYNC_ENABLED", False)},
        {"label": "Website Hosting", "enabled": current_app.config.get("WEBSITE_HOSTING_ENABLED", False)},
        {"label": "Garbage Collection", "enabled": current_app.config.get("GC_ENABLED", False)},
        {"label": "Integrity Scanner", "enabled": current_app.config.get("INTEGRITY_ENABLED", False)},
    ]
    return render_template(
        "system.html",
        principal=principal,
        app_version=APP_VERSION,
        storage_root=current_app.config.get("STORAGE_ROOT", "./data"),
        platform=_platform.platform(),
        python_version=sys.version.split()[0],
        has_rust=has_rust,
        features=features,
        gc_status=gc_status,
        gc_history=gc_history_records,
        integrity_status=integrity_status,
        integrity_history=integrity_history_records,
        display_timezone=current_app.config.get("DISPLAY_TIMEZONE", "UTC"),
    )
@ui_bp.post("/system/gc/run")
def system_gc_run():
    """Kick off a manual garbage-collection run (admin only)."""
    who = _current_principal()
    try:
        _iam().authorize(who, None, "iam:*")
    except IamError:
        return jsonify({"error": "Access denied"}), 403
    collector = current_app.extensions.get("gc")
    if not collector:
        return jsonify({"error": "GC is not enabled"}), 400
    body = request.get_json(silent=True) or {}
    if not collector.run_async(dry_run=body.get("dry_run")):
        return jsonify({"error": "GC is already in progress"}), 409
    return jsonify({"status": "started"})
@ui_bp.get("/system/gc/status")
def system_gc_status():
    """Report the GC scheduler's current status (admin only)."""
    who = _current_principal()
    try:
        _iam().authorize(who, None, "iam:*")
    except IamError:
        return jsonify({"error": "Access denied"}), 403
    collector = current_app.extensions.get("gc")
    if not collector:
        return jsonify({"error": "GC is not enabled"}), 400
    return jsonify(collector.get_status())
@ui_bp.get("/system/gc/history")
def system_gc_history():
    """Return paginated GC execution history (admin only)."""
    who = _current_principal()
    try:
        _iam().authorize(who, None, "iam:*")
    except IamError:
        return jsonify({"error": "Access denied"}), 403
    collector = current_app.extensions.get("gc")
    if not collector:
        return jsonify({"executions": []})
    # Cap the page size at 200 entries per request.
    page_size = min(int(request.args.get("limit", 10)), 200)
    start = int(request.args.get("offset", 0))
    return jsonify({"executions": collector.get_history(limit=page_size, offset=start)})
@ui_bp.post("/system/integrity/run")
def system_integrity_run():
    """Kick off a manual integrity scan (admin only)."""
    who = _current_principal()
    try:
        _iam().authorize(who, None, "iam:*")
    except IamError:
        return jsonify({"error": "Access denied"}), 403
    scanner = current_app.extensions.get("integrity")
    if not scanner:
        return jsonify({"error": "Integrity checker is not enabled"}), 400
    body = request.get_json(silent=True) or {}
    launched = scanner.run_async(
        auto_heal=body.get("auto_heal"),
        dry_run=body.get("dry_run"),
    )
    if not launched:
        return jsonify({"error": "A scan is already in progress"}), 409
    return jsonify({"status": "started"})
@ui_bp.get("/system/integrity/status")
def system_integrity_status():
    """Report the integrity scanner's current status (admin only)."""
    who = _current_principal()
    try:
        _iam().authorize(who, None, "iam:*")
    except IamError:
        return jsonify({"error": "Access denied"}), 403
    scanner = current_app.extensions.get("integrity")
    if not scanner:
        return jsonify({"error": "Integrity checker is not enabled"}), 400
    return jsonify(scanner.get_status())
@ui_bp.get("/system/integrity/history")
def system_integrity_history():
    """Return paginated integrity-scan history (admin only)."""
    who = _current_principal()
    try:
        _iam().authorize(who, None, "iam:*")
    except IamError:
        return jsonify({"error": "Access denied"}), 403
    scanner = current_app.extensions.get("integrity")
    if not scanner:
        return jsonify({"executions": []})
    # Cap the page size at 200 entries per request.
    page_size = min(int(request.args.get("limit", 10)), 200)
    start = int(request.args.get("offset", 0))
    return jsonify({"executions": scanner.get_history(limit=page_size, offset=start)})
@ui_bp.app_errorhandler(404)
def ui_not_found(error): # type: ignore[override]
prefix = ui_bp.url_prefix or ""

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
APP_VERSION = "0.3.7"
APP_VERSION = "0.4.1"
def get_version() -> str:

196
docs.md
View File

@@ -180,9 +180,9 @@ All configuration is done via environment variables. The table below lists every
| Variable | Default | Notes |
| --- | --- | --- |
| `SERVER_THREADS` | `0` (auto) | Waitress worker threads (1-64). Set to `0` for auto-calculation based on CPU cores (×2). |
| `SERVER_CONNECTION_LIMIT` | `0` (auto) | Maximum concurrent connections (10-1000). Set to `0` for auto-calculation based on available RAM. |
| `SERVER_BACKLOG` | `0` (auto) | TCP listen backlog (64-4096). Set to `0` for auto-calculation (connection_limit × 2). |
| `SERVER_THREADS` | `0` (auto) | Granian blocking threads (1-64). Set to `0` for auto-calculation based on CPU cores (×2). |
| `SERVER_CONNECTION_LIMIT` | `0` (auto) | Maximum concurrent requests per worker (10-1000). Set to `0` for auto-calculation based on available RAM. |
| `SERVER_BACKLOG` | `0` (auto) | TCP listen backlog (128-4096). Set to `0` for auto-calculation (connection_limit × 2). |
| `SERVER_CHANNEL_TIMEOUT` | `120` | Seconds before idle connections are closed (10-300). |
### Logging
@@ -252,6 +252,60 @@ Once enabled, configure lifecycle rules via:
</LifecycleConfiguration>
```
## Garbage Collection
The garbage collector (GC) automatically cleans up orphaned data that accumulates over time: stale temporary files from failed uploads, abandoned multipart uploads, stale lock files, orphaned metadata entries, orphaned version files, and empty directories.
### Enabling GC
By default, GC is disabled. Enable it by setting:
```bash
GC_ENABLED=true python run.py
```
Or in your `myfsio.env` file:
```
GC_ENABLED=true
GC_INTERVAL_HOURS=6 # Run every 6 hours (default)
GC_TEMP_FILE_MAX_AGE_HOURS=24 # Delete temp files older than 24h
GC_MULTIPART_MAX_AGE_DAYS=7 # Delete orphaned multipart uploads older than 7 days
GC_LOCK_FILE_MAX_AGE_HOURS=1 # Delete stale lock files older than 1h
GC_DRY_RUN=false # Set to true to log without deleting
```
### What Gets Cleaned
| Type | Location | Condition |
|------|----------|-----------|
| **Temp files** | `.myfsio.sys/tmp/` | Older than `GC_TEMP_FILE_MAX_AGE_HOURS` |
| **Orphaned multipart uploads** | `.myfsio.sys/multipart/` and `<bucket>/.multipart/` | Older than `GC_MULTIPART_MAX_AGE_DAYS` |
| **Stale lock files** | `.myfsio.sys/buckets/<bucket>/locks/` | Older than `GC_LOCK_FILE_MAX_AGE_HOURS` |
| **Orphaned metadata** | `.myfsio.sys/buckets/<bucket>/meta/` and `<bucket>/.meta/` | Object file no longer exists |
| **Orphaned versions** | `.myfsio.sys/buckets/<bucket>/versions/` and `<bucket>/.versions/` | Main object no longer exists |
| **Empty directories** | Various internal directories | Directory is empty after cleanup |
### Admin API
All GC endpoints require admin (`iam:*`) permissions.
| Method | Route | Description |
|--------|-------|-------------|
| `GET` | `/admin/gc/status` | Get GC status and configuration |
| `POST` | `/admin/gc/run` | Trigger a manual GC run (body: `{"dry_run": true}` for preview) |
| `GET` | `/admin/gc/history` | Get GC execution history (query: `?limit=50&offset=0`) |
### Dry Run Mode
Set `GC_DRY_RUN=true` to log what would be deleted without actually removing anything. You can also trigger a one-time dry run via the admin API:
```bash
curl -X POST "http://localhost:5000/admin/gc/run" \
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
-H "Content-Type: application/json" \
-d '{"dry_run": true}'
```
### Performance Tuning
| Variable | Default | Notes |
@@ -285,7 +339,7 @@ Before deploying to production, ensure you:
4. **Enable HTTPS** - Use a reverse proxy (nginx, Cloudflare) with TLS termination
5. **Review rate limits** - Adjust `RATE_LIMIT_DEFAULT` based on your needs
6. **Secure master keys** - Back up `ENCRYPTION_MASTER_KEY_PATH` if using encryption
7. **Use `--prod` flag** - Runs with Waitress instead of Flask dev server
7. **Use `--prod` flag** - Runs with Granian instead of Flask dev server
8. **Set credential expiry** - Assign `expires_at` to non-admin users for time-limited access
### Proxy Configuration
@@ -302,6 +356,69 @@ The application automatically trusts these headers to generate correct presigned
| `ALLOWED_REDIRECT_HOSTS` | `""` | Comma-separated whitelist of safe redirect targets. Empty allows only same-host redirects. |
| `ALLOW_INTERNAL_ENDPOINTS` | `false` | Allow connections to internal/private IPs for webhooks and replication targets. **Keep disabled in production unless needed.** |
## Integrity Scanner
The integrity scanner detects and optionally auto-repairs data inconsistencies: corrupted objects (ETag mismatch), orphaned files without metadata, phantom metadata without files, stale version archives, ETag cache drift, and unmigrated legacy `.meta.json` files.
### Enabling Integrity Scanner
By default, the integrity scanner is disabled. Enable it by setting:
```bash
INTEGRITY_ENABLED=true python run.py
```
Or in your `myfsio.env` file:
```
INTEGRITY_ENABLED=true
INTEGRITY_INTERVAL_HOURS=24 # Run every 24 hours (default)
INTEGRITY_BATCH_SIZE=1000 # Max objects to scan per cycle
INTEGRITY_AUTO_HEAL=false # Automatically repair detected issues
INTEGRITY_DRY_RUN=false # Set to true to log without healing
```
### What Gets Checked
| Check | Detection | Heal Action |
|-------|-----------|-------------|
| **Corrupted objects** | File MD5 does not match stored `__etag__` | Update `__etag__` in index (disk data is authoritative) |
| **Orphaned objects** | File exists on disk without metadata entry | Create index entry with computed MD5/size/mtime |
| **Phantom metadata** | Index entry exists but file is missing from disk | Remove stale entry from `_index.json` |
| **Stale versions** | `.json` manifest without `.bin` data or vice versa | Remove orphaned version file |
| **ETag cache inconsistency** | `etag_index.json` entry differs from metadata `__etag__` | Delete `etag_index.json` (auto-rebuilt on next list) |
| **Legacy metadata drift** | Legacy `.meta.json` differs from index or is unmigrated | Migrate to index and delete legacy file |
### Admin API
All integrity endpoints require admin (`iam:*`) permissions.
| Method | Route | Description |
|--------|-------|-------------|
| `GET` | `/admin/integrity/status` | Get scanner status and configuration |
| `POST` | `/admin/integrity/run` | Trigger a manual scan (body: `{"dry_run": true, "auto_heal": true}`) |
| `GET` | `/admin/integrity/history` | Get scan history (query: `?limit=50&offset=0`) |
### Dry Run Mode
Set `INTEGRITY_DRY_RUN=true` to log detected issues without making any changes. You can also trigger a one-time dry run via the admin API:
```bash
curl -X POST "http://localhost:5000/admin/integrity/run" \
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
-H "Content-Type: application/json" \
-d '{"dry_run": true, "auto_heal": true}'
```
### Configuration Reference
| Variable | Default | Description |
|----------|---------|-------------|
| `INTEGRITY_ENABLED` | `false` | Enable background integrity scanning |
| `INTEGRITY_INTERVAL_HOURS` | `24` | Hours between scan cycles |
| `INTEGRITY_BATCH_SIZE` | `1000` | Max objects to scan per cycle |
| `INTEGRITY_AUTO_HEAL` | `false` | Automatically repair detected issues |
| `INTEGRITY_DRY_RUN` | `false` | Log issues without healing |
## 4. Upgrading and Updates
### Version Checking
@@ -641,7 +758,7 @@ MyFSIO implements a comprehensive Identity and Access Management (IAM) system th
- **Create user**: supply a display name, optional JSON inline policy array, and optional credential expiry date.
- **Set expiry**: assign an expiration date to any user's credentials. Expired credentials are rejected at authentication time. The UI shows expiry badges and preset durations (1h, 24h, 7d, 30d, 90d).
- **Rotate secret**: generates a new secret key; the UI surfaces it once.
- **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
- **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. An optional `"prefix"` field restricts object-level actions to a key prefix (e.g., `"uploads/"`). Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
3. Wildcard action `iam:*` is supported for admin user definitions.
> **Breaking Change (v0.2.0+):** Previous versions used fixed default credentials (`localadmin/localadmin`). If upgrading from an older version, your existing credentials remain unchanged, but new installations will generate random credentials.
@@ -680,13 +797,23 @@ Both layers are evaluated for each request. A user must have permission in their
| --- | --- | --- |
| `list` | List buckets and objects | `s3:ListBucket`, `s3:ListAllMyBuckets`, `s3:ListBucketVersions`, `s3:ListMultipartUploads`, `s3:ListParts` |
| `read` | Download objects, get metadata | `s3:GetObject`, `s3:GetObjectVersion`, `s3:GetObjectTagging`, `s3:GetObjectVersionTagging`, `s3:GetObjectAcl`, `s3:GetBucketVersioning`, `s3:HeadObject`, `s3:HeadBucket` |
| `write` | Upload objects, create buckets, manage tags | `s3:PutObject`, `s3:CreateBucket`, `s3:PutObjectTagging`, `s3:PutBucketVersioning`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
| `delete` | Remove objects, versions, and buckets | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteBucket`, `s3:DeleteObjectTagging` |
| `write` | Upload objects, manage object tags | `s3:PutObject`, `s3:PutObjectTagging`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
| `delete` | Remove objects and versions | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteObjectTagging` |
| `create_bucket` | Create new buckets | `s3:CreateBucket` |
| `delete_bucket` | Delete buckets | `s3:DeleteBucket` |
| `share` | Manage Access Control Lists (ACLs) | `s3:PutObjectAcl`, `s3:PutBucketAcl`, `s3:GetBucketAcl` |
| `policy` | Manage bucket policies | `s3:PutBucketPolicy`, `s3:GetBucketPolicy`, `s3:DeleteBucketPolicy` |
| `versioning` | Manage bucket versioning configuration | `s3:GetBucketVersioning`, `s3:PutBucketVersioning` |
| `tagging` | Manage bucket-level tags | `s3:GetBucketTagging`, `s3:PutBucketTagging`, `s3:DeleteBucketTagging` |
| `encryption` | Manage bucket encryption configuration | `s3:GetEncryptionConfiguration`, `s3:PutEncryptionConfiguration`, `s3:DeleteEncryptionConfiguration` |
| `lifecycle` | Manage lifecycle rules | `s3:GetLifecycleConfiguration`, `s3:PutLifecycleConfiguration`, `s3:DeleteLifecycleConfiguration`, `s3:GetBucketLifecycle`, `s3:PutBucketLifecycle` |
| `cors` | Manage CORS configuration | `s3:GetBucketCors`, `s3:PutBucketCors`, `s3:DeleteBucketCors` |
| `replication` | Configure and manage replication | `s3:GetReplicationConfiguration`, `s3:PutReplicationConfiguration`, `s3:DeleteReplicationConfiguration`, `s3:ReplicateObject`, `s3:ReplicateTags`, `s3:ReplicateDelete` |
| `quota` | Manage bucket storage quotas | `s3:GetBucketQuota`, `s3:PutBucketQuota`, `s3:DeleteBucketQuota` |
| `object_lock` | Manage object lock, retention, and legal holds | `s3:GetObjectLockConfiguration`, `s3:PutObjectLockConfiguration`, `s3:PutObjectRetention`, `s3:GetObjectRetention`, `s3:PutObjectLegalHold`, `s3:GetObjectLegalHold` |
| `notification` | Manage bucket event notifications | `s3:GetBucketNotificationConfiguration`, `s3:PutBucketNotificationConfiguration`, `s3:DeleteBucketNotificationConfiguration` |
| `logging` | Manage bucket access logging | `s3:GetBucketLogging`, `s3:PutBucketLogging`, `s3:DeleteBucketLogging` |
| `website` | Manage static website hosting configuration | `s3:GetBucketWebsite`, `s3:PutBucketWebsite`, `s3:DeleteBucketWebsite` |
#### IAM Actions (User Management)
@@ -697,25 +824,31 @@ Both layers are evaluated for each request. A user must have permission in their
| `iam:delete_user` | Delete IAM users | `iam:DeleteUser` |
| `iam:rotate_key` | Rotate user secret keys | `iam:RotateAccessKey` |
| `iam:update_policy` | Modify user policies | `iam:PutUserPolicy` |
| `iam:create_key` | Create additional access keys for a user | `iam:CreateAccessKey` |
| `iam:delete_key` | Delete an access key from a user | `iam:DeleteAccessKey` |
| `iam:get_user` | View user details and access keys | `iam:GetUser` |
| `iam:get_policy` | View user policy configuration | `iam:GetPolicy` |
| `iam:disable_user` | Temporarily disable/enable a user account | `iam:DisableUser` |
| `iam:*` | **Admin wildcard** grants all IAM actions | — |
#### Wildcards
| Wildcard | Scope | Description |
| --- | --- | --- |
| `*` (in actions) | All S3 actions | Grants `list`, `read`, `write`, `delete`, `share`, `policy`, `lifecycle`, `cors`, `replication` |
| `*` (in actions) | All S3 actions | Grants all 19 S3 actions including `list`, `read`, `write`, `delete`, `create_bucket`, `delete_bucket`, `share`, `policy`, `versioning`, `tagging`, `encryption`, `lifecycle`, `cors`, `replication`, `quota`, `object_lock`, `notification`, `logging`, `website` |
| `iam:*` | All IAM actions | Grants all `iam:*` actions for user management |
| `*` (in bucket) | All buckets | Policy applies to every bucket |
### IAM Policy Structure
User policies are stored as a JSON array of policy objects. Each object specifies a bucket and the allowed actions:
User policies are stored as a JSON array of policy objects. Each object specifies a bucket, the allowed actions, and an optional prefix for object-level scoping:
```json
[
{
"bucket": "<bucket-name-or-wildcard>",
"actions": ["<action1>", "<action2>", ...]
"actions": ["<action1>", "<action2>", ...],
"prefix": "<optional-key-prefix>"
}
]
```
@@ -723,12 +856,13 @@ User policies are stored as a JSON array of policy objects. Each object specifie
**Fields:**
- `bucket`: The bucket name (case-insensitive) or `*` for all buckets
- `actions`: Array of action strings (simple names or AWS aliases)
- `prefix`: *(optional)* Restricts object-level actions to keys starting with this prefix. Defaults to `*` (all objects). Example: `"uploads/"` limits access to keys under `uploads/`
### Example User Policies
**Full Administrator (complete system access):**
```json
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "lifecycle", "cors", "replication", "iam:*"]}]
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "create_bucket", "delete_bucket", "versioning", "tagging", "encryption", "lifecycle", "cors", "replication", "quota", "object_lock", "notification", "logging", "website", "iam:*"]}]
```
**Read-Only User (browse and download only):**
@@ -741,6 +875,11 @@ User policies are stored as a JSON array of policy objects. Each object specifie
[{"bucket": "user-bucket", "actions": ["list", "read", "write", "delete"]}]
```
**Operator (data operations and bucket management, no configuration access):**
```json
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "create_bucket", "delete_bucket"]}]
```
**Multiple Bucket Access (different permissions per bucket):**
```json
[
@@ -750,9 +889,14 @@ User policies are stored as a JSON array of policy objects. Each object specifie
]
```
**Prefix-Scoped Access (restrict to a folder inside a shared bucket):**
```json
[{"bucket": "shared-data", "actions": ["list", "read", "write", "delete"], "prefix": "team-a/"}]
```
**IAM Manager (manage users but no data access):**
```json
[{"bucket": "*", "actions": ["iam:list_users", "iam:create_user", "iam:delete_user", "iam:rotate_key", "iam:update_policy"]}]
[{"bucket": "*", "actions": ["iam:list_users", "iam:create_user", "iam:delete_user", "iam:rotate_key", "iam:update_policy", "iam:create_key", "iam:delete_key", "iam:get_user", "iam:get_policy", "iam:disable_user"]}]
```
**Replication Operator (manage replication only):**
@@ -772,10 +916,10 @@ User policies are stored as a JSON array of policy objects. Each object specifie
**Bucket Administrator (full bucket config, no IAM access):**
```json
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "policy", "lifecycle", "cors"]}]
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "create_bucket", "delete_bucket", "share", "policy", "versioning", "tagging", "encryption", "lifecycle", "cors", "replication", "quota", "object_lock", "notification", "logging", "website"]}]
```
**Upload-Only User (write but cannot read back):**
**Upload-Only User (write but cannot create/delete buckets):**
```json
[{"bucket": "drop-box", "actions": ["write"]}]
```
@@ -850,6 +994,30 @@ curl -X POST http://localhost:5000/iam/users/<access-key>/expiry \
# Delete a user (requires iam:delete_user)
curl -X DELETE http://localhost:5000/iam/users/<access-key> \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
# Get user details (requires iam:get_user) — via Admin API
curl http://localhost:5000/admin/iam/users/<user-id-or-access-key> \
-H "Authorization: AWS4-HMAC-SHA256 ..."
# Get user policies (requires iam:get_policy) — via Admin API
curl http://localhost:5000/admin/iam/users/<user-id-or-access-key>/policies \
-H "Authorization: AWS4-HMAC-SHA256 ..."
# Create additional access key for a user (requires iam:create_key)
curl -X POST http://localhost:5000/admin/iam/users/<user-id-or-access-key>/keys \
-H "Authorization: AWS4-HMAC-SHA256 ..."
# Delete an access key (requires iam:delete_key)
curl -X DELETE http://localhost:5000/admin/iam/users/<user-id>/keys/<access-key> \
-H "Authorization: AWS4-HMAC-SHA256 ..."
# Disable a user account (requires iam:disable_user)
curl -X POST http://localhost:5000/admin/iam/users/<user-id-or-access-key>/disable \
-H "Authorization: AWS4-HMAC-SHA256 ..."
# Re-enable a user account (requires iam:disable_user)
curl -X POST http://localhost:5000/admin/iam/users/<user-id-or-access-key>/enable \
-H "Authorization: AWS4-HMAC-SHA256 ..."
```
### Permission Precedence

View File

@@ -6,7 +6,7 @@ python-dotenv>=1.2.1
pytest>=9.0.2
requests>=2.32.5
boto3>=1.42.14
waitress>=3.0.2
granian>=2.2.0
psutil>=7.1.3
cryptography>=46.0.3
defusedxml>=0.7.1

109
run.py
View File

@@ -2,7 +2,9 @@
from __future__ import annotations
import argparse
import atexit
import os
import signal
import sys
import warnings
import multiprocessing
@@ -40,24 +42,42 @@ def _is_frozen() -> bool:
return getattr(sys, 'frozen', False) or '__compiled__' in globals()
def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
app = create_api_app()
if prod:
from waitress import serve
def _serve_granian(target: str, port: int, config: Optional[AppConfig] = None) -> None:
from granian import Granian
from granian.constants import Interfaces
from granian.http import HTTP1Settings
kwargs: dict = {
"target": target,
"address": _server_host(),
"port": port,
"interface": Interfaces.WSGI,
"factory": True,
"workers": 1,
}
if config:
serve(
app,
host=_server_host(),
port=port,
ident="MyFSIO",
threads=config.server_threads,
connection_limit=config.server_connection_limit,
backlog=config.server_backlog,
channel_timeout=config.server_channel_timeout,
kwargs["blocking_threads"] = config.server_threads
kwargs["backlog"] = config.server_backlog
kwargs["backpressure"] = config.server_connection_limit
kwargs["http1_settings"] = HTTP1Settings(
header_read_timeout=config.server_channel_timeout * 1000,
max_buffer_size=config.server_max_buffer_size,
)
else:
serve(app, host=_server_host(), port=port, ident="MyFSIO")
kwargs["http1_settings"] = HTTP1Settings(
max_buffer_size=1024 * 1024 * 128,
)
server = Granian(**kwargs)
server.serve()
def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
if prod:
_serve_granian("app:create_api_app", port, config)
else:
app = create_api_app()
debug = _is_debug_enabled()
if debug:
warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
@@ -65,23 +85,10 @@ def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None)
def serve_ui(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
app = create_ui_app()
if prod:
from waitress import serve
if config:
serve(
app,
host=_server_host(),
port=port,
ident="MyFSIO",
threads=config.server_threads,
connection_limit=config.server_connection_limit,
backlog=config.server_backlog,
channel_timeout=config.server_channel_timeout,
)
else:
serve(app, host=_server_host(), port=port, ident="MyFSIO")
_serve_granian("app:create_ui_app", port, config)
else:
app = create_ui_app()
debug = _is_debug_enabled()
if debug:
warnings.warn("DEBUG MODE ENABLED - DO NOT USE IN PRODUCTION", RuntimeWarning)
@@ -126,6 +133,7 @@ def reset_credentials() -> None:
pass
if raw_config and raw_config.get("users"):
is_v2 = raw_config.get("version", 1) >= 2
admin_user = None
for user in raw_config["users"]:
policies = user.get("policies", [])
@@ -139,15 +147,39 @@ def reset_credentials() -> None:
if not admin_user:
admin_user = raw_config["users"][0]
if is_v2:
admin_keys = admin_user.get("access_keys", [])
if admin_keys:
admin_keys[0]["access_key"] = access_key
admin_keys[0]["secret_key"] = secret_key
else:
from datetime import datetime as _dt, timezone as _tz
admin_user["access_keys"] = [{
"access_key": access_key,
"secret_key": secret_key,
"status": "active",
"created_at": _dt.now(_tz.utc).isoformat(),
}]
else:
admin_user["access_key"] = access_key
admin_user["secret_key"] = secret_key
else:
from datetime import datetime as _dt, timezone as _tz
raw_config = {
"version": 2,
"users": [
{
"user_id": f"u-{secrets.token_hex(8)}",
"display_name": "Local Admin",
"enabled": True,
"access_keys": [
{
"access_key": access_key,
"secret_key": secret_key,
"display_name": "Local Admin",
"status": "active",
"created_at": _dt.now(_tz.utc).isoformat(),
}
],
"policies": [
{"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
],
@@ -192,7 +224,7 @@ if __name__ == "__main__":
parser.add_argument("--mode", choices=["api", "ui", "both", "reset-cred"], default="both")
parser.add_argument("--api-port", type=int, default=5000)
parser.add_argument("--ui-port", type=int, default=5100)
parser.add_argument("--prod", action="store_true", help="Run in production mode using Waitress")
parser.add_argument("--prod", action="store_true", help="Run in production mode using Granian")
parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)")
parser.add_argument("--check-config", action="store_true", help="Validate configuration and exit")
parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
@@ -235,7 +267,7 @@ if __name__ == "__main__":
pass
if prod_mode:
print("Running in production mode (Waitress)")
print("Running in production mode (Granian)")
issues = config.validate_and_report()
critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
if critical_issues:
@@ -248,11 +280,22 @@ if __name__ == "__main__":
if args.mode in {"api", "both"}:
print(f"Starting API server on port {args.api_port}...")
api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config), daemon=True)
api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config))
api_proc.start()
else:
api_proc = None
def _cleanup_api():
if api_proc and api_proc.is_alive():
api_proc.terminate()
api_proc.join(timeout=5)
if api_proc.is_alive():
api_proc.kill()
if api_proc:
atexit.register(_cleanup_api)
signal.signal(signal.SIGTERM, lambda *_: sys.exit(0))
if args.mode in {"ui", "both"}:
print(f"Starting UI server on port {args.ui_port}...")
serve_ui(args.ui_port, prod_mode, config)

View File

@@ -379,29 +379,25 @@ if [[ "$SKIP_SYSTEMD" != true ]]; then
echo " ---------------"
if systemctl is-active --quiet myfsio; then
echo " [OK] MyFSIO is running"
IAM_FILE="$DATA_DIR/.myfsio.sys/config/iam.json"
if [[ -f "$IAM_FILE" ]]; then
echo ""
echo " ============================================"
echo " ADMIN CREDENTIALS (save these securely!)"
echo " ============================================"
if command -v jq &>/dev/null; then
ACCESS_KEY=$(jq -r '.users[0].access_key' "$IAM_FILE" 2>/dev/null)
SECRET_KEY=$(jq -r '.users[0].secret_key' "$IAM_FILE" 2>/dev/null)
else
ACCESS_KEY=$(grep -o '"access_key"[[:space:]]*:[[:space:]]*"[^"]*"' "$IAM_FILE" | head -1 | sed 's/.*"\([^"]*\)"$/\1/')
SECRET_KEY=$(grep -o '"secret_key"[[:space:]]*:[[:space:]]*"[^"]*"' "$IAM_FILE" | head -1 | sed 's/.*"\([^"]*\)"$/\1/')
fi
if [[ -n "$ACCESS_KEY" && -n "$SECRET_KEY" ]]; then
CRED_OUTPUT=$(journalctl -u myfsio --no-pager -n 50 2>/dev/null | grep -A 5 "FIRST RUN - ADMIN CREDENTIALS")
ACCESS_KEY=$(echo "$CRED_OUTPUT" | grep "Access Key:" | head -1 | sed 's/.*Access Key: //' | awk '{print $1}')
SECRET_KEY=$(echo "$CRED_OUTPUT" | grep "Secret Key:" | head -1 | sed 's/.*Secret Key: //' | awk '{print $1}')
if [[ -n "$ACCESS_KEY" && "$ACCESS_KEY" != *"from"* && -n "$SECRET_KEY" && "$SECRET_KEY" != *"from"* ]]; then
echo " Access Key: $ACCESS_KEY"
echo " Secret Key: $SECRET_KEY"
else
echo " [!] Could not parse credentials from $IAM_FILE"
echo " Check the file manually or view service logs."
echo " [!] Could not extract credentials from service logs."
echo " Check startup output: journalctl -u myfsio --no-pager | grep -A 5 'ADMIN CREDENTIALS'"
echo " Or reset credentials: $INSTALL_DIR/myfsio reset-cred"
fi
echo " ============================================"
fi
echo ""
echo " NOTE: The IAM config file is encrypted at rest."
echo " Credentials are only shown on first run or after reset."
else
echo " [WARNING] MyFSIO may not have started correctly"
echo " Check logs with: journalctl -u myfsio -f"
@@ -427,12 +423,13 @@ echo " API: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo "local
echo " UI: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo "localhost"):$UI_PORT/ui"
echo ""
echo "Credentials:"
echo " Admin credentials were shown above (if service was started)."
echo " You can also find them in: $DATA_DIR/.myfsio.sys/config/iam.json"
echo " Admin credentials are shown on first service start (see above)."
echo " The IAM config is encrypted at rest and cannot be read directly."
echo " To reset credentials: $INSTALL_DIR/myfsio reset-cred"
echo ""
echo "Configuration Files:"
echo " Environment: $INSTALL_DIR/myfsio.env"
echo " IAM Users: $DATA_DIR/.myfsio.sys/config/iam.json"
echo " IAM Users: $DATA_DIR/.myfsio.sys/config/iam.json (encrypted)"
echo " Bucket Policies: $DATA_DIR/.myfsio.sys/config/bucket_policies.json"
echo " Secret Key: $DATA_DIR/.myfsio.sys/config/.secret (auto-generated)"
echo ""

View File

@@ -230,11 +230,14 @@ if [[ "$KEEP_DATA" == true ]]; then
echo ""
echo "Preserved files include:"
echo " - All buckets and objects"
echo " - IAM configuration: $DATA_DIR/.myfsio.sys/config/iam.json"
echo " - IAM configuration: $DATA_DIR/.myfsio.sys/config/iam.json (encrypted at rest)"
echo " - Bucket policies: $DATA_DIR/.myfsio.sys/config/bucket_policies.json"
echo " - Secret key: $DATA_DIR/.myfsio.sys/config/.secret"
echo " - Encryption keys: $DATA_DIR/.myfsio.sys/keys/ (if encryption was enabled)"
echo ""
echo "NOTE: The IAM config is encrypted and requires the SECRET_KEY to read."
echo " Keep the .secret file intact for reinstallation."
echo ""
echo "To reinstall MyFSIO with existing data:"
echo " ./install.sh --data-dir $DATA_DIR"
echo ""

View File

@@ -15,6 +15,12 @@
--myfsio-hover-bg: rgba(59, 130, 246, 0.12);
--myfsio-accent: #3b82f6;
--myfsio-accent-hover: #2563eb;
--myfsio-tag-key-bg: #e0e7ff;
--myfsio-tag-key-text: #3730a3;
--myfsio-tag-value-bg: #f0f1fa;
--myfsio-tag-value-text: #4338ca;
--myfsio-tag-border: #c7d2fe;
--myfsio-tag-delete-hover: #ef4444;
}
[data-theme='dark'] {
@@ -34,6 +40,12 @@
--myfsio-hover-bg: rgba(59, 130, 246, 0.2);
--myfsio-accent: #60a5fa;
--myfsio-accent-hover: #3b82f6;
--myfsio-tag-key-bg: #312e81;
--myfsio-tag-key-text: #c7d2fe;
--myfsio-tag-value-bg: #1e1b4b;
--myfsio-tag-value-text: #a5b4fc;
--myfsio-tag-border: #4338ca;
--myfsio-tag-delete-hover: #f87171;
}
[data-theme='dark'] body,
@@ -2643,7 +2655,7 @@ pre code {
}
.objects-table-container {
max-height: none;
max-height: 60vh;
}
.preview-card {
@@ -3002,6 +3014,89 @@ body:has(.login-card) .main-wrapper {
padding: 0.375rem 1rem;
}
.tag-pill {
display: inline-flex;
border-radius: 9999px;
border: 1px solid var(--myfsio-tag-border);
overflow: hidden;
font-size: 0.75rem;
line-height: 1;
}
.tag-pill-key {
padding: 0.3rem 0.5rem;
background: var(--myfsio-tag-key-bg);
color: var(--myfsio-tag-key-text);
font-weight: 600;
}
.tag-pill-value {
padding: 0.3rem 0.5rem;
background: var(--myfsio-tag-value-bg);
color: var(--myfsio-tag-value-text);
font-weight: 400;
}
.tag-editor-card {
background: var(--myfsio-preview-bg);
border-radius: 0.5rem;
padding: 0.75rem;
}
.tag-editor-header,
.tag-editor-row {
display: grid;
grid-template-columns: 1fr 1fr 28px;
gap: 0.5rem;
align-items: center;
}
.tag-editor-header {
padding-bottom: 0.375rem;
border-bottom: 1px solid var(--myfsio-card-border);
margin-bottom: 0.5rem;
}
.tag-editor-header span {
font-size: 0.7rem;
font-weight: 600;
text-transform: uppercase;
color: var(--myfsio-muted);
letter-spacing: 0.05em;
}
.tag-editor-row {
margin-bottom: 0.375rem;
}
.tag-editor-delete {
display: inline-flex;
align-items: center;
justify-content: center;
width: 28px;
height: 28px;
border: none;
background: transparent;
color: var(--myfsio-muted);
border-radius: 0.375rem;
cursor: pointer;
transition: color 0.15s, background 0.15s;
}
.tag-editor-delete:hover {
color: var(--myfsio-tag-delete-hover);
background: rgba(239, 68, 68, 0.1);
}
.tag-editor-actions {
display: flex;
align-items: center;
gap: 0.5rem;
margin-top: 0.75rem;
padding-top: 0.5rem;
border-top: 1px solid var(--myfsio-card-border);
}
@media (prefers-reduced-motion: reduce) {
*,
*::before,

View File

@@ -98,6 +98,9 @@
const previewMetadata = document.getElementById('preview-metadata');
const previewMetadataList = document.getElementById('preview-metadata-list');
const previewPlaceholder = document.getElementById('preview-placeholder');
const previewPlaceholderDefault = previewPlaceholder ? previewPlaceholder.innerHTML : '';
const previewErrorAlert = document.getElementById('preview-error-alert');
const previewDetailsMeta = document.getElementById('preview-details-meta');
const previewImage = document.getElementById('preview-image');
const previewVideo = document.getElementById('preview-video');
const previewAudio = document.getElementById('preview-audio');
@@ -702,7 +705,7 @@
flushPendingStreamObjects();
hasMoreObjects = false;
totalObjectCount = loadedObjectCount;
if (!currentPrefix) bucketTotalObjects = totalObjectCount;
if (!currentPrefix && !useDelimiterMode) bucketTotalObjects = totalObjectCount;
updateObjectCountBadge();
if (objectsLoadingRow && objectsLoadingRow.parentNode) {
@@ -767,7 +770,7 @@
}
totalObjectCount = data.total_count || 0;
if (!append && !currentPrefix) bucketTotalObjects = totalObjectCount;
if (!append && !currentPrefix && !useDelimiterMode) bucketTotalObjects = totalObjectCount;
nextContinuationToken = data.next_continuation_token;
if (!append && objectsLoadingRow) {
@@ -849,6 +852,11 @@
selectCheckbox.checked = true;
row.classList.add('table-active');
}
if (activeRow && activeRow.dataset.key === row.dataset.key) {
row.classList.add('table-active');
activeRow = row;
}
});
const folderRows = document.querySelectorAll('.folder-row');
@@ -861,6 +869,11 @@
const checkbox = row.querySelector('[data-folder-select]');
checkbox?.addEventListener('change', (e) => {
e.stopPropagation();
if (checkbox.checked) {
selectedRows.set(folderPath, { key: folderPath, isFolder: true });
} else {
selectedRows.delete(folderPath);
}
const folderObjects = allObjects.filter(obj => obj.key.startsWith(folderPath));
folderObjects.forEach(obj => {
if (checkbox.checked) {
@@ -1345,8 +1358,11 @@
}
if (selectAllCheckbox) {
const filesInView = visibleItems.filter(item => item.type === 'file');
const total = filesInView.length;
const visibleSelectedCount = filesInView.filter(item => selectedRows.has(item.data.key)).length;
const foldersInView = visibleItems.filter(item => item.type === 'folder');
const total = filesInView.length + foldersInView.length;
const fileSelectedCount = filesInView.filter(item => selectedRows.has(item.data.key)).length;
const folderSelectedCount = foldersInView.filter(item => selectedRows.has(item.path)).length;
const visibleSelectedCount = fileSelectedCount + folderSelectedCount;
selectAllCheckbox.disabled = total === 0;
selectAllCheckbox.checked = visibleSelectedCount > 0 && visibleSelectedCount === total && total > 0;
selectAllCheckbox.indeterminate = visibleSelectedCount > 0 && visibleSelectedCount < total;
@@ -1368,8 +1384,12 @@
const keys = Array.from(selectedRows.keys());
bulkDeleteList.innerHTML = '';
if (bulkDeleteCount) {
const label = keys.length === 1 ? 'object' : 'objects';
bulkDeleteCount.textContent = `${keys.length} ${label} selected`;
const folderCount = keys.filter(k => k.endsWith('/')).length;
const objectCount = keys.length - folderCount;
const parts = [];
if (folderCount) parts.push(`${folderCount} folder${folderCount !== 1 ? 's' : ''}`);
if (objectCount) parts.push(`${objectCount} object${objectCount !== 1 ? 's' : ''}`);
bulkDeleteCount.textContent = `${parts.join(' and ')} selected`;
}
if (!keys.length) {
const empty = document.createElement('li');
@@ -1508,7 +1528,7 @@
};
const response = await fetch(endpoint, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
headers: { 'Content-Type': 'application/json', 'X-CSRFToken': window.getCsrfToken ? window.getCsrfToken() : '' },
body: JSON.stringify(payload),
});
const data = await response.json();
@@ -1952,6 +1972,10 @@
[previewImage, previewVideo, previewAudio, previewIframe].forEach((el) => {
if (!el) return;
el.classList.add('d-none');
if (el.tagName === 'IMG') {
el.removeAttribute('src');
el.onload = null;
}
if (el.tagName === 'VIDEO' || el.tagName === 'AUDIO') {
el.pause();
el.removeAttribute('src');
@@ -1964,9 +1988,38 @@
previewText.classList.add('d-none');
previewText.textContent = '';
}
previewPlaceholder.innerHTML = previewPlaceholderDefault;
previewPlaceholder.classList.remove('d-none');
};
let previewFailed = false;
const handlePreviewError = () => {
previewFailed = true;
if (downloadButton) {
downloadButton.classList.add('disabled');
downloadButton.removeAttribute('href');
}
if (presignButton) presignButton.disabled = true;
if (generatePresignButton) generatePresignButton.disabled = true;
if (previewDetailsMeta) previewDetailsMeta.classList.add('d-none');
if (previewMetadata) previewMetadata.classList.add('d-none');
const tagsPanel = document.getElementById('preview-tags');
if (tagsPanel) tagsPanel.classList.add('d-none');
const versionPanel = document.getElementById('version-panel');
if (versionPanel) versionPanel.classList.add('d-none');
if (previewErrorAlert) {
previewErrorAlert.textContent = 'Unable to load object \u2014 it may have been deleted, or the server returned an error.';
previewErrorAlert.classList.remove('d-none');
}
};
const clearPreviewError = () => {
previewFailed = false;
if (previewErrorAlert) previewErrorAlert.classList.add('d-none');
if (previewDetailsMeta) previewDetailsMeta.classList.remove('d-none');
};
async function fetchMetadata(metadataUrl) {
if (!metadataUrl) return null;
try {
@@ -1988,6 +2041,7 @@
previewPanel.classList.remove('d-none');
activeRow = row;
renderMetadata(null);
clearPreviewError();
previewKey.textContent = row.dataset.key;
previewSize.textContent = formatBytes(Number(row.dataset.size));
@@ -2011,18 +2065,71 @@
const previewUrl = row.dataset.previewUrl;
const lower = row.dataset.key.toLowerCase();
if (previewUrl && lower.match(/\.(png|jpg|jpeg|gif|webp|svg|ico|bmp)$/)) {
previewImage.src = previewUrl;
previewPlaceholder.innerHTML = '<div class="spinner-border spinner-border-sm text-secondary" role="status"></div><div class="small mt-2">Loading preview\u2026</div>';
const currentRow = row;
fetch(previewUrl)
.then((r) => {
if (activeRow !== currentRow) return;
if (!r.ok) {
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
handlePreviewError();
return;
}
return r.blob();
})
.then((blob) => {
if (!blob || activeRow !== currentRow) return;
const url = URL.createObjectURL(blob);
previewImage.onload = () => {
if (activeRow !== currentRow) { URL.revokeObjectURL(url); return; }
previewImage.classList.remove('d-none');
previewPlaceholder.classList.add('d-none');
};
previewImage.onerror = () => {
if (activeRow !== currentRow) { URL.revokeObjectURL(url); return; }
URL.revokeObjectURL(url);
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
};
previewImage.src = url;
})
.catch(() => {
if (activeRow !== currentRow) return;
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
handlePreviewError();
});
} else if (previewUrl && lower.match(/\.(mp4|webm|ogv|mov|avi|mkv)$/)) {
const currentRow = row;
previewVideo.onerror = () => {
if (activeRow !== currentRow) return;
previewVideo.classList.add('d-none');
previewPlaceholder.classList.remove('d-none');
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
handlePreviewError();
};
previewVideo.src = previewUrl;
previewVideo.classList.remove('d-none');
previewPlaceholder.classList.add('d-none');
} else if (previewUrl && lower.match(/\.(mp3|wav|flac|ogg|aac|m4a|wma)$/)) {
const currentRow = row;
previewAudio.onerror = () => {
if (activeRow !== currentRow) return;
previewAudio.classList.add('d-none');
previewPlaceholder.classList.remove('d-none');
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
handlePreviewError();
};
previewAudio.src = previewUrl;
previewAudio.classList.remove('d-none');
previewPlaceholder.classList.add('d-none');
} else if (previewUrl && lower.match(/\.(pdf)$/)) {
const currentRow = row;
previewIframe.onerror = () => {
if (activeRow !== currentRow) return;
previewIframe.classList.add('d-none');
previewPlaceholder.classList.remove('d-none');
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
handlePreviewError();
};
previewIframe.src = previewUrl;
previewIframe.style.minHeight = '500px';
previewIframe.classList.remove('d-none');
@@ -2047,14 +2154,17 @@
})
.catch(() => {
if (activeRow !== currentRow) return;
previewText.textContent = 'Failed to load preview';
previewText.classList.add('d-none');
previewPlaceholder.classList.remove('d-none');
previewPlaceholder.innerHTML = '<div class="small text-muted">Failed to load preview</div>';
handlePreviewError();
});
}
const metadataUrl = row.dataset.metadataUrl;
if (metadataUrl) {
const metadata = await fetchMetadata(metadataUrl);
if (activeRow === row) {
if (activeRow === row && !previewFailed) {
renderMetadata(metadata);
}
}
@@ -3152,6 +3262,15 @@
}
});
const foldersInView = visibleItems.filter(item => item.type === 'folder');
foldersInView.forEach(item => {
if (shouldSelect) {
selectedRows.set(item.path, { key: item.path, isFolder: true });
} else {
selectedRows.delete(item.path);
}
});
document.querySelectorAll('[data-folder-select]').forEach(cb => {
cb.checked = shouldSelect;
});
@@ -3948,9 +4067,14 @@
const cancelTagsButton = document.getElementById('cancelTagsButton');
let currentObjectTags = [];
let isEditingTags = false;
let savedObjectTags = [];
const loadObjectTags = async (row) => {
if (!row || !previewTagsPanel) return;
if (previewFailed) {
previewTagsPanel.classList.add('d-none');
return;
}
const tagsUrl = row.dataset.tagsUrl;
if (!tagsUrl) {
previewTagsPanel.classList.add('d-none');
@@ -3976,17 +4100,26 @@
previewTagsEmpty.classList.remove('d-none');
} else {
previewTagsEmpty.classList.add('d-none');
previewTagsList.innerHTML = currentObjectTags.map(t => `<span class="badge bg-info-subtle text-info">${escapeHtml(t.Key)}=${escapeHtml(t.Value)}</span>`).join('');
previewTagsList.innerHTML = currentObjectTags.map(t => `<span class="tag-pill"><span class="tag-pill-key">${escapeHtml(t.Key)}</span><span class="tag-pill-value">${escapeHtml(t.Value)}</span></span>`).join('');
}
};
const syncTagInputs = () => {
previewTagsInputs?.querySelectorAll('.tag-editor-row').forEach((row, idx) => {
if (idx < currentObjectTags.length) {
currentObjectTags[idx].Key = row.querySelector(`[data-tag-key="${idx}"]`)?.value || '';
currentObjectTags[idx].Value = row.querySelector(`[data-tag-value="${idx}"]`)?.value || '';
}
});
};
const renderTagEditor = () => {
if (!previewTagsInputs) return;
previewTagsInputs.innerHTML = currentObjectTags.map((t, idx) => `
<div class="input-group input-group-sm mb-1">
<input type="text" class="form-control" placeholder="Key" value="${escapeHtml(t.Key)}" data-tag-key="${idx}">
<input type="text" class="form-control" placeholder="Value" value="${escapeHtml(t.Value)}" data-tag-value="${idx}">
<button class="btn btn-outline-danger" type="button" onclick="removeTagRow(${idx})">
<div class="tag-editor-row">
<input type="text" class="form-control form-control-sm" placeholder="e.g. Environment" value="${escapeHtml(t.Key)}" data-tag-key="${idx}">
<input type="text" class="form-control form-control-sm" placeholder="e.g. Production" value="${escapeHtml(t.Value)}" data-tag-value="${idx}">
<button class="tag-editor-delete" type="button" onclick="removeTagRow(${idx})">
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16"><path d="M4.646 4.646a.5.5 0 0 1 .708 0L8 7.293l2.646-2.647a.5.5 0 0 1 .708.708L8.707 8l2.647 2.646a.5.5 0 0 1-.708.708L8 8.707l-2.646 2.647a.5.5 0 0 1-.708-.708L7.293 8 4.646 5.354a.5.5 0 0 1 0-.708z"/></svg>
</button>
</div>
@@ -3994,20 +4127,29 @@
};
// Delete the tag at position idx from the editor. Exposed on window because
// each row's delete button invokes it via an inline onclick attribute.
window.removeTagRow = (idx) => {
// Capture any in-progress typing first so the remaining rows keep their edits.
syncTagInputs();
currentObjectTags.splice(idx, 1);
renderTagEditor();
};
// Switch the tags panel into edit mode. A deep copy of the current tags is
// stashed in savedObjectTags so Cancel can restore the pre-edit state.
editTagsButton?.addEventListener('click', () => {
  savedObjectTags = currentObjectTags.map(({ Key, Value }) => ({ Key, Value }));
  isEditingTags = true;
  // Hide the read-only tag views and reveal the editor panel.
  previewTagsList.classList.add('d-none');
  previewTagsEmpty.classList.add('d-none');
  previewTagsEditor?.classList.remove('d-none');
  // Brief fade-in: set opacity 0 now, flip to 1 on the next animation frame.
  const editorCard = previewTagsEditor?.querySelector('.tag-editor-card');
  if (editorCard) {
    editorCard.style.opacity = '0';
    editorCard.style.transition = 'opacity 0.2s ease';
    requestAnimationFrame(() => { editorCard.style.opacity = '1'; });
  }
  renderTagEditor();
});
cancelTagsButton?.addEventListener('click', () => {
isEditingTags = false;
currentObjectTags = savedObjectTags.map(t => ({ Key: t.Key, Value: t.Value }));
previewTagsEditor?.classList.add('d-none');
previewTagsList.classList.remove('d-none');
renderObjectTags();
@@ -4018,6 +4160,7 @@
showMessage({ title: 'Limit reached', body: 'Maximum 10 tags allowed per object.', variant: 'warning' });
return;
}
syncTagInputs();
currentObjectTags.push({ Key: '', Value: '' });
renderTagEditor();
});
@@ -4026,7 +4169,7 @@
if (!activeRow) return;
const tagsUrl = activeRow.dataset.tagsUrl;
if (!tagsUrl) return;
const inputs = previewTagsInputs?.querySelectorAll('.input-group');
const inputs = previewTagsInputs?.querySelectorAll('.tag-editor-row');
const newTags = [];
inputs?.forEach((group, idx) => {
const key = group.querySelector(`[data-tag-key="${idx}"]`)?.value?.trim() || '';

View File

@@ -3,6 +3,8 @@ window.BucketDetailUpload = (function() {
const MULTIPART_THRESHOLD = 8 * 1024 * 1024;
const CHUNK_SIZE = 8 * 1024 * 1024;
const MAX_PART_RETRIES = 3;
const RETRY_BASE_DELAY_MS = 1000;
let state = {
isUploading: false,
@@ -204,6 +206,67 @@ window.BucketDetailUpload = (function() {
}
}
// Upload one multipart chunk with XMLHttpRequest (used instead of fetch so
// upload progress events are available). Resolves with the parsed JSON body
// on a 2xx response; rejects with a descriptive Error on non-2xx status,
// unparseable success body, network error, or abort.
function uploadPartXHR(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts) {
  return new Promise((resolve, reject) => {
    const xhr = new XMLHttpRequest();
    xhr.open('PUT', url, true);
    xhr.setRequestHeader('X-CSRFToken', csrfToken || '');
    xhr.upload.addEventListener('progress', (evt) => {
      if (!evt.lengthComputable) return;
      // Overall file progress = bytes completed in earlier parts + this part's bytes.
      updateProgressItem(progressItem, {
        status: `Part ${partNumber}/${totalParts}`,
        loaded: baseBytes + evt.loaded,
        total: fileSize
      });
    });
    xhr.addEventListener('load', () => {
      const ok = xhr.status >= 200 && xhr.status < 300;
      if (ok) {
        try {
          resolve(JSON.parse(xhr.responseText));
        } catch {
          reject(new Error(`Part ${partNumber}: invalid response`));
        }
        return;
      }
      // Prefer a server-supplied error message when the body is JSON.
      let message = `Part ${partNumber} failed (${xhr.status})`;
      try {
        const body = JSON.parse(xhr.responseText);
        if (body.error) message = body.error;
      } catch {
        // Non-JSON error body: keep the generic message.
      }
      reject(new Error(message));
    });
    xhr.addEventListener('error', () => reject(new Error(`Part ${partNumber}: network error`)));
    xhr.addEventListener('abort', () => reject(new Error(`Part ${partNumber}: aborted`)));
    xhr.send(chunk);
  });
}
// Upload one part, retrying on failure with exponential backoff
// (RETRY_BASE_DELAY_MS * 2^attempt: 1s, 2s, 4s, ...).
//
// Fix: an aborted upload is NOT retried. uploadPartXHR rejects with a
// "Part N: aborted" error when xhr.abort() fires (a deliberate cancel),
// and re-sending the part after a backoff delay would both resend data
// the user cancelled and postpone failure propagation by several seconds.
// Network errors and HTTP failures are still retried as before.
async function uploadPartWithRetry(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts) {
  let lastError;
  for (let attempt = 0; attempt <= MAX_PART_RETRIES; attempt++) {
    try {
      return await uploadPartXHR(url, chunk, csrfToken, baseBytes, fileSize, progressItem, partNumber, totalParts);
    } catch (err) {
      lastError = err;
      // Propagate a deliberate abort immediately instead of retrying.
      if (err && typeof err.message === 'string' && err.message.endsWith(': aborted')) {
        throw err;
      }
      if (attempt < MAX_PART_RETRIES) {
        const delay = RETRY_BASE_DELAY_MS * Math.pow(2, attempt);
        updateProgressItem(progressItem, {
          status: `Part ${partNumber}/${totalParts} retry ${attempt + 1}/${MAX_PART_RETRIES}...`,
          loaded: baseBytes,
          total: fileSize
        });
        await new Promise(r => setTimeout(r, delay));
      }
    }
  }
  throw lastError;
}
async function uploadMultipart(file, objectKey, metadata, progressItem, urls) {
const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;
@@ -233,26 +296,14 @@ window.BucketDetailUpload = (function() {
const end = Math.min(start + CHUNK_SIZE, file.size);
const chunk = file.slice(start, end);
updateProgressItem(progressItem, {
status: `Part ${partNumber}/${totalParts}`,
loaded: uploadedBytes,
total: file.size
});
const partData = await uploadPartWithRetry(
`${partUrl}?partNumber=${partNumber}`,
chunk, csrfToken, uploadedBytes, file.size,
progressItem, partNumber, totalParts
);
const partResp = await fetch(`${partUrl}?partNumber=${partNumber}`, {
method: 'PUT',
headers: { 'X-CSRFToken': csrfToken || '' },
body: chunk
});
if (!partResp.ok) {
const err = await partResp.json().catch(() => ({}));
throw new Error(err.error || `Part ${partNumber} failed`);
}
const partData = await partResp.json();
parts.push({ part_number: partNumber, etag: partData.etag });
uploadedBytes += chunk.size;
uploadedBytes += (end - start);
updateProgressItem(progressItem, {
loaded: uploadedBytes,

View File

@@ -17,12 +17,20 @@ window.IAMManagement = (function() {
var currentDeleteKey = null;
var currentExpiryKey = null;
var ALL_S3_ACTIONS = ['list', 'read', 'write', 'delete', 'share', 'policy', 'replication', 'lifecycle', 'cors'];
var ALL_S3_ACTIONS = [
'list', 'read', 'write', 'delete', 'share', 'policy',
'replication', 'lifecycle', 'cors',
'create_bucket', 'delete_bucket',
'versioning', 'tagging', 'encryption', 'quota',
'object_lock', 'notification', 'logging', 'website'
];
var policyTemplates = {
full: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'replication', 'lifecycle', 'cors', 'iam:*'] }],
full: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'create_bucket', 'delete_bucket', 'replication', 'lifecycle', 'cors', 'versioning', 'tagging', 'encryption', 'quota', 'object_lock', 'notification', 'logging', 'website', 'iam:*'] }],
readonly: [{ bucket: '*', actions: ['list', 'read'] }],
writer: [{ bucket: '*', actions: ['list', 'read', 'write'] }]
writer: [{ bucket: '*', actions: ['list', 'read', 'write'] }],
operator: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'create_bucket', 'delete_bucket'] }],
bucketadmin: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'create_bucket', 'delete_bucket', 'versioning', 'tagging', 'encryption', 'cors', 'lifecycle', 'quota', 'object_lock', 'notification', 'logging', 'website', 'replication'] }]
};
function isAdminUser(policies) {

View File

@@ -110,6 +110,14 @@
<span>Domains</span>
</a>
{% endif %}
{% if can_manage_iam %}
<a href="{{ url_for('ui.system_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.system_dashboard' %}active{% endif %}">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
<span>System</span>
</a>
{% endif %}
</div>
<div class="nav-section">
<span class="nav-section-title">Resources</span>
@@ -210,6 +218,14 @@
<span class="sidebar-link-text">Domains</span>
</a>
{% endif %}
{% if can_manage_iam %}
<a href="{{ url_for('ui.system_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.system_dashboard' %}active{% endif %}" data-tooltip="System">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
<span class="sidebar-link-text">System</span>
</a>
{% endif %}
</div>
<div class="nav-section">
<span class="nav-section-title">Resources</span>

View File

@@ -257,7 +257,8 @@
Share Link
</button>
</div>
<div class="p-3 rounded mb-3" style="background: var(--myfsio-preview-bg);">
<div id="preview-error-alert" class="alert alert-warning d-none py-2 px-3 mb-3 small" role="alert"></div>
<div id="preview-details-meta" class="p-3 rounded mb-3" style="background: var(--myfsio-preview-bg);">
<dl class="row small mb-0">
<dt class="col-5 text-muted fw-normal">Last modified</dt>
<dd class="col-7 mb-2 fw-medium" id="preview-modified"></dd>
@@ -292,19 +293,28 @@
Edit
</button>
</div>
<div id="preview-tags-list" class="d-flex flex-wrap gap-1"></div>
<div id="preview-tags-list" class="d-flex flex-wrap gap-2"></div>
<div id="preview-tags-empty" class="text-muted small p-2 bg-body-tertiary rounded">No tags</div>
<div id="preview-tags-editor" class="d-none mt-2">
<div id="preview-tags-inputs" class="mb-2"></div>
<div class="d-flex gap-2">
<button class="btn btn-sm btn-outline-secondary flex-grow-1" type="button" id="addTagRow">
<div class="tag-editor-card">
<div class="tag-editor-header">
<span>Key</span>
<span>Value</span>
<span></span>
</div>
<div id="preview-tags-inputs"></div>
<div class="tag-editor-actions">
<button class="btn btn-sm btn-outline-secondary" type="button" id="addTagRow">
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path d="M8 4a.5.5 0 0 1 .5.5v3h3a.5.5 0 0 1 0 1h-3v3a.5.5 0 0 1-1 0v-3h-3a.5.5 0 0 1 0-1h3v-3A.5.5 0 0 1 8 4z"/>
</svg>
Add Tag
</button>
<button class="btn btn-sm btn-primary" type="button" id="saveTagsButton">Save</button>
<div class="ms-auto d-flex gap-2">
<button class="btn btn-sm btn-outline-secondary" type="button" id="cancelTagsButton">Cancel</button>
<button class="btn btn-sm btn-primary" type="button" id="saveTagsButton">Save</button>
</div>
</div>
</div>
<div class="form-text mt-1">Maximum 10 tags. Keys and values up to 256 characters.</div>
</div>
@@ -2048,7 +2058,7 @@
<div class="col-12">
<label class="form-label fw-medium">Select files</label>
<input class="form-control" type="file" name="object" id="uploadFileInput" multiple required />
<div class="form-text">Select one or more files from your device. Files ≥ 8&nbsp;MB automatically switch to multipart uploads.</div>
<div class="form-text">Select one or more files from your device. Files ≥ 8&nbsp;MB use multipart uploads with automatic retry.</div>
</div>
<div class="col-12">
<div class="upload-dropzone text-center" data-dropzone>

View File

@@ -51,7 +51,7 @@
</div>
<div>
<h5 class="bucket-name text-break">{{ bucket.meta.name }}</h5>
<small class="text-muted">Created {{ bucket.meta.created_at | format_datetime }}</small>
<small class="text-muted">Created {{ bucket.meta.creation_date | format_datetime }}</small>
</div>
</div>
<span class="badge {{ bucket.access_badge }} bucket-access-badge">{{ bucket.access_label }}</span>

View File

@@ -40,6 +40,8 @@
<li><a href="#quotas">Bucket Quotas</a></li>
<li><a href="#encryption">Encryption</a></li>
<li><a href="#lifecycle">Lifecycle Rules</a></li>
<li><a href="#garbage-collection">Garbage Collection</a></li>
<li><a href="#integrity">Integrity Scanner</a></li>
<li><a href="#metrics">Metrics History</a></li>
<li><a href="#operation-metrics">Operation Metrics</a></li>
<li><a href="#troubleshooting">Troubleshooting</a></li>
@@ -82,7 +84,7 @@ pip install -r requirements.txt
# Run both API and UI (Development)
python run.py
# Run in Production (Waitress server)
# Run in Production (Granian server)
python run.py --prod
# Or run individually
@@ -218,7 +220,7 @@ python run.py --mode ui
<tr>
<td><code>SERVER_THREADS</code></td>
<td><code>0</code> (auto)</td>
<td>Waitress worker threads (1-64). 0 = auto (CPU cores × 2).</td>
<td>Granian blocking threads (1-64). 0 = auto (CPU cores × 2).</td>
</tr>
<tr>
<td><code>SERVER_CONNECTION_LIMIT</code></td>
@@ -1627,10 +1629,217 @@ curl "{{ api_base }}/&lt;bucket&gt;?lifecycle" \
</div>
</div>
</article>
<article id="metrics" class="card shadow-sm docs-section">
<article id="garbage-collection" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">14</span>
<h2 class="h4 mb-0">Garbage Collection</h2>
</div>
<p class="text-muted">Automatically clean up orphaned data that accumulates over time: stale temp files, abandoned multipart uploads, stale lock files, orphaned metadata, orphaned versions, and empty directories.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Enabling GC</h3>
<p class="small text-muted">Disabled by default. Enable via environment variable:</p>
<pre class="mb-3"><code class="language-bash">GC_ENABLED=true python run.py</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Configuration</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Variable</th>
<th>Default</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr><td><code>GC_ENABLED</code></td><td><code>false</code></td><td>Enable garbage collection</td></tr>
<tr><td><code>GC_INTERVAL_HOURS</code></td><td><code>6</code></td><td>Hours between GC cycles</td></tr>
<tr><td><code>GC_TEMP_FILE_MAX_AGE_HOURS</code></td><td><code>24</code></td><td>Delete temp files older than this</td></tr>
<tr><td><code>GC_MULTIPART_MAX_AGE_DAYS</code></td><td><code>7</code></td><td>Delete orphaned multipart uploads older than this</td></tr>
<tr><td><code>GC_LOCK_FILE_MAX_AGE_HOURS</code></td><td><code>1</code></td><td>Delete stale lock files older than this</td></tr>
<tr><td><code>GC_DRY_RUN</code></td><td><code>false</code></td><td>Log what would be deleted without removing</td></tr>
</tbody>
</table>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">What Gets Cleaned</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Type</th>
<th>Location</th>
<th>Condition</th>
</tr>
</thead>
<tbody>
<tr><td><strong>Temp files</strong></td><td><code>.myfsio.sys/tmp/</code></td><td>Older than configured max age</td></tr>
<tr><td><strong>Orphaned multipart</strong></td><td><code>.myfsio.sys/multipart/</code></td><td>Older than configured max age</td></tr>
<tr><td><strong>Stale lock files</strong></td><td><code>.myfsio.sys/buckets/&lt;bucket&gt;/locks/</code></td><td>Older than configured max age</td></tr>
<tr><td><strong>Orphaned metadata</strong></td><td><code>.myfsio.sys/buckets/&lt;bucket&gt;/meta/</code></td><td>Object file no longer exists</td></tr>
<tr><td><strong>Orphaned versions</strong></td><td><code>.myfsio.sys/buckets/&lt;bucket&gt;/versions/</code></td><td>Main object no longer exists</td></tr>
<tr><td><strong>Empty directories</strong></td><td>Various internal dirs</td><td>Directory is empty after cleanup</td></tr>
</tbody>
</table>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">Admin API</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Method</th>
<th>Route</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr><td><code>GET</code></td><td><code>/admin/gc/status</code></td><td>Get GC status and configuration</td></tr>
<tr><td><code>POST</code></td><td><code>/admin/gc/run</code></td><td>Trigger manual GC run</td></tr>
<tr><td><code>GET</code></td><td><code>/admin/gc/history</code></td><td>Get execution history</td></tr>
</tbody>
</table>
</div>
<pre class="mb-3"><code class="language-bash"># Trigger a dry run (preview what would be cleaned)
curl -X POST "{{ api_base }}/admin/gc/run" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-H "Content-Type: application/json" \
-d '{"dry_run": true}'
# Trigger actual GC
curl -X POST "{{ api_base }}/admin/gc/run" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Check status
curl "{{ api_base }}/admin/gc/status" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# View history
curl "{{ api_base }}/admin/gc/history?limit=10" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"</code></pre>
<div class="alert alert-light border mb-0">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1 flex-shrink-0" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
</svg>
<div>
<strong>Dry Run:</strong> Use <code>GC_DRY_RUN=true</code> or pass <code>{"dry_run": true}</code> to the API to preview what would be deleted without actually removing anything. Check the logs or API response for details.
</div>
</div>
</div>
</div>
</article>
<article id="integrity" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">15</span>
<h2 class="h4 mb-0">Integrity Scanner</h2>
</div>
<p class="text-muted">Detect and optionally auto-repair data inconsistencies: corrupted objects, orphaned files, phantom metadata, stale versions, ETag cache drift, and unmigrated legacy metadata.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Enabling Integrity Scanner</h3>
<p class="small text-muted">Disabled by default. Enable via environment variable:</p>
<pre class="mb-3"><code class="language-bash">INTEGRITY_ENABLED=true python run.py</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Configuration</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Variable</th>
<th>Default</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr><td><code>INTEGRITY_ENABLED</code></td><td><code>false</code></td><td>Enable background integrity scanning</td></tr>
<tr><td><code>INTEGRITY_INTERVAL_HOURS</code></td><td><code>24</code></td><td>Hours between scan cycles</td></tr>
<tr><td><code>INTEGRITY_BATCH_SIZE</code></td><td><code>1000</code></td><td>Max objects to scan per cycle</td></tr>
<tr><td><code>INTEGRITY_AUTO_HEAL</code></td><td><code>false</code></td><td>Automatically repair detected issues</td></tr>
<tr><td><code>INTEGRITY_DRY_RUN</code></td><td><code>false</code></td><td>Log issues without healing</td></tr>
</tbody>
</table>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">What Gets Checked</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Check</th>
<th>Detection</th>
<th>Heal Action</th>
</tr>
</thead>
<tbody>
<tr><td><strong>Corrupted objects</strong></td><td>File MD5 does not match stored ETag</td><td>Update ETag in index (disk is authoritative)</td></tr>
<tr><td><strong>Orphaned objects</strong></td><td>File exists without metadata entry</td><td>Create index entry with computed MD5/size/mtime</td></tr>
<tr><td><strong>Phantom metadata</strong></td><td>Index entry exists but file is missing</td><td>Remove stale entry from index</td></tr>
<tr><td><strong>Stale versions</strong></td><td>Manifest without data or vice versa</td><td>Remove orphaned version file</td></tr>
<tr><td><strong>ETag cache</strong></td><td><code>etag_index.json</code> differs from metadata</td><td>Delete cache file (auto-rebuilt)</td></tr>
<tr><td><strong>Legacy metadata</strong></td><td>Legacy <code>.meta.json</code> differs or unmigrated</td><td>Migrate to index, delete legacy file</td></tr>
</tbody>
</table>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">Admin API</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Method</th>
<th>Route</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr><td><code>GET</code></td><td><code>/admin/integrity/status</code></td><td>Get scanner status and configuration</td></tr>
<tr><td><code>POST</code></td><td><code>/admin/integrity/run</code></td><td>Trigger manual scan</td></tr>
<tr><td><code>GET</code></td><td><code>/admin/integrity/history</code></td><td>Get scan history</td></tr>
</tbody>
</table>
</div>
<pre class="mb-3"><code class="language-bash"># Trigger a dry run with auto-heal preview
curl -X POST "{{ api_base }}/admin/integrity/run" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-H "Content-Type: application/json" \
-d '{"dry_run": true, "auto_heal": true}'
# Trigger actual scan with healing
curl -X POST "{{ api_base }}/admin/integrity/run" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-H "Content-Type: application/json" \
-d '{"auto_heal": true}'
# Check status
curl "{{ api_base }}/admin/integrity/status" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# View history
curl "{{ api_base }}/admin/integrity/history?limit=10" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"</code></pre>
<div class="alert alert-light border mb-0">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1 flex-shrink-0" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
</svg>
<div>
<strong>Dry Run:</strong> Use <code>INTEGRITY_DRY_RUN=true</code> or pass <code>{"dry_run": true}</code> to the API to preview detected issues without making any changes. Combine with <code>{"auto_heal": true}</code> to see what would be repaired.
</div>
</div>
</div>
</div>
</article>
<article id="metrics" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">16</span>
<h2 class="h4 mb-0">Metrics History</h2>
</div>
<p class="text-muted">Track CPU, memory, and disk usage over time with optional metrics history. Disabled by default to minimize overhead.</p>
@@ -1714,7 +1923,7 @@ curl -X PUT "{{ api_base | replace('/api', '/ui') }}/metrics/settings" \
<article id="operation-metrics" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">15</span>
<span class="docs-section-kicker">17</span>
<h2 class="h4 mb-0">Operation Metrics</h2>
</div>
<p class="text-muted">Track API request statistics including request counts, latency, error rates, and bandwidth usage. Provides real-time visibility into API operations.</p>
@@ -1821,7 +2030,7 @@ curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history?hours=6
<article id="troubleshooting" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">16</span>
<span class="docs-section-kicker">18</span>
<h2 class="h4 mb-0">Troubleshooting &amp; tips</h2>
</div>
<div class="table-responsive">
@@ -1872,7 +2081,7 @@ curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history?hours=6
<article id="health-check" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">17</span>
<span class="docs-section-kicker">19</span>
<h2 class="h4 mb-0">Health Check Endpoint</h2>
</div>
<p class="text-muted">The API exposes a health check endpoint for monitoring and load balancer integration.</p>
@@ -1894,7 +2103,7 @@ curl {{ api_base }}/myfsio/health
<article id="object-lock" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">18</span>
<span class="docs-section-kicker">20</span>
<h2 class="h4 mb-0">Object Lock &amp; Retention</h2>
</div>
<p class="text-muted">Object Lock prevents objects from being deleted or overwritten for a specified retention period.</p>
@@ -1954,7 +2163,7 @@ curl "{{ api_base }}/&lt;bucket&gt;/&lt;key&gt;?legal-hold" \
<article id="access-logging" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">19</span>
<span class="docs-section-kicker">21</span>
<h2 class="h4 mb-0">Access Logging</h2>
</div>
<p class="text-muted">Enable S3-style access logging to track all requests to your buckets for audit and analysis.</p>
@@ -1981,7 +2190,7 @@ curl "{{ api_base }}/&lt;bucket&gt;?logging" \
<article id="notifications" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">20</span>
<span class="docs-section-kicker">22</span>
<h2 class="h4 mb-0">Notifications &amp; Webhooks</h2>
</div>
<p class="text-muted">Configure event notifications to trigger webhooks when objects are created or deleted.</p>
@@ -2044,7 +2253,7 @@ curl -X PUT "{{ api_base }}/&lt;bucket&gt;?notification" \
<article id="select-content" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">21</span>
<span class="docs-section-kicker">23</span>
<h2 class="h4 mb-0">SelectObjectContent (SQL)</h2>
</div>
<p class="text-muted">Query CSV, JSON, or Parquet files directly using SQL without downloading the entire object.</p>
@@ -2089,7 +2298,7 @@ curl -X POST "{{ api_base }}/&lt;bucket&gt;/data.csv?select" \
<article id="advanced-ops" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">22</span>
<span class="docs-section-kicker">24</span>
<h2 class="h4 mb-0">Advanced S3 Operations</h2>
</div>
<p class="text-muted">Copy, move, and partially download objects using advanced S3 operations.</p>
@@ -2163,7 +2372,7 @@ curl "{{ api_base }}/&lt;bucket&gt;/&lt;key&gt;" \
<article id="acls" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">23</span>
<span class="docs-section-kicker">25</span>
<h2 class="h4 mb-0">Access Control Lists (ACLs)</h2>
</div>
<p class="text-muted">ACLs provide legacy-style permission management for buckets and objects.</p>
@@ -2217,7 +2426,7 @@ curl -X PUT "{{ api_base }}/&lt;bucket&gt;/&lt;key&gt;" \
<article id="tagging" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">24</span>
<span class="docs-section-kicker">26</span>
<h2 class="h4 mb-0">Object &amp; Bucket Tagging</h2>
</div>
<p class="text-muted">Add metadata tags to buckets and objects for organization, cost allocation, or lifecycle rule filtering.</p>
@@ -2276,7 +2485,7 @@ curl -X PUT "{{ api_base }}/&lt;bucket&gt;?tagging" \
<article id="website-hosting" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">25</span>
<span class="docs-section-kicker">27</span>
<h2 class="h4 mb-0">Static Website Hosting</h2>
</div>
<p class="text-muted">Host static websites directly from S3 buckets with custom index and error pages, served via custom domain mapping.</p>
@@ -2369,7 +2578,7 @@ server {
<article id="cors-config" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">26</span>
<span class="docs-section-kicker">28</span>
<h2 class="h4 mb-0">CORS Configuration</h2>
</div>
<p class="text-muted">Configure per-bucket Cross-Origin Resource Sharing rules to control which origins can access your bucket from a browser.</p>
@@ -2436,7 +2645,7 @@ curl -X DELETE "{{ api_base }}/&lt;bucket&gt;?cors" \
<article id="post-object" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">27</span>
<span class="docs-section-kicker">29</span>
<h2 class="h4 mb-0">PostObject (HTML Form Upload)</h2>
</div>
<p class="text-muted">Upload objects directly from an HTML form using browser-based POST uploads with policy-based authorization.</p>
@@ -2478,7 +2687,7 @@ curl -X DELETE "{{ api_base }}/&lt;bucket&gt;?cors" \
<article id="list-objects-v2" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">28</span>
<span class="docs-section-kicker">30</span>
<h2 class="h4 mb-0">List Objects API v2</h2>
</div>
<p class="text-muted">Use the v2 list API for improved pagination with continuation tokens instead of markers.</p>
@@ -2522,7 +2731,7 @@ curl "{{ api_base }}/&lt;bucket&gt;?list-type=2&amp;start-after=photos/2025/" \
<article id="upgrading" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">29</span>
<span class="docs-section-kicker">31</span>
<h2 class="h4 mb-0">Upgrading &amp; Updates</h2>
</div>
<p class="text-muted">How to safely update MyFSIO to a new version.</p>
@@ -2555,7 +2764,7 @@ cp -r logs/ logs-backup/</code></pre>
<article id="api-matrix" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">30</span>
<span class="docs-section-kicker">32</span>
<h2 class="h4 mb-0">Full API Reference</h2>
</div>
<p class="text-muted">Complete list of all S3-compatible, admin, and KMS endpoints.</p>
@@ -2653,6 +2862,7 @@ POST /kms/generate-random # Generate random bytes</code></pre>
<li><a href="#quotas">Bucket Quotas</a></li>
<li><a href="#encryption">Encryption</a></li>
<li><a href="#lifecycle">Lifecycle Rules</a></li>
<li><a href="#garbage-collection">Garbage Collection</a></li>
<li><a href="#metrics">Metrics History</a></li>
<li><a href="#operation-metrics">Operation Metrics</a></li>
<li><a href="#troubleshooting">Troubleshooting</a></li>

View File

@@ -235,7 +235,7 @@
{% set bucket_label = 'All Buckets' if policy.bucket == '*' else policy.bucket %}
{% if '*' in policy.actions %}
{% set perm_label = 'Full Access' %}
{% elif policy.actions|length >= 9 %}
{% elif policy.actions|length >= 19 %}
{% set perm_label = 'Full Access' %}
{% elif 'list' in policy.actions and 'read' in policy.actions and 'write' in policy.actions and 'delete' in policy.actions %}
{% set perm_label = 'Read + Write + Delete' %}
@@ -354,6 +354,8 @@
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="full">Full Control</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="readonly">Read-Only</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="writer">Read + Write</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="operator">Operator</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-create-policy-template="bucketadmin">Bucket Admin</button>
</div>
</div>
<div class="modal-footer">
@@ -404,6 +406,8 @@
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="full">Full Control</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="readonly">Read-Only</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="writer">Read + Write</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="operator">Operator</button>
<button class="btn btn-outline-secondary btn-sm" type="button" data-policy-template="bucketadmin">Bucket Admin</button>
</div>
</form>
</div>

View File

@@ -210,9 +210,6 @@
<div class="fw-bold" data-metric="health_uptime">{{ app.uptime_days }}d</div>
<small class="opacity-75" style="font-size: 0.7rem;">Uptime</small>
</div>
<div class="text-center">
<span class="badge bg-white bg-opacity-25 fw-semibold px-2 py-1">v{{ app.version }}</span>
</div>
</div>
</div>
</div>

750
templates/system.html Normal file
View File

@@ -0,0 +1,750 @@
{% extends "base.html" %}
{% block title %}System - MyFSIO Console{% endblock %}
{% block content %}
<div class="page-header d-flex justify-content-between align-items-center mb-4">
<div>
<p class="text-uppercase text-muted small mb-1">Administration</p>
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
System
</h1>
<p class="text-muted mb-0 mt-1">Server information, feature flags, and maintenance tools.</p>
</div>
<div class="d-none d-md-block">
<span class="badge bg-primary bg-opacity-10 text-primary fs-6 px-3 py-2">v{{ app_version }}</span>
</div>
</div>
<div class="row g-4 mb-4">
<div class="col-lg-6">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M5 0a.5.5 0 0 1 .5.5V2h1V.5a.5.5 0 0 1 1 0V2h1V.5a.5.5 0 0 1 1 0V2h1V.5a.5.5 0 0 1 1 0V2A2.5 2.5 0 0 1 14 4.5h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14v1h1.5a.5.5 0 0 1 0 1H14a2.5 2.5 0 0 1-2.5 2.5v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14h-1v1.5a.5.5 0 0 1-1 0V14A2.5 2.5 0 0 1 2 11.5H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2v-1H.5a.5.5 0 0 1 0-1H2A2.5 2.5 0 0 1 4.5 2V.5A.5.5 0 0 1 5 0zm-.5 3A1.5 1.5 0 0 0 3 4.5v7A1.5 1.5 0 0 0 4.5 13h7a1.5 1.5 0 0 0 1.5-1.5v-7A1.5 1.5 0 0 0 11.5 3h-7zM5 6.5A1.5 1.5 0 0 1 6.5 5h3A1.5 1.5 0 0 1 11 6.5v3A1.5 1.5 0 0 1 9.5 11h-3A1.5 1.5 0 0 1 5 9.5v-3zM6.5 6a.5.5 0 0 0-.5.5v3a.5.5 0 0 0 .5.5h3a.5.5 0 0 0 .5-.5v-3a.5.5 0 0 0-.5-.5h-3z"/>
</svg>
Server Information
</h5>
<p class="text-muted small mb-0">Runtime environment and configuration</p>
</div>
<div class="card-body px-4 pb-4">
<table class="table table-sm mb-0">
<tbody>
<tr><td class="text-muted" style="width:40%">Version</td><td class="fw-medium">{{ app_version }}</td></tr>
<tr><td class="text-muted">Storage Root</td><td><code>{{ storage_root }}</code></td></tr>
<tr><td class="text-muted">Platform</td><td>{{ platform }}</td></tr>
<tr><td class="text-muted">Python</td><td>{{ python_version }}</td></tr>
<tr><td class="text-muted">Rust Extension</td><td>
{% if has_rust %}
<span class="badge bg-success bg-opacity-10 text-success">Loaded</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">Not loaded</span>
{% endif %}
</td></tr>
</tbody>
</table>
</div>
</div>
</div>
<div class="col-lg-6">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M11.5 2a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zM9.05 3a2.5 2.5 0 0 1 4.9 0H16v1h-2.05a2.5 2.5 0 0 1-4.9 0H0V3h9.05zM4.5 7a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zM2.05 8a2.5 2.5 0 0 1 4.9 0H16v1H6.95a2.5 2.5 0 0 1-4.9 0H0V8h2.05zm9.45 4a1.5 1.5 0 1 0 0 3 1.5 1.5 0 0 0 0-3zm-2.45 1a2.5 2.5 0 0 1 4.9 0H16v1h-2.05a2.5 2.5 0 0 1-4.9 0H0v-1h9.05z"/>
</svg>
Feature Flags
</h5>
<p class="text-muted small mb-0">Features configured via environment variables</p>
</div>
<div class="card-body px-4 pb-4">
<table class="table table-sm mb-0">
<tbody>
{% for feat in features %}
<tr>
<td class="text-muted" style="width:55%">{{ feat.label }}</td>
<td class="text-end">
{% if feat.enabled %}
<span class="badge bg-success bg-opacity-10 text-success">Enabled</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
{% endif %}
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
</div>
</div>
<div class="row g-4 mb-4">
<div class="col-lg-6">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<div class="d-flex justify-content-between align-items-start">
<div>
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M2.5 1a1 1 0 0 0-1 1v1a1 1 0 0 0 1 1H3v9a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V4h.5a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H10a1 1 0 0 0-1-1H7a1 1 0 0 0-1 1H2.5zm3 4a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 .5-.5zM8 5a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7A.5.5 0 0 1 8 5zm3 .5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 1 0z"/>
</svg>
Garbage Collection
</h5>
<p class="text-muted small mb-0">Clean up temporary files, orphaned uploads, and stale locks</p>
</div>
<div>
{% if gc_status.enabled %}
<span class="badge bg-success bg-opacity-10 text-success">Active</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
{% endif %}
</div>
</div>
</div>
<div class="card-body px-4 pb-4">
{% if gc_status.enabled %}
{# GC action buttons. Render them disabled while a collection run is
   already in flight so a page loaded mid-run matches the live state —
   mirrors the integrity scanner buttons, which already do this. #}
<div class="d-flex gap-2 mb-3">
<button class="btn btn-primary btn-sm d-inline-flex align-items-center" id="gcRunBtn" onclick="runGC(false)" {% if gc_status.scanning %}disabled{% endif %}>
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 flex-shrink-0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
</svg>
Run Now
</button>
<button class="btn btn-outline-secondary btn-sm" id="gcDryRunBtn" onclick="runGC(true)" {% if gc_status.scanning %}disabled{% endif %}>
Dry Run
</button>
</div>
<div id="gcScanningBanner" class="mb-3 {% if not gc_status.scanning %}d-none{% endif %}">
<div class="alert alert-info mb-0 small d-flex align-items-center gap-2">
<div class="spinner-border spinner-border-sm text-info" role="status"></div>
<span>GC in progress<span id="gcScanElapsed"></span></span>
</div>
</div>
<div id="gcResult" class="mb-3 d-none">
<div class="alert mb-0 small" id="gcResultAlert">
<div class="d-flex justify-content-between align-items-start">
<div class="fw-semibold mb-1" id="gcResultTitle"></div>
<button type="button" class="btn-close btn-close-sm" style="font-size:0.65rem" onclick="document.getElementById('gcResult').classList.add('d-none')"></button>
</div>
<div id="gcResultBody"></div>
</div>
</div>
<div class="border rounded p-3 mb-3" style="background: var(--bs-tertiary-bg, #f8f9fa);">
<div class="d-flex align-items-center gap-2 mb-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
<span class="small fw-semibold text-muted">Configuration</span>
</div>
<div class="row small">
<div class="col-6 mb-1"><span class="text-muted">Interval:</span> {{ gc_status.interval_hours }}h</div>
<div class="col-6 mb-1"><span class="text-muted">Dry run:</span> {{ "Yes" if gc_status.dry_run else "No" }}</div>
<div class="col-6 mb-1"><span class="text-muted">Temp max age:</span> {{ gc_status.temp_file_max_age_hours }}h</div>
<div class="col-6 mb-1"><span class="text-muted">Lock max age:</span> {{ gc_status.lock_file_max_age_hours }}h</div>
<div class="col-6"><span class="text-muted">Multipart max age:</span> {{ gc_status.multipart_max_age_days }}d</div>
</div>
</div>
<div id="gcHistoryContainer">
{% if gc_history %}
<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>
<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>
<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/>
</svg>
Recent Executions
</h6>
<div class="table-responsive">
<table class="table table-sm small mb-0">
<thead class="table-light">
<tr>
<th>Time</th>
<th class="text-center">Cleaned</th>
<th class="text-center">Freed</th>
<th class="text-center">Mode</th>
</tr>
</thead>
<tbody>
{% for exec in gc_history %}
<tr>
<td class="text-nowrap">{{ exec.timestamp_display }}</td>
<td class="text-center">
{% set r = exec.result %}
{{ (r.temp_files_deleted|d(0)) + (r.multipart_uploads_deleted|d(0)) + (r.lock_files_deleted|d(0)) + (r.orphaned_metadata_deleted|d(0)) + (r.orphaned_versions_deleted|d(0)) + (r.empty_dirs_removed|d(0)) }}
</td>
<td class="text-center">{{ exec.bytes_freed_display }}</td>
<td class="text-center">
{% if exec.dry_run %}
<span class="badge bg-warning bg-opacity-10 text-warning">Dry run</span>
{% else %}
<span class="badge bg-primary bg-opacity-10 text-primary">Live</span>
{% endif %}
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% else %}
<div class="text-center py-2">
<p class="text-muted small mb-0">No executions recorded yet.</p>
</div>
{% endif %}
</div>
{% else %}
<div class="text-center py-4">
<svg xmlns="http://www.w3.org/2000/svg" width="40" height="40" fill="currentColor" class="text-muted mb-2 opacity-50" viewBox="0 0 16 16">
<path d="M2.5 1a1 1 0 0 0-1 1v1a1 1 0 0 0 1 1H3v9a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V4h.5a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H10a1 1 0 0 0-1-1H7a1 1 0 0 0-1 1H2.5zm3 4a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 .5-.5zM8 5a.5.5 0 0 1 .5.5v7a.5.5 0 0 1-1 0v-7A.5.5 0 0 1 8 5zm3 .5v7a.5.5 0 0 1-1 0v-7a.5.5 0 0 1 1 0z"/>
</svg>
<p class="text-muted mb-1">Garbage collection is not enabled.</p>
<p class="text-muted small mb-0">Set <code>GC_ENABLED=true</code> to enable automatic cleanup.</p>
</div>
{% endif %}
</div>
</div>
</div>
<div class="col-lg-6">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<div class="d-flex justify-content-between align-items-start">
<div>
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M5.338 1.59a61.44 61.44 0 0 0-2.837.856.481.481 0 0 0-.328.39c-.554 4.157.726 7.19 2.253 9.188a10.725 10.725 0 0 0 2.287 2.233c.346.244.652.42.893.533.12.057.218.095.293.118a.55.55 0 0 0 .101.025.615.615 0 0 0 .1-.025c.076-.023.174-.061.294-.118.24-.113.547-.29.893-.533a10.726 10.726 0 0 0 2.287-2.233c1.527-1.997 2.807-5.031 2.253-9.188a.48.48 0 0 0-.328-.39c-.651-.213-1.75-.56-2.837-.855C9.552 1.29 8.531 1.067 8 1.067c-.53 0-1.552.223-2.662.524zM5.072.56C6.157.265 7.31 0 8 0s1.843.265 2.928.56c1.11.3 2.229.655 2.887.87a1.54 1.54 0 0 1 1.044 1.262c.596 4.477-.787 7.795-2.465 9.99a11.775 11.775 0 0 1-2.517 2.453 7.159 7.159 0 0 1-1.048.625c-.28.132-.581.24-.829.24s-.548-.108-.829-.24a7.158 7.158 0 0 1-1.048-.625 11.777 11.777 0 0 1-2.517-2.453C1.928 10.487.545 7.169 1.141 2.692A1.54 1.54 0 0 1 2.185 1.43 62.456 62.456 0 0 1 5.072.56z"/>
<path d="M10.854 5.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7.5 7.793l2.646-2.647a.5.5 0 0 1 .708 0z"/>
</svg>
Integrity Scanner
</h5>
<p class="text-muted small mb-0">Detect and heal corrupted objects, orphaned files, and metadata drift</p>
</div>
<div>
{% if integrity_status.enabled %}
<span class="badge bg-success bg-opacity-10 text-success">Active</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">Disabled</span>
{% endif %}
</div>
</div>
</div>
<div class="card-body px-4 pb-4">
{% if integrity_status.enabled %}
<div class="d-flex gap-2 flex-wrap mb-3">
<button class="btn btn-primary btn-sm d-inline-flex align-items-center" id="integrityRunBtn" onclick="runIntegrity(false, false)" {% if integrity_status.scanning %}disabled{% endif %}>
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1 flex-shrink-0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
</svg>
Scan Now
</button>
<button class="btn btn-outline-warning btn-sm" id="integrityHealBtn" onclick="runIntegrity(false, true)" {% if integrity_status.scanning %}disabled{% endif %}>
Scan &amp; Heal
</button>
<button class="btn btn-outline-secondary btn-sm" id="integrityDryRunBtn" onclick="runIntegrity(true, false)" {% if integrity_status.scanning %}disabled{% endif %}>
Dry Run
</button>
</div>
<div id="integrityScanningBanner" class="mb-3 {% if not integrity_status.scanning %}d-none{% endif %}">
<div class="alert alert-info mb-0 small d-flex align-items-center gap-2">
<div class="spinner-border spinner-border-sm text-info" role="status"></div>
<span>Scan in progress<span id="integrityScanElapsed"></span></span>
</div>
</div>
<div id="integrityResult" class="mb-3 d-none">
<div class="alert mb-0 small" id="integrityResultAlert">
<div class="d-flex justify-content-between align-items-start">
<div class="fw-semibold mb-1" id="integrityResultTitle"></div>
<button type="button" class="btn-close btn-close-sm" style="font-size:0.65rem" onclick="document.getElementById('integrityResult').classList.add('d-none')"></button>
</div>
<div id="integrityResultBody"></div>
</div>
</div>
<div class="border rounded p-3 mb-3" style="background: var(--bs-tertiary-bg, #f8f9fa);">
<div class="d-flex align-items-center gap-2 mb-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
<path d="M9.405 1.05c-.413-1.4-2.397-1.4-2.81 0l-.1.34a1.464 1.464 0 0 1-2.105.872l-.31-.17c-1.283-.698-2.686.705-1.987 1.987l.169.311c.446.82.023 1.841-.872 2.105l-.34.1c-1.4.413-1.4 2.397 0 2.81l.34.1a1.464 1.464 0 0 1 .872 2.105l-.17.31c-.698 1.283.705 2.686 1.987 1.987l.311-.169a1.464 1.464 0 0 1 2.105.872l.1.34c.413 1.4 2.397 1.4 2.81 0l.1-.34a1.464 1.464 0 0 1 2.105-.872l.31.17c1.283.698 2.686-.705 1.987-1.987l-.169-.311a1.464 1.464 0 0 1 .872-2.105l.34-.1c1.4-.413 1.4-2.397 0-2.81l-.34-.1a1.464 1.464 0 0 1-.872-2.105l.17-.31c.698-1.283-.705-2.686-1.987-1.987l-.311.169a1.464 1.464 0 0 1-2.105-.872l-.1-.34zM8 10.93a2.929 2.929 0 1 1 0-5.86 2.929 2.929 0 0 1 0 5.858z"/>
</svg>
<span class="small fw-semibold text-muted">Configuration</span>
</div>
<div class="row small">
<div class="col-6 mb-1"><span class="text-muted">Interval:</span> {{ integrity_status.interval_hours }}h</div>
<div class="col-6 mb-1"><span class="text-muted">Dry run:</span> {{ "Yes" if integrity_status.dry_run else "No" }}</div>
<div class="col-6"><span class="text-muted">Batch size:</span> {{ integrity_status.batch_size }}</div>
<div class="col-6"><span class="text-muted">Auto-heal:</span> {{ "Yes" if integrity_status.auto_heal else "No" }}</div>
</div>
</div>
<div id="integrityHistoryContainer">
{% if integrity_history %}
<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>
<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>
<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/>
</svg>
Recent Scans
</h6>
<div class="table-responsive">
<table class="table table-sm small mb-0">
<thead class="table-light">
<tr>
<th>Time</th>
<th class="text-center">Scanned</th>
<th class="text-center">Issues</th>
<th class="text-center">Healed</th>
<th class="text-center">Mode</th>
</tr>
</thead>
<tbody>
{% for exec in integrity_history %}
<tr>
<td class="text-nowrap">{{ exec.timestamp_display }}</td>
<td class="text-center">{{ exec.result.objects_scanned|d(0) }}</td>
<td class="text-center">
{% set total_issues = (exec.result.corrupted_objects|d(0)) + (exec.result.orphaned_objects|d(0)) + (exec.result.phantom_metadata|d(0)) + (exec.result.stale_versions|d(0)) + (exec.result.etag_cache_inconsistencies|d(0)) + (exec.result.legacy_metadata_drifts|d(0)) %}
{% if total_issues > 0 %}
<span class="text-danger fw-medium">{{ total_issues }}</span>
{% else %}
<span class="text-success">0</span>
{% endif %}
</td>
<td class="text-center">{{ exec.result.issues_healed|d(0) }}</td>
<td class="text-center">
{% if exec.dry_run %}
<span class="badge bg-warning bg-opacity-10 text-warning">Dry</span>
{% elif exec.auto_heal %}
<span class="badge bg-success bg-opacity-10 text-success">Heal</span>
{% else %}
<span class="badge bg-primary bg-opacity-10 text-primary">Scan</span>
{% endif %}
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
{% else %}
<div class="text-center py-2">
<p class="text-muted small mb-0">No scans recorded yet.</p>
</div>
{% endif %}
</div>
{% else %}
<div class="text-center py-4">
<svg xmlns="http://www.w3.org/2000/svg" width="40" height="40" fill="currentColor" class="text-muted mb-2 opacity-50" viewBox="0 0 16 16">
<path d="M5.338 1.59a61.44 61.44 0 0 0-2.837.856.481.481 0 0 0-.328.39c-.554 4.157.726 7.19 2.253 9.188a10.725 10.725 0 0 0 2.287 2.233c.346.244.652.42.893.533.12.057.218.095.293.118a.55.55 0 0 0 .101.025.615.615 0 0 0 .1-.025c.076-.023.174-.061.294-.118.24-.113.547-.29.893-.533a10.726 10.726 0 0 0 2.287-2.233c1.527-1.997 2.807-5.031 2.253-9.188a.48.48 0 0 0-.328-.39c-.651-.213-1.75-.56-2.837-.855C9.552 1.29 8.531 1.067 8 1.067c-.53 0-1.552.223-2.662.524zM5.072.56C6.157.265 7.31 0 8 0s1.843.265 2.928.56c1.11.3 2.229.655 2.887.87a1.54 1.54 0 0 1 1.044 1.262c.596 4.477-.787 7.795-2.465 9.99a11.775 11.775 0 0 1-2.517 2.453 7.159 7.159 0 0 1-1.048.625c-.28.132-.581.24-.829.24s-.548-.108-.829-.24a7.158 7.158 0 0 1-1.048-.625 11.777 11.777 0 0 1-2.517-2.453C1.928 10.487.545 7.169 1.141 2.692A1.54 1.54 0 0 1 2.185 1.43 62.456 62.456 0 0 1 5.072.56z"/>
<path d="M10.854 5.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7.5 7.793l2.646-2.647a.5.5 0 0 1 .708 0z"/>
</svg>
<p class="text-muted mb-1">Integrity scanner is not enabled.</p>
<p class="text-muted small mb-0">Set <code>INTEGRITY_ENABLED=true</code> to enable automatic scanning.</p>
</div>
{% endif %}
</div>
</div>
</div>
</div>
{% endblock %}
{% block extra_scripts %}
<script>
(function () {
var csrfToken = document.querySelector('meta[name="csrf-token"]')?.getAttribute('content') || '';
// Toggle a button between its idle label and a "Running..." spinner.
// btnId: DOM id of the button; loading: desired busy state;
// spinnerOnly: when truthy, only disable the button, keep its label.
function setLoading(btnId, loading, spinnerOnly) {
var btn = document.getElementById(btnId);
if (!btn) return;
btn.disabled = loading;
if (loading && !spinnerOnly) {
// Stash the label only once: a repeated loading=true call must not
// overwrite the saved markup with the spinner markup (which would
// make the original label unrecoverable).
if (!btn.dataset.originalHtml) {
btn.dataset.originalHtml = btn.innerHTML;
}
btn.innerHTML = '<span class="spinner-border spinner-border-sm me-1" role="status"></span>Running...';
} else if (!loading && btn.dataset.originalHtml) {
btn.innerHTML = btn.dataset.originalHtml;
// Drop the stash so the next cycle captures the current label.
delete btn.dataset.originalHtml;
}
}
// Render a byte count as a short human-readable string, e.g. "1.5 MB".
// Values under 1 KB are shown without decimals; larger units get one
// decimal place. Falsy input (0, null, undefined) renders as "0 B".
function formatBytes(bytes) {
if (!bytes) return '0 B';
var labels = ['B', 'KB', 'MB', 'GB'];
var value = bytes;
var unitIdx = 0;
for (; value >= 1024 && unitIdx < labels.length - 1; unitIdx++) {
value /= 1024;
}
return (unitIdx === 0 ? value : value.toFixed(1)) + ' ' + labels[unitIdx];
}
var _displayTimezone = {{ display_timezone|tojson }};
// Format a Unix timestamp (seconds) for display in the configured
// timezone (_displayTimezone, injected by the server). If the timezone
// name is rejected by toLocaleString, fall back to a manual UTC string.
function formatTimestamp(ts) {
var when = new Date(ts * 1000);
try {
return when.toLocaleString('en-US', {
year: 'numeric', month: 'short', day: '2-digit',
hour: '2-digit', minute: '2-digit', hour12: false,
timeZone: _displayTimezone, timeZoneName: 'short'
});
} catch (err) {
var two = function (n) { return n < 10 ? '0' + n : '' + n; };
return when.getUTCFullYear() + '-' + two(when.getUTCMonth() + 1) + '-' + two(when.getUTCDate()) +
' ' + two(when.getUTCHours()) + ':' + two(when.getUTCMinutes()) + ' UTC';
}
}
var _gcHistoryIcon = '<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">' +
'<path d="M8.515 1.019A7 7 0 0 0 8 1V0a8 8 0 0 1 .589.022l-.074.997zm2.004.45a7.003 7.003 0 0 0-.985-.299l.219-.976c.383.086.76.2 1.126.342l-.36.933zm1.37.71a7.01 7.01 0 0 0-.439-.27l.493-.87a8.025 8.025 0 0 1 .979.654l-.615.789a6.996 6.996 0 0 0-.418-.302zm1.834 1.79a6.99 6.99 0 0 0-.653-.796l.724-.69c.27.285.52.59.747.91l-.818.576zm.744 1.352a7.08 7.08 0 0 0-.214-.468l.893-.45a7.976 7.976 0 0 1 .45 1.088l-.95.313a7.023 7.023 0 0 0-.179-.483zm.53 2.507a6.991 6.991 0 0 0-.1-1.025l.985-.17c.067.386.106.778.116 1.17l-1 .025zm-.131 1.538c.033-.17.06-.339.081-.51l.993.123a7.957 7.957 0 0 1-.23 1.155l-.964-.267c.046-.165.086-.332.12-.501zm-.952 2.379c.184-.29.346-.594.486-.908l.914.405c-.16.36-.345.706-.555 1.038l-.845-.535zm-.964 1.205c.122-.122.239-.248.35-.378l.758.653a8.073 8.073 0 0 1-.401.432l-.707-.707z"/>' +
'<path d="M8 1a7 7 0 1 0 4.95 11.95l.707.707A8.001 8.001 0 1 1 8 0v1z"/>' +
'<path d="M7.5 3a.5.5 0 0 1 .5.5v5.21l3.248 1.856a.5.5 0 0 1-.496.868l-3.5-2A.5.5 0 0 1 7 8V3.5a.5.5 0 0 1 .5-.5z"/></svg>';
// Rebuild the GC "Recent Executions" table from the server's history
// endpoint. Replaces the server-rendered table in #gcHistoryContainer;
// network errors are silently ignored (best-effort refresh).
function _gcRefreshHistory() {
fetch('{{ url_for("ui.system_gc_history") }}?limit=10', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
var container = document.getElementById('gcHistoryContainer');
if (!container) return;
var execs = hist.executions || [];
if (execs.length === 0) {
// Same empty-state markup the server renders on first page load.
container.innerHTML = '<div class="text-center py-2"><p class="text-muted small mb-0">No executions recorded yet.</p></div>';
return;
}
var html = '<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">' +
_gcHistoryIcon + ' Recent Executions</h6>' +
'<div class="table-responsive"><table class="table table-sm small mb-0">' +
'<thead class="table-light"><tr><th>Time</th><th class="text-center">Cleaned</th>' +
'<th class="text-center">Freed</th><th class="text-center">Mode</th></tr></thead><tbody>';
execs.forEach(function (exec) {
var r = exec.result || {};
// "Cleaned" sums every per-category deletion counter, matching
// the Jinja-rendered table's expression for the same column.
var cleaned = (r.temp_files_deleted || 0) + (r.multipart_uploads_deleted || 0) +
(r.lock_files_deleted || 0) + (r.orphaned_metadata_deleted || 0) +
(r.orphaned_versions_deleted || 0) + (r.empty_dirs_removed || 0);
var freed = (r.temp_bytes_freed || 0) + (r.multipart_bytes_freed || 0) +
(r.orphaned_version_bytes_freed || 0);
var mode = exec.dry_run
? '<span class="badge bg-warning bg-opacity-10 text-warning">Dry run</span>'
: '<span class="badge bg-primary bg-opacity-10 text-primary">Live</span>';
html += '<tr><td class="text-nowrap">' + formatTimestamp(exec.timestamp) + '</td>' +
'<td class="text-center">' + cleaned + '</td>' +
'<td class="text-center">' + formatBytes(freed) + '</td>' +
'<td class="text-center">' + mode + '</td></tr>';
});
html += '</tbody></table></div>';
container.innerHTML = html;
})
.catch(function () {});
}
// Rebuild the integrity scanner "Recent Scans" table from the server's
// history endpoint. Mirrors _gcRefreshHistory; failures are ignored.
function _integrityRefreshHistory() {
fetch('{{ url_for("ui.system_integrity_history") }}?limit=10', {
headers: {'X-CSRFToken': csrfToken}
})
.then(function (r) { return r.json(); })
.then(function (hist) {
var container = document.getElementById('integrityHistoryContainer');
if (!container) return;
var execs = hist.executions || [];
if (execs.length === 0) {
// Same empty-state markup the server renders on first page load.
container.innerHTML = '<div class="text-center py-2"><p class="text-muted small mb-0">No scans recorded yet.</p></div>';
return;
}
var html = '<h6 class="fw-semibold small text-muted mb-2 d-flex align-items-center gap-2">' +
_gcHistoryIcon + ' Recent Scans</h6>' +
'<div class="table-responsive"><table class="table table-sm small mb-0">' +
'<thead class="table-light"><tr><th>Time</th><th class="text-center">Scanned</th>' +
'<th class="text-center">Issues</th><th class="text-center">Healed</th>' +
'<th class="text-center">Mode</th></tr></thead><tbody>';
execs.forEach(function (exec) {
var r = exec.result || {};
// Total issues across every detection category, matching the
// Jinja-rendered table's total_issues expression.
var issues = (r.corrupted_objects || 0) + (r.orphaned_objects || 0) +
(r.phantom_metadata || 0) + (r.stale_versions || 0) +
(r.etag_cache_inconsistencies || 0) + (r.legacy_metadata_drifts || 0);
var issueHtml = issues > 0
? '<span class="text-danger fw-medium">' + issues + '</span>'
: '<span class="text-success">0</span>';
// Mode precedence: dry run wins over heal, heal over plain scan.
var mode = exec.dry_run
? '<span class="badge bg-warning bg-opacity-10 text-warning">Dry</span>'
: (exec.auto_heal
? '<span class="badge bg-success bg-opacity-10 text-success">Heal</span>'
: '<span class="badge bg-primary bg-opacity-10 text-primary">Scan</span>');
html += '<tr><td class="text-nowrap">' + formatTimestamp(exec.timestamp) + '</td>' +
'<td class="text-center">' + (r.objects_scanned || 0) + '</td>' +
'<td class="text-center">' + issueHtml + '</td>' +
'<td class="text-center">' + (r.issues_healed || 0) + '</td>' +
'<td class="text-center">' + mode + '</td></tr>';
});
html += '</tbody></table></div>';
container.innerHTML = html;
})
.catch(function () {});
}
var _gcPollTimer = null;
var _gcLastDryRun = false;
// Reflect an in-flight GC run in the UI: show/hide the progress banner
// and disable/enable the Run Now / Dry Run buttons together. Clears the
// elapsed-time label when the run ends.
function _gcSetScanning(scanning) {
var banner = document.getElementById('gcScanningBanner');
['gcRunBtn', 'gcDryRunBtn'].forEach(function (btnId) {
var btn = document.getElementById(btnId);
if (btn) btn.disabled = scanning;
});
if (scanning) {
banner.classList.remove('d-none');
} else {
banner.classList.add('d-none');
document.getElementById('gcScanElapsed').textContent = '';
}
}
// Render a completed GC run's result object into the #gcResult alert.
// data: result payload (per-category counters and byte totals);
// dryRun: whether the run was a dry run (prefixes the title).
function _gcShowResult(data, dryRun) {
var container = document.getElementById('gcResult');
var alert = document.getElementById('gcResultAlert');
var title = document.getElementById('gcResultTitle');
var body = document.getElementById('gcResultBody');
container.classList.remove('d-none');
var totalItems = (data.temp_files_deleted || 0) + (data.multipart_uploads_deleted || 0) +
(data.lock_files_deleted || 0) + (data.orphaned_metadata_deleted || 0) +
(data.orphaned_versions_deleted || 0) + (data.empty_dirs_removed || 0);
var totalFreed = (data.temp_bytes_freed || 0) + (data.multipart_bytes_freed || 0) +
(data.orphaned_version_bytes_freed || 0);
// Green when something was cleaned, neutral info style otherwise.
alert.className = totalItems > 0 ? 'alert alert-success mb-0 small' : 'alert alert-info mb-0 small';
title.textContent = (dryRun ? '[Dry Run] ' : '') + 'Completed in ' + (data.execution_time_seconds || 0).toFixed(2) + 's';
// One line per non-zero category, in a fixed order.
var lines = [];
if (data.temp_files_deleted) lines.push('Temp files: ' + data.temp_files_deleted + ' (' + formatBytes(data.temp_bytes_freed) + ')');
if (data.multipart_uploads_deleted) lines.push('Multipart uploads: ' + data.multipart_uploads_deleted + ' (' + formatBytes(data.multipart_bytes_freed) + ')');
if (data.lock_files_deleted) lines.push('Lock files: ' + data.lock_files_deleted);
if (data.orphaned_metadata_deleted) lines.push('Orphaned metadata: ' + data.orphaned_metadata_deleted);
if (data.orphaned_versions_deleted) lines.push('Orphaned versions: ' + data.orphaned_versions_deleted + ' (' + formatBytes(data.orphaned_version_bytes_freed) + ')');
if (data.empty_dirs_removed) lines.push('Empty directories: ' + data.empty_dirs_removed);
if (totalItems === 0) lines.push('Nothing to clean up.');
if (totalFreed > 0) lines.push('Total freed: ' + formatBytes(totalFreed));
if (data.errors && data.errors.length > 0) lines.push('Errors: ' + data.errors.join(', '));
body.innerHTML = lines.join('<br>');
}
// Poll the async GC status endpoint. While the server reports `scanning`,
// update the elapsed-time label and re-arm the 2s poll timer; once the scan
// ends, restore the buttons, refresh the history table, and render the most
// recent execution's result fetched from the history endpoint.
function _gcPoll() {
  fetch('{{ url_for("ui.system_gc_status") }}', {
    headers: {'X-CSRFToken': csrfToken}
  })
  .then(function (r) { return r.json(); })
  .then(function (status) {
    if (status.scanning) {
      var elapsed = status.scan_elapsed_seconds || 0;
      document.getElementById('gcScanElapsed').textContent = ' (' + elapsed.toFixed(0) + 's)';
      _gcPollTimer = setTimeout(_gcPoll, 2000);
    } else {
      _gcSetScanning(false);
      _gcRefreshHistory();
      // Fetch only the newest execution to populate the result panel.
      fetch('{{ url_for("ui.system_gc_history") }}?limit=1', {
        headers: {'X-CSRFToken': csrfToken}
      })
      .then(function (r) { return r.json(); })
      .then(function (hist) {
        if (hist.executions && hist.executions.length > 0) {
          var latest = hist.executions[0];
          _gcShowResult(latest.result, latest.dry_run);
        }
      })
      .catch(function () {});
    }
  })
  .catch(function () {
    // Transient network failure: retry on a slower 3s cadence.
    _gcPollTimer = setTimeout(_gcPoll, 3000);
  });
}
window.runGC = function (dryRun) {
  // Start a garbage-collection run (or dry run) on the server, then begin
  // polling for completion. The run itself is asynchronous server-side.
  _gcLastDryRun = dryRun;
  // Shared renderer for server-reported errors and transport failures
  // (previously duplicated verbatim in the .then and .catch branches).
  function showError(message) {
    _gcSetScanning(false);
    var container = document.getElementById('gcResult');
    var alert = document.getElementById('gcResultAlert');
    var title = document.getElementById('gcResultTitle');
    var body = document.getElementById('gcResultBody');
    container.classList.remove('d-none');
    alert.className = 'alert alert-danger mb-0 small';
    title.textContent = 'Error';
    body.textContent = message;
  }
  document.getElementById('gcResult').classList.add('d-none');
  _gcSetScanning(true);
  fetch('{{ url_for("ui.system_gc_run") }}', {
    method: 'POST',
    headers: {'Content-Type': 'application/json', 'X-CSRFToken': csrfToken},
    body: JSON.stringify({dry_run: dryRun})
  })
  .then(function (r) { return r.json(); })
  .then(function (data) {
    if (data.error) {
      showError(data.error);
      return;
    }
    // First poll after a short delay; _gcPoll re-arms itself as needed.
    _gcPollTimer = setTimeout(_gcPoll, 2000);
  })
  .catch(function (err) {
    showError(err.message);
  });
};
{% if gc_status.scanning %}
_gcSetScanning(true);
_gcPollTimer = setTimeout(_gcPoll, 2000);
{% endif %}
var _integrityPollTimer = null;
var _integrityLastMode = {dryRun: false, autoHeal: false};
function _integritySetScanning(scanning) {
  // Toggle the integrity "scan in progress" banner and the action buttons.
  // While scanning, all three buttons are disabled; when the scan ends the
  // elapsed-time label is cleared and the buttons are re-enabled.
  var banner = document.getElementById('integrityScanningBanner');
  var buttonIds = ['integrityRunBtn', 'integrityHealBtn', 'integrityDryRunBtn'];
  if (scanning) {
    banner.classList.remove('d-none');
  } else {
    banner.classList.add('d-none');
    document.getElementById('integrityScanElapsed').textContent = '';
  }
  for (var i = 0; i < buttonIds.length; i++) {
    var btn = document.getElementById(buttonIds[i]);
    if (btn) btn.disabled = scanning;
  }
}
function _integrityShowResult(data, dryRun, autoHeal) {
  // Render a finished integrity scan into the #integrityResult panel.
  // `data` is the server's result payload; `dryRun`/`autoHeal` pick the title prefix.
  var container = document.getElementById('integrityResult');
  var alert = document.getElementById('integrityResultAlert');
  var title = document.getElementById('integrityResultTitle');
  var body = document.getElementById('integrityResultBody');
  // Escape server-supplied text before joining it into innerHTML below:
  // scan error strings can embed bucket/object names and must not be
  // interpreted as markup (HTML injection).
  function esc(s) {
    return String(s).replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;');
  }
  container.classList.remove('d-none');
  var totalIssues = (data.corrupted_objects || 0) + (data.orphaned_objects || 0) +
    (data.phantom_metadata || 0) + (data.stale_versions || 0) +
    (data.etag_cache_inconsistencies || 0) + (data.legacy_metadata_drifts || 0);
  var prefix = dryRun ? '[Dry Run] ' : (autoHeal ? '[Heal] ' : '');
  alert.className = totalIssues > 0 ? 'alert alert-warning mb-0 small' : 'alert alert-success mb-0 small';
  title.textContent = prefix + 'Completed in ' + (data.execution_time_seconds || 0).toFixed(2) + 's';
  var lines = [];
  lines.push('Scanned: ' + (data.objects_scanned || 0) + ' objects in ' + (data.buckets_scanned || 0) + ' buckets');
  if (totalIssues === 0) {
    lines.push('No issues found.');
  } else {
    if (data.corrupted_objects) lines.push('Corrupted objects: ' + data.corrupted_objects);
    if (data.orphaned_objects) lines.push('Orphaned objects: ' + data.orphaned_objects);
    if (data.phantom_metadata) lines.push('Phantom metadata: ' + data.phantom_metadata);
    if (data.stale_versions) lines.push('Stale versions: ' + data.stale_versions);
    if (data.etag_cache_inconsistencies) lines.push('ETag inconsistencies: ' + data.etag_cache_inconsistencies);
    if (data.legacy_metadata_drifts) lines.push('Legacy metadata drifts: ' + data.legacy_metadata_drifts);
    if (data.issues_healed) lines.push('Issues healed: ' + data.issues_healed);
  }
  if (data.errors && data.errors.length > 0) lines.push('Errors: ' + esc(data.errors.join(', ')));
  body.innerHTML = lines.join('<br>');
}
// Poll the async integrity-scan status endpoint. While the server reports
// `scanning`, update the elapsed-time label and re-arm the 2s poll timer;
// once the scan ends, restore the buttons, refresh the history table, and
// render the most recent execution's result from the history endpoint.
function _integrityPoll() {
  fetch('{{ url_for("ui.system_integrity_status") }}', {
    headers: {'X-CSRFToken': csrfToken}
  })
  .then(function (r) { return r.json(); })
  .then(function (status) {
    if (status.scanning) {
      var elapsed = status.scan_elapsed_seconds || 0;
      document.getElementById('integrityScanElapsed').textContent = ' (' + elapsed.toFixed(0) + 's)';
      _integrityPollTimer = setTimeout(_integrityPoll, 2000);
    } else {
      _integritySetScanning(false);
      _integrityRefreshHistory();
      // Fetch only the newest execution to populate the result panel.
      fetch('{{ url_for("ui.system_integrity_history") }}?limit=1', {
        headers: {'X-CSRFToken': csrfToken}
      })
      .then(function (r) { return r.json(); })
      .then(function (hist) {
        if (hist.executions && hist.executions.length > 0) {
          var latest = hist.executions[0];
          _integrityShowResult(latest.result, latest.dry_run, latest.auto_heal);
        }
      })
      .catch(function () {});
    }
  })
  .catch(function () {
    // Transient network failure: retry on a slower 3s cadence.
    _integrityPollTimer = setTimeout(_integrityPoll, 3000);
  });
}
window.runIntegrity = function (dryRun, autoHeal) {
  // Start an integrity scan (optionally dry-run and/or auto-heal) on the
  // server, then begin polling for completion.
  _integrityLastMode = {dryRun: dryRun, autoHeal: autoHeal};
  // Shared renderer for server-reported errors and transport failures
  // (previously duplicated verbatim in the .then and .catch branches).
  function showError(message) {
    _integritySetScanning(false);
    var container = document.getElementById('integrityResult');
    var alert = document.getElementById('integrityResultAlert');
    var title = document.getElementById('integrityResultTitle');
    var body = document.getElementById('integrityResultBody');
    container.classList.remove('d-none');
    alert.className = 'alert alert-danger mb-0 small';
    title.textContent = 'Error';
    body.textContent = message;
  }
  document.getElementById('integrityResult').classList.add('d-none');
  _integritySetScanning(true);
  fetch('{{ url_for("ui.system_integrity_run") }}', {
    method: 'POST',
    headers: {'Content-Type': 'application/json', 'X-CSRFToken': csrfToken},
    body: JSON.stringify({dry_run: dryRun, auto_heal: autoHeal})
  })
  .then(function (r) { return r.json(); })
  .then(function (data) {
    if (data.error) {
      showError(data.error);
      return;
    }
    // First poll after a short delay; _integrityPoll re-arms itself as needed.
    _integrityPollTimer = setTimeout(_integrityPoll, 2000);
  })
  .catch(function (err) {
    showError(err.message);
  });
};
{% if integrity_status.scanning %}
_integritySetScanning(true);
_integrityPollTimer = setTimeout(_integrityPoll, 2000);
{% endif %}
})();
</script>
{% endblock %}

View File

@@ -27,7 +27,10 @@ def app(tmp_path: Path):
"access_key": "test",
"secret_key": "secret",
"display_name": "Test User",
"policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy"]}],
"policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy",
"create_bucket", "delete_bucket", "share", "versioning", "tagging",
"encryption", "cors", "lifecycle", "replication", "quota",
"object_lock", "notification", "logging", "website"]}],
}
]
}

View File

@@ -0,0 +1,156 @@
import hashlib
import time
import pytest
@pytest.fixture()
def bucket(client, signer):
    """Create the ``cond-test`` bucket via a signed PUT and return its name."""
    name = "cond-test"
    client.put(f"/{name}", headers=signer("PUT", f"/{name}"))
    return name
@pytest.fixture()
def uploaded(client, signer, bucket):
    """Upload ``obj.txt`` and return its MD5 ETag and Last-Modified header."""
    payload = b"hello conditional"
    path = f"/{bucket}/obj.txt"
    resp = client.put(path, headers=signer("PUT", path, body=payload), data=payload)
    return {
        "etag": hashlib.md5(payload).hexdigest(),
        "last_modified": resp.headers.get("Last-Modified"),
    }
class TestIfMatch:
    """``If-Match`` preconditions on GET/HEAD.

    Expected semantics: 200 when any listed ETag (or the wildcard ``*``)
    matches the stored object's ETag; 412 Precondition Failed otherwise.
    """

    def test_get_matching_etag(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Match": f'"{uploaded["etag"]}"'})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_get_non_matching_etag(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Match": '"wrongetag"'})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 412

    def test_head_matching_etag(self, client, signer, bucket, uploaded):
        headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-Match": f'"{uploaded["etag"]}"'})
        resp = client.head(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_head_non_matching_etag(self, client, signer, bucket, uploaded):
        headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-Match": '"wrongetag"'})
        resp = client.head(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 412

    def test_wildcard_match(self, client, signer, bucket, uploaded):
        # "*" matches any existing object.
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Match": "*"})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_multiple_etags_one_matches(self, client, signer, bucket, uploaded):
        etag_list = f'"bad1", "{uploaded["etag"]}", "bad2"'
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Match": etag_list})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_multiple_etags_none_match(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Match": '"bad1", "bad2"'})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 412
class TestIfNoneMatch:
    """``If-None-Match`` preconditions on GET/HEAD.

    Expected semantics: 304 Not Modified when the stored ETag matches (or
    ``*`` is given), 200 with the body otherwise. 304 still carries an ETag.
    """

    def test_get_matching_etag_returns_304(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-None-Match": f'"{uploaded["etag"]}"'})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 304
        assert uploaded["etag"] in resp.headers.get("ETag", "")

    def test_get_non_matching_etag_returns_200(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-None-Match": '"wrongetag"'})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_head_matching_etag_returns_304(self, client, signer, bucket, uploaded):
        headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-None-Match": f'"{uploaded["etag"]}"'})
        resp = client.head(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 304

    def test_head_non_matching_etag_returns_200(self, client, signer, bucket, uploaded):
        headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-None-Match": '"wrongetag"'})
        resp = client.head(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_wildcard_returns_304(self, client, signer, bucket, uploaded):
        # "*" matches any existing object, so the request short-circuits to 304.
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-None-Match": "*"})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 304
class TestIfModifiedSince:
    """``If-Modified-Since`` preconditions on GET/HEAD.

    A future date (object unchanged since then) yields 304; a past date
    yields 200. When ``If-None-Match`` is also present it takes precedence
    and ``If-Modified-Since`` is ignored (per the S3/HTTP precondition order).
    """

    def test_not_modified_returns_304(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Modified-Since": "Sun, 01 Jan 2034 00:00:00 GMT"})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 304
        assert "ETag" in resp.headers

    def test_modified_returns_200(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Modified-Since": "Sun, 01 Jan 2000 00:00:00 GMT"})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_head_not_modified(self, client, signer, bucket, uploaded):
        headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-Modified-Since": "Sun, 01 Jan 2034 00:00:00 GMT"})
        resp = client.head(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 304

    def test_if_none_match_takes_precedence(self, client, signer, bucket, uploaded):
        # Non-matching If-None-Match forces 200 even though the date alone
        # would have produced a 304.
        headers = signer("GET", f"/{bucket}/obj.txt", headers={
            "If-None-Match": '"wrongetag"',
            "If-Modified-Since": "Sun, 01 Jan 2034 00:00:00 GMT",
        })
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200
class TestIfUnmodifiedSince:
    """``If-Unmodified-Since`` preconditions on GET/HEAD.

    A future date yields 200 (object not modified since then); a past date
    yields 412. A matching ``If-Match`` takes precedence over the date check.
    """

    def test_unmodified_returns_200(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Unmodified-Since": "Sun, 01 Jan 2034 00:00:00 GMT"})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200

    def test_modified_returns_412(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={"If-Unmodified-Since": "Sun, 01 Jan 2000 00:00:00 GMT"})
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 412

    def test_head_modified_returns_412(self, client, signer, bucket, uploaded):
        headers = signer("HEAD", f"/{bucket}/obj.txt", headers={"If-Unmodified-Since": "Sun, 01 Jan 2000 00:00:00 GMT"})
        resp = client.head(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 412

    def test_if_match_takes_precedence(self, client, signer, bucket, uploaded):
        # A matching If-Match overrides the failing date precondition.
        headers = signer("GET", f"/{bucket}/obj.txt", headers={
            "If-Match": f'"{uploaded["etag"]}"',
            "If-Unmodified-Since": "Sun, 01 Jan 2000 00:00:00 GMT",
        })
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 200
class TestConditionalWithRange:
    """Interaction of ``If-Match`` with ``Range`` requests.

    A passing precondition combined with a byte range yields 206 Partial
    Content; a failing precondition yields 412 regardless of the range.
    """

    def test_if_match_with_range(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={
            "If-Match": f'"{uploaded["etag"]}"',
            "Range": "bytes=0-4",
        })
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 206

    def test_if_match_fails_with_range(self, client, signer, bucket, uploaded):
        headers = signer("GET", f"/{bucket}/obj.txt", headers={
            "If-Match": '"wrongetag"',
            "Range": "bytes=0-4",
        })
        resp = client.get(f"/{bucket}/obj.txt", headers=headers)
        assert resp.status_code == 412

356
tests/test_gc.py Normal file
View File

@@ -0,0 +1,356 @@
import json
import os
import time
from pathlib import Path
import pytest
from app.gc import GarbageCollector, GCResult
@pytest.fixture
def storage_root(tmp_path):
    """Build a bare storage tree with the ``.myfsio.sys`` control directories."""
    root = tmp_path / "data"
    sys_root = root / ".myfsio.sys"
    # parents=True also creates `root` and `sys_root` on the first iteration.
    for subdir in ("config", "tmp", "multipart", "buckets"):
        (sys_root / subdir).mkdir(parents=True)
    return root
@pytest.fixture
def gc(storage_root):
    """A GarbageCollector wired to the test storage root.

    Ages are kept short (1h temp files, 1d multipart, 30min locks) so tests
    can trigger cleanup by backdating files with ``_make_old``.
    """
    return GarbageCollector(
        storage_root=storage_root,
        interval_hours=1.0,
        temp_file_max_age_hours=1.0,
        multipart_max_age_days=1,
        lock_file_max_age_hours=0.5,
        dry_run=False,
    )
def _make_old(path, hours=48):
old_time = time.time() - hours * 3600
os.utime(path, (old_time, old_time))
class TestTempFileCleanup:
    """GC removal of stale ``.tmp`` files under ``.myfsio.sys/tmp``."""

    def test_old_temp_files_deleted(self, storage_root, gc):
        tmp_dir = storage_root / ".myfsio.sys" / "tmp"
        old_file = tmp_dir / "abc123.tmp"
        old_file.write_bytes(b"x" * 1000)
        _make_old(old_file, hours=48)
        result = gc.run_now()
        assert result.temp_files_deleted == 1
        assert result.temp_bytes_freed == 1000
        assert not old_file.exists()

    def test_recent_temp_files_kept(self, storage_root, gc):
        tmp_dir = storage_root / ".myfsio.sys" / "tmp"
        new_file = tmp_dir / "recent.tmp"
        new_file.write_bytes(b"data")
        result = gc.run_now()
        assert result.temp_files_deleted == 0
        assert new_file.exists()

    def test_dry_run_keeps_files(self, storage_root, gc):
        # Dry run reports what would be deleted but leaves files on disk.
        gc.dry_run = True
        tmp_dir = storage_root / ".myfsio.sys" / "tmp"
        old_file = tmp_dir / "stale.tmp"
        old_file.write_bytes(b"x" * 500)
        _make_old(old_file, hours=48)
        result = gc.run_now()
        assert result.temp_files_deleted == 1
        assert result.temp_bytes_freed == 500
        assert old_file.exists()
class TestMultipartCleanup:
    """GC removal of abandoned multipart uploads (current and legacy layout)."""

    def test_old_orphaned_multipart_deleted(self, storage_root, gc):
        bucket = storage_root / "test-bucket"
        bucket.mkdir()
        mp_root = storage_root / ".myfsio.sys" / "multipart" / "test-bucket"
        mp_root.mkdir(parents=True)
        upload_dir = mp_root / "upload-123"
        upload_dir.mkdir()
        manifest = upload_dir / "manifest.json"
        manifest.write_text(json.dumps({"upload_id": "upload-123", "object_key": "foo.txt"}))
        part = upload_dir / "part-00001.part"
        part.write_bytes(b"x" * 2000)
        # Backdate every entry, including the directory itself, past the 1-day limit.
        _make_old(manifest, hours=200)
        _make_old(part, hours=200)
        _make_old(upload_dir, hours=200)
        result = gc.run_now()
        assert result.multipart_uploads_deleted == 1
        assert result.multipart_bytes_freed > 0
        assert not upload_dir.exists()

    def test_recent_multipart_kept(self, storage_root, gc):
        bucket = storage_root / "test-bucket"
        bucket.mkdir()
        mp_root = storage_root / ".myfsio.sys" / "multipart" / "test-bucket"
        mp_root.mkdir(parents=True)
        upload_dir = mp_root / "upload-new"
        upload_dir.mkdir()
        manifest = upload_dir / "manifest.json"
        manifest.write_text(json.dumps({"upload_id": "upload-new", "object_key": "bar.txt"}))
        result = gc.run_now()
        assert result.multipart_uploads_deleted == 0
        assert upload_dir.exists()

    def test_legacy_multipart_cleaned(self, storage_root, gc):
        # Old layout kept parts under <bucket>/.multipart/<upload>.
        bucket = storage_root / "test-bucket"
        bucket.mkdir()
        legacy_mp = bucket / ".multipart" / "upload-old"
        legacy_mp.mkdir(parents=True)
        part = legacy_mp / "part-00001.part"
        part.write_bytes(b"y" * 500)
        _make_old(part, hours=200)
        _make_old(legacy_mp, hours=200)
        result = gc.run_now()
        assert result.multipart_uploads_deleted == 1
class TestLockFileCleanup:
    """GC removal of stale per-bucket ``.lock`` files (30min threshold)."""

    def test_stale_lock_files_deleted(self, storage_root, gc):
        locks_dir = storage_root / ".myfsio.sys" / "buckets" / "test-bucket" / "locks"
        locks_dir.mkdir(parents=True)
        lock = locks_dir / "some_key.lock"
        lock.write_text("")
        _make_old(lock, hours=2)
        result = gc.run_now()
        assert result.lock_files_deleted == 1
        assert not lock.exists()

    def test_recent_lock_kept(self, storage_root, gc):
        locks_dir = storage_root / ".myfsio.sys" / "buckets" / "test-bucket" / "locks"
        locks_dir.mkdir(parents=True)
        lock = locks_dir / "active.lock"
        lock.write_text("")
        result = gc.run_now()
        assert result.lock_files_deleted == 0
        assert lock.exists()
class TestOrphanedMetadataCleanup:
    """GC removal of metadata whose object no longer exists.

    Covers both the legacy per-object ``.meta/*.meta.json`` files and entries
    inside the newer per-directory ``_index.json``.
    """

    def test_legacy_orphaned_metadata_deleted(self, storage_root, gc):
        bucket = storage_root / "test-bucket"
        bucket.mkdir()
        meta_dir = bucket / ".meta"
        meta_dir.mkdir()
        orphan = meta_dir / "deleted_file.txt.meta.json"
        orphan.write_text(json.dumps({"etag": "abc"}))
        result = gc.run_now()
        assert result.orphaned_metadata_deleted == 1
        assert not orphan.exists()

    def test_valid_metadata_kept(self, storage_root, gc):
        bucket = storage_root / "test-bucket"
        bucket.mkdir()
        obj = bucket / "exists.txt"
        obj.write_text("hello")
        meta_dir = bucket / ".meta"
        meta_dir.mkdir()
        meta = meta_dir / "exists.txt.meta.json"
        meta.write_text(json.dumps({"etag": "abc"}))
        result = gc.run_now()
        assert result.orphaned_metadata_deleted == 0
        assert meta.exists()

    def test_index_orphaned_entries_cleaned(self, storage_root, gc):
        # Only the entry for the missing object is dropped from _index.json;
        # entries with a backing object survive.
        bucket = storage_root / "test-bucket"
        bucket.mkdir()
        obj = bucket / "keep.txt"
        obj.write_text("hello")
        meta_dir = storage_root / ".myfsio.sys" / "buckets" / "test-bucket" / "meta"
        meta_dir.mkdir(parents=True)
        index = meta_dir / "_index.json"
        index.write_text(json.dumps({"keep.txt": {"etag": "a"}, "gone.txt": {"etag": "b"}}))
        result = gc.run_now()
        assert result.orphaned_metadata_deleted == 1
        updated = json.loads(index.read_text())
        assert "keep.txt" in updated
        assert "gone.txt" not in updated
class TestOrphanedVersionsCleanup:
    """GC removal of version files whose current object was deleted."""

    def test_orphaned_versions_deleted(self, storage_root, gc):
        bucket = storage_root / "test-bucket"
        bucket.mkdir()
        versions_dir = storage_root / ".myfsio.sys" / "buckets" / "test-bucket" / "versions" / "deleted_obj.txt"
        versions_dir.mkdir(parents=True)
        v_bin = versions_dir / "v1.bin"
        v_json = versions_dir / "v1.json"
        v_bin.write_bytes(b"old data" * 100)
        v_json.write_text(json.dumps({"version_id": "v1", "size": 800}))
        result = gc.run_now()
        # Both the data blob and its manifest count as deleted entries;
        # freed bytes come from the blob (800 bytes).
        assert result.orphaned_versions_deleted == 2
        assert result.orphaned_version_bytes_freed == 800

    def test_active_versions_kept(self, storage_root, gc):
        bucket = storage_root / "test-bucket"
        bucket.mkdir()
        obj = bucket / "active.txt"
        obj.write_text("current")
        versions_dir = storage_root / ".myfsio.sys" / "buckets" / "test-bucket" / "versions" / "active.txt"
        versions_dir.mkdir(parents=True)
        v_bin = versions_dir / "v1.bin"
        v_bin.write_bytes(b"old version")
        result = gc.run_now()
        assert result.orphaned_versions_deleted == 0
        assert v_bin.exists()
class TestEmptyDirCleanup:
    """GC pruning of empty directories under the ``.myfsio.sys`` tree."""

    def test_empty_dirs_removed(self, storage_root, gc):
        empty = storage_root / ".myfsio.sys" / "buckets" / "test-bucket" / "locks" / "sub"
        empty.mkdir(parents=True)
        result = gc.run_now()
        # More than one dir may be pruned (parents become empty too).
        assert result.empty_dirs_removed > 0
        assert not empty.exists()
class TestHistory:
    """GC execution history: one record per run, newest first."""

    def test_history_recorded(self, storage_root, gc):
        gc.run_now()
        history = gc.get_history()
        assert len(history) == 1
        assert "result" in history[0]
        assert "timestamp" in history[0]

    def test_multiple_runs(self, storage_root, gc):
        gc.run_now()
        gc.run_now()
        gc.run_now()
        history = gc.get_history()
        assert len(history) == 3
        # History is ordered newest-first.
        assert history[0]["timestamp"] >= history[1]["timestamp"]
class TestStatus:
    """GC status payload echoes the configuration the fixture set."""

    def test_get_status(self, storage_root, gc):
        status = gc.get_status()
        assert status["interval_hours"] == 1.0
        assert status["dry_run"] is False
        assert status["temp_file_max_age_hours"] == 1.0
        assert status["multipart_max_age_days"] == 1
        assert status["lock_file_max_age_hours"] == 0.5
class TestGCResult:
    """Unit tests for the GCResult value object's derived properties."""

    def test_total_bytes_freed(self):
        # total_bytes_freed sums the three byte counters.
        r = GCResult(temp_bytes_freed=100, multipart_bytes_freed=200, orphaned_version_bytes_freed=300)
        assert r.total_bytes_freed == 600

    def test_has_work(self):
        # has_work is truthy when any deletion counter is non-zero.
        assert not GCResult().has_work
        assert GCResult(temp_files_deleted=1).has_work
        assert GCResult(lock_files_deleted=1).has_work
        assert GCResult(empty_dirs_removed=1).has_work
class TestAdminAPI:
    """HTTP admin endpoints for GC: status, run (async / dry-run), history,
    and authorization (non-admin users are rejected)."""

    @pytest.fixture
    def gc_app(self, tmp_path):
        # Build a real API app with GC enabled and a single admin user.
        from app import create_api_app
        storage_root = tmp_path / "data"
        iam_config = tmp_path / "iam.json"
        bucket_policies = tmp_path / "bucket_policies.json"
        iam_payload = {
            "users": [
                {
                    "access_key": "admin",
                    "secret_key": "adminsecret",
                    "display_name": "Admin",
                    "policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy", "iam:*"]}],
                }
            ]
        }
        iam_config.write_text(json.dumps(iam_payload))
        flask_app = create_api_app({
            "TESTING": True,
            "SECRET_KEY": "testing",
            "STORAGE_ROOT": storage_root,
            "IAM_CONFIG": iam_config,
            "BUCKET_POLICY_PATH": bucket_policies,
            "GC_ENABLED": True,
            "GC_INTERVAL_HOURS": 1.0,
        })
        yield flask_app
        # Teardown: stop the background GC thread so it does not outlive the test.
        gc = flask_app.extensions.get("gc")
        if gc:
            gc.stop()

    def test_gc_status(self, gc_app):
        client = gc_app.test_client()
        resp = client.get("/admin/gc/status", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"})
        assert resp.status_code == 200
        data = resp.get_json()
        assert data["enabled"] is True

    def test_gc_run(self, gc_app):
        client = gc_app.test_client()
        resp = client.post(
            "/admin/gc/run",
            headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"},
            content_type="application/json",
        )
        assert resp.status_code == 200
        data = resp.get_json()
        # The run is asynchronous; the endpoint only acknowledges the start.
        assert data["status"] == "started"

    def test_gc_dry_run(self, gc_app):
        client = gc_app.test_client()
        resp = client.post(
            "/admin/gc/run",
            headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"},
            data=json.dumps({"dry_run": True}),
            content_type="application/json",
        )
        assert resp.status_code == 200
        data = resp.get_json()
        assert data["status"] == "started"

    def test_gc_history(self, gc_app):
        import time
        client = gc_app.test_client()
        client.post("/admin/gc/run", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"})
        # Poll (max ~5s) until the async run completes before reading history.
        for _ in range(50):
            time.sleep(0.1)
            status = client.get("/admin/gc/status", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"}).get_json()
            if not status.get("scanning"):
                break
        resp = client.get("/admin/gc/history", headers={"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"})
        assert resp.status_code == 200
        data = resp.get_json()
        assert len(data["executions"]) >= 1

    def test_gc_requires_admin(self, gc_app):
        # A freshly created regular user must be rejected with 403.
        iam = gc_app.extensions["iam"]
        user = iam.create_user(display_name="Regular")
        client = gc_app.test_client()
        resp = client.get(
            "/admin/gc/status",
            headers={"X-Access-Key": user["access_key"], "X-Secret-Key": user["secret_key"]},
        )
        assert resp.status_code == 403

648
tests/test_integrity.py Normal file
View File

@@ -0,0 +1,648 @@
import hashlib
import json
import os
import sys
import time
from pathlib import Path
import pytest
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
from app.integrity import IntegrityChecker, IntegrityCursorStore, IntegrityResult
def _wait_scan_done(client, headers, timeout=10):
deadline = time.time() + timeout
while time.time() < deadline:
resp = client.get("/admin/integrity/status", headers=headers)
data = resp.get_json()
if not data.get("scanning"):
return
time.sleep(0.1)
raise TimeoutError("scan did not complete")
def _md5(data: bytes) -> str:
return hashlib.md5(data).hexdigest()
def _setup_bucket(storage_root: Path, bucket_name: str, objects: dict[str, bytes]) -> None:
    """Materialise a bucket on disk the way the server lays it out.

    Writes each object's bytes under ``<root>/<bucket>/<key>`` and records its
    ETag/size/mtime in the per-directory ``_index.json`` files under
    ``.myfsio.sys/buckets/<bucket>/meta`` so the integrity checker sees a
    consistent bucket.
    """
    bucket_path = storage_root / bucket_name
    bucket_path.mkdir(parents=True, exist_ok=True)
    meta_root = storage_root / ".myfsio.sys" / "buckets" / bucket_name / "meta"
    meta_root.mkdir(parents=True, exist_ok=True)
    bucket_json = storage_root / ".myfsio.sys" / "buckets" / bucket_name / ".bucket.json"
    bucket_json.write_text(json.dumps({"created": "2025-01-01"}))
    for key, data in objects.items():
        obj_path = bucket_path / key
        obj_path.parent.mkdir(parents=True, exist_ok=True)
        obj_path.write_bytes(data)
        etag = _md5(data)
        stat = obj_path.stat()
        meta = {
            "__etag__": etag,
            "__size__": str(stat.st_size),
            "__last_modified__": str(stat.st_mtime),
        }
        key_path = Path(key)
        parent = key_path.parent
        key_name = key_path.name
        # Top-level keys index into meta/_index.json; nested keys go into the
        # mirrored subdirectory's own _index.json.
        if parent == Path("."):
            index_path = meta_root / "_index.json"
        else:
            index_path = meta_root / parent / "_index.json"
        index_path.parent.mkdir(parents=True, exist_ok=True)
        index_data = {}
        if index_path.exists():
            index_data = json.loads(index_path.read_text())
        index_data[key_name] = {"metadata": meta}
        index_path.write_text(json.dumps(index_data))
def _issues_of_type(result, issue_type):
return [i for i in result.issues if i.issue_type == issue_type]
@pytest.fixture
def storage_root(tmp_path):
    """Fresh storage root containing only the ``.myfsio.sys/config`` tree."""
    root = tmp_path / "data"
    # parents=True creates `root` and `.myfsio.sys` along the way.
    (root / ".myfsio.sys" / "config").mkdir(parents=True, exist_ok=True)
    return root
@pytest.fixture
def checker(storage_root):
    """An IntegrityChecker over the test root; healing and dry-run disabled
    so tests opt in per-run via ``run_now(auto_heal=...)``."""
    return IntegrityChecker(
        storage_root=storage_root,
        interval_hours=24.0,
        batch_size=1000,
        auto_heal=False,
        dry_run=False,
    )
class TestCorruptedObjects:
    """Detection and healing of objects whose bytes no longer match their
    recorded ETag."""

    def test_detect_corrupted(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello world"})
        # Overwrite the object behind the metadata's back to force a mismatch.
        (storage_root / "mybucket" / "file.txt").write_bytes(b"corrupted data")
        result = checker.run_now()
        assert result.corrupted_objects == 1
        issues = _issues_of_type(result, "corrupted_object")
        assert len(issues) == 1
        assert issues[0].bucket == "mybucket"
        assert issues[0].key == "file.txt"
        assert not issues[0].healed

    def test_heal_corrupted(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello world"})
        (storage_root / "mybucket" / "file.txt").write_bytes(b"corrupted data")
        result = checker.run_now(auto_heal=True)
        assert result.corrupted_objects == 1
        assert result.issues_healed == 1
        issues = _issues_of_type(result, "corrupted_object")
        assert issues[0].healed
        # A second pass must come up clean after healing.
        result2 = checker.run_now()
        assert result2.corrupted_objects == 0

    def test_valid_objects_pass(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello world"})
        result = checker.run_now()
        assert result.corrupted_objects == 0
        assert result.objects_scanned >= 1

    def test_corrupted_nested_key(self, storage_root, checker):
        # Keys with path separators must be reported with the full key.
        _setup_bucket(storage_root, "mybucket", {"sub/dir/file.txt": b"nested content"})
        (storage_root / "mybucket" / "sub" / "dir" / "file.txt").write_bytes(b"bad")
        result = checker.run_now()
        assert result.corrupted_objects == 1
        issues = _issues_of_type(result, "corrupted_object")
        assert issues[0].key == "sub/dir/file.txt"
class TestOrphanedObjects:
    """Detection and healing of data files that have no metadata entry."""

    def test_detect_orphaned(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {})
        (storage_root / "mybucket" / "orphan.txt").write_bytes(b"orphan data")
        result = checker.run_now()
        assert result.orphaned_objects == 1
        issues = _issues_of_type(result, "orphaned_object")
        assert len(issues) == 1

    def test_heal_orphaned(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {})
        (storage_root / "mybucket" / "orphan.txt").write_bytes(b"orphan data")
        result = checker.run_now(auto_heal=True)
        assert result.orphaned_objects == 1
        assert result.issues_healed == 1
        issues = _issues_of_type(result, "orphaned_object")
        assert issues[0].healed
        # After healing, the object is tracked and scanned like any other.
        result2 = checker.run_now()
        assert result2.orphaned_objects == 0
        assert result2.objects_scanned >= 1
class TestPhantomMetadata:
    """Detection and healing of metadata entries whose data file is gone."""

    def test_detect_phantom(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        # Remove the data file but leave the index entry behind.
        (storage_root / "mybucket" / "file.txt").unlink()
        result = checker.run_now()
        assert result.phantom_metadata == 1
        issues = _issues_of_type(result, "phantom_metadata")
        assert len(issues) == 1

    def test_heal_phantom(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        (storage_root / "mybucket" / "file.txt").unlink()
        result = checker.run_now(auto_heal=True)
        assert result.phantom_metadata == 1
        assert result.issues_healed == 1
        result2 = checker.run_now()
        assert result2.phantom_metadata == 0
class TestStaleVersions:
    """Detection and healing of half-written version pairs: every version
    needs both a ``.json`` manifest and a ``.bin`` data blob."""

    def test_manifest_without_data(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        versions_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "versions" / "file.txt"
        versions_root.mkdir(parents=True)
        (versions_root / "v1.json").write_text(json.dumps({"etag": "abc"}))
        result = checker.run_now()
        assert result.stale_versions == 1
        issues = _issues_of_type(result, "stale_version")
        assert "manifest without data" in issues[0].detail

    def test_data_without_manifest(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        versions_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "versions" / "file.txt"
        versions_root.mkdir(parents=True)
        (versions_root / "v1.bin").write_bytes(b"old data")
        result = checker.run_now()
        assert result.stale_versions == 1
        issues = _issues_of_type(result, "stale_version")
        assert "data without manifest" in issues[0].detail

    def test_heal_stale_versions(self, storage_root, checker):
        # Two independent half-pairs: both are reported and removed.
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        versions_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "versions" / "file.txt"
        versions_root.mkdir(parents=True)
        (versions_root / "v1.json").write_text(json.dumps({"etag": "abc"}))
        (versions_root / "v2.bin").write_bytes(b"old data")
        result = checker.run_now(auto_heal=True)
        assert result.stale_versions == 2
        assert result.issues_healed == 2
        assert not (versions_root / "v1.json").exists()
        assert not (versions_root / "v2.bin").exists()

    def test_valid_versions_pass(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        versions_root = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "versions" / "file.txt"
        versions_root.mkdir(parents=True)
        (versions_root / "v1.json").write_text(json.dumps({"etag": "abc"}))
        (versions_root / "v1.bin").write_bytes(b"old data")
        result = checker.run_now()
        assert result.stale_versions == 0
class TestEtagCache:
    """Detection and healing of ``etag_index.json`` entries that disagree
    with the authoritative metadata."""

    def test_detect_mismatch(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        etag_path = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "etag_index.json"
        etag_path.write_text(json.dumps({"file.txt": "wrong_etag"}))
        result = checker.run_now()
        assert result.etag_cache_inconsistencies == 1
        issues = _issues_of_type(result, "etag_cache_inconsistency")
        assert len(issues) == 1

    def test_heal_mismatch(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        etag_path = storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "etag_index.json"
        etag_path.write_text(json.dumps({"file.txt": "wrong_etag"}))
        result = checker.run_now(auto_heal=True)
        assert result.etag_cache_inconsistencies == 1
        assert result.issues_healed == 1
        # Healing drops the stale cache file so it can be rebuilt lazily.
        assert not etag_path.exists()
class TestLegacyMetadata:
    """Handling of pre-migration ``.meta.json`` sidecar files."""

    @staticmethod
    def _plant_legacy_meta(storage_root, payload):
        # Drop a legacy sidecar next to the object; returns its path.
        legacy = storage_root / "mybucket" / ".meta" / "file.txt.meta.json"
        legacy.parent.mkdir(parents=True)
        legacy.write_text(json.dumps(payload))
        return legacy

    @staticmethod
    def _index_path(storage_root):
        # Location of the modern metadata index for the test bucket.
        return storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "meta" / "_index.json"

    def test_detect_unmigrated(self, storage_root, checker):
        """A legacy sidecar with no modern index reads as unmigrated."""
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        self._plant_legacy_meta(storage_root, {"__etag__": "different_value"})
        self._index_path(storage_root).unlink()

        report = checker.run_now()

        assert report.legacy_metadata_drifts == 1
        drifts = _issues_of_type(report, "legacy_metadata_drift")
        assert len(drifts) == 1
        assert drifts[0].detail == "unmigrated legacy .meta.json"

    def test_detect_drift(self, storage_root, checker):
        """A legacy sidecar disagreeing with the live index is flagged as drift."""
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        self._plant_legacy_meta(storage_root, {"__etag__": "different_value"})

        report = checker.run_now()

        assert report.legacy_metadata_drifts == 1
        drifts = _issues_of_type(report, "legacy_metadata_drift")
        assert "differs from index" in drifts[0].detail

    def test_heal_unmigrated(self, storage_root, checker):
        """Healing an unmigrated sidecar imports it into the index and deletes it."""
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        legacy = self._plant_legacy_meta(
            storage_root, {"__etag__": _md5(b"hello"), "__size__": "5"}
        )
        index_path = self._index_path(storage_root)
        index_path.unlink()

        report = checker.run_now(auto_heal=True)

        assert report.legacy_metadata_drifts == 1
        drifts = _issues_of_type(report, "legacy_metadata_drift")
        assert len(drifts) == 1
        assert drifts[0].healed
        assert not legacy.exists()
        index_data = json.loads(index_path.read_text())
        assert "file.txt" in index_data
        assert index_data["file.txt"]["metadata"]["__etag__"] == _md5(b"hello")

    def test_heal_drift(self, storage_root, checker):
        """Healing a drifted sidecar simply removes the stale legacy file."""
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        legacy = self._plant_legacy_meta(storage_root, {"__etag__": "different_value"})

        report = checker.run_now(auto_heal=True)

        assert report.legacy_metadata_drifts == 1
        assert _issues_of_type(report, "legacy_metadata_drift")[0].healed
        assert not legacy.exists()
class TestDryRun:
    """dry_run must report issues without mutating anything on disk."""

    def test_dry_run_no_changes(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        bucket = storage_root / "mybucket"
        (bucket / "file.txt").write_bytes(b"corrupted")   # etag mismatch
        (bucket / "orphan.txt").write_bytes(b"orphan")    # no index entry

        report = checker.run_now(auto_heal=True, dry_run=True)

        assert report.corrupted_objects == 1
        assert report.orphaned_objects == 1
        assert report.issues_healed == 0
        # The orphan must NOT have been adopted into the index.
        index_file = (
            storage_root / ".myfsio.sys" / "buckets" / "mybucket" / "meta" / "_index.json"
        )
        assert "orphan.txt" not in json.loads(index_file.read_text())
class TestBatchSize:
    """batch_size caps how many objects a single pass may touch."""

    def test_batch_limits_scan(self, storage_root):
        payload = {f"file{i}.txt": f"data{i}".encode() for i in range(10)}
        _setup_bucket(storage_root, "mybucket", payload)
        limited = IntegrityChecker(storage_root=storage_root, batch_size=3)
        assert limited.run_now().objects_scanned <= 3
class TestHistory:
    """Scan executions are recorded and pageable via get_history()."""

    def test_history_recorded(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        checker.run_now()
        entries = checker.get_history()
        assert len(entries) == 1
        assert "corrupted_objects" in entries[0]["result"]

    def test_history_multiple(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        for _ in range(3):
            checker.run_now()
        assert len(checker.get_history()) == 3

    def test_history_pagination(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        for _ in range(5):
            checker.run_now()
        page = checker.get_history(limit=2, offset=1)
        assert len(page) == 2
AUTH_HEADERS = {"X-Access-Key": "admin", "X-Secret-Key": "adminsecret"}
class TestAdminAPI:
    """HTTP admin endpoints for the integrity scanner.

    The IAM payload, app config, and storage teardown were previously
    duplicated between the ``integrity_app`` fixture and
    ``test_disabled_status``; they are now shared via ``_build_app`` /
    ``_shutdown_storage`` so the two app setups cannot drift apart.
    """

    @staticmethod
    def _build_app(tmp_path, *, storage_dir, iam_name, policy_name, extra_config):
        """Create an API app with the standard admin IAM user.

        storage_dir / iam_name / policy_name name files under *tmp_path*;
        extra_config is merged over the base config (e.g. integrity flags).
        """
        from app import create_api_app

        iam_config = tmp_path / iam_name
        iam_config.write_text(json.dumps({
            "users": [
                {
                    "access_key": "admin",
                    "secret_key": "adminsecret",
                    "display_name": "Admin",
                    "policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy", "iam:*"]}],
                }
            ]
        }))
        config = {
            "TESTING": True,
            "SECRET_KEY": "testing",
            "STORAGE_ROOT": tmp_path / storage_dir,
            "IAM_CONFIG": iam_config,
            "BUCKET_POLICY_PATH": tmp_path / policy_name,
            "API_BASE_URL": "http://testserver",
        }
        config.update(extra_config)
        return create_api_app(config)

    @staticmethod
    def _shutdown_storage(flask_app):
        """Flush the storage stats worker, if the backend has one."""
        storage = flask_app.extensions.get("object_storage")
        if storage:
            base = getattr(storage, "storage", storage)
            if hasattr(base, "shutdown_stats"):
                base.shutdown_stats()

    @pytest.fixture
    def integrity_app(self, tmp_path):
        """App with the integrity scanner enabled; tears down storage + scanner."""
        flask_app = self._build_app(
            tmp_path,
            storage_dir="data",
            iam_name="iam.json",
            policy_name="bucket_policies.json",
            extra_config={
                "INTEGRITY_ENABLED": True,
                "INTEGRITY_AUTO_HEAL": False,
                "INTEGRITY_DRY_RUN": False,
            },
        )
        yield flask_app
        self._shutdown_storage(flask_app)
        ic = flask_app.extensions.get("integrity")
        if ic:
            ic.stop()

    def test_status_endpoint(self, integrity_app):
        """Status endpoint reports enabled=True plus scheduling info."""
        client = integrity_app.test_client()
        resp = client.get("/admin/integrity/status", headers=AUTH_HEADERS)
        assert resp.status_code == 200
        data = resp.get_json()
        assert data["enabled"] is True
        assert "interval_hours" in data

    def test_run_endpoint(self, integrity_app):
        """Run endpoint starts an async scan whose result lands in history."""
        client = integrity_app.test_client()
        resp = client.post("/admin/integrity/run", headers=AUTH_HEADERS, json={})
        assert resp.status_code == 200
        data = resp.get_json()
        assert data["status"] == "started"
        _wait_scan_done(client, AUTH_HEADERS)
        resp = client.get("/admin/integrity/history?limit=1", headers=AUTH_HEADERS)
        hist = resp.get_json()
        assert len(hist["executions"]) >= 1
        assert "corrupted_objects" in hist["executions"][0]["result"]
        assert "objects_scanned" in hist["executions"][0]["result"]

    def test_run_with_overrides(self, integrity_app):
        """Per-run dry_run/auto_heal overrides are accepted."""
        client = integrity_app.test_client()
        resp = client.post(
            "/admin/integrity/run",
            headers=AUTH_HEADERS,
            json={"dry_run": True, "auto_heal": True},
        )
        assert resp.status_code == 200
        _wait_scan_done(client, AUTH_HEADERS)

    def test_history_endpoint(self, integrity_app):
        """History endpoint returns at least the execution just triggered."""
        client = integrity_app.test_client()
        client.post("/admin/integrity/run", headers=AUTH_HEADERS, json={})
        _wait_scan_done(client, AUTH_HEADERS)
        resp = client.get("/admin/integrity/history", headers=AUTH_HEADERS)
        assert resp.status_code == 200
        data = resp.get_json()
        assert "executions" in data
        assert len(data["executions"]) >= 1

    def test_auth_required(self, integrity_app):
        """Unauthenticated requests are rejected."""
        client = integrity_app.test_client()
        resp = client.get("/admin/integrity/status")
        assert resp.status_code in (401, 403)

    def test_disabled_status(self, tmp_path):
        """With INTEGRITY_ENABLED off the status endpoint reports enabled=False."""
        flask_app = self._build_app(
            tmp_path,
            storage_dir="data2",
            iam_name="iam2.json",
            policy_name="bp2.json",
            extra_config={"INTEGRITY_ENABLED": False},
        )
        c = flask_app.test_client()
        resp = c.get("/admin/integrity/status", headers=AUTH_HEADERS)
        assert resp.status_code == 200
        data = resp.get_json()
        assert data["enabled"] is False
        self._shutdown_storage(flask_app)
class TestMultipleBuckets:
    """A single pass walks every bucket under the storage root."""

    def test_scans_multiple_buckets(self, storage_root, checker):
        _setup_bucket(storage_root, "bucket1", {"a.txt": b"aaa"})
        _setup_bucket(storage_root, "bucket2", {"b.txt": b"bbb"})

        report = checker.run_now()

        assert report.buckets_scanned == 2
        assert report.objects_scanned >= 2
        assert report.corrupted_objects == 0
class TestGetStatus:
    """Shape of the payload returned by get_status()."""

    def test_status_fields(self, checker):
        status = checker.get_status()
        for field in ("enabled", "running", "interval_hours",
                      "batch_size", "auto_heal", "dry_run"):
            assert field in status

    def test_status_includes_cursor(self, storage_root, checker):
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        checker.run_now()
        status = checker.get_status()
        assert "cursor" in status
        cursor = status["cursor"]
        assert cursor["tracked_buckets"] == 1
        assert "mybucket" in cursor["buckets"]
class TestUnifiedBatchCounter:
    """All check types draw from the same per-run batch budget."""

    def test_orphaned_objects_count_toward_batch(self, storage_root):
        _setup_bucket(storage_root, "mybucket", {})
        bucket = storage_root / "mybucket"
        for i in range(10):
            (bucket / f"orphan{i}.txt").write_bytes(f"data{i}".encode())
        limited = IntegrityChecker(storage_root=storage_root, batch_size=3)
        assert limited.run_now().objects_scanned <= 3

    def test_phantom_metadata_counts_toward_batch(self, storage_root):
        _setup_bucket(
            storage_root,
            "mybucket",
            {f"file{i}.txt": f"data{i}".encode() for i in range(10)},
        )
        # Remove the data files so every index entry becomes a phantom.
        for i in range(10):
            (storage_root / "mybucket" / f"file{i}.txt").unlink()
        limited = IntegrityChecker(storage_root=storage_root, batch_size=5)
        assert limited.run_now().objects_scanned <= 5

    def test_all_check_types_contribute(self, storage_root):
        _setup_bucket(storage_root, "mybucket", {"valid.txt": b"hello"})
        (storage_root / "mybucket" / "orphan.txt").write_bytes(b"orphan")
        roomy = IntegrityChecker(storage_root=storage_root, batch_size=1000)
        assert roomy.run_now().objects_scanned > 2
class TestCursorRotation:
    """Bucket-order rotation driven by the per-bucket scan cursor.

    ``test_rotation_covers_all_buckets`` previously contained dead debug
    leftovers (an empty accumulator set and a no-op loop over
    ``[storage_root]``) and unused ``result2``/``result3`` bindings; those
    are removed while keeping every real assertion.
    """

    def test_oldest_bucket_scanned_first(self, storage_root):
        """get_bucket_order sorts by last-scan timestamp, oldest first."""
        _setup_bucket(storage_root, "bucket-a", {"a.txt": b"aaa"})
        _setup_bucket(storage_root, "bucket-b", {"b.txt": b"bbb"})
        _setup_bucket(storage_root, "bucket-c", {"c.txt": b"ccc"})
        checker = IntegrityChecker(storage_root=storage_root, batch_size=5)
        checker.cursor_store.update_bucket("bucket-a", 1000.0)
        checker.cursor_store.update_bucket("bucket-b", 3000.0)
        checker.cursor_store.update_bucket("bucket-c", 2000.0)
        ordered = checker.cursor_store.get_bucket_order(["bucket-a", "bucket-b", "bucket-c"])
        assert ordered[0] == "bucket-a"
        assert ordered[1] == "bucket-c"
        assert ordered[2] == "bucket-b"

    def test_never_scanned_buckets_first(self, storage_root):
        """Buckets with no cursor entry are ordered ahead of scanned ones."""
        _setup_bucket(storage_root, "bucket-old", {"a.txt": b"aaa"})
        _setup_bucket(storage_root, "bucket-new", {"b.txt": b"bbb"})
        checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
        checker.cursor_store.update_bucket("bucket-old", time.time())
        ordered = checker.cursor_store.get_bucket_order(["bucket-old", "bucket-new"])
        assert ordered[0] == "bucket-new"

    def test_rotation_covers_all_buckets(self, storage_root):
        """Repeated small-batch runs eventually track every bucket."""
        for name in ["bucket-a", "bucket-b", "bucket-c"]:
            _setup_bucket(storage_root, name, {f"{name}.txt": name.encode()})
        checker = IntegrityChecker(storage_root=storage_root, batch_size=4)
        first = checker.run_now()
        assert first.buckets_scanned >= 1
        # Two more passes rotate through the remaining buckets.
        checker.run_now()
        checker.run_now()
        assert checker.cursor_store.get_info()["tracked_buckets"] == 3

    def test_cursor_persistence(self, storage_root):
        """Cursor state written by one checker instance is visible to the next."""
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        checker1 = IntegrityChecker(storage_root=storage_root, batch_size=1000)
        checker1.run_now()
        cursor1 = checker1.cursor_store.get_info()
        assert cursor1["tracked_buckets"] == 1
        assert "mybucket" in cursor1["buckets"]
        checker2 = IntegrityChecker(storage_root=storage_root, batch_size=1000)
        cursor2 = checker2.cursor_store.get_info()
        assert cursor2["tracked_buckets"] == 1
        assert "mybucket" in cursor2["buckets"]

    def test_stale_cursor_cleanup(self, storage_root):
        """Cursor entries for deleted buckets are dropped on the next run."""
        _setup_bucket(storage_root, "bucket-a", {"a.txt": b"aaa"})
        _setup_bucket(storage_root, "bucket-b", {"b.txt": b"bbb"})
        checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
        checker.run_now()
        import shutil
        shutil.rmtree(storage_root / "bucket-b")
        meta_b = storage_root / ".myfsio.sys" / "buckets" / "bucket-b"
        if meta_b.exists():
            shutil.rmtree(meta_b)
        checker.run_now()
        cursor_info = checker.cursor_store.get_info()
        assert "bucket-b" not in cursor_info["buckets"]
        assert "bucket-a" in cursor_info["buckets"]

    def test_cursor_updates_after_scan(self, storage_root):
        """The bucket's cursor timestamp falls within the run's wall-clock window."""
        _setup_bucket(storage_root, "mybucket", {"file.txt": b"hello"})
        checker = IntegrityChecker(storage_root=storage_root, batch_size=1000)
        before = time.time()
        checker.run_now()
        after = time.time()
        cursor_info = checker.cursor_store.get_info()
        ts = cursor_info["buckets"]["mybucket"]
        assert before <= ts <= after