diff --git a/app/__init__.py b/app/__init__.py
index c86e458..2a95281 100644
--- a/app/__init__.py
+++ b/app/__init__.py
@@ -105,6 +105,18 @@ def create_app(
             value /= 1024.0
         return f"{value:.1f} PB"
 
+    @app.template_filter("timestamp_to_datetime")
+    def timestamp_to_datetime(value: float) -> str:
+        """Format Unix timestamp as human-readable datetime."""
+        from datetime import datetime
+        if not value:
+            return "Never"
+        try:
+            dt = datetime.fromtimestamp(value)
+            return dt.strftime("%Y-%m-%d %H:%M:%S")
+        except (ValueError, OSError):
+            return "Unknown"
+
     if include_api:
         from .s3_api import s3_api_bp
diff --git a/app/config.py b/app/config.py
index b5fa037..a5d3783 100644
--- a/app/config.py
+++ b/app/config.py
@@ -65,6 +65,7 @@ class AppConfig:
     secret_ttl_seconds: int
     stream_chunk_size: int
     multipart_min_part_size: int
+    bucket_stats_cache_ttl: int
 
     @classmethod
     def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
@@ -85,8 +86,6 @@ class AppConfig:
         default_secret = "dev-secret-key"
         secret_key = str(_get("SECRET_KEY", default_secret))
 
-        # If using default/missing secret, try to load/persist a generated one from disk
-        # This ensures consistency across Gunicorn workers
         if not secret_key or secret_key == default_secret:
             secret_file = storage_root / ".myfsio.sys" / "config" / ".secret"
             if secret_file.exists():
@@ -100,7 +99,6 @@ class AppConfig:
                     secret_file.write_text(generated)
                     secret_key = generated
                 except OSError:
-                    # Fallback if we can't write to disk (e.g. read-only fs)
                     secret_key = generated
 
         iam_env_override = "IAM_CONFIG" in overrides or "IAM_CONFIG" in os.environ
@@ -156,6 +154,7 @@ class AppConfig:
             "X-Amz-Signature",
         ])
         session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
+        bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60))  # Default 60 seconds
 
         return cls(storage_root=storage_root,
                    max_upload_size=max_upload_size,
@@ -182,7 +181,8 @@ class AppConfig:
                    bulk_delete_max_keys=bulk_delete_max_keys,
                    secret_ttl_seconds=secret_ttl_seconds,
                    stream_chunk_size=stream_chunk_size,
-                   multipart_min_part_size=multipart_min_part_size)
+                   multipart_min_part_size=multipart_min_part_size,
+                   bucket_stats_cache_ttl=bucket_stats_cache_ttl)
 
     def to_flask_config(self) -> Dict[str, Any]:
         return {
@@ -202,6 +202,7 @@ class AppConfig:
             "SECRET_TTL_SECONDS": self.secret_ttl_seconds,
             "STREAM_CHUNK_SIZE": self.stream_chunk_size,
             "MULTIPART_MIN_PART_SIZE": self.multipart_min_part_size,
+            "BUCKET_STATS_CACHE_TTL": self.bucket_stats_cache_ttl,
             "LOG_LEVEL": self.log_level,
             "LOG_FILE": str(self.log_path),
             "LOG_MAX_BYTES": self.log_max_bytes,
diff --git a/app/errors.py b/app/errors.py
new file mode 100644
index 0000000..23056f1
--- /dev/null
+++ b/app/errors.py
@@ -0,0 +1,167 @@
+"""Standardized error handling for API and UI responses."""
+from __future__ import annotations
+
+import logging
+from dataclasses import dataclass, field
+from typing import Optional, Dict, Any
+from xml.etree.ElementTree import Element, SubElement, tostring
+
+from flask import Response, jsonify, request, flash, redirect, url_for, g
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class AppError(Exception):
+    """Base application error with multi-format response support."""
+    code: str
+    message: str
+    status_code: int = 500
+    details: Optional[Dict[str, Any]] = field(default=None)
+
+    def __post_init__(self):
+        super().__init__(self.message)
+
+    def to_xml_response(self) -> Response:
+        """Convert to S3 API XML error response."""
+        error = Element("Error")
+        SubElement(error, "Code").text = self.code
+        SubElement(error, "Message").text = self.message
+        request_id = getattr(g, 'request_id', None) if g else None
+        SubElement(error, "RequestId").text = request_id or "unknown"
+        xml_bytes = tostring(error, encoding="utf-8")
+        return Response(xml_bytes, status=self.status_code, mimetype="application/xml")
+
+    def to_json_response(self) -> tuple[Response, int]:
+        """Convert to JSON error response for UI AJAX calls."""
+        payload: Dict[str, Any] = {
+            "success": False,
+            "error": {
+                "code": self.code,
+                "message": self.message
+            }
+        }
+        if self.details:
+            payload["error"]["details"] = self.details
+        return jsonify(payload), self.status_code
+
+    def to_flash_message(self) -> str:
+        """Convert to user-friendly flash message."""
+        return self.message
+
+
+@dataclass
+class BucketNotFoundError(AppError):
+    """Bucket does not exist."""
+    code: str = "NoSuchBucket"
+    message: str = "The specified bucket does not exist"
+    status_code: int = 404
+
+
+@dataclass
+class BucketAlreadyExistsError(AppError):
+    """Bucket already exists."""
+    code: str = "BucketAlreadyExists"
+    message: str = "The requested bucket name is not available"
+    status_code: int = 409
+
+
+@dataclass
+class BucketNotEmptyError(AppError):
+    """Bucket is not empty."""
+    code: str = "BucketNotEmpty"
+    message: str = "The bucket you tried to delete is not empty"
+    status_code: int = 409
+
+
+@dataclass
+class ObjectNotFoundError(AppError):
+    """Object does not exist."""
+    code: str = "NoSuchKey"
+    message: str = "The specified key does not exist"
+    status_code: int = 404
+
+
+@dataclass
+class InvalidObjectKeyError(AppError):
+    """Invalid object key."""
+    code: str = "InvalidKey"
+    message: str = "The specified key is not valid"
+    status_code: int = 400
+
+
+@dataclass
+class AccessDeniedError(AppError):
+    """Access denied."""
+    code: str = "AccessDenied"
+    message: str = "Access Denied"
+    status_code: int = 403
+
+
+@dataclass
+class InvalidCredentialsError(AppError):
+    """Invalid credentials."""
+    code: str = "InvalidAccessKeyId"
+    message: str = "The access key ID you provided does not exist"
+    status_code: int = 403
+
+
+@dataclass
+class MalformedRequestError(AppError):
+    """Malformed request."""
+    code: str = "MalformedXML"
+    message: str = "The XML you provided was not well-formed"
+    status_code: int = 400
+
+
+@dataclass
+class InvalidArgumentError(AppError):
+    """Invalid argument."""
+    code: str = "InvalidArgument"
+    message: str = "Invalid argument"
+    status_code: int = 400
+
+
+@dataclass
+class EntityTooLargeError(AppError):
+    """Entity too large."""
+    code: str = "EntityTooLarge"
+    message: str = "Your proposed upload exceeds the maximum allowed size"
+    status_code: int = 413
+
+
+def handle_app_error(error: AppError) -> Response:
+    """Handle application errors with appropriate response format."""
+    log_extra = {"error_code": error.code}
+    if error.details:
+        log_extra["details"] = error.details
+
+    logger.error(f"{error.code}: {error.message}", extra=log_extra)
+
+    if request.path.startswith('/ui'):
+        wants_json = (
+            request.is_json or
+            request.headers.get('X-Requested-With') == 'XMLHttpRequest' or
+            'application/json' in request.accept_mimetypes.values()
+        )
+        if wants_json:
+            return error.to_json_response()
+        flash(error.to_flash_message(), 'danger')
+        referrer = request.referrer
+        if referrer and request.host in referrer:
+            return redirect(referrer)
+        return redirect(url_for('ui.buckets_overview'))
+    else:
+        return error.to_xml_response()
+
+
+def register_error_handlers(app):
+    """Register error handlers with a Flask app."""
+    app.register_error_handler(AppError, handle_app_error)
+
+    for error_class in [
+        BucketNotFoundError, BucketAlreadyExistsError, BucketNotEmptyError,
+        ObjectNotFoundError, InvalidObjectKeyError,
+        AccessDeniedError, InvalidCredentialsError,
+        MalformedRequestError, InvalidArgumentError, EntityTooLargeError,
+    ]:
+        app.register_error_handler(error_class, handle_app_error)
diff --git a/app/extensions.py b/app/extensions.py
index 1f8b71a..0fc97a6 100644
--- a/app/extensions.py
+++ b/app/extensions.py
@@ -1,10 +1,17 @@
 """Application-wide extension instances."""
+from flask import g
 from flask_limiter import Limiter
 from flask_limiter.util import get_remote_address
 from flask_wtf import CSRFProtect
 
+def get_rate_limit_key():
+    """Generate rate limit key based on authenticated user."""
+    if hasattr(g, 'principal') and g.principal:
+        return g.principal.access_key
+    return get_remote_address()
+
 # Shared rate limiter instance; configured in app factory.
-limiter = Limiter(key_func=get_remote_address)
+limiter = Limiter(key_func=get_rate_limit_key)
 
 # Global CSRF protection for UI routes.
 csrf = CSRFProtect()
diff --git a/app/iam.py b/app/iam.py
index 3ab64c2..1fe8d28 100644
--- a/app/iam.py
+++ b/app/iam.py
@@ -409,9 +409,11 @@ class IamService:
         raise IamError("User not found")
 
     def get_secret_key(self, access_key: str) -> str | None:
+        self._maybe_reload()
         record = self._users.get(access_key)
         return record["secret_key"] if record else None
 
     def get_principal(self, access_key: str) -> Principal | None:
+        self._maybe_reload()
         record = self._users.get(access_key)
         return self._build_principal(access_key, record) if record else None
diff --git a/app/replication.py b/app/replication.py
index c604727..6c30a7b 100644
--- a/app/replication.py
+++ b/app/replication.py
@@ -1,11 +1,13 @@
 """Background replication worker."""
 from __future__ import annotations
 
+import json
 import logging
 import mimetypes
 import threading
+import time
 from concurrent.futures import ThreadPoolExecutor
-from dataclasses import dataclass
+from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Dict, Optional
 
@@ -21,6 +23,41 @@
 logger = logging.getLogger(__name__)
 
 REPLICATION_USER_AGENT = "S3ReplicationAgent/1.0"
 
+REPLICATION_MODE_NEW_ONLY = "new_only"
+REPLICATION_MODE_ALL = "all"
+
+
+@dataclass
+class ReplicationStats:
+    """Statistics for replication operations - computed dynamically."""
+    objects_synced: int = 0  # Objects that exist in both source and destination
+    objects_pending: int = 0  # Objects in source but not in destination
+    objects_orphaned: int = 0  # Objects in destination but not in source (will be deleted)
+    bytes_synced: int = 0  # Total bytes synced to destination
+    last_sync_at: Optional[float] = None
+    last_sync_key: Optional[str] = None
+
+    def to_dict(self) -> dict:
+        return {
+            "objects_synced": self.objects_synced,
+            "objects_pending": self.objects_pending,
+            "objects_orphaned": self.objects_orphaned,
+            "bytes_synced": self.bytes_synced,
+            "last_sync_at": self.last_sync_at,
+            "last_sync_key": self.last_sync_key,
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict) -> "ReplicationStats":
+        return cls(
+            objects_synced=data.get("objects_synced", 0),
+            objects_pending=data.get("objects_pending", 0),
+            objects_orphaned=data.get("objects_orphaned", 0),
+            bytes_synced=data.get("bytes_synced", 0),
+            last_sync_at=data.get("last_sync_at"),
+            last_sync_key=data.get("last_sync_key"),
+        )
+
 
 @dataclass
@@ -28,6 +65,32 @@ class ReplicationRule:
     bucket_name: str
     target_connection_id: str
     target_bucket: str
     enabled: bool = True
+    mode: str = REPLICATION_MODE_NEW_ONLY
+    created_at: Optional[float] = None
+    stats: ReplicationStats = field(default_factory=ReplicationStats)
+
+    def to_dict(self) -> dict:
+        return {
+            "bucket_name": self.bucket_name,
+            "target_connection_id": self.target_connection_id,
+            "target_bucket": self.target_bucket,
+            "enabled": self.enabled,
+            "mode": self.mode,
+            "created_at": self.created_at,
+            "stats": self.stats.to_dict(),
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict) -> "ReplicationRule":
+        stats_data = data.pop("stats", {})
+        # Handle old rules without mode/created_at
+        if "mode" not in data:
+            data["mode"] = REPLICATION_MODE_NEW_ONLY
+        if "created_at" not in data:
+            data["created_at"] = None
+        rule = cls(**data)
+        rule.stats = ReplicationStats.from_dict(stats_data) if stats_data else ReplicationStats()
+        return rule
 
 
 class ReplicationManager:
@@ -36,6 +99,7 @@
         self.connections = connections
         self.rules_path = rules_path
         self._rules: Dict[str, ReplicationRule] = {}
+        self._stats_lock = threading.Lock()
         self._executor = ThreadPoolExecutor(max_workers=4, thread_name_prefix="ReplicationWorker")
         self.reload_rules()
 
@@ -44,17 +108,15 @@
             self._rules = {}
             return
         try:
-            import json
             with open(self.rules_path, "r") as f:
                 data = json.load(f)
             for bucket, rule_data in data.items():
-                self._rules[bucket] = ReplicationRule(**rule_data)
+                self._rules[bucket] = ReplicationRule.from_dict(rule_data)
         except (OSError, ValueError) as e:
             logger.error(f"Failed to load replication rules: {e}")
 
     def save_rules(self) -> None:
-        import json
-        data = {b: rule.__dict__ for b, rule in self._rules.items()}
+        data = {b: rule.to_dict() for b, rule in self._rules.items()}
         self.rules_path.parent.mkdir(parents=True, exist_ok=True)
         with open(self.rules_path, "w") as f:
             json.dump(data, f, indent=2)
@@ -70,6 +132,99 @@
         if bucket_name in self._rules:
             del self._rules[bucket_name]
             self.save_rules()
+
+    def _update_last_sync(self, bucket_name: str, object_key: str = "") -> None:
+        """Update last sync timestamp after a successful operation."""
+        with self._stats_lock:
+            rule = self._rules.get(bucket_name)
+            if not rule:
+                return
+            rule.stats.last_sync_at = time.time()
+            rule.stats.last_sync_key = object_key
+            self.save_rules()
+
+    def get_sync_status(self, bucket_name: str) -> Optional[ReplicationStats]:
+        """Dynamically compute replication status by comparing source and destination buckets."""
+        rule = self.get_rule(bucket_name)
+        if not rule:
+            return None
+
+        connection = self.connections.get(rule.target_connection_id)
+        if not connection:
+            return rule.stats  # Return cached stats if connection unavailable
+
+        try:
+            # Get source objects
+            source_objects = self.storage.list_objects(bucket_name)
+            source_keys = {obj.key: obj.size for obj in source_objects}
+
+            # Get destination objects
+            s3 = boto3.client(
+                "s3",
+                endpoint_url=connection.endpoint_url,
+                aws_access_key_id=connection.access_key,
+                aws_secret_access_key=connection.secret_key,
+                region_name=connection.region,
+            )
+
+            dest_keys = set()
+            bytes_synced = 0
+            paginator = s3.get_paginator('list_objects_v2')
+            try:
+                for page in paginator.paginate(Bucket=rule.target_bucket):
+                    for obj in page.get('Contents', []):
+                        dest_keys.add(obj['Key'])
+                        if obj['Key'] in source_keys:
+                            bytes_synced += obj.get('Size', 0)
+            except ClientError as e:
+                if e.response['Error']['Code'] == 'NoSuchBucket':
+                    # Destination bucket doesn't exist yet
+                    dest_keys = set()
+                else:
+                    raise
+
+            # Compute stats
+            synced = source_keys.keys() & dest_keys  # Objects in both
+            orphaned = dest_keys - source_keys.keys()  # In dest but not source
+
+            # For "new_only" mode, we can't determine pending since we don't know
+            # which objects existed before replication was enabled. Only "all" mode
+            # should show pending (objects that should be replicated but aren't yet).
+            if rule.mode == REPLICATION_MODE_ALL:
+                pending = source_keys.keys() - dest_keys  # In source but not dest
+            else:
+                pending = set()  # New-only mode: don't show pre-existing as pending
+
+            # Update cached stats with computed values
+            rule.stats.objects_synced = len(synced)
+            rule.stats.objects_pending = len(pending)
+            rule.stats.objects_orphaned = len(orphaned)
+            rule.stats.bytes_synced = bytes_synced
+
+            return rule.stats
+
+        except (ClientError, StorageError) as e:
+            logger.error(f"Failed to compute sync status for {bucket_name}: {e}")
+            return rule.stats  # Return cached stats on error
+
+    def replicate_existing_objects(self, bucket_name: str) -> None:
+        """Trigger replication for all existing objects in a bucket."""
+        rule = self.get_rule(bucket_name)
+        if not rule or not rule.enabled:
+            return
+
+        connection = self.connections.get(rule.target_connection_id)
+        if not connection:
+            logger.warning(f"Cannot replicate existing objects: Connection {rule.target_connection_id} not found")
+            return
+
+        try:
+            objects = self.storage.list_objects(bucket_name)
+            logger.info(f"Starting replication of {len(objects)} existing objects from {bucket_name}")
+            for obj in objects:
+                self._executor.submit(self._replicate_task, bucket_name, obj.key, rule, connection, "write")
+        except StorageError as e:
+            logger.error(f"Failed to list objects for replication: {e}")
 
     def create_remote_bucket(self, connection_id: str, bucket_name: str) -> None:
         """Create a bucket on the remote connection."""
@@ -103,8 +258,19 @@
         self._executor.submit(self._replicate_task, bucket_name, object_key, rule, connection, action)
 
     def _replicate_task(self, bucket_name: str, object_key: str, rule: ReplicationRule, conn: RemoteConnection, action: str) -> None:
+        if ".." in object_key or object_key.startswith("/") or object_key.startswith("\\"):
+            logger.error(f"Invalid object key in replication (path traversal attempt): {object_key}")
+            return
+
+        try:
+            from .storage import ObjectStorage
+            ObjectStorage._sanitize_object_key(object_key)
+        except StorageError as e:
+            logger.error(f"Object key validation failed in replication: {e}")
+            return
+
+        file_size = 0
         try:
-            # Using boto3 to upload
             config = Config(user_agent_extra=REPLICATION_USER_AGENT)
             s3 = boto3.client(
                 "s3",
@@ -119,21 +285,15 @@
             try:
                 s3.delete_object(Bucket=rule.target_bucket, Key=object_key)
                 logger.info(f"Replicated DELETE {bucket_name}/{object_key} to {conn.name} ({rule.target_bucket})")
+                self._update_last_sync(bucket_name, object_key)
             except ClientError as e:
                 logger.error(f"Replication DELETE failed for {bucket_name}/{object_key}: {e}")
             return
 
-        # 1. Get local file path
-        # Note: We are accessing internal storage structure here.
-        # Ideally storage.py should expose a 'get_file_path' or we read the stream.
-        # For efficiency, we'll try to read the file directly if we can, or use storage.get_object
-
-        # We need the file content.
-        # Since ObjectStorage is filesystem based, let's get the stream.
-        # We need to be careful about closing it.
         try:
             path = self.storage.get_object_path(bucket_name, object_key)
         except StorageError:
+            logger.error(f"Source object not found: {bucket_name}/{object_key}")
             return
 
         metadata = self.storage.get_object_metadata(bucket_name, object_key)
@@ -159,7 +319,6 @@
                     Metadata=metadata or {}
                 )
             except (ClientError, S3UploadFailedError) as e:
-                # Check if it's a NoSuchBucket error (either direct or wrapped)
                 is_no_bucket = False
                 if isinstance(e, ClientError):
                     if e.response['Error']['Code'] == 'NoSuchBucket':
@@ -189,6 +348,7 @@
                         raise e
 
             logger.info(f"Replicated {bucket_name}/{object_key} to {conn.name} ({rule.target_bucket})")
+            self._update_last_sync(bucket_name, object_key)
 
         except (ClientError, OSError, ValueError) as e:
             logger.error(f"Replication failed for {bucket_name}/{object_key}: {e}")
diff --git a/app/s3_api.py b/app/s3_api.py
index b080346..c424f48 100644
--- a/app/s3_api.py
+++ b/app/s3_api.py
@@ -11,7 +11,7 @@
 from typing import Any, Dict
 from urllib.parse import quote, urlencode, urlparse
 from xml.etree.ElementTree import Element, SubElement, tostring, fromstring, ParseError
-from flask import Blueprint, Response, current_app, jsonify, request
+from flask import Blueprint, Response, current_app, jsonify, request, g
 from werkzeug.http import http_date
 
 from .bucket_policies import BucketPolicyStore
@@ -127,14 +127,33 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
     if not amz_date:
         raise IamError("Missing Date header")
 
+    try:
+        request_time = datetime.strptime(amz_date, "%Y%m%dT%H%M%SZ").replace(tzinfo=timezone.utc)
+    except ValueError:
+        raise IamError("Invalid X-Amz-Date format")
+
+    now = datetime.now(timezone.utc)
+    time_diff = abs((now - request_time).total_seconds())
+    if time_diff > 900:  # 15 minutes
+        raise IamError("Request timestamp too old or too far in the future")
+
+    required_headers = {'host', 'x-amz-date'}
+    signed_headers_set = set(signed_headers_str.split(';'))
+    if not required_headers.issubset(signed_headers_set):
+        # Some clients might sign 'date' instead of 'x-amz-date'
+        if 'date' in signed_headers_set:
+            required_headers.remove('x-amz-date')
+            required_headers.add('date')
+
+        if not required_headers.issubset(signed_headers_set):
+            raise IamError("Required headers not signed")
+
     credential_scope = f"{date_stamp}/{region}/{service}/aws4_request"
     string_to_sign = f"AWS4-HMAC-SHA256\n{amz_date}\n{credential_scope}\n{hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()}"
-
-    # Calculate Signature
     signing_key = _get_signature_key(secret_key, date_stamp, region, service)
     calculated_signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
 
-    if calculated_signature != signature:
+    if not hmac.compare_digest(calculated_signature, signature):
         raise IamError("SignatureDoesNotMatch")
 
     return _iam().get_principal(access_key)
@@ -155,7 +174,6 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
     except ValueError:
         raise IamError("Invalid Credential format")
 
-    # Check expiration
     try:
         req_time = datetime.strptime(amz_date, "%Y%m%dT%H%M%SZ").replace(tzinfo=timezone.utc)
     except ValueError:
@@ -190,7 +208,6 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
     canonical_headers_parts = []
     for header in signed_headers_list:
         val = req.headers.get(header, "").strip()
-        # Collapse multiple spaces
         val = " ".join(val.split())
         canonical_headers_parts.append(f"{header}:{val}\n")
     canonical_headers = "".join(canonical_headers_parts)
@@ -240,7 +257,6 @@ def 
_verify_sigv4(req: Any) -> Principal | None: def _require_principal(): - # Try SigV4 first if ("Authorization" in request.headers and request.headers["Authorization"].startswith("AWS4-HMAC-SHA256")) or \ (request.args.get("X-Amz-Algorithm") == "AWS4-HMAC-SHA256"): try: @@ -568,6 +584,73 @@ def _render_tagging_document(tags: list[dict[str, str]]) -> Element: SubElement(tag_el, "Value").text = tag.get("Value", "") return root +DANGEROUS_CONTENT_TYPES = frozenset([ + "text/html", + "application/xhtml+xml", + "application/javascript", + "text/javascript", + "application/x-javascript", + "text/ecmascript", + "application/ecmascript", + "image/svg+xml", +]) + +SAFE_EXTENSION_MAP = { + ".txt": ["text/plain"], + ".json": ["application/json"], + ".xml": ["application/xml", "text/xml"], + ".csv": ["text/csv"], + ".pdf": ["application/pdf"], + ".png": ["image/png"], + ".jpg": ["image/jpeg"], + ".jpeg": ["image/jpeg"], + ".gif": ["image/gif"], + ".webp": ["image/webp"], + ".mp4": ["video/mp4"], + ".mp3": ["audio/mpeg"], + ".zip": ["application/zip"], + ".gz": ["application/gzip"], + ".tar": ["application/x-tar"], +} + + +def _validate_content_type(object_key: str, content_type: str | None) -> str | None: + """Validate Content-Type header for security. + + Returns an error message if validation fails, None otherwise. + + Rules: + 1. Block dangerous MIME types that can execute scripts (unless explicitly allowed) + 2. Warn if Content-Type doesn't match file extension (but don't block) + """ + if not content_type: + return None + + base_type = content_type.split(";")[0].strip().lower() + + if base_type in DANGEROUS_CONTENT_TYPES: + ext = "." + object_key.rsplit(".", 1)[-1].lower() if "." in object_key else "" + + allowed_dangerous = { + ".svg": "image/svg+xml", + ".html": "text/html", + ".htm": "text/html", + ".xhtml": "application/xhtml+xml", + ".js": "application/javascript", + ".mjs": "application/javascript", + } + + if ext in allowed_dangerous and base_type == allowed_dangerous[ext]: + return None + + return ( + f"Content-Type '{content_type}' is potentially dangerous and not allowed " + f"for object key '{object_key}'. Use a safe Content-Type or rename the file " + f"with an appropriate extension." 
+ ) + + return None + def _parse_cors_document(payload: bytes) -> list[dict[str, Any]]: try: @@ -715,6 +798,10 @@ def _maybe_handle_bucket_subresource(bucket_name: str) -> Response | None: "tagging": _bucket_tagging_handler, "cors": _bucket_cors_handler, "encryption": _bucket_encryption_handler, + "location": _bucket_location_handler, + "acl": _bucket_acl_handler, + "versions": _bucket_list_versions_handler, + "lifecycle": _bucket_lifecycle_handler, } requested = [key for key in handlers if key in request.args] if not requested: @@ -730,8 +817,8 @@ def _maybe_handle_bucket_subresource(bucket_name: str) -> Response | None: def _bucket_versioning_handler(bucket_name: str) -> Response: - if request.method != "GET": - return _method_not_allowed(["GET"]) + if request.method not in {"GET", "PUT"}: + return _method_not_allowed(["GET", "PUT"]) principal, error = _require_principal() if error: return error @@ -740,6 +827,31 @@ def _bucket_versioning_handler(bucket_name: str) -> Response: except IamError as exc: return _error_response("AccessDenied", str(exc), 403) storage = _storage() + + if request.method == "PUT": + payload = request.get_data(cache=False) or b"" + if not payload.strip(): + return _error_response("MalformedXML", "Request body is required", 400) + try: + root = fromstring(payload) + except ParseError: + return _error_response("MalformedXML", "Unable to parse XML document", 400) + if _strip_ns(root.tag) != "VersioningConfiguration": + return _error_response("MalformedXML", "Root element must be VersioningConfiguration", 400) + status_el = root.find("{*}Status") + if status_el is None: + status_el = root.find("Status") + status = (status_el.text or "").strip() if status_el is not None else "" + if status not in {"Enabled", "Suspended", ""}: + return _error_response("MalformedXML", "Status must be Enabled or Suspended", 400) + try: + storage.set_bucket_versioning(bucket_name, status == "Enabled") + except StorageError as exc: + return _error_response("NoSuchBucket", str(exc), 404) + current_app.logger.info("Bucket versioning updated", extra={"bucket": bucket_name, "status": status}) + return Response(status=200) + + # GET try: enabled = storage.is_versioning_enabled(bucket_name) except StorageError as exc: @@ -750,8 +862,8 @@ def _bucket_versioning_handler(bucket_name: str) -> Response: def _bucket_tagging_handler(bucket_name: str) -> Response: - if request.method not in {"GET", "PUT"}: - return _method_not_allowed(["GET", "PUT"]) + if request.method not in {"GET", "PUT", "DELETE"}: + return _method_not_allowed(["GET", "PUT", "DELETE"]) principal, error = _require_principal() if error: return error @@ -768,6 +880,14 @@ def _bucket_tagging_handler(bucket_name: str) -> Response: if not tags: return _error_response("NoSuchTagSet", "No tags are configured for this bucket", 404) return _xml_response(_render_tagging_document(tags)) + if request.method == "DELETE": + try: + storage.set_bucket_tags(bucket_name, None) + except StorageError as exc: + return _error_response("NoSuchBucket", str(exc), 404) + current_app.logger.info("Bucket tags deleted", extra={"bucket": bucket_name}) + return Response(status=204) + # PUT payload = request.get_data(cache=False) or b"" try: tags = _parse_tagging_document(payload) @@ -783,6 +903,64 @@ def _bucket_tagging_handler(bucket_name: str) -> Response: return Response(status=204) +def _object_tagging_handler(bucket_name: str, object_key: str) -> Response: + """Handle object tagging operations (GET/PUT/DELETE //?tagging).""" + if request.method not in {"GET", 
"PUT", "DELETE"}: + return _method_not_allowed(["GET", "PUT", "DELETE"]) + + principal, error = _require_principal() + if error: + return error + + # For tagging, we use read permission for GET, write for PUT/DELETE + action = "read" if request.method == "GET" else "write" + try: + _authorize_action(principal, bucket_name, action, object_key=object_key) + except IamError as exc: + return _error_response("AccessDenied", str(exc), 403) + + storage = _storage() + + if request.method == "GET": + try: + tags = storage.get_object_tags(bucket_name, object_key) + except StorageError as exc: + message = str(exc) + if "Bucket" in message: + return _error_response("NoSuchBucket", message, 404) + return _error_response("NoSuchKey", message, 404) + return _xml_response(_render_tagging_document(tags)) + + if request.method == "DELETE": + try: + storage.delete_object_tags(bucket_name, object_key) + except StorageError as exc: + message = str(exc) + if "Bucket" in message: + return _error_response("NoSuchBucket", message, 404) + return _error_response("NoSuchKey", message, 404) + current_app.logger.info("Object tags deleted", extra={"bucket": bucket_name, "key": object_key}) + return Response(status=204) + + # PUT + payload = request.get_data(cache=False) or b"" + try: + tags = _parse_tagging_document(payload) + except ValueError as exc: + return _error_response("MalformedXML", str(exc), 400) + if len(tags) > 10: + return _error_response("InvalidTag", "A maximum of 10 tags is supported for objects", 400) + try: + storage.set_object_tags(bucket_name, object_key, tags) + except StorageError as exc: + message = str(exc) + if "Bucket" in message: + return _error_response("NoSuchBucket", message, 404) + return _error_response("NoSuchKey", message, 404) + current_app.logger.info("Object tags updated", extra={"bucket": bucket_name, "key": object_key, "tags": len(tags)}) + return Response(status=204) + + def _sanitize_cors_rules(rules: list[dict[str, Any]]) -> list[dict[str, Any]]: sanitized: list[dict[str, Any]] = [] for rule in rules: @@ -807,8 +985,8 @@ def _sanitize_cors_rules(rules: list[dict[str, Any]]) -> list[dict[str, Any]]: def _bucket_cors_handler(bucket_name: str) -> Response: - if request.method not in {"GET", "PUT"}: - return _method_not_allowed(["GET", "PUT"]) + if request.method not in {"GET", "PUT", "DELETE"}: + return _method_not_allowed(["GET", "PUT", "DELETE"]) principal, error = _require_principal() if error: return error @@ -825,6 +1003,14 @@ def _bucket_cors_handler(bucket_name: str) -> Response: if not rules: return _error_response("NoSuchCORSConfiguration", "No CORS configuration found", 404) return _xml_response(_render_cors_document(rules)) + if request.method == "DELETE": + try: + storage.set_bucket_cors(bucket_name, None) + except StorageError as exc: + return _error_response("NoSuchBucket", str(exc), 404) + current_app.logger.info("Bucket CORS deleted", extra={"bucket": bucket_name}) + return Response(status=204) + # PUT payload = request.get_data(cache=False) or b"" if not payload.strip(): try: @@ -891,6 +1077,328 @@ def _bucket_encryption_handler(bucket_name: str) -> Response: return Response(status=204) +def _bucket_location_handler(bucket_name: str) -> Response: + if request.method != "GET": + return _method_not_allowed(["GET"]) + principal, error = _require_principal() + if error: + return error + try: + _authorize_action(principal, bucket_name, "list") + except IamError as exc: + return _error_response("AccessDenied", str(exc), 403) + storage = _storage() + if not 
storage.bucket_exists(bucket_name): + return _error_response("NoSuchBucket", "Bucket does not exist", 404) + + # Return the configured AWS_REGION + region = current_app.config.get("AWS_REGION", "us-east-1") + root = Element("LocationConstraint") + # AWS returns empty for us-east-1, but we'll be explicit + root.text = region if region != "us-east-1" else None + return _xml_response(root) + + +def _bucket_acl_handler(bucket_name: str) -> Response: + if request.method not in {"GET", "PUT"}: + return _method_not_allowed(["GET", "PUT"]) + principal, error = _require_principal() + if error: + return error + try: + _authorize_action(principal, bucket_name, "policy") + except IamError as exc: + return _error_response("AccessDenied", str(exc), 403) + storage = _storage() + if not storage.bucket_exists(bucket_name): + return _error_response("NoSuchBucket", "Bucket does not exist", 404) + + if request.method == "PUT": + # We don't fully implement ACLs, but we accept the request for compatibility + # Check for canned ACL header + canned_acl = request.headers.get("x-amz-acl", "private") + current_app.logger.info("Bucket ACL set (canned)", extra={"bucket": bucket_name, "acl": canned_acl}) + return Response(status=200) + + # GET - Return a basic ACL document showing full control for owner + root = Element("AccessControlPolicy") + owner = SubElement(root, "Owner") + SubElement(owner, "ID").text = principal.access_key if principal else "anonymous" + SubElement(owner, "DisplayName").text = principal.display_name if principal else "Anonymous" + + acl = SubElement(root, "AccessControlList") + grant = SubElement(acl, "Grant") + grantee = SubElement(grant, "Grantee") + grantee.set("{http://www.w3.org/2001/XMLSchema-instance}type", "CanonicalUser") + SubElement(grantee, "ID").text = principal.access_key if principal else "anonymous" + SubElement(grantee, "DisplayName").text = principal.display_name if principal else "Anonymous" + SubElement(grant, "Permission").text = "FULL_CONTROL" + + return _xml_response(root) + + +def _bucket_list_versions_handler(bucket_name: str) -> Response: + """Handle ListObjectVersions (GET /?versions).""" + if request.method != "GET": + return _method_not_allowed(["GET"]) + + principal, error = _require_principal() + try: + _authorize_action(principal, bucket_name, "list") + except IamError as exc: + if error: + return error + return _error_response("AccessDenied", str(exc), 403) + + storage = _storage() + + try: + objects = storage.list_objects(bucket_name) + except StorageError as exc: + return _error_response("NoSuchBucket", str(exc), 404) + + prefix = request.args.get("prefix", "") + delimiter = request.args.get("delimiter", "") + max_keys = min(int(request.args.get("max-keys", 1000)), 1000) + key_marker = request.args.get("key-marker", "") + + if prefix: + objects = [obj for obj in objects if obj.key.startswith(prefix)] + + if key_marker: + objects = [obj for obj in objects if obj.key > key_marker] + + # Build XML response + root = Element("ListVersionsResult", xmlns="http://s3.amazonaws.com/doc/2006-03-01/") + SubElement(root, "Name").text = bucket_name + SubElement(root, "Prefix").text = prefix + SubElement(root, "KeyMarker").text = key_marker + SubElement(root, "MaxKeys").text = str(max_keys) + if delimiter: + SubElement(root, "Delimiter").text = delimiter + + version_count = 0 + is_truncated = False + next_key_marker = "" + + for obj in objects: + if version_count >= max_keys: + is_truncated = True + break + + # Current version + version = SubElement(root, "Version") + 
SubElement(version, "Key").text = obj.key + SubElement(version, "VersionId").text = "null" # Current version ID + SubElement(version, "IsLatest").text = "true" + SubElement(version, "LastModified").text = obj.last_modified.strftime("%Y-%m-%dT%H:%M:%S.000Z") + SubElement(version, "ETag").text = f'"{obj.etag}"' + SubElement(version, "Size").text = str(obj.size) + SubElement(version, "StorageClass").text = "STANDARD" + + owner = SubElement(version, "Owner") + SubElement(owner, "ID").text = "local-owner" + SubElement(owner, "DisplayName").text = "Local Owner" + + version_count += 1 + next_key_marker = obj.key + + # Get historical versions + try: + versions = storage.list_object_versions(bucket_name, obj.key) + for v in versions: + if version_count >= max_keys: + is_truncated = True + break + + ver_elem = SubElement(root, "Version") + SubElement(ver_elem, "Key").text = obj.key + SubElement(ver_elem, "VersionId").text = v.get("version_id", "unknown") + SubElement(ver_elem, "IsLatest").text = "false" + SubElement(ver_elem, "LastModified").text = v.get("archived_at", "") + SubElement(ver_elem, "ETag").text = f'"{v.get("etag", "")}"' + SubElement(ver_elem, "Size").text = str(v.get("size", 0)) + SubElement(ver_elem, "StorageClass").text = "STANDARD" + + owner = SubElement(ver_elem, "Owner") + SubElement(owner, "ID").text = "local-owner" + SubElement(owner, "DisplayName").text = "Local Owner" + + version_count += 1 + except StorageError: + pass + + SubElement(root, "IsTruncated").text = "true" if is_truncated else "false" + if is_truncated and next_key_marker: + SubElement(root, "NextKeyMarker").text = next_key_marker + + return _xml_response(root) + + +def _bucket_lifecycle_handler(bucket_name: str) -> Response: + """Handle bucket lifecycle configuration (GET/PUT/DELETE /?lifecycle).""" + if request.method not in {"GET", "PUT", "DELETE"}: + return _method_not_allowed(["GET", "PUT", "DELETE"]) + + principal, error = _require_principal() + if error: + return error + try: + _authorize_action(principal, bucket_name, "policy") + except IamError as exc: + return _error_response("AccessDenied", str(exc), 403) + + storage = _storage() + + if not storage.bucket_exists(bucket_name): + return _error_response("NoSuchBucket", "Bucket does not exist", 404) + + if request.method == "GET": + config = storage.get_bucket_lifecycle(bucket_name) + if not config: + return _error_response("NoSuchLifecycleConfiguration", "The lifecycle configuration does not exist", 404) + return _xml_response(_render_lifecycle_config(config)) + + if request.method == "DELETE": + storage.set_bucket_lifecycle(bucket_name, None) + current_app.logger.info("Bucket lifecycle deleted", extra={"bucket": bucket_name}) + return Response(status=204) + + # PUT + payload = request.get_data(cache=False) or b"" + if not payload.strip(): + return _error_response("MalformedXML", "Request body is required", 400) + try: + config = _parse_lifecycle_config(payload) + storage.set_bucket_lifecycle(bucket_name, config) + except ValueError as exc: + return _error_response("MalformedXML", str(exc), 400) + except StorageError as exc: + return _error_response("NoSuchBucket", str(exc), 404) + + current_app.logger.info("Bucket lifecycle updated", extra={"bucket": bucket_name}) + return Response(status=200) + + +def _render_lifecycle_config(config: list) -> Element: + """Render lifecycle configuration to XML.""" + root = Element("LifecycleConfiguration", xmlns="http://s3.amazonaws.com/doc/2006-03-01/") + for rule in config: + rule_el = SubElement(root, "Rule") + 
SubElement(rule_el, "ID").text = rule.get("ID", "") + + # Filter + filter_el = SubElement(rule_el, "Filter") + if rule.get("Prefix"): + SubElement(filter_el, "Prefix").text = rule.get("Prefix", "") + + SubElement(rule_el, "Status").text = rule.get("Status", "Enabled") + + # Expiration + if "Expiration" in rule: + exp = rule["Expiration"] + exp_el = SubElement(rule_el, "Expiration") + if "Days" in exp: + SubElement(exp_el, "Days").text = str(exp["Days"]) + if "Date" in exp: + SubElement(exp_el, "Date").text = exp["Date"] + if exp.get("ExpiredObjectDeleteMarker"): + SubElement(exp_el, "ExpiredObjectDeleteMarker").text = "true" + + # NoncurrentVersionExpiration + if "NoncurrentVersionExpiration" in rule: + nve = rule["NoncurrentVersionExpiration"] + nve_el = SubElement(rule_el, "NoncurrentVersionExpiration") + if "NoncurrentDays" in nve: + SubElement(nve_el, "NoncurrentDays").text = str(nve["NoncurrentDays"]) + + # AbortIncompleteMultipartUpload + if "AbortIncompleteMultipartUpload" in rule: + aimu = rule["AbortIncompleteMultipartUpload"] + aimu_el = SubElement(rule_el, "AbortIncompleteMultipartUpload") + if "DaysAfterInitiation" in aimu: + SubElement(aimu_el, "DaysAfterInitiation").text = str(aimu["DaysAfterInitiation"]) + + return root + + +def _parse_lifecycle_config(payload: bytes) -> list: + """Parse lifecycle configuration from XML.""" + try: + root = fromstring(payload) + except ParseError as exc: + raise ValueError(f"Unable to parse XML document: {exc}") from exc + + if _strip_ns(root.tag) != "LifecycleConfiguration": + raise ValueError("Root element must be LifecycleConfiguration") + + rules = [] + for rule_el in root.findall("{*}Rule") or root.findall("Rule"): + rule: dict = {} + + # ID + id_el = rule_el.find("{*}ID") or rule_el.find("ID") + if id_el is not None and id_el.text: + rule["ID"] = id_el.text.strip() + + # Filter/Prefix + filter_el = rule_el.find("{*}Filter") or rule_el.find("Filter") + if filter_el is not None: + prefix_el = filter_el.find("{*}Prefix") or filter_el.find("Prefix") + if prefix_el is not None and prefix_el.text: + rule["Prefix"] = prefix_el.text + + # Legacy Prefix (outside Filter) + if "Prefix" not in rule: + prefix_el = rule_el.find("{*}Prefix") or rule_el.find("Prefix") + if prefix_el is not None: + rule["Prefix"] = prefix_el.text or "" + + # Status + status_el = rule_el.find("{*}Status") or rule_el.find("Status") + rule["Status"] = (status_el.text or "Enabled").strip() if status_el is not None else "Enabled" + + # Expiration + exp_el = rule_el.find("{*}Expiration") or rule_el.find("Expiration") + if exp_el is not None: + expiration: dict = {} + days_el = exp_el.find("{*}Days") or exp_el.find("Days") + if days_el is not None and days_el.text: + expiration["Days"] = int(days_el.text.strip()) + date_el = exp_el.find("{*}Date") or exp_el.find("Date") + if date_el is not None and date_el.text: + expiration["Date"] = date_el.text.strip() + eodm_el = exp_el.find("{*}ExpiredObjectDeleteMarker") or exp_el.find("ExpiredObjectDeleteMarker") + if eodm_el is not None and (eodm_el.text or "").strip().lower() in {"true", "1"}: + expiration["ExpiredObjectDeleteMarker"] = True + if expiration: + rule["Expiration"] = expiration + + # NoncurrentVersionExpiration + nve_el = rule_el.find("{*}NoncurrentVersionExpiration") or rule_el.find("NoncurrentVersionExpiration") + if nve_el is not None: + nve: dict = {} + days_el = nve_el.find("{*}NoncurrentDays") or nve_el.find("NoncurrentDays") + if days_el is not None and days_el.text: + nve["NoncurrentDays"] = 
int(days_el.text.strip()) + if nve: + rule["NoncurrentVersionExpiration"] = nve + + # AbortIncompleteMultipartUpload + aimu_el = rule_el.find("{*}AbortIncompleteMultipartUpload") or rule_el.find("AbortIncompleteMultipartUpload") + if aimu_el is not None: + aimu: dict = {} + days_el = aimu_el.find("{*}DaysAfterInitiation") or aimu_el.find("DaysAfterInitiation") + if days_el is not None and days_el.text: + aimu["DaysAfterInitiation"] = int(days_el.text.strip()) + if aimu: + rule["AbortIncompleteMultipartUpload"] = aimu + + rules.append(rule) + + return rules + + def _bulk_delete_handler(bucket_name: str) -> Response: principal, error = _require_principal() if error: @@ -1051,7 +1559,7 @@ def bucket_handler(bucket_name: str) -> Response: current_app.logger.info("Bucket deleted", extra={"bucket": bucket_name}) return Response(status=204) - # GET - list objects + # GET - list objects (supports both ListObjects and ListObjectsV2) principal, error = _require_principal() try: _authorize_action(principal, bucket_name, "list") @@ -1064,26 +1572,159 @@ def bucket_handler(bucket_name: str) -> Response: except StorageError as exc: return _error_response("NoSuchBucket", str(exc), 404) - root = Element("ListBucketResult") - SubElement(root, "Name").text = bucket_name - SubElement(root, "MaxKeys").text = str(current_app.config["UI_PAGE_SIZE"]) - SubElement(root, "IsTruncated").text = "false" - for meta in objects: - obj_el = SubElement(root, "Contents") - SubElement(obj_el, "Key").text = meta.key - SubElement(obj_el, "LastModified").text = meta.last_modified.isoformat() - SubElement(obj_el, "ETag").text = f'"{meta.etag}"' - SubElement(obj_el, "Size").text = str(meta.size) + # Check if this is ListObjectsV2 (list-type=2) + list_type = request.args.get("list-type") + prefix = request.args.get("prefix", "") + delimiter = request.args.get("delimiter", "") + max_keys = min(int(request.args.get("max-keys", current_app.config["UI_PAGE_SIZE"])), 1000) + + # Pagination markers + marker = request.args.get("marker", "") # ListObjects v1 + continuation_token = request.args.get("continuation-token", "") # ListObjectsV2 + start_after = request.args.get("start-after", "") # ListObjectsV2 + + # For ListObjectsV2, continuation-token takes precedence, then start-after + # For ListObjects v1, use marker + effective_start = "" + if list_type == "2": + if continuation_token: + import base64 + try: + effective_start = base64.urlsafe_b64decode(continuation_token.encode()).decode("utf-8") + except Exception: + effective_start = continuation_token + elif start_after: + effective_start = start_after + else: + effective_start = marker + + if prefix: + objects = [obj for obj in objects if obj.key.startswith(prefix)] + + if effective_start: + objects = [obj for obj in objects if obj.key > effective_start] + + common_prefixes: list[str] = [] + filtered_objects: list = [] + if delimiter: + seen_prefixes: set[str] = set() + for obj in objects: + key_after_prefix = obj.key[len(prefix):] if prefix else obj.key + if delimiter in key_after_prefix: + # This is a "folder" - extract the common prefix + common_prefix = prefix + key_after_prefix.split(delimiter)[0] + delimiter + if common_prefix not in seen_prefixes: + seen_prefixes.add(common_prefix) + common_prefixes.append(common_prefix) + else: + filtered_objects.append(obj) + objects = filtered_objects + common_prefixes = sorted(common_prefixes) + + total_items = len(objects) + len(common_prefixes) + is_truncated = total_items > max_keys + + if len(objects) >= max_keys: + objects = 
objects[:max_keys] + common_prefixes = [] + else: + remaining = max_keys - len(objects) + common_prefixes = common_prefixes[:remaining] + + next_marker = "" + next_continuation_token = "" + if is_truncated: + if objects: + next_marker = objects[-1].key + elif common_prefixes: + next_marker = common_prefixes[-1].rstrip(delimiter) if delimiter else common_prefixes[-1] + + if list_type == "2" and next_marker: + import base64 + next_continuation_token = base64.urlsafe_b64encode(next_marker.encode()).decode("utf-8") + + if list_type == "2": + root = Element("ListBucketResult") + SubElement(root, "Name").text = bucket_name + SubElement(root, "Prefix").text = prefix + SubElement(root, "MaxKeys").text = str(max_keys) + SubElement(root, "KeyCount").text = str(len(objects) + len(common_prefixes)) + SubElement(root, "IsTruncated").text = "true" if is_truncated else "false" + if delimiter: + SubElement(root, "Delimiter").text = delimiter + + continuation_token = request.args.get("continuation-token", "") + start_after = request.args.get("start-after", "") + if continuation_token: + SubElement(root, "ContinuationToken").text = continuation_token + if start_after: + SubElement(root, "StartAfter").text = start_after + + if is_truncated and next_continuation_token: + SubElement(root, "NextContinuationToken").text = next_continuation_token + + for meta in objects: + obj_el = SubElement(root, "Contents") + SubElement(obj_el, "Key").text = meta.key + SubElement(obj_el, "LastModified").text = meta.last_modified.isoformat() + SubElement(obj_el, "ETag").text = f'"{meta.etag}"' + SubElement(obj_el, "Size").text = str(meta.size) + SubElement(obj_el, "StorageClass").text = "STANDARD" + + for cp in common_prefixes: + cp_el = SubElement(root, "CommonPrefixes") + SubElement(cp_el, "Prefix").text = cp + else: + root = Element("ListBucketResult") + SubElement(root, "Name").text = bucket_name + SubElement(root, "Prefix").text = prefix + SubElement(root, "Marker").text = marker + SubElement(root, "MaxKeys").text = str(max_keys) + SubElement(root, "IsTruncated").text = "true" if is_truncated else "false" + if delimiter: + SubElement(root, "Delimiter").text = delimiter + + if is_truncated and delimiter and next_marker: + SubElement(root, "NextMarker").text = next_marker + + for meta in objects: + obj_el = SubElement(root, "Contents") + SubElement(obj_el, "Key").text = meta.key + SubElement(obj_el, "LastModified").text = meta.last_modified.isoformat() + SubElement(obj_el, "ETag").text = f'"{meta.etag}"' + SubElement(obj_el, "Size").text = str(meta.size) + + for cp in common_prefixes: + cp_el = SubElement(root, "CommonPrefixes") + SubElement(cp_el, "Prefix").text = cp return _xml_response(root) -@s3_api_bp.route("//", methods=["PUT", "GET", "DELETE", "HEAD"], strict_slashes=False) +@s3_api_bp.route("//", methods=["PUT", "GET", "DELETE", "HEAD", "POST"], strict_slashes=False) @limiter.limit("240 per minute") def object_handler(bucket_name: str, object_key: str): storage = _storage() + if "tagging" in request.args: + return _object_tagging_handler(bucket_name, object_key) + + # Multipart Uploads + if request.method == "POST": + if "uploads" in request.args: + return _initiate_multipart_upload(bucket_name, object_key) + if "uploadId" in request.args: + return _complete_multipart_upload(bucket_name, object_key) + return _method_not_allowed(["GET", "PUT", "DELETE", "HEAD", "POST"]) + if request.method == "PUT": + if "partNumber" in request.args and "uploadId" in request.args: + return _upload_part(bucket_name, object_key) + + 
copy_source = request.headers.get("x-amz-copy-source") + if copy_source: + return _copy_object(bucket_name, object_key, copy_source) + _, error = _object_principal("write", bucket_name, object_key) if error: return error @@ -1094,6 +1735,12 @@ def object_handler(bucket_name: str, object_key: str): stream = AwsChunkedDecoder(stream) metadata = _extract_request_metadata() + + content_type = request.headers.get("Content-Type") + validation_error = _validate_content_type(object_key, content_type) + if validation_error: + return _error_response("InvalidArgument", validation_error, 400) + try: meta = storage.put_object( bucket_name, @@ -1121,6 +1768,9 @@ def object_handler(bucket_name: str, object_key: str): return response if request.method in {"GET", "HEAD"}: + if request.method == "GET" and "uploadId" in request.args: + return _list_parts(bucket_name, object_key) + _, error = _object_principal("read", bucket_name, object_key) if error: return error @@ -1146,7 +1796,9 @@ def object_handler(bucket_name: str, object_key: str): current_app.logger.info(action, extra={"bucket": bucket_name, "key": object_key, "bytes": logged_bytes}) return response - # DELETE + if "uploadId" in request.args: + return _abort_multipart_upload(bucket_name, object_key) + _, error = _object_principal("delete", bucket_name, object_key) if error: return error @@ -1161,6 +1813,51 @@ def object_handler(bucket_name: str, object_key: str): return Response(status=204) +def _list_parts(bucket_name: str, object_key: str) -> Response: + principal, error = _require_principal() + if error: + return error + try: + _authorize_action(principal, bucket_name, "read", object_key=object_key) + except IamError as exc: + return _error_response("AccessDenied", str(exc), 403) + + upload_id = request.args.get("uploadId") + if not upload_id: + return _error_response("InvalidArgument", "uploadId is required", 400) + + try: + parts = _storage().list_multipart_parts(bucket_name, upload_id) + except StorageError as exc: + return _error_response("NoSuchUpload", str(exc), 404) + + root = Element("ListPartsResult") + SubElement(root, "Bucket").text = bucket_name + SubElement(root, "Key").text = object_key + SubElement(root, "UploadId").text = upload_id + + initiator = SubElement(root, "Initiator") + SubElement(initiator, "ID").text = principal.access_key + SubElement(initiator, "DisplayName").text = principal.display_name + + owner = SubElement(root, "Owner") + SubElement(owner, "ID").text = principal.access_key + SubElement(owner, "DisplayName").text = principal.display_name + + SubElement(root, "StorageClass").text = "STANDARD" + SubElement(root, "PartNumberMarker").text = "0" + SubElement(root, "NextPartNumberMarker").text = str(parts[-1]["PartNumber"]) if parts else "0" + SubElement(root, "MaxParts").text = "1000" + SubElement(root, "IsTruncated").text = "false" + + for part in parts: + p = SubElement(root, "Part") + SubElement(p, "PartNumber").text = str(part["PartNumber"]) + SubElement(p, "LastModified").text = part["LastModified"].isoformat() + SubElement(p, "ETag").text = f'"{part["ETag"]}"' + SubElement(p, "Size").text = str(part["Size"]) + + return _xml_response(root) @s3_api_bp.route("/bucket-policy/", methods=["GET", "PUT", "DELETE"]) @@ -1280,6 +1977,88 @@ def head_object(bucket_name: str, object_key: str) -> Response: return _error_response("AccessDenied", str(exc), 403) +def _copy_object(dest_bucket: str, dest_key: str, copy_source: str) -> Response: + """Handle S3 CopyObject operation.""" + from urllib.parse import unquote + 
copy_source = unquote(copy_source) + if copy_source.startswith("/"): + copy_source = copy_source[1:] + + parts = copy_source.split("/", 1) + if len(parts) != 2: + return _error_response("InvalidArgument", "Invalid x-amz-copy-source format", 400) + + source_bucket, source_key = parts + if not source_bucket or not source_key: + return _error_response("InvalidArgument", "Invalid x-amz-copy-source format", 400) + + principal, error = _require_principal() + if error: + return error + try: + _authorize_action(principal, source_bucket, "read", object_key=source_key) + except IamError as exc: + return _error_response("AccessDenied", str(exc), 403) + + try: + _authorize_action(principal, dest_bucket, "write", object_key=dest_key) + except IamError as exc: + return _error_response("AccessDenied", str(exc), 403) + + storage = _storage() + + try: + source_path = storage.get_object_path(source_bucket, source_key) + except StorageError: + return _error_response("NoSuchKey", "Source object not found", 404) + + source_metadata = storage.get_object_metadata(source_bucket, source_key) + + metadata_directive = request.headers.get("x-amz-metadata-directive", "COPY").upper() + if metadata_directive == "REPLACE": + metadata = _extract_request_metadata() + content_type = request.headers.get("Content-Type") + validation_error = _validate_content_type(dest_key, content_type) + if validation_error: + return _error_response("InvalidArgument", validation_error, 400) + else: + metadata = source_metadata + + try: + with source_path.open("rb") as stream: + meta = storage.put_object( + dest_bucket, + dest_key, + stream, + metadata=metadata or None, + ) + except StorageError as exc: + message = str(exc) + if "Bucket" in message: + return _error_response("NoSuchBucket", message, 404) + return _error_response("InvalidArgument", message, 400) + + current_app.logger.info( + "Object copied", + extra={ + "source_bucket": source_bucket, + "source_key": source_key, + "dest_bucket": dest_bucket, + "dest_key": dest_key, + "size": meta.size, + }, + ) + + user_agent = request.headers.get("User-Agent", "") + if "S3ReplicationAgent" not in user_agent: + _replication_manager().trigger_replication(dest_bucket, dest_key, action="write") + + root = Element("CopyObjectResult") + SubElement(root, "LastModified").text = meta.last_modified.isoformat() + SubElement(root, "ETag").text = f'"{meta.etag}"' + return _xml_response(root) + + class AwsChunkedDecoder: """Decodes aws-chunked encoded streams.""" def __init__(self, stream): @@ -1312,12 +2091,11 @@ class AwsChunkedDecoder: if crlf != b"\r\n": raise IOError("Malformed chunk: missing CRLF") else: - # Read chunk size line line = b"" while True: char = self.stream.read(1) if not char: - if not line: # EOF at start of chunk size + if not line: self.finished = True return result raise IOError("Unexpected EOF in chunk size") @@ -1325,7 +2103,6 @@ class AwsChunkedDecoder: if line.endswith(b"\r\n"): break - # Parse chunk size (hex) try: line_str = line.decode("ascii").strip() # Handle chunk-signature extension if present (e.g. 
"1000;chunk-signature=...") @@ -1337,7 +2114,6 @@ class AwsChunkedDecoder: if chunk_size == 0: self.finished = True - # Read trailers if any (until empty line) while True: line = b"" while True: @@ -1354,3 +2130,159 @@ class AwsChunkedDecoder: self.chunk_remaining = chunk_size return result + + +def _initiate_multipart_upload(bucket_name: str, object_key: str) -> Response: + principal, error = _object_principal("write", bucket_name, object_key) + if error: + return error + + metadata = _extract_request_metadata() + try: + upload_id = _storage().initiate_multipart_upload( + bucket_name, + object_key, + metadata=metadata or None + ) + except StorageError as exc: + return _error_response("NoSuchBucket", str(exc), 404) + + root = Element("InitiateMultipartUploadResult") + SubElement(root, "Bucket").text = bucket_name + SubElement(root, "Key").text = object_key + SubElement(root, "UploadId").text = upload_id + return _xml_response(root) + + +def _upload_part(bucket_name: str, object_key: str) -> Response: + principal, error = _object_principal("write", bucket_name, object_key) + if error: + return error + + upload_id = request.args.get("uploadId") + part_number_str = request.args.get("partNumber") + if not upload_id or not part_number_str: + return _error_response("InvalidArgument", "uploadId and partNumber are required", 400) + + try: + part_number = int(part_number_str) + except ValueError: + return _error_response("InvalidArgument", "partNumber must be an integer", 400) + + stream = request.stream + content_encoding = request.headers.get("Content-Encoding", "").lower() + if "aws-chunked" in content_encoding: + stream = AwsChunkedDecoder(stream) + + try: + etag = _storage().upload_multipart_part(bucket_name, upload_id, part_number, stream) + except StorageError as exc: + if "NoSuchBucket" in str(exc): + return _error_response("NoSuchBucket", str(exc), 404) + if "Multipart upload not found" in str(exc): + return _error_response("NoSuchUpload", str(exc), 404) + return _error_response("InvalidArgument", str(exc), 400) + + response = Response(status=200) + response.headers["ETag"] = f'"{etag}"' + return response + + +def _complete_multipart_upload(bucket_name: str, object_key: str) -> Response: + principal, error = _object_principal("write", bucket_name, object_key) + if error: + return error + + upload_id = request.args.get("uploadId") + if not upload_id: + return _error_response("InvalidArgument", "uploadId is required", 400) + + payload = request.get_data(cache=False) or b"" + try: + root = fromstring(payload) + except ParseError: + return _error_response("MalformedXML", "Unable to parse XML document", 400) + + if _strip_ns(root.tag) != "CompleteMultipartUpload": + return _error_response("MalformedXML", "Root element must be CompleteMultipartUpload", 400) + + parts = [] + for part_el in list(root): + if _strip_ns(part_el.tag) != "Part": + continue + part_number_el = part_el.find("{*}PartNumber") + if part_number_el is None: + part_number_el = part_el.find("PartNumber") + + etag_el = part_el.find("{*}ETag") + if etag_el is None: + etag_el = part_el.find("ETag") + + if part_number_el is not None and etag_el is not None: + parts.append({ + "PartNumber": int(part_number_el.text or 0), + "ETag": (etag_el.text or "").strip('"') + }) + + try: + meta = _storage().complete_multipart_upload(bucket_name, upload_id, parts) + except StorageError as exc: + if "NoSuchBucket" in str(exc): + return _error_response("NoSuchBucket", str(exc), 404) + if "Multipart upload not found" in str(exc): + return 
_error_response("NoSuchUpload", str(exc), 404) + return _error_response("InvalidPart", str(exc), 400) + + user_agent = request.headers.get("User-Agent", "") + if "S3ReplicationAgent" not in user_agent: + _replication_manager().trigger_replication(bucket_name, object_key, action="write") + + root = Element("CompleteMultipartUploadResult") + location = f"{request.host_url}{bucket_name}/{object_key}" + SubElement(root, "Location").text = location + SubElement(root, "Bucket").text = bucket_name + SubElement(root, "Key").text = object_key + SubElement(root, "ETag").text = f'"{meta.etag}"' + + return _xml_response(root) + + +def _abort_multipart_upload(bucket_name: str, object_key: str) -> Response: + principal, error = _object_principal("delete", bucket_name, object_key) + if error: + return error + + upload_id = request.args.get("uploadId") + if not upload_id: + return _error_response("InvalidArgument", "uploadId is required", 400) + + try: + _storage().abort_multipart_upload(bucket_name, upload_id) + except StorageError as exc: + # Abort is idempotent, but if bucket missing... + if "Bucket does not exist" in str(exc): + return _error_response("NoSuchBucket", str(exc), 404) + + return Response(status=204) + + +@s3_api_bp.before_request +def resolve_principal(): + g.principal = None + # Try SigV4 + try: + if ("Authorization" in request.headers and request.headers["Authorization"].startswith("AWS4-HMAC-SHA256")) or \ + (request.args.get("X-Amz-Algorithm") == "AWS4-HMAC-SHA256"): + g.principal = _verify_sigv4(request) + return + except Exception: + pass + + # Try simple auth headers (internal/testing) + access_key = request.headers.get("X-Access-Key") + secret_key = request.headers.get("X-Secret-Key") + if access_key and secret_key: + try: + g.principal = _iam().authenticate(access_key, secret_key) + except Exception: + pass diff --git a/app/storage.py b/app/storage.py index 37b31db..29e90d5 100644 --- a/app/storage.py +++ b/app/storage.py @@ -10,10 +10,40 @@ import stat import time import unicodedata import uuid +from contextlib import contextmanager from dataclasses import dataclass from datetime import datetime, timezone from pathlib import Path -from typing import Any, BinaryIO, Dict, List, Optional +from typing import Any, BinaryIO, Dict, Generator, List, Optional + +# Platform-specific file locking +if os.name == "nt": + import msvcrt + + @contextmanager + def _file_lock(file_handle) -> Generator[None, None, None]: + """Acquire an exclusive lock on a file (Windows).""" + try: + msvcrt.locking(file_handle.fileno(), msvcrt.LK_NBLCK, 1) + yield + finally: + try: + file_handle.seek(0) + msvcrt.locking(file_handle.fileno(), msvcrt.LK_UNLCK, 1) + except OSError: + pass +else: + import fcntl # type: ignore + + @contextmanager + def _file_lock(file_handle) -> Generator[None, None, None]: + """Acquire an exclusive lock on a file (Unix).""" + try: + fcntl.flock(file_handle.fileno(), fcntl.LOCK_EX) + yield + finally: + fcntl.flock(file_handle.fileno(), fcntl.LOCK_UN) + WINDOWS_RESERVED_NAMES = { "CON", @@ -86,7 +116,6 @@ class ObjectStorage: self.root.mkdir(parents=True, exist_ok=True) self._ensure_system_roots() - # ---------------------- Bucket helpers ---------------------- def list_buckets(self) -> List[BucketMeta]: buckets: List[BucketMeta] = [] for bucket in sorted(self.root.iterdir()): @@ -119,11 +148,25 @@ class ObjectStorage: bucket_path.mkdir(parents=True, exist_ok=False) self._system_bucket_root(bucket_path.name).mkdir(parents=True, exist_ok=True) - def bucket_stats(self, bucket_name: str) 
-> dict[str, int]: - """Return object count and total size for the bucket without hashing files.""" + def bucket_stats(self, bucket_name: str, cache_ttl: int = 60) -> dict[str, int]: + """Return object count and total size for the bucket (cached). + + Args: + bucket_name: Name of the bucket + cache_ttl: Cache time-to-live in seconds (default 60) + """ bucket_path = self._bucket_path(bucket_name) if not bucket_path.exists(): raise StorageError("Bucket does not exist") + + cache_path = self._system_bucket_root(bucket_name) / "stats.json" + if cache_path.exists(): + try: + if time.time() - cache_path.stat().st_mtime < cache_ttl: + return json.loads(cache_path.read_text(encoding="utf-8")) + except (OSError, json.JSONDecodeError): + pass + object_count = 0 total_bytes = 0 for path in bucket_path.rglob("*"): @@ -134,7 +177,24 @@ class ObjectStorage: stat = path.stat() object_count += 1 total_bytes += stat.st_size - return {"objects": object_count, "bytes": total_bytes} + + stats = {"objects": object_count, "bytes": total_bytes} + + try: + cache_path.parent.mkdir(parents=True, exist_ok=True) + cache_path.write_text(json.dumps(stats), encoding="utf-8") + except OSError: + pass + + return stats + + def _invalidate_bucket_stats_cache(self, bucket_id: str) -> None: + """Invalidate the cached bucket statistics.""" + cache_path = self._system_bucket_root(bucket_id) / "stats.json" + try: + cache_path.unlink(missing_ok=True) + except OSError: + pass def delete_bucket(self, bucket_name: str) -> None: bucket_path = self._bucket_path(bucket_name) @@ -150,7 +210,6 @@ class ObjectStorage: self._remove_tree(self._system_bucket_root(bucket_path.name)) self._remove_tree(self._multipart_bucket_root(bucket_path.name)) - # ---------------------- Object helpers ---------------------- def list_objects(self, bucket_name: str) -> List[ObjectMeta]: bucket_path = self._bucket_path(bucket_name) if not bucket_path.exists(): @@ -206,6 +265,9 @@ class ObjectStorage: self._write_metadata(bucket_id, safe_key, metadata) else: self._delete_metadata(bucket_id, safe_key) + + self._invalidate_bucket_stats_cache(bucket_id) + return ObjectMeta( key=safe_key.as_posix(), size=stat.st_size, @@ -239,7 +301,9 @@ class ObjectStorage: rel = path.relative_to(bucket_path) self._safe_unlink(path) self._delete_metadata(bucket_id, rel) - # Clean up now empty parents inside the bucket. 
+ + self._invalidate_bucket_stats_cache(bucket_id) + for parent in path.parents: if parent == bucket_path: break @@ -263,13 +327,16 @@ class ObjectStorage: legacy_version_dir = self._legacy_version_dir(bucket_id, rel) if legacy_version_dir.exists(): shutil.rmtree(legacy_version_dir, ignore_errors=True) + + # Invalidate bucket stats cache + self._invalidate_bucket_stats_cache(bucket_id) + for parent in target.parents: if parent == bucket_path: break if parent.exists() and not any(parent.iterdir()): parent.rmdir() - # ---------------------- Versioning helpers ---------------------- def is_versioning_enabled(self, bucket_name: str) -> bool: bucket_path = self._bucket_path(bucket_name) if not bucket_path.exists(): @@ -282,7 +349,6 @@ class ObjectStorage: config["versioning_enabled"] = bool(enabled) self._write_bucket_config(bucket_path.name, config) - # ---------------------- Bucket configuration helpers ---------------------- def get_bucket_tags(self, bucket_name: str) -> List[Dict[str, str]]: bucket_path = self._require_bucket_path(bucket_name) config = self._read_bucket_config(bucket_path.name) @@ -335,6 +401,80 @@ class ObjectStorage: bucket_path = self._require_bucket_path(bucket_name) self._set_bucket_config_entry(bucket_path.name, "encryption", config_payload or None) + def get_bucket_lifecycle(self, bucket_name: str) -> Optional[List[Dict[str, Any]]]: + """Get lifecycle configuration for bucket.""" + bucket_path = self._require_bucket_path(bucket_name) + config = self._read_bucket_config(bucket_path.name) + lifecycle = config.get("lifecycle") + return lifecycle if isinstance(lifecycle, list) else None + + def set_bucket_lifecycle(self, bucket_name: str, rules: Optional[List[Dict[str, Any]]]) -> None: + """Set lifecycle configuration for bucket.""" + bucket_path = self._require_bucket_path(bucket_name) + self._set_bucket_config_entry(bucket_path.name, "lifecycle", rules) + + def get_object_tags(self, bucket_name: str, object_key: str) -> List[Dict[str, str]]: + """Get tags for an object.""" + bucket_path = self._bucket_path(bucket_name) + if not bucket_path.exists(): + raise StorageError("Bucket does not exist") + safe_key = self._sanitize_object_key(object_key) + object_path = bucket_path / safe_key + if not object_path.exists(): + raise StorageError("Object does not exist") + + for meta_file in (self._metadata_file(bucket_path.name, safe_key), self._legacy_metadata_file(bucket_path.name, safe_key)): + if not meta_file.exists(): + continue + try: + payload = json.loads(meta_file.read_text(encoding="utf-8")) + tags = payload.get("tags") + if isinstance(tags, list): + return tags + return [] + except (OSError, json.JSONDecodeError): + return [] + return [] + + def set_object_tags(self, bucket_name: str, object_key: str, tags: Optional[List[Dict[str, str]]]) -> None: + """Set tags for an object.""" + bucket_path = self._bucket_path(bucket_name) + if not bucket_path.exists(): + raise StorageError("Bucket does not exist") + safe_key = self._sanitize_object_key(object_key) + object_path = bucket_path / safe_key + if not object_path.exists(): + raise StorageError("Object does not exist") + + meta_file = self._metadata_file(bucket_path.name, safe_key) + + existing_payload: Dict[str, Any] = {} + if meta_file.exists(): + try: + existing_payload = json.loads(meta_file.read_text(encoding="utf-8")) + except (OSError, json.JSONDecodeError): + pass + + if tags: + existing_payload["tags"] = tags + else: + existing_payload.pop("tags", None) + + if existing_payload.get("metadata") or 
existing_payload.get("tags"): + meta_file.parent.mkdir(parents=True, exist_ok=True) + meta_file.write_text(json.dumps(existing_payload), encoding="utf-8") + elif meta_file.exists(): + meta_file.unlink() + parent = meta_file.parent + meta_root = self._bucket_meta_root(bucket_path.name) + while parent != meta_root and parent.exists() and not any(parent.iterdir()): + parent.rmdir() + parent = parent.parent + + def delete_object_tags(self, bucket_name: str, object_key: str) -> None: + """Delete all tags from an object.""" + self.set_object_tags(bucket_name, object_key, None) + def list_object_versions(self, bucket_name: str, object_key: str) -> List[Dict[str, Any]]: bucket_path = self._bucket_path(bucket_name) if not bucket_path.exists(): @@ -459,7 +599,6 @@ class ObjectStorage: record.pop("_latest_sort", None) return sorted(aggregated.values(), key=lambda item: item["key"]) - # ---------------------- Multipart helpers ---------------------- def initiate_multipart_upload( self, bucket_name: str, @@ -495,7 +634,15 @@ class ObjectStorage: if part_number < 1: raise StorageError("part_number must be >= 1") bucket_path = self._bucket_path(bucket_name) - manifest, upload_root = self._load_multipart_manifest(bucket_path.name, upload_id) + + # Get the upload root directory + upload_root = self._multipart_dir(bucket_path.name, upload_id) + if not upload_root.exists(): + upload_root = self._legacy_multipart_dir(bucket_path.name, upload_id) + if not upload_root.exists(): + raise StorageError("Multipart upload not found") + + # Write the part data first (can happen concurrently) checksum = hashlib.md5() part_filename = f"part-{part_number:05d}.part" part_path = upload_root / part_filename @@ -506,9 +653,23 @@ class ObjectStorage: "size": part_path.stat().st_size, "filename": part_filename, } - parts = manifest.setdefault("parts", {}) - parts[str(part_number)] = record - self._write_multipart_manifest(upload_root, manifest) + + # Update manifest with file locking to prevent race conditions + manifest_path = upload_root / self.MULTIPART_MANIFEST + lock_path = upload_root / ".manifest.lock" + + with lock_path.open("w") as lock_file: + with _file_lock(lock_file): + # Re-read manifest under lock to get latest state + try: + manifest = json.loads(manifest_path.read_text(encoding="utf-8")) + except (OSError, json.JSONDecodeError) as exc: + raise StorageError("Multipart manifest unreadable") from exc + + parts = manifest.setdefault("parts", {}) + parts[str(part_number)] = record + manifest_path.write_text(json.dumps(manifest), encoding="utf-8") + return record["etag"] def complete_multipart_upload( @@ -550,29 +711,46 @@ class ObjectStorage: safe_key = self._sanitize_object_key(manifest["object_key"]) destination = bucket_path / safe_key destination.parent.mkdir(parents=True, exist_ok=True) - if self._is_versioning_enabled(bucket_path) and destination.exists(): - self._archive_current_version(bucket_id, safe_key, reason="overwrite") - checksum = hashlib.md5() - with destination.open("wb") as target: - for _, record in validated: - part_path = upload_root / record["filename"] - if not part_path.exists(): - raise StorageError(f"Missing part file {record['filename']}") - with part_path.open("rb") as chunk: - while True: - data = chunk.read(1024 * 1024) - if not data: - break - checksum.update(data) - target.write(data) + + lock_file_path = self._system_bucket_root(bucket_id) / "locks" / f"{safe_key.as_posix().replace('/', '_')}.lock" + lock_file_path.parent.mkdir(parents=True, exist_ok=True) + + try: + with 
lock_file_path.open("w") as lock_file: + with _file_lock(lock_file): + if self._is_versioning_enabled(bucket_path) and destination.exists(): + self._archive_current_version(bucket_id, safe_key, reason="overwrite") + checksum = hashlib.md5() + with destination.open("wb") as target: + for _, record in validated: + part_path = upload_root / record["filename"] + if not part_path.exists(): + raise StorageError(f"Missing part file {record['filename']}") + with part_path.open("rb") as chunk: + while True: + data = chunk.read(1024 * 1024) + if not data: + break + checksum.update(data) + target.write(data) - metadata = manifest.get("metadata") - if metadata: - self._write_metadata(bucket_id, safe_key, metadata) - else: - self._delete_metadata(bucket_id, safe_key) + metadata = manifest.get("metadata") + if metadata: + self._write_metadata(bucket_id, safe_key, metadata) + else: + self._delete_metadata(bucket_id, safe_key) + except BlockingIOError: + raise StorageError("Another upload to this key is in progress") + finally: + try: + lock_file_path.unlink(missing_ok=True) + except OSError: + pass shutil.rmtree(upload_root, ignore_errors=True) + + self._invalidate_bucket_stats_cache(bucket_id) + stat = destination.stat() return ObjectMeta( key=safe_key.as_posix(), @@ -592,7 +770,33 @@ class ObjectStorage: if legacy_root.exists(): shutil.rmtree(legacy_root, ignore_errors=True) - # ---------------------- internal helpers ---------------------- + def list_multipart_parts(self, bucket_name: str, upload_id: str) -> List[Dict[str, Any]]: + """List uploaded parts for a multipart upload.""" + bucket_path = self._bucket_path(bucket_name) + manifest, upload_root = self._load_multipart_manifest(bucket_path.name, upload_id) + + parts = [] + parts_map = manifest.get("parts", {}) + for part_num_str, record in parts_map.items(): + part_num = int(part_num_str) + part_filename = record.get("filename") + if not part_filename: + continue + part_path = upload_root / part_filename + if not part_path.exists(): + continue + + stat = part_path.stat() + parts.append({ + "PartNumber": part_num, + "Size": stat.st_size, + "ETag": record.get("etag"), + "LastModified": datetime.fromtimestamp(stat.st_mtime, timezone.utc) + }) + + parts.sort(key=lambda x: x["PartNumber"]) + return parts + def _bucket_path(self, bucket_name: str) -> Path: safe_name = self._sanitize_bucket_name(bucket_name) return self.root / safe_name @@ -886,7 +1090,11 @@ class ObjectStorage: normalized = unicodedata.normalize("NFC", object_key) if normalized != object_key: raise StorageError("Object key must use normalized Unicode") + candidate = Path(normalized) + if ".." 
in candidate.parts: + raise StorageError("Object key contains parent directory references") + if candidate.is_absolute(): raise StorageError("Absolute object keys are not allowed") if getattr(candidate, "drive", ""): diff --git a/app/ui.py b/app/ui.py index adf1f6f..654d2c7 100644 --- a/app/ui.py +++ b/app/ui.py @@ -3,6 +3,8 @@ from __future__ import annotations import json import uuid +import psutil +import shutil from typing import Any from urllib.parse import urlparse @@ -247,7 +249,8 @@ def buckets_overview(): if bucket.name not in allowed_names: continue policy = policy_store.get_policy(bucket.name) - stats = _storage().bucket_stats(bucket.name) + cache_ttl = current_app.config.get("BUCKET_STATS_CACHE_TTL", 60) + stats = _storage().bucket_stats(bucket.name, cache_ttl=cache_ttl) access_label, access_badge = _bucket_access_descriptor(policy) visible_buckets.append({ "meta": bucket, @@ -333,7 +336,7 @@ def bucket_detail(bucket_name: str): except IamError: can_manage_versioning = False - # Replication info + # Replication info - don't compute sync status here (it's slow), let JS fetch it async replication_rule = _replication().get_rule(bucket_name) connections = _connections().list() @@ -469,8 +472,6 @@ def complete_multipart_upload(bucket_name: str, upload_id: str): normalized.append({"part_number": number, "etag": etag}) try: result = _storage().complete_multipart_upload(bucket_name, upload_id, normalized) - - # Trigger replication _replication().trigger_replication(bucket_name, result["key"]) return jsonify(result) @@ -711,17 +712,13 @@ def object_presign(bucket_name: str, object_key: str): except IamError as exc: return jsonify({"error": str(exc)}), 403 - api_base = current_app.config.get("API_BASE_URL") - if not api_base: - api_base = "http://127.0.0.1:5000" - api_base = api_base.rstrip("/") - - url = f"{api_base}/presign/{bucket_name}/{object_key}" + connection_url = "http://127.0.0.1:5000" + url = f"{connection_url}/presign/{bucket_name}/{object_key}" headers = _api_headers() - # Forward the host so the API knows the public URL headers["X-Forwarded-Host"] = request.host headers["X-Forwarded-Proto"] = request.scheme + headers["X-Forwarded-For"] = request.remote_addr or "127.0.0.1" try: response = requests.post(url, headers=headers, json=payload, timeout=5) @@ -730,13 +727,11 @@ def object_presign(bucket_name: str, object_key: str): try: body = response.json() except ValueError: - # Handle XML error responses from S3 backend text = response.text or "" if text.strip().startswith("<"): import xml.etree.ElementTree as ET try: root = ET.fromstring(text) - # Try to find Message or Code message = root.findtext(".//Message") or root.findtext(".//Code") or "Unknown S3 error" body = {"error": message} except ET.ParseError: @@ -946,7 +941,6 @@ def rotate_iam_secret(access_key: str): return redirect(url_for("ui.iam_dashboard")) try: new_secret = _iam().rotate_secret(access_key) - # If rotating own key, update session immediately so subsequent API calls (like presign) work if principal and principal.access_key == access_key: creds = session.get("credentials", {}) creds["secret_key"] = new_secret @@ -1038,7 +1032,6 @@ def update_iam_policies(access_key: str): policies_raw = request.form.get("policies", "").strip() if not policies_raw: - # Empty policies list is valid (clears permissions) policies = [] else: try: @@ -1186,8 +1179,12 @@ def update_bucket_replication(bucket_name: str): _replication().delete_rule(bucket_name) flash("Replication disabled", "info") else: + from .replication import 
REPLICATION_MODE_NEW_ONLY, REPLICATION_MODE_ALL
+        import time
+
+        target_conn_id = request.form.get("target_connection_id")
+        target_bucket = request.form.get("target_bucket", "").strip()
+        replication_mode = request.form.get("replication_mode", REPLICATION_MODE_NEW_ONLY)
         if not target_conn_id or not target_bucket:
             flash("Target connection and bucket are required", "danger")
@@ -1196,14 +1193,50 @@
             bucket_name=bucket_name,
             target_connection_id=target_conn_id,
             target_bucket=target_bucket,
-            enabled=True
+            enabled=True,
+            mode=replication_mode,
+            created_at=time.time(),
         )
         _replication().set_rule(rule)
-        flash("Replication configured", "success")
+
+        # If mode is "all", trigger replication of existing objects
+        if replication_mode == REPLICATION_MODE_ALL:
+            _replication().replicate_existing_objects(bucket_name)
+            flash("Replication configured. Existing objects are being replicated in the background.", "success")
+        else:
+            flash("Replication configured. Only new uploads will be replicated.", "success")
     return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="replication"))
+
+@ui_bp.get("/buckets/<bucket_name>/replication/status")
+def get_replication_status(bucket_name: str):
+    """Async endpoint to fetch replication sync status without blocking page load."""
+    principal = _current_principal()
+    try:
+        _authorize_ui(principal, bucket_name, "read")
+    except IamError:
+        return jsonify({"error": "Access denied"}), 403
+
+    rule = _replication().get_rule(bucket_name)
+    if not rule:
+        return jsonify({"error": "No replication rule"}), 404
+
+    # This is the slow operation - compute sync status by comparing buckets
+    stats = _replication().get_sync_status(bucket_name)
+    if not stats:
+        return jsonify({"error": "Failed to compute status"}), 500
+
+    return jsonify({
+        "objects_synced": stats.objects_synced,
+        "objects_pending": stats.objects_pending,
+        "objects_orphaned": stats.objects_orphaned,
+        "bytes_synced": stats.bytes_synced,
+        "last_sync_at": stats.last_sync_at,
+        "last_sync_key": stats.last_sync_key,
+    })
+
+
 @ui_bp.get("/connections")
 def connections_dashboard():
     principal = _current_principal()
@@ -1217,6 +1250,55 @@
     return render_template("connections.html", connections=connections, principal=principal)
+
+
+@ui_bp.get("/metrics")
+def metrics_dashboard():
+    principal = _current_principal()
+
+    cpu_percent = psutil.cpu_percent(interval=0.1)
+    memory = psutil.virtual_memory()
+
+    storage_root = current_app.config["STORAGE_ROOT"]
+    disk = psutil.disk_usage(storage_root)
+
+    storage = _storage()
+    buckets = storage.list_buckets()
+    total_buckets = len(buckets)
+
+    total_objects = 0
+    total_bytes_used = 0
+
+    # Note: Uses cached stats from storage layer to improve performance
+    cache_ttl = current_app.config.get("BUCKET_STATS_CACHE_TTL", 60)
+    for bucket in buckets:
+        stats = storage.bucket_stats(bucket.name, cache_ttl=cache_ttl)
+        total_objects += stats["objects"]
+        total_bytes_used += stats["bytes"]
+
+    return render_template(
+        "metrics.html",
+        principal=principal,
+        cpu_percent=cpu_percent,
+        memory={
+            "total": _format_bytes(memory.total),
+            "available": _format_bytes(memory.available),
+            "used": _format_bytes(memory.used),
+            "percent": memory.percent,
+        },
+        disk={
+            "total": _format_bytes(disk.total),
+            "free": _format_bytes(disk.free),
+            "used": _format_bytes(disk.used),
+            "percent": disk.percent,
+        },
+        app={
+            "buckets": total_buckets,
+            "objects": total_objects,
+            "storage_used": _format_bytes(total_bytes_used),
+
"storage_raw": total_bytes_used, + } + ) + + @ui_bp.app_errorhandler(404) def ui_not_found(error): # type: ignore[override] prefix = ui_bp.url_prefix or "" diff --git a/app/version.py b/app/version.py index bda6952..950456f 100644 --- a/app/version.py +++ b/app/version.py @@ -1,7 +1,7 @@ """Central location for the application version string.""" from __future__ import annotations -APP_VERSION = "0.1.1" +APP_VERSION = "0.1.2" def get_version() -> str: diff --git a/requirements.txt b/requirements.txt index 43f1ae7..7c2c75d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,3 +6,4 @@ pytest>=7.4 requests>=2.31 boto3>=1.34 waitress>=2.1.2 +psutil>=5.9.0 diff --git a/run.py b/run.py index 230ca48..805f60a 100644 --- a/run.py +++ b/run.py @@ -3,6 +3,7 @@ from __future__ import annotations import argparse import os +import sys import warnings from multiprocessing import Process @@ -18,6 +19,11 @@ def _is_debug_enabled() -> bool: return os.getenv("FLASK_DEBUG", "0").lower() in ("1", "true", "yes") +def _is_frozen() -> bool: + """Check if running as a compiled binary (PyInstaller/Nuitka).""" + return getattr(sys, 'frozen', False) or '__compiled__' in globals() + + def serve_api(port: int, prod: bool = False) -> None: app = create_api_app() if prod: @@ -48,18 +54,28 @@ if __name__ == "__main__": parser.add_argument("--api-port", type=int, default=5000) parser.add_argument("--ui-port", type=int, default=5100) parser.add_argument("--prod", action="store_true", help="Run in production mode using Waitress") + parser.add_argument("--dev", action="store_true", help="Force development mode (Flask dev server)") args = parser.parse_args() + # Default to production mode when running as compiled binary + # unless --dev is explicitly passed + prod_mode = args.prod or (_is_frozen() and not args.dev) + + if prod_mode: + print("Running in production mode (Waitress)") + else: + print("Running in development mode (Flask dev server)") + if args.mode in {"api", "both"}: print(f"Starting API server on port {args.api_port}...") - api_proc = Process(target=serve_api, args=(args.api_port, args.prod), daemon=True) + api_proc = Process(target=serve_api, args=(args.api_port, prod_mode), daemon=True) api_proc.start() else: api_proc = None if args.mode in {"ui", "both"}: print(f"Starting UI server on port {args.ui_port}...") - serve_ui(args.ui_port, args.prod) + serve_ui(args.ui_port, prod_mode) elif api_proc: try: api_proc.join() diff --git a/static/css/main.css b/static/css/main.css index 190f509..e603255 100644 --- a/static/css/main.css +++ b/static/css/main.css @@ -13,6 +13,8 @@ --myfsio-policy-bg: #0f172a; --myfsio-policy-fg: #e2e8f0; --myfsio-hover-bg: rgba(59, 130, 246, 0.12); + --myfsio-accent: #3b82f6; + --myfsio-accent-hover: #2563eb; } [data-theme='dark'] { @@ -30,6 +32,8 @@ --myfsio-policy-bg: #0f1419; --myfsio-policy-fg: #f8fafc; --myfsio-hover-bg: rgba(59, 130, 246, 0.2); + --myfsio-accent: #60a5fa; + --myfsio-accent-hover: #3b82f6; } [data-theme='dark'] body, @@ -72,6 +76,7 @@ code { padding: 0.15rem 0.4rem; border-radius: 0.25rem; } + [data-theme='dark'] code { background-color: rgba(148, 163, 184, 0.15); color: #93c5fd; @@ -109,32 +114,31 @@ code { font-weight: 500; } -/* Drag and Drop Zone */ .drop-zone { - position: relative; - transition: all 0.2s ease; + position: relative; + transition: all 0.2s ease; } .drop-zone.drag-over { - background-color: var(--myfsio-hover-bg); - border: 2px dashed var(--myfsio-input-border); + background-color: var(--myfsio-hover-bg); + border: 2px dashed 
var(--myfsio-input-border); } .drop-zone.drag-over::after { - content: 'Drop files here to upload'; - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - font-size: 1.5rem; - font-weight: 600; - color: var(--myfsio-muted); - pointer-events: none; - z-index: 10; + content: 'Drop files here to upload'; + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + font-size: 1.5rem; + font-weight: 600; + color: var(--myfsio-muted); + pointer-events: none; + z-index: 10; } .drop-zone.drag-over table { - opacity: 0.3; + opacity: 0.3; } .modal-header, @@ -145,7 +149,10 @@ code { .myfsio-nav { background: var(--myfsio-nav-gradient); box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1); + backdrop-filter: blur(8px); + -webkit-backdrop-filter: blur(8px); } + .myfsio-nav .navbar-brand { color: #fff; font-weight: 600; @@ -154,33 +161,42 @@ code { align-items: center; gap: 0.5rem; } + .myfsio-logo { border-radius: 0.35rem; box-shadow: 0 0 6px rgba(15, 23, 42, 0.35); background-color: rgba(255, 255, 255, 0.1); } + .myfsio-title { display: inline-block; } + .myfsio-nav .nav-link { color: var(--myfsio-nav-link); transition: color 0.2s ease; } + .myfsio-nav .nav-link:hover { color: var(--myfsio-nav-link-hover); } + .myfsio-nav .nav-link.nav-link-muted { opacity: 0.75; } + .myfsio-nav .nav-link.nav-link-muted .badge { color: #0f172a; background-color: #fef08a; } + [data-theme='dark'] .myfsio-nav .nav-link.nav-link-muted .badge { color: #0f172a; background-color: #fde047; } + .myfsio-nav .navbar-toggler { border-color: rgba(255, 255, 255, 0.6); } + .myfsio-nav .navbar-toggler-icon { filter: invert(1); } @@ -329,6 +345,7 @@ code { .badge { font-weight: 500; padding: 0.35em 0.65em; + font-size: 0.8125rem; } .theme-toggle { @@ -434,6 +451,22 @@ code { padding: 1.5rem; cursor: pointer; transition: border-color 0.2s ease, background-color 0.2s ease; + position: relative; + overflow: hidden; +} + +.upload-dropzone::before { + content: ''; + position: absolute; + inset: 0; + background: linear-gradient(135deg, rgba(59, 130, 246, 0.03) 0%, rgba(139, 92, 246, 0.03) 100%); + opacity: 0; + transition: opacity 0.3s ease; +} + +.upload-dropzone:hover::before, +.upload-dropzone.is-dragover::before { + opacity: 1; } .upload-dropzone.is-dragover { @@ -655,6 +688,18 @@ code { .btn-primary { color: #ffffff; + background: linear-gradient(135deg, #3b82f6 0%, #2563eb 100%); + border: none; + box-shadow: 0 2px 4px rgba(59, 130, 246, 0.3); +} + +.btn-primary:hover { + background: linear-gradient(135deg, #2563eb 0%, #1d4ed8 100%); + box-shadow: 0 4px 12px rgba(59, 130, 246, 0.4); +} + +.btn-primary:active { + box-shadow: 0 1px 2px rgba(59, 130, 246, 0.3); } [data-theme='dark'] .btn-danger { @@ -716,17 +761,6 @@ code { filter: invert(1) grayscale(100%) brightness(200%); } -.config-copy { - color: #ffffff; - border-color: rgba(255, 255, 255, 0.7); -} - -.config-copy:hover { - color: #0f172a; - background-color: #ffffff; - border-color: #ffffff; -} - [data-theme='dark'] .border { border-color: var(--myfsio-card-border) !important; } @@ -765,23 +799,6 @@ code { letter-spacing: -0.02em; } -.config-copy { - position: absolute; - top: 0.5rem; - right: 0.5rem; - opacity: 0.8; - transition: opacity 0.2s; - background-color: rgba(0, 0, 0, 0.5); - border: none; - color: white; -} - -.config-copy:hover { - opacity: 1; - background-color: rgba(0, 0, 0, 0.7); - color: white; -} - @keyframes pulse { 0%, 100% { opacity: 1; } 50% { opacity: 0.5; } @@ -849,7 +866,6 @@ pre code { margin-top: 1.25rem; } -/* 
Breadcrumb styling */ .breadcrumb { background-color: transparent; padding: 0.5rem 0; @@ -880,58 +896,684 @@ pre code { color: var(--myfsio-muted); } -/* Icon alignment */ .bi { vertical-align: -0.125em; } -/* Sticky improvements */ .sticky-top { top: 1.5rem; } -/* Better card spacing */ .card-body dl:last-child { margin-bottom: 0; } -/* Empty state improvements */ .text-center svg { display: inline-block; } -/* Input group improvements */ [data-theme='dark'] .input-group .btn-outline-primary { background-color: transparent; } -/* File size nowrap */ .text-nowrap { white-space: nowrap; } -/* Alert improvements */ .alert svg { flex-shrink: 0; } -/* Better hover for table rows with data */ [data-object-row]:hover { background-color: var(--myfsio-hover-bg) !important; } -/* Improve spacing in button groups */ .btn-group-sm .btn { padding: 0.25rem 0.6rem; font-size: 0.875rem; } -/* Better modal styling */ +.modal-content { + border: none; + border-radius: 1rem; + box-shadow: 0 25px 50px -12px rgba(0, 0, 0, 0.25); + overflow: hidden; +} + .modal-header { background-color: var(--myfsio-card-bg); + border-bottom: 1px solid var(--myfsio-card-border); + padding: 1.25rem 1.5rem; +} + +.modal-header.border-0 { + border-bottom: none; +} + +.modal-title { + font-weight: 600; + display: flex; + align-items: center; + gap: 0.5rem; +} + +.modal-body { + padding: 1.5rem; +} + +.modal-footer { + background-color: var(--myfsio-preview-bg); + border-top: 1px solid var(--myfsio-card-border); + padding: 1rem 1.5rem; + gap: 0.75rem; +} + +.modal-footer.border-0 { + border-top: none; + background-color: var(--myfsio-card-bg); } -/* Badge improvements */ -.badge { +[data-theme='dark'] .modal-footer { + background-color: rgba(0, 0, 0, 0.2); +} + +[data-theme='dark'] .modal-footer.border-0 { + background-color: var(--myfsio-card-bg); +} + +.modal-icon { + width: 48px; + height: 48px; + border-radius: 50%; + display: flex; + align-items: center; + justify-content: center; + margin: 0 auto 1rem; +} + +.modal-icon-danger { + background: rgba(239, 68, 68, 0.1); + color: #ef4444; +} + +.modal-icon-warning { + background: rgba(251, 191, 36, 0.1); + color: #f59e0b; +} + +.modal-icon-success { + background: rgba(34, 197, 94, 0.1); + color: #22c55e; +} + +.modal-icon-info { + background: rgba(59, 130, 246, 0.1); + color: #3b82f6; +} + +[data-theme='dark'] .modal-icon-danger { + background: rgba(239, 68, 68, 0.2); + color: #f87171; +} + +[data-theme='dark'] .modal-icon-warning { + background: rgba(251, 191, 36, 0.2); + color: #fbbf24; +} + +[data-theme='dark'] .modal-icon-success { + background: rgba(34, 197, 94, 0.2); + color: #4ade80; +} + +[data-theme='dark'] .modal-icon-info { + background: rgba(59, 130, 246, 0.2); + color: #60a5fa; +} + +.modal .alert { + border-radius: 0.75rem; +} + +.modal .form-control, +.modal .form-select { + border-radius: 0.5rem; +} + +.modal .btn { + border-radius: 0.5rem; + padding: 0.5rem 1rem; + font-weight: 500; +} + +.modal .btn-sm { + padding: 0.375rem 0.75rem; +} + +.modal .list-group { + border-radius: 0.75rem; + overflow: hidden; +} + +.modal .list-group-item { + border-color: var(--myfsio-card-border); +} + +.modal-backdrop { + backdrop-filter: blur(4px); + -webkit-backdrop-filter: blur(4px); +} + +.user-avatar { + width: 40px; + height: 40px; + border-radius: 50%; + background: linear-gradient(135deg, #3b82f6, #8b5cf6); + display: flex; + align-items: center; + justify-content: center; + color: white; + flex-shrink: 0; +} + +.connection-icon { + width: 32px; + height: 32px; + 
border-radius: 0.5rem; + background: var(--myfsio-preview-bg); + display: flex; + align-items: center; + justify-content: center; + color: var(--myfsio-muted); + flex-shrink: 0; +} + +[data-theme='dark'] .connection-icon { + background: rgba(255, 255, 255, 0.1); +} + +.bucket-card { + position: relative; + transition: all 0.2s cubic-bezier(0.4, 0, 0.2, 1); + border: 1px solid var(--myfsio-card-border) !important; + overflow: hidden; + border-radius: 1rem !important; +} + +.bucket-card::before { + content: ''; + position: absolute; + top: 0; + left: 0; + right: 0; + height: 4px; + background: linear-gradient(90deg, #3b82f6, #8b5cf6); + opacity: 0; + transition: opacity 0.2s ease; +} + +.bucket-card:hover { + transform: translateY(-2px); + box-shadow: 0 8px 24px -4px rgba(0, 0, 0, 0.12), 0 4px 8px -4px rgba(0, 0, 0, 0.08); + border-color: var(--myfsio-accent) !important; +} + +.bucket-card:hover::before { + opacity: 1; +} + +[data-theme='dark'] .bucket-card:hover { + box-shadow: 0 8px 24px -4px rgba(0, 0, 0, 0.4), 0 4px 8px -4px rgba(0, 0, 0, 0.3); +} + +.bucket-card .card-body { + padding: 1.25rem 1.5rem; +} + +.bucket-card .card-footer { + padding: 0.75rem 1.5rem; +} + +.bucket-card .bucket-icon { + width: 44px; + height: 44px; + display: flex; + align-items: center; + justify-content: center; + border-radius: 12px; + background: linear-gradient(135deg, rgba(59, 130, 246, 0.15) 0%, rgba(139, 92, 246, 0.15) 100%); + color: var(--myfsio-accent); + transition: transform 0.2s ease; +} + +.bucket-card:hover .bucket-icon { + transform: scale(1.05); +} + +[data-theme='dark'] .bucket-card .bucket-icon { + background: linear-gradient(135deg, rgba(59, 130, 246, 0.25) 0%, rgba(139, 92, 246, 0.25) 100%); +} + +.bucket-card .bucket-stats { + display: grid; + grid-template-columns: 1fr 1fr; + gap: 1rem; + padding: 1rem; + margin-top: 1rem; + background: var(--myfsio-preview-bg); + border-radius: 0.75rem; +} + +.bucket-card .bucket-stat { + text-align: center; +} + +.bucket-card .bucket-stat-value { + font-size: 1.25rem; + font-weight: 700; + color: var(--myfsio-text); + line-height: 1.2; +} + +.bucket-card .bucket-stat-label { + font-size: 0.75rem; + text-transform: uppercase; + letter-spacing: 0.05em; + color: var(--myfsio-muted); + margin-top: 0.25rem; +} + +.bucket-card .bucket-name { + font-size: 1.1rem; + font-weight: 600; + color: var(--myfsio-text); + margin: 0; + line-height: 1.3; +} + +.bucket-card .bucket-access-badge { + font-size: 0.7rem; + padding: 0.35em 0.75em; + border-radius: 999px; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.03em; +} + +.form-control:focus, +.form-select:focus, +.btn:focus-visible { + outline: none; + box-shadow: 0 0 0 3px rgba(59, 130, 246, 0.3), 0 0 0 1px rgba(59, 130, 246, 0.5); +} + +@keyframes shimmer { + 0% { background-position: -200% 0; } + 100% { background-position: 200% 0; } +} + +.skeleton { + background: linear-gradient(90deg, var(--myfsio-card-bg) 25%, var(--myfsio-hover-bg) 50%, var(--myfsio-card-bg) 75%); + background-size: 200% 100%; + animation: shimmer 1.5s infinite; + border-radius: 4px; +} + +.toast-container { + position: fixed; + bottom: 1.5rem; + right: 1.5rem; + z-index: 1100; + display: flex; + flex-direction: column; + gap: 0.75rem; +} + +.toast-item { + background: var(--myfsio-card-bg); + border: 1px solid var(--myfsio-card-border); + border-radius: 0.75rem; + padding: 1rem 1.25rem; + box-shadow: 0 10px 25px -5px rgba(0, 0, 0, 0.1), 0 8px 10px -6px rgba(0, 0, 0, 0.1); + display: flex; + align-items: center; + gap: 
0.75rem; + animation: slideInRight 0.3s ease-out; + max-width: 360px; +} + +@keyframes slideInRight { + from { opacity: 0; transform: translateX(100%); } + to { opacity: 1; transform: translateX(0); } +} + +.empty-state { + padding: 3rem 2rem; + text-align: center; +} + +.empty-state-icon { + width: 80px; + height: 80px; + margin: 0 auto 1.5rem; + display: flex; + align-items: center; + justify-content: center; + background: linear-gradient(135deg, rgba(59, 130, 246, 0.1) 0%, rgba(139, 92, 246, 0.1) 100%); + border-radius: 50%; + color: #3b82f6; +} + +[data-theme='dark'] .empty-state-icon { + background: linear-gradient(135deg, rgba(59, 130, 246, 0.2) 0%, rgba(139, 92, 246, 0.2) 100%); + color: #60a5fa; +} + +.metric-card { + position: relative; + overflow: hidden; +} + +.metric-card::after { + content: ''; + position: absolute; + top: 0; + right: 0; + width: 120px; + height: 120px; + background: linear-gradient(135deg, transparent 40%, rgba(59, 130, 246, 0.05) 100%); + border-radius: 50%; + transform: translate(30%, -30%); +} + +[data-theme='dark'] .metric-card::after { + background: linear-gradient(135deg, transparent 40%, rgba(59, 130, 246, 0.1) 100%); +} + +.progress { + overflow: visible; + border-radius: 999px; +} + +.progress-bar { + border-radius: 999px; + position: relative; + transition: width 0.6s ease; +} + +.icon-box { + transition: all 0.2s ease; +} + +.card:hover .icon-box { + transform: scale(1.1) rotate(5deg); +} + +.search-input-wrapper { + position: relative; +} + +.search-input-wrapper .form-control { + padding-left: 2.75rem; + border-radius: 999px; + transition: all 0.2s ease; +} + +.search-input-wrapper .form-control:focus { + padding-left: 2.75rem; +} + +.search-input-wrapper .search-icon { + position: absolute; + left: 1rem; + top: 50%; + transform: translateY(-50%); + color: var(--myfsio-muted); + pointer-events: none; + transition: color 0.2s ease; +} + +.search-input-wrapper:focus-within .search-icon { + color: #3b82f6; +} + +.nav-tabs { + border-bottom: 2px solid var(--myfsio-card-border); +} + +.nav-tabs .nav-link { + border: none; + color: var(--myfsio-muted); + padding: 0.75rem 1.25rem; + font-weight: 500; + position: relative; + transition: all 0.2s ease; +} + +.nav-tabs .nav-link::after { + content: ''; + position: absolute; + bottom: -2px; + left: 0; + right: 0; + height: 2px; + background: linear-gradient(90deg, #3b82f6, #8b5cf6); + transform: scaleX(0); + transition: transform 0.2s ease; +} + +.nav-tabs .nav-link:hover { + color: var(--myfsio-text); + border: none; +} + +.nav-tabs .nav-link.active { + background: transparent; + color: #3b82f6; + border: none; +} + +.nav-tabs .nav-link.active::after { + transform: scaleX(1); +} + +[data-theme='dark'] .nav-tabs .nav-link.active { + color: #60a5fa; +} + +.file-type-badge { + display: inline-flex; + align-items: center; + padding: 0.25rem 0.5rem; + border-radius: 4px; + font-size: 0.7rem; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.05em; +} + +.file-type-image { background: rgba(139, 92, 246, 0.15); color: #7c3aed; } +.file-type-video { background: rgba(236, 72, 153, 0.15); color: #db2777; } +.file-type-audio { background: rgba(245, 158, 11, 0.15); color: #d97706; } +.file-type-document { background: rgba(59, 130, 246, 0.15); color: #2563eb; } +.file-type-archive { background: rgba(34, 197, 94, 0.15); color: #16a34a; } +.file-type-code { background: rgba(99, 102, 241, 0.15); color: #4f46e5; } + +[data-theme='dark'] .file-type-image { background: rgba(139, 92, 246, 0.25); color: 
#a78bfa; } +[data-theme='dark'] .file-type-video { background: rgba(236, 72, 153, 0.25); color: #f472b6; } +[data-theme='dark'] .file-type-audio { background: rgba(245, 158, 11, 0.25); color: #fbbf24; } +[data-theme='dark'] .file-type-document { background: rgba(59, 130, 246, 0.25); color: #60a5fa; } +[data-theme='dark'] .file-type-archive { background: rgba(34, 197, 94, 0.25); color: #4ade80; } +[data-theme='dark'] .file-type-code { background: rgba(99, 102, 241, 0.25); color: #818cf8; } + +.table-hover [data-object-row] .btn-group { + opacity: 0.5; + transition: opacity 0.2s ease; +} + +.table-hover [data-object-row]:hover .btn-group { + opacity: 1; +} + +::-webkit-scrollbar { + width: 8px; + height: 8px; +} + +::-webkit-scrollbar-track { + background: var(--myfsio-body-bg); + border-radius: 4px; +} + +::-webkit-scrollbar-thumb { + background: var(--myfsio-muted); + border-radius: 4px; + opacity: 0.5; +} + +::-webkit-scrollbar-thumb:hover { + background: var(--myfsio-text); +} + +.modal.fade .modal-dialog { + transform: scale(0.95) translateY(-20px); + transition: transform 0.2s ease-out; +} + +.modal.show .modal-dialog { + transform: scale(1) translateY(0); +} + +.tooltip-inner { + background: var(--myfsio-policy-bg); + padding: 0.5rem 0.75rem; + border-radius: 6px; font-size: 0.8125rem; } + +@keyframes countUp { + from { opacity: 0; transform: translateY(10px); } + to { opacity: 1; transform: translateY(0); } +} + +.stat-value { + animation: countUp 0.5s ease-out; +} + +.action-btn-group { + display: flex; + gap: 0.5rem; +} + +.action-btn-group .btn { + border-radius: 8px; +} + +@keyframes livePulse { + 0%, 100% { opacity: 1; } + 50% { opacity: 0.4; } +} + +.live-indicator { + width: 8px; + height: 8px; + background: #22c55e; + border-radius: 50%; + animation: livePulse 2s infinite; +} + +.login-card { + border: none; + border-radius: 1rem; + overflow: hidden; +} + +.login-card::before { + content: ''; + position: absolute; + top: 0; + left: 0; + right: 0; + height: 4px; + background: linear-gradient(90deg, #3b82f6, #8b5cf6, #ec4899); +} + +@media (max-width: 768px) { + .bucket-card:hover { + transform: none; + } + + .objects-table-container { + max-height: none; + } + + .preview-card { + position: relative !important; + top: 0 !important; + } +} + +*:focus-visible { + outline: 2px solid #3b82f6; + outline-offset: 2px; +} + +* { + transition-property: background-color, border-color, color, fill, stroke; + transition-duration: 0s; + transition-timing-function: ease; +} + +body.theme-transitioning, +body.theme-transitioning * { + transition-duration: 0.3s !important; +} + +.status-badge { + display: inline-flex; + align-items: center; + gap: 0.375rem; +} + +.status-badge-dot { + width: 6px; + height: 6px; + border-radius: 50%; +} + +.status-badge-success .status-badge-dot { background: #22c55e; } +.status-badge-warning .status-badge-dot { background: #f59e0b; } +.status-badge-danger .status-badge-dot { background: #ef4444; } +.status-badge-info .status-badge-dot { background: #3b82f6; } + +.bucket-list-item { + display: flex; + align-items: center; + padding: 1rem 1.25rem; + background: var(--myfsio-card-bg); + border: 1px solid var(--myfsio-card-border); + border-radius: 0.75rem; + transition: all 0.2s ease; + gap: 1rem; +} + +.bucket-list-item:hover { + border-color: rgba(59, 130, 246, 0.3); + background: var(--myfsio-hover-bg); +} + +.text-gradient { + background: linear-gradient(135deg, #3b82f6, #8b5cf6); + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + 
background-clip: text; +} + +.shadow-glow { + box-shadow: 0 0 20px rgba(59, 130, 246, 0.3); +} + +.border-gradient { + border: 2px solid transparent; + background: linear-gradient(var(--myfsio-card-bg), var(--myfsio-card-bg)) padding-box, linear-gradient(135deg, #3b82f6, #8b5cf6) border-box; +} diff --git a/templates/base.html b/templates/base.html index 1bf96ce..fa12e58 100644 --- a/templates/base.html +++ b/templates/base.html @@ -3,7 +3,7 @@ - + {% if principal %}{% endif %} MyFSIO Console @@ -63,6 +63,9 @@ {% if not can_manage_iam %}Restricted{% endif %} + {% endif %} {% if principal %}
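
For context on the new multipart endpoints added in app/s3_api.py (_initiate_multipart_upload, _upload_part, _complete_multipart_upload), the sketch below exercises them end to end. It is illustrative only: it assumes a local API on 127.0.0.1:5000 (the run.py default), the simple X-Access-Key/X-Secret-Key headers accepted by resolve_principal, and the standard S3 query-string dispatch (?uploads, ?partNumber, ?uploadId); the bucket name, key, and credentials are placeholders, not values from this patch.

# multipart_smoke_test.py -- illustrative sketch; endpoint layout and credentials are assumptions.
import requests
from xml.etree.ElementTree import Element, SubElement, fromstring, tostring

BASE = "http://127.0.0.1:5000"  # assumed local API port
HEADERS = {"X-Access-Key": "AKIA-EXAMPLE", "X-Secret-Key": "secret-example"}  # placeholder credentials
BUCKET, KEY = "demo-bucket", "big-file.bin"  # placeholder bucket/key

# 1. Initiate the upload; the InitiateMultipartUploadResult XML carries the UploadId.
resp = requests.post(f"{BASE}/{BUCKET}/{KEY}?uploads", headers=HEADERS, timeout=10)
resp.raise_for_status()
upload_id = fromstring(resp.content).findtext(".//UploadId")

# 2. Upload a single part; its ETag comes back in the response header.
resp = requests.put(
    f"{BASE}/{BUCKET}/{KEY}",
    params={"partNumber": "1", "uploadId": upload_id},
    data=b"x" * (5 * 1024 * 1024),
    headers=HEADERS,
    timeout=60,
)
resp.raise_for_status()
etag = resp.headers["ETag"].strip('"')

# 3. Complete the upload with a CompleteMultipartUpload document listing each part.
doc = Element("CompleteMultipartUpload")
part = SubElement(doc, "Part")
SubElement(part, "PartNumber").text = "1"
SubElement(part, "ETag").text = etag
resp = requests.post(
    f"{BASE}/{BUCKET}/{KEY}",
    params={"uploadId": upload_id},
    data=tostring(doc),
    headers=HEADERS,
    timeout=60,
)
resp.raise_for_status()
print("Completed, final ETag:", fromstring(resp.content).findtext(".//ETag"))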