7 Commits

27 changed files with 657 additions and 1960 deletions

View File

@@ -8,7 +8,7 @@ MyFSIO is a batteries-included, Flask-based recreation of Amazon S3 and IAM work
- **IAM + access keys:** Users, access keys, key rotation, and bucket-scoped actions (`list/read/write/delete/policy`) now live in `data/.myfsio.sys/config/iam.json` and are editable from the IAM dashboard.
- **Bucket policies + hot reload:** `data/.myfsio.sys/config/bucket_policies.json` uses AWS' policy grammar (Version `2012-10-17`) with a built-in watcher, so editing the JSON file applies immediately. The UI also ships Public/Private/Custom presets for faster edits.
- **Presigned URLs everywhere:** Signature Version 4 presigned URLs respect IAM + bucket policies and replace the now-removed "share link" feature for public access scenarios.
-- **Modern UI:** Responsive tables, quick filters, preview sidebar, object-level delete buttons, a presign modal, and an inline JSON policy editor that respects dark mode keep bucket management friendly. The object browser supports folder navigation, infinite scroll pagination, bulk operations, and automatic retry on load failures.
+- **Modern UI:** Responsive tables, quick filters, preview sidebar, object-level delete buttons, a presign modal, and an inline JSON policy editor that respects dark mode keep bucket management friendly.
- **Tests & health:** `/healthz` for smoke checks and `pytest` coverage for IAM, CRUD, presign, and policy flows.
## Architecture at a Glance
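Since the share-link feature is gone, SigV4 presigned URLs are the supported way to hand out temporary access. A minimal sketch of generating one with boto3 against a locally running instance; the endpoint URL, bucket, key, and credential pair are placeholders, not values shipped with this repo:

```python
import boto3
from botocore.config import Config

# Placeholder endpoint and IAM key pair -- substitute your own.
s3 = boto3.client(
    "s3",
    endpoint_url="http://127.0.0.1:5000",
    aws_access_key_id="EXAMPLEKEY",
    aws_secret_access_key="EXAMPLESECRET",
    region_name="us-east-1",
    config=Config(signature_version="s3v4", s3={"addressing_style": "path"}),
)

# SigV4 presigned GET, valid for one hour; access is still subject to IAM and bucket policy.
url = s3.generate_presigned_url(
    "get_object",
    Params={"Bucket": "my-bucket", "Key": "report.pdf"},
    ExpiresIn=3600,
)
print(url)
```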

View File

@@ -45,6 +45,7 @@ def _migrate_config_file(active_path: Path, legacy_paths: List[Path]) -> Path:
try:
shutil.move(str(legacy_path), str(active_path))
except OSError:
+# Fall back to copy + delete if move fails (e.g., cross-device)
shutil.copy2(legacy_path, active_path)
try:
legacy_path.unlink(missing_ok=True)
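The migration fallback is the usual pattern for moves that may cross filesystems: try `shutil.move`, and on `OSError` copy the file and then best-effort delete the original. A standalone sketch of the same idea (the function name is illustrative, not the repo's helper):

```python
import shutil
from pathlib import Path

def move_with_fallback(src: Path, dst: Path) -> Path:
    """Move src to dst, falling back to copy + delete if the rename fails."""
    dst.parent.mkdir(parents=True, exist_ok=True)
    try:
        shutil.move(str(src), str(dst))
    except OSError:
        # e.g. a cross-device move or a locked source file
        shutil.copy2(src, dst)
        try:
            src.unlink(missing_ok=True)
        except OSError:
            pass  # leave the stale copy behind rather than fail the migration
    return dst
```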
@@ -100,28 +101,32 @@ def create_app(
bucket_policies = BucketPolicyStore(Path(app.config["BUCKET_POLICY_PATH"]))
secret_store = EphemeralSecretStore(default_ttl=app.config.get("SECRET_TTL_SECONDS", 300))
+# Initialize Replication components
+# Store config files in the system config directory for consistency
storage_root = Path(app.config["STORAGE_ROOT"])
config_dir = storage_root / ".myfsio.sys" / "config"
config_dir.mkdir(parents=True, exist_ok=True)
+# Define paths with migration from legacy locations
connections_path = _migrate_config_file(
active_path=config_dir / "connections.json",
legacy_paths=[
-storage_root / ".myfsio.sys" / "connections.json",
+storage_root / ".myfsio.sys" / "connections.json", # Previous location
-storage_root / ".connections.json",
+storage_root / ".connections.json", # Original legacy location
],
)
replication_rules_path = _migrate_config_file(
active_path=config_dir / "replication_rules.json",
legacy_paths=[
-storage_root / ".myfsio.sys" / "replication_rules.json",
+storage_root / ".myfsio.sys" / "replication_rules.json", # Previous location
-storage_root / ".replication_rules.json",
+storage_root / ".replication_rules.json", # Original legacy location
],
)
connections = ConnectionStore(connections_path)
replication = ReplicationManager(storage, connections, replication_rules_path)
+# Initialize encryption and KMS
encryption_config = {
"encryption_enabled": app.config.get("ENCRYPTION_ENABLED", False),
"encryption_master_key_path": app.config.get("ENCRYPTION_MASTER_KEY_PATH"),
@@ -136,6 +141,7 @@ def create_app(
kms_manager = KMSManager(kms_keys_path, kms_master_key_path)
encryption_manager.set_kms_provider(kms_manager)
+# Wrap storage with encryption layer if encryption is enabled
if app.config.get("ENCRYPTION_ENABLED", False):
from .encrypted_storage import EncryptedObjectStorage
storage = EncryptedObjectStorage(storage, encryption_manager)
@@ -238,7 +244,7 @@ def _configure_cors(app: Flask) -> None:
class _RequestContextFilter(logging.Filter):
"""Inject request-specific attributes into log records."""
-def filter(self, record: logging.LogRecord) -> bool:
+def filter(self, record: logging.LogRecord) -> bool: # pragma: no cover - simple boilerplate
if has_request_context():
record.request_id = getattr(g, "request_id", "-")
record.path = request.path
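The filter pattern above is what lets log formatters reference per-request fields. A rough sketch of wiring such a filter into a handler, with fallbacks so records emitted outside a request context still format (handler setup is illustrative, not the app's logging config):

```python
import logging
from flask import g, has_request_context, request

class RequestContextFilter(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        if has_request_context():
            record.request_id = getattr(g, "request_id", "-")
            record.path = request.path
        else:
            # Keep the formatter happy for startup/background log lines.
            record.request_id = "-"
            record.path = "-"
        return True

handler = logging.StreamHandler()
handler.addFilter(RequestContextFilter())
handler.setFormatter(logging.Formatter("%(asctime)s %(request_id)s %(path)s %(message)s"))
logging.getLogger("myfsio").addHandler(handler)
```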

View File

@@ -188,6 +188,7 @@ class BucketPolicyStore:
except FileNotFoundError:
return None
+# ------------------------------------------------------------------
def evaluate(
self,
access_key: Optional[str],
@@ -228,6 +229,7 @@ class BucketPolicyStore:
self._policies.pop(bucket, None)
self._persist()
+# ------------------------------------------------------------------
def _load(self) -> None:
try:
content = self.policy_path.read_text(encoding='utf-8')
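For reference, the store consumes standard AWS policy documents (Version `2012-10-17`, per the README). A minimal public-read policy in that grammar, written as the Python dict the JSON would deserialize to; the bucket name is a placeholder:

```python
# Illustrative policy document only -- the exact per-bucket layout of
# bucket_policies.json is not shown in this diff.
public_read_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "PublicRead",
            "Effect": "Allow",
            "Principal": "*",
            "Action": ["s3:GetObject"],
            "Resource": ["arn:aws:s3:::example-bucket/*"],
        }
    ],
}
```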

View File

@@ -153,8 +153,9 @@ class AppConfig:
cors_allow_headers = _csv(str(_get("CORS_ALLOW_HEADERS", "*")), ["*"])
cors_expose_headers = _csv(str(_get("CORS_EXPOSE_HEADERS", "*")), ["*"])
session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
-bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60))
+bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60)) # Default 60 seconds
+# Encryption settings
encryption_enabled = str(_get("ENCRYPTION_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
encryption_keys_dir = storage_root / ".myfsio.sys" / "keys"
encryption_master_key_path = Path(_get("ENCRYPTION_MASTER_KEY_PATH", encryption_keys_dir / "master.key")).resolve()
@@ -205,6 +206,7 @@ class AppConfig:
""" """
issues = [] issues = []
# Check if storage_root is writable
try: try:
test_file = self.storage_root / ".write_test" test_file = self.storage_root / ".write_test"
test_file.touch() test_file.touch()
@@ -212,20 +214,24 @@ class AppConfig:
except (OSError, PermissionError) as e:
issues.append(f"CRITICAL: STORAGE_ROOT '{self.storage_root}' is not writable: {e}")
+# Check if storage_root looks like a temp directory
storage_str = str(self.storage_root).lower()
if "/tmp" in storage_str or "\\temp" in storage_str or "appdata\\local\\temp" in storage_str:
issues.append(f"WARNING: STORAGE_ROOT '{self.storage_root}' appears to be a temporary directory. Data may be lost on reboot!")
+# Check if IAM config path is under storage_root
try:
self.iam_config_path.relative_to(self.storage_root)
except ValueError:
issues.append(f"WARNING: IAM_CONFIG '{self.iam_config_path}' is outside STORAGE_ROOT '{self.storage_root}'. Consider setting IAM_CONFIG explicitly or ensuring paths are aligned.")
+# Check if bucket policy path is under storage_root
try:
self.bucket_policy_path.relative_to(self.storage_root)
except ValueError:
issues.append(f"WARNING: BUCKET_POLICY_PATH '{self.bucket_policy_path}' is outside STORAGE_ROOT '{self.storage_root}'. Consider setting BUCKET_POLICY_PATH explicitly.")
+# Check if log path is writable
try:
self.log_path.parent.mkdir(parents=True, exist_ok=True)
test_log = self.log_path.parent / ".write_test"
@@ -234,22 +240,26 @@ class AppConfig:
except (OSError, PermissionError) as e:
issues.append(f"WARNING: Log directory '{self.log_path.parent}' is not writable: {e}")
+# Check log path location
log_str = str(self.log_path).lower()
if "/tmp" in log_str or "\\temp" in log_str or "appdata\\local\\temp" in log_str:
issues.append(f"WARNING: LOG_DIR '{self.log_path.parent}' appears to be a temporary directory. Logs may be lost on reboot!")
+# Check if encryption keys path is under storage_root (when encryption is enabled)
if self.encryption_enabled:
try:
self.encryption_master_key_path.relative_to(self.storage_root)
except ValueError:
issues.append(f"WARNING: ENCRYPTION_MASTER_KEY_PATH '{self.encryption_master_key_path}' is outside STORAGE_ROOT. Ensure proper backup procedures.")
+# Check if KMS keys path is under storage_root (when KMS is enabled)
if self.kms_enabled:
try:
self.kms_keys_path.relative_to(self.storage_root)
except ValueError:
issues.append(f"WARNING: KMS_KEYS_PATH '{self.kms_keys_path}' is outside STORAGE_ROOT. Ensure proper backup procedures.")
+# Warn about production settings
if self.secret_key == "dev-secret-key":
issues.append("WARNING: Using default SECRET_KEY. Set SECRET_KEY environment variable for production.")

View File

@@ -188,11 +188,8 @@ class EncryptedObjectStorage:
def bucket_stats(self, bucket_name: str, cache_ttl: int = 60):
return self.storage.bucket_stats(bucket_name, cache_ttl)
-def list_objects(self, bucket_name: str, **kwargs):
-return self.storage.list_objects(bucket_name, **kwargs)
+def list_objects(self, bucket_name: str):
+return self.storage.list_objects(bucket_name)
-def list_objects_all(self, bucket_name: str):
-return self.storage.list_objects_all(bucket_name)
def get_object_path(self, bucket_name: str, object_key: str):
return self.storage.get_object_path(bucket_name, object_key)
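EncryptedObjectStorage acts as a thin decorator over the plain storage: calls that never touch object bytes are forwarded unchanged, which is why dropping the pagination kwargs only means updating pass-throughs here. A stripped-down sketch of that pattern, limited to the methods visible in this hunk:

```python
class DelegatingStorage:
    """Illustrative wrapper: forward non-crypto calls verbatim to the inner backend."""

    def __init__(self, storage, encryption_manager):
        self.storage = storage
        self.encryption = encryption_manager

    def bucket_stats(self, bucket_name: str, cache_ttl: int = 60):
        return self.storage.bucket_stats(bucket_name, cache_ttl)

    def list_objects(self, bucket_name: str):
        return self.storage.list_objects(bucket_name)

    def get_object_path(self, bucket_name: str, object_key: str):
        return self.storage.get_object_path(bucket_name, object_key)
```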

View File

@@ -125,6 +125,7 @@ class IamService:
except OSError:
pass
+# ---------------------- authz helpers ----------------------
def authenticate(self, access_key: str, secret_key: str) -> Principal:
self._maybe_reload()
access_key = (access_key or "").strip()
@@ -217,6 +218,7 @@ class IamService:
return True
return False
+# ---------------------- management helpers ----------------------
def list_users(self) -> List[Dict[str, Any]]:
listing: List[Dict[str, Any]] = []
for access_key, record in self._users.items():
@@ -289,6 +291,7 @@ class IamService:
self._save()
self._load()
+# ---------------------- config helpers ----------------------
def _load(self) -> None:
try:
self._last_load_time = self.config_path.stat().st_mtime
@@ -334,6 +337,7 @@ class IamService:
except (OSError, PermissionError) as e:
raise IamError(f"Cannot save IAM config: {e}")
+# ---------------------- insight helpers ----------------------
def config_summary(self) -> Dict[str, Any]:
return {
"path": str(self.config_path),

View File

@@ -33,6 +33,9 @@ def _encryption():
def _error_response(code: str, message: str, status: int) -> tuple[Dict[str, Any], int]:
return {"__type": code, "message": message}, status
+# ---------------------- Key Management ----------------------
@kms_api_bp.route("/keys", methods=["GET", "POST"])
@limiter.limit("30 per minute")
def list_or_create_keys():
@@ -62,6 +65,7 @@ def list_or_create_keys():
except EncryptionError as exc:
return _error_response("KMSInternalException", str(exc), 400)
+# GET - List keys
keys = kms.list_keys()
return jsonify({
"Keys": [{"KeyId": k.key_id, "KeyArn": k.arn} for k in keys],
@@ -92,6 +96,7 @@ def get_or_delete_key(key_id: str):
except EncryptionError as exc:
return _error_response("NotFoundException", str(exc), 404)
+# GET
key = kms.get_key(key_id)
if not key:
return _error_response("NotFoundException", f"Key not found: {key_id}", 404)
@@ -144,6 +149,9 @@ def disable_key(key_id: str):
except EncryptionError as exc:
return _error_response("NotFoundException", str(exc), 404)
+# ---------------------- Encryption Operations ----------------------
@kms_api_bp.route("/encrypt", methods=["POST"])
@limiter.limit("60 per minute")
def encrypt_data():
@@ -243,6 +251,7 @@ def generate_data_key():
try:
plaintext_key, encrypted_key = kms.generate_data_key(key_id, context)
+# Trim key if AES_128 requested
if key_spec == "AES_128":
plaintext_key = plaintext_key[:16]
@@ -313,7 +322,10 @@ def re_encrypt():
return _error_response("ValidationException", "CiphertextBlob must be base64 encoded", 400)
try:
+# First decrypt, get source key id
plaintext, source_key_id = kms.decrypt(ciphertext, source_context)
+# Re-encrypt with destination key
new_ciphertext = kms.encrypt(destination_key_id, plaintext, destination_context)
return jsonify({
@@ -353,6 +365,9 @@ def generate_random():
except EncryptionError as exc:
return _error_response("ValidationException", str(exc), 400)
+# ---------------------- Client-Side Encryption Helpers ----------------------
@kms_api_bp.route("/client/generate-key", methods=["POST"])
@limiter.limit("30 per minute")
def generate_client_key():
@@ -412,6 +427,9 @@ def client_decrypt():
except Exception as exc:
return _error_response("DecryptionError", str(exc), 400)
+# ---------------------- Encryption Materials for S3 Client-Side Encryption ----------------------
@kms_api_bp.route("/materials/<key_id>", methods=["POST"])
@limiter.limit("60 per minute")
def get_encryption_materials(key_id: str):

View File

@@ -22,8 +22,6 @@ from .storage import ObjectStorage, StorageError
logger = logging.getLogger(__name__)
REPLICATION_USER_AGENT = "S3ReplicationAgent/1.0"
-REPLICATION_CONNECT_TIMEOUT = 5
-REPLICATION_READ_TIMEOUT = 30
REPLICATION_MODE_NEW_ONLY = "new_only"
REPLICATION_MODE_ALL = "all"
@@ -32,10 +30,10 @@ REPLICATION_MODE_ALL = "all"
@dataclass
class ReplicationStats:
"""Statistics for replication operations - computed dynamically."""
-objects_synced: int = 0
+objects_synced: int = 0 # Objects that exist in both source and destination
-objects_pending: int = 0
+objects_pending: int = 0 # Objects in source but not in destination
-objects_orphaned: int = 0
+objects_orphaned: int = 0 # Objects in destination but not in source (will be deleted)
-bytes_synced: int = 0
+bytes_synced: int = 0 # Total bytes synced to destination
last_sync_at: Optional[float] = None
last_sync_key: Optional[str] = None
@@ -85,6 +83,7 @@ class ReplicationRule:
@classmethod
def from_dict(cls, data: dict) -> "ReplicationRule":
stats_data = data.pop("stats", {})
+# Handle old rules without mode/created_at
if "mode" not in data:
data["mode"] = REPLICATION_MODE_NEW_ONLY
if "created_at" not in data:
@@ -122,33 +121,6 @@ class ReplicationManager:
with open(self.rules_path, "w") as f:
json.dump(data, f, indent=2)
-def check_endpoint_health(self, connection: RemoteConnection) -> bool:
-"""Check if a remote endpoint is reachable and responsive.
-Returns True if endpoint is healthy, False otherwise.
-Uses short timeouts to prevent blocking.
-"""
-try:
-config = Config(
-user_agent_extra=REPLICATION_USER_AGENT,
-connect_timeout=REPLICATION_CONNECT_TIMEOUT,
-read_timeout=REPLICATION_READ_TIMEOUT,
-retries={'max_attempts': 1}
-)
-s3 = boto3.client(
-"s3",
-endpoint_url=connection.endpoint_url,
-aws_access_key_id=connection.access_key,
-aws_secret_access_key=connection.secret_key,
-region_name=connection.region,
-config=config,
-)
-s3.list_buckets()
-return True
-except Exception as e:
-logger.warning(f"Endpoint health check failed for {connection.name} ({connection.endpoint_url}): {e}")
-return False
def get_rule(self, bucket_name: str) -> Optional[ReplicationRule]:
return self._rules.get(bucket_name)
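For the record, the removed helper's approach is a cheap liveness probe: a boto3 client with short timeouts and a single attempt, where any exception counts as unhealthy. A sketch under those assumptions (connection fields are passed in as plain arguments here):

```python
import boto3
from botocore.config import Config

def endpoint_is_healthy(endpoint_url: str, access_key: str, secret_key: str,
                        region: str = "us-east-1") -> bool:
    """Probe an S3-compatible endpoint with short timeouts and one attempt."""
    config = Config(connect_timeout=5, read_timeout=30, retries={"max_attempts": 1})
    s3 = boto3.client(
        "s3",
        endpoint_url=endpoint_url,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        region_name=region,
        config=config,
    )
    try:
        s3.list_buckets()
        return True
    except Exception:
        # Any failure (DNS, timeout, auth) is treated as "not reachable".
        return False
```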
@@ -179,12 +151,14 @@ class ReplicationManager:
connection = self.connections.get(rule.target_connection_id)
if not connection:
-return rule.stats
+return rule.stats # Return cached stats if connection unavailable
try:
-source_objects = self.storage.list_objects_all(bucket_name)
+# Get source objects
+source_objects = self.storage.list_objects(bucket_name)
source_keys = {obj.key: obj.size for obj in source_objects}
+# Get destination objects
s3 = boto3.client(
"s3",
endpoint_url=connection.endpoint_url,
@@ -204,18 +178,24 @@ class ReplicationManager:
bytes_synced += obj.get('Size', 0)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchBucket':
+# Destination bucket doesn't exist yet
dest_keys = set()
else:
raise
-synced = source_keys.keys() & dest_keys
-orphaned = dest_keys - source_keys.keys()
+# Compute stats
+synced = source_keys.keys() & dest_keys # Objects in both
+orphaned = dest_keys - source_keys.keys() # In dest but not source
+# For "new_only" mode, we can't determine pending since we don't know
+# which objects existed before replication was enabled. Only "all" mode
+# should show pending (objects that should be replicated but aren't yet).
if rule.mode == REPLICATION_MODE_ALL:
-pending = source_keys.keys() - dest_keys
+pending = source_keys.keys() - dest_keys # In source but not dest
else:
-pending = set()
+pending = set() # New-only mode: don't show pre-existing as pending
+# Update cached stats with computed values
rule.stats.objects_synced = len(synced)
rule.stats.objects_pending = len(pending)
rule.stats.objects_orphaned = len(orphaned)
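The computed stats are plain set arithmetic over the two key listings, and pending is only reported in `all` mode because `new_only` makes no promise about pre-existing objects. A small worked example with made-up keys:

```python
source_keys = {"a.txt": 10, "b.txt": 20, "c.txt": 30}   # key -> size on the source
dest_keys = {"a.txt", "b.txt", "stale.txt"}              # keys present on the destination

synced = source_keys.keys() & dest_keys            # in both: {"a.txt", "b.txt"}
orphaned = dest_keys - source_keys.keys()          # dest only: {"stale.txt"}
pending_all_mode = source_keys.keys() - dest_keys  # source only: {"c.txt"}
pending_new_only = set()                           # new_only mode reports nothing as pending

print(len(synced), len(pending_all_mode), len(orphaned))  # 2 1 1
```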
@@ -225,7 +205,7 @@ class ReplicationManager:
except (ClientError, StorageError) as e:
logger.error(f"Failed to compute sync status for {bucket_name}: {e}")
-return rule.stats
+return rule.stats # Return cached stats on error
def replicate_existing_objects(self, bucket_name: str) -> None:
"""Trigger replication for all existing objects in a bucket."""
@@ -238,12 +218,8 @@ class ReplicationManager:
logger.warning(f"Cannot replicate existing objects: Connection {rule.target_connection_id} not found")
return
-if not self.check_endpoint_health(connection):
-logger.warning(f"Cannot replicate existing objects: Endpoint {connection.name} ({connection.endpoint_url}) is not reachable")
-return
try:
-objects = self.storage.list_objects_all(bucket_name)
+objects = self.storage.list_objects(bucket_name)
logger.info(f"Starting replication of {len(objects)} existing objects from {bucket_name}")
for obj in objects:
self._executor.submit(self._replicate_task, bucket_name, obj.key, rule, connection, "write")
@@ -279,10 +255,6 @@ class ReplicationManager:
logger.warning(f"Replication skipped for {bucket_name}/{object_key}: Connection {rule.target_connection_id} not found")
return
-if not self.check_endpoint_health(connection):
-logger.warning(f"Replication skipped for {bucket_name}/{object_key}: Endpoint {connection.name} ({connection.endpoint_url}) is not reachable")
-return
self._executor.submit(self._replicate_task, bucket_name, object_key, rule, connection, action)
def _replicate_task(self, bucket_name: str, object_key: str, rule: ReplicationRule, conn: RemoteConnection, action: str) -> None:
@@ -299,26 +271,13 @@ class ReplicationManager:
file_size = 0
try:
-config = Config(
-user_agent_extra=REPLICATION_USER_AGENT,
-connect_timeout=REPLICATION_CONNECT_TIMEOUT,
-read_timeout=REPLICATION_READ_TIMEOUT,
-retries={'max_attempts': 2}, # Limited retries to prevent long hangs
-signature_version='s3v4', # Force signature v4 for compatibility
-s3={
-'addressing_style': 'path', # Use path-style addressing for compatibility
-},
-# Disable SDK automatic checksums - they cause SignatureDoesNotMatch errors
-# with S3-compatible servers that don't support CRC32 checksum headers
-request_checksum_calculation='when_required',
-response_checksum_validation='when_required',
-)
+config = Config(user_agent_extra=REPLICATION_USER_AGENT)
s3 = boto3.client(
"s3",
endpoint_url=conn.endpoint_url,
aws_access_key_id=conn.access_key,
aws_secret_access_key=conn.secret_key,
-region_name=conn.region or 'us-east-1', # Default region if not set
+region_name=conn.region,
config=config,
)
@@ -337,59 +296,54 @@ class ReplicationManager:
logger.error(f"Source object not found: {bucket_name}/{object_key}")
return
-# Don't replicate metadata - destination server will generate its own
-# __etag__ and __size__. Replicating them causes signature mismatches when they have None/empty values.
-extra_args = {}
-if metadata:
-extra_args["Metadata"] = metadata
-# Guess content type to prevent corruption/wrong handling
+metadata = self.storage.get_object_metadata(bucket_name, object_key)
content_type, _ = mimetypes.guess_type(path)
file_size = path.stat().st_size
logger.info(f"Replicating {bucket_name}/{object_key}: Size={file_size}, ContentType={content_type}")
-def do_put_object() -> None:
-"""Helper to upload object.
-Reads the file content into memory first to avoid signature calculation
-issues with certain binary file types (like GIFs) when streaming.
-Do NOT set ContentLength explicitly - boto3 calculates it from the bytes
-and setting it manually can cause SignatureDoesNotMatch errors.
-"""
-file_content = path.read_bytes()
-put_kwargs = {
-"Bucket": rule.target_bucket,
-"Key": object_key,
-"Body": file_content,
-}
-if content_type:
-put_kwargs["ContentType"] = content_type
-s3.put_object(**put_kwargs)
try:
-do_put_object()
+with path.open("rb") as f:
+s3.put_object(
+Bucket=rule.target_bucket,
+Key=object_key,
+Body=f,
+ContentLength=file_size,
+ContentType=content_type or "application/octet-stream",
+Metadata=metadata or {}
+)
except (ClientError, S3UploadFailedError) as e:
-error_code = None
+is_no_bucket = False
if isinstance(e, ClientError):
-error_code = e.response['Error']['Code']
+if e.response['Error']['Code'] == 'NoSuchBucket':
+is_no_bucket = True
elif isinstance(e, S3UploadFailedError):
if "NoSuchBucket" in str(e):
-error_code = 'NoSuchBucket'
+is_no_bucket = True
-if error_code == 'NoSuchBucket':
+if is_no_bucket:
logger.info(f"Target bucket {rule.target_bucket} not found. Attempting to create it.")
-bucket_ready = False
try:
s3.create_bucket(Bucket=rule.target_bucket)
-bucket_ready = True
-logger.info(f"Created target bucket {rule.target_bucket}")
-except ClientError as bucket_err:
-if bucket_err.response['Error']['Code'] in ('BucketAlreadyExists', 'BucketAlreadyOwnedByYou'):
-logger.debug(f"Bucket {rule.target_bucket} already exists (created by another thread)")
-bucket_ready = True
-else:
-logger.error(f"Failed to create target bucket {rule.target_bucket}: {bucket_err}")
-raise e
-if bucket_ready:
-do_put_object()
+# Retry upload
+with path.open("rb") as f:
+s3.put_object(
+Bucket=rule.target_bucket,
+Key=object_key,
+Body=f,
+ContentLength=file_size,
+ContentType=content_type or "application/octet-stream",
+Metadata=metadata or {}
+)
+except Exception as create_err:
+logger.error(f"Failed to create target bucket {rule.target_bucket}: {create_err}")
+raise e # Raise original error
else:
raise e
@@ -400,4 +354,3 @@ class ReplicationManager:
logger.error(f"Replication failed for {bucket_name}/{object_key}: {e}")
except Exception:
logger.exception(f"Unexpected error during replication for {bucket_name}/{object_key}")
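The client options the old branch set, and which this change drops, are worth keeping on record for anyone replicating to picky S3-compatible servers: path-style addressing, explicit SigV4, bounded retries, and opting out of the newer SDK checksum headers. A hedged sketch of that Config; the two checksum options only exist in recent botocore releases, and the endpoint and credentials below are placeholders:

```python
import boto3
from botocore.config import Config

config = Config(
    user_agent_extra="S3ReplicationAgent/1.0",
    connect_timeout=5,
    read_timeout=30,
    retries={"max_attempts": 2},          # keep failed uploads from hanging the worker
    signature_version="s3v4",
    s3={"addressing_style": "path"},      # path-style URLs for non-AWS endpoints
    # Only send checksum headers when an operation requires them; some S3-compatible
    # servers reject the CRC32 headers and respond with SignatureDoesNotMatch.
    request_checksum_calculation="when_required",
    response_checksum_validation="when_required",
)

s3 = boto3.client(
    "s3",
    endpoint_url="http://replica.example:9000",  # placeholder endpoint
    aws_access_key_id="EXAMPLEKEY",
    aws_secret_access_key="EXAMPLESECRET",
    region_name="us-east-1",
    config=config,
)
```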

View File

@@ -8,7 +8,7 @@ import re
import uuid
from datetime import datetime, timedelta, timezone
from typing import Any, Dict
-from urllib.parse import quote, urlencode, urlparse, unquote
+from urllib.parse import quote, urlencode, urlparse
from xml.etree.ElementTree import Element, SubElement, tostring, fromstring, ParseError
from flask import Blueprint, Response, current_app, jsonify, request, g
@@ -22,6 +22,8 @@ from .storage import ObjectStorage, StorageError, QuotaExceededError
s3_api_bp = Blueprint("s3_api", __name__)
+# ---------------------- helpers ----------------------
def _storage() -> ObjectStorage:
return current_app.extensions["object_storage"]
@@ -66,26 +68,9 @@ def _get_signature_key(key: str, date_stamp: str, region_name: str, service_name
return k_signing
-def _get_canonical_uri(req: Any) -> str:
-"""Get the canonical URI for SigV4 signature verification.
-AWS SigV4 requires the canonical URI to be URL-encoded exactly as the client
-sent it. Flask/Werkzeug automatically URL-decodes request.path, so we need
-to get the raw path from the environ.
-The canonical URI should have each path segment URL-encoded (with '/' preserved),
-and the encoding should match what the client used when signing.
-"""
-raw_uri = req.environ.get('RAW_URI') or req.environ.get('REQUEST_URI')
-if raw_uri:
-path = raw_uri.split('?')[0]
-return path
-return quote(req.path, safe="/-_.~")
def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
+# Parse Authorization header
+# AWS4-HMAC-SHA256 Credential=AKIA.../20230101/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-date, Signature=...
match = re.match(
r"AWS4-HMAC-SHA256 Credential=([^/]+)/([^/]+)/([^/]+)/([^/]+)/aws4_request, SignedHeaders=([^,]+), Signature=(.+)",
auth_header,
@@ -94,13 +79,17 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
return None
access_key, date_stamp, region, service, signed_headers_str, signature = match.groups()
+# Get secret key
secret_key = _iam().get_secret_key(access_key)
if not secret_key:
raise IamError("Invalid access key")
+# Canonical Request
method = req.method
-canonical_uri = _get_canonical_uri(req)
+canonical_uri = quote(req.path, safe="/-_.~")
+# Canonical Query String
query_args = []
for key, value in req.args.items(multi=True):
query_args.append((key, value))
@@ -111,6 +100,7 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
canonical_query_parts.append(f"{quote(k, safe='-_.~')}={quote(v, safe='-_.~')}")
canonical_query_string = "&".join(canonical_query_parts)
+# Canonical Headers
signed_headers_list = signed_headers_str.split(";")
canonical_headers_parts = []
for header in signed_headers_list:
@@ -122,22 +112,18 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
canonical_headers_parts.append(f"{header.lower()}:{header_val}\n")
canonical_headers = "".join(canonical_headers_parts)
+# Payload Hash
payload_hash = req.headers.get("X-Amz-Content-Sha256")
if not payload_hash:
payload_hash = hashlib.sha256(req.get_data()).hexdigest()
canonical_request = f"{method}\n{canonical_uri}\n{canonical_query_string}\n{canonical_headers}\n{signed_headers_str}\n{payload_hash}"
-# Debug logging for signature issues
-import logging
-logger = logging.getLogger(__name__)
-logger.debug(f"SigV4 Debug - Method: {method}, URI: {canonical_uri}")
-logger.debug(f"SigV4 Debug - Payload hash from header: {req.headers.get('X-Amz-Content-Sha256')}")
-logger.debug(f"SigV4 Debug - Signed headers: {signed_headers_str}")
-logger.debug(f"SigV4 Debug - Content-Type: {req.headers.get('Content-Type')}")
-logger.debug(f"SigV4 Debug - Content-Length: {req.headers.get('Content-Length')}")
-amz_date = req.headers.get("X-Amz-Date") or req.headers.get("Date")
+# String to Sign
+amz_date = req.headers.get("X-Amz-Date")
+if not amz_date:
+amz_date = req.headers.get("Date")
if not amz_date:
raise IamError("Missing Date header")
@@ -148,12 +134,13 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
now = datetime.now(timezone.utc)
time_diff = abs((now - request_time).total_seconds())
-if time_diff > 900:
+if time_diff > 900: # 15 minutes
raise IamError("Request timestamp too old or too far in the future")
required_headers = {'host', 'x-amz-date'}
signed_headers_set = set(signed_headers_str.split(';'))
if not required_headers.issubset(signed_headers_set):
+# Some clients might sign 'date' instead of 'x-amz-date'
if 'date' in signed_headers_set:
required_headers.remove('x-amz-date')
required_headers.add('date')
@@ -167,24 +154,6 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
calculated_signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
if not hmac.compare_digest(calculated_signature, signature):
-# Debug logging for signature mismatch
-import logging
-logger = logging.getLogger(__name__)
-logger.error(f"Signature mismatch for {req.path}")
-logger.error(f" Content-Type: {req.headers.get('Content-Type')}")
-logger.error(f" Content-Length: {req.headers.get('Content-Length')}")
-logger.error(f" X-Amz-Content-Sha256: {req.headers.get('X-Amz-Content-Sha256')}")
-logger.error(f" Canonical URI: {canonical_uri}")
-logger.error(f" Signed headers: {signed_headers_str}")
-# Log each signed header's value
-for h in signed_headers_list:
-logger.error(f" Header '{h}': {repr(req.headers.get(h))}")
-logger.error(f" Expected sig: {signature[:16]}...")
-logger.error(f" Calculated sig: {calculated_signature[:16]}...")
-# Log first part of canonical request to compare
-logger.error(f" Canonical request hash: {hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()[:16]}...")
-# Log the full canonical request for debugging
-logger.error(f" Canonical request:\n{canonical_request[:500]}...")
raise IamError("SignatureDoesNotMatch")
return _iam().get_principal(access_key)
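For readers following the verification path: the signing key fed into the final HMAC comparison comes from AWS's standard four-step SigV4 derivation. A reference sketch of that derivation and the last signing step, matching the `_get_signature_key` call used above:

```python
import hashlib
import hmac

def _hmac(key: bytes, msg: str) -> bytes:
    return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()

def signing_key(secret_key: str, date_stamp: str, region: str, service: str = "s3") -> bytes:
    """AWS SigV4 key derivation: date -> region -> service -> 'aws4_request'."""
    k_date = _hmac(("AWS4" + secret_key).encode("utf-8"), date_stamp)
    k_region = _hmac(k_date, region)
    k_service = _hmac(k_region, service)
    return _hmac(k_service, "aws4_request")

def signature(secret_key: str, date_stamp: str, region: str, string_to_sign: str) -> str:
    key = signing_key(secret_key, date_stamp, region)
    return hmac.new(key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
```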
@@ -218,9 +187,11 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
if not secret_key:
raise IamError("Invalid access key")
+# Canonical Request
method = req.method
-canonical_uri = _get_canonical_uri(req)
+canonical_uri = quote(req.path, safe="/-_.~")
+# Canonical Query String
query_args = []
for key, value in req.args.items(multi=True):
if key != "X-Amz-Signature":
@@ -232,6 +203,7 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
canonical_query_parts.append(f"{quote(k, safe='-_.~')}={quote(v, safe='-_.~')}")
canonical_query_string = "&".join(canonical_query_parts)
+# Canonical Headers
signed_headers_list = signed_headers_str.split(";")
canonical_headers_parts = []
for header in signed_headers_list:
@@ -240,6 +212,7 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
canonical_headers_parts.append(f"{header}:{val}\n")
canonical_headers = "".join(canonical_headers_parts)
+# Payload Hash
payload_hash = "UNSIGNED-PAYLOAD"
canonical_request = "\n".join([
@@ -251,6 +224,7 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
payload_hash
])
+# String to Sign
algorithm = "AWS4-HMAC-SHA256"
credential_scope = f"{date_stamp}/{region}/{service}/aws4_request"
hashed_request = hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()
@@ -261,6 +235,7 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
hashed_request
])
+# Signature
signing_key = _get_signature_key(secret_key, date_stamp, region, service)
calculated_signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
@@ -518,6 +493,7 @@ def _generate_presigned_url(
}
canonical_query = _encode_query_params(query_params)
+# Determine host and scheme from config or request
api_base = current_app.config.get("API_BASE_URL")
if api_base:
parsed = urlparse(api_base)
@@ -877,6 +853,7 @@ def _bucket_versioning_handler(bucket_name: str) -> Response:
current_app.logger.info("Bucket versioning updated", extra={"bucket": bucket_name, "status": status})
return Response(status=200)
+# GET
try:
enabled = storage.is_versioning_enabled(bucket_name)
except StorageError as exc:
@@ -912,7 +889,7 @@ def _bucket_tagging_handler(bucket_name: str) -> Response:
return _error_response("NoSuchBucket", str(exc), 404)
current_app.logger.info("Bucket tags deleted", extra={"bucket": bucket_name})
return Response(status=204)
+# PUT
payload = request.get_data(cache=False) or b""
try:
tags = _parse_tagging_document(payload)
@@ -937,6 +914,7 @@ def _object_tagging_handler(bucket_name: str, object_key: str) -> Response:
if error:
return error
+# For tagging, we use read permission for GET, write for PUT/DELETE
action = "read" if request.method == "GET" else "write"
try:
_authorize_action(principal, bucket_name, action, object_key=object_key)
@@ -1115,8 +1093,10 @@ def _bucket_location_handler(bucket_name: str) -> Response:
if not storage.bucket_exists(bucket_name):
return _error_response("NoSuchBucket", "Bucket does not exist", 404)
+# Return the configured AWS_REGION
region = current_app.config.get("AWS_REGION", "us-east-1")
root = Element("LocationConstraint")
+# AWS returns empty for us-east-1, but we'll be explicit
root.text = region if region != "us-east-1" else None
return _xml_response(root)
@@ -1136,11 +1116,13 @@ def _bucket_acl_handler(bucket_name: str) -> Response:
return _error_response("NoSuchBucket", "Bucket does not exist", 404)
if request.method == "PUT":
-# Accept canned ACL headers for S3 compatibility (not fully implemented)
+# We don't fully implement ACLs, but we accept the request for compatibility
+# Check for canned ACL header
canned_acl = request.headers.get("x-amz-acl", "private")
current_app.logger.info("Bucket ACL set (canned)", extra={"bucket": bucket_name, "acl": canned_acl})
return Response(status=200)
+# GET - Return a basic ACL document showing full control for owner
root = Element("AccessControlPolicy")
owner = SubElement(root, "Owner")
SubElement(owner, "ID").text = principal.access_key if principal else "anonymous"
@@ -1173,7 +1155,7 @@ def _bucket_list_versions_handler(bucket_name: str) -> Response:
storage = _storage()
try:
-objects = storage.list_objects_all(bucket_name)
+objects = storage.list_objects(bucket_name)
except StorageError as exc:
return _error_response("NoSuchBucket", str(exc), 404)
@@ -1188,6 +1170,7 @@ def _bucket_list_versions_handler(bucket_name: str) -> Response:
if key_marker:
objects = [obj for obj in objects if obj.key > key_marker]
+# Build XML response
root = Element("ListVersionsResult", xmlns="http://s3.amazonaws.com/doc/2006-03-01/")
SubElement(root, "Name").text = bucket_name
SubElement(root, "Prefix").text = prefix
@@ -1205,9 +1188,10 @@ def _bucket_list_versions_handler(bucket_name: str) -> Response:
is_truncated = True
break
+# Current version
version = SubElement(root, "Version")
SubElement(version, "Key").text = obj.key
-SubElement(version, "VersionId").text = "null"
+SubElement(version, "VersionId").text = "null" # Current version ID
SubElement(version, "IsLatest").text = "true"
SubElement(version, "LastModified").text = obj.last_modified.strftime("%Y-%m-%dT%H:%M:%S.000Z")
SubElement(version, "ETag").text = f'"{obj.etag}"'
@@ -1221,6 +1205,7 @@ def _bucket_list_versions_handler(bucket_name: str) -> Response:
version_count += 1
next_key_marker = obj.key
+# Get historical versions
try:
versions = storage.list_object_versions(bucket_name, obj.key)
for v in versions:
@@ -1304,12 +1289,14 @@ def _render_lifecycle_config(config: list) -> Element:
rule_el = SubElement(root, "Rule")
SubElement(rule_el, "ID").text = rule.get("ID", "")
+# Filter
filter_el = SubElement(rule_el, "Filter")
if rule.get("Prefix"):
SubElement(filter_el, "Prefix").text = rule.get("Prefix", "")
SubElement(rule_el, "Status").text = rule.get("Status", "Enabled")
+# Expiration
if "Expiration" in rule:
exp = rule["Expiration"]
exp_el = SubElement(rule_el, "Expiration")
@@ -1320,12 +1307,14 @@ def _render_lifecycle_config(config: list) -> Element:
if exp.get("ExpiredObjectDeleteMarker"):
SubElement(exp_el, "ExpiredObjectDeleteMarker").text = "true"
+# NoncurrentVersionExpiration
if "NoncurrentVersionExpiration" in rule:
nve = rule["NoncurrentVersionExpiration"]
nve_el = SubElement(rule_el, "NoncurrentVersionExpiration")
if "NoncurrentDays" in nve:
SubElement(nve_el, "NoncurrentDays").text = str(nve["NoncurrentDays"])
+# AbortIncompleteMultipartUpload
if "AbortIncompleteMultipartUpload" in rule:
aimu = rule["AbortIncompleteMultipartUpload"]
aimu_el = SubElement(rule_el, "AbortIncompleteMultipartUpload")
@@ -1349,24 +1338,29 @@ def _parse_lifecycle_config(payload: bytes) -> list:
for rule_el in root.findall("{*}Rule") or root.findall("Rule"):
rule: dict = {}
+# ID
id_el = rule_el.find("{*}ID") or rule_el.find("ID")
if id_el is not None and id_el.text:
rule["ID"] = id_el.text.strip()
+# Filter/Prefix
filter_el = rule_el.find("{*}Filter") or rule_el.find("Filter")
if filter_el is not None:
prefix_el = filter_el.find("{*}Prefix") or filter_el.find("Prefix")
if prefix_el is not None and prefix_el.text:
rule["Prefix"] = prefix_el.text
+# Legacy Prefix (outside Filter)
if "Prefix" not in rule:
prefix_el = rule_el.find("{*}Prefix") or rule_el.find("Prefix")
if prefix_el is not None:
rule["Prefix"] = prefix_el.text or ""
+# Status
status_el = rule_el.find("{*}Status") or rule_el.find("Status")
rule["Status"] = (status_el.text or "Enabled").strip() if status_el is not None else "Enabled"
+# Expiration
exp_el = rule_el.find("{*}Expiration") or rule_el.find("Expiration")
if exp_el is not None:
expiration: dict = {}
@@ -1382,6 +1376,7 @@ def _parse_lifecycle_config(payload: bytes) -> list:
if expiration:
rule["Expiration"] = expiration
+# NoncurrentVersionExpiration
nve_el = rule_el.find("{*}NoncurrentVersionExpiration") or rule_el.find("NoncurrentVersionExpiration")
if nve_el is not None:
nve: dict = {}
@@ -1391,6 +1386,7 @@ def _parse_lifecycle_config(payload: bytes) -> list:
if nve:
rule["NoncurrentVersionExpiration"] = nve
+# AbortIncompleteMultipartUpload
aimu_el = rule_el.find("{*}AbortIncompleteMultipartUpload") or rule_el.find("AbortIncompleteMultipartUpload")
if aimu_el is not None:
aimu: dict = {}
@@ -1428,6 +1424,7 @@ def _bucket_quota_handler(bucket_name: str) -> Response:
if not quota:
return _error_response("NoSuchQuotaConfiguration", "No quota configuration found", 404)
+# Return as JSON for simplicity (not a standard S3 API)
stats = storage.bucket_stats(bucket_name)
return jsonify({
"quota": quota,
@@ -1456,6 +1453,7 @@ def _bucket_quota_handler(bucket_name: str) -> Response:
if max_size_bytes is None and max_objects is None:
return _error_response("InvalidArgument", "At least one of max_size_bytes or max_objects is required", 400)
+# Validate types
if max_size_bytes is not None:
try:
max_size_bytes = int(max_size_bytes)
@@ -1566,6 +1564,7 @@ def _bulk_delete_handler(bucket_name: str) -> Response:
return _xml_response(result, status=200)
+# ---------------------- routes ----------------------
@s3_api_bp.get("/")
@limiter.limit("60 per minute")
def list_buckets() -> Response:
@@ -1643,6 +1642,7 @@ def bucket_handler(bucket_name: str) -> Response:
current_app.logger.info("Bucket deleted", extra={"bucket": bucket_name})
return Response(status=204)
+# GET - list objects (supports both ListObjects and ListObjectsV2)
principal, error = _require_principal()
try:
_authorize_action(principal, bucket_name, "list")
@@ -1650,12 +1650,18 @@ def bucket_handler(bucket_name: str) -> Response:
if error:
return error
return _error_response("AccessDenied", str(exc), 403)
+try:
+objects = storage.list_objects(bucket_name)
+except StorageError as exc:
+return _error_response("NoSuchBucket", str(exc), 404)
+# Check if this is ListObjectsV2 (list-type=2)
list_type = request.args.get("list-type")
prefix = request.args.get("prefix", "")
delimiter = request.args.get("delimiter", "")
max_keys = min(int(request.args.get("max-keys", current_app.config["UI_PAGE_SIZE"])), 1000)
+# Pagination markers
marker = request.args.get("marker", "") # ListObjects v1
continuation_token = request.args.get("continuation-token", "") # ListObjectsV2
start_after = request.args.get("start-after", "") # ListObjectsV2
@@ -1675,17 +1681,11 @@ def bucket_handler(bucket_name: str) -> Response:
else:
effective_start = marker
-fetch_keys = max_keys * 10 if delimiter else max_keys
-try:
-list_result = storage.list_objects(
-bucket_name,
-max_keys=fetch_keys,
-continuation_token=effective_start or None,
-prefix=prefix or None,
-)
-objects = list_result.objects
-except StorageError as exc:
-return _error_response("NoSuchBucket", str(exc), 404)
+if prefix:
+objects = [obj for obj in objects if obj.key.startswith(prefix)]
+if effective_start:
+objects = [obj for obj in objects if obj.key > effective_start]
common_prefixes: list[str] = []
filtered_objects: list = []
@@ -1694,6 +1694,7 @@ def bucket_handler(bucket_name: str) -> Response:
for obj in objects: for obj in objects:
key_after_prefix = obj.key[len(prefix):] if prefix else obj.key key_after_prefix = obj.key[len(prefix):] if prefix else obj.key
if delimiter in key_after_prefix: if delimiter in key_after_prefix:
# This is a "folder" - extract the common prefix
common_prefix = prefix + key_after_prefix.split(delimiter)[0] + delimiter common_prefix = prefix + key_after_prefix.split(delimiter)[0] + delimiter
if common_prefix not in seen_prefixes: if common_prefix not in seen_prefixes:
seen_prefixes.add(common_prefix) seen_prefixes.add(common_prefix)
@@ -1704,7 +1705,7 @@ def bucket_handler(bucket_name: str) -> Response:
common_prefixes = sorted(common_prefixes) common_prefixes = sorted(common_prefixes)
total_items = len(objects) + len(common_prefixes) total_items = len(objects) + len(common_prefixes)
is_truncated = total_items > max_keys or list_result.is_truncated is_truncated = total_items > max_keys
if len(objects) >= max_keys: if len(objects) >= max_keys:
objects = objects[:max_keys] objects = objects[:max_keys]
@@ -1791,6 +1792,7 @@ def object_handler(bucket_name: str, object_key: str):
if "tagging" in request.args: if "tagging" in request.args:
return _object_tagging_handler(bucket_name, object_key) return _object_tagging_handler(bucket_name, object_key)
# Multipart Uploads
if request.method == "POST": if request.method == "POST":
if "uploads" in request.args: if "uploads" in request.args:
return _initiate_multipart_upload(bucket_name, object_key) return _initiate_multipart_upload(bucket_name, object_key)
@@ -1843,7 +1845,9 @@ def object_handler(bucket_name: str, object_key: str):
response = Response(status=200) response = Response(status=200)
response.headers["ETag"] = f'"{meta.etag}"' response.headers["ETag"] = f'"{meta.etag}"'
if "S3ReplicationAgent" not in request.headers.get("User-Agent", ""): # Trigger replication if not a replication request
user_agent = request.headers.get("User-Agent", "")
if "S3ReplicationAgent" not in user_agent:
_replication_manager().trigger_replication(bucket_name, object_key, action="write") _replication_manager().trigger_replication(bucket_name, object_key, action="write")
return response return response
@@ -1862,25 +1866,31 @@ def object_handler(bucket_name: str, object_key: str):
metadata = storage.get_object_metadata(bucket_name, object_key) metadata = storage.get_object_metadata(bucket_name, object_key)
mimetype = mimetypes.guess_type(object_key)[0] or "application/octet-stream" mimetype = mimetypes.guess_type(object_key)[0] or "application/octet-stream"
# Check if object is encrypted and needs decryption
is_encrypted = "x-amz-server-side-encryption" in metadata is_encrypted = "x-amz-server-side-encryption" in metadata
if request.method == "GET": if request.method == "GET":
if is_encrypted and hasattr(storage, 'get_object_data'): if is_encrypted and hasattr(storage, 'get_object_data'):
# Use encrypted storage to decrypt
try: try:
data, clean_metadata = storage.get_object_data(bucket_name, object_key) data, clean_metadata = storage.get_object_data(bucket_name, object_key)
response = Response(data, mimetype=mimetype) response = Response(data, mimetype=mimetype)
logged_bytes = len(data) logged_bytes = len(data)
# Use decrypted size for Content-Length
response.headers["Content-Length"] = len(data) response.headers["Content-Length"] = len(data)
etag = hashlib.md5(data).hexdigest() etag = hashlib.md5(data).hexdigest()
except StorageError as exc: except StorageError as exc:
return _error_response("InternalError", str(exc), 500) return _error_response("InternalError", str(exc), 500)
else: else:
# Stream unencrypted file directly
stat = path.stat() stat = path.stat()
response = Response(_stream_file(path), mimetype=mimetype, direct_passthrough=True) response = Response(_stream_file(path), mimetype=mimetype, direct_passthrough=True)
logged_bytes = stat.st_size logged_bytes = stat.st_size
etag = storage._compute_etag(path) etag = storage._compute_etag(path)
else: else:
# HEAD request
if is_encrypted and hasattr(storage, 'get_object_data'): if is_encrypted and hasattr(storage, 'get_object_data'):
# For encrypted objects, we need to report decrypted size
try: try:
data, _ = storage.get_object_data(bucket_name, object_key) data, _ = storage.get_object_data(bucket_name, object_key)
response = Response(status=200) response = Response(status=200)
@@ -1909,6 +1919,7 @@ def object_handler(bucket_name: str, object_key: str):
storage.delete_object(bucket_name, object_key) storage.delete_object(bucket_name, object_key)
current_app.logger.info("Object deleted", extra={"bucket": bucket_name, "key": object_key}) current_app.logger.info("Object deleted", extra={"bucket": bucket_name, "key": object_key})
# Trigger replication if not a replication request
user_agent = request.headers.get("User-Agent", "") user_agent = request.headers.get("User-Agent", "")
if "S3ReplicationAgent" not in user_agent: if "S3ReplicationAgent" not in user_agent:
_replication_manager().trigger_replication(bucket_name, object_key, action="delete") _replication_manager().trigger_replication(bucket_name, object_key, action="delete")
@@ -2189,6 +2200,7 @@ class AwsChunkedDecoder:
self.chunk_remaining -= len(chunk) self.chunk_remaining -= len(chunk)
if self.chunk_remaining == 0: if self.chunk_remaining == 0:
# Read CRLF after chunk data
crlf = self.stream.read(2) crlf = self.stream.read(2)
if crlf != b"\r\n": if crlf != b"\r\n":
raise IOError("Malformed chunk: missing CRLF") raise IOError("Malformed chunk: missing CRLF")
@@ -2207,6 +2219,7 @@ class AwsChunkedDecoder:
try: try:
line_str = line.decode("ascii").strip() line_str = line.decode("ascii").strip()
# Handle chunk-signature extension if present (e.g. "1000;chunk-signature=...")
if ";" in line_str: if ";" in line_str:
line_str = line_str.split(";")[0] line_str = line_str.split(";")[0]
chunk_size = int(line_str, 16) chunk_size = int(line_str, 16)
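To illustrate the framing this decoder handles, here is a small standalone sketch (not the repo's class) that strips `aws-chunked` framing from an in-memory payload; signature verification is intentionally omitted.

```python
import io

def decode_aws_chunked(raw: bytes) -> bytes:
    """Strip aws-chunked framing: hex size line (optionally ';chunk-signature=...'),
    then chunk data, then CRLF, terminated by a zero-length chunk."""
    stream = io.BytesIO(raw)
    body = b""
    while True:
        line = stream.readline().decode("ascii").strip()
        if ";" in line:              # e.g. "5;chunk-signature=abc..."
            line = line.split(";")[0]
        size = int(line, 16)         # chunk sizes are hex-encoded
        if size == 0:                # terminal zero-length chunk
            break
        body += stream.read(size)
        if stream.read(2) != b"\r\n":
            raise IOError("Malformed chunk: missing CRLF")
    return body

print(decode_aws_chunked(b"5;chunk-signature=deadbeef\r\nhello\r\n0\r\n\r\n"))  # b'hello'
```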
@@ -2362,6 +2375,7 @@ def _abort_multipart_upload(bucket_name: str, object_key: str) -> Response:
try: try:
_storage().abort_multipart_upload(bucket_name, upload_id) _storage().abort_multipart_upload(bucket_name, upload_id)
except StorageError as exc: except StorageError as exc:
# Abort is idempotent, but surface NoSuchBucket if the bucket itself is missing
if "Bucket does not exist" in str(exc): if "Bucket does not exist" in str(exc):
return _error_response("NoSuchBucket", str(exc), 404) return _error_response("NoSuchBucket", str(exc), 404)
@@ -2371,6 +2385,7 @@ def _abort_multipart_upload(bucket_name: str, object_key: str) -> Response:
@s3_api_bp.before_request @s3_api_bp.before_request
def resolve_principal(): def resolve_principal():
g.principal = None g.principal = None
# Try SigV4
try: try:
if ("Authorization" in request.headers and request.headers["Authorization"].startswith("AWS4-HMAC-SHA256")) or \ if ("Authorization" in request.headers and request.headers["Authorization"].startswith("AWS4-HMAC-SHA256")) or \
(request.args.get("X-Amz-Algorithm") == "AWS4-HMAC-SHA256"): (request.args.get("X-Amz-Algorithm") == "AWS4-HMAC-SHA256"):
@@ -2379,6 +2394,7 @@ def resolve_principal():
except Exception: except Exception:
pass pass
# Try simple auth headers (internal/testing)
access_key = request.headers.get("X-Access-Key") access_key = request.headers.get("X-Access-Key")
secret_key = request.headers.get("X-Secret-Key") secret_key = request.headers.get("X-Secret-Key")
if access_key and secret_key: if access_key and secret_key:
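A minimal sketch of the simple header-based auth path above, assuming the API is served at `http://127.0.0.1:5000` as in the docs' curl examples; the credentials are placeholders.

```python
import requests

resp = requests.get(
    "http://127.0.0.1:5000/",  # ListBuckets route shown earlier in this file
    headers={
        "X-Access-Key": "EXAMPLE_ACCESS_KEY",
        "X-Secret-Key": "EXAMPLE_SECRET_KEY",
    },
    timeout=10,
)
print(resp.status_code)
print(resp.text)  # XML bucket listing on success
```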


@@ -99,15 +99,6 @@ class BucketMeta:
created_at: datetime created_at: datetime
@dataclass
class ListObjectsResult:
"""Paginated result for object listing."""
objects: List[ObjectMeta]
is_truncated: bool
next_continuation_token: Optional[str]
total_count: Optional[int] = None # Total objects in bucket (from stats cache)
def _utcnow() -> datetime: def _utcnow() -> datetime:
return datetime.now(timezone.utc) return datetime.now(timezone.utc)
@@ -128,14 +119,11 @@ class ObjectStorage:
BUCKET_VERSIONS_DIR = "versions" BUCKET_VERSIONS_DIR = "versions"
MULTIPART_MANIFEST = "manifest.json" MULTIPART_MANIFEST = "manifest.json"
BUCKET_CONFIG_FILE = ".bucket.json" BUCKET_CONFIG_FILE = ".bucket.json"
KEY_INDEX_CACHE_TTL = 30 # seconds - longer TTL for better browsing performance
def __init__(self, root: Path) -> None: def __init__(self, root: Path) -> None:
self.root = Path(root) self.root = Path(root)
self.root.mkdir(parents=True, exist_ok=True) self.root.mkdir(parents=True, exist_ok=True)
self._ensure_system_roots() self._ensure_system_roots()
# In-memory object metadata cache: bucket_id -> (dict[key -> ObjectMeta], timestamp)
self._object_cache: Dict[str, tuple[Dict[str, ObjectMeta], float]] = {}
def list_buckets(self) -> List[BucketMeta]: def list_buckets(self) -> List[BucketMeta]:
buckets: List[BucketMeta] = [] buckets: List[BucketMeta] = []
@@ -253,84 +241,31 @@ class ObjectStorage:
self._remove_tree(self._system_bucket_root(bucket_path.name)) self._remove_tree(self._system_bucket_root(bucket_path.name))
self._remove_tree(self._multipart_bucket_root(bucket_path.name)) self._remove_tree(self._multipart_bucket_root(bucket_path.name))
def list_objects( def list_objects(self, bucket_name: str) -> List[ObjectMeta]:
self,
bucket_name: str,
*,
max_keys: int = 1000,
continuation_token: Optional[str] = None,
prefix: Optional[str] = None,
) -> ListObjectsResult:
"""List objects in a bucket with pagination support.
Args:
bucket_name: Name of the bucket
max_keys: Maximum number of objects to return (default 1000)
continuation_token: Token from previous request for pagination
prefix: Filter objects by key prefix
Returns:
ListObjectsResult with objects, truncation status, and continuation token
"""
bucket_path = self._bucket_path(bucket_name) bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists(): if not bucket_path.exists():
raise StorageError("Bucket does not exist") raise StorageError("Bucket does not exist")
bucket_id = bucket_path.name bucket_id = bucket_path.name
# Use cached object metadata for fast listing
object_cache = self._get_object_cache(bucket_id, bucket_path)
# Get sorted keys
all_keys = sorted(object_cache.keys())
# Apply prefix filter if specified
if prefix:
all_keys = [k for k in all_keys if k.startswith(prefix)]
total_count = len(all_keys)
# Handle continuation token (the key to start after)
start_index = 0
if continuation_token:
try:
# Binary search for efficiency on large lists
import bisect
start_index = bisect.bisect_right(all_keys, continuation_token)
if start_index >= total_count:
return ListObjectsResult(
objects=[],
is_truncated=False,
next_continuation_token=None,
total_count=total_count,
)
except Exception:
pass # Invalid token, start from beginning
# Get the slice we need
end_index = start_index + max_keys
keys_slice = all_keys[start_index:end_index]
is_truncated = end_index < total_count
# Build result from cached metadata (no file I/O!)
objects: List[ObjectMeta] = [] objects: List[ObjectMeta] = []
for key in keys_slice: for path in bucket_path.rglob("*"):
obj = object_cache.get(key) if path.is_file():
if obj: stat = path.stat()
objects.append(obj) rel = path.relative_to(bucket_path)
if rel.parts and rel.parts[0] in self.INTERNAL_FOLDERS:
next_token = keys_slice[-1] if is_truncated and keys_slice else None continue
metadata = self._read_metadata(bucket_id, rel)
return ListObjectsResult( objects.append(
objects=objects, ObjectMeta(
is_truncated=is_truncated, key=str(rel.as_posix()),
next_continuation_token=next_token, size=stat.st_size,
total_count=total_count, last_modified=datetime.fromtimestamp(stat.st_mtime),
etag=self._compute_etag(path),
metadata=metadata or None,
) )
)
def list_objects_all(self, bucket_name: str) -> List[ObjectMeta]: objects.sort(key=lambda meta: meta.key)
"""List all objects in a bucket (no pagination). Use with caution for large buckets.""" return objects
result = self.list_objects(bucket_name, max_keys=100000)
return result.objects
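A quick usage sketch of the simplified listing API; the import path and bucket name are assumptions.

```python
from pathlib import Path

from app.storage import ObjectStorage  # module path assumed

storage = ObjectStorage(Path("data"))
for meta in storage.list_objects("my-bucket"):  # returns List[ObjectMeta], sorted by key
    print(meta.key, meta.size, meta.etag)
```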
def put_object( def put_object(
self, self,
@@ -398,21 +333,18 @@ class ObjectStorage:
pass pass
stat = destination.stat() stat = destination.stat()
etag = checksum.hexdigest() if metadata:
self._write_metadata(bucket_id, safe_key, metadata)
# Always store internal metadata (etag, size) alongside user metadata else:
internal_meta = {"__etag__": etag, "__size__": str(stat.st_size)} self._delete_metadata(bucket_id, safe_key)
combined_meta = {**internal_meta, **(metadata or {})}
self._write_metadata(bucket_id, safe_key, combined_meta)
self._invalidate_bucket_stats_cache(bucket_id) self._invalidate_bucket_stats_cache(bucket_id)
self._invalidate_object_cache(bucket_id)
return ObjectMeta( return ObjectMeta(
key=safe_key.as_posix(), key=safe_key.as_posix(),
size=stat.st_size, size=stat.st_size,
last_modified=datetime.fromtimestamp(stat.st_mtime), last_modified=datetime.fromtimestamp(stat.st_mtime),
etag=etag, etag=checksum.hexdigest(),
metadata=metadata, metadata=metadata,
) )
@@ -464,7 +396,6 @@ class ObjectStorage:
self._delete_metadata(bucket_id, rel) self._delete_metadata(bucket_id, rel)
self._invalidate_bucket_stats_cache(bucket_id) self._invalidate_bucket_stats_cache(bucket_id)
self._invalidate_object_cache(bucket_id)
self._cleanup_empty_parents(path, bucket_path) self._cleanup_empty_parents(path, bucket_path)
def purge_object(self, bucket_name: str, object_key: str) -> None: def purge_object(self, bucket_name: str, object_key: str) -> None:
@@ -487,7 +418,6 @@ class ObjectStorage:
# Invalidate bucket stats cache # Invalidate bucket stats cache
self._invalidate_bucket_stats_cache(bucket_id) self._invalidate_bucket_stats_cache(bucket_id)
self._invalidate_object_cache(bucket_id)
self._cleanup_empty_parents(target, bucket_path) self._cleanup_empty_parents(target, bucket_path)
def is_versioning_enabled(self, bucket_name: str) -> bool: def is_versioning_enabled(self, bucket_name: str) -> bool:
@@ -1150,187 +1080,6 @@ class ObjectStorage:
def _legacy_multipart_dir(self, bucket_name: str, upload_id: str) -> Path: def _legacy_multipart_dir(self, bucket_name: str, upload_id: str) -> Path:
return self._legacy_multipart_bucket_root(bucket_name) / upload_id return self._legacy_multipart_bucket_root(bucket_name) / upload_id
def _fast_list_keys(self, bucket_path: Path) -> List[str]:
"""Fast directory walk using os.scandir instead of pathlib.rglob.
This is significantly faster for large directories (10K+ files).
Returns just the keys (for backward compatibility).
"""
return list(self._build_object_cache(bucket_path).keys())
def _build_object_cache(self, bucket_path: Path) -> Dict[str, ObjectMeta]:
"""Build a complete object metadata cache for a bucket.
Uses os.scandir for fast directory walking and a persistent etag index.
"""
from concurrent.futures import ThreadPoolExecutor
bucket_id = bucket_path.name
objects: Dict[str, ObjectMeta] = {}
bucket_str = str(bucket_path)
bucket_len = len(bucket_str) + 1 # +1 for the separator
# Try to load persisted etag index first (single file read vs thousands)
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
meta_cache: Dict[str, str] = {}
index_mtime: float = 0
if etag_index_path.exists():
try:
index_mtime = etag_index_path.stat().st_mtime
with open(etag_index_path, 'r', encoding='utf-8') as f:
meta_cache = json.load(f)
except (OSError, json.JSONDecodeError):
meta_cache = {}
# Check if we need to rebuild the index
meta_root = self._bucket_meta_root(bucket_id)
needs_rebuild = False
if meta_root.exists() and index_mtime > 0:
# Quick check: if any meta file is newer than index, rebuild
def check_newer(dir_path: str) -> bool:
try:
with os.scandir(dir_path) as it:
for entry in it:
if entry.is_dir(follow_symlinks=False):
if check_newer(entry.path):
return True
elif entry.is_file(follow_symlinks=False) and entry.name.endswith('.meta.json'):
if entry.stat().st_mtime > index_mtime:
return True
except OSError:
pass
return False
needs_rebuild = check_newer(str(meta_root))
elif not meta_cache:
needs_rebuild = True
if needs_rebuild and meta_root.exists():
meta_str = str(meta_root)
meta_len = len(meta_str) + 1
meta_files: list[tuple[str, str]] = []
# Collect all metadata file paths
def collect_meta_files(dir_path: str) -> None:
try:
with os.scandir(dir_path) as it:
for entry in it:
if entry.is_dir(follow_symlinks=False):
collect_meta_files(entry.path)
elif entry.is_file(follow_symlinks=False) and entry.name.endswith('.meta.json'):
rel = entry.path[meta_len:]
key = rel[:-10].replace(os.sep, '/')
meta_files.append((key, entry.path))
except OSError:
pass
collect_meta_files(meta_str)
# Parallel read of metadata files - only extract __etag__
def read_meta_file(item: tuple[str, str]) -> tuple[str, str | None]:
key, path = item
try:
with open(path, 'rb') as f:
content = f.read()
etag_marker = b'"__etag__"'
idx = content.find(etag_marker)
if idx != -1:
start = content.find(b'"', idx + len(etag_marker) + 1)
if start != -1:
end = content.find(b'"', start + 1)
if end != -1:
return key, content[start+1:end].decode('utf-8')
return key, None
except (OSError, UnicodeDecodeError):
return key, None
if meta_files:
meta_cache = {}
with ThreadPoolExecutor(max_workers=min(64, len(meta_files))) as executor:
for key, etag in executor.map(read_meta_file, meta_files):
if etag:
meta_cache[key] = etag
# Persist the index for next time
try:
etag_index_path.parent.mkdir(parents=True, exist_ok=True)
with open(etag_index_path, 'w', encoding='utf-8') as f:
json.dump(meta_cache, f)
except OSError:
pass
# Now scan objects and use cached etags
def scan_dir(dir_path: str) -> None:
try:
with os.scandir(dir_path) as it:
for entry in it:
if entry.is_dir(follow_symlinks=False):
# Skip internal folders
rel_start = entry.path[bucket_len:].split(os.sep)[0] if len(entry.path) > bucket_len else entry.name
if rel_start in self.INTERNAL_FOLDERS:
continue
scan_dir(entry.path)
elif entry.is_file(follow_symlinks=False):
# Get relative path and convert to POSIX
rel = entry.path[bucket_len:]
# Check if in internal folder
first_part = rel.split(os.sep)[0] if os.sep in rel else rel
if first_part in self.INTERNAL_FOLDERS:
continue
key = rel.replace(os.sep, '/')
try:
# Use entry.stat() which is cached from scandir
stat = entry.stat()
# Get etag from cache (now just a string, not dict)
etag = meta_cache.get(key)
# Use placeholder for legacy objects without stored etag
if not etag:
etag = f'"{stat.st_size}-{int(stat.st_mtime)}"'
objects[key] = ObjectMeta(
key=key,
size=stat.st_size,
last_modified=datetime.fromtimestamp(stat.st_mtime),
etag=etag,
metadata=None, # Don't include user metadata in listing
)
except OSError:
pass
except OSError:
pass
scan_dir(bucket_str)
return objects
def _get_object_cache(self, bucket_id: str, bucket_path: Path) -> Dict[str, ObjectMeta]:
"""Get cached object metadata for a bucket, refreshing if stale."""
now = time.time()
cached = self._object_cache.get(bucket_id)
if cached:
objects, timestamp = cached
if now - timestamp < self.KEY_INDEX_CACHE_TTL:
return objects
# Rebuild cache
objects = self._build_object_cache(bucket_path)
self._object_cache[bucket_id] = (objects, now)
return objects
def _invalidate_object_cache(self, bucket_id: str) -> None:
"""Invalidate the object cache and etag index for a bucket."""
self._object_cache.pop(bucket_id, None)
# Also invalidate persisted etag index
etag_index_path = self._system_bucket_root(bucket_id) / "etag_index.json"
try:
etag_index_path.unlink(missing_ok=True)
except OSError:
pass
def _ensure_system_roots(self) -> None: def _ensure_system_roots(self) -> None:
for path in ( for path in (
self._system_root_path(), self._system_root_path(),

app/ui.py

@@ -189,7 +189,7 @@ def inject_nav_state() -> dict[str, Any]:
return { return {
"principal": principal, "principal": principal,
"can_manage_iam": can_manage, "can_manage_iam": can_manage,
"can_view_metrics": can_manage, "can_view_metrics": can_manage, # Only admins can view metrics
"csrf_token": generate_csrf, "csrf_token": generate_csrf,
} }
@@ -294,8 +294,7 @@ def bucket_detail(bucket_name: str):
storage = _storage() storage = _storage()
try: try:
_authorize_ui(principal, bucket_name, "list") _authorize_ui(principal, bucket_name, "list")
if not storage.bucket_exists(bucket_name): objects = storage.list_objects(bucket_name)
raise StorageError("Bucket does not exist")
except (StorageError, IamError) as exc: except (StorageError, IamError) as exc:
flash(_friendly_error_message(exc), "danger") flash(_friendly_error_message(exc), "danger")
return redirect(url_for("ui.buckets_overview")) return redirect(url_for("ui.buckets_overview"))
@@ -342,6 +341,7 @@ def bucket_detail(bucket_name: str):
except IamError: except IamError:
can_manage_versioning = False can_manage_versioning = False
# Check replication permission
can_manage_replication = False can_manage_replication = False
if principal: if principal:
try: try:
@@ -350,6 +350,7 @@ def bucket_detail(bucket_name: str):
except IamError: except IamError:
can_manage_replication = False can_manage_replication = False
# Check if user is admin (can configure replication settings, not just toggle)
is_replication_admin = False is_replication_admin = False
if principal: if principal:
try: try:
@@ -358,9 +359,12 @@ def bucket_detail(bucket_name: str):
except IamError: except IamError:
is_replication_admin = False is_replication_admin = False
# Replication info - don't compute sync status here (it's slow), let JS fetch it async
replication_rule = _replication().get_rule(bucket_name) replication_rule = _replication().get_rule(bucket_name)
# Load connections for admin, or for non-admin if there's an existing rule (to show target name)
connections = _connections().list() if (is_replication_admin or replication_rule) else [] connections = _connections().list() if (is_replication_admin or replication_rule) else []
# Encryption settings
encryption_config = storage.get_bucket_encryption(bucket_name) encryption_config = storage.get_bucket_encryption(bucket_name)
kms_manager = _kms() kms_manager = _kms()
kms_keys = kms_manager.list_keys() if kms_manager else [] kms_keys = kms_manager.list_keys() if kms_manager else []
@@ -368,6 +372,7 @@ def bucket_detail(bucket_name: str):
encryption_enabled = current_app.config.get("ENCRYPTION_ENABLED", False) encryption_enabled = current_app.config.get("ENCRYPTION_ENABLED", False)
can_manage_encryption = can_manage_versioning # Same as other bucket properties can_manage_encryption = can_manage_versioning # Same as other bucket properties
# Quota settings (admin only)
bucket_quota = storage.get_bucket_quota(bucket_name) bucket_quota = storage.get_bucket_quota(bucket_name)
bucket_stats = storage.bucket_stats(bucket_name) bucket_stats = storage.bucket_stats(bucket_name)
can_manage_quota = False can_manage_quota = False
@@ -377,12 +382,10 @@ def bucket_detail(bucket_name: str):
except IamError: except IamError:
pass pass
objects_api_url = url_for("ui.list_bucket_objects", bucket_name=bucket_name)
return render_template( return render_template(
"bucket_detail.html", "bucket_detail.html",
bucket_name=bucket_name, bucket_name=bucket_name,
objects_api_url=objects_api_url, objects=objects,
principal=principal, principal=principal,
bucket_policy_text=policy_text, bucket_policy_text=policy_text,
bucket_policy=bucket_policy, bucket_policy=bucket_policy,
@@ -405,61 +408,6 @@ def bucket_detail(bucket_name: str):
) )
@ui_bp.get("/buckets/<bucket_name>/objects")
def list_bucket_objects(bucket_name: str):
"""API endpoint for paginated object listing."""
principal = _current_principal()
storage = _storage()
try:
_authorize_ui(principal, bucket_name, "list")
except IamError as exc:
return jsonify({"error": str(exc)}), 403
max_keys = min(int(request.args.get("max_keys", 1000)), 10000)
continuation_token = request.args.get("continuation_token") or None
prefix = request.args.get("prefix") or None
try:
result = storage.list_objects(
bucket_name,
max_keys=max_keys,
continuation_token=continuation_token,
prefix=prefix,
)
except StorageError as exc:
return jsonify({"error": str(exc)}), 400
try:
versioning_enabled = storage.is_versioning_enabled(bucket_name)
except StorageError:
versioning_enabled = False
objects_data = []
for obj in result.objects:
objects_data.append({
"key": obj.key,
"size": obj.size,
"last_modified": obj.last_modified.isoformat(),
"last_modified_display": obj.last_modified.strftime("%b %d, %Y %H:%M"),
"etag": obj.etag,
"metadata": obj.metadata or {},
"preview_url": url_for("ui.object_preview", bucket_name=bucket_name, object_key=obj.key),
"download_url": url_for("ui.object_preview", bucket_name=bucket_name, object_key=obj.key) + "?download=1",
"presign_endpoint": url_for("ui.object_presign", bucket_name=bucket_name, object_key=obj.key),
"delete_endpoint": url_for("ui.delete_object", bucket_name=bucket_name, object_key=obj.key),
"versions_endpoint": url_for("ui.object_versions", bucket_name=bucket_name, object_key=obj.key),
"restore_template": url_for("ui.restore_object_version", bucket_name=bucket_name, object_key=obj.key, version_id="VERSION_ID_PLACEHOLDER"),
})
return jsonify({
"objects": objects_data,
"is_truncated": result.is_truncated,
"next_continuation_token": result.next_continuation_token,
"total_count": result.total_count,
"versioning_enabled": versioning_enabled,
})
@ui_bp.post("/buckets/<bucket_name>/upload") @ui_bp.post("/buckets/<bucket_name>/upload")
@limiter.limit("30 per minute") @limiter.limit("30 per minute")
def upload_object(bucket_name: str): def upload_object(bucket_name: str):
@@ -730,30 +678,41 @@ def bulk_download_objects(bucket_name: str):
unique_keys = list(dict.fromkeys(cleaned)) unique_keys = list(dict.fromkeys(cleaned))
storage = _storage() storage = _storage()
# Verify permission to read bucket contents # Check permissions for all keys first (or at least bucket read)
# Check bucket-level read once up front; per-object checks still run below
# because fine-grained policies may deny individual keys.
try: try:
_authorize_ui(principal, bucket_name, "read") _authorize_ui(principal, bucket_name, "read")
except IamError as exc: except IamError as exc:
return jsonify({"error": str(exc)}), 403 return jsonify({"error": str(exc)}), 403
# Create ZIP archive of selected objects # Create ZIP
buffer = io.BytesIO() buffer = io.BytesIO()
with zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED) as zf: with zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED) as zf:
for key in unique_keys: for key in unique_keys:
try: try:
# Verify per-object read permission as well: slower for large selections,
# but required when fine-grained policies deny specific keys.
_authorize_ui(principal, bucket_name, "read", object_key=key) _authorize_ui(principal, bucket_name, "read", object_key=key)
# Check if object is encrypted
metadata = storage.get_object_metadata(bucket_name, key) metadata = storage.get_object_metadata(bucket_name, key)
is_encrypted = "x-amz-server-side-encryption" in metadata is_encrypted = "x-amz-server-side-encryption" in metadata
if is_encrypted and hasattr(storage, 'get_object_data'): if is_encrypted and hasattr(storage, 'get_object_data'):
# Decrypt and add to zip
data, _ = storage.get_object_data(bucket_name, key) data, _ = storage.get_object_data(bucket_name, key)
zf.writestr(key, data) zf.writestr(key, data)
else: else:
# Add unencrypted file directly
path = storage.get_object_path(bucket_name, key) path = storage.get_object_path(bucket_name, key)
zf.write(path, arcname=key) zf.write(path, arcname=key)
except (StorageError, IamError): except (StorageError, IamError):
# Skip objects that can't be accessed # Skip files we can't read or don't exist
continue continue
buffer.seek(0) buffer.seek(0)
@@ -1058,6 +1017,7 @@ def update_bucket_encryption(bucket_name: str):
action = request.form.get("action", "enable") action = request.form.get("action", "enable")
if action == "disable": if action == "disable":
# Disable encryption
try: try:
_storage().set_bucket_encryption(bucket_name, None) _storage().set_bucket_encryption(bucket_name, None)
flash("Default encryption disabled", "info") flash("Default encryption disabled", "info")
@@ -1065,14 +1025,16 @@ def update_bucket_encryption(bucket_name: str):
flash(_friendly_error_message(exc), "danger") flash(_friendly_error_message(exc), "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties")) return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
# Enable or update encryption
algorithm = request.form.get("algorithm", "AES256") algorithm = request.form.get("algorithm", "AES256")
kms_key_id = request.form.get("kms_key_id", "").strip() or None kms_key_id = request.form.get("kms_key_id", "").strip() or None
# Validate algorithm
if algorithm not in ("AES256", "aws:kms"): if algorithm not in ("AES256", "aws:kms"):
flash("Invalid encryption algorithm", "danger") flash("Invalid encryption algorithm", "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties")) return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
# Build encryption configuration in AWS S3 format # Build encryption config following AWS format
encryption_config: dict[str, Any] = { encryption_config: dict[str, Any] = {
"Rules": [ "Rules": [
{ {
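For reference, the AWS-style encryption configuration this handler builds looks roughly like the following; only `Rules` is confirmed by the code above, so the remaining field names are assumptions based on AWS' `ApplyServerSideEncryptionByDefault` shape.

```python
# Hypothetical payload shape; only "Rules" is confirmed by the handler above.
encryption_config = {
    "Rules": [
        {
            "ApplyServerSideEncryptionByDefault": {
                "SSEAlgorithm": "aws:kms",          # or "AES256"
                "KMSMasterKeyID": "my-kms-key-id",  # only meaningful for aws:kms
            }
        }
    ]
}
```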
@@ -1248,6 +1210,7 @@ def delete_iam_user(access_key: str):
return redirect(url_for("ui.iam_dashboard")) return redirect(url_for("ui.iam_dashboard"))
if access_key == principal.access_key: if access_key == principal.access_key:
# Self-deletion
try: try:
_iam().delete_user(access_key) _iam().delete_user(access_key)
session.pop("credentials", None) session.pop("credentials", None)
@@ -1329,9 +1292,6 @@ def create_connection():
@ui_bp.post("/connections/test") @ui_bp.post("/connections/test")
def test_connection(): def test_connection():
from botocore.config import Config as BotoConfig
from botocore.exceptions import ConnectTimeoutError, EndpointConnectionError, ReadTimeoutError
principal = _current_principal() principal = _current_principal()
try: try:
_iam().authorize(principal, None, "iam:list_users") _iam().authorize(principal, None, "iam:list_users")
@@ -1348,32 +1308,18 @@ def test_connection():
return jsonify({"status": "error", "message": "Missing credentials"}), 400 return jsonify({"status": "error", "message": "Missing credentials"}), 400
try: try:
config = BotoConfig(
connect_timeout=5,
read_timeout=10,
retries={'max_attempts': 1}
)
s3 = boto3.client( s3 = boto3.client(
"s3", "s3",
endpoint_url=endpoint, endpoint_url=endpoint,
aws_access_key_id=access_key, aws_access_key_id=access_key,
aws_secret_access_key=secret_key, aws_secret_access_key=secret_key,
region_name=region, region_name=region,
config=config,
) )
# Try to list buckets to verify credentials and endpoint
s3.list_buckets() s3.list_buckets()
return jsonify({"status": "ok", "message": "Connection successful"}) return jsonify({"status": "ok", "message": "Connection successful"})
except (ConnectTimeoutError, ReadTimeoutError):
return jsonify({"status": "error", "message": f"Connection timed out - endpoint may be down or unreachable: {endpoint}"}), 400
except EndpointConnectionError:
return jsonify({"status": "error", "message": f"Could not connect to endpoint: {endpoint}"}), 400
except ClientError as e:
error_code = e.response.get('Error', {}).get('Code', 'Unknown')
error_msg = e.response.get('Error', {}).get('Message', str(e))
return jsonify({"status": "error", "message": f"Connection failed ({error_code}): {error_msg}"}), 400
except Exception as e: except Exception as e:
return jsonify({"status": "error", "message": f"Connection failed: {str(e)}"}), 400 return jsonify({"status": "error", "message": str(e)}), 400
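The timeout handling removed above relied on real botocore APIs; if it is ever needed again, a standalone sketch along those lines could look like this (endpoint and credentials are caller-supplied).

```python
import boto3
from botocore.config import Config as BotoConfig
from botocore.exceptions import ConnectTimeoutError, EndpointConnectionError, ReadTimeoutError

def probe_endpoint(endpoint: str, access_key: str, secret_key: str, region: str = "us-east-1") -> str:
    config = BotoConfig(connect_timeout=5, read_timeout=10, retries={"max_attempts": 1})
    try:
        s3 = boto3.client(
            "s3",
            endpoint_url=endpoint,
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key,
            region_name=region,
            config=config,
        )
        s3.list_buckets()  # verifies both reachability and credentials
        return "ok"
    except (ConnectTimeoutError, ReadTimeoutError):
        return f"timed out: {endpoint}"
    except EndpointConnectionError:
        return f"unreachable: {endpoint}"
    except Exception as exc:  # ClientError and anything else
        return f"failed: {exc}"
```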
@ui_bp.post("/connections/<connection_id>/update") @ui_bp.post("/connections/<connection_id>/update")
@@ -1434,6 +1380,7 @@ def update_bucket_replication(bucket_name: str):
flash(str(exc), "danger") flash(str(exc), "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="replication")) return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="replication"))
# Check if user is admin (required for create/delete operations)
is_admin = False is_admin = False
try: try:
_iam().authorize(principal, None, "iam:list_users") _iam().authorize(principal, None, "iam:list_users")
@@ -1444,12 +1391,14 @@ def update_bucket_replication(bucket_name: str):
action = request.form.get("action") action = request.form.get("action")
if action == "delete": if action == "delete":
# Admin only - remove configuration entirely
if not is_admin: if not is_admin:
flash("Only administrators can remove replication configuration", "danger") flash("Only administrators can remove replication configuration", "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="replication")) return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="replication"))
_replication().delete_rule(bucket_name) _replication().delete_rule(bucket_name)
flash("Replication configuration removed", "info") flash("Replication configuration removed", "info")
elif action == "pause": elif action == "pause":
# Users can pause - just set enabled=False
rule = _replication().get_rule(bucket_name) rule = _replication().get_rule(bucket_name)
if rule: if rule:
rule.enabled = False rule.enabled = False
@@ -1458,6 +1407,7 @@ def update_bucket_replication(bucket_name: str):
else: else:
flash("No replication configuration to pause", "warning") flash("No replication configuration to pause", "warning")
elif action == "resume": elif action == "resume":
# Users can resume - just set enabled=True
rule = _replication().get_rule(bucket_name) rule = _replication().get_rule(bucket_name)
if rule: if rule:
rule.enabled = True rule.enabled = True
@@ -1466,6 +1416,7 @@ def update_bucket_replication(bucket_name: str):
else: else:
flash("No replication configuration to resume", "warning") flash("No replication configuration to resume", "warning")
elif action == "create": elif action == "create":
# Admin only - create new configuration
if not is_admin: if not is_admin:
flash("Only administrators can configure replication settings", "danger") flash("Only administrators can configure replication settings", "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="replication")) return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="replication"))
@@ -1490,6 +1441,7 @@ def update_bucket_replication(bucket_name: str):
) )
_replication().set_rule(rule) _replication().set_rule(rule)
# If mode is "all", trigger replication of existing objects
if replication_mode == REPLICATION_MODE_ALL: if replication_mode == REPLICATION_MODE_ALL:
_replication().replicate_existing_objects(bucket_name) _replication().replicate_existing_objects(bucket_name)
flash("Replication configured. Existing objects are being replicated in the background.", "success") flash("Replication configured. Existing objects are being replicated in the background.", "success")
@@ -1514,31 +1466,10 @@ def get_replication_status(bucket_name: str):
if not rule: if not rule:
return jsonify({"error": "No replication rule"}), 404 return jsonify({"error": "No replication rule"}), 404
connection = _connections().get(rule.target_connection_id) # This is the slow operation - compute sync status by comparing buckets
endpoint_healthy = False
endpoint_error = None
if connection:
endpoint_healthy = _replication().check_endpoint_health(connection)
if not endpoint_healthy:
endpoint_error = f"Cannot reach endpoint: {connection.endpoint_url}"
else:
endpoint_error = "Target connection not found"
stats = None
if endpoint_healthy:
stats = _replication().get_sync_status(bucket_name) stats = _replication().get_sync_status(bucket_name)
if not stats: if not stats:
return jsonify({ return jsonify({"error": "Failed to compute status"}), 500
"objects_synced": 0,
"objects_pending": 0,
"objects_orphaned": 0,
"bytes_synced": 0,
"last_sync_at": rule.stats.last_sync_at if rule.stats else None,
"last_sync_key": rule.stats.last_sync_key if rule.stats else None,
"endpoint_healthy": endpoint_healthy,
"endpoint_error": endpoint_error,
})
return jsonify({ return jsonify({
"objects_synced": stats.objects_synced, "objects_synced": stats.objects_synced,
@@ -1547,28 +1478,6 @@ def get_replication_status(bucket_name: str):
"bytes_synced": stats.bytes_synced, "bytes_synced": stats.bytes_synced,
"last_sync_at": stats.last_sync_at, "last_sync_at": stats.last_sync_at,
"last_sync_key": stats.last_sync_key, "last_sync_key": stats.last_sync_key,
"endpoint_healthy": endpoint_healthy,
"endpoint_error": endpoint_error,
})
@ui_bp.get("/connections/<connection_id>/health")
def check_connection_health(connection_id: str):
"""Check if a connection endpoint is reachable."""
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:list_users")
except IamError:
return jsonify({"error": "Access denied"}), 403
conn = _connections().get(connection_id)
if not conn:
return jsonify({"healthy": False, "error": "Connection not found"}), 404
healthy = _replication().check_endpoint_health(conn)
return jsonify({
"healthy": healthy,
"error": None if healthy else f"Cannot reach endpoint: {conn.endpoint_url}"
}) })
@@ -1589,6 +1498,7 @@ def connections_dashboard():
def metrics_dashboard(): def metrics_dashboard():
principal = _current_principal() principal = _current_principal()
# Metrics are restricted to admin users
try: try:
_iam().authorize(principal, None, "iam:list_users") _iam().authorize(principal, None, "iam:list_users")
except IamError: except IamError:
@@ -1612,13 +1522,16 @@ def metrics_dashboard():
total_bytes_used = 0 total_bytes_used = 0
total_versions = 0 total_versions = 0
# Note: Uses cached stats from storage layer to improve performance
cache_ttl = current_app.config.get("BUCKET_STATS_CACHE_TTL", 60) cache_ttl = current_app.config.get("BUCKET_STATS_CACHE_TTL", 60)
for bucket in buckets: for bucket in buckets:
stats = storage.bucket_stats(bucket.name, cache_ttl=cache_ttl) stats = storage.bucket_stats(bucket.name, cache_ttl=cache_ttl)
# Use totals which include archived versions
total_objects += stats.get("total_objects", stats.get("objects", 0)) total_objects += stats.get("total_objects", stats.get("objects", 0))
total_bytes_used += stats.get("total_bytes", stats.get("bytes", 0)) total_bytes_used += stats.get("total_bytes", stats.get("bytes", 0))
total_versions += stats.get("version_count", 0) total_versions += stats.get("version_count", 0)
# Calculate system uptime
boot_time = psutil.boot_time() boot_time = psutil.boot_time()
uptime_seconds = time.time() - boot_time uptime_seconds = time.time() - boot_time
uptime_days = int(uptime_seconds / 86400) uptime_days = int(uptime_seconds / 86400)


@@ -1,7 +1,7 @@
"""Central location for the application version string.""" """Central location for the application version string."""
from __future__ import annotations from __future__ import annotations
APP_VERSION = "0.1.8" APP_VERSION = "0.1.6"
def get_version() -> str: def get_version() -> str:

docs.md

@@ -341,7 +341,6 @@ Before upgrading across major versions, verify compatibility:
| From Version | To Version | Breaking Changes | Migration Required | | From Version | To Version | Breaking Changes | Migration Required |
|--------------|------------|------------------|-------------------| |--------------|------------|------------------|-------------------|
| 0.1.x | 0.2.x | None expected | No | | 0.1.x | 0.2.x | None expected | No |
| 0.1.6 | 0.1.7 | None | No |
| < 0.1.0 | >= 0.1.0 | New IAM config format | Yes - run migration script | | < 0.1.0 | >= 0.1.0 | New IAM config format | Yes - run migration script |
**Automatic compatibility detection:** **Automatic compatibility detection:**
@@ -635,48 +634,6 @@ curl -X PUT http://127.0.0.1:5000/bucket-policy/test \
The UI will reflect this change as soon as the request completes thanks to the hot reload. The UI will reflect this change as soon as the request completes thanks to the hot reload.
### UI Object Browser
The bucket detail page includes a powerful object browser with the following features:
#### Folder Navigation
Objects with forward slashes (`/`) in their keys are displayed as a folder hierarchy. Click a folder row to navigate into it. A breadcrumb navigation bar shows your current path and allows quick navigation back to parent folders or the root.
#### Pagination & Infinite Scroll
- Objects load in configurable batches (50, 100, 150, 200, or 250 per page)
- Scroll to the bottom to automatically load more objects (infinite scroll)
- A **Load more** button is available as a fallback for touch devices or when infinite scroll doesn't trigger
- The footer shows the current load status (e.g., "Showing 100 of 500 objects")
#### Bulk Operations
- Select multiple objects using checkboxes
- **Bulk Delete**: Delete multiple objects at once
- **Bulk Download**: Download selected objects as individual files
#### Search & Filter
Use the search box to filter objects by name in real-time. The filter applies to the currently loaded objects.
#### Error Handling
If object loading fails (e.g., network error), a friendly error message is displayed with a **Retry** button to attempt loading again.
#### Object Preview
Click any object row to view its details in the preview sidebar:
- File size and last modified date
- ETag (content hash)
- Custom metadata (if present)
- Download and presign (share link) buttons
- Version history (when versioning is enabled)
#### Drag & Drop Upload
Drag files directly onto the objects table to upload them to the current bucket and folder path.
## 6. Presigned URLs ## 6. Presigned URLs
- Trigger from the UI using the **Presign** button after selecting an object. - Trigger from the UI using the **Presign** button after selecting an object.
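A sketch of generating a presigned GET URL programmatically with boto3, assuming the local endpoint used in the docs' other examples; bucket, key, and credentials are placeholders.

```python
import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="http://127.0.0.1:5000",
    aws_access_key_id="EXAMPLE_ACCESS_KEY",
    aws_secret_access_key="EXAMPLE_SECRET_KEY",
    region_name="us-east-1",
)

url = s3.generate_presigned_url(
    "get_object",
    Params={"Bucket": "test", "Key": "report.pdf"},
    ExpiresIn=900,  # 15 minutes
)
print(url)
```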


@@ -1,3 +0,0 @@
[pytest]
testpaths = tests
norecursedirs = data .git __pycache__ .venv


@@ -1,10 +1,10 @@
Flask>=3.1.2 Flask>=3.1.2
Flask-Limiter>=4.1.1 Flask-Limiter>=4.1.0
Flask-Cors>=6.0.2 Flask-Cors>=6.0.1
Flask-WTF>=1.2.2 Flask-WTF>=1.2.2
pytest>=9.0.2 pytest>=9.0.1
requests>=2.32.5 requests>=2.32.5
boto3>=1.42.14 boto3>=1.42.1
waitress>=3.0.2 waitress>=3.0.2
psutil>=7.1.3 psutil>=7.1.3
cryptography>=46.0.3 cryptography>=46.0.3


@@ -66,28 +66,8 @@ html {
color: var(--myfsio-muted) !important; color: var(--myfsio-muted) !important;
} }
.table-responsive { .table-responsive { border-radius: 0.5rem; overflow: hidden; }
border-radius: 0.5rem;
overflow-x: auto;
-webkit-overflow-scrolling: touch;
}
.message-stack { position: sticky; top: 1rem; z-index: 100; } .message-stack { position: sticky; top: 1rem; z-index: 100; }
/* Mobile-friendly table improvements */
.table-responsive table {
min-width: 600px;
}
.table-responsive table th,
.table-responsive table td {
white-space: nowrap;
}
/* Allow text wrapping for description columns */
.table-responsive table td.text-wrap {
white-space: normal;
min-width: 200px;
}
code { font-size: 0.85rem; } code { font-size: 0.85rem; }
code { code {
@@ -409,22 +389,8 @@ code {
.bucket-table th:last-child { white-space: nowrap; } .bucket-table th:last-child { white-space: nowrap; }
.object-key { .object-key {
max-width: 0; word-break: break-word;
width: 100%; max-width: 32rem;
overflow: hidden;
text-overflow: ellipsis;
}
.object-key .fw-medium {
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
}
.object-key .text-muted {
white-space: nowrap;
overflow: hidden;
text-overflow: ellipsis;
} }
.preview-card { top: 1rem; } .preview-card { top: 1rem; }
@@ -551,22 +517,6 @@ code {
overflow-y: auto; overflow-y: auto;
} }
.objects-table-container thead {
position: sticky;
top: 0;
z-index: 10;
}
.objects-table-container thead th {
background-color: #f8f9fa;
border-bottom: 1px solid var(--myfsio-card-border);
box-shadow: 0 1px 2px rgba(0, 0, 0, 0.05);
}
[data-theme='dark'] .objects-table-container thead th {
background-color: #1e293b;
}
.btn-group form { display: inline; } .btn-group form { display: inline; }
.font-monospace { font-family: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, monospace; } .font-monospace { font-family: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, monospace; }
@@ -1587,41 +1537,6 @@ pre code {
position: relative !important; position: relative !important;
top: 0 !important; top: 0 !important;
} }
/* Ensure tables are scrollable on mobile */
.card-body .table-responsive {
margin: -1rem;
padding: 0;
width: calc(100% + 2rem);
}
.card-body .table-responsive table {
margin-bottom: 0;
}
/* IAM users table mobile adjustments */
.table th,
.table td {
padding: 0.5rem 0.75rem;
}
/* Better touch scrolling indicator */
.table-responsive::after {
content: '';
position: absolute;
top: 0;
right: 0;
bottom: 0;
width: 20px;
background: linear-gradient(to left, var(--myfsio-card-bg), transparent);
pointer-events: none;
opacity: 0;
transition: opacity 0.3s;
}
.table-responsive:not(:hover)::after {
opacity: 0.8;
}
} }
*:focus-visible { *:focus-visible {


@@ -199,7 +199,7 @@
})(); })();
</script> </script>
<script> <script>
// Toast utility
window.showToast = function(message, title = 'Notification', type = 'info') { window.showToast = function(message, title = 'Notification', type = 'info') {
const toastEl = document.getElementById('liveToast'); const toastEl = document.getElementById('liveToast');
const toastTitle = document.getElementById('toastTitle'); const toastTitle = document.getElementById('toastTitle');
@@ -208,6 +208,7 @@
toastTitle.textContent = title; toastTitle.textContent = title;
toastMessage.textContent = message; toastMessage.textContent = message;
// Reset classes
toastEl.classList.remove('text-bg-primary', 'text-bg-success', 'text-bg-danger', 'text-bg-warning'); toastEl.classList.remove('text-bg-primary', 'text-bg-success', 'text-bg-danger', 'text-bg-warning');
if (type === 'success') toastEl.classList.add('text-bg-success'); if (type === 'success') toastEl.classList.add('text-bg-success');
@@ -220,11 +221,13 @@
</script> </script>
<script> <script>
(function () { (function () {
// Show flashed messages as toasts
{% with messages = get_flashed_messages(with_categories=true) %} {% with messages = get_flashed_messages(with_categories=true) %}
{% if messages %} {% if messages %}
{% for category, message in messages %} {% for category, message in messages %}
// Map Flask categories to Toast types
// Flask: success, danger, warning, info
// Toast: success, error, warning, info
var type = "{{ category }}"; var type = "{{ category }}";
if (type === "danger") type = "error"; if (type === "danger") type = "error";
window.showToast({{ message | tojson | safe }}, "Notification", type); window.showToast({{ message | tojson | safe }}, "Notification", type);

File diff suppressed because it is too large.


@@ -131,7 +131,7 @@
{{ super() }} {{ super() }}
<script> <script>
(function () { (function () {
// Search functionality
const searchInput = document.getElementById('bucket-search'); const searchInput = document.getElementById('bucket-search');
const bucketItems = document.querySelectorAll('.bucket-item'); const bucketItems = document.querySelectorAll('.bucket-item');
const noBucketsMsg = document.querySelector('.text-center.py-5'); // The "No buckets found" empty state const noBucketsMsg = document.querySelector('.text-center.py-5'); // The "No buckets found" empty state
@@ -153,6 +153,7 @@
}); });
} }
// View toggle functionality
const viewGrid = document.getElementById('view-grid'); const viewGrid = document.getElementById('view-grid');
const viewList = document.getElementById('view-list'); const viewList = document.getElementById('view-list');
const container = document.getElementById('buckets-container'); const container = document.getElementById('buckets-container');
@@ -167,7 +168,8 @@
}); });
cards.forEach(card => { cards.forEach(card => {
card.classList.remove('h-100'); card.classList.remove('h-100');
// Optional: add flex-row to the card-body content for a horizontal layout.
// For now, full-width stacked cards serve as the list view.
}); });
localStorage.setItem('bucket-view-pref', 'list'); localStorage.setItem('bucket-view-pref', 'list');
} else { } else {
@@ -186,6 +188,7 @@
viewGrid.addEventListener('change', () => setView('grid')); viewGrid.addEventListener('change', () => setView('grid'));
viewList.addEventListener('change', () => setView('list')); viewList.addEventListener('change', () => setView('list'));
// Restore preference
const pref = localStorage.getItem('bucket-view-pref'); const pref = localStorage.getItem('bucket-view-pref');
if (pref === 'list') { if (pref === 'list') {
viewList.checked = true; viewList.checked = true;


@@ -104,7 +104,6 @@
<table class="table table-hover align-middle mb-0"> <table class="table table-hover align-middle mb-0">
<thead class="table-light"> <thead class="table-light">
<tr> <tr>
<th scope="col" style="width: 50px;">Status</th>
<th scope="col">Name</th> <th scope="col">Name</th>
<th scope="col">Endpoint</th> <th scope="col">Endpoint</th>
<th scope="col">Region</th> <th scope="col">Region</th>
@@ -114,12 +113,7 @@
</thead> </thead>
<tbody> <tbody>
{% for conn in connections %} {% for conn in connections %}
<tr data-connection-id="{{ conn.id }}"> <tr>
<td class="text-center">
<span class="connection-status" data-status="checking" title="Checking...">
<span class="spinner-border spinner-border-sm text-muted" role="status" style="width: 12px; height: 12px;"></span>
</span>
</td>
<td> <td>
<div class="d-flex align-items-center gap-2"> <div class="d-flex align-items-center gap-2">
<div class="connection-icon"> <div class="connection-icon">
@@ -187,6 +181,7 @@
</div> </div>
</div> </div>
<!-- Edit Connection Modal -->
<div class="modal fade" id="editConnectionModal" tabindex="-1" aria-hidden="true"> <div class="modal fade" id="editConnectionModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered"> <div class="modal-dialog modal-dialog-centered">
<div class="modal-content"> <div class="modal-content">
@@ -252,6 +247,7 @@
</div> </div>
</div> </div>
<!-- Delete Connection Modal -->
<div class="modal fade" id="deleteConnectionModal" tabindex="-1" aria-hidden="true"> <div class="modal fade" id="deleteConnectionModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered"> <div class="modal-dialog modal-dialog-centered">
<div class="modal-content"> <div class="modal-content">
@@ -301,17 +297,14 @@
} }
} }
// Test Connection Logic
async function testConnection(formId, resultId) { async function testConnection(formId, resultId) {
const form = document.getElementById(formId); const form = document.getElementById(formId);
const resultDiv = document.getElementById(resultId); const resultDiv = document.getElementById(resultId);
const formData = new FormData(form); const formData = new FormData(form);
const data = Object.fromEntries(formData.entries()); const data = Object.fromEntries(formData.entries());
resultDiv.innerHTML = '<div class="text-info"><span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Testing connection...</div>'; resultDiv.innerHTML = '<div class="text-info"><span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Testing...</div>';
// Use AbortController to timeout client-side after 20 seconds
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 20000);
try { try {
const response = await fetch("{{ url_for('ui.test_connection') }}", { const response = await fetch("{{ url_for('ui.test_connection') }}", {
@@ -320,44 +313,17 @@
"Content-Type": "application/json", "Content-Type": "application/json",
"X-CSRFToken": "{{ csrf_token() }}" "X-CSRFToken": "{{ csrf_token() }}"
}, },
body: JSON.stringify(data), body: JSON.stringify(data)
signal: controller.signal
}); });
clearTimeout(timeoutId);
const result = await response.json(); const result = await response.json();
if (response.ok) { if (response.ok) {
resultDiv.innerHTML = `<div class="text-success"> resultDiv.innerHTML = `<div class="text-success"><i class="bi bi-check-circle"></i> ${result.message}</div>`;
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
</svg>
${result.message}
</div>`;
} else { } else {
resultDiv.innerHTML = `<div class="text-danger"> resultDiv.innerHTML = `<div class="text-danger"><i class="bi bi-exclamation-circle"></i> ${result.message}</div>`;
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/>
</svg>
${result.message}
</div>`;
} }
} catch (error) { } catch (error) {
clearTimeout(timeoutId); resultDiv.innerHTML = `<div class="text-danger"><i class="bi bi-exclamation-circle"></i> Connection failed</div>`;
if (error.name === 'AbortError') {
resultDiv.innerHTML = `<div class="text-danger">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/>
</svg>
Connection test timed out - endpoint may be unreachable
</div>`;
} else {
resultDiv.innerHTML = `<div class="text-danger">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/>
</svg>
Connection failed: Network error
</div>`;
}
} }
} }
@@ -369,6 +335,7 @@
testConnection('editConnectionForm', 'editTestResult'); testConnection('editConnectionForm', 'editTestResult');
}); });
// Modal Event Listeners
const editModal = document.getElementById('editConnectionModal'); const editModal = document.getElementById('editConnectionModal');
editModal.addEventListener('show.bs.modal', event => { editModal.addEventListener('show.bs.modal', event => {
const button = event.relatedTarget; const button = event.relatedTarget;
@@ -395,54 +362,5 @@
const form = document.getElementById('deleteConnectionForm'); const form = document.getElementById('deleteConnectionForm');
form.action = "{{ url_for('ui.delete_connection', connection_id='CONN_ID') }}".replace('CONN_ID', id); form.action = "{{ url_for('ui.delete_connection', connection_id='CONN_ID') }}".replace('CONN_ID', id);
}); });
// Check connection health for each connection in the table
// Uses staggered requests to avoid overwhelming the server
async function checkConnectionHealth(connectionId, statusEl) {
try {
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 15000);
const response = await fetch(`/ui/connections/${connectionId}/health`, {
signal: controller.signal
});
clearTimeout(timeoutId);
const data = await response.json();
if (data.healthy) {
statusEl.innerHTML = `
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-success" viewBox="0 0 16 16">
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
</svg>`;
statusEl.setAttribute('data-status', 'healthy');
statusEl.setAttribute('title', 'Connected');
} else {
statusEl.innerHTML = `
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-danger" viewBox="0 0 16 16">
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/>
</svg>`;
statusEl.setAttribute('data-status', 'unhealthy');
statusEl.setAttribute('title', data.error || 'Unreachable');
}
} catch (error) {
statusEl.innerHTML = `
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-warning" viewBox="0 0 16 16">
<path d="M8.982 1.566a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566zM8 5c.535 0 .954.462.9.995l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995A.905.905 0 0 1 8 5zm.002 6a1 1 0 1 1 0 2 1 1 0 0 1 0-2z"/>
</svg>`;
statusEl.setAttribute('data-status', 'unknown');
statusEl.setAttribute('title', 'Could not check status');
}
}
// Stagger health checks to avoid all requests at once
const connectionRows = document.querySelectorAll('tr[data-connection-id]');
connectionRows.forEach((row, index) => {
const connectionId = row.getAttribute('data-connection-id');
const statusEl = row.querySelector('.connection-status');
if (statusEl) {
// Stagger requests by 200ms each
setTimeout(() => checkConnectionHealth(connectionId, statusEl), index * 200);
}
});
</script> </script>
{% endblock %} {% endblock %}

View File

@@ -47,9 +47,9 @@ python run.py --mode ui
<table class="table table-sm table-bordered small mb-0"> <table class="table table-sm table-bordered small mb-0">
<thead class="table-light"> <thead class="table-light">
<tr> <tr>
<th style="min-width: 180px;">Variable</th> <th>Variable</th>
<th style="min-width: 120px;">Default</th> <th>Default</th>
<th class="text-wrap" style="min-width: 250px;">Description</th> <th>Description</th>
</tr> </tr>
</thead> </thead>
<tbody> <tbody>
@@ -255,15 +255,6 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
<li>Progress rows highlight retries, throughput, and completion even if you close the modal.</li> <li>Progress rows highlight retries, throughput, and completion even if you close the modal.</li>
</ul> </ul>
</div> </div>
<div>
<h3 class="h6 text-uppercase text-muted">Object browser</h3>
<ul>
<li>Navigate folder hierarchies using breadcrumbs. Objects with <code>/</code> in keys display as folders (see the sketch after this list).</li>
<li>Infinite scroll loads more objects automatically. Choose a batch size (50–250) from the footer dropdown.</li>
<li>Bulk select objects for multi-delete or multi-download. Filter by name using the search box.</li>
<li>If loading fails, click <strong>Retry</strong> to attempt again—no page refresh needed.</li>
</ul>
</div>
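The removed help text above describes how the object browser turned `/`-delimited keys into folders. A minimal sketch of that grouping idea in Python, assuming only a flat list of key strings; the helper name and return shape are illustrative, not the project's actual code:

```python
def group_keys(keys, prefix=""):
    """Split keys under `prefix` into immediate folders and plain objects.

    Mirrors S3 delimiter semantics: any key with a further "/" after the
    prefix collapses into a single folder entry (a "common prefix").
    """
    folders, objects = set(), []
    for key in keys:
        if not key.startswith(prefix):
            continue
        rest = key[len(prefix):]
        if "/" in rest:
            folders.add(prefix + rest.split("/", 1)[0] + "/")
        else:
            objects.append(key)
    return sorted(folders), objects


# "logs/" becomes a folder entry, "readme.txt" stays a plain object:
print(group_keys(["logs/a.log", "logs/b.log", "readme.txt"]))
# (['logs/'], ['readme.txt'])
```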
<div> <div>
<h3 class="h6 text-uppercase text-muted">Object details</h3> <h3 class="h6 text-uppercase text-muted">Object details</h3>
<ul> <ul>

View File

@@ -203,6 +203,7 @@
{% endif %} {% endif %}
</div> </div>
<!-- Create User Modal -->
<div class="modal fade" id="createUserModal" tabindex="-1" aria-hidden="true"> <div class="modal fade" id="createUserModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered"> <div class="modal-dialog modal-dialog-centered">
<div class="modal-content"> <div class="modal-content">
@@ -251,6 +252,7 @@
</div> </div>
</div> </div>
<!-- Policy Editor Modal -->
<div class="modal fade" id="policyEditorModal" tabindex="-1" aria-hidden="true"> <div class="modal fade" id="policyEditorModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-lg modal-dialog-centered"> <div class="modal-dialog modal-lg modal-dialog-centered">
<div class="modal-content"> <div class="modal-content">
@@ -301,6 +303,7 @@
</div> </div>
</div> </div>
<!-- Edit User Modal -->
<div class="modal fade" id="editUserModal" tabindex="-1" aria-hidden="true"> <div class="modal fade" id="editUserModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered"> <div class="modal-dialog modal-dialog-centered">
<div class="modal-content"> <div class="modal-content">
@@ -335,6 +338,7 @@
</div> </div>
</div> </div>
<!-- Delete User Modal -->
<div class="modal fade" id="deleteUserModal" tabindex="-1" aria-hidden="true"> <div class="modal fade" id="deleteUserModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered"> <div class="modal-dialog modal-dialog-centered">
<div class="modal-content"> <div class="modal-content">
@@ -378,6 +382,7 @@
</div> </div>
</div> </div>
<!-- Rotate Secret Modal -->
<div class="modal fade" id="rotateSecretModal" tabindex="-1" aria-hidden="true"> <div class="modal fade" id="rotateSecretModal" tabindex="-1" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered"> <div class="modal-dialog modal-dialog-centered">
<div class="modal-content"> <div class="modal-content">
@@ -482,6 +487,7 @@
const iamUsersData = document.getElementById('iamUsersJson'); const iamUsersData = document.getElementById('iamUsersJson');
const users = iamUsersData ? JSON.parse(iamUsersData.textContent || '[]') : []; const users = iamUsersData ? JSON.parse(iamUsersData.textContent || '[]') : [];
// Policy Editor Logic
const policyModalEl = document.getElementById('policyEditorModal'); const policyModalEl = document.getElementById('policyEditorModal');
const policyModal = new bootstrap.Modal(policyModalEl); const policyModal = new bootstrap.Modal(policyModalEl);
const userLabelEl = document.getElementById('policyEditorUserLabel'); const userLabelEl = document.getElementById('policyEditorUserLabel');
@@ -528,6 +534,7 @@
button.addEventListener('click', () => applyTemplate(button.dataset.policyTemplate)); button.addEventListener('click', () => applyTemplate(button.dataset.policyTemplate));
}); });
// Create User modal template buttons
const createUserPoliciesEl = document.getElementById('createUserPolicies'); const createUserPoliciesEl = document.getElementById('createUserPolicies');
const createTemplateButtons = document.querySelectorAll('[data-create-policy-template]'); const createTemplateButtons = document.querySelectorAll('[data-create-policy-template]');
@@ -584,6 +591,7 @@
}); });
}); });
// Edit User Logic
const editUserModal = new bootstrap.Modal(document.getElementById('editUserModal')); const editUserModal = new bootstrap.Modal(document.getElementById('editUserModal'));
const editUserForm = document.getElementById('editUserForm'); const editUserForm = document.getElementById('editUserForm');
const editUserDisplayName = document.getElementById('editUserDisplayName'); const editUserDisplayName = document.getElementById('editUserDisplayName');
@@ -598,6 +606,7 @@
}); });
}); });
// Delete User Logic
const deleteUserModal = new bootstrap.Modal(document.getElementById('deleteUserModal')); const deleteUserModal = new bootstrap.Modal(document.getElementById('deleteUserModal'));
const deleteUserForm = document.getElementById('deleteUserForm'); const deleteUserForm = document.getElementById('deleteUserForm');
const deleteUserLabel = document.getElementById('deleteUserLabel'); const deleteUserLabel = document.getElementById('deleteUserLabel');
@@ -619,6 +628,7 @@
}); });
}); });
// Rotate Secret Logic
const rotateSecretModal = new bootstrap.Modal(document.getElementById('rotateSecretModal')); const rotateSecretModal = new bootstrap.Modal(document.getElementById('rotateSecretModal'));
const rotateUserLabel = document.getElementById('rotateUserLabel'); const rotateUserLabel = document.getElementById('rotateUserLabel');
const confirmRotateBtn = document.getElementById('confirmRotateBtn'); const confirmRotateBtn = document.getElementById('confirmRotateBtn');
@@ -635,6 +645,7 @@
currentRotateKey = btn.dataset.rotateUser; currentRotateKey = btn.dataset.rotateUser;
rotateUserLabel.textContent = currentRotateKey; rotateUserLabel.textContent = currentRotateKey;
// Reset Modal State
rotateSecretConfirm.classList.remove('d-none'); rotateSecretConfirm.classList.remove('d-none');
rotateSecretResult.classList.add('d-none'); rotateSecretResult.classList.add('d-none');
confirmRotateBtn.classList.remove('d-none'); confirmRotateBtn.classList.remove('d-none');
@@ -669,6 +680,7 @@
const data = await response.json(); const data = await response.json();
newSecretKeyInput.value = data.secret_key; newSecretKeyInput.value = data.secret_key;
// Show Result
rotateSecretConfirm.classList.add('d-none'); rotateSecretConfirm.classList.add('d-none');
rotateSecretResult.classList.remove('d-none'); rotateSecretResult.classList.remove('d-none');
confirmRotateBtn.classList.add('d-none'); confirmRotateBtn.classList.add('d-none');

View File

@@ -38,7 +38,7 @@ def test_unicode_bucket_and_object_names(tmp_path: Path):
assert storage.get_object_path("unicode-test", key).exists() assert storage.get_object_path("unicode-test", key).exists()
# Verify listing # Verify listing
objects = storage.list_objects_all("unicode-test") objects = storage.list_objects("unicode-test")
assert any(o.key == key for o in objects) assert any(o.key == key for o in objects)
def test_special_characters_in_metadata(tmp_path: Path): def test_special_characters_in_metadata(tmp_path: Path):

View File

@@ -220,7 +220,7 @@ def test_bucket_config_filename_allowed(tmp_path):
storage.create_bucket("demo") storage.create_bucket("demo")
storage.put_object("demo", ".bucket.json", io.BytesIO(b"{}")) storage.put_object("demo", ".bucket.json", io.BytesIO(b"{}"))
objects = storage.list_objects_all("demo") objects = storage.list_objects("demo")
assert any(meta.key == ".bucket.json" for meta in objects) assert any(meta.key == ".bucket.json" for meta in objects)

View File

@@ -62,7 +62,7 @@ def test_bulk_delete_json_route(tmp_path: Path):
assert set(payload["deleted"]) == {"first.txt", "missing.txt"} assert set(payload["deleted"]) == {"first.txt", "missing.txt"}
assert payload["errors"] == [] assert payload["errors"] == []
listing = storage.list_objects_all("demo") listing = storage.list_objects("demo")
assert {meta.key for meta in listing} == {"second.txt"} assert {meta.key for meta in listing} == {"second.txt"}
@@ -92,5 +92,5 @@ def test_bulk_delete_validation(tmp_path: Path):
assert limit_response.status_code == 400 assert limit_response.status_code == 400
assert limit_response.get_json()["status"] == "error" assert limit_response.get_json()["status"] == "error"
still_there = storage.list_objects_all("demo") still_there = storage.list_objects("demo")
assert {meta.key for meta in still_there} == {"keep.txt"} assert {meta.key for meta in still_there} == {"keep.txt"}
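These listing tests repeat the same pattern: list the bucket, then assert a key is or is not present. A small hypothetical helper could capture that, assuming `list_objects` returns metadata objects exposing a `.key` attribute, as the surrounding assertions imply:

```python
def bucket_keys(storage, bucket: str) -> set[str]:
    """Collect the keys currently visible in a bucket.

    Assumes `storage.list_objects(bucket)` returns metadata objects with a
    `.key` attribute, as the assertions in these tests imply.
    """
    return {meta.key for meta in storage.list_objects(bucket)}


# e.g. assert bucket_keys(storage, "demo") == {"keep.txt"}
```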

View File

@@ -1,183 +0,0 @@
"""Tests for UI pagination of bucket objects."""
import json
from io import BytesIO
from pathlib import Path
import pytest
from app import create_app
def _make_app(tmp_path: Path):
"""Create an app for testing."""
storage_root = tmp_path / "data"
iam_config = tmp_path / "iam.json"
bucket_policies = tmp_path / "bucket_policies.json"
iam_payload = {
"users": [
{
"access_key": "test",
"secret_key": "secret",
"display_name": "Test User",
"policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy"]}],
},
]
}
iam_config.write_text(json.dumps(iam_payload))
flask_app = create_app(
{
"TESTING": True,
"WTF_CSRF_ENABLED": False,
"STORAGE_ROOT": storage_root,
"IAM_CONFIG": iam_config,
"BUCKET_POLICY_PATH": bucket_policies,
}
)
return flask_app
class TestPaginatedObjectListing:
"""Test paginated object listing API."""
def test_objects_api_returns_paginated_results(self, tmp_path):
"""Objects API should return paginated results."""
app = _make_app(tmp_path)
storage = app.extensions["object_storage"]
storage.create_bucket("test-bucket")
# Create 10 test objects
for i in range(10):
storage.put_object("test-bucket", f"file{i:02d}.txt", BytesIO(b"content"))
with app.test_client() as client:
# Login first
client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
# Request first page of 3 objects
resp = client.get("/ui/buckets/test-bucket/objects?max_keys=3")
assert resp.status_code == 200
data = resp.get_json()
assert len(data["objects"]) == 3
assert data["is_truncated"] is True
assert data["next_continuation_token"] is not None
assert data["total_count"] == 10
def test_objects_api_pagination_continuation(self, tmp_path):
"""Objects API should support continuation tokens."""
app = _make_app(tmp_path)
storage = app.extensions["object_storage"]
storage.create_bucket("test-bucket")
# Create 5 test objects
for i in range(5):
storage.put_object("test-bucket", f"file{i:02d}.txt", BytesIO(b"content"))
with app.test_client() as client:
client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
# Get first page
resp = client.get("/ui/buckets/test-bucket/objects?max_keys=2")
assert resp.status_code == 200
data = resp.get_json()
first_page_keys = [obj["key"] for obj in data["objects"]]
assert len(first_page_keys) == 2
assert data["is_truncated"] is True
# Get second page
token = data["next_continuation_token"]
resp = client.get(f"/ui/buckets/test-bucket/objects?max_keys=2&continuation_token={token}")
assert resp.status_code == 200
data = resp.get_json()
second_page_keys = [obj["key"] for obj in data["objects"]]
assert len(second_page_keys) == 2
# No overlap between pages
assert set(first_page_keys).isdisjoint(set(second_page_keys))
def test_objects_api_prefix_filter(self, tmp_path):
"""Objects API should support prefix filtering."""
app = _make_app(tmp_path)
storage = app.extensions["object_storage"]
storage.create_bucket("test-bucket")
# Create objects with different prefixes
storage.put_object("test-bucket", "logs/access.log", BytesIO(b"log"))
storage.put_object("test-bucket", "logs/error.log", BytesIO(b"log"))
storage.put_object("test-bucket", "data/file.txt", BytesIO(b"data"))
with app.test_client() as client:
client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
# Filter by prefix
resp = client.get("/ui/buckets/test-bucket/objects?prefix=logs/")
assert resp.status_code == 200
data = resp.get_json()
keys = [obj["key"] for obj in data["objects"]]
assert all(k.startswith("logs/") for k in keys)
assert len(keys) == 2
def test_objects_api_requires_authentication(self, tmp_path):
"""Objects API should require login."""
app = _make_app(tmp_path)
storage = app.extensions["object_storage"]
storage.create_bucket("test-bucket")
with app.test_client() as client:
# Don't login
resp = client.get("/ui/buckets/test-bucket/objects")
# Should redirect to login
assert resp.status_code == 302
assert "/ui/login" in resp.headers.get("Location", "")
def test_objects_api_returns_object_metadata(self, tmp_path):
"""Objects API should return complete object metadata."""
app = _make_app(tmp_path)
storage = app.extensions["object_storage"]
storage.create_bucket("test-bucket")
storage.put_object("test-bucket", "test.txt", BytesIO(b"test content"))
with app.test_client() as client:
client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
resp = client.get("/ui/buckets/test-bucket/objects")
assert resp.status_code == 200
data = resp.get_json()
assert len(data["objects"]) == 1
obj = data["objects"][0]
# Check all expected fields
assert obj["key"] == "test.txt"
assert obj["size"] == 12 # len("test content")
assert "last_modified" in obj
assert "last_modified_display" in obj
assert "etag" in obj
assert "preview_url" in obj
assert "download_url" in obj
assert "delete_endpoint" in obj
def test_bucket_detail_page_loads_without_objects(self, tmp_path):
"""Bucket detail page should load even with many objects."""
app = _make_app(tmp_path)
storage = app.extensions["object_storage"]
storage.create_bucket("test-bucket")
# Create many objects
for i in range(100):
storage.put_object("test-bucket", f"file{i:03d}.txt", BytesIO(b"x"))
with app.test_client() as client:
client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
# The page should load quickly (objects loaded via JS)
resp = client.get("/ui/buckets/test-bucket")
assert resp.status_code == 200
html = resp.data.decode("utf-8")
# Should have the JavaScript loading infrastructure
assert "loadObjects" in html or "objectsApiUrl" in html

View File

@@ -70,12 +70,8 @@ def test_ui_bucket_policy_enforcement_toggle(tmp_path: Path, enforce: bool):
assert b"Access denied by bucket policy" in response.data assert b"Access denied by bucket policy" in response.data
else: else:
assert response.status_code == 200 assert response.status_code == 200
assert b"vid.mp4" in response.data
assert b"Access denied by bucket policy" not in response.data assert b"Access denied by bucket policy" not in response.data
# Objects are now loaded via async API - check the objects endpoint
objects_response = client.get("/ui/buckets/testbucket/objects")
assert objects_response.status_code == 200
data = objects_response.get_json()
assert any(obj["key"] == "vid.mp4" for obj in data["objects"])
def test_ui_bucket_policy_disabled_by_default(tmp_path: Path): def test_ui_bucket_policy_disabled_by_default(tmp_path: Path):
@@ -113,9 +109,5 @@ def test_ui_bucket_policy_disabled_by_default(tmp_path: Path):
client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True) client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
response = client.get("/ui/buckets/testbucket", follow_redirects=True) response = client.get("/ui/buckets/testbucket", follow_redirects=True)
assert response.status_code == 200 assert response.status_code == 200
assert b"vid.mp4" in response.data
assert b"Access denied by bucket policy" not in response.data assert b"Access denied by bucket policy" not in response.data
# Objects are now loaded via async API - check the objects endpoint
objects_response = client.get("/ui/buckets/testbucket/objects")
assert objects_response.status_code == 200
data = objects_response.get_json()
assert any(obj["key"] == "vid.mp4" for obj in data["objects"])