diff --git a/README.md b/README.md
index 93e077f..1f6519d 100644
--- a/README.md
+++ b/README.md
@@ -86,7 +86,7 @@ Presigned URLs follow the AWS CLI playbook:
| `AWS_REGION` | `us-east-1` | Region used in Signature V4 scope |
| `AWS_SERVICE` | `s3` | Service used in Signature V4 scope |
-> Buckets now live directly under `data/` while system metadata (versions, IAM, bucket policies, multipart uploads, etc.) lives in `data/.myfsio.sys`. Existing installs can keep their environment variables, but the defaults now match MinIO's `data/.system` pattern for easier bind-mounting.
+> Buckets now live directly under `data/` while system metadata (versions, IAM, bucket policies, multipart uploads, etc.) lives in `data/.myfsio.sys`.
## API Cheatsheet (IAM headers required)
diff --git a/app/__init__.py b/app/__init__.py
index 2a95281..946668d 100644
--- a/app/__init__.py
+++ b/app/__init__.py
@@ -18,8 +18,10 @@ from werkzeug.middleware.proxy_fix import ProxyFix
from .bucket_policies import BucketPolicyStore
from .config import AppConfig
from .connections import ConnectionStore
+from .encryption import EncryptionManager
from .extensions import limiter, csrf
from .iam import IamService
+from .kms import KMSManager
from .replication import ReplicationManager
from .secret_store import EphemeralSecretStore
from .storage import ObjectStorage
@@ -77,6 +79,26 @@ def create_app(
connections = ConnectionStore(connections_path)
replication = ReplicationManager(storage, connections, replication_rules_path)
+
+ # Initialize encryption and KMS
+ encryption_config = {
+ "encryption_enabled": app.config.get("ENCRYPTION_ENABLED", False),
+ "encryption_master_key_path": app.config.get("ENCRYPTION_MASTER_KEY_PATH"),
+ "default_encryption_algorithm": app.config.get("DEFAULT_ENCRYPTION_ALGORITHM", "AES256"),
+ }
+ encryption_manager = EncryptionManager(encryption_config)
+
+ kms_manager = None
+ if app.config.get("KMS_ENABLED", False):
+ kms_keys_path = Path(app.config.get("KMS_KEYS_PATH", ""))
+ kms_master_key_path = Path(app.config.get("ENCRYPTION_MASTER_KEY_PATH", ""))
+ kms_manager = KMSManager(kms_keys_path, kms_master_key_path)
+ encryption_manager.set_kms_provider(kms_manager)
+
+ # Wrap storage with encryption layer if encryption is enabled
+ if app.config.get("ENCRYPTION_ENABLED", False):
+ from .encrypted_storage import EncryptedObjectStorage
+ storage = EncryptedObjectStorage(storage, encryption_manager)
app.extensions["object_storage"] = storage
app.extensions["iam"] = iam
@@ -85,6 +107,8 @@ def create_app(
app.extensions["limiter"] = limiter
app.extensions["connections"] = connections
app.extensions["replication"] = replication
+ app.extensions["encryption"] = encryption_manager
+ app.extensions["kms"] = kms_manager
@app.errorhandler(500)
def internal_error(error):
@@ -119,9 +143,12 @@ def create_app(
if include_api:
from .s3_api import s3_api_bp
+ from .kms_api import kms_api_bp
app.register_blueprint(s3_api_bp)
+ app.register_blueprint(kms_api_bp)
csrf.exempt(s3_api_bp)
+ csrf.exempt(kms_api_bp)
if include_ui:
from .ui import ui_bp
diff --git a/app/bucket_policies.py b/app/bucket_policies.py
index ce2b894..73a28b7 100644
--- a/app/bucket_policies.py
+++ b/app/bucket_policies.py
@@ -11,17 +11,51 @@ from typing import Any, Dict, Iterable, List, Optional, Sequence
RESOURCE_PREFIX = "arn:aws:s3:::"
ACTION_ALIASES = {
- "s3:getobject": "read",
- "s3:getobjectversion": "read",
+ # List actions
"s3:listbucket": "list",
"s3:listallmybuckets": "list",
+ "s3:listbucketversions": "list",
+ "s3:listmultipartuploads": "list",
+ "s3:listparts": "list",
+ # Read actions
+ "s3:getobject": "read",
+ "s3:getobjectversion": "read",
+ "s3:getobjecttagging": "read",
+ "s3:getobjectversiontagging": "read",
+ "s3:getobjectacl": "read",
+ "s3:getbucketversioning": "read",
+ "s3:headobject": "read",
+ "s3:headbucket": "read",
+ # Write actions
"s3:putobject": "write",
"s3:createbucket": "write",
+ "s3:putobjecttagging": "write",
+ "s3:putbucketversioning": "write",
+ "s3:createmultipartupload": "write",
+ "s3:uploadpart": "write",
+ "s3:completemultipartupload": "write",
+ "s3:abortmultipartupload": "write",
+ "s3:copyobject": "write",
+ # Delete actions
"s3:deleteobject": "delete",
"s3:deleteobjectversion": "delete",
"s3:deletebucket": "delete",
+ "s3:deleteobjecttagging": "delete",
+ # Share actions (ACL)
"s3:putobjectacl": "share",
+ "s3:putbucketacl": "share",
+ "s3:getbucketacl": "share",
+ # Policy actions
"s3:putbucketpolicy": "policy",
+ "s3:getbucketpolicy": "policy",
+ "s3:deletebucketpolicy": "policy",
+ # Replication actions
+ "s3:getreplicationconfiguration": "replication",
+ "s3:putreplicationconfiguration": "replication",
+ "s3:deletereplicationconfiguration": "replication",
+ "s3:replicateobject": "replication",
+ "s3:replicatetags": "replication",
+ "s3:replicatedelete": "replication",
}
diff --git a/app/config.py b/app/config.py
index a5d3783..47afc96 100644
--- a/app/config.py
+++ b/app/config.py
@@ -50,6 +50,7 @@ class AppConfig:
aws_service: str
ui_enforce_bucket_policies: bool
log_level: str
+ log_to_file: bool
log_path: Path
log_max_bytes: int
log_backup_count: int
@@ -66,6 +67,11 @@ class AppConfig:
stream_chunk_size: int
multipart_min_part_size: int
bucket_stats_cache_ttl: int
+ encryption_enabled: bool
+ encryption_master_key_path: Path
+ kms_enabled: bool
+ kms_keys_path: Path
+ default_encryption_algorithm: str
@classmethod
def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
@@ -126,6 +132,7 @@ class AppConfig:
aws_service = str(_get("AWS_SERVICE", "s3"))
enforce_ui_policies = str(_get("UI_ENFORCE_BUCKET_POLICIES", "0")).lower() in {"1", "true", "yes", "on"}
log_level = str(_get("LOG_LEVEL", "INFO")).upper()
+ log_to_file = str(_get("LOG_TO_FILE", "1")).lower() in {"1", "true", "yes", "on"}
log_dir = Path(_get("LOG_DIR", PROJECT_ROOT / "logs")).resolve()
log_dir.mkdir(parents=True, exist_ok=True)
log_path = log_dir / str(_get("LOG_FILE", "app.log"))
@@ -155,6 +162,14 @@ class AppConfig:
])
session_lifetime_days = int(_get("SESSION_LIFETIME_DAYS", 30))
bucket_stats_cache_ttl = int(_get("BUCKET_STATS_CACHE_TTL", 60)) # Default 60 seconds
+
+ # Encryption settings
+ encryption_enabled = str(_get("ENCRYPTION_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
+ encryption_keys_dir = storage_root / ".myfsio.sys" / "keys"
+ encryption_master_key_path = Path(_get("ENCRYPTION_MASTER_KEY_PATH", encryption_keys_dir / "master.key")).resolve()
+ kms_enabled = str(_get("KMS_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
+ kms_keys_path = Path(_get("KMS_KEYS_PATH", encryption_keys_dir / "kms_keys.json")).resolve()
+ default_encryption_algorithm = str(_get("DEFAULT_ENCRYPTION_ALGORITHM", "AES256"))
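+
+        # Example environment (illustrative values; the variable names match the
+        # lookups above and the paths match the built-in defaults):
+        #   ENCRYPTION_ENABLED=1
+        #   ENCRYPTION_MASTER_KEY_PATH=data/.myfsio.sys/keys/master.key
+        #   KMS_ENABLED=1
+        #   KMS_KEYS_PATH=data/.myfsio.sys/keys/kms_keys.json
+        #   DEFAULT_ENCRYPTION_ALGORITHM=AES256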
return cls(storage_root=storage_root,
max_upload_size=max_upload_size,
@@ -167,6 +182,7 @@ class AppConfig:
aws_service=aws_service,
ui_enforce_bucket_policies=enforce_ui_policies,
log_level=log_level,
+ log_to_file=log_to_file,
log_path=log_path,
log_max_bytes=log_max_bytes,
log_backup_count=log_backup_count,
@@ -182,7 +198,12 @@ class AppConfig:
secret_ttl_seconds=secret_ttl_seconds,
stream_chunk_size=stream_chunk_size,
multipart_min_part_size=multipart_min_part_size,
- bucket_stats_cache_ttl=bucket_stats_cache_ttl)
+ bucket_stats_cache_ttl=bucket_stats_cache_ttl,
+ encryption_enabled=encryption_enabled,
+ encryption_master_key_path=encryption_master_key_path,
+ kms_enabled=kms_enabled,
+ kms_keys_path=kms_keys_path,
+ default_encryption_algorithm=default_encryption_algorithm)
def to_flask_config(self) -> Dict[str, Any]:
return {
@@ -204,6 +225,7 @@ class AppConfig:
"MULTIPART_MIN_PART_SIZE": self.multipart_min_part_size,
"BUCKET_STATS_CACHE_TTL": self.bucket_stats_cache_ttl,
"LOG_LEVEL": self.log_level,
+ "LOG_TO_FILE": self.log_to_file,
"LOG_FILE": str(self.log_path),
"LOG_MAX_BYTES": self.log_max_bytes,
"LOG_BACKUP_COUNT": self.log_backup_count,
@@ -213,4 +235,9 @@ class AppConfig:
"CORS_METHODS": self.cors_methods,
"CORS_ALLOW_HEADERS": self.cors_allow_headers,
"SESSION_LIFETIME_DAYS": self.session_lifetime_days,
+ "ENCRYPTION_ENABLED": self.encryption_enabled,
+ "ENCRYPTION_MASTER_KEY_PATH": str(self.encryption_master_key_path),
+ "KMS_ENABLED": self.kms_enabled,
+ "KMS_KEYS_PATH": str(self.kms_keys_path),
+ "DEFAULT_ENCRYPTION_ALGORITHM": self.default_encryption_algorithm,
}
diff --git a/app/encrypted_storage.py b/app/encrypted_storage.py
new file mode 100644
index 0000000..2acc679
--- /dev/null
+++ b/app/encrypted_storage.py
@@ -0,0 +1,276 @@
+"""Encrypted storage layer that wraps ObjectStorage with encryption support."""
+from __future__ import annotations
+
+import io
+from pathlib import Path
+from typing import Any, BinaryIO, Dict, Optional
+
+from .encryption import EncryptionManager, EncryptionMetadata, EncryptionError
+from .storage import ObjectStorage, ObjectMeta, StorageError
+
+
+class EncryptedObjectStorage:
+ """Object storage with transparent server-side encryption.
+
+ This class wraps ObjectStorage and provides transparent encryption/decryption
+ of objects based on bucket encryption configuration.
+
+ Encryption is applied when:
+ 1. Bucket has default encryption configured (SSE-S3 or SSE-KMS)
+ 2. Client explicitly requests encryption via headers
+
+ The encryption metadata is stored alongside object metadata.
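+
+    Example (illustrative sketch; assumes a configured ``ObjectStorage`` and
+    ``EncryptionManager``, bucket and key names are placeholders)::
+
+        encrypted = EncryptedObjectStorage(storage, encryption_manager)
+        encrypted.put_object("photos", "cat.jpg", io.BytesIO(b"raw bytes"),
+                             server_side_encryption="AES256")
+        data, metadata = encrypted.get_object_data("photos", "cat.jpg")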
+ """
+
+ STREAMING_THRESHOLD = 64 * 1024
+
+ def __init__(self, storage: ObjectStorage, encryption_manager: EncryptionManager):
+ self.storage = storage
+ self.encryption = encryption_manager
+
+ @property
+ def root(self) -> Path:
+ return self.storage.root
+
+ def _should_encrypt(self, bucket_name: str,
+ server_side_encryption: str | None = None) -> tuple[bool, str, str | None]:
+ """Determine if object should be encrypted.
+
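+        The bucket default-encryption configuration, when present, is expected
+        in the AWS shape, e.g.::
+
+            {"Rules": [{"ApplyServerSideEncryptionByDefault":
+                        {"SSEAlgorithm": "aws:kms", "KMSMasterKeyID": "my-key-id"}}]}
+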
+ Returns:
+ Tuple of (should_encrypt, algorithm, kms_key_id)
+ """
+ if not self.encryption.enabled:
+ return False, "", None
+
+ if server_side_encryption:
+ if server_side_encryption == "AES256":
+ return True, "AES256", None
+ elif server_side_encryption.startswith("aws:kms"):
+ parts = server_side_encryption.split(":")
+ kms_key_id = parts[2] if len(parts) > 2 else None
+ return True, "aws:kms", kms_key_id
+
+ try:
+ encryption_config = self.storage.get_bucket_encryption(bucket_name)
+ if encryption_config and encryption_config.get("Rules"):
+ rule = encryption_config["Rules"][0]
+ # AWS format: Rules[].ApplyServerSideEncryptionByDefault.SSEAlgorithm
+ sse_default = rule.get("ApplyServerSideEncryptionByDefault", {})
+ algorithm = sse_default.get("SSEAlgorithm", "AES256")
+ kms_key_id = sse_default.get("KMSMasterKeyID")
+ return True, algorithm, kms_key_id
+ except StorageError:
+ pass
+
+ return False, "", None
+
+ def _is_encrypted(self, metadata: Dict[str, str]) -> bool:
+ """Check if object is encrypted based on its metadata."""
+ return "x-amz-server-side-encryption" in metadata
+
+ def put_object(
+ self,
+ bucket_name: str,
+ object_key: str,
+ stream: BinaryIO,
+ *,
+ metadata: Optional[Dict[str, str]] = None,
+ server_side_encryption: Optional[str] = None,
+ kms_key_id: Optional[str] = None,
+ ) -> ObjectMeta:
+ """Store an object, optionally with encryption.
+
+ Args:
+ bucket_name: Name of the bucket
+ object_key: Key for the object
+ stream: Binary stream of object data
+ metadata: Optional user metadata
+ server_side_encryption: Encryption algorithm ("AES256" or "aws:kms")
+ kms_key_id: KMS key ID (for aws:kms encryption)
+
+ Returns:
+ ObjectMeta with object information
+ """
+ should_encrypt, algorithm, detected_kms_key = self._should_encrypt(
+ bucket_name, server_side_encryption
+ )
+
+ if kms_key_id is None:
+ kms_key_id = detected_kms_key
+
+ if should_encrypt:
+ data = stream.read()
+
+ try:
+ ciphertext, enc_metadata = self.encryption.encrypt_object(
+ data,
+ algorithm=algorithm,
+ kms_key_id=kms_key_id,
+ context={"bucket": bucket_name, "key": object_key},
+ )
+
+ combined_metadata = metadata.copy() if metadata else {}
+ combined_metadata.update(enc_metadata.to_dict())
+
+ encrypted_stream = io.BytesIO(ciphertext)
+ result = self.storage.put_object(
+ bucket_name,
+ object_key,
+ encrypted_stream,
+ metadata=combined_metadata,
+ )
+
+ result.metadata = combined_metadata
+ return result
+
+ except EncryptionError as exc:
+ raise StorageError(f"Encryption failed: {exc}") from exc
+ else:
+ return self.storage.put_object(
+ bucket_name,
+ object_key,
+ stream,
+ metadata=metadata,
+ )
+
+ def get_object_data(self, bucket_name: str, object_key: str) -> tuple[bytes, Dict[str, str]]:
+ """Get object data, decrypting if necessary.
+
+ Returns:
+ Tuple of (data, metadata)
+ """
+ path = self.storage.get_object_path(bucket_name, object_key)
+ metadata = self.storage.get_object_metadata(bucket_name, object_key)
+
+ with path.open("rb") as f:
+ data = f.read()
+
+ enc_metadata = EncryptionMetadata.from_dict(metadata)
+ if enc_metadata:
+ try:
+ data = self.encryption.decrypt_object(
+ data,
+ enc_metadata,
+ context={"bucket": bucket_name, "key": object_key},
+ )
+ except EncryptionError as exc:
+ raise StorageError(f"Decryption failed: {exc}") from exc
+
+ clean_metadata = {
+ k: v for k, v in metadata.items()
+ if not k.startswith("x-amz-encryption")
+ and k != "x-amz-encrypted-data-key"
+ }
+
+ return data, clean_metadata
+
+ def get_object_stream(self, bucket_name: str, object_key: str) -> tuple[BinaryIO, Dict[str, str], int]:
+ """Get object as a stream, decrypting if necessary.
+
+ Returns:
+ Tuple of (stream, metadata, original_size)
+ """
+ data, metadata = self.get_object_data(bucket_name, object_key)
+ return io.BytesIO(data), metadata, len(data)
+
+ def list_buckets(self):
+ return self.storage.list_buckets()
+
+ def bucket_exists(self, bucket_name: str) -> bool:
+ return self.storage.bucket_exists(bucket_name)
+
+ def create_bucket(self, bucket_name: str) -> None:
+ return self.storage.create_bucket(bucket_name)
+
+ def delete_bucket(self, bucket_name: str) -> None:
+ return self.storage.delete_bucket(bucket_name)
+
+ def bucket_stats(self, bucket_name: str, cache_ttl: int = 60):
+ return self.storage.bucket_stats(bucket_name, cache_ttl)
+
+ def list_objects(self, bucket_name: str):
+ return self.storage.list_objects(bucket_name)
+
+ def get_object_path(self, bucket_name: str, object_key: str):
+ return self.storage.get_object_path(bucket_name, object_key)
+
+ def get_object_metadata(self, bucket_name: str, object_key: str):
+ return self.storage.get_object_metadata(bucket_name, object_key)
+
+ def delete_object(self, bucket_name: str, object_key: str) -> None:
+ return self.storage.delete_object(bucket_name, object_key)
+
+ def purge_object(self, bucket_name: str, object_key: str) -> None:
+ return self.storage.purge_object(bucket_name, object_key)
+
+ def is_versioning_enabled(self, bucket_name: str) -> bool:
+ return self.storage.is_versioning_enabled(bucket_name)
+
+ def set_bucket_versioning(self, bucket_name: str, enabled: bool) -> None:
+ return self.storage.set_bucket_versioning(bucket_name, enabled)
+
+ def get_bucket_tags(self, bucket_name: str):
+ return self.storage.get_bucket_tags(bucket_name)
+
+ def set_bucket_tags(self, bucket_name: str, tags):
+ return self.storage.set_bucket_tags(bucket_name, tags)
+
+ def get_bucket_cors(self, bucket_name: str):
+ return self.storage.get_bucket_cors(bucket_name)
+
+ def set_bucket_cors(self, bucket_name: str, rules):
+ return self.storage.set_bucket_cors(bucket_name, rules)
+
+ def get_bucket_encryption(self, bucket_name: str):
+ return self.storage.get_bucket_encryption(bucket_name)
+
+ def set_bucket_encryption(self, bucket_name: str, config_payload):
+ return self.storage.set_bucket_encryption(bucket_name, config_payload)
+
+ def get_bucket_lifecycle(self, bucket_name: str):
+ return self.storage.get_bucket_lifecycle(bucket_name)
+
+ def set_bucket_lifecycle(self, bucket_name: str, rules):
+ return self.storage.set_bucket_lifecycle(bucket_name, rules)
+
+ def get_object_tags(self, bucket_name: str, object_key: str):
+ return self.storage.get_object_tags(bucket_name, object_key)
+
+ def set_object_tags(self, bucket_name: str, object_key: str, tags):
+ return self.storage.set_object_tags(bucket_name, object_key, tags)
+
+ def delete_object_tags(self, bucket_name: str, object_key: str):
+ return self.storage.delete_object_tags(bucket_name, object_key)
+
+ def list_object_versions(self, bucket_name: str, object_key: str):
+ return self.storage.list_object_versions(bucket_name, object_key)
+
+ def restore_object_version(self, bucket_name: str, object_key: str, version_id: str):
+ return self.storage.restore_object_version(bucket_name, object_key, version_id)
+
+ def list_orphaned_objects(self, bucket_name: str):
+ return self.storage.list_orphaned_objects(bucket_name)
+
+ def initiate_multipart_upload(self, bucket_name: str, object_key: str, *, metadata=None) -> str:
+ return self.storage.initiate_multipart_upload(bucket_name, object_key, metadata=metadata)
+
+ def upload_multipart_part(self, bucket_name: str, upload_id: str, part_number: int, stream: BinaryIO) -> str:
+ return self.storage.upload_multipart_part(bucket_name, upload_id, part_number, stream)
+
+ def complete_multipart_upload(self, bucket_name: str, upload_id: str, ordered_parts):
+ return self.storage.complete_multipart_upload(bucket_name, upload_id, ordered_parts)
+
+ def abort_multipart_upload(self, bucket_name: str, upload_id: str) -> None:
+ return self.storage.abort_multipart_upload(bucket_name, upload_id)
+
+ def list_multipart_parts(self, bucket_name: str, upload_id: str):
+ return self.storage.list_multipart_parts(bucket_name, upload_id)
+
+ def get_bucket_quota(self, bucket_name: str):
+ return self.storage.get_bucket_quota(bucket_name)
+
+ def set_bucket_quota(self, bucket_name: str, *, max_bytes=None, max_objects=None):
+ return self.storage.set_bucket_quota(bucket_name, max_bytes=max_bytes, max_objects=max_objects)
+
+ def _compute_etag(self, path: Path) -> str:
+ return self.storage._compute_etag(path)
diff --git a/app/encryption.py b/app/encryption.py
new file mode 100644
index 0000000..aa98cb4
--- /dev/null
+++ b/app/encryption.py
@@ -0,0 +1,395 @@
+"""Encryption providers for server-side and client-side encryption."""
+from __future__ import annotations
+
+import base64
+import io
+import secrets
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, BinaryIO, Dict, Generator, Optional
+
+from cryptography.hazmat.primitives.ciphers.aead import AESGCM
+
+
+class EncryptionError(Exception):
+ """Raised when encryption/decryption fails."""
+
+
+@dataclass
+class EncryptionResult:
+ """Result of encrypting data."""
+ ciphertext: bytes
+ nonce: bytes
+ key_id: str
+ encrypted_data_key: bytes
+
+
+@dataclass
+class EncryptionMetadata:
+ """Metadata stored with encrypted objects."""
+ algorithm: str
+ key_id: str
+ nonce: bytes
+ encrypted_data_key: bytes
+
+ def to_dict(self) -> Dict[str, str]:
+ return {
+ "x-amz-server-side-encryption": self.algorithm,
+ "x-amz-encryption-key-id": self.key_id,
+ "x-amz-encryption-nonce": base64.b64encode(self.nonce).decode(),
+ "x-amz-encrypted-data-key": base64.b64encode(self.encrypted_data_key).decode(),
+ }
+
+ @classmethod
+ def from_dict(cls, data: Dict[str, str]) -> Optional["EncryptionMetadata"]:
+ algorithm = data.get("x-amz-server-side-encryption")
+ if not algorithm:
+ return None
+ try:
+ return cls(
+ algorithm=algorithm,
+ key_id=data.get("x-amz-encryption-key-id", "local"),
+ nonce=base64.b64decode(data.get("x-amz-encryption-nonce", "")),
+ encrypted_data_key=base64.b64decode(data.get("x-amz-encrypted-data-key", "")),
+ )
+ except Exception:
+ return None
+
+
+class EncryptionProvider:
+ """Base class for encryption providers."""
+
+ def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
+ raise NotImplementedError
+
+ def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
+ key_id: str, context: Dict[str, str] | None = None) -> bytes:
+ raise NotImplementedError
+
+ def generate_data_key(self) -> tuple[bytes, bytes]:
+ """Generate a data key and its encrypted form.
+
+ Returns:
+ Tuple of (plaintext_key, encrypted_key)
+ """
+ raise NotImplementedError
+
+
+class LocalKeyEncryption(EncryptionProvider):
+ """SSE-S3 style encryption using a local master key.
+
+ Uses envelope encryption:
+ 1. Generate a unique data key for each object
+ 2. Encrypt the data with the data key (AES-256-GCM)
+ 3. Encrypt the data key with the master key
+ 4. Store the encrypted data key alongside the ciphertext
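+
+    Example (illustrative; the key path is a placeholder)::
+
+        provider = LocalKeyEncryption(Path("data/.myfsio.sys/keys/master.key"))
+        result = provider.encrypt(b"hello")
+        plaintext = provider.decrypt(result.ciphertext, result.nonce,
+                                     result.encrypted_data_key, result.key_id)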
+ """
+
+ KEY_ID = "local"
+
+ def __init__(self, master_key_path: Path):
+ self.master_key_path = master_key_path
+ self._master_key: bytes | None = None
+
+ @property
+ def master_key(self) -> bytes:
+ if self._master_key is None:
+ self._master_key = self._load_or_create_master_key()
+ return self._master_key
+
+ def _load_or_create_master_key(self) -> bytes:
+ """Load master key from file or generate a new one."""
+ if self.master_key_path.exists():
+ try:
+ return base64.b64decode(self.master_key_path.read_text().strip())
+ except Exception as exc:
+ raise EncryptionError(f"Failed to load master key: {exc}") from exc
+
+ key = secrets.token_bytes(32)
+ try:
+ self.master_key_path.parent.mkdir(parents=True, exist_ok=True)
+ self.master_key_path.write_text(base64.b64encode(key).decode())
+ except OSError as exc:
+ raise EncryptionError(f"Failed to save master key: {exc}") from exc
+ return key
+
+ def _encrypt_data_key(self, data_key: bytes) -> bytes:
+ """Encrypt the data key with the master key."""
+ aesgcm = AESGCM(self.master_key)
+ nonce = secrets.token_bytes(12)
+ encrypted = aesgcm.encrypt(nonce, data_key, None)
+ return nonce + encrypted
+
+ def _decrypt_data_key(self, encrypted_data_key: bytes) -> bytes:
+ """Decrypt the data key using the master key."""
+ if len(encrypted_data_key) < 12 + 32 + 16: # nonce + key + tag
+ raise EncryptionError("Invalid encrypted data key")
+ aesgcm = AESGCM(self.master_key)
+ nonce = encrypted_data_key[:12]
+ ciphertext = encrypted_data_key[12:]
+ try:
+ return aesgcm.decrypt(nonce, ciphertext, None)
+ except Exception as exc:
+ raise EncryptionError(f"Failed to decrypt data key: {exc}") from exc
+
+ def generate_data_key(self) -> tuple[bytes, bytes]:
+ """Generate a data key and its encrypted form."""
+ plaintext_key = secrets.token_bytes(32)
+ encrypted_key = self._encrypt_data_key(plaintext_key)
+ return plaintext_key, encrypted_key
+
+ def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
+ """Encrypt data using envelope encryption."""
+ data_key, encrypted_data_key = self.generate_data_key()
+
+ aesgcm = AESGCM(data_key)
+ nonce = secrets.token_bytes(12)
+ ciphertext = aesgcm.encrypt(nonce, plaintext, None)
+
+ return EncryptionResult(
+ ciphertext=ciphertext,
+ nonce=nonce,
+ key_id=self.KEY_ID,
+ encrypted_data_key=encrypted_data_key,
+ )
+
+ def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
+ key_id: str, context: Dict[str, str] | None = None) -> bytes:
+ """Decrypt data using envelope encryption."""
+ # Decrypt the data key
+ data_key = self._decrypt_data_key(encrypted_data_key)
+
+ # Decrypt the data
+ aesgcm = AESGCM(data_key)
+ try:
+ return aesgcm.decrypt(nonce, ciphertext, None)
+ except Exception as exc:
+ raise EncryptionError(f"Failed to decrypt data: {exc}") from exc
+
+
+class StreamingEncryptor:
+ """Encrypts/decrypts data in streaming fashion for large files.
+
+ For large files, we encrypt in chunks. Each chunk is encrypted with the
+ same data key but a unique nonce derived from the base nonce + chunk index.
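+
+    Encrypted stream layout (as produced by ``encrypt_stream``): a 4-byte
+    big-endian chunk count, then each chunk as a 4-byte big-endian length
+    prefix followed by its AES-GCM ciphertext (16-byte tag included).
+
+    Example (illustrative; ``key_path`` is a ``Path`` to the master key file)::
+
+        encryptor = StreamingEncryptor(LocalKeyEncryption(key_path))
+        encrypted, meta = encryptor.encrypt_stream(io.BytesIO(b"payload"))
+        restored = encryptor.decrypt_stream(encrypted, meta).read()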
+ """
+
+ CHUNK_SIZE = 64 * 1024
+ HEADER_SIZE = 4
+
+ def __init__(self, provider: EncryptionProvider, chunk_size: int = CHUNK_SIZE):
+ self.provider = provider
+ self.chunk_size = chunk_size
+
+ def _derive_chunk_nonce(self, base_nonce: bytes, chunk_index: int) -> bytes:
+ """Derive a unique nonce for each chunk."""
+ # XOR the base nonce with the chunk index
+ nonce_int = int.from_bytes(base_nonce, "big")
+ derived = nonce_int ^ chunk_index
+ return derived.to_bytes(12, "big")
+
+ def encrypt_stream(self, stream: BinaryIO,
+ context: Dict[str, str] | None = None) -> tuple[BinaryIO, EncryptionMetadata]:
+ """Encrypt a stream and return encrypted stream + metadata."""
+
+ data_key, encrypted_data_key = self.provider.generate_data_key()
+ base_nonce = secrets.token_bytes(12)
+
+ aesgcm = AESGCM(data_key)
+ encrypted_chunks = []
+ chunk_index = 0
+
+ while True:
+ chunk = stream.read(self.chunk_size)
+ if not chunk:
+ break
+
+ chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
+ encrypted_chunk = aesgcm.encrypt(chunk_nonce, chunk, None)
+
+ size_prefix = len(encrypted_chunk).to_bytes(self.HEADER_SIZE, "big")
+ encrypted_chunks.append(size_prefix + encrypted_chunk)
+ chunk_index += 1
+
+ header = chunk_index.to_bytes(4, "big")
+ encrypted_data = header + b"".join(encrypted_chunks)
+
+ metadata = EncryptionMetadata(
+ algorithm="AES256",
+ key_id=self.provider.KEY_ID if hasattr(self.provider, "KEY_ID") else "local",
+ nonce=base_nonce,
+ encrypted_data_key=encrypted_data_key,
+ )
+
+ return io.BytesIO(encrypted_data), metadata
+
+ def decrypt_stream(self, stream: BinaryIO, metadata: EncryptionMetadata) -> BinaryIO:
+ """Decrypt a stream using the provided metadata."""
+ if isinstance(self.provider, LocalKeyEncryption):
+ data_key = self.provider._decrypt_data_key(metadata.encrypted_data_key)
+ else:
+ raise EncryptionError("Unsupported provider for streaming decryption")
+
+ aesgcm = AESGCM(data_key)
+ base_nonce = metadata.nonce
+
+ chunk_count_bytes = stream.read(4)
+ if len(chunk_count_bytes) < 4:
+ raise EncryptionError("Invalid encrypted stream: missing header")
+ chunk_count = int.from_bytes(chunk_count_bytes, "big")
+
+ decrypted_chunks = []
+ for chunk_index in range(chunk_count):
+ size_bytes = stream.read(self.HEADER_SIZE)
+ if len(size_bytes) < self.HEADER_SIZE:
+ raise EncryptionError(f"Invalid encrypted stream: truncated at chunk {chunk_index}")
+ chunk_size = int.from_bytes(size_bytes, "big")
+
+ encrypted_chunk = stream.read(chunk_size)
+ if len(encrypted_chunk) < chunk_size:
+ raise EncryptionError(f"Invalid encrypted stream: incomplete chunk {chunk_index}")
+
+ chunk_nonce = self._derive_chunk_nonce(base_nonce, chunk_index)
+ try:
+ decrypted_chunk = aesgcm.decrypt(chunk_nonce, encrypted_chunk, None)
+ decrypted_chunks.append(decrypted_chunk)
+ except Exception as exc:
+ raise EncryptionError(f"Failed to decrypt chunk {chunk_index}: {exc}") from exc
+
+ return io.BytesIO(b"".join(decrypted_chunks))
+
+
+class EncryptionManager:
+ """Manages encryption providers and operations."""
+
+ def __init__(self, config: Dict[str, Any]):
+ self.config = config
+ self._local_provider: LocalKeyEncryption | None = None
+ self._kms_provider: Any = None # Set by KMS module
+ self._streaming_encryptor: StreamingEncryptor | None = None
+
+ @property
+ def enabled(self) -> bool:
+ return self.config.get("encryption_enabled", False)
+
+ @property
+ def default_algorithm(self) -> str:
+ return self.config.get("default_encryption_algorithm", "AES256")
+
+ def get_local_provider(self) -> LocalKeyEncryption:
+ if self._local_provider is None:
+ key_path = Path(self.config.get("encryption_master_key_path", "data/.myfsio.sys/keys/master.key"))
+ self._local_provider = LocalKeyEncryption(key_path)
+ return self._local_provider
+
+ def set_kms_provider(self, kms_provider: Any) -> None:
+ """Set the KMS provider (injected from kms module)."""
+ self._kms_provider = kms_provider
+
+ def get_provider(self, algorithm: str, kms_key_id: str | None = None) -> EncryptionProvider:
+ """Get the appropriate encryption provider for the algorithm."""
+ if algorithm == "AES256":
+ return self.get_local_provider()
+ elif algorithm == "aws:kms":
+ if self._kms_provider is None:
+ raise EncryptionError("KMS is not configured")
+ return self._kms_provider.get_provider(kms_key_id)
+ else:
+ raise EncryptionError(f"Unsupported encryption algorithm: {algorithm}")
+
+ def get_streaming_encryptor(self) -> StreamingEncryptor:
+ if self._streaming_encryptor is None:
+ self._streaming_encryptor = StreamingEncryptor(self.get_local_provider())
+ return self._streaming_encryptor
+
+ def encrypt_object(self, data: bytes, algorithm: str = "AES256",
+ kms_key_id: str | None = None,
+ context: Dict[str, str] | None = None) -> tuple[bytes, EncryptionMetadata]:
+ """Encrypt object data."""
+ provider = self.get_provider(algorithm, kms_key_id)
+ result = provider.encrypt(data, context)
+
+ metadata = EncryptionMetadata(
+ algorithm=algorithm,
+ key_id=result.key_id,
+ nonce=result.nonce,
+ encrypted_data_key=result.encrypted_data_key,
+ )
+
+ return result.ciphertext, metadata
+
+ def decrypt_object(self, ciphertext: bytes, metadata: EncryptionMetadata,
+ context: Dict[str, str] | None = None) -> bytes:
+ """Decrypt object data."""
+ provider = self.get_provider(metadata.algorithm, metadata.key_id)
+ return provider.decrypt(
+ ciphertext,
+ metadata.nonce,
+ metadata.encrypted_data_key,
+ metadata.key_id,
+ context,
+ )
+
+ def encrypt_stream(self, stream: BinaryIO, algorithm: str = "AES256",
+ context: Dict[str, str] | None = None) -> tuple[BinaryIO, EncryptionMetadata]:
+ """Encrypt a stream for large files."""
+ encryptor = self.get_streaming_encryptor()
+ return encryptor.encrypt_stream(stream, context)
+
+ def decrypt_stream(self, stream: BinaryIO, metadata: EncryptionMetadata) -> BinaryIO:
+ """Decrypt a stream."""
+ encryptor = self.get_streaming_encryptor()
+ return encryptor.decrypt_stream(stream, metadata)
+
+
+class ClientEncryptionHelper:
+ """Helpers for client-side encryption.
+
+ Client-side encryption is performed by the client, but this helper
+ provides key generation and materials for clients that need them.
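+
+    Example (illustrative)::
+
+        key = ClientEncryptionHelper.generate_client_key()["key"]
+        blob = ClientEncryptionHelper.encrypt_with_key(b"secret", key)
+        plaintext = ClientEncryptionHelper.decrypt_with_key(
+            blob["ciphertext"], blob["nonce"], key)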
+ """
+
+ @staticmethod
+ def generate_client_key() -> Dict[str, str]:
+ """Generate a new client encryption key."""
+ from datetime import datetime, timezone
+ key = secrets.token_bytes(32)
+ return {
+ "key": base64.b64encode(key).decode(),
+ "algorithm": "AES-256-GCM",
+ "created_at": datetime.now(timezone.utc).isoformat(),
+ }
+
+ @staticmethod
+ def encrypt_with_key(plaintext: bytes, key_b64: str) -> Dict[str, str]:
+ """Encrypt data with a client-provided key."""
+ key = base64.b64decode(key_b64)
+ if len(key) != 32:
+ raise EncryptionError("Key must be 256 bits (32 bytes)")
+
+ aesgcm = AESGCM(key)
+ nonce = secrets.token_bytes(12)
+ ciphertext = aesgcm.encrypt(nonce, plaintext, None)
+
+ return {
+ "ciphertext": base64.b64encode(ciphertext).decode(),
+ "nonce": base64.b64encode(nonce).decode(),
+ "algorithm": "AES-256-GCM",
+ }
+
+ @staticmethod
+ def decrypt_with_key(ciphertext_b64: str, nonce_b64: str, key_b64: str) -> bytes:
+ """Decrypt data with a client-provided key."""
+ key = base64.b64decode(key_b64)
+ nonce = base64.b64decode(nonce_b64)
+ ciphertext = base64.b64decode(ciphertext_b64)
+
+ if len(key) != 32:
+ raise EncryptionError("Key must be 256 bits (32 bytes)")
+
+ aesgcm = AESGCM(key)
+ try:
+ return aesgcm.decrypt(nonce, ciphertext, None)
+ except Exception as exc:
+ raise EncryptionError(f"Decryption failed: {exc}") from exc
diff --git a/app/errors.py b/app/errors.py
index 23056f1..bdf2004 100644
--- a/app/errors.py
+++ b/app/errors.py
@@ -129,6 +129,25 @@ class EntityTooLargeError(AppError):
status_code: int = 413
+@dataclass
+class QuotaExceededAppError(AppError):
+ """Bucket quota exceeded."""
+ code: str = "QuotaExceeded"
+ message: str = "The bucket quota has been exceeded"
+ status_code: int = 403
+ quota: Optional[Dict[str, Any]] = None
+ usage: Optional[Dict[str, int]] = None
+
+ def __post_init__(self):
+ if self.quota or self.usage:
+ self.details = {}
+ if self.quota:
+ self.details["quota"] = self.quota
+ if self.usage:
+ self.details["usage"] = self.usage
+ super().__post_init__()
+
+
def handle_app_error(error: AppError) -> Response:
"""Handle application errors with appropriate response format."""
log_extra = {"error_code": error.code}
@@ -163,5 +182,6 @@ def register_error_handlers(app):
ObjectNotFoundError, InvalidObjectKeyError,
AccessDeniedError, InvalidCredentialsError,
MalformedRequestError, InvalidArgumentError, EntityTooLargeError,
+ QuotaExceededAppError,
]:
app.register_error_handler(error_class, handle_app_error)
diff --git a/app/iam.py b/app/iam.py
index 1fe8d28..d8c3c8d 100644
--- a/app/iam.py
+++ b/app/iam.py
@@ -15,7 +15,7 @@ class IamError(RuntimeError):
"""Raised when authentication or authorization fails."""
-S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy"}
+S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication"}
IAM_ACTIONS = {
"iam:list_users",
"iam:create_user",
@@ -26,22 +26,59 @@ IAM_ACTIONS = {
ALLOWED_ACTIONS = (S3_ACTIONS | IAM_ACTIONS) | {"iam:*"}
ACTION_ALIASES = {
+ # List actions
"list": "list",
"s3:listbucket": "list",
"s3:listallmybuckets": "list",
+ "s3:listbucketversions": "list",
+ "s3:listmultipartuploads": "list",
+ "s3:listparts": "list",
+ # Read actions
"read": "read",
"s3:getobject": "read",
"s3:getobjectversion": "read",
+ "s3:getobjecttagging": "read",
+ "s3:getobjectversiontagging": "read",
+ "s3:getobjectacl": "read",
+ "s3:getbucketversioning": "read",
+ "s3:headobject": "read",
+ "s3:headbucket": "read",
+ # Write actions
"write": "write",
"s3:putobject": "write",
"s3:createbucket": "write",
+ "s3:putobjecttagging": "write",
+ "s3:putbucketversioning": "write",
+ "s3:createmultipartupload": "write",
+ "s3:uploadpart": "write",
+ "s3:completemultipartupload": "write",
+ "s3:abortmultipartupload": "write",
+ "s3:copyobject": "write",
+ # Delete actions
"delete": "delete",
"s3:deleteobject": "delete",
+ "s3:deleteobjectversion": "delete",
"s3:deletebucket": "delete",
+ "s3:deleteobjecttagging": "delete",
+ # Share actions (ACL)
"share": "share",
"s3:putobjectacl": "share",
+ "s3:putbucketacl": "share",
+ "s3:getbucketacl": "share",
+ # Policy actions
"policy": "policy",
"s3:putbucketpolicy": "policy",
+ "s3:getbucketpolicy": "policy",
+ "s3:deletebucketpolicy": "policy",
+ # Replication actions
+ "replication": "replication",
+ "s3:getreplicationconfiguration": "replication",
+ "s3:putreplicationconfiguration": "replication",
+ "s3:deletereplicationconfiguration": "replication",
+ "s3:replicateobject": "replication",
+ "s3:replicatetags": "replication",
+ "s3:replicatedelete": "replication",
+ # IAM actions
"iam:listusers": "iam:list_users",
"iam:createuser": "iam:create_user",
"iam:deleteuser": "iam:delete_user",
diff --git a/app/kms.py b/app/kms.py
new file mode 100644
index 0000000..4ed72da
--- /dev/null
+++ b/app/kms.py
@@ -0,0 +1,344 @@
+"""Key Management Service (KMS) for encryption key management."""
+from __future__ import annotations
+
+import base64
+import json
+import secrets
+import uuid
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+from cryptography.hazmat.primitives.ciphers.aead import AESGCM
+
+from .encryption import EncryptionError, EncryptionProvider, EncryptionResult
+
+
+@dataclass
+class KMSKey:
+ """Represents a KMS encryption key."""
+ key_id: str
+ description: str
+ created_at: str
+ enabled: bool = True
+ key_material: bytes = field(default_factory=lambda: b"", repr=False)
+
+ @property
+ def arn(self) -> str:
+ return f"arn:aws:kms:local:000000000000:key/{self.key_id}"
+
+ def to_dict(self, include_key: bool = False) -> Dict[str, Any]:
+ data = {
+ "KeyId": self.key_id,
+ "Arn": self.arn,
+ "Description": self.description,
+ "CreationDate": self.created_at,
+ "Enabled": self.enabled,
+ "KeyState": "Enabled" if self.enabled else "Disabled",
+ "KeyUsage": "ENCRYPT_DECRYPT",
+ "KeySpec": "SYMMETRIC_DEFAULT",
+ }
+ if include_key:
+ data["KeyMaterial"] = base64.b64encode(self.key_material).decode()
+ return data
+
+ @classmethod
+ def from_dict(cls, data: Dict[str, Any]) -> "KMSKey":
+ key_material = b""
+ if "KeyMaterial" in data:
+ key_material = base64.b64decode(data["KeyMaterial"])
+ return cls(
+ key_id=data["KeyId"],
+ description=data.get("Description", ""),
+ created_at=data.get("CreationDate", datetime.now(timezone.utc).isoformat()),
+ enabled=data.get("Enabled", True),
+ key_material=key_material,
+ )
+
+
+class KMSEncryptionProvider(EncryptionProvider):
+ """Encryption provider using a specific KMS key."""
+
+ def __init__(self, kms: "KMSManager", key_id: str):
+ self.kms = kms
+ self.key_id = key_id
+
+ @property
+ def KEY_ID(self) -> str:
+ return self.key_id
+
+ def generate_data_key(self) -> tuple[bytes, bytes]:
+ """Generate a data key encrypted with the KMS key."""
+ return self.kms.generate_data_key(self.key_id)
+
+ def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
+ """Encrypt data using envelope encryption with KMS."""
+ data_key, encrypted_data_key = self.generate_data_key()
+
+ aesgcm = AESGCM(data_key)
+ nonce = secrets.token_bytes(12)
+ ciphertext = aesgcm.encrypt(nonce, plaintext,
+ json.dumps(context).encode() if context else None)
+
+ return EncryptionResult(
+ ciphertext=ciphertext,
+ nonce=nonce,
+ key_id=self.key_id,
+ encrypted_data_key=encrypted_data_key,
+ )
+
+ def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
+ key_id: str, context: Dict[str, str] | None = None) -> bytes:
+ """Decrypt data using envelope encryption with KMS."""
+ # Note: Data key is encrypted without context (AAD), so we decrypt without context
+ data_key = self.kms.decrypt_data_key(key_id, encrypted_data_key, context=None)
+
+ aesgcm = AESGCM(data_key)
+ try:
+ return aesgcm.decrypt(nonce, ciphertext,
+ json.dumps(context).encode() if context else None)
+ except Exception as exc:
+ raise EncryptionError(f"Failed to decrypt data: {exc}") from exc
+
+
+class KMSManager:
+ """Manages KMS keys and operations.
+
+ This is a local implementation that mimics AWS KMS functionality.
+ Keys are stored encrypted on disk.
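+
+    Example (illustrative; paths are placeholders)::
+
+        kms = KMSManager(Path("kms_keys.json"), Path("master.key"))
+        key = kms.create_key("application data key")
+        blob = kms.encrypt(key.key_id, b"hello")
+        plaintext, key_id = kms.decrypt(blob)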
+ """
+
+ def __init__(self, keys_path: Path, master_key_path: Path):
+ self.keys_path = keys_path
+ self.master_key_path = master_key_path
+ self._keys: Dict[str, KMSKey] = {}
+ self._master_key: bytes | None = None
+ self._loaded = False
+
+ @property
+ def master_key(self) -> bytes:
+ """Load or create the master key for encrypting KMS keys."""
+ if self._master_key is None:
+ if self.master_key_path.exists():
+ self._master_key = base64.b64decode(
+ self.master_key_path.read_text().strip()
+ )
+ else:
+ self._master_key = secrets.token_bytes(32)
+ self.master_key_path.parent.mkdir(parents=True, exist_ok=True)
+ self.master_key_path.write_text(
+ base64.b64encode(self._master_key).decode()
+ )
+ return self._master_key
+
+ def _load_keys(self) -> None:
+ """Load keys from disk."""
+ if self._loaded:
+ return
+
+ if self.keys_path.exists():
+ try:
+ data = json.loads(self.keys_path.read_text(encoding="utf-8"))
+ for key_data in data.get("keys", []):
+ key = KMSKey.from_dict(key_data)
+ if key_data.get("EncryptedKeyMaterial"):
+ encrypted = base64.b64decode(key_data["EncryptedKeyMaterial"])
+ key.key_material = self._decrypt_key_material(encrypted)
+ self._keys[key.key_id] = key
+ except Exception:
+ pass
+
+ self._loaded = True
+
+ def _save_keys(self) -> None:
+ """Save keys to disk (with encrypted key material)."""
+ keys_data = []
+ for key in self._keys.values():
+ data = key.to_dict(include_key=False)
+ encrypted = self._encrypt_key_material(key.key_material)
+ data["EncryptedKeyMaterial"] = base64.b64encode(encrypted).decode()
+ keys_data.append(data)
+
+ self.keys_path.parent.mkdir(parents=True, exist_ok=True)
+ self.keys_path.write_text(
+ json.dumps({"keys": keys_data}, indent=2),
+ encoding="utf-8"
+ )
+
+ def _encrypt_key_material(self, key_material: bytes) -> bytes:
+ """Encrypt key material with the master key."""
+ aesgcm = AESGCM(self.master_key)
+ nonce = secrets.token_bytes(12)
+ ciphertext = aesgcm.encrypt(nonce, key_material, None)
+ return nonce + ciphertext
+
+ def _decrypt_key_material(self, encrypted: bytes) -> bytes:
+ """Decrypt key material with the master key."""
+ aesgcm = AESGCM(self.master_key)
+ nonce = encrypted[:12]
+ ciphertext = encrypted[12:]
+ return aesgcm.decrypt(nonce, ciphertext, None)
+
+ def create_key(self, description: str = "", key_id: str | None = None) -> KMSKey:
+ """Create a new KMS key."""
+ self._load_keys()
+
+ if key_id is None:
+ key_id = str(uuid.uuid4())
+
+ if key_id in self._keys:
+ raise EncryptionError(f"Key already exists: {key_id}")
+
+ key = KMSKey(
+ key_id=key_id,
+ description=description,
+ created_at=datetime.now(timezone.utc).isoformat(),
+ enabled=True,
+ key_material=secrets.token_bytes(32),
+ )
+
+ self._keys[key_id] = key
+ self._save_keys()
+ return key
+
+ def get_key(self, key_id: str) -> KMSKey | None:
+ """Get a key by ID."""
+ self._load_keys()
+ return self._keys.get(key_id)
+
+ def list_keys(self) -> List[KMSKey]:
+ """List all keys."""
+ self._load_keys()
+ return list(self._keys.values())
+
+ def enable_key(self, key_id: str) -> None:
+ """Enable a key."""
+ self._load_keys()
+ key = self._keys.get(key_id)
+ if not key:
+ raise EncryptionError(f"Key not found: {key_id}")
+ key.enabled = True
+ self._save_keys()
+
+ def disable_key(self, key_id: str) -> None:
+ """Disable a key."""
+ self._load_keys()
+ key = self._keys.get(key_id)
+ if not key:
+ raise EncryptionError(f"Key not found: {key_id}")
+ key.enabled = False
+ self._save_keys()
+
+ def delete_key(self, key_id: str) -> None:
+        """Delete a key immediately (AWS KMS would instead schedule deletion)."""
+ self._load_keys()
+ if key_id not in self._keys:
+ raise EncryptionError(f"Key not found: {key_id}")
+ del self._keys[key_id]
+ self._save_keys()
+
+ def encrypt(self, key_id: str, plaintext: bytes,
+ context: Dict[str, str] | None = None) -> bytes:
+ """Encrypt data directly with a KMS key."""
+ self._load_keys()
+ key = self._keys.get(key_id)
+ if not key:
+ raise EncryptionError(f"Key not found: {key_id}")
+ if not key.enabled:
+ raise EncryptionError(f"Key is disabled: {key_id}")
+
+ aesgcm = AESGCM(key.key_material)
+ nonce = secrets.token_bytes(12)
+ aad = json.dumps(context).encode() if context else None
+ ciphertext = aesgcm.encrypt(nonce, plaintext, aad)
+
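+        # CiphertextBlob layout: 2-byte big-endian key-id length, the key-id
+        # bytes, a 12-byte nonce, then the AES-GCM ciphertext (tag included).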
+ key_id_bytes = key_id.encode("utf-8")
+ return len(key_id_bytes).to_bytes(2, "big") + key_id_bytes + nonce + ciphertext
+
+ def decrypt(self, ciphertext: bytes,
+ context: Dict[str, str] | None = None) -> tuple[bytes, str]:
+ """Decrypt data directly with a KMS key.
+
+ Returns:
+ Tuple of (plaintext, key_id)
+ """
+ self._load_keys()
+
+ key_id_len = int.from_bytes(ciphertext[:2], "big")
+ key_id = ciphertext[2:2 + key_id_len].decode("utf-8")
+ rest = ciphertext[2 + key_id_len:]
+
+ key = self._keys.get(key_id)
+ if not key:
+ raise EncryptionError(f"Key not found: {key_id}")
+ if not key.enabled:
+ raise EncryptionError(f"Key is disabled: {key_id}")
+
+ nonce = rest[:12]
+ encrypted = rest[12:]
+
+ aesgcm = AESGCM(key.key_material)
+ aad = json.dumps(context).encode() if context else None
+ try:
+ plaintext = aesgcm.decrypt(nonce, encrypted, aad)
+ return plaintext, key_id
+ except Exception as exc:
+ raise EncryptionError(f"Decryption failed: {exc}") from exc
+
+ def generate_data_key(self, key_id: str,
+ context: Dict[str, str] | None = None) -> tuple[bytes, bytes]:
+ """Generate a data key and return both plaintext and encrypted versions.
+
+ Returns:
+ Tuple of (plaintext_key, encrypted_key)
+ """
+ self._load_keys()
+ key = self._keys.get(key_id)
+ if not key:
+ raise EncryptionError(f"Key not found: {key_id}")
+ if not key.enabled:
+ raise EncryptionError(f"Key is disabled: {key_id}")
+
+ plaintext_key = secrets.token_bytes(32)
+
+ encrypted_key = self.encrypt(key_id, plaintext_key, context)
+
+ return plaintext_key, encrypted_key
+
+ def decrypt_data_key(self, key_id: str, encrypted_key: bytes,
+ context: Dict[str, str] | None = None) -> bytes:
+ """Decrypt a data key."""
+ plaintext, _ = self.decrypt(encrypted_key, context)
+ return plaintext
+
+ def get_provider(self, key_id: str | None = None) -> KMSEncryptionProvider:
+ """Get an encryption provider for a specific key."""
+ self._load_keys()
+
+ if key_id is None:
+ if not self._keys:
+ key = self.create_key("Default KMS Key")
+ key_id = key.key_id
+ else:
+ key_id = next(iter(self._keys.keys()))
+
+ if key_id not in self._keys:
+ raise EncryptionError(f"Key not found: {key_id}")
+
+ return KMSEncryptionProvider(self, key_id)
+
+ def re_encrypt(self, ciphertext: bytes, destination_key_id: str,
+ source_context: Dict[str, str] | None = None,
+ destination_context: Dict[str, str] | None = None) -> bytes:
+ """Re-encrypt data with a different key."""
+
+ plaintext, source_key_id = self.decrypt(ciphertext, source_context)
+
+ return self.encrypt(destination_key_id, plaintext, destination_context)
+
+ def generate_random(self, num_bytes: int = 32) -> bytes:
+ """Generate cryptographically secure random bytes."""
+ if num_bytes < 1 or num_bytes > 1024:
+ raise EncryptionError("Number of bytes must be between 1 and 1024")
+ return secrets.token_bytes(num_bytes)
diff --git a/app/kms_api.py b/app/kms_api.py
new file mode 100644
index 0000000..551d262
--- /dev/null
+++ b/app/kms_api.py
@@ -0,0 +1,463 @@
+"""KMS and encryption API endpoints."""
+from __future__ import annotations
+
+import base64
+from typing import Any, Dict
+
+from flask import Blueprint, Response, current_app, jsonify, request
+
+from .encryption import ClientEncryptionHelper, EncryptionError
+from .extensions import limiter
+
+kms_api_bp = Blueprint("kms_api", __name__, url_prefix="/kms")
+
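+# Example usage (illustrative; requests authenticate with the same IAM
+# credentials as the S3 API):
+#   POST /kms/keys                {"Description": "app key"}
+#   POST /kms/generate-data-key   {"KeyId": "<key-id>", "KeySpec": "AES_256"}
+#   POST /kms/decrypt             {"CiphertextBlob": "<base64 blob>"}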
+
+def _require_principal():
+ """Require authentication for KMS operations."""
+ from .s3_api import _require_principal as s3_require_principal
+ return s3_require_principal()
+
+
+def _kms():
+ """Get KMS manager from app extensions."""
+ return current_app.extensions.get("kms")
+
+
+def _encryption():
+ """Get encryption manager from app extensions."""
+ return current_app.extensions.get("encryption")
+
+
+def _error_response(code: str, message: str, status: int) -> tuple[Dict[str, Any], int]:
+ return {"__type": code, "message": message}, status
+
+
+# ---------------------- Key Management ----------------------
+
+@kms_api_bp.route("/keys", methods=["GET", "POST"])
+@limiter.limit("30 per minute")
+def list_or_create_keys():
+ """List all KMS keys or create a new key."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ kms = _kms()
+ if not kms:
+ return _error_response("KMSNotEnabled", "KMS is not configured", 400)
+
+ if request.method == "POST":
+ payload = request.get_json(silent=True) or {}
+ key_id = payload.get("KeyId") or payload.get("key_id")
+ description = payload.get("Description") or payload.get("description", "")
+
+ try:
+ key = kms.create_key(description=description, key_id=key_id)
+ current_app.logger.info(
+ "KMS key created",
+ extra={"key_id": key.key_id, "principal": principal.access_key},
+ )
+ return jsonify({
+ "KeyMetadata": key.to_dict(),
+ })
+ except EncryptionError as exc:
+ return _error_response("KMSInternalException", str(exc), 400)
+
+ # GET - List keys
+ keys = kms.list_keys()
+ return jsonify({
+ "Keys": [{"KeyId": k.key_id, "KeyArn": k.arn} for k in keys],
+ "Truncated": False,
+ })
+
+
+@kms_api_bp.route("/keys/", methods=["GET", "DELETE"])
+@limiter.limit("30 per minute")
+def get_or_delete_key(key_id: str):
+ """Get or delete a specific KMS key."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ kms = _kms()
+ if not kms:
+ return _error_response("KMSNotEnabled", "KMS is not configured", 400)
+
+ if request.method == "DELETE":
+ try:
+ kms.delete_key(key_id)
+ current_app.logger.info(
+ "KMS key deleted",
+ extra={"key_id": key_id, "principal": principal.access_key},
+ )
+ return Response(status=204)
+ except EncryptionError as exc:
+ return _error_response("NotFoundException", str(exc), 404)
+
+ # GET
+ key = kms.get_key(key_id)
+ if not key:
+ return _error_response("NotFoundException", f"Key not found: {key_id}", 404)
+
+ return jsonify({"KeyMetadata": key.to_dict()})
+
+
+@kms_api_bp.route("/keys//enable", methods=["POST"])
+@limiter.limit("30 per minute")
+def enable_key(key_id: str):
+ """Enable a KMS key."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ kms = _kms()
+ if not kms:
+ return _error_response("KMSNotEnabled", "KMS is not configured", 400)
+
+ try:
+ kms.enable_key(key_id)
+ current_app.logger.info(
+ "KMS key enabled",
+ extra={"key_id": key_id, "principal": principal.access_key},
+ )
+ return Response(status=200)
+ except EncryptionError as exc:
+ return _error_response("NotFoundException", str(exc), 404)
+
+
+@kms_api_bp.route("/keys//disable", methods=["POST"])
+@limiter.limit("30 per minute")
+def disable_key(key_id: str):
+ """Disable a KMS key."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ kms = _kms()
+ if not kms:
+ return _error_response("KMSNotEnabled", "KMS is not configured", 400)
+
+ try:
+ kms.disable_key(key_id)
+ current_app.logger.info(
+ "KMS key disabled",
+ extra={"key_id": key_id, "principal": principal.access_key},
+ )
+ return Response(status=200)
+ except EncryptionError as exc:
+ return _error_response("NotFoundException", str(exc), 404)
+
+
+# ---------------------- Encryption Operations ----------------------
+
+@kms_api_bp.route("/encrypt", methods=["POST"])
+@limiter.limit("60 per minute")
+def encrypt_data():
+ """Encrypt data using a KMS key."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ kms = _kms()
+ if not kms:
+ return _error_response("KMSNotEnabled", "KMS is not configured", 400)
+
+ payload = request.get_json(silent=True) or {}
+ key_id = payload.get("KeyId")
+ plaintext_b64 = payload.get("Plaintext")
+ context = payload.get("EncryptionContext")
+
+ if not key_id:
+ return _error_response("ValidationException", "KeyId is required", 400)
+ if not plaintext_b64:
+ return _error_response("ValidationException", "Plaintext is required", 400)
+
+ try:
+ plaintext = base64.b64decode(plaintext_b64)
+ except Exception:
+ return _error_response("ValidationException", "Plaintext must be base64 encoded", 400)
+
+ try:
+ ciphertext = kms.encrypt(key_id, plaintext, context)
+ return jsonify({
+ "CiphertextBlob": base64.b64encode(ciphertext).decode(),
+ "KeyId": key_id,
+ "EncryptionAlgorithm": "SYMMETRIC_DEFAULT",
+ })
+ except EncryptionError as exc:
+ return _error_response("KMSInternalException", str(exc), 400)
+
+
+@kms_api_bp.route("/decrypt", methods=["POST"])
+@limiter.limit("60 per minute")
+def decrypt_data():
+ """Decrypt data using a KMS key."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ kms = _kms()
+ if not kms:
+ return _error_response("KMSNotEnabled", "KMS is not configured", 400)
+
+ payload = request.get_json(silent=True) or {}
+ ciphertext_b64 = payload.get("CiphertextBlob")
+ context = payload.get("EncryptionContext")
+
+ if not ciphertext_b64:
+ return _error_response("ValidationException", "CiphertextBlob is required", 400)
+
+ try:
+ ciphertext = base64.b64decode(ciphertext_b64)
+ except Exception:
+ return _error_response("ValidationException", "CiphertextBlob must be base64 encoded", 400)
+
+ try:
+ plaintext, key_id = kms.decrypt(ciphertext, context)
+ return jsonify({
+ "Plaintext": base64.b64encode(plaintext).decode(),
+ "KeyId": key_id,
+ "EncryptionAlgorithm": "SYMMETRIC_DEFAULT",
+ })
+ except EncryptionError as exc:
+ return _error_response("InvalidCiphertextException", str(exc), 400)
+
+
+@kms_api_bp.route("/generate-data-key", methods=["POST"])
+@limiter.limit("60 per minute")
+def generate_data_key():
+ """Generate a data encryption key."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ kms = _kms()
+ if not kms:
+ return _error_response("KMSNotEnabled", "KMS is not configured", 400)
+
+ payload = request.get_json(silent=True) or {}
+ key_id = payload.get("KeyId")
+ context = payload.get("EncryptionContext")
+ key_spec = payload.get("KeySpec", "AES_256")
+
+ if not key_id:
+ return _error_response("ValidationException", "KeyId is required", 400)
+
+ if key_spec not in {"AES_256", "AES_128"}:
+ return _error_response("ValidationException", "KeySpec must be AES_256 or AES_128", 400)
+
+ try:
+ plaintext_key, encrypted_key = kms.generate_data_key(key_id, context)
+
+ # Trim key if AES_128 requested
+ if key_spec == "AES_128":
+ plaintext_key = plaintext_key[:16]
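+            # Note: CiphertextBlob still wraps the full 32-byte key; callers
+            # decrypting it for AES_128 should use only the first 16 bytes.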
+
+ return jsonify({
+ "Plaintext": base64.b64encode(plaintext_key).decode(),
+ "CiphertextBlob": base64.b64encode(encrypted_key).decode(),
+ "KeyId": key_id,
+ })
+ except EncryptionError as exc:
+ return _error_response("KMSInternalException", str(exc), 400)
+
+
+@kms_api_bp.route("/generate-data-key-without-plaintext", methods=["POST"])
+@limiter.limit("60 per minute")
+def generate_data_key_without_plaintext():
+ """Generate a data encryption key without returning the plaintext."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ kms = _kms()
+ if not kms:
+ return _error_response("KMSNotEnabled", "KMS is not configured", 400)
+
+ payload = request.get_json(silent=True) or {}
+ key_id = payload.get("KeyId")
+ context = payload.get("EncryptionContext")
+
+ if not key_id:
+ return _error_response("ValidationException", "KeyId is required", 400)
+
+ try:
+ _, encrypted_key = kms.generate_data_key(key_id, context)
+ return jsonify({
+ "CiphertextBlob": base64.b64encode(encrypted_key).decode(),
+ "KeyId": key_id,
+ })
+ except EncryptionError as exc:
+ return _error_response("KMSInternalException", str(exc), 400)
+
+
+@kms_api_bp.route("/re-encrypt", methods=["POST"])
+@limiter.limit("30 per minute")
+def re_encrypt():
+ """Re-encrypt data with a different key."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ kms = _kms()
+ if not kms:
+ return _error_response("KMSNotEnabled", "KMS is not configured", 400)
+
+ payload = request.get_json(silent=True) or {}
+ ciphertext_b64 = payload.get("CiphertextBlob")
+ destination_key_id = payload.get("DestinationKeyId")
+ source_context = payload.get("SourceEncryptionContext")
+ destination_context = payload.get("DestinationEncryptionContext")
+
+ if not ciphertext_b64:
+ return _error_response("ValidationException", "CiphertextBlob is required", 400)
+ if not destination_key_id:
+ return _error_response("ValidationException", "DestinationKeyId is required", 400)
+
+ try:
+ ciphertext = base64.b64decode(ciphertext_b64)
+ except Exception:
+ return _error_response("ValidationException", "CiphertextBlob must be base64 encoded", 400)
+
+ try:
+ # First decrypt, get source key id
+ plaintext, source_key_id = kms.decrypt(ciphertext, source_context)
+
+ # Re-encrypt with destination key
+ new_ciphertext = kms.encrypt(destination_key_id, plaintext, destination_context)
+
+ return jsonify({
+ "CiphertextBlob": base64.b64encode(new_ciphertext).decode(),
+ "SourceKeyId": source_key_id,
+ "KeyId": destination_key_id,
+ })
+ except EncryptionError as exc:
+ return _error_response("KMSInternalException", str(exc), 400)
+
+
+@kms_api_bp.route("/generate-random", methods=["POST"])
+@limiter.limit("60 per minute")
+def generate_random():
+ """Generate random bytes."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ kms = _kms()
+ if not kms:
+ return _error_response("KMSNotEnabled", "KMS is not configured", 400)
+
+ payload = request.get_json(silent=True) or {}
+ num_bytes = payload.get("NumberOfBytes", 32)
+
+ try:
+ num_bytes = int(num_bytes)
+ except (TypeError, ValueError):
+ return _error_response("ValidationException", "NumberOfBytes must be an integer", 400)
+
+ try:
+ random_bytes = kms.generate_random(num_bytes)
+ return jsonify({
+ "Plaintext": base64.b64encode(random_bytes).decode(),
+ })
+ except EncryptionError as exc:
+ return _error_response("ValidationException", str(exc), 400)
+
+
+# ---------------------- Client-Side Encryption Helpers ----------------------
+
+@kms_api_bp.route("/client/generate-key", methods=["POST"])
+@limiter.limit("30 per minute")
+def generate_client_key():
+ """Generate a client-side encryption key."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ key_info = ClientEncryptionHelper.generate_client_key()
+ return jsonify(key_info)
+
+
+@kms_api_bp.route("/client/encrypt", methods=["POST"])
+@limiter.limit("60 per minute")
+def client_encrypt():
+ """Encrypt data using client-side encryption."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ payload = request.get_json(silent=True) or {}
+ plaintext_b64 = payload.get("Plaintext")
+ key_b64 = payload.get("Key")
+
+ if not plaintext_b64 or not key_b64:
+ return _error_response("ValidationException", "Plaintext and Key are required", 400)
+
+ try:
+ plaintext = base64.b64decode(plaintext_b64)
+ result = ClientEncryptionHelper.encrypt_with_key(plaintext, key_b64)
+ return jsonify(result)
+ except Exception as exc:
+ return _error_response("EncryptionError", str(exc), 400)
+
+
+@kms_api_bp.route("/client/decrypt", methods=["POST"])
+@limiter.limit("60 per minute")
+def client_decrypt():
+ """Decrypt data using client-side encryption."""
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ payload = request.get_json(silent=True) or {}
+ ciphertext_b64 = payload.get("Ciphertext") or payload.get("ciphertext")
+ nonce_b64 = payload.get("Nonce") or payload.get("nonce")
+ key_b64 = payload.get("Key") or payload.get("key")
+
+ if not ciphertext_b64 or not nonce_b64 or not key_b64:
+ return _error_response("ValidationException", "Ciphertext, Nonce, and Key are required", 400)
+
+ try:
+ plaintext = ClientEncryptionHelper.decrypt_with_key(ciphertext_b64, nonce_b64, key_b64)
+ return jsonify({
+ "Plaintext": base64.b64encode(plaintext).decode(),
+ })
+ except Exception as exc:
+ return _error_response("DecryptionError", str(exc), 400)
+
+
+# ---------------------- Encryption Materials for S3 Client-Side Encryption ----------------------
+
+@kms_api_bp.route("/materials/", methods=["POST"])
+@limiter.limit("60 per minute")
+def get_encryption_materials(key_id: str):
+ """Get encryption materials for client-side S3 encryption.
+
+ This is used by S3 encryption clients that want to use KMS for
+ key management but perform encryption client-side.
+ """
+ principal, error = _require_principal()
+ if error:
+ return error
+
+ kms = _kms()
+ if not kms:
+ return _error_response("KMSNotEnabled", "KMS is not configured", 400)
+
+ payload = request.get_json(silent=True) or {}
+ context = payload.get("EncryptionContext")
+
+ try:
+ plaintext_key, encrypted_key = kms.generate_data_key(key_id, context)
+
+ return jsonify({
+ "PlaintextKey": base64.b64encode(plaintext_key).decode(),
+ "EncryptedKey": base64.b64encode(encrypted_key).decode(),
+ "KeyId": key_id,
+ "Algorithm": "AES-256-GCM",
+ "KeyWrapAlgorithm": "kms",
+ })
+ except EncryptionError as exc:
+ return _error_response("KMSInternalException", str(exc), 400)
diff --git a/app/s3_api.py b/app/s3_api.py
index c424f48..f67d816 100644
--- a/app/s3_api.py
+++ b/app/s3_api.py
@@ -18,7 +18,7 @@ from .bucket_policies import BucketPolicyStore
from .extensions import limiter
from .iam import IamError, Principal
from .replication import ReplicationManager
-from .storage import ObjectStorage, StorageError
+from .storage import ObjectStorage, StorageError, QuotaExceededError
s3_api_bp = Blueprint("s3_api", __name__)
@@ -784,8 +784,9 @@ def _apply_object_headers(
metadata: Dict[str, str] | None,
etag: str,
) -> None:
- response.headers["Content-Length"] = str(file_stat.st_size)
- response.headers["Last-Modified"] = http_date(file_stat.st_mtime)
+ if file_stat is not None:
+ response.headers["Content-Length"] = str(file_stat.st_size)
+ response.headers["Last-Modified"] = http_date(file_stat.st_mtime)
response.headers["ETag"] = f'"{etag}"'
response.headers["Accept-Ranges"] = "bytes"
for key, value in (metadata or {}).items():
@@ -802,6 +803,7 @@ def _maybe_handle_bucket_subresource(bucket_name: str) -> Response | None:
"acl": _bucket_acl_handler,
"versions": _bucket_list_versions_handler,
"lifecycle": _bucket_lifecycle_handler,
+ "quota": _bucket_quota_handler,
}
requested = [key for key in handlers if key in request.args]
if not requested:
@@ -1399,6 +1401,87 @@ def _parse_lifecycle_config(payload: bytes) -> list:
return rules
+def _bucket_quota_handler(bucket_name: str) -> Response:
+ """Handle bucket quota configuration (GET/PUT/DELETE /?quota)."""
+ if request.method not in {"GET", "PUT", "DELETE"}:
+ return _method_not_allowed(["GET", "PUT", "DELETE"])
+
+ principal, error = _require_principal()
+ if error:
+ return error
+ try:
+ _authorize_action(principal, bucket_name, "policy")
+ except IamError as exc:
+ return _error_response("AccessDenied", str(exc), 403)
+
+ storage = _storage()
+
+ if not storage.bucket_exists(bucket_name):
+ return _error_response("NoSuchBucket", "Bucket does not exist", 404)
+
+ if request.method == "GET":
+ quota = storage.get_bucket_quota(bucket_name)
+        if quota.get("max_bytes") is None and quota.get("max_objects") is None:
+ return _error_response("NoSuchQuotaConfiguration", "No quota configuration found", 404)
+
+ # Return as JSON for simplicity (not a standard S3 API)
+ stats = storage.bucket_stats(bucket_name)
+ return jsonify({
+ "quota": quota,
+ "usage": {
+ "bytes": stats.get("bytes", 0),
+ "objects": stats.get("objects", 0),
+ }
+ })
+
+ if request.method == "DELETE":
+ try:
+            storage.set_bucket_quota(bucket_name, max_bytes=None, max_objects=None)
+ except StorageError as exc:
+ return _error_response("NoSuchBucket", str(exc), 404)
+ current_app.logger.info("Bucket quota deleted", extra={"bucket": bucket_name})
+ return Response(status=204)
+
+ # PUT
+ payload = request.get_json(silent=True)
+ if not payload:
+ return _error_response("MalformedRequest", "Request body must be JSON with quota limits", 400)
+
+    max_bytes = payload.get("max_bytes")
+ max_objects = payload.get("max_objects")
+
+    if max_bytes is None and max_objects is None:
+        return _error_response("InvalidArgument", "At least one of max_bytes or max_objects is required", 400)
+
+ # Validate types
+    if max_bytes is not None:
+        try:
+            max_bytes = int(max_bytes)
+            if max_bytes < 0:
+                raise ValueError("must be non-negative")
+        except (TypeError, ValueError) as exc:
+            return _error_response("InvalidArgument", f"max_bytes {exc}", 400)
+
+ if max_objects is not None:
+ try:
+ max_objects = int(max_objects)
+ if max_objects < 0:
+ raise ValueError("must be non-negative")
+ except (TypeError, ValueError) as exc:
+ return _error_response("InvalidArgument", f"max_objects {exc}", 400)
+
+ try:
+        storage.set_bucket_quota(bucket_name, max_bytes=max_bytes, max_objects=max_objects)
+ except StorageError as exc:
+ return _error_response("NoSuchBucket", str(exc), 404)
+
+ current_app.logger.info(
+ "Bucket quota updated",
+        extra={"bucket": bucket_name, "max_bytes": max_bytes, "max_objects": max_objects}
+ )
+ return Response(status=204)
+
+
def _bulk_delete_handler(bucket_name: str) -> Response:
principal, error = _require_principal()
if error:
@@ -1748,6 +1831,8 @@ def object_handler(bucket_name: str, object_key: str):
stream,
metadata=metadata or None,
)
+ except QuotaExceededError as exc:
+ return _error_response("QuotaExceeded", str(exc), 403)
except StorageError as exc:
message = str(exc)
if "Bucket" in message:
@@ -1779,19 +1864,48 @@ def object_handler(bucket_name: str, object_key: str):
except StorageError as exc:
return _error_response("NoSuchKey", str(exc), 404)
metadata = storage.get_object_metadata(bucket_name, object_key)
- stat = path.stat()
- mimetype = mimetypes.guess_type(path.name)[0] or "application/octet-stream"
- etag = storage._compute_etag(path)
-
+ mimetype = mimetypes.guess_type(object_key)[0] or "application/octet-stream"
+
+ # Check if object is encrypted and needs decryption
+ is_encrypted = "x-amz-server-side-encryption" in metadata
+
if request.method == "GET":
- response = Response(_stream_file(path), mimetype=mimetype, direct_passthrough=True)
- logged_bytes = stat.st_size
+ if is_encrypted and hasattr(storage, 'get_object_data'):
+ # Use encrypted storage to decrypt
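+            # Note: the full decrypted payload is buffered in memory here; encrypted objects are not streamed.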
+ try:
+ data, clean_metadata = storage.get_object_data(bucket_name, object_key)
+ response = Response(data, mimetype=mimetype)
+ logged_bytes = len(data)
+ # Use decrypted size for Content-Length
+ response.headers["Content-Length"] = len(data)
+ etag = hashlib.md5(data).hexdigest()
+ except StorageError as exc:
+ return _error_response("InternalError", str(exc), 500)
+ else:
+ # Stream unencrypted file directly
+ stat = path.stat()
+ response = Response(_stream_file(path), mimetype=mimetype, direct_passthrough=True)
+ logged_bytes = stat.st_size
+ etag = storage._compute_etag(path)
else:
- response = Response(status=200)
+ # HEAD request
+ if is_encrypted and hasattr(storage, 'get_object_data'):
+ # For encrypted objects, we need to report decrypted size
+ try:
+ data, _ = storage.get_object_data(bucket_name, object_key)
+ response = Response(status=200)
+ response.headers["Content-Length"] = len(data)
+ etag = hashlib.md5(data).hexdigest()
+ except StorageError as exc:
+ return _error_response("InternalError", str(exc), 500)
+ else:
+ stat = path.stat()
+ response = Response(status=200)
+ etag = storage._compute_etag(path)
response.headers["Content-Type"] = mimetype
logged_bytes = 0
- _apply_object_headers(response, file_stat=stat, metadata=metadata, etag=etag)
+ _apply_object_headers(response, file_stat=path.stat() if not is_encrypted else None, metadata=metadata, etag=etag)
action = "Object read" if request.method == "GET" else "Object head"
current_app.logger.info(action, extra={"bucket": bucket_name, "key": object_key, "bytes": logged_bytes})
return response
@@ -2226,6 +2340,8 @@ def _complete_multipart_upload(bucket_name: str, object_key: str) -> Response:
try:
meta = _storage().complete_multipart_upload(bucket_name, upload_id, parts)
+ except QuotaExceededError as exc:
+ return _error_response("QuotaExceeded", str(exc), 403)
except StorageError as exc:
if "NoSuchBucket" in str(exc):
return _error_response("NoSuchBucket", str(exc), 404)
diff --git a/app/storage.py b/app/storage.py
index 29e90d5..7f91596 100644
--- a/app/storage.py
+++ b/app/storage.py
@@ -75,6 +75,15 @@ class StorageError(RuntimeError):
"""Raised when the storage layer encounters an unrecoverable problem."""
+class QuotaExceededError(StorageError):
+ """Raised when an operation would exceed bucket quota limits."""
+
+ def __init__(self, message: str, quota: Dict[str, Any], usage: Dict[str, int]):
+ super().__init__(message)
+ self.quota = quota
+ self.usage = usage
+
+
@dataclass
class ObjectMeta:
key: str
@@ -169,16 +178,38 @@ class ObjectStorage:
object_count = 0
total_bytes = 0
+ version_count = 0
+ version_bytes = 0
+
+ # Count current objects in the bucket folder
for path in bucket_path.rglob("*"):
if path.is_file():
rel = path.relative_to(bucket_path)
- if rel.parts and rel.parts[0] in self.INTERNAL_FOLDERS:
+ if not rel.parts:
continue
- stat = path.stat()
- object_count += 1
- total_bytes += stat.st_size
+ top_folder = rel.parts[0]
+ if top_folder not in self.INTERNAL_FOLDERS:
+ stat = path.stat()
+ object_count += 1
+ total_bytes += stat.st_size
- stats = {"objects": object_count, "bytes": total_bytes}
+ # Count archived versions in the system folder
+ versions_root = self._bucket_versions_root(bucket_name)
+ if versions_root.exists():
+ for path in versions_root.rglob("*.bin"):
+ if path.is_file():
+ stat = path.stat()
+ version_count += 1
+ version_bytes += stat.st_size
+
+ stats = {
+ "objects": object_count,
+ "bytes": total_bytes,
+ "version_count": version_count,
+ "version_bytes": version_bytes,
+ "total_objects": object_count + version_count, # All objects including versions
+ "total_bytes": total_bytes + version_bytes, # All storage including versions
+ }
try:
cache_path.parent.mkdir(parents=True, exist_ok=True)
@@ -243,6 +274,7 @@ class ObjectStorage:
stream: BinaryIO,
*,
metadata: Optional[Dict[str, str]] = None,
+ enforce_quota: bool = True,
) -> ObjectMeta:
bucket_path = self._bucket_path(bucket_name)
if not bucket_path.exists():
@@ -253,12 +285,52 @@ class ObjectStorage:
destination = bucket_path / safe_key
destination.parent.mkdir(parents=True, exist_ok=True)
- if self._is_versioning_enabled(bucket_path) and destination.exists():
+ # Check if this is an overwrite (won't add to object count)
+ is_overwrite = destination.exists()
+ existing_size = destination.stat().st_size if is_overwrite else 0
+
+ if self._is_versioning_enabled(bucket_path) and is_overwrite:
self._archive_current_version(bucket_id, safe_key, reason="overwrite")
- checksum = hashlib.md5()
- with destination.open("wb") as target:
- shutil.copyfileobj(_HashingReader(stream, checksum), target)
+ # Write to temp file first to get actual size
+ tmp_dir = self._system_root_path() / self.SYSTEM_TMP_DIR
+ tmp_dir.mkdir(parents=True, exist_ok=True)
+ tmp_path = tmp_dir / f"{uuid.uuid4().hex}.tmp"
+
+ try:
+ checksum = hashlib.md5()
+ with tmp_path.open("wb") as target:
+ shutil.copyfileobj(_HashingReader(stream, checksum), target)
+
+ new_size = tmp_path.stat().st_size
+
+ # Check quota before finalizing
+ if enforce_quota:
+ # Calculate net change (new size minus size being replaced)
+ size_delta = new_size - existing_size
+ object_delta = 0 if is_overwrite else 1
+
+ quota_check = self.check_quota(
+ bucket_name,
+ additional_bytes=max(0, size_delta),
+ additional_objects=object_delta,
+ )
+ if not quota_check["allowed"]:
+ raise QuotaExceededError(
+ quota_check["message"] or "Quota exceeded",
+ quota_check["quota"],
+ quota_check["usage"],
+ )
+
+ # Move to final destination
+ shutil.move(str(tmp_path), str(destination))
+
+ finally:
+ # Clean up temp file if it still exists
+ try:
+ tmp_path.unlink(missing_ok=True)
+ except OSError:
+ pass
stat = destination.stat()
if metadata:
@@ -289,6 +361,27 @@ class ObjectStorage:
safe_key = self._sanitize_object_key(object_key)
return self._read_metadata(bucket_path.name, safe_key) or {}
+ def _cleanup_empty_parents(self, path: Path, stop_at: Path) -> None:
+ """Remove empty parent directories up to (but not including) stop_at.
+
+ On Windows/OneDrive, directories may be locked briefly after file deletion.
+ This method retries with a small delay to handle that case.
+ """
+ for parent in path.parents:
+ if parent == stop_at:
+ break
+ # Retry a few times with small delays for Windows/OneDrive
+ for attempt in range(3):
+ try:
+ if parent.exists() and not any(parent.iterdir()):
+ parent.rmdir()
+ break # Success, move to next parent
+ except OSError:
+ if attempt < 2:
+ time.sleep(0.1) # Brief delay before retry
+ # Final attempt failed - continue to next parent
+ break
+
def delete_object(self, bucket_name: str, object_key: str) -> None:
bucket_path = self._bucket_path(bucket_name)
path = self._object_path(bucket_name, object_key)
@@ -303,12 +396,7 @@ class ObjectStorage:
self._delete_metadata(bucket_id, rel)
self._invalidate_bucket_stats_cache(bucket_id)
-
- for parent in path.parents:
- if parent == bucket_path:
- break
- if parent.exists() and not any(parent.iterdir()):
- parent.rmdir()
+ self._cleanup_empty_parents(path, bucket_path)
def purge_object(self, bucket_name: str, object_key: str) -> None:
bucket_path = self._bucket_path(bucket_name)
@@ -330,12 +418,7 @@ class ObjectStorage:
# Invalidate bucket stats cache
self._invalidate_bucket_stats_cache(bucket_id)
-
- for parent in target.parents:
- if parent == bucket_path:
- break
- if parent.exists() and not any(parent.iterdir()):
- parent.rmdir()
+ self._cleanup_empty_parents(target, bucket_path)
def is_versioning_enabled(self, bucket_name: str) -> bool:
bucket_path = self._bucket_path(bucket_name)
@@ -413,6 +496,124 @@ class ObjectStorage:
bucket_path = self._require_bucket_path(bucket_name)
self._set_bucket_config_entry(bucket_path.name, "lifecycle", rules)
+ def get_bucket_quota(self, bucket_name: str) -> Dict[str, Any]:
+ """Get quota configuration for bucket.
+
+ Returns:
+ Dict with 'max_bytes' and 'max_objects' (None if unlimited).
+ """
+ bucket_path = self._require_bucket_path(bucket_name)
+ config = self._read_bucket_config(bucket_path.name)
+ quota = config.get("quota")
+ if isinstance(quota, dict):
+ return {
+ "max_bytes": quota.get("max_bytes"),
+ "max_objects": quota.get("max_objects"),
+ }
+ return {"max_bytes": None, "max_objects": None}
+
+ def set_bucket_quota(
+ self,
+ bucket_name: str,
+ *,
+ max_bytes: Optional[int] = None,
+ max_objects: Optional[int] = None,
+ ) -> None:
+ """Set quota limits for a bucket.
+
+ Args:
+ bucket_name: Name of the bucket
+ max_bytes: Maximum total size in bytes (None to remove limit)
+ max_objects: Maximum number of objects (None to remove limit)
+ """
+ bucket_path = self._require_bucket_path(bucket_name)
+
+ if max_bytes is None and max_objects is None:
+ # Remove quota entirely
+ self._set_bucket_config_entry(bucket_path.name, "quota", None)
+ return
+
+ quota: Dict[str, Any] = {}
+ if max_bytes is not None:
+ if max_bytes < 0:
+ raise StorageError("max_bytes must be non-negative")
+ quota["max_bytes"] = max_bytes
+ if max_objects is not None:
+ if max_objects < 0:
+ raise StorageError("max_objects must be non-negative")
+ quota["max_objects"] = max_objects
+
+ self._set_bucket_config_entry(bucket_path.name, "quota", quota)
+
+ def check_quota(
+ self,
+ bucket_name: str,
+ additional_bytes: int = 0,
+ additional_objects: int = 0,
+ ) -> Dict[str, Any]:
+ """Check if an operation would exceed bucket quota.
+
+ Args:
+ bucket_name: Name of the bucket
+ additional_bytes: Bytes that would be added
+ additional_objects: Objects that would be added
+
+ Returns:
+ Dict with 'allowed' (bool), 'quota' (current limits),
+ 'usage' (current usage), and 'message' (if not allowed).
+ """
+ quota = self.get_bucket_quota(bucket_name)
+ if not quota:
+ return {
+ "allowed": True,
+ "quota": None,
+ "usage": None,
+ "message": None,
+ }
+
+ # Get current stats (uses cache when available)
+ stats = self.bucket_stats(bucket_name)
+ # Use totals which include versions for quota enforcement
+ current_bytes = stats.get("total_bytes", stats.get("bytes", 0))
+ current_objects = stats.get("total_objects", stats.get("objects", 0))
+
+ result = {
+ "allowed": True,
+ "quota": quota,
+ "usage": {
+ "bytes": current_bytes,
+ "objects": current_objects,
+ "version_count": stats.get("version_count", 0),
+ "version_bytes": stats.get("version_bytes", 0),
+ },
+ "message": None,
+ }
+
+ max_bytes_limit = quota.get("max_bytes")
+ max_objects = quota.get("max_objects")
+
+ if max_bytes_limit is not None:
+ projected_bytes = current_bytes + additional_bytes
+ if projected_bytes > max_bytes_limit:
+ result["allowed"] = False
+ result["message"] = (
+ f"Quota exceeded: adding {additional_bytes} bytes would result in "
+ f"{projected_bytes} bytes, exceeding limit of {max_bytes_limit} bytes"
+ )
+ return result
+
+ if max_objects is not None:
+ projected_objects = current_objects + additional_objects
+ if projected_objects > max_objects:
+ result["allowed"] = False
+ result["message"] = (
+ f"Quota exceeded: adding {additional_objects} objects would result in "
+ f"{projected_objects} objects, exceeding limit of {max_objects} objects"
+ )
+ return result
+
+ return result
+
def get_object_tags(self, bucket_name: str, object_key: str) -> List[Dict[str, str]]:
"""Get tags for an object."""
bucket_path = self._bucket_path(bucket_name)
@@ -529,6 +730,7 @@ class ObjectStorage:
else:
self._delete_metadata(bucket_id, safe_key)
stat = destination.stat()
+ self._invalidate_bucket_stats_cache(bucket_id)
return ObjectMeta(
key=safe_key.as_posix(),
size=stat.st_size,
@@ -677,6 +879,7 @@ class ObjectStorage:
bucket_name: str,
upload_id: str,
ordered_parts: List[Dict[str, Any]],
+ enforce_quota: bool = True,
) -> ObjectMeta:
if not ordered_parts:
raise StorageError("parts list required")
@@ -687,6 +890,7 @@ class ObjectStorage:
if not parts_map:
raise StorageError("No uploaded parts found")
validated: List[tuple[int, Dict[str, Any]]] = []
+ total_size = 0
for part in ordered_parts:
raw_number = part.get("part_number")
if raw_number is None:
@@ -706,10 +910,33 @@ class ObjectStorage:
if supplied_etag and record.get("etag") and supplied_etag.strip('"') != record["etag"]:
raise StorageError(f"ETag mismatch for part {number}")
validated.append((number, record))
+ total_size += record.get("size", 0)
validated.sort(key=lambda entry: entry[0])
safe_key = self._sanitize_object_key(manifest["object_key"])
destination = bucket_path / safe_key
+
+ # Check if this is an overwrite
+ is_overwrite = destination.exists()
+ existing_size = destination.stat().st_size if is_overwrite else 0
+
+ # Check quota before writing
+ if enforce_quota:
+ size_delta = total_size - existing_size
+ object_delta = 0 if is_overwrite else 1
+
+ quota_check = self.check_quota(
+ bucket_name,
+ additional_bytes=max(0, size_delta),
+ additional_objects=object_delta,
+ )
+ if not quota_check["allowed"]:
+ raise QuotaExceededError(
+ quota_check["message"] or "Quota exceeded",
+ quota_check["quota"],
+ quota_check["usage"],
+ )
+
destination.parent.mkdir(parents=True, exist_ok=True)
lock_file_path = self._system_bucket_root(bucket_id) / "locks" / f"{safe_key.as_posix().replace('/', '_')}.lock"
diff --git a/app/ui.py b/app/ui.py
index 654d2c7..81e905e 100644
--- a/app/ui.py
+++ b/app/ui.py
@@ -6,7 +6,7 @@ import uuid
import psutil
import shutil
from typing import Any
-from urllib.parse import urlparse
+from urllib.parse import quote, urlparse
import boto3
import requests
@@ -30,6 +30,7 @@ from .bucket_policies import BucketPolicyStore
from .connections import ConnectionStore, RemoteConnection
from .extensions import limiter
from .iam import IamError
+from .kms import KMSManager
from .replication import ReplicationManager, ReplicationRule
from .secret_store import EphemeralSecretStore
from .storage import ObjectStorage, StorageError
@@ -50,6 +51,9 @@ def _iam():
return current_app.extensions["iam"]
+def _kms() -> KMSManager | None:
+ return current_app.extensions.get("kms")
+
def _bucket_policies() -> BucketPolicyStore:
store: BucketPolicyStore = current_app.extensions["bucket_policies"]
@@ -185,6 +189,7 @@ def inject_nav_state() -> dict[str, Any]:
return {
"principal": principal,
"can_manage_iam": can_manage,
+ "can_view_metrics": can_manage, # Only admins can view metrics
"csrf_token": generate_csrf,
}
@@ -255,9 +260,9 @@ def buckets_overview():
visible_buckets.append({
"meta": bucket,
"summary": {
- "objects": stats["objects"],
- "total_bytes": stats["bytes"],
- "human_size": _format_bytes(stats["bytes"]),
+ "objects": stats["total_objects"],
+ "total_bytes": stats["total_bytes"],
+ "human_size": _format_bytes(stats["total_bytes"]),
},
"access_label": access_label,
"access_badge": access_badge,
@@ -336,9 +341,46 @@ def bucket_detail(bucket_name: str):
except IamError:
can_manage_versioning = False
+ # Check replication permission
+ can_manage_replication = False
+ if principal:
+ try:
+ _iam().authorize(principal, bucket_name, "replication")
+ can_manage_replication = True
+ except IamError:
+ can_manage_replication = False
+
+ # Check if user is admin (can configure replication settings, not just toggle)
+ is_replication_admin = False
+ if principal:
+ try:
+ _iam().authorize(principal, None, "iam:list_users")
+ is_replication_admin = True
+ except IamError:
+ is_replication_admin = False
+
# Replication info - don't compute sync status here (it's slow), let JS fetch it async
replication_rule = _replication().get_rule(bucket_name)
- connections = _connections().list()
+ # Load connections for admin, or for non-admin if there's an existing rule (to show target name)
+ connections = _connections().list() if (is_replication_admin or replication_rule) else []
+
+ # Encryption settings
+ encryption_config = storage.get_bucket_encryption(bucket_name)
+ kms_manager = _kms()
+ kms_keys = kms_manager.list_keys() if kms_manager else []
+ kms_enabled = current_app.config.get("KMS_ENABLED", False)
+ encryption_enabled = current_app.config.get("ENCRYPTION_ENABLED", False)
+ can_manage_encryption = can_manage_versioning # Same as other bucket properties
+
+ # Quota settings (admin only)
+ bucket_quota = storage.get_bucket_quota(bucket_name)
+ bucket_stats = storage.bucket_stats(bucket_name)
+ can_manage_quota = False
+ try:
+ _iam().authorize(principal, None, "iam:list_users")
+ can_manage_quota = True
+ except IamError:
+ pass
return render_template(
"bucket_detail.html",
@@ -349,10 +391,20 @@ def bucket_detail(bucket_name: str):
bucket_policy=bucket_policy,
can_edit_policy=can_edit_policy,
can_manage_versioning=can_manage_versioning,
+ can_manage_replication=can_manage_replication,
+ can_manage_encryption=can_manage_encryption,
+ is_replication_admin=is_replication_admin,
default_policy=default_policy,
versioning_enabled=versioning_enabled,
replication_rule=replication_rule,
connections=connections,
+ encryption_config=encryption_config,
+ kms_keys=kms_keys,
+ kms_enabled=kms_enabled,
+ encryption_enabled=encryption_enabled,
+ bucket_quota=bucket_quota,
+ bucket_stats=bucket_stats,
+ can_manage_quota=can_manage_quota,
)
@@ -647,9 +699,18 @@ def bulk_download_objects(bucket_name: str):
# But strictly we should check. Let's check.
_authorize_ui(principal, bucket_name, "read", object_key=key)
- path = storage.get_object_path(bucket_name, key)
- # Use the key as the filename in the zip
- zf.write(path, arcname=key)
+ # Check if object is encrypted
+ metadata = storage.get_object_metadata(bucket_name, key)
+ is_encrypted = "x-amz-server-side-encryption" in metadata
+
+ if is_encrypted and hasattr(storage, 'get_object_data'):
+ # Decrypt and add to zip
+ data, _ = storage.get_object_data(bucket_name, key)
+ zf.writestr(key, data)
+ else:
+ # Add unencrypted file directly
+ path = storage.get_object_path(bucket_name, key)
+ zf.write(path, arcname=key)
except (StorageError, IamError):
# Skip files we can't read or don't exist
continue
@@ -691,13 +752,34 @@ def purge_object_versions(bucket_name: str, object_key: str):
@ui_bp.get("/buckets//objects//preview")
def object_preview(bucket_name: str, object_key: str) -> Response:
principal = _current_principal()
+ storage = _storage()
try:
_authorize_ui(principal, bucket_name, "read", object_key=object_key)
- path = _storage().get_object_path(bucket_name, object_key)
+ path = storage.get_object_path(bucket_name, object_key)
+ metadata = storage.get_object_metadata(bucket_name, object_key)
except (StorageError, IamError) as exc:
status = 403 if isinstance(exc, IamError) else 404
return Response(str(exc), status=status)
+
download = request.args.get("download") == "1"
+
+ # Check if object is encrypted and needs decryption
+ is_encrypted = "x-amz-server-side-encryption" in metadata
+ if is_encrypted and hasattr(storage, 'get_object_data'):
+ try:
+ data, _ = storage.get_object_data(bucket_name, object_key)
+ import io
+ import mimetypes
+ mimetype = mimetypes.guess_type(object_key)[0] or "application/octet-stream"
+ return send_file(
+ io.BytesIO(data),
+ mimetype=mimetype,
+ as_attachment=download,
+ download_name=path.name
+ )
+ except StorageError as exc:
+ return Response(f"Decryption failed: {exc}", status=500)
+
return send_file(path, as_attachment=download, download_name=path.name)
@@ -712,12 +794,16 @@ def object_presign(bucket_name: str, object_key: str):
except IamError as exc:
return jsonify({"error": str(exc)}), 403
- connection_url = "http://127.0.0.1:5000"
- url = f"{connection_url}/presign/{bucket_name}/{object_key}"
+ api_base = current_app.config.get("API_BASE_URL") or "http://127.0.0.1:5000"
+ api_base = api_base.rstrip("/")
+ encoded_key = quote(object_key, safe="")
+ url = f"{api_base}/presign/{bucket_name}/{encoded_key}"
+ # Use API base URL for forwarded headers so presigned URLs point to API, not UI
+ parsed_api = urlparse(api_base)
headers = _api_headers()
- headers["X-Forwarded-Host"] = request.host
- headers["X-Forwarded-Proto"] = request.scheme
+ headers["X-Forwarded-Host"] = parsed_api.netloc or "127.0.0.1:5000"
+ headers["X-Forwarded-Proto"] = parsed_api.scheme or "http"
headers["X-Forwarded-For"] = request.remote_addr or "127.0.0.1"
try:
@@ -853,6 +939,127 @@ def update_bucket_versioning(bucket_name: str):
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
+@ui_bp.post("/buckets//quota")
+def update_bucket_quota(bucket_name: str):
+ """Update bucket quota configuration (admin only)."""
+ principal = _current_principal()
+
+ # Quota management is admin-only
+ is_admin = False
+ try:
+ _iam().authorize(principal, None, "iam:list_users")
+ is_admin = True
+ except IamError:
+ pass
+
+ if not is_admin:
+ flash("Only administrators can manage bucket quotas", "danger")
+ return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
+
+ action = request.form.get("action", "set")
+
+ if action == "remove":
+ try:
+ _storage().set_bucket_quota(bucket_name, max_bytes=None, max_objects=None)
+ flash("Bucket quota removed", "info")
+ except StorageError as exc:
+ flash(_friendly_error_message(exc), "danger")
+ return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
+
+ # Parse quota values
+ max_mb_str = request.form.get("max_mb", "").strip()
+ max_objects_str = request.form.get("max_objects", "").strip()
+
+ max_bytes = None
+ max_objects = None
+
+ if max_mb_str:
+ try:
+ max_mb = int(max_mb_str)
+ if max_mb < 1:
+ raise ValueError("Size must be at least 1 MB")
+ max_bytes = max_mb * 1024 * 1024 # Convert MB to bytes
+ except ValueError as exc:
+ flash(f"Invalid size value: {exc}", "danger")
+ return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
+
+ if max_objects_str:
+ try:
+ max_objects = int(max_objects_str)
+ if max_objects < 0:
+ raise ValueError("Object count must be non-negative")
+ except ValueError as exc:
+ flash(f"Invalid object count: {exc}", "danger")
+ return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
+
+ try:
+ _storage().set_bucket_quota(bucket_name, max_bytes=max_bytes, max_objects=max_objects)
+ if max_bytes is None and max_objects is None:
+ flash("Bucket quota removed", "info")
+ else:
+ flash("Bucket quota updated", "success")
+ except StorageError as exc:
+ flash(_friendly_error_message(exc), "danger")
+
+ return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
+
+
+@ui_bp.post("/buckets//encryption")
+def update_bucket_encryption(bucket_name: str):
+ """Update bucket default encryption configuration."""
+ principal = _current_principal()
+ try:
+ _authorize_ui(principal, bucket_name, "write")
+ except IamError as exc:
+ flash(_friendly_error_message(exc), "danger")
+ return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
+
+ action = request.form.get("action", "enable")
+
+ if action == "disable":
+ # Disable encryption
+ try:
+ _storage().set_bucket_encryption(bucket_name, None)
+ flash("Default encryption disabled", "info")
+ except StorageError as exc:
+ flash(_friendly_error_message(exc), "danger")
+ return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
+
+ # Enable or update encryption
+ algorithm = request.form.get("algorithm", "AES256")
+ kms_key_id = request.form.get("kms_key_id", "").strip() or None
+
+ # Validate algorithm
+ if algorithm not in ("AES256", "aws:kms"):
+ flash("Invalid encryption algorithm", "danger")
+ return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
+
+ # Build encryption config following AWS format
+ encryption_config: dict[str, Any] = {
+ "Rules": [
+ {
+ "ApplyServerSideEncryptionByDefault": {
+ "SSEAlgorithm": algorithm,
+ }
+ }
+ ]
+ }
+
+ if algorithm == "aws:kms" and kms_key_id:
+ encryption_config["Rules"][0]["ApplyServerSideEncryptionByDefault"]["KMSMasterKeyID"] = kms_key_id
+
+ try:
+ _storage().set_bucket_encryption(bucket_name, encryption_config)
+ if algorithm == "aws:kms":
+ flash("Default KMS encryption enabled", "success")
+ else:
+ flash("Default AES-256 encryption enabled", "success")
+ except StorageError as exc:
+ flash(_friendly_error_message(exc), "danger")
+
+ return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
+
+
@ui_bp.get("/iam")
def iam_dashboard():
principal = _current_principal()
@@ -1168,17 +1375,52 @@ def delete_connection(connection_id: str):
def update_bucket_replication(bucket_name: str):
principal = _current_principal()
try:
- _authorize_ui(principal, bucket_name, "write")
+ _authorize_ui(principal, bucket_name, "replication")
except IamError as exc:
flash(str(exc), "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="replication"))
+
+ # Check if user is admin (required for create/delete operations)
+ is_admin = False
+ try:
+ _iam().authorize(principal, None, "iam:list_users")
+ is_admin = True
+ except IamError:
+ is_admin = False
action = request.form.get("action")
if action == "delete":
+ # Admin only - remove configuration entirely
+ if not is_admin:
+ flash("Only administrators can remove replication configuration", "danger")
+ return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="replication"))
_replication().delete_rule(bucket_name)
- flash("Replication disabled", "info")
- else:
+ flash("Replication configuration removed", "info")
+ elif action == "pause":
+ # Users can pause - just set enabled=False
+ rule = _replication().get_rule(bucket_name)
+ if rule:
+ rule.enabled = False
+ _replication().set_rule(rule)
+ flash("Replication paused", "info")
+ else:
+ flash("No replication configuration to pause", "warning")
+ elif action == "resume":
+ # Users can resume - just set enabled=True
+ rule = _replication().get_rule(bucket_name)
+ if rule:
+ rule.enabled = True
+ _replication().set_rule(rule)
+ flash("Replication resumed", "success")
+ else:
+ flash("No replication configuration to resume", "warning")
+ elif action == "create":
+ # Admin only - create new configuration
+ if not is_admin:
+ flash("Only administrators can configure replication settings", "danger")
+ return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="replication"))
+
from .replication import REPLICATION_MODE_NEW_ONLY, REPLICATION_MODE_ALL
import time
@@ -1205,6 +1447,8 @@ def update_bucket_replication(bucket_name: str):
flash("Replication configured. Existing objects are being replicated in the background.", "success")
else:
flash("Replication configured. Only new uploads will be replicated.", "success")
+ else:
+ flash("Invalid action", "danger")
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="replication"))
@@ -1214,7 +1458,7 @@ def get_replication_status(bucket_name: str):
"""Async endpoint to fetch replication sync status without blocking page load."""
principal = _current_principal()
try:
- _authorize_ui(principal, bucket_name, "read")
+ _authorize_ui(principal, bucket_name, "replication")
except IamError:
return jsonify({"error": "Access denied"}), 403
@@ -1254,6 +1498,13 @@ def connections_dashboard():
def metrics_dashboard():
principal = _current_principal()
+ # Metrics are restricted to admin users
+ try:
+ _iam().authorize(principal, None, "iam:list_users")
+ except IamError:
+ flash("Access denied: Metrics require admin permissions", "danger")
+ return redirect(url_for("ui.buckets_overview"))
+
cpu_percent = psutil.cpu_percent(interval=0.1)
memory = psutil.virtual_memory()
@@ -1266,13 +1517,16 @@ def metrics_dashboard():
total_objects = 0
total_bytes_used = 0
+ total_versions = 0
# Note: Uses cached stats from storage layer to improve performance
cache_ttl = current_app.config.get("BUCKET_STATS_CACHE_TTL", 60)
for bucket in buckets:
stats = storage.bucket_stats(bucket.name, cache_ttl=cache_ttl)
- total_objects += stats["objects"]
- total_bytes_used += stats["bytes"]
+ # Use totals which include archived versions
+ total_objects += stats.get("total_objects", stats.get("objects", 0))
+ total_bytes_used += stats.get("total_bytes", stats.get("bytes", 0))
+ total_versions += stats.get("version_count", 0)
return render_template(
"metrics.html",
@@ -1293,6 +1547,7 @@ def metrics_dashboard():
app={
"buckets": total_buckets,
"objects": total_objects,
+ "versions": total_versions,
"storage_used": _format_bytes(total_bytes_used),
"storage_raw": total_bytes_used,
}
diff --git a/app/version.py b/app/version.py
index 950456f..4a04f44 100644
--- a/app/version.py
+++ b/app/version.py
@@ -1,7 +1,7 @@
"""Central location for the application version string."""
from __future__ import annotations
-APP_VERSION = "0.1.2"
+APP_VERSION = "0.1.3"
def get_version() -> str:
diff --git a/docs.md b/docs.md
index 016ffbb..eef7ef5 100644
--- a/docs.md
+++ b/docs.md
@@ -80,6 +80,10 @@ The repo now tracks a human-friendly release string inside `app/version.py` (see
| `API_BASE_URL` | `None` | Used by the UI to hit API endpoints (presign/policy). If unset, the UI will auto-detect the host or use `X-Forwarded-*` headers. |
| `AWS_REGION` | `us-east-1` | Region embedded in SigV4 credential scope. |
| `AWS_SERVICE` | `s3` | Service string for SigV4. |
+| `ENCRYPTION_ENABLED` | `false` | Enable server-side encryption support. |
+| `KMS_ENABLED` | `false` | Enable KMS key management for encryption. |
+| `KMS_KEYS_PATH` | `data/kms_keys.json` | Path to store KMS key metadata. |
+| `ENCRYPTION_MASTER_KEY_PATH` | `data/master.key` | Path to the master encryption key file. |
Set env vars (or pass overrides to `create_app`) to point the servers at custom paths.
@@ -102,6 +106,46 @@ The application automatically trusts these headers to generate correct presigned
The API expects every request to include `X-Access-Key` and `X-Secret-Key` headers. The UI persists them in the Flask session after login.
+### Available IAM Actions
+
+| Action | Description | AWS Aliases |
+| --- | --- | --- |
+| `list` | List buckets and objects | `s3:ListBucket`, `s3:ListAllMyBuckets`, `s3:ListBucketVersions`, `s3:ListMultipartUploads`, `s3:ListParts` |
+| `read` | Download objects | `s3:GetObject`, `s3:GetObjectVersion`, `s3:GetObjectTagging`, `s3:HeadObject`, `s3:HeadBucket` |
+| `write` | Upload objects, create buckets | `s3:PutObject`, `s3:CreateBucket`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
+| `delete` | Remove objects and buckets | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteBucket` |
+| `share` | Manage ACLs | `s3:PutObjectAcl`, `s3:PutBucketAcl`, `s3:GetBucketAcl` |
+| `policy` | Manage bucket policies | `s3:PutBucketPolicy`, `s3:GetBucketPolicy`, `s3:DeleteBucketPolicy` |
+| `replication` | Configure and manage replication | `s3:GetReplicationConfiguration`, `s3:PutReplicationConfiguration`, `s3:ReplicateObject`, `s3:ReplicateTags`, `s3:ReplicateDelete` |
+| `iam:list_users` | View IAM users | `iam:ListUsers` |
+| `iam:create_user` | Create IAM users | `iam:CreateUser` |
+| `iam:delete_user` | Delete IAM users | `iam:DeleteUser` |
+| `iam:rotate_key` | Rotate user secrets | `iam:RotateAccessKey` |
+| `iam:update_policy` | Modify user policies | `iam:PutUserPolicy` |
+| `iam:*` | All IAM actions (admin wildcard) | — |
+
+### Example Policies
+
+**Full Control (admin):**
+```json
+[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "replication", "iam:*"]}]
+```
+
+**Read-Only:**
+```json
+[{"bucket": "*", "actions": ["list", "read"]}]
+```
+
+**Single Bucket Access (no listing other buckets):**
+```json
+[{"bucket": "user-bucket", "actions": ["read", "write", "delete"]}]
+```
+
+**Bucket Access with Replication:**
+```json
+[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "replication"]}]
+```
+
## 5. Bucket Policies & Presets
- **Storage**: Policies are persisted in `data/.myfsio.sys/config/bucket_policies.json` under `{"policies": {"bucket": {...}}}`.
@@ -173,9 +217,207 @@ s3.complete_multipart_upload(
)
```
-## 6. Site Replication
+## 7. Encryption
-MyFSIO supports **Site Replication**, allowing you to automatically copy new objects from one MyFSIO instance (Source) to another (Target). This is useful for disaster recovery, data locality, or backups.
+MyFSIO supports **server-side encryption at rest** to protect your data. When enabled, objects are encrypted using AES-256-GCM before being written to disk.
+
+### Encryption Types
+
+| Type | Description |
+|------|-------------|
+| **AES-256 (SSE-S3)** | Server-managed encryption using a local master key |
+| **KMS (SSE-KMS)** | Encryption using customer-managed keys via the built-in KMS |
+
+### Enabling Encryption
+
+#### 1. Set Environment Variables
+
+```powershell
+# PowerShell
+$env:ENCRYPTION_ENABLED = "true"
+$env:KMS_ENABLED = "true" # Optional, for KMS key management
+python run.py
+```
+
+```bash
+# Bash
+export ENCRYPTION_ENABLED=true
+export KMS_ENABLED=true
+python run.py
+```
+
+#### 2. Configure Bucket Default Encryption (UI)
+
+1. Navigate to your bucket in the UI
+2. Click the **Properties** tab
+3. Find the **Default Encryption** card
+4. Click **Enable Encryption**
+5. Choose algorithm:
+ - **AES-256**: Uses the server's master key
+ - **aws:kms**: Uses a KMS-managed key (select from dropdown)
+6. Save changes
+
+Once enabled, all **new objects** uploaded to the bucket will be automatically encrypted.
+
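+The UI form above ultimately stores an AWS-style bucket-encryption document via the storage layer. A minimal sketch of doing the same from Python; the `create_app()` call and the `object_storage` extension key are assumptions about how the app wires things up:
+
+```python
+from app import create_app
+
+app = create_app()
+storage = app.extensions["object_storage"]  # assumed extension key from the app factory
+
+# AWS-style default-encryption document, as built by the UI handler.
+encryption_config = {
+    "Rules": [
+        {
+            "ApplyServerSideEncryptionByDefault": {
+                "SSEAlgorithm": "aws:kms",          # or "AES256" for SSE-S3
+                "KMSMasterKeyID": "my-kms-key-id",  # illustrative key id, only used with aws:kms
+            }
+        }
+    ]
+}
+storage.set_bucket_encryption("my-bucket", encryption_config)
+```
+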
+### KMS Key Management
+
+When `KMS_ENABLED=true`, you can manage encryption keys via the KMS API:
+
+```bash
+# Create a new KMS key
+curl -X POST http://localhost:5000/kms/keys \
+ -H "Content-Type: application/json" \
+ -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
+ -d '{"alias": "my-key", "description": "Production encryption key"}'
+
+# List all keys
+curl http://localhost:5000/kms/keys \
+ -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
+
+# Get key details
+curl http://localhost:5000/kms/keys/{key-id} \
+ -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
+
+# Rotate a key (creates new key material)
+curl -X POST http://localhost:5000/kms/keys/{key-id}/rotate \
+ -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
+
+# Disable/Enable a key
+curl -X POST http://localhost:5000/kms/keys/{key-id}/disable \
+ -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
+
+curl -X POST http://localhost:5000/kms/keys/{key-id}/enable \
+ -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
+
+# Schedule key deletion (30-day waiting period)
+curl -X DELETE http://localhost:5000/kms/keys/{key-id}?waiting_period_days=30 \
+ -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
+```
+
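+The blueprint also exposes cryptographic operations (data-key generation, re-encryption, random bytes). A `requests` sketch for two of them, assuming the blueprint is mounted under `/kms` like the key endpoints above; the ciphertext and key id values are placeholders:
+
+```python
+import requests
+
+BASE = "http://localhost:5000/kms"  # assumed mount point, matching the /kms/keys examples
+HEADERS = {"X-Access-Key": "...", "X-Secret-Key": "..."}
+
+# 32 random bytes, returned base64 encoded under "Plaintext"
+rnd = requests.post(f"{BASE}/generate-random", json={"NumberOfBytes": 32}, headers=HEADERS)
+print(rnd.json()["Plaintext"])
+
+# Re-wrap an existing ciphertext under a different KMS key
+resp = requests.post(
+    f"{BASE}/re-encrypt",
+    json={
+        "CiphertextBlob": "<base64 ciphertext from a previous encrypt call>",
+        "DestinationKeyId": "<target key id>",
+    },
+    headers=HEADERS,
+)
+print(resp.json()["SourceKeyId"], "->", resp.json()["KeyId"])
+```
+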
+### How It Works
+
+1. **Envelope Encryption**: Each object is encrypted with a unique Data Encryption Key (DEK)
+2. **Key Wrapping**: The DEK is encrypted (wrapped) by the master key or KMS key
+3. **Storage**: The encrypted DEK is stored alongside the encrypted object
+4. **Decryption**: On read, the DEK is unwrapped and used to decrypt the object
+
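+The same flow, sketched with the `cryptography` package (already in `requirements.txt`). This is an illustration of envelope encryption only, not the exact `EncryptionManager` implementation; MyFSIO's on-disk layout and key handling differ:
+
+```python
+import os
+from cryptography.hazmat.primitives.ciphers.aead import AESGCM
+
+master_key = AESGCM.generate_key(bit_length=256)  # illustrative; MyFSIO loads this from ENCRYPTION_MASTER_KEY_PATH
+
+def encrypt_object(plaintext: bytes) -> dict:
+    dek = AESGCM.generate_key(bit_length=256)                        # 1. unique data encryption key
+    nonce = os.urandom(12)
+    ciphertext = AESGCM(dek).encrypt(nonce, plaintext, None)
+    wrap_nonce = os.urandom(12)
+    wrapped_dek = AESGCM(master_key).encrypt(wrap_nonce, dek, None)  # 2. wrap the DEK with the master key
+    return {                                                         # 3. persist ciphertext + wrapped DEK together
+        "ciphertext": ciphertext, "nonce": nonce,
+        "wrapped_dek": wrapped_dek, "wrap_nonce": wrap_nonce,
+    }
+
+def decrypt_object(record: dict) -> bytes:
+    dek = AESGCM(master_key).decrypt(record["wrap_nonce"], record["wrapped_dek"], None)  # 4. unwrap the DEK
+    return AESGCM(dek).decrypt(record["nonce"], record["ciphertext"], None)
+```
+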
+### Client-Side Encryption
+
+For additional security, you can use client-side encryption. The `ClientEncryptionHelper` class provides utilities:
+
+```python
+from app.encryption import ClientEncryptionHelper
+
+# Generate a client-side key
+key = ClientEncryptionHelper.generate_key()
+key_b64 = ClientEncryptionHelper.key_to_base64(key)
+
+# Encrypt before upload
+plaintext = b"sensitive data"
+encrypted, metadata = ClientEncryptionHelper.encrypt_for_upload(plaintext, key)
+
+# Upload with metadata headers
+# x-amz-meta-x-amz-key: <wrapped data key, base64>
+# x-amz-meta-x-amz-iv: <initialization vector, base64>
+# x-amz-meta-x-amz-matdesc: <material description JSON>
+
+# Decrypt after download
+decrypted = ClientEncryptionHelper.decrypt_from_download(encrypted, metadata, key)
+```
+
+### Important Notes
+
+- **Existing objects are NOT encrypted** - Only new uploads after enabling encryption are encrypted
+- **Master key security** - The master key file (`master.key`) should be backed up securely and protected
+- **Key rotation** - Rotating a KMS key creates new key material; existing objects remain encrypted with the old material
+- **Disabled keys** - Objects encrypted with a disabled key cannot be decrypted until the key is re-enabled
+- **Deleted keys** - Once a key is deleted (after the waiting period), objects encrypted with it are permanently inaccessible
+
+### Verifying Encryption
+
+To verify an object is encrypted:
+1. Check the raw file in `data/<bucket>/<key>` - it should be unreadable binary (a quick check is sketched below)
+2. Look for `.meta` files containing encryption metadata
+3. Download via the API/UI - the object should be automatically decrypted
+
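+A quick plaintext check for step 1 (path and contents are illustrative):
+
+```python
+from pathlib import Path
+
+raw = Path("data/my-bucket/secret.txt").read_bytes()  # raw object file under data/<bucket>/
+print("NOT encrypted" if b"known plaintext" in raw else "looks encrypted")
+```
+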
+## 8. Bucket Quotas
+
+MyFSIO supports **storage quotas** to limit how much data a bucket can hold. Quotas are enforced on uploads and multipart completions.
+
+### Quota Types
+
+| Limit | Description |
+|-------|-------------|
+| **Max Size (MB)** | Maximum total storage in megabytes (includes current objects + archived versions) |
+| **Max Objects** | Maximum number of objects (includes current objects + archived versions) |
+
+### Managing Quotas (Admin Only)
+
+Quota management is restricted to administrators (users with `iam:*` or `iam:list_users` permissions).
+
+#### Via UI
+
+1. Navigate to your bucket in the UI
+2. Click the **Properties** tab
+3. Find the **Storage Quota** card
+4. Enter limits:
+ - **Max Size (MB)**: Leave empty for unlimited
+ - **Max Objects**: Leave empty for unlimited
+5. Click **Update Quota**
+
+To remove a quota, click **Remove Quota**.
+
+#### Via API
+
+```bash
+# Set quota (max 100MB, max 1000 objects)
+curl -X PUT "http://localhost:5000/bucket/?quota" \
+ -H "Content-Type: application/json" \
+ -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
+ -d '{"max_bytes": 104857600, "max_objects": 1000}'
+
+# Get current quota
+curl "http://localhost:5000/bucket/?quota" \
+ -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
+
+# Remove quota
+curl -X DELETE "http://localhost:5000/bucket/?quota" \
+  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
+```
+
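+These HTTP calls map onto the storage layer's `set_bucket_quota` / `check_quota` helpers. A sketch of driving them directly from Python; the `create_app()` call, the `object_storage` extension key, and the positional `put_object(bucket, key, stream)` order are assumptions:
+
+```python
+from app import create_app
+from app.storage import QuotaExceededError
+
+app = create_app()
+storage = app.extensions["object_storage"]  # assumed extension key
+
+storage.set_bucket_quota("my-bucket", max_bytes=100 * 1024 * 1024, max_objects=1000)
+
+check = storage.check_quota("my-bucket", additional_bytes=5 * 1024 * 1024, additional_objects=1)
+if not check["allowed"]:
+    print(check["message"])
+
+try:
+    with open("big.bin", "rb") as fh:
+        storage.put_object("my-bucket", "big.bin", fh)  # positional args: bucket, key, stream
+except QuotaExceededError as exc:
+    print("upload rejected:", exc, exc.usage)
+```
+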
+### Quota Behavior
+
+- **Version Counting**: When versioning is enabled, archived versions count toward the quota
+- **Enforcement Points**: Quotas are checked during `PUT` object and `CompleteMultipartUpload` operations
+- **Error Response**: When quota is exceeded, the API returns `HTTP 403` with error code `QuotaExceeded`
+- **Visibility**: All users can view quota usage in the bucket detail page, but only admins can modify quotas
+
+### Example Error
+
+```xml
+<Error>
+  <Code>QuotaExceeded</Code>
+  <Message>Bucket quota exceeded: storage limit reached</Message>
+  <Resource>my-bucket</Resource>
+</Error>
+```
+
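+A boto3 client surfaces this as a `ClientError` whose error code is `QuotaExceeded`; a sketch (endpoint and credentials are illustrative):
+
+```python
+import boto3
+from botocore.exceptions import ClientError
+
+s3 = boto3.client(
+    "s3",
+    endpoint_url="http://localhost:5000",
+    aws_access_key_id="...",
+    aws_secret_access_key="...",
+)
+
+try:
+    s3.put_object(Bucket="my-bucket", Key="big.bin", Body=b"0" * (200 * 1024 * 1024))
+except ClientError as exc:
+    if exc.response["Error"]["Code"] == "QuotaExceeded":
+        print("quota exceeded:", exc.response["Error"]["Message"])
+    else:
+        raise
+```
+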
+## 9. Site Replication
+
+### Permission Model
+
+Replication uses a two-tier permission system:
+
+| Role | Capabilities |
+|------|--------------|
+| **Admin** (users with `iam:*` permissions) | Create/delete replication rules, configure connections and target buckets |
+| **Users** (with `replication` permission) | Enable/disable (pause/resume) existing replication rules |
+
+> **Note:** The Replication tab is hidden for users without the `replication` permission on the bucket.
+
+This separation allows administrators to pre-configure where data should replicate, while allowing authorized users to toggle replication on/off without accessing connection credentials.
### Architecture
@@ -253,13 +495,15 @@ Now, configure the primary instance to replicate to the target.
- **Secret Key**: The secret you generated on the Target.
- Click **Add Connection**.
-3. **Enable Replication**:
+3. **Enable Replication** (Admin):
- Navigate to **Buckets** and select the source bucket.
- Switch to the **Replication** tab.
- Select the `Secondary Site` connection.
- Enter the target bucket name (`backup-bucket`).
- Click **Enable Replication**.
+ Once configured, users with `replication` permission on this bucket can pause/resume replication without needing access to connection details.
+
### Verification
1. Upload a file to the source bucket.
@@ -270,6 +514,18 @@ Now, configure the primary instance to replicate to the target.
aws --endpoint-url http://target-server:5002 s3 ls s3://backup-bucket
```
+### Pausing and Resuming Replication
+
+Users with the `replication` permission (but not admin rights) can pause and resume existing replication rules:
+
+1. Navigate to the bucket's **Replication** tab.
+2. If replication is **Active**, click **Pause Replication** to temporarily stop syncing.
+3. If replication is **Paused**, click **Resume Replication** to continue syncing.
+
+When paused, new objects uploaded to the source will not replicate until replication is resumed. Objects uploaded while paused will be replicated once resumed.
+
+> **Note:** Only admins can create new replication rules, change the target connection/bucket, or delete rules entirely.
+
### Bidirectional Replication (Active-Active)
To set up two-way replication (Server A ↔ Server B):
@@ -285,7 +541,7 @@ To set up two-way replication (Server A ↔ Server B):
**Note**: Deleting a bucket will automatically remove its associated replication configuration.
-## 7. Running Tests
+## 11. Running Tests
```bash
pytest -q
@@ -295,7 +551,7 @@ The suite now includes a boto3 integration test that spins up a live HTTP server
The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, and regression tests for anonymous reads when a Public policy is attached.
-## 8. Troubleshooting
+## 12. Troubleshooting
| Symptom | Likely Cause | Fix |
| --- | --- | --- |
@@ -304,7 +560,7 @@ The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, an
| Presign modal errors with 403 | IAM user lacks `read/write/delete` for target bucket or bucket policy denies | Update IAM inline policies or remove conflicting deny statements. |
| Large upload rejected immediately | File exceeds `MAX_UPLOAD_SIZE` | Increase env var or shrink object. |
-## 9. API Matrix
+## 13. API Matrix
```
GET / # List buckets
@@ -318,9 +574,11 @@ POST /presign// # Generate SigV4 URL
GET /bucket-policy/ # Fetch policy
PUT /bucket-policy/ # Upsert policy
DELETE /bucket-policy/ # Delete policy
+GET /?quota # Get bucket quota
+PUT /?quota # Set bucket quota (admin only)
```
-## 10. Next Steps
+## 14. Next Steps
- Tailor IAM + policy JSON files for team-ready presets.
- Wrap `run_api.py` with gunicorn or another WSGI server for long-running workloads.
diff --git a/requirements.txt b/requirements.txt
index 7c2c75d..544968d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,10 @@
-Flask>=3.0.2
-Flask-Limiter>=3.5.0
-Flask-Cors>=4.0.0
-Flask-WTF>=1.2.1
-pytest>=7.4
-requests>=2.31
-boto3>=1.34
-waitress>=2.1.2
-psutil>=5.9.0
+Flask>=3.1.2
+Flask-Limiter>=4.1.0
+Flask-Cors>=6.0.1
+Flask-WTF>=1.2.2
+pytest>=9.0.1
+requests>=2.32.5
+boto3>=1.42.1
+waitress>=3.0.2
+psutil>=7.1.3
+cryptography>=46.0.3
\ No newline at end of file
diff --git a/static/css/main.css b/static/css/main.css
index e603255..a75838a 100644
--- a/static/css/main.css
+++ b/static/css/main.css
@@ -396,12 +396,25 @@ code {
.preview-card { top: 1rem; }
.preview-stage {
- min-height: 260px;
background-color: var(--myfsio-preview-bg);
overflow: hidden;
border-color: var(--myfsio-card-border) !important;
}
+.preview-stage:has(#preview-placeholder:not(.d-none)) {
+ min-height: 0;
+}
+
+.preview-stage:has(#preview-image:not(.d-none)),
+.preview-stage:has(#preview-video:not(.d-none)),
+.preview-stage:has(#preview-iframe:not(.d-none)) {
+ min-height: 200px;
+}
+
+#preview-placeholder {
+ padding: 2rem 1rem;
+}
+
.upload-progress-stack {
display: flex;
flex-direction: column;
@@ -928,6 +941,19 @@ pre code {
background-color: var(--myfsio-hover-bg) !important;
}
+.folder-row {
+ background-color: var(--myfsio-section-bg);
+ transition: background-color 0.15s ease;
+}
+
+.folder-row:hover {
+ background-color: var(--myfsio-hover-bg) !important;
+}
+
+.folder-row td:first-child {
+ padding-left: 0.5rem;
+}
+
.btn-group-sm .btn {
padding: 0.25rem 0.6rem;
font-size: 0.875rem;
diff --git a/templates/base.html b/templates/base.html
index fa12e58..67f71b5 100644
--- a/templates/base.html
+++ b/templates/base.html
@@ -51,22 +51,18 @@
Buckets
+ {% if can_manage_iam %}
-
- IAM
- {% if not can_manage_iam %}Restricted {% endif %}
-
+ IAM
-
- Connections
- {% if not can_manage_iam %}Restricted {% endif %}
-
+ Connections
Metrics
{% endif %}
+ {% endif %}
{% if principal %}
Docs
diff --git a/templates/bucket_detail.html b/templates/bucket_detail.html
index 47f95f4..c56b059 100644
--- a/templates/bucket_detail.html
+++ b/templates/bucket_detail.html
@@ -59,11 +59,13 @@
Permissions
+ {% if can_manage_replication %}
Replication
+ {% endif %}
@@ -84,7 +86,7 @@
Upload
-
+
@@ -100,6 +102,19 @@
Download
+
+
+
+
+
+
+
+
+ Root
+
+
+
+
-
-
+
-
-
-
+
+
+
@@ -592,6 +607,281 @@
{% endif %}
+
+
+ {% if encryption_enabled %}
+
+
+
+ {% set enc_rules = encryption_config.get('Rules', []) %}
+ {% set enc_default = enc_rules[0].get('ApplyServerSideEncryptionByDefault', {}) if enc_rules else {} %}
+ {% set enc_algorithm = enc_default.get('SSEAlgorithm', '') %}
+ {% set enc_kms_key = enc_default.get('KMSMasterKeyID', '') %}
+
+ {% if enc_algorithm %}
+
+
+
+
+
+
+
Default encryption enabled
+
+ {% if enc_algorithm == 'aws:kms' %}
+ Objects are encrypted with AWS KMS (SSE-KMS).
+ {% if enc_kms_key %}Key: {{ enc_kms_key[:20] }}...{% endif %}
+ {% else %}
+ Objects are encrypted with AES-256 (SSE-S3).
+ {% endif %}
+
+
+
+ {% else %}
+
+
+
+
+
+
+
+
Default encryption disabled
+
Objects are stored without default encryption. You can enable server-side encryption below.
+
+
+ {% endif %}
+
+ {% if can_manage_encryption %}
+
+ {% else %}
+
+
+
+
+
You do not have permission to modify encryption settings for this bucket.
+
+ {% endif %}
+
+
+ {% endif %}
+
+
+
+
+
+ {% set max_bytes = bucket_quota.get('max_bytes') %}
+ {% set max_objects = bucket_quota.get('max_objects') %}
+ {% set has_quota = max_bytes is not none or max_objects is not none %}
+ {% set current_objects = bucket_stats.get('objects', 0) %}
+ {% set version_count = bucket_stats.get('version_count', 0) %}
+ {% set total_objects = bucket_stats.get('total_objects', current_objects) %}
+ {% set current_bytes = bucket_stats.get('bytes', 0) %}
+ {% set version_bytes = bucket_stats.get('version_bytes', 0) %}
+ {% set total_bytes = bucket_stats.get('total_bytes', current_bytes) %}
+
+
+
+
Current Usage
+
+
+
+
{{ total_objects }}
+
Total Objects
+ {% if max_objects is not none %}
+
+ {% set obj_pct = (total_objects / max_objects * 100) | int if max_objects > 0 else 0 %}
+
+
+
{{ obj_pct }}% of {{ max_objects }} limit
+ {% else %}
+
No limit
+ {% endif %}
+ {% if version_count > 0 %}
+
+ ({{ current_objects }} current + {{ version_count }} versions)
+
+ {% endif %}
+
+
+
+
+
{{ total_bytes | filesizeformat }}
+
Total Storage
+ {% if max_bytes is not none %}
+
+ {% set bytes_pct = (total_bytes / max_bytes * 100) | int if max_bytes > 0 else 0 %}
+
+
+
{{ bytes_pct }}% of {{ max_bytes | filesizeformat }} limit
+ {% else %}
+
No limit
+ {% endif %}
+ {% if version_bytes > 0 %}
+
+ ({{ current_bytes | filesizeformat }} current + {{ version_bytes | filesizeformat }} versions)
+
+ {% endif %}
+
+
+
+
+
+ {% if has_quota %}
+
+
+
+
+
+
+
Storage quota enabled
+
+ {% if max_bytes is not none and max_objects is not none %}
+ Limited to {{ max_bytes | filesizeformat }} and {{ max_objects }} objects.
+ {% elif max_bytes is not none %}
+ Limited to {{ max_bytes | filesizeformat }} storage.
+ {% else %}
+ Limited to {{ max_objects }} objects.
+ {% endif %}
+
+
+
+ {% else %}
+
+
+
+
+
+
+
+
No storage quota
+
This bucket has no storage or object count limits. Set limits below to control usage.
+
+
+ {% endif %}
+
+ {% if can_manage_quota %}
+
+ {% else %}
+
+
+
+
+
You do not have permission to modify quota settings for this bucket.
+
+ {% endif %}
+
+
@@ -641,10 +931,40 @@
{% endif %}
+
+ {% if encryption_enabled %}
+
+
+
+
+
+
+ About Encryption
+
+
+ Server-side encryption protects data at rest. Objects are encrypted when stored and decrypted when retrieved.
+
+
+
Encryption Types
+
+ SSE-S3 (AES-256) — S3-managed keys, automatic encryption
+ SSE-KMS — KMS-managed keys with audit trail and key rotation
+
+
+
How It Works
+
+ New objects are encrypted using the default setting
+ Existing objects are not automatically re-encrypted
+ Decryption is transparent during download
+
+
+
+ {% endif %}
+ {% if can_manage_replication %}
@@ -658,8 +978,8 @@
Replication Configuration
- {% if replication_rule %}
-
+ {% if replication_rule and replication_rule.enabled %}
+
@@ -817,17 +1137,103 @@
Refresh
+
+
+ {% if is_replication_admin %}
+
- Disable Replication
+ Remove Configuration
+ {% endif %}
+
+
+ {% elif replication_rule and not replication_rule.enabled %}
+
+
+
+
+
+
+ Replication Paused —
+ Replication is configured but currently paused. New uploads will not be replicated until resumed.
+
+
+
+
+
Replication Target
+
+
+
+
+
+ {% set target_conn = connections | selectattr("id", "equalto", replication_rule.target_connection_id) | first %}
+
{{ target_conn.name if target_conn else 'Unknown Connection' }}
+
+
+
+
+
+ {{ replication_rule.target_bucket }}
+
+
+
+
+
+
+
+
+
+
+ {% if is_replication_admin %}
+
+
+
+
+
+
+ Remove Configuration
+
+ {% endif %}
{% else %}
-
+
+ {% if is_replication_admin %}
Set Up Replication
Automatically copy new objects from this bucket to a bucket in another S3-compatible service.
+ {% else %}
+
Replication Not Configured
+
An administrator needs to configure replication settings for this bucket before you can enable it.
+ {% endif %}
- {% if connections %}
+ {% if is_replication_admin and connections %}
- {% else %}
+ {% elif is_replication_admin %}
+ {% endif %}
@@ -1022,18 +1434,18 @@
>
-
Upload files to {{ bucket_name }}. Leave object key blank to use the filename.
+
Upload files to {{ bucket_name }}. You can select multiple files at once.
-
Select file
-
-
Select a file from your device. Files ≥ 8 MB automatically switch to multipart uploads.
+
Select files
+
+
Select one or more files from your device. Files ≥ 8 MB automatically switch to multipart uploads.
Drag & drop files here
-
or click to browse
-
No file selected
+
or click to browse (multiple files supported)
+
No files selected
@@ -1051,12 +1463,17 @@
-
Object key
-
-
Leave blank to reuse the original filename.
+
+
Object key
+
+
Leave blank to reuse the original filename. Applies only when uploading a single file.
+
+
Key prefix (optional)
+
+
Add a prefix to all uploaded files (e.g., folder/subfolder/).
Metadata (JSON)
-
Store custom key/value pairs alongside the object.
+
Store custom key/value pairs alongside each object.
@@ -1064,16 +1481,44 @@
+
+
+
+ Uploading files...
+ 0/0
+
+
+
+
+
+
+
+
+
+
+
0 file(s) uploaded successfully
+
+
+
+
+
+
+
0 file(s) failed to upload
+
+
+
@@ -1443,6 +1888,277 @@
if (generatePresignButton) generatePresignButton.disabled = true;
if (downloadButton) downloadButton.classList.add('disabled');
+ // ========== Folder Navigation ==========
+ const folderBreadcrumb = document.getElementById('folder-breadcrumb');
+ const objectsTableBody = document.querySelector('#objects-table tbody');
+ let currentPrefix = '';
+ let allObjects = []; // Store all object data for folder navigation
+
+ // Collect all object data from the table rows
+ rows.forEach(row => {
+ allObjects.push({
+ key: row.dataset.key,
+ size: row.dataset.size,
+ lastModified: row.dataset.lastModified,
+ etag: row.dataset.etag,
+ previewUrl: row.dataset.previewUrl,
+ downloadUrl: row.dataset.downloadUrl,
+ presignEndpoint: row.dataset.presignEndpoint,
+ deleteEndpoint: row.dataset.deleteEndpoint,
+ metadata: row.dataset.metadata,
+ versionsEndpoint: row.dataset.versionsEndpoint,
+ restoreTemplate: row.dataset.restoreTemplate,
+ element: row
+ });
+ });
+
+ // Check if we have any prefixed objects (folders)
+ const hasFolders = allObjects.some(obj => obj.key.includes('/'));
+
+ // Get unique folder prefixes at a given level
+ const getFoldersAtPrefix = (prefix) => {
+ const folders = new Set();
+ const files = [];
+
+ allObjects.forEach(obj => {
+ const key = obj.key;
+ if (!key.startsWith(prefix)) return;
+
+ const remainder = key.slice(prefix.length);
+ const slashIndex = remainder.indexOf('/');
+
+ if (slashIndex === -1) {
+ // This is a file at this level
+ files.push(obj);
+ } else {
+ // This is a folder
+ const folderName = remainder.slice(0, slashIndex + 1);
+ folders.add(prefix + folderName);
+ }
+ });
+
+ return { folders: Array.from(folders).sort(), files };
+ };
+
+ // Count objects in a folder
+ const countObjectsInFolder = (folderPrefix) => {
+ return allObjects.filter(obj => obj.key.startsWith(folderPrefix)).length;
+ };
+
+ // Render breadcrumb
+ const renderBreadcrumb = (prefix) => {
+ if (!folderBreadcrumb) return;
+
+ if (!prefix && !hasFolders) {
+ folderBreadcrumb.classList.add('d-none');
+ return;
+ }
+
+ folderBreadcrumb.classList.remove('d-none');
+ const ol = folderBreadcrumb.querySelector('ol');
+ ol.innerHTML = '';
+
+ // Root item
+ const rootLi = document.createElement('li');
+ rootLi.className = 'breadcrumb-item';
+ if (!prefix) {
+ rootLi.classList.add('active');
+ rootLi.setAttribute('aria-current', 'page');
+ rootLi.innerHTML = `
+
+
+
+ Root
+ `;
+ } else {
+ rootLi.innerHTML = `
+
+
+
+
+ Root
+
+ `;
+ }
+ ol.appendChild(rootLi);
+
+ // Build path segments
+ if (prefix) {
+ const parts = prefix.split('/').filter(Boolean);
+ let accumulated = '';
+ parts.forEach((part, index) => {
+ accumulated += part + '/';
+ const li = document.createElement('li');
+ li.className = 'breadcrumb-item';
+
+ if (index === parts.length - 1) {
+ li.classList.add('active');
+ li.setAttribute('aria-current', 'page');
+ li.textContent = part;
+ } else {
+ const a = document.createElement('a');
+ a.href = '#';
+ a.className = 'text-decoration-none';
+ a.dataset.folderNav = accumulated;
+ a.textContent = part;
+ li.appendChild(a);
+ }
+ ol.appendChild(li);
+ });
+ }
+
+ // Add click handlers
+ ol.querySelectorAll('[data-folder-nav]').forEach(link => {
+ link.addEventListener('click', (e) => {
+ e.preventDefault();
+ navigateToFolder(link.dataset.folderNav);
+ });
+ });
+ };
+
+ // Get all objects inside a folder (for bulk selection)
+ const getObjectsInFolder = (folderPrefix) => {
+ return allObjects.filter(obj => obj.key.startsWith(folderPrefix));
+ };
+
+ // Create folder row element
+ const createFolderRow = (folderPath) => {
+ const folderName = folderPath.slice(currentPrefix.length).replace(/\/$/, '');
+ const objectCount = countObjectsInFolder(folderPath);
+
+ const tr = document.createElement('tr');
+ tr.className = 'folder-row';
+ tr.dataset.folderPath = folderPath;
+ tr.style.cursor = 'pointer';
+
+ tr.innerHTML = `
+
+
+
+
+
+
+
+
+
${escapeHtml(folderName)}/
+
+ ${objectCount} object${objectCount !== 1 ? 's' : ''}
+
+
+ —
+
+
+
+
+
+
+
+
+ `;
+
+ // Handle folder checkbox
+ const checkbox = tr.querySelector('[data-folder-select]');
+ checkbox?.addEventListener('change', (e) => {
+ e.stopPropagation();
+ const folderObjects = getObjectsInFolder(folderPath);
+ folderObjects.forEach(obj => {
+ const objCheckbox = obj.element.querySelector('[data-object-select]');
+ if (objCheckbox) {
+ objCheckbox.checked = checkbox.checked;
+ }
+ toggleRowSelection(obj.element, checkbox.checked);
+ });
+ });
+
+ tr.addEventListener('click', (e) => {
+ if (e.target.closest('[data-folder-select]') || e.target.closest('button')) return;
+ navigateToFolder(folderPath);
+ });
+
+ return tr;
+ };
+
+ // Navigate to a folder
+ const navigateToFolder = (prefix) => {
+ currentPrefix = prefix;
+ renderBreadcrumb(prefix);
+ renderObjectsView();
+ // Clear selection when navigating
+ selectedRows.clear();
+ // Defer updateBulkDeleteState call to ensure it's defined
+ if (typeof updateBulkDeleteState === 'function') {
+ updateBulkDeleteState();
+ }
+ // Clear preview
+ if (previewPanel) previewPanel.classList.add('d-none');
+ if (previewEmpty) previewEmpty.classList.remove('d-none');
+ activeRow = null;
+ };
+
+ // Render objects view based on current prefix
+ const renderObjectsView = () => {
+ if (!objectsTableBody) return;
+
+ const { folders, files } = getFoldersAtPrefix(currentPrefix);
+
+ // Clear table
+ objectsTableBody.innerHTML = '';
+
+ // Add folder rows first
+ folders.forEach(folderPath => {
+ objectsTableBody.appendChild(createFolderRow(folderPath));
+ });
+
+ // Add file rows
+ files.forEach(obj => {
+ objectsTableBody.appendChild(obj.element);
+ obj.element.style.display = '';
+ // Update displayed key to show just filename when inside a folder
+ const keyCell = obj.element.querySelector('.object-key .fw-medium');
+ if (keyCell && currentPrefix) {
+ const displayName = obj.key.slice(currentPrefix.length);
+ keyCell.textContent = displayName;
+ keyCell.closest('.object-key').title = obj.key; // Full path in tooltip
+ } else if (keyCell) {
+ keyCell.textContent = obj.key; // Reset to full key at root
+ }
+ });
+
+ // Hide files not in current view
+ allObjects.forEach(obj => {
+ if (!files.includes(obj)) {
+ obj.element.style.display = 'none';
+ }
+ });
+
+ // Show empty state if no content
+ if (folders.length === 0 && files.length === 0) {
+ const emptyRow = document.createElement('tr');
+ emptyRow.innerHTML = `
+
+
+
+
Empty folder
+
This folder contains no objects.
+
+
+ `;
+ objectsTableBody.appendChild(emptyRow);
+ }
+
+ // Update select all checkbox state - deferred to ensure function is defined
+ if (typeof updateBulkDeleteState === 'function') {
+ updateBulkDeleteState();
+ }
+ };
+
+ // Folder view initialization moved after updateBulkDeleteState is defined
+ // ========== End Folder Navigation ==========
+
const showMessage = ({ title = 'Notice', body = '', bodyHtml = null, variant = 'info', actionText = null, onAction = null }) => {
if (!messageModal) {
window.alert(body || title);
@@ -1589,10 +2305,15 @@
bulkDeleteConfirm.disabled = selectedCount === 0 || bulkDeleting;
}
if (selectAllCheckbox) {
- const total = rows.length;
+ // Count only visible rows in current folder view
+ const visibleRows = hasFolders
+ ? allObjects.filter(obj => obj.key.startsWith(currentPrefix) && !obj.key.slice(currentPrefix.length).includes('/')).map(obj => obj.element)
+ : Array.from(rows);
+ const total = visibleRows.filter(r => r.style.display !== 'none').length;
+ const visibleSelectedCount = visibleRows.filter(r => r.style.display !== 'none' && selectedRows.has(r.dataset.key)).length;
selectAllCheckbox.disabled = total === 0;
- selectAllCheckbox.checked = selectedCount > 0 && selectedCount === total && total > 0;
- selectAllCheckbox.indeterminate = selectedCount > 0 && selectedCount < total;
+ selectAllCheckbox.checked = visibleSelectedCount > 0 && visibleSelectedCount === total && total > 0;
+ selectAllCheckbox.indeterminate = visibleSelectedCount > 0 && visibleSelectedCount < total;
}
};
@@ -2206,15 +2927,49 @@
updateBulkDeleteState();
+ // Initialize folder view if there are folders (must be after updateBulkDeleteState is defined)
+ if (hasFolders) {
+ renderBreadcrumb('');
+ renderObjectsView();
+ }
+
bulkDeleteButton?.addEventListener('click', () => openBulkDeleteModal());
bulkDeleteConfirm?.addEventListener('click', () => performBulkDelete());
document.getElementById('object-search')?.addEventListener('input', (event) => {
const term = event.target.value.toLowerCase();
- rows.forEach((row) => {
- const key = row.dataset.key.toLowerCase();
- row.style.display = key.includes(term) ? '' : 'none';
- });
+
+ if (hasFolders) {
+ // With folder navigation: re-render view with search filter
+ const { folders, files } = getFoldersAtPrefix(currentPrefix);
+ const tbody = objectsTableBody;
+
+ // Clear and re-add matching content
+ tbody.innerHTML = '';
+
+ // Filter and add matching folders
+ folders.forEach(folderPath => {
+ const folderName = folderPath.slice(currentPrefix.length).replace(/\/$/, '').toLowerCase();
+ if (folderName.includes(term)) {
+ tbody.appendChild(createFolderRow(folderPath));
+ }
+ });
+
+ // Filter and add matching files
+ files.forEach(obj => {
+ const keyName = obj.key.slice(currentPrefix.length).toLowerCase();
+ if (keyName.includes(term)) {
+ tbody.appendChild(obj.element);
+ obj.element.style.display = '';
+ }
+ });
+ } else {
+ // Original behavior without folders
+ rows.forEach((row) => {
+ const key = row.dataset.key.toLowerCase();
+ row.style.display = key.includes(term) ? '' : 'none';
+ });
+ }
});
refreshVersionsButton?.addEventListener('click', () => {
@@ -2247,36 +3002,284 @@
if (!presignLink?.value) {
return;
}
- try {
- await navigator.clipboard.writeText(presignLink.value);
+
+ // Helper function for fallback copy
+ const fallbackCopy = (text) => {
+ const textArea = document.createElement('textarea');
+ textArea.value = text;
+ textArea.style.position = 'fixed';
+ textArea.style.left = '-999999px';
+ textArea.style.top = '-999999px';
+ document.body.appendChild(textArea);
+ textArea.focus();
+ textArea.select();
+ let success = false;
+ try {
+ success = document.execCommand('copy');
+ } catch (err) {
+ success = false;
+ }
+ textArea.remove();
+ return success;
+ };
+
+ let copied = false;
+
+ // Try modern clipboard API first
+ if (navigator.clipboard && window.isSecureContext) {
+ try {
+ await navigator.clipboard.writeText(presignLink.value);
+ copied = true;
+ } catch (error) {
+ // Fall through to fallback
+ }
+ }
+
+ // Fallback for non-secure contexts
+ if (!copied) {
+ copied = fallbackCopy(presignLink.value);
+ }
+
+ if (copied) {
copyPresignLink.textContent = 'Copied!';
window.setTimeout(() => {
copyPresignLink.textContent = copyPresignDefaultLabel;
}, 1500);
- } catch (error) {
- if (window.showToast) {
- window.showToast('Unable to copy link to clipboard.', 'Copy failed', 'warning');
- } else {
- alert('Unable to copy link to clipboard.');
- }
+ } else {
+ showMessage({ title: 'Copy Failed', body: 'Unable to copy link to clipboard. Please select the link and copy manually.', variant: 'warning' });
}
});
if (uploadForm && uploadFileInput) {
+ const uploadSubmitBtn = document.getElementById('uploadSubmitBtn');
+ const uploadCancelBtn = document.getElementById('uploadCancelBtn');
+ const uploadBtnText = document.getElementById('uploadBtnText');
+ const bulkUploadProgress = document.getElementById('bulkUploadProgress');
+ const bulkUploadStatus = document.getElementById('bulkUploadStatus');
+ const bulkUploadCounter = document.getElementById('bulkUploadCounter');
+ const bulkUploadProgressBar = document.getElementById('bulkUploadProgressBar');
+ const bulkUploadCurrentFile = document.getElementById('bulkUploadCurrentFile');
+ const bulkUploadResults = document.getElementById('bulkUploadResults');
+ const bulkUploadSuccessAlert = document.getElementById('bulkUploadSuccessAlert');
+ const bulkUploadErrorAlert = document.getElementById('bulkUploadErrorAlert');
+ const bulkUploadSuccessCount = document.getElementById('bulkUploadSuccessCount');
+ const bulkUploadErrorCount = document.getElementById('bulkUploadErrorCount');
+ const bulkUploadErrorList = document.getElementById('bulkUploadErrorList');
+ const uploadKeyPrefix = document.getElementById('uploadKeyPrefix');
+ const singleFileOptions = document.getElementById('singleFileOptions');
+ let isUploading = false;
+
const refreshUploadDropLabel = () => {
if (!uploadDropZoneLabel) return;
const files = uploadFileInput.files;
if (!files || files.length === 0) {
uploadDropZoneLabel.textContent = 'No files selected';
+ if (singleFileOptions) singleFileOptions.classList.remove('d-none');
return;
}
uploadDropZoneLabel.textContent = files.length === 1 ? files[0].name : `${files.length} files selected`;
+ // Hide single file object key option when multiple files selected
+ if (singleFileOptions) {
+ singleFileOptions.classList.toggle('d-none', files.length > 1);
+ }
+ };
+
+ const updateUploadBtnText = () => {
+ if (!uploadBtnText) return;
+ const files = uploadFileInput.files;
+ if (!files || files.length <= 1) {
+ uploadBtnText.textContent = 'Upload';
+ } else {
+ uploadBtnText.textContent = `Upload ${files.length} files`;
+ }
+ };
+
+ const resetUploadUI = () => {
+ if (bulkUploadProgress) bulkUploadProgress.classList.add('d-none');
+ if (bulkUploadResults) bulkUploadResults.classList.add('d-none');
+ if (bulkUploadSuccessAlert) bulkUploadSuccessAlert.classList.remove('d-none');
+ if (bulkUploadErrorAlert) bulkUploadErrorAlert.classList.add('d-none');
+ if (bulkUploadErrorList) bulkUploadErrorList.innerHTML = '';
+ if (uploadSubmitBtn) uploadSubmitBtn.disabled = false;
+ if (uploadFileInput) uploadFileInput.disabled = false;
+ isUploading = false;
+ };
+
+ const uploadSingleFile = async (file, keyPrefix = '', metadata = null) => {
+ const formData = new FormData();
+ formData.append('object', file);
+ const objectKey = keyPrefix ? `${keyPrefix}${file.name}` : file.name;
+ formData.append('object_key', objectKey);
+ if (metadata) {
+ formData.append('metadata', JSON.stringify(metadata));
+ }
+ // Get CSRF token
+ const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;
+ if (csrfToken) {
+ formData.append('csrf_token', csrfToken);
+ }
+
+ const response = await fetch(uploadForm.action, {
+ method: 'POST',
+ body: formData,
+ headers: {
+ 'X-Requested-With': 'XMLHttpRequest'
+ }
+ });
+
+ const data = await response.json().catch(() => ({}));
+ if (!response.ok || data.status === 'error') {
+ throw new Error(data.message || 'Upload failed');
+ }
+ return data;
+ };
+
+ const performBulkUpload = async (files) => {
+ if (isUploading || !files || files.length === 0) return;
+
+ isUploading = true;
+ const keyPrefix = (uploadKeyPrefix?.value || '').trim();
+ const metadataRaw = uploadForm.querySelector('textarea[name="metadata"]')?.value?.trim();
+ let metadata = null;
+ if (metadataRaw) {
+ try {
+ metadata = JSON.parse(metadataRaw);
+ } catch {
+ showMessage({ title: 'Invalid metadata', body: 'Metadata must be valid JSON.', variant: 'danger' });
+ resetUploadUI();
+ return;
+ }
+ }
+
+ // Show progress UI
+ if (bulkUploadProgress) bulkUploadProgress.classList.remove('d-none');
+ if (bulkUploadResults) bulkUploadResults.classList.add('d-none');
+ if (uploadSubmitBtn) uploadSubmitBtn.disabled = true;
+ if (uploadFileInput) uploadFileInput.disabled = true;
+
+ const successFiles = [];
+ const errorFiles = [];
+ const total = files.length;
+
+ for (let i = 0; i < total; i++) {
+ const file = files[i];
+ const current = i + 1;
+
+ // Update progress
+ if (bulkUploadCounter) bulkUploadCounter.textContent = `${current}/${total}`;
+ if (bulkUploadCurrentFile) bulkUploadCurrentFile.textContent = `Uploading: ${file.name}`;
+ if (bulkUploadProgressBar) {
+ const percent = Math.round((current / total) * 100);
+ bulkUploadProgressBar.style.width = `${percent}%`;
+ }
+
+ try {
+ await uploadSingleFile(file, keyPrefix, metadata);
+ successFiles.push(file.name);
+ } catch (error) {
+ errorFiles.push({ name: file.name, error: error.message || 'Unknown error' });
+ }
+ }
+
+ // Show results
+ if (bulkUploadProgress) bulkUploadProgress.classList.add('d-none');
+ if (bulkUploadResults) bulkUploadResults.classList.remove('d-none');
+
+ if (bulkUploadSuccessCount) bulkUploadSuccessCount.textContent = successFiles.length;
+ if (successFiles.length === 0 && bulkUploadSuccessAlert) {
+ bulkUploadSuccessAlert.classList.add('d-none');
+ }
+
+ if (errorFiles.length > 0) {
+ if (bulkUploadErrorCount) bulkUploadErrorCount.textContent = errorFiles.length;
+ if (bulkUploadErrorAlert) bulkUploadErrorAlert.classList.remove('d-none');
+ if (bulkUploadErrorList) {
+ bulkUploadErrorList.innerHTML = errorFiles
+ .map(f => `${escapeHtml(f.name)} : ${escapeHtml(f.error)} `)
+ .join('');
+ }
+ }
+
+ isUploading = false;
+
+ // Reload page if any files were uploaded successfully
+ if (successFiles.length > 0) {
+ // Keep button disabled and show uploading state until reload
+ if (uploadBtnText) uploadBtnText.textContent = 'Refreshing...';
+ // Short delay to show results, then reload
+ window.setTimeout(() => window.location.reload(), 800);
+ } else {
+ // Only re-enable if no success (all failed)
+ if (uploadSubmitBtn) uploadSubmitBtn.disabled = false;
+ if (uploadFileInput) uploadFileInput.disabled = false;
+ }
};
refreshUploadDropLabel();
- uploadFileInput.addEventListener('change', refreshUploadDropLabel);
+ uploadFileInput.addEventListener('change', () => {
+ refreshUploadDropLabel();
+ updateUploadBtnText();
+ resetUploadUI();
+ });
uploadDropZone?.addEventListener('click', () => uploadFileInput?.click());
+ // Handle form submission for bulk upload
+ uploadForm.addEventListener('submit', async (event) => {
+ const files = uploadFileInput.files;
+ if (!files || files.length === 0) return;
+
+ const keyPrefix = (uploadKeyPrefix?.value || '').trim();
+
+ // For single file with custom object key and NO prefix, use default form submission
+ if (files.length === 1 && !keyPrefix) {
+ const customKey = uploadForm.querySelector('input[name="object_key"]')?.value?.trim();
+ if (customKey) {
+ // Single file with custom key - let form submit normally
+ // Disable button immediately for feedback
+ if (uploadSubmitBtn) {
+ uploadSubmitBtn.disabled = true;
+ if (uploadBtnText) uploadBtnText.textContent = 'Uploading...';
+ }
+ return;
+ }
+ }
+
+ // Bulk upload or prefix specified - handle with JavaScript
+ event.preventDefault();
+
+ // Disable button immediately
+ if (uploadSubmitBtn) {
+ uploadSubmitBtn.disabled = true;
+ if (uploadBtnText) uploadBtnText.textContent = 'Uploading...';
+ }
+
+ await performBulkUpload(Array.from(files));
+ });
+
+ // Pre-fill key prefix with current folder when modal opens
+ uploadModalEl?.addEventListener('show.bs.modal', () => {
+ if (hasFolders && currentPrefix && uploadKeyPrefix) {
+ uploadKeyPrefix.value = currentPrefix;
+ // Auto-expand advanced options if there's a prefix
+ const advancedToggle = document.querySelector('[data-bs-target="#advancedUploadOptions"]');
+ const advancedCollapse = document.getElementById('advancedUploadOptions');
+ if (advancedToggle && advancedCollapse && !advancedCollapse.classList.contains('show')) {
+ new bootstrap.Collapse(advancedCollapse, { show: true });
+ }
+ } else if (uploadKeyPrefix) {
+ // Clear prefix when at root
+ uploadKeyPrefix.value = '';
+ }
+ });
+
+ // Reset UI when modal is closed
+ uploadModalEl?.addEventListener('hidden.bs.modal', () => {
+ resetUploadUI();
+ uploadFileInput.value = '';
+ refreshUploadDropLabel();
+ updateUploadBtnText();
+ });
+
const preventDefaults = (event) => {
event.preventDefault();
event.stopPropagation();
@@ -2344,14 +3347,34 @@
selectAllCheckbox?.addEventListener('change', (event) => {
const shouldSelect = Boolean(event.target?.checked);
- rows.forEach((row) => {
- const checkbox = row.querySelector('[data-object-select]');
- if (!checkbox || checkbox.disabled) {
- return;
- }
- checkbox.checked = shouldSelect;
- toggleRowSelection(row, shouldSelect);
- });
+
+ if (hasFolders) {
+ // Select all objects that start with current prefix (including inside subfolders)
+ const objectsInCurrentView = allObjects.filter(obj => obj.key.startsWith(currentPrefix));
+ objectsInCurrentView.forEach(obj => {
+ const checkbox = obj.element.querySelector('[data-object-select]');
+ if (checkbox && !checkbox.disabled) {
+ checkbox.checked = shouldSelect;
+ }
+ toggleRowSelection(obj.element, shouldSelect);
+ });
+
+ // Also toggle folder checkboxes
+ document.querySelectorAll('[data-folder-select]').forEach(cb => {
+ cb.checked = shouldSelect;
+ });
+ } else {
+ // Original behavior without folders
+ rows.forEach((row) => {
+ if (row.style.display === 'none') return;
+ const checkbox = row.querySelector('[data-object-select]');
+ if (!checkbox || checkbox.disabled) {
+ return;
+ }
+ checkbox.checked = shouldSelect;
+ toggleRowSelection(row, shouldSelect);
+ });
+ }
setTimeout(updateBulkDownloadState, 0);
});
@@ -2391,11 +3414,7 @@
window.URL.revokeObjectURL(url);
a.remove();
} catch (error) {
- if (window.showToast) {
- window.showToast(error.message, 'Download Failed', 'error');
- } else {
- alert(error.message);
- }
+ showMessage({ title: 'Download Failed', body: error.message, variant: 'danger' });
} finally {
bulkDownloadButton.disabled = false;
bulkDownloadButton.innerHTML = originalHtml;
@@ -2461,5 +3480,33 @@
loadReplicationStats();
});
}
+
+ // Encryption settings
+ const algoAes256Radio = document.getElementById('algo_aes256');
+ const algoKmsRadio = document.getElementById('algo_kms');
+ const kmsKeySection = document.getElementById('kmsKeySection');
+ const encryptionForm = document.getElementById('encryptionForm');
+ const encryptionAction = document.getElementById('encryptionAction');
+ const disableEncryptionBtn = document.getElementById('disableEncryptionBtn');
+
+ // Toggle KMS key section visibility based on selected algorithm
+ const updateKmsKeyVisibility = () => {
+ if (!kmsKeySection) return;
+ const showKms = algoKmsRadio?.checked;
+ kmsKeySection.style.display = showKms ? '' : 'none';
+ };
+
+ algoAes256Radio?.addEventListener('change', updateKmsKeyVisibility);
+ algoKmsRadio?.addEventListener('change', updateKmsKeyVisibility);
+
+ // Handle disable encryption button
+ disableEncryptionBtn?.addEventListener('click', () => {
+ if (encryptionAction && encryptionForm) {
+ if (confirm('Are you sure you want to disable default encryption? New objects will not be encrypted automatically.')) {
+ encryptionAction.value = 'disable';
+ encryptionForm.submit();
+ }
+ }
+ });
{% endblock %}
diff --git a/templates/docs.html b/templates/docs.html
index 3114668..fd7935c 100644
--- a/templates/docs.html
+++ b/templates/docs.html
@@ -83,6 +83,16 @@ python run.py --mode ui
5000
Listen port.
+
+ ENCRYPTION_ENABLED
+ false
+ Enable server-side encryption support.
+
+
+ KMS_ENABLED
+ false
+ Enable KMS key management for encryption.
+
@@ -408,10 +418,172 @@ s3.complete_multipart_upload(
+
+
+
+ 10
+
Bucket Quotas
+
+
Limit how much data a bucket can hold using storage quotas. Quotas are enforced on uploads and multipart completions.
+
+
Quota Types
+
+
+
+
+ Limit
+ Description
+
+
+
+
+ Max Size (MB)
+ Maximum total storage in megabytes (includes current objects + archived versions)
+
+
+ Max Objects
+ Maximum number of objects (includes current objects + archived versions)
+
+
+
+
+
+
Managing Quotas (Admin Only)
+
Quota management is restricted to administrators (users with iam:* permissions).
+
+ Navigate to your bucket → Properties tab → Storage Quota card.
+ Enter limits: Max Size (MB) and/or Max Objects . Leave empty for unlimited.
+ Click Update Quota to save, or Remove Quota to clear limits.
+
+
+
API Usage
+
# Set quota (max 100MB, max 1000 objects)
+curl -X PUT "{{ api_base }}/bucket/<bucket>?quota" \
+ -H "Content-Type: application/json" \
+ -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
+ -d '{"max_bytes": 104857600, "max_objects": 1000}'
+
+# Get current quota
+curl "{{ api_base }}/bucket/<bucket>?quota" \
+ -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
+
+# Remove quota
+curl -X PUT "{{ api_base }}/bucket/<bucket>?quota" \
+ -H "Content-Type: application/json" \
+ -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
+ -d '{"max_bytes": null, "max_objects": null}'
+
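The same endpoint can be scripted. Below is a minimal Python sketch using `requests` with the headers and payloads from the curl examples above; the base URL, bucket name, and credentials are placeholders for your deployment.

```python
# Minimal sketch of quota management against the ?quota endpoint shown above.
# base_url, bucket, and the credentials are placeholders; adjust for your install.
import requests

base_url = "http://localhost:5000/api/v1"
bucket = "my-bucket"
headers = {"X-Access-Key": "<key>", "X-Secret-Key": "<secret>"}

# Set a 100 MB / 1000 object quota
requests.put(
    f"{base_url}/bucket/{bucket}?quota",
    json={"max_bytes": 100 * 1024 * 1024, "max_objects": 1000},
    headers=headers,
).raise_for_status()

# Read the current quota back
print(requests.get(f"{base_url}/bucket/{bucket}?quota", headers=headers).json())

# Remove the quota again
requests.put(
    f"{base_url}/bucket/{bucket}?quota",
    json={"max_bytes": None, "max_objects": None},
    headers=headers,
).raise_for_status()
```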
+
+
+
+
+
+
+
+ Version Counting: When versioning is enabled, archived versions count toward the quota. The quota is checked against total storage, not just current objects.
+
+
+
+
+
+
+
+
+ 11
+
Encryption
+
+
Protect data at rest with server-side encryption using AES-256-GCM. Objects are encrypted before being written to disk and decrypted transparently on read.
+
+
Encryption Types
+
+
+
+
+ Type
+ Description
+
+
+
+
+ AES-256 (SSE-S3)
+ Server-managed encryption using a local master key
+
+
+ KMS (SSE-KMS)
+ Encryption using customer-managed keys via the built-in KMS
+
+
+
+
+
+
Enabling Encryption
+
+
+ Set environment variables:
+# PowerShell
+$env:ENCRYPTION_ENABLED = "true"
+$env:KMS_ENABLED = "true" # Optional
+python run.py
+
+# Bash
+export ENCRYPTION_ENABLED=true
+export KMS_ENABLED=true
+python run.py
+
+
+ Configure bucket encryption: Navigate to your bucket → Properties tab → Default Encryption card → Click Enable Encryption .
+
+
+ Choose algorithm: Select AES-256 for server-managed keys or aws:kms to use a KMS-managed key.
+
+
+
+
+
+
+
+
+
+
+ Important: Only new uploads after enabling encryption will be encrypted. Existing objects remain unencrypted.
+
+
+
+
+
KMS Key Management
+
When KMS_ENABLED=true, manage encryption keys via the API:
+
# Create a new KMS key
+curl -X POST {{ api_base }}/kms/keys \
+ -H "Content-Type: application/json" \
+ -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
+ -d '{"alias": "my-key", "description": "Production key"}'
+
+# List all keys
+curl {{ api_base }}/kms/keys \
+ -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
+
+# Rotate a key (creates new key material)
+curl -X POST {{ api_base }}/kms/keys/{key-id}/rotate \
+ -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
+
+# Disable/Enable a key
+curl -X POST {{ api_base }}/kms/keys/{key-id}/disable \
+ -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
+
+# Schedule key deletion (30-day waiting period)
+curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
+ -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
+
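The key lifecycle can also be scripted. The sketch below drives the same endpoints with Python `requests`; the base URL and credentials are placeholders, and the `key_id` response field name is an assumption, so check the payload returned by your install.

```python
# Sketch: create, rotate, and list KMS keys via the endpoints listed above.
# base_url and credentials are placeholders; the "key_id" field name is an assumption.
import requests

base_url = "http://localhost:5000/api/v1"
headers = {"X-Access-Key": "<key>", "X-Secret-Key": "<secret>"}

# Create a key
created = requests.post(
    f"{base_url}/kms/keys",
    json={"alias": "my-key", "description": "Production key"},
    headers=headers,
)
created.raise_for_status()
key_id = created.json().get("key_id")  # inspect the real response for the exact field

# Rotate it, then list all keys
requests.post(f"{base_url}/kms/keys/{key_id}/rotate", headers=headers).raise_for_status()
print(requests.get(f"{base_url}/kms/keys", headers=headers).json())
```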
+
How It Works
+
+ Envelope Encryption: Each object is encrypted with a unique Data Encryption Key (DEK). The DEK is then encrypted (wrapped) by the master key or KMS key and stored alongside the ciphertext. On read, the DEK is unwrapped and used to decrypt the object transparently.
+
+
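To make the flow concrete, here is an illustrative sketch of envelope encryption with AES-256-GCM using the `cryptography` package. It mirrors the description above but is not this project's implementation; real key loading, nonce handling, and on-disk formats will differ.

```python
# Illustrative envelope encryption with AES-256-GCM (conceptual only, not the
# project's actual implementation). A fresh DEK encrypts the object; the DEK is
# then wrapped by the master key and stored next to the ciphertext.
import os
from cryptography.hazmat.primitives.ciphers.aead import AESGCM

master_key = AESGCM.generate_key(bit_length=256)  # normally loaded from the master key file

def encrypt_object(plaintext: bytes) -> dict:
    dek = AESGCM.generate_key(bit_length=256)      # per-object data encryption key
    obj_nonce = os.urandom(12)
    ciphertext = AESGCM(dek).encrypt(obj_nonce, plaintext, None)
    key_nonce = os.urandom(12)
    wrapped_dek = AESGCM(master_key).encrypt(key_nonce, dek, None)  # wrap the DEK
    return {"ciphertext": ciphertext, "obj_nonce": obj_nonce,
            "wrapped_dek": wrapped_dek, "key_nonce": key_nonce}

def decrypt_object(blob: dict) -> bytes:
    dek = AESGCM(master_key).decrypt(blob["key_nonce"], blob["wrapped_dek"], None)
    return AESGCM(dek).decrypt(blob["obj_nonce"], blob["ciphertext"], None)

assert decrypt_object(encrypt_object(b"hello")) == b"hello"
```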
+
- 09
+ 12
Troubleshooting & tips