Compare commits
22 Commits: e792b86485...v0.2.0
| SHA1 |
|---|
| caf01d6ada |
| a5d19e2982 |
| 692e7e3a6e |
| 78dba93ee0 |
| 93a5aa6618 |
| 9ab750650c |
| 609e9db2f7 |
| 94a55cf2b7 |
| b9cfc45aa2 |
| 2d60e36fbf |
| c78f7fa6b0 |
| b3dce8d13e |
| bb366cb4cd |
| a2745ff2ee |
| 28cb656d94 |
| 3c44152fc6 |
| 397515edce |
| 980fced7e4 |
| bae5009ec4 |
| 233780617f |
| fd8fb21517 |
| c6cbe822e1 |
@@ -124,7 +124,7 @@ def create_app(
     )

     connections = ConnectionStore(connections_path)
-    replication = ReplicationManager(storage, connections, replication_rules_path)
+    replication = ReplicationManager(storage, connections, replication_rules_path, storage_root)

     encryption_config = {
         "encryption_enabled": app.config.get("ENCRYPTION_ENABLED", False),
@@ -156,6 +157,7 @@ def create_app(
     lifecycle_manager = LifecycleManager(
         base_storage,
         interval_seconds=app.config.get("LIFECYCLE_INTERVAL_SECONDS", 3600),
+        storage_root=storage_root,
     )
     lifecycle_manager.start()
@@ -289,17 +290,17 @@ def _configure_logging(app: Flask) -> None:
    formatter = logging.Formatter(
        "%(asctime)s | %(levelname)s | %(request_id)s | %(method)s %(path)s | %(message)s"
    )

    # Stream Handler (stdout) - Primary for Docker
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(formatter)
    stream_handler.addFilter(_RequestContextFilter())

    logger = app.logger
    for handler in logger.handlers[:]:
        handler.close()
    logger.handlers.clear()
    logger.addHandler(stream_handler)

    # File Handler (optional, if configured)
    if app.config.get("LOG_TO_FILE"):
        log_file = Path(app.config["LOG_FILE"])
        log_file.parent.mkdir(parents=True, exist_ok=True)
@@ -196,18 +196,21 @@ class AccessLoggingService:
         )

         target_key = f"{config.target_bucket}:{config.target_prefix}"
+        should_flush = False
         with self._buffer_lock:
             if target_key not in self._buffer:
                 self._buffer[target_key] = []
             self._buffer[target_key].append(entry)
+            should_flush = len(self._buffer[target_key]) >= self.max_buffer_size

-            if len(self._buffer[target_key]) >= self.max_buffer_size:
-                self._flush_buffer(target_key)
+        if should_flush:
+            self._flush_buffer(target_key)

     def _flush_loop(self) -> None:
         while not self._shutdown.is_set():
-            time.sleep(self.flush_interval)
-            self._flush_all()
+            self._shutdown.wait(timeout=self.flush_interval)
+            if not self._shutdown.is_set():
+                self._flush_all()

     def _flush_all(self) -> None:
         with self._buffer_lock:
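The hunk above computes the flush decision while holding the buffer lock but performs the flush after releasing it, and replaces the sleep-based loop with an interruptible wait. A minimal standalone sketch of that locking pattern (the `Buffer` class and its names are illustrative, not part of this codebase):

```python
import threading
from collections import defaultdict

class Buffer:
    """Decide to flush while holding the lock, but do the slow flush outside it."""

    def __init__(self, max_size: int = 3) -> None:
        self.max_size = max_size
        self._entries = defaultdict(list)
        self._lock = threading.Lock()

    def add(self, key: str, entry: str) -> None:
        with self._lock:
            self._entries[key].append(entry)
            should_flush = len(self._entries[key]) >= self.max_size
        if should_flush:  # flush without holding the lock
            self.flush(key)

    def flush(self, key: str) -> None:
        with self._lock:
            pending, self._entries[key] = self._entries[key], []
        print(f"flushing {len(pending)} entries for {key}")

buf = Buffer()
for i in range(4):
    buf.add("bucket:prefix", f"entry-{i}")
```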
@@ -84,7 +84,7 @@ class AppConfig:
             return overrides.get(name, os.getenv(name, default))

         storage_root = Path(_get("STORAGE_ROOT", PROJECT_ROOT / "data")).resolve()
-        max_upload_size = int(_get("MAX_UPLOAD_SIZE", 1024 * 1024 * 1024)) # 1 GiB default
+        max_upload_size = int(_get("MAX_UPLOAD_SIZE", 1024 * 1024 * 1024))
         ui_page_size = int(_get("UI_PAGE_SIZE", 100))
         auth_max_attempts = int(_get("AUTH_MAX_ATTEMPTS", 5))
         auth_lockout_minutes = int(_get("AUTH_LOCKOUT_MINUTES", 15))
@@ -108,6 +110,10 @@ class AppConfig:
         try:
             secret_file.parent.mkdir(parents=True, exist_ok=True)
             secret_file.write_text(generated)
+            try:
+                os.chmod(secret_file, 0o600)
+            except OSError:
+                pass
             secret_key = generated
         except OSError:
             secret_key = generated
21
app/iam.py
@@ -26,14 +26,12 @@ IAM_ACTIONS = {
|
||||
ALLOWED_ACTIONS = (S3_ACTIONS | IAM_ACTIONS) | {"iam:*"}
|
||||
|
||||
ACTION_ALIASES = {
|
||||
# List actions
|
||||
"list": "list",
|
||||
"s3:listbucket": "list",
|
||||
"s3:listallmybuckets": "list",
|
||||
"s3:listbucketversions": "list",
|
||||
"s3:listmultipartuploads": "list",
|
||||
"s3:listparts": "list",
|
||||
# Read actions
|
||||
"read": "read",
|
||||
"s3:getobject": "read",
|
||||
"s3:getobjectversion": "read",
|
||||
@@ -43,7 +41,6 @@ ACTION_ALIASES = {
|
||||
"s3:getbucketversioning": "read",
|
||||
"s3:headobject": "read",
|
||||
"s3:headbucket": "read",
|
||||
# Write actions
|
||||
"write": "write",
|
||||
"s3:putobject": "write",
|
||||
"s3:createbucket": "write",
|
||||
@@ -54,23 +51,19 @@ ACTION_ALIASES = {
|
||||
"s3:completemultipartupload": "write",
|
||||
"s3:abortmultipartupload": "write",
|
||||
"s3:copyobject": "write",
|
||||
# Delete actions
|
||||
"delete": "delete",
|
||||
"s3:deleteobject": "delete",
|
||||
"s3:deleteobjectversion": "delete",
|
||||
"s3:deletebucket": "delete",
|
||||
"s3:deleteobjecttagging": "delete",
|
||||
# Share actions (ACL)
|
||||
"share": "share",
|
||||
"s3:putobjectacl": "share",
|
||||
"s3:putbucketacl": "share",
|
||||
"s3:getbucketacl": "share",
|
||||
# Policy actions
|
||||
"policy": "policy",
|
||||
"s3:putbucketpolicy": "policy",
|
||||
"s3:getbucketpolicy": "policy",
|
||||
"s3:deletebucketpolicy": "policy",
|
||||
# Replication actions
|
||||
"replication": "replication",
|
||||
"s3:getreplicationconfiguration": "replication",
|
||||
"s3:putreplicationconfiguration": "replication",
|
||||
@@ -78,7 +71,6 @@ ACTION_ALIASES = {
|
||||
"s3:replicateobject": "replication",
|
||||
"s3:replicatetags": "replication",
|
||||
"s3:replicatedelete": "replication",
|
||||
# IAM actions
|
||||
"iam:listusers": "iam:list_users",
|
||||
"iam:createuser": "iam:create_user",
|
||||
"iam:deleteuser": "iam:delete_user",
|
||||
@@ -115,17 +107,15 @@ class IamService:
|
||||
self._raw_config: Dict[str, Any] = {}
|
||||
self._failed_attempts: Dict[str, Deque[datetime]] = {}
|
||||
self._last_load_time = 0.0
|
||||
# Performance: credential cache with TTL
|
||||
self._credential_cache: Dict[str, Tuple[str, Principal, float]] = {}
|
||||
-self._cache_ttl = 60.0 # Cache credentials for 60 seconds
+self._cache_ttl = 60.0
|
||||
self._last_stat_check = 0.0
|
||||
-self._stat_check_interval = 1.0 # Only stat() file every 1 second
+self._stat_check_interval = 1.0
|
||||
self._sessions: Dict[str, Dict[str, Any]] = {}
|
||||
self._load()
|
||||
|
||||
def _maybe_reload(self) -> None:
|
||||
"""Reload configuration if the file has changed on disk."""
|
||||
# Performance: Skip stat check if we checked recently
|
||||
now = time.time()
|
||||
if now - self._last_stat_check < self._stat_check_interval:
|
||||
return
|
||||
@@ -133,7 +123,7 @@ class IamService:
|
||||
try:
|
||||
if self.config_path.stat().st_mtime > self._last_load_time:
|
||||
self._load()
|
||||
-self._credential_cache.clear() # Invalidate cache on reload
+self._credential_cache.clear()
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
@@ -227,7 +217,6 @@ class IamService:
|
||||
del self._sessions[token]
|
||||
|
||||
def principal_for_key(self, access_key: str) -> Principal:
|
||||
# Performance: Check cache first
|
||||
now = time.time()
|
||||
cached = self._credential_cache.get(access_key)
|
||||
if cached:
|
||||
@@ -244,7 +233,6 @@ class IamService:
|
||||
return principal
|
||||
|
||||
def secret_for_key(self, access_key: str) -> str:
|
||||
# Performance: Check cache first
|
||||
now = time.time()
|
||||
cached = self._credential_cache.get(access_key)
|
||||
if cached:
|
||||
@@ -508,7 +496,6 @@ class IamService:
|
||||
raise IamError("User not found")
|
||||
|
||||
def get_secret_key(self, access_key: str) -> str | None:
|
||||
# Performance: Check cache first
|
||||
now = time.time()
|
||||
cached = self._credential_cache.get(access_key)
|
||||
if cached:
|
||||
@@ -519,14 +506,12 @@ class IamService:
|
||||
self._maybe_reload()
|
||||
record = self._users.get(access_key)
|
||||
if record:
|
||||
# Cache the result
|
||||
principal = self._build_principal(access_key, record)
|
||||
self._credential_cache[access_key] = (record["secret_key"], principal, now)
|
||||
return record["secret_key"]
|
||||
return None
|
||||
|
||||
def get_principal(self, access_key: str) -> Principal | None:
|
||||
# Performance: Check cache first
|
||||
now = time.time()
|
||||
cached = self._credential_cache.get(access_key)
|
||||
if cached:
|
||||
|
||||
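The IamService hunks above keep the 60-second credential cache while dropping the explanatory comments. A minimal sketch of the same TTL-lookup pattern (the `TtlCache` class here is illustrative, not part of the codebase):

```python
import time
from typing import Dict, Optional, Tuple

class TtlCache:
    """Serve a value from cache while it is younger than the TTL, otherwise miss."""

    def __init__(self, ttl_seconds: float = 60.0) -> None:
        self.ttl = ttl_seconds
        self._entries: Dict[str, Tuple[str, float]] = {}

    def put(self, key: str, value: str) -> None:
        self._entries[key] = (value, time.time())

    def get(self, key: str) -> Optional[str]:
        cached = self._entries.get(key)
        if cached:
            value, stored_at = cached
            if time.time() - stored_at < self.ttl:
                return value
            del self._entries[key]  # expired: force a reload on next access
        return None

cache = TtlCache(ttl_seconds=60.0)
cache.put("AKIAEXAMPLE", "secret")
print(cache.get("AKIAEXAMPLE"))
```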
104
app/lifecycle.py
@@ -1,5 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
@@ -23,13 +24,104 @@ class LifecycleResult:
|
||||
execution_time_seconds: float = 0.0
|
||||
|
||||
|
||||
@dataclass
|
||||
class LifecycleExecutionRecord:
|
||||
timestamp: float
|
||||
bucket_name: str
|
||||
objects_deleted: int
|
||||
versions_deleted: int
|
||||
uploads_aborted: int
|
||||
errors: List[str]
|
||||
execution_time_seconds: float
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"timestamp": self.timestamp,
|
||||
"bucket_name": self.bucket_name,
|
||||
"objects_deleted": self.objects_deleted,
|
||||
"versions_deleted": self.versions_deleted,
|
||||
"uploads_aborted": self.uploads_aborted,
|
||||
"errors": self.errors,
|
||||
"execution_time_seconds": self.execution_time_seconds,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict) -> "LifecycleExecutionRecord":
|
||||
return cls(
|
||||
timestamp=data["timestamp"],
|
||||
bucket_name=data["bucket_name"],
|
||||
objects_deleted=data["objects_deleted"],
|
||||
versions_deleted=data["versions_deleted"],
|
||||
uploads_aborted=data["uploads_aborted"],
|
||||
errors=data.get("errors", []),
|
||||
execution_time_seconds=data["execution_time_seconds"],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def from_result(cls, result: LifecycleResult) -> "LifecycleExecutionRecord":
|
||||
return cls(
|
||||
timestamp=time.time(),
|
||||
bucket_name=result.bucket_name,
|
||||
objects_deleted=result.objects_deleted,
|
||||
versions_deleted=result.versions_deleted,
|
||||
uploads_aborted=result.uploads_aborted,
|
||||
errors=result.errors.copy(),
|
||||
execution_time_seconds=result.execution_time_seconds,
|
||||
)
|
||||
|
||||
|
||||
class LifecycleHistoryStore:
|
||||
MAX_HISTORY_PER_BUCKET = 50
|
||||
|
||||
def __init__(self, storage_root: Path) -> None:
|
||||
self.storage_root = storage_root
|
||||
self._lock = threading.Lock()
|
||||
|
||||
def _get_history_path(self, bucket_name: str) -> Path:
|
||||
return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / "lifecycle_history.json"
|
||||
|
||||
def load_history(self, bucket_name: str) -> List[LifecycleExecutionRecord]:
|
||||
path = self._get_history_path(bucket_name)
|
||||
if not path.exists():
|
||||
return []
|
||||
try:
|
||||
with open(path, "r") as f:
|
||||
data = json.load(f)
|
||||
return [LifecycleExecutionRecord.from_dict(d) for d in data.get("executions", [])]
|
||||
except (OSError, ValueError, KeyError) as e:
|
||||
logger.error(f"Failed to load lifecycle history for {bucket_name}: {e}")
|
||||
return []
|
||||
|
||||
def save_history(self, bucket_name: str, records: List[LifecycleExecutionRecord]) -> None:
|
||||
path = self._get_history_path(bucket_name)
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
data = {"executions": [r.to_dict() for r in records[:self.MAX_HISTORY_PER_BUCKET]]}
|
||||
try:
|
||||
with open(path, "w") as f:
|
||||
json.dump(data, f, indent=2)
|
||||
except OSError as e:
|
||||
logger.error(f"Failed to save lifecycle history for {bucket_name}: {e}")
|
||||
|
||||
def add_record(self, bucket_name: str, record: LifecycleExecutionRecord) -> None:
|
||||
with self._lock:
|
||||
records = self.load_history(bucket_name)
|
||||
records.insert(0, record)
|
||||
self.save_history(bucket_name, records)
|
||||
|
||||
def get_history(self, bucket_name: str, limit: int = 50, offset: int = 0) -> List[LifecycleExecutionRecord]:
|
||||
records = self.load_history(bucket_name)
|
||||
return records[offset:offset + limit]
|
||||
|
||||
|
||||
class LifecycleManager:
|
||||
-def __init__(self, storage: ObjectStorage, interval_seconds: int = 3600):
+def __init__(self, storage: ObjectStorage, interval_seconds: int = 3600, storage_root: Optional[Path] = None):
|
||||
self.storage = storage
|
||||
self.interval_seconds = interval_seconds
|
||||
self.storage_root = storage_root
|
||||
self._timer: Optional[threading.Timer] = None
|
||||
self._shutdown = False
|
||||
self._lock = threading.Lock()
|
||||
self.history_store = LifecycleHistoryStore(storage_root) if storage_root else None
|
||||
|
||||
def start(self) -> None:
|
||||
if self._timer is not None:
|
||||
@@ -98,12 +190,15 @@ class LifecycleManager:
|
||||
logger.error(f"Lifecycle enforcement error for {bucket_name}: {e}")
|
||||
|
||||
result.execution_time_seconds = time.time() - start_time
|
||||
-if result.objects_deleted > 0 or result.versions_deleted > 0 or result.uploads_aborted > 0:
+if result.objects_deleted > 0 or result.versions_deleted > 0 or result.uploads_aborted > 0 or result.errors:
|
||||
logger.info(
|
||||
f"Lifecycle enforcement for {bucket_name}: "
|
||||
f"deleted={result.objects_deleted}, versions={result.versions_deleted}, "
|
||||
f"aborted={result.uploads_aborted}, time={result.execution_time_seconds:.2f}s"
|
||||
)
|
||||
if self.history_store:
|
||||
record = LifecycleExecutionRecord.from_result(result)
|
||||
self.history_store.add_record(bucket_name, record)
|
||||
return result
|
||||
|
||||
def _enforce_expiration(
|
||||
@@ -233,3 +328,8 @@ class LifecycleManager:
|
||||
if bucket_name:
|
||||
return {bucket_name: self.enforce_rules(bucket_name)}
|
||||
return self.enforce_all_buckets()
|
||||
|
||||
def get_execution_history(self, bucket_name: str, limit: int = 50, offset: int = 0) -> List[LifecycleExecutionRecord]:
|
||||
if not self.history_store:
|
||||
return []
|
||||
return self.history_store.get_history(bucket_name, limit, offset)
|
||||
|
||||
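A hedged usage sketch of the lifecycle execution history added above; the module path, storage root, and bucket name are placeholders, while the classes and methods come from the diff:

```python
from pathlib import Path
import time

# Module path assumed from the file name app/lifecycle.py in this diff.
from app.lifecycle import LifecycleExecutionRecord, LifecycleHistoryStore

store = LifecycleHistoryStore(Path("./data"))  # storage root is an assumption
record = LifecycleExecutionRecord(
    timestamp=time.time(),
    bucket_name="photos",
    objects_deleted=3,
    versions_deleted=1,
    uploads_aborted=0,
    errors=[],
    execution_time_seconds=0.42,
)
store.add_record("photos", record)  # persisted under .myfsio.sys/buckets/photos/
print([r.to_dict() for r in store.get_history("photos", limit=10)])
```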
@@ -8,7 +8,7 @@ import time
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
-from typing import Any, Dict, Optional
+from typing import Any, Dict, List, Optional
|
||||
|
||||
import boto3
|
||||
from botocore.config import Config
|
||||
@@ -23,7 +23,7 @@ logger = logging.getLogger(__name__)
|
||||
REPLICATION_USER_AGENT = "S3ReplicationAgent/1.0"
|
||||
REPLICATION_CONNECT_TIMEOUT = 5
|
||||
REPLICATION_READ_TIMEOUT = 30
|
||||
-STREAMING_THRESHOLD_BYTES = 10 * 1024 * 1024 # 10 MiB - use streaming for larger files
+STREAMING_THRESHOLD_BYTES = 10 * 1024 * 1024
|
||||
|
||||
REPLICATION_MODE_NEW_ONLY = "new_only"
|
||||
REPLICATION_MODE_ALL = "all"
|
||||
@@ -87,6 +87,40 @@ class ReplicationStats:
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ReplicationFailure:
|
||||
object_key: str
|
||||
error_message: str
|
||||
timestamp: float
|
||||
failure_count: int
|
||||
bucket_name: str
|
||||
action: str
|
||||
last_error_code: Optional[str] = None
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"object_key": self.object_key,
|
||||
"error_message": self.error_message,
|
||||
"timestamp": self.timestamp,
|
||||
"failure_count": self.failure_count,
|
||||
"bucket_name": self.bucket_name,
|
||||
"action": self.action,
|
||||
"last_error_code": self.last_error_code,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict) -> "ReplicationFailure":
|
||||
return cls(
|
||||
object_key=data["object_key"],
|
||||
error_message=data["error_message"],
|
||||
timestamp=data["timestamp"],
|
||||
failure_count=data["failure_count"],
|
||||
bucket_name=data["bucket_name"],
|
||||
action=data["action"],
|
||||
last_error_code=data.get("last_error_code"),
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ReplicationRule:
|
||||
bucket_name: str
|
||||
@@ -120,15 +154,86 @@ class ReplicationRule:
|
||||
return rule
|
||||
|
||||
|
||||
class ReplicationFailureStore:
|
||||
MAX_FAILURES_PER_BUCKET = 50
|
||||
|
||||
def __init__(self, storage_root: Path) -> None:
|
||||
self.storage_root = storage_root
|
||||
self._lock = threading.Lock()
|
||||
|
||||
def _get_failures_path(self, bucket_name: str) -> Path:
|
||||
return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / "replication_failures.json"
|
||||
|
||||
def load_failures(self, bucket_name: str) -> List[ReplicationFailure]:
|
||||
path = self._get_failures_path(bucket_name)
|
||||
if not path.exists():
|
||||
return []
|
||||
try:
|
||||
with open(path, "r") as f:
|
||||
data = json.load(f)
|
||||
return [ReplicationFailure.from_dict(d) for d in data.get("failures", [])]
|
||||
except (OSError, ValueError, KeyError) as e:
|
||||
logger.error(f"Failed to load replication failures for {bucket_name}: {e}")
|
||||
return []
|
||||
|
||||
def save_failures(self, bucket_name: str, failures: List[ReplicationFailure]) -> None:
|
||||
path = self._get_failures_path(bucket_name)
|
||||
path.parent.mkdir(parents=True, exist_ok=True)
|
||||
data = {"failures": [f.to_dict() for f in failures[:self.MAX_FAILURES_PER_BUCKET]]}
|
||||
try:
|
||||
with open(path, "w") as f:
|
||||
json.dump(data, f, indent=2)
|
||||
except OSError as e:
|
||||
logger.error(f"Failed to save replication failures for {bucket_name}: {e}")
|
||||
|
||||
def add_failure(self, bucket_name: str, failure: ReplicationFailure) -> None:
|
||||
with self._lock:
|
||||
failures = self.load_failures(bucket_name)
|
||||
existing = next((f for f in failures if f.object_key == failure.object_key), None)
|
||||
if existing:
|
||||
existing.failure_count += 1
|
||||
existing.timestamp = failure.timestamp
|
||||
existing.error_message = failure.error_message
|
||||
existing.last_error_code = failure.last_error_code
|
||||
else:
|
||||
failures.insert(0, failure)
|
||||
self.save_failures(bucket_name, failures)
|
||||
|
||||
def remove_failure(self, bucket_name: str, object_key: str) -> bool:
|
||||
with self._lock:
|
||||
failures = self.load_failures(bucket_name)
|
||||
original_len = len(failures)
|
||||
failures = [f for f in failures if f.object_key != object_key]
|
||||
if len(failures) < original_len:
|
||||
self.save_failures(bucket_name, failures)
|
||||
return True
|
||||
return False
|
||||
|
||||
def clear_failures(self, bucket_name: str) -> None:
|
||||
with self._lock:
|
||||
path = self._get_failures_path(bucket_name)
|
||||
if path.exists():
|
||||
path.unlink()
|
||||
|
||||
def get_failure(self, bucket_name: str, object_key: str) -> Optional[ReplicationFailure]:
|
||||
failures = self.load_failures(bucket_name)
|
||||
return next((f for f in failures if f.object_key == object_key), None)
|
||||
|
||||
def get_failure_count(self, bucket_name: str) -> int:
|
||||
return len(self.load_failures(bucket_name))
|
||||
|
||||
|
||||
class ReplicationManager:
|
||||
-def __init__(self, storage: ObjectStorage, connections: ConnectionStore, rules_path: Path) -> None:
+def __init__(self, storage: ObjectStorage, connections: ConnectionStore, rules_path: Path, storage_root: Path) -> None:
|
||||
self.storage = storage
|
||||
self.connections = connections
|
||||
self.rules_path = rules_path
|
||||
self.storage_root = storage_root
|
||||
self._rules: Dict[str, ReplicationRule] = {}
|
||||
self._stats_lock = threading.Lock()
|
||||
self._executor = ThreadPoolExecutor(max_workers=4, thread_name_prefix="ReplicationWorker")
|
||||
self._shutdown = False
|
||||
self.failure_store = ReplicationFailureStore(storage_root)
|
||||
self.reload_rules()
|
||||
|
||||
def shutdown(self, wait: bool = True) -> None:
|
||||
@@ -307,7 +412,6 @@ class ReplicationManager:
|
||||
if self._shutdown:
|
||||
return
|
||||
|
||||
# Re-check if rule is still enabled (may have been paused after task was submitted)
|
||||
current_rule = self.get_rule(bucket_name)
|
||||
if not current_rule or not current_rule.enabled:
|
||||
logger.debug(f"Replication skipped for {bucket_name}/{object_key}: rule disabled or removed")
|
||||
@@ -332,8 +436,19 @@ class ReplicationManager:
|
||||
s3.delete_object(Bucket=rule.target_bucket, Key=object_key)
|
||||
logger.info(f"Replicated DELETE {bucket_name}/{object_key} to {conn.name} ({rule.target_bucket})")
|
||||
self._update_last_sync(bucket_name, object_key)
|
||||
self.failure_store.remove_failure(bucket_name, object_key)
|
||||
except ClientError as e:
|
||||
error_code = e.response.get('Error', {}).get('Code')
|
||||
logger.error(f"Replication DELETE failed for {bucket_name}/{object_key}: {e}")
|
||||
self.failure_store.add_failure(bucket_name, ReplicationFailure(
|
||||
object_key=object_key,
|
||||
error_message=str(e),
|
||||
timestamp=time.time(),
|
||||
failure_count=1,
|
||||
bucket_name=bucket_name,
|
||||
action="delete",
|
||||
last_error_code=error_code,
|
||||
))
|
||||
return
|
||||
|
||||
try:
|
||||
@@ -358,7 +473,6 @@ class ReplicationManager:
|
||||
extra_args["ContentType"] = content_type
|
||||
|
||||
if file_size >= STREAMING_THRESHOLD_BYTES:
|
||||
# Use multipart upload for large files
|
||||
s3.upload_file(
|
||||
str(path),
|
||||
rule.target_bucket,
|
||||
@@ -366,7 +480,6 @@ class ReplicationManager:
|
||||
ExtraArgs=extra_args if extra_args else None,
|
||||
)
|
||||
else:
|
||||
# Read small files into memory
|
||||
file_content = path.read_bytes()
|
||||
put_kwargs = {
|
||||
"Bucket": rule.target_bucket,
|
||||
@@ -408,9 +521,89 @@ class ReplicationManager:
|
||||
|
||||
logger.info(f"Replicated {bucket_name}/{object_key} to {conn.name} ({rule.target_bucket})")
|
||||
self._update_last_sync(bucket_name, object_key)
|
||||
self.failure_store.remove_failure(bucket_name, object_key)
|
||||
|
||||
except (ClientError, OSError, ValueError) as e:
|
||||
error_code = None
|
||||
if isinstance(e, ClientError):
|
||||
error_code = e.response.get('Error', {}).get('Code')
|
||||
logger.error(f"Replication failed for {bucket_name}/{object_key}: {e}")
|
||||
except Exception:
|
||||
self.failure_store.add_failure(bucket_name, ReplicationFailure(
|
||||
object_key=object_key,
|
||||
error_message=str(e),
|
||||
timestamp=time.time(),
|
||||
failure_count=1,
|
||||
bucket_name=bucket_name,
|
||||
action=action,
|
||||
last_error_code=error_code,
|
||||
))
|
||||
except Exception as e:
|
||||
logger.exception(f"Unexpected error during replication for {bucket_name}/{object_key}")
|
||||
self.failure_store.add_failure(bucket_name, ReplicationFailure(
|
||||
object_key=object_key,
|
||||
error_message=str(e),
|
||||
timestamp=time.time(),
|
||||
failure_count=1,
|
||||
bucket_name=bucket_name,
|
||||
action=action,
|
||||
last_error_code=None,
|
||||
))
|
||||
|
||||
def get_failed_items(self, bucket_name: str, limit: int = 50, offset: int = 0) -> List[ReplicationFailure]:
|
||||
failures = self.failure_store.load_failures(bucket_name)
|
||||
return failures[offset:offset + limit]
|
||||
|
||||
def get_failure_count(self, bucket_name: str) -> int:
|
||||
return self.failure_store.get_failure_count(bucket_name)
|
||||
|
||||
def retry_failed_item(self, bucket_name: str, object_key: str) -> bool:
|
||||
failure = self.failure_store.get_failure(bucket_name, object_key)
|
||||
if not failure:
|
||||
return False
|
||||
|
||||
rule = self.get_rule(bucket_name)
|
||||
if not rule or not rule.enabled:
|
||||
return False
|
||||
|
||||
connection = self.connections.get(rule.target_connection_id)
|
||||
if not connection:
|
||||
logger.warning(f"Cannot retry: Connection {rule.target_connection_id} not found")
|
||||
return False
|
||||
|
||||
if not self.check_endpoint_health(connection):
|
||||
logger.warning(f"Cannot retry: Endpoint {connection.name} is not reachable")
|
||||
return False
|
||||
|
||||
self._executor.submit(self._replicate_task, bucket_name, object_key, rule, connection, failure.action)
|
||||
return True
|
||||
|
||||
def retry_all_failed(self, bucket_name: str) -> Dict[str, int]:
|
||||
failures = self.failure_store.load_failures(bucket_name)
|
||||
if not failures:
|
||||
return {"submitted": 0, "skipped": 0}
|
||||
|
||||
rule = self.get_rule(bucket_name)
|
||||
if not rule or not rule.enabled:
|
||||
return {"submitted": 0, "skipped": len(failures)}
|
||||
|
||||
connection = self.connections.get(rule.target_connection_id)
|
||||
if not connection:
|
||||
logger.warning(f"Cannot retry: Connection {rule.target_connection_id} not found")
|
||||
return {"submitted": 0, "skipped": len(failures)}
|
||||
|
||||
if not self.check_endpoint_health(connection):
|
||||
logger.warning(f"Cannot retry: Endpoint {connection.name} is not reachable")
|
||||
return {"submitted": 0, "skipped": len(failures)}
|
||||
|
||||
submitted = 0
|
||||
for failure in failures:
|
||||
self._executor.submit(self._replicate_task, bucket_name, failure.object_key, rule, connection, failure.action)
|
||||
submitted += 1
|
||||
|
||||
return {"submitted": submitted, "skipped": 0}
|
||||
|
||||
def dismiss_failure(self, bucket_name: str, object_key: str) -> bool:
|
||||
return self.failure_store.remove_failure(bucket_name, object_key)
|
||||
|
||||
def clear_failures(self, bucket_name: str) -> None:
|
||||
self.failure_store.clear_failures(bucket_name)
|
||||
|
||||
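A hedged sketch of driving the new replication failure-tracking API shown above; `replication` stands in for the app's ReplicationManager instance (created in create_app with storage, connections, rules path, and storage root):

```python
def report_and_retry(replication, bucket_name: str) -> None:
    # List persisted failures for the bucket (methods and fields from the diff above).
    failures = replication.get_failed_items(bucket_name, limit=20)
    for failure in failures:
        print(f"{failure.object_key}: {failure.error_message} "
              f"(attempts={failure.failure_count}, code={failure.last_error_code})")
    # Re-submit everything in one call; returns {"submitted": n, "skipped": m}.
    outcome = replication.retry_all_failed(bucket_name)
    print(f"retry submitted={outcome['submitted']} skipped={outcome['skipped']}")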
123
app/s3_api.py
@@ -25,7 +25,7 @@ from .iam import IamError, Principal
|
||||
from .notifications import NotificationService, NotificationConfiguration, WebhookDestination
|
||||
from .object_lock import ObjectLockService, ObjectLockRetention, ObjectLockConfig, ObjectLockError, RetentionMode
|
||||
from .replication import ReplicationManager
|
||||
-from .storage import ObjectStorage, StorageError, QuotaExceededError
+from .storage import ObjectStorage, StorageError, QuotaExceededError, BucketNotFoundError, ObjectNotFoundError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -217,7 +217,6 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
|
||||
calculated_signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()
|
||||
|
||||
if not hmac.compare_digest(calculated_signature, signature):
|
||||
# Only log detailed signature debug info if DEBUG_SIGV4 is enabled
|
||||
if current_app.config.get("DEBUG_SIGV4"):
|
||||
logger.warning(
|
||||
"SigV4 signature mismatch",
|
||||
@@ -260,7 +259,13 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
|
||||
raise IamError("Invalid Date format")
|
||||
|
||||
now = datetime.now(timezone.utc)
|
||||
-if now > req_time + timedelta(seconds=int(expires)):
+try:
+    expires_seconds = int(expires)
+    if expires_seconds <= 0:
+        raise IamError("Invalid Expires value: must be positive")
+except ValueError:
+    raise IamError("Invalid Expires value: must be an integer")
+if now > req_time + timedelta(seconds=expires_seconds):
|
||||
raise IamError("Request expired")
|
||||
|
||||
secret_key = _iam().get_secret_key(access_key)
|
||||
@@ -1036,21 +1041,23 @@ def _object_tagging_handler(bucket_name: str, object_key: str) -> Response:
|
||||
if request.method == "GET":
|
||||
try:
|
||||
tags = storage.get_object_tags(bucket_name, object_key)
|
||||
except BucketNotFoundError as exc:
|
||||
return _error_response("NoSuchBucket", str(exc), 404)
|
||||
except ObjectNotFoundError as exc:
|
||||
return _error_response("NoSuchKey", str(exc), 404)
|
||||
except StorageError as exc:
|
||||
message = str(exc)
|
||||
if "Bucket" in message:
|
||||
return _error_response("NoSuchBucket", message, 404)
|
||||
return _error_response("NoSuchKey", message, 404)
|
||||
return _error_response("InternalError", str(exc), 500)
|
||||
return _xml_response(_render_tagging_document(tags))
|
||||
|
||||
if request.method == "DELETE":
|
||||
try:
|
||||
storage.delete_object_tags(bucket_name, object_key)
|
||||
except BucketNotFoundError as exc:
|
||||
return _error_response("NoSuchBucket", str(exc), 404)
|
||||
except ObjectNotFoundError as exc:
|
||||
return _error_response("NoSuchKey", str(exc), 404)
|
||||
except StorageError as exc:
|
||||
message = str(exc)
|
||||
if "Bucket" in message:
|
||||
return _error_response("NoSuchBucket", message, 404)
|
||||
return _error_response("NoSuchKey", message, 404)
|
||||
return _error_response("InternalError", str(exc), 500)
|
||||
current_app.logger.info("Object tags deleted", extra={"bucket": bucket_name, "key": object_key})
|
||||
return Response(status=204)
|
||||
|
||||
@@ -1063,11 +1070,12 @@ def _object_tagging_handler(bucket_name: str, object_key: str) -> Response:
|
||||
return _error_response("InvalidTag", "A maximum of 10 tags is supported for objects", 400)
|
||||
try:
|
||||
storage.set_object_tags(bucket_name, object_key, tags)
|
||||
except BucketNotFoundError as exc:
|
||||
return _error_response("NoSuchBucket", str(exc), 404)
|
||||
except ObjectNotFoundError as exc:
|
||||
return _error_response("NoSuchKey", str(exc), 404)
|
||||
except StorageError as exc:
|
||||
message = str(exc)
|
||||
if "Bucket" in message:
|
||||
return _error_response("NoSuchBucket", message, 404)
|
||||
return _error_response("NoSuchKey", message, 404)
|
||||
return _error_response("InternalError", str(exc), 500)
|
||||
current_app.logger.info("Object tags updated", extra={"bucket": bucket_name, "key": object_key, "tags": len(tags)})
|
||||
return Response(status=204)
|
||||
|
||||
@@ -1283,7 +1291,10 @@ def _bucket_list_versions_handler(bucket_name: str) -> Response:
|
||||
|
||||
prefix = request.args.get("prefix", "")
|
||||
delimiter = request.args.get("delimiter", "")
|
||||
-max_keys = min(int(request.args.get("max-keys", 1000)), 1000)
+try:
+    max_keys = max(1, min(int(request.args.get("max-keys", 1000)), 1000))
+except ValueError:
+    return _error_response("InvalidArgument", "max-keys must be an integer", 400)
|
||||
key_marker = request.args.get("key-marker", "")
|
||||
|
||||
if prefix:
|
||||
@@ -1314,7 +1325,8 @@ def _bucket_list_versions_handler(bucket_name: str) -> Response:
|
||||
SubElement(version, "VersionId").text = "null"
|
||||
SubElement(version, "IsLatest").text = "true"
|
||||
SubElement(version, "LastModified").text = obj.last_modified.strftime("%Y-%m-%dT%H:%M:%S.000Z")
|
||||
SubElement(version, "ETag").text = f'"{obj.etag}"'
|
||||
if obj.etag:
|
||||
SubElement(version, "ETag").text = f'"{obj.etag}"'
|
||||
SubElement(version, "Size").text = str(obj.size)
|
||||
SubElement(version, "StorageClass").text = "STANDARD"
|
||||
|
||||
@@ -1475,7 +1487,10 @@ def _parse_lifecycle_config(payload: bytes) -> list:
|
||||
expiration: dict = {}
|
||||
days_el = exp_el.find("{*}Days") or exp_el.find("Days")
|
||||
if days_el is not None and days_el.text:
|
||||
expiration["Days"] = int(days_el.text.strip())
|
||||
days_val = int(days_el.text.strip())
|
||||
if days_val <= 0:
|
||||
raise ValueError("Expiration Days must be a positive integer")
|
||||
expiration["Days"] = days_val
|
||||
date_el = exp_el.find("{*}Date") or exp_el.find("Date")
|
||||
if date_el is not None and date_el.text:
|
||||
expiration["Date"] = date_el.text.strip()
|
||||
@@ -1490,7 +1505,10 @@ def _parse_lifecycle_config(payload: bytes) -> list:
|
||||
nve: dict = {}
|
||||
days_el = nve_el.find("{*}NoncurrentDays") or nve_el.find("NoncurrentDays")
|
||||
if days_el is not None and days_el.text:
|
||||
nve["NoncurrentDays"] = int(days_el.text.strip())
|
||||
noncurrent_days = int(days_el.text.strip())
|
||||
if noncurrent_days <= 0:
|
||||
raise ValueError("NoncurrentDays must be a positive integer")
|
||||
nve["NoncurrentDays"] = noncurrent_days
|
||||
if nve:
|
||||
rule["NoncurrentVersionExpiration"] = nve
|
||||
|
||||
@@ -1499,7 +1517,10 @@ def _parse_lifecycle_config(payload: bytes) -> list:
|
||||
aimu: dict = {}
|
||||
days_el = aimu_el.find("{*}DaysAfterInitiation") or aimu_el.find("DaysAfterInitiation")
|
||||
if days_el is not None and days_el.text:
|
||||
aimu["DaysAfterInitiation"] = int(days_el.text.strip())
|
||||
days_after = int(days_el.text.strip())
|
||||
if days_after <= 0:
|
||||
raise ValueError("DaysAfterInitiation must be a positive integer")
|
||||
aimu["DaysAfterInitiation"] = days_after
|
||||
if aimu:
|
||||
rule["AbortIncompleteMultipartUpload"] = aimu
|
||||
|
||||
@@ -2085,7 +2106,10 @@ def bucket_handler(bucket_name: str) -> Response:
|
||||
list_type = request.args.get("list-type")
|
||||
prefix = request.args.get("prefix", "")
|
||||
delimiter = request.args.get("delimiter", "")
|
||||
-max_keys = min(int(request.args.get("max-keys", current_app.config["UI_PAGE_SIZE"])), 1000)
+try:
+    max_keys = max(1, min(int(request.args.get("max-keys", current_app.config["UI_PAGE_SIZE"])), 1000))
+except ValueError:
+    return _error_response("InvalidArgument", "max-keys must be an integer", 400)
|
||||
|
||||
marker = request.args.get("marker", "") # ListObjects v1
|
||||
continuation_token = request.args.get("continuation-token", "") # ListObjectsV2
|
||||
@@ -2098,7 +2122,7 @@ def bucket_handler(bucket_name: str) -> Response:
|
||||
if continuation_token:
|
||||
try:
|
||||
effective_start = base64.urlsafe_b64decode(continuation_token.encode()).decode("utf-8")
|
||||
-except Exception:
+except (ValueError, UnicodeDecodeError):
|
||||
effective_start = continuation_token
|
||||
elif start_after:
|
||||
effective_start = start_after
|
||||
@@ -2178,10 +2202,11 @@ def bucket_handler(bucket_name: str) -> Response:
|
||||
obj_el = SubElement(root, "Contents")
|
||||
SubElement(obj_el, "Key").text = meta.key
|
||||
SubElement(obj_el, "LastModified").text = meta.last_modified.isoformat()
|
||||
SubElement(obj_el, "ETag").text = f'"{meta.etag}"'
|
||||
if meta.etag:
|
||||
SubElement(obj_el, "ETag").text = f'"{meta.etag}"'
|
||||
SubElement(obj_el, "Size").text = str(meta.size)
|
||||
SubElement(obj_el, "StorageClass").text = "STANDARD"
|
||||
|
||||
|
||||
for cp in common_prefixes:
|
||||
cp_el = SubElement(root, "CommonPrefixes")
|
||||
SubElement(cp_el, "Prefix").text = cp
|
||||
@@ -2194,15 +2219,16 @@ def bucket_handler(bucket_name: str) -> Response:
|
||||
SubElement(root, "IsTruncated").text = "true" if is_truncated else "false"
|
||||
if delimiter:
|
||||
SubElement(root, "Delimiter").text = delimiter
|
||||
|
||||
|
||||
if is_truncated and delimiter and next_marker:
|
||||
SubElement(root, "NextMarker").text = next_marker
|
||||
|
||||
|
||||
for meta in objects:
|
||||
obj_el = SubElement(root, "Contents")
|
||||
SubElement(obj_el, "Key").text = meta.key
|
||||
SubElement(obj_el, "LastModified").text = meta.last_modified.isoformat()
|
||||
SubElement(obj_el, "ETag").text = f'"{meta.etag}"'
|
||||
if meta.etag:
|
||||
SubElement(obj_el, "ETag").text = f'"{meta.etag}"'
|
||||
SubElement(obj_el, "Size").text = str(meta.size)
|
||||
|
||||
for cp in common_prefixes:
|
||||
@@ -2282,7 +2308,8 @@ def object_handler(bucket_name: str, object_key: str):
|
||||
extra={"bucket": bucket_name, "key": object_key, "size": meta.size},
|
||||
)
|
||||
response = Response(status=200)
|
||||
response.headers["ETag"] = f'"{meta.etag}"'
|
||||
if meta.etag:
|
||||
response.headers["ETag"] = f'"{meta.etag}"'
|
||||
|
||||
_notifications().emit_object_created(
|
||||
bucket_name,
|
||||
@@ -2725,7 +2752,8 @@ def _copy_object(dest_bucket: str, dest_key: str, copy_source: str) -> Response:
|
||||
|
||||
root = Element("CopyObjectResult")
|
||||
SubElement(root, "LastModified").text = meta.last_modified.isoformat()
|
||||
SubElement(root, "ETag").text = f'"{meta.etag}"'
|
||||
if meta.etag:
|
||||
SubElement(root, "ETag").text = f'"{meta.etag}"'
|
||||
return _xml_response(root)
|
||||
|
||||
|
||||
@@ -2737,7 +2765,7 @@ class AwsChunkedDecoder:
|
||||
|
||||
def __init__(self, stream):
|
||||
self.stream = stream
|
||||
-self._read_buffer = bytearray() # Performance: Pre-allocated buffer
+self._read_buffer = bytearray()
|
||||
self.chunk_remaining = 0
|
||||
self.finished = False
|
||||
|
||||
@@ -2748,20 +2776,15 @@ class AwsChunkedDecoder:
|
||||
"""
|
||||
line = bytearray()
|
||||
while True:
|
||||
# Check if we have data in buffer
|
||||
if self._read_buffer:
|
||||
# Look for CRLF in buffer
|
||||
idx = self._read_buffer.find(b"\r\n")
|
||||
if idx != -1:
|
||||
# Found CRLF - extract line and update buffer
|
||||
line.extend(self._read_buffer[: idx + 2])
|
||||
del self._read_buffer[: idx + 2]
|
||||
return bytes(line)
|
||||
# No CRLF yet - consume entire buffer
|
||||
line.extend(self._read_buffer)
|
||||
self._read_buffer.clear()
|
||||
|
||||
# Read more data in larger chunks (64 bytes is enough for chunk headers)
|
||||
chunk = self.stream.read(64)
|
||||
if not chunk:
|
||||
return bytes(line) if line else b""
|
||||
@@ -2770,14 +2793,11 @@ class AwsChunkedDecoder:
|
||||
def _read_exact(self, n: int) -> bytes:
|
||||
"""Read exactly n bytes, using buffer first."""
|
||||
result = bytearray()
|
||||
# Use buffered data first
|
||||
if self._read_buffer:
|
||||
take = min(len(self._read_buffer), n)
|
||||
result.extend(self._read_buffer[:take])
|
||||
del self._read_buffer[:take]
|
||||
n -= take
|
||||
|
||||
# Read remaining directly from stream
|
||||
if n > 0:
|
||||
data = self.stream.read(n)
|
||||
if data:
|
||||
@@ -2789,7 +2809,7 @@ class AwsChunkedDecoder:
|
||||
if self.finished:
|
||||
return b""
|
||||
|
||||
-result = bytearray() # Performance: Use bytearray for building result
+result = bytearray()
|
||||
while size == -1 or len(result) < size:
|
||||
if self.chunk_remaining > 0:
|
||||
to_read = self.chunk_remaining
|
||||
@@ -2823,7 +2843,6 @@ class AwsChunkedDecoder:
|
||||
|
||||
if chunk_size == 0:
|
||||
self.finished = True
|
||||
# Skip trailing headers
|
||||
while True:
|
||||
trailer = self._read_line()
|
||||
if trailer == b"\r\n" or not trailer:
|
||||
@@ -2947,8 +2966,9 @@ def _complete_multipart_upload(bucket_name: str, object_key: str) -> Response:
|
||||
SubElement(root, "Location").text = location
|
||||
SubElement(root, "Bucket").text = bucket_name
|
||||
SubElement(root, "Key").text = object_key
|
||||
SubElement(root, "ETag").text = f'"{meta.etag}"'
|
||||
|
||||
if meta.etag:
|
||||
SubElement(root, "ETag").text = f'"{meta.etag}"'
|
||||
|
||||
return _xml_response(root)
|
||||
|
||||
|
||||
@@ -2963,10 +2983,11 @@ def _abort_multipart_upload(bucket_name: str, object_key: str) -> Response:
|
||||
|
||||
try:
|
||||
_storage().abort_multipart_upload(bucket_name, upload_id)
|
||||
except BucketNotFoundError as exc:
|
||||
return _error_response("NoSuchBucket", str(exc), 404)
|
||||
except StorageError as exc:
|
||||
if "Bucket does not exist" in str(exc):
|
||||
return _error_response("NoSuchBucket", str(exc), 404)
|
||||
|
||||
current_app.logger.warning(f"Error aborting multipart upload: {exc}")
|
||||
|
||||
return Response(status=204)
|
||||
|
||||
|
||||
@@ -2978,13 +2999,15 @@ def resolve_principal():
|
||||
(request.args.get("X-Amz-Algorithm") == "AWS4-HMAC-SHA256"):
|
||||
g.principal = _verify_sigv4(request)
|
||||
return
|
||||
-except Exception:
-    pass
+except IamError as exc:
+    logger.debug(f"SigV4 authentication failed: {exc}")
+except (ValueError, KeyError) as exc:
+    logger.debug(f"SigV4 parsing error: {exc}")
|
||||
|
||||
access_key = request.headers.get("X-Access-Key")
|
||||
secret_key = request.headers.get("X-Secret-Key")
|
||||
if access_key and secret_key:
|
||||
try:
|
||||
g.principal = _iam().authenticate(access_key, secret_key)
|
||||
-except Exception:
-    pass
+except IamError as exc:
+    logger.debug(f"Header authentication failed: {exc}")
|
||||
|
||||
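A self-contained sketch of the stricter presigned-URL Expires validation introduced above; `check_presigned_expiry` is a hypothetical helper name and the local `IamError` is a stand-in for the application's exception:

```python
from datetime import datetime, timedelta, timezone

class IamError(Exception):
    """Stand-in for the application's IamError, for illustration only."""

def check_presigned_expiry(req_time: datetime, expires: str) -> None:
    # Reject non-integer and non-positive Expires values before the time comparison.
    try:
        expires_seconds = int(expires)
        if expires_seconds <= 0:
            raise IamError("Invalid Expires value: must be positive")
    except ValueError:
        raise IamError("Invalid Expires value: must be an integer")
    if datetime.now(timezone.utc) > req_time + timedelta(seconds=expires_seconds):
        raise IamError("Request expired")

check_presigned_expiry(datetime.now(timezone.utc), "300")  # valid, not expired
```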
119
app/storage.py
@@ -76,6 +76,14 @@ class StorageError(RuntimeError):
|
||||
"""Raised when the storage layer encounters an unrecoverable problem."""
|
||||
|
||||
|
||||
class BucketNotFoundError(StorageError):
|
||||
"""Raised when the bucket does not exist."""
|
||||
|
||||
|
||||
class ObjectNotFoundError(StorageError):
|
||||
"""Raised when the object does not exist."""
|
||||
|
||||
|
||||
class QuotaExceededError(StorageError):
|
||||
"""Raised when an operation would exceed bucket quota limits."""
|
||||
|
||||
@@ -90,7 +98,7 @@ class ObjectMeta:
|
||||
key: str
|
||||
size: int
|
||||
last_modified: datetime
|
||||
-etag: str
+etag: Optional[str] = None
|
||||
metadata: Optional[Dict[str, str]] = None
|
||||
|
||||
|
||||
@@ -106,7 +114,7 @@ class ListObjectsResult:
|
||||
objects: List[ObjectMeta]
|
||||
is_truncated: bool
|
||||
next_continuation_token: Optional[str]
|
||||
-total_count: Optional[int] = None # Total objects in bucket (from stats cache)
+total_count: Optional[int] = None
|
||||
|
||||
|
||||
def _utcnow() -> datetime:
|
||||
@@ -130,22 +138,18 @@ class ObjectStorage:
|
||||
MULTIPART_MANIFEST = "manifest.json"
|
||||
BUCKET_CONFIG_FILE = ".bucket.json"
|
||||
KEY_INDEX_CACHE_TTL = 30
|
||||
-OBJECT_CACHE_MAX_SIZE = 100 # Maximum number of buckets to cache
+OBJECT_CACHE_MAX_SIZE = 100
|
||||
|
||||
def __init__(self, root: Path) -> None:
|
||||
self.root = Path(root)
|
||||
self.root.mkdir(parents=True, exist_ok=True)
|
||||
self._ensure_system_roots()
|
||||
# LRU cache for object metadata with thread-safe access
|
||||
self._object_cache: OrderedDict[str, tuple[Dict[str, ObjectMeta], float]] = OrderedDict()
|
||||
-self._cache_lock = threading.Lock() # Global lock for cache structure
-# Performance: Per-bucket locks to reduce contention
+self._cache_lock = threading.Lock()
|
||||
self._bucket_locks: Dict[str, threading.Lock] = {}
|
||||
# Cache version counter for detecting stale reads
|
||||
self._cache_version: Dict[str, int] = {}
|
||||
# Performance: Bucket config cache with TTL
|
||||
self._bucket_config_cache: Dict[str, tuple[dict[str, Any], float]] = {}
|
||||
-self._bucket_config_cache_ttl = 30.0 # 30 second TTL
+self._bucket_config_cache_ttl = 30.0
|
||||
|
||||
def _get_bucket_lock(self, bucket_id: str) -> threading.Lock:
|
||||
"""Get or create a lock for a specific bucket. Reduces global lock contention."""
|
||||
@@ -170,6 +174,11 @@ class ObjectStorage:
|
||||
def bucket_exists(self, bucket_name: str) -> bool:
|
||||
return self._bucket_path(bucket_name).exists()
|
||||
|
||||
def _require_bucket_exists(self, bucket_path: Path) -> None:
|
||||
"""Raise BucketNotFoundError if bucket does not exist."""
|
||||
if not bucket_path.exists():
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
|
||||
def _validate_bucket_name(self, bucket_name: str) -> None:
|
||||
if len(bucket_name) < 3 or len(bucket_name) > 63:
|
||||
raise StorageError("Bucket name must be between 3 and 63 characters")
|
||||
@@ -188,14 +197,14 @@ class ObjectStorage:
|
||||
|
||||
def bucket_stats(self, bucket_name: str, cache_ttl: int = 60) -> dict[str, int]:
|
||||
"""Return object count and total size for the bucket (cached).
|
||||
|
||||
|
||||
Args:
|
||||
bucket_name: Name of the bucket
|
||||
cache_ttl: Cache time-to-live in seconds (default 60)
|
||||
"""
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
|
||||
cache_path = self._system_bucket_root(bucket_name) / "stats.json"
|
||||
if cache_path.exists():
|
||||
@@ -257,8 +266,7 @@ class ObjectStorage:
|
||||
def delete_bucket(self, bucket_name: str) -> None:
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
# Performance: Single check instead of three separate traversals
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
has_objects, has_versions, has_multipart = self._check_bucket_contents(bucket_path)
|
||||
if has_objects:
|
||||
raise StorageError("Bucket not empty")
|
||||
@@ -291,7 +299,7 @@ class ObjectStorage:
|
||||
"""
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
bucket_id = bucket_path.name
|
||||
|
||||
object_cache = self._get_object_cache(bucket_id, bucket_path)
|
||||
@@ -352,7 +360,7 @@ class ObjectStorage:
|
||||
) -> ObjectMeta:
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
bucket_id = bucket_path.name
|
||||
|
||||
safe_key = self._sanitize_object_key(object_key)
|
||||
@@ -409,7 +417,6 @@ class ObjectStorage:
|
||||
|
||||
self._invalidate_bucket_stats_cache(bucket_id)
|
||||
|
||||
# Performance: Lazy update - only update the affected key instead of invalidating whole cache
|
||||
obj_meta = ObjectMeta(
|
||||
key=safe_key.as_posix(),
|
||||
size=stat.st_size,
|
||||
@@ -424,7 +431,7 @@ class ObjectStorage:
|
||||
def get_object_path(self, bucket_name: str, object_key: str) -> Path:
|
||||
path = self._object_path(bucket_name, object_key)
|
||||
if not path.exists():
|
||||
raise StorageError("Object not found")
|
||||
raise ObjectNotFoundError("Object not found")
|
||||
return path
|
||||
|
||||
def get_object_metadata(self, bucket_name: str, object_key: str) -> Dict[str, str]:
|
||||
@@ -467,7 +474,6 @@ class ObjectStorage:
|
||||
self._delete_metadata(bucket_id, rel)
|
||||
|
||||
self._invalidate_bucket_stats_cache(bucket_id)
|
||||
# Performance: Lazy update - only remove the affected key instead of invalidating whole cache
|
||||
self._update_object_cache_entry(bucket_id, safe_key.as_posix(), None)
|
||||
self._cleanup_empty_parents(path, bucket_path)
|
||||
|
||||
@@ -490,14 +496,13 @@ class ObjectStorage:
|
||||
shutil.rmtree(legacy_version_dir, ignore_errors=True)
|
||||
|
||||
self._invalidate_bucket_stats_cache(bucket_id)
|
||||
# Performance: Lazy update - only remove the affected key instead of invalidating whole cache
|
||||
self._update_object_cache_entry(bucket_id, rel.as_posix(), None)
|
||||
self._cleanup_empty_parents(target, bucket_path)
|
||||
|
||||
def is_versioning_enabled(self, bucket_name: str) -> bool:
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
return self._is_versioning_enabled(bucket_path)
|
||||
|
||||
def set_bucket_versioning(self, bucket_name: str, enabled: bool) -> None:
|
||||
@@ -689,11 +694,11 @@ class ObjectStorage:
|
||||
"""Get tags for an object."""
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
safe_key = self._sanitize_object_key(object_key)
|
||||
object_path = bucket_path / safe_key
|
||||
if not object_path.exists():
|
||||
raise StorageError("Object does not exist")
|
||||
raise ObjectNotFoundError("Object does not exist")
|
||||
|
||||
for meta_file in (self._metadata_file(bucket_path.name, safe_key), self._legacy_metadata_file(bucket_path.name, safe_key)):
|
||||
if not meta_file.exists():
|
||||
@@ -712,11 +717,11 @@ class ObjectStorage:
|
||||
"""Set tags for an object."""
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
safe_key = self._sanitize_object_key(object_key)
|
||||
object_path = bucket_path / safe_key
|
||||
if not object_path.exists():
|
||||
raise StorageError("Object does not exist")
|
||||
raise ObjectNotFoundError("Object does not exist")
|
||||
|
||||
meta_file = self._metadata_file(bucket_path.name, safe_key)
|
||||
|
||||
@@ -750,7 +755,7 @@ class ObjectStorage:
|
||||
def list_object_versions(self, bucket_name: str, object_key: str) -> List[Dict[str, Any]]:
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
bucket_id = bucket_path.name
|
||||
safe_key = self._sanitize_object_key(object_key)
|
||||
version_dir = self._version_dir(bucket_id, safe_key)
|
||||
@@ -774,7 +779,7 @@ class ObjectStorage:
|
||||
def restore_object_version(self, bucket_name: str, object_key: str, version_id: str) -> ObjectMeta:
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
bucket_id = bucket_path.name
|
||||
safe_key = self._sanitize_object_key(object_key)
|
||||
version_dir = self._version_dir(bucket_id, safe_key)
|
||||
@@ -811,7 +816,7 @@ class ObjectStorage:
|
||||
def delete_object_version(self, bucket_name: str, object_key: str, version_id: str) -> None:
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
bucket_id = bucket_path.name
|
||||
safe_key = self._sanitize_object_key(object_key)
|
||||
version_dir = self._version_dir(bucket_id, safe_key)
|
||||
@@ -834,7 +839,7 @@ class ObjectStorage:
|
||||
def list_orphaned_objects(self, bucket_name: str) -> List[Dict[str, Any]]:
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
bucket_id = bucket_path.name
|
||||
version_roots = [self._bucket_versions_root(bucket_id), self._legacy_versions_root(bucket_id)]
|
||||
if not any(root.exists() for root in version_roots):
|
||||
@@ -902,7 +907,7 @@ class ObjectStorage:
|
||||
) -> str:
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
bucket_id = bucket_path.name
|
||||
safe_key = self._sanitize_object_key(object_key)
|
||||
upload_id = uuid.uuid4().hex
|
||||
@@ -929,8 +934,8 @@ class ObjectStorage:
|
||||
|
||||
Uses file locking to safely update the manifest and handle concurrent uploads.
|
||||
"""
|
||||
-if part_number < 1:
-    raise StorageError("part_number must be >= 1")
+if part_number < 1 or part_number > 10000:
+    raise StorageError("part_number must be between 1 and 10000")
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
|
||||
upload_root = self._multipart_dir(bucket_path.name, upload_id)
|
||||
@@ -939,7 +944,6 @@ class ObjectStorage:
|
||||
if not upload_root.exists():
|
||||
raise StorageError("Multipart upload not found")
|
||||
|
||||
# Write part to temporary file first, then rename atomically
|
||||
checksum = hashlib.md5()
|
||||
part_filename = f"part-{part_number:05d}.part"
|
||||
part_path = upload_root / part_filename
|
||||
@@ -948,11 +952,8 @@ class ObjectStorage:
|
||||
try:
|
||||
with temp_path.open("wb") as target:
|
||||
shutil.copyfileobj(_HashingReader(stream, checksum), target)
|
||||
|
||||
# Atomic rename (or replace on Windows)
|
||||
temp_path.replace(part_path)
|
||||
except OSError:
|
||||
# Clean up temp file on failure
|
||||
try:
|
||||
temp_path.unlink(missing_ok=True)
|
||||
except OSError:
|
||||
@@ -968,7 +969,6 @@ class ObjectStorage:
|
||||
manifest_path = upload_root / self.MULTIPART_MANIFEST
|
||||
lock_path = upload_root / ".manifest.lock"
|
||||
|
||||
# Retry loop for handling transient lock/read failures
|
||||
max_retries = 3
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
@@ -1079,11 +1079,6 @@ class ObjectStorage:
|
||||
checksum.update(data)
|
||||
target.write(data)
|
||||
|
||||
metadata = manifest.get("metadata")
|
||||
if metadata:
|
||||
self._write_metadata(bucket_id, safe_key, metadata)
|
||||
else:
|
||||
self._delete_metadata(bucket_id, safe_key)
|
||||
except BlockingIOError:
|
||||
raise StorageError("Another upload to this key is in progress")
|
||||
finally:
|
||||
@@ -1097,12 +1092,18 @@ class ObjectStorage:
|
||||
self._invalidate_bucket_stats_cache(bucket_id)
|
||||
|
||||
stat = destination.stat()
|
||||
# Performance: Lazy update - only update the affected key instead of invalidating whole cache
|
||||
etag = checksum.hexdigest()
|
||||
metadata = manifest.get("metadata")
|
||||
|
||||
internal_meta = {"__etag__": etag, "__size__": str(stat.st_size)}
|
||||
combined_meta = {**internal_meta, **(metadata or {})}
|
||||
self._write_metadata(bucket_id, safe_key, combined_meta)
|
||||
|
||||
obj_meta = ObjectMeta(
|
||||
key=safe_key.as_posix(),
|
||||
size=stat.st_size,
|
||||
last_modified=datetime.fromtimestamp(stat.st_mtime, timezone.utc),
|
||||
-etag=checksum.hexdigest(),
+etag=etag,
|
||||
metadata=metadata,
|
||||
)
|
||||
self._update_object_cache_entry(bucket_id, safe_key.as_posix(), obj_meta)
|
||||
@@ -1150,10 +1151,10 @@ class ObjectStorage:
|
||||
"""List all active multipart uploads for a bucket."""
|
||||
bucket_path = self._bucket_path(bucket_name)
|
||||
if not bucket_path.exists():
|
||||
raise StorageError("Bucket does not exist")
|
||||
raise BucketNotFoundError("Bucket does not exist")
|
||||
bucket_id = bucket_path.name
|
||||
uploads = []
|
||||
-multipart_root = self._bucket_multipart_root(bucket_id)
+multipart_root = self._multipart_bucket_root(bucket_id)
|
||||
if multipart_root.exists():
|
||||
for upload_dir in multipart_root.iterdir():
|
||||
if not upload_dir.is_dir():
|
||||
@@ -1170,7 +1171,7 @@ class ObjectStorage:
|
||||
})
|
||||
except (OSError, json.JSONDecodeError):
|
||||
continue
|
||||
-legacy_root = self._legacy_multipart_root(bucket_id)
+legacy_root = self._legacy_multipart_bucket_root(bucket_id)
|
||||
if legacy_root.exists():
|
||||
for upload_dir in legacy_root.iterdir():
|
||||
if not upload_dir.is_dir():
|
||||
@@ -1369,10 +1370,7 @@ class ObjectStorage:
|
||||
stat = entry.stat()
|
||||
|
||||
etag = meta_cache.get(key)
|
||||
|
||||
if not etag:
|
||||
etag = f'"{stat.st_size}-{int(stat.st_mtime)}"'
|
||||
|
||||
|
||||
objects[key] = ObjectMeta(
|
||||
key=key,
|
||||
size=stat.st_size,
|
||||
@@ -1396,7 +1394,6 @@ class ObjectStorage:
|
||||
"""
|
||||
now = time.time()
|
||||
|
||||
# Quick check with global lock (brief)
|
||||
with self._cache_lock:
|
||||
cached = self._object_cache.get(bucket_id)
|
||||
if cached:
|
||||
@@ -1406,10 +1403,8 @@ class ObjectStorage:
|
||||
return objects
|
||||
cache_version = self._cache_version.get(bucket_id, 0)
|
||||
|
||||
# Use per-bucket lock for cache building (allows parallel builds for different buckets)
|
||||
bucket_lock = self._get_bucket_lock(bucket_id)
|
||||
with bucket_lock:
|
||||
# Double-check cache after acquiring per-bucket lock
|
||||
with self._cache_lock:
|
||||
cached = self._object_cache.get(bucket_id)
|
||||
if cached:
|
||||
@@ -1417,17 +1412,12 @@ class ObjectStorage:
|
||||
if now - timestamp < self.KEY_INDEX_CACHE_TTL:
|
||||
self._object_cache.move_to_end(bucket_id)
|
||||
return objects
|
||||
|
||||
# Build cache with per-bucket lock held (prevents duplicate work)
|
||||
objects = self._build_object_cache(bucket_path)
|
||||
|
||||
with self._cache_lock:
|
||||
# Check if cache was invalidated while we were building
|
||||
current_version = self._cache_version.get(bucket_id, 0)
|
||||
if current_version != cache_version:
|
||||
objects = self._build_object_cache(bucket_path)
|
||||
|
||||
# Evict oldest entries if cache is full
|
||||
while len(self._object_cache) >= self.OBJECT_CACHE_MAX_SIZE:
|
||||
self._object_cache.popitem(last=False)
|
||||
|
||||
@@ -1461,12 +1451,9 @@ class ObjectStorage:
|
||||
if cached:
|
||||
objects, timestamp = cached
|
||||
if meta is None:
|
||||
# Delete operation - remove key from cache
|
||||
objects.pop(key, None)
|
||||
else:
|
||||
# Put operation - update/add key in cache
|
||||
objects[key] = meta
|
||||
# Keep same timestamp - don't reset TTL for single key updates
|
||||
|
||||
def _ensure_system_roots(self) -> None:
|
||||
for path in (
|
||||
@@ -1487,13 +1474,12 @@ class ObjectStorage:
|
||||
return self._system_bucket_root(bucket_name) / self.BUCKET_CONFIG_FILE
|
||||
|
||||
def _read_bucket_config(self, bucket_name: str) -> dict[str, Any]:
|
||||
# Performance: Check cache first
|
||||
now = time.time()
|
||||
cached = self._bucket_config_cache.get(bucket_name)
|
||||
if cached:
|
||||
config, cached_time = cached
|
||||
if now - cached_time < self._bucket_config_cache_ttl:
|
||||
-return config.copy() # Return copy to prevent mutation
+return config.copy()
|
||||
|
||||
config_path = self._bucket_config_path(bucket_name)
|
||||
if not config_path.exists():
|
||||
@@ -1512,7 +1498,6 @@ class ObjectStorage:
|
||||
config_path = self._bucket_config_path(bucket_name)
|
||||
config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
config_path.write_text(json.dumps(payload), encoding="utf-8")
|
||||
# Performance: Update cache immediately after write
|
||||
self._bucket_config_cache[bucket_name] = (payload.copy(), time.time())
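The config read/write pair above amounts to a small TTL cache with defensive copies on read and a write-through update on save. A rough standalone sketch of that behaviour, assuming one JSON file per bucket (the module-level cache and TTL value are illustrative, not the storage class itself):

import json
import time
from pathlib import Path

_CACHE = {}     # name -> (config dict, cached_at timestamp)
_TTL = 5.0      # assumed cache lifetime in seconds

def read_config(path: Path, name: str) -> dict:
    cached = _CACHE.get(name)
    if cached and time.time() - cached[1] < _TTL:
        return cached[0].copy()            # return a copy so callers cannot mutate the cache
    config = json.loads(path.read_text(encoding="utf-8")) if path.exists() else {}
    _CACHE[name] = (config, time.time())
    return config.copy()

def write_config(path: Path, name: str, payload: dict) -> None:
    path.parent.mkdir(parents=True, exist_ok=True)
    path.write_text(json.dumps(payload), encoding="utf-8")
    _CACHE[name] = (payload.copy(), time.time())   # write-through: next read skips the disk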
|
||||
|
||||
def _set_bucket_config_entry(self, bucket_name: str, key: str, value: Any | None) -> None:
|
||||
@@ -1638,7 +1623,6 @@ class ObjectStorage:
|
||||
def _check_bucket_contents(self, bucket_path: Path) -> tuple[bool, bool, bool]:
|
||||
"""Check bucket for objects, versions, and multipart uploads in a single pass.
|
||||
|
||||
Performance optimization: Combines three separate rglob traversals into one.
|
||||
Returns (has_visible_objects, has_archived_versions, has_active_multipart_uploads).
|
||||
Uses early exit when all three are found.
|
||||
"""
|
||||
@@ -1647,7 +1631,6 @@ class ObjectStorage:
|
||||
has_multipart = False
|
||||
bucket_name = bucket_path.name
|
||||
|
||||
# Check visible objects in bucket
|
||||
for path in bucket_path.rglob("*"):
|
||||
if has_objects:
|
||||
break
|
||||
@@ -1658,7 +1641,6 @@ class ObjectStorage:
|
||||
continue
|
||||
has_objects = True
|
||||
|
||||
# Check archived versions (only if needed)
|
||||
for version_root in (
|
||||
self._bucket_versions_root(bucket_name),
|
||||
self._legacy_versions_root(bucket_name),
|
||||
@@ -1671,7 +1653,6 @@ class ObjectStorage:
|
||||
has_versions = True
|
||||
break
|
||||
|
||||
# Check multipart uploads (only if needed)
|
||||
for uploads_root in (
|
||||
self._multipart_bucket_root(bucket_name),
|
||||
self._legacy_multipart_bucket_root(bucket_name),
|
||||
@@ -1705,7 +1686,7 @@ class ObjectStorage:
|
||||
try:
|
||||
os.chmod(target_path, stat.S_IRWXU)
|
||||
func(target_path)
|
||||
except Exception as exc: # pragma: no cover - fallback failure
|
||||
except Exception as exc:
|
||||
raise StorageError(f"Unable to delete bucket contents: {exc}") from exc
|
||||
|
||||
try:
|
||||
|
||||
153
app/ui.py

@@ -1,5 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import io
|
||||
import json
|
||||
import uuid
|
||||
import psutil
|
||||
@@ -28,7 +29,7 @@ from flask_wtf.csrf import generate_csrf
|
||||
from .acl import AclService, create_canned_acl, CANNED_ACLS
|
||||
from .bucket_policies import BucketPolicyStore
|
||||
from .connections import ConnectionStore, RemoteConnection
|
||||
from .extensions import limiter
|
||||
from .extensions import limiter, csrf
|
||||
from .iam import IamError
|
||||
from .kms import KMSManager
|
||||
from .replication import ReplicationManager, ReplicationRule
|
||||
@@ -275,6 +276,9 @@ def buckets_overview():
|
||||
})
|
||||
return render_template("buckets.html", buckets=visible_buckets, principal=principal)
|
||||
|
||||
@ui_bp.get("/buckets")
|
||||
def buckets_redirect():
|
||||
return redirect(url_for("ui.buckets_overview"))
|
||||
|
||||
@ui_bp.post("/buckets")
|
||||
def create_bucket():
|
||||
@@ -370,7 +374,7 @@ def bucket_detail(bucket_name: str):
|
||||
kms_keys = kms_manager.list_keys() if kms_manager else []
|
||||
kms_enabled = current_app.config.get("KMS_ENABLED", False)
|
||||
encryption_enabled = current_app.config.get("ENCRYPTION_ENABLED", False)
|
||||
can_manage_encryption = can_manage_versioning # Same as other bucket properties
|
||||
can_manage_encryption = can_manage_versioning
|
||||
|
||||
bucket_quota = storage.get_bucket_quota(bucket_name)
|
||||
bucket_stats = storage.bucket_stats(bucket_name)
|
||||
@@ -449,8 +453,6 @@ def list_bucket_objects(bucket_name: str):
|
||||
except StorageError:
|
||||
versioning_enabled = False
|
||||
|
||||
# Pre-compute URL templates once (not per-object) for performance
|
||||
# Frontend will construct actual URLs by replacing KEY_PLACEHOLDER
|
||||
preview_template = url_for("ui.object_preview", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
|
||||
delete_template = url_for("ui.delete_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
|
||||
presign_template = url_for("ui.object_presign", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
|
||||
@@ -526,8 +528,6 @@ def upload_object(bucket_name: str):
|
||||
try:
|
||||
_authorize_ui(principal, bucket_name, "write")
|
||||
_storage().put_object(bucket_name, object_key, file.stream, metadata=metadata)
|
||||
|
||||
# Trigger replication
|
||||
_replication().trigger_replication(bucket_name, object_key)
|
||||
|
||||
message = f"Uploaded '{object_key}'"
|
||||
@@ -563,6 +563,8 @@ def initiate_multipart_upload(bucket_name: str):
|
||||
|
||||
|
||||
@ui_bp.put("/buckets/<bucket_name>/multipart/<upload_id>/parts")
|
||||
@limiter.exempt
|
||||
@csrf.exempt
|
||||
def upload_multipart_part(bucket_name: str, upload_id: str):
|
||||
principal = _current_principal()
|
||||
try:
|
||||
@@ -576,7 +578,11 @@ def upload_multipart_part(bucket_name: str, upload_id: str):
|
||||
if part_number < 1:
|
||||
return jsonify({"error": "partNumber must be >= 1"}), 400
|
||||
try:
|
||||
etag = _storage().upload_multipart_part(bucket_name, upload_id, part_number, request.stream)
|
||||
data = request.get_data()
|
||||
if not data:
|
||||
return jsonify({"error": "Empty request body"}), 400
|
||||
stream = io.BytesIO(data)
|
||||
etag = _storage().upload_multipart_part(bucket_name, upload_id, part_number, stream)
|
||||
except StorageError as exc:
|
||||
return jsonify({"error": str(exc)}), 400
|
||||
return jsonify({"etag": etag, "part_number": part_number})
|
||||
@@ -606,9 +612,14 @@ def complete_multipart_upload(bucket_name: str, upload_id: str):
|
||||
normalized.append({"part_number": number, "etag": etag})
|
||||
try:
|
||||
result = _storage().complete_multipart_upload(bucket_name, upload_id, normalized)
|
||||
_replication().trigger_replication(bucket_name, result["key"])
|
||||
|
||||
return jsonify(result)
|
||||
_replication().trigger_replication(bucket_name, result.key)
|
||||
|
||||
return jsonify({
|
||||
"key": result.key,
|
||||
"size": result.size,
|
||||
"etag": result.etag,
|
||||
"last_modified": result.last_modified.isoformat() if result.last_modified else None,
|
||||
})
|
||||
except StorageError as exc:
|
||||
return jsonify({"error": str(exc)}), 400
|
||||
|
||||
@@ -753,20 +764,18 @@ def bulk_download_objects(bucket_name: str):
|
||||
if not cleaned:
|
||||
return jsonify({"error": "Select at least one object to download"}), 400
|
||||
|
||||
MAX_KEYS = current_app.config.get("BULK_DELETE_MAX_KEYS", 500) # Reuse same limit for now
|
||||
MAX_KEYS = current_app.config.get("BULK_DELETE_MAX_KEYS", 500)
|
||||
if len(cleaned) > MAX_KEYS:
|
||||
return jsonify({"error": f"A maximum of {MAX_KEYS} objects can be downloaded per request"}), 400
|
||||
|
||||
unique_keys = list(dict.fromkeys(cleaned))
|
||||
storage = _storage()
|
||||
|
||||
# Verify permission to read bucket contents
|
||||
try:
|
||||
_authorize_ui(principal, bucket_name, "read")
|
||||
except IamError as exc:
|
||||
return jsonify({"error": str(exc)}), 403
|
||||
|
||||
# Create ZIP archive of selected objects
|
||||
buffer = io.BytesIO()
|
||||
with zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED) as zf:
|
||||
for key in unique_keys:
|
||||
@@ -783,7 +792,6 @@ def bulk_download_objects(bucket_name: str):
|
||||
path = storage.get_object_path(bucket_name, key)
|
||||
zf.write(path, arcname=key)
|
||||
except (StorageError, IamError):
|
||||
# Skip objects that can't be accessed
|
||||
continue
|
||||
|
||||
buffer.seek(0)
|
||||
@@ -834,7 +842,6 @@ def object_preview(bucket_name: str, object_key: str) -> Response:
|
||||
|
||||
download = request.args.get("download") == "1"
|
||||
|
||||
# Check if object is encrypted and needs decryption
|
||||
is_encrypted = "x-amz-server-side-encryption" in metadata
|
||||
if is_encrypted and hasattr(storage, 'get_object_data'):
|
||||
try:
|
||||
@@ -870,7 +877,6 @@ def object_presign(bucket_name: str, object_key: str):
|
||||
encoded_key = quote(object_key, safe="/")
|
||||
url = f"{api_base}/presign/{bucket_name}/{encoded_key}"
|
||||
|
||||
# Use API base URL for forwarded headers so presigned URLs point to API, not UI
|
||||
parsed_api = urlparse(api_base)
|
||||
headers = _api_headers()
|
||||
headers["X-Forwarded-Host"] = parsed_api.netloc or "127.0.0.1:5000"
|
||||
@@ -1015,7 +1021,6 @@ def update_bucket_quota(bucket_name: str):
|
||||
"""Update bucket quota configuration (admin only)."""
|
||||
principal = _current_principal()
|
||||
|
||||
# Quota management is admin-only
|
||||
is_admin = False
|
||||
try:
|
||||
_iam().authorize(principal, None, "iam:list_users")
|
||||
@@ -1037,7 +1042,6 @@ def update_bucket_quota(bucket_name: str):
|
||||
flash(_friendly_error_message(exc), "danger")
|
||||
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
|
||||
|
||||
# Parse quota values
|
||||
max_mb_str = request.form.get("max_mb", "").strip()
|
||||
max_objects_str = request.form.get("max_objects", "").strip()
|
||||
|
||||
@@ -1049,7 +1053,7 @@ def update_bucket_quota(bucket_name: str):
|
||||
max_mb = int(max_mb_str)
|
||||
if max_mb < 1:
|
||||
raise ValueError("Size must be at least 1 MB")
|
||||
max_bytes = max_mb * 1024 * 1024 # Convert MB to bytes
|
||||
max_bytes = max_mb * 1024 * 1024
|
||||
except ValueError as exc:
|
||||
flash(f"Invalid size value: {exc}", "danger")
|
||||
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
|
||||
@@ -1102,7 +1106,6 @@ def update_bucket_encryption(bucket_name: str):
|
||||
flash("Invalid encryption algorithm", "danger")
|
||||
return redirect(url_for("ui.bucket_detail", bucket_name=bucket_name, tab="properties"))
|
||||
|
||||
# Build encryption configuration in AWS S3 format
|
||||
encryption_config: dict[str, Any] = {
|
||||
"Rules": [
|
||||
{
|
||||
@@ -1493,7 +1496,6 @@ def update_bucket_replication(bucket_name: str):
|
||||
if rule:
|
||||
rule.enabled = True
|
||||
_replication().set_rule(rule)
|
||||
# When resuming, sync any pending objects that accumulated while paused
|
||||
if rule.mode == REPLICATION_MODE_ALL:
|
||||
_replication().replicate_existing_objects(bucket_name)
|
||||
flash("Replication resumed. Syncing pending objects in background.", "success")
|
||||
@@ -1588,6 +1590,84 @@ def get_replication_status(bucket_name: str):
|
||||
})
|
||||
|
||||
|
||||
@ui_bp.get("/buckets/<bucket_name>/replication/failures")
|
||||
def get_replication_failures(bucket_name: str):
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_authorize_ui(principal, bucket_name, "replication")
|
||||
except IamError:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
|
||||
limit = request.args.get("limit", 50, type=int)
|
||||
offset = request.args.get("offset", 0, type=int)
|
||||
|
||||
failures = _replication().get_failed_items(bucket_name, limit, offset)
|
||||
total = _replication().get_failure_count(bucket_name)
|
||||
|
||||
return jsonify({
|
||||
"failures": [f.to_dict() for f in failures],
|
||||
"total": total,
|
||||
"limit": limit,
|
||||
"offset": offset,
|
||||
})
|
||||
|
||||
|
||||
@ui_bp.post("/buckets/<bucket_name>/replication/failures/<path:object_key>/retry")
|
||||
def retry_replication_failure(bucket_name: str, object_key: str):
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_authorize_ui(principal, bucket_name, "replication")
|
||||
except IamError:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
|
||||
success = _replication().retry_failed_item(bucket_name, object_key)
|
||||
if success:
|
||||
return jsonify({"status": "submitted", "object_key": object_key})
|
||||
return jsonify({"error": "Failed to submit retry"}), 400
|
||||
|
||||
|
||||
@ui_bp.post("/buckets/<bucket_name>/replication/failures/retry-all")
|
||||
def retry_all_replication_failures(bucket_name: str):
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_authorize_ui(principal, bucket_name, "replication")
|
||||
except IamError:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
|
||||
result = _replication().retry_all_failed(bucket_name)
|
||||
return jsonify({
|
||||
"status": "submitted",
|
||||
"submitted": result["submitted"],
|
||||
"skipped": result["skipped"],
|
||||
})
|
||||
|
||||
|
||||
@ui_bp.delete("/buckets/<bucket_name>/replication/failures/<path:object_key>")
|
||||
def dismiss_replication_failure(bucket_name: str, object_key: str):
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_authorize_ui(principal, bucket_name, "replication")
|
||||
except IamError:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
|
||||
success = _replication().dismiss_failure(bucket_name, object_key)
|
||||
if success:
|
||||
return jsonify({"status": "dismissed", "object_key": object_key})
|
||||
return jsonify({"error": "Failure not found"}), 404
|
||||
|
||||
|
||||
@ui_bp.delete("/buckets/<bucket_name>/replication/failures")
|
||||
def clear_replication_failures(bucket_name: str):
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_authorize_ui(principal, bucket_name, "replication")
|
||||
except IamError:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
|
||||
_replication().clear_failures(bucket_name)
|
||||
return jsonify({"status": "cleared"})
|
||||
|
||||
|
||||
@ui_bp.get("/connections/<connection_id>/health")
|
||||
def check_connection_health(connection_id: str):
|
||||
"""Check if a connection endpoint is reachable."""
|
||||
@@ -1740,6 +1820,37 @@ def bucket_lifecycle(bucket_name: str):
|
||||
return jsonify({"status": "ok", "message": "Lifecycle configuration saved", "rules": validated_rules})
|
||||
|
||||
|
||||
@ui_bp.get("/buckets/<bucket_name>/lifecycle/history")
|
||||
def get_lifecycle_history(bucket_name: str):
|
||||
principal = _current_principal()
|
||||
try:
|
||||
_authorize_ui(principal, bucket_name, "policy")
|
||||
except IamError:
|
||||
return jsonify({"error": "Access denied"}), 403
|
||||
|
||||
limit = request.args.get("limit", 50, type=int)
|
||||
offset = request.args.get("offset", 0, type=int)
|
||||
|
||||
lifecycle_manager = current_app.extensions.get("lifecycle")
|
||||
if not lifecycle_manager:
|
||||
return jsonify({
|
||||
"executions": [],
|
||||
"total": 0,
|
||||
"limit": limit,
|
||||
"offset": offset,
|
||||
"enabled": False,
|
||||
})
|
||||
|
||||
records = lifecycle_manager.get_execution_history(bucket_name, limit, offset)
|
||||
return jsonify({
|
||||
"executions": [r.to_dict() for r in records],
|
||||
"total": len(lifecycle_manager.get_execution_history(bucket_name, 1000, 0)),
|
||||
"limit": limit,
|
||||
"offset": offset,
|
||||
"enabled": True,
|
||||
})
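The history endpoint uses the same limit/offset shape and reports enabled: false when no lifecycle manager is registered on the app. A minimal read-only example, assuming an authenticated session, an assumed host, and an illustrative bucket name:

import requests

resp = requests.get("http://127.0.0.1:5000/buckets/media/lifecycle/history",
                    params={"limit": 20, "offset": 0})
data = resp.json()
if not data["enabled"]:
    print("lifecycle processing is disabled")
for execution in data["executions"]:
    print(execution)   # one dict per recorded lifecycle run for this bucket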
|
||||
|
||||
|
||||
@ui_bp.route("/buckets/<bucket_name>/cors", methods=["GET", "POST", "DELETE"])
|
||||
def bucket_cors(bucket_name: str):
|
||||
principal = _current_principal()
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
[pytest]
|
||||
testpaths = tests
|
||||
norecursedirs = data .git __pycache__ .venv
|
||||
markers =
|
||||
integration: marks tests as integration tests (may require external services)
|
||||
|
||||
1051
static/css/main.css
File diff suppressed because it is too large
192
static/js/bucket-detail-operations.js
Normal file
@@ -0,0 +1,192 @@
|
||||
window.BucketDetailOperations = (function() {
|
||||
'use strict';
|
||||
|
||||
let showMessage = function() {};
|
||||
let escapeHtml = function(s) { return s; };
|
||||
|
||||
function init(config) {
|
||||
showMessage = config.showMessage || showMessage;
|
||||
escapeHtml = config.escapeHtml || escapeHtml;
|
||||
}
|
||||
|
||||
async function loadLifecycleRules(card, endpoint) {
|
||||
if (!card || !endpoint) return;
|
||||
const body = card.querySelector('[data-lifecycle-body]');
|
||||
if (!body) return;
|
||||
|
||||
try {
|
||||
const response = await fetch(endpoint);
|
||||
const data = await response.json();
|
||||
|
||||
if (!response.ok) {
|
||||
body.innerHTML = `<tr><td colspan="5" class="text-center text-danger py-3">${escapeHtml(data.error || 'Failed to load')}</td></tr>`;
|
||||
return;
|
||||
}
|
||||
|
||||
const rules = data.rules || [];
|
||||
if (rules.length === 0) {
|
||||
body.innerHTML = '<tr><td colspan="5" class="text-center text-muted py-3">No lifecycle rules configured</td></tr>';
|
||||
return;
|
||||
}
|
||||
|
||||
body.innerHTML = rules.map(rule => {
|
||||
const actions = [];
|
||||
if (rule.expiration_days) actions.push(`Delete after ${rule.expiration_days} days`);
|
||||
if (rule.noncurrent_days) actions.push(`Delete old versions after ${rule.noncurrent_days} days`);
|
||||
if (rule.abort_mpu_days) actions.push(`Abort incomplete MPU after ${rule.abort_mpu_days} days`);
|
||||
|
||||
return `
|
||||
<tr>
|
||||
<td class="fw-medium">${escapeHtml(rule.id)}</td>
|
||||
<td><code>${escapeHtml(rule.prefix || '(all)')}</code></td>
|
||||
<td>${actions.map(a => `<div class="small">${escapeHtml(a)}</div>`).join('')}</td>
|
||||
<td>
|
||||
<span class="badge ${rule.status === 'Enabled' ? 'text-bg-success' : 'text-bg-secondary'}">${escapeHtml(rule.status)}</span>
|
||||
</td>
|
||||
<td class="text-end">
|
||||
<button class="btn btn-sm btn-outline-danger" onclick="BucketDetailOperations.deleteLifecycleRule('${escapeHtml(rule.id)}')">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</td>
|
||||
</tr>
|
||||
`;
|
||||
}).join('');
|
||||
} catch (err) {
|
||||
body.innerHTML = `<tr><td colspan="5" class="text-center text-danger py-3">${escapeHtml(err.message)}</td></tr>`;
|
||||
}
|
||||
}
|
||||
|
||||
async function loadCorsRules(card, endpoint) {
|
||||
if (!card || !endpoint) return;
|
||||
const body = document.getElementById('cors-rules-body');
|
||||
if (!body) return;
|
||||
|
||||
try {
|
||||
const response = await fetch(endpoint);
|
||||
const data = await response.json();
|
||||
|
||||
if (!response.ok) {
|
||||
body.innerHTML = `<tr><td colspan="5" class="text-center text-danger py-3">${escapeHtml(data.error || 'Failed to load')}</td></tr>`;
|
||||
return;
|
||||
}
|
||||
|
||||
const rules = data.rules || [];
|
||||
if (rules.length === 0) {
|
||||
body.innerHTML = '<tr><td colspan="5" class="text-center text-muted py-3">No CORS rules configured</td></tr>';
|
||||
return;
|
||||
}
|
||||
|
||||
body.innerHTML = rules.map((rule, idx) => `
|
||||
<tr>
|
||||
<td>${(rule.allowed_origins || []).map(o => `<code class="d-block">${escapeHtml(o)}</code>`).join('')}</td>
|
||||
<td>${(rule.allowed_methods || []).map(m => `<span class="badge text-bg-secondary me-1">${escapeHtml(m)}</span>`).join('')}</td>
|
||||
<td class="small text-muted">${(rule.allowed_headers || []).slice(0, 3).join(', ')}${(rule.allowed_headers || []).length > 3 ? '...' : ''}</td>
|
||||
<td class="text-muted">${rule.max_age_seconds || 0}s</td>
|
||||
<td class="text-end">
|
||||
<button class="btn btn-sm btn-outline-danger" onclick="BucketDetailOperations.deleteCorsRule(${idx})">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</td>
|
||||
</tr>
|
||||
`).join('');
|
||||
} catch (err) {
|
||||
body.innerHTML = `<tr><td colspan="5" class="text-center text-danger py-3">${escapeHtml(err.message)}</td></tr>`;
|
||||
}
|
||||
}
|
||||
|
||||
async function loadAcl(card, endpoint) {
|
||||
if (!card || !endpoint) return;
|
||||
const body = card.querySelector('[data-acl-body]');
|
||||
if (!body) return;
|
||||
|
||||
try {
|
||||
const response = await fetch(endpoint);
|
||||
const data = await response.json();
|
||||
|
||||
if (!response.ok) {
|
||||
body.innerHTML = `<tr><td colspan="3" class="text-center text-danger py-3">${escapeHtml(data.error || 'Failed to load')}</td></tr>`;
|
||||
return;
|
||||
}
|
||||
|
||||
const grants = data.grants || [];
|
||||
if (grants.length === 0) {
|
||||
body.innerHTML = '<tr><td colspan="3" class="text-center text-muted py-3">No ACL grants configured</td></tr>';
|
||||
return;
|
||||
}
|
||||
|
||||
body.innerHTML = grants.map(grant => {
|
||||
const grantee = grant.grantee_type === 'CanonicalUser'
|
||||
? grant.display_name || grant.grantee_id
|
||||
: grant.grantee_uri || grant.grantee_type;
|
||||
return `
|
||||
<tr>
|
||||
<td class="fw-medium">${escapeHtml(grantee)}</td>
|
||||
<td><span class="badge text-bg-info">${escapeHtml(grant.permission)}</span></td>
|
||||
<td class="text-muted small">${escapeHtml(grant.grantee_type)}</td>
|
||||
</tr>
|
||||
`;
|
||||
}).join('');
|
||||
} catch (err) {
|
||||
body.innerHTML = `<tr><td colspan="3" class="text-center text-danger py-3">${escapeHtml(err.message)}</td></tr>`;
|
||||
}
|
||||
}
|
||||
|
||||
async function deleteLifecycleRule(ruleId) {
|
||||
if (!confirm(`Delete lifecycle rule "${ruleId}"?`)) return;
|
||||
const card = document.getElementById('lifecycle-rules-card');
|
||||
if (!card) return;
|
||||
const endpoint = card.dataset.lifecycleUrl;
|
||||
const csrfToken = window.getCsrfToken ? window.getCsrfToken() : '';
|
||||
|
||||
try {
|
||||
const resp = await fetch(endpoint, {
|
||||
method: 'DELETE',
|
||||
headers: { 'Content-Type': 'application/json', 'X-CSRFToken': csrfToken },
|
||||
body: JSON.stringify({ rule_id: ruleId })
|
||||
});
|
||||
const data = await resp.json();
|
||||
if (!resp.ok) throw new Error(data.error || 'Failed to delete');
|
||||
showMessage({ title: 'Rule deleted', body: `Lifecycle rule "${ruleId}" has been deleted.`, variant: 'success' });
|
||||
loadLifecycleRules(card, endpoint);
|
||||
} catch (err) {
|
||||
showMessage({ title: 'Delete failed', body: err.message, variant: 'danger' });
|
||||
}
|
||||
}
|
||||
|
||||
async function deleteCorsRule(index) {
|
||||
if (!confirm('Delete this CORS rule?')) return;
|
||||
const card = document.getElementById('cors-rules-card');
|
||||
if (!card) return;
|
||||
const endpoint = card.dataset.corsUrl;
|
||||
const csrfToken = window.getCsrfToken ? window.getCsrfToken() : '';
|
||||
|
||||
try {
|
||||
const resp = await fetch(endpoint, {
|
||||
method: 'DELETE',
|
||||
headers: { 'Content-Type': 'application/json', 'X-CSRFToken': csrfToken },
|
||||
body: JSON.stringify({ rule_index: index })
|
||||
});
|
||||
const data = await resp.json();
|
||||
if (!resp.ok) throw new Error(data.error || 'Failed to delete');
|
||||
showMessage({ title: 'Rule deleted', body: 'CORS rule has been deleted.', variant: 'success' });
|
||||
loadCorsRules(card, endpoint);
|
||||
} catch (err) {
|
||||
showMessage({ title: 'Delete failed', body: err.message, variant: 'danger' });
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
init: init,
|
||||
loadLifecycleRules: loadLifecycleRules,
|
||||
loadCorsRules: loadCorsRules,
|
||||
loadAcl: loadAcl,
|
||||
deleteLifecycleRule: deleteLifecycleRule,
|
||||
deleteCorsRule: deleteCorsRule
|
||||
};
|
||||
})();
|
||||
548
static/js/bucket-detail-upload.js
Normal file
@@ -0,0 +1,548 @@
|
||||
window.BucketDetailUpload = (function() {
|
||||
'use strict';
|
||||
|
||||
const MULTIPART_THRESHOLD = 8 * 1024 * 1024;
|
||||
const CHUNK_SIZE = 8 * 1024 * 1024;
|
||||
|
||||
let state = {
|
||||
isUploading: false,
|
||||
uploadProgress: { current: 0, total: 0, currentFile: '' }
|
||||
};
|
||||
|
||||
let elements = {};
|
||||
let callbacks = {};
|
||||
|
||||
function init(config) {
|
||||
elements = {
|
||||
uploadForm: config.uploadForm,
|
||||
uploadFileInput: config.uploadFileInput,
|
||||
uploadModal: config.uploadModal,
|
||||
uploadModalEl: config.uploadModalEl,
|
||||
uploadSubmitBtn: config.uploadSubmitBtn,
|
||||
uploadCancelBtn: config.uploadCancelBtn,
|
||||
uploadBtnText: config.uploadBtnText,
|
||||
uploadDropZone: config.uploadDropZone,
|
||||
uploadDropZoneLabel: config.uploadDropZoneLabel,
|
||||
uploadProgressStack: config.uploadProgressStack,
|
||||
uploadKeyPrefix: config.uploadKeyPrefix,
|
||||
singleFileOptions: config.singleFileOptions,
|
||||
bulkUploadProgress: config.bulkUploadProgress,
|
||||
bulkUploadStatus: config.bulkUploadStatus,
|
||||
bulkUploadCounter: config.bulkUploadCounter,
|
||||
bulkUploadProgressBar: config.bulkUploadProgressBar,
|
||||
bulkUploadCurrentFile: config.bulkUploadCurrentFile,
|
||||
bulkUploadResults: config.bulkUploadResults,
|
||||
bulkUploadSuccessAlert: config.bulkUploadSuccessAlert,
|
||||
bulkUploadErrorAlert: config.bulkUploadErrorAlert,
|
||||
bulkUploadSuccessCount: config.bulkUploadSuccessCount,
|
||||
bulkUploadErrorCount: config.bulkUploadErrorCount,
|
||||
bulkUploadErrorList: config.bulkUploadErrorList,
|
||||
floatingProgress: config.floatingProgress,
|
||||
floatingProgressBar: config.floatingProgressBar,
|
||||
floatingProgressStatus: config.floatingProgressStatus,
|
||||
floatingProgressTitle: config.floatingProgressTitle,
|
||||
floatingProgressExpand: config.floatingProgressExpand
|
||||
};
|
||||
|
||||
callbacks = {
|
||||
showMessage: config.showMessage || function() {},
|
||||
formatBytes: config.formatBytes || function(b) { return b + ' bytes'; },
|
||||
escapeHtml: config.escapeHtml || function(s) { return s; },
|
||||
onUploadComplete: config.onUploadComplete || function() {},
|
||||
hasFolders: config.hasFolders || function() { return false; },
|
||||
getCurrentPrefix: config.getCurrentPrefix || function() { return ''; }
|
||||
};
|
||||
|
||||
setupEventListeners();
|
||||
setupBeforeUnload();
|
||||
}
|
||||
|
||||
function isUploading() {
|
||||
return state.isUploading;
|
||||
}
|
||||
|
||||
function setupBeforeUnload() {
|
||||
window.addEventListener('beforeunload', (e) => {
|
||||
if (state.isUploading) {
|
||||
e.preventDefault();
|
||||
e.returnValue = 'Upload in progress. Are you sure you want to leave?';
|
||||
return e.returnValue;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function showFloatingProgress() {
|
||||
if (elements.floatingProgress) {
|
||||
elements.floatingProgress.classList.remove('d-none');
|
||||
}
|
||||
}
|
||||
|
||||
function hideFloatingProgress() {
|
||||
if (elements.floatingProgress) {
|
||||
elements.floatingProgress.classList.add('d-none');
|
||||
}
|
||||
}
|
||||
|
||||
function updateFloatingProgress(current, total, currentFile) {
|
||||
state.uploadProgress = { current, total, currentFile: currentFile || '' };
|
||||
if (elements.floatingProgressBar && total > 0) {
|
||||
const percent = Math.round((current / total) * 100);
|
||||
elements.floatingProgressBar.style.width = `${percent}%`;
|
||||
}
|
||||
if (elements.floatingProgressStatus) {
|
||||
if (currentFile) {
|
||||
elements.floatingProgressStatus.textContent = `${current}/${total} files - ${currentFile}`;
|
||||
} else {
|
||||
elements.floatingProgressStatus.textContent = `${current}/${total} files completed`;
|
||||
}
|
||||
}
|
||||
if (elements.floatingProgressTitle) {
|
||||
elements.floatingProgressTitle.textContent = `Uploading ${total} file${total !== 1 ? 's' : ''}...`;
|
||||
}
|
||||
}
|
||||
|
||||
function refreshUploadDropLabel() {
|
||||
if (!elements.uploadDropZoneLabel || !elements.uploadFileInput) return;
|
||||
const files = elements.uploadFileInput.files;
|
||||
if (!files || files.length === 0) {
|
||||
elements.uploadDropZoneLabel.textContent = 'No file selected';
|
||||
if (elements.singleFileOptions) elements.singleFileOptions.classList.remove('d-none');
|
||||
return;
|
||||
}
|
||||
elements.uploadDropZoneLabel.textContent = files.length === 1 ? files[0].name : `${files.length} files selected`;
|
||||
if (elements.singleFileOptions) {
|
||||
elements.singleFileOptions.classList.toggle('d-none', files.length > 1);
|
||||
}
|
||||
}
|
||||
|
||||
function updateUploadBtnText() {
|
||||
if (!elements.uploadBtnText || !elements.uploadFileInput) return;
|
||||
const files = elements.uploadFileInput.files;
|
||||
if (!files || files.length <= 1) {
|
||||
elements.uploadBtnText.textContent = 'Upload';
|
||||
} else {
|
||||
elements.uploadBtnText.textContent = `Upload ${files.length} files`;
|
||||
}
|
||||
}
|
||||
|
||||
function resetUploadUI() {
|
||||
if (elements.bulkUploadProgress) elements.bulkUploadProgress.classList.add('d-none');
|
||||
if (elements.bulkUploadResults) elements.bulkUploadResults.classList.add('d-none');
|
||||
if (elements.bulkUploadSuccessAlert) elements.bulkUploadSuccessAlert.classList.remove('d-none');
|
||||
if (elements.bulkUploadErrorAlert) elements.bulkUploadErrorAlert.classList.add('d-none');
|
||||
if (elements.bulkUploadErrorList) elements.bulkUploadErrorList.innerHTML = '';
|
||||
if (elements.uploadSubmitBtn) elements.uploadSubmitBtn.disabled = false;
|
||||
if (elements.uploadFileInput) elements.uploadFileInput.disabled = false;
|
||||
if (elements.uploadProgressStack) elements.uploadProgressStack.innerHTML = '';
|
||||
if (elements.uploadDropZone) {
|
||||
elements.uploadDropZone.classList.remove('upload-locked');
|
||||
elements.uploadDropZone.style.pointerEvents = '';
|
||||
}
|
||||
state.isUploading = false;
|
||||
hideFloatingProgress();
|
||||
}
|
||||
|
||||
function setUploadLockState(locked) {
|
||||
if (elements.uploadDropZone) {
|
||||
elements.uploadDropZone.classList.toggle('upload-locked', locked);
|
||||
elements.uploadDropZone.style.pointerEvents = locked ? 'none' : '';
|
||||
}
|
||||
if (elements.uploadFileInput) {
|
||||
elements.uploadFileInput.disabled = locked;
|
||||
}
|
||||
}
|
||||
|
||||
function createProgressItem(file) {
|
||||
const item = document.createElement('div');
|
||||
item.className = 'upload-progress-item';
|
||||
item.dataset.state = 'uploading';
|
||||
item.innerHTML = `
|
||||
<div class="d-flex justify-content-between align-items-start">
|
||||
<div class="min-width-0 flex-grow-1">
|
||||
<div class="file-name">${callbacks.escapeHtml(file.name)}</div>
|
||||
<div class="file-size">${callbacks.formatBytes(file.size)}</div>
|
||||
</div>
|
||||
<div class="upload-status text-end ms-2">Preparing...</div>
|
||||
</div>
|
||||
<div class="progress-container">
|
||||
<div class="progress">
|
||||
<div class="progress-bar bg-primary" role="progressbar" style="width: 0%"></div>
|
||||
</div>
|
||||
<div class="progress-text">
|
||||
<span class="progress-loaded">0 B</span>
|
||||
<span class="progress-percent">0%</span>
|
||||
</div>
|
||||
</div>
|
||||
`;
|
||||
return item;
|
||||
}
|
||||
|
||||
function updateProgressItem(item, { loaded, total, status, progressState, error }) {
|
||||
if (progressState) item.dataset.state = progressState;
|
||||
const statusEl = item.querySelector('.upload-status');
|
||||
const progressBar = item.querySelector('.progress-bar');
|
||||
const progressLoaded = item.querySelector('.progress-loaded');
|
||||
const progressPercent = item.querySelector('.progress-percent');
|
||||
|
||||
if (status) {
|
||||
statusEl.textContent = status;
|
||||
statusEl.className = 'upload-status text-end ms-2';
|
||||
if (progressState === 'success') statusEl.classList.add('success');
|
||||
if (progressState === 'error') statusEl.classList.add('error');
|
||||
}
|
||||
if (typeof loaded === 'number' && typeof total === 'number' && total > 0) {
|
||||
const percent = Math.round((loaded / total) * 100);
|
||||
progressBar.style.width = `${percent}%`;
|
||||
progressLoaded.textContent = `${callbacks.formatBytes(loaded)} / ${callbacks.formatBytes(total)}`;
|
||||
progressPercent.textContent = `${percent}%`;
|
||||
}
|
||||
if (error) {
|
||||
const progressContainer = item.querySelector('.progress-container');
|
||||
if (progressContainer) {
|
||||
progressContainer.innerHTML = `<div class="text-danger small mt-1">${callbacks.escapeHtml(error)}</div>`;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function uploadMultipart(file, objectKey, metadata, progressItem, urls) {
|
||||
const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;
|
||||
|
||||
updateProgressItem(progressItem, { status: 'Initiating...', loaded: 0, total: file.size });
|
||||
const initResp = await fetch(urls.initUrl, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', 'X-CSRFToken': csrfToken || '' },
|
||||
body: JSON.stringify({ object_key: objectKey, metadata })
|
||||
});
|
||||
if (!initResp.ok) {
|
||||
const err = await initResp.json().catch(() => ({}));
|
||||
throw new Error(err.error || 'Failed to initiate upload');
|
||||
}
|
||||
const { upload_id } = await initResp.json();
|
||||
|
||||
const partUrl = urls.partTemplate.replace('UPLOAD_ID_PLACEHOLDER', upload_id);
|
||||
const completeUrl = urls.completeTemplate.replace('UPLOAD_ID_PLACEHOLDER', upload_id);
|
||||
const abortUrl = urls.abortTemplate.replace('UPLOAD_ID_PLACEHOLDER', upload_id);
|
||||
|
||||
const parts = [];
|
||||
const totalParts = Math.ceil(file.size / CHUNK_SIZE);
|
||||
let uploadedBytes = 0;
|
||||
|
||||
try {
|
||||
for (let partNumber = 1; partNumber <= totalParts; partNumber++) {
|
||||
const start = (partNumber - 1) * CHUNK_SIZE;
|
||||
const end = Math.min(start + CHUNK_SIZE, file.size);
|
||||
const chunk = file.slice(start, end);
|
||||
|
||||
updateProgressItem(progressItem, {
|
||||
status: `Part ${partNumber}/${totalParts}`,
|
||||
loaded: uploadedBytes,
|
||||
total: file.size
|
||||
});
|
||||
|
||||
const partResp = await fetch(`${partUrl}?partNumber=${partNumber}`, {
|
||||
method: 'PUT',
|
||||
headers: { 'X-CSRFToken': csrfToken || '' },
|
||||
body: chunk
|
||||
});
|
||||
|
||||
if (!partResp.ok) {
|
||||
const err = await partResp.json().catch(() => ({}));
|
||||
throw new Error(err.error || `Part ${partNumber} failed`);
|
||||
}
|
||||
|
||||
const partData = await partResp.json();
|
||||
parts.push({ part_number: partNumber, etag: partData.etag });
|
||||
uploadedBytes += chunk.size;
|
||||
|
||||
updateProgressItem(progressItem, {
|
||||
loaded: uploadedBytes,
|
||||
total: file.size
|
||||
});
|
||||
}
|
||||
|
||||
updateProgressItem(progressItem, { status: 'Completing...', loaded: file.size, total: file.size });
|
||||
const completeResp = await fetch(completeUrl, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json', 'X-CSRFToken': csrfToken || '' },
|
||||
body: JSON.stringify({ parts })
|
||||
});
|
||||
|
||||
if (!completeResp.ok) {
|
||||
const err = await completeResp.json().catch(() => ({}));
|
||||
throw new Error(err.error || 'Failed to complete upload');
|
||||
}
|
||||
|
||||
return await completeResp.json();
|
||||
} catch (err) {
|
||||
try {
|
||||
await fetch(abortUrl, { method: 'DELETE', headers: { 'X-CSRFToken': csrfToken || '' } });
|
||||
} catch {}
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async function uploadRegular(file, objectKey, metadata, progressItem, formAction) {
|
||||
return new Promise((resolve, reject) => {
|
||||
const formData = new FormData();
|
||||
formData.append('object', file);
|
||||
formData.append('object_key', objectKey);
|
||||
if (metadata) formData.append('metadata', JSON.stringify(metadata));
|
||||
const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;
|
||||
if (csrfToken) formData.append('csrf_token', csrfToken);
|
||||
|
||||
const xhr = new XMLHttpRequest();
|
||||
xhr.open('POST', formAction, true);
|
||||
xhr.setRequestHeader('X-Requested-With', 'XMLHttpRequest');
|
||||
|
||||
xhr.upload.addEventListener('progress', (e) => {
|
||||
if (e.lengthComputable) {
|
||||
updateProgressItem(progressItem, {
|
||||
status: 'Uploading...',
|
||||
loaded: e.loaded,
|
||||
total: e.total
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
xhr.addEventListener('load', () => {
|
||||
if (xhr.status >= 200 && xhr.status < 300) {
|
||||
try {
|
||||
const data = JSON.parse(xhr.responseText);
|
||||
if (data.status === 'error') {
|
||||
reject(new Error(data.message || 'Upload failed'));
|
||||
} else {
|
||||
resolve(data);
|
||||
}
|
||||
} catch {
|
||||
resolve({});
|
||||
}
|
||||
} else {
|
||||
try {
|
||||
const data = JSON.parse(xhr.responseText);
|
||||
reject(new Error(data.message || `Upload failed (${xhr.status})`));
|
||||
} catch {
|
||||
reject(new Error(`Upload failed (${xhr.status})`));
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
xhr.addEventListener('error', () => reject(new Error('Network error')));
|
||||
xhr.addEventListener('abort', () => reject(new Error('Upload aborted')));
|
||||
|
||||
xhr.send(formData);
|
||||
});
|
||||
}
|
||||
|
||||
async function uploadSingleFile(file, keyPrefix, metadata, progressItem, urls) {
|
||||
const objectKey = keyPrefix ? `${keyPrefix}${file.name}` : file.name;
|
||||
const shouldUseMultipart = file.size >= MULTIPART_THRESHOLD && urls.initUrl;
|
||||
|
||||
if (!progressItem && elements.uploadProgressStack) {
|
||||
progressItem = createProgressItem(file);
|
||||
elements.uploadProgressStack.appendChild(progressItem);
|
||||
}
|
||||
|
||||
try {
|
||||
let result;
|
||||
if (shouldUseMultipart) {
|
||||
updateProgressItem(progressItem, { status: 'Multipart upload...', loaded: 0, total: file.size });
|
||||
result = await uploadMultipart(file, objectKey, metadata, progressItem, urls);
|
||||
} else {
|
||||
updateProgressItem(progressItem, { status: 'Uploading...', loaded: 0, total: file.size });
|
||||
result = await uploadRegular(file, objectKey, metadata, progressItem, urls.formAction);
|
||||
}
|
||||
updateProgressItem(progressItem, { progressState: 'success', status: 'Complete', loaded: file.size, total: file.size });
|
||||
return result;
|
||||
} catch (err) {
|
||||
updateProgressItem(progressItem, { progressState: 'error', status: 'Failed', error: err.message });
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
async function performBulkUpload(files, urls) {
|
||||
if (state.isUploading || !files || files.length === 0) return;
|
||||
|
||||
state.isUploading = true;
|
||||
setUploadLockState(true);
|
||||
const keyPrefix = (elements.uploadKeyPrefix?.value || '').trim();
|
||||
const metadataRaw = elements.uploadForm?.querySelector('textarea[name="metadata"]')?.value?.trim();
|
||||
let metadata = null;
|
||||
if (metadataRaw) {
|
||||
try {
|
||||
metadata = JSON.parse(metadataRaw);
|
||||
} catch {
|
||||
callbacks.showMessage({ title: 'Invalid metadata', body: 'Metadata must be valid JSON.', variant: 'danger' });
|
||||
resetUploadUI();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (elements.bulkUploadProgress) elements.bulkUploadProgress.classList.remove('d-none');
|
||||
if (elements.bulkUploadResults) elements.bulkUploadResults.classList.add('d-none');
|
||||
if (elements.uploadSubmitBtn) elements.uploadSubmitBtn.disabled = true;
|
||||
if (elements.uploadFileInput) elements.uploadFileInput.disabled = true;
|
||||
|
||||
const successFiles = [];
|
||||
const errorFiles = [];
|
||||
const total = files.length;
|
||||
|
||||
updateFloatingProgress(0, total, files[0]?.name || '');
|
||||
|
||||
for (let i = 0; i < total; i++) {
|
||||
const file = files[i];
|
||||
const current = i + 1;
|
||||
|
||||
if (elements.bulkUploadCounter) elements.bulkUploadCounter.textContent = `${current}/${total}`;
|
||||
if (elements.bulkUploadCurrentFile) elements.bulkUploadCurrentFile.textContent = `Uploading: ${file.name}`;
|
||||
if (elements.bulkUploadProgressBar) {
|
||||
const percent = Math.round((current / total) * 100);
|
||||
elements.bulkUploadProgressBar.style.width = `${percent}%`;
|
||||
}
|
||||
updateFloatingProgress(i, total, file.name);
|
||||
|
||||
try {
|
||||
await uploadSingleFile(file, keyPrefix, metadata, null, urls);
|
||||
successFiles.push(file.name);
|
||||
} catch (error) {
|
||||
errorFiles.push({ name: file.name, error: error.message || 'Unknown error' });
|
||||
}
|
||||
}
|
||||
updateFloatingProgress(total, total);
|
||||
|
||||
if (elements.bulkUploadProgress) elements.bulkUploadProgress.classList.add('d-none');
|
||||
if (elements.bulkUploadResults) elements.bulkUploadResults.classList.remove('d-none');
|
||||
|
||||
if (elements.bulkUploadSuccessCount) elements.bulkUploadSuccessCount.textContent = successFiles.length;
|
||||
if (successFiles.length === 0 && elements.bulkUploadSuccessAlert) {
|
||||
elements.bulkUploadSuccessAlert.classList.add('d-none');
|
||||
}
|
||||
|
||||
if (errorFiles.length > 0) {
|
||||
if (elements.bulkUploadErrorCount) elements.bulkUploadErrorCount.textContent = errorFiles.length;
|
||||
if (elements.bulkUploadErrorAlert) elements.bulkUploadErrorAlert.classList.remove('d-none');
|
||||
if (elements.bulkUploadErrorList) {
|
||||
elements.bulkUploadErrorList.innerHTML = errorFiles
|
||||
.map(f => `<li><strong>${callbacks.escapeHtml(f.name)}</strong>: ${callbacks.escapeHtml(f.error)}</li>`)
|
||||
.join('');
|
||||
}
|
||||
}
|
||||
|
||||
state.isUploading = false;
|
||||
setUploadLockState(false);
|
||||
|
||||
if (successFiles.length > 0) {
|
||||
if (elements.uploadBtnText) elements.uploadBtnText.textContent = 'Refreshing...';
|
||||
callbacks.onUploadComplete(successFiles, errorFiles);
|
||||
} else {
|
||||
if (elements.uploadSubmitBtn) elements.uploadSubmitBtn.disabled = false;
|
||||
if (elements.uploadFileInput) elements.uploadFileInput.disabled = false;
|
||||
}
|
||||
}
|
||||
|
||||
function setupEventListeners() {
|
||||
if (elements.uploadFileInput) {
|
||||
elements.uploadFileInput.addEventListener('change', () => {
|
||||
if (state.isUploading) return;
|
||||
refreshUploadDropLabel();
|
||||
updateUploadBtnText();
|
||||
resetUploadUI();
|
||||
});
|
||||
}
|
||||
|
||||
if (elements.uploadDropZone) {
|
||||
elements.uploadDropZone.addEventListener('click', () => {
|
||||
if (state.isUploading) return;
|
||||
elements.uploadFileInput?.click();
|
||||
});
|
||||
}
|
||||
|
||||
if (elements.floatingProgressExpand) {
|
||||
elements.floatingProgressExpand.addEventListener('click', () => {
|
||||
if (elements.uploadModal) {
|
||||
elements.uploadModal.show();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
if (elements.uploadModalEl) {
|
||||
elements.uploadModalEl.addEventListener('hide.bs.modal', () => {
|
||||
if (state.isUploading) {
|
||||
showFloatingProgress();
|
||||
}
|
||||
});
|
||||
|
||||
elements.uploadModalEl.addEventListener('hidden.bs.modal', () => {
|
||||
if (!state.isUploading) {
|
||||
resetUploadUI();
|
||||
if (elements.uploadFileInput) elements.uploadFileInput.value = '';
|
||||
refreshUploadDropLabel();
|
||||
updateUploadBtnText();
|
||||
}
|
||||
});
|
||||
|
||||
elements.uploadModalEl.addEventListener('show.bs.modal', () => {
|
||||
if (state.isUploading) {
|
||||
hideFloatingProgress();
|
||||
}
|
||||
if (callbacks.hasFolders() && callbacks.getCurrentPrefix()) {
|
||||
if (elements.uploadKeyPrefix) {
|
||||
elements.uploadKeyPrefix.value = callbacks.getCurrentPrefix();
|
||||
}
|
||||
} else if (elements.uploadKeyPrefix) {
|
||||
elements.uploadKeyPrefix.value = '';
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
function wireDropTarget(target, options) {
|
||||
const { highlightClass = '', autoOpenModal = false } = options || {};
|
||||
if (!target) return;
|
||||
|
||||
const preventDefaults = (event) => {
|
||||
event.preventDefault();
|
||||
event.stopPropagation();
|
||||
};
|
||||
|
||||
['dragenter', 'dragover'].forEach((eventName) => {
|
||||
target.addEventListener(eventName, (event) => {
|
||||
preventDefaults(event);
|
||||
if (state.isUploading) return;
|
||||
if (highlightClass) {
|
||||
target.classList.add(highlightClass);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
['dragleave', 'drop'].forEach((eventName) => {
|
||||
target.addEventListener(eventName, (event) => {
|
||||
preventDefaults(event);
|
||||
if (highlightClass) {
|
||||
target.classList.remove(highlightClass);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
target.addEventListener('drop', (event) => {
|
||||
if (state.isUploading) return;
|
||||
if (!event.dataTransfer?.files?.length || !elements.uploadFileInput) {
|
||||
return;
|
||||
}
|
||||
elements.uploadFileInput.files = event.dataTransfer.files;
|
||||
elements.uploadFileInput.dispatchEvent(new Event('change', { bubbles: true }));
|
||||
if (autoOpenModal && elements.uploadModal) {
|
||||
elements.uploadModal.show();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return {
|
||||
init: init,
|
||||
isUploading: isUploading,
|
||||
performBulkUpload: performBulkUpload,
|
||||
wireDropTarget: wireDropTarget,
|
||||
resetUploadUI: resetUploadUI,
|
||||
refreshUploadDropLabel: refreshUploadDropLabel,
|
||||
updateUploadBtnText: updateUploadBtnText
|
||||
};
|
||||
})();
|
||||
120
static/js/bucket-detail-utils.js
Normal file
@@ -0,0 +1,120 @@
|
||||
window.BucketDetailUtils = (function() {
|
||||
'use strict';
|
||||
|
||||
function setupJsonAutoIndent(textarea) {
|
||||
if (!textarea) return;
|
||||
|
||||
textarea.addEventListener('keydown', function(e) {
|
||||
if (e.key === 'Enter') {
|
||||
e.preventDefault();
|
||||
|
||||
const start = this.selectionStart;
|
||||
const end = this.selectionEnd;
|
||||
const value = this.value;
|
||||
|
||||
const lineStart = value.lastIndexOf('\n', start - 1) + 1;
|
||||
const currentLine = value.substring(lineStart, start);
|
||||
|
||||
const indentMatch = currentLine.match(/^(\s*)/);
|
||||
let indent = indentMatch ? indentMatch[1] : '';
|
||||
|
||||
const trimmedLine = currentLine.trim();
|
||||
const lastChar = trimmedLine.slice(-1);
|
||||
|
||||
let newIndent = indent;
|
||||
let insertAfter = '';
|
||||
|
||||
if (lastChar === '{' || lastChar === '[') {
|
||||
newIndent = indent + ' ';
|
||||
|
||||
const charAfterCursor = value.substring(start, start + 1).trim();
|
||||
if ((lastChar === '{' && charAfterCursor === '}') ||
|
||||
(lastChar === '[' && charAfterCursor === ']')) {
|
||||
insertAfter = '\n' + indent;
|
||||
}
|
||||
} else if (lastChar === ',' || lastChar === ':') {
|
||||
newIndent = indent;
|
||||
}
|
||||
|
||||
const insertion = '\n' + newIndent + insertAfter;
|
||||
const newValue = value.substring(0, start) + insertion + value.substring(end);
|
||||
|
||||
this.value = newValue;
|
||||
|
||||
const newCursorPos = start + 1 + newIndent.length;
|
||||
this.selectionStart = this.selectionEnd = newCursorPos;
|
||||
|
||||
this.dispatchEvent(new Event('input', { bubbles: true }));
|
||||
}
|
||||
|
||||
if (e.key === 'Tab') {
|
||||
e.preventDefault();
|
||||
const start = this.selectionStart;
|
||||
const end = this.selectionEnd;
|
||||
|
||||
if (e.shiftKey) {
|
||||
const lineStart = this.value.lastIndexOf('\n', start - 1) + 1;
|
||||
const lineContent = this.value.substring(lineStart, start);
|
||||
if (lineContent.startsWith(' ')) {
|
||||
this.value = this.value.substring(0, lineStart) +
|
||||
this.value.substring(lineStart + 2);
|
||||
this.selectionStart = this.selectionEnd = Math.max(lineStart, start - 2);
|
||||
}
|
||||
} else {
|
||||
this.value = this.value.substring(0, start) + ' ' + this.value.substring(end);
|
||||
this.selectionStart = this.selectionEnd = start + 2;
|
||||
}
|
||||
|
||||
this.dispatchEvent(new Event('input', { bubbles: true }));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
function formatBytes(bytes) {
|
||||
if (!Number.isFinite(bytes)) return `${bytes} bytes`;
|
||||
const units = ['bytes', 'KB', 'MB', 'GB', 'TB'];
|
||||
let i = 0;
|
||||
let size = bytes;
|
||||
while (size >= 1024 && i < units.length - 1) {
|
||||
size /= 1024;
|
||||
i++;
|
||||
}
|
||||
return `${size.toFixed(i === 0 ? 0 : 1)} ${units[i]}`;
|
||||
}
|
||||
|
||||
function escapeHtml(value) {
|
||||
if (value === null || value === undefined) return '';
|
||||
return String(value)
|
||||
.replace(/&/g, '&amp;')
|
||||
.replace(/</g, '&lt;')
|
||||
.replace(/>/g, '&gt;')
|
||||
.replace(/"/g, '"')
|
||||
.replace(/'/g, '&#39;');
|
||||
}
|
||||
|
||||
function fallbackCopy(text) {
|
||||
const textArea = document.createElement('textarea');
|
||||
textArea.value = text;
|
||||
textArea.style.position = 'fixed';
|
||||
textArea.style.left = '-9999px';
|
||||
textArea.style.top = '-9999px';
|
||||
document.body.appendChild(textArea);
|
||||
textArea.focus();
|
||||
textArea.select();
|
||||
let success = false;
|
||||
try {
|
||||
success = document.execCommand('copy');
|
||||
} catch {
|
||||
success = false;
|
||||
}
|
||||
document.body.removeChild(textArea);
|
||||
return success;
|
||||
}
|
||||
|
||||
return {
|
||||
setupJsonAutoIndent: setupJsonAutoIndent,
|
||||
formatBytes: formatBytes,
|
||||
escapeHtml: escapeHtml,
|
||||
fallbackCopy: fallbackCopy
|
||||
};
|
||||
})();
|
||||
@@ -24,105 +24,218 @@
|
||||
document.documentElement.dataset.bsTheme = 'light';
|
||||
document.documentElement.dataset.theme = 'light';
|
||||
}
|
||||
try {
|
||||
if (localStorage.getItem('myfsio-sidebar-collapsed') === 'true') {
|
||||
document.documentElement.classList.add('sidebar-will-collapse');
|
||||
}
|
||||
} catch (err) {}
|
||||
})();
|
||||
</script>
|
||||
<link rel="stylesheet" href="{{ url_for('static', filename='css/main.css') }}" />
|
||||
</head>
|
||||
<body>
|
||||
<nav class="navbar navbar-expand-lg myfsio-nav shadow-sm">
|
||||
<div class="container-fluid">
|
||||
<a class="navbar-brand fw-semibold" href="{{ url_for('ui.buckets_overview') }}">
|
||||
<img
|
||||
src="{{ url_for('static', filename='images/MyFSIO.png') }}"
|
||||
alt="MyFSIO logo"
|
||||
class="myfsio-logo"
|
||||
width="32"
|
||||
height="32"
|
||||
decoding="async"
|
||||
/>
|
||||
<span class="myfsio-title">MyFSIO</span>
|
||||
<header class="mobile-header d-lg-none">
|
||||
<button class="sidebar-toggle-btn" type="button" data-bs-toggle="offcanvas" data-bs-target="#mobileSidebar" aria-controls="mobileSidebar" aria-label="Toggle navigation">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M2.5 12a.5.5 0 0 1 .5-.5h10a.5.5 0 0 1 0 1H3a.5.5 0 0 1-.5-.5zm0-4a.5.5 0 0 1 .5-.5h10a.5.5 0 0 1 0 1H3a.5.5 0 0 1-.5-.5zm0-4a.5.5 0 0 1 .5-.5h10a.5.5 0 0 1 0 1H3a.5.5 0 0 1-.5-.5z"/>
|
||||
</svg>
|
||||
</button>
|
||||
<a class="mobile-brand" href="{{ url_for('ui.buckets_overview') }}">
|
||||
<img src="{{ url_for('static', filename='images/MyFSIO.png') }}" alt="MyFSIO logo" width="28" height="28" />
|
||||
<span>MyFSIO</span>
|
||||
</a>
|
||||
<button class="theme-toggle-mobile" type="button" id="themeToggleMobile" aria-label="Toggle dark mode">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="theme-icon-mobile" id="themeToggleSunMobile" viewBox="0 0 16 16">
|
||||
<path d="M8 11.5a3.5 3.5 0 1 1 0-7 3.5 3.5 0 0 1 0 7zm0 1.5a5 5 0 1 0 0-10 5 5 0 0 0 0 10zM8 0a.5.5 0 0 1 .5.5v1.555a.5.5 0 0 1-1 0V.5A.5.5 0 0 1 8 0zm0 12.945a.5.5 0 0 1 .5.5v2.055a.5.5 0 0 1-1 0v-2.055a.5.5 0 0 1 .5-.5zM2.343 2.343a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.708.707l-1.1-1.1a.5.5 0 0 1 0-.707zm9.507 9.507a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.707.708l-1.1-1.1a.5.5 0 0 1 0-.708zM0 8a.5.5 0 0 1 .5-.5h1.555a.5.5 0 0 1 0 1H.5A.5.5 0 0 1 0 8zm12.945 0a.5.5 0 0 1 .5-.5H15.5a.5.5 0 0 1 0 1h-2.055a.5.5 0 0 1-.5-.5zM2.343 13.657a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 1 1 .708.707l-1.1 1.1a.5.5 0 0 1-.708 0zm9.507-9.507a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 0 1 .707.708l-1.1 1.1a.5.5 0 0 1-.707 0z"/>
|
||||
</svg>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="theme-icon-mobile" id="themeToggleMoonMobile" viewBox="0 0 16 16">
|
||||
<path d="M6 .278a.768.768 0 0 1 .08.858 7.208 7.208 0 0 0-.878 3.46c0 4.021 3.278 7.277 7.318 7.277.527 0 1.04-.055 1.533-.16a.787.787 0 0 1 .81.316.733.733 0 0 1-.031.893A8.349 8.349 0 0 1 8.344 16C3.734 16 0 12.286 0 7.71 0 4.266 2.114 1.312 5.124.06A.752.752 0 0 1 6 .278z"/>
|
||||
<path d="M10.794 3.148a.217.217 0 0 1 .412 0l.387 1.162c.173.518.579.924 1.097 1.097l1.162.387a.217.217 0 0 1 0 .412l-1.162.387a1.734 1.734 0 0 0-1.097 1.097l-.387 1.162a.217.217 0 0 1-.412 0l-.387-1.162A1.734 1.734 0 0 0 9.31 6.593l-1.162-.387a.217.217 0 0 1 0-.412l1.162-.387a1.734 1.734 0 0 0 1.097-1.097l.387-1.162zM13.863.099a.145.145 0 0 1 .274 0l.258.774c.115.346.386.617.732.732l.774.258a.145.145 0 0 1 0 .274l-.774.258a1.156 1.156 0 0 0-.732.732l-.258.774a.145.145 0 0 1-.274 0l-.258-.774a1.156 1.156 0 0 0-.732-.732l-.774-.258a.145.145 0 0 1 0-.274l.774-.258c.346-.115.617-.386.732-.732L13.863.1z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</header>
|
||||
|
||||
<div class="offcanvas offcanvas-start sidebar-offcanvas" tabindex="-1" id="mobileSidebar" aria-labelledby="mobileSidebarLabel">
|
||||
<div class="offcanvas-header sidebar-header">
|
||||
<a class="sidebar-brand" href="{{ url_for('ui.buckets_overview') }}">
|
||||
<img src="{{ url_for('static', filename='images/MyFSIO.png') }}" alt="MyFSIO logo" class="sidebar-logo" width="36" height="36" />
|
||||
<span class="sidebar-title">MyFSIO</span>
|
||||
</a>
|
||||
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navContent" aria-controls="navContent" aria-expanded="false" aria-label="Toggle navigation">
|
||||
<span class="navbar-toggler-icon"></span>
|
||||
</button>
|
||||
<div class="collapse navbar-collapse" id="navContent">
|
||||
<ul class="navbar-nav me-auto mb-2 mb-lg-0">
|
||||
{% if principal %}
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="{{ url_for('ui.buckets_overview') }}">Buckets</a>
|
||||
</li>
|
||||
<button type="button" class="btn-close btn-close-white" data-bs-dismiss="offcanvas" aria-label="Close"></button>
|
||||
</div>
|
||||
<div class="offcanvas-body sidebar-body">
|
||||
<nav class="sidebar-nav">
|
||||
{% if principal %}
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Navigation</span>
|
||||
<a href="{{ url_for('ui.buckets_overview') }}" class="sidebar-link {% if request.endpoint == 'ui.buckets_overview' or request.endpoint == 'ui.bucket_detail' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
|
||||
</svg>
|
||||
<span>Buckets</span>
|
||||
</a>
|
||||
{% if can_manage_iam %}
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="{{ url_for('ui.iam_dashboard') }}">IAM</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="{{ url_for('ui.connections_dashboard') }}">Connections</a>
|
||||
</li>
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="{{ url_for('ui.metrics_dashboard') }}">Metrics</a>
|
||||
</li>
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% if principal %}
|
||||
<li class="nav-item">
|
||||
<a class="nav-link" href="{{ url_for('ui.docs_page') }}">Docs</a>
|
||||
</li>
|
||||
{% endif %}
|
||||
</ul>
|
||||
<div class="ms-lg-auto d-flex align-items-center gap-3 text-light flex-wrap">
|
||||
<button
|
||||
class="btn btn-outline-light btn-sm theme-toggle"
|
||||
type="button"
|
||||
id="themeToggle"
|
||||
aria-pressed="false"
|
||||
aria-label="Toggle dark mode"
|
||||
>
|
||||
<span id="themeToggleLabel" class="visually-hidden">Toggle dark mode</span>
|
||||
<svg
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
width="16"
|
||||
height="16"
|
||||
fill="currentColor"
|
||||
class="theme-icon"
|
||||
id="themeToggleSun"
|
||||
viewBox="0 0 16 16"
|
||||
aria-hidden="true"
|
||||
>
|
||||
<path
|
||||
d="M8 11.5a3.5 3.5 0 1 1 0-7 3.5 3.5 0 0 1 0 7zm0 1.5a5 5 0 1 0 0-10 5 5 0 0 0 0 10zM8 0a.5.5 0 0 1 .5.5v1.555a.5.5 0 0 1-1 0V.5A.5.5 0 0 1 8 0zm0 12.945a.5.5 0 0 1 .5.5v2.055a.5.5 0 0 1-1 0v-2.055a.5.5 0 0 1 .5-.5zM2.343 2.343a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.708.707l-1.1-1.1a.5.5 0 0 1 0-.707zm9.507 9.507a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.707.708l-1.1-1.1a.5.5 0 0 1 0-.708zM0 8a.5.5 0 0 1 .5-.5h1.555a.5.5 0 0 1 0 1H.5A.5.5 0 0 1 0 8zm12.945 0a.5.5 0 0 1 .5-.5H15.5a.5.5 0 0 1 0 1h-2.055a.5.5 0 0 1-.5-.5zM2.343 13.657a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 1 1 .708.707l-1.1 1.1a.5.5 0 0 1-.708 0zm9.507-9.507a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 0 1 .707.708l-1.1 1.1a.5.5 0 0 1-.707 0z"
|
||||
/>
|
||||
<a href="{{ url_for('ui.iam_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.iam_dashboard' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M15 14s1 0 1-1-1-4-5-4-5 3-5 4 1 1 1 1h8zm-7.978-1A.261.261 0 0 1 7 12.996c.001-.264.167-1.03.76-1.72C8.312 10.629 9.282 10 11 10c1.717 0 2.687.63 3.24 1.276.593.69.758 1.457.76 1.72l-.008.002a.274.274 0 0 1-.014.002H7.022zM11 7a2 2 0 1 0 0-4 2 2 0 0 0 0 4zm3-2a3 3 0 1 1-6 0 3 3 0 0 1 6 0zM6.936 9.28a5.88 5.88 0 0 0-1.23-.247A7.35 7.35 0 0 0 5 9c-4 0-5 3-5 4 0 .667.333 1 1 1h4.216A2.238 2.238 0 0 1 5 13c0-1.01.377-2.042 1.09-2.904.243-.294.526-.569.846-.816zM4.92 10A5.493 5.493 0 0 0 4 13H1c0-.26.164-1.03.76-1.724.545-.636 1.492-1.256 3.16-1.275zM1.5 5.5a3 3 0 1 1 6 0 3 3 0 0 1-6 0zm3-2a2 2 0 1 0 0 4 2 2 0 0 0 0-4z"/>
|
||||
</svg>
|
||||
<svg
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
width="16"
|
||||
height="16"
|
||||
fill="currentColor"
|
||||
class="theme-icon d-none"
|
||||
id="themeToggleMoon"
|
||||
viewBox="0 0 16 16"
|
||||
aria-hidden="true"
|
||||
>
|
||||
<path d="M6 .278a.768.768 0 0 1 .08.858 7.208 7.208 0 0 0-.878 3.46c0 4.021 3.278 7.277 7.318 7.277.527 0 1.04-.055 1.533-.16a.787.787 0 0 1 .81.316.733.733 0 0 1-.031.893A8.349 8.349 0 0 1 8.344 16C3.734 16 0 12.286 0 7.71 0 4.266 2.114 1.312 5.124.06A.752.752 0 0 1 6 .278z"/>
|
||||
<path d="M10.794 3.148a.217.217 0 0 1 .412 0l.387 1.162c.173.518.579.924 1.097 1.097l1.162.387a.217.217 0 0 1 0 .412l-1.162.387a1.734 1.734 0 0 0-1.097 1.097l-.387 1.162a.217.217 0 0 1-.412 0l-.387-1.162A1.734 1.734 0 0 0 9.31 6.593l-1.162-.387a.217.217 0 0 1 0-.412l1.162-.387a1.734 1.734 0 0 0 1.097-1.097l.387-1.162zM13.863.099a.145.145 0 0 1 .274 0l.258.774c.115.346.386.617.732.732l.774.258a.145.145 0 0 1 0 .274l-.774.258a1.156 1.156 0 0 0-.732.732l-.258.774a.145.145 0 0 1-.274 0l-.258-.774a1.156 1.156 0 0 0-.732-.732l-.774-.258a.145.145 0 0 1 0-.274l.774-.258c.346-.115.617-.386.732-.732L13.863.1z"/>
|
||||
<span>IAM</span>
|
||||
</a>
|
||||
<a href="{{ url_for('ui.connections_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.connections_dashboard' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M6 3.5A1.5 1.5 0 0 1 7.5 2h1A1.5 1.5 0 0 1 10 3.5v1A1.5 1.5 0 0 1 8.5 6v1H14a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0v-1A.5.5 0 0 1 2 7h5.5V6A1.5 1.5 0 0 1 6 4.5v-1zM8.5 5a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1zM0 11.5A1.5 1.5 0 0 1 1.5 10h1A1.5 1.5 0 0 1 4 11.5v1A1.5 1.5 0 0 1 2.5 14h-1A1.5 1.5 0 0 1 0 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5A1.5 1.5 0 0 1 7.5 10h1a1.5 1.5 0 0 1 1.5 1.5v1A1.5 1.5 0 0 1 8.5 14h-1A1.5 1.5 0 0 1 6 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5a1.5 1.5 0 0 1 1.5-1.5h1a1.5 1.5 0 0 1 1.5 1.5v1a1.5 1.5 0 0 1-1.5 1.5h-1a1.5 1.5 0 0 1-1.5-1.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1z"/>
|
||||
</svg>
|
||||
</button>
|
||||
{% if principal %}
|
||||
<div class="text-end small">
|
||||
<div class="fw-semibold" title="{{ principal.display_name }}">{{ principal.display_name | truncate(20, true) }}</div>
|
||||
<div class="opacity-75">{{ principal.access_key }}</div>
|
||||
</div>
|
||||
<form method="post" action="{{ url_for('ui.logout') }}">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
|
||||
<button class="btn btn-outline-light btn-sm" type="submit">Sign out</button>
|
||||
</form>
|
||||
<span>Connections</span>
|
||||
</a>
|
||||
<a href="{{ url_for('ui.metrics_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.metrics_dashboard' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8 4a.5.5 0 0 1 .5.5V6a.5.5 0 0 1-1 0V4.5A.5.5 0 0 1 8 4zM3.732 5.732a.5.5 0 0 1 .707 0l.915.914a.5.5 0 1 1-.708.708l-.914-.915a.5.5 0 0 1 0-.707zM2 10a.5.5 0 0 1 .5-.5h1.586a.5.5 0 0 1 0 1H2.5A.5.5 0 0 1 2 10zm9.5 0a.5.5 0 0 1 .5-.5h1.5a.5.5 0 0 1 0 1H12a.5.5 0 0 1-.5-.5zm.754-4.246a.389.389 0 0 0-.527-.02L7.547 9.31a.91.91 0 1 0 1.302 1.258l3.434-4.297a.389.389 0 0 0-.029-.518z"/>
|
||||
<path fill-rule="evenodd" d="M0 10a8 8 0 1 1 15.547 2.661c-.442 1.253-1.845 1.602-2.932 1.25C11.309 13.488 9.475 13 8 13c-1.474 0-3.31.488-4.615.911-1.087.352-2.49.003-2.932-1.25A7.988 7.988 0 0 1 0 10zm8-7a7 7 0 0 0-6.603 9.329c.203.575.923.876 1.68.63C4.397 12.533 6.358 12 8 12s3.604.532 4.923.96c.757.245 1.477-.056 1.68-.631A7 7 0 0 0 8 3z"/>
|
||||
</svg>
|
||||
<span>Metrics</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Resources</span>
|
||||
<a href="{{ url_for('ui.docs_page') }}" class="sidebar-link {% if request.endpoint == 'ui.docs_page' %}active{% endif %}">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M1 2.828c.885-.37 2.154-.769 3.388-.893 1.33-.134 2.458.063 3.112.752v9.746c-.935-.53-2.12-.603-3.213-.493-1.18.12-2.37.461-3.287.811V2.828zm7.5-.141c.654-.689 1.782-.886 3.112-.752 1.234.124 2.503.523 3.388.893v9.923c-.918-.35-2.107-.692-3.287-.81-1.094-.111-2.278-.039-3.213.492V2.687zM8 1.783C7.015.936 5.587.81 4.287.94c-1.514.153-3.042.672-3.994 1.105A.5.5 0 0 0 0 2.5v11a.5.5 0 0 0 .707.455c.882-.4 2.303-.881 3.68-1.02 1.409-.142 2.59.087 3.223.877a.5.5 0 0 0 .78 0c.633-.79 1.814-1.019 3.222-.877 1.378.139 2.8.62 3.681 1.02A.5.5 0 0 0 16 13.5v-11a.5.5 0 0 0-.293-.455c-.952-.433-2.48-.952-3.994-1.105C10.413.809 8.985.936 8 1.783z"/>
|
||||
</svg>
|
||||
<span>Documentation</span>
|
||||
</a>
|
||||
</div>
|
||||
{% endif %}
|
||||
</nav>
|
||||
{% if principal %}
|
||||
<div class="sidebar-footer">
|
||||
<div class="sidebar-user">
|
||||
<div class="user-avatar">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M11 6a3 3 0 1 1-6 0 3 3 0 0 1 6 0z"/>
|
||||
<path fill-rule="evenodd" d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm8-7a7 7 0 0 0-5.468 11.37C3.242 11.226 4.805 10 8 10s4.757 1.225 5.468 2.37A7 7 0 0 0 8 1z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<div class="user-info">
|
||||
<div class="user-name" title="{{ principal.display_name }}">{{ principal.display_name | truncate(16, true) }}</div>
|
||||
<div class="user-key">{{ principal.access_key | truncate(12, true) }}</div>
|
||||
</div>
|
||||
</div>
|
||||
<form method="post" action="{{ url_for('ui.logout') }}" class="w-100">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
|
||||
<button class="sidebar-logout-btn" type="submit">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M10 12.5a.5.5 0 0 1-.5.5h-8a.5.5 0 0 1-.5-.5v-9a.5.5 0 0 1 .5-.5h8a.5.5 0 0 1 .5.5v2a.5.5 0 0 0 1 0v-2A1.5 1.5 0 0 0 9.5 2h-8A1.5 1.5 0 0 0 0 3.5v9A1.5 1.5 0 0 0 1.5 14h8a1.5 1.5 0 0 0 1.5-1.5v-2a.5.5 0 0 0-1 0v2z"/>
|
||||
<path fill-rule="evenodd" d="M15.854 8.354a.5.5 0 0 0 0-.708l-3-3a.5.5 0 0 0-.708.708L14.293 7.5H5.5a.5.5 0 0 0 0 1h8.793l-2.147 2.146a.5.5 0 0 0 .708.708l3-3z"/>
|
||||
</svg>
|
||||
<span>Sign out</span>
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</nav>
|
||||
<main class="container py-4">
|
||||
{% block content %}{% endblock %}
|
||||
</main>
|
||||
</div>
|
||||
|
||||
<aside class="sidebar d-none d-lg-flex" id="desktopSidebar">
|
||||
<div class="sidebar-header">
|
||||
<div class="sidebar-brand" id="sidebarBrand">
|
||||
<img src="{{ url_for('static', filename='images/MyFSIO.png') }}" alt="MyFSIO logo" class="sidebar-logo" width="36" height="36" />
|
||||
<span class="sidebar-title">MyFSIO</span>
|
||||
</div>
|
||||
<button class="sidebar-collapse-btn" type="button" id="sidebarCollapseBtn" aria-label="Collapse sidebar">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M11.354 1.646a.5.5 0 0 1 0 .708L5.707 8l5.647 5.646a.5.5 0 0 1-.708.708l-6-6a.5.5 0 0 1 0-.708l6-6a.5.5 0 0 1 .708 0z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
<div class="sidebar-body">
|
||||
<nav class="sidebar-nav">
|
||||
{% if principal %}
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Navigation</span>
|
||||
<a href="{{ url_for('ui.buckets_overview') }}" class="sidebar-link {% if request.endpoint == 'ui.buckets_overview' or request.endpoint == 'ui.bucket_detail' %}active{% endif %}" data-tooltip="Buckets">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Buckets</span>
|
||||
</a>
|
||||
{% if can_manage_iam %}
|
||||
<a href="{{ url_for('ui.iam_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.iam_dashboard' %}active{% endif %}" data-tooltip="IAM">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M15 14s1 0 1-1-1-4-5-4-5 3-5 4 1 1 1 1h8zm-7.978-1A.261.261 0 0 1 7 12.996c.001-.264.167-1.03.76-1.72C8.312 10.629 9.282 10 11 10c1.717 0 2.687.63 3.24 1.276.593.69.758 1.457.76 1.72l-.008.002a.274.274 0 0 1-.014.002H7.022zM11 7a2 2 0 1 0 0-4 2 2 0 0 0 0 4zm3-2a3 3 0 1 1-6 0 3 3 0 0 1 6 0zM6.936 9.28a5.88 5.88 0 0 0-1.23-.247A7.35 7.35 0 0 0 5 9c-4 0-5 3-5 4 0 .667.333 1 1 1h4.216A2.238 2.238 0 0 1 5 13c0-1.01.377-2.042 1.09-2.904.243-.294.526-.569.846-.816zM4.92 10A5.493 5.493 0 0 0 4 13H1c0-.26.164-1.03.76-1.724.545-.636 1.492-1.256 3.16-1.275zM1.5 5.5a3 3 0 1 1 6 0 3 3 0 0 1-6 0zm3-2a2 2 0 1 0 0 4 2 2 0 0 0 0-4z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">IAM</span>
|
||||
</a>
|
||||
<a href="{{ url_for('ui.connections_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.connections_dashboard' %}active{% endif %}" data-tooltip="Connections">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M6 3.5A1.5 1.5 0 0 1 7.5 2h1A1.5 1.5 0 0 1 10 3.5v1A1.5 1.5 0 0 1 8.5 6v1H14a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0v-1A.5.5 0 0 1 2 7h5.5V6A1.5 1.5 0 0 1 6 4.5v-1zM8.5 5a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1zM0 11.5A1.5 1.5 0 0 1 1.5 10h1A1.5 1.5 0 0 1 4 11.5v1A1.5 1.5 0 0 1 2.5 14h-1A1.5 1.5 0 0 1 0 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5A1.5 1.5 0 0 1 7.5 10h1a1.5 1.5 0 0 1 1.5 1.5v1A1.5 1.5 0 0 1 8.5 14h-1A1.5 1.5 0 0 1 6 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5a1.5 1.5 0 0 1 1.5-1.5h1a1.5 1.5 0 0 1 1.5 1.5v1a1.5 1.5 0 0 1-1.5 1.5h-1a1.5 1.5 0 0 1-1.5-1.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Connections</span>
|
||||
</a>
|
||||
<a href="{{ url_for('ui.metrics_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.metrics_dashboard' %}active{% endif %}" data-tooltip="Metrics">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M8 4a.5.5 0 0 1 .5.5V6a.5.5 0 0 1-1 0V4.5A.5.5 0 0 1 8 4zM3.732 5.732a.5.5 0 0 1 .707 0l.915.914a.5.5 0 1 1-.708.708l-.914-.915a.5.5 0 0 1 0-.707zM2 10a.5.5 0 0 1 .5-.5h1.586a.5.5 0 0 1 0 1H2.5A.5.5 0 0 1 2 10zm9.5 0a.5.5 0 0 1 .5-.5h1.5a.5.5 0 0 1 0 1H12a.5.5 0 0 1-.5-.5zm.754-4.246a.389.389 0 0 0-.527-.02L7.547 9.31a.91.91 0 1 0 1.302 1.258l3.434-4.297a.389.389 0 0 0-.029-.518z"/>
|
||||
<path fill-rule="evenodd" d="M0 10a8 8 0 1 1 15.547 2.661c-.442 1.253-1.845 1.602-2.932 1.25C11.309 13.488 9.475 13 8 13c-1.474 0-3.31.488-4.615.911-1.087.352-2.49.003-2.932-1.25A7.988 7.988 0 0 1 0 10zm8-7a7 7 0 0 0-6.603 9.329c.203.575.923.876 1.68.63C4.397 12.533 6.358 12 8 12s3.604.532 4.923.96c.757.245 1.477-.056 1.68-.631A7 7 0 0 0 8 3z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Metrics</span>
|
||||
</a>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="nav-section">
|
||||
<span class="nav-section-title">Resources</span>
|
||||
<a href="{{ url_for('ui.docs_page') }}" class="sidebar-link {% if request.endpoint == 'ui.docs_page' %}active{% endif %}" data-tooltip="Documentation">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M1 2.828c.885-.37 2.154-.769 3.388-.893 1.33-.134 2.458.063 3.112.752v9.746c-.935-.53-2.12-.603-3.213-.493-1.18.12-2.37.461-3.287.811V2.828zm7.5-.141c.654-.689 1.782-.886 3.112-.752 1.234.124 2.503.523 3.388.893v9.923c-.918-.35-2.107-.692-3.287-.81-1.094-.111-2.278-.039-3.213.492V2.687zM8 1.783C7.015.936 5.587.81 4.287.94c-1.514.153-3.042.672-3.994 1.105A.5.5 0 0 0 0 2.5v11a.5.5 0 0 0 .707.455c.882-.4 2.303-.881 3.68-1.02 1.409-.142 2.59.087 3.223.877a.5.5 0 0 0 .78 0c.633-.79 1.814-1.019 3.222-.877 1.378.139 2.8.62 3.681 1.02A.5.5 0 0 0 16 13.5v-11a.5.5 0 0 0-.293-.455c-.952-.433-2.48-.952-3.994-1.105C10.413.809 8.985.936 8 1.783z"/>
|
||||
</svg>
|
||||
<span class="sidebar-link-text">Documentation</span>
|
||||
</a>
|
||||
</div>
|
||||
{% endif %}
|
||||
</nav>
|
||||
</div>
|
||||
<div class="sidebar-footer">
|
||||
<button class="theme-toggle-sidebar" type="button" id="themeToggle" aria-label="Toggle dark mode">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="theme-icon" id="themeToggleSun" viewBox="0 0 16 16">
|
||||
<path d="M8 11.5a3.5 3.5 0 1 1 0-7 3.5 3.5 0 0 1 0 7zm0 1.5a5 5 0 1 0 0-10 5 5 0 0 0 0 10zM8 0a.5.5 0 0 1 .5.5v1.555a.5.5 0 0 1-1 0V.5A.5.5 0 0 1 8 0zm0 12.945a.5.5 0 0 1 .5.5v2.055a.5.5 0 0 1-1 0v-2.055a.5.5 0 0 1 .5-.5zM2.343 2.343a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.708.707l-1.1-1.1a.5.5 0 0 1 0-.707zm9.507 9.507a.5.5 0 0 1 .707 0l1.1 1.1a.5.5 0 1 1-.707.708l-1.1-1.1a.5.5 0 0 1 0-.708zM0 8a.5.5 0 0 1 .5-.5h1.555a.5.5 0 0 1 0 1H.5A.5.5 0 0 1 0 8zm12.945 0a.5.5 0 0 1 .5-.5H15.5a.5.5 0 0 1 0 1h-2.055a.5.5 0 0 1-.5-.5zM2.343 13.657a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 1 1 .708.707l-1.1 1.1a.5.5 0 0 1-.708 0zm9.507-9.507a.5.5 0 0 1 0-.707l1.1-1.1a.5.5 0 0 1 .707.708l-1.1 1.1a.5.5 0 0 1-.707 0z"/>
|
||||
</svg>
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="theme-icon" id="themeToggleMoon" viewBox="0 0 16 16">
|
||||
<path d="M6 .278a.768.768 0 0 1 .08.858 7.208 7.208 0 0 0-.878 3.46c0 4.021 3.278 7.277 7.318 7.277.527 0 1.04-.055 1.533-.16a.787.787 0 0 1 .81.316.733.733 0 0 1-.031.893A8.349 8.349 0 0 1 8.344 16C3.734 16 0 12.286 0 7.71 0 4.266 2.114 1.312 5.124.06A.752.752 0 0 1 6 .278z"/>
|
||||
<path d="M10.794 3.148a.217.217 0 0 1 .412 0l.387 1.162c.173.518.579.924 1.097 1.097l1.162.387a.217.217 0 0 1 0 .412l-1.162.387a1.734 1.734 0 0 0-1.097 1.097l-.387 1.162a.217.217 0 0 1-.412 0l-.387-1.162A1.734 1.734 0 0 0 9.31 6.593l-1.162-.387a.217.217 0 0 1 0-.412l1.162-.387a1.734 1.734 0 0 0 1.097-1.097l.387-1.162zM13.863.099a.145.145 0 0 1 .274 0l.258.774c.115.346.386.617.732.732l.774.258a.145.145 0 0 1 0 .274l-.774.258a1.156 1.156 0 0 0-.732.732l-.258.774a.145.145 0 0 1-.274 0l-.258-.774a1.156 1.156 0 0 0-.732-.732l-.774-.258a.145.145 0 0 1 0-.274l.774-.258c.346-.115.617-.386.732-.732L13.863.1z"/>
|
||||
</svg>
|
||||
<span class="theme-toggle-text">Toggle theme</span>
|
||||
</button>
|
||||
{% if principal %}
|
||||
<div class="sidebar-user" data-username="{{ principal.display_name }}">
|
||||
<div class="user-avatar">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path d="M11 6a3 3 0 1 1-6 0 3 3 0 0 1 6 0z"/>
|
||||
<path fill-rule="evenodd" d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm8-7a7 7 0 0 0-5.468 11.37C3.242 11.226 4.805 10 8 10s4.757 1.225 5.468 2.37A7 7 0 0 0 8 1z"/>
|
||||
</svg>
|
||||
</div>
|
||||
<div class="user-info">
|
||||
<div class="user-name" title="{{ principal.display_name }}">{{ principal.display_name | truncate(16, true) }}</div>
|
||||
<div class="user-key">{{ principal.access_key | truncate(12, true) }}</div>
|
||||
</div>
|
||||
</div>
|
||||
<form method="post" action="{{ url_for('ui.logout') }}" class="w-100">
|
||||
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
|
||||
<button class="sidebar-logout-btn" type="submit">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M10 12.5a.5.5 0 0 1-.5.5h-8a.5.5 0 0 1-.5-.5v-9a.5.5 0 0 1 .5-.5h8a.5.5 0 0 1 .5.5v2a.5.5 0 0 0 1 0v-2A1.5 1.5 0 0 0 9.5 2h-8A1.5 1.5 0 0 0 0 3.5v9A1.5 1.5 0 0 0 1.5 14h8a1.5 1.5 0 0 0 1.5-1.5v-2a.5.5 0 0 0-1 0v2z"/>
|
||||
<path fill-rule="evenodd" d="M15.854 8.354a.5.5 0 0 0 0-.708l-3-3a.5.5 0 0 0-.708.708L14.293 7.5H5.5a.5.5 0 0 0 0 1h8.793l-2.147 2.146a.5.5 0 0 0 .708.708l3-3z"/>
|
||||
</svg>
|
||||
<span class="logout-text">Sign out</span>
|
||||
</button>
|
||||
</form>
|
||||
{% endif %}
|
||||
</div>
|
||||
</aside>
|
||||
|
||||
<div class="main-wrapper">
|
||||
<main class="main-content">
|
||||
{% block content %}{% endblock %}
|
||||
</main>
|
||||
</div>
|
||||
<div class="toast-container position-fixed bottom-0 end-0 p-3">
|
||||
<div id="liveToast" class="toast" role="alert" aria-live="assertive" aria-atomic="true">
|
||||
<div class="toast-header">
|
||||
@@ -162,9 +275,11 @@
|
||||
(function () {
|
||||
const storageKey = 'myfsio-theme';
|
||||
const toggle = document.getElementById('themeToggle');
|
||||
const label = document.getElementById('themeToggleLabel');
|
||||
const toggleMobile = document.getElementById('themeToggleMobile');
|
||||
const sunIcon = document.getElementById('themeToggleSun');
|
||||
const moonIcon = document.getElementById('themeToggleMoon');
|
||||
const sunIconMobile = document.getElementById('themeToggleSunMobile');
|
||||
const moonIconMobile = document.getElementById('themeToggleMoonMobile');
|
||||
|
||||
const applyTheme = (theme) => {
|
||||
document.documentElement.dataset.bsTheme = theme;
|
||||
@@ -172,29 +287,74 @@
|
||||
try {
|
||||
localStorage.setItem(storageKey, theme);
|
||||
} catch (err) {
|
||||
/* localStorage unavailable */
|
||||
}
|
||||
if (label) {
|
||||
label.textContent = theme === 'dark' ? 'Switch to light mode' : 'Switch to dark mode';
|
||||
}
|
||||
if (toggle) {
|
||||
toggle.setAttribute('aria-pressed', theme === 'dark' ? 'true' : 'false');
|
||||
toggle.setAttribute('title', theme === 'dark' ? 'Switch to light mode' : 'Switch to dark mode');
|
||||
toggle.setAttribute('aria-label', theme === 'dark' ? 'Switch to light mode' : 'Switch to dark mode');
|
||||
console.log("Error: local storage not available, cannot save theme preference.");
|
||||
}
|
||||
const isDark = theme === 'dark';
|
||||
if (sunIcon && moonIcon) {
|
||||
const isDark = theme === 'dark';
|
||||
sunIcon.classList.toggle('d-none', !isDark);
|
||||
moonIcon.classList.toggle('d-none', isDark);
|
||||
}
|
||||
if (sunIconMobile && moonIconMobile) {
|
||||
sunIconMobile.classList.toggle('d-none', !isDark);
|
||||
moonIconMobile.classList.toggle('d-none', isDark);
|
||||
}
|
||||
[toggle, toggleMobile].forEach(btn => {
|
||||
if (btn) {
|
||||
btn.setAttribute('aria-pressed', isDark ? 'true' : 'false');
|
||||
btn.setAttribute('title', isDark ? 'Switch to light mode' : 'Switch to dark mode');
|
||||
btn.setAttribute('aria-label', isDark ? 'Switch to light mode' : 'Switch to dark mode');
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
const current = document.documentElement.dataset.bsTheme || 'light';
|
||||
applyTheme(current);
|
||||
|
||||
toggle?.addEventListener('click', () => {
|
||||
const handleToggle = () => {
|
||||
const next = document.documentElement.dataset.bsTheme === 'dark' ? 'light' : 'dark';
|
||||
applyTheme(next);
|
||||
};
|
||||
|
||||
toggle?.addEventListener('click', handleToggle);
|
||||
toggleMobile?.addEventListener('click', handleToggle);
|
||||
})();
|
||||
</script>
|
||||
<script>
|
||||
(function () {
|
||||
const sidebar = document.getElementById('desktopSidebar');
|
||||
const collapseBtn = document.getElementById('sidebarCollapseBtn');
|
||||
const sidebarBrand = document.getElementById('sidebarBrand');
|
||||
const storageKey = 'myfsio-sidebar-collapsed';
|
||||
|
||||
if (!sidebar || !collapseBtn) return;
|
||||
|
||||
const applyCollapsed = (collapsed) => {
|
||||
sidebar.classList.toggle('sidebar-collapsed', collapsed);
|
||||
document.body.classList.toggle('sidebar-is-collapsed', collapsed);
|
||||
document.documentElement.classList.remove('sidebar-will-collapse');
|
||||
try {
|
||||
localStorage.setItem(storageKey, collapsed ? 'true' : 'false');
|
||||
} catch (err) {}
|
||||
};
|
||||
|
||||
try {
|
||||
const stored = localStorage.getItem(storageKey);
|
||||
applyCollapsed(stored === 'true');
|
||||
} catch (err) {
|
||||
document.documentElement.classList.remove('sidebar-will-collapse');
|
||||
}
|
||||
|
||||
collapseBtn.addEventListener('click', () => {
|
||||
const isCollapsed = sidebar.classList.contains('sidebar-collapsed');
|
||||
applyCollapsed(!isCollapsed);
|
||||
});
|
||||
|
||||
sidebarBrand?.addEventListener('click', (e) => {
|
||||
const isCollapsed = sidebar.classList.contains('sidebar-collapsed');
|
||||
if (isCollapsed) {
|
||||
e.preventDefault();
|
||||
applyCollapsed(false);
|
||||
}
|
||||
});
|
||||
})();
|
||||
</script>
|
||||
|
||||
File diff suppressed because it is too large
@@ -38,6 +38,7 @@
|
||||
<li><a href="#versioning">Object Versioning</a></li>
|
||||
<li><a href="#quotas">Bucket Quotas</a></li>
|
||||
<li><a href="#encryption">Encryption</a></li>
|
||||
<li><a href="#lifecycle">Lifecycle Rules</a></li>
|
||||
<li><a href="#troubleshooting">Troubleshooting</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
@@ -606,11 +607,49 @@ except Exception as e:
|
||||
<li>Follow the steps above to replicate <strong>A → B</strong>.</li>
|
||||
<li>Repeat the process on Server B to replicate <strong>B → A</strong> (create a connection to A, enable rule).</li>
|
||||
</ol>
|
||||
<p class="small text-muted mb-0">
|
||||
<p class="small text-muted mb-3">
|
||||
<strong>Loop Prevention:</strong> The system automatically detects replication traffic using a custom User-Agent (<code>S3ReplicationAgent</code>). This prevents infinite loops where an object replicated from A to B is immediately replicated back to A.
|
||||
<br>
|
||||
<strong>Deletes:</strong> Deleting an object on one server will propagate the deletion to the other server.
|
||||
</p>
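<p class="small text-muted mb-2">
  For illustration only, a minimal sketch of how a request handler could recognise replication traffic by its User-Agent. The constant and helper names below are assumptions for this example, not code from this change:
</p>
<pre class="mb-3"><code class="language-python"># Illustrative sketch: skip re-replication of requests issued by the replicator.
from flask import request

REPLICATION_USER_AGENT = "S3ReplicationAgent"  # value documented above

def is_replication_request() -> bool:
    """True when the incoming request was issued by the replication agent."""
    user_agent = request.headers.get("User-Agent") or ""
    return REPLICATION_USER_AGENT in user_agent

# A PUT/DELETE handler would then only enqueue replication for normal clients:
#     if not is_replication_request():
#         replication.enqueue(bucket, key)
</code></pre>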
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Error Handling & Rate Limits</h3>
|
||||
<p class="small text-muted mb-3">The replication system handles transient failures automatically:</p>
|
||||
<div class="table-responsive mb-3">
|
||||
<table class="table table-sm table-bordered small">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th>Behavior</th>
|
||||
<th>Details</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><strong>Retry Logic</strong></td>
|
||||
<td>boto3 automatically handles 429 (rate limit) errors using exponential backoff with <code>max_attempts=2</code></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><strong>Concurrency</strong></td>
|
||||
<td>Uses a ThreadPoolExecutor with 4 parallel workers for replication tasks</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><strong>Timeouts</strong></td>
|
||||
<td>Connect: 5s, Read: 30s. Large files use streaming transfers</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
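<p class="small text-muted mb-2">
  As a rough illustration of the settings above, this is how a boto3 client can be configured with 2 retry attempts and 5s connect / 30s read timeouts. The endpoint and credentials are placeholders, and the actual client construction used by the replication manager may differ:
</p>
<pre class="mb-3"><code class="language-python"># Illustrative sketch: boto3 client with the retry/timeout values listed above.
import boto3
from botocore.config import Config

client = boto3.client(
    "s3",
    endpoint_url="https://target-server.example.com",  # placeholder target
    aws_access_key_id="<key>",
    aws_secret_access_key="<secret>",
    config=Config(
        connect_timeout=5,
        read_timeout=30,
        retries={"max_attempts": 2, "mode": "standard"},
    ),
)
</code></pre>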
|
||||
<div class="alert alert-warning border mb-0">
|
||||
<div class="d-flex gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-exclamation-triangle text-warning mt-1 flex-shrink-0" viewBox="0 0 16 16">
|
||||
<path d="M7.938 2.016A.13.13 0 0 1 8.002 2a.13.13 0 0 1 .063.016.146.146 0 0 1 .054.057l6.857 11.667c.036.06.035.124.002.183a.163.163 0 0 1-.054.06.116.116 0 0 1-.066.017H1.146a.115.115 0 0 1-.066-.017.163.163 0 0 1-.054-.06.176.176 0 0 1 .002-.183L7.884 2.073a.147.147 0 0 1 .054-.057zm1.044-.45a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566z"/>
|
||||
<path d="M7.002 12a1 1 0 1 1 2 0 1 1 0 0 1-2 0zM7.1 5.995a.905.905 0 1 1 1.8 0l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995z"/>
|
||||
</svg>
|
||||
<div>
|
||||
<strong>Large File Counts:</strong> When replicating buckets with many objects, the target server's rate limits may cause delays. There is no built-in pause mechanism. Consider increasing <code>RATE_LIMIT_DEFAULT</code> on the target server during bulk replication operations.
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
||||
<article id="versioning" class="card shadow-sm docs-section">
|
||||
@@ -855,10 +894,92 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
|
||||
</p>
|
||||
</div>
|
||||
</article>
|
||||
<article id="troubleshooting" class="card shadow-sm docs-section">
|
||||
<article id="lifecycle" class="card shadow-sm docs-section">
|
||||
<div class="card-body">
|
||||
<div class="d-flex align-items-center gap-2 mb-3">
|
||||
<span class="docs-section-kicker">12</span>
|
||||
<h2 class="h4 mb-0">Lifecycle Rules</h2>
|
||||
</div>
|
||||
<p class="text-muted">Automatically delete expired objects, clean up old versions, and abort incomplete multipart uploads using time-based lifecycle rules.</p>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">How It Works</h3>
|
||||
<p class="small text-muted mb-3">
|
||||
Lifecycle rules run on a background timer (Python <code>threading.Timer</code>), not a system cron job. The enforcement cycle runs every <strong>3600 seconds (1 hour)</strong> by default; each cycle scans every bucket that has a lifecycle configuration and applies the matching rules.
|
||||
</p>
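<p class="small text-muted mb-2">
  The rescheduling pattern described above looks roughly like the sketch below. Class and method names are assumptions for the example; the actual <code>LifecycleManager</code> may be structured differently:
</p>
<pre class="mb-3"><code class="language-python"># Illustrative sketch: a timer that re-arms itself every interval and runs
# one enforcement cycle per tick.
import threading
from typing import Callable, Optional

class PeriodicEnforcer:
    def __init__(self, enforce_all: Callable[[], None], interval_seconds: int = 3600) -> None:
        self._enforce_all = enforce_all
        self._interval = interval_seconds
        self._timer: Optional[threading.Timer] = None

    def start(self) -> None:
        self._timer = threading.Timer(self._interval, self._tick)
        self._timer.daemon = True
        self._timer.start()

    def _tick(self) -> None:
        try:
            self._enforce_all()  # scan buckets and apply matching lifecycle rules
        finally:
            self.start()         # re-arm the timer for the next cycle

    def stop(self) -> None:
        if self._timer is not None:
            self._timer.cancel()
            self._timer = None
</code></pre>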
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Expiration Types</h3>
|
||||
<div class="table-responsive mb-3">
|
||||
<table class="table table-sm table-bordered small">
|
||||
<thead class="table-light">
|
||||
<tr>
|
||||
<th>Type</th>
|
||||
<th>Description</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td><strong>Expiration (Days)</strong></td>
|
||||
<td>Delete current objects older than N days from their last modification</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><strong>Expiration (Date)</strong></td>
|
||||
<td>Delete current objects after a specific date (ISO 8601 format)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><strong>NoncurrentVersionExpiration</strong></td>
|
||||
<td>Delete non-current (archived) versions older than N days from when they became non-current</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><strong>AbortIncompleteMultipartUpload</strong></td>
|
||||
<td>Abort multipart uploads that have been in progress longer than N days</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">API Usage</h3>
|
||||
<pre class="mb-3"><code class="language-bash"># Set lifecycle rule (delete objects older than 30 days)
|
||||
curl -X PUT "{{ api_base }}/<bucket>?lifecycle" \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
|
||||
-d '[{
|
||||
"ID": "expire-old-objects",
|
||||
"Status": "Enabled",
|
||||
"Prefix": "",
|
||||
"Expiration": {"Days": 30}
|
||||
}]'
|
||||
|
||||
# Abort incomplete multipart uploads after 7 days
|
||||
curl -X PUT "{{ api_base }}/<bucket>?lifecycle" \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
|
||||
-d '[{
|
||||
"ID": "cleanup-multipart",
|
||||
"Status": "Enabled",
|
||||
"AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 7}
|
||||
}]'
|
||||
|
||||
# Get current lifecycle configuration
|
||||
curl "{{ api_base }}/<bucket>?lifecycle" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>
|
||||
|
||||
<div class="alert alert-light border mb-0">
|
||||
<div class="d-flex gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1 flex-shrink-0" viewBox="0 0 16 16">
|
||||
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
|
||||
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
|
||||
</svg>
|
||||
<div>
|
||||
<strong>Prefix Filtering:</strong> Use the <code>Prefix</code> field to scope rules to specific paths (e.g., <code>"logs/"</code>). Leave empty to apply to all objects in the bucket.
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
||||
<article id="troubleshooting" class="card shadow-sm docs-section">
|
||||
<div class="card-body">
|
||||
<div class="d-flex align-items-center gap-2 mb-3">
|
||||
<span class="docs-section-kicker">13</span>
|
||||
<h2 class="h4 mb-0">Troubleshooting & tips</h2>
|
||||
</div>
|
||||
<div class="table-responsive">
|
||||
@@ -896,6 +1017,11 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
|
||||
<td>Proxy headers missing or <code>API_BASE_URL</code> incorrect</td>
|
||||
<td>Ensure your proxy sends <code>X-Forwarded-Host</code>/<code>Proto</code> headers, or explicitly set <code>API_BASE_URL</code> to your public domain.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Large folder uploads hitting rate limits (429)</td>
|
||||
<td><code>RATE_LIMIT_DEFAULT</code> exceeded (200/min)</td>
|
||||
<td>Increase rate limit in env config, use Redis backend (<code>RATE_LIMIT_STORAGE_URI=redis://host:port</code>) for distributed setups, or upload in smaller batches.</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
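<p class="small text-muted mb-2">
  If the rate limiter is backed by Flask-Limiter with a Redis storage backend (an assumption made for this example), a configuration matching the values above would look roughly like this:
</p>
<pre class="mb-3"><code class="language-python"># Illustrative sketch: Flask-Limiter with a Redis backend and a
# 200-requests-per-minute default, mirroring RATE_LIMIT_DEFAULT above.
from flask import Flask
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address

app = Flask(__name__)
limiter = Limiter(
    get_remote_address,
    app=app,
    default_limits=["200 per minute"],
    storage_uri="redis://localhost:6379",  # e.g. from RATE_LIMIT_STORAGE_URI
)
</code></pre>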
|
||||
@@ -918,6 +1044,7 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
|
||||
<li><a href="#versioning">Object Versioning</a></li>
|
||||
<li><a href="#quotas">Bucket Quotas</a></li>
|
||||
<li><a href="#encryption">Encryption</a></li>
|
||||
<li><a href="#lifecycle">Lifecycle Rules</a></li>
|
||||
<li><a href="#troubleshooting">Troubleshooting</a></li>
|
||||
</ul>
|
||||
<div class="docs-sidebar-callouts">
|
||||
|
||||
@@ -355,8 +355,8 @@
|
||||
<div class="modal-header border-0 pb-0">
|
||||
<h1 class="modal-title fs-5 fw-semibold">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-danger" viewBox="0 0 16 16">
|
||||
<path d="M1 14s-1 0-1-1 1-4 6-4 6 3 6 4-1 1-1 1H1zm5-6a3 3 0 1 0 0-6 3 3 0 0 0 0 6z"/>
|
||||
<path fill-rule="evenodd" d="M11 1.5v1h5v1h-1v9a2 2 0 0 1-2 2H3a2 2 0 0 1-2-2v-9H0v-1h5v-1a1 1 0 0 1 1-1h4a1 1 0 0 1 1 1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118z"/>
|
||||
<path d="M11 5a3 3 0 1 1-6 0 3 3 0 0 1 6 0M8 7a2 2 0 1 0 0-4 2 2 0 0 0 0 4m.256 7a4.5 4.5 0 0 1-.229-1.004H3c.001-.246.154-.986.832-1.664C4.484 10.68 5.711 10 8 10q.39 0 .74.025c.226-.341.496-.65.804-.918Q9.077 9.014 8 9c-5 0-6 3-6 4s1 1 1 1h5.256Z"/>
|
||||
<path d="M12.5 16a3.5 3.5 0 1 0 0-7 3.5 3.5 0 0 0 0 7m-.646-4.854.646.647.646-.647a.5.5 0 0 1 .708.708l-.647.646.647.646a.5.5 0 0 1-.708.708l-.646-.647-.646.647a.5.5 0 0 1-.708-.708l.647-.646-.647-.646a.5.5 0 0 1 .708-.708"/>
|
||||
</svg>
|
||||
Delete User
|
||||
</h1>
|
||||
|
||||
339
tests/test_access_logging.py
Normal file
@@ -0,0 +1,339 @@
|
||||
import io
|
||||
import json
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from app.access_logging import (
|
||||
AccessLogEntry,
|
||||
AccessLoggingService,
|
||||
LoggingConfiguration,
|
||||
)
|
||||
from app.storage import ObjectStorage
|
||||
|
||||
|
||||
class TestAccessLogEntry:
|
||||
def test_default_values(self):
|
||||
entry = AccessLogEntry()
|
||||
assert entry.bucket_owner == "-"
|
||||
assert entry.bucket == "-"
|
||||
assert entry.remote_ip == "-"
|
||||
assert entry.requester == "-"
|
||||
assert entry.operation == "-"
|
||||
assert entry.http_status == 200
|
||||
assert len(entry.request_id) == 16
|
||||
|
||||
def test_to_log_line(self):
|
||||
entry = AccessLogEntry(
|
||||
bucket_owner="owner123",
|
||||
bucket="my-bucket",
|
||||
remote_ip="192.168.1.1",
|
||||
requester="user456",
|
||||
request_id="REQ123456789012",
|
||||
operation="REST.PUT.OBJECT",
|
||||
key="test/key.txt",
|
||||
request_uri="PUT /my-bucket/test/key.txt HTTP/1.1",
|
||||
http_status=200,
|
||||
bytes_sent=1024,
|
||||
object_size=2048,
|
||||
total_time_ms=150,
|
||||
referrer="http://example.com",
|
||||
user_agent="aws-cli/2.0",
|
||||
version_id="v1",
|
||||
)
|
||||
log_line = entry.to_log_line()
|
||||
|
||||
assert "owner123" in log_line
|
||||
assert "my-bucket" in log_line
|
||||
assert "192.168.1.1" in log_line
|
||||
assert "user456" in log_line
|
||||
assert "REST.PUT.OBJECT" in log_line
|
||||
assert "test/key.txt" in log_line
|
||||
assert "200" in log_line
|
||||
|
||||
def test_to_dict(self):
|
||||
entry = AccessLogEntry(
|
||||
bucket_owner="owner",
|
||||
bucket="bucket",
|
||||
remote_ip="10.0.0.1",
|
||||
requester="admin",
|
||||
request_id="ABC123",
|
||||
operation="REST.GET.OBJECT",
|
||||
key="file.txt",
|
||||
request_uri="GET /bucket/file.txt HTTP/1.1",
|
||||
http_status=200,
|
||||
bytes_sent=512,
|
||||
object_size=512,
|
||||
total_time_ms=50,
|
||||
)
|
||||
result = entry.to_dict()
|
||||
|
||||
assert result["bucket_owner"] == "owner"
|
||||
assert result["bucket"] == "bucket"
|
||||
assert result["remote_ip"] == "10.0.0.1"
|
||||
assert result["requester"] == "admin"
|
||||
assert result["operation"] == "REST.GET.OBJECT"
|
||||
assert result["key"] == "file.txt"
|
||||
assert result["http_status"] == 200
|
||||
assert result["bytes_sent"] == 512
|
||||
|
||||
|
||||
class TestLoggingConfiguration:
|
||||
def test_default_values(self):
|
||||
config = LoggingConfiguration(target_bucket="log-bucket")
|
||||
assert config.target_bucket == "log-bucket"
|
||||
assert config.target_prefix == ""
|
||||
assert config.enabled is True
|
||||
|
||||
def test_to_dict(self):
|
||||
config = LoggingConfiguration(
|
||||
target_bucket="logs",
|
||||
target_prefix="access-logs/",
|
||||
enabled=True,
|
||||
)
|
||||
result = config.to_dict()
|
||||
|
||||
assert "LoggingEnabled" in result
|
||||
assert result["LoggingEnabled"]["TargetBucket"] == "logs"
|
||||
assert result["LoggingEnabled"]["TargetPrefix"] == "access-logs/"
|
||||
|
||||
def test_from_dict(self):
|
||||
data = {
|
||||
"LoggingEnabled": {
|
||||
"TargetBucket": "my-logs",
|
||||
"TargetPrefix": "bucket-logs/",
|
||||
}
|
||||
}
|
||||
config = LoggingConfiguration.from_dict(data)
|
||||
|
||||
assert config is not None
|
||||
assert config.target_bucket == "my-logs"
|
||||
assert config.target_prefix == "bucket-logs/"
|
||||
assert config.enabled is True
|
||||
|
||||
def test_from_dict_no_logging(self):
|
||||
data = {}
|
||||
config = LoggingConfiguration.from_dict(data)
|
||||
assert config is None
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def storage(tmp_path: Path):
|
||||
storage_root = tmp_path / "data"
|
||||
storage_root.mkdir(parents=True)
|
||||
return ObjectStorage(storage_root)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def logging_service(tmp_path: Path, storage):
|
||||
service = AccessLoggingService(
|
||||
tmp_path,
|
||||
flush_interval=3600,
|
||||
max_buffer_size=10,
|
||||
)
|
||||
service.set_storage(storage)
|
||||
yield service
|
||||
service.shutdown()
|
||||
|
||||
|
||||
class TestAccessLoggingService:
|
||||
def test_get_bucket_logging_not_configured(self, logging_service):
|
||||
result = logging_service.get_bucket_logging("unconfigured-bucket")
|
||||
assert result is None
|
||||
|
||||
def test_set_and_get_bucket_logging(self, logging_service):
|
||||
config = LoggingConfiguration(
|
||||
target_bucket="log-bucket",
|
||||
target_prefix="logs/",
|
||||
)
|
||||
logging_service.set_bucket_logging("source-bucket", config)
|
||||
|
||||
retrieved = logging_service.get_bucket_logging("source-bucket")
|
||||
assert retrieved is not None
|
||||
assert retrieved.target_bucket == "log-bucket"
|
||||
assert retrieved.target_prefix == "logs/"
|
||||
|
||||
def test_delete_bucket_logging(self, logging_service):
|
||||
config = LoggingConfiguration(target_bucket="logs")
|
||||
logging_service.set_bucket_logging("to-delete", config)
|
||||
assert logging_service.get_bucket_logging("to-delete") is not None
|
||||
|
||||
logging_service.delete_bucket_logging("to-delete")
|
||||
logging_service._configs.clear()
|
||||
assert logging_service.get_bucket_logging("to-delete") is None
|
||||
|
||||
def test_log_request_no_config(self, logging_service):
|
||||
logging_service.log_request(
|
||||
"no-config-bucket",
|
||||
operation="REST.GET.OBJECT",
|
||||
key="test.txt",
|
||||
)
|
||||
stats = logging_service.get_stats()
|
||||
assert stats["buffered_entries"] == 0
|
||||
|
||||
def test_log_request_with_config(self, logging_service, storage):
|
||||
storage.create_bucket("log-target")
|
||||
|
||||
config = LoggingConfiguration(
|
||||
target_bucket="log-target",
|
||||
target_prefix="access/",
|
||||
)
|
||||
logging_service.set_bucket_logging("source-bucket", config)
|
||||
|
||||
logging_service.log_request(
|
||||
"source-bucket",
|
||||
operation="REST.PUT.OBJECT",
|
||||
key="uploaded.txt",
|
||||
remote_ip="192.168.1.100",
|
||||
requester="test-user",
|
||||
http_status=200,
|
||||
bytes_sent=1024,
|
||||
)
|
||||
|
||||
stats = logging_service.get_stats()
|
||||
assert stats["buffered_entries"] == 1
|
||||
|
||||
def test_log_request_disabled_config(self, logging_service):
|
||||
config = LoggingConfiguration(
|
||||
target_bucket="logs",
|
||||
enabled=False,
|
||||
)
|
||||
logging_service.set_bucket_logging("disabled-bucket", config)
|
||||
|
||||
logging_service.log_request(
|
||||
"disabled-bucket",
|
||||
operation="REST.GET.OBJECT",
|
||||
key="test.txt",
|
||||
)
|
||||
|
||||
stats = logging_service.get_stats()
|
||||
assert stats["buffered_entries"] == 0
|
||||
|
||||
def test_flush_buffer(self, logging_service, storage):
|
||||
storage.create_bucket("flush-target")
|
||||
|
||||
config = LoggingConfiguration(
|
||||
target_bucket="flush-target",
|
||||
target_prefix="logs/",
|
||||
)
|
||||
logging_service.set_bucket_logging("flush-source", config)
|
||||
|
||||
for i in range(3):
|
||||
logging_service.log_request(
|
||||
"flush-source",
|
||||
operation="REST.GET.OBJECT",
|
||||
key=f"file{i}.txt",
|
||||
)
|
||||
|
||||
logging_service.flush()
|
||||
|
||||
objects = storage.list_objects_all("flush-target")
|
||||
assert len(objects) >= 1
|
||||
|
||||
def test_auto_flush_on_buffer_size(self, logging_service, storage):
|
||||
storage.create_bucket("auto-flush-target")
|
||||
|
||||
config = LoggingConfiguration(
|
||||
target_bucket="auto-flush-target",
|
||||
target_prefix="",
|
||||
)
|
||||
logging_service.set_bucket_logging("auto-source", config)
|
||||
|
||||
for i in range(15):
|
||||
logging_service.log_request(
|
||||
"auto-source",
|
||||
operation="REST.GET.OBJECT",
|
||||
key=f"file{i}.txt",
|
||||
)
|
||||
|
||||
objects = storage.list_objects_all("auto-flush-target")
|
||||
assert len(objects) >= 1
|
||||
|
||||
def test_get_stats(self, logging_service, storage):
|
||||
storage.create_bucket("stats-target")
|
||||
config = LoggingConfiguration(target_bucket="stats-target")
|
||||
logging_service.set_bucket_logging("stats-bucket", config)
|
||||
|
||||
logging_service.log_request(
|
||||
"stats-bucket",
|
||||
operation="REST.GET.OBJECT",
|
||||
key="test.txt",
|
||||
)
|
||||
|
||||
stats = logging_service.get_stats()
|
||||
assert "buffered_entries" in stats
|
||||
assert "target_buckets" in stats
|
||||
assert stats["buffered_entries"] >= 1
|
||||
|
||||
def test_shutdown_flushes_buffer(self, tmp_path, storage):
|
||||
storage.create_bucket("shutdown-target")
|
||||
|
||||
service = AccessLoggingService(tmp_path, flush_interval=3600, max_buffer_size=100)
|
||||
service.set_storage(storage)
|
||||
|
||||
config = LoggingConfiguration(target_bucket="shutdown-target")
|
||||
service.set_bucket_logging("shutdown-source", config)
|
||||
|
||||
service.log_request(
|
||||
"shutdown-source",
|
||||
operation="REST.PUT.OBJECT",
|
||||
key="final.txt",
|
||||
)
|
||||
|
||||
service.shutdown()
|
||||
|
||||
objects = storage.list_objects_all("shutdown-target")
|
||||
assert len(objects) >= 1
|
||||
|
||||
def test_logging_caching(self, logging_service):
|
||||
config = LoggingConfiguration(target_bucket="cached-logs")
|
||||
logging_service.set_bucket_logging("cached-bucket", config)
|
||||
|
||||
logging_service.get_bucket_logging("cached-bucket")
|
||||
assert "cached-bucket" in logging_service._configs
|
||||
|
||||
def test_log_request_all_fields(self, logging_service, storage):
|
||||
storage.create_bucket("detailed-target")
|
||||
|
||||
config = LoggingConfiguration(target_bucket="detailed-target", target_prefix="detailed/")
|
||||
logging_service.set_bucket_logging("detailed-source", config)
|
||||
|
||||
logging_service.log_request(
|
||||
"detailed-source",
|
||||
operation="REST.PUT.OBJECT",
|
||||
key="detailed/file.txt",
|
||||
remote_ip="10.0.0.1",
|
||||
requester="admin-user",
|
||||
request_uri="PUT /detailed-source/detailed/file.txt HTTP/1.1",
|
||||
http_status=201,
|
||||
error_code="",
|
||||
bytes_sent=2048,
|
||||
object_size=2048,
|
||||
total_time_ms=100,
|
||||
referrer="http://admin.example.com",
|
||||
user_agent="curl/7.68.0",
|
||||
version_id="v1.0",
|
||||
request_id="CUSTOM_REQ_ID",
|
||||
)
|
||||
|
||||
stats = logging_service.get_stats()
|
||||
assert stats["buffered_entries"] == 1
|
||||
|
||||
def test_failed_flush_returns_to_buffer(self, logging_service):
|
||||
config = LoggingConfiguration(target_bucket="nonexistent-target")
|
||||
logging_service.set_bucket_logging("fail-source", config)
|
||||
|
||||
logging_service.log_request(
|
||||
"fail-source",
|
||||
operation="REST.GET.OBJECT",
|
||||
key="test.txt",
|
||||
)
|
||||
|
||||
initial_count = logging_service.get_stats()["buffered_entries"]
|
||||
logging_service.flush()
|
||||
|
||||
final_count = logging_service.get_stats()["buffered_entries"]
|
||||
assert final_count >= initial_count
|
||||
284
tests/test_acl.py
Normal file
@@ -0,0 +1,284 @@
|
||||
import json
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from app.acl import (
|
||||
Acl,
|
||||
AclGrant,
|
||||
AclService,
|
||||
ACL_PERMISSION_FULL_CONTROL,
|
||||
ACL_PERMISSION_READ,
|
||||
ACL_PERMISSION_WRITE,
|
||||
ACL_PERMISSION_READ_ACP,
|
||||
ACL_PERMISSION_WRITE_ACP,
|
||||
GRANTEE_ALL_USERS,
|
||||
GRANTEE_AUTHENTICATED_USERS,
|
||||
PERMISSION_TO_ACTIONS,
|
||||
create_canned_acl,
|
||||
CANNED_ACLS,
|
||||
)
|
||||
|
||||
|
||||
class TestAclGrant:
|
||||
def test_to_dict(self):
|
||||
grant = AclGrant(grantee="user123", permission=ACL_PERMISSION_READ)
|
||||
result = grant.to_dict()
|
||||
assert result == {"grantee": "user123", "permission": "READ"}
|
||||
|
||||
def test_from_dict(self):
|
||||
data = {"grantee": "admin", "permission": "FULL_CONTROL"}
|
||||
grant = AclGrant.from_dict(data)
|
||||
assert grant.grantee == "admin"
|
||||
assert grant.permission == ACL_PERMISSION_FULL_CONTROL
|
||||
|
||||
|
||||
class TestAcl:
|
||||
def test_to_dict(self):
|
||||
acl = Acl(
|
||||
owner="owner-user",
|
||||
grants=[
|
||||
AclGrant(grantee="owner-user", permission=ACL_PERMISSION_FULL_CONTROL),
|
||||
AclGrant(grantee=GRANTEE_ALL_USERS, permission=ACL_PERMISSION_READ),
|
||||
],
|
||||
)
|
||||
result = acl.to_dict()
|
||||
assert result["owner"] == "owner-user"
|
||||
assert len(result["grants"]) == 2
|
||||
assert result["grants"][0]["grantee"] == "owner-user"
|
||||
assert result["grants"][1]["grantee"] == "*"
|
||||
|
||||
def test_from_dict(self):
|
||||
data = {
|
||||
"owner": "the-owner",
|
||||
"grants": [
|
||||
{"grantee": "the-owner", "permission": "FULL_CONTROL"},
|
||||
{"grantee": "authenticated", "permission": "READ"},
|
||||
],
|
||||
}
|
||||
acl = Acl.from_dict(data)
|
||||
assert acl.owner == "the-owner"
|
||||
assert len(acl.grants) == 2
|
||||
assert acl.grants[0].grantee == "the-owner"
|
||||
assert acl.grants[1].grantee == GRANTEE_AUTHENTICATED_USERS
|
||||
|
||||
def test_from_dict_empty_grants(self):
|
||||
data = {"owner": "solo-owner"}
|
||||
acl = Acl.from_dict(data)
|
||||
assert acl.owner == "solo-owner"
|
||||
assert len(acl.grants) == 0
|
||||
|
||||
def test_get_allowed_actions_owner(self):
|
||||
acl = Acl(owner="owner123", grants=[])
|
||||
actions = acl.get_allowed_actions("owner123", is_authenticated=True)
|
||||
assert actions == PERMISSION_TO_ACTIONS[ACL_PERMISSION_FULL_CONTROL]
|
||||
|
||||
def test_get_allowed_actions_all_users(self):
|
||||
acl = Acl(
|
||||
owner="owner",
|
||||
grants=[AclGrant(grantee=GRANTEE_ALL_USERS, permission=ACL_PERMISSION_READ)],
|
||||
)
|
||||
actions = acl.get_allowed_actions(None, is_authenticated=False)
|
||||
assert "read" in actions
|
||||
assert "list" in actions
|
||||
assert "write" not in actions
|
||||
|
||||
def test_get_allowed_actions_authenticated_users(self):
|
||||
acl = Acl(
|
||||
owner="owner",
|
||||
grants=[AclGrant(grantee=GRANTEE_AUTHENTICATED_USERS, permission=ACL_PERMISSION_WRITE)],
|
||||
)
|
||||
actions_authenticated = acl.get_allowed_actions("some-user", is_authenticated=True)
|
||||
assert "write" in actions_authenticated
|
||||
assert "delete" in actions_authenticated
|
||||
|
||||
actions_anonymous = acl.get_allowed_actions(None, is_authenticated=False)
|
||||
assert "write" not in actions_anonymous
|
||||
|
||||
def test_get_allowed_actions_specific_grantee(self):
|
||||
acl = Acl(
|
||||
owner="owner",
|
||||
grants=[
|
||||
AclGrant(grantee="user-abc", permission=ACL_PERMISSION_READ),
|
||||
AclGrant(grantee="user-xyz", permission=ACL_PERMISSION_WRITE),
|
||||
],
|
||||
)
|
||||
abc_actions = acl.get_allowed_actions("user-abc", is_authenticated=True)
|
||||
assert "read" in abc_actions
|
||||
assert "list" in abc_actions
|
||||
assert "write" not in abc_actions
|
||||
|
||||
xyz_actions = acl.get_allowed_actions("user-xyz", is_authenticated=True)
|
||||
assert "write" in xyz_actions
|
||||
assert "read" not in xyz_actions
|
||||
|
||||
def test_get_allowed_actions_combined(self):
|
||||
acl = Acl(
|
||||
owner="owner",
|
||||
grants=[
|
||||
AclGrant(grantee=GRANTEE_ALL_USERS, permission=ACL_PERMISSION_READ),
|
||||
AclGrant(grantee="special-user", permission=ACL_PERMISSION_WRITE),
|
||||
],
|
||||
)
|
||||
actions = acl.get_allowed_actions("special-user", is_authenticated=True)
|
||||
assert "read" in actions
|
||||
assert "list" in actions
|
||||
assert "write" in actions
|
||||
assert "delete" in actions
|
||||
|
||||
|
||||
class TestCannedAcls:
|
||||
def test_private_acl(self):
|
||||
acl = create_canned_acl("private", "the-owner")
|
||||
assert acl.owner == "the-owner"
|
||||
assert len(acl.grants) == 1
|
||||
assert acl.grants[0].grantee == "the-owner"
|
||||
assert acl.grants[0].permission == ACL_PERMISSION_FULL_CONTROL
|
||||
|
||||
def test_public_read_acl(self):
|
||||
acl = create_canned_acl("public-read", "owner")
|
||||
assert acl.owner == "owner"
|
||||
has_owner_full_control = any(
|
||||
g.grantee == "owner" and g.permission == ACL_PERMISSION_FULL_CONTROL for g in acl.grants
|
||||
)
|
||||
has_public_read = any(
|
||||
g.grantee == GRANTEE_ALL_USERS and g.permission == ACL_PERMISSION_READ for g in acl.grants
|
||||
)
|
||||
assert has_owner_full_control
|
||||
assert has_public_read
|
||||
|
||||
def test_public_read_write_acl(self):
|
||||
acl = create_canned_acl("public-read-write", "owner")
|
||||
assert acl.owner == "owner"
|
||||
has_public_read = any(
|
||||
g.grantee == GRANTEE_ALL_USERS and g.permission == ACL_PERMISSION_READ for g in acl.grants
|
||||
)
|
||||
has_public_write = any(
|
||||
g.grantee == GRANTEE_ALL_USERS and g.permission == ACL_PERMISSION_WRITE for g in acl.grants
|
||||
)
|
||||
assert has_public_read
|
||||
assert has_public_write
|
||||
|
||||
def test_authenticated_read_acl(self):
|
||||
acl = create_canned_acl("authenticated-read", "owner")
|
||||
has_authenticated_read = any(
|
||||
g.grantee == GRANTEE_AUTHENTICATED_USERS and g.permission == ACL_PERMISSION_READ for g in acl.grants
|
||||
)
|
||||
assert has_authenticated_read
|
||||
|
||||
def test_unknown_canned_acl_defaults_to_private(self):
|
||||
acl = create_canned_acl("unknown-acl", "owner")
|
||||
private_acl = create_canned_acl("private", "owner")
|
||||
assert acl.to_dict() == private_acl.to_dict()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def acl_service(tmp_path: Path):
|
||||
return AclService(tmp_path)
|
||||
|
||||
|
||||
class TestAclService:
|
||||
def test_get_bucket_acl_not_exists(self, acl_service):
|
||||
result = acl_service.get_bucket_acl("nonexistent-bucket")
|
||||
assert result is None
|
||||
|
||||
def test_set_and_get_bucket_acl(self, acl_service):
|
||||
acl = Acl(
|
||||
owner="bucket-owner",
|
||||
grants=[AclGrant(grantee="bucket-owner", permission=ACL_PERMISSION_FULL_CONTROL)],
|
||||
)
|
||||
acl_service.set_bucket_acl("my-bucket", acl)
|
||||
|
||||
retrieved = acl_service.get_bucket_acl("my-bucket")
|
||||
assert retrieved is not None
|
||||
assert retrieved.owner == "bucket-owner"
|
||||
assert len(retrieved.grants) == 1
|
||||
|
||||
def test_bucket_acl_caching(self, acl_service):
|
||||
acl = Acl(owner="cached-owner", grants=[])
|
||||
acl_service.set_bucket_acl("cached-bucket", acl)
|
||||
|
||||
acl_service.get_bucket_acl("cached-bucket")
|
||||
assert "cached-bucket" in acl_service._bucket_acl_cache
|
||||
|
||||
retrieved = acl_service.get_bucket_acl("cached-bucket")
|
||||
assert retrieved.owner == "cached-owner"
|
||||
|
||||
def test_set_bucket_canned_acl(self, acl_service):
|
||||
result = acl_service.set_bucket_canned_acl("new-bucket", "public-read", "the-owner")
|
||||
assert result.owner == "the-owner"
|
||||
|
||||
retrieved = acl_service.get_bucket_acl("new-bucket")
|
||||
assert retrieved is not None
|
||||
has_public_read = any(
|
||||
g.grantee == GRANTEE_ALL_USERS and g.permission == ACL_PERMISSION_READ for g in retrieved.grants
|
||||
)
|
||||
assert has_public_read
|
||||
|
||||
def test_delete_bucket_acl(self, acl_service):
|
||||
acl = Acl(owner="to-delete-owner", grants=[])
|
||||
acl_service.set_bucket_acl("delete-me", acl)
|
||||
assert acl_service.get_bucket_acl("delete-me") is not None
|
||||
|
||||
acl_service.delete_bucket_acl("delete-me")
|
||||
acl_service._bucket_acl_cache.clear()
|
||||
assert acl_service.get_bucket_acl("delete-me") is None
|
||||
|
||||
def test_evaluate_bucket_acl_allowed(self, acl_service):
|
||||
acl = Acl(
|
||||
owner="owner",
|
||||
grants=[AclGrant(grantee=GRANTEE_ALL_USERS, permission=ACL_PERMISSION_READ)],
|
||||
)
|
||||
acl_service.set_bucket_acl("public-bucket", acl)
|
||||
|
||||
result = acl_service.evaluate_bucket_acl("public-bucket", None, "read", is_authenticated=False)
|
||||
assert result is True
|
||||
|
||||
def test_evaluate_bucket_acl_denied(self, acl_service):
|
||||
acl = Acl(
|
||||
owner="owner",
|
||||
grants=[AclGrant(grantee="owner", permission=ACL_PERMISSION_FULL_CONTROL)],
|
||||
)
|
||||
acl_service.set_bucket_acl("private-bucket", acl)
|
||||
|
||||
result = acl_service.evaluate_bucket_acl("private-bucket", "other-user", "write", is_authenticated=True)
|
||||
assert result is False
|
||||
|
||||
def test_evaluate_bucket_acl_no_acl(self, acl_service):
|
||||
result = acl_service.evaluate_bucket_acl("no-acl-bucket", "anyone", "read")
|
||||
assert result is False
|
||||
|
||||
def test_get_object_acl_from_metadata(self, acl_service):
|
||||
metadata = {
|
||||
"__acl__": {
|
||||
"owner": "object-owner",
|
||||
"grants": [{"grantee": "object-owner", "permission": "FULL_CONTROL"}],
|
||||
}
|
||||
}
|
||||
result = acl_service.get_object_acl("bucket", "key", metadata)
|
||||
assert result is not None
|
||||
assert result.owner == "object-owner"
|
||||
|
||||
def test_get_object_acl_no_acl_in_metadata(self, acl_service):
|
||||
metadata = {"Content-Type": "text/plain"}
|
||||
result = acl_service.get_object_acl("bucket", "key", metadata)
|
||||
assert result is None
|
||||
|
||||
def test_create_object_acl_metadata(self, acl_service):
|
||||
acl = Acl(owner="obj-owner", grants=[])
|
||||
result = acl_service.create_object_acl_metadata(acl)
|
||||
assert "__acl__" in result
|
||||
assert result["__acl__"]["owner"] == "obj-owner"
|
||||
|
||||
def test_evaluate_object_acl(self, acl_service):
|
||||
metadata = {
|
||||
"__acl__": {
|
||||
"owner": "obj-owner",
|
||||
"grants": [{"grantee": "*", "permission": "READ"}],
|
||||
}
|
||||
}
|
||||
result = acl_service.evaluate_object_acl(metadata, None, "read", is_authenticated=False)
|
||||
assert result is True
|
||||
|
||||
result = acl_service.evaluate_object_acl(metadata, None, "write", is_authenticated=False)
|
||||
assert result is False
|
||||
238
tests/test_lifecycle.py
Normal file
@@ -0,0 +1,238 @@
import io
import time
from datetime import datetime, timedelta, timezone
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

from app.lifecycle import LifecycleManager, LifecycleResult
from app.storage import ObjectStorage


@pytest.fixture
def storage(tmp_path: Path):
    storage_root = tmp_path / "data"
    storage_root.mkdir(parents=True)
    return ObjectStorage(storage_root)


@pytest.fixture
def lifecycle_manager(storage):
    manager = LifecycleManager(storage, interval_seconds=3600)
    yield manager
    manager.stop()


class TestLifecycleResult:
    def test_default_values(self):
        result = LifecycleResult(bucket_name="test-bucket")
        assert result.bucket_name == "test-bucket"
        assert result.objects_deleted == 0
        assert result.versions_deleted == 0
        assert result.uploads_aborted == 0
        assert result.errors == []
        assert result.execution_time_seconds == 0.0


class TestLifecycleManager:
    def test_start_and_stop(self, lifecycle_manager):
        lifecycle_manager.start()
        assert lifecycle_manager._timer is not None
        assert lifecycle_manager._shutdown is False

        lifecycle_manager.stop()
        assert lifecycle_manager._shutdown is True
        assert lifecycle_manager._timer is None

    def test_start_only_once(self, lifecycle_manager):
        lifecycle_manager.start()
        first_timer = lifecycle_manager._timer

        lifecycle_manager.start()
        assert lifecycle_manager._timer is first_timer

    def test_enforce_rules_no_lifecycle(self, lifecycle_manager, storage):
        storage.create_bucket("no-lifecycle-bucket")

        result = lifecycle_manager.enforce_rules("no-lifecycle-bucket")
        assert result.bucket_name == "no-lifecycle-bucket"
        assert result.objects_deleted == 0

    def test_enforce_rules_disabled_rule(self, lifecycle_manager, storage):
        storage.create_bucket("disabled-bucket")
        storage.set_bucket_lifecycle("disabled-bucket", [
            {
                "ID": "disabled-rule",
                "Status": "Disabled",
                "Prefix": "",
                "Expiration": {"Days": 1},
            }
        ])

        old_object = storage.put_object(
            "disabled-bucket",
            "old-file.txt",
            io.BytesIO(b"old content"),
        )

        result = lifecycle_manager.enforce_rules("disabled-bucket")
        assert result.objects_deleted == 0

    def test_enforce_expiration_by_days(self, lifecycle_manager, storage):
        storage.create_bucket("expire-bucket")
        storage.set_bucket_lifecycle("expire-bucket", [
            {
                "ID": "expire-30-days",
                "Status": "Enabled",
                "Prefix": "",
                "Expiration": {"Days": 30},
            }
        ])

        storage.put_object(
            "expire-bucket",
            "recent-file.txt",
            io.BytesIO(b"recent content"),
        )

        result = lifecycle_manager.enforce_rules("expire-bucket")
        assert result.objects_deleted == 0

    def test_enforce_expiration_with_prefix(self, lifecycle_manager, storage):
        storage.create_bucket("prefix-bucket")
        storage.set_bucket_lifecycle("prefix-bucket", [
            {
                "ID": "expire-logs",
                "Status": "Enabled",
                "Prefix": "logs/",
                "Expiration": {"Days": 1},
            }
        ])

        storage.put_object("prefix-bucket", "logs/old.log", io.BytesIO(b"log data"))
        storage.put_object("prefix-bucket", "data/keep.txt", io.BytesIO(b"keep this"))

        result = lifecycle_manager.enforce_rules("prefix-bucket")

    def test_enforce_all_buckets(self, lifecycle_manager, storage):
        storage.create_bucket("bucket1")
        storage.create_bucket("bucket2")

        results = lifecycle_manager.enforce_all_buckets()
        assert isinstance(results, dict)

    def test_run_now_single_bucket(self, lifecycle_manager, storage):
        storage.create_bucket("run-now-bucket")

        results = lifecycle_manager.run_now("run-now-bucket")
        assert "run-now-bucket" in results

    def test_run_now_all_buckets(self, lifecycle_manager, storage):
        storage.create_bucket("all-bucket-1")
        storage.create_bucket("all-bucket-2")

        results = lifecycle_manager.run_now()
        assert isinstance(results, dict)

    def test_enforce_abort_multipart(self, lifecycle_manager, storage):
        storage.create_bucket("multipart-bucket")
        storage.set_bucket_lifecycle("multipart-bucket", [
            {
                "ID": "abort-old-uploads",
                "Status": "Enabled",
                "Prefix": "",
                "AbortIncompleteMultipartUpload": {"DaysAfterInitiation": 7},
            }
        ])

        upload_id = storage.initiate_multipart_upload("multipart-bucket", "large-file.bin")

        result = lifecycle_manager.enforce_rules("multipart-bucket")
        assert result.uploads_aborted == 0

    def test_enforce_noncurrent_version_expiration(self, lifecycle_manager, storage):
        storage.create_bucket("versioned-bucket")
        storage.set_bucket_versioning("versioned-bucket", True)
        storage.set_bucket_lifecycle("versioned-bucket", [
            {
                "ID": "expire-old-versions",
                "Status": "Enabled",
                "Prefix": "",
                "NoncurrentVersionExpiration": {"NoncurrentDays": 30},
            }
        ])

        storage.put_object("versioned-bucket", "file.txt", io.BytesIO(b"v1"))
        storage.put_object("versioned-bucket", "file.txt", io.BytesIO(b"v2"))

        result = lifecycle_manager.enforce_rules("versioned-bucket")
        assert result.bucket_name == "versioned-bucket"

    def test_execution_time_tracking(self, lifecycle_manager, storage):
        storage.create_bucket("timed-bucket")
        storage.set_bucket_lifecycle("timed-bucket", [
            {
                "ID": "timer-test",
                "Status": "Enabled",
                "Expiration": {"Days": 1},
            }
        ])

        result = lifecycle_manager.enforce_rules("timed-bucket")
        assert result.execution_time_seconds >= 0

    def test_enforce_rules_with_error(self, lifecycle_manager, storage):
        result = lifecycle_manager.enforce_rules("nonexistent-bucket")
        assert len(result.errors) > 0 or result.objects_deleted == 0

    def test_lifecycle_with_date_expiration(self, lifecycle_manager, storage):
        storage.create_bucket("date-bucket")
        past_date = (datetime.now(timezone.utc) - timedelta(days=1)).strftime("%Y-%m-%dT00:00:00Z")
        storage.set_bucket_lifecycle("date-bucket", [
            {
                "ID": "expire-by-date",
                "Status": "Enabled",
                "Prefix": "",
                "Expiration": {"Date": past_date},
            }
        ])

        storage.put_object("date-bucket", "should-expire.txt", io.BytesIO(b"content"))

        result = lifecycle_manager.enforce_rules("date-bucket")

    def test_enforce_with_filter_prefix(self, lifecycle_manager, storage):
        storage.create_bucket("filter-bucket")
        storage.set_bucket_lifecycle("filter-bucket", [
            {
                "ID": "filter-prefix-rule",
                "Status": "Enabled",
                "Filter": {"Prefix": "archive/"},
                "Expiration": {"Days": 1},
            }
        ])

        result = lifecycle_manager.enforce_rules("filter-bucket")
        assert result.bucket_name == "filter-bucket"


class TestLifecycleManagerScheduling:
    def test_schedule_next_respects_shutdown(self, storage):
        manager = LifecycleManager(storage, interval_seconds=1)
        manager._shutdown = True
        manager._schedule_next()
        assert manager._timer is None

    @patch.object(LifecycleManager, "enforce_all_buckets")
    def test_run_enforcement_catches_exceptions(self, mock_enforce, storage):
        mock_enforce.side_effect = Exception("Test error")
        manager = LifecycleManager(storage, interval_seconds=3600)
        manager._shutdown = True
        manager._run_enforcement()

    def test_shutdown_flag_prevents_scheduling(self, storage):
        manager = LifecycleManager(storage, interval_seconds=1)
        manager.start()
        manager.stop()
        assert manager._shutdown is True
374 tests/test_notifications.py Normal file
@@ -0,0 +1,374 @@
import json
import time
from datetime import datetime, timezone
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

from app.notifications import (
    NotificationConfiguration,
    NotificationEvent,
    NotificationService,
    WebhookDestination,
)


class TestNotificationEvent:
    def test_default_values(self):
        event = NotificationEvent(
            event_name="s3:ObjectCreated:Put",
            bucket_name="test-bucket",
            object_key="test/key.txt",
        )
        assert event.event_name == "s3:ObjectCreated:Put"
        assert event.bucket_name == "test-bucket"
        assert event.object_key == "test/key.txt"
        assert event.object_size == 0
        assert event.etag == ""
        assert event.version_id is None
        assert event.request_id != ""

    def test_to_s3_event(self):
        event = NotificationEvent(
            event_name="s3:ObjectCreated:Put",
            bucket_name="my-bucket",
            object_key="my/object.txt",
            object_size=1024,
            etag="abc123",
            version_id="v1",
            source_ip="192.168.1.1",
            user_identity="user123",
        )
        result = event.to_s3_event()

        assert "Records" in result
        assert len(result["Records"]) == 1

        record = result["Records"][0]
        assert record["eventVersion"] == "2.1"
        assert record["eventSource"] == "myfsio:s3"
        assert record["eventName"] == "s3:ObjectCreated:Put"
        assert record["s3"]["bucket"]["name"] == "my-bucket"
        assert record["s3"]["object"]["key"] == "my/object.txt"
        assert record["s3"]["object"]["size"] == 1024
        assert record["s3"]["object"]["eTag"] == "abc123"
        assert record["s3"]["object"]["versionId"] == "v1"
        assert record["userIdentity"]["principalId"] == "user123"
        assert record["requestParameters"]["sourceIPAddress"] == "192.168.1.1"


class TestWebhookDestination:
    def test_default_values(self):
        dest = WebhookDestination(url="http://example.com/webhook")
        assert dest.url == "http://example.com/webhook"
        assert dest.headers == {}
        assert dest.timeout_seconds == 30
        assert dest.retry_count == 3
        assert dest.retry_delay_seconds == 1

    def test_to_dict(self):
        dest = WebhookDestination(
            url="http://example.com/webhook",
            headers={"X-Custom": "value"},
            timeout_seconds=60,
            retry_count=5,
            retry_delay_seconds=2,
        )
        result = dest.to_dict()
        assert result["url"] == "http://example.com/webhook"
        assert result["headers"] == {"X-Custom": "value"}
        assert result["timeout_seconds"] == 60
        assert result["retry_count"] == 5
        assert result["retry_delay_seconds"] == 2

    def test_from_dict(self):
        data = {
            "url": "http://hook.example.com",
            "headers": {"Authorization": "Bearer token"},
            "timeout_seconds": 45,
            "retry_count": 2,
            "retry_delay_seconds": 5,
        }
        dest = WebhookDestination.from_dict(data)
        assert dest.url == "http://hook.example.com"
        assert dest.headers == {"Authorization": "Bearer token"}
        assert dest.timeout_seconds == 45
        assert dest.retry_count == 2
        assert dest.retry_delay_seconds == 5


class TestNotificationConfiguration:
    def test_matches_event_exact_match(self):
        config = NotificationConfiguration(
            id="config1",
            events=["s3:ObjectCreated:Put"],
            destination=WebhookDestination(url="http://example.com"),
        )
        assert config.matches_event("s3:ObjectCreated:Put", "any/key.txt") is True
        assert config.matches_event("s3:ObjectCreated:Post", "any/key.txt") is False

    def test_matches_event_wildcard(self):
        config = NotificationConfiguration(
            id="config1",
            events=["s3:ObjectCreated:*"],
            destination=WebhookDestination(url="http://example.com"),
        )
        assert config.matches_event("s3:ObjectCreated:Put", "key.txt") is True
        assert config.matches_event("s3:ObjectCreated:Copy", "key.txt") is True
        assert config.matches_event("s3:ObjectRemoved:Delete", "key.txt") is False

    def test_matches_event_with_prefix_filter(self):
        config = NotificationConfiguration(
            id="config1",
            events=["s3:ObjectCreated:*"],
            destination=WebhookDestination(url="http://example.com"),
            prefix_filter="logs/",
        )
        assert config.matches_event("s3:ObjectCreated:Put", "logs/app.log") is True
        assert config.matches_event("s3:ObjectCreated:Put", "data/file.txt") is False

    def test_matches_event_with_suffix_filter(self):
        config = NotificationConfiguration(
            id="config1",
            events=["s3:ObjectCreated:*"],
            destination=WebhookDestination(url="http://example.com"),
            suffix_filter=".jpg",
        )
        assert config.matches_event("s3:ObjectCreated:Put", "photos/image.jpg") is True
        assert config.matches_event("s3:ObjectCreated:Put", "photos/image.png") is False

    def test_matches_event_with_both_filters(self):
        config = NotificationConfiguration(
            id="config1",
            events=["s3:ObjectCreated:*"],
            destination=WebhookDestination(url="http://example.com"),
            prefix_filter="images/",
            suffix_filter=".png",
        )
        assert config.matches_event("s3:ObjectCreated:Put", "images/photo.png") is True
        assert config.matches_event("s3:ObjectCreated:Put", "images/photo.jpg") is False
        assert config.matches_event("s3:ObjectCreated:Put", "documents/file.png") is False

    def test_to_dict(self):
        config = NotificationConfiguration(
            id="my-config",
            events=["s3:ObjectCreated:Put", "s3:ObjectRemoved:Delete"],
            destination=WebhookDestination(url="http://example.com"),
            prefix_filter="logs/",
            suffix_filter=".log",
        )
        result = config.to_dict()
        assert result["Id"] == "my-config"
        assert result["Events"] == ["s3:ObjectCreated:Put", "s3:ObjectRemoved:Delete"]
        assert "Destination" in result
        assert result["Filter"]["Key"]["FilterRules"][0]["Value"] == "logs/"
        assert result["Filter"]["Key"]["FilterRules"][1]["Value"] == ".log"

    def test_from_dict(self):
        data = {
            "Id": "parsed-config",
            "Events": ["s3:ObjectCreated:*"],
            "Destination": {"url": "http://hook.example.com"},
            "Filter": {
                "Key": {
                    "FilterRules": [
                        {"Name": "prefix", "Value": "data/"},
                        {"Name": "suffix", "Value": ".csv"},
                    ]
                }
            },
        }
        config = NotificationConfiguration.from_dict(data)
        assert config.id == "parsed-config"
        assert config.events == ["s3:ObjectCreated:*"]
        assert config.destination.url == "http://hook.example.com"
        assert config.prefix_filter == "data/"
        assert config.suffix_filter == ".csv"


@pytest.fixture
def notification_service(tmp_path: Path):
    service = NotificationService(tmp_path, worker_count=1)
    yield service
    service.shutdown()


class TestNotificationService:
    def test_get_bucket_notifications_empty(self, notification_service):
        result = notification_service.get_bucket_notifications("nonexistent-bucket")
        assert result == []

    def test_set_and_get_bucket_notifications(self, notification_service):
        configs = [
            NotificationConfiguration(
                id="config1",
                events=["s3:ObjectCreated:*"],
                destination=WebhookDestination(url="http://example.com/webhook1"),
            ),
            NotificationConfiguration(
                id="config2",
                events=["s3:ObjectRemoved:*"],
                destination=WebhookDestination(url="http://example.com/webhook2"),
            ),
        ]
        notification_service.set_bucket_notifications("my-bucket", configs)

        retrieved = notification_service.get_bucket_notifications("my-bucket")
        assert len(retrieved) == 2
        assert retrieved[0].id == "config1"
        assert retrieved[1].id == "config2"

    def test_delete_bucket_notifications(self, notification_service):
        configs = [
            NotificationConfiguration(
                id="to-delete",
                events=["s3:ObjectCreated:*"],
                destination=WebhookDestination(url="http://example.com"),
            ),
        ]
        notification_service.set_bucket_notifications("delete-bucket", configs)
        assert len(notification_service.get_bucket_notifications("delete-bucket")) == 1

        notification_service.delete_bucket_notifications("delete-bucket")
        notification_service._configs.clear()
        assert len(notification_service.get_bucket_notifications("delete-bucket")) == 0

    def test_emit_event_no_config(self, notification_service):
        event = NotificationEvent(
            event_name="s3:ObjectCreated:Put",
            bucket_name="no-config-bucket",
            object_key="test.txt",
        )
        notification_service.emit_event(event)
        assert notification_service._stats["events_queued"] == 0

    def test_emit_event_matching_config(self, notification_service):
        configs = [
            NotificationConfiguration(
                id="match-config",
                events=["s3:ObjectCreated:*"],
                destination=WebhookDestination(url="http://example.com/webhook"),
            ),
        ]
        notification_service.set_bucket_notifications("event-bucket", configs)

        event = NotificationEvent(
            event_name="s3:ObjectCreated:Put",
            bucket_name="event-bucket",
            object_key="test.txt",
        )
        notification_service.emit_event(event)
        assert notification_service._stats["events_queued"] == 1

    def test_emit_event_non_matching_config(self, notification_service):
        configs = [
            NotificationConfiguration(
                id="delete-only",
                events=["s3:ObjectRemoved:*"],
                destination=WebhookDestination(url="http://example.com/webhook"),
            ),
        ]
        notification_service.set_bucket_notifications("delete-bucket", configs)

        event = NotificationEvent(
            event_name="s3:ObjectCreated:Put",
            bucket_name="delete-bucket",
            object_key="test.txt",
        )
        notification_service.emit_event(event)
        assert notification_service._stats["events_queued"] == 0

    def test_emit_object_created(self, notification_service):
        configs = [
            NotificationConfiguration(
                id="create-config",
                events=["s3:ObjectCreated:Put"],
                destination=WebhookDestination(url="http://example.com/webhook"),
            ),
        ]
        notification_service.set_bucket_notifications("create-bucket", configs)

        notification_service.emit_object_created(
            "create-bucket",
            "new-file.txt",
            size=1024,
            etag="abc123",
            operation="Put",
        )
        assert notification_service._stats["events_queued"] == 1

    def test_emit_object_removed(self, notification_service):
        configs = [
            NotificationConfiguration(
                id="remove-config",
                events=["s3:ObjectRemoved:Delete"],
                destination=WebhookDestination(url="http://example.com/webhook"),
            ),
        ]
        notification_service.set_bucket_notifications("remove-bucket", configs)

        notification_service.emit_object_removed(
            "remove-bucket",
            "deleted-file.txt",
            operation="Delete",
        )
        assert notification_service._stats["events_queued"] == 1

    def test_get_stats(self, notification_service):
        stats = notification_service.get_stats()
        assert "events_queued" in stats
        assert "events_sent" in stats
        assert "events_failed" in stats

    @patch("app.notifications.requests.post")
    def test_send_notification_success(self, mock_post, notification_service):
        mock_response = MagicMock()
        mock_response.status_code = 200
        mock_post.return_value = mock_response

        event = NotificationEvent(
            event_name="s3:ObjectCreated:Put",
            bucket_name="test-bucket",
            object_key="test.txt",
        )
        destination = WebhookDestination(url="http://example.com/webhook")

        notification_service._send_notification(event, destination)
        mock_post.assert_called_once()

    @patch("app.notifications.requests.post")
    def test_send_notification_retry_on_failure(self, mock_post, notification_service):
        mock_response = MagicMock()
        mock_response.status_code = 500
        mock_response.text = "Internal Server Error"
        mock_post.return_value = mock_response

        event = NotificationEvent(
            event_name="s3:ObjectCreated:Put",
            bucket_name="test-bucket",
            object_key="test.txt",
        )
        destination = WebhookDestination(
            url="http://example.com/webhook",
            retry_count=2,
            retry_delay_seconds=0,
        )

        with pytest.raises(RuntimeError) as exc_info:
            notification_service._send_notification(event, destination)
        assert "Failed after 2 attempts" in str(exc_info.value)
        assert mock_post.call_count == 2

    def test_notification_caching(self, notification_service):
        configs = [
            NotificationConfiguration(
                id="cached-config",
                events=["s3:ObjectCreated:*"],
                destination=WebhookDestination(url="http://example.com"),
            ),
        ]
        notification_service.set_bucket_notifications("cached-bucket", configs)

        notification_service.get_bucket_notifications("cached-bucket")
        assert "cached-bucket" in notification_service._configs
332 tests/test_object_lock.py Normal file
@@ -0,0 +1,332 @@
import json
from datetime import datetime, timedelta, timezone
from pathlib import Path

import pytest

from app.object_lock import (
    ObjectLockConfig,
    ObjectLockError,
    ObjectLockRetention,
    ObjectLockService,
    RetentionMode,
)


class TestRetentionMode:
    def test_governance_mode(self):
        assert RetentionMode.GOVERNANCE.value == "GOVERNANCE"

    def test_compliance_mode(self):
        assert RetentionMode.COMPLIANCE.value == "COMPLIANCE"


class TestObjectLockRetention:
    def test_to_dict(self):
        retain_until = datetime(2025, 12, 31, 23, 59, 59, tzinfo=timezone.utc)
        retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=retain_until,
        )
        result = retention.to_dict()
        assert result["Mode"] == "GOVERNANCE"
        assert "2025-12-31" in result["RetainUntilDate"]

    def test_from_dict(self):
        data = {
            "Mode": "COMPLIANCE",
            "RetainUntilDate": "2030-06-15T12:00:00+00:00",
        }
        retention = ObjectLockRetention.from_dict(data)
        assert retention is not None
        assert retention.mode == RetentionMode.COMPLIANCE
        assert retention.retain_until_date.year == 2030

    def test_from_dict_empty(self):
        result = ObjectLockRetention.from_dict({})
        assert result is None

    def test_from_dict_missing_mode(self):
        data = {"RetainUntilDate": "2030-06-15T12:00:00+00:00"}
        result = ObjectLockRetention.from_dict(data)
        assert result is None

    def test_from_dict_missing_date(self):
        data = {"Mode": "GOVERNANCE"}
        result = ObjectLockRetention.from_dict(data)
        assert result is None

    def test_is_expired_future_date(self):
        future = datetime.now(timezone.utc) + timedelta(days=30)
        retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=future,
        )
        assert retention.is_expired() is False

    def test_is_expired_past_date(self):
        past = datetime.now(timezone.utc) - timedelta(days=30)
        retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=past,
        )
        assert retention.is_expired() is True


class TestObjectLockConfig:
    def test_to_dict_enabled(self):
        config = ObjectLockConfig(enabled=True)
        result = config.to_dict()
        assert result["ObjectLockEnabled"] == "Enabled"

    def test_to_dict_disabled(self):
        config = ObjectLockConfig(enabled=False)
        result = config.to_dict()
        assert result["ObjectLockEnabled"] == "Disabled"

    def test_from_dict_enabled(self):
        data = {"ObjectLockEnabled": "Enabled"}
        config = ObjectLockConfig.from_dict(data)
        assert config.enabled is True

    def test_from_dict_disabled(self):
        data = {"ObjectLockEnabled": "Disabled"}
        config = ObjectLockConfig.from_dict(data)
        assert config.enabled is False

    def test_from_dict_with_default_retention_days(self):
        data = {
            "ObjectLockEnabled": "Enabled",
            "Rule": {
                "DefaultRetention": {
                    "Mode": "GOVERNANCE",
                    "Days": 30,
                }
            },
        }
        config = ObjectLockConfig.from_dict(data)
        assert config.enabled is True
        assert config.default_retention is not None
        assert config.default_retention.mode == RetentionMode.GOVERNANCE

    def test_from_dict_with_default_retention_years(self):
        data = {
            "ObjectLockEnabled": "Enabled",
            "Rule": {
                "DefaultRetention": {
                    "Mode": "COMPLIANCE",
                    "Years": 1,
                }
            },
        }
        config = ObjectLockConfig.from_dict(data)
        assert config.enabled is True
        assert config.default_retention is not None
        assert config.default_retention.mode == RetentionMode.COMPLIANCE


@pytest.fixture
def lock_service(tmp_path: Path):
    return ObjectLockService(tmp_path)


class TestObjectLockService:
    def test_get_bucket_lock_config_default(self, lock_service):
        config = lock_service.get_bucket_lock_config("nonexistent-bucket")
        assert config.enabled is False
        assert config.default_retention is None

    def test_set_and_get_bucket_lock_config(self, lock_service):
        config = ObjectLockConfig(enabled=True)
        lock_service.set_bucket_lock_config("my-bucket", config)

        retrieved = lock_service.get_bucket_lock_config("my-bucket")
        assert retrieved.enabled is True

    def test_enable_bucket_lock(self, lock_service):
        lock_service.enable_bucket_lock("lock-bucket")

        config = lock_service.get_bucket_lock_config("lock-bucket")
        assert config.enabled is True

    def test_is_bucket_lock_enabled(self, lock_service):
        assert lock_service.is_bucket_lock_enabled("new-bucket") is False

        lock_service.enable_bucket_lock("new-bucket")
        assert lock_service.is_bucket_lock_enabled("new-bucket") is True

    def test_get_object_retention_not_set(self, lock_service):
        result = lock_service.get_object_retention("bucket", "key.txt")
        assert result is None

    def test_set_and_get_object_retention(self, lock_service):
        future = datetime.now(timezone.utc) + timedelta(days=30)
        retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=future,
        )
        lock_service.set_object_retention("bucket", "key.txt", retention)

        retrieved = lock_service.get_object_retention("bucket", "key.txt")
        assert retrieved is not None
        assert retrieved.mode == RetentionMode.GOVERNANCE

    def test_cannot_modify_compliance_retention(self, lock_service):
        future = datetime.now(timezone.utc) + timedelta(days=30)
        retention = ObjectLockRetention(
            mode=RetentionMode.COMPLIANCE,
            retain_until_date=future,
        )
        lock_service.set_object_retention("bucket", "locked.txt", retention)

        new_retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=future + timedelta(days=10),
        )
        with pytest.raises(ObjectLockError) as exc_info:
            lock_service.set_object_retention("bucket", "locked.txt", new_retention)
        assert "COMPLIANCE" in str(exc_info.value)

    def test_cannot_modify_governance_without_bypass(self, lock_service):
        future = datetime.now(timezone.utc) + timedelta(days=30)
        retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=future,
        )
        lock_service.set_object_retention("bucket", "gov.txt", retention)

        new_retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=future + timedelta(days=10),
        )
        with pytest.raises(ObjectLockError) as exc_info:
            lock_service.set_object_retention("bucket", "gov.txt", new_retention)
        assert "GOVERNANCE" in str(exc_info.value)

    def test_can_modify_governance_with_bypass(self, lock_service):
        future = datetime.now(timezone.utc) + timedelta(days=30)
        retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=future,
        )
        lock_service.set_object_retention("bucket", "bypassable.txt", retention)

        new_retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=future + timedelta(days=10),
        )
        lock_service.set_object_retention("bucket", "bypassable.txt", new_retention, bypass_governance=True)
        retrieved = lock_service.get_object_retention("bucket", "bypassable.txt")
        assert retrieved.retain_until_date > future

    def test_can_modify_expired_retention(self, lock_service):
        past = datetime.now(timezone.utc) - timedelta(days=30)
        retention = ObjectLockRetention(
            mode=RetentionMode.COMPLIANCE,
            retain_until_date=past,
        )
        lock_service.set_object_retention("bucket", "expired.txt", retention)

        future = datetime.now(timezone.utc) + timedelta(days=30)
        new_retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=future,
        )
        lock_service.set_object_retention("bucket", "expired.txt", new_retention)
        retrieved = lock_service.get_object_retention("bucket", "expired.txt")
        assert retrieved.mode == RetentionMode.GOVERNANCE

    def test_get_legal_hold_not_set(self, lock_service):
        result = lock_service.get_legal_hold("bucket", "key.txt")
        assert result is False

    def test_set_and_get_legal_hold(self, lock_service):
        lock_service.set_legal_hold("bucket", "held.txt", True)
        assert lock_service.get_legal_hold("bucket", "held.txt") is True

        lock_service.set_legal_hold("bucket", "held.txt", False)
        assert lock_service.get_legal_hold("bucket", "held.txt") is False

    def test_can_delete_object_no_lock(self, lock_service):
        can_delete, reason = lock_service.can_delete_object("bucket", "unlocked.txt")
        assert can_delete is True
        assert reason == ""

    def test_cannot_delete_object_with_legal_hold(self, lock_service):
        lock_service.set_legal_hold("bucket", "held.txt", True)

        can_delete, reason = lock_service.can_delete_object("bucket", "held.txt")
        assert can_delete is False
        assert "legal hold" in reason.lower()

    def test_cannot_delete_object_with_compliance_retention(self, lock_service):
        future = datetime.now(timezone.utc) + timedelta(days=30)
        retention = ObjectLockRetention(
            mode=RetentionMode.COMPLIANCE,
            retain_until_date=future,
        )
        lock_service.set_object_retention("bucket", "compliant.txt", retention)

        can_delete, reason = lock_service.can_delete_object("bucket", "compliant.txt")
        assert can_delete is False
        assert "COMPLIANCE" in reason

    def test_cannot_delete_governance_without_bypass(self, lock_service):
        future = datetime.now(timezone.utc) + timedelta(days=30)
        retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=future,
        )
        lock_service.set_object_retention("bucket", "governed.txt", retention)

        can_delete, reason = lock_service.can_delete_object("bucket", "governed.txt")
        assert can_delete is False
        assert "GOVERNANCE" in reason

    def test_can_delete_governance_with_bypass(self, lock_service):
        future = datetime.now(timezone.utc) + timedelta(days=30)
        retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=future,
        )
        lock_service.set_object_retention("bucket", "governed.txt", retention)

        can_delete, reason = lock_service.can_delete_object("bucket", "governed.txt", bypass_governance=True)
        assert can_delete is True
        assert reason == ""

    def test_can_delete_expired_retention(self, lock_service):
        past = datetime.now(timezone.utc) - timedelta(days=30)
        retention = ObjectLockRetention(
            mode=RetentionMode.COMPLIANCE,
            retain_until_date=past,
        )
        lock_service.set_object_retention("bucket", "expired.txt", retention)

        can_delete, reason = lock_service.can_delete_object("bucket", "expired.txt")
        assert can_delete is True

    def test_can_overwrite_is_same_as_delete(self, lock_service):
        future = datetime.now(timezone.utc) + timedelta(days=30)
        retention = ObjectLockRetention(
            mode=RetentionMode.GOVERNANCE,
            retain_until_date=future,
        )
        lock_service.set_object_retention("bucket", "overwrite.txt", retention)

        can_overwrite, _ = lock_service.can_overwrite_object("bucket", "overwrite.txt")
        can_delete, _ = lock_service.can_delete_object("bucket", "overwrite.txt")
        assert can_overwrite == can_delete

    def test_delete_object_lock_metadata(self, lock_service):
        lock_service.set_legal_hold("bucket", "cleanup.txt", True)
        lock_service.delete_object_lock_metadata("bucket", "cleanup.txt")

        assert lock_service.get_legal_hold("bucket", "cleanup.txt") is False

    def test_config_caching(self, lock_service):
        config = ObjectLockConfig(enabled=True)
        lock_service.set_bucket_lock_config("cached-bucket", config)

        lock_service.get_bucket_lock_config("cached-bucket")
        assert "cached-bucket" in lock_service._config_cache
287 tests/test_replication.py Normal file
@@ -0,0 +1,287 @@
import json
import time
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

from app.connections import ConnectionStore, RemoteConnection
from app.replication import (
    ReplicationManager,
    ReplicationRule,
    ReplicationStats,
    REPLICATION_MODE_ALL,
    REPLICATION_MODE_NEW_ONLY,
    _create_s3_client,
)
from app.storage import ObjectStorage


@pytest.fixture
def storage(tmp_path: Path):
    storage_root = tmp_path / "data"
    storage_root.mkdir(parents=True)
    return ObjectStorage(storage_root)


@pytest.fixture
def connections(tmp_path: Path):
    connections_path = tmp_path / "connections.json"
    store = ConnectionStore(connections_path)
    conn = RemoteConnection(
        id="test-conn",
        name="Test Remote",
        endpoint_url="http://localhost:9000",
        access_key="remote-access",
        secret_key="remote-secret",
        region="us-east-1",
    )
    store.add(conn)
    return store


@pytest.fixture
def replication_manager(storage, connections, tmp_path):
    rules_path = tmp_path / "replication_rules.json"
    storage_root = tmp_path / "data"
    storage_root.mkdir(exist_ok=True)
    manager = ReplicationManager(storage, connections, rules_path, storage_root)
    yield manager
    manager.shutdown(wait=False)


class TestReplicationStats:
    def test_to_dict(self):
        stats = ReplicationStats(
            objects_synced=10,
            objects_pending=5,
            objects_orphaned=2,
            bytes_synced=1024,
            last_sync_at=1234567890.0,
            last_sync_key="test/key.txt",
        )
        result = stats.to_dict()
        assert result["objects_synced"] == 10
        assert result["objects_pending"] == 5
        assert result["objects_orphaned"] == 2
        assert result["bytes_synced"] == 1024
        assert result["last_sync_at"] == 1234567890.0
        assert result["last_sync_key"] == "test/key.txt"

    def test_from_dict(self):
        data = {
            "objects_synced": 15,
            "objects_pending": 3,
            "objects_orphaned": 1,
            "bytes_synced": 2048,
            "last_sync_at": 9876543210.0,
            "last_sync_key": "another/key.txt",
        }
        stats = ReplicationStats.from_dict(data)
        assert stats.objects_synced == 15
        assert stats.objects_pending == 3
        assert stats.objects_orphaned == 1
        assert stats.bytes_synced == 2048
        assert stats.last_sync_at == 9876543210.0
        assert stats.last_sync_key == "another/key.txt"

    def test_from_dict_with_defaults(self):
        stats = ReplicationStats.from_dict({})
        assert stats.objects_synced == 0
        assert stats.objects_pending == 0
        assert stats.objects_orphaned == 0
        assert stats.bytes_synced == 0
        assert stats.last_sync_at is None
        assert stats.last_sync_key is None


class TestReplicationRule:
    def test_to_dict(self):
        rule = ReplicationRule(
            bucket_name="source-bucket",
            target_connection_id="test-conn",
            target_bucket="dest-bucket",
            enabled=True,
            mode=REPLICATION_MODE_ALL,
            created_at=1234567890.0,
        )
        result = rule.to_dict()
        assert result["bucket_name"] == "source-bucket"
        assert result["target_connection_id"] == "test-conn"
        assert result["target_bucket"] == "dest-bucket"
        assert result["enabled"] is True
        assert result["mode"] == REPLICATION_MODE_ALL
        assert result["created_at"] == 1234567890.0
        assert "stats" in result

    def test_from_dict(self):
        data = {
            "bucket_name": "my-bucket",
            "target_connection_id": "conn-123",
            "target_bucket": "remote-bucket",
            "enabled": False,
            "mode": REPLICATION_MODE_NEW_ONLY,
            "created_at": 1111111111.0,
            "stats": {"objects_synced": 5},
        }
        rule = ReplicationRule.from_dict(data)
        assert rule.bucket_name == "my-bucket"
        assert rule.target_connection_id == "conn-123"
        assert rule.target_bucket == "remote-bucket"
        assert rule.enabled is False
        assert rule.mode == REPLICATION_MODE_NEW_ONLY
        assert rule.created_at == 1111111111.0
        assert rule.stats.objects_synced == 5

    def test_from_dict_defaults_mode(self):
        data = {
            "bucket_name": "my-bucket",
            "target_connection_id": "conn-123",
            "target_bucket": "remote-bucket",
        }
        rule = ReplicationRule.from_dict(data)
        assert rule.mode == REPLICATION_MODE_NEW_ONLY
        assert rule.created_at is None


class TestReplicationManager:
    def test_get_rule_not_exists(self, replication_manager):
        rule = replication_manager.get_rule("nonexistent-bucket")
        assert rule is None

    def test_set_and_get_rule(self, replication_manager):
        rule = ReplicationRule(
            bucket_name="my-bucket",
            target_connection_id="test-conn",
            target_bucket="remote-bucket",
            enabled=True,
            mode=REPLICATION_MODE_NEW_ONLY,
            created_at=time.time(),
        )
        replication_manager.set_rule(rule)

        retrieved = replication_manager.get_rule("my-bucket")
        assert retrieved is not None
        assert retrieved.bucket_name == "my-bucket"
        assert retrieved.target_connection_id == "test-conn"
        assert retrieved.target_bucket == "remote-bucket"

    def test_delete_rule(self, replication_manager):
        rule = ReplicationRule(
            bucket_name="to-delete",
            target_connection_id="test-conn",
            target_bucket="remote-bucket",
        )
        replication_manager.set_rule(rule)
        assert replication_manager.get_rule("to-delete") is not None

        replication_manager.delete_rule("to-delete")
        assert replication_manager.get_rule("to-delete") is None

    def test_save_and_reload_rules(self, replication_manager, tmp_path):
        rule = ReplicationRule(
            bucket_name="persistent-bucket",
            target_connection_id="test-conn",
            target_bucket="remote-bucket",
            enabled=True,
        )
        replication_manager.set_rule(rule)

        rules_path = tmp_path / "replication_rules.json"
        assert rules_path.exists()
        data = json.loads(rules_path.read_text())
        assert "persistent-bucket" in data

    @patch("app.replication._create_s3_client")
    def test_check_endpoint_health_success(self, mock_create_client, replication_manager, connections):
        mock_client = MagicMock()
        mock_client.list_buckets.return_value = {"Buckets": []}
        mock_create_client.return_value = mock_client

        conn = connections.get("test-conn")
        result = replication_manager.check_endpoint_health(conn)
        assert result is True
        mock_client.list_buckets.assert_called_once()

    @patch("app.replication._create_s3_client")
    def test_check_endpoint_health_failure(self, mock_create_client, replication_manager, connections):
        mock_client = MagicMock()
        mock_client.list_buckets.side_effect = Exception("Connection refused")
        mock_create_client.return_value = mock_client

        conn = connections.get("test-conn")
        result = replication_manager.check_endpoint_health(conn)
        assert result is False

    def test_trigger_replication_no_rule(self, replication_manager):
        replication_manager.trigger_replication("no-such-bucket", "test.txt", "write")

    def test_trigger_replication_disabled_rule(self, replication_manager):
        rule = ReplicationRule(
            bucket_name="disabled-bucket",
            target_connection_id="test-conn",
            target_bucket="remote-bucket",
            enabled=False,
        )
        replication_manager.set_rule(rule)
        replication_manager.trigger_replication("disabled-bucket", "test.txt", "write")

    def test_trigger_replication_missing_connection(self, replication_manager):
        rule = ReplicationRule(
            bucket_name="orphan-bucket",
            target_connection_id="missing-conn",
            target_bucket="remote-bucket",
            enabled=True,
        )
        replication_manager.set_rule(rule)
        replication_manager.trigger_replication("orphan-bucket", "test.txt", "write")

    def test_replicate_task_path_traversal_blocked(self, replication_manager, connections):
        rule = ReplicationRule(
            bucket_name="secure-bucket",
            target_connection_id="test-conn",
            target_bucket="remote-bucket",
            enabled=True,
        )
        replication_manager.set_rule(rule)
        conn = connections.get("test-conn")

        replication_manager._replicate_task("secure-bucket", "../../../etc/passwd", rule, conn, "write")
        replication_manager._replicate_task("secure-bucket", "/root/secret", rule, conn, "write")
        replication_manager._replicate_task("secure-bucket", "..\\..\\windows\\system32", rule, conn, "write")


class TestCreateS3Client:
    @patch("app.replication.boto3.client")
    def test_creates_client_with_correct_config(self, mock_boto_client):
        conn = RemoteConnection(
            id="test",
            name="Test",
            endpoint_url="http://localhost:9000",
            access_key="access",
            secret_key="secret",
            region="eu-west-1",
        )
        _create_s3_client(conn)

        mock_boto_client.assert_called_once()
        call_kwargs = mock_boto_client.call_args[1]
        assert call_kwargs["endpoint_url"] == "http://localhost:9000"
        assert call_kwargs["aws_access_key_id"] == "access"
        assert call_kwargs["aws_secret_access_key"] == "secret"
        assert call_kwargs["region_name"] == "eu-west-1"

    @patch("app.replication.boto3.client")
    def test_health_check_mode_minimal_retries(self, mock_boto_client):
        conn = RemoteConnection(
            id="test",
            name="Test",
            endpoint_url="http://localhost:9000",
            access_key="access",
            secret_key="secret",
        )
        _create_s3_client(conn, health_check=True)

        call_kwargs = mock_boto_client.call_args[1]
        config = call_kwargs["config"]
        assert config.retries["max_attempts"] == 1