Compare commits

25 Commits

| SHA1 |
|---|
| 476dc79e42 |
| 87c7f1bc7d |
| 23ea164215 |
| 7a8acfb933 |
| 71327bcbf1 |
| c0603c592b |
| 912a7dc74f |
| bb6590fc5e |
| 4de936cea9 |
| adb9017580 |
| 4adfcc4131 |
| ebc315c1cc |
| 5ab62a00ff |
| 9c3518de63 |
| a52657e684 |
| 53297abe1e |
| a3b9db544c |
| f5d2e1c488 |
| f04c6a9cdc |
| 7a494abb96 |
| 956d17a649 |
| 5522f9ac04 |
| 3742f0228e |
| ba694cb717 |
| 433d291b4b |
@@ -32,6 +32,6 @@ ENV APP_HOST=0.0.0.0 \
    FLASK_DEBUG=0

HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
-  CMD python -c "import requests; requests.get('http://localhost:5000/healthz', timeout=2)"
+  CMD python -c "import requests; requests.get('http://localhost:5000/myfsio/health', timeout=2)"

CMD ["./docker-entrypoint.sh"]
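The HEALTHCHECK now probes the relocated `/myfsio/health` endpoint. A minimal sketch for checking it by hand, assuming the container's port 5000 is mapped to the host:

```python
# Minimal sketch: verify the relocated health endpoint by hand.
# Assumes the container maps port 5000 to the host; adjust as needed.
import requests

resp = requests.get("http://localhost:5000/myfsio/health", timeout=2)
resp.raise_for_status()  # Docker marks the container unhealthy on any exception
print(resp.json())       # e.g. {"status": "ok"}
```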
16 README.md
@@ -149,19 +149,13 @@ All endpoints require AWS Signature Version 4 authentication unless using presig
| `POST` | `/<bucket>/<key>?uploadId=X` | Complete multipart upload |
| `DELETE` | `/<bucket>/<key>?uploadId=X` | Abort multipart upload |

-### Presigned URLs
+### Bucket Policies (S3-compatible)

| Method | Endpoint | Description |
|--------|----------|-------------|
-| `POST` | `/presign/<bucket>/<key>` | Generate presigned URL |
-
-### Bucket Policies
-
-| Method | Endpoint | Description |
-|--------|----------|-------------|
-| `GET` | `/bucket-policy/<bucket>` | Get bucket policy |
-| `PUT` | `/bucket-policy/<bucket>` | Set bucket policy |
-| `DELETE` | `/bucket-policy/<bucket>` | Delete bucket policy |
+| `GET` | `/<bucket>?policy` | Get bucket policy |
+| `PUT` | `/<bucket>?policy` | Set bucket policy |
+| `DELETE` | `/<bucket>?policy` | Delete bucket policy |

### Versioning
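Because the policy endpoints now live on the standard `?policy` subresource, stock S3 SDKs can manage them directly. A hedged boto3 sketch; the endpoint URL, bucket, and credentials are placeholders, not values from this repo:

```python
# Sketch: exercising the S3-compatible ?policy subresource with boto3.
import json
import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:5000",   # placeholder
    aws_access_key_id="EXAMPLE_KEY",        # placeholder
    aws_secret_access_key="EXAMPLE_SECRET", # placeholder
)

policy = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Principal": "*",
                   "Action": "s3:GetObject", "Resource": "arn:aws:s3:::demo/*"}],
}
# boto3 issues PUT /demo?policy and GET /demo?policy under the hood.
s3.put_bucket_policy(Bucket="demo", Policy=json.dumps(policy))
print(s3.get_bucket_policy(Bucket="demo")["Policy"])
```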
@@ -175,7 +169,7 @@ All endpoints require AWS Signature Version 4 authentication unless using presig

| Method | Endpoint | Description |
|--------|----------|-------------|
-| `GET` | `/healthz` | Health check endpoint |
+| `GET` | `/myfsio/health` | Health check endpoint |

## IAM & Access Control
@@ -16,6 +16,7 @@ from flask_wtf.csrf import CSRFError
from werkzeug.middleware.proxy_fix import ProxyFix

from .access_logging import AccessLoggingService
+from .operation_metrics import OperationMetricsCollector, classify_endpoint
from .compression import GzipMiddleware
from .acl import AclService
from .bucket_policies import BucketPolicyStore
@@ -187,6 +188,40 @@ def create_app(
    app.extensions["notifications"] = notification_service
    app.extensions["access_logging"] = access_logging_service

+    operation_metrics_collector = None
+    if app.config.get("OPERATION_METRICS_ENABLED", False):
+        operation_metrics_collector = OperationMetricsCollector(
+            storage_root,
+            interval_minutes=app.config.get("OPERATION_METRICS_INTERVAL_MINUTES", 5),
+            retention_hours=app.config.get("OPERATION_METRICS_RETENTION_HOURS", 24),
+        )
+        app.extensions["operation_metrics"] = operation_metrics_collector
+
+    system_metrics_collector = None
+    if app.config.get("METRICS_HISTORY_ENABLED", False):
+        from .system_metrics import SystemMetricsCollector
+        system_metrics_collector = SystemMetricsCollector(
+            storage_root,
+            interval_minutes=app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5),
+            retention_hours=app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24),
+        )
+        system_metrics_collector.set_storage(storage)
+        app.extensions["system_metrics"] = system_metrics_collector
+
+    site_sync_worker = None
+    if app.config.get("SITE_SYNC_ENABLED", False):
+        from .site_sync import SiteSyncWorker
+        site_sync_worker = SiteSyncWorker(
+            storage=storage,
+            connections=connections,
+            replication_manager=replication,
+            storage_root=storage_root,
+            interval_seconds=app.config.get("SITE_SYNC_INTERVAL_SECONDS", 60),
+            batch_size=app.config.get("SITE_SYNC_BATCH_SIZE", 100),
+        )
+        site_sync_worker.start()
+        app.extensions["site_sync"] = site_sync_worker
+
    @app.errorhandler(500)
    def internal_error(error):
        return render_template('500.html'), 500
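All three collectors are opt-in and default to disabled. A minimal sketch of toggling them through the `overrides` hook that `AppConfig.from_env` exposes (see app/config.py below); the values are illustrative, and the `app.config` import path assumes the package layout shown in this diff:

```python
# Sketch: enabling the new background collectors programmatically.
from app.config import AppConfig  # assumed import path

cfg = AppConfig.from_env(overrides={
    "OPERATION_METRICS_ENABLED": "1",   # wires OperationMetricsCollector
    "METRICS_HISTORY_ENABLED": "1",     # wires SystemMetricsCollector
    "SITE_SYNC_ENABLED": "0",           # leaves the SiteSyncWorker off
})
print(cfg.operation_metrics_enabled, cfg.metrics_history_enabled, cfg.site_sync_enabled)
```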
@@ -227,6 +262,30 @@ def create_app(
        except (ValueError, OSError):
            return "Unknown"

+    @app.template_filter("format_datetime")
+    def format_datetime_filter(dt, include_tz: bool = True) -> str:
+        """Format datetime object as human-readable string in configured timezone."""
+        from datetime import datetime, timezone as dt_timezone
+        from zoneinfo import ZoneInfo
+        if not dt:
+            return ""
+        try:
+            display_tz = app.config.get("DISPLAY_TIMEZONE", "UTC")
+            if display_tz and display_tz != "UTC":
+                try:
+                    tz = ZoneInfo(display_tz)
+                    if dt.tzinfo is None:
+                        dt = dt.replace(tzinfo=dt_timezone.utc)
+                    dt = dt.astimezone(tz)
+                except (KeyError, ValueError):
+                    pass
+            tz_abbr = dt.strftime("%Z") or "UTC"
+            if include_tz:
+                return f"{dt.strftime('%b %d, %Y %H:%M')} ({tz_abbr})"
+            return dt.strftime("%b %d, %Y %H:%M")
+        except (ValueError, AttributeError):
+            return str(dt)
+
    if include_api:
        from .s3_api import s3_api_bp
        from .kms_api import kms_api_bp
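A self-contained sketch of how a Jinja template would consume the filter above; the template string is illustrative, not taken from this repo's templates, and the filter body is trimmed to UTC-only:

```python
# Sketch: using a format_datetime template filter from Jinja.
from datetime import datetime, timezone
from flask import Flask, render_template_string

app = Flask(__name__)

@app.template_filter("format_datetime")
def format_datetime_filter(dt, include_tz: bool = True) -> str:
    # Trimmed-down stand-in for the implementation above (UTC only).
    base = dt.strftime("%b %d, %Y %H:%M")
    return f"{base} (UTC)" if include_tz else base

with app.app_context():
    html = render_template_string(
        "Uploaded: {{ ts | format_datetime }}",
        ts=datetime(2024, 5, 1, 12, 30, tzinfo=timezone.utc),
    )
    print(html)  # Uploaded: May 01, 2024 12:30 (UTC)
```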
@@ -254,9 +313,9 @@ def create_app(
            return render_template("404.html"), 404
        return error

-    @app.get("/healthz")
+    @app.get("/myfsio/health")
    def healthcheck() -> Dict[str, str]:
-        return {"status": "ok", "version": app.config.get("APP_VERSION", "unknown")}
+        return {"status": "ok"}

    return app
@@ -332,6 +391,7 @@ def _configure_logging(app: Flask) -> None:
    def _log_request_start() -> None:
        g.request_id = uuid.uuid4().hex
        g.request_started_at = time.perf_counter()
+        g.request_bytes_in = request.content_length or 0
        app.logger.info(
            "Request started",
            extra={"path": request.path, "method": request.method, "remote_addr": request.remote_addr},
@@ -353,4 +413,21 @@ def _configure_logging(app: Flask) -> None:
            },
        )
        response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"

+        operation_metrics = app.extensions.get("operation_metrics")
+        if operation_metrics:
+            bytes_in = getattr(g, "request_bytes_in", 0)
+            bytes_out = response.content_length or 0
+            error_code = getattr(g, "s3_error_code", None)
+            endpoint_type = classify_endpoint(request.path)
+            operation_metrics.record_request(
+                method=request.method,
+                endpoint_type=endpoint_type,
+                status_code=response.status_code,
+                latency_ms=duration_ms,
+                bytes_in=bytes_in,
+                bytes_out=bytes_out,
+                error_code=error_code,
+            )
+
        return response
@@ -1,9 +1,10 @@
from __future__ import annotations

+import ipaddress
import json
import re
import time
-from dataclasses import dataclass
+from dataclasses import dataclass, field
from fnmatch import fnmatch, translate
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Pattern, Sequence, Tuple
@@ -11,14 +12,71 @@ from typing import Any, Dict, Iterable, List, Optional, Pattern, Sequence, Tuple

RESOURCE_PREFIX = "arn:aws:s3:::"


+def _match_string_like(value: str, pattern: str) -> bool:
+    regex = translate(pattern)
+    return bool(re.match(regex, value, re.IGNORECASE))
+
+
+def _ip_in_cidr(ip_str: str, cidr: str) -> bool:
+    try:
+        ip = ipaddress.ip_address(ip_str)
+        network = ipaddress.ip_network(cidr, strict=False)
+        return ip in network
+    except ValueError:
+        return False
+
+
+def _evaluate_condition_operator(
+    operator: str,
+    condition_key: str,
+    condition_values: List[str],
+    context: Dict[str, Any],
+) -> bool:
+    context_value = context.get(condition_key)
+    op_lower = operator.lower()
+    if_exists = op_lower.endswith("ifexists")
+    if if_exists:
+        op_lower = op_lower[:-8]
+
+    if context_value is None:
+        return if_exists
+
+    context_value_str = str(context_value)
+    context_value_lower = context_value_str.lower()
+
+    if op_lower == "stringequals":
+        return context_value_str in condition_values
+    elif op_lower == "stringnotequals":
+        return context_value_str not in condition_values
+    elif op_lower == "stringequalsignorecase":
+        return context_value_lower in [v.lower() for v in condition_values]
+    elif op_lower == "stringnotequalsignorecase":
+        return context_value_lower not in [v.lower() for v in condition_values]
+    elif op_lower == "stringlike":
+        return any(_match_string_like(context_value_str, p) for p in condition_values)
+    elif op_lower == "stringnotlike":
+        return not any(_match_string_like(context_value_str, p) for p in condition_values)
+    elif op_lower == "ipaddress":
+        return any(_ip_in_cidr(context_value_str, cidr) for cidr in condition_values)
+    elif op_lower == "notipaddress":
+        return not any(_ip_in_cidr(context_value_str, cidr) for cidr in condition_values)
+    elif op_lower == "bool":
+        bool_val = context_value_lower in ("true", "1", "yes")
+        return str(bool_val).lower() in [v.lower() for v in condition_values]
+    elif op_lower == "null":
+        is_null = context_value is None or context_value == ""
+        expected_null = condition_values[0].lower() in ("true", "1", "yes") if condition_values else True
+        return is_null == expected_null
+
+    return True
+
+
ACTION_ALIASES = {
    # List actions
    "s3:listbucket": "list",
    "s3:listallmybuckets": "list",
    "s3:listbucketversions": "list",
    "s3:listmultipartuploads": "list",
    "s3:listparts": "list",
    # Read actions
    "s3:getobject": "read",
    "s3:getobjectversion": "read",
    "s3:getobjecttagging": "read",
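A short sketch of what the operator evaluation above yields for a typical request context; the context keys mirror the ones `_build_policy_context()` assembles in app/s3_api.py later in this diff, and the outputs assume the implementation above:

```python
# Sketch: evaluating policy condition operators against a request context.
context = {"aws:SourceIp": "10.0.0.7", "aws:SecureTransport": "false"}

# IpAddress matches when the request IP falls inside any listed CIDR.
print(_evaluate_condition_operator("IpAddress", "aws:SourceIp", ["10.0.0.0/8"], context))   # True
# The ...IfExists suffix makes a missing key evaluate to True instead of False.
print(_evaluate_condition_operator("StringEqualsIfExists", "aws:Referer", ["x"], context))  # True
print(_evaluate_condition_operator("Bool", "aws:SecureTransport", ["true"], context))       # False
```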
@@ -27,7 +85,6 @@ ACTION_ALIASES = {
    "s3:getbucketversioning": "read",
    "s3:headobject": "read",
    "s3:headbucket": "read",
    # Write actions
    "s3:putobject": "write",
    "s3:createbucket": "write",
    "s3:putobjecttagging": "write",
@@ -37,26 +94,30 @@ ACTION_ALIASES = {
    "s3:completemultipartupload": "write",
    "s3:abortmultipartupload": "write",
    "s3:copyobject": "write",
    # Delete actions
    "s3:deleteobject": "delete",
    "s3:deleteobjectversion": "delete",
    "s3:deletebucket": "delete",
    "s3:deleteobjecttagging": "delete",
    # Share actions (ACL)
    "s3:putobjectacl": "share",
    "s3:putbucketacl": "share",
    "s3:getbucketacl": "share",
    # Policy actions
    "s3:putbucketpolicy": "policy",
    "s3:getbucketpolicy": "policy",
    "s3:deletebucketpolicy": "policy",
    # Replication actions
    "s3:getreplicationconfiguration": "replication",
    "s3:putreplicationconfiguration": "replication",
    "s3:deletereplicationconfiguration": "replication",
    "s3:replicateobject": "replication",
    "s3:replicatetags": "replication",
    "s3:replicatedelete": "replication",
    "s3:getlifecycleconfiguration": "lifecycle",
    "s3:putlifecycleconfiguration": "lifecycle",
    "s3:deletelifecycleconfiguration": "lifecycle",
    "s3:getbucketlifecycle": "lifecycle",
    "s3:putbucketlifecycle": "lifecycle",
    "s3:getbucketcors": "cors",
    "s3:putbucketcors": "cors",
    "s3:deletebucketcors": "cors",
}
@@ -135,18 +196,16 @@ class BucketPolicyStatement:
    principals: List[str] | str
    actions: List[str]
    resources: List[Tuple[str | None, str | None]]
    # Performance: Pre-compiled regex patterns for resource matching
+    conditions: Dict[str, Dict[str, List[str]]] = field(default_factory=dict)
    _compiled_patterns: List[Tuple[str | None, Optional[Pattern[str]]]] | None = None

    def _get_compiled_patterns(self) -> List[Tuple[str | None, Optional[Pattern[str]]]]:
        """Lazily compile fnmatch patterns to regex for faster matching."""
        if self._compiled_patterns is None:
            self._compiled_patterns = []
            for resource_bucket, key_pattern in self.resources:
                if key_pattern is None:
                    self._compiled_patterns.append((resource_bucket, None))
                else:
                    # Convert fnmatch pattern to regex
                    regex_pattern = translate(key_pattern)
                    self._compiled_patterns.append((resource_bucket, re.compile(regex_pattern)))
        return self._compiled_patterns
@@ -173,11 +232,21 @@ class BucketPolicyStatement:
                if not key:
                    return True
                continue
            # Performance: Use pre-compiled regex instead of fnmatch
            if compiled_pattern.match(key):
                return True
        return False

+    def matches_condition(self, context: Optional[Dict[str, Any]]) -> bool:
+        if not self.conditions:
+            return True
+        if context is None:
+            context = {}
+        for operator, key_values in self.conditions.items():
+            for condition_key, condition_values in key_values.items():
+                if not _evaluate_condition_operator(operator, condition_key, condition_values, context):
+                    return False
+        return True


class BucketPolicyStore:
    """Loads bucket policies from disk and evaluates statements."""
@@ -219,6 +288,7 @@ class BucketPolicyStore:
        bucket: Optional[str],
        object_key: Optional[str],
        action: str,
+        context: Optional[Dict[str, Any]] = None,
    ) -> str | None:
        bucket = (bucket or "").lower()
        statements = self._policies.get(bucket) or []
@@ -230,6 +300,8 @@ class BucketPolicyStore:
                continue
            if not statement.matches_resource(bucket, object_key):
                continue
+            if not statement.matches_condition(context):
+                continue
            if statement.effect == "deny":
                return "deny"
            decision = "allow"
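A sketch of a policy document the updated `evaluate()` can now enforce. The `Condition` block is standard S3 policy syntax; the bucket name and CIDR are illustrative:

```python
# Sketch: a bucket policy with a Condition block that the store evaluates.
policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "OfficeOnly",
            "Effect": "Deny",
            "Principal": "*",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::demo/*",
            "Condition": {
                "NotIpAddress": {"aws:SourceIp": ["192.0.2.0/24"]}
            },
        }
    ],
}
# A request from outside 192.0.2.0/24 satisfies NotIpAddress, so the Deny
# statement matches and evaluate(...) returns "deny" before any Allow applies.
```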
@@ -294,6 +366,7 @@ class BucketPolicyStore:
            if not resources:
                continue
            effect = statement.get("Effect", "Allow").lower()
+            conditions = self._normalize_conditions(statement.get("Condition", {}))
            statements.append(
                BucketPolicyStatement(
                    sid=statement.get("Sid"),
@@ -301,6 +374,24 @@ class BucketPolicyStore:
                    principals=principals,
                    actions=actions or ["*"],
                    resources=resources,
+                    conditions=conditions,
                )
            )
        return statements

+    def _normalize_conditions(self, condition_block: Dict[str, Any]) -> Dict[str, Dict[str, List[str]]]:
+        if not condition_block or not isinstance(condition_block, dict):
+            return {}
+        normalized: Dict[str, Dict[str, List[str]]] = {}
+        for operator, key_values in condition_block.items():
+            if not isinstance(key_values, dict):
+                continue
+            normalized[operator] = {}
+            for cond_key, cond_values in key_values.items():
+                if isinstance(cond_values, str):
+                    normalized[operator][cond_key] = [cond_values]
+                elif isinstance(cond_values, list):
+                    normalized[operator][cond_key] = [str(v) for v in cond_values]
+                else:
+                    normalized[operator][cond_key] = [str(cond_values)]
+        return normalized
101 app/config.py
@@ -1,6 +1,7 @@
from __future__ import annotations

import os
+import re
import secrets
import shutil
import sys
@@ -9,6 +10,13 @@ from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, Optional


+def _validate_rate_limit(value: str) -> str:
+    pattern = r"^\d+\s+per\s+(second|minute|hour|day)$"
+    if not re.match(pattern, value):
+        raise ValueError(f"Invalid rate limit format: {value}. Expected format: '200 per minute'")
+    return value
+
+
if getattr(sys, "frozen", False):
    # Running in a PyInstaller bundle
    PROJECT_ROOT = Path(sys._MEIPASS)
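A quick sketch of the grammar the validator above accepts, per its regex:

```python
# Sketch: the accepted rate-limit format is "<count> per <unit>".
print(_validate_rate_limit("200 per minute"))   # returns the value unchanged
print(_validate_rate_limit("10 per second"))    # unit must be second/minute/hour/day

try:
    _validate_rate_limit("200/minute")          # slash syntax is rejected
except ValueError as exc:
    print(exc)
```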
@@ -76,6 +84,19 @@ class AppConfig:
    display_timezone: str
    lifecycle_enabled: bool
    lifecycle_interval_seconds: int
+    metrics_history_enabled: bool
+    metrics_history_retention_hours: int
+    metrics_history_interval_minutes: int
+    operation_metrics_enabled: bool
+    operation_metrics_interval_minutes: int
+    operation_metrics_retention_hours: int
+    server_threads: int
+    server_connection_limit: int
+    server_backlog: int
+    server_channel_timeout: int
+    site_sync_enabled: bool
+    site_sync_interval_seconds: int
+    site_sync_batch_size: int

    @classmethod
    def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
@@ -148,7 +169,7 @@ class AppConfig:
        log_path = log_dir / str(_get("LOG_FILE", "app.log"))
        log_max_bytes = int(_get("LOG_MAX_BYTES", 5 * 1024 * 1024))
        log_backup_count = int(_get("LOG_BACKUP_COUNT", 3))
-        ratelimit_default = str(_get("RATE_LIMIT_DEFAULT", "200 per minute"))
+        ratelimit_default = _validate_rate_limit(str(_get("RATE_LIMIT_DEFAULT", "200 per minute")))
        ratelimit_storage_uri = str(_get("RATE_LIMIT_STORAGE_URI", "memory://"))

        def _csv(value: str, default: list[str]) -> list[str]:
@@ -172,6 +193,20 @@ class AppConfig:
        kms_keys_path = Path(_get("KMS_KEYS_PATH", encryption_keys_dir / "kms_keys.json")).resolve()
        default_encryption_algorithm = str(_get("DEFAULT_ENCRYPTION_ALGORITHM", "AES256"))
        display_timezone = str(_get("DISPLAY_TIMEZONE", "UTC"))
+        metrics_history_enabled = str(_get("METRICS_HISTORY_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
+        metrics_history_retention_hours = int(_get("METRICS_HISTORY_RETENTION_HOURS", 24))
+        metrics_history_interval_minutes = int(_get("METRICS_HISTORY_INTERVAL_MINUTES", 5))
+        operation_metrics_enabled = str(_get("OPERATION_METRICS_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
+        operation_metrics_interval_minutes = int(_get("OPERATION_METRICS_INTERVAL_MINUTES", 5))
+        operation_metrics_retention_hours = int(_get("OPERATION_METRICS_RETENTION_HOURS", 24))
+
+        server_threads = int(_get("SERVER_THREADS", 4))
+        server_connection_limit = int(_get("SERVER_CONNECTION_LIMIT", 100))
+        server_backlog = int(_get("SERVER_BACKLOG", 1024))
+        server_channel_timeout = int(_get("SERVER_CHANNEL_TIMEOUT", 120))
+        site_sync_enabled = str(_get("SITE_SYNC_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
+        site_sync_interval_seconds = int(_get("SITE_SYNC_INTERVAL_SECONDS", 60))
+        site_sync_batch_size = int(_get("SITE_SYNC_BATCH_SIZE", 100))

        return cls(storage_root=storage_root,
                   max_upload_size=max_upload_size,
@@ -210,7 +245,20 @@ class AppConfig:
                   default_encryption_algorithm=default_encryption_algorithm,
                   display_timezone=display_timezone,
                   lifecycle_enabled=lifecycle_enabled,
-                   lifecycle_interval_seconds=lifecycle_interval_seconds)
+                   lifecycle_interval_seconds=lifecycle_interval_seconds,
+                   metrics_history_enabled=metrics_history_enabled,
+                   metrics_history_retention_hours=metrics_history_retention_hours,
+                   metrics_history_interval_minutes=metrics_history_interval_minutes,
+                   operation_metrics_enabled=operation_metrics_enabled,
+                   operation_metrics_interval_minutes=operation_metrics_interval_minutes,
+                   operation_metrics_retention_hours=operation_metrics_retention_hours,
+                   server_threads=server_threads,
+                   server_connection_limit=server_connection_limit,
+                   server_backlog=server_backlog,
+                   server_channel_timeout=server_channel_timeout,
+                   site_sync_enabled=site_sync_enabled,
+                   site_sync_interval_seconds=site_sync_interval_seconds,
+                   site_sync_batch_size=site_sync_batch_size)

    def validate_and_report(self) -> list[str]:
        """Validate configuration and return a list of warnings/issues.
@@ -270,7 +318,35 @@ class AppConfig:

        if "*" in self.cors_origins:
            issues.append("INFO: CORS_ORIGINS is set to '*'. Consider restricting to specific domains in production.")

+        if not (1 <= self.server_threads <= 64):
+            issues.append(f"CRITICAL: SERVER_THREADS={self.server_threads} is outside valid range (1-64). Server cannot start.")
+        if not (10 <= self.server_connection_limit <= 1000):
+            issues.append(f"CRITICAL: SERVER_CONNECTION_LIMIT={self.server_connection_limit} is outside valid range (10-1000). Server cannot start.")
+        if not (64 <= self.server_backlog <= 4096):
+            issues.append(f"CRITICAL: SERVER_BACKLOG={self.server_backlog} is outside valid range (64-4096). Server cannot start.")
+        if not (10 <= self.server_channel_timeout <= 300):
+            issues.append(f"CRITICAL: SERVER_CHANNEL_TIMEOUT={self.server_channel_timeout} is outside valid range (10-300). Server cannot start.")
+
+        if sys.platform != "win32":
+            try:
+                import resource
+                soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
+                threshold = int(soft_limit * 0.8)
+                if self.server_connection_limit > threshold:
+                    issues.append(f"WARNING: SERVER_CONNECTION_LIMIT={self.server_connection_limit} exceeds 80% of system file descriptor limit (soft={soft_limit}). Consider running 'ulimit -n {self.server_connection_limit + 100}'.")
+            except (ImportError, OSError):
+                pass
+
+        try:
+            import psutil
+            available_mb = psutil.virtual_memory().available / (1024 * 1024)
+            estimated_mb = self.server_threads * 50
+            if estimated_mb > available_mb * 0.5:
+                issues.append(f"WARNING: SERVER_THREADS={self.server_threads} may require ~{estimated_mb}MB memory, exceeding 50% of available RAM ({int(available_mb)}MB).")
+        except ImportError:
+            pass
+
        return issues

    def print_startup_summary(self) -> None:
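The file-descriptor headroom check above boils down to simple arithmetic against the process soft limit. A standalone sketch, runnable on POSIX systems:

```python
# Sketch of the file-descriptor headroom check, using the stdlib resource module.
import resource

soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
connection_limit = 100                   # SERVER_CONNECTION_LIMIT default
threshold = int(soft_limit * 0.8)        # warn past 80% of the soft limit
if connection_limit > threshold:
    print(f"WARNING: raise the limit, e.g. ulimit -n {connection_limit + 100}")
else:
    print(f"{connection_limit} connections fit under soft limit {soft_limit}")
```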
@@ -288,6 +364,10 @@ class AppConfig:
            print(f" ENCRYPTION: Enabled (Master key: {self.encryption_master_key_path})")
        if self.kms_enabled:
            print(f" KMS: Enabled (Keys: {self.kms_keys_path})")
+        print(f" SERVER_THREADS: {self.server_threads}")
+        print(f" CONNECTION_LIMIT: {self.server_connection_limit}")
+        print(f" BACKLOG: {self.server_backlog}")
+        print(f" CHANNEL_TIMEOUT: {self.server_channel_timeout}s")
        print("=" * 60)

        issues = self.validate_and_report()
@@ -337,4 +417,19 @@ class AppConfig:
            "KMS_KEYS_PATH": str(self.kms_keys_path),
            "DEFAULT_ENCRYPTION_ALGORITHM": self.default_encryption_algorithm,
            "DISPLAY_TIMEZONE": self.display_timezone,
            "LIFECYCLE_ENABLED": self.lifecycle_enabled,
            "LIFECYCLE_INTERVAL_SECONDS": self.lifecycle_interval_seconds,
+            "METRICS_HISTORY_ENABLED": self.metrics_history_enabled,
+            "METRICS_HISTORY_RETENTION_HOURS": self.metrics_history_retention_hours,
+            "METRICS_HISTORY_INTERVAL_MINUTES": self.metrics_history_interval_minutes,
+            "OPERATION_METRICS_ENABLED": self.operation_metrics_enabled,
+            "OPERATION_METRICS_INTERVAL_MINUTES": self.operation_metrics_interval_minutes,
+            "OPERATION_METRICS_RETENTION_HOURS": self.operation_metrics_retention_hours,
+            "SERVER_THREADS": self.server_threads,
+            "SERVER_CONNECTION_LIMIT": self.server_connection_limit,
+            "SERVER_BACKLOG": self.server_backlog,
+            "SERVER_CHANNEL_TIMEOUT": self.server_channel_timeout,
+            "SITE_SYNC_ENABLED": self.site_sync_enabled,
+            "SITE_SYNC_INTERVAL_SECONDS": self.site_sync_interval_seconds,
+            "SITE_SYNC_BATCH_SIZE": self.site_sync_batch_size,
        }
15 app/iam.py
@@ -1,5 +1,6 @@
from __future__ import annotations

+import hmac
import json
import math
import secrets
@@ -15,7 +16,7 @@ class IamError(RuntimeError):
    """Raised when authentication or authorization fails."""


-S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication"}
+S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication", "lifecycle", "cors"}
IAM_ACTIONS = {
    "iam:list_users",
    "iam:create_user",
@@ -71,6 +72,16 @@ ACTION_ALIASES = {
    "s3:replicateobject": "replication",
    "s3:replicatetags": "replication",
    "s3:replicatedelete": "replication",
+    "lifecycle": "lifecycle",
+    "s3:getlifecycleconfiguration": "lifecycle",
+    "s3:putlifecycleconfiguration": "lifecycle",
+    "s3:deletelifecycleconfiguration": "lifecycle",
+    "s3:getbucketlifecycle": "lifecycle",
+    "s3:putbucketlifecycle": "lifecycle",
+    "cors": "cors",
+    "s3:getbucketcors": "cors",
+    "s3:putbucketcors": "cors",
+    "s3:deletebucketcors": "cors",
    "iam:listusers": "iam:list_users",
    "iam:createuser": "iam:create_user",
    "iam:deleteuser": "iam:delete_user",
@@ -139,7 +150,7 @@ class IamService:
                f"Access temporarily locked. Try again in {seconds} seconds."
            )
        record = self._users.get(access_key)
-        if not record or record["secret_key"] != secret_key:
+        if not record or not hmac.compare_digest(record["secret_key"], secret_key):
            self._record_failed_attempt(access_key)
            raise IamError("Invalid credentials")
        self._clear_failed_attempts(access_key)
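Why the change above matters: `==` returns at the first differing byte, which can leak how much of a secret matched, while `hmac.compare_digest` takes time independent of where the strings diverge. A self-contained sketch with an illustrative key:

```python
# Sketch: timing-safe comparison of a stored secret against user input.
import hmac

stored = "s3cr3t-key-value"  # illustrative value
print(stored == "s3cr3t-WRONG")                     # fast-fail comparison (timing side channel)
print(hmac.compare_digest(stored, "s3cr3t-WRONG"))  # constant-time comparison -> False
print(hmac.compare_digest(stored, stored))          # True
```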
271 app/operation_metrics.py (new file)
@@ -0,0 +1,271 @@
from __future__ import annotations

import json
import logging
import threading
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional

logger = logging.getLogger(__name__)


@dataclass
class OperationStats:
    count: int = 0
    success_count: int = 0
    error_count: int = 0
    latency_sum_ms: float = 0.0
    latency_min_ms: float = float("inf")
    latency_max_ms: float = 0.0
    bytes_in: int = 0
    bytes_out: int = 0

    def record(self, latency_ms: float, success: bool, bytes_in: int = 0, bytes_out: int = 0) -> None:
        self.count += 1
        if success:
            self.success_count += 1
        else:
            self.error_count += 1
        self.latency_sum_ms += latency_ms
        if latency_ms < self.latency_min_ms:
            self.latency_min_ms = latency_ms
        if latency_ms > self.latency_max_ms:
            self.latency_max_ms = latency_ms
        self.bytes_in += bytes_in
        self.bytes_out += bytes_out

    def to_dict(self) -> Dict[str, Any]:
        avg_latency = self.latency_sum_ms / self.count if self.count > 0 else 0.0
        min_latency = self.latency_min_ms if self.latency_min_ms != float("inf") else 0.0
        return {
            "count": self.count,
            "success_count": self.success_count,
            "error_count": self.error_count,
            "latency_avg_ms": round(avg_latency, 2),
            "latency_min_ms": round(min_latency, 2),
            "latency_max_ms": round(self.latency_max_ms, 2),
            "bytes_in": self.bytes_in,
            "bytes_out": self.bytes_out,
        }

    def merge(self, other: "OperationStats") -> None:
        self.count += other.count
        self.success_count += other.success_count
        self.error_count += other.error_count
        self.latency_sum_ms += other.latency_sum_ms
        if other.latency_min_ms < self.latency_min_ms:
            self.latency_min_ms = other.latency_min_ms
        if other.latency_max_ms > self.latency_max_ms:
            self.latency_max_ms = other.latency_max_ms
        self.bytes_in += other.bytes_in
        self.bytes_out += other.bytes_out
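A small sketch of the accumulator semantics: merging two windows keeps min/max honest rather than averaging them, and the average falls out of `latency_sum_ms / count`. Output assumes the implementation above:

```python
# Sketch: merging two OperationStats windows.
a = OperationStats()
a.record(latency_ms=10.0, success=True, bytes_in=100)
b = OperationStats()
b.record(latency_ms=50.0, success=False, bytes_out=2048)

a.merge(b)
print(a.to_dict())
# count=2, success_count=1, error_count=1,
# latency_avg_ms=30.0, latency_min_ms=10.0, latency_max_ms=50.0
```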
@dataclass
class MetricsSnapshot:
    timestamp: datetime
    window_seconds: int
    by_method: Dict[str, Dict[str, Any]]
    by_endpoint: Dict[str, Dict[str, Any]]
    by_status_class: Dict[str, int]
    error_codes: Dict[str, int]
    totals: Dict[str, Any]

    def to_dict(self) -> Dict[str, Any]:
        return {
            "timestamp": self.timestamp.isoformat(),
            "window_seconds": self.window_seconds,
            "by_method": self.by_method,
            "by_endpoint": self.by_endpoint,
            "by_status_class": self.by_status_class,
            "error_codes": self.error_codes,
            "totals": self.totals,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "MetricsSnapshot":
        return cls(
            timestamp=datetime.fromisoformat(data["timestamp"]),
            window_seconds=data.get("window_seconds", 300),
            by_method=data.get("by_method", {}),
            by_endpoint=data.get("by_endpoint", {}),
            by_status_class=data.get("by_status_class", {}),
            error_codes=data.get("error_codes", {}),
            totals=data.get("totals", {}),
        )
class OperationMetricsCollector:
    def __init__(
        self,
        storage_root: Path,
        interval_minutes: int = 5,
        retention_hours: int = 24,
    ):
        self.storage_root = storage_root
        self.interval_seconds = interval_minutes * 60
        self.retention_hours = retention_hours
        self._lock = threading.Lock()
        self._by_method: Dict[str, OperationStats] = {}
        self._by_endpoint: Dict[str, OperationStats] = {}
        self._by_status_class: Dict[str, int] = {}
        self._error_codes: Dict[str, int] = {}
        self._totals = OperationStats()
        self._window_start = time.time()
        self._shutdown = threading.Event()
        self._snapshots: List[MetricsSnapshot] = []

        self._load_history()

        self._snapshot_thread = threading.Thread(
            target=self._snapshot_loop, name="operation-metrics-snapshot", daemon=True
        )
        self._snapshot_thread.start()

    def _config_path(self) -> Path:
        return self.storage_root / ".myfsio.sys" / "config" / "operation_metrics.json"

    def _load_history(self) -> None:
        config_path = self._config_path()
        if not config_path.exists():
            return
        try:
            data = json.loads(config_path.read_text(encoding="utf-8"))
            snapshots_data = data.get("snapshots", [])
            self._snapshots = [MetricsSnapshot.from_dict(s) for s in snapshots_data]
            self._prune_old_snapshots()
        except (json.JSONDecodeError, OSError, KeyError) as e:
            logger.warning(f"Failed to load operation metrics history: {e}")

    def _save_history(self) -> None:
        config_path = self._config_path()
        config_path.parent.mkdir(parents=True, exist_ok=True)
        try:
            data = {"snapshots": [s.to_dict() for s in self._snapshots]}
            config_path.write_text(json.dumps(data, indent=2), encoding="utf-8")
        except OSError as e:
            logger.warning(f"Failed to save operation metrics history: {e}")

    def _prune_old_snapshots(self) -> None:
        if not self._snapshots:
            return
        cutoff = datetime.now(timezone.utc).timestamp() - (self.retention_hours * 3600)
        self._snapshots = [
            s for s in self._snapshots if s.timestamp.timestamp() > cutoff
        ]

    def _snapshot_loop(self) -> None:
        while not self._shutdown.is_set():
            self._shutdown.wait(timeout=self.interval_seconds)
            if not self._shutdown.is_set():
                self._take_snapshot()

    def _take_snapshot(self) -> None:
        with self._lock:
            now = datetime.now(timezone.utc)
            window_seconds = int(time.time() - self._window_start)

            snapshot = MetricsSnapshot(
                timestamp=now,
                window_seconds=window_seconds,
                by_method={k: v.to_dict() for k, v in self._by_method.items()},
                by_endpoint={k: v.to_dict() for k, v in self._by_endpoint.items()},
                by_status_class=dict(self._by_status_class),
                error_codes=dict(self._error_codes),
                totals=self._totals.to_dict(),
            )

            self._snapshots.append(snapshot)
            self._prune_old_snapshots()
            self._save_history()

            self._by_method.clear()
            self._by_endpoint.clear()
            self._by_status_class.clear()
            self._error_codes.clear()
            self._totals = OperationStats()
            self._window_start = time.time()

    def record_request(
        self,
        method: str,
        endpoint_type: str,
        status_code: int,
        latency_ms: float,
        bytes_in: int = 0,
        bytes_out: int = 0,
        error_code: Optional[str] = None,
    ) -> None:
        success = 200 <= status_code < 400
        status_class = f"{status_code // 100}xx"

        with self._lock:
            if method not in self._by_method:
                self._by_method[method] = OperationStats()
            self._by_method[method].record(latency_ms, success, bytes_in, bytes_out)

            if endpoint_type not in self._by_endpoint:
                self._by_endpoint[endpoint_type] = OperationStats()
            self._by_endpoint[endpoint_type].record(latency_ms, success, bytes_in, bytes_out)

            self._by_status_class[status_class] = self._by_status_class.get(status_class, 0) + 1

            if error_code:
                self._error_codes[error_code] = self._error_codes.get(error_code, 0) + 1

            self._totals.record(latency_ms, success, bytes_in, bytes_out)

    def get_current_stats(self) -> Dict[str, Any]:
        with self._lock:
            window_seconds = int(time.time() - self._window_start)
            return {
                "timestamp": datetime.now(timezone.utc).isoformat(),
                "window_seconds": window_seconds,
                "by_method": {k: v.to_dict() for k, v in self._by_method.items()},
                "by_endpoint": {k: v.to_dict() for k, v in self._by_endpoint.items()},
                "by_status_class": dict(self._by_status_class),
                "error_codes": dict(self._error_codes),
                "totals": self._totals.to_dict(),
            }

    def get_history(self, hours: Optional[int] = None) -> List[Dict[str, Any]]:
        with self._lock:
            snapshots = list(self._snapshots)

        if hours:
            cutoff = datetime.now(timezone.utc).timestamp() - (hours * 3600)
            snapshots = [s for s in snapshots if s.timestamp.timestamp() > cutoff]

        return [s.to_dict() for s in snapshots]

    def shutdown(self) -> None:
        self._shutdown.set()
        self._take_snapshot()
        self._snapshot_thread.join(timeout=5.0)


def classify_endpoint(path: str) -> str:
    if not path or path == "/":
        return "service"

    path = path.rstrip("/")

    if path.startswith("/ui"):
        return "ui"

    if path.startswith("/kms"):
        return "kms"

    if path.startswith("/myfsio"):
        return "service"

    parts = path.lstrip("/").split("/")
    if len(parts) == 0:
        return "service"
    elif len(parts) == 1:
        return "bucket"
    else:
        return "object"
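A quick sketch of how request paths bucket into endpoint types under `classify_endpoint` above; the paths are illustrative:

```python
# Sketch: endpoint classification for a few representative paths.
for path in ("/", "/myfsio/health", "/ui/buckets", "/kms/keys", "/demo", "/demo/a/b.txt"):
    print(path, "->", classify_endpoint(path))
# /               -> service
# /myfsio/health  -> service
# /ui/buckets     -> ui
# /kms/keys       -> kms
# /demo           -> bucket
# /demo/a/b.txt   -> object
```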
@@ -27,6 +27,7 @@ STREAMING_THRESHOLD_BYTES = 10 * 1024 * 1024

REPLICATION_MODE_NEW_ONLY = "new_only"
REPLICATION_MODE_ALL = "all"
+REPLICATION_MODE_BIDIRECTIONAL = "bidirectional"


def _create_s3_client(connection: RemoteConnection, *, health_check: bool = False) -> Any:
@@ -127,10 +128,12 @@ class ReplicationRule:
    target_connection_id: str
    target_bucket: str
    enabled: bool = True
    mode: str = REPLICATION_MODE_NEW_ONLY
    created_at: Optional[float] = None
    stats: ReplicationStats = field(default_factory=ReplicationStats)
+    sync_deletions: bool = True
+    last_pull_at: Optional[float] = None

    def to_dict(self) -> dict:
        return {
            "bucket_name": self.bucket_name,
@@ -140,8 +143,10 @@ class ReplicationRule:
            "mode": self.mode,
            "created_at": self.created_at,
            "stats": self.stats.to_dict(),
+            "sync_deletions": self.sync_deletions,
+            "last_pull_at": self.last_pull_at,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "ReplicationRule":
        stats_data = data.pop("stats", {})
@@ -149,6 +154,10 @@ class ReplicationRule:
            data["mode"] = REPLICATION_MODE_NEW_ONLY
        if "created_at" not in data:
            data["created_at"] = None
+        if "sync_deletions" not in data:
+            data["sync_deletions"] = True
+        if "last_pull_at" not in data:
+            data["last_pull_at"] = None
        rule = cls(**data)
        rule.stats = ReplicationStats.from_dict(stats_data) if stats_data else ReplicationStats()
        return rule
206 app/s3_api.py
@@ -11,7 +11,8 @@ import uuid
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, Optional
from urllib.parse import quote, urlencode, urlparse, unquote
-from xml.etree.ElementTree import Element, SubElement, tostring, fromstring, ParseError
+from xml.etree.ElementTree import Element, SubElement, tostring, ParseError
+from defusedxml.ElementTree import fromstring

from flask import Blueprint, Response, current_app, jsonify, request, g
from werkzeug.http import http_date
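Swapping `fromstring` for the defusedxml variant hardens every XML parse in this module against external-entity (XXE) and entity-expansion payloads. A sketch of the class of input it now refuses; the payload is illustrative:

```python
# Sketch: defusedxml rejects entity declarations the stdlib parser would expand.
from defusedxml.ElementTree import fromstring
from defusedxml import EntitiesForbidden

xxe = b"""<?xml version="1.0"?>
<!DOCTYPE t [<!ENTITY x SYSTEM "file:///etc/passwd">]>
<Tagging>&x;</Tagging>"""

try:
    fromstring(xxe)
except EntitiesForbidden:
    print("rejected: entity definitions are forbidden")
```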
@@ -29,6 +30,8 @@ from .storage import ObjectStorage, StorageError, QuotaExceededError, BucketNotF

logger = logging.getLogger(__name__)

+S3_NS = "http://s3.amazonaws.com/doc/2006-03-01/"
+
s3_api_bp = Blueprint("s3_api", __name__)

def _storage() -> ObjectStorage:
@@ -53,6 +56,20 @@ def _bucket_policies() -> BucketPolicyStore:
    return store


+def _build_policy_context() -> Dict[str, Any]:
+    ctx: Dict[str, Any] = {}
+    if request.headers.get("Referer"):
+        ctx["aws:Referer"] = request.headers.get("Referer")
+    if request.access_route:
+        ctx["aws:SourceIp"] = request.access_route[0]
+    elif request.remote_addr:
+        ctx["aws:SourceIp"] = request.remote_addr
+    ctx["aws:SecureTransport"] = str(request.is_secure).lower()
+    if request.headers.get("User-Agent"):
+        ctx["aws:UserAgent"] = request.headers.get("User-Agent")
+    return ctx
+
+
def _object_lock() -> ObjectLockService:
    return current_app.extensions["object_lock"]
@@ -71,6 +88,7 @@ def _xml_response(element: Element, status: int = 200) -> Response:


def _error_response(code: str, message: str, status: int) -> Response:
+    g.s3_error_code = code
    error = Element("Error")
    SubElement(error, "Code").text = code
    SubElement(error, "Message").text = message
@@ -79,6 +97,13 @@ def _error_response(code: str, message: str, status: int) -> Response:
    return _xml_response(error, status)


+def _require_xml_content_type() -> Response | None:
+    ct = request.headers.get("Content-Type", "")
+    if ct and not ct.startswith(("application/xml", "text/xml")):
+        return _error_response("InvalidRequest", "Content-Type must be application/xml or text/xml", 400)
+    return None
+
+
def _parse_range_header(range_header: str, file_size: int) -> list[tuple[int, int]] | None:
    if not range_header.startswith("bytes="):
        return None
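Note the guard above only rejects requests that *declare* a non-XML body; an absent Content-Type still passes, which mirrors lenient S3 clients. A tiny sketch of the decision table, duplicating the check's logic:

```python
# Sketch: which Content-Type values the guard accepts or rejects.
cases = ["application/xml", "text/xml; charset=utf-8", "", "application/json"]
for ct in cases:
    rejected = bool(ct) and not ct.startswith(("application/xml", "text/xml"))
    print(f"{ct or '(absent)'}: {'400 InvalidRequest' if rejected else 'accepted'}")
```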
@@ -218,16 +243,7 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:

    if not hmac.compare_digest(calculated_signature, signature):
-        if current_app.config.get("DEBUG_SIGV4"):
-            logger.warning(
-                "SigV4 signature mismatch",
-                extra={
-                    "path": req.path,
-                    "method": method,
-                    "signed_headers": signed_headers_str,
-                    "content_type": req.headers.get("Content-Type"),
-                    "content_length": req.headers.get("Content-Length"),
-                }
-            )
+        logger.warning("SigV4 signature mismatch for %s %s", method, req.path)
        raise IamError("SignatureDoesNotMatch")

    session_token = req.headers.get("X-Amz-Security-Token")
@@ -293,7 +309,7 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
        if header.lower() == 'expect' and val == "":
            val = "100-continue"
        val = " ".join(val.split())
-        canonical_headers_parts.append(f"{header}:{val}\n")
+        canonical_headers_parts.append(f"{header.lower()}:{val}\n")
    canonical_headers = "".join(canonical_headers_parts)

    payload_hash = "UNSIGNED-PAYLOAD"
@@ -380,7 +396,8 @@ def _authorize_action(principal: Principal | None, bucket_name: str | None, acti
    policy_decision = None
    access_key = principal.access_key if principal else None
    if bucket_name:
-        policy_decision = _bucket_policies().evaluate(access_key, bucket_name, object_key, action)
+        policy_context = _build_policy_context()
+        policy_decision = _bucket_policies().evaluate(access_key, bucket_name, object_key, action, policy_context)
        if policy_decision == "deny":
            raise IamError("Access denied by bucket policy")
@@ -407,11 +424,13 @@ def _authorize_action(principal: Principal | None, bucket_name: str | None, acti
def _enforce_bucket_policy(principal: Principal | None, bucket_name: str | None, object_key: str | None, action: str) -> None:
    if not bucket_name:
        return
+    policy_context = _build_policy_context()
    decision = _bucket_policies().evaluate(
        principal.access_key if principal else None,
        bucket_name,
        object_key,
        action,
+        policy_context,
    )
    if decision == "deny":
        raise IamError("Access denied by bucket policy")
@@ -572,6 +591,7 @@ def _generate_presigned_url(
    bucket_name: str,
    object_key: str,
    expires_in: int,
+    api_base_url: str | None = None,
) -> str:
    region = current_app.config["AWS_REGION"]
    service = current_app.config["AWS_SERVICE"]
@@ -592,7 +612,7 @@ def _generate_presigned_url(
    }
    canonical_query = _encode_query_params(query_params)

-    api_base = current_app.config.get("API_BASE_URL")
+    api_base = api_base_url or current_app.config.get("API_BASE_URL")
    if api_base:
        parsed = urlparse(api_base)
        host = parsed.netloc
@@ -644,11 +664,11 @@ def _strip_ns(tag: str | None) -> str:


def _find_element(parent: Element, name: str) -> Optional[Element]:
-    """Find a child element by name, trying both namespaced and non-namespaced variants.
+    """Find a child element by name, trying S3 namespace then no namespace.

    This handles XML documents that may or may not include namespace prefixes.
    """
-    el = parent.find(f"{{*}}{name}")
+    el = parent.find(f"{{{S3_NS}}}{name}")
    if el is None:
        el = parent.find(name)
    return el
@@ -672,7 +692,7 @@ def _parse_tagging_document(payload: bytes) -> list[dict[str, str]]:
        raise ValueError("Malformed XML") from exc
    if _strip_ns(root.tag) != "Tagging":
        raise ValueError("Root element must be Tagging")
-    tagset = root.find(".//{*}TagSet")
+    tagset = root.find(".//{http://s3.amazonaws.com/doc/2006-03-01/}TagSet")
    if tagset is None:
        tagset = root.find("TagSet")
    if tagset is None:
@@ -840,13 +860,13 @@ def _parse_encryption_document(payload: bytes) -> dict[str, Any]:
            bucket_key_el = child
        if default_el is None:
            continue
-        algo_el = default_el.find("{*}SSEAlgorithm")
+        algo_el = default_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}SSEAlgorithm")
        if algo_el is None:
            algo_el = default_el.find("SSEAlgorithm")
        if algo_el is None or not (algo_el.text or "").strip():
            raise ValueError("SSEAlgorithm is required")
        rule: dict[str, Any] = {"SSEAlgorithm": algo_el.text.strip()}
-        kms_el = default_el.find("{*}KMSMasterKeyID")
+        kms_el = default_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}KMSMasterKeyID")
        if kms_el is None:
            kms_el = default_el.find("KMSMasterKeyID")
        if kms_el is not None and kms_el.text:
@@ -922,6 +942,7 @@ def _maybe_handle_bucket_subresource(bucket_name: str) -> Response | None:
        "notification": _bucket_notification_handler,
        "logging": _bucket_logging_handler,
        "uploads": _bucket_uploads_handler,
+        "policy": _bucket_policy_handler,
    }
    requested = [key for key in handlers if key in request.args]
    if not requested:
@@ -947,8 +968,11 @@ def _bucket_versioning_handler(bucket_name: str) -> Response:
    except IamError as exc:
        return _error_response("AccessDenied", str(exc), 403)
    storage = _storage()

    if request.method == "PUT":
+        ct_error = _require_xml_content_type()
+        if ct_error:
+            return ct_error
        payload = request.get_data(cache=False) or b""
        if not payload.strip():
            return _error_response("MalformedXML", "Request body is required", 400)
@@ -958,7 +982,7 @@ def _bucket_versioning_handler(bucket_name: str) -> Response:
            return _error_response("MalformedXML", "Unable to parse XML document", 400)
        if _strip_ns(root.tag) != "VersioningConfiguration":
            return _error_response("MalformedXML", "Root element must be VersioningConfiguration", 400)
-        status_el = root.find("{*}Status")
+        status_el = root.find("{http://s3.amazonaws.com/doc/2006-03-01/}Status")
        if status_el is None:
            status_el = root.find("Status")
        status = (status_el.text or "").strip() if status_el is not None else ""
@@ -1007,6 +1031,9 @@ def _bucket_tagging_handler(bucket_name: str) -> Response:
        current_app.logger.info("Bucket tags deleted", extra={"bucket": bucket_name})
        return Response(status=204)

+    ct_error = _require_xml_content_type()
+    if ct_error:
+        return ct_error
    payload = request.get_data(cache=False) or b""
    try:
        tags = _parse_tagging_document(payload)
@@ -1062,6 +1089,9 @@ def _object_tagging_handler(bucket_name: str, object_key: str) -> Response:
        current_app.logger.info("Object tags deleted", extra={"bucket": bucket_name, "key": object_key})
        return Response(status=204)

+    ct_error = _require_xml_content_type()
+    if ct_error:
+        return ct_error
    payload = request.get_data(cache=False) or b""
    try:
        tags = _parse_tagging_document(payload)
@@ -1131,6 +1161,9 @@ def _bucket_cors_handler(bucket_name: str) -> Response:
        current_app.logger.info("Bucket CORS deleted", extra={"bucket": bucket_name})
        return Response(status=204)

+    ct_error = _require_xml_content_type()
+    if ct_error:
+        return ct_error
    payload = request.get_data(cache=False) or b""
    if not payload.strip():
    try:
@@ -1177,6 +1210,9 @@ def _bucket_encryption_handler(bucket_name: str) -> Response:
                404,
            )
        return _xml_response(_render_encryption_document(config))
+    ct_error = _require_xml_content_type()
+    if ct_error:
+        return ct_error
    payload = request.get_data(cache=False) or b""
    if not payload.strip():
    try:
@@ -1349,7 +1385,7 @@ def _bucket_list_versions_handler(bucket_name: str) -> Response:
            SubElement(ver_elem, "Key").text = obj.key
            SubElement(ver_elem, "VersionId").text = v.get("version_id", "unknown")
            SubElement(ver_elem, "IsLatest").text = "false"
-            SubElement(ver_elem, "LastModified").text = v.get("archived_at", "")
+            SubElement(ver_elem, "LastModified").text = v.get("archived_at") or "1970-01-01T00:00:00Z"
            SubElement(ver_elem, "ETag").text = f'"{v.get("etag", "")}"'
            SubElement(ver_elem, "Size").text = str(v.get("size", 0))
            SubElement(ver_elem, "StorageClass").text = "STANDARD"
@@ -1398,6 +1434,9 @@ def _bucket_lifecycle_handler(bucket_name: str) -> Response:
        current_app.logger.info("Bucket lifecycle deleted", extra={"bucket": bucket_name})
        return Response(status=204)

+    ct_error = _require_xml_content_type()
+    if ct_error:
+        return ct_error
    payload = request.get_data(cache=False) or b""
    if not payload.strip():
        return _error_response("MalformedXML", "Request body is required", 400)
@@ -1462,49 +1501,49 @@ def _parse_lifecycle_config(payload: bytes) -> list:
        raise ValueError("Root element must be LifecycleConfiguration")

    rules = []
-    for rule_el in root.findall("{*}Rule") or root.findall("Rule"):
+    for rule_el in root.findall("{http://s3.amazonaws.com/doc/2006-03-01/}Rule") or root.findall("Rule"):
        rule: dict = {}

-        id_el = rule_el.find("{*}ID") or rule_el.find("ID")
+        id_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}ID") or rule_el.find("ID")
        if id_el is not None and id_el.text:
            rule["ID"] = id_el.text.strip()

-        filter_el = rule_el.find("{*}Filter") or rule_el.find("Filter")
+        filter_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Filter") or rule_el.find("Filter")
        if filter_el is not None:
-            prefix_el = filter_el.find("{*}Prefix") or filter_el.find("Prefix")
+            prefix_el = filter_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Prefix") or filter_el.find("Prefix")
            if prefix_el is not None and prefix_el.text:
                rule["Prefix"] = prefix_el.text

        if "Prefix" not in rule:
-            prefix_el = rule_el.find("{*}Prefix") or rule_el.find("Prefix")
+            prefix_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Prefix") or rule_el.find("Prefix")
            if prefix_el is not None:
                rule["Prefix"] = prefix_el.text or ""

-        status_el = rule_el.find("{*}Status") or rule_el.find("Status")
+        status_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Status") or rule_el.find("Status")
        rule["Status"] = (status_el.text or "Enabled").strip() if status_el is not None else "Enabled"

-        exp_el = rule_el.find("{*}Expiration") or rule_el.find("Expiration")
+        exp_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Expiration") or rule_el.find("Expiration")
        if exp_el is not None:
            expiration: dict = {}
-            days_el = exp_el.find("{*}Days") or exp_el.find("Days")
+            days_el = exp_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Days") or exp_el.find("Days")
            if days_el is not None and days_el.text:
                days_val = int(days_el.text.strip())
                if days_val <= 0:
                    raise ValueError("Expiration Days must be a positive integer")
                expiration["Days"] = days_val
-            date_el = exp_el.find("{*}Date") or exp_el.find("Date")
+            date_el = exp_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Date") or exp_el.find("Date")
            if date_el is not None and date_el.text:
                expiration["Date"] = date_el.text.strip()
-            eodm_el = exp_el.find("{*}ExpiredObjectDeleteMarker") or exp_el.find("ExpiredObjectDeleteMarker")
+            eodm_el = exp_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}ExpiredObjectDeleteMarker") or exp_el.find("ExpiredObjectDeleteMarker")
            if eodm_el is not None and (eodm_el.text or "").strip().lower() in {"true", "1"}:
                expiration["ExpiredObjectDeleteMarker"] = True
            if expiration:
                rule["Expiration"] = expiration

-        nve_el = rule_el.find("{*}NoncurrentVersionExpiration") or rule_el.find("NoncurrentVersionExpiration")
+        nve_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}NoncurrentVersionExpiration") or rule_el.find("NoncurrentVersionExpiration")
        if nve_el is not None:
            nve: dict = {}
-            days_el = nve_el.find("{*}NoncurrentDays") or nve_el.find("NoncurrentDays")
+            days_el = nve_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}NoncurrentDays") or nve_el.find("NoncurrentDays")
            if days_el is not None and days_el.text:
                noncurrent_days = int(days_el.text.strip())
                if noncurrent_days <= 0:
@@ -1513,10 +1552,10 @@ def _parse_lifecycle_config(payload: bytes) -> list:
        if nve:
            rule["NoncurrentVersionExpiration"] = nve

-        aimu_el = rule_el.find("{*}AbortIncompleteMultipartUpload") or rule_el.find("AbortIncompleteMultipartUpload")
+        aimu_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}AbortIncompleteMultipartUpload") or rule_el.find("AbortIncompleteMultipartUpload")
        if aimu_el is not None:
            aimu: dict = {}
-            days_el = aimu_el.find("{*}DaysAfterInitiation") or aimu_el.find("DaysAfterInitiation")
+            days_el = aimu_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}DaysAfterInitiation") or aimu_el.find("DaysAfterInitiation")
            if days_el is not None and days_el.text:
                days_after = int(days_el.text.strip())
                if days_after <= 0:
@@ -1632,6 +1671,9 @@ def _bucket_object_lock_handler(bucket_name: str) -> Response:
        SubElement(root, "ObjectLockEnabled").text = "Enabled" if config.enabled else "Disabled"
        return _xml_response(root)

+    ct_error = _require_xml_content_type()
+    if ct_error:
+        return ct_error
    payload = request.get_data(cache=False) or b""
    if not payload.strip():
        return _error_response("MalformedXML", "Request body is required", 400)
@@ -1641,7 +1683,7 @@ def _bucket_object_lock_handler(bucket_name: str) -> Response:
    except ParseError:
        return _error_response("MalformedXML", "Unable to parse XML document", 400)

-    enabled_el = root.find("{*}ObjectLockEnabled") or root.find("ObjectLockEnabled")
+    enabled_el = root.find("{http://s3.amazonaws.com/doc/2006-03-01/}ObjectLockEnabled") or root.find("ObjectLockEnabled")
    enabled = (enabled_el.text or "").strip() == "Enabled" if enabled_el is not None else False

    config = ObjectLockConfig(enabled=enabled)
@@ -1697,6 +1739,9 @@ def _bucket_notification_handler(bucket_name: str) -> Response:
        current_app.logger.info("Bucket notifications deleted", extra={"bucket": bucket_name})
        return Response(status=204)

+    ct_error = _require_xml_content_type()
+    if ct_error:
+        return ct_error
    payload = request.get_data(cache=False) or b""
    if not payload.strip():
        notification_service.delete_bucket_notifications(bucket_name)
@@ -1708,9 +1753,9 @@ def _bucket_notification_handler(bucket_name: str) -> Response:
        return _error_response("MalformedXML", "Unable to parse XML document", 400)

    configs: list[NotificationConfiguration] = []
-    for webhook_el in root.findall("{*}WebhookConfiguration") or root.findall("WebhookConfiguration"):
+    for webhook_el in root.findall("{http://s3.amazonaws.com/doc/2006-03-01/}WebhookConfiguration") or root.findall("WebhookConfiguration"):
        config_id = _find_element_text(webhook_el, "Id") or uuid.uuid4().hex
-        events = [el.text for el in webhook_el.findall("{*}Event") or webhook_el.findall("Event") if el.text]
+        events = [el.text for el in webhook_el.findall("{http://s3.amazonaws.com/doc/2006-03-01/}Event") or webhook_el.findall("Event") if el.text]

        dest_el = _find_element(webhook_el, "Destination")
        url = _find_element_text(dest_el, "Url") if dest_el else ""
@@ -1723,7 +1768,7 @@ def _bucket_notification_handler(bucket_name: str) -> Response:
        if filter_el:
            key_el = _find_element(filter_el, "S3Key")
            if key_el:
-                for rule_el in key_el.findall("{*}FilterRule") or key_el.findall("FilterRule"):
+                for rule_el in key_el.findall("{http://s3.amazonaws.com/doc/2006-03-01/}FilterRule") or key_el.findall("FilterRule"):
                    name = _find_element_text(rule_el, "Name")
                    value = _find_element_text(rule_el, "Value")
                    if name == "prefix":
@@ -1776,6 +1821,9 @@ def _bucket_logging_handler(bucket_name: str) -> Response:
|
||||
current_app.logger.info("Bucket logging deleted", extra={"bucket": bucket_name})
|
||||
return Response(status=204)
|
||||
|
||||
ct_error = _require_xml_content_type()
|
||||
if ct_error:
|
||||
return ct_error
|
||||
payload = request.get_data(cache=False) or b""
|
||||
if not payload.strip():
|
||||
logging_service.delete_bucket_logging(bucket_name)
|
||||
@@ -1913,6 +1961,9 @@ def _object_retention_handler(bucket_name: str, object_key: str) -> Response:
|
||||
SubElement(root, "RetainUntilDate").text = retention.retain_until_date.strftime("%Y-%m-%dT%H:%M:%S.000Z")
|
||||
return _xml_response(root)
|
||||
|
||||
ct_error = _require_xml_content_type()
|
||||
if ct_error:
|
||||
return ct_error
|
||||
payload = request.get_data(cache=False) or b""
|
||||
if not payload.strip():
|
||||
return _error_response("MalformedXML", "Request body is required", 400)
|
||||
@@ -1982,6 +2033,9 @@ def _object_legal_hold_handler(bucket_name: str, object_key: str) -> Response:
|
||||
SubElement(root, "Status").text = "ON" if enabled else "OFF"
|
||||
return _xml_response(root)
|
||||
|
||||
ct_error = _require_xml_content_type()
|
||||
if ct_error:
|
||||
return ct_error
|
||||
payload = request.get_data(cache=False) or b""
|
||||
if not payload.strip():
|
||||
return _error_response("MalformedXML", "Request body is required", 400)
|
||||
@@ -2013,6 +2067,9 @@ def _bulk_delete_handler(bucket_name: str) -> Response:
|
||||
except IamError as exc:
|
||||
return _error_response("AccessDenied", str(exc), 403)
|
||||
|
||||
ct_error = _require_xml_content_type()
|
||||
if ct_error:
|
||||
return ct_error
|
||||
payload = request.get_data(cache=False) or b""
|
||||
if not payload.strip():
|
||||
return _error_response("MalformedXML", "Request body must include a Delete specification", 400)
|
||||
@@ -2389,7 +2446,8 @@ def object_handler(bucket_name: str, object_key: str):
|
||||
operation="Put",
|
||||
)
|
||||
|
||||
if "S3ReplicationAgent" not in request.headers.get("User-Agent", ""):
|
||||
user_agent = request.headers.get("User-Agent", "")
|
||||
if "S3ReplicationAgent" not in user_agent and "SiteSyncAgent" not in user_agent:
|
||||
_replication_manager().trigger_replication(bucket_name, object_key, action="write")
|
||||
|
||||
return response
|
||||
@@ -2535,7 +2593,7 @@ def object_handler(bucket_name: str, object_key: str):
|
||||
)
|
||||
|
||||
user_agent = request.headers.get("User-Agent", "")
|
||||
if "S3ReplicationAgent" not in user_agent:
|
||||
if "S3ReplicationAgent" not in user_agent and "SiteSyncAgent" not in user_agent:
|
||||
_replication_manager().trigger_replication(bucket_name, object_key, action="delete")
|
||||
|
||||
return Response(status=204)
|
||||
@@ -2588,9 +2646,9 @@ def _list_parts(bucket_name: str, object_key: str) -> Response:
|
||||
return _xml_response(root)
|
||||
|
||||
|
||||
@s3_api_bp.route("/bucket-policy/<bucket_name>", methods=["GET", "PUT", "DELETE"])
|
||||
@limiter.limit("30 per minute")
|
||||
def bucket_policy_handler(bucket_name: str) -> Response:
|
||||
def _bucket_policy_handler(bucket_name: str) -> Response:
|
||||
if request.method not in {"GET", "PUT", "DELETE"}:
|
||||
return _method_not_allowed(["GET", "PUT", "DELETE"])
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
@@ -2622,51 +2680,6 @@ def bucket_policy_handler(bucket_name: str) -> Response:
|
||||
return Response(status=204)
|
||||
|
||||
|
||||
@s3_api_bp.post("/presign/<bucket_name>/<path:object_key>")
|
||||
@limiter.limit("45 per minute")
|
||||
def presign_object(bucket_name: str, object_key: str):
|
||||
payload = request.get_json(silent=True) or {}
|
||||
method = str(payload.get("method", "GET")).upper()
|
||||
allowed_methods = {"GET", "PUT", "DELETE"}
|
||||
if method not in allowed_methods:
|
||||
return _error_response("InvalidRequest", "Method must be GET, PUT, or DELETE", 400)
|
||||
try:
|
||||
expires = int(payload.get("expires_in", 900))
|
||||
except (TypeError, ValueError):
|
||||
return _error_response("InvalidRequest", "expires_in must be an integer", 400)
|
||||
expires = max(1, min(expires, 7 * 24 * 3600))
|
||||
action = "read" if method == "GET" else ("delete" if method == "DELETE" else "write")
|
||||
principal, error = _require_principal()
|
||||
if error:
|
||||
return error
|
||||
try:
|
||||
_authorize_action(principal, bucket_name, action, object_key=object_key)
|
||||
except IamError as exc:
|
||||
return _error_response("AccessDenied", str(exc), 403)
|
||||
storage = _storage()
|
||||
if not storage.bucket_exists(bucket_name):
|
||||
return _error_response("NoSuchBucket", "Bucket does not exist", 404)
|
||||
if action != "write":
|
||||
try:
|
||||
storage.get_object_path(bucket_name, object_key)
|
||||
except StorageError:
|
||||
return _error_response("NoSuchKey", "Object not found", 404)
|
||||
secret = _iam().secret_for_key(principal.access_key)
|
||||
url = _generate_presigned_url(
|
||||
principal=principal,
|
||||
secret_key=secret,
|
||||
method=method,
|
||||
bucket_name=bucket_name,
|
||||
object_key=object_key,
|
||||
expires_in=expires,
|
||||
)
|
||||
current_app.logger.info(
|
||||
"Presigned URL generated",
|
||||
extra={"bucket": bucket_name, "key": object_key, "method": method},
|
||||
)
|
||||
return jsonify({"url": url, "method": method, "expires_in": expires})
|
||||
|
||||
|
||||
@s3_api_bp.route("/<bucket_name>", methods=["HEAD"])
|
||||
@limiter.limit("100 per minute")
|
||||
def head_bucket(bucket_name: str) -> Response:
|
||||
@@ -2814,9 +2827,9 @@ def _copy_object(dest_bucket: str, dest_key: str, copy_source: str) -> Response:
|
||||
)
|
||||
|
||||
user_agent = request.headers.get("User-Agent", "")
|
||||
if "S3ReplicationAgent" not in user_agent:
|
||||
if "S3ReplicationAgent" not in user_agent and "SiteSyncAgent" not in user_agent:
|
||||
_replication_manager().trigger_replication(dest_bucket, dest_key, action="write")
|
||||
|
||||
|
||||
root = Element("CopyObjectResult")
|
||||
SubElement(root, "LastModified").text = meta.last_modified.isoformat()
|
||||
if meta.etag:
|
||||
@@ -2986,6 +2999,9 @@ def _complete_multipart_upload(bucket_name: str, object_key: str) -> Response:
|
||||
if not upload_id:
|
||||
return _error_response("InvalidArgument", "uploadId is required", 400)
|
||||
|
||||
ct_error = _require_xml_content_type()
|
||||
if ct_error:
|
||||
return ct_error
|
||||
payload = request.get_data(cache=False) or b""
|
||||
try:
|
||||
root = fromstring(payload)
|
||||
@@ -2999,11 +3015,11 @@ def _complete_multipart_upload(bucket_name: str, object_key: str) -> Response:
|
||||
for part_el in list(root):
|
||||
if _strip_ns(part_el.tag) != "Part":
|
||||
continue
|
||||
part_number_el = part_el.find("{*}PartNumber")
|
||||
part_number_el = part_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}PartNumber")
|
||||
if part_number_el is None:
|
||||
part_number_el = part_el.find("PartNumber")
|
||||
|
||||
etag_el = part_el.find("{*}ETag")
|
||||
etag_el = part_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}ETag")
|
||||
if etag_el is None:
|
||||
etag_el = part_el.find("ETag")
|
||||
|
||||
@@ -3025,7 +3041,7 @@ def _complete_multipart_upload(bucket_name: str, object_key: str) -> Response:
|
||||
return _error_response("InvalidPart", str(exc), 400)
|
||||
|
||||
user_agent = request.headers.get("User-Agent", "")
|
||||
if "S3ReplicationAgent" not in user_agent:
|
||||
if "S3ReplicationAgent" not in user_agent and "SiteSyncAgent" not in user_agent:
|
||||
_replication_manager().trigger_replication(bucket_name, object_key, action="write")
|
||||
|
||||
root = Element("CompleteMultipartUploadResult")
|
||||
|
||||
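
Several hunks above replace wildcard namespace lookups (`find("{*}Tag")`) with the explicit S3 namespace URI while keeping the un-namespaced fallback after `or`. A minimal standalone sketch of why both lookups are needed (the element names and document here are illustrative, not taken from the codebase):

```python
import xml.etree.ElementTree as ET

S3_NS = "{http://s3.amazonaws.com/doc/2006-03-01/}"

namespaced = ET.fromstring(
    '<Rule xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Days>7</Days></Rule>'
)
plain = ET.fromstring("<Rule><Days>7</Days></Rule>")

# A namespaced payload only matches the namespace-qualified path...
assert namespaced.find(f"{S3_NS}Days") is not None
assert namespaced.find("Days") is None
# ...while a bare payload only matches the unqualified name, hence the fallback.
assert plain.find("Days") is not None
```
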
396
app/site_sync.py
Normal file
@@ -0,0 +1,396 @@
from __future__ import annotations

import json
import logging
import tempfile
import threading
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional, TYPE_CHECKING

import boto3
from botocore.config import Config
from botocore.exceptions import ClientError

if TYPE_CHECKING:
    from .connections import ConnectionStore, RemoteConnection
    from .replication import ReplicationManager, ReplicationRule
    from .storage import ObjectStorage

logger = logging.getLogger(__name__)

SITE_SYNC_USER_AGENT = "SiteSyncAgent/1.0"
SITE_SYNC_CONNECT_TIMEOUT = 10
SITE_SYNC_READ_TIMEOUT = 120
CLOCK_SKEW_TOLERANCE_SECONDS = 1.0


@dataclass
class SyncedObjectInfo:
    last_synced_at: float
    remote_etag: str
    source: str

    def to_dict(self) -> Dict[str, Any]:
        return {
            "last_synced_at": self.last_synced_at,
            "remote_etag": self.remote_etag,
            "source": self.source,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SyncedObjectInfo":
        return cls(
            last_synced_at=data["last_synced_at"],
            remote_etag=data["remote_etag"],
            source=data["source"],
        )


@dataclass
class SyncState:
    synced_objects: Dict[str, SyncedObjectInfo] = field(default_factory=dict)
    last_full_sync: Optional[float] = None

    def to_dict(self) -> Dict[str, Any]:
        return {
            "synced_objects": {k: v.to_dict() for k, v in self.synced_objects.items()},
            "last_full_sync": self.last_full_sync,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SyncState":
        synced_objects = {}
        for k, v in data.get("synced_objects", {}).items():
            synced_objects[k] = SyncedObjectInfo.from_dict(v)
        return cls(
            synced_objects=synced_objects,
            last_full_sync=data.get("last_full_sync"),
        )


@dataclass
class SiteSyncStats:
    last_sync_at: Optional[float] = None
    objects_pulled: int = 0
    objects_skipped: int = 0
    conflicts_resolved: int = 0
    deletions_applied: int = 0
    errors: int = 0

    def to_dict(self) -> Dict[str, Any]:
        return {
            "last_sync_at": self.last_sync_at,
            "objects_pulled": self.objects_pulled,
            "objects_skipped": self.objects_skipped,
            "conflicts_resolved": self.conflicts_resolved,
            "deletions_applied": self.deletions_applied,
            "errors": self.errors,
        }


@dataclass
class RemoteObjectMeta:
    key: str
    size: int
    last_modified: datetime
    etag: str

    @classmethod
    def from_s3_object(cls, obj: Dict[str, Any]) -> "RemoteObjectMeta":
        return cls(
            key=obj["Key"],
            size=obj.get("Size", 0),
            last_modified=obj["LastModified"],
            etag=obj.get("ETag", "").strip('"'),
        )


def _create_sync_client(connection: "RemoteConnection") -> Any:
    config = Config(
        user_agent_extra=SITE_SYNC_USER_AGENT,
        connect_timeout=SITE_SYNC_CONNECT_TIMEOUT,
        read_timeout=SITE_SYNC_READ_TIMEOUT,
        retries={"max_attempts": 2},
        signature_version="s3v4",
        s3={"addressing_style": "path"},
        request_checksum_calculation="when_required",
        response_checksum_validation="when_required",
    )
    return boto3.client(
        "s3",
        endpoint_url=connection.endpoint_url,
        aws_access_key_id=connection.access_key,
        aws_secret_access_key=connection.secret_key,
        region_name=connection.region or "us-east-1",
        config=config,
    )


class SiteSyncWorker:
    def __init__(
        self,
        storage: "ObjectStorage",
        connections: "ConnectionStore",
        replication_manager: "ReplicationManager",
        storage_root: Path,
        interval_seconds: int = 60,
        batch_size: int = 100,
    ):
        self.storage = storage
        self.connections = connections
        self.replication_manager = replication_manager
        self.storage_root = storage_root
        self.interval_seconds = interval_seconds
        self.batch_size = batch_size
        self._lock = threading.Lock()
        self._shutdown = threading.Event()
        self._sync_thread: Optional[threading.Thread] = None
        self._bucket_stats: Dict[str, SiteSyncStats] = {}

    def start(self) -> None:
        if self._sync_thread is not None and self._sync_thread.is_alive():
            return
        self._shutdown.clear()
        self._sync_thread = threading.Thread(
            target=self._sync_loop, name="site-sync-worker", daemon=True
        )
        self._sync_thread.start()
        logger.info("Site sync worker started (interval=%ds)", self.interval_seconds)

    def shutdown(self) -> None:
        self._shutdown.set()
        if self._sync_thread is not None:
            self._sync_thread.join(timeout=10.0)
        logger.info("Site sync worker shut down")

    def trigger_sync(self, bucket_name: str) -> Optional[SiteSyncStats]:
        from .replication import REPLICATION_MODE_BIDIRECTIONAL
        rule = self.replication_manager.get_rule(bucket_name)
        if not rule or rule.mode != REPLICATION_MODE_BIDIRECTIONAL or not rule.enabled:
            return None
        return self._sync_bucket(rule)

    def get_stats(self, bucket_name: str) -> Optional[SiteSyncStats]:
        with self._lock:
            return self._bucket_stats.get(bucket_name)

    def _sync_loop(self) -> None:
        while not self._shutdown.is_set():
            self._shutdown.wait(timeout=self.interval_seconds)
            if self._shutdown.is_set():
                break
            self._run_sync_cycle()

    def _run_sync_cycle(self) -> None:
        from .replication import REPLICATION_MODE_BIDIRECTIONAL
        for bucket_name, rule in list(self.replication_manager._rules.items()):
            if self._shutdown.is_set():
                break
            if rule.mode != REPLICATION_MODE_BIDIRECTIONAL or not rule.enabled:
                continue
            try:
                stats = self._sync_bucket(rule)
                with self._lock:
                    self._bucket_stats[bucket_name] = stats
            except Exception as e:
                logger.exception("Site sync failed for bucket %s: %s", bucket_name, e)

    def _sync_bucket(self, rule: "ReplicationRule") -> SiteSyncStats:
        stats = SiteSyncStats()
        connection = self.connections.get(rule.target_connection_id)
        if not connection:
            logger.warning("Connection %s not found for bucket %s", rule.target_connection_id, rule.bucket_name)
            stats.errors += 1
            return stats

        try:
            local_objects = self._list_local_objects(rule.bucket_name)
        except Exception as e:
            logger.error("Failed to list local objects for %s: %s", rule.bucket_name, e)
            stats.errors += 1
            return stats

        try:
            remote_objects = self._list_remote_objects(rule, connection)
        except Exception as e:
            logger.error("Failed to list remote objects for %s: %s", rule.bucket_name, e)
            stats.errors += 1
            return stats

        sync_state = self._load_sync_state(rule.bucket_name)
        local_keys = set(local_objects.keys())
        remote_keys = set(remote_objects.keys())

        to_pull = []
        for key in remote_keys:
            remote_meta = remote_objects[key]
            local_meta = local_objects.get(key)
            if local_meta is None:
                to_pull.append(key)
            else:
                resolution = self._resolve_conflict(local_meta, remote_meta)
                if resolution == "pull":
                    to_pull.append(key)
                    stats.conflicts_resolved += 1
                else:
                    stats.objects_skipped += 1

        pulled_count = 0
        for key in to_pull:
            if self._shutdown.is_set():
                break
            if pulled_count >= self.batch_size:
                break
            remote_meta = remote_objects[key]
            success = self._pull_object(rule, key, connection, remote_meta)
            if success:
                stats.objects_pulled += 1
                pulled_count += 1
                sync_state.synced_objects[key] = SyncedObjectInfo(
                    last_synced_at=time.time(),
                    remote_etag=remote_meta.etag,
                    source="remote",
                )
            else:
                stats.errors += 1

        if rule.sync_deletions:
            for key in list(sync_state.synced_objects.keys()):
                if key not in remote_keys and key in local_keys:
                    tracked = sync_state.synced_objects[key]
                    if tracked.source == "remote":
                        local_meta = local_objects.get(key)
                        if local_meta and local_meta.last_modified.timestamp() <= tracked.last_synced_at:
                            success = self._apply_remote_deletion(rule.bucket_name, key)
                            if success:
                                stats.deletions_applied += 1
                                del sync_state.synced_objects[key]

        sync_state.last_full_sync = time.time()
        self._save_sync_state(rule.bucket_name, sync_state)

        with self.replication_manager._stats_lock:
            rule.last_pull_at = time.time()
            self.replication_manager.save_rules()

        stats.last_sync_at = time.time()
        logger.info(
            "Site sync completed for %s: pulled=%d, skipped=%d, conflicts=%d, deletions=%d, errors=%d",
            rule.bucket_name,
            stats.objects_pulled,
            stats.objects_skipped,
            stats.conflicts_resolved,
            stats.deletions_applied,
            stats.errors,
        )
        return stats

    def _list_local_objects(self, bucket_name: str) -> Dict[str, Any]:
        from .storage import ObjectMeta
        objects = self.storage.list_objects_all(bucket_name)
        return {obj.key: obj for obj in objects}

    def _list_remote_objects(self, rule: "ReplicationRule", connection: "RemoteConnection") -> Dict[str, RemoteObjectMeta]:
        s3 = _create_sync_client(connection)
        result: Dict[str, RemoteObjectMeta] = {}
        paginator = s3.get_paginator("list_objects_v2")
        try:
            for page in paginator.paginate(Bucket=rule.target_bucket):
                for obj in page.get("Contents", []):
                    meta = RemoteObjectMeta.from_s3_object(obj)
                    result[meta.key] = meta
        except ClientError as e:
            if e.response["Error"]["Code"] == "NoSuchBucket":
                return {}
            raise
        return result

    def _resolve_conflict(self, local_meta: Any, remote_meta: RemoteObjectMeta) -> str:
        local_ts = local_meta.last_modified.timestamp()
        remote_ts = remote_meta.last_modified.timestamp()

        if abs(remote_ts - local_ts) < CLOCK_SKEW_TOLERANCE_SECONDS:
            local_etag = local_meta.etag or ""
            if remote_meta.etag == local_etag:
                return "skip"
            return "pull" if remote_meta.etag > local_etag else "keep"

        return "pull" if remote_ts > local_ts else "keep"

    def _pull_object(
        self,
        rule: "ReplicationRule",
        object_key: str,
        connection: "RemoteConnection",
        remote_meta: RemoteObjectMeta,
    ) -> bool:
        s3 = _create_sync_client(connection)
        tmp_path = None
        try:
            tmp_dir = self.storage_root / ".myfsio.sys" / "tmp"
            tmp_dir.mkdir(parents=True, exist_ok=True)
            with tempfile.NamedTemporaryFile(dir=tmp_dir, delete=False) as tmp_file:
                tmp_path = Path(tmp_file.name)

            s3.download_file(rule.target_bucket, object_key, str(tmp_path))

            head_response = s3.head_object(Bucket=rule.target_bucket, Key=object_key)
            user_metadata = head_response.get("Metadata", {})

            with open(tmp_path, "rb") as f:
                self.storage.put_object(
                    rule.bucket_name,
                    object_key,
                    f,
                    metadata=user_metadata if user_metadata else None,
                )

            logger.debug("Pulled object %s/%s from remote", rule.bucket_name, object_key)
            return True

        except ClientError as e:
            logger.error("Failed to pull %s/%s: %s", rule.bucket_name, object_key, e)
            return False
        except Exception as e:
            logger.error("Failed to store pulled object %s/%s: %s", rule.bucket_name, object_key, e)
            return False
        finally:
            if tmp_path and tmp_path.exists():
                try:
                    tmp_path.unlink()
                except OSError:
                    pass

    def _apply_remote_deletion(self, bucket_name: str, object_key: str) -> bool:
        try:
            self.storage.delete_object(bucket_name, object_key)
            logger.debug("Applied remote deletion for %s/%s", bucket_name, object_key)
            return True
        except Exception as e:
            logger.error("Failed to apply remote deletion for %s/%s: %s", bucket_name, object_key, e)
            return False

    def _sync_state_path(self, bucket_name: str) -> Path:
        return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / "site_sync_state.json"

    def _load_sync_state(self, bucket_name: str) -> SyncState:
        path = self._sync_state_path(bucket_name)
        if not path.exists():
            return SyncState()
        try:
            data = json.loads(path.read_text(encoding="utf-8"))
            return SyncState.from_dict(data)
        except (json.JSONDecodeError, OSError, KeyError) as e:
            logger.warning("Failed to load sync state for %s: %s", bucket_name, e)
            return SyncState()

    def _save_sync_state(self, bucket_name: str, state: SyncState) -> None:
        path = self._sync_state_path(bucket_name)
        path.parent.mkdir(parents=True, exist_ok=True)
        try:
            path.write_text(json.dumps(state.to_dict(), indent=2), encoding="utf-8")
        except OSError as e:
            logger.warning("Failed to save sync state for %s: %s", bucket_name, e)
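
For review context, a minimal sketch of how the new worker might be wired up, based only on the constructor and public methods above. The `storage`, `connections`, and `replication_manager` names stand in for the app's existing `ObjectStorage`, `ConnectionStore`, and `ReplicationManager` instances; the bucket name and paths are illustrative:

```python
from pathlib import Path

from app.site_sync import SiteSyncWorker

# Assumes storage, connections, and replication_manager already exist
# (built by the app factory); this is not runnable in isolation.
worker = SiteSyncWorker(
    storage,
    connections,
    replication_manager,
    storage_root=Path("data"),
    interval_seconds=60,   # background cycle cadence
    batch_size=100,        # max objects pulled per bucket per cycle
)
worker.start()                        # daemon thread; only enabled bidirectional rules sync
stats = worker.trigger_sync("media")  # force one pull for a bucket; SiteSyncStats or None
worker.shutdown()                     # set the event, then join the thread
```

Note the worker's S3 client tags itself with `SiteSyncAgent/1.0`, which is exactly the User-Agent substring the s3_api.py hunks above now check before re-triggering replication, so pulled objects do not bounce back to the remote.
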
app/storage.py
@@ -774,7 +774,7 @@ class ObjectStorage:
                continue
            payload.setdefault("version_id", meta_file.stem)
            versions.append(payload)
        versions.sort(key=lambda item: item.get("archived_at", ""), reverse=True)
        versions.sort(key=lambda item: item.get("archived_at") or "1970-01-01T00:00:00Z", reverse=True)
        return versions

    def restore_object_version(self, bucket_name: str, object_key: str, version_id: str) -> ObjectMeta:
@@ -866,7 +866,7 @@ class ObjectStorage:
            except (OSError, json.JSONDecodeError):
                payload = {}
            version_id = payload.get("version_id") or meta_file.stem
            archived_at = payload.get("archived_at") or ""
            archived_at = payload.get("archived_at") or "1970-01-01T00:00:00Z"
            size = int(payload.get("size") or 0)
            reason = payload.get("reason") or "update"
            record = aggregated.setdefault(
@@ -1773,11 +1773,9 @@ class ObjectStorage:
            raise StorageError("Object key contains null bytes")
        if object_key.startswith(("/", "\\")):
            raise StorageError("Object key cannot start with a slash")
        normalized = unicodedata.normalize("NFC", object_key)
        if normalized != object_key:
            raise StorageError("Object key must use normalized Unicode")

        candidate = Path(normalized)
        object_key = unicodedata.normalize("NFC", object_key)

        candidate = Path(object_key)
        if ".." in candidate.parts:
            raise StorageError("Object key contains parent directory references")
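
One behavioral note on the first storage.py hunk above: `item.get("archived_at", "")` still returns `None` when the metadata payload carries an explicit `null`, and `None` does not compare against `str`, so the old sort key could raise `TypeError`. Coalescing falsy values to an epoch timestamp keeps the sort total. A standalone illustration:

```python
versions = [
    {"archived_at": "2025-06-01T00:00:00Z"},
    {"archived_at": None},  # explicit null in the stored JSON payload
]

# Old key: .get("archived_at", "") yields None here -> TypeError on comparison.
# New key: falsy values fall back to the epoch and sort last.
versions.sort(key=lambda item: item.get("archived_at") or "1970-01-01T00:00:00Z", reverse=True)
print([v["archived_at"] for v in versions])  # ['2025-06-01T00:00:00Z', None]
```
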
215
app/system_metrics.py
Normal file
@@ -0,0 +1,215 @@
from __future__ import annotations

import json
import logging
import threading
import time
from dataclasses import dataclass
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional, TYPE_CHECKING

import psutil

if TYPE_CHECKING:
    from .storage import ObjectStorage

logger = logging.getLogger(__name__)


@dataclass
class SystemMetricsSnapshot:
    timestamp: datetime
    cpu_percent: float
    memory_percent: float
    disk_percent: float
    storage_bytes: int

    def to_dict(self) -> Dict[str, Any]:
        return {
            "timestamp": self.timestamp.strftime("%Y-%m-%dT%H:%M:%SZ"),
            "cpu_percent": round(self.cpu_percent, 2),
            "memory_percent": round(self.memory_percent, 2),
            "disk_percent": round(self.disk_percent, 2),
            "storage_bytes": self.storage_bytes,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "SystemMetricsSnapshot":
        timestamp_str = data["timestamp"]
        if timestamp_str.endswith("Z"):
            timestamp_str = timestamp_str[:-1] + "+00:00"
        return cls(
            timestamp=datetime.fromisoformat(timestamp_str),
            cpu_percent=data.get("cpu_percent", 0.0),
            memory_percent=data.get("memory_percent", 0.0),
            disk_percent=data.get("disk_percent", 0.0),
            storage_bytes=data.get("storage_bytes", 0),
        )


class SystemMetricsCollector:
    def __init__(
        self,
        storage_root: Path,
        interval_minutes: int = 5,
        retention_hours: int = 24,
    ):
        self.storage_root = storage_root
        self.interval_seconds = interval_minutes * 60
        self.retention_hours = retention_hours
        self._lock = threading.Lock()
        self._shutdown = threading.Event()
        self._snapshots: List[SystemMetricsSnapshot] = []
        self._storage_ref: Optional["ObjectStorage"] = None

        self._load_history()

        self._snapshot_thread = threading.Thread(
            target=self._snapshot_loop,
            name="system-metrics-snapshot",
            daemon=True,
        )
        self._snapshot_thread.start()

    def set_storage(self, storage: "ObjectStorage") -> None:
        with self._lock:
            self._storage_ref = storage

    def _config_path(self) -> Path:
        return self.storage_root / ".myfsio.sys" / "config" / "metrics_history.json"

    def _load_history(self) -> None:
        config_path = self._config_path()
        if not config_path.exists():
            return
        try:
            data = json.loads(config_path.read_text(encoding="utf-8"))
            history_data = data.get("history", [])
            self._snapshots = [SystemMetricsSnapshot.from_dict(s) for s in history_data]
            self._prune_old_snapshots()
        except (json.JSONDecodeError, OSError, KeyError) as e:
            logger.warning(f"Failed to load system metrics history: {e}")

    def _save_history(self) -> None:
        config_path = self._config_path()
        config_path.parent.mkdir(parents=True, exist_ok=True)
        try:
            data = {"history": [s.to_dict() for s in self._snapshots]}
            config_path.write_text(json.dumps(data, indent=2), encoding="utf-8")
        except OSError as e:
            logger.warning(f"Failed to save system metrics history: {e}")

    def _prune_old_snapshots(self) -> None:
        if not self._snapshots:
            return
        cutoff = datetime.now(timezone.utc).timestamp() - (self.retention_hours * 3600)
        self._snapshots = [
            s for s in self._snapshots if s.timestamp.timestamp() > cutoff
        ]

    def _snapshot_loop(self) -> None:
        while not self._shutdown.is_set():
            self._shutdown.wait(timeout=self.interval_seconds)
            if not self._shutdown.is_set():
                self._take_snapshot()

    def _take_snapshot(self) -> None:
        try:
            cpu_percent = psutil.cpu_percent(interval=0.1)
            memory = psutil.virtual_memory()
            disk = psutil.disk_usage(str(self.storage_root))

            storage_bytes = 0
            with self._lock:
                storage = self._storage_ref
                if storage:
                    try:
                        buckets = storage.list_buckets()
                        for bucket in buckets:
                            stats = storage.bucket_stats(bucket.name, cache_ttl=60)
                            storage_bytes += stats.get("total_bytes", stats.get("bytes", 0))
                    except Exception as e:
                        logger.warning(f"Failed to collect bucket stats: {e}")

            snapshot = SystemMetricsSnapshot(
                timestamp=datetime.now(timezone.utc),
                cpu_percent=cpu_percent,
                memory_percent=memory.percent,
                disk_percent=disk.percent,
                storage_bytes=storage_bytes,
            )

            with self._lock:
                self._snapshots.append(snapshot)
                self._prune_old_snapshots()
                self._save_history()

            logger.debug(f"System metrics snapshot taken: CPU={cpu_percent:.1f}%, Memory={memory.percent:.1f}%")
        except Exception as e:
            logger.warning(f"Failed to take system metrics snapshot: {e}")

    def get_current(self) -> Dict[str, Any]:
        cpu_percent = psutil.cpu_percent(interval=0.1)
        memory = psutil.virtual_memory()
        disk = psutil.disk_usage(str(self.storage_root))
        boot_time = psutil.boot_time()
        uptime_seconds = time.time() - boot_time
        uptime_days = int(uptime_seconds / 86400)

        total_buckets = 0
        total_objects = 0
        total_bytes_used = 0
        total_versions = 0

        with self._lock:
            storage = self._storage_ref
            if storage:
                try:
                    buckets = storage.list_buckets()
                    total_buckets = len(buckets)
                    for bucket in buckets:
                        stats = storage.bucket_stats(bucket.name, cache_ttl=60)
                        total_objects += stats.get("total_objects", stats.get("objects", 0))
                        total_bytes_used += stats.get("total_bytes", stats.get("bytes", 0))
                        total_versions += stats.get("version_count", 0)
                except Exception as e:
                    logger.warning(f"Failed to collect current bucket stats: {e}")

        return {
            "cpu_percent": round(cpu_percent, 2),
            "memory": {
                "total": memory.total,
                "available": memory.available,
                "used": memory.used,
                "percent": round(memory.percent, 2),
            },
            "disk": {
                "total": disk.total,
                "free": disk.free,
                "used": disk.used,
                "percent": round(disk.percent, 2),
            },
            "app": {
                "buckets": total_buckets,
                "objects": total_objects,
                "versions": total_versions,
                "storage_bytes": total_bytes_used,
                "uptime_days": uptime_days,
            },
        }

    def get_history(self, hours: Optional[int] = None) -> List[Dict[str, Any]]:
        with self._lock:
            snapshots = list(self._snapshots)

        if hours:
            cutoff = datetime.now(timezone.utc).timestamp() - (hours * 3600)
            snapshots = [s for s in snapshots if s.timestamp.timestamp() > cutoff]

        return [s.to_dict() for s in snapshots]

    def shutdown(self) -> None:
        self._shutdown.set()
        self._take_snapshot()
        self._snapshot_thread.join(timeout=5.0)
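
A quick sketch of the collector's lifecycle as it would be driven from the app factory, using only the constructor and public methods above (the path and intervals are illustrative; `storage` stands in for the app's `ObjectStorage` instance):

```python
from pathlib import Path

from app.system_metrics import SystemMetricsCollector

collector = SystemMetricsCollector(
    Path("data"),          # storage_root; history persists under .myfsio.sys/config/metrics_history.json
    interval_minutes=5,    # snapshot cadence (the thread starts in __init__)
    retention_hours=24,    # snapshots older than this are pruned
)
collector.set_storage(storage)   # assumption: storage is the app's ObjectStorage; enables byte totals
print(collector.get_current()["cpu_percent"])
print(len(collector.get_history(hours=6)))
collector.shutdown()             # takes one final snapshot, then joins the thread
```
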
300
app/ui.py
@@ -5,8 +5,11 @@ import json
import uuid
import psutil
import shutil
from datetime import datetime, timezone as dt_timezone
from pathlib import Path
from typing import Any
from urllib.parse import quote, urlparse
from zoneinfo import ZoneInfo

import boto3
import requests
@@ -33,12 +36,56 @@ from .extensions import limiter, csrf
from .iam import IamError
from .kms import KMSManager
from .replication import ReplicationManager, ReplicationRule
from .s3_api import _generate_presigned_url
from .secret_store import EphemeralSecretStore
from .storage import ObjectStorage, StorageError

ui_bp = Blueprint("ui", __name__, template_folder="../templates", url_prefix="/ui")


def _convert_to_display_tz(dt: datetime, display_tz: str | None = None) -> datetime:
    """Convert a datetime to the configured display timezone.

    Args:
        dt: The datetime to convert
        display_tz: Optional timezone string. If not provided, reads from current_app.config.
    """
    if display_tz is None:
        display_tz = current_app.config.get("DISPLAY_TIMEZONE", "UTC")
    if display_tz and display_tz != "UTC":
        try:
            tz = ZoneInfo(display_tz)
            if dt.tzinfo is None:
                dt = dt.replace(tzinfo=dt_timezone.utc)
            dt = dt.astimezone(tz)
        except (KeyError, ValueError):
            pass
    return dt


def _format_datetime_display(dt: datetime, display_tz: str | None = None) -> str:
    """Format a datetime for display using the configured timezone.

    Args:
        dt: The datetime to format
        display_tz: Optional timezone string. If not provided, reads from current_app.config.
    """
    dt = _convert_to_display_tz(dt, display_tz)
    tz_abbr = dt.strftime("%Z") or "UTC"
    return f"{dt.strftime('%b %d, %Y %H:%M')} ({tz_abbr})"


def _format_datetime_iso(dt: datetime, display_tz: str | None = None) -> str:
    """Format a datetime as ISO format using the configured timezone.

    Args:
        dt: The datetime to format
        display_tz: Optional timezone string. If not provided, reads from current_app.config.
    """
    dt = _convert_to_display_tz(dt, display_tz)
    return dt.isoformat()


def _storage() -> ObjectStorage:
    return current_app.extensions["object_storage"]
@@ -62,6 +109,20 @@ def _bucket_policies() -> BucketPolicyStore:
    return store


def _build_policy_context() -> dict[str, Any]:
    ctx: dict[str, Any] = {}
    if request.headers.get("Referer"):
        ctx["aws:Referer"] = request.headers.get("Referer")
    if request.access_route:
        ctx["aws:SourceIp"] = request.access_route[0]
    elif request.remote_addr:
        ctx["aws:SourceIp"] = request.remote_addr
    ctx["aws:SecureTransport"] = str(request.is_secure).lower()
    if request.headers.get("User-Agent"):
        ctx["aws:UserAgent"] = request.headers.get("User-Agent")
    return ctx
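
For reference while reviewing: the condition context produced by `_build_policy_context()` for a typical browser request would look roughly like this (values are illustrative; keys mirror AWS policy condition keys and are filled from the incoming request):

```python
ctx = {
    "aws:Referer": "https://intranet.example/app",  # only present when the header is sent
    "aws:SourceIp": "203.0.113.7",                  # first hop from access_route, else remote_addr
    "aws:SecureTransport": "true",                  # "true"/"false" as a lowercase string
    "aws:UserAgent": "Mozilla/5.0 ...",             # only present when the header is sent
}
```
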

def _connections() -> ConnectionStore:
    return current_app.extensions["connections"]

@@ -80,6 +141,10 @@ def _acl() -> AclService:
    return current_app.extensions["acl"]


def _operation_metrics():
    return current_app.extensions.get("operation_metrics")


def _format_bytes(num: int) -> str:
    step = 1024
    units = ["B", "KB", "MB", "GB", "TB", "PB"]
@@ -172,7 +237,8 @@ def _authorize_ui(principal, bucket_name: str | None, action: str, *, object_key
    enforce_bucket_policies = current_app.config.get("UI_ENFORCE_BUCKET_POLICIES", True)
    if bucket_name and enforce_bucket_policies:
        access_key = principal.access_key if principal else None
        decision = _bucket_policies().evaluate(access_key, bucket_name, object_key, action)
        policy_context = _build_policy_context()
        decision = _bucket_policies().evaluate(access_key, bucket_name, object_key, action, policy_context)
        if decision == "deny":
            raise IamError("Access denied by bucket policy")
        if not iam_allowed and decision != "allow":
@@ -350,6 +416,23 @@ def bucket_detail(bucket_name: str):
            can_edit_policy = True
        except IamError:
            can_edit_policy = False

    can_manage_lifecycle = False
    if principal:
        try:
            _iam().authorize(principal, bucket_name, "lifecycle")
            can_manage_lifecycle = True
        except IamError:
            can_manage_lifecycle = False

    can_manage_cors = False
    if principal:
        try:
            _iam().authorize(principal, bucket_name, "cors")
            can_manage_cors = True
        except IamError:
            can_manage_cors = False

    try:
        versioning_enabled = storage.is_versioning_enabled(bucket_name)
    except StorageError:
@@ -387,6 +470,7 @@ def bucket_detail(bucket_name: str):
    kms_enabled = current_app.config.get("KMS_ENABLED", False)
    encryption_enabled = current_app.config.get("ENCRYPTION_ENABLED", False)
    lifecycle_enabled = current_app.config.get("LIFECYCLE_ENABLED", False)
    site_sync_enabled = current_app.config.get("SITE_SYNC_ENABLED", False)
    can_manage_encryption = can_manage_versioning

    bucket_quota = storage.get_bucket_quota(bucket_name)
@@ -421,6 +505,8 @@ def bucket_detail(bucket_name: str):
        bucket_policy_text=policy_text,
        bucket_policy=bucket_policy,
        can_edit_policy=can_edit_policy,
        can_manage_lifecycle=can_manage_lifecycle,
        can_manage_cors=can_manage_cors,
        can_manage_versioning=can_manage_versioning,
        can_manage_replication=can_manage_replication,
        can_manage_encryption=can_manage_encryption,
@@ -437,6 +523,7 @@ def bucket_detail(bucket_name: str):
        bucket_quota=bucket_quota,
        bucket_stats=bucket_stats,
        can_manage_quota=can_manage_quota,
        site_sync_enabled=site_sync_enabled,
    )


@@ -477,6 +564,7 @@ def list_bucket_objects(bucket_name: str):
    tags_template = url_for("ui.object_tags", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    copy_template = url_for("ui.copy_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    move_template = url_for("ui.move_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    metadata_template = url_for("ui.object_metadata", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")

    objects_data = []
    for obj in result.objects:
@@ -484,7 +572,8 @@ def list_bucket_objects(bucket_name: str):
            "key": obj.key,
            "size": obj.size,
            "last_modified": obj.last_modified.isoformat(),
            "last_modified_display": obj.last_modified.strftime("%b %d, %Y %H:%M"),
            "last_modified_display": _format_datetime_display(obj.last_modified),
            "last_modified_iso": _format_datetime_iso(obj.last_modified),
            "etag": obj.etag,
        })

@@ -504,6 +593,7 @@ def list_bucket_objects(bucket_name: str):
            "tags": tags_template,
            "copy": copy_template,
            "move": move_template,
            "metadata": metadata_template,
        },
    })

@@ -537,6 +627,8 @@ def stream_bucket_objects(bucket_name: str):
    tags_template = url_for("ui.object_tags", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    copy_template = url_for("ui.copy_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    move_template = url_for("ui.move_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    metadata_template = url_for("ui.object_metadata", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
    display_tz = current_app.config.get("DISPLAY_TIMEZONE", "UTC")

    def generate():
        meta_line = json.dumps({
@@ -552,6 +644,7 @@ def stream_bucket_objects(bucket_name: str):
                "tags": tags_template,
                "copy": copy_template,
                "move": move_template,
                "metadata": metadata_template,
            },
        }) + "\n"
        yield meta_line
@@ -582,7 +675,8 @@ def stream_bucket_objects(bucket_name: str):
                "key": obj.key,
                "size": obj.size,
                "last_modified": obj.last_modified.isoformat(),
                "last_modified_display": obj.last_modified.strftime("%b %d, %Y %H:%M"),
                "last_modified_display": _format_datetime_display(obj.last_modified, display_tz),
                "last_modified_iso": _format_datetime_iso(obj.last_modified, display_tz),
                "etag": obj.etag,
            }) + "\n"

@@ -985,42 +1079,57 @@ def object_presign(bucket_name: str, object_key: str):
    principal = _current_principal()
    payload = request.get_json(silent=True) or {}
    method = str(payload.get("method", "GET")).upper()
    allowed_methods = {"GET", "PUT", "DELETE"}
    if method not in allowed_methods:
        return jsonify({"error": "Method must be GET, PUT, or DELETE"}), 400
    action = "read" if method == "GET" else ("delete" if method == "DELETE" else "write")
    try:
        _authorize_ui(principal, bucket_name, action, object_key=object_key)
    except IamError as exc:
        return jsonify({"error": str(exc)}), 403

    try:
        expires = int(payload.get("expires_in", 900))
    except (TypeError, ValueError):
        return jsonify({"error": "expires_in must be an integer"}), 400
    expires = max(1, min(expires, 7 * 24 * 3600))
    storage = _storage()
    if not storage.bucket_exists(bucket_name):
        return jsonify({"error": "Bucket does not exist"}), 404
    if action != "write":
        try:
            storage.get_object_path(bucket_name, object_key)
        except StorageError:
            return jsonify({"error": "Object not found"}), 404
    secret = _iam().secret_for_key(principal.access_key)
    api_base = current_app.config.get("API_BASE_URL") or "http://127.0.0.1:5000"
    api_base = api_base.rstrip("/")
    encoded_key = quote(object_key, safe="/")
    url = f"{api_base}/presign/{bucket_name}/{encoded_key}"

    parsed_api = urlparse(api_base)
    headers = _api_headers()
    headers["X-Forwarded-Host"] = parsed_api.netloc or "127.0.0.1:5000"
    headers["X-Forwarded-Proto"] = parsed_api.scheme or "http"
    headers["X-Forwarded-For"] = request.remote_addr or "127.0.0.1"

    url = _generate_presigned_url(
        principal=principal,
        secret_key=secret,
        method=method,
        bucket_name=bucket_name,
        object_key=object_key,
        expires_in=expires,
        api_base_url=api_base,
    )
    current_app.logger.info(
        "Presigned URL generated",
        extra={"bucket": bucket_name, "key": object_key, "method": method},
    )
    return jsonify({"url": url, "method": method, "expires_in": expires})


@ui_bp.get("/buckets/<bucket_name>/objects/<path:object_key>/metadata")
def object_metadata(bucket_name: str, object_key: str):
    principal = _current_principal()
    storage = _storage()
    try:
        response = requests.post(url, headers=headers, json=payload, timeout=5)
    except requests.RequestException as exc:
        return jsonify({"error": f"API unavailable: {exc}"}), 502
    try:
        body = response.json()
    except ValueError:
        text = response.text or ""
        if text.strip().startswith("<"):
            import xml.etree.ElementTree as ET
            try:
                root = ET.fromstring(text)
                message = root.findtext(".//Message") or root.findtext(".//Code") or "Unknown S3 error"
                body = {"error": message}
            except ET.ParseError:
                body = {"error": text or "API returned an empty response"}
        else:
            body = {"error": text or "API returned an empty response"}
    return jsonify(body), response.status_code
        _authorize_ui(principal, bucket_name, "read", object_key=object_key)
        metadata = storage.get_object_metadata(bucket_name, object_key)
        return jsonify({"metadata": metadata})
    except IamError as exc:
        return jsonify({"error": str(exc)}), 403
    except StorageError as exc:
        return jsonify({"error": str(exc)}), 404


@ui_bp.get("/buckets/<bucket_name>/objects/<path:object_key>/versions")
@@ -2007,18 +2116,18 @@ def metrics_dashboard():
    return render_template(
        "metrics.html",
        principal=principal,
        cpu_percent=cpu_percent,
        cpu_percent=round(cpu_percent, 2),
        memory={
            "total": _format_bytes(memory.total),
            "available": _format_bytes(memory.available),
            "used": _format_bytes(memory.used),
            "percent": memory.percent,
            "percent": round(memory.percent, 2),
        },
        disk={
            "total": _format_bytes(disk.total),
            "free": _format_bytes(disk.free),
            "used": _format_bytes(disk.used),
            "percent": disk.percent,
            "percent": round(disk.percent, 2),
        },
        app={
            "buckets": total_buckets,
@@ -2028,7 +2137,9 @@ def metrics_dashboard():
            "storage_raw": total_bytes_used,
            "version": APP_VERSION,
            "uptime_days": uptime_days,
        }
        },
        metrics_history_enabled=current_app.config.get("METRICS_HISTORY_ENABLED", False),
        operation_metrics_enabled=current_app.config.get("OPERATION_METRICS_ENABLED", False),
    )


@@ -2069,18 +2180,18 @@ def metrics_api():
    uptime_days = int(uptime_seconds / 86400)

    return jsonify({
        "cpu_percent": cpu_percent,
        "cpu_percent": round(cpu_percent, 2),
        "memory": {
            "total": _format_bytes(memory.total),
            "available": _format_bytes(memory.available),
            "used": _format_bytes(memory.used),
            "percent": memory.percent,
            "percent": round(memory.percent, 2),
        },
        "disk": {
            "total": _format_bytes(disk.total),
            "free": _format_bytes(disk.free),
            "used": _format_bytes(disk.used),
            "percent": disk.percent,
            "percent": round(disk.percent, 2),
        },
        "app": {
            "buckets": total_buckets,
@@ -2093,11 +2204,116 @@ def metrics_api():
    })


@ui_bp.route("/metrics/history")
def metrics_history():
    principal = _current_principal()

    try:
        _iam().authorize(principal, None, "iam:list_users")
    except IamError:
        return jsonify({"error": "Access denied"}), 403

    system_metrics = current_app.extensions.get("system_metrics")
    if not system_metrics:
        return jsonify({"enabled": False, "history": []})

    hours = request.args.get("hours", type=int)
    if hours is None:
        hours = current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24)

    history = system_metrics.get_history(hours=hours)

    return jsonify({
        "enabled": True,
        "retention_hours": current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24),
        "interval_minutes": current_app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5),
        "history": history,
    })


@ui_bp.route("/metrics/settings", methods=["GET", "PUT"])
def metrics_settings():
    principal = _current_principal()

    try:
        _iam().authorize(principal, None, "iam:list_users")
    except IamError:
        return jsonify({"error": "Access denied"}), 403

    if request.method == "GET":
        return jsonify({
            "enabled": current_app.config.get("METRICS_HISTORY_ENABLED", False),
            "retention_hours": current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24),
            "interval_minutes": current_app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5),
        })

    data = request.get_json() or {}

    if "enabled" in data:
        current_app.config["METRICS_HISTORY_ENABLED"] = bool(data["enabled"])
    if "retention_hours" in data:
        current_app.config["METRICS_HISTORY_RETENTION_HOURS"] = max(1, int(data["retention_hours"]))
    if "interval_minutes" in data:
        current_app.config["METRICS_HISTORY_INTERVAL_MINUTES"] = max(1, int(data["interval_minutes"]))

    return jsonify({
        "enabled": current_app.config.get("METRICS_HISTORY_ENABLED", False),
        "retention_hours": current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24),
        "interval_minutes": current_app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5),
    })


@ui_bp.get("/metrics/operations")
def metrics_operations():
    principal = _current_principal()

    try:
        _iam().authorize(principal, None, "iam:list_users")
    except IamError:
        return jsonify({"error": "Access denied"}), 403

    collector = _operation_metrics()
    if not collector:
        return jsonify({
            "enabled": False,
            "stats": None,
        })

    return jsonify({
        "enabled": True,
        "stats": collector.get_current_stats(),
    })


@ui_bp.get("/metrics/operations/history")
def metrics_operations_history():
    principal = _current_principal()

    try:
        _iam().authorize(principal, None, "iam:list_users")
    except IamError:
        return jsonify({"error": "Access denied"}), 403

    collector = _operation_metrics()
    if not collector:
        return jsonify({
            "enabled": False,
            "history": [],
        })

    hours = request.args.get("hours", type=int)
    return jsonify({
        "enabled": True,
        "history": collector.get_history(hours),
        "interval_minutes": current_app.config.get("OPERATION_METRICS_INTERVAL_MINUTES", 5),
    })


@ui_bp.route("/buckets/<bucket_name>/lifecycle", methods=["GET", "POST", "DELETE"])
def bucket_lifecycle(bucket_name: str):
    principal = _current_principal()
    try:
        _authorize_ui(principal, bucket_name, "policy")
        _authorize_ui(principal, bucket_name, "lifecycle")
    except IamError as exc:
        return jsonify({"error": str(exc)}), 403

@@ -2150,7 +2366,7 @@ def get_lifecycle_history(bucket_name: str):
    principal = _current_principal()
    try:
        _authorize_ui(principal, bucket_name, "policy")
        _authorize_ui(principal, bucket_name, "lifecycle")
    except IamError:
        return jsonify({"error": "Access denied"}), 403

@@ -2181,7 +2397,7 @@ def bucket_cors(bucket_name: str):
    principal = _current_principal()
    try:
        _authorize_ui(principal, bucket_name, "policy")
        _authorize_ui(principal, bucket_name, "cors")
    except IamError as exc:
        return jsonify({"error": str(exc)}), 403
app/version.py
@@ -1,6 +1,6 @@
from __future__ import annotations

APP_VERSION = "0.2.1"
APP_VERSION = "0.2.3"


def get_version() -> str:
474
docs.md
@@ -122,7 +122,7 @@ With these volumes attached you can rebuild/restart the container without losing

### Versioning

The repo now tracks a human-friendly release string inside `app/version.py` (see the `APP_VERSION` constant). Edit that value whenever you cut a release. The constant flows into Flask as `APP_VERSION` and is exposed via `GET /healthz`, so you can monitor deployments or surface it in UIs.
The repo now tracks a human-friendly release string inside `app/version.py` (see the `APP_VERSION` constant). Edit that value whenever you cut a release. The constant flows into Flask as `APP_VERSION` and is exposed via `GET /myfsio/health`, so you can monitor deployments or surface it in UIs.

## 3. Configuration Reference

@@ -168,6 +168,15 @@ All configuration is done via environment variables. The table below lists every
| `RATE_LIMIT_DEFAULT` | `200 per minute` | Default rate limit for API endpoints. |
| `RATE_LIMIT_STORAGE_URI` | `memory://` | Storage backend for rate limits. Use `redis://host:port` for distributed setups. |

### Server Configuration

| Variable | Default | Notes |
| --- | --- | --- |
| `SERVER_THREADS` | `4` | Waitress worker threads (1-64). More threads handle more concurrent requests but use more memory. |
| `SERVER_CONNECTION_LIMIT` | `100` | Maximum concurrent connections (10-1000). Ensure OS file descriptor limits support this value. |
| `SERVER_BACKLOG` | `1024` | TCP listen backlog (64-4096). Connections queue here when all threads are busy. |
| `SERVER_CHANNEL_TIMEOUT` | `120` | Seconds before idle connections are closed (10-300). |

### Logging

| Variable | Default | Notes |
@@ -277,14 +286,14 @@ The application automatically trusts these headers to generate correct presigned
### Version Checking

The application version is tracked in `app/version.py` and exposed via:
- **Health endpoint:** `GET /healthz` returns JSON with `version` field
- **Health endpoint:** `GET /myfsio/health` returns JSON with `version` field
- **Metrics dashboard:** Navigate to `/ui/metrics` to see the running version in the System Status card

To check your current version:

```bash
# API health endpoint
curl http://localhost:5000/healthz
curl http://localhost:5000/myfsio/health

# Or inspect version.py directly
cat app/version.py | grep APP_VERSION
@@ -377,7 +386,7 @@ docker run -d \
  myfsio:latest

# 5. Verify health
curl http://localhost:5000/healthz
curl http://localhost:5000/myfsio/health
```

### Version Compatibility Checks
@@ -502,7 +511,7 @@ docker run -d \
  myfsio:0.1.3 # specify previous version tag

# 3. Verify
curl http://localhost:5000/healthz
curl http://localhost:5000/myfsio/health
```

#### Emergency Config Restore
@@ -528,7 +537,7 @@ For production environments requiring zero downtime:
APP_PORT=5001 UI_PORT=5101 python run.py &

# 2. Health check new instance
curl http://localhost:5001/healthz
curl http://localhost:5001/myfsio/health

# 3. Update load balancer to route to new ports

@@ -544,7 +553,7 @@ After any update, verify functionality:

```bash
# 1. Health check
curl http://localhost:5000/healthz
curl http://localhost:5000/myfsio/health

# 2. Login to UI
open http://localhost:5100/ui
@@ -588,7 +597,7 @@ APP_PID=$!

# Wait and health check
sleep 5
if curl -f http://localhost:5000/healthz; then
if curl -f http://localhost:5000/myfsio/health; then
  echo "Update successful!"
else
  echo "Health check failed, rolling back..."
@@ -602,6 +611,10 @@ fi

## 4. Authentication & IAM

MyFSIO implements a comprehensive Identity and Access Management (IAM) system that controls who can access your buckets and what operations they can perform. The system supports both simple action-based permissions and AWS-compatible policy syntax.

### Getting Started

1. On first boot, `data/.myfsio.sys/config/iam.json` is seeded with `localadmin / localadmin` that has wildcard access.
2. Sign into the UI using those credentials, then open **IAM**:
   - **Create user**: supply a display name and optional JSON inline policy array.
@@ -609,48 +622,241 @@ fi
   - **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
3. Wildcard action `iam:*` is supported for admin user definitions.

The API expects every request to include `X-Access-Key` and `X-Secret-Key` headers. The UI persists them in the Flask session after login.
### Authentication

The API expects every request to include authentication headers. The UI persists them in the Flask session after login.

| Header | Description |
| --- | --- |
| `X-Access-Key` | The user's access key identifier |
| `X-Secret-Key` | The user's secret key for signing |
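
For illustration, a minimal authenticated request against the API using these headers. The bucket name and credentials are placeholders (the seeded `localadmin` pair from above), and the path-style bucket listing is assumed from the S3-compatible layout; adjust to your deployment:

```python
import requests

BASE = "http://localhost:5000"
HEADERS = {
    "X-Access-Key": "localadmin",  # placeholder credentials
    "X-Secret-Key": "localadmin",
}

# List the contents of a bucket; requests without valid headers are rejected.
resp = requests.get(f"{BASE}/my-bucket", headers=HEADERS, timeout=5)
print(resp.status_code, resp.text[:200])
```
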
**Security Features:**
- **Lockout Protection**: After `AUTH_MAX_ATTEMPTS` (default: 5) failed login attempts, the account is locked for `AUTH_LOCKOUT_MINUTES` (default: 15 minutes).
- **Session Management**: UI sessions remain valid for `SESSION_LIFETIME_DAYS` (default: 30 days).
- **Hot Reload**: IAM configuration changes take effect immediately without restart.

### Permission Model

MyFSIO uses a two-layer permission model:

1. **IAM User Policies** – Define what a user can do across the system (stored in `iam.json`)
2. **Bucket Policies** – Define who can access a specific bucket (stored in `bucket_policies.json`)

Both layers are evaluated for each request. A user must have permission in their IAM policy AND the bucket policy must allow the action (or have no explicit deny).
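
A compact sketch of that evaluation order, mirroring the `_authorize_ui` change in this diff: an explicit bucket-policy deny always wins; otherwise either an IAM grant or an explicit bucket-policy allow suffices. The `"default"` label for "no matching statement" is illustrative, not the store's actual return value:

```python
def is_request_allowed(iam_allowed: bool, policy_decision: str) -> bool:
    # policy_decision: "allow", "deny", or "default" (no matching statement)
    if policy_decision == "deny":  # explicit deny overrides everything
        return False
    return iam_allowed or policy_decision == "allow"

assert is_request_allowed(True, "default")    # IAM grant, no policy objection
assert not is_request_allowed(True, "deny")   # explicit deny wins
assert is_request_allowed(False, "allow")     # a bucket policy can grant on its own
```
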
|
||||
### Available IAM Actions

#### S3 Actions (Bucket/Object Operations)

| Action | Description | AWS Aliases |
| --- | --- | --- |
| `list` | List buckets and objects | `s3:ListBucket`, `s3:ListAllMyBuckets`, `s3:ListBucketVersions`, `s3:ListMultipartUploads`, `s3:ListParts` |
| `read` | Download objects | `s3:GetObject`, `s3:GetObjectVersion`, `s3:GetObjectTagging`, `s3:HeadObject`, `s3:HeadBucket` |
| `write` | Upload objects, create buckets | `s3:PutObject`, `s3:CreateBucket`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
| `delete` | Remove objects and buckets | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteBucket` |
| `share` | Manage ACLs | `s3:PutObjectAcl`, `s3:PutBucketAcl`, `s3:GetBucketAcl` |
| `read` | Download objects, get metadata | `s3:GetObject`, `s3:GetObjectVersion`, `s3:GetObjectTagging`, `s3:GetObjectVersionTagging`, `s3:GetObjectAcl`, `s3:GetBucketVersioning`, `s3:HeadObject`, `s3:HeadBucket` |
| `write` | Upload objects, create buckets, manage tags | `s3:PutObject`, `s3:CreateBucket`, `s3:PutObjectTagging`, `s3:PutBucketVersioning`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
| `delete` | Remove objects, versions, and buckets | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteBucket`, `s3:DeleteObjectTagging` |
| `share` | Manage Access Control Lists (ACLs) | `s3:PutObjectAcl`, `s3:PutBucketAcl`, `s3:GetBucketAcl` |
| `policy` | Manage bucket policies | `s3:PutBucketPolicy`, `s3:GetBucketPolicy`, `s3:DeleteBucketPolicy` |
| `replication` | Configure and manage replication | `s3:GetReplicationConfiguration`, `s3:PutReplicationConfiguration`, `s3:ReplicateObject`, `s3:ReplicateTags`, `s3:ReplicateDelete` |
| `iam:list_users` | View IAM users | `iam:ListUsers` |
| `iam:create_user` | Create IAM users | `iam:CreateUser` |
| `lifecycle` | Manage lifecycle rules | `s3:GetLifecycleConfiguration`, `s3:PutLifecycleConfiguration`, `s3:DeleteLifecycleConfiguration`, `s3:GetBucketLifecycle`, `s3:PutBucketLifecycle` |
| `cors` | Manage CORS configuration | `s3:GetBucketCors`, `s3:PutBucketCors`, `s3:DeleteBucketCors` |
| `replication` | Configure and manage replication | `s3:GetReplicationConfiguration`, `s3:PutReplicationConfiguration`, `s3:DeleteReplicationConfiguration`, `s3:ReplicateObject`, `s3:ReplicateTags`, `s3:ReplicateDelete` |

#### IAM Actions (User Management)

| Action | Description | AWS Aliases |
| --- | --- | --- |
| `iam:list_users` | View all IAM users and their policies | `iam:ListUsers` |
| `iam:create_user` | Create new IAM users | `iam:CreateUser` |
| `iam:delete_user` | Delete IAM users | `iam:DeleteUser` |
| `iam:rotate_key` | Rotate user secrets | `iam:RotateAccessKey` |
| `iam:rotate_key` | Rotate user secret keys | `iam:RotateAccessKey` |
| `iam:update_policy` | Modify user policies | `iam:PutUserPolicy` |
| `iam:*` | All IAM actions (admin wildcard) | — |
| `iam:*` | **Admin wildcard** – grants all IAM actions | — |

### Example Policies
#### Wildcards

| Wildcard | Scope | Description |
| --- | --- | --- |
| `*` (in actions) | All S3 actions | Grants `list`, `read`, `write`, `delete`, `share`, `policy`, `lifecycle`, `cors`, `replication` |
| `iam:*` | All IAM actions | Grants all `iam:*` actions for user management |
| `*` (in bucket) | All buckets | Policy applies to every bucket |

### IAM Policy Structure

User policies are stored as a JSON array of policy objects. Each object specifies a bucket and the allowed actions:

**Full Control (admin):**
```json
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "replication", "iam:*"]}]
[
  {
    "bucket": "<bucket-name-or-wildcard>",
    "actions": ["<action1>", "<action2>", ...]
  }
]
```

**Read-Only:**
**Fields:**
- `bucket`: The bucket name (case-insensitive) or `*` for all buckets
- `actions`: Array of action strings (simple names or AWS aliases)

### Example User Policies

**Full Administrator (complete system access):**
```json
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "lifecycle", "cors", "replication", "iam:*"]}]
```

**Read-Only User (browse and download only):**
```json
[{"bucket": "*", "actions": ["list", "read"]}]
```

**Single Bucket Access (no listing other buckets):**
**Single Bucket Full Access (no access to other buckets):**
```json
[{"bucket": "user-bucket", "actions": ["read", "write", "delete"]}]
[{"bucket": "user-bucket", "actions": ["list", "read", "write", "delete"]}]
```

**Bucket Access with Replication:**
**Multiple Bucket Access (different permissions per bucket):**
```json
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "replication"]}]
[
  {"bucket": "public-data", "actions": ["list", "read"]},
  {"bucket": "my-uploads", "actions": ["list", "read", "write", "delete"]},
  {"bucket": "team-shared", "actions": ["list", "read", "write"]}
]
```

**IAM Manager (manage users but no data access):**
```json
[{"bucket": "*", "actions": ["iam:list_users", "iam:create_user", "iam:delete_user", "iam:rotate_key", "iam:update_policy"]}]
```

**Replication Operator (manage replication only):**
```json
[{"bucket": "*", "actions": ["list", "read", "replication"]}]
```

**Lifecycle Manager (configure object expiration):**
```json
[{"bucket": "*", "actions": ["list", "lifecycle"]}]
```

**CORS Administrator (configure cross-origin access):**
```json
[{"bucket": "*", "actions": ["cors"]}]
```

**Bucket Administrator (full bucket config, no IAM access):**
```json
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "policy", "lifecycle", "cors"]}]
```

**Upload-Only User (write but cannot read back):**
```json
[{"bucket": "drop-box", "actions": ["write"]}]
```

**Backup Operator (read, list, and replicate):**
```json
[{"bucket": "*", "actions": ["list", "read", "replication"]}]
```

### Using AWS-Style Action Names

You can use AWS S3 action names instead of simple names. They are automatically normalized:

```json
[
  {
    "bucket": "my-bucket",
    "actions": [
      "s3:ListBucket",
      "s3:GetObject",
      "s3:PutObject",
      "s3:DeleteObject"
    ]
  }
]
```

This is equivalent to:
```json
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete"]}]
```

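A hedged sketch of what that normalization amounts to (the mapping below is abridged from the alias tables above; the function name is illustrative, not the project's actual code):

```python
# Abridged alias table; the full set is listed in the action tables above.
AWS_ALIASES = {
    "s3:ListBucket": "list",
    "s3:GetObject": "read",
    "s3:PutObject": "write",
    "s3:DeleteObject": "delete",
    "s3:GetBucketPolicy": "policy",
}

def normalize_action(action: str) -> str:
    """Map an AWS-style verb to its simple name; simple names pass through."""
    return AWS_ALIASES.get(action, action)

assert normalize_action("s3:GetObject") == "read"
assert normalize_action("list") == "list"
```
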
### Managing Users via API

```bash
# List all users (requires iam:list_users)
curl http://localhost:5000/iam/users \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Create a new user (requires iam:create_user)
curl -X POST http://localhost:5000/iam/users \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
    "display_name": "New User",
    "policies": [{"bucket": "*", "actions": ["list", "read"]}]
  }'

# Rotate user secret (requires iam:rotate_key)
curl -X POST http://localhost:5000/iam/users/<access-key>/rotate \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Update user policies (requires iam:update_policy)
curl -X PUT http://localhost:5000/iam/users/<access-key>/policies \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '[{"bucket": "*", "actions": ["list", "read", "write"]}]'

# Delete a user (requires iam:delete_user)
curl -X DELETE http://localhost:5000/iam/users/<access-key> \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```

### Permission Precedence

When a request is made, permissions are evaluated in this order:

1. **Authentication** – Verify the access key and secret key are valid
2. **Lockout Check** – Ensure the account is not locked due to failed attempts
3. **IAM Policy Check** – Verify the user has the required action for the target bucket
4. **Bucket Policy Check** – If a bucket policy exists, verify it allows the action

A request is allowed only if:
- The IAM policy grants the action, AND
- The bucket policy allows the action (or no bucket policy exists)
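
In pseudocode, that decision reduces to the sketch below (all helper functions are illustrative placeholders for the checks described above):

```python
def is_request_allowed(user, bucket: str, action: str) -> bool:
    """Both layers must agree, following the four-step precedence above."""
    if not credentials_valid(user):           # 1. authentication
        return False
    if is_locked_out(user.access_key):        # 2. lockout check
        return False
    if not iam_allows(user, bucket, action):  # 3. IAM policy layer
        return False
    policy = bucket_policy_for(bucket)        # 4. bucket policy layer
    return policy is None or policy_allows(policy, user, action)
```
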
### Common Permission Scenarios

| Scenario | Required Actions |
| --- | --- |
| Browse bucket contents | `list` |
| Download a file | `read` |
| Upload a file | `write` |
| Delete a file | `delete` |
| Generate presigned URL (GET) | `read` |
| Generate presigned URL (PUT) | `write` |
| Generate presigned URL (DELETE) | `delete` |
| Enable versioning | `write` (includes `s3:PutBucketVersioning`) |
| View bucket policy | `policy` |
| Modify bucket policy | `policy` |
| Configure lifecycle rules | `lifecycle` |
| View lifecycle rules | `lifecycle` |
| Configure CORS | `cors` |
| View CORS rules | `cors` |
| Configure replication | `replication` (admin-only for creation) |
| Pause/resume replication | `replication` |
| Manage other users | `iam:*` or specific `iam:` actions |
| Set bucket quotas | `iam:*` or `iam:list_users` (admin feature) |

### Security Best Practices

1. **Principle of Least Privilege** – Grant only the permissions users need
2. **Avoid Wildcards** – Use specific bucket names instead of `*` when possible
3. **Rotate Secrets Regularly** – Use the rotate key feature periodically
4. **Separate Admin Accounts** – Don't use admin accounts for daily operations
5. **Monitor Failed Logins** – Check logs for repeated authentication failures
6. **Use Bucket Policies for Fine-Grained Control** – Combine with IAM for defense in depth

## 5. Bucket Policies & Presets

- **Storage**: Policies are persisted in `data/.myfsio.sys/config/bucket_policies.json` under `{"policies": {"bucket": {...}}}`.
@@ -663,7 +869,7 @@ The API expects every request to include `X-Access-Key` and `X-Secret-Key` heade
### Editing via CLI

```bash
curl -X PUT http://127.0.0.1:5000/bucket-policy/test \
curl -X PUT "http://127.0.0.1:5000/test?policy" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
@@ -726,9 +932,8 @@ Drag files directly onto the objects table to upload them to the current bucket
## 6. Presigned URLs

- Trigger from the UI using the **Presign** button after selecting an object.
- Or call `POST /presign/<bucket>/<key>` with JSON `{ "method": "GET", "expires_in": 900 }`.
- Supported methods: `GET`, `PUT`, `DELETE`; expiration must be `1..604800` seconds.
- The service signs requests using the caller’s IAM credentials and enforces bucket policies both when issuing and when the presigned URL is used.
- The service signs requests using the caller's IAM credentials and enforces bucket policies both when issuing and when the presigned URL is used.
- Legacy share links have been removed; presigned URLs now handle both private and public workflows.
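
For instance, a presigned GET link can be minted with a short script like this sketch (bucket, key, and credentials are placeholders; the `url` response field is an assumption about the JSON shape):

```python
import requests

HEADERS = {"X-Access-Key": "AKIAEXAMPLE", "X-Secret-Key": "supersecretexample"}

# Request a presigned GET URL valid for 15 minutes (900 seconds).
resp = requests.post(
    "http://localhost:5000/presign/my-bucket/path/to/file.txt",
    json={"method": "GET", "expires_in": 900},
    headers=HEADERS,
    timeout=5,
)
resp.raise_for_status()
print(resp.json().get("url"))  # assumed field name in the response body
```
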
### Multipart Upload Example

@@ -951,7 +1156,84 @@ curl -X PUT "http://localhost:5000/bucket/<bucket>?quota" \
</Error>
```

## 9. Site Replication
## 9. Operation Metrics

Operation metrics provide real-time visibility into API request statistics, including request counts, latency, error rates, and bandwidth usage.

### Enabling Operation Metrics

By default, operation metrics are disabled. Enable them by setting the environment variable:

```bash
OPERATION_METRICS_ENABLED=true python run.py
```

Or in your `myfsio.env` file:
```
OPERATION_METRICS_ENABLED=true
OPERATION_METRICS_INTERVAL_MINUTES=5
OPERATION_METRICS_RETENTION_HOURS=24
```

### Configuration Options

| Variable | Default | Description |
|----------|---------|-------------|
| `OPERATION_METRICS_ENABLED` | `false` | Enable/disable operation metrics |
| `OPERATION_METRICS_INTERVAL_MINUTES` | `5` | Snapshot interval (minutes) |
| `OPERATION_METRICS_RETENTION_HOURS` | `24` | History retention period (hours) |

### What's Tracked

**Request Statistics:**
- Request counts by HTTP method (GET, PUT, POST, DELETE, HEAD, OPTIONS)
- Response status codes grouped by class (2xx, 3xx, 4xx, 5xx)
- Latency statistics (min, max, average)
- Bytes transferred in/out

**Endpoint Breakdown:**
- `object` - Object operations (GET/PUT/DELETE objects)
- `bucket` - Bucket operations (list, create, delete buckets)
- `ui` - Web UI requests
- `service` - Health checks, internal endpoints
- `kms` - KMS API operations
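
As a rough sketch, the categorisation could be imagined like this (illustrative only; the collector's real rules may differ):

```python
def classify_request_path(path: str) -> str:
    """Illustrative bucketing of request paths into the categories above."""
    if path.startswith("/ui"):
        return "ui"
    if path.startswith("/kms"):
        return "kms"
    if path == "/myfsio/health":
        return "service"
    parts = [p for p in path.split("/") if p]
    if len(parts) == 1:
        return "bucket"   # /<bucket>
    if len(parts) >= 2:
        return "object"   # /<bucket>/<key>
    return "bucket"       # "/" lists buckets
```
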
**S3 Error Codes:**
Tracks API-specific error codes like `NoSuchKey`, `AccessDenied`, `BucketNotFound`. Note: these are separate from HTTP status codes; a 404 from the UI won't appear here, only S3 API errors.

### API Endpoints

```bash
# Get current operation metrics
curl http://localhost:5100/ui/metrics/operations \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Get operation metrics history
curl http://localhost:5100/ui/metrics/operations/history \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Filter history by time range
curl "http://localhost:5100/ui/metrics/operations/history?hours=6" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```

### Storage Location

Operation metrics data is stored at:
```
data/.myfsio.sys/config/operation_metrics.json
```

### UI Dashboard

When enabled, the Metrics page (`/ui/metrics`) shows an "API Operations" section with:
- Summary cards: Requests, Success Rate, Errors, Latency, Bytes In, Bytes Out
- Charts: Requests by Method (doughnut), Requests by Status (bar), Requests by Endpoint (horizontal bar)
- S3 Error Codes table with distribution

Data refreshes every 5 seconds.

## 10. Site Replication

### Permission Model

@@ -966,12 +1248,22 @@ Replication uses a two-tier permission system:

This separation allows administrators to pre-configure where data should replicate, while allowing authorized users to toggle replication on/off without accessing connection credentials.

### Replication Modes

| Mode | Behavior |
|------|----------|
| `new_only` | Only replicate new/modified objects (default) |
| `all` | Sync all existing objects when rule is enabled |
| `bidirectional` | Two-way sync with Last-Write-Wins conflict resolution |

### Architecture

- **Source Instance**: The MyFSIO instance where you upload files. It runs the replication worker.
- **Target Instance**: Another MyFSIO instance (or any S3-compatible service like AWS S3, MinIO) that receives the copies.

Replication is **asynchronous** (happens in the background) and **one-way** (Source -> Target).
For `new_only` and `all` modes, replication is **asynchronous** (happens in the background) and **one-way** (Source -> Target).

For `bidirectional` mode, replication is **two-way** with automatic conflict resolution.

### Setup Guide

@@ -1073,22 +1365,123 @@ When paused, new objects uploaded to the source will not replicate until replica

> **Note:** Only admins can create new replication rules, change the target connection/bucket, or delete rules entirely.

### Bidirectional Replication (Active-Active)
### Bidirectional Site Replication

To set up two-way replication (Server A ↔ Server B):
For true two-way synchronization with automatic conflict resolution, use the `bidirectional` replication mode. This enables a background sync worker that periodically pulls changes from the remote site.

> **Important:** Both sites must be configured to sync with each other. Each site pushes its changes and pulls from the other. You must set up connections and replication rules on both ends.

#### Step 1: Enable Site Sync on Both Sites

Set these environment variables on **both** Site A and Site B:

```bash
SITE_SYNC_ENABLED=true
SITE_SYNC_INTERVAL_SECONDS=60   # How often to pull changes (default: 60)
SITE_SYNC_BATCH_SIZE=100        # Max objects per sync cycle (default: 100)
```

#### Step 2: Create IAM Users for Cross-Site Access

On each site, create an IAM user that the other site will use to connect:

| Site | Create User For | Required Permissions |
|------|-----------------|---------------------|
| Site A | Site B to connect | `read`, `write`, `list`, `delete` on target bucket |
| Site B | Site A to connect | `read`, `write`, `list`, `delete` on target bucket |

Example policy for the replication user:
```json
[{"bucket": "my-bucket", "actions": ["read", "write", "list", "delete"]}]
```

#### Step 3: Create Connections

On each site, add a connection pointing to the other:

**On Site A:**
- Go to **Connections** and add a connection to Site B
- Endpoint: `https://site-b.example.com`
- Credentials: Site B's IAM user (created in Step 2)

**On Site B:**
- Go to **Connections** and add a connection to Site A
- Endpoint: `https://site-a.example.com`
- Credentials: Site A's IAM user (created in Step 2)

#### Step 4: Enable Bidirectional Replication

On each site, go to the bucket's **Replication** tab and enable with mode `bidirectional`:

**On Site A:**
- Source bucket: `my-bucket`
- Target connection: Site B connection
- Target bucket: `my-bucket`
- Mode: **Bidirectional sync**

**On Site B:**
- Source bucket: `my-bucket`
- Target connection: Site A connection
- Target bucket: `my-bucket`
- Mode: **Bidirectional sync**

#### How It Works

- **PUSH**: Local changes replicate to remote immediately on write/delete
- **PULL**: Background worker fetches remote changes every `SITE_SYNC_INTERVAL_SECONDS`
- **Loop Prevention**: `S3ReplicationAgent` and `SiteSyncAgent` User-Agents prevent infinite sync loops (see the sketch below)
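
A minimal sketch of that loop-prevention check (assuming the User-Agent header is available on the incoming request; names are illustrative):

```python
REPLICATION_AGENTS = ("S3ReplicationAgent", "SiteSyncAgent")

def is_replication_traffic(user_agent: str | None) -> bool:
    """Skip re-replicating writes that arrived from another site's worker."""
    return bool(user_agent) and any(a in user_agent for a in REPLICATION_AGENTS)
```
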
#### Conflict Resolution (Last-Write-Wins)

When the same object exists on both sites, the system uses Last-Write-Wins (LWW) based on `last_modified` timestamps:

- **Remote newer**: Pull the remote version
- **Local newer**: Keep the local version
- **Same timestamp**: Use ETag as tiebreaker (higher ETag wins)

A 1-second clock skew tolerance prevents false conflicts from minor time differences.
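
Expressed as code, the rule looks roughly like this sketch (field names are illustrative; the tolerance mirrors the 1-second allowance above):

```python
CLOCK_SKEW_TOLERANCE = 1.0  # seconds

def resolve_conflict(local: dict, remote: dict) -> str:
    """Return 'pull' to take the remote copy or 'keep' to keep the local one."""
    delta = remote["last_modified"] - local["last_modified"]
    if delta > CLOCK_SKEW_TOLERANCE:
        return "pull"   # remote is clearly newer
    if delta < -CLOCK_SKEW_TOLERANCE:
        return "keep"   # local is clearly newer
    # Effectively simultaneous: fall back to the ETag tiebreaker.
    return "pull" if remote["etag"] > local["etag"] else "keep"
```
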
#### Deletion Synchronization

When `sync_deletions=true` (default), remote deletions propagate locally only if:
1. The object was previously synced FROM remote (tracked in sync state)
2. The local version hasn't been modified since last sync

This prevents accidental deletion of local-only objects.
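
Checked against the sync-state record shown in the next subsection, the two guards can be sketched like this (keys taken from that example; the function itself is illustrative):

```python
def should_delete_locally(key: str, sync_state: dict, local_mtime: float) -> bool:
    """Honour a remote deletion only when both guards above pass."""
    entry = sync_state.get("synced_objects", {}).get(key)
    if not entry or entry.get("source") != "remote":
        return False  # never synced from remote: keep the local-only object
    # Reject if the local copy changed after the last successful sync.
    return local_mtime <= entry["last_synced_at"]
```
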
#### Sync State Storage

Sync state is stored at: `data/.myfsio.sys/buckets/<bucket>/site_sync_state.json`

```json
{
  "synced_objects": {
    "path/to/file.txt": {
      "last_synced_at": 1706100000.0,
      "remote_etag": "abc123",
      "source": "remote"
    }
  },
  "last_full_sync": 1706100000.0
}
```

### Legacy Bidirectional Setup (Manual)

For simpler use cases without the site sync worker, you can manually configure two one-way rules:

1. Follow the steps above to replicate **A → B**.
2. Repeat the process on Server B to replicate **B → A**:
   - Create a connection on Server B pointing to Server A.
   - Enable replication on the target bucket on Server B.

**Loop Prevention**: The system automatically detects replication traffic using a custom User-Agent (`S3ReplicationAgent`). This prevents infinite loops where an object replicated from A to B is immediately replicated back to A.
**Loop Prevention**: The system automatically detects replication traffic using custom User-Agents (`S3ReplicationAgent` and `SiteSyncAgent`). This prevents infinite loops where an object replicated from A to B is immediately replicated back to A.

**Deletes**: Deleting an object on one server will propagate the deletion to the other server.

**Note**: Deleting a bucket will automatically remove its associated replication configuration.

## 11. Running Tests
## 12. Running Tests

```bash
pytest -q
@@ -1098,7 +1491,7 @@ The suite now includes a boto3 integration test that spins up a live HTTP server

The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, and regression tests for anonymous reads when a Public policy is attached.

## 12. Troubleshooting
## 13. Troubleshooting

| Symptom | Likely Cause | Fix |
| --- | --- | --- |
@@ -1107,7 +1500,7 @@ The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, an
| Presign modal errors with 403 | IAM user lacks `read/write/delete` for target bucket or bucket policy denies | Update IAM inline policies or remove conflicting deny statements. |
| Large upload rejected immediately | File exceeds `MAX_UPLOAD_SIZE` | Increase env var or shrink object. |

## 13. API Matrix
## 14. API Matrix

```
GET /                            # List buckets
@@ -1117,10 +1510,9 @@ GET /<bucket> # List objects
PUT /<bucket>/<key>              # Upload object
GET /<bucket>/<key>              # Download object
DELETE /<bucket>/<key>           # Delete object
POST /presign/<bucket>/<key>     # Generate SigV4 URL
GET /bucket-policy/<bucket>      # Fetch policy
PUT /bucket-policy/<bucket>      # Upsert policy
DELETE /bucket-policy/<bucket>   # Delete policy
GET /<bucket>?policy             # Fetch policy
PUT /<bucket>?policy             # Upsert policy
DELETE /<bucket>?policy          # Delete policy
GET /<bucket>?quota              # Get bucket quota
PUT /<bucket>?quota              # Set bucket quota (admin only)
```

@@ -8,4 +8,5 @@ requests>=2.32.5
boto3>=1.42.14
waitress>=3.0.2
psutil>=7.1.3
cryptography>=46.0.3
defusedxml>=0.7.1

run.py
@@ -18,6 +18,8 @@ for _env_file in [
    if _env_file.exists():
        load_dotenv(_env_file, override=True)

from typing import Optional

from app import create_api_app, create_ui_app
from app.config import AppConfig

@@ -36,11 +38,23 @@ def _is_frozen() -> bool:
    return getattr(sys, 'frozen', False) or '__compiled__' in globals()


def serve_api(port: int, prod: bool = False) -> None:
def serve_api(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
    app = create_api_app()
    if prod:
        from waitress import serve
        serve(app, host=_server_host(), port=port, ident="MyFSIO")
        if config:
            serve(
                app,
                host=_server_host(),
                port=port,
                ident="MyFSIO",
                threads=config.server_threads,
                connection_limit=config.server_connection_limit,
                backlog=config.server_backlog,
                channel_timeout=config.server_channel_timeout,
            )
        else:
            serve(app, host=_server_host(), port=port, ident="MyFSIO")
    else:
        debug = _is_debug_enabled()
        if debug:
@@ -48,11 +62,23 @@ def serve_api(port: int, prod: bool = False) -> None:
        app.run(host=_server_host(), port=port, debug=debug)


def serve_ui(port: int, prod: bool = False) -> None:
def serve_ui(port: int, prod: bool = False, config: Optional[AppConfig] = None) -> None:
    app = create_ui_app()
    if prod:
        from waitress import serve
        serve(app, host=_server_host(), port=port, ident="MyFSIO")
        if config:
            serve(
                app,
                host=_server_host(),
                port=port,
                ident="MyFSIO",
                threads=config.server_threads,
                connection_limit=config.server_connection_limit,
                backlog=config.server_backlog,
                channel_timeout=config.server_channel_timeout,
            )
        else:
            serve(app, host=_server_host(), port=port, ident="MyFSIO")
    else:
        debug = _is_debug_enabled()
        if debug:
@@ -71,7 +97,6 @@ if __name__ == "__main__":
    parser.add_argument("--show-config", action="store_true", help="Show configuration summary and exit")
    args = parser.parse_args()

    # Handle config check/show modes
    if args.check_config or args.show_config:
        config = AppConfig.from_env()
        config.print_startup_summary()
@@ -81,49 +106,50 @@ if __name__ == "__main__":
        sys.exit(1 if critical else 0)
        sys.exit(0)

    # Default to production mode when running as compiled binary
    # unless --dev is explicitly passed
    prod_mode = args.prod or (_is_frozen() and not args.dev)

    # Validate configuration before starting
    config = AppConfig.from_env()

    # Show startup summary only on first run (when marker file doesn't exist)
    first_run_marker = config.storage_root / ".myfsio.sys" / ".initialized"
    is_first_run = not first_run_marker.exists()

    if is_first_run:
        config.print_startup_summary()

    # Check for critical issues that should prevent startup
    issues = config.validate_and_report()
    critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
    if critical_issues:
        print("ABORTING: Critical configuration issues detected. Fix them before starting.")
        print("ABORTING: Critical configuration issues detected. Please fix them before starting.")
        sys.exit(1)

    # Create the marker file to indicate successful first run
    try:
        first_run_marker.parent.mkdir(parents=True, exist_ok=True)
        first_run_marker.write_text(f"Initialized on {__import__('datetime').datetime.now().isoformat()}\n")
    except OSError:
        pass  # Non-critical, just skip marker creation
        pass

    if prod_mode:
        print("Running in production mode (Waitress)")
        issues = config.validate_and_report()
        critical_issues = [i for i in issues if i.startswith("CRITICAL:")]
        if critical_issues:
            for issue in critical_issues:
                print(f"  {issue}")
            print("ABORTING: Critical configuration issues detected. Please fix them before starting.")
            sys.exit(1)
    else:
        print("Running in development mode (Flask dev server)")

    if args.mode in {"api", "both"}:
        print(f"Starting API server on port {args.api_port}...")
        api_proc = Process(target=serve_api, args=(args.api_port, prod_mode), daemon=True)
        api_proc = Process(target=serve_api, args=(args.api_port, prod_mode, config), daemon=True)
        api_proc.start()
    else:
        api_proc = None

    if args.mode in {"ui", "both"}:
        print(f"Starting UI server on port {args.ui_port}...")
        serve_ui(args.ui_port, prod_mode)
        serve_ui(args.ui_port, prod_mode, config)
    elif api_proc:
        try:
            api_proc.join()

@@ -1,4 +1,4 @@
(function() {
(function () {
  'use strict';

  const { formatBytes, escapeHtml, fallbackCopy, setupJsonAutoIndent } = window.BucketDetailUtils || {
@@ -23,11 +23,62 @@
        .replace(/'/g, '&#39;');
    },
    fallbackCopy: () => false,
    setupJsonAutoIndent: () => {}
    setupJsonAutoIndent: () => { }
  };

  setupJsonAutoIndent(document.getElementById('policyDocument'));

  const getFileTypeIcon = (key) => {
    const ext = (key.split('.').pop() || '').toLowerCase();
    const iconMap = {
      image: ['jpg', 'jpeg', 'png', 'gif', 'svg', 'webp', 'ico', 'bmp', 'tiff', 'tif'],
      document: ['pdf', 'doc', 'docx', 'txt', 'rtf', 'odt', 'pages'],
      spreadsheet: ['xls', 'xlsx', 'csv', 'ods', 'numbers'],
      archive: ['zip', 'rar', '7z', 'tar', 'gz', 'bz2', 'xz', 'tgz'],
      code: ['js', 'ts', 'jsx', 'tsx', 'py', 'java', 'cpp', 'c', 'h', 'hpp', 'cs', 'go', 'rs', 'rb', 'php', 'html', 'htm', 'css', 'scss', 'sass', 'less', 'json', 'xml', 'yaml', 'yml', 'md', 'sh', 'bat', 'ps1', 'sql'],
      audio: ['mp3', 'wav', 'flac', 'ogg', 'aac', 'm4a', 'wma', 'aiff'],
      video: ['mp4', 'avi', 'mov', 'mkv', 'webm', 'wmv', 'flv', 'm4v', 'mpeg', 'mpg'],
    };
    const icons = {
      image: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-success flex-shrink-0" viewBox="0 0 16 16">
        <path d="M6.002 5.5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0z"/>
        <path d="M2.002 1a2 2 0 0 0-2 2v10a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V3a2 2 0 0 0-2-2h-12zm12 1a1 1 0 0 1 1 1v6.5l-3.777-1.947a.5.5 0 0 0-.577.093l-3.71 3.71-2.66-1.772a.5.5 0 0 0-.63.062L1.002 12V3a1 1 0 0 1 1-1h12z"/>
      </svg>`,
      document: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-danger flex-shrink-0" viewBox="0 0 16 16">
        <path d="M14 4.5V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2h5.5L14 4.5zm-3 0A1.5 1.5 0 0 1 9.5 3V1H4a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V4.5h-2z"/>
        <path d="M4.5 12.5A.5.5 0 0 1 5 12h3a.5.5 0 0 1 0 1H5a.5.5 0 0 1-.5-.5zm0-2A.5.5 0 0 1 5 10h6a.5.5 0 0 1 0 1H5a.5.5 0 0 1-.5-.5zm0-2A.5.5 0 0 1 5 8h6a.5.5 0 0 1 0 1H5a.5.5 0 0 1-.5-.5zm0-2A.5.5 0 0 1 5 6h6a.5.5 0 0 1 0 1H5a.5.5 0 0 1-.5-.5z"/>
      </svg>`,
      spreadsheet: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-success flex-shrink-0" viewBox="0 0 16 16">
        <path d="M14 14V4.5L9.5 0H4a2 2 0 0 0-2 2v12a2 2 0 0 0 2 2h8a2 2 0 0 0 2-2zM9.5 3A1.5 1.5 0 0 0 11 4.5h2V9H3V2a1 1 0 0 1 1-1h5.5v2zM3 12v-2h2v2H3zm0 1h2v2H4a1 1 0 0 1-1-1v-1zm3 2v-2h3v2H6zm4 0v-2h3v1a1 1 0 0 1-1 1h-2zm3-3h-3v-2h3v2zm-7 0v-2h3v2H6z"/>
      </svg>`,
      archive: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-secondary flex-shrink-0" viewBox="0 0 16 16">
        <path d="M6.5 7.5a1 1 0 0 1 1-1h1a1 1 0 0 1 1 1v.938l.4 1.599a1 1 0 0 1-.416 1.074l-.93.62a1 1 0 0 1-1.109 0l-.93-.62a1 1 0 0 1-.415-1.074l.4-1.599V7.5z"/>
        <path d="M14 4.5V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2h5.5L14 4.5zm-3 0A1.5 1.5 0 0 1 9.5 3V1h-2v1h-1v1h1v1h-1v1h1v1H6V5H5V4h1V3H5V2h1V1H4a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V4.5h-2z"/>
      </svg>`,
      code: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-info flex-shrink-0" viewBox="0 0 16 16">
        <path d="M14 4.5V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2h5.5L14 4.5zm-3 0A1.5 1.5 0 0 1 9.5 3V1H4a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V4.5h-2z"/>
        <path d="M8.646 6.646a.5.5 0 0 1 .708 0l2 2a.5.5 0 0 1 0 .708l-2 2a.5.5 0 0 1-.708-.708L10.293 9 8.646 7.354a.5.5 0 0 1 0-.708zm-1.292 0a.5.5 0 0 0-.708 0l-2 2a.5.5 0 0 0 0 .708l2 2a.5.5 0 0 0 .708-.708L5.707 9l1.647-1.646a.5.5 0 0 0 0-.708z"/>
      </svg>`,
      audio: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-primary flex-shrink-0" viewBox="0 0 16 16">
        <path d="M6 13c0 1.105-1.12 2-2.5 2S1 14.105 1 13c0-1.104 1.12-2 2.5-2s2.5.896 2.5 2zm9-2c0 1.105-1.12 2-2.5 2s-2.5-.895-2.5-2 1.12-2 2.5-2 2.5.895 2.5 2z"/>
        <path fill-rule="evenodd" d="M14 11V2h1v9h-1zM6 3v10H5V3h1z"/>
        <path d="M5 2.905a1 1 0 0 1 .9-.995l8-.8a1 1 0 0 1 1.1.995V3L5 4V2.905z"/>
      </svg>`,
      video: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-danger flex-shrink-0" viewBox="0 0 16 16">
        <path d="M0 12V4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v8a2 2 0 0 1-2 2H2a2 2 0 0 1-2-2zm6.79-6.907A.5.5 0 0 0 6 5.5v5a.5.5 0 0 0 .79.407l3.5-2.5a.5.5 0 0 0 0-.814l-3.5-2.5z"/>
      </svg>`,
      default: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-muted flex-shrink-0" viewBox="0 0 16 16">
        <path d="M14 4.5V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2h5.5L14 4.5zm-3 0A1.5 1.5 0 0 1 9.5 3V1H4a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V4.5h-2z"/>
      </svg>`,
    };
    for (const [type, extensions] of Object.entries(iconMap)) {
      if (extensions.includes(ext)) {
        return icons[type];
      }
    }
    return icons.default;
  };

  const selectAllCheckbox = document.querySelector('[data-select-all]');
  const bulkDeleteButton = document.querySelector('[data-bulk-delete-trigger]');
  const bulkDeleteLabel = bulkDeleteButton?.querySelector('[data-bulk-delete-label]');
@@ -49,6 +100,7 @@
  const previewPlaceholder = document.getElementById('preview-placeholder');
  const previewImage = document.getElementById('preview-image');
  const previewVideo = document.getElementById('preview-video');
  const previewAudio = document.getElementById('preview-audio');
  const previewIframe = document.getElementById('preview-iframe');
  const downloadButton = document.getElementById('downloadButton');
  const presignButton = document.getElementById('presignButton');
@@ -135,18 +187,20 @@
    tr.dataset.objectRow = '';
    tr.dataset.key = obj.key;
    tr.dataset.size = obj.size;
    tr.dataset.lastModified = obj.lastModified || obj.last_modified;
    tr.dataset.etag = obj.etag;
    tr.dataset.previewUrl = obj.previewUrl || obj.preview_url;
    tr.dataset.downloadUrl = obj.downloadUrl || obj.download_url;
    tr.dataset.presignEndpoint = obj.presignEndpoint || obj.presign_endpoint;
    tr.dataset.deleteEndpoint = obj.deleteEndpoint || obj.delete_endpoint;
    tr.dataset.metadata = typeof obj.metadata === 'string' ? obj.metadata : JSON.stringify(obj.metadata || {});
    tr.dataset.versionsEndpoint = obj.versionsEndpoint || obj.versions_endpoint;
    tr.dataset.restoreTemplate = obj.restoreTemplate || obj.restore_template;
    tr.dataset.tagsUrl = obj.tagsUrl || obj.tags_url;
    tr.dataset.copyUrl = obj.copyUrl || obj.copy_url;
    tr.dataset.moveUrl = obj.moveUrl || obj.move_url;
    tr.dataset.lastModified = obj.lastModified ?? obj.last_modified ?? '';
    tr.dataset.lastModifiedDisplay = obj.lastModifiedDisplay ?? obj.last_modified_display ?? new Date(obj.lastModified || obj.last_modified).toLocaleString();
    tr.dataset.lastModifiedIso = obj.lastModifiedIso ?? obj.last_modified_iso ?? obj.lastModified ?? obj.last_modified ?? '';
    tr.dataset.etag = obj.etag ?? '';
    tr.dataset.previewUrl = obj.previewUrl ?? obj.preview_url ?? '';
    tr.dataset.downloadUrl = obj.downloadUrl ?? obj.download_url ?? '';
    tr.dataset.presignEndpoint = obj.presignEndpoint ?? obj.presign_endpoint ?? '';
    tr.dataset.deleteEndpoint = obj.deleteEndpoint ?? obj.delete_endpoint ?? '';
    tr.dataset.metadataUrl = obj.metadataUrl ?? obj.metadata_url ?? '';
    tr.dataset.versionsEndpoint = obj.versionsEndpoint ?? obj.versions_endpoint ?? '';
    tr.dataset.restoreTemplate = obj.restoreTemplate ?? obj.restore_template ?? '';
    tr.dataset.tagsUrl = obj.tagsUrl ?? obj.tags_url ?? '';
    tr.dataset.copyUrl = obj.copyUrl ?? obj.copy_url ?? '';
    tr.dataset.moveUrl = obj.moveUrl ?? obj.move_url ?? '';

    const keyToShow = displayKey || obj.key;
    const lastModDisplay = obj.lastModifiedDisplay || obj.last_modified_display || new Date(obj.lastModified || obj.last_modified).toLocaleDateString();
@@ -156,8 +210,11 @@
        <input class="form-check-input" type="checkbox" data-object-select aria-label="Select ${escapeHtml(obj.key)}" />
      </td>
      <td class="object-key text-break" title="${escapeHtml(obj.key)}">
        <div class="fw-medium">${escapeHtml(keyToShow)}</div>
        <div class="text-muted small">Modified ${escapeHtml(lastModDisplay)}</div>
        <div class="fw-medium d-flex align-items-center gap-2">
          ${getFileTypeIcon(obj.key)}
          <span>${escapeHtml(keyToShow)}</span>
        </div>
        <div class="text-muted small ms-4 ps-2">Modified ${escapeHtml(lastModDisplay)}</div>
      </td>
      <td class="text-end text-nowrap">
        <span class="text-muted small">${formatBytes(obj.size)}</span>
@@ -323,7 +380,7 @@
      const bKey = b.type === 'folder' ? b.path : b.data.key;
      return aKey.localeCompare(bKey);
    });

    return items;
  };

@@ -400,14 +457,14 @@
    } else {
      renderVirtualRows();
    }

    updateFolderViewStatus();
  };

  const updateFolderViewStatus = () => {
    const folderViewStatusEl = document.getElementById('folder-view-status');
    if (!folderViewStatusEl) return;

    if (currentPrefix) {
      const folderCount = visibleItems.filter(i => i.type === 'folder').length;
      const fileCount = visibleItems.filter(i => i.type === 'file').length;
@@ -425,12 +482,13 @@
      size: obj.size,
      lastModified: obj.last_modified,
      lastModifiedDisplay: obj.last_modified_display,
      lastModifiedIso: obj.last_modified_iso,
      etag: obj.etag,
      previewUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.preview, key) : '',
      downloadUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.download, key) : '',
      presignEndpoint: urlTemplates ? buildUrlFromTemplate(urlTemplates.presign, key) : '',
      deleteEndpoint: urlTemplates ? buildUrlFromTemplate(urlTemplates.delete, key) : '',
      metadata: '{}',
      metadataUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.metadata, key) : '',
      versionsEndpoint: urlTemplates ? buildUrlFromTemplate(urlTemplates.versions, key) : '',
      restoreTemplate: urlTemplates ? urlTemplates.restore.replace('KEY_PLACEHOLDER', encodeURIComponent(key).replace(/%2F/g, '/')) : '',
      tagsUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.tags, key) : '',
@@ -548,7 +606,7 @@
        } else if (msg.type === 'done') {
          streamingComplete = true;
        }
      } catch (e) {}
      } catch (e) { }
    }

    flushPendingStreamObjects();
@@ -559,9 +617,6 @@
    if (loadMoreStatus) {
      loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()} objects`;
    }
    if (typeof updateLoadMoreButton === 'function') {
      updateLoadMoreButton();
    }
    refreshVirtualList();
    renderBreadcrumb(currentPrefix);

@@ -640,10 +695,6 @@
      }
    }

    if (typeof updateLoadMoreButton === 'function') {
      updateLoadMoreButton();
    }

    refreshVirtualList();
    renderBreadcrumb(currentPrefix);

@@ -694,20 +745,20 @@
    selectCheckbox?.addEventListener('change', () => {
      toggleRowSelection(row, selectCheckbox.checked);
    });

    if (selectedRows.has(row.dataset.key)) {
      selectCheckbox.checked = true;
      row.classList.add('table-active');
    }
  });

  const folderRows = document.querySelectorAll('.folder-row');
  folderRows.forEach(row => {
    if (row.dataset.handlersAttached) return;
    row.dataset.handlersAttached = 'true';

    const folderPath = row.dataset.folderPath;

    const checkbox = row.querySelector('[data-folder-select]');
    checkbox?.addEventListener('change', (e) => {
      e.stopPropagation();
@@ -727,7 +778,7 @@
      e.stopPropagation();
      navigateToFolder(folderPath);
    });

    row.addEventListener('click', (e) => {
      if (e.target.closest('[data-folder-select]') || e.target.closest('button')) return;
      navigateToFolder(folderPath);
@@ -739,24 +790,11 @@

  const scrollSentinel = document.getElementById('scroll-sentinel');
  const scrollContainer = document.querySelector('.objects-table-container');
  const loadMoreBtn = document.getElementById('load-more-btn');

  if (scrollContainer) {
    scrollContainer.addEventListener('scroll', handleVirtualScroll, { passive: true });
  }

  loadMoreBtn?.addEventListener('click', () => {
    if (hasMoreObjects && !isLoadingObjects) {
      loadObjects(true);
    }
  });

  function updateLoadMoreButton() {
    if (loadMoreBtn) {
      loadMoreBtn.classList.toggle('d-none', !hasMoreObjects);
    }
  }

  if (scrollSentinel && scrollContainer) {
    const containerObserver = new IntersectionObserver((entries) => {
      entries.forEach(entry => {
@@ -770,7 +808,7 @@
      threshold: 0
    });
    containerObserver.observe(scrollSentinel);

    const viewportObserver = new IntersectionObserver((entries) => {
      entries.forEach(entry => {
        if (entry.isIntersecting && hasMoreObjects && !isLoadingObjects) {
@@ -785,10 +823,6 @@
    viewportObserver.observe(scrollSentinel);
  }

  const pageSizeSelect = document.getElementById('page-size-select');
  pageSizeSelect?.addEventListener('change', (e) => {
    pageSize = parseInt(e.target.value, 10);
  });

  if (objectsApiUrl) {
    loadObjects();
@@ -805,7 +839,7 @@
      if (e.target.closest('[data-delete-object]') || e.target.closest('[data-object-select]') || e.target.closest('a')) {
        return;
      }

      selectRow(row);
    });
  }
@@ -815,14 +849,14 @@
  const getFoldersAtPrefix = (prefix) => {
    const folders = new Set();
    const files = [];

    allObjects.forEach(obj => {
      const key = obj.key;
      if (!key.startsWith(prefix)) return;

      const remainder = key.slice(prefix.length);
      const slashIndex = remainder.indexOf('/');

      if (slashIndex === -1) {

        files.push(obj);
@@ -832,7 +866,7 @@
        folders.add(prefix + folderName);
      }
    });

    return { folders: Array.from(folders).sort(), files };
  };

@@ -843,12 +877,12 @@

  const renderBreadcrumb = (prefix) => {
    if (!folderBreadcrumb) return;

    if (!prefix && !hasFolders()) {
      folderBreadcrumb.classList.add('d-none');
      return;
    }

    folderBreadcrumb.classList.remove('d-none');
    const ol = folderBreadcrumb.querySelector('ol');
    ol.innerHTML = '';
@@ -883,7 +917,7 @@
      accumulated += part + '/';
      const li = document.createElement('li');
      li.className = 'breadcrumb-item';

      if (index === parts.length - 1) {
        li.classList.add('active');
        li.setAttribute('aria-current', 'page');
@@ -916,12 +950,12 @@
    const folderName = displayName || folderPath.slice(currentPrefix.length).replace(/\/$/, '');
    const { count: objectCount, mayHaveMore } = countObjectsInFolder(folderPath);
    const countDisplay = mayHaveMore ? `${objectCount}+` : objectCount;

    const tr = document.createElement('tr');
    tr.className = 'folder-row';
    tr.dataset.folderPath = folderPath;
    tr.style.cursor = 'pointer';

    tr.innerHTML = `
      <td class="text-center align-middle" onclick="event.stopPropagation();">
        <input class="form-check-input" type="checkbox" data-folder-select="${escapeHtml(folderPath)}" aria-label="Select folder" />
@@ -946,7 +980,7 @@
        </button>
      </td>
    `;

    return tr;
  };

@@ -971,7 +1005,7 @@

  const renderObjectsView = () => {
    if (!objectsTableBody) return;

    const { folders, files } = getFoldersAtPrefix(currentPrefix);

    objectsTableBody.innerHTML = '';
@@ -1378,15 +1412,30 @@
    }
  };

  const INTERNAL_METADATA_KEYS = new Set([
    '__etag__',
    '__size__',
    '__content_type__',
    '__last_modified__',
    '__storage_class__',
  ]);

  const isInternalKey = (key) => INTERNAL_METADATA_KEYS.has(key.toLowerCase());

  const renderMetadata = (metadata) => {
    if (!previewMetadata || !previewMetadataList) return;
    previewMetadataList.innerHTML = '';
    if (!metadata || Object.keys(metadata).length === 0) {
    if (!metadata) {
      previewMetadata.classList.add('d-none');
      return;
    }
    const userMetadata = Object.entries(metadata).filter(([key]) => !isInternalKey(key));
    if (userMetadata.length === 0) {
      previewMetadata.classList.add('d-none');
      return;
    }
    previewMetadata.classList.remove('d-none');
    Object.entries(metadata).forEach(([key, value]) => {
    userMetadata.forEach(([key, value]) => {
      const wrapper = document.createElement('div');
      wrapper.className = 'metadata-entry';
      const label = document.createElement('div');
@@ -1421,11 +1470,11 @@
    const metadata = version.metadata && typeof version.metadata === 'object' ? Object.entries(version.metadata) : [];
    const metadataHtml = metadata.length
      ? `<div class="mt-3"><div class="fw-semibold text-uppercase small">Metadata</div><hr class="my-2"><div class="metadata-stack small">${metadata
        .map(
          ([key, value]) =>
            `<div class="metadata-entry"><div class="metadata-key small">${escapeHtml(key)}</div><div class="metadata-value text-break">${escapeHtml(value)}</div></div>`
        )
        .join('')}</div></div>`
      .map(
        ([key, value]) =>
          `<div class="metadata-entry"><div class="metadata-key small">${escapeHtml(key)}</div><div class="metadata-value text-break">${escapeHtml(value)}</div></div>`
      )
      .join('')}</div></div>`
      : '';
    const summaryHtml = `
      <div class="small">
@@ -1697,7 +1746,7 @@
    if (!endpoint) {
      versionPanel.classList.add('d-none');
      return;
    }
    versionPanel.classList.remove('d-none');
    if (!force && versionsCache.has(endpoint)) {
      renderVersionEntries(versionsCache.get(endpoint), row);
@@ -1778,9 +1827,10 @@
  }

  const resetPreviewMedia = () => {
    [previewImage, previewVideo, previewIframe].forEach((el) => {
    [previewImage, previewVideo, previewAudio, previewIframe].forEach((el) => {
      if (!el) return;
      el.classList.add('d-none');
      if (el.tagName === 'VIDEO') {
      if (el.tagName === 'VIDEO' || el.tagName === 'AUDIO') {
        el.pause();
        el.removeAttribute('src');
      }
@@ -1791,32 +1841,31 @@
    previewPlaceholder.classList.remove('d-none');
  };

  function metadataFromRow(row) {
    if (!row || !row.dataset.metadata) {
      return null;
    }
  async function fetchMetadata(metadataUrl) {
    if (!metadataUrl) return null;
    try {
      const parsed = JSON.parse(row.dataset.metadata);
      if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
        return parsed;
      const resp = await fetch(metadataUrl);
      if (resp.ok) {
        const data = await resp.json();
        return data.metadata || {};
      }
    } catch (err) {
      console.warn('Failed to parse metadata for row', err);
    } catch (e) {
      console.warn('Failed to load metadata', e);
    }
    return null;
  }

  function selectRow(row) {
  async function selectRow(row) {
    document.querySelectorAll('[data-object-row]').forEach((r) => r.classList.remove('table-active'));
    row.classList.add('table-active');
    previewEmpty.classList.add('d-none');
    previewPanel.classList.remove('d-none');
    activeRow = row;
    renderMetadata(metadataFromRow(row));
    renderMetadata(null);

    previewKey.textContent = row.dataset.key;
    previewSize.textContent = formatBytes(Number(row.dataset.size));
    previewModified.textContent = row.dataset.lastModified;
    previewModified.textContent = row.dataset.lastModifiedIso || row.dataset.lastModified;
    previewEtag.textContent = row.dataset.etag;
    downloadButton.href = row.dataset.downloadUrl;
    downloadButton.classList.remove('disabled');
@@ -1835,18 +1884,36 @@
    resetPreviewMedia();
    const previewUrl = row.dataset.previewUrl;
    const lower = row.dataset.key.toLowerCase();
    if (lower.match(/\.(png|jpg|jpeg|gif|webp|svg)$/)) {
    if (previewUrl && lower.match(/\.(png|jpg|jpeg|gif|webp|svg|ico|bmp)$/)) {
      previewImage.src = previewUrl;
      previewImage.classList.remove('d-none');
      previewPlaceholder.classList.add('d-none');
    } else if (lower.match(/\.(mp4|webm|ogg)$/)) {
    } else if (previewUrl && lower.match(/\.(mp4|webm|ogv|mov|avi|mkv)$/)) {
      previewVideo.src = previewUrl;
      previewVideo.classList.remove('d-none');
      previewPlaceholder.classList.add('d-none');
    } else if (lower.match(/\.(txt|log|json|md|csv)$/)) {
    } else if (previewUrl && lower.match(/\.(mp3|wav|flac|ogg|aac|m4a|wma)$/)) {
      previewAudio.src = previewUrl;
      previewAudio.classList.remove('d-none');
      previewPlaceholder.classList.add('d-none');
    } else if (previewUrl && lower.match(/\.(pdf)$/)) {
      previewIframe.src = previewUrl;
      previewIframe.style.minHeight = '500px';
      previewIframe.classList.remove('d-none');
      previewPlaceholder.classList.add('d-none');
    } else if (previewUrl && lower.match(/\.(txt|log|json|md|csv|xml|html|htm|js|ts|py|java|c|cpp|h|css|scss|yaml|yml|toml|ini|cfg|conf|sh|bat)$/)) {
      previewIframe.src = previewUrl;
      previewIframe.style.minHeight = '200px';
      previewIframe.classList.remove('d-none');
      previewPlaceholder.classList.add('d-none');
    }

    const metadataUrl = row.dataset.metadataUrl;
    if (metadataUrl) {
      const metadata = await fetchMetadata(metadataUrl);
      if (activeRow === row) {
        renderMetadata(metadata);
      }
    }
  }

@@ -1937,7 +2004,7 @@
    textArea.remove();
    return success;
  };

  let copied = false;

  if (navigator.clipboard && window.isSecureContext) {
@@ -1952,7 +2019,7 @@
  if (!copied) {
    copied = fallbackCopy(presignLink.value);
  }

  if (copied) {
    copyPresignLink.textContent = 'Copied!';
    window.setTimeout(() => {
@@ -2064,7 +2131,7 @@
    uploadCancelled = true;

    activeXHRs.forEach(xhr => {
      try { xhr.abort(); } catch {}
      try { xhr.abort(); } catch { }
    });
    activeXHRs = [];

@@ -2073,7 +2140,7 @@
    const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;
    try {
      await fetch(abortUrl, { method: 'DELETE', headers: { 'X-CSRFToken': csrfToken || '' } });
    } catch {}
    } catch { }
    activeMultipartUpload = null;
  }

@@ -2299,7 +2366,7 @@
    if (!uploadCancelled) {
      try {
        await fetch(abortUrl, { method: 'DELETE', headers: { 'X-CSRFToken': csrfToken || '' } });
      } catch {}
      } catch { }
    }
    activeMultipartUpload = null;
    throw err;
@@ -2612,7 +2679,7 @@
  uploadForm.addEventListener('submit', async (event) => {
    const files = uploadFileInput.files;
    if (!files || files.length === 0) return;

    const keyPrefix = (uploadKeyPrefix?.value || '').trim();

    if (files.length === 1 && !keyPrefix) {
@@ -2633,7 +2700,7 @@
      uploadSubmitBtn.disabled = true;
      if (uploadBtnText) uploadBtnText.textContent = 'Uploading...';
    }

    await performBulkUpload(Array.from(files));
  });

@@ -2834,7 +2901,7 @@
      }
    }
    if (statusAlert) statusAlert.classList.add('d-none');

    // Update status badge to show "Paused" with warning styling
    if (statusBadge) {
      statusBadge.className = 'badge bg-warning-subtle text-warning px-3 py-2';
@@ -2844,14 +2911,14 @@
        </svg>
        <span>Paused (Endpoint Unavailable)</span>`;
    }

    // Hide the pause button since replication is effectively already paused
    if (pauseForm) pauseForm.classList.add('d-none');
  } else {
    // Hide warning and show success alert
    if (endpointWarning) endpointWarning.classList.add('d-none');
    if (statusAlert) statusAlert.classList.remove('d-none');

    // Restore status badge to show "Enabled"
    if (statusBadge) {
      statusBadge.className = 'badge bg-success-subtle text-success px-3 py-2';
@@ -2861,7 +2928,7 @@
        </svg>
        <span>Enabled</span>`;
    }

    // Show the pause button
    if (pauseForm) pauseForm.classList.remove('d-none');
  }
@@ -3098,7 +3165,7 @@

  const targetBucketInput = document.getElementById('target_bucket');
  const targetBucketFeedback = document.getElementById('target_bucket_feedback');

  const validateBucketName = (name) => {
    if (!name) return { valid: false, error: 'Bucket name is required' };
    if (name.length < 3) return { valid: false, error: 'Bucket name must be at least 3 characters' };
@@ -3201,7 +3268,7 @@

  const loadLifecycleRules = async () => {
    if (!lifecycleUrl || !lifecycleRulesBody) return;
    lifecycleRulesBody.innerHTML = '<tr><td colspan="6" class="text-center text-muted py-4"><div class="spinner-border spinner-border-sm me-2" role="status"></div>Loading...</td></tr>';
    lifecycleRulesBody.innerHTML = '<tr><td colspan="7" class="text-center text-muted py-4"><div class="spinner-border spinner-border-sm me-2" role="status"></div>Loading...</td></tr>';
    try {
      const resp = await fetch(lifecycleUrl);
      const data = await resp.json();
@@ -3209,19 +3276,20 @@
      lifecycleRules = data.rules || [];
      renderLifecycleRules();
    } catch (err) {
      lifecycleRulesBody.innerHTML = `<tr><td colspan="6" class="text-center text-danger py-4">${escapeHtml(err.message)}</td></tr>`;
      lifecycleRulesBody.innerHTML = `<tr><td colspan="7" class="text-center text-danger py-4">${escapeHtml(err.message)}</td></tr>`;
    }
  };

  const renderLifecycleRules = () => {
    if (!lifecycleRulesBody) return;
    if (lifecycleRules.length === 0) {
      lifecycleRulesBody.innerHTML = '<tr><td colspan="6" class="text-center text-muted py-4">No lifecycle rules configured</td></tr>';
      lifecycleRulesBody.innerHTML = '<tr><td colspan="7" class="text-center text-muted py-4">No lifecycle rules configured</td></tr>';
      return;
    }
    lifecycleRulesBody.innerHTML = lifecycleRules.map((rule, idx) => {
      const expiration = rule.Expiration?.Days ? `${rule.Expiration.Days}d` : '-';
      const noncurrent = rule.NoncurrentVersionExpiration?.NoncurrentDays ? `${rule.NoncurrentVersionExpiration.NoncurrentDays}d` : '-';
      const abortMpu = rule.AbortIncompleteMultipartUpload?.DaysAfterInitiation ? `${rule.AbortIncompleteMultipartUpload.DaysAfterInitiation}d` : '-';
      const statusClass = rule.Status === 'Enabled' ? 'bg-success' : 'bg-secondary';
      return `<tr>
        <td><code class="small">${escapeHtml(rule.ID || '')}</code></td>
@@ -3229,6 +3297,7 @@
        <td><span class="badge ${statusClass}">${escapeHtml(rule.Status)}</span></td>
        <td class="small">${expiration}</td>
        <td class="small">${noncurrent}</td>
        <td class="small">${abortMpu}</td>
        <td class="text-end">
          <div class="btn-group btn-group-sm">
            <button class="btn btn-outline-secondary" onclick="editLifecycleRule(${idx})" title="Edit rule">
@@ -3514,7 +3583,7 @@
    });
  });

  document.getElementById('objects-table')?.addEventListener('show.bs.dropdown', function(e) {
  document.getElementById('objects-table')?.addEventListener('show.bs.dropdown', function (e) {
    const dropdown = e.target.closest('.dropdown');
    const menu = dropdown?.querySelector('.dropdown-menu');
    const btn = e.target;
@@ -3706,8 +3775,8 @@
  });

  const originalSelectRow = selectRow;
  selectRow = (row) => {
    originalSelectRow(row);
  selectRow = async (row) => {
    await originalSelectRow(row);
    loadObjectTags(row);
  };

@@ -3813,18 +3882,18 @@
  var form = document.getElementById(formId);
  if (!form) return;

  form.addEventListener('submit', function(e) {
  form.addEventListener('submit', function (e) {
    e.preventDefault();
    window.UICore.submitFormAjax(form, {
      successMessage: options.successMessage || 'Operation completed',
      onSuccess: function(data) {
      onSuccess: function (data) {
        if (options.onSuccess) options.onSuccess(data);
        if (options.closeModal) {
          var modal = bootstrap.Modal.getInstance(document.getElementById(options.closeModal));
          if (modal) modal.hide();
        }
        if (options.reload) {
          setTimeout(function() { location.reload(); }, 500);
          setTimeout(function () { location.reload(); }, 500);
        }
      }
    });
@@ -3879,11 +3948,11 @@
  var newForm = document.getElementById('enableVersioningForm');
  if (newForm) {
    newForm.setAttribute('action', window.BucketDetailConfig?.endpoints?.versioning || '');
    newForm.addEventListener('submit', function(e) {
    newForm.addEventListener('submit', function (e) {
      e.preventDefault();
      window.UICore.submitFormAjax(newForm, {
        successMessage: 'Versioning enabled',
        onSuccess: function() {
        onSuccess: function () {
          updateVersioningBadge(true);
          updateVersioningCard(true);
        }
@@ -3973,7 +4042,7 @@
        '<p class="mb-0 small">No bucket policy is attached. Access is controlled by IAM policies only.</p></div>';
      }
    }
    document.querySelectorAll('.preset-btn').forEach(function(btn) {
    document.querySelectorAll('.preset-btn').forEach(function (btn) {
      btn.classList.remove('active');
      if (btn.dataset.preset === preset) btn.classList.add('active');
    });
@@ -3987,7 +4056,7 @@

  interceptForm('enableVersioningForm', {
|
||||
successMessage: 'Versioning enabled',
|
||||
onSuccess: function(data) {
|
||||
onSuccess: function (data) {
|
||||
updateVersioningBadge(true);
|
||||
updateVersioningCard(true);
|
||||
}
|
||||
@@ -3996,7 +4065,7 @@
|
||||
interceptForm('suspendVersioningForm', {
|
||||
successMessage: 'Versioning suspended',
|
||||
closeModal: 'suspendVersioningModal',
|
||||
onSuccess: function(data) {
|
||||
onSuccess: function (data) {
|
||||
updateVersioningBadge(false);
|
||||
updateVersioningCard(false);
|
||||
}
|
||||
@@ -4004,36 +4073,36 @@
|
||||
|
||||
interceptForm('encryptionForm', {
|
||||
successMessage: 'Encryption settings saved',
|
||||
onSuccess: function(data) {
|
||||
onSuccess: function (data) {
|
||||
updateEncryptionCard(data.enabled !== false, data.algorithm || 'AES256');
|
||||
}
|
||||
});
|
||||
|
||||
interceptForm('quotaForm', {
|
||||
successMessage: 'Quota settings saved',
|
||||
onSuccess: function(data) {
|
||||
onSuccess: function (data) {
|
||||
updateQuotaCard(data.has_quota, data.max_bytes, data.max_objects);
|
||||
}
|
||||
});
|
||||
|
||||
interceptForm('bucketPolicyForm', {
|
||||
successMessage: 'Bucket policy saved',
|
||||
onSuccess: function(data) {
|
||||
onSuccess: function (data) {
|
||||
var policyModeEl = document.getElementById('policyMode');
|
||||
var policyPresetEl = document.getElementById('policyPreset');
|
||||
var preset = policyModeEl && policyModeEl.value === 'delete' ? 'private' :
|
||||
(policyPresetEl?.value || 'custom');
|
||||
(policyPresetEl?.value || 'custom');
|
||||
updatePolicyCard(preset !== 'private', preset);
|
||||
}
|
||||
});
|
||||
|
||||
var deletePolicyForm = document.getElementById('deletePolicyForm');
|
||||
if (deletePolicyForm) {
|
||||
deletePolicyForm.addEventListener('submit', function(e) {
|
||||
deletePolicyForm.addEventListener('submit', function (e) {
|
||||
e.preventDefault();
|
||||
window.UICore.submitFormAjax(deletePolicyForm, {
|
||||
successMessage: 'Bucket policy deleted',
|
||||
onSuccess: function(data) {
|
||||
onSuccess: function (data) {
|
||||
var modal = bootstrap.Modal.getInstance(document.getElementById('deletePolicyModal'));
|
||||
if (modal) modal.hide();
|
||||
updatePolicyCard(false, 'private');
|
||||
@@ -4046,13 +4115,13 @@
|
||||
|
||||
var disableEncBtn = document.getElementById('disableEncryptionBtn');
|
||||
if (disableEncBtn) {
|
||||
disableEncBtn.addEventListener('click', function() {
|
||||
disableEncBtn.addEventListener('click', function () {
|
||||
var form = document.getElementById('encryptionForm');
|
||||
if (!form) return;
|
||||
document.getElementById('encryptionAction').value = 'disable';
|
||||
window.UICore.submitFormAjax(form, {
|
||||
successMessage: 'Encryption disabled',
|
||||
onSuccess: function(data) {
|
||||
onSuccess: function (data) {
|
||||
document.getElementById('encryptionAction').value = 'enable';
|
||||
updateEncryptionCard(false, null);
|
||||
}
|
||||
@@ -4062,13 +4131,13 @@
|
||||
|
||||
var removeQuotaBtn = document.getElementById('removeQuotaBtn');
|
||||
if (removeQuotaBtn) {
|
||||
removeQuotaBtn.addEventListener('click', function() {
|
||||
removeQuotaBtn.addEventListener('click', function () {
|
||||
var form = document.getElementById('quotaForm');
|
||||
if (!form) return;
|
||||
document.getElementById('quotaAction').value = 'remove';
|
||||
window.UICore.submitFormAjax(form, {
|
||||
successMessage: 'Quota removed',
|
||||
onSuccess: function(data) {
|
||||
onSuccess: function (data) {
|
||||
document.getElementById('quotaAction').value = 'set';
|
||||
updateQuotaCard(false, null, null);
|
||||
}
|
||||
@@ -4082,39 +4151,39 @@
|
||||
fetch(window.location.pathname + '?tab=replication', {
|
||||
headers: { 'X-Requested-With': 'XMLHttpRequest' }
|
||||
})
|
||||
.then(function(resp) { return resp.text(); })
|
||||
.then(function(html) {
|
||||
var parser = new DOMParser();
|
||||
var doc = parser.parseFromString(html, 'text/html');
|
||||
var newPane = doc.getElementById('replication-pane');
|
||||
if (newPane) {
|
||||
replicationPane.innerHTML = newPane.innerHTML;
|
||||
initReplicationForms();
|
||||
initReplicationStats();
|
||||
}
|
||||
})
|
||||
.catch(function(err) {
|
||||
console.error('Failed to reload replication pane:', err);
|
||||
});
|
||||
.then(function (resp) { return resp.text(); })
|
||||
.then(function (html) {
|
||||
var parser = new DOMParser();
|
||||
var doc = parser.parseFromString(html, 'text/html');
|
||||
var newPane = doc.getElementById('replication-pane');
|
||||
if (newPane) {
|
||||
replicationPane.innerHTML = newPane.innerHTML;
|
||||
initReplicationForms();
|
||||
initReplicationStats();
|
||||
}
|
||||
})
|
||||
.catch(function (err) {
|
||||
console.error('Failed to reload replication pane:', err);
|
||||
});
|
||||
}
|
||||
|
||||
function initReplicationForms() {
|
||||
document.querySelectorAll('form[action*="replication"]').forEach(function(form) {
|
||||
document.querySelectorAll('form[action*="replication"]').forEach(function (form) {
|
||||
if (form.dataset.ajaxBound) return;
|
||||
form.dataset.ajaxBound = 'true';
|
||||
var actionInput = form.querySelector('input[name="action"]');
|
||||
if (!actionInput) return;
|
||||
var action = actionInput.value;
|
||||
|
||||
form.addEventListener('submit', function(e) {
|
||||
form.addEventListener('submit', function (e) {
|
||||
e.preventDefault();
|
||||
var msg = action === 'pause' ? 'Replication paused' :
|
||||
action === 'resume' ? 'Replication resumed' :
|
||||
action === 'delete' ? 'Replication disabled' :
|
||||
action === 'create' ? 'Replication configured' : 'Operation completed';
|
||||
action === 'resume' ? 'Replication resumed' :
|
||||
action === 'delete' ? 'Replication disabled' :
|
||||
action === 'create' ? 'Replication configured' : 'Operation completed';
|
||||
window.UICore.submitFormAjax(form, {
|
||||
successMessage: msg,
|
||||
onSuccess: function(data) {
|
||||
onSuccess: function (data) {
|
||||
var modal = bootstrap.Modal.getInstance(document.getElementById('disableReplicationModal'));
|
||||
if (modal) modal.hide();
|
||||
reloadReplicationPane();
|
||||
@@ -4136,14 +4205,14 @@
|
||||
var bytesEl = statsContainer.querySelector('[data-stat="bytes"]');
|
||||
|
||||
fetch(statusEndpoint)
|
||||
.then(function(resp) { return resp.json(); })
|
||||
.then(function(data) {
|
||||
.then(function (resp) { return resp.json(); })
|
||||
.then(function (data) {
|
||||
if (syncedEl) syncedEl.textContent = data.objects_synced || 0;
|
||||
if (pendingEl) pendingEl.textContent = data.objects_pending || 0;
|
||||
if (orphanedEl) orphanedEl.textContent = data.objects_orphaned || 0;
|
||||
if (bytesEl) bytesEl.textContent = formatBytes(data.bytes_synced || 0);
|
||||
})
|
||||
.catch(function(err) {
|
||||
.catch(function (err) {
|
||||
console.error('Failed to load replication stats:', err);
|
||||
});
|
||||
}
|
||||
@@ -4153,10 +4222,10 @@
|
||||
|
||||
var deleteBucketForm = document.getElementById('deleteBucketForm');
|
||||
if (deleteBucketForm) {
|
||||
deleteBucketForm.addEventListener('submit', function(e) {
|
||||
deleteBucketForm.addEventListener('submit', function (e) {
|
||||
e.preventDefault();
|
||||
window.UICore.submitFormAjax(deleteBucketForm, {
|
||||
onSuccess: function() {
|
||||
onSuccess: function () {
|
||||
sessionStorage.setItem('flashMessage', JSON.stringify({ title: 'Bucket deleted', variant: 'success' }));
|
||||
window.location.href = window.BucketDetailConfig?.endpoints?.bucketsOverview || '/ui/buckets';
|
||||
}
|
||||
|
||||
@@ -16,7 +16,7 @@ window.IAMManagement = (function() {
|
||||
var currentDeleteKey = null;
|
||||
|
||||
var policyTemplates = {
|
||||
full: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'replication', 'iam:list_users', 'iam:*'] }],
|
||||
full: [{ bucket: '*', actions: ['list', 'read', 'write', 'delete', 'share', 'policy', 'replication', 'lifecycle', 'cors', 'iam:*'] }],
|
||||
readonly: [{ bucket: '*', actions: ['list', 'read'] }],
|
||||
writer: [{ bucket: '*', actions: ['list', 'read', 'write'] }]
|
||||
};
|
||||
|
||||
@@ -67,12 +67,14 @@
|
||||
</button>
|
||||
</li>
|
||||
{% endif %}
|
||||
{% if can_edit_policy %}
|
||||
{% if can_manage_lifecycle %}
|
||||
<li class="nav-item" role="presentation">
|
||||
<button class="nav-link {{ 'active' if active_tab == 'lifecycle' else '' }}" id="lifecycle-tab" data-bs-toggle="tab" data-bs-target="#lifecycle-pane" type="button" role="tab" aria-controls="lifecycle-pane" aria-selected="{{ 'true' if active_tab == 'lifecycle' else 'false' }}">
|
||||
Lifecycle
|
||||
</button>
|
||||
</li>
|
||||
{% endif %}
|
||||
{% if can_manage_cors %}
|
||||
<li class="nav-item" role="presentation">
|
||||
<button class="nav-link {{ 'active' if active_tab == 'cors' else '' }}" id="cors-tab" data-bs-toggle="tab" data-bs-target="#cors-pane" type="button" role="tab" aria-controls="cors-pane" aria-selected="{{ 'true' if active_tab == 'cors' else 'false' }}">
|
||||
CORS
|
||||
@@ -187,20 +189,6 @@
|
||||
</div>
|
||||
<span id="load-more-status" class="text-muted"></span>
|
||||
<span id="folder-view-status" class="text-muted d-none"></span>
|
||||
<button id="load-more-btn" class="btn btn-link btn-sm p-0 d-none" style="font-size: 0.75rem;">Load more</button>
|
||||
</div>
|
||||
<div class="d-flex align-items-center gap-1">
|
||||
<span class="text-muted">Batch</span>
|
||||
<select id="page-size-select" class="form-select form-select-sm py-0" style="width: auto; font-size: 0.75rem;" title="Number of objects to load per batch">
|
||||
<option value="1000">1K</option>
|
||||
<option value="5000" selected>5K</option>
|
||||
<option value="10000">10K</option>
|
||||
<option value="25000">25K</option>
|
||||
<option value="50000">50K</option>
|
||||
<option value="75000">75K</option>
|
||||
<option value="100000">100K</option>
|
||||
</select>
|
||||
<span class="text-muted">per batch</span>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -332,6 +320,7 @@
|
||||
</div>
|
||||
<img id="preview-image" class="img-fluid d-none w-100" alt="Object preview" style="display: block;" />
|
||||
<video id="preview-video" class="w-100 d-none" controls style="display: block;"></video>
|
||||
<audio id="preview-audio" class="w-100 d-none" controls style="display: block;"></audio>
|
||||
<iframe id="preview-iframe" class="w-100 d-none" loading="lazy" style="min-height: 200px;"></iframe>
|
||||
</div>
|
||||
</div>
|
||||
@@ -1076,8 +1065,10 @@
|
||||
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
|
||||
</svg>
|
||||
<div>
|
||||
<strong>Replication Active</strong> —
|
||||
{% if replication_rule.mode == 'all' %}
|
||||
<strong>Replication Active</strong> —
|
||||
{% if replication_rule.mode == 'bidirectional' %}
|
||||
Bi-directional sync enabled with LWW conflict resolution.
|
||||
{% elif replication_rule.mode == 'all' %}
|
||||
All objects (existing + new) are being replicated.
|
||||
{% else %}
|
||||
New uploads to this bucket are automatically replicated.
|
||||
@@ -1170,7 +1161,7 @@
|
||||
</div>
|
||||
<div class="text-muted small text-uppercase">Mode</div>
|
||||
<div class="fw-semibold small">
|
||||
{% if replication_rule.mode == 'all' %}All Objects{% else %}New Only{% endif %}
|
||||
{% if replication_rule.mode == 'bidirectional' %}Bidirectional{% elif replication_rule.mode == 'all' %}All Objects{% else %}New Only{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
@@ -1321,7 +1312,9 @@
|
||||
<div>
|
||||
<strong>Replication Paused</strong>
|
||||
<p class="mb-1">Replication is configured but currently paused. New uploads will not be replicated until resumed.</p>
|
||||
{% if replication_rule.mode == 'all' %}
|
||||
{% if replication_rule.mode == 'bidirectional' %}
|
||||
<p class="mb-0 small text-dark"><strong>Tip:</strong> When you resume, bi-directional sync will continue and any missed changes will be reconciled using LWW conflict resolution.</p>
|
||||
{% elif replication_rule.mode == 'all' %}
|
||||
<p class="mb-0 small text-dark"><strong>Tip:</strong> When you resume, any objects uploaded while paused will be automatically synced to the target.</p>
|
||||
{% else %}
|
||||
<p class="mb-0 small text-dark"><strong>Note:</strong> Objects uploaded while paused will not be synced (mode: new_only). Consider switching to "All Objects" mode if you need to sync missed uploads.</p>
|
||||
@@ -1446,17 +1439,26 @@
|
||||
<div class="text-muted small">Only replicate objects uploaded after enabling replication. Existing objects will not be copied.</div>
|
||||
</label>
|
||||
</div>
|
||||
<div class="form-check p-3 m-0">
|
||||
<div class="form-check p-3 border-bottom m-0">
|
||||
<input class="form-check-input" type="radio" name="replication_mode" id="mode_all" value="all">
|
||||
<label class="form-check-label w-100" for="mode_all">
|
||||
<span class="fw-medium">All objects (existing + new)</span>
|
||||
<div class="text-muted small">Replicate all existing objects immediately, plus all future uploads. <span class="text-warning">This may take time for large buckets.</span></div>
|
||||
</label>
|
||||
</div>
|
||||
{% if site_sync_enabled %}
|
||||
<div class="form-check p-3 m-0">
|
||||
<input class="form-check-input" type="radio" name="replication_mode" id="mode_bidirectional" value="bidirectional">
|
||||
<label class="form-check-label w-100" for="mode_bidirectional">
|
||||
<span class="fw-medium">Bidirectional sync</span>
|
||||
<div class="text-muted small">Two-way sync with Last-Write-Wins conflict resolution.</div>
|
||||
</label>
|
||||
</div>
|
||||
{% endif %}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
<button type="submit" class="btn btn-primary">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
|
||||
@@ -1532,7 +1534,7 @@
|
||||
</div>
|
||||
{% endif %}
|
||||
|
||||
{% if can_edit_policy %}
|
||||
{% if can_manage_lifecycle %}
|
||||
<div class="tab-pane fade {{ 'show active' if active_tab == 'lifecycle' else '' }}" id="lifecycle-pane" role="tabpanel" aria-labelledby="lifecycle-tab" tabindex="0">
|
||||
{% if not lifecycle_enabled %}
|
||||
<div class="alert alert-warning d-flex align-items-start mb-4" role="alert">
|
||||
@@ -1574,12 +1576,13 @@
|
||||
<th>Status</th>
|
||||
<th>Expiration</th>
|
||||
<th>Noncurrent</th>
|
||||
<th>Abort MPU</th>
|
||||
<th class="text-end">Actions</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="lifecycle-rules-body">
|
||||
<tr>
|
||||
<td colspan="6" class="text-center text-muted py-4">
|
||||
<td colspan="7" class="text-center text-muted py-4">
|
||||
<div class="spinner-border spinner-border-sm me-2" role="status"></div>
|
||||
Loading...
|
||||
</td>
|
||||
@@ -1690,7 +1693,9 @@
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
|
||||
{% if can_manage_cors %}
|
||||
<div class="tab-pane fade {{ 'show active' if active_tab == 'cors' else '' }}" id="cors-pane" role="tabpanel" aria-labelledby="cors-tab" tabindex="0">
|
||||
<div class="row g-4">
|
||||
<div class="col-lg-8">
|
||||
|
||||
@@ -51,7 +51,7 @@
|
||||
</div>
|
||||
<div>
|
||||
<h5 class="bucket-name text-break">{{ bucket.meta.name }}</h5>
|
||||
<small class="text-muted">Created {{ bucket.meta.created_at.strftime('%b %d, %Y') }}</small>
|
||||
<small class="text-muted">Created {{ bucket.meta.created_at | format_datetime }}</small>
|
||||
</div>
|
||||
</div>
|
||||
<span class="badge {{ bucket.access_badge }} bucket-access-badge">{{ bucket.access_label }}</span>
|
||||
|
||||
@@ -34,11 +34,13 @@
|
||||
<li><a href="#automation">Automation / CLI</a></li>
|
||||
<li><a href="#api">REST endpoints</a></li>
|
||||
<li><a href="#examples">API Examples</a></li>
|
||||
<li><a href="#replication">Site Replication</a></li>
|
||||
<li><a href="#replication">Site Replication & Sync</a></li>
|
||||
<li><a href="#versioning">Object Versioning</a></li>
|
||||
<li><a href="#quotas">Bucket Quotas</a></li>
|
||||
<li><a href="#encryption">Encryption</a></li>
|
||||
<li><a href="#lifecycle">Lifecycle Rules</a></li>
|
||||
<li><a href="#metrics">Metrics History</a></li>
|
||||
<li><a href="#operation-metrics">Operation Metrics</a></li>
|
||||
<li><a href="#troubleshooting">Troubleshooting</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
@@ -155,6 +157,29 @@ python run.py --mode ui
|
||||
<td><code>200 per minute</code></td>
|
||||
<td>Default API rate limit.</td>
|
||||
</tr>
|
||||
<tr class="table-secondary">
|
||||
<td colspan="3" class="fw-semibold">Server Settings</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>SERVER_THREADS</code></td>
|
||||
<td><code>4</code></td>
|
||||
<td>Waitress worker threads (1-64).</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>SERVER_CONNECTION_LIMIT</code></td>
|
||||
<td><code>100</code></td>
|
||||
<td>Max concurrent connections (10-1000).</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>SERVER_BACKLOG</code></td>
|
||||
<td><code>1024</code></td>
|
||||
<td>TCP listen backlog (64-4096).</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>SERVER_CHANNEL_TIMEOUT</code></td>
|
||||
<td><code>120</code></td>
|
||||
<td>Idle connection timeout in seconds (10-300).</td>
|
||||
</tr>
|
||||
<tr class="table-secondary">
|
||||
<td colspan="3" class="fw-semibold">Encryption Settings</td>
|
||||
</tr>
|
||||
@@ -181,6 +206,42 @@ python run.py --mode ui
|
||||
<td><code>true</code></td>
|
||||
<td>Enable file logging.</td>
|
||||
</tr>
|
||||
<tr class="table-secondary">
|
||||
<td colspan="3" class="fw-semibold">Metrics History Settings</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>METRICS_HISTORY_ENABLED</code></td>
|
||||
<td><code>false</code></td>
|
||||
<td>Enable metrics history recording and charts (opt-in).</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>METRICS_HISTORY_RETENTION_HOURS</code></td>
|
||||
<td><code>24</code></td>
|
||||
<td>How long to retain metrics history data.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>METRICS_HISTORY_INTERVAL_MINUTES</code></td>
|
||||
<td><code>5</code></td>
|
||||
<td>Interval between history snapshots.</td>
|
||||
</tr>
|
||||
<tr class="table-secondary">
|
||||
<td colspan="3" class="fw-semibold">Site Sync Settings (Bidirectional Replication)</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>SITE_SYNC_ENABLED</code></td>
|
||||
<td><code>false</code></td>
|
||||
<td>Enable bi-directional site sync background worker.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>SITE_SYNC_INTERVAL_SECONDS</code></td>
|
||||
<td><code>60</code></td>
|
||||
<td>Interval between sync cycles (seconds).</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><code>SITE_SYNC_BATCH_SIZE</code></td>
|
||||
<td><code>100</code></td>
|
||||
<td>Max objects to pull per sync cycle.</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
@@ -356,11 +417,8 @@ curl -X PUT {{ api_base }}/demo/notes.txt \
|
||||
-H "X-Secret-Key: <secret_key>" \
|
||||
--data-binary @notes.txt
|
||||
|
||||
curl -X POST {{ api_base }}/presign/demo/notes.txt \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-Access-Key: <access_key>" \
|
||||
-H "X-Secret-Key: <secret_key>" \
|
||||
-d '{"method":"GET", "expires_in": 900}'
|
||||
# Presigned URLs are generated via the UI
|
||||
# Use the "Presign" button in the object browser
|
||||
</code></pre>
|
||||
</div>
|
||||
</div>
|
||||
@@ -418,13 +476,8 @@ curl -X POST {{ api_base }}/presign/demo/notes.txt \
|
||||
</tr>
|
||||
<tr>
|
||||
<td>GET/PUT/DELETE</td>
|
||||
<td><code>/bucket-policy/<bucket></code></td>
|
||||
<td>Fetch, upsert, or remove a bucket policy.</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>POST</td>
|
||||
<td><code>/presign/<bucket>/<key></code></td>
|
||||
<td>Generate SigV4 URLs for GET/PUT/DELETE with custom expiry.</td>
|
||||
<td><code>/<bucket>?policy</code></td>
|
||||
<td>Fetch, upsert, or remove a bucket policy (S3-compatible).</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
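
<p class="small text-muted">Because the <code>?policy</code> subresource follows the S3 convention, standard SDKs should be able to drive it. A minimal boto3 sketch (the endpoint URL, bucket name, and credentials below are placeholders, not values from this deployment):</p>
<pre class="mb-3"><code class="language-python">import json

import boto3

# Placeholder endpoint and credentials -- substitute your own.
s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:5000",
    aws_access_key_id="YOUR_ACCESS_KEY",
    aws_secret_access_key="YOUR_SECRET_KEY",
)

policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": "*",
        "Action": ["s3:GetObject"],
        "Resource": ["arn:aws:s3:::demo/*"],
    }],
}

s3.put_bucket_policy(Bucket="demo", Policy=json.dumps(policy))  # PUT /demo?policy
print(s3.get_bucket_policy(Bucket="demo")["Policy"])            # GET /demo?policy
s3.delete_bucket_policy(Bucket="demo")                          # DELETE /demo?policy</code></pre>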
@@ -523,26 +576,25 @@ s3.complete_multipart_upload(
)</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">Presigned URLs for Sharing</h3>
<pre class="mb-0"><code class="language-bash"># Generate a download link valid for 15 minutes
curl -X POST "{{ api_base }}/presign/mybucket/photo.jpg" \
-H "Content-Type: application/json" \
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
-d '{"method": "GET", "expires_in": 900}'
<pre class="mb-0"><code class="language-text"># Generate presigned URLs via the UI:
# 1. Navigate to your bucket in the object browser
# 2. Select the object you want to share
# 3. Click the "Presign" button
# 4. Choose method (GET/PUT/DELETE) and expiration time
# 5. Copy the generated URL

# Generate an upload link (PUT) valid for 1 hour
curl -X POST "{{ api_base }}/presign/mybucket/upload.bin" \
-H "Content-Type: application/json" \
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
-d '{"method": "PUT", "expires_in": 3600}'</code></pre>
# Supported options:
# - Method: GET (download), PUT (upload), DELETE (remove)
# - Expiration: 1 second to 7 days (604800 seconds)</code></pre>
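
<p class="small text-muted">Once copied from the UI, a presigned URL needs no further credentials. A minimal sketch of consuming one with Python (the URL below is a placeholder -- paste the value from the "Presign" dialog):</p>
<pre class="mb-0"><code class="language-python">import requests

# Placeholder: the full signed URL copied from the Presign dialog.
presigned_url = "http://localhost:5000/mybucket/photo.jpg?X-Amz-Algorithm=..."

# Download via a GET presigned URL -- no access keys required.
resp = requests.get(presigned_url, timeout=10)
resp.raise_for_status()
with open("photo.jpg", "wb") as fh:
    fh.write(resp.content)

# For a PUT presigned URL, upload the body the same way:
# requests.put(put_url, data=open("photo.jpg", "rb"), timeout=30)</code></pre>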
</div>
</article>
<article id="replication" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">08</span>
<h2 class="h4 mb-0">Site Replication</h2>
<h2 class="h4 mb-0">Site Replication & Sync</h2>
</div>
<p class="text-muted">Automatically copy new objects to another MyFSIO instance or S3-compatible service for backup or disaster recovery.</p>
<p class="text-muted">Replicate objects to another MyFSIO instance or S3-compatible service. Supports one-way replication for backup and bi-directional sync for geo-distributed deployments.</p>

<h3 class="h6 text-uppercase text-muted mt-4">Setup Guide</h3>
<ol class="docs-steps mb-3">
@@ -601,17 +653,147 @@ except Exception as e:
</div>
</div>

<h3 class="h6 text-uppercase text-muted mt-4">Bidirectional Replication (Active-Active)</h3>
<p class="small text-muted">To set up two-way replication (Server A ↔ Server B):</p>
<ol class="docs-steps mb-3">
<li>Follow the steps above to replicate <strong>A → B</strong>.</li>
<li>Repeat the process on Server B to replicate <strong>B → A</strong> (create a connection to A, enable rule).</li>
</ol>
<p class="small text-muted mb-3">
<strong>Loop Prevention:</strong> The system automatically detects replication traffic using a custom User-Agent (<code>S3ReplicationAgent</code>). This prevents infinite loops where an object replicated from A to B is immediately replicated back to A.
<br>
<strong>Deletes:</strong> Deleting an object on one server will propagate the deletion to the other server.
</p>
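
<p class="small text-muted">An illustrative sketch of the loop-prevention idea (assumed behaviour, not the project's actual code): a write is only re-replicated when its User-Agent does not identify a replication agent.</p>
<pre class="mb-3"><code class="language-python"># Hypothetical sketch -- the real check lives inside the replication worker.
REPLICATION_AGENTS = ("S3ReplicationAgent", "SiteSyncAgent")

def should_replicate(user_agent):
    """Skip writes that were themselves made by a replication/sync agent."""
    ua = user_agent or ""
    return not any(agent in ua for agent in REPLICATION_AGENTS)

assert should_replicate("boto3/1.34.0")                # normal client: replicate
assert not should_replicate("S3ReplicationAgent/1.0")  # agent traffic: skip</code></pre>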
<h3 class="h6 text-uppercase text-muted mt-4">Replication Modes</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Mode</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>new_only</code></td>
<td>Only replicate new/modified objects (default, one-way)</td>
</tr>
<tr>
<td><code>all</code></td>
<td>Sync all existing objects when rule is enabled (one-way)</td>
</tr>
<tr>
<td><strong><code>bidirectional</code></strong></td>
<td>Two-way sync with Last-Write-Wins conflict resolution</td>
</tr>
</tbody>
</table>
</div>

<h3 class="h6 text-uppercase text-muted mt-4">Bidirectional Site Replication</h3>
<p class="small text-muted">For true two-way synchronization with automatic conflict resolution, use the <code>bidirectional</code> mode. Both sites must be configured to sync with each other.</p>

<div class="alert alert-info border small mb-3">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-info mt-1 flex-shrink-0" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
</svg>
<div>
<strong>Both sites need configuration.</strong> Each site pushes its changes and pulls from the other. You must set up connections and replication rules on both ends.
</div>
</div>
</div>

<h4 class="h6 mt-4 mb-2">Step 1: Enable Site Sync on Both Sites</h4>
<p class="small text-muted">Set these environment variables on <strong>both</strong> Site A and Site B:</p>
<pre class="mb-3"><code class="language-bash">SITE_SYNC_ENABLED=true
SITE_SYNC_INTERVAL_SECONDS=60 # How often to pull changes
SITE_SYNC_BATCH_SIZE=100 # Max objects per sync cycle</code></pre>

<h4 class="h6 mt-4 mb-2">Step 2: Create IAM Users for Cross-Site Access</h4>
<p class="small text-muted">On each site, create an IAM user that the other site will use to connect:</p>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Site</th>
<th>Create User For</th>
<th>Required Permissions</th>
</tr>
</thead>
<tbody>
<tr>
<td>Site A</td>
<td>Site B to connect</td>
<td><code>read</code>, <code>write</code>, <code>list</code>, <code>delete</code> on target bucket</td>
</tr>
<tr>
<td>Site B</td>
<td>Site A to connect</td>
<td><code>read</code>, <code>write</code>, <code>list</code>, <code>delete</code> on target bucket</td>
</tr>
</tbody>
</table>
</div>

<h4 class="h6 mt-4 mb-2">Step 3: Create Connections</h4>
<p class="small text-muted">On each site, add a connection pointing to the other:</p>
<div class="row g-3 mb-3">
<div class="col-md-6">
<div class="card border h-100">
<div class="card-header bg-light py-2"><strong class="small">On Site A</strong></div>
<div class="card-body small">
<p class="mb-1">Go to <strong>Connections</strong> and add:</p>
<ul class="mb-0 ps-3">
<li>Endpoint: <code>https://site-b.example.com</code></li>
<li>Credentials: Site B's IAM user</li>
</ul>
</div>
</div>
</div>
<div class="col-md-6">
<div class="card border h-100">
<div class="card-header bg-light py-2"><strong class="small">On Site B</strong></div>
<div class="card-body small">
<p class="mb-1">Go to <strong>Connections</strong> and add:</p>
<ul class="mb-0 ps-3">
<li>Endpoint: <code>https://site-a.example.com</code></li>
<li>Credentials: Site A's IAM user</li>
</ul>
</div>
</div>
</div>
</div>

<h4 class="h6 mt-4 mb-2">Step 4: Enable Bidirectional Replication</h4>
<p class="small text-muted">On each site, go to the bucket's <strong>Replication</strong> tab and enable with mode <code>bidirectional</code>:</p>
<div class="row g-3 mb-3">
<div class="col-md-6">
<div class="card border h-100">
<div class="card-header bg-light py-2"><strong class="small">On Site A</strong></div>
<div class="card-body small">
<ul class="mb-0 ps-3">
<li>Source bucket: <code>my-bucket</code></li>
<li>Target: Site B connection</li>
<li>Target bucket: <code>my-bucket</code></li>
<li>Mode: <strong>Bidirectional sync</strong></li>
</ul>
</div>
</div>
</div>
<div class="col-md-6">
<div class="card border h-100">
<div class="card-header bg-light py-2"><strong class="small">On Site B</strong></div>
<div class="card-body small">
<ul class="mb-0 ps-3">
<li>Source bucket: <code>my-bucket</code></li>
<li>Target: Site A connection</li>
<li>Target bucket: <code>my-bucket</code></li>
<li>Mode: <strong>Bidirectional sync</strong></li>
</ul>
</div>
</div>
</div>
</div>
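
<p class="small text-muted">A quick way to verify the loop end-to-end is to write through one site and read through the other. A minimal boto3 sketch (both endpoints and all credentials are placeholders):</p>
<pre class="mb-3"><code class="language-python">import time

import boto3

# Placeholder endpoints and credentials for the two sites.
site_a = boto3.client("s3", endpoint_url="https://site-a.example.com",
                      aws_access_key_id="A_KEY", aws_secret_access_key="A_SECRET")
site_b = boto3.client("s3", endpoint_url="https://site-b.example.com",
                      aws_access_key_id="B_KEY", aws_secret_access_key="B_SECRET")

# Write on Site A, then wait roughly one pull cycle (SITE_SYNC_INTERVAL_SECONDS).
site_a.put_object(Bucket="my-bucket", Key="sync-test.txt", Body=b"hello from A")
time.sleep(60)

# The object should now be readable on Site B.
body = site_b.get_object(Bucket="my-bucket", Key="sync-test.txt")["Body"].read()
assert body == b"hello from A"</code></pre>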
<h4 class="h6 mt-4 mb-2">How It Works</h4>
<ul class="small text-muted mb-3">
<li><strong>PUSH:</strong> Local changes replicate to remote immediately on write/delete</li>
<li><strong>PULL:</strong> Background worker fetches remote changes every <code>SITE_SYNC_INTERVAL_SECONDS</code></li>
<li><strong>Conflict Resolution:</strong> Last-Write-Wins based on <code>last_modified</code> timestamps (1-second clock skew tolerance), as sketched below</li>
<li><strong>Deletion Sync:</strong> Remote deletions propagate locally only for objects originally synced from remote</li>
<li><strong>Loop Prevention:</strong> <code>S3ReplicationAgent</code> and <code>SiteSyncAgent</code> User-Agents prevent infinite sync loops</li>
</ul>
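
<p class="small text-muted">A minimal sketch of the Last-Write-Wins comparison (assumed behaviour based on the description above, not the project's actual code):</p>
<pre class="mb-3"><code class="language-python">from datetime import datetime, timedelta, timezone

SKEW = timedelta(seconds=1)  # documented 1-second clock-skew tolerance

def remote_wins(local_modified, remote_modified):
    """Apply the remote copy only when it is newer by more than the skew."""
    return remote_modified - local_modified > SKEW

local = datetime(2024, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
assert not remote_wins(local, local + timedelta(milliseconds=500))  # within skew: keep local
assert remote_wins(local, local + timedelta(seconds=5))             # clearly newer: pull remote</code></pre>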
<h3 class="h6 text-uppercase text-muted mt-4">Error Handling & Rate Limits</h3>
<p class="small text-muted mb-3">The replication system handles transient failures automatically:</p>
@@ -976,10 +1158,201 @@ curl "{{ api_base }}/<bucket>?lifecycle" \
</div>
</div>
</article>
<article id="troubleshooting" class="card shadow-sm docs-section">
<article id="metrics" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">13</span>
<h2 class="h4 mb-0">Metrics History</h2>
</div>
<p class="text-muted">Track CPU, memory, and disk usage over time with optional metrics history. Disabled by default to minimize overhead.</p>

<h3 class="h6 text-uppercase text-muted mt-4">Enabling Metrics History</h3>
<p class="small text-muted">Set the environment variable to opt in:</p>
<pre class="mb-3"><code class="language-bash"># PowerShell
$env:METRICS_HISTORY_ENABLED = "true"
python run.py

# Bash
export METRICS_HISTORY_ENABLED=true
python run.py</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">Configuration Options</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Variable</th>
<th>Default</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>METRICS_HISTORY_ENABLED</code></td>
<td><code>false</code></td>
<td>Enable/disable metrics history recording</td>
</tr>
<tr>
<td><code>METRICS_HISTORY_RETENTION_HOURS</code></td>
<td><code>24</code></td>
<td>How long to keep history data (hours)</td>
</tr>
<tr>
<td><code>METRICS_HISTORY_INTERVAL_MINUTES</code></td>
<td><code>5</code></td>
<td>Interval between snapshots (minutes)</td>
</tr>
</tbody>
</table>
</div>

<h3 class="h6 text-uppercase text-muted mt-4">API Endpoints</h3>
<pre class="mb-3"><code class="language-bash"># Get metrics history (last 24 hours by default)
curl "{{ api_base | replace('/api', '/ui') }}/metrics/history" \
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"

# Get history for specific time range
curl "{{ api_base | replace('/api', '/ui') }}/metrics/history?hours=6" \
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"

# Get current settings
curl "{{ api_base | replace('/api', '/ui') }}/metrics/settings" \
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"

# Update settings at runtime
curl -X PUT "{{ api_base | replace('/api', '/ui') }}/metrics/settings" \
-H "Content-Type: application/json" \
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
-d '{"enabled": true, "retention_hours": 48, "interval_minutes": 10}'</code></pre>
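
<p class="small text-muted">The same endpoints are easy to script. A minimal Python sketch (the base URL, credentials, and the <code>history</code> response key are assumptions, not confirmed API shapes):</p>
<pre class="mb-3"><code class="language-python">import requests

UI_BASE = "http://localhost:5100/ui"  # placeholder UI base URL
HEADERS = {"X-Access-Key": "YOUR_KEY", "X-Secret-Key": "YOUR_SECRET"}

resp = requests.get(UI_BASE + "/metrics/history",
                    params={"hours": 6}, headers=HEADERS, timeout=10)
resp.raise_for_status()

# Response shape assumed: a list of snapshots with timestamp + usage fields.
for sample in resp.json().get("history", []):
    print(sample.get("timestamp"), sample.get("cpu_percent"))</code></pre>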

<h3 class="h6 text-uppercase text-muted mt-4">Storage Location</h3>
<p class="small text-muted mb-3">History data is stored at:</p>
<code class="d-block mb-3">data/.myfsio.sys/config/metrics_history.json</code>

<div class="alert alert-light border mb-0">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1 flex-shrink-0" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
</svg>
<div>
<strong>UI Charts:</strong> When enabled, the Metrics dashboard displays line charts showing CPU, memory, and disk usage trends with a time range selector (1h, 6h, 24h, 7d).
</div>
</div>
</div>
</div>
</article>
<article id="operation-metrics" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">14</span>
<h2 class="h4 mb-0">Operation Metrics</h2>
</div>
<p class="text-muted">Track API request statistics including request counts, latency, error rates, and bandwidth usage. Provides real-time visibility into API operations.</p>

<h3 class="h6 text-uppercase text-muted mt-4">Enabling Operation Metrics</h3>
<p class="small text-muted">Set the environment variable to opt in:</p>
<pre class="mb-3"><code class="language-bash"># PowerShell
$env:OPERATION_METRICS_ENABLED = "true"
python run.py

# Bash
export OPERATION_METRICS_ENABLED=true
python run.py</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">Configuration Options</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Variable</th>
<th>Default</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>OPERATION_METRICS_ENABLED</code></td>
<td><code>false</code></td>
<td>Enable/disable operation metrics collection</td>
</tr>
<tr>
<td><code>OPERATION_METRICS_INTERVAL_MINUTES</code></td>
<td><code>5</code></td>
<td>Interval between snapshots (minutes)</td>
</tr>
<tr>
<td><code>OPERATION_METRICS_RETENTION_HOURS</code></td>
<td><code>24</code></td>
<td>How long to keep history data (hours)</td>
</tr>
</tbody>
</table>
</div>

<h3 class="h6 text-uppercase text-muted mt-4">What's Tracked</h3>
<div class="row g-3 mb-4">
<div class="col-md-6">
<div class="bg-light rounded p-3 h-100">
<h6 class="small fw-bold mb-2">Request Statistics</h6>
<ul class="small text-muted mb-0 ps-3">
<li>Request counts by HTTP method (GET, PUT, POST, DELETE)</li>
<li>Response status codes (2xx, 3xx, 4xx, 5xx)</li>
<li>Average, min, max latency</li>
<li>Bytes transferred in/out</li>
</ul>
</div>
</div>
<div class="col-md-6">
<div class="bg-light rounded p-3 h-100">
<h6 class="small fw-bold mb-2">Endpoint Breakdown</h6>
<ul class="small text-muted mb-0 ps-3">
<li><code>object</code> - Object operations (GET/PUT/DELETE)</li>
<li><code>bucket</code> - Bucket operations</li>
<li><code>ui</code> - Web UI requests</li>
<li><code>service</code> - Health checks, etc.</li>
</ul>
</div>
</div>
</div>
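
<p class="small text-muted">A hypothetical sketch of how such endpoint classification can work (the real <code>classify_endpoint</code> in <code>operation_metrics</code> may differ):</p>
<pre class="mb-3"><code class="language-python">def classify_endpoint(path):
    """Map a request path to one of the dashboard categories."""
    if path.startswith("/ui"):
        return "ui"
    if path.startswith("/myfsio/health"):
        return "service"
    parts = [p for p in path.strip("/").split("/") if p]
    if len(parts) >= 2:
        return "object"   # /bucket/key...
    if len(parts) == 1:
        return "bucket"   # /bucket
    return "service"

assert classify_endpoint("/demo/notes.txt") == "object"
assert classify_endpoint("/demo") == "bucket"
assert classify_endpoint("/myfsio/health") == "service"</code></pre>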
<h3 class="h6 text-uppercase text-muted mt-4">S3 Error Codes</h3>
|
||||
<p class="small text-muted">The dashboard tracks S3 API-specific error codes like <code>NoSuchKey</code>, <code>AccessDenied</code>, <code>BucketNotFound</code>. These are separate from HTTP status codes – a 404 from the UI won't appear here, only S3 API errors.</p>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">API Endpoints</h3>
|
||||
<pre class="mb-3"><code class="language-bash"># Get current operation metrics
|
||||
curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
|
||||
|
||||
# Get operation metrics history
|
||||
curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
|
||||
|
||||
# Filter history by time range
|
||||
curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history?hours=6" \
|
||||
-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>
|
||||
|
||||
<h3 class="h6 text-uppercase text-muted mt-4">Storage Location</h3>
|
||||
<p class="small text-muted mb-3">Operation metrics data is stored at:</p>
|
||||
<code class="d-block mb-3">data/.myfsio.sys/config/operation_metrics.json</code>
|
||||
|
||||
<div class="alert alert-light border mb-0">
|
||||
<div class="d-flex gap-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1 flex-shrink-0" viewBox="0 0 16 16">
|
||||
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
|
||||
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
|
||||
</svg>
|
||||
<div>
|
||||
<strong>UI Dashboard:</strong> When enabled, the Metrics page shows an "API Operations" section with summary cards, charts for requests by method/status/endpoint, and an S3 error codes table. Data refreshes every 5 seconds.
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</article>
|
||||
<article id="troubleshooting" class="card shadow-sm docs-section">
|
||||
<div class="card-body">
|
||||
<div class="d-flex align-items-center gap-2 mb-3">
|
||||
<span class="docs-section-kicker">15</span>
|
||||
<h2 class="h4 mb-0">Troubleshooting & tips</h2>
|
||||
</div>
|
||||
<div class="table-responsive">
|
||||
@@ -1040,11 +1413,13 @@ curl "{{ api_base }}/<bucket>?lifecycle" \
|
||||
<li><a href="#automation">Automation / CLI</a></li>
|
||||
<li><a href="#api">REST endpoints</a></li>
|
||||
<li><a href="#examples">API Examples</a></li>
|
||||
<li><a href="#replication">Site Replication</a></li>
|
||||
<li><a href="#replication">Site Replication & Sync</a></li>
|
||||
<li><a href="#versioning">Object Versioning</a></li>
|
||||
<li><a href="#quotas">Bucket Quotas</a></li>
|
||||
<li><a href="#encryption">Encryption</a></li>
|
||||
<li><a href="#lifecycle">Lifecycle Rules</a></li>
|
||||
<li><a href="#metrics">Metrics History</a></li>
|
||||
<li><a href="#operation-metrics">Operation Metrics</a></li>
|
||||
<li><a href="#troubleshooting">Troubleshooting</a></li>
|
||||
</ul>
|
||||
<div class="docs-sidebar-callouts">
|
||||
|
||||
@@ -218,10 +218,10 @@
|
||||
|
||||
<div class="col-lg-4">
|
||||
{% set has_issues = (cpu_percent > 80) or (memory.percent > 85) or (disk.percent > 90) %}
|
||||
<div class="card shadow-sm border-0 h-100 overflow-hidden" style="background: linear-gradient(135deg, {% if has_issues %}#ef4444 0%, #f97316{% else %}#3b82f6 0%, #8b5cf6{% endif %} 100%);">
|
||||
<div id="systemHealthCard" class="card shadow-sm border-0 h-100 overflow-hidden" style="background: linear-gradient(135deg, {% if has_issues %}#ef4444 0%, #f97316{% else %}#3b82f6 0%, #8b5cf6{% endif %} 100%);">
|
||||
<div class="card-body p-4 d-flex flex-column justify-content-center text-white position-relative">
|
||||
<div class="position-absolute top-0 end-0 opacity-25" style="transform: translate(20%, -20%);">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="160" height="160" fill="currentColor" class="bi bi-{% if has_issues %}exclamation-triangle{% else %}cloud-check{% endif %}" viewBox="0 0 16 16">
|
||||
<svg id="healthIcon" xmlns="http://www.w3.org/2000/svg" width="160" height="160" fill="currentColor" viewBox="0 0 16 16">
|
||||
{% if has_issues %}
|
||||
<path d="M7.938 2.016A.13.13 0 0 1 8.002 2a.13.13 0 0 1 .063.016.146.146 0 0 1 .054.057l6.857 11.667c.036.06.035.124.002.183a.163.163 0 0 1-.054.06.116.116 0 0 1-.066.017H1.146a.115.115 0 0 1-.066-.017.163.163 0 0 1-.054-.06.176.176 0 0 1 .002-.183L7.884 2.073a.147.147 0 0 1 .054-.057zm1.044-.45a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566z"/>
|
||||
<path d="M7.002 12a1 1 0 1 1 2 0 1 1 0 0 1-2 0zM7.1 5.995a.905.905 0 1 1 1.8 0l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995z"/>
|
||||
@@ -232,8 +232,8 @@
|
||||
</svg>
|
||||
</div>
|
||||
<div class="mb-3">
|
||||
<span class="badge bg-white {% if has_issues %}text-danger{% else %}text-primary{% endif %} fw-semibold px-3 py-2">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="bi bi-{% if has_issues %}exclamation-circle-fill{% else %}check-circle-fill{% endif %} me-1" viewBox="0 0 16 16">
|
||||
<span id="healthBadge" class="badge bg-white {% if has_issues %}text-danger{% else %}text-primary{% endif %} fw-semibold px-3 py-2">
|
||||
<svg id="healthBadgeIcon" xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||
{% if has_issues %}
|
||||
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM8 4a.905.905 0 0 0-.9.995l.35 3.507a.552.552 0 0 0 1.1 0l.35-3.507A.905.905 0 0 0 8 4zm.002 6a1 1 0 1 0 0 2 1 1 0 0 0 0-2z"/>
|
||||
{% else %}
|
||||
@@ -244,22 +244,24 @@
|
||||
</span>
|
||||
</div>
|
||||
<h4 class="card-title fw-bold mb-3">System Health</h4>
|
||||
{% if has_issues %}
|
||||
<ul class="list-unstyled small mb-4 opacity-90">
|
||||
{% if cpu_percent > 80 %}<li class="mb-1">CPU usage is high ({{ cpu_percent }}%)</li>{% endif %}
|
||||
{% if memory.percent > 85 %}<li class="mb-1">Memory usage is high ({{ memory.percent }}%)</li>{% endif %}
|
||||
{% if disk.percent > 90 %}<li class="mb-1">Disk space is critically low ({{ disk.percent }}% used)</li>{% endif %}
|
||||
</ul>
|
||||
{% else %}
|
||||
<p class="card-text opacity-90 mb-4 small">All resources are within normal operating parameters.</p>
|
||||
{% endif %}
|
||||
<div id="healthContent">
|
||||
{% if has_issues %}
|
||||
<ul class="list-unstyled small mb-4 opacity-90">
|
||||
{% if cpu_percent > 80 %}<li class="mb-1">CPU usage is high ({{ cpu_percent }}%)</li>{% endif %}
|
||||
{% if memory.percent > 85 %}<li class="mb-1">Memory usage is high ({{ memory.percent }}%)</li>{% endif %}
|
||||
{% if disk.percent > 90 %}<li class="mb-1">Disk space is critically low ({{ disk.percent }}% used)</li>{% endif %}
|
||||
</ul>
|
||||
{% else %}
|
||||
<p class="card-text opacity-90 mb-4 small">All resources are within normal operating parameters.</p>
|
||||
{% endif %}
|
||||
</div>
|
||||
<div class="d-flex gap-4">
|
||||
<div>
|
||||
<div class="h3 fw-bold mb-0">{{ app.uptime_days }}d</div>
|
||||
<div class="h3 fw-bold mb-0" data-metric="health_uptime">{{ app.uptime_days }}d</div>
|
||||
<small class="opacity-75">Uptime</small>
|
||||
</div>
|
||||
<div>
|
||||
<div class="h3 fw-bold mb-0">{{ app.buckets }}</div>
|
||||
<div class="h3 fw-bold mb-0" data-metric="health_buckets">{{ app.buckets }}</div>
|
||||
<small class="opacity-75">Active Buckets</small>
|
||||
</div>
|
||||
</div>
|
||||
@@ -267,9 +269,164 @@
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{% if operation_metrics_enabled %}
|
||||
<div class="row g-4 mt-2">
|
||||
<div class="col-12">
|
||||
<div class="card shadow-sm border-0">
|
||||
<div class="card-header bg-transparent border-0 pt-4 px-4 d-flex justify-content-between align-items-center">
|
||||
<h5 class="card-title mb-0 fw-semibold">API Operations</h5>
|
||||
<div class="d-flex align-items-center gap-3">
|
||||
<span class="small text-muted" id="opStatus">Loading...</span>
|
||||
<button class="btn btn-outline-secondary btn-sm" id="resetOpMetricsBtn" title="Reset current window">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="bi bi-arrow-counterclockwise" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 3a5 5 0 1 1-4.546 2.914.5.5 0 0 0-.908-.417A6 6 0 1 0 8 2v1z"/>
|
||||
<path d="M8 4.466V.534a.25.25 0 0 0-.41-.192L5.23 2.308a.25.25 0 0 0 0 .384l2.36 1.966A.25.25 0 0 0 8 4.466z"/>
|
||||
</svg>
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="card-body p-4">
|
||||
<div class="row g-3 mb-4">
|
||||
<div class="col-6 col-md-4 col-lg-2">
|
||||
<div class="text-center p-3 bg-light rounded h-100">
|
||||
<h4 class="fw-bold mb-1" id="opTotalRequests">0</h4>
|
||||
<small class="text-muted">Requests</small>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-6 col-md-4 col-lg-2">
|
||||
<div class="text-center p-3 bg-light rounded h-100">
|
||||
<h4 class="fw-bold mb-1 text-success" id="opSuccessRate">0%</h4>
|
||||
<small class="text-muted">Success</small>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-6 col-md-4 col-lg-2">
|
||||
<div class="text-center p-3 bg-light rounded h-100">
|
||||
<h4 class="fw-bold mb-1 text-danger" id="opErrorCount">0</h4>
|
||||
<small class="text-muted">Errors</small>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-6 col-md-4 col-lg-2">
|
||||
<div class="text-center p-3 bg-light rounded h-100">
|
||||
<h4 class="fw-bold mb-1 text-info" id="opAvgLatency">0ms</h4>
|
||||
<small class="text-muted">Latency</small>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-6 col-md-4 col-lg-2">
|
||||
<div class="text-center p-3 bg-light rounded h-100">
|
||||
<h4 class="fw-bold mb-1 text-primary" id="opBytesIn">0 B</h4>
|
||||
<small class="text-muted">Bytes In</small>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-6 col-md-4 col-lg-2">
|
||||
<div class="text-center p-3 bg-light rounded h-100">
|
||||
<h4 class="fw-bold mb-1 text-secondary" id="opBytesOut">0 B</h4>
|
||||
<small class="text-muted">Bytes Out</small>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="row g-4">
|
||||
<div class="col-lg-6">
|
||||
<div class="bg-light rounded p-3">
|
||||
<h6 class="text-muted small fw-bold text-uppercase mb-3">Requests by Method</h6>
|
||||
<div style="height: 220px; display: flex; align-items: center; justify-content: center;">
|
||||
<canvas id="methodChart"></canvas>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-lg-6">
|
||||
<div class="bg-light rounded p-3">
|
||||
<h6 class="text-muted small fw-bold text-uppercase mb-3">Requests by Status</h6>
|
||||
<div style="height: 220px;">
|
||||
<canvas id="statusChart"></canvas>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="row g-4 mt-1">
|
||||
<div class="col-lg-6">
|
||||
<div class="bg-light rounded p-3">
|
||||
<h6 class="text-muted small fw-bold text-uppercase mb-3">Requests by Endpoint</h6>
|
||||
<div style="height: 180px;">
|
||||
<canvas id="endpointChart"></canvas>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="col-lg-6">
|
||||
<div class="bg-light rounded p-3 h-100 d-flex flex-column">
|
||||
<div class="d-flex justify-content-between align-items-start mb-3">
|
||||
<h6 class="text-muted small fw-bold text-uppercase mb-0">S3 Error Codes</h6>
|
||||
<span class="badge bg-secondary-subtle text-secondary" style="font-size: 0.65rem;" title="Tracks S3 API errors like NoSuchKey, AccessDenied, etc.">API Only</span>
|
||||
</div>
|
||||
<div class="flex-grow-1 d-flex flex-column" style="min-height: 150px;">
|
||||
<div class="d-flex border-bottom pb-2 mb-2" style="font-size: 0.75rem;">
|
||||
<div class="text-muted fw-semibold" style="flex: 1;">Code</div>
|
||||
<div class="text-muted fw-semibold text-end" style="width: 60px;">Count</div>
|
||||
<div class="text-muted fw-semibold text-end" style="width: 100px;">Distribution</div>
|
||||
</div>
|
||||
<div id="errorCodesContainer" class="flex-grow-1" style="overflow-y: auto;">
|
||||
<div id="errorCodesBody">
|
||||
<div class="text-muted small text-center py-4">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="bi bi-check-circle mb-2 text-success" viewBox="0 0 16 16">
|
||||
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
|
||||
<path d="M10.97 4.97a.235.235 0 0 0-.02.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-1.071-1.05z"/>
|
||||
</svg>
|
||||
<div>No S3 API errors</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
|
||||
{% if metrics_history_enabled %}
|
||||
<div class="row g-4 mt-2">
|
||||
<div class="col-12">
|
||||
<div class="card shadow-sm border-0">
|
||||
<div class="card-header bg-transparent border-0 pt-4 px-4 d-flex justify-content-between align-items-center">
|
||||
<h5 class="card-title mb-0 fw-semibold">Metrics History</h5>
|
||||
<div class="d-flex gap-2 align-items-center">
|
||||
<select class="form-select form-select-sm" id="historyTimeRange" style="width: auto;">
|
||||
<option value="1">Last 1 hour</option>
|
||||
<option value="6">Last 6 hours</option>
|
||||
<option value="24" selected>Last 24 hours</option>
|
||||
<option value="168">Last 7 days</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
<div class="card-body p-4">
|
||||
<div class="row">
|
||||
<div class="col-md-4 mb-4">
|
||||
<h6 class="text-muted small fw-bold text-uppercase mb-3">CPU Usage</h6>
|
||||
<canvas id="cpuHistoryChart" height="200"></canvas>
|
||||
</div>
|
||||
<div class="col-md-4 mb-4">
|
||||
<h6 class="text-muted small fw-bold text-uppercase mb-3">Memory Usage</h6>
|
||||
<canvas id="memoryHistoryChart" height="200"></canvas>
|
||||
</div>
|
||||
<div class="col-md-4 mb-4">
|
||||
<h6 class="text-muted small fw-bold text-uppercase mb-3">Disk Usage</h6>
|
||||
<canvas id="diskHistoryChart" height="200"></canvas>
|
||||
</div>
|
||||
</div>
|
||||
<p class="text-muted small mb-0 text-center" id="historyStatus">Loading history data...</p>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
{% endif %}
|
||||
{% endblock %}
|
||||
|
||||
{% block extra_scripts %}
|
||||
{% if metrics_history_enabled or operation_metrics_enabled %}
|
||||
<script src="https://cdn.jsdelivr.net/npm/chart.js@4.4.1/dist/chart.umd.min.js"></script>
|
||||
{% endif %}
|
||||
<script>
|
||||
(function() {
|
||||
var refreshInterval = 5000;
|
||||
@@ -285,7 +442,7 @@
|
||||
.then(function(data) {
|
||||
var el;
|
||||
el = document.querySelector('[data-metric="cpu_percent"]');
|
||||
if (el) el.textContent = data.cpu_percent;
|
||||
if (el) el.textContent = data.cpu_percent.toFixed(2);
|
||||
el = document.querySelector('[data-metric="cpu_bar"]');
|
||||
if (el) {
|
||||
el.style.width = data.cpu_percent + '%';
|
||||
@@ -298,7 +455,7 @@
|
||||
}
|
||||
|
||||
el = document.querySelector('[data-metric="memory_percent"]');
|
||||
if (el) el.textContent = data.memory.percent;
|
||||
if (el) el.textContent = data.memory.percent.toFixed(2);
|
||||
el = document.querySelector('[data-metric="memory_bar"]');
|
||||
if (el) el.style.width = data.memory.percent + '%';
|
||||
el = document.querySelector('[data-metric="memory_used"]');
|
||||
@@ -307,7 +464,7 @@
|
||||
if (el) el.textContent = data.memory.total;
|
||||
|
||||
el = document.querySelector('[data-metric="disk_percent"]');
|
||||
if (el) el.textContent = data.disk.percent;
|
||||
if (el) el.textContent = data.disk.percent.toFixed(2);
|
||||
el = document.querySelector('[data-metric="disk_bar"]');
|
||||
if (el) {
|
||||
el.style.width = data.disk.percent + '%';
|
||||
@@ -325,6 +482,55 @@
|
||||
el = document.querySelector('[data-metric="objects_count"]');
|
||||
if (el) el.textContent = data.app.objects;
|
||||
|
||||
        var cpuHigh = data.cpu_percent > 80;
        var memHigh = data.memory.percent > 85;
        var diskHigh = data.disk.percent > 90;
        var hasIssues = cpuHigh || memHigh || diskHigh;

        var healthCard = document.getElementById('systemHealthCard');
        if (healthCard) {
          healthCard.style.background = hasIssues
            ? 'linear-gradient(135deg, #ef4444 0%, #f97316 100%)'
            : 'linear-gradient(135deg, #3b82f6 0%, #8b5cf6 100%)';
        }

        var healthIcon = document.getElementById('healthIcon');
        if (healthIcon) {
          healthIcon.innerHTML = hasIssues
            ? '<path d="M7.938 2.016A.13.13 0 0 1 8.002 2a.13.13 0 0 1 .063.016.146.146 0 0 1 .054.057l6.857 11.667c.036.06.035.124.002.183a.163.163 0 0 1-.054.06.116.116 0 0 1-.066.017H1.146a.115.115 0 0 1-.066-.017.163.163 0 0 1-.054-.06.176.176 0 0 1 .002-.183L7.884 2.073a.147.147 0 0 1 .054-.057zm1.044-.45a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566z"/><path d="M7.002 12a1 1 0 1 1 2 0 1 1 0 0 1-2 0zM7.1 5.995a.905.905 0 1 1 1.8 0l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995z"/>'
            : '<path fill-rule="evenodd" d="M10.354 6.146a.5.5 0 0 1 0 .708l-3 3a.5.5 0 0 1-.708 0l-1.5-1.5a.5.5 0 1 1 .708-.708L7 8.793l2.646-2.647a.5.5 0 0 1 .708 0z"/><path d="M4.406 3.342A5.53 5.53 0 0 1 8 2c2.69 0 4.923 2 5.166 4.579C14.758 6.804 16 8.137 16 9.773 16 11.569 14.502 13 12.687 13H3.781C1.708 13 0 11.366 0 9.318c0-1.763 1.266-3.223 2.942-3.593.143-.863.698-1.723 1.464-2.383z"/>';
        }

        var healthBadge = document.getElementById('healthBadge');
        if (healthBadge) {
          healthBadge.className = 'badge bg-white fw-semibold px-3 py-2 ' + (hasIssues ? 'text-danger' : 'text-primary');
        }

        var healthBadgeIcon = document.getElementById('healthBadgeIcon');
        if (healthBadgeIcon) {
          healthBadgeIcon.innerHTML = hasIssues
            ? '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM8 4a.905.905 0 0 0-.9.995l.35 3.507a.552.552 0 0 0 1.1 0l.35-3.507A.905.905 0 0 0 8 4zm.002 6a1 1 0 1 0 0 2 1 1 0 0 0 0-2z"/>'
            : '<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>';
        }

        var healthContent = document.getElementById('healthContent');
        if (healthContent) {
          if (hasIssues) {
            var issues = [];
            if (cpuHigh) issues.push('<li class="mb-1">CPU usage is high (' + data.cpu_percent.toFixed(1) + '%)</li>');
            if (memHigh) issues.push('<li class="mb-1">Memory usage is high (' + data.memory.percent.toFixed(1) + '%)</li>');
            if (diskHigh) issues.push('<li class="mb-1">Disk space is critically low (' + data.disk.percent.toFixed(1) + '% used)</li>');
            healthContent.innerHTML = '<ul class="list-unstyled small mb-4 opacity-90">' + issues.join('') + '</ul>';
          } else {
            healthContent.innerHTML = '<p class="card-text opacity-90 mb-4 small">All resources are within normal operating parameters.</p>';
          }
        }

        el = document.querySelector('[data-metric="health_uptime"]');
        if (el) el.textContent = data.app.uptime_days + 'd';
        el = document.querySelector('[data-metric="health_buckets"]');
        if (el) el.textContent = data.app.buckets;

        countdown = 5;
      })
      .catch(function(err) {
@@ -372,5 +578,369 @@

  startPolling();
})();

{% if operation_metrics_enabled %}
(function() {
  var methodChart = null;
  var statusChart = null;
  var endpointChart = null;
  var opStatus = document.getElementById('opStatus');
  var opTimer = null;
  var methodColors = {
    'GET': '#0d6efd',
    'PUT': '#198754',
    'POST': '#ffc107',
    'DELETE': '#dc3545',
    'HEAD': '#6c757d',
    'OPTIONS': '#0dcaf0'
  };
  var statusColors = {
    '2xx': '#198754',
    '3xx': '#0dcaf0',
    '4xx': '#ffc107',
    '5xx': '#dc3545'
  };
  var endpointColors = {
    'object': '#0d6efd',
    'bucket': '#198754',
    'ui': '#6c757d',
    'service': '#0dcaf0',
    'kms': '#ffc107'
  };

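  // Renders a raw byte count with binary (1024-based) units, e.g.
  // formatBytes(1536) -> "1.5 KB"; the unit index is floor(log(bytes) / log(1024)).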
  function formatBytes(bytes) {
    if (bytes === 0) return '0 B';
    var k = 1024;
    var sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
    var i = Math.floor(Math.log(bytes) / Math.log(k));
    return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + ' ' + sizes[i];
  }

  function initOpCharts() {
    var methodCtx = document.getElementById('methodChart');
    var statusCtx = document.getElementById('statusChart');
    var endpointCtx = document.getElementById('endpointChart');

    if (methodCtx) {
      methodChart = new Chart(methodCtx, {
        type: 'doughnut',
        data: {
          labels: [],
          datasets: [{
            data: [],
            backgroundColor: []
          }]
        },
        options: {
          responsive: true,
          maintainAspectRatio: false,
          animation: false,
          plugins: {
            legend: { position: 'right', labels: { boxWidth: 12, font: { size: 11 } } }
          }
        }
      });
    }

    if (statusCtx) {
      statusChart = new Chart(statusCtx, {
        type: 'bar',
        data: {
          labels: [],
          datasets: [{
            data: [],
            backgroundColor: []
          }]
        },
        options: {
          responsive: true,
          maintainAspectRatio: false,
          animation: false,
          plugins: { legend: { display: false } },
          scales: {
            y: { beginAtZero: true, ticks: { stepSize: 1 } }
          }
        }
      });
    }

    if (endpointCtx) {
      endpointChart = new Chart(endpointCtx, {
        type: 'bar',
        data: {
          labels: [],
          datasets: [{
            data: [],
            backgroundColor: []
          }]
        },
        options: {
          responsive: true,
          maintainAspectRatio: false,
          indexAxis: 'y',
          animation: false,
          plugins: { legend: { display: false } },
          scales: {
            x: { beginAtZero: true, ticks: { stepSize: 1 } }
          }
        }
      });
    }
  }

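  // Polls the operations stats endpoint and redraws the summary counters,
  // the three charts, and the error-code table in place; it is a no-op
  // while the tab is hidden so background tabs stop generating traffic.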
  function updateOpMetrics() {
    if (document.hidden) return;
    fetch('/ui/metrics/operations')
      .then(function(r) { return r.json(); })
      .then(function(data) {
        if (!data.enabled || !data.stats) {
          if (opStatus) opStatus.textContent = 'Operation metrics not available';
          return;
        }
        var stats = data.stats;
        var totals = stats.totals || {};

        var totalEl = document.getElementById('opTotalRequests');
        var successEl = document.getElementById('opSuccessRate');
        var errorEl = document.getElementById('opErrorCount');
        var latencyEl = document.getElementById('opAvgLatency');
        var bytesInEl = document.getElementById('opBytesIn');
        var bytesOutEl = document.getElementById('opBytesOut');

        if (totalEl) totalEl.textContent = totals.count || 0;
        if (successEl) {
          var rate = totals.count > 0 ? ((totals.success_count / totals.count) * 100).toFixed(1) : 0;
          successEl.textContent = rate + '%';
        }
        if (errorEl) errorEl.textContent = totals.error_count || 0;
        if (latencyEl) latencyEl.textContent = (totals.latency_avg_ms || 0).toFixed(1) + 'ms';
        if (bytesInEl) bytesInEl.textContent = formatBytes(totals.bytes_in || 0);
        if (bytesOutEl) bytesOutEl.textContent = formatBytes(totals.bytes_out || 0);

        if (methodChart && stats.by_method) {
          var methods = Object.keys(stats.by_method);
          var methodData = methods.map(function(m) { return stats.by_method[m].count; });
          var methodBg = methods.map(function(m) { return methodColors[m] || '#6c757d'; });
          methodChart.data.labels = methods;
          methodChart.data.datasets[0].data = methodData;
          methodChart.data.datasets[0].backgroundColor = methodBg;
          methodChart.update('none');
        }

        if (statusChart && stats.by_status_class) {
          var statuses = Object.keys(stats.by_status_class).sort();
          var statusData = statuses.map(function(s) { return stats.by_status_class[s]; });
          var statusBg = statuses.map(function(s) { return statusColors[s] || '#6c757d'; });
          statusChart.data.labels = statuses;
          statusChart.data.datasets[0].data = statusData;
          statusChart.data.datasets[0].backgroundColor = statusBg;
          statusChart.update('none');
        }

        if (endpointChart && stats.by_endpoint) {
          var endpoints = Object.keys(stats.by_endpoint);
          var endpointData = endpoints.map(function(e) { return stats.by_endpoint[e].count; });
          var endpointBg = endpoints.map(function(e) { return endpointColors[e] || '#6c757d'; });
          endpointChart.data.labels = endpoints;
          endpointChart.data.datasets[0].data = endpointData;
          endpointChart.data.datasets[0].backgroundColor = endpointBg;
          endpointChart.update('none');
        }

        var errorBody = document.getElementById('errorCodesBody');
        if (errorBody && stats.error_codes) {
          var errorCodes = Object.entries(stats.error_codes);
          errorCodes.sort(function(a, b) { return b[1] - a[1]; });
          var totalErrors = errorCodes.reduce(function(sum, e) { return sum + e[1]; }, 0);
          errorCodes = errorCodes.slice(0, 10);
          if (errorCodes.length === 0) {
            errorBody.innerHTML = '<div class="text-muted small text-center py-4">' +
              '<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="bi bi-check-circle mb-2 text-success" viewBox="0 0 16 16">' +
              '<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>' +
              '<path d="M10.97 4.97a.235.235 0 0 0-.02.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-1.071-1.05z"/>' +
              '</svg><div>No S3 API errors</div></div>';
          } else {
            errorBody.innerHTML = errorCodes.map(function(e) {
              var pct = totalErrors > 0 ? ((e[1] / totalErrors) * 100).toFixed(0) : 0;
              return '<div class="d-flex align-items-center py-1" style="font-size: 0.8rem;">' +
                '<div style="flex: 1;"><code class="text-danger">' + e[0] + '</code></div>' +
                '<div class="text-end fw-semibold" style="width: 60px;">' + e[1] + '</div>' +
                '<div style="width: 100px; padding-left: 10px;"><div class="progress" style="height: 6px;"><div class="progress-bar bg-danger" style="width: ' + pct + '%"></div></div></div>' +
                '</div>';
            }).join('');
          }
        }

        var windowMins = Math.floor(stats.window_seconds / 60);
        var windowSecs = stats.window_seconds % 60;
        var windowStr = windowMins > 0 ? windowMins + 'm ' + windowSecs + 's' : windowSecs + 's';
        if (opStatus) opStatus.textContent = 'Window: ' + windowStr + ' | ' + new Date().toLocaleTimeString();
      })
      .catch(function(err) {
        console.error('Operation metrics fetch error:', err);
        if (opStatus) opStatus.textContent = 'Failed to load';
      });
  }

  function startOpPolling() {
    if (opTimer) clearInterval(opTimer);
    opTimer = setInterval(updateOpMetrics, 5000);
  }

  var resetBtn = document.getElementById('resetOpMetricsBtn');
  if (resetBtn) {
    resetBtn.addEventListener('click', function() {
      updateOpMetrics();
    });
  }

  document.addEventListener('visibilitychange', function() {
    if (document.hidden) {
      if (opTimer) clearInterval(opTimer);
      opTimer = null;
    } else {
      updateOpMetrics();
      startOpPolling();
    }
  });

  initOpCharts();
  updateOpMetrics();
  startOpPolling();
})();
{% endif %}

{% if metrics_history_enabled %}
(function() {
  var cpuChart = null;
  var memoryChart = null;
  var diskChart = null;
  var historyStatus = document.getElementById('historyStatus');
  var timeRangeSelect = document.getElementById('historyTimeRange');
  var historyTimer = null;
  var MAX_DATA_POINTS = 500;

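  // Builds one fixed-scale (0-100%) line chart; the translucent fill is
  // derived by appending a two-digit hex alpha byte ('20', roughly 12%)
  // to the line color.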
  function createChart(ctx, label, color) {
    return new Chart(ctx, {
      type: 'line',
      data: {
        labels: [],
        datasets: [{
          label: label,
          data: [],
          borderColor: color,
          backgroundColor: color + '20',
          fill: true,
          tension: 0.3,
          pointRadius: 3,
          pointHoverRadius: 6,
          hitRadius: 10
        }]
      },
      options: {
        responsive: true,
        maintainAspectRatio: true,
        animation: false,
        plugins: {
          legend: { display: false },
          tooltip: {
            callbacks: {
              label: function(ctx) { return ctx.parsed.y.toFixed(2) + '%'; }
            }
          }
        },
        scales: {
          x: {
            display: true,
            ticks: { maxRotation: 0, font: { size: 10 }, autoSkip: true, maxTicksLimit: 10 }
          },
          y: {
            display: true,
            min: 0,
            max: 100,
            ticks: { callback: function(v) { return v + '%'; } }
          }
        }
      }
    });
  }

  function initCharts() {
    var cpuCtx = document.getElementById('cpuHistoryChart');
    var memCtx = document.getElementById('memoryHistoryChart');
    var diskCtx = document.getElementById('diskHistoryChart');
    if (cpuCtx) cpuChart = createChart(cpuCtx, 'CPU %', '#0d6efd');
    if (memCtx) memoryChart = createChart(memCtx, 'Memory %', '#0dcaf0');
    if (diskCtx) diskChart = createChart(diskCtx, 'Disk %', '#ffc107');
  }

  function formatTime(ts) {
    var d = new Date(ts);
    return d.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
  }

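  // Fetches up to MAX_DATA_POINTS samples for the selected time window and
  // replaces each chart's labels and data wholesale before redrawing.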
  function loadHistory() {
    if (document.hidden) return;
    var hours = timeRangeSelect ? timeRangeSelect.value : 24;
    fetch('/ui/metrics/history?hours=' + hours)
      .then(function(r) { return r.json(); })
      .then(function(data) {
        if (!data.enabled || !data.history || data.history.length === 0) {
          if (historyStatus) historyStatus.textContent = 'No history data available yet. Data is recorded every ' + (data.interval_minutes || 5) + ' minutes.';
          return;
        }
        var history = data.history.slice(-MAX_DATA_POINTS);
        var labels = history.map(function(h) { return formatTime(h.timestamp); });
        var cpuData = history.map(function(h) { return h.cpu_percent; });
        var memData = history.map(function(h) { return h.memory_percent; });
        var diskData = history.map(function(h) { return h.disk_percent; });

        if (cpuChart) {
          cpuChart.data.labels = labels;
          cpuChart.data.datasets[0].data = cpuData;
          cpuChart.update('none');
        }
        if (memoryChart) {
          memoryChart.data.labels = labels;
          memoryChart.data.datasets[0].data = memData;
          memoryChart.update('none');
        }
        if (diskChart) {
          diskChart.data.labels = labels;
          diskChart.data.datasets[0].data = diskData;
          diskChart.update('none');
        }
        if (historyStatus) historyStatus.textContent = 'Showing ' + history.length + ' data points';
      })
      .catch(function(err) {
        console.error('History fetch error:', err);
        if (historyStatus) historyStatus.textContent = 'Failed to load history data';
      });
  }

  function startHistoryPolling() {
    if (historyTimer) clearInterval(historyTimer);
    historyTimer = setInterval(loadHistory, 60000);
  }

  if (timeRangeSelect) {
    timeRangeSelect.addEventListener('change', loadHistory);
  }

  document.addEventListener('visibilitychange', function() {
    if (document.hidden) {
      if (historyTimer) clearInterval(historyTimer);
      historyTimer = null;
    } else {
      loadHistory();
      startHistoryPolling();
    }
  });

  initCharts();
  loadHistory();
  startHistoryPolling();
})();
{% endif %}
</script>
{% endblock %}

@@ -35,6 +35,7 @@ def app(tmp_path: Path):
    flask_app = create_api_app(
        {
            "TESTING": True,
            "SECRET_KEY": "testing",
            "STORAGE_ROOT": storage_root,
            "IAM_CONFIG": iam_config,
            "BUCKET_POLICY_PATH": bucket_policies,

@@ -1,6 +1,3 @@
from urllib.parse import urlsplit


def test_bucket_and_object_lifecycle(client, signer):
    headers = signer("PUT", "/photos")
    response = client.put("/photos", headers=headers)
@@ -104,12 +101,12 @@ def test_request_id_header_present(client, signer):
    assert response.headers.get("X-Request-ID")


def test_healthcheck_returns_version(client):
    response = client.get("/healthz")
def test_healthcheck_returns_status(client):
    response = client.get("/myfsio/health")
    data = response.get_json()
    assert response.status_code == 200
    assert data["status"] == "ok"
    assert "version" in data
    assert "version" not in data


def test_missing_credentials_denied(client):
@@ -117,36 +114,20 @@ def test_missing_credentials_denied(client):
    assert response.status_code == 403


def test_presign_and_bucket_policies(client, signer):
    # Create bucket and object
def test_bucket_policies_deny_reads(client, signer):
    import json

    headers = signer("PUT", "/docs")
    assert client.put("/docs", headers=headers).status_code == 200


    headers = signer("PUT", "/docs/readme.txt", body=b"content")
    assert client.put("/docs/readme.txt", headers=headers, data=b"content").status_code == 200

    # Generate presigned GET URL and follow it
    json_body = {"method": "GET", "expires_in": 120}
    # Flask test client json parameter automatically sets Content-Type and serializes body
    # But for signing we need the body bytes.
    import json
    body_bytes = json.dumps(json_body).encode("utf-8")
    headers = signer("POST", "/presign/docs/readme.txt", headers={"Content-Type": "application/json"}, body=body_bytes)

    response = client.post(
        "/presign/docs/readme.txt",
        headers=headers,
        json=json_body,
    )
    headers = signer("GET", "/docs/readme.txt")
    response = client.get("/docs/readme.txt", headers=headers)
    assert response.status_code == 200
    presigned_url = response.get_json()["url"]
    parts = urlsplit(presigned_url)
    presigned_path = f"{parts.path}?{parts.query}"
    download = client.get(presigned_path)
    assert download.status_code == 200
    assert download.data == b"content"
    assert response.data == b"content"

    # Attach a deny policy for GETs
    policy = {
        "Version": "2012-10-17",
        "Statement": [
@@ -160,29 +141,26 @@ def test_bucket_policies_deny_reads(client, signer):
        ],
    }
    policy_bytes = json.dumps(policy).encode("utf-8")
    headers = signer("PUT", "/bucket-policy/docs", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/bucket-policy/docs", headers=headers, json=policy).status_code == 204

    headers = signer("GET", "/bucket-policy/docs")
    fetched = client.get("/bucket-policy/docs", headers=headers)
    headers = signer("PUT", "/docs?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/docs?policy", headers=headers, json=policy).status_code == 204

    headers = signer("GET", "/docs?policy")
    fetched = client.get("/docs?policy", headers=headers)
    assert fetched.status_code == 200
    assert fetched.get_json()["Version"] == "2012-10-17"

    # Reads are now denied by bucket policy
    headers = signer("GET", "/docs/readme.txt")
    denied = client.get("/docs/readme.txt", headers=headers)
    assert denied.status_code == 403

    # Presign attempts are also denied
    json_body = {"method": "GET", "expires_in": 60}
    body_bytes = json.dumps(json_body).encode("utf-8")
    headers = signer("POST", "/presign/docs/readme.txt", headers={"Content-Type": "application/json"}, body=body_bytes)
    response = client.post(
        "/presign/docs/readme.txt",
        headers=headers,
        json=json_body,
    )
    assert response.status_code == 403
    headers = signer("DELETE", "/docs?policy")
    assert client.delete("/docs?policy", headers=headers).status_code == 204

    headers = signer("DELETE", "/docs/readme.txt")
    assert client.delete("/docs/readme.txt", headers=headers).status_code == 204

    headers = signer("DELETE", "/docs")
    assert client.delete("/docs", headers=headers).status_code == 204


def test_trailing_slash_returns_xml(client):
@@ -193,9 +171,11 @@ def test_trailing_slash_returns_xml(client):


def test_public_policy_allows_anonymous_list_and_read(client, signer):
    import json

    headers = signer("PUT", "/public")
    assert client.put("/public", headers=headers).status_code == 200


    headers = signer("PUT", "/public/hello.txt", body=b"hi")
    assert client.put("/public/hello.txt", headers=headers, data=b"hi").status_code == 200

@@ -221,10 +201,9 @@ def test_public_policy_allows_anonymous_list_and_read(client, signer):
            },
        ],
    }
    import json
    policy_bytes = json.dumps(policy).encode("utf-8")
    headers = signer("PUT", "/bucket-policy/public", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/bucket-policy/public", headers=headers, json=policy).status_code == 204
    headers = signer("PUT", "/public?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/public?policy", headers=headers, json=policy).status_code == 204

    list_response = client.get("/public")
    assert list_response.status_code == 200
@@ -236,18 +215,20 @@ def test_public_policy_allows_anonymous_list_and_read(client, signer):

    headers = signer("DELETE", "/public/hello.txt")
    assert client.delete("/public/hello.txt", headers=headers).status_code == 204

    headers = signer("DELETE", "/bucket-policy/public")
    assert client.delete("/bucket-policy/public", headers=headers).status_code == 204


    headers = signer("DELETE", "/public?policy")
    assert client.delete("/public?policy", headers=headers).status_code == 204

    headers = signer("DELETE", "/public")
    assert client.delete("/public", headers=headers).status_code == 204


def test_principal_dict_with_object_get_only(client, signer):
    import json

    headers = signer("PUT", "/mixed")
    assert client.put("/mixed", headers=headers).status_code == 200


    headers = signer("PUT", "/mixed/only.txt", body=b"ok")
    assert client.put("/mixed/only.txt", headers=headers, data=b"ok").status_code == 200

@@ -270,10 +251,9 @@ def test_principal_dict_with_object_get_only(client, signer):
            },
        ],
    }
    import json
    policy_bytes = json.dumps(policy).encode("utf-8")
    headers = signer("PUT", "/bucket-policy/mixed", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/bucket-policy/mixed", headers=headers, json=policy).status_code == 204
    headers = signer("PUT", "/mixed?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/mixed?policy", headers=headers, json=policy).status_code == 204

    assert client.get("/mixed").status_code == 403
    allowed = client.get("/mixed/only.txt")
@@ -282,18 +262,20 @@ def test_principal_dict_with_object_get_only(client, signer):

    headers = signer("DELETE", "/mixed/only.txt")
    assert client.delete("/mixed/only.txt", headers=headers).status_code == 204

    headers = signer("DELETE", "/bucket-policy/mixed")
    assert client.delete("/bucket-policy/mixed", headers=headers).status_code == 204


    headers = signer("DELETE", "/mixed?policy")
    assert client.delete("/mixed?policy", headers=headers).status_code == 204

    headers = signer("DELETE", "/mixed")
    assert client.delete("/mixed", headers=headers).status_code == 204


def test_bucket_policy_wildcard_resource_allows_object_get(client, signer):
    import json

    headers = signer("PUT", "/test")
    assert client.put("/test", headers=headers).status_code == 200


    headers = signer("PUT", "/test/vid.mp4", body=b"video")
    assert client.put("/test/vid.mp4", headers=headers, data=b"video").status_code == 200

@@ -314,10 +296,9 @@ def test_bucket_policy_wildcard_resource_allows_object_get(client, signer):
            },
        ],
    }
    import json
    policy_bytes = json.dumps(policy).encode("utf-8")
    headers = signer("PUT", "/bucket-policy/test", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/bucket-policy/test", headers=headers, json=policy).status_code == 204
    headers = signer("PUT", "/test?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
    assert client.put("/test?policy", headers=headers, json=policy).status_code == 204

    listing = client.get("/test")
    assert listing.status_code == 403
@@ -327,10 +308,10 @@ def test_bucket_policy_wildcard_resource_allows_object_get(client, signer):

    headers = signer("DELETE", "/test/vid.mp4")
    assert client.delete("/test/vid.mp4", headers=headers).status_code == 204

    headers = signer("DELETE", "/bucket-policy/test")
    assert client.delete("/bucket-policy/test", headers=headers).status_code == 204


    headers = signer("DELETE", "/test?policy")
    assert client.delete("/test?policy", headers=headers).status_code == 204

    headers = signer("DELETE", "/test")
    assert client.delete("/test", headers=headers).status_code == 204


@@ -15,6 +15,7 @@ def kms_client(tmp_path):

    app = create_app({
        "TESTING": True,
        "SECRET_KEY": "testing",
        "STORAGE_ROOT": str(tmp_path / "storage"),
        "IAM_CONFIG": str(tmp_path / "iam.json"),
        "BUCKET_POLICY_PATH": str(tmp_path / "policies.json"),

tests/test_operation_metrics.py (Normal file, 297 lines)
@@ -0,0 +1,297 @@
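# Exercises the OperationMetricsCollector: per-request recording, thread
# safety, status-class bucketing, error-code tallies, and snapshot
# persistence under <storage_root>/.myfsio.sys/config/operation_metrics.json.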
import threading
import time
from pathlib import Path

import pytest

from app.operation_metrics import (
    OperationMetricsCollector,
    OperationStats,
    classify_endpoint,
)


class TestOperationStats:
    def test_initial_state(self):
        stats = OperationStats()
        assert stats.count == 0
        assert stats.success_count == 0
        assert stats.error_count == 0
        assert stats.latency_sum_ms == 0.0
        assert stats.bytes_in == 0
        assert stats.bytes_out == 0

    def test_record_success(self):
        stats = OperationStats()
        stats.record(latency_ms=50.0, success=True, bytes_in=100, bytes_out=200)

        assert stats.count == 1
        assert stats.success_count == 1
        assert stats.error_count == 0
        assert stats.latency_sum_ms == 50.0
        assert stats.latency_min_ms == 50.0
        assert stats.latency_max_ms == 50.0
        assert stats.bytes_in == 100
        assert stats.bytes_out == 200

    def test_record_error(self):
        stats = OperationStats()
        stats.record(latency_ms=100.0, success=False, bytes_in=50, bytes_out=0)

        assert stats.count == 1
        assert stats.success_count == 0
        assert stats.error_count == 1

    def test_latency_min_max(self):
        stats = OperationStats()
        stats.record(latency_ms=50.0, success=True)
        stats.record(latency_ms=10.0, success=True)
        stats.record(latency_ms=100.0, success=True)

        assert stats.latency_min_ms == 10.0
        assert stats.latency_max_ms == 100.0
        assert stats.latency_sum_ms == 160.0

    def test_to_dict(self):
        stats = OperationStats()
        stats.record(latency_ms=50.0, success=True, bytes_in=100, bytes_out=200)
        stats.record(latency_ms=100.0, success=False, bytes_in=50, bytes_out=0)

        result = stats.to_dict()
        assert result["count"] == 2
        assert result["success_count"] == 1
        assert result["error_count"] == 1
        assert result["latency_avg_ms"] == 75.0
        assert result["latency_min_ms"] == 50.0
        assert result["latency_max_ms"] == 100.0
        assert result["bytes_in"] == 150
        assert result["bytes_out"] == 200

    def test_to_dict_empty(self):
        stats = OperationStats()
        result = stats.to_dict()
        assert result["count"] == 0
        assert result["latency_avg_ms"] == 0.0
        assert result["latency_min_ms"] == 0.0

    def test_merge(self):
        stats1 = OperationStats()
        stats1.record(latency_ms=50.0, success=True, bytes_in=100, bytes_out=200)

        stats2 = OperationStats()
        stats2.record(latency_ms=10.0, success=True, bytes_in=50, bytes_out=100)
        stats2.record(latency_ms=100.0, success=False, bytes_in=25, bytes_out=50)

        stats1.merge(stats2)

        assert stats1.count == 3
        assert stats1.success_count == 2
        assert stats1.error_count == 1
        assert stats1.latency_min_ms == 10.0
        assert stats1.latency_max_ms == 100.0
        assert stats1.bytes_in == 175
        assert stats1.bytes_out == 350


class TestClassifyEndpoint:
    def test_root_path(self):
        assert classify_endpoint("/") == "service"
        assert classify_endpoint("") == "service"

    def test_ui_paths(self):
        assert classify_endpoint("/ui") == "ui"
        assert classify_endpoint("/ui/buckets") == "ui"
        assert classify_endpoint("/ui/metrics") == "ui"

    def test_kms_paths(self):
        assert classify_endpoint("/kms") == "kms"
        assert classify_endpoint("/kms/keys") == "kms"

    def test_service_paths(self):
        assert classify_endpoint("/myfsio/health") == "service"

    def test_bucket_paths(self):
        assert classify_endpoint("/mybucket") == "bucket"
        assert classify_endpoint("/mybucket/") == "bucket"

    def test_object_paths(self):
        assert classify_endpoint("/mybucket/mykey") == "object"
        assert classify_endpoint("/mybucket/folder/nested/key.txt") == "object"


class TestOperationMetricsCollector:
    def test_record_and_get_stats(self, tmp_path: Path):
        collector = OperationMetricsCollector(
            storage_root=tmp_path,
            interval_minutes=60,
            retention_hours=24,
        )

        try:
            collector.record_request(
                method="GET",
                endpoint_type="bucket",
                status_code=200,
                latency_ms=50.0,
                bytes_in=0,
                bytes_out=1000,
            )

            collector.record_request(
                method="PUT",
                endpoint_type="object",
                status_code=201,
                latency_ms=100.0,
                bytes_in=500,
                bytes_out=0,
            )

            collector.record_request(
                method="GET",
                endpoint_type="object",
                status_code=404,
                latency_ms=25.0,
                bytes_in=0,
                bytes_out=0,
                error_code="NoSuchKey",
            )

            stats = collector.get_current_stats()

            assert stats["totals"]["count"] == 3
            assert stats["totals"]["success_count"] == 2
            assert stats["totals"]["error_count"] == 1

            assert "GET" in stats["by_method"]
            assert stats["by_method"]["GET"]["count"] == 2
            assert "PUT" in stats["by_method"]
            assert stats["by_method"]["PUT"]["count"] == 1

            assert "bucket" in stats["by_endpoint"]
            assert "object" in stats["by_endpoint"]
            assert stats["by_endpoint"]["object"]["count"] == 2

            assert stats["by_status_class"]["2xx"] == 2
            assert stats["by_status_class"]["4xx"] == 1

            assert stats["error_codes"]["NoSuchKey"] == 1
        finally:
            collector.shutdown()

    def test_thread_safety(self, tmp_path: Path):
        collector = OperationMetricsCollector(
            storage_root=tmp_path,
            interval_minutes=60,
            retention_hours=24,
        )

        try:
            num_threads = 5
            requests_per_thread = 100
            threads = []

            def record_requests():
                for _ in range(requests_per_thread):
                    collector.record_request(
                        method="GET",
                        endpoint_type="object",
                        status_code=200,
                        latency_ms=10.0,
                    )

            for _ in range(num_threads):
                t = threading.Thread(target=record_requests)
                threads.append(t)
                t.start()

            for t in threads:
                t.join()

            stats = collector.get_current_stats()
            assert stats["totals"]["count"] == num_threads * requests_per_thread
        finally:
            collector.shutdown()

    def test_status_class_categorization(self, tmp_path: Path):
        collector = OperationMetricsCollector(
            storage_root=tmp_path,
            interval_minutes=60,
            retention_hours=24,
        )

        try:
            collector.record_request("GET", "object", 200, 10.0)
            collector.record_request("GET", "object", 204, 10.0)
            collector.record_request("GET", "object", 301, 10.0)
            collector.record_request("GET", "object", 304, 10.0)
            collector.record_request("GET", "object", 400, 10.0)
            collector.record_request("GET", "object", 403, 10.0)
            collector.record_request("GET", "object", 404, 10.0)
            collector.record_request("GET", "object", 500, 10.0)
            collector.record_request("GET", "object", 503, 10.0)

            stats = collector.get_current_stats()
            assert stats["by_status_class"]["2xx"] == 2
            assert stats["by_status_class"]["3xx"] == 2
            assert stats["by_status_class"]["4xx"] == 3
            assert stats["by_status_class"]["5xx"] == 2
        finally:
            collector.shutdown()

    def test_error_code_tracking(self, tmp_path: Path):
        collector = OperationMetricsCollector(
            storage_root=tmp_path,
            interval_minutes=60,
            retention_hours=24,
        )

        try:
            collector.record_request("GET", "object", 404, 10.0, error_code="NoSuchKey")
            collector.record_request("GET", "object", 404, 10.0, error_code="NoSuchKey")
            collector.record_request("GET", "bucket", 403, 10.0, error_code="AccessDenied")
            collector.record_request("PUT", "object", 500, 10.0, error_code="InternalError")

            stats = collector.get_current_stats()
            assert stats["error_codes"]["NoSuchKey"] == 2
            assert stats["error_codes"]["AccessDenied"] == 1
            assert stats["error_codes"]["InternalError"] == 1
        finally:
            collector.shutdown()

    def test_history_persistence(self, tmp_path: Path):
        collector = OperationMetricsCollector(
            storage_root=tmp_path,
            interval_minutes=60,
            retention_hours=24,
        )

        try:
            collector.record_request("GET", "object", 200, 10.0)
            collector._take_snapshot()

            history = collector.get_history()
            assert len(history) == 1
            assert history[0]["totals"]["count"] == 1

            config_path = tmp_path / ".myfsio.sys" / "config" / "operation_metrics.json"
            assert config_path.exists()
        finally:
            collector.shutdown()

    def test_get_history_with_hours_filter(self, tmp_path: Path):
        collector = OperationMetricsCollector(
            storage_root=tmp_path,
            interval_minutes=60,
            retention_hours=24,
        )

        try:
            collector.record_request("GET", "object", 200, 10.0)
            collector._take_snapshot()

            history_all = collector.get_history()
            history_recent = collector.get_history(hours=1)

            assert len(history_all) >= len(history_recent)
        finally:
            collector.shutdown()

tests/test_site_sync.py (Normal file, 461 lines)
@@ -0,0 +1,461 @@
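# Covers bidirectional site sync: sync-state (de)serialization, conflict
# resolution by last-modified timestamp with ETag tie-breaking, and a
# mocked end-to-end pull from a remote S3-compatible endpoint.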
import io
import json
import time
from datetime import datetime, timezone
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

from app.connections import ConnectionStore, RemoteConnection
from app.replication import (
    ReplicationManager,
    ReplicationRule,
    REPLICATION_MODE_BIDIRECTIONAL,
    REPLICATION_MODE_NEW_ONLY,
)
from app.site_sync import (
    SiteSyncWorker,
    SyncState,
    SyncedObjectInfo,
    SiteSyncStats,
    RemoteObjectMeta,
    CLOCK_SKEW_TOLERANCE_SECONDS,
)
from app.storage import ObjectStorage


@pytest.fixture
def storage(tmp_path: Path):
    storage_root = tmp_path / "data"
    storage_root.mkdir(parents=True)
    return ObjectStorage(storage_root)


@pytest.fixture
def connections(tmp_path: Path):
    connections_path = tmp_path / "connections.json"
    store = ConnectionStore(connections_path)
    conn = RemoteConnection(
        id="test-conn",
        name="Test Remote",
        endpoint_url="http://localhost:9000",
        access_key="remote-access",
        secret_key="remote-secret",
        region="us-east-1",
    )
    store.add(conn)
    return store


@pytest.fixture
def replication_manager(storage, connections, tmp_path):
    rules_path = tmp_path / "replication_rules.json"
    storage_root = tmp_path / "data"
    storage_root.mkdir(exist_ok=True)
    manager = ReplicationManager(storage, connections, rules_path, storage_root)
    yield manager
    manager.shutdown(wait=False)


@pytest.fixture
def site_sync_worker(storage, connections, replication_manager, tmp_path):
    storage_root = tmp_path / "data"
    worker = SiteSyncWorker(
        storage=storage,
        connections=connections,
        replication_manager=replication_manager,
        storage_root=storage_root,
        interval_seconds=60,
        batch_size=100,
    )
    yield worker
    worker.shutdown()


class TestSyncedObjectInfo:
    def test_to_dict(self):
        info = SyncedObjectInfo(
            last_synced_at=1234567890.0,
            remote_etag="abc123",
            source="remote",
        )
        result = info.to_dict()
        assert result["last_synced_at"] == 1234567890.0
        assert result["remote_etag"] == "abc123"
        assert result["source"] == "remote"

    def test_from_dict(self):
        data = {
            "last_synced_at": 9876543210.0,
            "remote_etag": "def456",
            "source": "local",
        }
        info = SyncedObjectInfo.from_dict(data)
        assert info.last_synced_at == 9876543210.0
        assert info.remote_etag == "def456"
        assert info.source == "local"


class TestSyncState:
    def test_to_dict(self):
        state = SyncState(
            synced_objects={
                "test.txt": SyncedObjectInfo(
                    last_synced_at=1000.0,
                    remote_etag="etag1",
                    source="remote",
                )
            },
            last_full_sync=2000.0,
        )
        result = state.to_dict()
        assert "test.txt" in result["synced_objects"]
        assert result["synced_objects"]["test.txt"]["remote_etag"] == "etag1"
        assert result["last_full_sync"] == 2000.0

    def test_from_dict(self):
        data = {
            "synced_objects": {
                "file.txt": {
                    "last_synced_at": 3000.0,
                    "remote_etag": "etag2",
                    "source": "remote",
                }
            },
            "last_full_sync": 4000.0,
        }
        state = SyncState.from_dict(data)
        assert "file.txt" in state.synced_objects
        assert state.synced_objects["file.txt"].remote_etag == "etag2"
        assert state.last_full_sync == 4000.0

    def test_from_dict_empty(self):
        state = SyncState.from_dict({})
        assert state.synced_objects == {}
        assert state.last_full_sync is None


class TestSiteSyncStats:
    def test_to_dict(self):
        stats = SiteSyncStats(
            last_sync_at=1234567890.0,
            objects_pulled=10,
            objects_skipped=5,
            conflicts_resolved=2,
            deletions_applied=1,
            errors=0,
        )
        result = stats.to_dict()
        assert result["objects_pulled"] == 10
        assert result["objects_skipped"] == 5
        assert result["conflicts_resolved"] == 2
        assert result["deletions_applied"] == 1
        assert result["errors"] == 0


class TestRemoteObjectMeta:
    def test_from_s3_object(self):
        obj = {
            "Key": "test/file.txt",
            "Size": 1024,
            "LastModified": datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
            "ETag": '"abc123def456"',
        }
        meta = RemoteObjectMeta.from_s3_object(obj)
        assert meta.key == "test/file.txt"
        assert meta.size == 1024
        assert meta.last_modified == datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
        assert meta.etag == "abc123def456"


class TestReplicationRuleBidirectional:
    def test_rule_with_bidirectional_mode(self):
        rule = ReplicationRule(
            bucket_name="sync-bucket",
            target_connection_id="test-conn",
            target_bucket="remote-bucket",
            enabled=True,
            mode=REPLICATION_MODE_BIDIRECTIONAL,
            sync_deletions=True,
        )
        assert rule.mode == REPLICATION_MODE_BIDIRECTIONAL
        assert rule.sync_deletions is True
        assert rule.last_pull_at is None

    def test_rule_to_dict_includes_new_fields(self):
        rule = ReplicationRule(
            bucket_name="sync-bucket",
            target_connection_id="test-conn",
            target_bucket="remote-bucket",
            mode=REPLICATION_MODE_BIDIRECTIONAL,
            sync_deletions=False,
            last_pull_at=1234567890.0,
        )
        result = rule.to_dict()
        assert result["mode"] == REPLICATION_MODE_BIDIRECTIONAL
        assert result["sync_deletions"] is False
        assert result["last_pull_at"] == 1234567890.0

    def test_rule_from_dict_with_new_fields(self):
        data = {
            "bucket_name": "sync-bucket",
            "target_connection_id": "test-conn",
            "target_bucket": "remote-bucket",
            "mode": REPLICATION_MODE_BIDIRECTIONAL,
            "sync_deletions": False,
            "last_pull_at": 1234567890.0,
        }
        rule = ReplicationRule.from_dict(data)
        assert rule.mode == REPLICATION_MODE_BIDIRECTIONAL
        assert rule.sync_deletions is False
        assert rule.last_pull_at == 1234567890.0

    def test_rule_from_dict_defaults_new_fields(self):
        data = {
            "bucket_name": "sync-bucket",
            "target_connection_id": "test-conn",
            "target_bucket": "remote-bucket",
        }
        rule = ReplicationRule.from_dict(data)
        assert rule.sync_deletions is True
        assert rule.last_pull_at is None


class TestSiteSyncWorker:
    def test_start_and_shutdown(self, site_sync_worker):
        site_sync_worker.start()
        assert site_sync_worker._sync_thread is not None
        assert site_sync_worker._sync_thread.is_alive()
        site_sync_worker.shutdown()
        assert not site_sync_worker._sync_thread.is_alive()

    def test_trigger_sync_no_rule(self, site_sync_worker):
        result = site_sync_worker.trigger_sync("nonexistent-bucket")
        assert result is None

    def test_trigger_sync_wrong_mode(self, site_sync_worker, replication_manager):
        rule = ReplicationRule(
            bucket_name="new-only-bucket",
            target_connection_id="test-conn",
            target_bucket="remote-bucket",
            mode=REPLICATION_MODE_NEW_ONLY,
            enabled=True,
        )
        replication_manager.set_rule(rule)
        result = site_sync_worker.trigger_sync("new-only-bucket")
        assert result is None

    def test_trigger_sync_disabled_rule(self, site_sync_worker, replication_manager):
        rule = ReplicationRule(
            bucket_name="disabled-bucket",
            target_connection_id="test-conn",
            target_bucket="remote-bucket",
            mode=REPLICATION_MODE_BIDIRECTIONAL,
            enabled=False,
        )
        replication_manager.set_rule(rule)
        result = site_sync_worker.trigger_sync("disabled-bucket")
        assert result is None

    def test_get_stats_no_sync(self, site_sync_worker):
        stats = site_sync_worker.get_stats("nonexistent")
        assert stats is None

    def test_resolve_conflict_remote_newer(self, site_sync_worker):
        local_meta = MagicMock()
        local_meta.last_modified = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
        local_meta.etag = "local123"

        remote_meta = RemoteObjectMeta(
            key="test.txt",
            size=100,
            last_modified=datetime(2025, 1, 2, 12, 0, 0, tzinfo=timezone.utc),
            etag="remote456",
        )

        result = site_sync_worker._resolve_conflict(local_meta, remote_meta)
        assert result == "pull"

    def test_resolve_conflict_local_newer(self, site_sync_worker):
        local_meta = MagicMock()
        local_meta.last_modified = datetime(2025, 1, 2, 12, 0, 0, tzinfo=timezone.utc)
        local_meta.etag = "local123"

        remote_meta = RemoteObjectMeta(
            key="test.txt",
            size=100,
            last_modified=datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc),
            etag="remote456",
        )

        result = site_sync_worker._resolve_conflict(local_meta, remote_meta)
        assert result == "keep"

    def test_resolve_conflict_same_time_same_etag(self, site_sync_worker):
        ts = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
        local_meta = MagicMock()
        local_meta.last_modified = ts
        local_meta.etag = "same123"

        remote_meta = RemoteObjectMeta(
            key="test.txt",
            size=100,
            last_modified=ts,
            etag="same123",
        )

        result = site_sync_worker._resolve_conflict(local_meta, remote_meta)
        assert result == "skip"

    def test_resolve_conflict_same_time_different_etag(self, site_sync_worker):
        ts = datetime(2025, 1, 1, 12, 0, 0, tzinfo=timezone.utc)
        local_meta = MagicMock()
        local_meta.last_modified = ts
        local_meta.etag = "aaa"

        remote_meta = RemoteObjectMeta(
            key="test.txt",
            size=100,
            last_modified=ts,
            etag="zzz",
        )

        result = site_sync_worker._resolve_conflict(local_meta, remote_meta)
        assert result == "pull"

    def test_sync_state_persistence(self, site_sync_worker, tmp_path):
        bucket_name = "test-bucket"
        state = SyncState(
            synced_objects={
                "file1.txt": SyncedObjectInfo(
                    last_synced_at=time.time(),
                    remote_etag="etag1",
                    source="remote",
                )
            },
            last_full_sync=time.time(),
        )

        site_sync_worker._save_sync_state(bucket_name, state)

        loaded = site_sync_worker._load_sync_state(bucket_name)
        assert "file1.txt" in loaded.synced_objects
        assert loaded.synced_objects["file1.txt"].remote_etag == "etag1"

    def test_load_sync_state_nonexistent(self, site_sync_worker):
        state = site_sync_worker._load_sync_state("nonexistent-bucket")
        assert state.synced_objects == {}
        assert state.last_full_sync is None

    @patch("app.site_sync._create_sync_client")
    def test_list_remote_objects(self, mock_create_client, site_sync_worker, connections, replication_manager):
        mock_client = MagicMock()
        mock_paginator = MagicMock()
        mock_paginator.paginate.return_value = [
            {
                "Contents": [
                    {
                        "Key": "file1.txt",
                        "Size": 100,
                        "LastModified": datetime(2025, 1, 1, tzinfo=timezone.utc),
                        "ETag": '"etag1"',
                    },
                    {
                        "Key": "file2.txt",
                        "Size": 200,
                        "LastModified": datetime(2025, 1, 2, tzinfo=timezone.utc),
                        "ETag": '"etag2"',
                    },
                ]
            }
        ]
        mock_client.get_paginator.return_value = mock_paginator
        mock_create_client.return_value = mock_client

        rule = ReplicationRule(
            bucket_name="local-bucket",
            target_connection_id="test-conn",
            target_bucket="remote-bucket",
            mode=REPLICATION_MODE_BIDIRECTIONAL,
        )
        conn = connections.get("test-conn")

        result = site_sync_worker._list_remote_objects(rule, conn)

        assert "file1.txt" in result
        assert "file2.txt" in result
        assert result["file1.txt"].size == 100
        assert result["file2.txt"].size == 200

    def test_list_local_objects(self, site_sync_worker, storage):
        storage.create_bucket("test-bucket")
        storage.put_object("test-bucket", "file1.txt", io.BytesIO(b"content1"))
        storage.put_object("test-bucket", "file2.txt", io.BytesIO(b"content2"))

        result = site_sync_worker._list_local_objects("test-bucket")

        assert "file1.txt" in result
        assert "file2.txt" in result

    @patch("app.site_sync._create_sync_client")
    def test_sync_bucket_connection_not_found(self, mock_create_client, site_sync_worker, replication_manager):
        rule = ReplicationRule(
            bucket_name="test-bucket",
            target_connection_id="missing-conn",
            target_bucket="remote-bucket",
            mode=REPLICATION_MODE_BIDIRECTIONAL,
            enabled=True,
        )
        replication_manager.set_rule(rule)

        stats = site_sync_worker._sync_bucket(rule)
        assert stats.errors == 1


class TestSiteSyncIntegration:
    @patch("app.site_sync._create_sync_client")
    def test_full_sync_cycle(self, mock_create_client, site_sync_worker, storage, connections, replication_manager):
        storage.create_bucket("sync-bucket")
        storage.put_object("sync-bucket", "local-only.txt", io.BytesIO(b"local content"))

        mock_client = MagicMock()
        mock_paginator = MagicMock()
        mock_paginator.paginate.return_value = [
            {
                "Contents": [
                    {
                        "Key": "remote-only.txt",
                        "Size": 100,
                        "LastModified": datetime(2025, 1, 15, tzinfo=timezone.utc),
                        "ETag": '"remoteetag"',
                    },
                ]
            }
        ]
        mock_client.get_paginator.return_value = mock_paginator
        mock_client.head_object.return_value = {"Metadata": {}}

        def mock_download(bucket, key, path):
            Path(path).write_bytes(b"remote content")

        mock_client.download_file.side_effect = mock_download
        mock_create_client.return_value = mock_client

        rule = ReplicationRule(
            bucket_name="sync-bucket",
            target_connection_id="test-conn",
            target_bucket="remote-bucket",
            mode=REPLICATION_MODE_BIDIRECTIONAL,
            enabled=True,
        )
        replication_manager.set_rule(rule)

        stats = site_sync_worker._sync_bucket(rule)

        assert stats.objects_pulled == 1
        assert stats.errors == 0

        objects = site_sync_worker._list_local_objects("sync-bucket")
        assert "local-only.txt" in objects
        assert "remote-only.txt" in objects

@@ -28,6 +28,7 @@ def _make_app(tmp_path: Path):
    flask_app = create_app(
        {
            "TESTING": True,
            "SECRET_KEY": "testing",
            "WTF_CSRF_ENABLED": False,
            "STORAGE_ROOT": storage_root,
            "IAM_CONFIG": iam_config,