Merge pull request 'MyFSIO v0.2.2 Release' (#14) from next into main

Reviewed-on: #14
2026-01-19 07:12:15 +00:00
23 changed files with 2563 additions and 472 deletions


@@ -32,6 +32,6 @@ ENV APP_HOST=0.0.0.0 \
FLASK_DEBUG=0
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
CMD python -c "import requests; requests.get('http://localhost:5000/healthz', timeout=2)"
CMD python -c "import requests; requests.get('http://localhost:5000/myfsio/health', timeout=2)"
CMD ["./docker-entrypoint.sh"]


@@ -149,19 +149,13 @@ All endpoints require AWS Signature Version 4 authentication unless using presig
| `POST` | `/<bucket>/<key>?uploadId=X` | Complete multipart upload |
| `DELETE` | `/<bucket>/<key>?uploadId=X` | Abort multipart upload |
### Presigned URLs
### Bucket Policies (S3-compatible)
| Method | Endpoint | Description |
|--------|----------|-------------|
| `POST` | `/presign/<bucket>/<key>` | Generate presigned URL |
### Bucket Policies
| Method | Endpoint | Description |
|--------|----------|-------------|
| `GET` | `/bucket-policy/<bucket>` | Get bucket policy |
| `PUT` | `/bucket-policy/<bucket>` | Set bucket policy |
| `DELETE` | `/bucket-policy/<bucket>` | Delete bucket policy |
| `GET` | `/<bucket>?policy` | Get bucket policy |
| `PUT` | `/<bucket>?policy` | Set bucket policy |
| `DELETE` | `/<bucket>?policy` | Delete bucket policy |
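
With policies exposed on the standard `?policy` subresource, off-the-shelf S3 clients can manage them without MyFSIO-specific routes. A sketch using boto3 (endpoint and credentials are placeholders):

```python
import json

import boto3

# Placeholder endpoint/credentials for a local MyFSIO instance.
s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:5000",
    aws_access_key_id="EXAMPLE_KEY",
    aws_secret_access_key="EXAMPLE_SECRET",
)
policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": "*",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::demo-bucket/*",
        }
    ],
}
# boto3 issues PUT /demo-bucket?policy under the hood.
s3.put_bucket_policy(Bucket="demo-bucket", Policy=json.dumps(policy))
```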
### Versioning
@@ -175,7 +169,7 @@ All endpoints require AWS Signature Version 4 authentication unless using presig
| Method | Endpoint | Description |
|--------|----------|-------------|
| `GET` | `/healthz` | Health check endpoint |
| `GET` | `/myfsio/health` | Health check endpoint |
## IAM & Access Control


@@ -16,6 +16,7 @@ from flask_wtf.csrf import CSRFError
from werkzeug.middleware.proxy_fix import ProxyFix
from .access_logging import AccessLoggingService
from .operation_metrics import OperationMetricsCollector, classify_endpoint
from .compression import GzipMiddleware
from .acl import AclService
from .bucket_policies import BucketPolicyStore
@@ -187,6 +188,15 @@ def create_app(
app.extensions["notifications"] = notification_service
app.extensions["access_logging"] = access_logging_service
operation_metrics_collector = None
if app.config.get("OPERATION_METRICS_ENABLED", False):
    operation_metrics_collector = OperationMetricsCollector(
        storage_root,
        interval_minutes=app.config.get("OPERATION_METRICS_INTERVAL_MINUTES", 5),
        retention_hours=app.config.get("OPERATION_METRICS_RETENTION_HOURS", 24),
    )
app.extensions["operation_metrics"] = operation_metrics_collector
@app.errorhandler(500)
def internal_error(error):
return render_template('500.html'), 500
@@ -227,6 +237,30 @@ def create_app(
except (ValueError, OSError):
return "Unknown"
@app.template_filter("format_datetime")
def format_datetime_filter(dt, include_tz: bool = True) -> str:
    """Format datetime object as human-readable string in configured timezone."""
    from datetime import datetime, timezone as dt_timezone
    from zoneinfo import ZoneInfo

    if not dt:
        return ""
    try:
        display_tz = app.config.get("DISPLAY_TIMEZONE", "UTC")
        if display_tz and display_tz != "UTC":
            try:
                tz = ZoneInfo(display_tz)
                if dt.tzinfo is None:
                    dt = dt.replace(tzinfo=dt_timezone.utc)
                dt = dt.astimezone(tz)
            except (KeyError, ValueError):
                pass
        tz_abbr = dt.strftime("%Z") or "UTC"
        if include_tz:
            return f"{dt.strftime('%b %d, %Y %H:%M')} ({tz_abbr})"
        return dt.strftime("%b %d, %Y %H:%M")
    except (ValueError, AttributeError):
        return str(dt)
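A sketch of what the new filter produces; the calls assume an `app` returned by `create_app()` so the closure over `app.config` resolves (values illustrative):

```python
from datetime import datetime, timezone

from flask import render_template_string

# Hypothetical check inside the application created above.
dt = datetime(2026, 1, 19, 7, 12, tzinfo=timezone.utc)
with app.app_context():
    print(render_template_string("{{ dt | format_datetime }}", dt=dt))
    # -> "Jan 19, 2026 07:12 (UTC)" when DISPLAY_TIMEZONE is UTC
    print(render_template_string("{{ dt | format_datetime(False) }}", dt=dt))
    # -> "Jan 19, 2026 07:12"
```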
if include_api:
from .s3_api import s3_api_bp
from .kms_api import kms_api_bp
@@ -254,9 +288,9 @@ def create_app(
return render_template("404.html"), 404
return error
@app.get("/healthz")
@app.get("/myfsio/health")
def healthcheck() -> Dict[str, str]:
return {"status": "ok", "version": app.config.get("APP_VERSION", "unknown")}
return {"status": "ok"}
return app
@@ -332,6 +366,7 @@ def _configure_logging(app: Flask) -> None:
def _log_request_start() -> None:
g.request_id = uuid.uuid4().hex
g.request_started_at = time.perf_counter()
g.request_bytes_in = request.content_length or 0
app.logger.info(
"Request started",
extra={"path": request.path, "method": request.method, "remote_addr": request.remote_addr},
@@ -353,4 +388,21 @@ def _configure_logging(app: Flask) -> None:
},
)
response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"
operation_metrics = app.extensions.get("operation_metrics")
if operation_metrics:
    bytes_in = getattr(g, "request_bytes_in", 0)
    bytes_out = response.content_length or 0
    error_code = getattr(g, "s3_error_code", None)
    endpoint_type = classify_endpoint(request.path)
    operation_metrics.record_request(
        method=request.method,
        endpoint_type=endpoint_type,
        status_code=response.status_code,
        latency_ms=duration_ms,
        bytes_in=bytes_in,
        bytes_out=bytes_out,
        error_code=error_code,
    )
return response


@@ -1,9 +1,10 @@
from __future__ import annotations
import ipaddress
import json
import re
import time
from dataclasses import dataclass
from dataclasses import dataclass, field
from fnmatch import fnmatch, translate
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Pattern, Sequence, Tuple
@@ -11,14 +12,71 @@ from typing import Any, Dict, Iterable, List, Optional, Pattern, Sequence, Tuple
RESOURCE_PREFIX = "arn:aws:s3:::"
def _match_string_like(value: str, pattern: str) -> bool:
    regex = translate(pattern)
    return bool(re.match(regex, value, re.IGNORECASE))


def _ip_in_cidr(ip_str: str, cidr: str) -> bool:
    try:
        ip = ipaddress.ip_address(ip_str)
        network = ipaddress.ip_network(cidr, strict=False)
        return ip in network
    except ValueError:
        return False


def _evaluate_condition_operator(
    operator: str,
    condition_key: str,
    condition_values: List[str],
    context: Dict[str, Any],
) -> bool:
    context_value = context.get(condition_key)
    op_lower = operator.lower()
    if_exists = op_lower.endswith("ifexists")
    if if_exists:
        op_lower = op_lower[:-8]
    # Null must be evaluated before the missing-key short-circuit,
    # otherwise {"Null": {"key": "true"}} could never match an absent key.
    if op_lower == "null":
        is_null = context_value is None or context_value == ""
        expected_null = condition_values[0].lower() in ("true", "1", "yes") if condition_values else True
        return is_null == expected_null
    if context_value is None:
        return if_exists
    context_value_str = str(context_value)
    context_value_lower = context_value_str.lower()
    if op_lower == "stringequals":
        return context_value_str in condition_values
    elif op_lower == "stringnotequals":
        return context_value_str not in condition_values
    elif op_lower == "stringequalsignorecase":
        return context_value_lower in [v.lower() for v in condition_values]
    elif op_lower == "stringnotequalsignorecase":
        return context_value_lower not in [v.lower() for v in condition_values]
    elif op_lower == "stringlike":
        return any(_match_string_like(context_value_str, p) for p in condition_values)
    elif op_lower == "stringnotlike":
        return not any(_match_string_like(context_value_str, p) for p in condition_values)
    elif op_lower == "ipaddress":
        return any(_ip_in_cidr(context_value_str, cidr) for cidr in condition_values)
    elif op_lower == "notipaddress":
        return not any(_ip_in_cidr(context_value_str, cidr) for cidr in condition_values)
    elif op_lower == "bool":
        bool_val = context_value_lower in ("true", "1", "yes")
        return str(bool_val).lower() in [v.lower() for v in condition_values]
    return True
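A minimal sketch of the evaluator's behavior (import path assumed; the `aws:*` context keys match what `_build_policy_context` supplies later in this PR):

```python
from app.bucket_policies import _evaluate_condition_operator  # import path assumed

ctx = {"aws:SourceIp": "10.1.2.3", "aws:SecureTransport": "true"}

# IpAddress performs CIDR containment via the ipaddress module.
assert _evaluate_condition_operator("IpAddress", "aws:SourceIp", ["10.0.0.0/8"], ctx)
# The ...IfExists suffix turns a missing context key into a pass instead of a failure.
assert _evaluate_condition_operator("StringEqualsIfExists", "aws:UserAgent", ["curl/8.0"], ctx)
# Bool compares the truthiness of the context value against the condition value.
assert _evaluate_condition_operator("Bool", "aws:SecureTransport", ["true"], ctx)
```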
ACTION_ALIASES = {
# List actions
"s3:listbucket": "list",
"s3:listallmybuckets": "list",
"s3:listbucketversions": "list",
"s3:listmultipartuploads": "list",
"s3:listparts": "list",
# Read actions
"s3:getobject": "read",
"s3:getobjectversion": "read",
"s3:getobjecttagging": "read",
@@ -27,7 +85,6 @@ ACTION_ALIASES = {
"s3:getbucketversioning": "read",
"s3:headobject": "read",
"s3:headbucket": "read",
# Write actions
"s3:putobject": "write",
"s3:createbucket": "write",
"s3:putobjecttagging": "write",
@@ -37,26 +94,30 @@ ACTION_ALIASES = {
"s3:completemultipartupload": "write",
"s3:abortmultipartupload": "write",
"s3:copyobject": "write",
# Delete actions
"s3:deleteobject": "delete",
"s3:deleteobjectversion": "delete",
"s3:deletebucket": "delete",
"s3:deleteobjecttagging": "delete",
# Share actions (ACL)
"s3:putobjectacl": "share",
"s3:putbucketacl": "share",
"s3:getbucketacl": "share",
# Policy actions
"s3:putbucketpolicy": "policy",
"s3:getbucketpolicy": "policy",
"s3:deletebucketpolicy": "policy",
# Replication actions
"s3:getreplicationconfiguration": "replication",
"s3:putreplicationconfiguration": "replication",
"s3:deletereplicationconfiguration": "replication",
"s3:replicateobject": "replication",
"s3:replicatetags": "replication",
"s3:replicatedelete": "replication",
"s3:getlifecycleconfiguration": "lifecycle",
"s3:putlifecycleconfiguration": "lifecycle",
"s3:deletelifecycleconfiguration": "lifecycle",
"s3:getbucketlifecycle": "lifecycle",
"s3:putbucketlifecycle": "lifecycle",
"s3:getbucketcors": "cors",
"s3:putbucketcors": "cors",
"s3:deletebucketcors": "cors",
}
@@ -135,18 +196,16 @@ class BucketPolicyStatement:
principals: List[str] | str
actions: List[str]
resources: List[Tuple[str | None, str | None]]
# Performance: Pre-compiled regex patterns for resource matching
conditions: Dict[str, Dict[str, List[str]]] = field(default_factory=dict)
_compiled_patterns: List[Tuple[str | None, Optional[Pattern[str]]]] | None = None
def _get_compiled_patterns(self) -> List[Tuple[str | None, Optional[Pattern[str]]]]:
"""Lazily compile fnmatch patterns to regex for faster matching."""
if self._compiled_patterns is None:
self._compiled_patterns = []
for resource_bucket, key_pattern in self.resources:
if key_pattern is None:
self._compiled_patterns.append((resource_bucket, None))
else:
# Convert fnmatch pattern to regex
regex_pattern = translate(key_pattern)
self._compiled_patterns.append((resource_bucket, re.compile(regex_pattern)))
return self._compiled_patterns
@@ -173,11 +232,21 @@ class BucketPolicyStatement:
if not key:
return True
continue
# Performance: Use pre-compiled regex instead of fnmatch
if compiled_pattern.match(key):
return True
return False
def matches_condition(self, context: Optional[Dict[str, Any]]) -> bool:
    if not self.conditions:
        return True
    if context is None:
        context = {}
    for operator, key_values in self.conditions.items():
        for condition_key, condition_values in key_values.items():
            if not _evaluate_condition_operator(operator, condition_key, condition_values, context):
                return False
    return True
class BucketPolicyStore:
"""Loads bucket policies from disk and evaluates statements."""
@@ -219,6 +288,7 @@ class BucketPolicyStore:
bucket: Optional[str],
object_key: Optional[str],
action: str,
context: Optional[Dict[str, Any]] = None,
) -> str | None:
bucket = (bucket or "").lower()
statements = self._policies.get(bucket) or []
@@ -230,6 +300,8 @@ class BucketPolicyStore:
continue
if not statement.matches_resource(bucket, object_key):
continue
if not statement.matches_condition(context):
continue
if statement.effect == "deny":
return "deny"
decision = "allow"
@@ -294,6 +366,7 @@ class BucketPolicyStore:
if not resources:
continue
effect = statement.get("Effect", "Allow").lower()
conditions = self._normalize_conditions(statement.get("Condition", {}))
statements.append(
BucketPolicyStatement(
sid=statement.get("Sid"),
@@ -301,6 +374,24 @@ class BucketPolicyStore:
principals=principals,
actions=actions or ["*"],
resources=resources,
conditions=conditions,
)
)
return statements
def _normalize_conditions(self, condition_block: Dict[str, Any]) -> Dict[str, Dict[str, List[str]]]:
    if not condition_block or not isinstance(condition_block, dict):
        return {}
    normalized: Dict[str, Dict[str, List[str]]] = {}
    for operator, key_values in condition_block.items():
        if not isinstance(key_values, dict):
            continue
        normalized[operator] = {}
        for cond_key, cond_values in key_values.items():
            if isinstance(cond_values, str):
                normalized[operator][cond_key] = [cond_values]
            elif isinstance(cond_values, list):
                normalized[operator][cond_key] = [str(v) for v in cond_values]
            else:
                normalized[operator][cond_key] = [str(cond_values)]
    return normalized


@@ -1,6 +1,7 @@
from __future__ import annotations
import os
import re
import secrets
import shutil
import sys
@@ -9,6 +10,13 @@ from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, Optional
def _validate_rate_limit(value: str) -> str:
    pattern = r"^\d+\s+per\s+(second|minute|hour|day)$"
    if not re.match(pattern, value):
        raise ValueError(f"Invalid rate limit format: {value}. Expected format: '200 per minute'")
    return value
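By example, the validator accepts only the single-window `<count> per <unit>` form; note it is stricter than Flask-Limiter itself, which also understands the slash notation:

```python
_validate_rate_limit("200 per minute")  # returned unchanged
_validate_rate_limit("5 per second")    # returned unchanged
_validate_rate_limit("100/minute")      # raises ValueError, despite being valid Flask-Limiter syntax
```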
if getattr(sys, "frozen", False):
# Running in a PyInstaller bundle
PROJECT_ROOT = Path(sys._MEIPASS)
@@ -76,6 +84,12 @@ class AppConfig:
display_timezone: str
lifecycle_enabled: bool
lifecycle_interval_seconds: int
metrics_history_enabled: bool
metrics_history_retention_hours: int
metrics_history_interval_minutes: int
operation_metrics_enabled: bool
operation_metrics_interval_minutes: int
operation_metrics_retention_hours: int
@classmethod
def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
@@ -148,7 +162,7 @@ class AppConfig:
log_path = log_dir / str(_get("LOG_FILE", "app.log"))
log_max_bytes = int(_get("LOG_MAX_BYTES", 5 * 1024 * 1024))
log_backup_count = int(_get("LOG_BACKUP_COUNT", 3))
ratelimit_default = str(_get("RATE_LIMIT_DEFAULT", "200 per minute"))
ratelimit_default = _validate_rate_limit(str(_get("RATE_LIMIT_DEFAULT", "200 per minute")))
ratelimit_storage_uri = str(_get("RATE_LIMIT_STORAGE_URI", "memory://"))
def _csv(value: str, default: list[str]) -> list[str]:
@@ -172,6 +186,12 @@ class AppConfig:
kms_keys_path = Path(_get("KMS_KEYS_PATH", encryption_keys_dir / "kms_keys.json")).resolve()
default_encryption_algorithm = str(_get("DEFAULT_ENCRYPTION_ALGORITHM", "AES256"))
display_timezone = str(_get("DISPLAY_TIMEZONE", "UTC"))
metrics_history_enabled = str(_get("METRICS_HISTORY_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
metrics_history_retention_hours = int(_get("METRICS_HISTORY_RETENTION_HOURS", 24))
metrics_history_interval_minutes = int(_get("METRICS_HISTORY_INTERVAL_MINUTES", 5))
operation_metrics_enabled = str(_get("OPERATION_METRICS_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
operation_metrics_interval_minutes = int(_get("OPERATION_METRICS_INTERVAL_MINUTES", 5))
operation_metrics_retention_hours = int(_get("OPERATION_METRICS_RETENTION_HOURS", 24))
return cls(storage_root=storage_root,
max_upload_size=max_upload_size,
@@ -210,7 +230,13 @@ class AppConfig:
default_encryption_algorithm=default_encryption_algorithm,
display_timezone=display_timezone,
lifecycle_enabled=lifecycle_enabled,
lifecycle_interval_seconds=lifecycle_interval_seconds)
lifecycle_interval_seconds=lifecycle_interval_seconds,
metrics_history_enabled=metrics_history_enabled,
metrics_history_retention_hours=metrics_history_retention_hours,
metrics_history_interval_minutes=metrics_history_interval_minutes,
operation_metrics_enabled=operation_metrics_enabled,
operation_metrics_interval_minutes=operation_metrics_interval_minutes,
operation_metrics_retention_hours=operation_metrics_retention_hours)
def validate_and_report(self) -> list[str]:
"""Validate configuration and return a list of warnings/issues.
@@ -337,4 +363,12 @@ class AppConfig:
"KMS_KEYS_PATH": str(self.kms_keys_path),
"DEFAULT_ENCRYPTION_ALGORITHM": self.default_encryption_algorithm,
"DISPLAY_TIMEZONE": self.display_timezone,
"LIFECYCLE_ENABLED": self.lifecycle_enabled,
"LIFECYCLE_INTERVAL_SECONDS": self.lifecycle_interval_seconds,
"METRICS_HISTORY_ENABLED": self.metrics_history_enabled,
"METRICS_HISTORY_RETENTION_HOURS": self.metrics_history_retention_hours,
"METRICS_HISTORY_INTERVAL_MINUTES": self.metrics_history_interval_minutes,
"OPERATION_METRICS_ENABLED": self.operation_metrics_enabled,
"OPERATION_METRICS_INTERVAL_MINUTES": self.operation_metrics_interval_minutes,
"OPERATION_METRICS_RETENTION_HOURS": self.operation_metrics_retention_hours,
}


@@ -1,5 +1,6 @@
from __future__ import annotations
import hmac
import json
import math
import secrets
@@ -15,7 +16,7 @@ class IamError(RuntimeError):
"""Raised when authentication or authorization fails."""
S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication"}
S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication", "lifecycle", "cors"}
IAM_ACTIONS = {
"iam:list_users",
"iam:create_user",
@@ -71,6 +72,16 @@ ACTION_ALIASES = {
"s3:replicateobject": "replication",
"s3:replicatetags": "replication",
"s3:replicatedelete": "replication",
"lifecycle": "lifecycle",
"s3:getlifecycleconfiguration": "lifecycle",
"s3:putlifecycleconfiguration": "lifecycle",
"s3:deletelifecycleconfiguration": "lifecycle",
"s3:getbucketlifecycle": "lifecycle",
"s3:putbucketlifecycle": "lifecycle",
"cors": "cors",
"s3:getbucketcors": "cors",
"s3:putbucketcors": "cors",
"s3:deletebucketcors": "cors",
"iam:listusers": "iam:list_users",
"iam:createuser": "iam:create_user",
"iam:deleteuser": "iam:delete_user",
@@ -139,7 +150,7 @@ class IamService:
f"Access temporarily locked. Try again in {seconds} seconds."
)
record = self._users.get(access_key)
if not record or record["secret_key"] != secret_key:
if not record or not hmac.compare_digest(record["secret_key"], secret_key):
self._record_failed_attempt(access_key)
raise IamError("Invalid credentials")
self._clear_failed_attempts(access_key)
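The move from `!=` to `hmac.compare_digest` makes the secret comparison constant-time, closing a timing side-channel on credential checks. The difference, in brief:

```python
import hmac

# Plain equality short-circuits at the first differing character,
# so response timing can leak how long a matching prefix is.
"abc123" == "abz999"

# compare_digest's runtime depends only on the length of the inputs.
hmac.compare_digest("abc123", "abz999")
```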

app/operation_metrics.py (new file, 271 lines)

@@ -0,0 +1,271 @@
from __future__ import annotations

import json
import logging
import threading
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional

logger = logging.getLogger(__name__)


@dataclass
class OperationStats:
    count: int = 0
    success_count: int = 0
    error_count: int = 0
    latency_sum_ms: float = 0.0
    latency_min_ms: float = float("inf")
    latency_max_ms: float = 0.0
    bytes_in: int = 0
    bytes_out: int = 0

    def record(self, latency_ms: float, success: bool, bytes_in: int = 0, bytes_out: int = 0) -> None:
        self.count += 1
        if success:
            self.success_count += 1
        else:
            self.error_count += 1
        self.latency_sum_ms += latency_ms
        if latency_ms < self.latency_min_ms:
            self.latency_min_ms = latency_ms
        if latency_ms > self.latency_max_ms:
            self.latency_max_ms = latency_ms
        self.bytes_in += bytes_in
        self.bytes_out += bytes_out

    def to_dict(self) -> Dict[str, Any]:
        avg_latency = self.latency_sum_ms / self.count if self.count > 0 else 0.0
        min_latency = self.latency_min_ms if self.latency_min_ms != float("inf") else 0.0
        return {
            "count": self.count,
            "success_count": self.success_count,
            "error_count": self.error_count,
            "latency_avg_ms": round(avg_latency, 2),
            "latency_min_ms": round(min_latency, 2),
            "latency_max_ms": round(self.latency_max_ms, 2),
            "bytes_in": self.bytes_in,
            "bytes_out": self.bytes_out,
        }

    def merge(self, other: "OperationStats") -> None:
        self.count += other.count
        self.success_count += other.success_count
        self.error_count += other.error_count
        self.latency_sum_ms += other.latency_sum_ms
        if other.latency_min_ms < self.latency_min_ms:
            self.latency_min_ms = other.latency_min_ms
        if other.latency_max_ms > self.latency_max_ms:
            self.latency_max_ms = other.latency_max_ms
        self.bytes_in += other.bytes_in
        self.bytes_out += other.bytes_out


@dataclass
class MetricsSnapshot:
    timestamp: datetime
    window_seconds: int
    by_method: Dict[str, Dict[str, Any]]
    by_endpoint: Dict[str, Dict[str, Any]]
    by_status_class: Dict[str, int]
    error_codes: Dict[str, int]
    totals: Dict[str, Any]

    def to_dict(self) -> Dict[str, Any]:
        return {
            "timestamp": self.timestamp.isoformat(),
            "window_seconds": self.window_seconds,
            "by_method": self.by_method,
            "by_endpoint": self.by_endpoint,
            "by_status_class": self.by_status_class,
            "error_codes": self.error_codes,
            "totals": self.totals,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "MetricsSnapshot":
        return cls(
            timestamp=datetime.fromisoformat(data["timestamp"]),
            window_seconds=data.get("window_seconds", 300),
            by_method=data.get("by_method", {}),
            by_endpoint=data.get("by_endpoint", {}),
            by_status_class=data.get("by_status_class", {}),
            error_codes=data.get("error_codes", {}),
            totals=data.get("totals", {}),
        )


class OperationMetricsCollector:
    def __init__(
        self,
        storage_root: Path,
        interval_minutes: int = 5,
        retention_hours: int = 24,
    ):
        self.storage_root = storage_root
        self.interval_seconds = interval_minutes * 60
        self.retention_hours = retention_hours
        self._lock = threading.Lock()
        self._by_method: Dict[str, OperationStats] = {}
        self._by_endpoint: Dict[str, OperationStats] = {}
        self._by_status_class: Dict[str, int] = {}
        self._error_codes: Dict[str, int] = {}
        self._totals = OperationStats()
        self._window_start = time.time()
        self._shutdown = threading.Event()
        self._snapshots: List[MetricsSnapshot] = []
        self._load_history()
        self._snapshot_thread = threading.Thread(
            target=self._snapshot_loop, name="operation-metrics-snapshot", daemon=True
        )
        self._snapshot_thread.start()

    def _config_path(self) -> Path:
        return self.storage_root / ".myfsio.sys" / "config" / "operation_metrics.json"

    def _load_history(self) -> None:
        config_path = self._config_path()
        if not config_path.exists():
            return
        try:
            data = json.loads(config_path.read_text(encoding="utf-8"))
            snapshots_data = data.get("snapshots", [])
            self._snapshots = [MetricsSnapshot.from_dict(s) for s in snapshots_data]
            self._prune_old_snapshots()
        except (json.JSONDecodeError, OSError, KeyError) as e:
            logger.warning(f"Failed to load operation metrics history: {e}")

    def _save_history(self) -> None:
        config_path = self._config_path()
        config_path.parent.mkdir(parents=True, exist_ok=True)
        try:
            data = {"snapshots": [s.to_dict() for s in self._snapshots]}
            config_path.write_text(json.dumps(data, indent=2), encoding="utf-8")
        except OSError as e:
            logger.warning(f"Failed to save operation metrics history: {e}")

    def _prune_old_snapshots(self) -> None:
        if not self._snapshots:
            return
        cutoff = datetime.now(timezone.utc).timestamp() - (self.retention_hours * 3600)
        self._snapshots = [
            s for s in self._snapshots if s.timestamp.timestamp() > cutoff
        ]

    def _snapshot_loop(self) -> None:
        while not self._shutdown.is_set():
            self._shutdown.wait(timeout=self.interval_seconds)
            if not self._shutdown.is_set():
                self._take_snapshot()

    def _take_snapshot(self) -> None:
        with self._lock:
            now = datetime.now(timezone.utc)
            window_seconds = int(time.time() - self._window_start)
            snapshot = MetricsSnapshot(
                timestamp=now,
                window_seconds=window_seconds,
                by_method={k: v.to_dict() for k, v in self._by_method.items()},
                by_endpoint={k: v.to_dict() for k, v in self._by_endpoint.items()},
                by_status_class=dict(self._by_status_class),
                error_codes=dict(self._error_codes),
                totals=self._totals.to_dict(),
            )
            self._snapshots.append(snapshot)
            self._prune_old_snapshots()
            self._save_history()
            self._by_method.clear()
            self._by_endpoint.clear()
            self._by_status_class.clear()
            self._error_codes.clear()
            self._totals = OperationStats()
            self._window_start = time.time()

    def record_request(
        self,
        method: str,
        endpoint_type: str,
        status_code: int,
        latency_ms: float,
        bytes_in: int = 0,
        bytes_out: int = 0,
        error_code: Optional[str] = None,
    ) -> None:
        success = 200 <= status_code < 400
        status_class = f"{status_code // 100}xx"
        with self._lock:
            if method not in self._by_method:
                self._by_method[method] = OperationStats()
            self._by_method[method].record(latency_ms, success, bytes_in, bytes_out)
            if endpoint_type not in self._by_endpoint:
                self._by_endpoint[endpoint_type] = OperationStats()
            self._by_endpoint[endpoint_type].record(latency_ms, success, bytes_in, bytes_out)
            self._by_status_class[status_class] = self._by_status_class.get(status_class, 0) + 1
            if error_code:
                self._error_codes[error_code] = self._error_codes.get(error_code, 0) + 1
            self._totals.record(latency_ms, success, bytes_in, bytes_out)

    def get_current_stats(self) -> Dict[str, Any]:
        with self._lock:
            window_seconds = int(time.time() - self._window_start)
            return {
                "timestamp": datetime.now(timezone.utc).isoformat(),
                "window_seconds": window_seconds,
                "by_method": {k: v.to_dict() for k, v in self._by_method.items()},
                "by_endpoint": {k: v.to_dict() for k, v in self._by_endpoint.items()},
                "by_status_class": dict(self._by_status_class),
                "error_codes": dict(self._error_codes),
                "totals": self._totals.to_dict(),
            }

    def get_history(self, hours: Optional[int] = None) -> List[Dict[str, Any]]:
        with self._lock:
            snapshots = list(self._snapshots)
        if hours:
            cutoff = datetime.now(timezone.utc).timestamp() - (hours * 3600)
            snapshots = [s for s in snapshots if s.timestamp.timestamp() > cutoff]
        return [s.to_dict() for s in snapshots]

    def shutdown(self) -> None:
        self._shutdown.set()
        self._take_snapshot()
        self._snapshot_thread.join(timeout=5.0)


def classify_endpoint(path: str) -> str:
    if not path or path == "/":
        return "service"
    path = path.rstrip("/")
    if path.startswith("/ui"):
        return "ui"
    if path.startswith("/kms"):
        return "kms"
    if path.startswith("/myfsio"):
        return "service"
    parts = path.lstrip("/").split("/")
    if len(parts) == 0:
        return "service"
    elif len(parts) == 1:
        return "bucket"
    else:
        return "object"


@@ -11,7 +11,8 @@ import uuid
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, Optional
from urllib.parse import quote, urlencode, urlparse, unquote
from xml.etree.ElementTree import Element, SubElement, tostring, fromstring, ParseError
from xml.etree.ElementTree import Element, SubElement, tostring, ParseError
from defusedxml.ElementTree import fromstring
from flask import Blueprint, Response, current_app, jsonify, request, g
from werkzeug.http import http_date
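Replacing the stdlib `fromstring` with defusedxml's hardens every XML-parsing endpoint against entity-expansion (billion laughs / XXE-style) payloads. A sketch of the behavioral difference:

```python
from defusedxml import EntitiesForbidden
from defusedxml.ElementTree import fromstring

payload = b"<!DOCTYPE d [<!ENTITY e 'boom'>]><Tagging>&e;</Tagging>"
try:
    fromstring(payload)
except EntitiesForbidden:
    # xml.etree.ElementTree.fromstring would expand the entity; defusedxml refuses.
    print("rejected")
```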
@@ -29,6 +30,8 @@ from .storage import ObjectStorage, StorageError, QuotaExceededError, BucketNotF
logger = logging.getLogger(__name__)
S3_NS = "http://s3.amazonaws.com/doc/2006-03-01/"
s3_api_bp = Blueprint("s3_api", __name__)
def _storage() -> ObjectStorage:
@@ -53,6 +56,20 @@ def _bucket_policies() -> BucketPolicyStore:
return store
def _build_policy_context() -> Dict[str, Any]:
    ctx: Dict[str, Any] = {}
    if request.headers.get("Referer"):
        ctx["aws:Referer"] = request.headers.get("Referer")
    if request.access_route:
        ctx["aws:SourceIp"] = request.access_route[0]
    elif request.remote_addr:
        ctx["aws:SourceIp"] = request.remote_addr
    ctx["aws:SecureTransport"] = str(request.is_secure).lower()
    if request.headers.get("User-Agent"):
        ctx["aws:UserAgent"] = request.headers.get("User-Agent")
    return ctx
def _object_lock() -> ObjectLockService:
return current_app.extensions["object_lock"]
@@ -71,6 +88,7 @@ def _xml_response(element: Element, status: int = 200) -> Response:
def _error_response(code: str, message: str, status: int) -> Response:
g.s3_error_code = code
error = Element("Error")
SubElement(error, "Code").text = code
SubElement(error, "Message").text = message
@@ -79,6 +97,13 @@ def _error_response(code: str, message: str, status: int) -> Response:
return _xml_response(error, status)
def _require_xml_content_type() -> Response | None:
    ct = request.headers.get("Content-Type", "")
    if ct and not ct.startswith(("application/xml", "text/xml")):
        return _error_response("InvalidRequest", "Content-Type must be application/xml or text/xml", 400)
    return None
def _parse_range_header(range_header: str, file_size: int) -> list[tuple[int, int]] | None:
if not range_header.startswith("bytes="):
return None
@@ -218,16 +243,7 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
if not hmac.compare_digest(calculated_signature, signature):
if current_app.config.get("DEBUG_SIGV4"):
logger.warning(
"SigV4 signature mismatch",
extra={
"path": req.path,
"method": method,
"signed_headers": signed_headers_str,
"content_type": req.headers.get("Content-Type"),
"content_length": req.headers.get("Content-Length"),
}
)
logger.warning("SigV4 signature mismatch for %s %s", method, req.path)
raise IamError("SignatureDoesNotMatch")
session_token = req.headers.get("X-Amz-Security-Token")
@@ -293,7 +309,7 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
if header.lower() == 'expect' and val == "":
val = "100-continue"
val = " ".join(val.split())
canonical_headers_parts.append(f"{header}:{val}\n")
canonical_headers_parts.append(f"{header.lower()}:{val}\n")
canonical_headers = "".join(canonical_headers_parts)
payload_hash = "UNSIGNED-PAYLOAD"
@@ -380,7 +396,8 @@ def _authorize_action(principal: Principal | None, bucket_name: str | None, acti
policy_decision = None
access_key = principal.access_key if principal else None
if bucket_name:
policy_decision = _bucket_policies().evaluate(access_key, bucket_name, object_key, action)
policy_context = _build_policy_context()
policy_decision = _bucket_policies().evaluate(access_key, bucket_name, object_key, action, policy_context)
if policy_decision == "deny":
raise IamError("Access denied by bucket policy")
@@ -407,11 +424,13 @@ def _authorize_action(principal: Principal | None, bucket_name: str | None, acti
def _enforce_bucket_policy(principal: Principal | None, bucket_name: str | None, object_key: str | None, action: str) -> None:
if not bucket_name:
return
policy_context = _build_policy_context()
decision = _bucket_policies().evaluate(
principal.access_key if principal else None,
bucket_name,
object_key,
action,
policy_context,
)
if decision == "deny":
raise IamError("Access denied by bucket policy")
@@ -572,6 +591,7 @@ def _generate_presigned_url(
bucket_name: str,
object_key: str,
expires_in: int,
api_base_url: str | None = None,
) -> str:
region = current_app.config["AWS_REGION"]
service = current_app.config["AWS_SERVICE"]
@@ -592,7 +612,7 @@ def _generate_presigned_url(
}
canonical_query = _encode_query_params(query_params)
api_base = current_app.config.get("API_BASE_URL")
api_base = api_base_url or current_app.config.get("API_BASE_URL")
if api_base:
parsed = urlparse(api_base)
host = parsed.netloc
@@ -644,11 +664,11 @@ def _strip_ns(tag: str | None) -> str:
def _find_element(parent: Element, name: str) -> Optional[Element]:
"""Find a child element by name, trying both namespaced and non-namespaced variants.
"""Find a child element by name, trying S3 namespace then no namespace.
This handles XML documents that may or may not include namespace prefixes.
"""
el = parent.find(f"{{*}}{name}")
el = parent.find(f"{{{S3_NS}}}{name}")
if el is None:
el = parent.find(name)
return el
@@ -672,7 +692,7 @@ def _parse_tagging_document(payload: bytes) -> list[dict[str, str]]:
raise ValueError("Malformed XML") from exc
if _strip_ns(root.tag) != "Tagging":
raise ValueError("Root element must be Tagging")
tagset = root.find(".//{*}TagSet")
tagset = root.find(".//{http://s3.amazonaws.com/doc/2006-03-01/}TagSet")
if tagset is None:
tagset = root.find("TagSet")
if tagset is None:
@@ -840,13 +860,13 @@ def _parse_encryption_document(payload: bytes) -> dict[str, Any]:
bucket_key_el = child
if default_el is None:
continue
algo_el = default_el.find("{*}SSEAlgorithm")
algo_el = default_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}SSEAlgorithm")
if algo_el is None:
algo_el = default_el.find("SSEAlgorithm")
if algo_el is None or not (algo_el.text or "").strip():
raise ValueError("SSEAlgorithm is required")
rule: dict[str, Any] = {"SSEAlgorithm": algo_el.text.strip()}
kms_el = default_el.find("{*}KMSMasterKeyID")
kms_el = default_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}KMSMasterKeyID")
if kms_el is None:
kms_el = default_el.find("KMSMasterKeyID")
if kms_el is not None and kms_el.text:
@@ -922,6 +942,7 @@ def _maybe_handle_bucket_subresource(bucket_name: str) -> Response | None:
"notification": _bucket_notification_handler,
"logging": _bucket_logging_handler,
"uploads": _bucket_uploads_handler,
"policy": _bucket_policy_handler,
}
requested = [key for key in handlers if key in request.args]
if not requested:
@@ -947,8 +968,11 @@ def _bucket_versioning_handler(bucket_name: str) -> Response:
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
if request.method == "PUT":
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
if not payload.strip():
return _error_response("MalformedXML", "Request body is required", 400)
@@ -958,7 +982,7 @@ def _bucket_versioning_handler(bucket_name: str) -> Response:
return _error_response("MalformedXML", "Unable to parse XML document", 400)
if _strip_ns(root.tag) != "VersioningConfiguration":
return _error_response("MalformedXML", "Root element must be VersioningConfiguration", 400)
status_el = root.find("{*}Status")
status_el = root.find("{http://s3.amazonaws.com/doc/2006-03-01/}Status")
if status_el is None:
status_el = root.find("Status")
status = (status_el.text or "").strip() if status_el is not None else ""
@@ -1007,6 +1031,9 @@ def _bucket_tagging_handler(bucket_name: str) -> Response:
current_app.logger.info("Bucket tags deleted", extra={"bucket": bucket_name})
return Response(status=204)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
try:
tags = _parse_tagging_document(payload)
@@ -1062,6 +1089,9 @@ def _object_tagging_handler(bucket_name: str, object_key: str) -> Response:
current_app.logger.info("Object tags deleted", extra={"bucket": bucket_name, "key": object_key})
return Response(status=204)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
try:
tags = _parse_tagging_document(payload)
@@ -1131,6 +1161,9 @@ def _bucket_cors_handler(bucket_name: str) -> Response:
current_app.logger.info("Bucket CORS deleted", extra={"bucket": bucket_name})
return Response(status=204)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
if not payload.strip():
try:
@@ -1177,6 +1210,9 @@ def _bucket_encryption_handler(bucket_name: str) -> Response:
404,
)
return _xml_response(_render_encryption_document(config))
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
if not payload.strip():
try:
@@ -1349,7 +1385,7 @@ def _bucket_list_versions_handler(bucket_name: str) -> Response:
SubElement(ver_elem, "Key").text = obj.key
SubElement(ver_elem, "VersionId").text = v.get("version_id", "unknown")
SubElement(ver_elem, "IsLatest").text = "false"
SubElement(ver_elem, "LastModified").text = v.get("archived_at", "")
SubElement(ver_elem, "LastModified").text = v.get("archived_at") or "1970-01-01T00:00:00Z"
SubElement(ver_elem, "ETag").text = f'"{v.get("etag", "")}"'
SubElement(ver_elem, "Size").text = str(v.get("size", 0))
SubElement(ver_elem, "StorageClass").text = "STANDARD"
@@ -1398,6 +1434,9 @@ def _bucket_lifecycle_handler(bucket_name: str) -> Response:
current_app.logger.info("Bucket lifecycle deleted", extra={"bucket": bucket_name})
return Response(status=204)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
if not payload.strip():
return _error_response("MalformedXML", "Request body is required", 400)
@@ -1462,49 +1501,49 @@ def _parse_lifecycle_config(payload: bytes) -> list:
raise ValueError("Root element must be LifecycleConfiguration")
rules = []
for rule_el in root.findall("{*}Rule") or root.findall("Rule"):
for rule_el in root.findall("{http://s3.amazonaws.com/doc/2006-03-01/}Rule") or root.findall("Rule"):
rule: dict = {}
id_el = rule_el.find("{*}ID") or rule_el.find("ID")
id_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}ID") or rule_el.find("ID")
if id_el is not None and id_el.text:
rule["ID"] = id_el.text.strip()
filter_el = rule_el.find("{*}Filter") or rule_el.find("Filter")
filter_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Filter") or rule_el.find("Filter")
if filter_el is not None:
prefix_el = filter_el.find("{*}Prefix") or filter_el.find("Prefix")
prefix_el = filter_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Prefix") or filter_el.find("Prefix")
if prefix_el is not None and prefix_el.text:
rule["Prefix"] = prefix_el.text
if "Prefix" not in rule:
prefix_el = rule_el.find("{*}Prefix") or rule_el.find("Prefix")
prefix_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Prefix") or rule_el.find("Prefix")
if prefix_el is not None:
rule["Prefix"] = prefix_el.text or ""
status_el = rule_el.find("{*}Status") or rule_el.find("Status")
status_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Status") or rule_el.find("Status")
rule["Status"] = (status_el.text or "Enabled").strip() if status_el is not None else "Enabled"
exp_el = rule_el.find("{*}Expiration") or rule_el.find("Expiration")
exp_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Expiration") or rule_el.find("Expiration")
if exp_el is not None:
expiration: dict = {}
days_el = exp_el.find("{*}Days") or exp_el.find("Days")
days_el = exp_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Days") or exp_el.find("Days")
if days_el is not None and days_el.text:
days_val = int(days_el.text.strip())
if days_val <= 0:
raise ValueError("Expiration Days must be a positive integer")
expiration["Days"] = days_val
date_el = exp_el.find("{*}Date") or exp_el.find("Date")
date_el = exp_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Date") or exp_el.find("Date")
if date_el is not None and date_el.text:
expiration["Date"] = date_el.text.strip()
eodm_el = exp_el.find("{*}ExpiredObjectDeleteMarker") or exp_el.find("ExpiredObjectDeleteMarker")
eodm_el = exp_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}ExpiredObjectDeleteMarker") or exp_el.find("ExpiredObjectDeleteMarker")
if eodm_el is not None and (eodm_el.text or "").strip().lower() in {"true", "1"}:
expiration["ExpiredObjectDeleteMarker"] = True
if expiration:
rule["Expiration"] = expiration
nve_el = rule_el.find("{*}NoncurrentVersionExpiration") or rule_el.find("NoncurrentVersionExpiration")
nve_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}NoncurrentVersionExpiration") or rule_el.find("NoncurrentVersionExpiration")
if nve_el is not None:
nve: dict = {}
days_el = nve_el.find("{*}NoncurrentDays") or nve_el.find("NoncurrentDays")
days_el = nve_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}NoncurrentDays") or nve_el.find("NoncurrentDays")
if days_el is not None and days_el.text:
noncurrent_days = int(days_el.text.strip())
if noncurrent_days <= 0:
@@ -1513,10 +1552,10 @@ def _parse_lifecycle_config(payload: bytes) -> list:
if nve:
rule["NoncurrentVersionExpiration"] = nve
aimu_el = rule_el.find("{*}AbortIncompleteMultipartUpload") or rule_el.find("AbortIncompleteMultipartUpload")
aimu_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}AbortIncompleteMultipartUpload") or rule_el.find("AbortIncompleteMultipartUpload")
if aimu_el is not None:
aimu: dict = {}
days_el = aimu_el.find("{*}DaysAfterInitiation") or aimu_el.find("DaysAfterInitiation")
days_el = aimu_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}DaysAfterInitiation") or aimu_el.find("DaysAfterInitiation")
if days_el is not None and days_el.text:
days_after = int(days_el.text.strip())
if days_after <= 0:
@@ -1632,6 +1671,9 @@ def _bucket_object_lock_handler(bucket_name: str) -> Response:
SubElement(root, "ObjectLockEnabled").text = "Enabled" if config.enabled else "Disabled"
return _xml_response(root)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
if not payload.strip():
return _error_response("MalformedXML", "Request body is required", 400)
@@ -1641,7 +1683,7 @@ def _bucket_object_lock_handler(bucket_name: str) -> Response:
except ParseError:
return _error_response("MalformedXML", "Unable to parse XML document", 400)
enabled_el = root.find("{*}ObjectLockEnabled") or root.find("ObjectLockEnabled")
enabled_el = root.find("{http://s3.amazonaws.com/doc/2006-03-01/}ObjectLockEnabled") or root.find("ObjectLockEnabled")
enabled = (enabled_el.text or "").strip() == "Enabled" if enabled_el is not None else False
config = ObjectLockConfig(enabled=enabled)
@@ -1697,6 +1739,9 @@ def _bucket_notification_handler(bucket_name: str) -> Response:
current_app.logger.info("Bucket notifications deleted", extra={"bucket": bucket_name})
return Response(status=204)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
if not payload.strip():
notification_service.delete_bucket_notifications(bucket_name)
@@ -1708,9 +1753,9 @@ def _bucket_notification_handler(bucket_name: str) -> Response:
return _error_response("MalformedXML", "Unable to parse XML document", 400)
configs: list[NotificationConfiguration] = []
for webhook_el in root.findall("{*}WebhookConfiguration") or root.findall("WebhookConfiguration"):
for webhook_el in root.findall("{http://s3.amazonaws.com/doc/2006-03-01/}WebhookConfiguration") or root.findall("WebhookConfiguration"):
config_id = _find_element_text(webhook_el, "Id") or uuid.uuid4().hex
events = [el.text for el in webhook_el.findall("{*}Event") or webhook_el.findall("Event") if el.text]
events = [el.text for el in webhook_el.findall("{http://s3.amazonaws.com/doc/2006-03-01/}Event") or webhook_el.findall("Event") if el.text]
dest_el = _find_element(webhook_el, "Destination")
url = _find_element_text(dest_el, "Url") if dest_el else ""
@@ -1723,7 +1768,7 @@ def _bucket_notification_handler(bucket_name: str) -> Response:
if filter_el:
key_el = _find_element(filter_el, "S3Key")
if key_el:
for rule_el in key_el.findall("{*}FilterRule") or key_el.findall("FilterRule"):
for rule_el in key_el.findall("{http://s3.amazonaws.com/doc/2006-03-01/}FilterRule") or key_el.findall("FilterRule"):
name = _find_element_text(rule_el, "Name")
value = _find_element_text(rule_el, "Value")
if name == "prefix":
@@ -1776,6 +1821,9 @@ def _bucket_logging_handler(bucket_name: str) -> Response:
current_app.logger.info("Bucket logging deleted", extra={"bucket": bucket_name})
return Response(status=204)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
if not payload.strip():
logging_service.delete_bucket_logging(bucket_name)
@@ -1913,6 +1961,9 @@ def _object_retention_handler(bucket_name: str, object_key: str) -> Response:
SubElement(root, "RetainUntilDate").text = retention.retain_until_date.strftime("%Y-%m-%dT%H:%M:%S.000Z")
return _xml_response(root)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
if not payload.strip():
return _error_response("MalformedXML", "Request body is required", 400)
@@ -1982,6 +2033,9 @@ def _object_legal_hold_handler(bucket_name: str, object_key: str) -> Response:
SubElement(root, "Status").text = "ON" if enabled else "OFF"
return _xml_response(root)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
if not payload.strip():
return _error_response("MalformedXML", "Request body is required", 400)
@@ -2013,6 +2067,9 @@ def _bulk_delete_handler(bucket_name: str) -> Response:
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
if not payload.strip():
return _error_response("MalformedXML", "Request body must include a Delete specification", 400)
@@ -2588,9 +2645,9 @@ def _list_parts(bucket_name: str, object_key: str) -> Response:
return _xml_response(root)
@s3_api_bp.route("/bucket-policy/<bucket_name>", methods=["GET", "PUT", "DELETE"])
@limiter.limit("30 per minute")
def bucket_policy_handler(bucket_name: str) -> Response:
def _bucket_policy_handler(bucket_name: str) -> Response:
if request.method not in {"GET", "PUT", "DELETE"}:
return _method_not_allowed(["GET", "PUT", "DELETE"])
principal, error = _require_principal()
if error:
return error
@@ -2622,51 +2679,6 @@ def bucket_policy_handler(bucket_name: str) -> Response:
return Response(status=204)
@s3_api_bp.post("/presign/<bucket_name>/<path:object_key>")
@limiter.limit("45 per minute")
def presign_object(bucket_name: str, object_key: str):
payload = request.get_json(silent=True) or {}
method = str(payload.get("method", "GET")).upper()
allowed_methods = {"GET", "PUT", "DELETE"}
if method not in allowed_methods:
return _error_response("InvalidRequest", "Method must be GET, PUT, or DELETE", 400)
try:
expires = int(payload.get("expires_in", 900))
except (TypeError, ValueError):
return _error_response("InvalidRequest", "expires_in must be an integer", 400)
expires = max(1, min(expires, 7 * 24 * 3600))
action = "read" if method == "GET" else ("delete" if method == "DELETE" else "write")
principal, error = _require_principal()
if error:
return error
try:
_authorize_action(principal, bucket_name, action, object_key=object_key)
except IamError as exc:
return _error_response("AccessDenied", str(exc), 403)
storage = _storage()
if not storage.bucket_exists(bucket_name):
return _error_response("NoSuchBucket", "Bucket does not exist", 404)
if action != "write":
try:
storage.get_object_path(bucket_name, object_key)
except StorageError:
return _error_response("NoSuchKey", "Object not found", 404)
secret = _iam().secret_for_key(principal.access_key)
url = _generate_presigned_url(
principal=principal,
secret_key=secret,
method=method,
bucket_name=bucket_name,
object_key=object_key,
expires_in=expires,
)
current_app.logger.info(
"Presigned URL generated",
extra={"bucket": bucket_name, "key": object_key, "method": method},
)
return jsonify({"url": url, "method": method, "expires_in": expires})
@s3_api_bp.route("/<bucket_name>", methods=["HEAD"])
@limiter.limit("100 per minute")
def head_bucket(bucket_name: str) -> Response:
@@ -2986,6 +2998,9 @@ def _complete_multipart_upload(bucket_name: str, object_key: str) -> Response:
if not upload_id:
return _error_response("InvalidArgument", "uploadId is required", 400)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b""
try:
root = fromstring(payload)
@@ -2999,11 +3014,11 @@ def _complete_multipart_upload(bucket_name: str, object_key: str) -> Response:
for part_el in list(root):
if _strip_ns(part_el.tag) != "Part":
continue
part_number_el = part_el.find("{*}PartNumber")
part_number_el = part_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}PartNumber")
if part_number_el is None:
part_number_el = part_el.find("PartNumber")
etag_el = part_el.find("{*}ETag")
etag_el = part_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}ETag")
if etag_el is None:
etag_el = part_el.find("ETag")


@@ -774,7 +774,7 @@ class ObjectStorage:
continue
payload.setdefault("version_id", meta_file.stem)
versions.append(payload)
versions.sort(key=lambda item: item.get("archived_at", ""), reverse=True)
versions.sort(key=lambda item: item.get("archived_at") or "1970-01-01T00:00:00Z", reverse=True)
return versions
def restore_object_version(self, bucket_name: str, object_key: str, version_id: str) -> ObjectMeta:
@@ -866,7 +866,7 @@ class ObjectStorage:
except (OSError, json.JSONDecodeError):
payload = {}
version_id = payload.get("version_id") or meta_file.stem
archived_at = payload.get("archived_at") or ""
archived_at = payload.get("archived_at") or "1970-01-01T00:00:00Z"
size = int(payload.get("size") or 0)
reason = payload.get("reason") or "update"
record = aggregated.setdefault(
@@ -1773,11 +1773,9 @@ class ObjectStorage:
raise StorageError("Object key contains null bytes")
if object_key.startswith(("/", "\\")):
raise StorageError("Object key cannot start with a slash")
normalized = unicodedata.normalize("NFC", object_key)
if normalized != object_key:
raise StorageError("Object key must use normalized Unicode")
candidate = Path(normalized)
object_key = unicodedata.normalize("NFC", object_key)
candidate = Path(object_key)
if ".." in candidate.parts:
raise StorageError("Object key contains parent directory references")

app/ui.py (371 changed lines)

@@ -5,8 +5,11 @@ import json
import uuid
import psutil
import shutil
from datetime import datetime, timezone as dt_timezone
from pathlib import Path
from typing import Any
from urllib.parse import quote, urlparse
from zoneinfo import ZoneInfo
import boto3
import requests
@@ -33,12 +36,56 @@ from .extensions import limiter, csrf
from .iam import IamError
from .kms import KMSManager
from .replication import ReplicationManager, ReplicationRule
from .s3_api import _generate_presigned_url
from .secret_store import EphemeralSecretStore
from .storage import ObjectStorage, StorageError
ui_bp = Blueprint("ui", __name__, template_folder="../templates", url_prefix="/ui")
def _convert_to_display_tz(dt: datetime, display_tz: str | None = None) -> datetime:
    """Convert a datetime to the configured display timezone.

    Args:
        dt: The datetime to convert
        display_tz: Optional timezone string. If not provided, reads from current_app.config.
    """
    if display_tz is None:
        display_tz = current_app.config.get("DISPLAY_TIMEZONE", "UTC")
    if display_tz and display_tz != "UTC":
        try:
            tz = ZoneInfo(display_tz)
            if dt.tzinfo is None:
                dt = dt.replace(tzinfo=dt_timezone.utc)
            dt = dt.astimezone(tz)
        except (KeyError, ValueError):
            pass
    return dt


def _format_datetime_display(dt: datetime, display_tz: str | None = None) -> str:
    """Format a datetime for display using the configured timezone.

    Args:
        dt: The datetime to format
        display_tz: Optional timezone string. If not provided, reads from current_app.config.
    """
    dt = _convert_to_display_tz(dt, display_tz)
    tz_abbr = dt.strftime("%Z") or "UTC"
    return f"{dt.strftime('%b %d, %Y %H:%M')} ({tz_abbr})"


def _format_datetime_iso(dt: datetime, display_tz: str | None = None) -> str:
    """Format a datetime as ISO format using the configured timezone.

    Args:
        dt: The datetime to format
        display_tz: Optional timezone string. If not provided, reads from current_app.config.
    """
    dt = _convert_to_display_tz(dt, display_tz)
    return dt.isoformat()
def _storage() -> ObjectStorage:
return current_app.extensions["object_storage"]
@@ -62,6 +109,20 @@ def _bucket_policies() -> BucketPolicyStore:
return store
def _build_policy_context() -> dict[str, Any]:
    ctx: dict[str, Any] = {}
    if request.headers.get("Referer"):
        ctx["aws:Referer"] = request.headers.get("Referer")
    if request.access_route:
        ctx["aws:SourceIp"] = request.access_route[0]
    elif request.remote_addr:
        ctx["aws:SourceIp"] = request.remote_addr
    ctx["aws:SecureTransport"] = str(request.is_secure).lower()
    if request.headers.get("User-Agent"):
        ctx["aws:UserAgent"] = request.headers.get("User-Agent")
    return ctx
def _connections() -> ConnectionStore:
return current_app.extensions["connections"]
@@ -80,6 +141,10 @@ def _acl() -> AclService:
return current_app.extensions["acl"]
def _operation_metrics():
return current_app.extensions.get("operation_metrics")
def _format_bytes(num: int) -> str:
step = 1024
units = ["B", "KB", "MB", "GB", "TB", "PB"]
@@ -93,6 +158,69 @@ def _format_bytes(num: int) -> str:
return f"{value:.1f} PB"
_metrics_last_save_time: float = 0.0


def _get_metrics_history_path() -> Path:
    storage_root = Path(current_app.config["STORAGE_ROOT"])
    return storage_root / ".myfsio.sys" / "config" / "metrics_history.json"


def _load_metrics_history() -> dict:
    path = _get_metrics_history_path()
    if not path.exists():
        return {"history": []}
    try:
        return json.loads(path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, OSError):
        return {"history": []}


def _save_metrics_snapshot(cpu_percent: float, memory_percent: float, disk_percent: float, storage_bytes: int) -> None:
    global _metrics_last_save_time
    if not current_app.config.get("METRICS_HISTORY_ENABLED", False):
        return
    import time
    from datetime import datetime, timezone

    interval_minutes = current_app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5)
    now_ts = time.time()
    if now_ts - _metrics_last_save_time < interval_minutes * 60:
        return
    path = _get_metrics_history_path()
    path.parent.mkdir(parents=True, exist_ok=True)
    data = _load_metrics_history()
    history = data.get("history", [])
    retention_hours = current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24)
    now = datetime.now(timezone.utc)
    snapshot = {
        "timestamp": now.strftime("%Y-%m-%dT%H:%M:%SZ"),
        "cpu_percent": round(cpu_percent, 2),
        "memory_percent": round(memory_percent, 2),
        "disk_percent": round(disk_percent, 2),
        "storage_bytes": storage_bytes,
    }
    history.append(snapshot)
    cutoff = now.timestamp() - (retention_hours * 3600)
    history = [
        h for h in history
        if datetime.fromisoformat(h["timestamp"].replace("Z", "+00:00")).timestamp() > cutoff
    ]
    data["history"] = history
    try:
        path.write_text(json.dumps(data, indent=2), encoding="utf-8")
        _metrics_last_save_time = now_ts
    except OSError:
        pass
def _friendly_error_message(exc: Exception) -> str:
message = str(exc) or "An unexpected error occurred"
if isinstance(exc, IamError):
@@ -172,7 +300,8 @@ def _authorize_ui(principal, bucket_name: str | None, action: str, *, object_key
enforce_bucket_policies = current_app.config.get("UI_ENFORCE_BUCKET_POLICIES", True)
if bucket_name and enforce_bucket_policies:
access_key = principal.access_key if principal else None
decision = _bucket_policies().evaluate(access_key, bucket_name, object_key, action)
policy_context = _build_policy_context()
decision = _bucket_policies().evaluate(access_key, bucket_name, object_key, action, policy_context)
if decision == "deny":
raise IamError("Access denied by bucket policy")
if not iam_allowed and decision != "allow":
@@ -350,6 +479,23 @@ def bucket_detail(bucket_name: str):
can_edit_policy = True
except IamError:
can_edit_policy = False
can_manage_lifecycle = False
if principal:
try:
_iam().authorize(principal, bucket_name, "lifecycle")
can_manage_lifecycle = True
except IamError:
can_manage_lifecycle = False
can_manage_cors = False
if principal:
try:
_iam().authorize(principal, bucket_name, "cors")
can_manage_cors = True
except IamError:
can_manage_cors = False
try:
versioning_enabled = storage.is_versioning_enabled(bucket_name)
except StorageError:
@@ -421,6 +567,8 @@ def bucket_detail(bucket_name: str):
bucket_policy_text=policy_text,
bucket_policy=bucket_policy,
can_edit_policy=can_edit_policy,
can_manage_lifecycle=can_manage_lifecycle,
can_manage_cors=can_manage_cors,
can_manage_versioning=can_manage_versioning,
can_manage_replication=can_manage_replication,
can_manage_encryption=can_manage_encryption,
@@ -477,6 +625,7 @@ def list_bucket_objects(bucket_name: str):
tags_template = url_for("ui.object_tags", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
copy_template = url_for("ui.copy_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
move_template = url_for("ui.move_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
metadata_template = url_for("ui.object_metadata", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
objects_data = []
for obj in result.objects:
@@ -484,7 +633,8 @@ def list_bucket_objects(bucket_name: str):
"key": obj.key,
"size": obj.size,
"last_modified": obj.last_modified.isoformat(),
"last_modified_display": obj.last_modified.strftime("%b %d, %Y %H:%M"),
"last_modified_display": _format_datetime_display(obj.last_modified),
"last_modified_iso": _format_datetime_iso(obj.last_modified),
"etag": obj.etag,
})
@@ -504,6 +654,7 @@ def list_bucket_objects(bucket_name: str):
"tags": tags_template,
"copy": copy_template,
"move": move_template,
"metadata": metadata_template,
},
})
@@ -537,6 +688,8 @@ def stream_bucket_objects(bucket_name: str):
tags_template = url_for("ui.object_tags", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
copy_template = url_for("ui.copy_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
move_template = url_for("ui.move_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
metadata_template = url_for("ui.object_metadata", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
display_tz = current_app.config.get("DISPLAY_TIMEZONE", "UTC")
def generate():
meta_line = json.dumps({
@@ -552,6 +705,7 @@ def stream_bucket_objects(bucket_name: str):
"tags": tags_template,
"copy": copy_template,
"move": move_template,
"metadata": metadata_template,
},
}) + "\n"
yield meta_line
@@ -582,7 +736,8 @@ def stream_bucket_objects(bucket_name: str):
"key": obj.key,
"size": obj.size,
"last_modified": obj.last_modified.isoformat(),
"last_modified_display": obj.last_modified.strftime("%b %d, %Y %H:%M"),
"last_modified_display": _format_datetime_display(obj.last_modified, display_tz),
"last_modified_iso": _format_datetime_iso(obj.last_modified, display_tz),
"etag": obj.etag,
}) + "\n"
@@ -985,42 +1140,57 @@ def object_presign(bucket_name: str, object_key: str):
principal = _current_principal()
payload = request.get_json(silent=True) or {}
method = str(payload.get("method", "GET")).upper()
allowed_methods = {"GET", "PUT", "DELETE"}
if method not in allowed_methods:
return jsonify({"error": "Method must be GET, PUT, or DELETE"}), 400
action = "read" if method == "GET" else ("delete" if method == "DELETE" else "write")
try:
_authorize_ui(principal, bucket_name, action, object_key=object_key)
except IamError as exc:
return jsonify({"error": str(exc)}), 403
try:
expires = int(payload.get("expires_in", 900))
except (TypeError, ValueError):
return jsonify({"error": "expires_in must be an integer"}), 400
expires = max(1, min(expires, 7 * 24 * 3600))
storage = _storage()
if not storage.bucket_exists(bucket_name):
return jsonify({"error": "Bucket does not exist"}), 404
if action != "write":
try:
storage.get_object_path(bucket_name, object_key)
except StorageError:
return jsonify({"error": "Object not found"}), 404
secret = _iam().secret_for_key(principal.access_key)
api_base = current_app.config.get("API_BASE_URL") or "http://127.0.0.1:5000"
api_base = api_base.rstrip("/")
encoded_key = quote(object_key, safe="/")
url = f"{api_base}/presign/{bucket_name}/{encoded_key}"
parsed_api = urlparse(api_base)
headers = _api_headers()
headers["X-Forwarded-Host"] = parsed_api.netloc or "127.0.0.1:5000"
headers["X-Forwarded-Proto"] = parsed_api.scheme or "http"
headers["X-Forwarded-For"] = request.remote_addr or "127.0.0.1"
url = _generate_presigned_url(
principal=principal,
secret_key=secret,
method=method,
bucket_name=bucket_name,
object_key=object_key,
expires_in=expires,
api_base_url=api_base,
)
current_app.logger.info(
"Presigned URL generated",
extra={"bucket": bucket_name, "key": object_key, "method": method},
)
return jsonify({"url": url, "method": method, "expires_in": expires})
@ui_bp.get("/buckets/<bucket_name>/objects/<path:object_key>/metadata")
def object_metadata(bucket_name: str, object_key: str):
principal = _current_principal()
storage = _storage()
try:
response = requests.post(url, headers=headers, json=payload, timeout=5)
except requests.RequestException as exc:
return jsonify({"error": f"API unavailable: {exc}"}), 502
try:
body = response.json()
except ValueError:
text = response.text or ""
if text.strip().startswith("<"):
import xml.etree.ElementTree as ET
try:
root = ET.fromstring(text)
message = root.findtext(".//Message") or root.findtext(".//Code") or "Unknown S3 error"
body = {"error": message}
except ET.ParseError:
body = {"error": text or "API returned an empty response"}
else:
body = {"error": text or "API returned an empty response"}
return jsonify(body), response.status_code
_authorize_ui(principal, bucket_name, "read", object_key=object_key)
metadata = storage.get_object_metadata(bucket_name, object_key)
return jsonify({"metadata": metadata})
except IamError as exc:
return jsonify({"error": str(exc)}), 403
except StorageError as exc:
return jsonify({"error": str(exc)}), 404
@ui_bp.get("/buckets/<bucket_name>/objects/<path:object_key>/versions")
@@ -2007,18 +2177,18 @@ def metrics_dashboard():
return render_template(
"metrics.html",
principal=principal,
cpu_percent=cpu_percent,
cpu_percent=round(cpu_percent, 2),
memory={
"total": _format_bytes(memory.total),
"available": _format_bytes(memory.available),
"used": _format_bytes(memory.used),
"percent": memory.percent,
"percent": round(memory.percent, 2),
},
disk={
"total": _format_bytes(disk.total),
"free": _format_bytes(disk.free),
"used": _format_bytes(disk.used),
"percent": disk.percent,
"percent": round(disk.percent, 2),
},
app={
"buckets": total_buckets,
@@ -2028,7 +2198,9 @@ def metrics_dashboard():
"storage_raw": total_bytes_used,
"version": APP_VERSION,
"uptime_days": uptime_days,
}
},
metrics_history_enabled=current_app.config.get("METRICS_HISTORY_ENABLED", False),
operation_metrics_enabled=current_app.config.get("OPERATION_METRICS_ENABLED", False),
)
@@ -2068,19 +2240,21 @@ def metrics_api():
uptime_seconds = time.time() - boot_time
uptime_days = int(uptime_seconds / 86400)
_save_metrics_snapshot(cpu_percent, memory.percent, disk.percent, total_bytes_used)
return jsonify({
"cpu_percent": cpu_percent,
"cpu_percent": round(cpu_percent, 2),
"memory": {
"total": _format_bytes(memory.total),
"available": _format_bytes(memory.available),
"used": _format_bytes(memory.used),
"percent": memory.percent,
"percent": round(memory.percent, 2),
},
"disk": {
"total": _format_bytes(disk.total),
"free": _format_bytes(disk.free),
"used": _format_bytes(disk.used),
"percent": disk.percent,
"percent": round(disk.percent, 2),
},
"app": {
"buckets": total_buckets,
@@ -2093,11 +2267,124 @@ def metrics_api():
})
@ui_bp.route("/metrics/history")
def metrics_history():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:list_users")
except IamError:
return jsonify({"error": "Access denied"}), 403
if not current_app.config.get("METRICS_HISTORY_ENABLED", False):
return jsonify({"enabled": False, "history": []})
hours = request.args.get("hours", type=int)
if hours is None:
hours = current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24)
data = _load_metrics_history()
history = data.get("history", [])
if hours:
from datetime import datetime, timezone
cutoff = datetime.now(timezone.utc).timestamp() - (hours * 3600)
history = [
h for h in history
if datetime.fromisoformat(h["timestamp"].replace("Z", "+00:00")).timestamp() > cutoff
]
return jsonify({
"enabled": True,
"retention_hours": current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24),
"interval_minutes": current_app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5),
"history": history,
})
@ui_bp.route("/metrics/settings", methods=["GET", "PUT"])
def metrics_settings():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:list_users")
except IamError:
return jsonify({"error": "Access denied"}), 403
if request.method == "GET":
return jsonify({
"enabled": current_app.config.get("METRICS_HISTORY_ENABLED", False),
"retention_hours": current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24),
"interval_minutes": current_app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5),
})
data = request.get_json() or {}
if "enabled" in data:
current_app.config["METRICS_HISTORY_ENABLED"] = bool(data["enabled"])
if "retention_hours" in data:
current_app.config["METRICS_HISTORY_RETENTION_HOURS"] = max(1, int(data["retention_hours"]))
if "interval_minutes" in data:
current_app.config["METRICS_HISTORY_INTERVAL_MINUTES"] = max(1, int(data["interval_minutes"]))
return jsonify({
"enabled": current_app.config.get("METRICS_HISTORY_ENABLED", False),
"retention_hours": current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24),
"interval_minutes": current_app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5),
})
@ui_bp.get("/metrics/operations")
def metrics_operations():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:list_users")
except IamError:
return jsonify({"error": "Access denied"}), 403
collector = _operation_metrics()
if not collector:
return jsonify({
"enabled": False,
"stats": None,
})
return jsonify({
"enabled": True,
"stats": collector.get_current_stats(),
})
@ui_bp.get("/metrics/operations/history")
def metrics_operations_history():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:list_users")
except IamError:
return jsonify({"error": "Access denied"}), 403
collector = _operation_metrics()
if not collector:
return jsonify({
"enabled": False,
"history": [],
})
hours = request.args.get("hours", type=int)
return jsonify({
"enabled": True,
"history": collector.get_history(hours),
"interval_minutes": current_app.config.get("OPERATION_METRICS_INTERVAL_MINUTES", 5),
})
@ui_bp.route("/buckets/<bucket_name>/lifecycle", methods=["GET", "POST", "DELETE"])
def bucket_lifecycle(bucket_name: str):
principal = _current_principal()
try:
_authorize_ui(principal, bucket_name, "policy")
_authorize_ui(principal, bucket_name, "lifecycle")
except IamError as exc:
return jsonify({"error": str(exc)}), 403
@@ -2150,7 +2437,7 @@ def bucket_lifecycle(bucket_name: str):
def get_lifecycle_history(bucket_name: str):
principal = _current_principal()
try:
_authorize_ui(principal, bucket_name, "policy")
_authorize_ui(principal, bucket_name, "lifecycle")
except IamError:
return jsonify({"error": "Access denied"}), 403
@@ -2181,7 +2468,7 @@ def get_lifecycle_history(bucket_name: str):
def bucket_cors(bucket_name: str):
principal = _current_principal()
try:
_authorize_ui(principal, bucket_name, "policy")
_authorize_ui(principal, bucket_name, "cors")
except IamError as exc:
return jsonify({"error": str(exc)}), 403

View File

@@ -1,6 +1,6 @@
from __future__ import annotations
APP_VERSION = "0.2.1"
APP_VERSION = "0.2.2"
def get_version() -> str:

346
docs.md
View File

@@ -122,7 +122,7 @@ With these volumes attached you can rebuild/restart the container without losing
### Versioning
The repo now tracks a human-friendly release string inside `app/version.py` (see the `APP_VERSION` constant). Edit that value whenever you cut a release. The constant flows into Flask as `APP_VERSION` and is exposed via `GET /healthz`, so you can monitor deployments or surface it in UIs.
The repo now tracks a human-friendly release string inside `app/version.py` (see the `APP_VERSION` constant). Edit that value whenever you cut a release. The constant flows into Flask as `APP_VERSION` and is exposed via `GET /myfsio/health`, so you can monitor deployments or surface it in UIs.
## 3. Configuration Reference
@@ -277,14 +277,14 @@ The application automatically trusts these headers to generate correct presigned
### Version Checking
The application version is tracked in `app/version.py` and exposed via:
- **Health endpoint:** `GET /healthz` returns JSON with `version` field
- **Health endpoint:** `GET /myfsio/health` returns JSON with `version` field
- **Metrics dashboard:** Navigate to `/ui/metrics` to see the running version in the System Status card
To check your current version:
```bash
# API health endpoint
curl http://localhost:5000/healthz
curl http://localhost:5000/myfsio/health
# Or inspect version.py directly
cat app/version.py | grep APP_VERSION
@@ -377,7 +377,7 @@ docker run -d \
myfsio:latest
# 5. Verify health
curl http://localhost:5000/healthz
curl http://localhost:5000/myfsio/health
```
### Version Compatibility Checks
@@ -502,7 +502,7 @@ docker run -d \
myfsio:0.1.3 # specify previous version tag
# 3. Verify
curl http://localhost:5000/healthz
curl http://localhost:5000/myfsio/health
```
#### Emergency Config Restore
@@ -528,7 +528,7 @@ For production environments requiring zero downtime:
APP_PORT=5001 UI_PORT=5101 python run.py &
# 2. Health check new instance
curl http://localhost:5001/healthz
curl http://localhost:5001/myfsio/health
# 3. Update load balancer to route to new ports
@@ -544,7 +544,7 @@ After any update, verify functionality:
```bash
# 1. Health check
curl http://localhost:5000/healthz
curl http://localhost:5000/myfsio/health
# 2. Login to UI
open http://localhost:5100/ui
@@ -588,7 +588,7 @@ APP_PID=$!
# Wait and health check
sleep 5
if curl -f http://localhost:5000/healthz; then
if curl -f http://localhost:5000/myfsio/health; then
echo "Update successful!"
else
echo "Health check failed, rolling back..."
@@ -602,6 +602,10 @@ fi
## 4. Authentication & IAM
MyFSIO implements a comprehensive Identity and Access Management (IAM) system that controls who can access your buckets and what operations they can perform. The system supports both simple action-based permissions and AWS-compatible policy syntax.
### Getting Started
1. On first boot, `data/.myfsio.sys/config/iam.json` is seeded with `localadmin / localadmin`, which has wildcard access.
2. Sign into the UI using those credentials, then open **IAM**:
- **Create user**: supply a display name and optional JSON inline policy array.
@@ -609,48 +613,241 @@ fi
- **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
3. Wildcard action `iam:*` is supported for admin user definitions.
The API expects every request to include `X-Access-Key` and `X-Secret-Key` headers. The UI persists them in the Flask session after login.
### Authentication
The API expects every request to include authentication headers. The UI persists them in the Flask session after login.
| Header | Description |
| --- | --- |
| `X-Access-Key` | The user's access key identifier |
| `X-Secret-Key` | The user's secret key for signing |
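For example, a raw API call carrying these headers (the keys shown are placeholders) lists your buckets:

```bash
curl http://localhost:5000/ \
  -H "X-Access-Key: AKIA-EXAMPLE" \
  -H "X-Secret-Key: example-secret"
```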
**Security Features:**
- **Lockout Protection**: After `AUTH_MAX_ATTEMPTS` (default: 5) failed login attempts, the account is locked for `AUTH_LOCKOUT_MINUTES` (default: 15 minutes).
- **Session Management**: UI sessions remain valid for `SESSION_LIFETIME_DAYS` (default: 30 days).
- **Hot Reload**: IAM configuration changes take effect immediately without restart.
### Permission Model
MyFSIO uses a two-layer permission model:
1. **IAM User Policies**: Define what a user can do across the system (stored in `iam.json`)
2. **Bucket Policies**: Define who can access a specific bucket (stored in `bucket_policies.json`)
Both layers are evaluated for each request. A user must have permission in their IAM policy AND the bucket policy must allow the action (or have no explicit deny).
### Available IAM Actions
#### S3 Actions (Bucket/Object Operations)
| Action | Description | AWS Aliases |
| --- | --- | --- |
| `list` | List buckets and objects | `s3:ListBucket`, `s3:ListAllMyBuckets`, `s3:ListBucketVersions`, `s3:ListMultipartUploads`, `s3:ListParts` |
| `read` | Download objects | `s3:GetObject`, `s3:GetObjectVersion`, `s3:GetObjectTagging`, `s3:HeadObject`, `s3:HeadBucket` |
| `write` | Upload objects, create buckets | `s3:PutObject`, `s3:CreateBucket`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
| `delete` | Remove objects and buckets | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteBucket` |
| `share` | Manage ACLs | `s3:PutObjectAcl`, `s3:PutBucketAcl`, `s3:GetBucketAcl` |
| `read` | Download objects, get metadata | `s3:GetObject`, `s3:GetObjectVersion`, `s3:GetObjectTagging`, `s3:GetObjectVersionTagging`, `s3:GetObjectAcl`, `s3:GetBucketVersioning`, `s3:HeadObject`, `s3:HeadBucket` |
| `write` | Upload objects, create buckets, manage tags | `s3:PutObject`, `s3:CreateBucket`, `s3:PutObjectTagging`, `s3:PutBucketVersioning`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
| `delete` | Remove objects, versions, and buckets | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteBucket`, `s3:DeleteObjectTagging` |
| `share` | Manage Access Control Lists (ACLs) | `s3:PutObjectAcl`, `s3:PutBucketAcl`, `s3:GetBucketAcl` |
| `policy` | Manage bucket policies | `s3:PutBucketPolicy`, `s3:GetBucketPolicy`, `s3:DeleteBucketPolicy` |
| `replication` | Configure and manage replication | `s3:GetReplicationConfiguration`, `s3:PutReplicationConfiguration`, `s3:ReplicateObject`, `s3:ReplicateTags`, `s3:ReplicateDelete` |
| `iam:list_users` | View IAM users | `iam:ListUsers` |
| `iam:create_user` | Create IAM users | `iam:CreateUser` |
| `lifecycle` | Manage lifecycle rules | `s3:GetLifecycleConfiguration`, `s3:PutLifecycleConfiguration`, `s3:DeleteLifecycleConfiguration`, `s3:GetBucketLifecycle`, `s3:PutBucketLifecycle` |
| `cors` | Manage CORS configuration | `s3:GetBucketCors`, `s3:PutBucketCors`, `s3:DeleteBucketCors` |
| `replication` | Configure and manage replication | `s3:GetReplicationConfiguration`, `s3:PutReplicationConfiguration`, `s3:DeleteReplicationConfiguration`, `s3:ReplicateObject`, `s3:ReplicateTags`, `s3:ReplicateDelete` |
#### IAM Actions (User Management)
| Action | Description | AWS Aliases |
| --- | --- | --- |
| `iam:list_users` | View all IAM users and their policies | `iam:ListUsers` |
| `iam:create_user` | Create new IAM users | `iam:CreateUser` |
| `iam:delete_user` | Delete IAM users | `iam:DeleteUser` |
| `iam:rotate_key` | Rotate user secrets | `iam:RotateAccessKey` |
| `iam:rotate_key` | Rotate user secret keys | `iam:RotateAccessKey` |
| `iam:update_policy` | Modify user policies | `iam:PutUserPolicy` |
| `iam:*` | All IAM actions (admin wildcard) | — |
| `iam:*` | **Admin wildcard** grants all IAM actions | — |
### Example Policies
#### Wildcards
| Wildcard | Scope | Description |
| --- | --- | --- |
| `*` (in actions) | All S3 actions | Grants `list`, `read`, `write`, `delete`, `share`, `policy`, `lifecycle`, `cors`, `replication` |
| `iam:*` | All IAM actions | Grants all `iam:*` actions for user management |
| `*` (in bucket) | All buckets | Policy applies to every bucket |
### IAM Policy Structure
User policies are stored as a JSON array of policy objects. Each object specifies a bucket and the allowed actions:
**Full Control (admin):**
```json
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "replication", "iam:*"]}]
[
{
"bucket": "<bucket-name-or-wildcard>",
"actions": ["<action1>", "<action2>", ...]
}
]
```
**Read-Only:**
**Fields:**
- `bucket`: The bucket name (case-insensitive) or `*` for all buckets
- `actions`: Array of action strings (simple names or AWS aliases)
### Example User Policies
**Full Administrator (complete system access):**
```json
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "lifecycle", "cors", "replication", "iam:*"]}]
```
**Read-Only User (browse and download only):**
```json
[{"bucket": "*", "actions": ["list", "read"]}]
```
**Single Bucket Access (no listing other buckets):**
**Single Bucket Full Access (no access to other buckets):**
```json
[{"bucket": "user-bucket", "actions": ["read", "write", "delete"]}]
[{"bucket": "user-bucket", "actions": ["list", "read", "write", "delete"]}]
```
**Bucket Access with Replication:**
**Multiple Bucket Access (different permissions per bucket):**
```json
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "replication"]}]
[
{"bucket": "public-data", "actions": ["list", "read"]},
{"bucket": "my-uploads", "actions": ["list", "read", "write", "delete"]},
{"bucket": "team-shared", "actions": ["list", "read", "write"]}
]
```
**IAM Manager (manage users but no data access):**
```json
[{"bucket": "*", "actions": ["iam:list_users", "iam:create_user", "iam:delete_user", "iam:rotate_key", "iam:update_policy"]}]
```
**Replication Operator (manage replication only):**
```json
[{"bucket": "*", "actions": ["list", "read", "replication"]}]
```
**Lifecycle Manager (configure object expiration):**
```json
[{"bucket": "*", "actions": ["list", "lifecycle"]}]
```
**CORS Administrator (configure cross-origin access):**
```json
[{"bucket": "*", "actions": ["cors"]}]
```
**Bucket Administrator (full bucket config, no IAM access):**
```json
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "policy", "lifecycle", "cors"]}]
```
**Upload-Only User (write but cannot read back):**
```json
[{"bucket": "drop-box", "actions": ["write"]}]
```
**Backup Operator (read, list, and replicate):**
```json
[{"bucket": "*", "actions": ["list", "read", "replication"]}]
```
### Using AWS-Style Action Names
You can use AWS S3 action names instead of simple names. They are automatically normalized:
```json
[
{
"bucket": "my-bucket",
"actions": [
"s3:ListBucket",
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
]
}
]
```
This is equivalent to:
```json
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete"]}]
```
### Managing Users via API
```bash
# List all users (requires iam:list_users)
curl http://localhost:5000/iam/users \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
# Create a new user (requires iam:create_user)
curl -X POST http://localhost:5000/iam/users \
-H "Content-Type: application/json" \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
-d '{
"display_name": "New User",
"policies": [{"bucket": "*", "actions": ["list", "read"]}]
}'
# Rotate user secret (requires iam:rotate_key)
curl -X POST http://localhost:5000/iam/users/<access-key>/rotate \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
# Update user policies (requires iam:update_policy)
curl -X PUT http://localhost:5000/iam/users/<access-key>/policies \
-H "Content-Type: application/json" \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
-d '[{"bucket": "*", "actions": ["list", "read", "write"]}]'
# Delete a user (requires iam:delete_user)
curl -X DELETE http://localhost:5000/iam/users/<access-key> \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```
### Permission Precedence
When a request is made, permissions are evaluated in this order:
1. **Authentication**: Verify the access key and secret key are valid
2. **Lockout Check**: Ensure the account is not locked due to failed attempts
3. **IAM Policy Check**: Verify the user has the required action for the target bucket
4. **Bucket Policy Check**: If a bucket policy exists, verify it allows the action
A request is allowed only if:
- The IAM policy grants the action, AND
- The bucket policy allows the action (or no bucket policy exists)
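A minimal sketch of that evaluation order, using toy in-memory policy shapes rather than MyFSIO's actual internals:

```python
# Illustrative only: the data shapes and helper names below are toy
# stand-ins for the real IAM and bucket-policy stores.
IAM_POLICIES = {"alice": [{"bucket": "*", "actions": ["list", "read"]}]}
BUCKET_POLICIES: dict = {}   # bucket name -> {"allow": {actions}}; absent = no policy
LOCKED_OUT: set = set()

def iam_allows(user: str, bucket: str, action: str) -> bool:
    return any(
        p["bucket"] in ("*", bucket) and action in p["actions"]
        for p in IAM_POLICIES.get(user, [])
    )

def is_request_allowed(user: str, authenticated: bool, bucket: str, action: str) -> bool:
    if not authenticated:                      # 1. credentials must be valid
        return False
    if user in LOCKED_OUT:                     # 2. account must not be locked out
        return False
    if not iam_allows(user, bucket, action):   # 3. IAM policy must grant the action
        return False
    policy = BUCKET_POLICIES.get(bucket)       # 4. bucket policy, if present, must allow it
    return policy is None or action in policy.get("allow", set())

assert is_request_allowed("alice", True, "public-data", "read")
assert not is_request_allowed("alice", True, "public-data", "write")
```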
### Common Permission Scenarios
| Scenario | Required Actions |
| --- | --- |
| Browse bucket contents | `list` |
| Download a file | `read` |
| Upload a file | `write` |
| Delete a file | `delete` |
| Generate presigned URL (GET) | `read` |
| Generate presigned URL (PUT) | `write` |
| Generate presigned URL (DELETE) | `delete` |
| Enable versioning | `write` (includes `s3:PutBucketVersioning`) |
| View bucket policy | `policy` |
| Modify bucket policy | `policy` |
| Configure lifecycle rules | `lifecycle` |
| View lifecycle rules | `lifecycle` |
| Configure CORS | `cors` |
| View CORS rules | `cors` |
| Configure replication | `replication` (admin-only for creation) |
| Pause/resume replication | `replication` |
| Manage other users | `iam:*` or specific `iam:` actions |
| Set bucket quotas | `iam:*` or `iam:list_users` (admin feature) |
### Security Best Practices
1. **Principle of Least Privilege**: Grant only the permissions users need
2. **Avoid Wildcards**: Use specific bucket names instead of `*` when possible
3. **Rotate Secrets Regularly**: Use the rotate key feature periodically
4. **Separate Admin Accounts**: Don't use admin accounts for daily operations
5. **Monitor Failed Logins**: Check logs for repeated authentication failures
6. **Use Bucket Policies for Fine-Grained Control**: Combine with IAM for defense in depth
## 5. Bucket Policies & Presets
- **Storage**: Policies are persisted in `data/.myfsio.sys/config/bucket_policies.json` under `{"policies": {"bucket": {...}}}`.
@@ -663,7 +860,7 @@ The API expects every request to include `X-Access-Key` and `X-Secret-Key` heade
### Editing via CLI
```bash
curl -X PUT http://127.0.0.1:5000/bucket-policy/test \
curl -X PUT "http://127.0.0.1:5000/test?policy" \
-H "Content-Type: application/json" \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
-d '{
@@ -726,9 +923,8 @@ Drag files directly onto the objects table to upload them to the current bucket
## 6. Presigned URLs
- Trigger from the UI using the **Presign** button after selecting an object.
- Or call `POST /presign/<bucket>/<key>` with JSON `{ "method": "GET", "expires_in": 900 }` (see the example below).
- Supported methods: `GET`, `PUT`, `DELETE`; expiration must be `1..604800` seconds.
- The service signs requests using the caller’s IAM credentials and enforces bucket policies both when issuing and when the presigned URL is used.
- The service signs requests using the caller's IAM credentials and enforces bucket policies both when issuing and when the presigned URL is used.
- Legacy share links have been removed; presigned URLs now handle both private and public workflows.
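A minimal CLI example (bucket, key, and credentials are placeholders):

```bash
curl -X POST http://localhost:5000/presign/my-bucket/report.pdf \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{"method": "GET", "expires_in": 900}'
```

The response is JSON containing the signed `url` plus the echoed `method` and `expires_in`.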
### Multipart Upload Example
@@ -951,7 +1147,84 @@ curl -X PUT "http://localhost:5000/bucket/<bucket>?quota" \
</Error>
```
## 9. Site Replication
## 9. Operation Metrics
Operation metrics provide real-time visibility into API request statistics, including request counts, latency, error rates, and bandwidth usage.
### Enabling Operation Metrics
By default, operation metrics are disabled. Enable them by setting the environment variable:
```bash
OPERATION_METRICS_ENABLED=true python run.py
```
Or in your `myfsio.env` file:
```
OPERATION_METRICS_ENABLED=true
OPERATION_METRICS_INTERVAL_MINUTES=5
OPERATION_METRICS_RETENTION_HOURS=24
```
### Configuration Options
| Variable | Default | Description |
|----------|---------|-------------|
| `OPERATION_METRICS_ENABLED` | `false` | Enable/disable operation metrics |
| `OPERATION_METRICS_INTERVAL_MINUTES` | `5` | Snapshot interval (minutes) |
| `OPERATION_METRICS_RETENTION_HOURS` | `24` | History retention period (hours) |
### What's Tracked
**Request Statistics:**
- Request counts by HTTP method (GET, PUT, POST, DELETE, HEAD, OPTIONS)
- Response status codes grouped by class (2xx, 3xx, 4xx, 5xx)
- Latency statistics (min, max, average)
- Bytes transferred in/out
**Endpoint Breakdown:**
- `object` - Object operations (GET/PUT/DELETE objects)
- `bucket` - Bucket operations (list, create, delete buckets)
- `ui` - Web UI requests
- `service` - Health checks, internal endpoints
- `kms` - KMS API operations
**S3 Error Codes:**
Tracks API-specific error codes like `NoSuchKey`, `AccessDenied`, `BucketNotFound`. Note: these are separate from HTTP status codes; a 404 from the UI won't appear here, only S3 API errors.
### API Endpoints
```bash
# Get current operation metrics
curl http://localhost:5100/ui/metrics/operations \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
# Get operation metrics history
curl http://localhost:5100/ui/metrics/operations/history \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
# Filter history by time range
curl "http://localhost:5100/ui/metrics/operations/history?hours=6" \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```
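The current-metrics endpoint returns `{"enabled": ..., "stats": ...}`; a response might look roughly like the following (the inner field names are illustrative, not a schema guarantee):

```json
{
  "enabled": true,
  "stats": {
    "requests_by_method": {"GET": 1042, "PUT": 87, "DELETE": 12},
    "status_classes": {"2xx": 1100, "4xx": 38, "5xx": 3},
    "latency_ms": {"min": 1.2, "avg": 14.8, "max": 412.0},
    "bytes_in": 10485760,
    "bytes_out": 524288000
  }
}
```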
### Storage Location
Operation metrics data is stored at:
```
data/.myfsio.sys/config/operation_metrics.json
```
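Snapshots are plain JSON, so they can be inspected directly on disk, for example:

```bash
# Pretty-print the stored snapshots (assumes jq is installed)
jq . data/.myfsio.sys/config/operation_metrics.json
```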
### UI Dashboard
When enabled, the Metrics page (`/ui/metrics`) shows an "API Operations" section with:
- Summary cards: Requests, Success Rate, Errors, Latency, Bytes In, Bytes Out
- Charts: Requests by Method (doughnut), Requests by Status (bar), Requests by Endpoint (horizontal bar)
- S3 Error Codes table with distribution
Data refreshes every 5 seconds.
## 10. Site Replication
### Permission Model
@@ -1088,7 +1361,7 @@ To set up two-way replication (Server A ↔ Server B):
**Note**: Deleting a bucket will automatically remove its associated replication configuration.
## 11. Running Tests
## 12. Running Tests
```bash
pytest -q
@@ -1098,7 +1371,7 @@ The suite now includes a boto3 integration test that spins up a live HTTP server
The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, and regression tests for anonymous reads when a Public policy is attached.
## 12. Troubleshooting
## 13. Troubleshooting
| Symptom | Likely Cause | Fix |
| --- | --- | --- |
@@ -1107,7 +1380,7 @@ The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, an
| Presign modal errors with 403 | IAM user lacks `read/write/delete` for target bucket or bucket policy denies | Update IAM inline policies or remove conflicting deny statements. |
| Large upload rejected immediately | File exceeds `MAX_UPLOAD_SIZE` | Increase env var or shrink object. |
## 13. API Matrix
## 14. API Matrix
```
GET / # List buckets
@@ -1117,10 +1390,9 @@ GET /<bucket> # List objects
PUT /<bucket>/<key> # Upload object
GET /<bucket>/<key> # Download object
DELETE /<bucket>/<key> # Delete object
POST /presign/<bucket>/<key> # Generate SigV4 URL
GET /bucket-policy/<bucket> # Fetch policy
PUT /bucket-policy/<bucket> # Upsert policy
DELETE /bucket-policy/<bucket> # Delete policy
GET /<bucket>?policy # Fetch policy
PUT /<bucket>?policy # Upsert policy
DELETE /<bucket>?policy # Delete policy
GET /<bucket>?quota # Get bucket quota
PUT /<bucket>?quota # Set bucket quota (admin only)
```

View File

@@ -8,4 +8,5 @@ requests>=2.32.5
boto3>=1.42.14
waitress>=3.0.2
psutil>=7.1.3
cryptography>=46.0.3
cryptography>=46.0.3
defusedxml>=0.7.1

View File

@@ -1,4 +1,4 @@
(function() {
(function () {
'use strict';
const { formatBytes, escapeHtml, fallbackCopy, setupJsonAutoIndent } = window.BucketDetailUtils || {
@@ -23,11 +23,62 @@
.replace(/'/g, '&#039;');
},
fallbackCopy: () => false,
setupJsonAutoIndent: () => {}
setupJsonAutoIndent: () => { }
};
setupJsonAutoIndent(document.getElementById('policyDocument'));
const getFileTypeIcon = (key) => {
const ext = (key.split('.').pop() || '').toLowerCase();
const iconMap = {
image: ['jpg', 'jpeg', 'png', 'gif', 'svg', 'webp', 'ico', 'bmp', 'tiff', 'tif'],
document: ['pdf', 'doc', 'docx', 'txt', 'rtf', 'odt', 'pages'],
spreadsheet: ['xls', 'xlsx', 'csv', 'ods', 'numbers'],
archive: ['zip', 'rar', '7z', 'tar', 'gz', 'bz2', 'xz', 'tgz'],
code: ['js', 'ts', 'jsx', 'tsx', 'py', 'java', 'cpp', 'c', 'h', 'hpp', 'cs', 'go', 'rs', 'rb', 'php', 'html', 'htm', 'css', 'scss', 'sass', 'less', 'json', 'xml', 'yaml', 'yml', 'md', 'sh', 'bat', 'ps1', 'sql'],
audio: ['mp3', 'wav', 'flac', 'ogg', 'aac', 'm4a', 'wma', 'aiff'],
video: ['mp4', 'avi', 'mov', 'mkv', 'webm', 'wmv', 'flv', 'm4v', 'mpeg', 'mpg'],
};
const icons = {
image: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-success flex-shrink-0" viewBox="0 0 16 16">
<path d="M6.002 5.5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0z"/>
<path d="M2.002 1a2 2 0 0 0-2 2v10a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V3a2 2 0 0 0-2-2h-12zm12 1a1 1 0 0 1 1 1v6.5l-3.777-1.947a.5.5 0 0 0-.577.093l-3.71 3.71-2.66-1.772a.5.5 0 0 0-.63.062L1.002 12V3a1 1 0 0 1 1-1h12z"/>
</svg>`,
document: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-danger flex-shrink-0" viewBox="0 0 16 16">
<path d="M14 4.5V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2h5.5L14 4.5zm-3 0A1.5 1.5 0 0 1 9.5 3V1H4a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V4.5h-2z"/>
<path d="M4.5 12.5A.5.5 0 0 1 5 12h3a.5.5 0 0 1 0 1H5a.5.5 0 0 1-.5-.5zm0-2A.5.5 0 0 1 5 10h6a.5.5 0 0 1 0 1H5a.5.5 0 0 1-.5-.5zm0-2A.5.5 0 0 1 5 8h6a.5.5 0 0 1 0 1H5a.5.5 0 0 1-.5-.5zm0-2A.5.5 0 0 1 5 6h6a.5.5 0 0 1 0 1H5a.5.5 0 0 1-.5-.5z"/>
</svg>`,
spreadsheet: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-success flex-shrink-0" viewBox="0 0 16 16">
<path d="M14 14V4.5L9.5 0H4a2 2 0 0 0-2 2v12a2 2 0 0 0 2 2h8a2 2 0 0 0 2-2zM9.5 3A1.5 1.5 0 0 0 11 4.5h2V9H3V2a1 1 0 0 1 1-1h5.5v2zM3 12v-2h2v2H3zm0 1h2v2H4a1 1 0 0 1-1-1v-1zm3 2v-2h3v2H6zm4 0v-2h3v1a1 1 0 0 1-1 1h-2zm3-3h-3v-2h3v2zm-7 0v-2h3v2H6z"/>
</svg>`,
archive: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-secondary flex-shrink-0" viewBox="0 0 16 16">
<path d="M6.5 7.5a1 1 0 0 1 1-1h1a1 1 0 0 1 1 1v.938l.4 1.599a1 1 0 0 1-.416 1.074l-.93.62a1 1 0 0 1-1.109 0l-.93-.62a1 1 0 0 1-.415-1.074l.4-1.599V7.5z"/>
<path d="M14 4.5V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2h5.5L14 4.5zm-3 0A1.5 1.5 0 0 1 9.5 3V1h-2v1h-1v1h1v1h-1v1h1v1H6V5H5V4h1V3H5V2h1V1H4a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V4.5h-2z"/>
</svg>`,
code: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-info flex-shrink-0" viewBox="0 0 16 16">
<path d="M14 4.5V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2h5.5L14 4.5zm-3 0A1.5 1.5 0 0 1 9.5 3V1H4a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V4.5h-2z"/>
<path d="M8.646 6.646a.5.5 0 0 1 .708 0l2 2a.5.5 0 0 1 0 .708l-2 2a.5.5 0 0 1-.708-.708L10.293 9 8.646 7.354a.5.5 0 0 1 0-.708zm-1.292 0a.5.5 0 0 0-.708 0l-2 2a.5.5 0 0 0 0 .708l2 2a.5.5 0 0 0 .708-.708L5.707 9l1.647-1.646a.5.5 0 0 0 0-.708z"/>
</svg>`,
audio: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-primary flex-shrink-0" viewBox="0 0 16 16">
<path d="M6 13c0 1.105-1.12 2-2.5 2S1 14.105 1 13c0-1.104 1.12-2 2.5-2s2.5.896 2.5 2zm9-2c0 1.105-1.12 2-2.5 2s-2.5-.895-2.5-2 1.12-2 2.5-2 2.5.895 2.5 2z"/>
<path fill-rule="evenodd" d="M14 11V2h1v9h-1zM6 3v10H5V3h1z"/>
<path d="M5 2.905a1 1 0 0 1 .9-.995l8-.8a1 1 0 0 1 1.1.995V3L5 4V2.905z"/>
</svg>`,
video: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-danger flex-shrink-0" viewBox="0 0 16 16">
<path d="M0 12V4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v8a2 2 0 0 1-2 2H2a2 2 0 0 1-2-2zm6.79-6.907A.5.5 0 0 0 6 5.5v5a.5.5 0 0 0 .79.407l3.5-2.5a.5.5 0 0 0 0-.814l-3.5-2.5z"/>
</svg>`,
default: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-muted flex-shrink-0" viewBox="0 0 16 16">
<path d="M14 4.5V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2h5.5L14 4.5zm-3 0A1.5 1.5 0 0 1 9.5 3V1H4a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V4.5h-2z"/>
</svg>`,
};
for (const [type, extensions] of Object.entries(iconMap)) {
if (extensions.includes(ext)) {
return icons[type];
}
}
return icons.default;
};
const selectAllCheckbox = document.querySelector('[data-select-all]');
const bulkDeleteButton = document.querySelector('[data-bulk-delete-trigger]');
const bulkDeleteLabel = bulkDeleteButton?.querySelector('[data-bulk-delete-label]');
@@ -49,6 +100,7 @@
const previewPlaceholder = document.getElementById('preview-placeholder');
const previewImage = document.getElementById('preview-image');
const previewVideo = document.getElementById('preview-video');
const previewAudio = document.getElementById('preview-audio');
const previewIframe = document.getElementById('preview-iframe');
const downloadButton = document.getElementById('downloadButton');
const presignButton = document.getElementById('presignButton');
@@ -135,18 +187,20 @@
tr.dataset.objectRow = '';
tr.dataset.key = obj.key;
tr.dataset.size = obj.size;
tr.dataset.lastModified = obj.lastModified || obj.last_modified;
tr.dataset.etag = obj.etag;
tr.dataset.previewUrl = obj.previewUrl || obj.preview_url;
tr.dataset.downloadUrl = obj.downloadUrl || obj.download_url;
tr.dataset.presignEndpoint = obj.presignEndpoint || obj.presign_endpoint;
tr.dataset.deleteEndpoint = obj.deleteEndpoint || obj.delete_endpoint;
tr.dataset.metadata = typeof obj.metadata === 'string' ? obj.metadata : JSON.stringify(obj.metadata || {});
tr.dataset.versionsEndpoint = obj.versionsEndpoint || obj.versions_endpoint;
tr.dataset.restoreTemplate = obj.restoreTemplate || obj.restore_template;
tr.dataset.tagsUrl = obj.tagsUrl || obj.tags_url;
tr.dataset.copyUrl = obj.copyUrl || obj.copy_url;
tr.dataset.moveUrl = obj.moveUrl || obj.move_url;
tr.dataset.lastModified = obj.lastModified ?? obj.last_modified ?? '';
tr.dataset.lastModifiedDisplay = obj.lastModifiedDisplay ?? obj.last_modified_display ?? new Date(obj.lastModified || obj.last_modified).toLocaleString();
tr.dataset.lastModifiedIso = obj.lastModifiedIso ?? obj.last_modified_iso ?? obj.lastModified ?? obj.last_modified ?? '';
tr.dataset.etag = obj.etag ?? '';
tr.dataset.previewUrl = obj.previewUrl ?? obj.preview_url ?? '';
tr.dataset.downloadUrl = obj.downloadUrl ?? obj.download_url ?? '';
tr.dataset.presignEndpoint = obj.presignEndpoint ?? obj.presign_endpoint ?? '';
tr.dataset.deleteEndpoint = obj.deleteEndpoint ?? obj.delete_endpoint ?? '';
tr.dataset.metadataUrl = obj.metadataUrl ?? obj.metadata_url ?? '';
tr.dataset.versionsEndpoint = obj.versionsEndpoint ?? obj.versions_endpoint ?? '';
tr.dataset.restoreTemplate = obj.restoreTemplate ?? obj.restore_template ?? '';
tr.dataset.tagsUrl = obj.tagsUrl ?? obj.tags_url ?? '';
tr.dataset.copyUrl = obj.copyUrl ?? obj.copy_url ?? '';
tr.dataset.moveUrl = obj.moveUrl ?? obj.move_url ?? '';
const keyToShow = displayKey || obj.key;
const lastModDisplay = obj.lastModifiedDisplay || obj.last_modified_display || new Date(obj.lastModified || obj.last_modified).toLocaleDateString();
@@ -156,8 +210,11 @@
<input class="form-check-input" type="checkbox" data-object-select aria-label="Select ${escapeHtml(obj.key)}" />
</td>
<td class="object-key text-break" title="${escapeHtml(obj.key)}">
<div class="fw-medium">${escapeHtml(keyToShow)}</div>
<div class="text-muted small">Modified ${escapeHtml(lastModDisplay)}</div>
<div class="fw-medium d-flex align-items-center gap-2">
${getFileTypeIcon(obj.key)}
<span>${escapeHtml(keyToShow)}</span>
</div>
<div class="text-muted small ms-4 ps-2">Modified ${escapeHtml(lastModDisplay)}</div>
</td>
<td class="text-end text-nowrap">
<span class="text-muted small">${formatBytes(obj.size)}</span>
@@ -323,7 +380,7 @@
const bKey = b.type === 'folder' ? b.path : b.data.key;
return aKey.localeCompare(bKey);
});
return items;
};
@@ -400,14 +457,14 @@
} else {
renderVirtualRows();
}
updateFolderViewStatus();
};
const updateFolderViewStatus = () => {
const folderViewStatusEl = document.getElementById('folder-view-status');
if (!folderViewStatusEl) return;
if (currentPrefix) {
const folderCount = visibleItems.filter(i => i.type === 'folder').length;
const fileCount = visibleItems.filter(i => i.type === 'file').length;
@@ -425,12 +482,13 @@
size: obj.size,
lastModified: obj.last_modified,
lastModifiedDisplay: obj.last_modified_display,
lastModifiedIso: obj.last_modified_iso,
etag: obj.etag,
previewUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.preview, key) : '',
downloadUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.download, key) : '',
presignEndpoint: urlTemplates ? buildUrlFromTemplate(urlTemplates.presign, key) : '',
deleteEndpoint: urlTemplates ? buildUrlFromTemplate(urlTemplates.delete, key) : '',
metadata: '{}',
metadataUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.metadata, key) : '',
versionsEndpoint: urlTemplates ? buildUrlFromTemplate(urlTemplates.versions, key) : '',
restoreTemplate: urlTemplates ? urlTemplates.restore.replace('KEY_PLACEHOLDER', encodeURIComponent(key).replace(/%2F/g, '/')) : '',
tagsUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.tags, key) : '',
@@ -548,7 +606,7 @@
} else if (msg.type === 'done') {
streamingComplete = true;
}
} catch (e) {}
} catch (e) { }
}
flushPendingStreamObjects();
@@ -559,9 +617,6 @@
if (loadMoreStatus) {
loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()} objects`;
}
if (typeof updateLoadMoreButton === 'function') {
updateLoadMoreButton();
}
refreshVirtualList();
renderBreadcrumb(currentPrefix);
@@ -640,10 +695,6 @@
}
}
if (typeof updateLoadMoreButton === 'function') {
updateLoadMoreButton();
}
refreshVirtualList();
renderBreadcrumb(currentPrefix);
@@ -694,20 +745,20 @@
selectCheckbox?.addEventListener('change', () => {
toggleRowSelection(row, selectCheckbox.checked);
});
if (selectedRows.has(row.dataset.key)) {
selectCheckbox.checked = true;
row.classList.add('table-active');
}
});
const folderRows = document.querySelectorAll('.folder-row');
folderRows.forEach(row => {
if (row.dataset.handlersAttached) return;
row.dataset.handlersAttached = 'true';
const folderPath = row.dataset.folderPath;
const checkbox = row.querySelector('[data-folder-select]');
checkbox?.addEventListener('change', (e) => {
e.stopPropagation();
@@ -727,7 +778,7 @@
e.stopPropagation();
navigateToFolder(folderPath);
});
row.addEventListener('click', (e) => {
if (e.target.closest('[data-folder-select]') || e.target.closest('button')) return;
navigateToFolder(folderPath);
@@ -739,24 +790,11 @@
const scrollSentinel = document.getElementById('scroll-sentinel');
const scrollContainer = document.querySelector('.objects-table-container');
const loadMoreBtn = document.getElementById('load-more-btn');
if (scrollContainer) {
scrollContainer.addEventListener('scroll', handleVirtualScroll, { passive: true });
}
loadMoreBtn?.addEventListener('click', () => {
if (hasMoreObjects && !isLoadingObjects) {
loadObjects(true);
}
});
function updateLoadMoreButton() {
if (loadMoreBtn) {
loadMoreBtn.classList.toggle('d-none', !hasMoreObjects);
}
}
if (scrollSentinel && scrollContainer) {
const containerObserver = new IntersectionObserver((entries) => {
entries.forEach(entry => {
@@ -770,7 +808,7 @@
threshold: 0
});
containerObserver.observe(scrollSentinel);
const viewportObserver = new IntersectionObserver((entries) => {
entries.forEach(entry => {
if (entry.isIntersecting && hasMoreObjects && !isLoadingObjects) {
@@ -785,10 +823,6 @@
viewportObserver.observe(scrollSentinel);
}
const pageSizeSelect = document.getElementById('page-size-select');
pageSizeSelect?.addEventListener('change', (e) => {
pageSize = parseInt(e.target.value, 10);
});
if (objectsApiUrl) {
loadObjects();
@@ -805,7 +839,7 @@
if (e.target.closest('[data-delete-object]') || e.target.closest('[data-object-select]') || e.target.closest('a')) {
return;
}
selectRow(row);
});
}
@@ -815,14 +849,14 @@
const getFoldersAtPrefix = (prefix) => {
const folders = new Set();
const files = [];
allObjects.forEach(obj => {
const key = obj.key;
if (!key.startsWith(prefix)) return;
const remainder = key.slice(prefix.length);
const slashIndex = remainder.indexOf('/');
if (slashIndex === -1) {
files.push(obj);
@@ -832,7 +866,7 @@
folders.add(prefix + folderName);
}
});
return { folders: Array.from(folders).sort(), files };
};
@@ -843,12 +877,12 @@
const renderBreadcrumb = (prefix) => {
if (!folderBreadcrumb) return;
if (!prefix && !hasFolders()) {
folderBreadcrumb.classList.add('d-none');
return;
}
folderBreadcrumb.classList.remove('d-none');
const ol = folderBreadcrumb.querySelector('ol');
ol.innerHTML = '';
@@ -883,7 +917,7 @@
accumulated += part + '/';
const li = document.createElement('li');
li.className = 'breadcrumb-item';
if (index === parts.length - 1) {
li.classList.add('active');
li.setAttribute('aria-current', 'page');
@@ -916,12 +950,12 @@
const folderName = displayName || folderPath.slice(currentPrefix.length).replace(/\/$/, '');
const { count: objectCount, mayHaveMore } = countObjectsInFolder(folderPath);
const countDisplay = mayHaveMore ? `${objectCount}+` : objectCount;
const tr = document.createElement('tr');
tr.className = 'folder-row';
tr.dataset.folderPath = folderPath;
tr.style.cursor = 'pointer';
tr.innerHTML = `
<td class="text-center align-middle" onclick="event.stopPropagation();">
<input class="form-check-input" type="checkbox" data-folder-select="${escapeHtml(folderPath)}" aria-label="Select folder" />
@@ -946,7 +980,7 @@
</button>
</td>
`;
return tr;
};
@@ -971,7 +1005,7 @@
const renderObjectsView = () => {
if (!objectsTableBody) return;
const { folders, files } = getFoldersAtPrefix(currentPrefix);
objectsTableBody.innerHTML = '';
@@ -1378,15 +1412,30 @@
}
};
const INTERNAL_METADATA_KEYS = new Set([
'__etag__',
'__size__',
'__content_type__',
'__last_modified__',
'__storage_class__',
]);
const isInternalKey = (key) => INTERNAL_METADATA_KEYS.has(key.toLowerCase());
const renderMetadata = (metadata) => {
if (!previewMetadata || !previewMetadataList) return;
previewMetadataList.innerHTML = '';
if (!metadata || Object.keys(metadata).length === 0) {
if (!metadata) {
previewMetadata.classList.add('d-none');
return;
}
const userMetadata = Object.entries(metadata).filter(([key]) => !isInternalKey(key));
if (userMetadata.length === 0) {
previewMetadata.classList.add('d-none');
return;
}
previewMetadata.classList.remove('d-none');
Object.entries(metadata).forEach(([key, value]) => {
userMetadata.forEach(([key, value]) => {
const wrapper = document.createElement('div');
wrapper.className = 'metadata-entry';
const label = document.createElement('div');
@@ -1421,11 +1470,11 @@
const metadata = version.metadata && typeof version.metadata === 'object' ? Object.entries(version.metadata) : [];
const metadataHtml = metadata.length
? `<div class="mt-3"><div class="fw-semibold text-uppercase small">Metadata</div><hr class="my-2"><div class="metadata-stack small">${metadata
.map(
([key, value]) =>
`<div class="metadata-entry"><div class="metadata-key small">${escapeHtml(key)}</div><div class="metadata-value text-break">${escapeHtml(value)}</div></div>`
)
.join('')}</div></div>`
.map(
([key, value]) =>
`<div class="metadata-entry"><div class="metadata-key small">${escapeHtml(key)}</div><div class="metadata-value text-break">${escapeHtml(value)}</div></div>`
)
.join('')}</div></div>`
: '';
const summaryHtml = `
<div class="small">
@@ -1697,7 +1746,7 @@
if (!endpoint) {
versionPanel.classList.add('d-none');
return;
}
}
versionPanel.classList.remove('d-none');
if (!force && versionsCache.has(endpoint)) {
renderVersionEntries(versionsCache.get(endpoint), row);
@@ -1778,9 +1827,10 @@
}
const resetPreviewMedia = () => {
[previewImage, previewVideo, previewIframe].forEach((el) => {
[previewImage, previewVideo, previewAudio, previewIframe].forEach((el) => {
if (!el) return;
el.classList.add('d-none');
if (el.tagName === 'VIDEO') {
if (el.tagName === 'VIDEO' || el.tagName === 'AUDIO') {
el.pause();
el.removeAttribute('src');
}
@@ -1791,32 +1841,31 @@
previewPlaceholder.classList.remove('d-none');
};
function metadataFromRow(row) {
if (!row || !row.dataset.metadata) {
return null;
}
async function fetchMetadata(metadataUrl) {
if (!metadataUrl) return null;
try {
const parsed = JSON.parse(row.dataset.metadata);
if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
return parsed;
const resp = await fetch(metadataUrl);
if (resp.ok) {
const data = await resp.json();
return data.metadata || {};
}
} catch (err) {
console.warn('Failed to parse metadata for row', err);
} catch (e) {
console.warn('Failed to load metadata', e);
}
return null;
}
function selectRow(row) {
async function selectRow(row) {
document.querySelectorAll('[data-object-row]').forEach((r) => r.classList.remove('table-active'));
row.classList.add('table-active');
previewEmpty.classList.add('d-none');
previewPanel.classList.remove('d-none');
activeRow = row;
renderMetadata(metadataFromRow(row));
renderMetadata(null);
previewKey.textContent = row.dataset.key;
previewSize.textContent = formatBytes(Number(row.dataset.size));
previewModified.textContent = row.dataset.lastModified;
previewModified.textContent = row.dataset.lastModifiedIso || row.dataset.lastModified;
previewEtag.textContent = row.dataset.etag;
downloadButton.href = row.dataset.downloadUrl;
downloadButton.classList.remove('disabled');
@@ -1835,18 +1884,36 @@
resetPreviewMedia();
const previewUrl = row.dataset.previewUrl;
const lower = row.dataset.key.toLowerCase();
if (lower.match(/\.(png|jpg|jpeg|gif|webp|svg)$/)) {
if (previewUrl && lower.match(/\.(png|jpg|jpeg|gif|webp|svg|ico|bmp)$/)) {
previewImage.src = previewUrl;
previewImage.classList.remove('d-none');
previewPlaceholder.classList.add('d-none');
} else if (lower.match(/\.(mp4|webm|ogg)$/)) {
} else if (previewUrl && lower.match(/\.(mp4|webm|ogv|mov|avi|mkv)$/)) {
previewVideo.src = previewUrl;
previewVideo.classList.remove('d-none');
previewPlaceholder.classList.add('d-none');
} else if (lower.match(/\.(txt|log|json|md|csv)$/)) {
} else if (previewUrl && lower.match(/\.(mp3|wav|flac|ogg|aac|m4a|wma)$/)) {
previewAudio.src = previewUrl;
previewAudio.classList.remove('d-none');
previewPlaceholder.classList.add('d-none');
} else if (previewUrl && lower.match(/\.(pdf)$/)) {
previewIframe.src = previewUrl;
previewIframe.style.minHeight = '500px';
previewIframe.classList.remove('d-none');
previewPlaceholder.classList.add('d-none');
} else if (previewUrl && lower.match(/\.(txt|log|json|md|csv|xml|html|htm|js|ts|py|java|c|cpp|h|css|scss|yaml|yml|toml|ini|cfg|conf|sh|bat)$/)) {
previewIframe.src = previewUrl;
previewIframe.style.minHeight = '200px';
previewIframe.classList.remove('d-none');
previewPlaceholder.classList.add('d-none');
}
const metadataUrl = row.dataset.metadataUrl;
if (metadataUrl) {
const metadata = await fetchMetadata(metadataUrl);
if (activeRow === row) {
renderMetadata(metadata);
}
}
}
@@ -1937,7 +2004,7 @@
textArea.remove();
return success;
};
let copied = false;
if (navigator.clipboard && window.isSecureContext) {
@@ -1952,7 +2019,7 @@
if (!copied) {
copied = fallbackCopy(presignLink.value);
}
if (copied) {
copyPresignLink.textContent = 'Copied!';
window.setTimeout(() => {
@@ -2064,7 +2131,7 @@
uploadCancelled = true;
activeXHRs.forEach(xhr => {
try { xhr.abort(); } catch {}
try { xhr.abort(); } catch { }
});
activeXHRs = [];
@@ -2073,7 +2140,7 @@
const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;
try {
await fetch(abortUrl, { method: 'DELETE', headers: { 'X-CSRFToken': csrfToken || '' } });
} catch {}
} catch { }
activeMultipartUpload = null;
}
@@ -2299,7 +2366,7 @@
if (!uploadCancelled) {
try {
await fetch(abortUrl, { method: 'DELETE', headers: { 'X-CSRFToken': csrfToken || '' } });
} catch {}
} catch { }
}
activeMultipartUpload = null;
throw err;
@@ -2612,7 +2679,7 @@
uploadForm.addEventListener('submit', async (event) => {
const files = uploadFileInput.files;
if (!files || files.length === 0) return;
const keyPrefix = (uploadKeyPrefix?.value || '').trim();
if (files.length === 1 && !keyPrefix) {
@@ -2633,7 +2700,7 @@
uploadSubmitBtn.disabled = true;
if (uploadBtnText) uploadBtnText.textContent = 'Uploading...';
}
await performBulkUpload(Array.from(files));
});
@@ -2834,7 +2901,7 @@
}
}
if (statusAlert) statusAlert.classList.add('d-none');
// Update status badge to show "Paused" with warning styling
if (statusBadge) {
statusBadge.className = 'badge bg-warning-subtle text-warning px-3 py-2';
@@ -2844,14 +2911,14 @@
</svg>
<span>Paused (Endpoint Unavailable)</span>`;
}
// Hide the pause button since replication is effectively already paused
if (pauseForm) pauseForm.classList.add('d-none');
} else {
// Hide warning and show success alert
if (endpointWarning) endpointWarning.classList.add('d-none');
if (statusAlert) statusAlert.classList.remove('d-none');
// Restore status badge to show "Enabled"
if (statusBadge) {
statusBadge.className = 'badge bg-success-subtle text-success px-3 py-2';
@@ -2861,7 +2928,7 @@
</svg>
<span>Enabled</span>`;
}
// Show the pause button
if (pauseForm) pauseForm.classList.remove('d-none');
}
@@ -3098,7 +3165,7 @@
const targetBucketInput = document.getElementById('target_bucket');
const targetBucketFeedback = document.getElementById('target_bucket_feedback');
const validateBucketName = (name) => {
if (!name) return { valid: false, error: 'Bucket name is required' };
if (name.length < 3) return { valid: false, error: 'Bucket name must be at least 3 characters' };
@@ -3201,7 +3268,7 @@
const loadLifecycleRules = async () => {
if (!lifecycleUrl || !lifecycleRulesBody) return;
lifecycleRulesBody.innerHTML = '<tr><td colspan="6" class="text-center text-muted py-4"><div class="spinner-border spinner-border-sm me-2" role="status"></div>Loading...</td></tr>';
lifecycleRulesBody.innerHTML = '<tr><td colspan="7" class="text-center text-muted py-4"><div class="spinner-border spinner-border-sm me-2" role="status"></div>Loading...</td></tr>';
try {
const resp = await fetch(lifecycleUrl);
const data = await resp.json();
@@ -3209,19 +3276,20 @@
lifecycleRules = data.rules || [];
renderLifecycleRules();
} catch (err) {
lifecycleRulesBody.innerHTML = `<tr><td colspan="6" class="text-center text-danger py-4">${escapeHtml(err.message)}</td></tr>`;
lifecycleRulesBody.innerHTML = `<tr><td colspan="7" class="text-center text-danger py-4">${escapeHtml(err.message)}</td></tr>`;
}
};
const renderLifecycleRules = () => {
if (!lifecycleRulesBody) return;
if (lifecycleRules.length === 0) {
lifecycleRulesBody.innerHTML = '<tr><td colspan="6" class="text-center text-muted py-4">No lifecycle rules configured</td></tr>';
lifecycleRulesBody.innerHTML = '<tr><td colspan="7" class="text-center text-muted py-4">No lifecycle rules configured</td></tr>';
return;
}
lifecycleRulesBody.innerHTML = lifecycleRules.map((rule, idx) => {
const expiration = rule.Expiration?.Days ? `${rule.Expiration.Days}d` : '-';
const noncurrent = rule.NoncurrentVersionExpiration?.NoncurrentDays ? `${rule.NoncurrentVersionExpiration.NoncurrentDays}d` : '-';
const abortMpu = rule.AbortIncompleteMultipartUpload?.DaysAfterInitiation ? `${rule.AbortIncompleteMultipartUpload.DaysAfterInitiation}d` : '-';
const statusClass = rule.Status === 'Enabled' ? 'bg-success' : 'bg-secondary';
return `<tr>
<td><code class="small">${escapeHtml(rule.ID || '')}</code></td>
@@ -3229,6 +3297,7 @@
<td><span class="badge ${statusClass}">${escapeHtml(rule.Status)}</span></td>
<td class="small">${expiration}</td>
<td class="small">${noncurrent}</td>
<td class="small">${abortMpu}</td>
<td class="text-end">
<div class="btn-group btn-group-sm">
<button class="btn btn-outline-secondary" onclick="editLifecycleRule(${idx})" title="Edit rule">
@@ -3514,7 +3583,7 @@
});
});
document.getElementById('objects-table')?.addEventListener('show.bs.dropdown', function(e) {
document.getElementById('objects-table')?.addEventListener('show.bs.dropdown', function (e) {
const dropdown = e.target.closest('.dropdown');
const menu = dropdown?.querySelector('.dropdown-menu');
const btn = e.target;
@@ -3706,8 +3775,8 @@
});
const originalSelectRow = selectRow;
selectRow = (row) => {
originalSelectRow(row);
selectRow = async (row) => {
await originalSelectRow(row);
loadObjectTags(row);
};
@@ -3813,18 +3882,18 @@
var form = document.getElementById(formId);
if (!form) return;
form.addEventListener('submit', function(e) {
form.addEventListener('submit', function (e) {
e.preventDefault();
window.UICore.submitFormAjax(form, {
successMessage: options.successMessage || 'Operation completed',
onSuccess: function(data) {
onSuccess: function (data) {
if (options.onSuccess) options.onSuccess(data);
if (options.closeModal) {
var modal = bootstrap.Modal.getInstance(document.getElementById(options.closeModal));
if (modal) modal.hide();
}
if (options.reload) {
setTimeout(function() { location.reload(); }, 500);
setTimeout(function () { location.reload(); }, 500);
}
}
});
@@ -3879,11 +3948,11 @@
var newForm = document.getElementById('enableVersioningForm');
if (newForm) {
newForm.setAttribute('action', window.BucketDetailConfig?.endpoints?.versioning || '');
newForm.addEventListener('submit', function(e) {
newForm.addEventListener('submit', function (e) {
e.preventDefault();
window.UICore.submitFormAjax(newForm, {
successMessage: 'Versioning enabled',
onSuccess: function() {
onSuccess: function () {
updateVersioningBadge(true);
updateVersioningCard(true);
}
@@ -3973,7 +4042,7 @@
'<p class="mb-0 small">No bucket policy is attached. Access is controlled by IAM policies only.</p></div>';
}
}
document.querySelectorAll('.preset-btn').forEach(function(btn) {
document.querySelectorAll('.preset-btn').forEach(function (btn) {
btn.classList.remove('active');
if (btn.dataset.preset === preset) btn.classList.add('active');
});
@@ -3987,7 +4056,7 @@
interceptForm('enableVersioningForm', {
successMessage: 'Versioning enabled',
onSuccess: function(data) {
onSuccess: function (data) {
updateVersioningBadge(true);
updateVersioningCard(true);
}
@@ -3996,7 +4065,7 @@
interceptForm('suspendVersioningForm', {
successMessage: 'Versioning suspended',
closeModal: 'suspendVersioningModal',
onSuccess: function(data) {
onSuccess: function (data) {
updateVersioningBadge(false);
updateVersioningCard(false);
}
@@ -4004,36 +4073,36 @@
interceptForm('encryptionForm', {
successMessage: 'Encryption settings saved',
onSuccess: function(data) {
onSuccess: function (data) {
updateEncryptionCard(data.enabled !== false, data.algorithm || 'AES256');
}
});
interceptForm('quotaForm', {
successMessage: 'Quota settings saved',
onSuccess: function(data) {
onSuccess: function (data) {
updateQuotaCard(data.has_quota, data.max_bytes, data.max_objects);
}
});
interceptForm('bucketPolicyForm', {
successMessage: 'Bucket policy saved',
onSuccess: function(data) {
onSuccess: function (data) {
var policyModeEl = document.getElementById('policyMode');
var policyPresetEl = document.getElementById('policyPreset');
var preset = policyModeEl && policyModeEl.value === 'delete' ? 'private' :
(policyPresetEl?.value || 'custom');
(policyPresetEl?.value || 'custom');
updatePolicyCard(preset !== 'private', preset);
}
});
var deletePolicyForm = document.getElementById('deletePolicyForm');
if (deletePolicyForm) {
deletePolicyForm.addEventListener('submit', function(e) {
deletePolicyForm.addEventListener('submit', function (e) {
e.preventDefault();
window.UICore.submitFormAjax(deletePolicyForm, {
successMessage: 'Bucket policy deleted',
onSuccess: function(data) {
onSuccess: function (data) {
var modal = bootstrap.Modal.getInstance(document.getElementById('deletePolicyModal'));
if (modal) modal.hide();
updatePolicyCard(false, 'private');
@@ -4046,13 +4115,13 @@
var disableEncBtn = document.getElementById('disableEncryptionBtn');
if (disableEncBtn) {
disableEncBtn.addEventListener('click', function() {
disableEncBtn.addEventListener('click', function () {
var form = document.getElementById('encryptionForm');
if (!form) return;
document.getElementById('encryptionAction').value = 'disable';
window.UICore.submitFormAjax(form, {
successMessage: 'Encryption disabled',
onSuccess: function(data) {
onSuccess: function (data) {
document.getElementById('encryptionAction').value = 'enable';
updateEncryptionCard(false, null);
}
@@ -4062,13 +4131,13 @@
var removeQuotaBtn = document.getElementById('removeQuotaBtn');
if (removeQuotaBtn) {
removeQuotaBtn.addEventListener('click', function() {
removeQuotaBtn.addEventListener('click', function () {
var form = document.getElementById('quotaForm');
if (!form) return;
document.getElementById('quotaAction').value = 'remove';
window.UICore.submitFormAjax(form, {
successMessage: 'Quota removed',
onSuccess: function(data) {
onSuccess: function (data) {
document.getElementById('quotaAction').value = 'set';
updateQuotaCard(false, null, null);
}
@@ -4082,39 +4151,39 @@
fetch(window.location.pathname + '?tab=replication', {
headers: { 'X-Requested-With': 'XMLHttpRequest' }
})
.then(function(resp) { return resp.text(); })
.then(function(html) {
var parser = new DOMParser();
var doc = parser.parseFromString(html, 'text/html');
var newPane = doc.getElementById('replication-pane');
if (newPane) {
replicationPane.innerHTML = newPane.innerHTML;
initReplicationForms();
initReplicationStats();
}
})
.catch(function(err) {
console.error('Failed to reload replication pane:', err);
});
.then(function (resp) { return resp.text(); })
.then(function (html) {
var parser = new DOMParser();
var doc = parser.parseFromString(html, 'text/html');
var newPane = doc.getElementById('replication-pane');
if (newPane) {
replicationPane.innerHTML = newPane.innerHTML;
initReplicationForms();
initReplicationStats();
}
})
.catch(function (err) {
console.error('Failed to reload replication pane:', err);
});
}
function initReplicationForms() {
document.querySelectorAll('form[action*="replication"]').forEach(function(form) {
document.querySelectorAll('form[action*="replication"]').forEach(function (form) {
if (form.dataset.ajaxBound) return;
form.dataset.ajaxBound = 'true';
var actionInput = form.querySelector('input[name="action"]');
if (!actionInput) return;
var action = actionInput.value;
form.addEventListener('submit', function(e) {
form.addEventListener('submit', function (e) {
e.preventDefault();
var msg = action === 'pause' ? 'Replication paused' :
action === 'resume' ? 'Replication resumed' :
action === 'delete' ? 'Replication disabled' :
action === 'create' ? 'Replication configured' : 'Operation completed';
action === 'resume' ? 'Replication resumed' :
action === 'delete' ? 'Replication disabled' :
action === 'create' ? 'Replication configured' : 'Operation completed';
window.UICore.submitFormAjax(form, {
successMessage: msg,
onSuccess: function(data) {
onSuccess: function (data) {
var modal = bootstrap.Modal.getInstance(document.getElementById('disableReplicationModal'));
if (modal) modal.hide();
reloadReplicationPane();
@@ -4136,14 +4205,14 @@
var bytesEl = statsContainer.querySelector('[data-stat="bytes"]');
fetch(statusEndpoint)
.then(function(resp) { return resp.json(); })
.then(function(data) {
.then(function (resp) { return resp.json(); })
.then(function (data) {
if (syncedEl) syncedEl.textContent = data.objects_synced || 0;
if (pendingEl) pendingEl.textContent = data.objects_pending || 0;
if (orphanedEl) orphanedEl.textContent = data.objects_orphaned || 0;
if (bytesEl) bytesEl.textContent = formatBytes(data.bytes_synced || 0);
})
.catch(function(err) {
.catch(function (err) {
console.error('Failed to load replication stats:', err);
});
}
@@ -4153,10 +4222,10 @@
var deleteBucketForm = document.getElementById('deleteBucketForm');
if (deleteBucketForm) {
deleteBucketForm.addEventListener('submit', function(e) {
deleteBucketForm.addEventListener('submit', function (e) {
e.preventDefault();
window.UICore.submitFormAjax(deleteBucketForm, {
onSuccess: function() {
onSuccess: function () {
sessionStorage.setItem('flashMessage', JSON.stringify({ title: 'Bucket deleted', variant: 'success' }));
window.location.href = window.BucketDetailConfig?.endpoints?.bucketsOverview || '/ui/buckets';
}

View File

@@ -67,12 +67,14 @@
</button>
</li>
{% endif %}
{% if can_edit_policy %}
{% if can_manage_lifecycle %}
<li class="nav-item" role="presentation">
<button class="nav-link {{ 'active' if active_tab == 'lifecycle' else '' }}" id="lifecycle-tab" data-bs-toggle="tab" data-bs-target="#lifecycle-pane" type="button" role="tab" aria-controls="lifecycle-pane" aria-selected="{{ 'true' if active_tab == 'lifecycle' else 'false' }}">
Lifecycle
</button>
</li>
{% endif %}
{% if can_manage_cors %}
<li class="nav-item" role="presentation">
<button class="nav-link {{ 'active' if active_tab == 'cors' else '' }}" id="cors-tab" data-bs-toggle="tab" data-bs-target="#cors-pane" type="button" role="tab" aria-controls="cors-pane" aria-selected="{{ 'true' if active_tab == 'cors' else 'false' }}">
CORS
@@ -187,20 +189,6 @@
</div>
<span id="load-more-status" class="text-muted"></span>
<span id="folder-view-status" class="text-muted d-none"></span>
<button id="load-more-btn" class="btn btn-link btn-sm p-0 d-none" style="font-size: 0.75rem;">Load more</button>
</div>
<div class="d-flex align-items-center gap-1">
<span class="text-muted">Batch</span>
<select id="page-size-select" class="form-select form-select-sm py-0" style="width: auto; font-size: 0.75rem;" title="Number of objects to load per batch">
<option value="1000">1K</option>
<option value="5000" selected>5K</option>
<option value="10000">10K</option>
<option value="25000">25K</option>
<option value="50000">50K</option>
<option value="75000">75K</option>
<option value="100000">100K</option>
</select>
<span class="text-muted">per batch</span>
</div>
</div>
</div>
@@ -332,6 +320,7 @@
</div>
<img id="preview-image" class="img-fluid d-none w-100" alt="Object preview" style="display: block;" />
<video id="preview-video" class="w-100 d-none" controls style="display: block;"></video>
<audio id="preview-audio" class="w-100 d-none" controls style="display: block;"></audio>
<iframe id="preview-iframe" class="w-100 d-none" loading="lazy" style="min-height: 200px;"></iframe>
</div>
</div>
@@ -1532,7 +1521,7 @@
</div>
{% endif %}
{% if can_edit_policy %}
{% if can_manage_lifecycle %}
<div class="tab-pane fade {{ 'show active' if active_tab == 'lifecycle' else '' }}" id="lifecycle-pane" role="tabpanel" aria-labelledby="lifecycle-tab" tabindex="0">
{% if not lifecycle_enabled %}
<div class="alert alert-warning d-flex align-items-start mb-4" role="alert">
@@ -1574,12 +1563,13 @@
<th>Status</th>
<th>Expiration</th>
<th>Noncurrent</th>
<th>Abort MPU</th>
<th class="text-end">Actions</th>
</tr>
</thead>
<tbody id="lifecycle-rules-body">
<tr>
<td colspan="6" class="text-center text-muted py-4">
<td colspan="7" class="text-center text-muted py-4">
<div class="spinner-border spinner-border-sm me-2" role="status"></div>
Loading...
</td>
@@ -1690,7 +1680,9 @@
</div>
</div>
</div>
{% endif %}
{% if can_manage_cors %}
<div class="tab-pane fade {{ 'show active' if active_tab == 'cors' else '' }}" id="cors-pane" role="tabpanel" aria-labelledby="cors-tab" tabindex="0">
<div class="row g-4">
<div class="col-lg-8">

View File

@@ -51,7 +51,7 @@
</div>
<div>
<h5 class="bucket-name text-break">{{ bucket.meta.name }}</h5>
<small class="text-muted">Created {{ bucket.meta.created_at.strftime('%b %d, %Y') }}</small>
<small class="text-muted">Created {{ bucket.meta.created_at | format_datetime }}</small>
</div>
</div>
<span class="badge {{ bucket.access_badge }} bucket-access-badge">{{ bucket.access_label }}</span>

View File

@@ -39,6 +39,8 @@
<li><a href="#quotas">Bucket Quotas</a></li>
<li><a href="#encryption">Encryption</a></li>
<li><a href="#lifecycle">Lifecycle Rules</a></li>
<li><a href="#metrics">Metrics History</a></li>
<li><a href="#operation-metrics">Operation Metrics</a></li>
<li><a href="#troubleshooting">Troubleshooting</a></li>
</ul>
</div>
@@ -181,6 +183,24 @@ python run.py --mode ui
<td><code>true</code></td>
<td>Enable file logging.</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Metrics History Settings</td>
</tr>
<tr>
<td><code>METRICS_HISTORY_ENABLED</code></td>
<td><code>false</code></td>
<td>Enable metrics history recording and charts (opt-in).</td>
</tr>
<tr>
<td><code>METRICS_HISTORY_RETENTION_HOURS</code></td>
<td><code>24</code></td>
<td>How long to retain metrics history data.</td>
</tr>
<tr>
<td><code>METRICS_HISTORY_INTERVAL_MINUTES</code></td>
<td><code>5</code></td>
<td>Interval between history snapshots.</td>
</tr>
</tbody>
</table>
</div>
@@ -356,11 +376,8 @@ curl -X PUT {{ api_base }}/demo/notes.txt \
-H "X-Secret-Key: &lt;secret_key&gt;" \
--data-binary @notes.txt
curl -X POST {{ api_base }}/presign/demo/notes.txt \
-H "Content-Type: application/json" \
-H "X-Access-Key: &lt;access_key&gt;" \
-H "X-Secret-Key: &lt;secret_key&gt;" \
-d '{"method":"GET", "expires_in": 900}'
# Presigned URLs are generated via the UI
# Use the "Presign" button in the object browser
</code></pre>
</div>
</div>
@@ -418,13 +435,8 @@ curl -X POST {{ api_base }}/presign/demo/notes.txt \
</tr>
<tr>
<td>GET/PUT/DELETE</td>
<td><code>/bucket-policy/&lt;bucket&gt;</code></td>
<td>Fetch, upsert, or remove a bucket policy.</td>
</tr>
<tr>
<td>POST</td>
<td><code>/presign/&lt;bucket&gt;/&lt;key&gt;</code></td>
<td>Generate SigV4 URLs for GET/PUT/DELETE with custom expiry.</td>
<td><code>/&lt;bucket&gt;?policy</code></td>
<td>Fetch, upsert, or remove a bucket policy (S3-compatible).</td>
</tr>
</tbody>
</table>
@@ -523,17 +535,16 @@ s3.complete_multipart_upload(
)</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Presigned URLs for Sharing</h3>
<pre class="mb-0"><code class="language-bash"># Generate a download link valid for 15 minutes
curl -X POST "{{ api_base }}/presign/mybucket/photo.jpg" \
-H "Content-Type: application/json" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-d '{"method": "GET", "expires_in": 900}'
<pre class="mb-0"><code class="language-text"># Generate presigned URLs via the UI:
# 1. Navigate to your bucket in the object browser
# 2. Select the object you want to share
# 3. Click the "Presign" button
# 4. Choose method (GET/PUT/DELETE) and expiration time
# 5. Copy the generated URL
# Generate an upload link (PUT) valid for 1 hour
curl -X POST "{{ api_base }}/presign/mybucket/upload.bin" \
-H "Content-Type: application/json" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-d '{"method": "PUT", "expires_in": 3600}'</code></pre>
# Supported options:
# - Method: GET (download), PUT (upload), DELETE (remove)
# - Expiration: 1 second to 7 days (604800 seconds)</code></pre>
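<p class="small text-muted mt-3">Once copied from the Presign dialog, the URL carries its own SigV4 signature in the query string, so any HTTP client can use it without auth headers. A minimal sketch (the URL placeholder below is hypothetical):</p>
<pre class="mb-0"><code class="language-python">import requests

# Paste the URL copied from the "Presign" dialog (hypothetical placeholder).
url = "&lt;presigned-url&gt;"

# GET URLs download directly; no X-Access-Key / X-Secret-Key headers needed.
resp = requests.get(url, timeout=10)
resp.raise_for_status()
with open("photo.jpg", "wb") as fh:
    fh.write(resp.content)</code></pre>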
</div>
</article>
<article id="replication" class="card shadow-sm docs-section">
@@ -976,10 +987,201 @@ curl "{{ api_base }}/&lt;bucket&gt;?lifecycle" \
</div>
</div>
</article>
<article id="troubleshooting" class="card shadow-sm docs-section">
<article id="metrics" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">13</span>
<h2 class="h4 mb-0">Metrics History</h2>
</div>
<p class="text-muted">Track CPU, memory, and disk usage over time with optional metrics history. Disabled by default to minimize overhead.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Enabling Metrics History</h3>
<p class="small text-muted">Set the environment variable to opt-in:</p>
<pre class="mb-3"><code class="language-bash"># PowerShell
$env:METRICS_HISTORY_ENABLED = "true"
python run.py
# Bash
export METRICS_HISTORY_ENABLED=true
python run.py</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Configuration Options</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Variable</th>
<th>Default</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>METRICS_HISTORY_ENABLED</code></td>
<td><code>false</code></td>
<td>Enable/disable metrics history recording</td>
</tr>
<tr>
<td><code>METRICS_HISTORY_RETENTION_HOURS</code></td>
<td><code>24</code></td>
<td>How long to keep history data (hours)</td>
</tr>
<tr>
<td><code>METRICS_HISTORY_INTERVAL_MINUTES</code></td>
<td><code>5</code></td>
<td>Interval between snapshots (minutes)</td>
</tr>
</tbody>
</table>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">API Endpoints</h3>
<pre class="mb-3"><code class="language-bash"># Get metrics history (last 24 hours by default)
curl "{{ api_base | replace('/api', '/ui') }}/metrics/history" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Get history for specific time range
curl "{{ api_base | replace('/api', '/ui') }}/metrics/history?hours=6" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Get current settings
curl "{{ api_base | replace('/api', '/ui') }}/metrics/settings" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Update settings at runtime
curl -X PUT "{{ api_base | replace('/api', '/ui') }}/metrics/settings" \
-H "Content-Type: application/json" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-d '{"enabled": true, "retention_hours": 48, "interval_minutes": 10}'</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Storage Location</h3>
<p class="small text-muted mb-3">History data is stored at:</p>
<code class="d-block mb-3">data/.myfsio.sys/config/metrics_history.json</code>
<div class="alert alert-light border mb-0">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1 flex-shrink-0" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
</svg>
<div>
<strong>UI Charts:</strong> When enabled, the Metrics dashboard displays line charts showing CPU, memory, and disk usage trends with a time range selector (1h, 6h, 24h, 7d).
</div>
</div>
</div>
</div>
</article>
<article id="operation-metrics" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">14</span>
<h2 class="h4 mb-0">Operation Metrics</h2>
</div>
<p class="text-muted">Track API request statistics including request counts, latency, error rates, and bandwidth usage. Provides real-time visibility into API operations.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Enabling Operation Metrics</h3>
<p class="small text-muted">Set the environment variable to opt-in:</p>
<pre class="mb-3"><code class="language-bash"># PowerShell
$env:OPERATION_METRICS_ENABLED = "true"
python run.py
# Bash
export OPERATION_METRICS_ENABLED=true
python run.py</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Configuration Options</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Variable</th>
<th>Default</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>OPERATION_METRICS_ENABLED</code></td>
<td><code>false</code></td>
<td>Enable/disable operation metrics collection</td>
</tr>
<tr>
<td><code>OPERATION_METRICS_INTERVAL_MINUTES</code></td>
<td><code>5</code></td>
<td>Interval between snapshots (minutes)</td>
</tr>
<tr>
<td><code>OPERATION_METRICS_RETENTION_HOURS</code></td>
<td><code>24</code></td>
<td>How long to keep history data (hours)</td>
</tr>
</tbody>
</table>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">What's Tracked</h3>
<div class="row g-3 mb-4">
<div class="col-md-6">
<div class="bg-light rounded p-3 h-100">
<h6 class="small fw-bold mb-2">Request Statistics</h6>
<ul class="small text-muted mb-0 ps-3">
<li>Request counts by HTTP method (GET, PUT, POST, DELETE)</li>
<li>Response status codes (2xx, 3xx, 4xx, 5xx)</li>
<li>Average, min, max latency</li>
<li>Bytes transferred in/out</li>
</ul>
</div>
</div>
<div class="col-md-6">
<div class="bg-light rounded p-3 h-100">
<h6 class="small fw-bold mb-2">Endpoint Breakdown</h6>
<ul class="small text-muted mb-0 ps-3">
<li><code>object</code> - Object operations (GET/PUT/DELETE)</li>
<li><code>bucket</code> - Bucket operations</li>
<li><code>ui</code> - Web UI requests</li>
<li><code>service</code> - Health checks, etc.</li>
</ul>
</div>
</div>
</div>
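<p class="small text-muted">The endpoint categories above are derived from the request path. A minimal sketch consistent with the test suite (the actual <code>classify_endpoint</code> in <code>app/operation_metrics.py</code> may handle more cases):</p>
<pre class="mb-3"><code class="language-python">def classify_endpoint(path: str) -> str:
    """Map a request path to a coarse category (sketch)."""
    parts = [p for p in path.strip("/").split("/") if p]
    if not parts:
        return "service"      # "/" and ""
    if parts[0] == "ui":
        return "ui"           # /ui, /ui/buckets, ...
    if parts[0] == "kms":
        return "kms"          # /kms, /kms/keys
    if parts[0] == "myfsio":
        return "service"      # /myfsio/health
    if len(parts) == 1:
        return "bucket"       # /mybucket, /mybucket/
    return "object"           # /mybucket/any/key.txt</code></pre>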
<h3 class="h6 text-uppercase text-muted mt-4">S3 Error Codes</h3>
<p class="small text-muted">The dashboard tracks S3 API-specific error codes like <code>NoSuchKey</code>, <code>AccessDenied</code>, <code>BucketNotFound</code>. These are separate from HTTP status codes &ndash; a 404 from the UI won't appear here, only S3 API errors.</p>
<h3 class="h6 text-uppercase text-muted mt-4">API Endpoints</h3>
<pre class="mb-3"><code class="language-bash"># Get current operation metrics
curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Get operation metrics history
curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Filter history by time range
curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history?hours=6" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Storage Location</h3>
<p class="small text-muted mb-3">Operation metrics data is stored at:</p>
<code class="d-block mb-3">data/.myfsio.sys/config/operation_metrics.json</code>
<div class="alert alert-light border mb-0">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1 flex-shrink-0" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
</svg>
<div>
<strong>UI Dashboard:</strong> When enabled, the Metrics page shows an "API Operations" section with summary cards, charts for requests by method/status/endpoint, and an S3 error codes table. Data refreshes every 5 seconds.
</div>
</div>
</div>
</div>
</article>
<article id="troubleshooting" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">15</span>
<h2 class="h4 mb-0">Troubleshooting &amp; tips</h2>
</div>
<div class="table-responsive">
@@ -1045,6 +1247,8 @@ curl "{{ api_base }}/&lt;bucket&gt;?lifecycle" \
<li><a href="#quotas">Bucket Quotas</a></li>
<li><a href="#encryption">Encryption</a></li>
<li><a href="#lifecycle">Lifecycle Rules</a></li>
<li><a href="#metrics">Metrics History</a></li>
<li><a href="#operation-metrics">Operation Metrics</a></li>
<li><a href="#troubleshooting">Troubleshooting</a></li>
</ul>
<div class="docs-sidebar-callouts">

View File

@@ -267,9 +267,164 @@
</div>
</div>
</div>
{% if operation_metrics_enabled %}
<div class="row g-4 mt-2">
<div class="col-12">
<div class="card shadow-sm border-0">
<div class="card-header bg-transparent border-0 pt-4 px-4 d-flex justify-content-between align-items-center">
<h5 class="card-title mb-0 fw-semibold">API Operations</h5>
<div class="d-flex align-items-center gap-3">
<span class="small text-muted" id="opStatus">Loading...</span>
<button class="btn btn-outline-secondary btn-sm" id="resetOpMetricsBtn" title="Reset current window">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="bi bi-arrow-counterclockwise" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 3a5 5 0 1 1-4.546 2.914.5.5 0 0 0-.908-.417A6 6 0 1 0 8 2v1z"/>
<path d="M8 4.466V.534a.25.25 0 0 0-.41-.192L5.23 2.308a.25.25 0 0 0 0 .384l2.36 1.966A.25.25 0 0 0 8 4.466z"/>
</svg>
</button>
</div>
</div>
<div class="card-body p-4">
<div class="row g-3 mb-4">
<div class="col-6 col-md-4 col-lg-2">
<div class="text-center p-3 bg-light rounded h-100">
<h4 class="fw-bold mb-1" id="opTotalRequests">0</h4>
<small class="text-muted">Requests</small>
</div>
</div>
<div class="col-6 col-md-4 col-lg-2">
<div class="text-center p-3 bg-light rounded h-100">
<h4 class="fw-bold mb-1 text-success" id="opSuccessRate">0%</h4>
<small class="text-muted">Success</small>
</div>
</div>
<div class="col-6 col-md-4 col-lg-2">
<div class="text-center p-3 bg-light rounded h-100">
<h4 class="fw-bold mb-1 text-danger" id="opErrorCount">0</h4>
<small class="text-muted">Errors</small>
</div>
</div>
<div class="col-6 col-md-4 col-lg-2">
<div class="text-center p-3 bg-light rounded h-100">
<h4 class="fw-bold mb-1 text-info" id="opAvgLatency">0ms</h4>
<small class="text-muted">Latency</small>
</div>
</div>
<div class="col-6 col-md-4 col-lg-2">
<div class="text-center p-3 bg-light rounded h-100">
<h4 class="fw-bold mb-1 text-primary" id="opBytesIn">0 B</h4>
<small class="text-muted">Bytes In</small>
</div>
</div>
<div class="col-6 col-md-4 col-lg-2">
<div class="text-center p-3 bg-light rounded h-100">
<h4 class="fw-bold mb-1 text-secondary" id="opBytesOut">0 B</h4>
<small class="text-muted">Bytes Out</small>
</div>
</div>
</div>
<div class="row g-4">
<div class="col-lg-6">
<div class="bg-light rounded p-3">
<h6 class="text-muted small fw-bold text-uppercase mb-3">Requests by Method</h6>
<div style="height: 220px; display: flex; align-items: center; justify-content: center;">
<canvas id="methodChart"></canvas>
</div>
</div>
</div>
<div class="col-lg-6">
<div class="bg-light rounded p-3">
<h6 class="text-muted small fw-bold text-uppercase mb-3">Requests by Status</h6>
<div style="height: 220px;">
<canvas id="statusChart"></canvas>
</div>
</div>
</div>
</div>
<div class="row g-4 mt-1">
<div class="col-lg-6">
<div class="bg-light rounded p-3">
<h6 class="text-muted small fw-bold text-uppercase mb-3">Requests by Endpoint</h6>
<div style="height: 180px;">
<canvas id="endpointChart"></canvas>
</div>
</div>
</div>
<div class="col-lg-6">
<div class="bg-light rounded p-3 h-100 d-flex flex-column">
<div class="d-flex justify-content-between align-items-start mb-3">
<h6 class="text-muted small fw-bold text-uppercase mb-0">S3 Error Codes</h6>
<span class="badge bg-secondary-subtle text-secondary" style="font-size: 0.65rem;" title="Tracks S3 API errors like NoSuchKey, AccessDenied, etc.">API Only</span>
</div>
<div class="flex-grow-1 d-flex flex-column" style="min-height: 150px;">
<div class="d-flex border-bottom pb-2 mb-2" style="font-size: 0.75rem;">
<div class="text-muted fw-semibold" style="flex: 1;">Code</div>
<div class="text-muted fw-semibold text-end" style="width: 60px;">Count</div>
<div class="text-muted fw-semibold text-end" style="width: 100px;">Distribution</div>
</div>
<div id="errorCodesContainer" class="flex-grow-1" style="overflow-y: auto;">
<div id="errorCodesBody">
<div class="text-muted small text-center py-4">
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="bi bi-check-circle mb-2 text-success" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="M10.97 4.97a.235.235 0 0 0-.02.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-1.071-1.05z"/>
</svg>
<div>No S3 API errors</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
{% endif %}
{% if metrics_history_enabled %}
<div class="row g-4 mt-2">
<div class="col-12">
<div class="card shadow-sm border-0">
<div class="card-header bg-transparent border-0 pt-4 px-4 d-flex justify-content-between align-items-center">
<h5 class="card-title mb-0 fw-semibold">Metrics History</h5>
<div class="d-flex gap-2 align-items-center">
<select class="form-select form-select-sm" id="historyTimeRange" style="width: auto;">
<option value="1">Last 1 hour</option>
<option value="6">Last 6 hours</option>
<option value="24" selected>Last 24 hours</option>
<option value="168">Last 7 days</option>
</select>
</div>
</div>
<div class="card-body p-4">
<div class="row">
<div class="col-md-4 mb-4">
<h6 class="text-muted small fw-bold text-uppercase mb-3">CPU Usage</h6>
<canvas id="cpuHistoryChart" height="200"></canvas>
</div>
<div class="col-md-4 mb-4">
<h6 class="text-muted small fw-bold text-uppercase mb-3">Memory Usage</h6>
<canvas id="memoryHistoryChart" height="200"></canvas>
</div>
<div class="col-md-4 mb-4">
<h6 class="text-muted small fw-bold text-uppercase mb-3">Disk Usage</h6>
<canvas id="diskHistoryChart" height="200"></canvas>
</div>
</div>
<p class="text-muted small mb-0 text-center" id="historyStatus">Loading history data...</p>
</div>
</div>
</div>
</div>
{% endif %}
{% endblock %}
{% block extra_scripts %}
{% if metrics_history_enabled or operation_metrics_enabled %}
<script src="https://cdn.jsdelivr.net/npm/chart.js@4.4.1/dist/chart.umd.min.js"></script>
{% endif %}
<script>
(function() {
var refreshInterval = 5000;
@@ -285,7 +440,7 @@
.then(function(data) {
var el;
el = document.querySelector('[data-metric="cpu_percent"]');
if (el) el.textContent = data.cpu_percent;
if (el) el.textContent = data.cpu_percent.toFixed(2);
el = document.querySelector('[data-metric="cpu_bar"]');
if (el) {
el.style.width = data.cpu_percent + '%';
@@ -298,7 +453,7 @@
}
el = document.querySelector('[data-metric="memory_percent"]');
if (el) el.textContent = data.memory.percent;
if (el) el.textContent = data.memory.percent.toFixed(2);
el = document.querySelector('[data-metric="memory_bar"]');
if (el) el.style.width = data.memory.percent + '%';
el = document.querySelector('[data-metric="memory_used"]');
@@ -307,7 +462,7 @@
if (el) el.textContent = data.memory.total;
el = document.querySelector('[data-metric="disk_percent"]');
if (el) el.textContent = data.disk.percent;
if (el) el.textContent = data.disk.percent.toFixed(2);
el = document.querySelector('[data-metric="disk_bar"]');
if (el) {
el.style.width = data.disk.percent + '%';
@@ -372,5 +527,369 @@
startPolling();
})();
{% if operation_metrics_enabled %}
(function() {
var methodChart = null;
var statusChart = null;
var endpointChart = null;
var opStatus = document.getElementById('opStatus');
var opTimer = null;
var methodColors = {
'GET': '#0d6efd',
'PUT': '#198754',
'POST': '#ffc107',
'DELETE': '#dc3545',
'HEAD': '#6c757d',
'OPTIONS': '#0dcaf0'
};
var statusColors = {
'2xx': '#198754',
'3xx': '#0dcaf0',
'4xx': '#ffc107',
'5xx': '#dc3545'
};
var endpointColors = {
'object': '#0d6efd',
'bucket': '#198754',
'ui': '#6c757d',
'service': '#0dcaf0',
'kms': '#ffc107'
};
function formatBytes(bytes) {
if (bytes === 0) return '0 B';
var k = 1024;
var sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
var i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + ' ' + sizes[i];
}
function initOpCharts() {
var methodCtx = document.getElementById('methodChart');
var statusCtx = document.getElementById('statusChart');
var endpointCtx = document.getElementById('endpointChart');
if (methodCtx) {
methodChart = new Chart(methodCtx, {
type: 'doughnut',
data: {
labels: [],
datasets: [{
data: [],
backgroundColor: []
}]
},
options: {
responsive: true,
maintainAspectRatio: false,
animation: false,
plugins: {
legend: { position: 'right', labels: { boxWidth: 12, font: { size: 11 } } }
}
}
});
}
if (statusCtx) {
statusChart = new Chart(statusCtx, {
type: 'bar',
data: {
labels: [],
datasets: [{
data: [],
backgroundColor: []
}]
},
options: {
responsive: true,
maintainAspectRatio: false,
animation: false,
plugins: { legend: { display: false } },
scales: {
y: { beginAtZero: true, ticks: { stepSize: 1 } }
}
}
});
}
if (endpointCtx) {
endpointChart = new Chart(endpointCtx, {
type: 'bar',
data: {
labels: [],
datasets: [{
data: [],
backgroundColor: []
}]
},
options: {
responsive: true,
maintainAspectRatio: false,
indexAxis: 'y',
animation: false,
plugins: { legend: { display: false } },
scales: {
x: { beginAtZero: true, ticks: { stepSize: 1 } }
}
}
});
}
}
function updateOpMetrics() {
if (document.hidden) return;
fetch('/ui/metrics/operations')
.then(function(r) { return r.json(); })
.then(function(data) {
if (!data.enabled || !data.stats) {
if (opStatus) opStatus.textContent = 'Operation metrics not available';
return;
}
var stats = data.stats;
var totals = stats.totals || {};
var totalEl = document.getElementById('opTotalRequests');
var successEl = document.getElementById('opSuccessRate');
var errorEl = document.getElementById('opErrorCount');
var latencyEl = document.getElementById('opAvgLatency');
var bytesInEl = document.getElementById('opBytesIn');
var bytesOutEl = document.getElementById('opBytesOut');
if (totalEl) totalEl.textContent = totals.count || 0;
if (successEl) {
var rate = totals.count > 0 ? ((totals.success_count / totals.count) * 100).toFixed(1) : 0;
successEl.textContent = rate + '%';
}
if (errorEl) errorEl.textContent = totals.error_count || 0;
if (latencyEl) latencyEl.textContent = (totals.latency_avg_ms || 0).toFixed(1) + 'ms';
if (bytesInEl) bytesInEl.textContent = formatBytes(totals.bytes_in || 0);
if (bytesOutEl) bytesOutEl.textContent = formatBytes(totals.bytes_out || 0);
if (methodChart && stats.by_method) {
var methods = Object.keys(stats.by_method);
var methodData = methods.map(function(m) { return stats.by_method[m].count; });
var methodBg = methods.map(function(m) { return methodColors[m] || '#6c757d'; });
methodChart.data.labels = methods;
methodChart.data.datasets[0].data = methodData;
methodChart.data.datasets[0].backgroundColor = methodBg;
methodChart.update('none');
}
if (statusChart && stats.by_status_class) {
var statuses = Object.keys(stats.by_status_class).sort();
var statusData = statuses.map(function(s) { return stats.by_status_class[s]; });
var statusBg = statuses.map(function(s) { return statusColors[s] || '#6c757d'; });
statusChart.data.labels = statuses;
statusChart.data.datasets[0].data = statusData;
statusChart.data.datasets[0].backgroundColor = statusBg;
statusChart.update('none');
}
if (endpointChart && stats.by_endpoint) {
var endpoints = Object.keys(stats.by_endpoint);
var endpointData = endpoints.map(function(e) { return stats.by_endpoint[e].count; });
var endpointBg = endpoints.map(function(e) { return endpointColors[e] || '#6c757d'; });
endpointChart.data.labels = endpoints;
endpointChart.data.datasets[0].data = endpointData;
endpointChart.data.datasets[0].backgroundColor = endpointBg;
endpointChart.update('none');
}
var errorBody = document.getElementById('errorCodesBody');
if (errorBody && stats.error_codes) {
var errorCodes = Object.entries(stats.error_codes);
errorCodes.sort(function(a, b) { return b[1] - a[1]; });
var totalErrors = errorCodes.reduce(function(sum, e) { return sum + e[1]; }, 0);
errorCodes = errorCodes.slice(0, 10);
if (errorCodes.length === 0) {
errorBody.innerHTML = '<div class="text-muted small text-center py-4">' +
'<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="bi bi-check-circle mb-2 text-success" viewBox="0 0 16 16">' +
'<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>' +
'<path d="M10.97 4.97a.235.235 0 0 0-.02.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-1.071-1.05z"/>' +
'</svg><div>No S3 API errors</div></div>';
} else {
errorBody.innerHTML = errorCodes.map(function(e) {
var pct = totalErrors > 0 ? ((e[1] / totalErrors) * 100).toFixed(0) : 0;
return '<div class="d-flex align-items-center py-1" style="font-size: 0.8rem;">' +
'<div style="flex: 1;"><code class="text-danger">' + e[0] + '</code></div>' +
'<div class="text-end fw-semibold" style="width: 60px;">' + e[1] + '</div>' +
'<div style="width: 100px; padding-left: 10px;"><div class="progress" style="height: 6px;"><div class="progress-bar bg-danger" style="width: ' + pct + '%"></div></div></div>' +
'</div>';
}).join('');
}
}
var windowMins = Math.floor(stats.window_seconds / 60);
var windowSecs = Math.floor(stats.window_seconds % 60);
var windowStr = windowMins > 0 ? windowMins + 'm ' + windowSecs + 's' : windowSecs + 's';
if (opStatus) opStatus.textContent = 'Window: ' + windowStr + ' | ' + new Date().toLocaleTimeString();
})
.catch(function(err) {
console.error('Operation metrics fetch error:', err);
if (opStatus) opStatus.textContent = 'Failed to load';
});
}
function startOpPolling() {
if (opTimer) clearInterval(opTimer);
opTimer = setInterval(updateOpMetrics, 5000);
}
var resetBtn = document.getElementById('resetOpMetricsBtn');
if (resetBtn) {
resetBtn.addEventListener('click', function() {
updateOpMetrics();
});
}
document.addEventListener('visibilitychange', function() {
if (document.hidden) {
if (opTimer) clearInterval(opTimer);
opTimer = null;
} else {
updateOpMetrics();
startOpPolling();
}
});
initOpCharts();
updateOpMetrics();
startOpPolling();
})();
{% endif %}
{% if metrics_history_enabled %}
(function() {
var cpuChart = null;
var memoryChart = null;
var diskChart = null;
var historyStatus = document.getElementById('historyStatus');
var timeRangeSelect = document.getElementById('historyTimeRange');
var historyTimer = null;
var MAX_DATA_POINTS = 500;
function createChart(ctx, label, color) {
return new Chart(ctx, {
type: 'line',
data: {
labels: [],
datasets: [{
label: label,
data: [],
borderColor: color,
backgroundColor: color + '20',
fill: true,
tension: 0.3,
pointRadius: 3,
pointHoverRadius: 6,
hitRadius: 10,
}]
},
options: {
responsive: true,
maintainAspectRatio: true,
animation: false,
plugins: {
legend: { display: false },
tooltip: {
callbacks: {
label: function(ctx) { return ctx.parsed.y.toFixed(2) + '%'; }
}
}
},
scales: {
x: {
display: true,
ticks: { maxRotation: 0, font: { size: 10 }, autoSkip: true, maxTicksLimit: 10 }
},
y: {
display: true,
min: 0,
max: 100,
ticks: { callback: function(v) { return v + '%'; } }
}
}
}
});
}
function initCharts() {
var cpuCtx = document.getElementById('cpuHistoryChart');
var memCtx = document.getElementById('memoryHistoryChart');
var diskCtx = document.getElementById('diskHistoryChart');
if (cpuCtx) cpuChart = createChart(cpuCtx, 'CPU %', '#0d6efd');
if (memCtx) memoryChart = createChart(memCtx, 'Memory %', '#0dcaf0');
if (diskCtx) diskChart = createChart(diskCtx, 'Disk %', '#ffc107');
}
function formatTime(ts) {
var d = new Date(ts);
return d.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
}
function loadHistory() {
if (document.hidden) return;
var hours = timeRangeSelect ? timeRangeSelect.value : 24;
fetch('/ui/metrics/history?hours=' + hours)
.then(function(r) { return r.json(); })
.then(function(data) {
if (!data.enabled || !data.history || data.history.length === 0) {
if (historyStatus) historyStatus.textContent = 'No history data available yet. Data is recorded every ' + (data.interval_minutes || 5) + ' minutes.';
return;
}
var history = data.history.slice(-MAX_DATA_POINTS);
var labels = history.map(function(h) { return formatTime(h.timestamp); });
var cpuData = history.map(function(h) { return h.cpu_percent; });
var memData = history.map(function(h) { return h.memory_percent; });
var diskData = history.map(function(h) { return h.disk_percent; });
if (cpuChart) {
cpuChart.data.labels = labels;
cpuChart.data.datasets[0].data = cpuData;
cpuChart.update('none');
}
if (memoryChart) {
memoryChart.data.labels = labels;
memoryChart.data.datasets[0].data = memData;
memoryChart.update('none');
}
if (diskChart) {
diskChart.data.labels = labels;
diskChart.data.datasets[0].data = diskData;
diskChart.update('none');
}
if (historyStatus) historyStatus.textContent = 'Showing ' + history.length + ' data points';
})
.catch(function(err) {
console.error('History fetch error:', err);
if (historyStatus) historyStatus.textContent = 'Failed to load history data';
});
}
function startHistoryPolling() {
if (historyTimer) clearInterval(historyTimer);
historyTimer = setInterval(loadHistory, 60000);
}
if (timeRangeSelect) {
timeRangeSelect.addEventListener('change', loadHistory);
}
document.addEventListener('visibilitychange', function() {
if (document.hidden) {
if (historyTimer) clearInterval(historyTimer);
historyTimer = null;
} else {
loadHistory();
startHistoryPolling();
}
});
initCharts();
loadHistory();
startHistoryPolling();
})();
{% endif %}
</script>
{% endblock %}

View File

@@ -35,6 +35,7 @@ def app(tmp_path: Path):
flask_app = create_api_app(
{
"TESTING": True,
"SECRET_KEY": "testing",
"STORAGE_ROOT": storage_root,
"IAM_CONFIG": iam_config,
"BUCKET_POLICY_PATH": bucket_policies,

View File

@@ -1,6 +1,3 @@
from urllib.parse import urlsplit
def test_bucket_and_object_lifecycle(client, signer):
headers = signer("PUT", "/photos")
response = client.put("/photos", headers=headers)
@@ -104,12 +101,12 @@ def test_request_id_header_present(client, signer):
assert response.headers.get("X-Request-ID")
def test_healthcheck_returns_version(client):
response = client.get("/healthz")
def test_healthcheck_returns_status(client):
response = client.get("/myfsio/health")
data = response.get_json()
assert response.status_code == 200
assert data["status"] == "ok"
assert "version" in data
assert "version" not in data
def test_missing_credentials_denied(client):
@@ -117,36 +114,20 @@ def test_missing_credentials_denied(client):
assert response.status_code == 403
def test_presign_and_bucket_policies(client, signer):
# Create bucket and object
def test_bucket_policies_deny_reads(client, signer):
import json
headers = signer("PUT", "/docs")
assert client.put("/docs", headers=headers).status_code == 200
headers = signer("PUT", "/docs/readme.txt", body=b"content")
assert client.put("/docs/readme.txt", headers=headers, data=b"content").status_code == 200
# Generate presigned GET URL and follow it
json_body = {"method": "GET", "expires_in": 120}
# Flask test client json parameter automatically sets Content-Type and serializes body
# But for signing we need the body bytes.
import json
body_bytes = json.dumps(json_body).encode("utf-8")
headers = signer("POST", "/presign/docs/readme.txt", headers={"Content-Type": "application/json"}, body=body_bytes)
response = client.post(
"/presign/docs/readme.txt",
headers=headers,
json=json_body,
)
headers = signer("GET", "/docs/readme.txt")
response = client.get("/docs/readme.txt", headers=headers)
assert response.status_code == 200
presigned_url = response.get_json()["url"]
parts = urlsplit(presigned_url)
presigned_path = f"{parts.path}?{parts.query}"
download = client.get(presigned_path)
assert download.status_code == 200
assert download.data == b"content"
assert response.data == b"content"
# Attach a deny policy for GETs
policy = {
"Version": "2012-10-17",
"Statement": [
@@ -160,29 +141,26 @@ def test_presign_and_bucket_policies(client, signer):
],
}
policy_bytes = json.dumps(policy).encode("utf-8")
headers = signer("PUT", "/bucket-policy/docs", headers={"Content-Type": "application/json"}, body=policy_bytes)
assert client.put("/bucket-policy/docs", headers=headers, json=policy).status_code == 204
headers = signer("GET", "/bucket-policy/docs")
fetched = client.get("/bucket-policy/docs", headers=headers)
headers = signer("PUT", "/docs?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
assert client.put("/docs?policy", headers=headers, json=policy).status_code == 204
headers = signer("GET", "/docs?policy")
fetched = client.get("/docs?policy", headers=headers)
assert fetched.status_code == 200
assert fetched.get_json()["Version"] == "2012-10-17"
# Reads are now denied by bucket policy
headers = signer("GET", "/docs/readme.txt")
denied = client.get("/docs/readme.txt", headers=headers)
assert denied.status_code == 403
# Presign attempts are also denied
json_body = {"method": "GET", "expires_in": 60}
body_bytes = json.dumps(json_body).encode("utf-8")
headers = signer("POST", "/presign/docs/readme.txt", headers={"Content-Type": "application/json"}, body=body_bytes)
response = client.post(
"/presign/docs/readme.txt",
headers=headers,
json=json_body,
)
assert response.status_code == 403
headers = signer("DELETE", "/docs?policy")
assert client.delete("/docs?policy", headers=headers).status_code == 204
headers = signer("DELETE", "/docs/readme.txt")
assert client.delete("/docs/readme.txt", headers=headers).status_code == 204
headers = signer("DELETE", "/docs")
assert client.delete("/docs", headers=headers).status_code == 204
def test_trailing_slash_returns_xml(client):
@@ -193,9 +171,11 @@ def test_trailing_slash_returns_xml(client):
def test_public_policy_allows_anonymous_list_and_read(client, signer):
import json
headers = signer("PUT", "/public")
assert client.put("/public", headers=headers).status_code == 200
headers = signer("PUT", "/public/hello.txt", body=b"hi")
assert client.put("/public/hello.txt", headers=headers, data=b"hi").status_code == 200
@@ -221,10 +201,9 @@ def test_public_policy_allows_anonymous_list_and_read(client, signer):
},
],
}
import json
policy_bytes = json.dumps(policy).encode("utf-8")
headers = signer("PUT", "/bucket-policy/public", headers={"Content-Type": "application/json"}, body=policy_bytes)
assert client.put("/bucket-policy/public", headers=headers, json=policy).status_code == 204
headers = signer("PUT", "/public?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
assert client.put("/public?policy", headers=headers, json=policy).status_code == 204
list_response = client.get("/public")
assert list_response.status_code == 200
@@ -236,18 +215,20 @@ def test_public_policy_allows_anonymous_list_and_read(client, signer):
headers = signer("DELETE", "/public/hello.txt")
assert client.delete("/public/hello.txt", headers=headers).status_code == 204
headers = signer("DELETE", "/bucket-policy/public")
assert client.delete("/bucket-policy/public", headers=headers).status_code == 204
headers = signer("DELETE", "/public?policy")
assert client.delete("/public?policy", headers=headers).status_code == 204
headers = signer("DELETE", "/public")
assert client.delete("/public", headers=headers).status_code == 204
def test_principal_dict_with_object_get_only(client, signer):
import json
headers = signer("PUT", "/mixed")
assert client.put("/mixed", headers=headers).status_code == 200
headers = signer("PUT", "/mixed/only.txt", body=b"ok")
assert client.put("/mixed/only.txt", headers=headers, data=b"ok").status_code == 200
@@ -270,10 +251,9 @@ def test_principal_dict_with_object_get_only(client, signer):
},
],
}
import json
policy_bytes = json.dumps(policy).encode("utf-8")
headers = signer("PUT", "/bucket-policy/mixed", headers={"Content-Type": "application/json"}, body=policy_bytes)
assert client.put("/bucket-policy/mixed", headers=headers, json=policy).status_code == 204
headers = signer("PUT", "/mixed?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
assert client.put("/mixed?policy", headers=headers, json=policy).status_code == 204
assert client.get("/mixed").status_code == 403
allowed = client.get("/mixed/only.txt")
@@ -282,18 +262,20 @@ def test_principal_dict_with_object_get_only(client, signer):
headers = signer("DELETE", "/mixed/only.txt")
assert client.delete("/mixed/only.txt", headers=headers).status_code == 204
headers = signer("DELETE", "/bucket-policy/mixed")
assert client.delete("/bucket-policy/mixed", headers=headers).status_code == 204
headers = signer("DELETE", "/mixed?policy")
assert client.delete("/mixed?policy", headers=headers).status_code == 204
headers = signer("DELETE", "/mixed")
assert client.delete("/mixed", headers=headers).status_code == 204
def test_bucket_policy_wildcard_resource_allows_object_get(client, signer):
import json
headers = signer("PUT", "/test")
assert client.put("/test", headers=headers).status_code == 200
headers = signer("PUT", "/test/vid.mp4", body=b"video")
assert client.put("/test/vid.mp4", headers=headers, data=b"video").status_code == 200
@@ -314,10 +296,9 @@ def test_bucket_policy_wildcard_resource_allows_object_get(client, signer):
},
],
}
import json
policy_bytes = json.dumps(policy).encode("utf-8")
headers = signer("PUT", "/bucket-policy/test", headers={"Content-Type": "application/json"}, body=policy_bytes)
assert client.put("/bucket-policy/test", headers=headers, json=policy).status_code == 204
headers = signer("PUT", "/test?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
assert client.put("/test?policy", headers=headers, json=policy).status_code == 204
listing = client.get("/test")
assert listing.status_code == 403
@@ -327,10 +308,10 @@ def test_bucket_policy_wildcard_resource_allows_object_get(client, signer):
headers = signer("DELETE", "/test/vid.mp4")
assert client.delete("/test/vid.mp4", headers=headers).status_code == 204
headers = signer("DELETE", "/bucket-policy/test")
assert client.delete("/bucket-policy/test", headers=headers).status_code == 204
headers = signer("DELETE", "/test?policy")
assert client.delete("/test?policy", headers=headers).status_code == 204
headers = signer("DELETE", "/test")
assert client.delete("/test", headers=headers).status_code == 204

View File

@@ -15,6 +15,7 @@ def kms_client(tmp_path):
app = create_app({
"TESTING": True,
"SECRET_KEY": "testing",
"STORAGE_ROOT": str(tmp_path / "storage"),
"IAM_CONFIG": str(tmp_path / "iam.json"),
"BUCKET_POLICY_PATH": str(tmp_path / "policies.json"),

View File

@@ -0,0 +1,297 @@
import threading
import time
from pathlib import Path
import pytest
from app.operation_metrics import (
OperationMetricsCollector,
OperationStats,
classify_endpoint,
)
class TestOperationStats:
def test_initial_state(self):
stats = OperationStats()
assert stats.count == 0
assert stats.success_count == 0
assert stats.error_count == 0
assert stats.latency_sum_ms == 0.0
assert stats.bytes_in == 0
assert stats.bytes_out == 0
def test_record_success(self):
stats = OperationStats()
stats.record(latency_ms=50.0, success=True, bytes_in=100, bytes_out=200)
assert stats.count == 1
assert stats.success_count == 1
assert stats.error_count == 0
assert stats.latency_sum_ms == 50.0
assert stats.latency_min_ms == 50.0
assert stats.latency_max_ms == 50.0
assert stats.bytes_in == 100
assert stats.bytes_out == 200
def test_record_error(self):
stats = OperationStats()
stats.record(latency_ms=100.0, success=False, bytes_in=50, bytes_out=0)
assert stats.count == 1
assert stats.success_count == 0
assert stats.error_count == 1
def test_latency_min_max(self):
stats = OperationStats()
stats.record(latency_ms=50.0, success=True)
stats.record(latency_ms=10.0, success=True)
stats.record(latency_ms=100.0, success=True)
assert stats.latency_min_ms == 10.0
assert stats.latency_max_ms == 100.0
assert stats.latency_sum_ms == 160.0
def test_to_dict(self):
stats = OperationStats()
stats.record(latency_ms=50.0, success=True, bytes_in=100, bytes_out=200)
stats.record(latency_ms=100.0, success=False, bytes_in=50, bytes_out=0)
result = stats.to_dict()
assert result["count"] == 2
assert result["success_count"] == 1
assert result["error_count"] == 1
assert result["latency_avg_ms"] == 75.0
assert result["latency_min_ms"] == 50.0
assert result["latency_max_ms"] == 100.0
assert result["bytes_in"] == 150
assert result["bytes_out"] == 200
def test_to_dict_empty(self):
stats = OperationStats()
result = stats.to_dict()
assert result["count"] == 0
assert result["latency_avg_ms"] == 0.0
assert result["latency_min_ms"] == 0.0
def test_merge(self):
stats1 = OperationStats()
stats1.record(latency_ms=50.0, success=True, bytes_in=100, bytes_out=200)
stats2 = OperationStats()
stats2.record(latency_ms=10.0, success=True, bytes_in=50, bytes_out=100)
stats2.record(latency_ms=100.0, success=False, bytes_in=25, bytes_out=50)
stats1.merge(stats2)
assert stats1.count == 3
assert stats1.success_count == 2
assert stats1.error_count == 1
assert stats1.latency_min_ms == 10.0
assert stats1.latency_max_ms == 100.0
assert stats1.bytes_in == 175
assert stats1.bytes_out == 350
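# For reference, a minimal accumulator consistent with TestOperationStats above.
# Sketch only: the real OperationStats in app.operation_metrics may differ.
from dataclasses import dataclass
@dataclass
class _StatsSketch:
    count: int = 0
    success_count: int = 0
    error_count: int = 0
    latency_sum_ms: float = 0.0
    latency_min_ms: float = 0.0
    latency_max_ms: float = 0.0
    bytes_in: int = 0
    bytes_out: int = 0
    def record(self, latency_ms: float, success: bool, bytes_in: int = 0, bytes_out: int = 0) -> None:
        self.count += 1
        if success:
            self.success_count += 1
        else:
            self.error_count += 1
        self.latency_sum_ms += latency_ms
        # First sample seeds min; later samples narrow it.
        self.latency_min_ms = latency_ms if self.count == 1 else min(self.latency_min_ms, latency_ms)
        self.latency_max_ms = max(self.latency_max_ms, latency_ms)
        self.bytes_in += bytes_in
        self.bytes_out += bytes_out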
class TestClassifyEndpoint:
def test_root_path(self):
assert classify_endpoint("/") == "service"
assert classify_endpoint("") == "service"
def test_ui_paths(self):
assert classify_endpoint("/ui") == "ui"
assert classify_endpoint("/ui/buckets") == "ui"
assert classify_endpoint("/ui/metrics") == "ui"
def test_kms_paths(self):
assert classify_endpoint("/kms") == "kms"
assert classify_endpoint("/kms/keys") == "kms"
def test_service_paths(self):
assert classify_endpoint("/myfsio/health") == "service"
def test_bucket_paths(self):
assert classify_endpoint("/mybucket") == "bucket"
assert classify_endpoint("/mybucket/") == "bucket"
def test_object_paths(self):
assert classify_endpoint("/mybucket/mykey") == "object"
assert classify_endpoint("/mybucket/folder/nested/key.txt") == "object"
class TestOperationMetricsCollector:
def test_record_and_get_stats(self, tmp_path: Path):
collector = OperationMetricsCollector(
storage_root=tmp_path,
interval_minutes=60,
retention_hours=24,
)
try:
collector.record_request(
method="GET",
endpoint_type="bucket",
status_code=200,
latency_ms=50.0,
bytes_in=0,
bytes_out=1000,
)
collector.record_request(
method="PUT",
endpoint_type="object",
status_code=201,
latency_ms=100.0,
bytes_in=500,
bytes_out=0,
)
collector.record_request(
method="GET",
endpoint_type="object",
status_code=404,
latency_ms=25.0,
bytes_in=0,
bytes_out=0,
error_code="NoSuchKey",
)
stats = collector.get_current_stats()
assert stats["totals"]["count"] == 3
assert stats["totals"]["success_count"] == 2
assert stats["totals"]["error_count"] == 1
assert "GET" in stats["by_method"]
assert stats["by_method"]["GET"]["count"] == 2
assert "PUT" in stats["by_method"]
assert stats["by_method"]["PUT"]["count"] == 1
assert "bucket" in stats["by_endpoint"]
assert "object" in stats["by_endpoint"]
assert stats["by_endpoint"]["object"]["count"] == 2
assert stats["by_status_class"]["2xx"] == 2
assert stats["by_status_class"]["4xx"] == 1
assert stats["error_codes"]["NoSuchKey"] == 1
finally:
collector.shutdown()
def test_thread_safety(self, tmp_path: Path):
collector = OperationMetricsCollector(
storage_root=tmp_path,
interval_minutes=60,
retention_hours=24,
)
try:
num_threads = 5
requests_per_thread = 100
threads = []
def record_requests():
for _ in range(requests_per_thread):
collector.record_request(
method="GET",
endpoint_type="object",
status_code=200,
latency_ms=10.0,
)
for _ in range(num_threads):
t = threading.Thread(target=record_requests)
threads.append(t)
t.start()
for t in threads:
t.join()
stats = collector.get_current_stats()
assert stats["totals"]["count"] == num_threads * requests_per_thread
finally:
collector.shutdown()
def test_status_class_categorization(self, tmp_path: Path):
collector = OperationMetricsCollector(
storage_root=tmp_path,
interval_minutes=60,
retention_hours=24,
)
try:
collector.record_request("GET", "object", 200, 10.0)
collector.record_request("GET", "object", 204, 10.0)
collector.record_request("GET", "object", 301, 10.0)
collector.record_request("GET", "object", 304, 10.0)
collector.record_request("GET", "object", 400, 10.0)
collector.record_request("GET", "object", 403, 10.0)
collector.record_request("GET", "object", 404, 10.0)
collector.record_request("GET", "object", 500, 10.0)
collector.record_request("GET", "object", 503, 10.0)
stats = collector.get_current_stats()
assert stats["by_status_class"]["2xx"] == 2
assert stats["by_status_class"]["3xx"] == 2
assert stats["by_status_class"]["4xx"] == 3
assert stats["by_status_class"]["5xx"] == 2
finally:
collector.shutdown()
def test_error_code_tracking(self, tmp_path: Path):
collector = OperationMetricsCollector(
storage_root=tmp_path,
interval_minutes=60,
retention_hours=24,
)
try:
collector.record_request("GET", "object", 404, 10.0, error_code="NoSuchKey")
collector.record_request("GET", "object", 404, 10.0, error_code="NoSuchKey")
collector.record_request("GET", "bucket", 403, 10.0, error_code="AccessDenied")
collector.record_request("PUT", "object", 500, 10.0, error_code="InternalError")
stats = collector.get_current_stats()
assert stats["error_codes"]["NoSuchKey"] == 2
assert stats["error_codes"]["AccessDenied"] == 1
assert stats["error_codes"]["InternalError"] == 1
finally:
collector.shutdown()
def test_history_persistence(self, tmp_path: Path):
collector = OperationMetricsCollector(
storage_root=tmp_path,
interval_minutes=60,
retention_hours=24,
)
try:
collector.record_request("GET", "object", 200, 10.0)
collector._take_snapshot()
history = collector.get_history()
assert len(history) == 1
assert history[0]["totals"]["count"] == 1
config_path = tmp_path / ".myfsio.sys" / "config" / "operation_metrics.json"
assert config_path.exists()
finally:
collector.shutdown()
def test_get_history_with_hours_filter(self, tmp_path: Path):
collector = OperationMetricsCollector(
storage_root=tmp_path,
interval_minutes=60,
retention_hours=24,
)
try:
collector.record_request("GET", "object", 200, 10.0)
collector._take_snapshot()
history_all = collector.get_history()
history_recent = collector.get_history(hours=1)
assert len(history_all) >= len(history_recent)
finally:
collector.shutdown()

View File

@@ -28,6 +28,7 @@ def _make_app(tmp_path: Path):
flask_app = create_app(
{
"TESTING": True,
"SECRET_KEY": "testing",
"WTF_CSRF_ENABLED": False,
"STORAGE_ROOT": storage_root,
"IAM_CONFIG": iam_config,