28 Commits

Author SHA1 Message Date
bb6590fc5e Merge pull request 'MyFSIO v0.2.2 Release' (#14) from next into main
Reviewed-on: #14
2026-01-19 07:12:15 +00:00
4de936cea9 Update docs 2026-01-19 12:33:47 +08:00
adb9017580 Add operation metrics with logging integration in metrics UI 2026-01-18 23:50:47 +08:00
4adfcc4131 Improve pytest tests 2026-01-18 21:53:39 +08:00
ebc315c1cc Fix routing conflicts: move admin endpoints to reserved paths 2026-01-18 21:35:39 +08:00
5ab62a00ff Fix security vulnerabilities: XXE, timing attacks, info leaks 2026-01-18 17:18:12 +08:00
9c3518de63 Add new filetype previews; Remove metadata from bucket streaming 2026-01-17 15:40:58 +08:00
a52657e684 enhance date formats with timezone 2026-01-16 20:19:52 +08:00
53297abe1e Add metrics history with charts, fix percentage formatting to 2 d.p. 2026-01-16 19:57:23 +08:00
a3b9db544c Add file type icons, enhance bucket date format, fix metadata display bug 2026-01-16 13:18:06 +08:00
f5d2e1c488 Fix last_modified field still returning wrong timezone 2026-01-14 23:07:47 +08:00
f04c6a9cdc Fix reflect time zone in object browser 2026-01-14 22:51:41 +08:00
7a494abb96 Reflect timezone in Object Details; Fix latest IAM bucket policy bugs 2026-01-14 22:47:29 +08:00
956d17a649 Add new bucket policies; update docs 2026-01-14 22:05:31 +08:00
5522f9ac04 Fix missing column for 'Abort Incomplete MPU' in the lifecycle panel 2026-01-14 21:48:06 +08:00
3742f0228e Fix timezone UI not reflecting correctly 2026-01-14 21:41:41 +08:00
899db3421b Merge pull request 'MyFSIO v0.2.1 Release' (#13) from next into main
Reviewed-on: #13
2026-01-12 08:03:29 +00:00
caf01d6ada Merge pull request 'MyFSIO v0.2.0 Release' (#12) from next into main
Reviewed-on: #12
2026-01-05 15:48:03 +00:00
bb366cb4cd Merge pull request 'MyFSIO v0.1.9 Release' (#10) from next into main
Reviewed-on: #10
2025-12-29 06:49:48 +00:00
a2745ff2ee Merge pull request 'MyFSIO v0.1.8 Release' (#9) from next into main
Reviewed-on: #9
2025-12-23 06:01:32 +00:00
28cb656d94 Merge pull request 'MyFSIO v0.1.7 Release' (#8) from next into main
Reviewed-on: #8
2025-12-22 03:10:35 +00:00
3c44152fc6 Merge pull request 'MyFSIO v0.1.6 Release' (#7) from next into main
Reviewed-on: #7
2025-12-21 06:30:21 +00:00
397515edce Merge pull request 'MyFSIO v0.1.5 Release' (#6) from next into main
Reviewed-on: #6
2025-12-13 15:41:03 +00:00
980fced7e4 Merge pull request 'MyFSIO v0.1.4 Release' (#5) from next into main
Reviewed-on: #5
2025-12-13 08:22:43 +00:00
bae5009ec4 Merge pull request 'Release v0.1.3' (#4) from next into main
Reviewed-on: #4
2025-12-03 04:14:57 +00:00
233780617f Merge pull request 'Release V0.1.2' (#3) from next into main
Reviewed-on: #3
2025-11-26 04:59:15 +00:00
fd8fb21517 Merge pull request 'Prepare for binary release' (#2) from next into main
Reviewed-on: #2
2025-11-22 12:33:38 +00:00
c6cbe822e1 Merge pull request 'Release v0.1.1' (#1) from next into main
Reviewed-on: #1
2025-11-22 12:31:27 +00:00
22 changed files with 2430 additions and 425 deletions

View File

@@ -32,6 +32,6 @@ ENV APP_HOST=0.0.0.0 \
     FLASK_DEBUG=0
 
 HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
-    CMD python -c "import requests; requests.get('http://localhost:5000/healthz', timeout=2)"
+    CMD python -c "import requests; requests.get('http://localhost:5000/myfsio/health', timeout=2)"
 
 CMD ["./docker-entrypoint.sh"]
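For a quick manual probe of the relocated endpoint, the same call the container healthcheck makes works from any shell (a sketch assuming a local instance on port 5000):

import requests

# Probe the new health endpoint; the old /healthz path is gone.
resp = requests.get("http://localhost:5000/myfsio/health", timeout=2)
resp.raise_for_status()
print(resp.json())  # {"status": "ok"} -- the version field was removed to avoid leaking build info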

View File

@@ -149,19 +149,13 @@ All endpoints require AWS Signature Version 4 authentication unless using presig
 | `POST` | `/<bucket>/<key>?uploadId=X` | Complete multipart upload |
 | `DELETE` | `/<bucket>/<key>?uploadId=X` | Abort multipart upload |
 
-### Presigned URLs
+### Bucket Policies (S3-compatible)
 
 | Method | Endpoint | Description |
 |--------|----------|-------------|
-| `POST` | `/presign/<bucket>/<key>` | Generate presigned URL |
-
-### Bucket Policies
-
-| Method | Endpoint | Description |
-|--------|----------|-------------|
-| `GET` | `/bucket-policy/<bucket>` | Get bucket policy |
-| `PUT` | `/bucket-policy/<bucket>` | Set bucket policy |
-| `DELETE` | `/bucket-policy/<bucket>` | Delete bucket policy |
+| `GET` | `/<bucket>?policy` | Get bucket policy |
+| `PUT` | `/<bucket>?policy` | Set bucket policy |
+| `DELETE` | `/<bucket>?policy` | Delete bucket policy |
 
 ### Versioning
@@ -175,7 +169,7 @@ All endpoints require AWS Signature Version 4 authentication unless using presig
 | Method | Endpoint | Description |
 |--------|----------|-------------|
-| `GET` | `/healthz` | Health check endpoint |
+| `GET` | `/myfsio/health` | Health check endpoint |
 
 ## IAM & Access Control
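Because the policy endpoints now use the standard `?policy` subresource, stock S3 clients can manage them directly. A minimal sketch with boto3 — the endpoint URL, bucket name, and credentials here are placeholders, not values from this repo:

import json
import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:5000",   # hypothetical local API address
    aws_access_key_id="TESTKEY",
    aws_secret_access_key="TESTSECRET",
)

policy = {
    "Version": "2012-10-17",
    "Statement": [{"Effect": "Allow", "Principal": "*",
                   "Action": "s3:GetObject", "Resource": "arn:aws:s3:::demo/*"}],
}
s3.put_bucket_policy(Bucket="demo", Policy=json.dumps(policy))   # PUT /demo?policy
print(s3.get_bucket_policy(Bucket="demo")["Policy"])             # GET /demo?policy
s3.delete_bucket_policy(Bucket="demo")                           # DELETE /demo?policy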

View File

@@ -16,6 +16,7 @@ from flask_wtf.csrf import CSRFError
 from werkzeug.middleware.proxy_fix import ProxyFix
 
 from .access_logging import AccessLoggingService
+from .operation_metrics import OperationMetricsCollector, classify_endpoint
 from .compression import GzipMiddleware
 from .acl import AclService
 from .bucket_policies import BucketPolicyStore
@@ -187,6 +188,15 @@ def create_app(
     app.extensions["notifications"] = notification_service
     app.extensions["access_logging"] = access_logging_service
+    operation_metrics_collector = None
+    if app.config.get("OPERATION_METRICS_ENABLED", False):
+        operation_metrics_collector = OperationMetricsCollector(
+            storage_root,
+            interval_minutes=app.config.get("OPERATION_METRICS_INTERVAL_MINUTES", 5),
+            retention_hours=app.config.get("OPERATION_METRICS_RETENTION_HOURS", 24),
+        )
+    app.extensions["operation_metrics"] = operation_metrics_collector
 
     @app.errorhandler(500)
     def internal_error(error):
         return render_template('500.html'), 500
@@ -227,6 +237,30 @@ def create_app(
         except (ValueError, OSError):
             return "Unknown"
 
+    @app.template_filter("format_datetime")
+    def format_datetime_filter(dt, include_tz: bool = True) -> str:
+        """Format datetime object as human-readable string in configured timezone."""
+        from datetime import datetime, timezone as dt_timezone
+        from zoneinfo import ZoneInfo
+
+        if not dt:
+            return ""
+        try:
+            display_tz = app.config.get("DISPLAY_TIMEZONE", "UTC")
+            if display_tz and display_tz != "UTC":
+                try:
+                    tz = ZoneInfo(display_tz)
+                    if dt.tzinfo is None:
+                        dt = dt.replace(tzinfo=dt_timezone.utc)
+                    dt = dt.astimezone(tz)
+                except (KeyError, ValueError):
+                    pass
+            tz_abbr = dt.strftime("%Z") or "UTC"
+            if include_tz:
+                return f"{dt.strftime('%b %d, %Y %H:%M')} ({tz_abbr})"
+            return dt.strftime("%b %d, %Y %H:%M")
+        except (ValueError, AttributeError):
+            return str(dt)
+
     if include_api:
         from .s3_api import s3_api_bp
         from .kms_api import kms_api_bp
@@ -254,9 +288,9 @@ def create_app(
             return render_template("404.html"), 404
         return error
 
-    @app.get("/healthz")
+    @app.get("/myfsio/health")
     def healthcheck() -> Dict[str, str]:
-        return {"status": "ok", "version": app.config.get("APP_VERSION", "unknown")}
+        return {"status": "ok"}
 
     return app
@@ -332,6 +366,7 @@ def _configure_logging(app: Flask) -> None:
     def _log_request_start() -> None:
         g.request_id = uuid.uuid4().hex
         g.request_started_at = time.perf_counter()
+        g.request_bytes_in = request.content_length or 0
         app.logger.info(
             "Request started",
             extra={"path": request.path, "method": request.method, "remote_addr": request.remote_addr},
@@ -353,4 +388,21 @@ def _configure_logging(app: Flask) -> None:
             },
         )
         response.headers["X-Request-Duration-ms"] = f"{duration_ms:.2f}"
+        operation_metrics = app.extensions.get("operation_metrics")
+        if operation_metrics:
+            bytes_in = getattr(g, "request_bytes_in", 0)
+            bytes_out = response.content_length or 0
+            error_code = getattr(g, "s3_error_code", None)
+            endpoint_type = classify_endpoint(request.path)
+            operation_metrics.record_request(
+                method=request.method,
+                endpoint_type=endpoint_type,
+                status_code=response.status_code,
+                latency_ms=duration_ms,
+                bytes_in=bytes_in,
+                bytes_out=bytes_out,
+                error_code=error_code,
+            )
         return response
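classify_endpoint buckets request paths into coarse endpoint types for the per-endpoint stats; a few illustrative calls (a sketch based on the new module shown further down this page):

from app.operation_metrics import classify_endpoint

assert classify_endpoint("/myfsio/health") == "service"
assert classify_endpoint("/ui/buckets") == "ui"
assert classify_endpoint("/demo") == "bucket"          # single path segment -> bucket op
assert classify_endpoint("/demo/a/b.txt") == "object"  # deeper paths -> object op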

View File

@@ -72,13 +72,11 @@ def _evaluate_condition_operator(
         return True
 
 ACTION_ALIASES = {
-    # List actions
     "s3:listbucket": "list",
     "s3:listallmybuckets": "list",
     "s3:listbucketversions": "list",
     "s3:listmultipartuploads": "list",
     "s3:listparts": "list",
-    # Read actions
     "s3:getobject": "read",
     "s3:getobjectversion": "read",
     "s3:getobjecttagging": "read",
@@ -87,7 +85,6 @@ ACTION_ALIASES = {
"s3:getbucketversioning": "read", "s3:getbucketversioning": "read",
"s3:headobject": "read", "s3:headobject": "read",
"s3:headbucket": "read", "s3:headbucket": "read",
# Write actions
"s3:putobject": "write", "s3:putobject": "write",
"s3:createbucket": "write", "s3:createbucket": "write",
"s3:putobjecttagging": "write", "s3:putobjecttagging": "write",
@@ -97,26 +94,30 @@ ACTION_ALIASES = {
"s3:completemultipartupload": "write", "s3:completemultipartupload": "write",
"s3:abortmultipartupload": "write", "s3:abortmultipartupload": "write",
"s3:copyobject": "write", "s3:copyobject": "write",
# Delete actions
"s3:deleteobject": "delete", "s3:deleteobject": "delete",
"s3:deleteobjectversion": "delete", "s3:deleteobjectversion": "delete",
"s3:deletebucket": "delete", "s3:deletebucket": "delete",
"s3:deleteobjecttagging": "delete", "s3:deleteobjecttagging": "delete",
# Share actions (ACL)
"s3:putobjectacl": "share", "s3:putobjectacl": "share",
"s3:putbucketacl": "share", "s3:putbucketacl": "share",
"s3:getbucketacl": "share", "s3:getbucketacl": "share",
# Policy actions
"s3:putbucketpolicy": "policy", "s3:putbucketpolicy": "policy",
"s3:getbucketpolicy": "policy", "s3:getbucketpolicy": "policy",
"s3:deletebucketpolicy": "policy", "s3:deletebucketpolicy": "policy",
# Replication actions
"s3:getreplicationconfiguration": "replication", "s3:getreplicationconfiguration": "replication",
"s3:putreplicationconfiguration": "replication", "s3:putreplicationconfiguration": "replication",
"s3:deletereplicationconfiguration": "replication", "s3:deletereplicationconfiguration": "replication",
"s3:replicateobject": "replication", "s3:replicateobject": "replication",
"s3:replicatetags": "replication", "s3:replicatetags": "replication",
"s3:replicatedelete": "replication", "s3:replicatedelete": "replication",
"s3:getlifecycleconfiguration": "lifecycle",
"s3:putlifecycleconfiguration": "lifecycle",
"s3:deletelifecycleconfiguration": "lifecycle",
"s3:getbucketlifecycle": "lifecycle",
"s3:putbucketlifecycle": "lifecycle",
"s3:getbucketcors": "cors",
"s3:putbucketcors": "cors",
"s3:deletebucketcors": "cors",
} }
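The aliases map S3-style action names onto the internal permission categories, so the new lifecycle and CORS actions resolve like any other (a sketch; the lowercase keys suggest callers lowercase action strings before lookup):

# Hypothetical lookups against the table above.
assert ACTION_ALIASES["s3:putlifecycleconfiguration"] == "lifecycle"
assert ACTION_ALIASES["s3:getbucketcors"] == "cors"
assert ACTION_ALIASES["s3:getobject"] == "read"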

View File

@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import os
+import re
 import secrets
 import shutil
 import sys
@@ -9,6 +10,13 @@ from dataclasses import dataclass
 from pathlib import Path
 from typing import Any, Dict, Optional
 
+
+def _validate_rate_limit(value: str) -> str:
+    pattern = r"^\d+\s+per\s+(second|minute|hour|day)$"
+    if not re.match(pattern, value):
+        raise ValueError(f"Invalid rate limit format: {value}. Expected format: '200 per minute'")
+    return value
+
+
 if getattr(sys, "frozen", False):
     # Running in a PyInstaller bundle
     PROJECT_ROOT = Path(sys._MEIPASS)
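The validator accepts Flask-Limiter-style "N per unit" strings and rejects anything else at startup; a quick sketch of the accepted shape:

_validate_rate_limit("200 per minute")   # returned unchanged
_validate_rate_limit("10 per second")    # ok
_validate_rate_limit("200/minute")       # raises ValueError: the slash form is not matched by the regex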
@@ -76,6 +84,12 @@ class AppConfig:
     display_timezone: str
     lifecycle_enabled: bool
     lifecycle_interval_seconds: int
+    metrics_history_enabled: bool
+    metrics_history_retention_hours: int
+    metrics_history_interval_minutes: int
+    operation_metrics_enabled: bool
+    operation_metrics_interval_minutes: int
+    operation_metrics_retention_hours: int
 
     @classmethod
     def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
@@ -148,7 +162,7 @@ class AppConfig:
         log_path = log_dir / str(_get("LOG_FILE", "app.log"))
         log_max_bytes = int(_get("LOG_MAX_BYTES", 5 * 1024 * 1024))
         log_backup_count = int(_get("LOG_BACKUP_COUNT", 3))
-        ratelimit_default = str(_get("RATE_LIMIT_DEFAULT", "200 per minute"))
+        ratelimit_default = _validate_rate_limit(str(_get("RATE_LIMIT_DEFAULT", "200 per minute")))
         ratelimit_storage_uri = str(_get("RATE_LIMIT_STORAGE_URI", "memory://"))
 
         def _csv(value: str, default: list[str]) -> list[str]:
@@ -172,6 +186,12 @@ class AppConfig:
         kms_keys_path = Path(_get("KMS_KEYS_PATH", encryption_keys_dir / "kms_keys.json")).resolve()
         default_encryption_algorithm = str(_get("DEFAULT_ENCRYPTION_ALGORITHM", "AES256"))
         display_timezone = str(_get("DISPLAY_TIMEZONE", "UTC"))
+        metrics_history_enabled = str(_get("METRICS_HISTORY_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
+        metrics_history_retention_hours = int(_get("METRICS_HISTORY_RETENTION_HOURS", 24))
+        metrics_history_interval_minutes = int(_get("METRICS_HISTORY_INTERVAL_MINUTES", 5))
+        operation_metrics_enabled = str(_get("OPERATION_METRICS_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
+        operation_metrics_interval_minutes = int(_get("OPERATION_METRICS_INTERVAL_MINUTES", 5))
+        operation_metrics_retention_hours = int(_get("OPERATION_METRICS_RETENTION_HOURS", 24))
 
         return cls(storage_root=storage_root,
                    max_upload_size=max_upload_size,
@@ -210,7 +230,13 @@ class AppConfig:
                    default_encryption_algorithm=default_encryption_algorithm,
                    display_timezone=display_timezone,
                    lifecycle_enabled=lifecycle_enabled,
-                   lifecycle_interval_seconds=lifecycle_interval_seconds)
+                   lifecycle_interval_seconds=lifecycle_interval_seconds,
+                   metrics_history_enabled=metrics_history_enabled,
+                   metrics_history_retention_hours=metrics_history_retention_hours,
+                   metrics_history_interval_minutes=metrics_history_interval_minutes,
+                   operation_metrics_enabled=operation_metrics_enabled,
+                   operation_metrics_interval_minutes=operation_metrics_interval_minutes,
+                   operation_metrics_retention_hours=operation_metrics_retention_hours)
 
     def validate_and_report(self) -> list[str]:
         """Validate configuration and return a list of warnings/issues.
@@ -339,4 +365,10 @@ class AppConfig:
"DISPLAY_TIMEZONE": self.display_timezone, "DISPLAY_TIMEZONE": self.display_timezone,
"LIFECYCLE_ENABLED": self.lifecycle_enabled, "LIFECYCLE_ENABLED": self.lifecycle_enabled,
"LIFECYCLE_INTERVAL_SECONDS": self.lifecycle_interval_seconds, "LIFECYCLE_INTERVAL_SECONDS": self.lifecycle_interval_seconds,
"METRICS_HISTORY_ENABLED": self.metrics_history_enabled,
"METRICS_HISTORY_RETENTION_HOURS": self.metrics_history_retention_hours,
"METRICS_HISTORY_INTERVAL_MINUTES": self.metrics_history_interval_minutes,
"OPERATION_METRICS_ENABLED": self.operation_metrics_enabled,
"OPERATION_METRICS_INTERVAL_MINUTES": self.operation_metrics_interval_minutes,
"OPERATION_METRICS_RETENTION_HOURS": self.operation_metrics_retention_hours,
} }

View File

@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import hmac
 import json
 import math
 import secrets
@@ -15,7 +16,7 @@ class IamError(RuntimeError):
"""Raised when authentication or authorization fails.""" """Raised when authentication or authorization fails."""
S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication"} S3_ACTIONS = {"list", "read", "write", "delete", "share", "policy", "replication", "lifecycle", "cors"}
IAM_ACTIONS = { IAM_ACTIONS = {
"iam:list_users", "iam:list_users",
"iam:create_user", "iam:create_user",
@@ -71,6 +72,16 @@ ACTION_ALIASES = {
"s3:replicateobject": "replication", "s3:replicateobject": "replication",
"s3:replicatetags": "replication", "s3:replicatetags": "replication",
"s3:replicatedelete": "replication", "s3:replicatedelete": "replication",
"lifecycle": "lifecycle",
"s3:getlifecycleconfiguration": "lifecycle",
"s3:putlifecycleconfiguration": "lifecycle",
"s3:deletelifecycleconfiguration": "lifecycle",
"s3:getbucketlifecycle": "lifecycle",
"s3:putbucketlifecycle": "lifecycle",
"cors": "cors",
"s3:getbucketcors": "cors",
"s3:putbucketcors": "cors",
"s3:deletebucketcors": "cors",
"iam:listusers": "iam:list_users", "iam:listusers": "iam:list_users",
"iam:createuser": "iam:create_user", "iam:createuser": "iam:create_user",
"iam:deleteuser": "iam:delete_user", "iam:deleteuser": "iam:delete_user",
@@ -139,7 +150,7 @@ class IamService:
f"Access temporarily locked. Try again in {seconds} seconds." f"Access temporarily locked. Try again in {seconds} seconds."
) )
record = self._users.get(access_key) record = self._users.get(access_key)
if not record or record["secret_key"] != secret_key: if not record or not hmac.compare_digest(record["secret_key"], secret_key):
self._record_failed_attempt(access_key) self._record_failed_attempt(access_key)
raise IamError("Invalid credentials") raise IamError("Invalid credentials")
self._clear_failed_attempts(access_key) self._clear_failed_attempts(access_key)
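The switch to hmac.compare_digest is the timing-attack fix from the commit message: `!=` can return as soon as the first differing byte is found, while compare_digest takes time independent of where the mismatch occurs. A standalone sketch:

import hmac

stored = "s3cr3t-key-from-store"
candidate = "s3cr3t-key-from-user"

# Constant-time: runtime does not reveal the length of the matching prefix.
ok = hmac.compare_digest(stored, candidate)
print(ok)  # False, but without the early-exit timing signal of `stored != candidate`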

app/operation_metrics.py (new file)
View File

@@ -0,0 +1,271 @@
+from __future__ import annotations
+
+import json
+import logging
+import threading
+import time
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class OperationStats:
+    count: int = 0
+    success_count: int = 0
+    error_count: int = 0
+    latency_sum_ms: float = 0.0
+    latency_min_ms: float = float("inf")
+    latency_max_ms: float = 0.0
+    bytes_in: int = 0
+    bytes_out: int = 0
+
+    def record(self, latency_ms: float, success: bool, bytes_in: int = 0, bytes_out: int = 0) -> None:
+        self.count += 1
+        if success:
+            self.success_count += 1
+        else:
+            self.error_count += 1
+        self.latency_sum_ms += latency_ms
+        if latency_ms < self.latency_min_ms:
+            self.latency_min_ms = latency_ms
+        if latency_ms > self.latency_max_ms:
+            self.latency_max_ms = latency_ms
+        self.bytes_in += bytes_in
+        self.bytes_out += bytes_out
+
+    def to_dict(self) -> Dict[str, Any]:
+        avg_latency = self.latency_sum_ms / self.count if self.count > 0 else 0.0
+        min_latency = self.latency_min_ms if self.latency_min_ms != float("inf") else 0.0
+        return {
+            "count": self.count,
+            "success_count": self.success_count,
+            "error_count": self.error_count,
+            "latency_avg_ms": round(avg_latency, 2),
+            "latency_min_ms": round(min_latency, 2),
+            "latency_max_ms": round(self.latency_max_ms, 2),
+            "bytes_in": self.bytes_in,
+            "bytes_out": self.bytes_out,
+        }
+
+    def merge(self, other: "OperationStats") -> None:
+        self.count += other.count
+        self.success_count += other.success_count
+        self.error_count += other.error_count
+        self.latency_sum_ms += other.latency_sum_ms
+        if other.latency_min_ms < self.latency_min_ms:
+            self.latency_min_ms = other.latency_min_ms
+        if other.latency_max_ms > self.latency_max_ms:
+            self.latency_max_ms = other.latency_max_ms
+        self.bytes_in += other.bytes_in
+        self.bytes_out += other.bytes_out
+
+
+@dataclass
+class MetricsSnapshot:
+    timestamp: datetime
+    window_seconds: int
+    by_method: Dict[str, Dict[str, Any]]
+    by_endpoint: Dict[str, Dict[str, Any]]
+    by_status_class: Dict[str, int]
+    error_codes: Dict[str, int]
+    totals: Dict[str, Any]
+
+    def to_dict(self) -> Dict[str, Any]:
+        return {
+            "timestamp": self.timestamp.isoformat(),
+            "window_seconds": self.window_seconds,
+            "by_method": self.by_method,
+            "by_endpoint": self.by_endpoint,
+            "by_status_class": self.by_status_class,
+            "error_codes": self.error_codes,
+            "totals": self.totals,
+        }
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> "MetricsSnapshot":
+        return cls(
+            timestamp=datetime.fromisoformat(data["timestamp"]),
+            window_seconds=data.get("window_seconds", 300),
+            by_method=data.get("by_method", {}),
+            by_endpoint=data.get("by_endpoint", {}),
+            by_status_class=data.get("by_status_class", {}),
+            error_codes=data.get("error_codes", {}),
+            totals=data.get("totals", {}),
+        )
+
+
+class OperationMetricsCollector:
+    def __init__(
+        self,
+        storage_root: Path,
+        interval_minutes: int = 5,
+        retention_hours: int = 24,
+    ):
+        self.storage_root = storage_root
+        self.interval_seconds = interval_minutes * 60
+        self.retention_hours = retention_hours
+        self._lock = threading.Lock()
+        self._by_method: Dict[str, OperationStats] = {}
+        self._by_endpoint: Dict[str, OperationStats] = {}
+        self._by_status_class: Dict[str, int] = {}
+        self._error_codes: Dict[str, int] = {}
+        self._totals = OperationStats()
+        self._window_start = time.time()
+        self._shutdown = threading.Event()
+        self._snapshots: List[MetricsSnapshot] = []
+        self._load_history()
+        self._snapshot_thread = threading.Thread(
+            target=self._snapshot_loop, name="operation-metrics-snapshot", daemon=True
+        )
+        self._snapshot_thread.start()
+
+    def _config_path(self) -> Path:
+        return self.storage_root / ".myfsio.sys" / "config" / "operation_metrics.json"
+
+    def _load_history(self) -> None:
+        config_path = self._config_path()
+        if not config_path.exists():
+            return
+        try:
+            data = json.loads(config_path.read_text(encoding="utf-8"))
+            snapshots_data = data.get("snapshots", [])
+            self._snapshots = [MetricsSnapshot.from_dict(s) for s in snapshots_data]
+            self._prune_old_snapshots()
+        except (json.JSONDecodeError, OSError, KeyError) as e:
+            logger.warning(f"Failed to load operation metrics history: {e}")
+
+    def _save_history(self) -> None:
+        config_path = self._config_path()
+        config_path.parent.mkdir(parents=True, exist_ok=True)
+        try:
+            data = {"snapshots": [s.to_dict() for s in self._snapshots]}
+            config_path.write_text(json.dumps(data, indent=2), encoding="utf-8")
+        except OSError as e:
+            logger.warning(f"Failed to save operation metrics history: {e}")
+
+    def _prune_old_snapshots(self) -> None:
+        if not self._snapshots:
+            return
+        cutoff = datetime.now(timezone.utc).timestamp() - (self.retention_hours * 3600)
+        self._snapshots = [
+            s for s in self._snapshots if s.timestamp.timestamp() > cutoff
+        ]
+
+    def _snapshot_loop(self) -> None:
+        while not self._shutdown.is_set():
+            self._shutdown.wait(timeout=self.interval_seconds)
+            if not self._shutdown.is_set():
+                self._take_snapshot()
+
+    def _take_snapshot(self) -> None:
+        with self._lock:
+            now = datetime.now(timezone.utc)
+            window_seconds = int(time.time() - self._window_start)
+            snapshot = MetricsSnapshot(
+                timestamp=now,
+                window_seconds=window_seconds,
+                by_method={k: v.to_dict() for k, v in self._by_method.items()},
+                by_endpoint={k: v.to_dict() for k, v in self._by_endpoint.items()},
+                by_status_class=dict(self._by_status_class),
+                error_codes=dict(self._error_codes),
+                totals=self._totals.to_dict(),
+            )
+            self._snapshots.append(snapshot)
+            self._prune_old_snapshots()
+            self._save_history()
+            self._by_method.clear()
+            self._by_endpoint.clear()
+            self._by_status_class.clear()
+            self._error_codes.clear()
+            self._totals = OperationStats()
+            self._window_start = time.time()
+
+    def record_request(
+        self,
+        method: str,
+        endpoint_type: str,
+        status_code: int,
+        latency_ms: float,
+        bytes_in: int = 0,
+        bytes_out: int = 0,
+        error_code: Optional[str] = None,
+    ) -> None:
+        success = 200 <= status_code < 400
+        status_class = f"{status_code // 100}xx"
+        with self._lock:
+            if method not in self._by_method:
+                self._by_method[method] = OperationStats()
+            self._by_method[method].record(latency_ms, success, bytes_in, bytes_out)
+            if endpoint_type not in self._by_endpoint:
+                self._by_endpoint[endpoint_type] = OperationStats()
+            self._by_endpoint[endpoint_type].record(latency_ms, success, bytes_in, bytes_out)
+            self._by_status_class[status_class] = self._by_status_class.get(status_class, 0) + 1
+            if error_code:
+                self._error_codes[error_code] = self._error_codes.get(error_code, 0) + 1
+            self._totals.record(latency_ms, success, bytes_in, bytes_out)
+
+    def get_current_stats(self) -> Dict[str, Any]:
+        with self._lock:
+            window_seconds = int(time.time() - self._window_start)
+            return {
+                "timestamp": datetime.now(timezone.utc).isoformat(),
+                "window_seconds": window_seconds,
+                "by_method": {k: v.to_dict() for k, v in self._by_method.items()},
+                "by_endpoint": {k: v.to_dict() for k, v in self._by_endpoint.items()},
+                "by_status_class": dict(self._by_status_class),
+                "error_codes": dict(self._error_codes),
+                "totals": self._totals.to_dict(),
+            }
+
+    def get_history(self, hours: Optional[int] = None) -> List[Dict[str, Any]]:
+        with self._lock:
+            snapshots = list(self._snapshots)
+        if hours:
+            cutoff = datetime.now(timezone.utc).timestamp() - (hours * 3600)
+            snapshots = [s for s in snapshots if s.timestamp.timestamp() > cutoff]
+        return [s.to_dict() for s in snapshots]
+
+    def shutdown(self) -> None:
+        self._shutdown.set()
+        self._take_snapshot()
+        self._snapshot_thread.join(timeout=5.0)
+
+
+def classify_endpoint(path: str) -> str:
+    if not path or path == "/":
+        return "service"
+    path = path.rstrip("/")
+    if path.startswith("/ui"):
+        return "ui"
+    if path.startswith("/kms"):
+        return "kms"
+    if path.startswith("/myfsio"):
+        return "service"
+    parts = path.lstrip("/").split("/")
+    if len(parts) == 0:
+        return "service"
+    elif len(parts) == 1:
+        return "bucket"
+    else:
+        return "object"

View File

@@ -11,7 +11,8 @@ import uuid
 from datetime import datetime, timedelta, timezone
 from typing import Any, Dict, Optional
 from urllib.parse import quote, urlencode, urlparse, unquote
-from xml.etree.ElementTree import Element, SubElement, tostring, fromstring, ParseError
+from xml.etree.ElementTree import Element, SubElement, tostring, ParseError
+from defusedxml.ElementTree import fromstring
 
 from flask import Blueprint, Response, current_app, jsonify, request, g
 from werkzeug.http import http_date
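Swapping the stdlib `fromstring` for defusedxml's is the XXE fix from the commit message: defusedxml refuses documents that declare entities, so classic file-disclosure payloads fail at parse time. A sketch of the behavior (assuming defusedxml is installed):

from defusedxml import EntitiesForbidden
from defusedxml.ElementTree import fromstring

xxe = b"""<?xml version="1.0"?>
<!DOCTYPE r [<!ENTITY x SYSTEM "file:///etc/passwd">]>
<r>&x;</r>"""

try:
    fromstring(xxe)
except EntitiesForbidden:
    print("entity declaration rejected before any file access")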
@@ -29,6 +30,8 @@ from .storage import ObjectStorage, StorageError, QuotaExceededError, BucketNotF
 logger = logging.getLogger(__name__)
 
+S3_NS = "http://s3.amazonaws.com/doc/2006-03-01/"
+
 s3_api_bp = Blueprint("s3_api", __name__)
 
 def _storage() -> ObjectStorage:
@@ -85,6 +88,7 @@ def _xml_response(element: Element, status: int = 200) -> Response:
 def _error_response(code: str, message: str, status: int) -> Response:
+    g.s3_error_code = code
     error = Element("Error")
     SubElement(error, "Code").text = code
     SubElement(error, "Message").text = message
@@ -93,6 +97,13 @@ def _error_response(code: str, message: str, status: int) -> Response:
     return _xml_response(error, status)
 
 
+def _require_xml_content_type() -> Response | None:
+    ct = request.headers.get("Content-Type", "")
+    if ct and not ct.startswith(("application/xml", "text/xml")):
+        return _error_response("InvalidRequest", "Content-Type must be application/xml or text/xml", 400)
+    return None
+
+
 def _parse_range_header(range_header: str, file_size: int) -> list[tuple[int, int]] | None:
     if not range_header.startswith("bytes="):
         return None
@@ -232,16 +243,7 @@ def _verify_sigv4_header(req: Any, auth_header: str) -> Principal | None:
     if not hmac.compare_digest(calculated_signature, signature):
         if current_app.config.get("DEBUG_SIGV4"):
-            logger.warning(
-                "SigV4 signature mismatch",
-                extra={
-                    "path": req.path,
-                    "method": method,
-                    "signed_headers": signed_headers_str,
-                    "content_type": req.headers.get("Content-Type"),
-                    "content_length": req.headers.get("Content-Length"),
-                }
-            )
+            logger.warning("SigV4 signature mismatch for %s %s", method, req.path)
         raise IamError("SignatureDoesNotMatch")
 
     session_token = req.headers.get("X-Amz-Security-Token")
@@ -307,7 +309,7 @@ def _verify_sigv4_query(req: Any) -> Principal | None:
         if header.lower() == 'expect' and val == "":
             val = "100-continue"
         val = " ".join(val.split())
-        canonical_headers_parts.append(f"{header}:{val}\n")
+        canonical_headers_parts.append(f"{header.lower()}:{val}\n")
     canonical_headers = "".join(canonical_headers_parts)
 
     payload_hash = "UNSIGNED-PAYLOAD"
@@ -589,6 +591,7 @@ def _generate_presigned_url(
     bucket_name: str,
     object_key: str,
     expires_in: int,
+    api_base_url: str | None = None,
 ) -> str:
     region = current_app.config["AWS_REGION"]
     service = current_app.config["AWS_SERVICE"]
@@ -609,7 +612,7 @@ def _generate_presigned_url(
     }
     canonical_query = _encode_query_params(query_params)
 
-    api_base = current_app.config.get("API_BASE_URL")
+    api_base = api_base_url or current_app.config.get("API_BASE_URL")
     if api_base:
         parsed = urlparse(api_base)
         host = parsed.netloc
@@ -661,11 +664,11 @@ def _strip_ns(tag: str | None) -> str:
 def _find_element(parent: Element, name: str) -> Optional[Element]:
-    """Find a child element by name, trying both namespaced and non-namespaced variants.
+    """Find a child element by name, trying S3 namespace then no namespace.
 
     This handles XML documents that may or may not include namespace prefixes.
     """
-    el = parent.find(f"{{*}}{name}")
+    el = parent.find(f"{{{S3_NS}}}{name}")
     if el is None:
         el = parent.find(name)
     return el
@@ -689,7 +692,7 @@ def _parse_tagging_document(payload: bytes) -> list[dict[str, str]]:
         raise ValueError("Malformed XML") from exc
     if _strip_ns(root.tag) != "Tagging":
         raise ValueError("Root element must be Tagging")
-    tagset = root.find(".//{*}TagSet")
+    tagset = root.find(".//{http://s3.amazonaws.com/doc/2006-03-01/}TagSet")
     if tagset is None:
         tagset = root.find("TagSet")
     if tagset is None:
@@ -857,13 +860,13 @@ def _parse_encryption_document(payload: bytes) -> dict[str, Any]:
             bucket_key_el = child
         if default_el is None:
             continue
-        algo_el = default_el.find("{*}SSEAlgorithm")
+        algo_el = default_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}SSEAlgorithm")
         if algo_el is None:
             algo_el = default_el.find("SSEAlgorithm")
         if algo_el is None or not (algo_el.text or "").strip():
             raise ValueError("SSEAlgorithm is required")
         rule: dict[str, Any] = {"SSEAlgorithm": algo_el.text.strip()}
-        kms_el = default_el.find("{*}KMSMasterKeyID")
+        kms_el = default_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}KMSMasterKeyID")
         if kms_el is None:
             kms_el = default_el.find("KMSMasterKeyID")
         if kms_el is not None and kms_el.text:
@@ -939,6 +942,7 @@ def _maybe_handle_bucket_subresource(bucket_name: str) -> Response | None:
"notification": _bucket_notification_handler, "notification": _bucket_notification_handler,
"logging": _bucket_logging_handler, "logging": _bucket_logging_handler,
"uploads": _bucket_uploads_handler, "uploads": _bucket_uploads_handler,
"policy": _bucket_policy_handler,
} }
requested = [key for key in handlers if key in request.args] requested = [key for key in handlers if key in request.args]
if not requested: if not requested:
@@ -966,6 +970,9 @@ def _bucket_versioning_handler(bucket_name: str) -> Response:
     storage = _storage()
 
     if request.method == "PUT":
+        ct_error = _require_xml_content_type()
+        if ct_error:
+            return ct_error
         payload = request.get_data(cache=False) or b""
         if not payload.strip():
             return _error_response("MalformedXML", "Request body is required", 400)
@@ -975,7 +982,7 @@ def _bucket_versioning_handler(bucket_name: str) -> Response:
             return _error_response("MalformedXML", "Unable to parse XML document", 400)
         if _strip_ns(root.tag) != "VersioningConfiguration":
             return _error_response("MalformedXML", "Root element must be VersioningConfiguration", 400)
-        status_el = root.find("{*}Status")
+        status_el = root.find("{http://s3.amazonaws.com/doc/2006-03-01/}Status")
         if status_el is None:
             status_el = root.find("Status")
         status = (status_el.text or "").strip() if status_el is not None else ""
@@ -1024,6 +1031,9 @@ def _bucket_tagging_handler(bucket_name: str) -> Response:
current_app.logger.info("Bucket tags deleted", extra={"bucket": bucket_name}) current_app.logger.info("Bucket tags deleted", extra={"bucket": bucket_name})
return Response(status=204) return Response(status=204)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b"" payload = request.get_data(cache=False) or b""
try: try:
tags = _parse_tagging_document(payload) tags = _parse_tagging_document(payload)
@@ -1079,6 +1089,9 @@ def _object_tagging_handler(bucket_name: str, object_key: str) -> Response:
current_app.logger.info("Object tags deleted", extra={"bucket": bucket_name, "key": object_key}) current_app.logger.info("Object tags deleted", extra={"bucket": bucket_name, "key": object_key})
return Response(status=204) return Response(status=204)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b"" payload = request.get_data(cache=False) or b""
try: try:
tags = _parse_tagging_document(payload) tags = _parse_tagging_document(payload)
@@ -1148,6 +1161,9 @@ def _bucket_cors_handler(bucket_name: str) -> Response:
current_app.logger.info("Bucket CORS deleted", extra={"bucket": bucket_name}) current_app.logger.info("Bucket CORS deleted", extra={"bucket": bucket_name})
return Response(status=204) return Response(status=204)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b"" payload = request.get_data(cache=False) or b""
if not payload.strip(): if not payload.strip():
try: try:
@@ -1194,6 +1210,9 @@ def _bucket_encryption_handler(bucket_name: str) -> Response:
             404,
         )
         return _xml_response(_render_encryption_document(config))
 
+    ct_error = _require_xml_content_type()
+    if ct_error:
+        return ct_error
     payload = request.get_data(cache=False) or b""
     if not payload.strip():
         try:
@@ -1366,7 +1385,7 @@ def _bucket_list_versions_handler(bucket_name: str) -> Response:
SubElement(ver_elem, "Key").text = obj.key SubElement(ver_elem, "Key").text = obj.key
SubElement(ver_elem, "VersionId").text = v.get("version_id", "unknown") SubElement(ver_elem, "VersionId").text = v.get("version_id", "unknown")
SubElement(ver_elem, "IsLatest").text = "false" SubElement(ver_elem, "IsLatest").text = "false"
SubElement(ver_elem, "LastModified").text = v.get("archived_at", "") SubElement(ver_elem, "LastModified").text = v.get("archived_at") or "1970-01-01T00:00:00Z"
SubElement(ver_elem, "ETag").text = f'"{v.get("etag", "")}"' SubElement(ver_elem, "ETag").text = f'"{v.get("etag", "")}"'
SubElement(ver_elem, "Size").text = str(v.get("size", 0)) SubElement(ver_elem, "Size").text = str(v.get("size", 0))
SubElement(ver_elem, "StorageClass").text = "STANDARD" SubElement(ver_elem, "StorageClass").text = "STANDARD"
@@ -1415,6 +1434,9 @@ def _bucket_lifecycle_handler(bucket_name: str) -> Response:
current_app.logger.info("Bucket lifecycle deleted", extra={"bucket": bucket_name}) current_app.logger.info("Bucket lifecycle deleted", extra={"bucket": bucket_name})
return Response(status=204) return Response(status=204)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b"" payload = request.get_data(cache=False) or b""
if not payload.strip(): if not payload.strip():
return _error_response("MalformedXML", "Request body is required", 400) return _error_response("MalformedXML", "Request body is required", 400)
@@ -1479,49 +1501,49 @@ def _parse_lifecycle_config(payload: bytes) -> list:
raise ValueError("Root element must be LifecycleConfiguration") raise ValueError("Root element must be LifecycleConfiguration")
rules = [] rules = []
for rule_el in root.findall("{*}Rule") or root.findall("Rule"): for rule_el in root.findall("{http://s3.amazonaws.com/doc/2006-03-01/}Rule") or root.findall("Rule"):
rule: dict = {} rule: dict = {}
id_el = rule_el.find("{*}ID") or rule_el.find("ID") id_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}ID") or rule_el.find("ID")
if id_el is not None and id_el.text: if id_el is not None and id_el.text:
rule["ID"] = id_el.text.strip() rule["ID"] = id_el.text.strip()
filter_el = rule_el.find("{*}Filter") or rule_el.find("Filter") filter_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Filter") or rule_el.find("Filter")
if filter_el is not None: if filter_el is not None:
prefix_el = filter_el.find("{*}Prefix") or filter_el.find("Prefix") prefix_el = filter_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Prefix") or filter_el.find("Prefix")
if prefix_el is not None and prefix_el.text: if prefix_el is not None and prefix_el.text:
rule["Prefix"] = prefix_el.text rule["Prefix"] = prefix_el.text
if "Prefix" not in rule: if "Prefix" not in rule:
prefix_el = rule_el.find("{*}Prefix") or rule_el.find("Prefix") prefix_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Prefix") or rule_el.find("Prefix")
if prefix_el is not None: if prefix_el is not None:
rule["Prefix"] = prefix_el.text or "" rule["Prefix"] = prefix_el.text or ""
status_el = rule_el.find("{*}Status") or rule_el.find("Status") status_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Status") or rule_el.find("Status")
rule["Status"] = (status_el.text or "Enabled").strip() if status_el is not None else "Enabled" rule["Status"] = (status_el.text or "Enabled").strip() if status_el is not None else "Enabled"
exp_el = rule_el.find("{*}Expiration") or rule_el.find("Expiration") exp_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Expiration") or rule_el.find("Expiration")
if exp_el is not None: if exp_el is not None:
expiration: dict = {} expiration: dict = {}
days_el = exp_el.find("{*}Days") or exp_el.find("Days") days_el = exp_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Days") or exp_el.find("Days")
if days_el is not None and days_el.text: if days_el is not None and days_el.text:
days_val = int(days_el.text.strip()) days_val = int(days_el.text.strip())
if days_val <= 0: if days_val <= 0:
raise ValueError("Expiration Days must be a positive integer") raise ValueError("Expiration Days must be a positive integer")
expiration["Days"] = days_val expiration["Days"] = days_val
date_el = exp_el.find("{*}Date") or exp_el.find("Date") date_el = exp_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}Date") or exp_el.find("Date")
if date_el is not None and date_el.text: if date_el is not None and date_el.text:
expiration["Date"] = date_el.text.strip() expiration["Date"] = date_el.text.strip()
eodm_el = exp_el.find("{*}ExpiredObjectDeleteMarker") or exp_el.find("ExpiredObjectDeleteMarker") eodm_el = exp_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}ExpiredObjectDeleteMarker") or exp_el.find("ExpiredObjectDeleteMarker")
if eodm_el is not None and (eodm_el.text or "").strip().lower() in {"true", "1"}: if eodm_el is not None and (eodm_el.text or "").strip().lower() in {"true", "1"}:
expiration["ExpiredObjectDeleteMarker"] = True expiration["ExpiredObjectDeleteMarker"] = True
if expiration: if expiration:
rule["Expiration"] = expiration rule["Expiration"] = expiration
nve_el = rule_el.find("{*}NoncurrentVersionExpiration") or rule_el.find("NoncurrentVersionExpiration") nve_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}NoncurrentVersionExpiration") or rule_el.find("NoncurrentVersionExpiration")
if nve_el is not None: if nve_el is not None:
nve: dict = {} nve: dict = {}
days_el = nve_el.find("{*}NoncurrentDays") or nve_el.find("NoncurrentDays") days_el = nve_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}NoncurrentDays") or nve_el.find("NoncurrentDays")
if days_el is not None and days_el.text: if days_el is not None and days_el.text:
noncurrent_days = int(days_el.text.strip()) noncurrent_days = int(days_el.text.strip())
if noncurrent_days <= 0: if noncurrent_days <= 0:
@@ -1530,10 +1552,10 @@ def _parse_lifecycle_config(payload: bytes) -> list:
             if nve:
                 rule["NoncurrentVersionExpiration"] = nve
 
-        aimu_el = rule_el.find("{*}AbortIncompleteMultipartUpload") or rule_el.find("AbortIncompleteMultipartUpload")
+        aimu_el = rule_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}AbortIncompleteMultipartUpload") or rule_el.find("AbortIncompleteMultipartUpload")
         if aimu_el is not None:
             aimu: dict = {}
-            days_el = aimu_el.find("{*}DaysAfterInitiation") or aimu_el.find("DaysAfterInitiation")
+            days_el = aimu_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}DaysAfterInitiation") or aimu_el.find("DaysAfterInitiation")
             if days_el is not None and days_el.text:
                 days_after = int(days_el.text.strip())
                 if days_after <= 0:
@@ -1649,6 +1671,9 @@ def _bucket_object_lock_handler(bucket_name: str) -> Response:
SubElement(root, "ObjectLockEnabled").text = "Enabled" if config.enabled else "Disabled" SubElement(root, "ObjectLockEnabled").text = "Enabled" if config.enabled else "Disabled"
return _xml_response(root) return _xml_response(root)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b"" payload = request.get_data(cache=False) or b""
if not payload.strip(): if not payload.strip():
return _error_response("MalformedXML", "Request body is required", 400) return _error_response("MalformedXML", "Request body is required", 400)
@@ -1658,7 +1683,7 @@ def _bucket_object_lock_handler(bucket_name: str) -> Response:
     except ParseError:
         return _error_response("MalformedXML", "Unable to parse XML document", 400)
 
-    enabled_el = root.find("{*}ObjectLockEnabled") or root.find("ObjectLockEnabled")
+    enabled_el = root.find("{http://s3.amazonaws.com/doc/2006-03-01/}ObjectLockEnabled") or root.find("ObjectLockEnabled")
     enabled = (enabled_el.text or "").strip() == "Enabled" if enabled_el is not None else False
     config = ObjectLockConfig(enabled=enabled)
@@ -1714,6 +1739,9 @@ def _bucket_notification_handler(bucket_name: str) -> Response:
current_app.logger.info("Bucket notifications deleted", extra={"bucket": bucket_name}) current_app.logger.info("Bucket notifications deleted", extra={"bucket": bucket_name})
return Response(status=204) return Response(status=204)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b"" payload = request.get_data(cache=False) or b""
if not payload.strip(): if not payload.strip():
notification_service.delete_bucket_notifications(bucket_name) notification_service.delete_bucket_notifications(bucket_name)
@@ -1725,9 +1753,9 @@ def _bucket_notification_handler(bucket_name: str) -> Response:
         return _error_response("MalformedXML", "Unable to parse XML document", 400)
 
     configs: list[NotificationConfiguration] = []
-    for webhook_el in root.findall("{*}WebhookConfiguration") or root.findall("WebhookConfiguration"):
+    for webhook_el in root.findall("{http://s3.amazonaws.com/doc/2006-03-01/}WebhookConfiguration") or root.findall("WebhookConfiguration"):
         config_id = _find_element_text(webhook_el, "Id") or uuid.uuid4().hex
-        events = [el.text for el in webhook_el.findall("{*}Event") or webhook_el.findall("Event") if el.text]
+        events = [el.text for el in webhook_el.findall("{http://s3.amazonaws.com/doc/2006-03-01/}Event") or webhook_el.findall("Event") if el.text]
 
         dest_el = _find_element(webhook_el, "Destination")
         url = _find_element_text(dest_el, "Url") if dest_el else ""
@@ -1740,7 +1768,7 @@ def _bucket_notification_handler(bucket_name: str) -> Response:
         if filter_el:
             key_el = _find_element(filter_el, "S3Key")
             if key_el:
-                for rule_el in key_el.findall("{*}FilterRule") or key_el.findall("FilterRule"):
+                for rule_el in key_el.findall("{http://s3.amazonaws.com/doc/2006-03-01/}FilterRule") or key_el.findall("FilterRule"):
                     name = _find_element_text(rule_el, "Name")
                     value = _find_element_text(rule_el, "Value")
                     if name == "prefix":
@@ -1793,6 +1821,9 @@ def _bucket_logging_handler(bucket_name: str) -> Response:
current_app.logger.info("Bucket logging deleted", extra={"bucket": bucket_name}) current_app.logger.info("Bucket logging deleted", extra={"bucket": bucket_name})
return Response(status=204) return Response(status=204)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b"" payload = request.get_data(cache=False) or b""
if not payload.strip(): if not payload.strip():
logging_service.delete_bucket_logging(bucket_name) logging_service.delete_bucket_logging(bucket_name)
@@ -1930,6 +1961,9 @@ def _object_retention_handler(bucket_name: str, object_key: str) -> Response:
SubElement(root, "RetainUntilDate").text = retention.retain_until_date.strftime("%Y-%m-%dT%H:%M:%S.000Z") SubElement(root, "RetainUntilDate").text = retention.retain_until_date.strftime("%Y-%m-%dT%H:%M:%S.000Z")
return _xml_response(root) return _xml_response(root)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b"" payload = request.get_data(cache=False) or b""
if not payload.strip(): if not payload.strip():
return _error_response("MalformedXML", "Request body is required", 400) return _error_response("MalformedXML", "Request body is required", 400)
@@ -1999,6 +2033,9 @@ def _object_legal_hold_handler(bucket_name: str, object_key: str) -> Response:
SubElement(root, "Status").text = "ON" if enabled else "OFF" SubElement(root, "Status").text = "ON" if enabled else "OFF"
return _xml_response(root) return _xml_response(root)
ct_error = _require_xml_content_type()
if ct_error:
return ct_error
payload = request.get_data(cache=False) or b"" payload = request.get_data(cache=False) or b""
if not payload.strip(): if not payload.strip():
return _error_response("MalformedXML", "Request body is required", 400) return _error_response("MalformedXML", "Request body is required", 400)
@@ -2030,6 +2067,9 @@ def _bulk_delete_handler(bucket_name: str) -> Response:
     except IamError as exc:
         return _error_response("AccessDenied", str(exc), 403)
 
+    ct_error = _require_xml_content_type()
+    if ct_error:
+        return ct_error
     payload = request.get_data(cache=False) or b""
     if not payload.strip():
         return _error_response("MalformedXML", "Request body must include a Delete specification", 400)
@@ -2605,9 +2645,9 @@ def _list_parts(bucket_name: str, object_key: str) -> Response:
     return _xml_response(root)
 
-@s3_api_bp.route("/bucket-policy/<bucket_name>", methods=["GET", "PUT", "DELETE"])
-@limiter.limit("30 per minute")
-def bucket_policy_handler(bucket_name: str) -> Response:
+def _bucket_policy_handler(bucket_name: str) -> Response:
+    if request.method not in {"GET", "PUT", "DELETE"}:
+        return _method_not_allowed(["GET", "PUT", "DELETE"])
     principal, error = _require_principal()
     if error:
         return error
@@ -2639,51 +2679,6 @@ def bucket_policy_handler(bucket_name: str) -> Response:
     return Response(status=204)
 
-
-@s3_api_bp.post("/presign/<bucket_name>/<path:object_key>")
-@limiter.limit("45 per minute")
-def presign_object(bucket_name: str, object_key: str):
-    payload = request.get_json(silent=True) or {}
-    method = str(payload.get("method", "GET")).upper()
-    allowed_methods = {"GET", "PUT", "DELETE"}
-    if method not in allowed_methods:
-        return _error_response("InvalidRequest", "Method must be GET, PUT, or DELETE", 400)
-    try:
-        expires = int(payload.get("expires_in", 900))
-    except (TypeError, ValueError):
-        return _error_response("InvalidRequest", "expires_in must be an integer", 400)
-    expires = max(1, min(expires, 7 * 24 * 3600))
-    action = "read" if method == "GET" else ("delete" if method == "DELETE" else "write")
-    principal, error = _require_principal()
-    if error:
-        return error
-    try:
-        _authorize_action(principal, bucket_name, action, object_key=object_key)
-    except IamError as exc:
-        return _error_response("AccessDenied", str(exc), 403)
-    storage = _storage()
-    if not storage.bucket_exists(bucket_name):
-        return _error_response("NoSuchBucket", "Bucket does not exist", 404)
-    if action != "write":
-        try:
-            storage.get_object_path(bucket_name, object_key)
-        except StorageError:
-            return _error_response("NoSuchKey", "Object not found", 404)
-    secret = _iam().secret_for_key(principal.access_key)
-    url = _generate_presigned_url(
-        principal=principal,
-        secret_key=secret,
-        method=method,
-        bucket_name=bucket_name,
-        object_key=object_key,
-        expires_in=expires,
-    )
-    current_app.logger.info(
-        "Presigned URL generated",
-        extra={"bucket": bucket_name, "key": object_key, "method": method},
-    )
-    return jsonify({"url": url, "method": method, "expires_in": expires})
-
 
 @s3_api_bp.route("/<bucket_name>", methods=["HEAD"])
 @limiter.limit("100 per minute")
 def head_bucket(bucket_name: str) -> Response:
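With the custom JSON `/presign/...` endpoint removed (the UI imports `_generate_presigned_url` directly, per the app/ui.py hunk below), external clients should mint presigned URLs the standard SigV4 way. A sketch with boto3; endpoint, bucket, and credentials are placeholders:

import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:5000",   # hypothetical local API address
    aws_access_key_id="TESTKEY",
    aws_secret_access_key="TESTSECRET",
    region_name="us-east-1",
)
url = s3.generate_presigned_url(
    "get_object",
    Params={"Bucket": "demo", "Key": "report.pdf"},
    ExpiresIn=900,
)
print(url)  # shareable for 15 minutes; no server-side presign endpoint involved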
@@ -3003,6 +2998,9 @@ def _complete_multipart_upload(bucket_name: str, object_key: str) -> Response:
     if not upload_id:
         return _error_response("InvalidArgument", "uploadId is required", 400)
 
+    ct_error = _require_xml_content_type()
+    if ct_error:
+        return ct_error
     payload = request.get_data(cache=False) or b""
     try:
         root = fromstring(payload)
@@ -3016,11 +3014,11 @@ def _complete_multipart_upload(bucket_name: str, object_key: str) -> Response:
     for part_el in list(root):
         if _strip_ns(part_el.tag) != "Part":
             continue
-        part_number_el = part_el.find("{*}PartNumber")
+        part_number_el = part_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}PartNumber")
         if part_number_el is None:
             part_number_el = part_el.find("PartNumber")
-        etag_el = part_el.find("{*}ETag")
+        etag_el = part_el.find("{http://s3.amazonaws.com/doc/2006-03-01/}ETag")
         if etag_el is None:
             etag_el = part_el.find("ETag")

View File

@@ -774,7 +774,7 @@ class ObjectStorage:
                 continue
             payload.setdefault("version_id", meta_file.stem)
             versions.append(payload)
-        versions.sort(key=lambda item: item.get("archived_at", ""), reverse=True)
+        versions.sort(key=lambda item: item.get("archived_at") or "1970-01-01T00:00:00Z", reverse=True)
         return versions
 
     def restore_object_version(self, bucket_name: str, object_key: str, version_id: str) -> ObjectMeta:
def restore_object_version(self, bucket_name: str, object_key: str, version_id: str) -> ObjectMeta: def restore_object_version(self, bucket_name: str, object_key: str, version_id: str) -> ObjectMeta:
@@ -866,7 +866,7 @@ class ObjectStorage:
             except (OSError, json.JSONDecodeError):
                 payload = {}
             version_id = payload.get("version_id") or meta_file.stem
-            archived_at = payload.get("archived_at") or ""
+            archived_at = payload.get("archived_at") or "1970-01-01T00:00:00Z"
             size = int(payload.get("size") or 0)
             reason = payload.get("reason") or "update"
             record = aggregated.setdefault(
@@ -1773,11 +1773,9 @@ class ObjectStorage:
raise StorageError("Object key contains null bytes") raise StorageError("Object key contains null bytes")
if object_key.startswith(("/", "\\")): if object_key.startswith(("/", "\\")):
raise StorageError("Object key cannot start with a slash") raise StorageError("Object key cannot start with a slash")
normalized = unicodedata.normalize("NFC", object_key) object_key = unicodedata.normalize("NFC", object_key)
if normalized != object_key:
raise StorageError("Object key must use normalized Unicode")
candidate = Path(normalized) candidate = Path(object_key)
if ".." in candidate.parts: if ".." in candidate.parts:
raise StorageError("Object key contains parent directory references") raise StorageError("Object key contains parent directory references")
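The behavioral change here: keys that arrive in a decomposed Unicode form are now silently normalized to NFC instead of being rejected. A small illustration:

import unicodedata

decomposed = "cafe\u0301.txt"            # 'e' + combining acute accent (NFD form)
normalized = unicodedata.normalize("NFC", decomposed)
print(normalized == "café.txt")          # True: previously this key raised StorageError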

app/ui.py

@@ -5,8 +5,11 @@ import json
import uuid
import psutil
import shutil
from datetime import datetime, timezone as dt_timezone
from pathlib import Path
from typing import Any
from urllib.parse import quote, urlparse
from zoneinfo import ZoneInfo
import boto3
import requests
@@ -33,12 +36,56 @@ from .extensions import limiter, csrf
from .iam import IamError
from .kms import KMSManager
from .replication import ReplicationManager, ReplicationRule
from .s3_api import _generate_presigned_url
from .secret_store import EphemeralSecretStore
from .storage import ObjectStorage, StorageError

ui_bp = Blueprint("ui", __name__, template_folder="../templates", url_prefix="/ui")
def _convert_to_display_tz(dt: datetime, display_tz: str | None = None) -> datetime:
"""Convert a datetime to the configured display timezone.
Args:
dt: The datetime to convert
display_tz: Optional timezone string. If not provided, reads from current_app.config.
"""
if display_tz is None:
display_tz = current_app.config.get("DISPLAY_TIMEZONE", "UTC")
if display_tz and display_tz != "UTC":
try:
tz = ZoneInfo(display_tz)
if dt.tzinfo is None:
dt = dt.replace(tzinfo=dt_timezone.utc)
dt = dt.astimezone(tz)
except (KeyError, ValueError):
pass
return dt
def _format_datetime_display(dt: datetime, display_tz: str | None = None) -> str:
"""Format a datetime for display using the configured timezone.
Args:
dt: The datetime to format
display_tz: Optional timezone string. If not provided, reads from current_app.config.
"""
dt = _convert_to_display_tz(dt, display_tz)
tz_abbr = dt.strftime("%Z") or "UTC"
return f"{dt.strftime('%b %d, %Y %H:%M')} ({tz_abbr})"
def _format_datetime_iso(dt: datetime, display_tz: str | None = None) -> str:
"""Format a datetime as ISO format using the configured timezone.
Args:
dt: The datetime to format
display_tz: Optional timezone string. If not provided, reads from current_app.config.
"""
dt = _convert_to_display_tz(dt, display_tz)
return dt.isoformat()
def _storage() -> ObjectStorage:
return current_app.extensions["object_storage"]
@@ -94,6 +141,10 @@ def _acl() -> AclService:
return current_app.extensions["acl"] return current_app.extensions["acl"]
def _operation_metrics():
return current_app.extensions.get("operation_metrics")
def _format_bytes(num: int) -> str:
step = 1024
units = ["B", "KB", "MB", "GB", "TB", "PB"]
@@ -107,6 +158,69 @@ def _format_bytes(num: int) -> str:
return f"{value:.1f} PB" return f"{value:.1f} PB"
_metrics_last_save_time: float = 0.0
def _get_metrics_history_path() -> Path:
storage_root = Path(current_app.config["STORAGE_ROOT"])
return storage_root / ".myfsio.sys" / "config" / "metrics_history.json"
def _load_metrics_history() -> dict:
path = _get_metrics_history_path()
if not path.exists():
return {"history": []}
try:
return json.loads(path.read_text(encoding="utf-8"))
except (json.JSONDecodeError, OSError):
return {"history": []}
def _save_metrics_snapshot(cpu_percent: float, memory_percent: float, disk_percent: float, storage_bytes: int) -> None:
global _metrics_last_save_time
if not current_app.config.get("METRICS_HISTORY_ENABLED", False):
return
import time
from datetime import datetime, timezone
interval_minutes = current_app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5)
now_ts = time.time()
if now_ts - _metrics_last_save_time < interval_minutes * 60:
return
path = _get_metrics_history_path()
path.parent.mkdir(parents=True, exist_ok=True)
data = _load_metrics_history()
history = data.get("history", [])
retention_hours = current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24)
now = datetime.now(timezone.utc)
snapshot = {
"timestamp": now.strftime("%Y-%m-%dT%H:%M:%SZ"),
"cpu_percent": round(cpu_percent, 2),
"memory_percent": round(memory_percent, 2),
"disk_percent": round(disk_percent, 2),
"storage_bytes": storage_bytes,
}
history.append(snapshot)
cutoff = now.timestamp() - (retention_hours * 3600)
history = [
h for h in history
if datetime.fromisoformat(h["timestamp"].replace("Z", "+00:00")).timestamp() > cutoff
]
data["history"] = history
try:
path.write_text(json.dumps(data, indent=2), encoding="utf-8")
_metrics_last_save_time = now_ts
except OSError:
pass
def _friendly_error_message(exc: Exception) -> str:
message = str(exc) or "An unexpected error occurred"
if isinstance(exc, IamError):
@@ -365,6 +479,23 @@ def bucket_detail(bucket_name: str):
can_edit_policy = True
except IamError:
can_edit_policy = False
can_manage_lifecycle = False
if principal:
try:
_iam().authorize(principal, bucket_name, "lifecycle")
can_manage_lifecycle = True
except IamError:
can_manage_lifecycle = False
can_manage_cors = False
if principal:
try:
_iam().authorize(principal, bucket_name, "cors")
can_manage_cors = True
except IamError:
can_manage_cors = False
try:
versioning_enabled = storage.is_versioning_enabled(bucket_name)
except StorageError:
@@ -436,6 +567,8 @@ def bucket_detail(bucket_name: str):
bucket_policy_text=policy_text,
bucket_policy=bucket_policy,
can_edit_policy=can_edit_policy,
can_manage_lifecycle=can_manage_lifecycle,
can_manage_cors=can_manage_cors,
can_manage_versioning=can_manage_versioning,
can_manage_replication=can_manage_replication,
can_manage_encryption=can_manage_encryption,
@@ -492,6 +625,7 @@ def list_bucket_objects(bucket_name: str):
tags_template = url_for("ui.object_tags", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
copy_template = url_for("ui.copy_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
move_template = url_for("ui.move_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
metadata_template = url_for("ui.object_metadata", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
objects_data = []
for obj in result.objects:
@@ -499,7 +633,8 @@ def list_bucket_objects(bucket_name: str):
"key": obj.key, "key": obj.key,
"size": obj.size, "size": obj.size,
"last_modified": obj.last_modified.isoformat(), "last_modified": obj.last_modified.isoformat(),
"last_modified_display": obj.last_modified.strftime("%b %d, %Y %H:%M"), "last_modified_display": _format_datetime_display(obj.last_modified),
"last_modified_iso": _format_datetime_iso(obj.last_modified),
"etag": obj.etag, "etag": obj.etag,
}) })
@@ -519,6 +654,7 @@ def list_bucket_objects(bucket_name: str):
"tags": tags_template, "tags": tags_template,
"copy": copy_template, "copy": copy_template,
"move": move_template, "move": move_template,
"metadata": metadata_template,
},
})
@@ -552,6 +688,8 @@ def stream_bucket_objects(bucket_name: str):
tags_template = url_for("ui.object_tags", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
copy_template = url_for("ui.copy_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
move_template = url_for("ui.move_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
metadata_template = url_for("ui.object_metadata", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
display_tz = current_app.config.get("DISPLAY_TIMEZONE", "UTC")
def generate():
meta_line = json.dumps({
@@ -567,6 +705,7 @@ def stream_bucket_objects(bucket_name: str):
"tags": tags_template, "tags": tags_template,
"copy": copy_template, "copy": copy_template,
"move": move_template, "move": move_template,
"metadata": metadata_template,
},
}) + "\n"
yield meta_line
@@ -597,7 +736,8 @@ def stream_bucket_objects(bucket_name: str):
"key": obj.key, "key": obj.key,
"size": obj.size, "size": obj.size,
"last_modified": obj.last_modified.isoformat(), "last_modified": obj.last_modified.isoformat(),
"last_modified_display": obj.last_modified.strftime("%b %d, %Y %H:%M"), "last_modified_display": _format_datetime_display(obj.last_modified, display_tz),
"last_modified_iso": _format_datetime_iso(obj.last_modified, display_tz),
"etag": obj.etag, "etag": obj.etag,
}) + "\n" }) + "\n"
@@ -1000,42 +1140,57 @@ def object_presign(bucket_name: str, object_key: str):
principal = _current_principal()
payload = request.get_json(silent=True) or {}
method = str(payload.get("method", "GET")).upper()
allowed_methods = {"GET", "PUT", "DELETE"}
if method not in allowed_methods:
return jsonify({"error": "Method must be GET, PUT, or DELETE"}), 400
action = "read" if method == "GET" else ("delete" if method == "DELETE" else "write") action = "read" if method == "GET" else ("delete" if method == "DELETE" else "write")
try: try:
_authorize_ui(principal, bucket_name, action, object_key=object_key) _authorize_ui(principal, bucket_name, action, object_key=object_key)
except IamError as exc: except IamError as exc:
return jsonify({"error": str(exc)}), 403 return jsonify({"error": str(exc)}), 403
try:
expires = int(payload.get("expires_in", 900))
except (TypeError, ValueError):
return jsonify({"error": "expires_in must be an integer"}), 400
expires = max(1, min(expires, 7 * 24 * 3600))
storage = _storage()
if not storage.bucket_exists(bucket_name):
return jsonify({"error": "Bucket does not exist"}), 404
if action != "write":
try:
storage.get_object_path(bucket_name, object_key)
except StorageError:
return jsonify({"error": "Object not found"}), 404
secret = _iam().secret_for_key(principal.access_key)
api_base = current_app.config.get("API_BASE_URL") or "http://127.0.0.1:5000"
-api_base = api_base.rstrip("/")
-encoded_key = quote(object_key, safe="/")
-url = f"{api_base}/presign/{bucket_name}/{encoded_key}"
url = _generate_presigned_url(
principal=principal,
secret_key=secret,
method=method,
bucket_name=bucket_name,
object_key=object_key,
expires_in=expires,
api_base_url=api_base,
)
current_app.logger.info(
"Presigned URL generated",
extra={"bucket": bucket_name, "key": object_key, "method": method},
)
return jsonify({"url": url, "method": method, "expires_in": expires})
-parsed_api = urlparse(api_base)
-headers = _api_headers()
-headers["X-Forwarded-Host"] = parsed_api.netloc or "127.0.0.1:5000"
-headers["X-Forwarded-Proto"] = parsed_api.scheme or "http"
-headers["X-Forwarded-For"] = request.remote_addr or "127.0.0.1"
-try:
-response = requests.post(url, headers=headers, json=payload, timeout=5)
-except requests.RequestException as exc:
-return jsonify({"error": f"API unavailable: {exc}"}), 502
-try:
-body = response.json()
-except ValueError:
-text = response.text or ""
-if text.strip().startswith("<"):
-import xml.etree.ElementTree as ET
-try:
-root = ET.fromstring(text)
-message = root.findtext(".//Message") or root.findtext(".//Code") or "Unknown S3 error"
-body = {"error": message}
-except ET.ParseError:
-body = {"error": text or "API returned an empty response"}
-else:
-body = {"error": text or "API returned an empty response"}
-return jsonify(body), response.status_code

@ui_bp.get("/buckets/<bucket_name>/objects/<path:object_key>/metadata")
def object_metadata(bucket_name: str, object_key: str):
principal = _current_principal()
storage = _storage()
try:
_authorize_ui(principal, bucket_name, "read", object_key=object_key)
metadata = storage.get_object_metadata(bucket_name, object_key)
return jsonify({"metadata": metadata})
except IamError as exc:
return jsonify({"error": str(exc)}), 403
except StorageError as exc:
return jsonify({"error": str(exc)}), 404
@ui_bp.get("/buckets/<bucket_name>/objects/<path:object_key>/versions") @ui_bp.get("/buckets/<bucket_name>/objects/<path:object_key>/versions")
@@ -2022,18 +2177,18 @@ def metrics_dashboard():
return render_template(
"metrics.html",
principal=principal,
-cpu_percent=cpu_percent,
cpu_percent=round(cpu_percent, 2),
memory={
"total": _format_bytes(memory.total),
"available": _format_bytes(memory.available),
"used": _format_bytes(memory.used),
-"percent": memory.percent,
"percent": round(memory.percent, 2),
},
disk={
"total": _format_bytes(disk.total),
"free": _format_bytes(disk.free),
"used": _format_bytes(disk.used),
-"percent": disk.percent,
"percent": round(disk.percent, 2),
},
app={
"buckets": total_buckets,
@@ -2043,7 +2198,9 @@ def metrics_dashboard():
"storage_raw": total_bytes_used, "storage_raw": total_bytes_used,
"version": APP_VERSION, "version": APP_VERSION,
"uptime_days": uptime_days, "uptime_days": uptime_days,
} },
metrics_history_enabled=current_app.config.get("METRICS_HISTORY_ENABLED", False),
operation_metrics_enabled=current_app.config.get("OPERATION_METRICS_ENABLED", False),
) )
@@ -2083,19 +2240,21 @@ def metrics_api():
uptime_seconds = time.time() - boot_time
uptime_days = int(uptime_seconds / 86400)
_save_metrics_snapshot(cpu_percent, memory.percent, disk.percent, total_bytes_used)
return jsonify({
-"cpu_percent": cpu_percent,
"cpu_percent": round(cpu_percent, 2),
"memory": {
"total": _format_bytes(memory.total),
"available": _format_bytes(memory.available),
"used": _format_bytes(memory.used),
-"percent": memory.percent,
"percent": round(memory.percent, 2),
},
"disk": {
"total": _format_bytes(disk.total),
"free": _format_bytes(disk.free),
"used": _format_bytes(disk.used),
-"percent": disk.percent,
"percent": round(disk.percent, 2),
},
"app": {
"buckets": total_buckets,
@@ -2108,11 +2267,124 @@ def metrics_api():
})
@ui_bp.route("/metrics/history")
def metrics_history():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:list_users")
except IamError:
return jsonify({"error": "Access denied"}), 403
if not current_app.config.get("METRICS_HISTORY_ENABLED", False):
return jsonify({"enabled": False, "history": []})
hours = request.args.get("hours", type=int)
if hours is None:
hours = current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24)
data = _load_metrics_history()
history = data.get("history", [])
if hours:
from datetime import datetime, timezone
cutoff = datetime.now(timezone.utc).timestamp() - (hours * 3600)
history = [
h for h in history
if datetime.fromisoformat(h["timestamp"].replace("Z", "+00:00")).timestamp() > cutoff
]
return jsonify({
"enabled": True,
"retention_hours": current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24),
"interval_minutes": current_app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5),
"history": history,
})
@ui_bp.route("/metrics/settings", methods=["GET", "PUT"])
def metrics_settings():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:list_users")
except IamError:
return jsonify({"error": "Access denied"}), 403
if request.method == "GET":
return jsonify({
"enabled": current_app.config.get("METRICS_HISTORY_ENABLED", False),
"retention_hours": current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24),
"interval_minutes": current_app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5),
})
data = request.get_json() or {}
if "enabled" in data:
current_app.config["METRICS_HISTORY_ENABLED"] = bool(data["enabled"])
if "retention_hours" in data:
current_app.config["METRICS_HISTORY_RETENTION_HOURS"] = max(1, int(data["retention_hours"]))
if "interval_minutes" in data:
current_app.config["METRICS_HISTORY_INTERVAL_MINUTES"] = max(1, int(data["interval_minutes"]))
return jsonify({
"enabled": current_app.config.get("METRICS_HISTORY_ENABLED", False),
"retention_hours": current_app.config.get("METRICS_HISTORY_RETENTION_HOURS", 24),
"interval_minutes": current_app.config.get("METRICS_HISTORY_INTERVAL_MINUTES", 5),
})
@ui_bp.get("/metrics/operations")
def metrics_operations():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:list_users")
except IamError:
return jsonify({"error": "Access denied"}), 403
collector = _operation_metrics()
if not collector:
return jsonify({
"enabled": False,
"stats": None,
})
return jsonify({
"enabled": True,
"stats": collector.get_current_stats(),
})
@ui_bp.get("/metrics/operations/history")
def metrics_operations_history():
principal = _current_principal()
try:
_iam().authorize(principal, None, "iam:list_users")
except IamError:
return jsonify({"error": "Access denied"}), 403
collector = _operation_metrics()
if not collector:
return jsonify({
"enabled": False,
"history": [],
})
hours = request.args.get("hours", type=int)
return jsonify({
"enabled": True,
"history": collector.get_history(hours),
"interval_minutes": current_app.config.get("OPERATION_METRICS_INTERVAL_MINUTES", 5),
})
@ui_bp.route("/buckets/<bucket_name>/lifecycle", methods=["GET", "POST", "DELETE"]) @ui_bp.route("/buckets/<bucket_name>/lifecycle", methods=["GET", "POST", "DELETE"])
def bucket_lifecycle(bucket_name: str): def bucket_lifecycle(bucket_name: str):
principal = _current_principal() principal = _current_principal()
try: try:
_authorize_ui(principal, bucket_name, "policy") _authorize_ui(principal, bucket_name, "lifecycle")
except IamError as exc: except IamError as exc:
return jsonify({"error": str(exc)}), 403 return jsonify({"error": str(exc)}), 403
@@ -2165,7 +2437,7 @@ def bucket_lifecycle(bucket_name: str):
def get_lifecycle_history(bucket_name: str):
principal = _current_principal()
try:
-_authorize_ui(principal, bucket_name, "policy")
_authorize_ui(principal, bucket_name, "lifecycle")
except IamError:
return jsonify({"error": "Access denied"}), 403
@@ -2196,7 +2468,7 @@ def get_lifecycle_history(bucket_name: str):
def bucket_cors(bucket_name: str):
principal = _current_principal()
try:
-_authorize_ui(principal, bucket_name, "policy")
_authorize_ui(principal, bucket_name, "cors")
except IamError as exc:
return jsonify({"error": str(exc)}), 403

docs.md

@@ -122,7 +122,7 @@ With these volumes attached you can rebuild/restart the container without losing
### Versioning

-The repo now tracks a human-friendly release string inside `app/version.py` (see the `APP_VERSION` constant). Edit that value whenever you cut a release. The constant flows into Flask as `APP_VERSION` and is exposed via `GET /healthz`, so you can monitor deployments or surface it in UIs.
The repo now tracks a human-friendly release string inside `app/version.py` (see the `APP_VERSION` constant). Edit that value whenever you cut a release. The constant flows into Flask as `APP_VERSION` and is exposed via `GET /myfsio/health`, so you can monitor deployments or surface it in UIs.

## 3. Configuration Reference
@@ -277,14 +277,14 @@ The application automatically trusts these headers to generate correct presigned
### Version Checking

The application version is tracked in `app/version.py` and exposed via:

-- **Health endpoint:** `GET /healthz` returns JSON with `version` field
- **Health endpoint:** `GET /myfsio/health` returns JSON with `version` field
- **Metrics dashboard:** Navigate to `/ui/metrics` to see the running version in the System Status card

To check your current version:

```bash
# API health endpoint
-curl http://localhost:5000/healthz
curl http://localhost:5000/myfsio/health

# Or inspect version.py directly
cat app/version.py | grep APP_VERSION
@@ -377,7 +377,7 @@ docker run -d \
myfsio:latest

# 5. Verify health
-curl http://localhost:5000/healthz
curl http://localhost:5000/myfsio/health
```

### Version Compatibility Checks
@@ -502,7 +502,7 @@ docker run -d \
myfsio:0.1.3 # specify previous version tag

# 3. Verify
-curl http://localhost:5000/healthz
curl http://localhost:5000/myfsio/health
```

#### Emergency Config Restore
@@ -528,7 +528,7 @@ For production environments requiring zero downtime:
APP_PORT=5001 UI_PORT=5101 python run.py &

# 2. Health check new instance
-curl http://localhost:5001/healthz
curl http://localhost:5001/myfsio/health

# 3. Update load balancer to route to new ports
@@ -544,7 +544,7 @@ After any update, verify functionality:
```bash
# 1. Health check
-curl http://localhost:5000/healthz
curl http://localhost:5000/myfsio/health

# 2. Login to UI
open http://localhost:5100/ui
@@ -588,7 +588,7 @@ APP_PID=$!
# Wait and health check
sleep 5
-if curl -f http://localhost:5000/healthz; then
if curl -f http://localhost:5000/myfsio/health; then
echo "Update successful!"
else
echo "Health check failed, rolling back..."
@@ -602,6 +602,10 @@ fi
## 4. Authentication & IAM ## 4. Authentication & IAM
MyFSIO implements a comprehensive Identity and Access Management (IAM) system that controls who can access your buckets and what operations they can perform. The system supports both simple action-based permissions and AWS-compatible policy syntax.
### Getting Started
1. On first boot, `data/.myfsio.sys/config/iam.json` is seeded with `localadmin / localadmin` that has wildcard access.
2. Sign into the UI using those credentials, then open **IAM**:
- **Create user**: supply a display name and optional JSON inline policy array.
@@ -609,48 +613,241 @@ fi
- **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
3. Wildcard action `iam:*` is supported for admin user definitions.

-The API expects every request to include `X-Access-Key` and `X-Secret-Key` headers. The UI persists them in the Flask session after login.

### Authentication
The API expects every request to include authentication headers. The UI persists them in the Flask session after login.
| Header | Description |
| --- | --- |
| `X-Access-Key` | The user's access key identifier |
| `X-Secret-Key` | The user's secret key for signing |
**Security Features:**
- **Lockout Protection**: After `AUTH_MAX_ATTEMPTS` (default: 5) failed login attempts, the account is locked for `AUTH_LOCKOUT_MINUTES` (default: 15 minutes).
- **Session Management**: UI sessions remain valid for `SESSION_LIFETIME_DAYS` (default: 30 days).
- **Hot Reload**: IAM configuration changes take effect immediately without restart.
### Permission Model
MyFSIO uses a two-layer permission model:
1. **IAM User Policies**: Define what a user can do across the system (stored in `iam.json`)
2. **Bucket Policies**: Define who can access a specific bucket (stored in `bucket_policies.json`)
Both layers are evaluated for each request. A user must have permission in their IAM policy AND the bucket policy must allow the action (or have no explicit deny).
### Available IAM Actions
#### S3 Actions (Bucket/Object Operations)
| Action | Description | AWS Aliases |
| --- | --- | --- |
| `list` | List buckets and objects | `s3:ListBucket`, `s3:ListAllMyBuckets`, `s3:ListBucketVersions`, `s3:ListMultipartUploads`, `s3:ListParts` |
-| `read` | Download objects | `s3:GetObject`, `s3:GetObjectVersion`, `s3:GetObjectTagging`, `s3:HeadObject`, `s3:HeadBucket` |
| `read` | Download objects, get metadata | `s3:GetObject`, `s3:GetObjectVersion`, `s3:GetObjectTagging`, `s3:GetObjectVersionTagging`, `s3:GetObjectAcl`, `s3:GetBucketVersioning`, `s3:HeadObject`, `s3:HeadBucket` |
-| `write` | Upload objects, create buckets | `s3:PutObject`, `s3:CreateBucket`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
| `write` | Upload objects, create buckets, manage tags | `s3:PutObject`, `s3:CreateBucket`, `s3:PutObjectTagging`, `s3:PutBucketVersioning`, `s3:CreateMultipartUpload`, `s3:UploadPart`, `s3:CompleteMultipartUpload`, `s3:AbortMultipartUpload`, `s3:CopyObject` |
-| `delete` | Remove objects and buckets | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteBucket` |
| `delete` | Remove objects, versions, and buckets | `s3:DeleteObject`, `s3:DeleteObjectVersion`, `s3:DeleteBucket`, `s3:DeleteObjectTagging` |
-| `share` | Manage ACLs | `s3:PutObjectAcl`, `s3:PutBucketAcl`, `s3:GetBucketAcl` |
| `share` | Manage Access Control Lists (ACLs) | `s3:PutObjectAcl`, `s3:PutBucketAcl`, `s3:GetBucketAcl` |
| `policy` | Manage bucket policies | `s3:PutBucketPolicy`, `s3:GetBucketPolicy`, `s3:DeleteBucketPolicy` |
| `lifecycle` | Manage lifecycle rules | `s3:GetLifecycleConfiguration`, `s3:PutLifecycleConfiguration`, `s3:DeleteLifecycleConfiguration`, `s3:GetBucketLifecycle`, `s3:PutBucketLifecycle` |
| `cors` | Manage CORS configuration | `s3:GetBucketCors`, `s3:PutBucketCors`, `s3:DeleteBucketCors` |
-| `replication` | Configure and manage replication | `s3:GetReplicationConfiguration`, `s3:PutReplicationConfiguration`, `s3:ReplicateObject`, `s3:ReplicateTags`, `s3:ReplicateDelete` |
| `replication` | Configure and manage replication | `s3:GetReplicationConfiguration`, `s3:PutReplicationConfiguration`, `s3:DeleteReplicationConfiguration`, `s3:ReplicateObject`, `s3:ReplicateTags`, `s3:ReplicateDelete` |
-| `iam:list_users` | View IAM users | `iam:ListUsers` |
-| `iam:create_user` | Create IAM users | `iam:CreateUser` |
#### IAM Actions (User Management)
| Action | Description | AWS Aliases |
| --- | --- | --- |
| `iam:list_users` | View all IAM users and their policies | `iam:ListUsers` |
| `iam:create_user` | Create new IAM users | `iam:CreateUser` |
| `iam:delete_user` | Delete IAM users | `iam:DeleteUser` |
-| `iam:rotate_key` | Rotate user secrets | `iam:RotateAccessKey` |
| `iam:rotate_key` | Rotate user secret keys | `iam:RotateAccessKey` |
| `iam:update_policy` | Modify user policies | `iam:PutUserPolicy` |
-| `iam:*` | All IAM actions (admin wildcard) | — |
| `iam:*` | **Admin wildcard**: grants all IAM actions | — |

-### Example Policies

#### Wildcards
| Wildcard | Scope | Description |
| --- | --- | --- |
| `*` (in actions) | All S3 actions | Grants `list`, `read`, `write`, `delete`, `share`, `policy`, `lifecycle`, `cors`, `replication` |
| `iam:*` | All IAM actions | Grants all `iam:*` actions for user management |
| `*` (in bucket) | All buckets | Policy applies to every bucket |
### IAM Policy Structure
User policies are stored as a JSON array of policy objects. Each object specifies a bucket and the allowed actions:
-**Full Control (admin):**
```json
-[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "replication", "iam:*"]}]
[
{
"bucket": "<bucket-name-or-wildcard>",
"actions": ["<action1>", "<action2>", ...]
}
]
```

-**Read-Only:**

**Fields:**
- `bucket`: The bucket name (case-insensitive) or `*` for all buckets
- `actions`: Array of action strings (simple names or AWS aliases)
### Example User Policies
**Full Administrator (complete system access):**
```json
[{"bucket": "*", "actions": ["list", "read", "write", "delete", "share", "policy", "lifecycle", "cors", "replication", "iam:*"]}]
```
**Read-Only User (browse and download only):**
```json
[{"bucket": "*", "actions": ["list", "read"]}]
```
-**Single Bucket Access (no listing other buckets):**
**Single Bucket Full Access (no access to other buckets):**

```json
-[{"bucket": "user-bucket", "actions": ["read", "write", "delete"]}]
[{"bucket": "user-bucket", "actions": ["list", "read", "write", "delete"]}]
```

-**Bucket Access with Replication:**
**Multiple Bucket Access (different permissions per bucket):**

```json
-[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "replication"]}]
[
{"bucket": "public-data", "actions": ["list", "read"]},
{"bucket": "my-uploads", "actions": ["list", "read", "write", "delete"]},
{"bucket": "team-shared", "actions": ["list", "read", "write"]}
]
```
**IAM Manager (manage users but no data access):**
```json
[{"bucket": "*", "actions": ["iam:list_users", "iam:create_user", "iam:delete_user", "iam:rotate_key", "iam:update_policy"]}]
```
**Replication Operator (manage replication only):**
```json
[{"bucket": "*", "actions": ["list", "read", "replication"]}]
```
**Lifecycle Manager (configure object expiration):**
```json
[{"bucket": "*", "actions": ["list", "lifecycle"]}]
```
**CORS Administrator (configure cross-origin access):**
```json
[{"bucket": "*", "actions": ["cors"]}]
```
**Bucket Administrator (full bucket config, no IAM access):**
```json
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete", "policy", "lifecycle", "cors"]}]
```
**Upload-Only User (write but cannot read back):**
```json
[{"bucket": "drop-box", "actions": ["write"]}]
```
**Backup Operator (read, list, and replicate):**
```json
[{"bucket": "*", "actions": ["list", "read", "replication"]}]
```
### Using AWS-Style Action Names
You can use AWS S3 action names instead of simple names. They are automatically normalized:
```json
[
{
"bucket": "my-bucket",
"actions": [
"s3:ListBucket",
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject"
]
}
]
```
This is equivalent to:
```json
[{"bucket": "my-bucket", "actions": ["list", "read", "write", "delete"]}]
```
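A minimal sketch of how such normalization can work: a lookup table from AWS verbs to the simple action names (abridged from the alias tables above; the helper name is illustrative, not the app's internal API):

```python
# Abridged alias map drawn from the tables above (illustrative, not app code)
AWS_ALIASES = {
    "s3:ListBucket": "list",
    "s3:GetObject": "read",
    "s3:PutObject": "write",
    "s3:DeleteObject": "delete",
    "s3:PutBucketPolicy": "policy",
    "s3:PutLifecycleConfiguration": "lifecycle",
    "s3:PutBucketCors": "cors",
}

def normalize_action(action: str) -> str:
    """Map an AWS-style verb to its simple name; pass simple names through."""
    return AWS_ALIASES.get(action, action)

assert normalize_action("s3:GetObject") == "read"
assert normalize_action("read") == "read"
```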
### Managing Users via API
```bash
# List all users (requires iam:list_users)
curl http://localhost:5000/iam/users \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
# Create a new user (requires iam:create_user)
curl -X POST http://localhost:5000/iam/users \
-H "Content-Type: application/json" \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
-d '{
"display_name": "New User",
"policies": [{"bucket": "*", "actions": ["list", "read"]}]
}'
# Rotate user secret (requires iam:rotate_key)
curl -X POST http://localhost:5000/iam/users/<access-key>/rotate \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
# Update user policies (requires iam:update_policy)
curl -X PUT http://localhost:5000/iam/users/<access-key>/policies \
-H "Content-Type: application/json" \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
-d '[{"bucket": "*", "actions": ["list", "read", "write"]}]'
# Delete a user (requires iam:delete_user)
curl -X DELETE http://localhost:5000/iam/users/<access-key> \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```
### Permission Precedence
When a request is made, permissions are evaluated in this order:
1. **Authentication**: Verify the access key and secret key are valid
2. **Lockout Check**: Ensure the account is not locked due to failed attempts
3. **IAM Policy Check**: Verify the user has the required action for the target bucket
4. **Bucket Policy Check**: If a bucket policy exists, verify it allows the action
A request is allowed only if:
- The IAM policy grants the action, AND
- The bucket policy allows the action (or no bucket policy exists)
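Expressed as a tiny predicate, the conjunction above looks like this (names are illustrative, not the app's internal functions):

```python
def is_request_allowed(iam_grants_action: bool,
                       bucket_policy_exists: bool,
                       bucket_policy_allows: bool) -> bool:
    """Both layers must pass: an IAM grant AND (no bucket policy OR an allowing one)."""
    if not iam_grants_action:        # step 3: IAM policy check
        return False
    if bucket_policy_exists:         # step 4: bucket policy check
        return bucket_policy_allows
    return True                      # no bucket policy: the IAM grant suffices

assert is_request_allowed(True, False, False)      # IAM grant, no bucket policy
assert not is_request_allowed(True, True, False)   # bucket policy denies
assert not is_request_allowed(False, True, True)   # IAM never granted the action
```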
### Common Permission Scenarios
| Scenario | Required Actions |
| --- | --- |
| Browse bucket contents | `list` |
| Download a file | `read` |
| Upload a file | `write` |
| Delete a file | `delete` |
| Generate presigned URL (GET) | `read` |
| Generate presigned URL (PUT) | `write` |
| Generate presigned URL (DELETE) | `delete` |
| Enable versioning | `write` (includes `s3:PutBucketVersioning`) |
| View bucket policy | `policy` |
| Modify bucket policy | `policy` |
| Configure lifecycle rules | `lifecycle` |
| View lifecycle rules | `lifecycle` |
| Configure CORS | `cors` |
| View CORS rules | `cors` |
| Configure replication | `replication` (admin-only for creation) |
| Pause/resume replication | `replication` |
| Manage other users | `iam:*` or specific `iam:` actions |
| Set bucket quotas | `iam:*` or `iam:list_users` (admin feature) |
### Security Best Practices
1. **Principle of Least Privilege**: Grant only the permissions users need
2. **Avoid Wildcards**: Use specific bucket names instead of `*` when possible
3. **Rotate Secrets Regularly**: Use the rotate key feature periodically
4. **Separate Admin Accounts**: Don't use admin accounts for daily operations
5. **Monitor Failed Logins**: Check logs for repeated authentication failures
6. **Use Bucket Policies for Fine-Grained Control**: Combine with IAM for defense in depth
## 5. Bucket Policies & Presets

- **Storage**: Policies are persisted in `data/.myfsio.sys/config/bucket_policies.json` under `{"policies": {"bucket": {...}}}`.
@@ -663,7 +860,7 @@ The API expects every request to include `X-Access-Key` and `X-Secret-Key` heade
### Editing via CLI

```bash
-curl -X PUT http://127.0.0.1:5000/bucket-policy/test \
curl -X PUT "http://127.0.0.1:5000/test?policy" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
@@ -726,9 +923,8 @@ Drag files directly onto the objects table to upload them to the current bucket
## 6. Presigned URLs

- Trigger from the UI using the **Presign** button after selecting an object.
-- Or call `POST /presign/<bucket>/<key>` with JSON `{ "method": "GET", "expires_in": 900 }`.
- Supported methods: `GET`, `PUT`, `DELETE`; expiration must be `1..604800` seconds.
-- The service signs requests using the callers IAM credentials and enforces bucket policies both when issuing and when the presigned URL is used.
- The service signs requests using the caller's IAM credentials and enforces bucket policies both when issuing and when the presigned URL is used.
- Legacy share links have been removed; presigned URLs now handle both private and public workflows.
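Because the URLs are standard SigV4, an S3 SDK can also mint them client-side; a boto3 sketch (endpoint, credentials, bucket, and key are illustrative):

```python
import boto3
from botocore.config import Config

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:5000",   # MyFSIO API endpoint
    aws_access_key_id="YOUR_ACCESS_KEY",
    aws_secret_access_key="YOUR_SECRET_KEY",
    config=Config(signature_version="s3v4"),
)

# 15-minute GET URL; IAM and bucket policy are re-checked when the URL is used
url = s3.generate_presigned_url(
    "get_object",
    Params={"Bucket": "my-bucket", "Key": "reports/2026-01.pdf"},
    ExpiresIn=900,
)
print(url)
```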
### Multipart Upload Example
@@ -951,7 +1147,84 @@ curl -X PUT "http://localhost:5000/bucket/<bucket>?quota" \
</Error>
```

-## 9. Site Replication

## 9. Operation Metrics
Operation metrics provide real-time visibility into API request statistics, including request counts, latency, error rates, and bandwidth usage.
### Enabling Operation Metrics
By default, operation metrics are disabled. Enable by setting the environment variable:
```bash
OPERATION_METRICS_ENABLED=true python run.py
```
Or in your `myfsio.env` file:
```
OPERATION_METRICS_ENABLED=true
OPERATION_METRICS_INTERVAL_MINUTES=5
OPERATION_METRICS_RETENTION_HOURS=24
```
### Configuration Options
| Variable | Default | Description |
|----------|---------|-------------|
| `OPERATION_METRICS_ENABLED` | `false` | Enable/disable operation metrics |
| `OPERATION_METRICS_INTERVAL_MINUTES` | `5` | Snapshot interval (minutes) |
| `OPERATION_METRICS_RETENTION_HOURS` | `24` | History retention period (hours) |
### What's Tracked
**Request Statistics:**
- Request counts by HTTP method (GET, PUT, POST, DELETE, HEAD, OPTIONS)
- Response status codes grouped by class (2xx, 3xx, 4xx, 5xx)
- Latency statistics (min, max, average)
- Bytes transferred in/out
**Endpoint Breakdown:**
- `object` - Object operations (GET/PUT/DELETE objects)
- `bucket` - Bucket operations (list, create, delete buckets)
- `ui` - Web UI requests
- `service` - Health checks, internal endpoints
- `kms` - KMS API operations
**S3 Error Codes:**
Tracks API-specific error codes like `NoSuchKey`, `AccessDenied`, `BucketNotFound`. Note: These are separate from HTTP status codes - a 404 from the UI won't appear here, only S3 API errors.
### API Endpoints
```bash
# Get current operation metrics
curl http://localhost:5100/ui/metrics/operations \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
# Get operation metrics history
curl http://localhost:5100/ui/metrics/operations/history \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
# Filter history by time range
curl "http://localhost:5100/ui/metrics/operations/history?hours=6" \
-H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```
### Storage Location
Operation metrics data is stored at:
```
data/.myfsio.sys/config/operation_metrics.json
```
### UI Dashboard
When enabled, the Metrics page (`/ui/metrics`) shows an "API Operations" section with:
- Summary cards: Requests, Success Rate, Errors, Latency, Bytes In, Bytes Out
- Charts: Requests by Method (doughnut), Requests by Status (bar), Requests by Endpoint (horizontal bar)
- S3 Error Codes table with distribution
Data refreshes every 5 seconds.
## 10. Site Replication
### Permission Model
@@ -1088,7 +1361,7 @@ To set up two-way replication (Server A ↔ Server B):
**Note**: Deleting a bucket will automatically remove its associated replication configuration.

-## 11. Running Tests
## 12. Running Tests
```bash
pytest -q
@@ -1098,7 +1371,7 @@ The suite now includes a boto3 integration test that spins up a live HTTP server
The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, and regression tests for anonymous reads when a Public policy is attached.

-## 12. Troubleshooting
## 13. Troubleshooting
| Symptom | Likely Cause | Fix |
| --- | --- | --- |
@@ -1107,7 +1380,7 @@ The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, an
| Presign modal errors with 403 | IAM user lacks `read/write/delete` for target bucket or bucket policy denies | Update IAM inline policies or remove conflicting deny statements. |
| Large upload rejected immediately | File exceeds `MAX_UPLOAD_SIZE` | Increase env var or shrink object. |

-## 13. API Matrix
## 14. API Matrix
```
GET /                     # List buckets
@@ -1117,10 +1390,9 @@ GET /<bucket> # List objects
PUT /<bucket>/<key>       # Upload object
GET /<bucket>/<key>       # Download object
DELETE /<bucket>/<key>    # Delete object
-POST /presign/<bucket>/<key>    # Generate SigV4 URL
-GET /bucket-policy/<bucket>     # Fetch policy
-PUT /bucket-policy/<bucket>     # Upsert policy
-DELETE /bucket-policy/<bucket>  # Delete policy
GET /<bucket>?policy      # Fetch policy
PUT /<bucket>?policy      # Upsert policy
DELETE /<bucket>?policy   # Delete policy
GET /<bucket>?quota       # Get bucket quota
PUT /<bucket>?quota       # Set bucket quota (admin only)
```
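The remaining policy verbs from the matrix can be exercised the same way as the PUT example in the CLI editing section; a sketch reusing the `test` bucket from that example:

```bash
# Fetch the current policy document
curl "http://127.0.0.1:5000/test?policy" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Remove it
curl -X DELETE "http://127.0.0.1:5000/test?policy" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```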


@@ -9,3 +9,4 @@ boto3>=1.42.14
waitress>=3.0.2
psutil>=7.1.3
cryptography>=46.0.3
defusedxml>=0.7.1


@@ -1,4 +1,4 @@
-(function() {
(function () {
'use strict';

const { formatBytes, escapeHtml, fallbackCopy, setupJsonAutoIndent } = window.BucketDetailUtils || {
@@ -23,11 +23,62 @@
.replace(/'/g, '&#039;');
},
fallbackCopy: () => false,
-setupJsonAutoIndent: () => {}
setupJsonAutoIndent: () => { }
};

setupJsonAutoIndent(document.getElementById('policyDocument'));
const getFileTypeIcon = (key) => {
const ext = (key.split('.').pop() || '').toLowerCase();
const iconMap = {
image: ['jpg', 'jpeg', 'png', 'gif', 'svg', 'webp', 'ico', 'bmp', 'tiff', 'tif'],
document: ['pdf', 'doc', 'docx', 'txt', 'rtf', 'odt', 'pages'],
spreadsheet: ['xls', 'xlsx', 'csv', 'ods', 'numbers'],
archive: ['zip', 'rar', '7z', 'tar', 'gz', 'bz2', 'xz', 'tgz'],
code: ['js', 'ts', 'jsx', 'tsx', 'py', 'java', 'cpp', 'c', 'h', 'hpp', 'cs', 'go', 'rs', 'rb', 'php', 'html', 'htm', 'css', 'scss', 'sass', 'less', 'json', 'xml', 'yaml', 'yml', 'md', 'sh', 'bat', 'ps1', 'sql'],
audio: ['mp3', 'wav', 'flac', 'ogg', 'aac', 'm4a', 'wma', 'aiff'],
video: ['mp4', 'avi', 'mov', 'mkv', 'webm', 'wmv', 'flv', 'm4v', 'mpeg', 'mpg'],
};
const icons = {
image: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-success flex-shrink-0" viewBox="0 0 16 16">
<path d="M6.002 5.5a1.5 1.5 0 1 1-3 0 1.5 1.5 0 0 1 3 0z"/>
<path d="M2.002 1a2 2 0 0 0-2 2v10a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2V3a2 2 0 0 0-2-2h-12zm12 1a1 1 0 0 1 1 1v6.5l-3.777-1.947a.5.5 0 0 0-.577.093l-3.71 3.71-2.66-1.772a.5.5 0 0 0-.63.062L1.002 12V3a1 1 0 0 1 1-1h12z"/>
</svg>`,
document: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-danger flex-shrink-0" viewBox="0 0 16 16">
<path d="M14 4.5V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2h5.5L14 4.5zm-3 0A1.5 1.5 0 0 1 9.5 3V1H4a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V4.5h-2z"/>
<path d="M4.5 12.5A.5.5 0 0 1 5 12h3a.5.5 0 0 1 0 1H5a.5.5 0 0 1-.5-.5zm0-2A.5.5 0 0 1 5 10h6a.5.5 0 0 1 0 1H5a.5.5 0 0 1-.5-.5zm0-2A.5.5 0 0 1 5 8h6a.5.5 0 0 1 0 1H5a.5.5 0 0 1-.5-.5zm0-2A.5.5 0 0 1 5 6h6a.5.5 0 0 1 0 1H5a.5.5 0 0 1-.5-.5z"/>
</svg>`,
spreadsheet: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-success flex-shrink-0" viewBox="0 0 16 16">
<path d="M14 14V4.5L9.5 0H4a2 2 0 0 0-2 2v12a2 2 0 0 0 2 2h8a2 2 0 0 0 2-2zM9.5 3A1.5 1.5 0 0 0 11 4.5h2V9H3V2a1 1 0 0 1 1-1h5.5v2zM3 12v-2h2v2H3zm0 1h2v2H4a1 1 0 0 1-1-1v-1zm3 2v-2h3v2H6zm4 0v-2h3v1a1 1 0 0 1-1 1h-2zm3-3h-3v-2h3v2zm-7 0v-2h3v2H6z"/>
</svg>`,
archive: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-secondary flex-shrink-0" viewBox="0 0 16 16">
<path d="M6.5 7.5a1 1 0 0 1 1-1h1a1 1 0 0 1 1 1v.938l.4 1.599a1 1 0 0 1-.416 1.074l-.93.62a1 1 0 0 1-1.109 0l-.93-.62a1 1 0 0 1-.415-1.074l.4-1.599V7.5z"/>
<path d="M14 4.5V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2h5.5L14 4.5zm-3 0A1.5 1.5 0 0 1 9.5 3V1h-2v1h-1v1h1v1h-1v1h1v1H6V5H5V4h1V3H5V2h1V1H4a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V4.5h-2z"/>
</svg>`,
code: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-info flex-shrink-0" viewBox="0 0 16 16">
<path d="M14 4.5V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2h5.5L14 4.5zm-3 0A1.5 1.5 0 0 1 9.5 3V1H4a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V4.5h-2z"/>
<path d="M8.646 6.646a.5.5 0 0 1 .708 0l2 2a.5.5 0 0 1 0 .708l-2 2a.5.5 0 0 1-.708-.708L10.293 9 8.646 7.354a.5.5 0 0 1 0-.708zm-1.292 0a.5.5 0 0 0-.708 0l-2 2a.5.5 0 0 0 0 .708l2 2a.5.5 0 0 0 .708-.708L5.707 9l1.647-1.646a.5.5 0 0 0 0-.708z"/>
</svg>`,
audio: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-primary flex-shrink-0" viewBox="0 0 16 16">
<path d="M6 13c0 1.105-1.12 2-2.5 2S1 14.105 1 13c0-1.104 1.12-2 2.5-2s2.5.896 2.5 2zm9-2c0 1.105-1.12 2-2.5 2s-2.5-.895-2.5-2 1.12-2 2.5-2 2.5.895 2.5 2z"/>
<path fill-rule="evenodd" d="M14 11V2h1v9h-1zM6 3v10H5V3h1z"/>
<path d="M5 2.905a1 1 0 0 1 .9-.995l8-.8a1 1 0 0 1 1.1.995V3L5 4V2.905z"/>
</svg>`,
video: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-danger flex-shrink-0" viewBox="0 0 16 16">
<path d="M0 12V4a2 2 0 0 1 2-2h12a2 2 0 0 1 2 2v8a2 2 0 0 1-2 2H2a2 2 0 0 1-2-2zm6.79-6.907A.5.5 0 0 0 6 5.5v5a.5.5 0 0 0 .79.407l3.5-2.5a.5.5 0 0 0 0-.814l-3.5-2.5z"/>
</svg>`,
default: `<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-muted flex-shrink-0" viewBox="0 0 16 16">
<path d="M14 4.5V14a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2h5.5L14 4.5zm-3 0A1.5 1.5 0 0 1 9.5 3V1H4a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V4.5h-2z"/>
</svg>`,
};
for (const [type, extensions] of Object.entries(iconMap)) {
if (extensions.includes(ext)) {
return icons[type];
}
}
return icons.default;
};
const selectAllCheckbox = document.querySelector('[data-select-all]');
const bulkDeleteButton = document.querySelector('[data-bulk-delete-trigger]');
const bulkDeleteLabel = bulkDeleteButton?.querySelector('[data-bulk-delete-label]');
@@ -49,6 +100,7 @@
const previewPlaceholder = document.getElementById('preview-placeholder');
const previewImage = document.getElementById('preview-image');
const previewVideo = document.getElementById('preview-video');
const previewAudio = document.getElementById('preview-audio');
const previewIframe = document.getElementById('preview-iframe');
const downloadButton = document.getElementById('downloadButton');
const presignButton = document.getElementById('presignButton');
@@ -135,18 +187,20 @@
tr.dataset.objectRow = '';
tr.dataset.key = obj.key;
tr.dataset.size = obj.size;
-tr.dataset.lastModified = obj.lastModified || obj.last_modified;
-tr.dataset.etag = obj.etag;
-tr.dataset.previewUrl = obj.previewUrl || obj.preview_url;
-tr.dataset.downloadUrl = obj.downloadUrl || obj.download_url;
-tr.dataset.presignEndpoint = obj.presignEndpoint || obj.presign_endpoint;
-tr.dataset.deleteEndpoint = obj.deleteEndpoint || obj.delete_endpoint;
-tr.dataset.metadata = typeof obj.metadata === 'string' ? obj.metadata : JSON.stringify(obj.metadata || {});
-tr.dataset.versionsEndpoint = obj.versionsEndpoint || obj.versions_endpoint;
-tr.dataset.restoreTemplate = obj.restoreTemplate || obj.restore_template;
-tr.dataset.tagsUrl = obj.tagsUrl || obj.tags_url;
-tr.dataset.copyUrl = obj.copyUrl || obj.copy_url;
-tr.dataset.moveUrl = obj.moveUrl || obj.move_url;
tr.dataset.lastModified = obj.lastModified ?? obj.last_modified ?? '';
tr.dataset.lastModifiedDisplay = obj.lastModifiedDisplay ?? obj.last_modified_display ?? new Date(obj.lastModified || obj.last_modified).toLocaleString();
tr.dataset.lastModifiedIso = obj.lastModifiedIso ?? obj.last_modified_iso ?? obj.lastModified ?? obj.last_modified ?? '';
tr.dataset.etag = obj.etag ?? '';
tr.dataset.previewUrl = obj.previewUrl ?? obj.preview_url ?? '';
tr.dataset.downloadUrl = obj.downloadUrl ?? obj.download_url ?? '';
tr.dataset.presignEndpoint = obj.presignEndpoint ?? obj.presign_endpoint ?? '';
tr.dataset.deleteEndpoint = obj.deleteEndpoint ?? obj.delete_endpoint ?? '';
tr.dataset.metadataUrl = obj.metadataUrl ?? obj.metadata_url ?? '';
tr.dataset.versionsEndpoint = obj.versionsEndpoint ?? obj.versions_endpoint ?? '';
tr.dataset.restoreTemplate = obj.restoreTemplate ?? obj.restore_template ?? '';
tr.dataset.tagsUrl = obj.tagsUrl ?? obj.tags_url ?? '';
tr.dataset.copyUrl = obj.copyUrl ?? obj.copy_url ?? '';
tr.dataset.moveUrl = obj.moveUrl ?? obj.move_url ?? '';

const keyToShow = displayKey || obj.key;
const lastModDisplay = obj.lastModifiedDisplay || obj.last_modified_display || new Date(obj.lastModified || obj.last_modified).toLocaleDateString();
@@ -156,8 +210,11 @@
<input class="form-check-input" type="checkbox" data-object-select aria-label="Select ${escapeHtml(obj.key)}" /> <input class="form-check-input" type="checkbox" data-object-select aria-label="Select ${escapeHtml(obj.key)}" />
</td> </td>
<td class="object-key text-break" title="${escapeHtml(obj.key)}"> <td class="object-key text-break" title="${escapeHtml(obj.key)}">
<div class="fw-medium">${escapeHtml(keyToShow)}</div> <div class="fw-medium d-flex align-items-center gap-2">
<div class="text-muted small">Modified ${escapeHtml(lastModDisplay)}</div> ${getFileTypeIcon(obj.key)}
<span>${escapeHtml(keyToShow)}</span>
</div>
<div class="text-muted small ms-4 ps-2">Modified ${escapeHtml(lastModDisplay)}</div>
</td> </td>
<td class="text-end text-nowrap"> <td class="text-end text-nowrap">
<span class="text-muted small">${formatBytes(obj.size)}</span> <span class="text-muted small">${formatBytes(obj.size)}</span>
@@ -425,12 +482,13 @@
size: obj.size,
lastModified: obj.last_modified,
lastModifiedDisplay: obj.last_modified_display,
lastModifiedIso: obj.last_modified_iso,
etag: obj.etag,
previewUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.preview, key) : '',
downloadUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.download, key) : '',
presignEndpoint: urlTemplates ? buildUrlFromTemplate(urlTemplates.presign, key) : '',
deleteEndpoint: urlTemplates ? buildUrlFromTemplate(urlTemplates.delete, key) : '',
-metadata: '{}',
metadataUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.metadata, key) : '',
versionsEndpoint: urlTemplates ? buildUrlFromTemplate(urlTemplates.versions, key) : '',
restoreTemplate: urlTemplates ? urlTemplates.restore.replace('KEY_PLACEHOLDER', encodeURIComponent(key).replace(/%2F/g, '/')) : '',
tagsUrl: urlTemplates ? buildUrlFromTemplate(urlTemplates.tags, key) : '',
@@ -548,7 +606,7 @@
} else if (msg.type === 'done') {
streamingComplete = true;
}
-} catch (e) {}
} catch (e) { }
}

flushPendingStreamObjects();
@@ -1354,15 +1412,30 @@
}
};
const INTERNAL_METADATA_KEYS = new Set([
'__etag__',
'__size__',
'__content_type__',
'__last_modified__',
'__storage_class__',
]);
const isInternalKey = (key) => INTERNAL_METADATA_KEYS.has(key.toLowerCase());
const renderMetadata = (metadata) => {
if (!previewMetadata || !previewMetadataList) return;
previewMetadataList.innerHTML = '';
-if (!metadata || Object.keys(metadata).length === 0) {
if (!metadata) {
previewMetadata.classList.add('d-none');
return;
}
const userMetadata = Object.entries(metadata).filter(([key]) => !isInternalKey(key));
if (userMetadata.length === 0) {
previewMetadata.classList.add('d-none');
return;
}
previewMetadata.classList.remove('d-none');
-Object.entries(metadata).forEach(([key, value]) => {
userMetadata.forEach(([key, value]) => {
const wrapper = document.createElement('div');
wrapper.className = 'metadata-entry';
const label = document.createElement('div');
@@ -1754,9 +1827,10 @@
}

const resetPreviewMedia = () => {
-[previewImage, previewVideo, previewIframe].forEach((el) => {
[previewImage, previewVideo, previewAudio, previewIframe].forEach((el) => {
if (!el) return;
el.classList.add('d-none');
-if (el.tagName === 'VIDEO') {
if (el.tagName === 'VIDEO' || el.tagName === 'AUDIO') {
el.pause();
el.removeAttribute('src');
}
@@ -1767,32 +1841,31 @@
previewPlaceholder.classList.remove('d-none');
};

-function metadataFromRow(row) {
-if (!row || !row.dataset.metadata) {
-return null;
-}
-try {
-const parsed = JSON.parse(row.dataset.metadata);
-if (parsed && typeof parsed === 'object' && !Array.isArray(parsed)) {
-return parsed;
-}
-} catch (err) {
-console.warn('Failed to parse metadata for row', err);
-}
-return null;
-}
async function fetchMetadata(metadataUrl) {
if (!metadataUrl) return null;
try {
const resp = await fetch(metadataUrl);
if (resp.ok) {
const data = await resp.json();
return data.metadata || {};
}
} catch (e) {
console.warn('Failed to load metadata', e);
}
return null;
}
function selectRow(row) { async function selectRow(row) {
document.querySelectorAll('[data-object-row]').forEach((r) => r.classList.remove('table-active')); document.querySelectorAll('[data-object-row]').forEach((r) => r.classList.remove('table-active'));
row.classList.add('table-active'); row.classList.add('table-active');
previewEmpty.classList.add('d-none'); previewEmpty.classList.add('d-none');
previewPanel.classList.remove('d-none'); previewPanel.classList.remove('d-none');
activeRow = row; activeRow = row;
renderMetadata(metadataFromRow(row)); renderMetadata(null);
previewKey.textContent = row.dataset.key; previewKey.textContent = row.dataset.key;
previewSize.textContent = formatBytes(Number(row.dataset.size)); previewSize.textContent = formatBytes(Number(row.dataset.size));
previewModified.textContent = row.dataset.lastModified; previewModified.textContent = row.dataset.lastModifiedIso || row.dataset.lastModified;
previewEtag.textContent = row.dataset.etag; previewEtag.textContent = row.dataset.etag;
downloadButton.href = row.dataset.downloadUrl; downloadButton.href = row.dataset.downloadUrl;
downloadButton.classList.remove('disabled'); downloadButton.classList.remove('disabled');
@@ -1811,18 +1884,36 @@
  resetPreviewMedia();
  const previewUrl = row.dataset.previewUrl;
  const lower = row.dataset.key.toLowerCase();
- if (lower.match(/\.(png|jpg|jpeg|gif|webp|svg)$/)) {
  if (previewUrl && lower.match(/\.(png|jpg|jpeg|gif|webp|svg|ico|bmp)$/)) {
    previewImage.src = previewUrl;
    previewImage.classList.remove('d-none');
    previewPlaceholder.classList.add('d-none');
- } else if (lower.match(/\.(mp4|webm|ogg)$/)) {
  } else if (previewUrl && lower.match(/\.(mp4|webm|ogv|mov|avi|mkv)$/)) {
    previewVideo.src = previewUrl;
    previewVideo.classList.remove('d-none');
    previewPlaceholder.classList.add('d-none');
- } else if (lower.match(/\.(txt|log|json|md|csv)$/)) {
  } else if (previewUrl && lower.match(/\.(mp3|wav|flac|ogg|aac|m4a|wma)$/)) {
    previewAudio.src = previewUrl;
    previewAudio.classList.remove('d-none');
    previewPlaceholder.classList.add('d-none');
  } else if (previewUrl && lower.match(/\.(pdf)$/)) {
    previewIframe.src = previewUrl;
    previewIframe.style.minHeight = '500px';
    previewIframe.classList.remove('d-none');
    previewPlaceholder.classList.add('d-none');
  } else if (previewUrl && lower.match(/\.(txt|log|json|md|csv|xml|html|htm|js|ts|py|java|c|cpp|h|css|scss|yaml|yml|toml|ini|cfg|conf|sh|bat)$/)) {
    previewIframe.src = previewUrl;
    previewIframe.style.minHeight = '200px';
    previewIframe.classList.remove('d-none');
    previewPlaceholder.classList.add('d-none');
  }
  const metadataUrl = row.dataset.metadataUrl;
  if (metadataUrl) {
    const metadata = await fetchMetadata(metadataUrl);
    if (activeRow === row) {
      renderMetadata(metadata);
    }
  }
}
@@ -2040,7 +2131,7 @@
uploadCancelled = true;
activeXHRs.forEach(xhr => {
  try { xhr.abort(); } catch { }
});
activeXHRs = [];
@@ -2049,7 +2140,7 @@
const csrfToken = document.querySelector('input[name="csrf_token"]')?.value;
try {
  await fetch(abortUrl, { method: 'DELETE', headers: { 'X-CSRFToken': csrfToken || '' } });
} catch { }
activeMultipartUpload = null;
}
@@ -2275,7 +2366,7 @@
if (!uploadCancelled) {
  try {
    await fetch(abortUrl, { method: 'DELETE', headers: { 'X-CSRFToken': csrfToken || '' } });
  } catch { }
}
activeMultipartUpload = null;
throw err;
@@ -3177,7 +3268,7 @@
const loadLifecycleRules = async () => {
  if (!lifecycleUrl || !lifecycleRulesBody) return;
- lifecycleRulesBody.innerHTML = '<tr><td colspan="6" class="text-center text-muted py-4"><div class="spinner-border spinner-border-sm me-2" role="status"></div>Loading...</td></tr>';
  lifecycleRulesBody.innerHTML = '<tr><td colspan="7" class="text-center text-muted py-4"><div class="spinner-border spinner-border-sm me-2" role="status"></div>Loading...</td></tr>';
  try {
    const resp = await fetch(lifecycleUrl);
    const data = await resp.json();
@@ -3185,19 +3276,20 @@
    lifecycleRules = data.rules || [];
    renderLifecycleRules();
  } catch (err) {
-   lifecycleRulesBody.innerHTML = `<tr><td colspan="6" class="text-center text-danger py-4">${escapeHtml(err.message)}</td></tr>`;
    lifecycleRulesBody.innerHTML = `<tr><td colspan="7" class="text-center text-danger py-4">${escapeHtml(err.message)}</td></tr>`;
  }
};
const renderLifecycleRules = () => {
  if (!lifecycleRulesBody) return;
  if (lifecycleRules.length === 0) {
-   lifecycleRulesBody.innerHTML = '<tr><td colspan="6" class="text-center text-muted py-4">No lifecycle rules configured</td></tr>';
    lifecycleRulesBody.innerHTML = '<tr><td colspan="7" class="text-center text-muted py-4">No lifecycle rules configured</td></tr>';
    return;
  }
  lifecycleRulesBody.innerHTML = lifecycleRules.map((rule, idx) => {
    const expiration = rule.Expiration?.Days ? `${rule.Expiration.Days}d` : '-';
    const noncurrent = rule.NoncurrentVersionExpiration?.NoncurrentDays ? `${rule.NoncurrentVersionExpiration.NoncurrentDays}d` : '-';
    const abortMpu = rule.AbortIncompleteMultipartUpload?.DaysAfterInitiation ? `${rule.AbortIncompleteMultipartUpload.DaysAfterInitiation}d` : '-';
    const statusClass = rule.Status === 'Enabled' ? 'bg-success' : 'bg-secondary';
    return `<tr>
      <td><code class="small">${escapeHtml(rule.ID || '')}</code></td>
@@ -3205,6 +3297,7 @@
<td><span class="badge ${statusClass}">${escapeHtml(rule.Status)}</span></td> <td><span class="badge ${statusClass}">${escapeHtml(rule.Status)}</span></td>
<td class="small">${expiration}</td> <td class="small">${expiration}</td>
<td class="small">${noncurrent}</td> <td class="small">${noncurrent}</td>
<td class="small">${abortMpu}</td>
<td class="text-end"> <td class="text-end">
<div class="btn-group btn-group-sm"> <div class="btn-group btn-group-sm">
<button class="btn btn-outline-secondary" onclick="editLifecycleRule(${idx})" title="Edit rule"> <button class="btn btn-outline-secondary" onclick="editLifecycleRule(${idx})" title="Edit rule">
@@ -3490,7 +3583,7 @@
  });
});
document.getElementById('objects-table')?.addEventListener('show.bs.dropdown', function (e) {
  const dropdown = e.target.closest('.dropdown');
  const menu = dropdown?.querySelector('.dropdown-menu');
  const btn = e.target;
@@ -3682,8 +3775,8 @@
});
const originalSelectRow = selectRow;
- selectRow = (row) => {
-   originalSelectRow(row);
selectRow = async (row) => {
  await originalSelectRow(row);
  loadObjectTags(row);
};
@@ -3789,18 +3882,18 @@
var form = document.getElementById(formId);
if (!form) return;
form.addEventListener('submit', function (e) {
  e.preventDefault();
  window.UICore.submitFormAjax(form, {
    successMessage: options.successMessage || 'Operation completed',
    onSuccess: function (data) {
      if (options.onSuccess) options.onSuccess(data);
      if (options.closeModal) {
        var modal = bootstrap.Modal.getInstance(document.getElementById(options.closeModal));
        if (modal) modal.hide();
      }
      if (options.reload) {
        setTimeout(function () { location.reload(); }, 500);
      }
    }
  });
@@ -3855,11 +3948,11 @@
var newForm = document.getElementById('enableVersioningForm');
if (newForm) {
  newForm.setAttribute('action', window.BucketDetailConfig?.endpoints?.versioning || '');
  newForm.addEventListener('submit', function (e) {
    e.preventDefault();
    window.UICore.submitFormAjax(newForm, {
      successMessage: 'Versioning enabled',
      onSuccess: function () {
        updateVersioningBadge(true);
        updateVersioningCard(true);
      }
@@ -3949,7 +4042,7 @@
'<p class="mb-0 small">No bucket policy is attached. Access is controlled by IAM policies only.</p></div>'; '<p class="mb-0 small">No bucket policy is attached. Access is controlled by IAM policies only.</p></div>';
} }
} }
document.querySelectorAll('.preset-btn').forEach(function(btn) { document.querySelectorAll('.preset-btn').forEach(function (btn) {
btn.classList.remove('active'); btn.classList.remove('active');
if (btn.dataset.preset === preset) btn.classList.add('active'); if (btn.dataset.preset === preset) btn.classList.add('active');
}); });
@@ -3963,7 +4056,7 @@
interceptForm('enableVersioningForm', {
  successMessage: 'Versioning enabled',
  onSuccess: function (data) {
    updateVersioningBadge(true);
    updateVersioningCard(true);
  }
@@ -3972,7 +4065,7 @@
interceptForm('suspendVersioningForm', {
  successMessage: 'Versioning suspended',
  closeModal: 'suspendVersioningModal',
  onSuccess: function (data) {
    updateVersioningBadge(false);
    updateVersioningCard(false);
  }
@@ -3980,21 +4073,21 @@
interceptForm('encryptionForm', {
  successMessage: 'Encryption settings saved',
  onSuccess: function (data) {
    updateEncryptionCard(data.enabled !== false, data.algorithm || 'AES256');
  }
});
interceptForm('quotaForm', {
  successMessage: 'Quota settings saved',
  onSuccess: function (data) {
    updateQuotaCard(data.has_quota, data.max_bytes, data.max_objects);
  }
});
interceptForm('bucketPolicyForm', {
  successMessage: 'Bucket policy saved',
  onSuccess: function (data) {
    var policyModeEl = document.getElementById('policyMode');
    var policyPresetEl = document.getElementById('policyPreset');
    var preset = policyModeEl && policyModeEl.value === 'delete' ? 'private' :
@@ -4005,11 +4098,11 @@
var deletePolicyForm = document.getElementById('deletePolicyForm');
if (deletePolicyForm) {
  deletePolicyForm.addEventListener('submit', function (e) {
    e.preventDefault();
    window.UICore.submitFormAjax(deletePolicyForm, {
      successMessage: 'Bucket policy deleted',
      onSuccess: function (data) {
        var modal = bootstrap.Modal.getInstance(document.getElementById('deletePolicyModal'));
        if (modal) modal.hide();
        updatePolicyCard(false, 'private');
@@ -4022,13 +4115,13 @@
var disableEncBtn = document.getElementById('disableEncryptionBtn');
if (disableEncBtn) {
  disableEncBtn.addEventListener('click', function () {
    var form = document.getElementById('encryptionForm');
    if (!form) return;
    document.getElementById('encryptionAction').value = 'disable';
    window.UICore.submitFormAjax(form, {
      successMessage: 'Encryption disabled',
      onSuccess: function (data) {
        document.getElementById('encryptionAction').value = 'enable';
        updateEncryptionCard(false, null);
      }
@@ -4038,13 +4131,13 @@
var removeQuotaBtn = document.getElementById('removeQuotaBtn');
if (removeQuotaBtn) {
  removeQuotaBtn.addEventListener('click', function () {
    var form = document.getElementById('quotaForm');
    if (!form) return;
    document.getElementById('quotaAction').value = 'remove';
    window.UICore.submitFormAjax(form, {
      successMessage: 'Quota removed',
      onSuccess: function (data) {
        document.getElementById('quotaAction').value = 'set';
        updateQuotaCard(false, null, null);
      }
@@ -4058,8 +4151,8 @@
fetch(window.location.pathname + '?tab=replication', {
  headers: { 'X-Requested-With': 'XMLHttpRequest' }
})
  .then(function (resp) { return resp.text(); })
  .then(function (html) {
    var parser = new DOMParser();
    var doc = parser.parseFromString(html, 'text/html');
    var newPane = doc.getElementById('replication-pane');
@@ -4069,20 +4162,20 @@
    initReplicationStats();
  }
})
  .catch(function (err) {
    console.error('Failed to reload replication pane:', err);
  });
}
function initReplicationForms() {
  document.querySelectorAll('form[action*="replication"]').forEach(function (form) {
    if (form.dataset.ajaxBound) return;
    form.dataset.ajaxBound = 'true';
    var actionInput = form.querySelector('input[name="action"]');
    if (!actionInput) return;
    var action = actionInput.value;
    form.addEventListener('submit', function (e) {
      e.preventDefault();
      var msg = action === 'pause' ? 'Replication paused' :
        action === 'resume' ? 'Replication resumed' :
@@ -4090,7 +4183,7 @@
        action === 'create' ? 'Replication configured' : 'Operation completed';
      window.UICore.submitFormAjax(form, {
        successMessage: msg,
        onSuccess: function (data) {
          var modal = bootstrap.Modal.getInstance(document.getElementById('disableReplicationModal'));
          if (modal) modal.hide();
          reloadReplicationPane();
@@ -4112,14 +4205,14 @@
var bytesEl = statsContainer.querySelector('[data-stat="bytes"]');
fetch(statusEndpoint)
  .then(function (resp) { return resp.json(); })
  .then(function (data) {
    if (syncedEl) syncedEl.textContent = data.objects_synced || 0;
    if (pendingEl) pendingEl.textContent = data.objects_pending || 0;
    if (orphanedEl) orphanedEl.textContent = data.objects_orphaned || 0;
    if (bytesEl) bytesEl.textContent = formatBytes(data.bytes_synced || 0);
  })
  .catch(function (err) {
    console.error('Failed to load replication stats:', err);
  });
}
@@ -4129,10 +4222,10 @@
var deleteBucketForm = document.getElementById('deleteBucketForm');
if (deleteBucketForm) {
  deleteBucketForm.addEventListener('submit', function (e) {
    e.preventDefault();
    window.UICore.submitFormAjax(deleteBucketForm, {
      onSuccess: function () {
        sessionStorage.setItem('flashMessage', JSON.stringify({ title: 'Bucket deleted', variant: 'success' }));
        window.location.href = window.BucketDetailConfig?.endpoints?.bucketsOverview || '/ui/buckets';
      }

View File

@@ -67,12 +67,14 @@
  </button>
</li>
{% endif %}
- {% if can_edit_policy %}
{% if can_manage_lifecycle %}
<li class="nav-item" role="presentation">
  <button class="nav-link {{ 'active' if active_tab == 'lifecycle' else '' }}" id="lifecycle-tab" data-bs-toggle="tab" data-bs-target="#lifecycle-pane" type="button" role="tab" aria-controls="lifecycle-pane" aria-selected="{{ 'true' if active_tab == 'lifecycle' else 'false' }}">
    Lifecycle
  </button>
</li>
{% endif %}
{% if can_manage_cors %}
<li class="nav-item" role="presentation">
  <button class="nav-link {{ 'active' if active_tab == 'cors' else '' }}" id="cors-tab" data-bs-toggle="tab" data-bs-target="#cors-pane" type="button" role="tab" aria-controls="cors-pane" aria-selected="{{ 'true' if active_tab == 'cors' else 'false' }}">
    CORS
@@ -318,6 +320,7 @@
</div>
<img id="preview-image" class="img-fluid d-none w-100" alt="Object preview" style="display: block;" />
<video id="preview-video" class="w-100 d-none" controls style="display: block;"></video>
<audio id="preview-audio" class="w-100 d-none" controls style="display: block;"></audio>
<iframe id="preview-iframe" class="w-100 d-none" loading="lazy" style="min-height: 200px;"></iframe>
</div>
</div>
@@ -1518,7 +1521,7 @@
</div>
{% endif %}
- {% if can_edit_policy %}
{% if can_manage_lifecycle %}
<div class="tab-pane fade {{ 'show active' if active_tab == 'lifecycle' else '' }}" id="lifecycle-pane" role="tabpanel" aria-labelledby="lifecycle-tab" tabindex="0">
{% if not lifecycle_enabled %}
<div class="alert alert-warning d-flex align-items-start mb-4" role="alert">
@@ -1560,12 +1563,13 @@
<th>Status</th>
<th>Expiration</th>
<th>Noncurrent</th>
<th>Abort MPU</th>
<th class="text-end">Actions</th>
</tr>
</thead>
<tbody id="lifecycle-rules-body">
<tr>
- <td colspan="6" class="text-center text-muted py-4">
<td colspan="7" class="text-center text-muted py-4">
<div class="spinner-border spinner-border-sm me-2" role="status"></div>
Loading...
</td>
@@ -1676,7 +1680,9 @@
</div>
</div>
</div>
{% endif %}
{% if can_manage_cors %}
<div class="tab-pane fade {{ 'show active' if active_tab == 'cors' else '' }}" id="cors-pane" role="tabpanel" aria-labelledby="cors-tab" tabindex="0">
<div class="row g-4">
<div class="col-lg-8">

View File

@@ -51,7 +51,7 @@
</div>
<div>
<h5 class="bucket-name text-break">{{ bucket.meta.name }}</h5>
- <small class="text-muted">Created {{ bucket.meta.created_at.strftime('%b %d, %Y') }}</small>
<small class="text-muted">Created {{ bucket.meta.created_at | format_datetime }}</small>
</div>
</div>
<span class="badge {{ bucket.access_badge }} bucket-access-badge">{{ bucket.access_label }}</span>

View File

@@ -39,6 +39,8 @@
<li><a href="#quotas">Bucket Quotas</a></li> <li><a href="#quotas">Bucket Quotas</a></li>
<li><a href="#encryption">Encryption</a></li> <li><a href="#encryption">Encryption</a></li>
<li><a href="#lifecycle">Lifecycle Rules</a></li> <li><a href="#lifecycle">Lifecycle Rules</a></li>
<li><a href="#metrics">Metrics History</a></li>
<li><a href="#operation-metrics">Operation Metrics</a></li>
<li><a href="#troubleshooting">Troubleshooting</a></li> <li><a href="#troubleshooting">Troubleshooting</a></li>
</ul> </ul>
</div> </div>
@@ -181,6 +183,24 @@ python run.py --mode ui
<td><code>true</code></td>
<td>Enable file logging.</td>
</tr>
<tr class="table-secondary">
<td colspan="3" class="fw-semibold">Metrics History Settings</td>
</tr>
<tr>
<td><code>METRICS_HISTORY_ENABLED</code></td>
<td><code>false</code></td>
<td>Enable metrics history recording and charts (opt-in).</td>
</tr>
<tr>
<td><code>METRICS_HISTORY_RETENTION_HOURS</code></td>
<td><code>24</code></td>
<td>How long to retain metrics history data.</td>
</tr>
<tr>
<td><code>METRICS_HISTORY_INTERVAL_MINUTES</code></td>
<td><code>5</code></td>
<td>Interval between history snapshots.</td>
</tr>
</tbody>
</table>
</div>
@@ -356,11 +376,8 @@ curl -X PUT {{ api_base }}/demo/notes.txt \
-H "X-Secret-Key: &lt;secret_key&gt;" \ -H "X-Secret-Key: &lt;secret_key&gt;" \
--data-binary @notes.txt --data-binary @notes.txt
curl -X POST {{ api_base }}/presign/demo/notes.txt \ # Presigned URLs are generated via the UI
-H "Content-Type: application/json" \ # Use the "Presign" button in the object browser
-H "X-Access-Key: &lt;access_key&gt;" \
-H "X-Secret-Key: &lt;secret_key&gt;" \
-d '{"method":"GET", "expires_in": 900}'
</code></pre> </code></pre>
</div> </div>
</div> </div>
@@ -418,13 +435,8 @@ curl -X POST {{ api_base }}/presign/demo/notes.txt \
</tr>
<tr>
<td>GET/PUT/DELETE</td>
- <td><code>/bucket-policy/&lt;bucket&gt;</code></td>
- <td>Fetch, upsert, or remove a bucket policy.</td>
<td><code>/&lt;bucket&gt;?policy</code></td>
<td>Fetch, upsert, or remove a bucket policy (S3-compatible).</td>
</tr>
- <tr>
-   <td>POST</td>
-   <td><code>/presign/&lt;bucket&gt;/&lt;key&gt;</code></td>
-   <td>Generate SigV4 URLs for GET/PUT/DELETE with custom expiry.</td>
- </tr>
</tbody>
</table>
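<p class="small text-muted mt-3">Because the policy endpoint is S3-compatible, standard SDK policy calls should work against it. A minimal boto3 sketch; the endpoint URL, bucket name, and credentials below are placeholders:</p>
<pre class="mb-0"><code class="language-python">import json
import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="{{ api_base }}",  # assumed MyFSIO API base
    aws_access_key_id="&lt;access_key&gt;",
    aws_secret_access_key="&lt;secret_key&gt;",
)

policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": "*",
        "Action": ["s3:GetObject"],
        "Resource": ["arn:aws:s3:::demo/*"],
    }],
}

s3.put_bucket_policy(Bucket="demo", Policy=json.dumps(policy))  # upsert
print(s3.get_bucket_policy(Bucket="demo")["Policy"])            # fetch
s3.delete_bucket_policy(Bucket="demo")                          # remove</code></pre>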
@@ -523,17 +535,16 @@ s3.complete_multipart_upload(
)</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Presigned URLs for Sharing</h3>
<pre class="mb-0"><code class="language-bash"># Generate a download link valid for 15 minutes <pre class="mb-0"><code class="language-text"># Generate presigned URLs via the UI:
curl -X POST "{{ api_base }}/presign/mybucket/photo.jpg" \ # 1. Navigate to your bucket in the object browser
-H "Content-Type: application/json" \ # 2. Select the object you want to share
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \ # 3. Click the "Presign" button
-d '{"method": "GET", "expires_in": 900}' # 4. Choose method (GET/PUT/DELETE) and expiration time
# 5. Copy the generated URL
# Generate an upload link (PUT) valid for 1 hour # Supported options:
curl -X POST "{{ api_base }}/presign/mybucket/upload.bin" \ # - Method: GET (download), PUT (upload), DELETE (remove)
-H "Content-Type: application/json" \ # - Expiration: 1 second to 7 days (604800 seconds)</code></pre>
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-d '{"method": "PUT", "expires_in": 3600}'</code></pre>
</div>
</article>
<article id="replication" class="card shadow-sm docs-section"> <article id="replication" class="card shadow-sm docs-section">
@@ -976,10 +987,201 @@ curl "{{ api_base }}/&lt;bucket&gt;?lifecycle" \
</div>
</div>
</article>
- <article id="troubleshooting" class="card shadow-sm docs-section">
<article id="metrics" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">13</span>
<h2 class="h4 mb-0">Metrics History</h2>
</div>
<p class="text-muted">Track CPU, memory, and disk usage over time with optional metrics history. Disabled by default to minimize overhead.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Enabling Metrics History</h3>
<p class="small text-muted">Set the environment variable to opt-in:</p>
<pre class="mb-3"><code class="language-bash"># PowerShell
$env:METRICS_HISTORY_ENABLED = "true"
python run.py
# Bash
export METRICS_HISTORY_ENABLED=true
python run.py</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Configuration Options</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Variable</th>
<th>Default</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>METRICS_HISTORY_ENABLED</code></td>
<td><code>false</code></td>
<td>Enable/disable metrics history recording</td>
</tr>
<tr>
<td><code>METRICS_HISTORY_RETENTION_HOURS</code></td>
<td><code>24</code></td>
<td>How long to keep history data (hours)</td>
</tr>
<tr>
<td><code>METRICS_HISTORY_INTERVAL_MINUTES</code></td>
<td><code>5</code></td>
<td>Interval between snapshots (minutes)</td>
</tr>
</tbody>
</table>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">API Endpoints</h3>
<pre class="mb-3"><code class="language-bash"># Get metrics history (last 24 hours by default)
curl "{{ api_base | replace('/api', '/ui') }}/metrics/history" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Get history for specific time range
curl "{{ api_base | replace('/api', '/ui') }}/metrics/history?hours=6" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Get current settings
curl "{{ api_base | replace('/api', '/ui') }}/metrics/settings" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Update settings at runtime
curl -X PUT "{{ api_base | replace('/api', '/ui') }}/metrics/settings" \
-H "Content-Type: application/json" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;" \
-d '{"enabled": true, "retention_hours": 48, "interval_minutes": 10}'</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Storage Location</h3>
<p class="small text-muted mb-3">History data is stored at:</p>
<code class="d-block mb-3">data/.myfsio.sys/config/metrics_history.json</code>
<div class="alert alert-light border mb-0">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1 flex-shrink-0" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
</svg>
<div>
<strong>UI Charts:</strong> When enabled, the Metrics dashboard displays line charts showing CPU, memory, and disk usage trends with a time range selector (1h, 6h, 24h, 7d).
</div>
</div>
</div>
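<h3 class="h6 text-uppercase text-muted mt-4">Scripting the History Endpoint</h3>
<p class="small text-muted">A minimal Python sketch that polls the history endpoint and summarizes CPU load; the base URL and credentials are placeholders, and the field names follow the JSON the charts above consume:</p>
<pre class="mb-0"><code class="language-python">import requests

BASE = "{{ api_base | replace('/api', '/ui') }}"
HEADERS = {"X-Access-Key": "&lt;key&gt;", "X-Secret-Key": "&lt;secret&gt;"}

# Each history entry carries timestamp, cpu_percent, memory_percent, disk_percent
data = requests.get(f"{BASE}/metrics/history", params={"hours": 6},
                    headers=HEADERS, timeout=10).json()
if data.get("enabled") and data.get("history"):
    cpu = [point["cpu_percent"] for point in data["history"]]
    print(f"{len(cpu)} snapshots, avg CPU {sum(cpu) / len(cpu):.2f}%")
else:
    print("Metrics history is disabled or has no data yet")</code></pre>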
</div>
</article>
<article id="operation-metrics" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">14</span>
<h2 class="h4 mb-0">Operation Metrics</h2>
</div>
<p class="text-muted">Track API request statistics including request counts, latency, error rates, and bandwidth usage. Provides real-time visibility into API operations.</p>
<h3 class="h6 text-uppercase text-muted mt-4">Enabling Operation Metrics</h3>
<p class="small text-muted">Set the environment variable to opt-in:</p>
<pre class="mb-3"><code class="language-bash"># PowerShell
$env:OPERATION_METRICS_ENABLED = "true"
python run.py
# Bash
export OPERATION_METRICS_ENABLED=true
python run.py</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Configuration Options</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Variable</th>
<th>Default</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>OPERATION_METRICS_ENABLED</code></td>
<td><code>false</code></td>
<td>Enable/disable operation metrics collection</td>
</tr>
<tr>
<td><code>OPERATION_METRICS_INTERVAL_MINUTES</code></td>
<td><code>5</code></td>
<td>Interval between snapshots (minutes)</td>
</tr>
<tr>
<td><code>OPERATION_METRICS_RETENTION_HOURS</code></td>
<td><code>24</code></td>
<td>How long to keep history data (hours)</td>
</tr>
</tbody>
</table>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">What's Tracked</h3>
<div class="row g-3 mb-4">
<div class="col-md-6">
<div class="bg-light rounded p-3 h-100">
<h6 class="small fw-bold mb-2">Request Statistics</h6>
<ul class="small text-muted mb-0 ps-3">
<li>Request counts by HTTP method (GET, PUT, POST, DELETE)</li>
<li>Response status codes (2xx, 3xx, 4xx, 5xx)</li>
<li>Average, min, max latency</li>
<li>Bytes transferred in/out</li>
</ul>
</div>
</div>
<div class="col-md-6">
<div class="bg-light rounded p-3 h-100">
<h6 class="small fw-bold mb-2">Endpoint Breakdown</h6>
<ul class="small text-muted mb-0 ps-3">
<li><code>object</code> - Object operations (GET/PUT/DELETE)</li>
<li><code>bucket</code> - Bucket operations</li>
<li><code>ui</code> - Web UI requests</li>
<li><code>service</code> - Health checks, etc.</li>
</ul>
</div>
</div>
</div>
<h3 class="h6 text-uppercase text-muted mt-4">S3 Error Codes</h3>
<p class="small text-muted">The dashboard tracks S3 API-specific error codes like <code>NoSuchKey</code>, <code>AccessDenied</code>, <code>BucketNotFound</code>. These are separate from HTTP status codes &ndash; a 404 from the UI won't appear here, only S3 API errors.</p>
<h3 class="h6 text-uppercase text-muted mt-4">API Endpoints</h3>
<pre class="mb-3"><code class="language-bash"># Get current operation metrics
curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Get operation metrics history
curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"
# Filter history by time range
curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history?hours=6" \
-H "X-Access-Key: &lt;key&gt;" -H "X-Secret-Key: &lt;secret&gt;"</code></pre>
<h3 class="h6 text-uppercase text-muted mt-4">Storage Location</h3>
<p class="small text-muted mb-3">Operation metrics data is stored at:</p>
<code class="d-block mb-3">data/.myfsio.sys/config/operation_metrics.json</code>
<div class="alert alert-light border mb-0">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1 flex-shrink-0" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
</svg>
<div>
<strong>UI Dashboard:</strong> When enabled, the Metrics page shows an "API Operations" section with summary cards, charts for requests by method/status/endpoint, and an S3 error codes table. Data refreshes every 5 seconds.
</div>
</div>
</div>
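<h3 class="h6 text-uppercase text-muted mt-4">Scripting the Operations Endpoint</h3>
<p class="small text-muted">A minimal Python sketch that reads the current snapshot and computes a success rate; base URL and credentials are placeholders, and the field names mirror the stats payload the dashboard renders:</p>
<pre class="mb-0"><code class="language-python">import requests

BASE = "{{ api_base | replace('/api', '/ui') }}"
HEADERS = {"X-Access-Key": "&lt;key&gt;", "X-Secret-Key": "&lt;secret&gt;"}

data = requests.get(f"{BASE}/metrics/operations", headers=HEADERS, timeout=10).json()
if data.get("enabled") and data.get("stats"):
    totals = data["stats"].get("totals", {})
    count = totals.get("count", 0)
    rate = (totals.get("success_count", 0) / count * 100) if count else 0.0
    print(f"{count} requests, {rate:.1f}% success, "
          f"avg latency {totals.get('latency_avg_ms', 0):.1f} ms")
else:
    print("Operation metrics not enabled")</code></pre>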
</div>
</article>
<article id="troubleshooting" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">15</span>
<h2 class="h4 mb-0">Troubleshooting &amp; tips</h2> <h2 class="h4 mb-0">Troubleshooting &amp; tips</h2>
</div> </div>
<div class="table-responsive"> <div class="table-responsive">
@@ -1045,6 +1247,8 @@ curl "{{ api_base }}/&lt;bucket&gt;?lifecycle" \
<li><a href="#quotas">Bucket Quotas</a></li> <li><a href="#quotas">Bucket Quotas</a></li>
<li><a href="#encryption">Encryption</a></li> <li><a href="#encryption">Encryption</a></li>
<li><a href="#lifecycle">Lifecycle Rules</a></li> <li><a href="#lifecycle">Lifecycle Rules</a></li>
<li><a href="#metrics">Metrics History</a></li>
<li><a href="#operation-metrics">Operation Metrics</a></li>
<li><a href="#troubleshooting">Troubleshooting</a></li> <li><a href="#troubleshooting">Troubleshooting</a></li>
</ul> </ul>
<div class="docs-sidebar-callouts"> <div class="docs-sidebar-callouts">

View File

@@ -267,9 +267,164 @@
</div>
</div>
</div>
{% if operation_metrics_enabled %}
<div class="row g-4 mt-2">
<div class="col-12">
<div class="card shadow-sm border-0">
<div class="card-header bg-transparent border-0 pt-4 px-4 d-flex justify-content-between align-items-center">
<h5 class="card-title mb-0 fw-semibold">API Operations</h5>
<div class="d-flex align-items-center gap-3">
<span class="small text-muted" id="opStatus">Loading...</span>
<button class="btn btn-outline-secondary btn-sm" id="resetOpMetricsBtn" title="Reset current window">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="bi bi-arrow-counterclockwise" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 3a5 5 0 1 1-4.546 2.914.5.5 0 0 0-.908-.417A6 6 0 1 0 8 2v1z"/>
<path d="M8 4.466V.534a.25.25 0 0 0-.41-.192L5.23 2.308a.25.25 0 0 0 0 .384l2.36 1.966A.25.25 0 0 0 8 4.466z"/>
</svg>
</button>
</div>
</div>
<div class="card-body p-4">
<div class="row g-3 mb-4">
<div class="col-6 col-md-4 col-lg-2">
<div class="text-center p-3 bg-light rounded h-100">
<h4 class="fw-bold mb-1" id="opTotalRequests">0</h4>
<small class="text-muted">Requests</small>
</div>
</div>
<div class="col-6 col-md-4 col-lg-2">
<div class="text-center p-3 bg-light rounded h-100">
<h4 class="fw-bold mb-1 text-success" id="opSuccessRate">0%</h4>
<small class="text-muted">Success</small>
</div>
</div>
<div class="col-6 col-md-4 col-lg-2">
<div class="text-center p-3 bg-light rounded h-100">
<h4 class="fw-bold mb-1 text-danger" id="opErrorCount">0</h4>
<small class="text-muted">Errors</small>
</div>
</div>
<div class="col-6 col-md-4 col-lg-2">
<div class="text-center p-3 bg-light rounded h-100">
<h4 class="fw-bold mb-1 text-info" id="opAvgLatency">0ms</h4>
<small class="text-muted">Latency</small>
</div>
</div>
<div class="col-6 col-md-4 col-lg-2">
<div class="text-center p-3 bg-light rounded h-100">
<h4 class="fw-bold mb-1 text-primary" id="opBytesIn">0 B</h4>
<small class="text-muted">Bytes In</small>
</div>
</div>
<div class="col-6 col-md-4 col-lg-2">
<div class="text-center p-3 bg-light rounded h-100">
<h4 class="fw-bold mb-1 text-secondary" id="opBytesOut">0 B</h4>
<small class="text-muted">Bytes Out</small>
</div>
</div>
</div>
<div class="row g-4">
<div class="col-lg-6">
<div class="bg-light rounded p-3">
<h6 class="text-muted small fw-bold text-uppercase mb-3">Requests by Method</h6>
<div style="height: 220px; display: flex; align-items: center; justify-content: center;">
<canvas id="methodChart"></canvas>
</div>
</div>
</div>
<div class="col-lg-6">
<div class="bg-light rounded p-3">
<h6 class="text-muted small fw-bold text-uppercase mb-3">Requests by Status</h6>
<div style="height: 220px;">
<canvas id="statusChart"></canvas>
</div>
</div>
</div>
</div>
<div class="row g-4 mt-1">
<div class="col-lg-6">
<div class="bg-light rounded p-3">
<h6 class="text-muted small fw-bold text-uppercase mb-3">Requests by Endpoint</h6>
<div style="height: 180px;">
<canvas id="endpointChart"></canvas>
</div>
</div>
</div>
<div class="col-lg-6">
<div class="bg-light rounded p-3 h-100 d-flex flex-column">
<div class="d-flex justify-content-between align-items-start mb-3">
<h6 class="text-muted small fw-bold text-uppercase mb-0">S3 Error Codes</h6>
<span class="badge bg-secondary-subtle text-secondary" style="font-size: 0.65rem;" title="Tracks S3 API errors like NoSuchKey, AccessDenied, etc.">API Only</span>
</div>
<div class="flex-grow-1 d-flex flex-column" style="min-height: 150px;">
<div class="d-flex border-bottom pb-2 mb-2" style="font-size: 0.75rem;">
<div class="text-muted fw-semibold" style="flex: 1;">Code</div>
<div class="text-muted fw-semibold text-end" style="width: 60px;">Count</div>
<div class="text-muted fw-semibold text-end" style="width: 100px;">Distribution</div>
</div>
<div id="errorCodesContainer" class="flex-grow-1" style="overflow-y: auto;">
<div id="errorCodesBody">
<div class="text-muted small text-center py-4">
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="bi bi-check-circle mb-2 text-success" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="M10.97 4.97a.235.235 0 0 0-.02.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-1.071-1.05z"/>
</svg>
<div>No S3 API errors</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
{% endif %}
{% if metrics_history_enabled %}
<div class="row g-4 mt-2">
<div class="col-12">
<div class="card shadow-sm border-0">
<div class="card-header bg-transparent border-0 pt-4 px-4 d-flex justify-content-between align-items-center">
<h5 class="card-title mb-0 fw-semibold">Metrics History</h5>
<div class="d-flex gap-2 align-items-center">
<select class="form-select form-select-sm" id="historyTimeRange" style="width: auto;">
<option value="1">Last 1 hour</option>
<option value="6">Last 6 hours</option>
<option value="24" selected>Last 24 hours</option>
<option value="168">Last 7 days</option>
</select>
</div>
</div>
<div class="card-body p-4">
<div class="row">
<div class="col-md-4 mb-4">
<h6 class="text-muted small fw-bold text-uppercase mb-3">CPU Usage</h6>
<canvas id="cpuHistoryChart" height="200"></canvas>
</div>
<div class="col-md-4 mb-4">
<h6 class="text-muted small fw-bold text-uppercase mb-3">Memory Usage</h6>
<canvas id="memoryHistoryChart" height="200"></canvas>
</div>
<div class="col-md-4 mb-4">
<h6 class="text-muted small fw-bold text-uppercase mb-3">Disk Usage</h6>
<canvas id="diskHistoryChart" height="200"></canvas>
</div>
</div>
<p class="text-muted small mb-0 text-center" id="historyStatus">Loading history data...</p>
</div>
</div>
</div>
</div>
{% endif %}
{% endblock %}
{% block extra_scripts %}
{% if metrics_history_enabled or operation_metrics_enabled %}
<script src="https://cdn.jsdelivr.net/npm/chart.js@4.4.1/dist/chart.umd.min.js"></script>
{% endif %}
<script>
(function() {
var refreshInterval = 5000;
@@ -285,7 +440,7 @@
.then(function(data) {
  var el;
  el = document.querySelector('[data-metric="cpu_percent"]');
- if (el) el.textContent = data.cpu_percent;
  if (el) el.textContent = data.cpu_percent.toFixed(2);
  el = document.querySelector('[data-metric="cpu_bar"]');
  if (el) {
    el.style.width = data.cpu_percent + '%';
@@ -298,7 +453,7 @@
}
el = document.querySelector('[data-metric="memory_percent"]');
- if (el) el.textContent = data.memory.percent;
if (el) el.textContent = data.memory.percent.toFixed(2);
el = document.querySelector('[data-metric="memory_bar"]');
if (el) el.style.width = data.memory.percent + '%';
el = document.querySelector('[data-metric="memory_used"]');
@@ -307,7 +462,7 @@
if (el) el.textContent = data.memory.total;
el = document.querySelector('[data-metric="disk_percent"]');
- if (el) el.textContent = data.disk.percent;
if (el) el.textContent = data.disk.percent.toFixed(2);
el = document.querySelector('[data-metric="disk_bar"]');
if (el) {
  el.style.width = data.disk.percent + '%';
@@ -372,5 +527,369 @@
startPolling();
})();
{% if operation_metrics_enabled %}
(function() {
var methodChart = null;
var statusChart = null;
var endpointChart = null;
var opStatus = document.getElementById('opStatus');
var opTimer = null;
var methodColors = {
'GET': '#0d6efd',
'PUT': '#198754',
'POST': '#ffc107',
'DELETE': '#dc3545',
'HEAD': '#6c757d',
'OPTIONS': '#0dcaf0'
};
var statusColors = {
'2xx': '#198754',
'3xx': '#0dcaf0',
'4xx': '#ffc107',
'5xx': '#dc3545'
};
var endpointColors = {
'object': '#0d6efd',
'bucket': '#198754',
'ui': '#6c757d',
'service': '#0dcaf0',
'kms': '#ffc107'
};
function formatBytes(bytes) {
if (bytes === 0) return '0 B';
var k = 1024;
var sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
var i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(1)) + ' ' + sizes[i];
}
function initOpCharts() {
var methodCtx = document.getElementById('methodChart');
var statusCtx = document.getElementById('statusChart');
var endpointCtx = document.getElementById('endpointChart');
if (methodCtx) {
methodChart = new Chart(methodCtx, {
type: 'doughnut',
data: {
labels: [],
datasets: [{
data: [],
backgroundColor: []
}]
},
options: {
responsive: true,
maintainAspectRatio: false,
animation: false,
plugins: {
legend: { position: 'right', labels: { boxWidth: 12, font: { size: 11 } } }
}
}
});
}
if (statusCtx) {
statusChart = new Chart(statusCtx, {
type: 'bar',
data: {
labels: [],
datasets: [{
data: [],
backgroundColor: []
}]
},
options: {
responsive: true,
maintainAspectRatio: false,
animation: false,
plugins: { legend: { display: false } },
scales: {
y: { beginAtZero: true, ticks: { stepSize: 1 } }
}
}
});
}
if (endpointCtx) {
endpointChart = new Chart(endpointCtx, {
type: 'bar',
data: {
labels: [],
datasets: [{
data: [],
backgroundColor: []
}]
},
options: {
responsive: true,
maintainAspectRatio: false,
indexAxis: 'y',
animation: false,
plugins: { legend: { display: false } },
scales: {
x: { beginAtZero: true, ticks: { stepSize: 1 } }
}
}
});
}
}
function updateOpMetrics() {
if (document.hidden) return;
fetch('/ui/metrics/operations')
.then(function(r) { return r.json(); })
.then(function(data) {
if (!data.enabled || !data.stats) {
if (opStatus) opStatus.textContent = 'Operation metrics not available';
return;
}
var stats = data.stats;
var totals = stats.totals || {};
var totalEl = document.getElementById('opTotalRequests');
var successEl = document.getElementById('opSuccessRate');
var errorEl = document.getElementById('opErrorCount');
var latencyEl = document.getElementById('opAvgLatency');
var bytesInEl = document.getElementById('opBytesIn');
var bytesOutEl = document.getElementById('opBytesOut');
if (totalEl) totalEl.textContent = totals.count || 0;
if (successEl) {
var rate = totals.count > 0 ? ((totals.success_count / totals.count) * 100).toFixed(1) : 0;
successEl.textContent = rate + '%';
}
if (errorEl) errorEl.textContent = totals.error_count || 0;
if (latencyEl) latencyEl.textContent = (totals.latency_avg_ms || 0).toFixed(1) + 'ms';
if (bytesInEl) bytesInEl.textContent = formatBytes(totals.bytes_in || 0);
if (bytesOutEl) bytesOutEl.textContent = formatBytes(totals.bytes_out || 0);
if (methodChart && stats.by_method) {
var methods = Object.keys(stats.by_method);
var methodData = methods.map(function(m) { return stats.by_method[m].count; });
var methodBg = methods.map(function(m) { return methodColors[m] || '#6c757d'; });
methodChart.data.labels = methods;
methodChart.data.datasets[0].data = methodData;
methodChart.data.datasets[0].backgroundColor = methodBg;
methodChart.update('none');
}
if (statusChart && stats.by_status_class) {
var statuses = Object.keys(stats.by_status_class).sort();
var statusData = statuses.map(function(s) { return stats.by_status_class[s]; });
var statusBg = statuses.map(function(s) { return statusColors[s] || '#6c757d'; });
statusChart.data.labels = statuses;
statusChart.data.datasets[0].data = statusData;
statusChart.data.datasets[0].backgroundColor = statusBg;
statusChart.update('none');
}
if (endpointChart && stats.by_endpoint) {
var endpoints = Object.keys(stats.by_endpoint);
var endpointData = endpoints.map(function(e) { return stats.by_endpoint[e].count; });
var endpointBg = endpoints.map(function(e) { return endpointColors[e] || '#6c757d'; });
endpointChart.data.labels = endpoints;
endpointChart.data.datasets[0].data = endpointData;
endpointChart.data.datasets[0].backgroundColor = endpointBg;
endpointChart.update('none');
}
var errorBody = document.getElementById('errorCodesBody');
if (errorBody && stats.error_codes) {
var errorCodes = Object.entries(stats.error_codes);
errorCodes.sort(function(a, b) { return b[1] - a[1]; });
var totalErrors = errorCodes.reduce(function(sum, e) { return sum + e[1]; }, 0);
errorCodes = errorCodes.slice(0, 10);
if (errorCodes.length === 0) {
errorBody.innerHTML = '<div class="text-muted small text-center py-4">' +
'<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="bi bi-check-circle mb-2 text-success" viewBox="0 0 16 16">' +
'<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>' +
'<path d="M10.97 4.97a.235.235 0 0 0-.02.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-1.071-1.05z"/>' +
'</svg><div>No S3 API errors</div></div>';
} else {
errorBody.innerHTML = errorCodes.map(function(e) {
var pct = totalErrors > 0 ? ((e[1] / totalErrors) * 100).toFixed(0) : 0;
return '<div class="d-flex align-items-center py-1" style="font-size: 0.8rem;">' +
'<div style="flex: 1;"><code class="text-danger">' + e[0] + '</code></div>' +
'<div class="text-end fw-semibold" style="width: 60px;">' + e[1] + '</div>' +
'<div style="width: 100px; padding-left: 10px;"><div class="progress" style="height: 6px;"><div class="progress-bar bg-danger" style="width: ' + pct + '%"></div></div></div>' +
'</div>';
}).join('');
}
}
var windowMins = Math.floor(stats.window_seconds / 60);
var windowSecs = stats.window_seconds % 60;
var windowStr = windowMins > 0 ? windowMins + 'm ' + windowSecs + 's' : windowSecs + 's';
if (opStatus) opStatus.textContent = 'Window: ' + windowStr + ' | ' + new Date().toLocaleTimeString();
})
.catch(function(err) {
console.error('Operation metrics fetch error:', err);
if (opStatus) opStatus.textContent = 'Failed to load';
});
}
function startOpPolling() {
if (opTimer) clearInterval(opTimer);
opTimer = setInterval(updateOpMetrics, 5000);
}
var resetBtn = document.getElementById('resetOpMetricsBtn');
if (resetBtn) {
resetBtn.addEventListener('click', function() {
updateOpMetrics();
});
}
document.addEventListener('visibilitychange', function() {
if (document.hidden) {
if (opTimer) clearInterval(opTimer);
opTimer = null;
} else {
updateOpMetrics();
startOpPolling();
}
});
initOpCharts();
updateOpMetrics();
startOpPolling();
})();
{% endif %}
{% if metrics_history_enabled %}
(function() {
var cpuChart = null;
var memoryChart = null;
var diskChart = null;
var historyStatus = document.getElementById('historyStatus');
var timeRangeSelect = document.getElementById('historyTimeRange');
var historyTimer = null;
var MAX_DATA_POINTS = 500;
function createChart(ctx, label, color) {
return new Chart(ctx, {
type: 'line',
data: {
labels: [],
datasets: [{
label: label,
data: [],
borderColor: color,
backgroundColor: color + '20',
fill: true,
tension: 0.3,
pointRadius: 3,
pointHoverRadius: 6,
hitRadius: 10,
}]
},
options: {
responsive: true,
maintainAspectRatio: true,
animation: false,
plugins: {
legend: { display: false },
tooltip: {
callbacks: {
label: function(ctx) { return ctx.parsed.y.toFixed(2) + '%'; }
}
}
},
scales: {
x: {
display: true,
ticks: { maxRotation: 0, font: { size: 10 }, autoSkip: true, maxTicksLimit: 10 }
},
y: {
display: true,
min: 0,
max: 100,
ticks: { callback: function(v) { return v + '%'; } }
}
}
}
});
}
function initCharts() {
var cpuCtx = document.getElementById('cpuHistoryChart');
var memCtx = document.getElementById('memoryHistoryChart');
var diskCtx = document.getElementById('diskHistoryChart');
if (cpuCtx) cpuChart = createChart(cpuCtx, 'CPU %', '#0d6efd');
if (memCtx) memoryChart = createChart(memCtx, 'Memory %', '#0dcaf0');
if (diskCtx) diskChart = createChart(diskCtx, 'Disk %', '#ffc107');
}
function formatTime(ts) {
var d = new Date(ts);
return d.toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' });
}
function loadHistory() {
if (document.hidden) return;
var hours = timeRangeSelect ? timeRangeSelect.value : 24;
fetch('/ui/metrics/history?hours=' + hours)
.then(function(r) { return r.json(); })
.then(function(data) {
if (!data.enabled || !data.history || data.history.length === 0) {
if (historyStatus) historyStatus.textContent = 'No history data available yet. Data is recorded every ' + (data.interval_minutes || 5) + ' minutes.';
return;
}
var history = data.history.slice(-MAX_DATA_POINTS);
var labels = history.map(function(h) { return formatTime(h.timestamp); });
var cpuData = history.map(function(h) { return h.cpu_percent; });
var memData = history.map(function(h) { return h.memory_percent; });
var diskData = history.map(function(h) { return h.disk_percent; });
if (cpuChart) {
cpuChart.data.labels = labels;
cpuChart.data.datasets[0].data = cpuData;
cpuChart.update('none');
}
if (memoryChart) {
memoryChart.data.labels = labels;
memoryChart.data.datasets[0].data = memData;
memoryChart.update('none');
}
if (diskChart) {
diskChart.data.labels = labels;
diskChart.data.datasets[0].data = diskData;
diskChart.update('none');
}
if (historyStatus) historyStatus.textContent = 'Showing ' + history.length + ' data points';
})
.catch(function(err) {
console.error('History fetch error:', err);
if (historyStatus) historyStatus.textContent = 'Failed to load history data';
});
}
function startHistoryPolling() {
if (historyTimer) clearInterval(historyTimer);
historyTimer = setInterval(loadHistory, 60000);
}
if (timeRangeSelect) {
timeRangeSelect.addEventListener('change', loadHistory);
}
document.addEventListener('visibilitychange', function() {
if (document.hidden) {
if (historyTimer) clearInterval(historyTimer);
historyTimer = null;
} else {
loadHistory();
startHistoryPolling();
}
});
initCharts();
loadHistory();
startHistoryPolling();
})();
{% endif %}
</script>
{% endblock %}

View File

@@ -35,6 +35,7 @@ def app(tmp_path: Path):
flask_app = create_api_app(
    {
        "TESTING": True,
        "SECRET_KEY": "testing",
        "STORAGE_ROOT": storage_root,
        "IAM_CONFIG": iam_config,
        "BUCKET_POLICY_PATH": bucket_policies,

View File

@@ -1,6 +1,3 @@
- from urllib.parse import urlsplit

def test_bucket_and_object_lifecycle(client, signer):
    headers = signer("PUT", "/photos")
    response = client.put("/photos", headers=headers)
@@ -104,12 +101,12 @@ def test_request_id_header_present(client, signer):
    assert response.headers.get("X-Request-ID")

- def test_healthcheck_returns_version(client):
-     response = client.get("/healthz")
def test_healthcheck_returns_status(client):
    response = client.get("/myfsio/health")
    data = response.get_json()
    assert response.status_code == 200
    assert data["status"] == "ok"
-     assert "version" in data
    assert "version" not in data

def test_missing_credentials_denied(client):
@@ -117,36 +114,20 @@ def test_missing_credentials_denied(client):
assert response.status_code == 403 assert response.status_code == 403
def test_presign_and_bucket_policies(client, signer): def test_bucket_policies_deny_reads(client, signer):
# Create bucket and object import json
headers = signer("PUT", "/docs") headers = signer("PUT", "/docs")
assert client.put("/docs", headers=headers).status_code == 200 assert client.put("/docs", headers=headers).status_code == 200
headers = signer("PUT", "/docs/readme.txt", body=b"content") headers = signer("PUT", "/docs/readme.txt", body=b"content")
assert client.put("/docs/readme.txt", headers=headers, data=b"content").status_code == 200 assert client.put("/docs/readme.txt", headers=headers, data=b"content").status_code == 200
# Generate presigned GET URL and follow it headers = signer("GET", "/docs/readme.txt")
json_body = {"method": "GET", "expires_in": 120} response = client.get("/docs/readme.txt", headers=headers)
# Flask test client json parameter automatically sets Content-Type and serializes body
# But for signing we need the body bytes.
import json
body_bytes = json.dumps(json_body).encode("utf-8")
headers = signer("POST", "/presign/docs/readme.txt", headers={"Content-Type": "application/json"}, body=body_bytes)
response = client.post(
"/presign/docs/readme.txt",
headers=headers,
json=json_body,
)
assert response.status_code == 200 assert response.status_code == 200
presigned_url = response.get_json()["url"] assert response.data == b"content"
parts = urlsplit(presigned_url)
presigned_path = f"{parts.path}?{parts.query}"
download = client.get(presigned_path)
assert download.status_code == 200
assert download.data == b"content"
# Attach a deny policy for GETs
policy = { policy = {
"Version": "2012-10-17", "Version": "2012-10-17",
"Statement": [ "Statement": [
@@ -160,29 +141,26 @@ def test_presign_and_bucket_policies(client, signer):
], ],
} }
policy_bytes = json.dumps(policy).encode("utf-8") policy_bytes = json.dumps(policy).encode("utf-8")
headers = signer("PUT", "/bucket-policy/docs", headers={"Content-Type": "application/json"}, body=policy_bytes) headers = signer("PUT", "/docs?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
assert client.put("/bucket-policy/docs", headers=headers, json=policy).status_code == 204 assert client.put("/docs?policy", headers=headers, json=policy).status_code == 204
headers = signer("GET", "/bucket-policy/docs") headers = signer("GET", "/docs?policy")
fetched = client.get("/bucket-policy/docs", headers=headers) fetched = client.get("/docs?policy", headers=headers)
assert fetched.status_code == 200 assert fetched.status_code == 200
assert fetched.get_json()["Version"] == "2012-10-17" assert fetched.get_json()["Version"] == "2012-10-17"
# Reads are now denied by bucket policy
headers = signer("GET", "/docs/readme.txt") headers = signer("GET", "/docs/readme.txt")
denied = client.get("/docs/readme.txt", headers=headers) denied = client.get("/docs/readme.txt", headers=headers)
assert denied.status_code == 403 assert denied.status_code == 403
# Presign attempts are also denied headers = signer("DELETE", "/docs?policy")
json_body = {"method": "GET", "expires_in": 60} assert client.delete("/docs?policy", headers=headers).status_code == 204
body_bytes = json.dumps(json_body).encode("utf-8")
headers = signer("POST", "/presign/docs/readme.txt", headers={"Content-Type": "application/json"}, body=body_bytes) headers = signer("DELETE", "/docs/readme.txt")
response = client.post( assert client.delete("/docs/readme.txt", headers=headers).status_code == 204
"/presign/docs/readme.txt",
headers=headers, headers = signer("DELETE", "/docs")
json=json_body, assert client.delete("/docs", headers=headers).status_code == 204
)
assert response.status_code == 403
def test_trailing_slash_returns_xml(client): def test_trailing_slash_returns_xml(client):
@@ -193,6 +171,8 @@ def test_trailing_slash_returns_xml(client):
def test_public_policy_allows_anonymous_list_and_read(client, signer): def test_public_policy_allows_anonymous_list_and_read(client, signer):
import json
headers = signer("PUT", "/public") headers = signer("PUT", "/public")
assert client.put("/public", headers=headers).status_code == 200 assert client.put("/public", headers=headers).status_code == 200
@@ -221,10 +201,9 @@ def test_public_policy_allows_anonymous_list_and_read(client, signer):
}, },
], ],
} }
import json
policy_bytes = json.dumps(policy).encode("utf-8") policy_bytes = json.dumps(policy).encode("utf-8")
headers = signer("PUT", "/bucket-policy/public", headers={"Content-Type": "application/json"}, body=policy_bytes) headers = signer("PUT", "/public?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
assert client.put("/bucket-policy/public", headers=headers, json=policy).status_code == 204 assert client.put("/public?policy", headers=headers, json=policy).status_code == 204
list_response = client.get("/public") list_response = client.get("/public")
assert list_response.status_code == 200 assert list_response.status_code == 200
@@ -237,14 +216,16 @@ def test_public_policy_allows_anonymous_list_and_read(client, signer):
headers = signer("DELETE", "/public/hello.txt") headers = signer("DELETE", "/public/hello.txt")
assert client.delete("/public/hello.txt", headers=headers).status_code == 204 assert client.delete("/public/hello.txt", headers=headers).status_code == 204
headers = signer("DELETE", "/bucket-policy/public") headers = signer("DELETE", "/public?policy")
assert client.delete("/bucket-policy/public", headers=headers).status_code == 204 assert client.delete("/public?policy", headers=headers).status_code == 204
headers = signer("DELETE", "/public") headers = signer("DELETE", "/public")
assert client.delete("/public", headers=headers).status_code == 204 assert client.delete("/public", headers=headers).status_code == 204
def test_principal_dict_with_object_get_only(client, signer): def test_principal_dict_with_object_get_only(client, signer):
import json
headers = signer("PUT", "/mixed") headers = signer("PUT", "/mixed")
assert client.put("/mixed", headers=headers).status_code == 200 assert client.put("/mixed", headers=headers).status_code == 200
@@ -270,10 +251,9 @@ def test_principal_dict_with_object_get_only(client, signer):
}, },
], ],
} }
import json
policy_bytes = json.dumps(policy).encode("utf-8") policy_bytes = json.dumps(policy).encode("utf-8")
headers = signer("PUT", "/bucket-policy/mixed", headers={"Content-Type": "application/json"}, body=policy_bytes) headers = signer("PUT", "/mixed?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
assert client.put("/bucket-policy/mixed", headers=headers, json=policy).status_code == 204 assert client.put("/mixed?policy", headers=headers, json=policy).status_code == 204
assert client.get("/mixed").status_code == 403 assert client.get("/mixed").status_code == 403
allowed = client.get("/mixed/only.txt") allowed = client.get("/mixed/only.txt")
@@ -283,14 +263,16 @@ def test_principal_dict_with_object_get_only(client, signer):
headers = signer("DELETE", "/mixed/only.txt") headers = signer("DELETE", "/mixed/only.txt")
assert client.delete("/mixed/only.txt", headers=headers).status_code == 204 assert client.delete("/mixed/only.txt", headers=headers).status_code == 204
headers = signer("DELETE", "/bucket-policy/mixed") headers = signer("DELETE", "/mixed?policy")
assert client.delete("/bucket-policy/mixed", headers=headers).status_code == 204 assert client.delete("/mixed?policy", headers=headers).status_code == 204
headers = signer("DELETE", "/mixed") headers = signer("DELETE", "/mixed")
assert client.delete("/mixed", headers=headers).status_code == 204 assert client.delete("/mixed", headers=headers).status_code == 204
def test_bucket_policy_wildcard_resource_allows_object_get(client, signer): def test_bucket_policy_wildcard_resource_allows_object_get(client, signer):
import json
headers = signer("PUT", "/test") headers = signer("PUT", "/test")
assert client.put("/test", headers=headers).status_code == 200 assert client.put("/test", headers=headers).status_code == 200
@@ -314,10 +296,9 @@ def test_bucket_policy_wildcard_resource_allows_object_get(client, signer):
}, },
], ],
} }
import json
policy_bytes = json.dumps(policy).encode("utf-8") policy_bytes = json.dumps(policy).encode("utf-8")
headers = signer("PUT", "/bucket-policy/test", headers={"Content-Type": "application/json"}, body=policy_bytes) headers = signer("PUT", "/test?policy", headers={"Content-Type": "application/json"}, body=policy_bytes)
assert client.put("/bucket-policy/test", headers=headers, json=policy).status_code == 204 assert client.put("/test?policy", headers=headers, json=policy).status_code == 204
listing = client.get("/test") listing = client.get("/test")
assert listing.status_code == 403 assert listing.status_code == 403
@@ -328,8 +309,8 @@ def test_bucket_policy_wildcard_resource_allows_object_get(client, signer):
headers = signer("DELETE", "/test/vid.mp4") headers = signer("DELETE", "/test/vid.mp4")
assert client.delete("/test/vid.mp4", headers=headers).status_code == 204 assert client.delete("/test/vid.mp4", headers=headers).status_code == 204
headers = signer("DELETE", "/bucket-policy/test") headers = signer("DELETE", "/test?policy")
assert client.delete("/bucket-policy/test", headers=headers).status_code == 204 assert client.delete("/test?policy", headers=headers).status_code == 204
headers = signer("DELETE", "/test") headers = signer("DELETE", "/test")
assert client.delete("/test", headers=headers).status_code == 204 assert client.delete("/test", headers=headers).status_code == 204


@@ -15,6 +15,7 @@ def kms_client(tmp_path):
     app = create_app({
         "TESTING": True,
+        "SECRET_KEY": "testing",
         "STORAGE_ROOT": str(tmp_path / "storage"),
         "IAM_CONFIG": str(tmp_path / "iam.json"),
         "BUCKET_POLICY_PATH": str(tmp_path / "policies.json"),


@@ -0,0 +1,297 @@
import threading
import time
from pathlib import Path

import pytest

from app.operation_metrics import (
    OperationMetricsCollector,
    OperationStats,
    classify_endpoint,
)

class TestOperationStats:
    def test_initial_state(self):
        stats = OperationStats()
        assert stats.count == 0
        assert stats.success_count == 0
        assert stats.error_count == 0
        assert stats.latency_sum_ms == 0.0
        assert stats.bytes_in == 0
        assert stats.bytes_out == 0

    def test_record_success(self):
        stats = OperationStats()
        stats.record(latency_ms=50.0, success=True, bytes_in=100, bytes_out=200)
        assert stats.count == 1
        assert stats.success_count == 1
        assert stats.error_count == 0
        assert stats.latency_sum_ms == 50.0
        assert stats.latency_min_ms == 50.0
        assert stats.latency_max_ms == 50.0
        assert stats.bytes_in == 100
        assert stats.bytes_out == 200

    def test_record_error(self):
        stats = OperationStats()
        stats.record(latency_ms=100.0, success=False, bytes_in=50, bytes_out=0)
        assert stats.count == 1
        assert stats.success_count == 0
        assert stats.error_count == 1

    def test_latency_min_max(self):
        stats = OperationStats()
        stats.record(latency_ms=50.0, success=True)
        stats.record(latency_ms=10.0, success=True)
        stats.record(latency_ms=100.0, success=True)
        assert stats.latency_min_ms == 10.0
        assert stats.latency_max_ms == 100.0
        assert stats.latency_sum_ms == 160.0

    def test_to_dict(self):
        stats = OperationStats()
        stats.record(latency_ms=50.0, success=True, bytes_in=100, bytes_out=200)
        stats.record(latency_ms=100.0, success=False, bytes_in=50, bytes_out=0)
        result = stats.to_dict()
        assert result["count"] == 2
        assert result["success_count"] == 1
        assert result["error_count"] == 1
        assert result["latency_avg_ms"] == 75.0
        assert result["latency_min_ms"] == 50.0
        assert result["latency_max_ms"] == 100.0
        assert result["bytes_in"] == 150
        assert result["bytes_out"] == 200

    def test_to_dict_empty(self):
        stats = OperationStats()
        result = stats.to_dict()
        assert result["count"] == 0
        assert result["latency_avg_ms"] == 0.0
        assert result["latency_min_ms"] == 0.0

    def test_merge(self):
        stats1 = OperationStats()
        stats1.record(latency_ms=50.0, success=True, bytes_in=100, bytes_out=200)
        stats2 = OperationStats()
        stats2.record(latency_ms=10.0, success=True, bytes_in=50, bytes_out=100)
        stats2.record(latency_ms=100.0, success=False, bytes_in=25, bytes_out=50)
        stats1.merge(stats2)
        assert stats1.count == 3
        assert stats1.success_count == 2
        assert stats1.error_count == 1
        assert stats1.latency_min_ms == 10.0
        assert stats1.latency_max_ms == 100.0
        assert stats1.bytes_in == 175
        assert stats1.bytes_out == 350

class TestClassifyEndpoint:
    def test_root_path(self):
        assert classify_endpoint("/") == "service"
        assert classify_endpoint("") == "service"

    def test_ui_paths(self):
        assert classify_endpoint("/ui") == "ui"
        assert classify_endpoint("/ui/buckets") == "ui"
        assert classify_endpoint("/ui/metrics") == "ui"

    def test_kms_paths(self):
        assert classify_endpoint("/kms") == "kms"
        assert classify_endpoint("/kms/keys") == "kms"

    def test_service_paths(self):
        assert classify_endpoint("/myfsio/health") == "service"

    def test_bucket_paths(self):
        assert classify_endpoint("/mybucket") == "bucket"
        assert classify_endpoint("/mybucket/") == "bucket"

    def test_object_paths(self):
        assert classify_endpoint("/mybucket/mykey") == "object"
        assert classify_endpoint("/mybucket/folder/nested/key.txt") == "object"

class TestOperationMetricsCollector:
    def test_record_and_get_stats(self, tmp_path: Path):
        collector = OperationMetricsCollector(
            storage_root=tmp_path,
            interval_minutes=60,
            retention_hours=24,
        )
        try:
            collector.record_request(
                method="GET",
                endpoint_type="bucket",
                status_code=200,
                latency_ms=50.0,
                bytes_in=0,
                bytes_out=1000,
            )
            collector.record_request(
                method="PUT",
                endpoint_type="object",
                status_code=201,
                latency_ms=100.0,
                bytes_in=500,
                bytes_out=0,
            )
            collector.record_request(
                method="GET",
                endpoint_type="object",
                status_code=404,
                latency_ms=25.0,
                bytes_in=0,
                bytes_out=0,
                error_code="NoSuchKey",
            )
            stats = collector.get_current_stats()
            assert stats["totals"]["count"] == 3
            assert stats["totals"]["success_count"] == 2
            assert stats["totals"]["error_count"] == 1
            assert "GET" in stats["by_method"]
            assert stats["by_method"]["GET"]["count"] == 2
            assert "PUT" in stats["by_method"]
            assert stats["by_method"]["PUT"]["count"] == 1
            assert "bucket" in stats["by_endpoint"]
            assert "object" in stats["by_endpoint"]
            assert stats["by_endpoint"]["object"]["count"] == 2
            assert stats["by_status_class"]["2xx"] == 2
            assert stats["by_status_class"]["4xx"] == 1
            assert stats["error_codes"]["NoSuchKey"] == 1
        finally:
            collector.shutdown()

    def test_thread_safety(self, tmp_path: Path):
        collector = OperationMetricsCollector(
            storage_root=tmp_path,
            interval_minutes=60,
            retention_hours=24,
        )
        try:
            num_threads = 5
            requests_per_thread = 100
            threads = []

            def record_requests():
                for _ in range(requests_per_thread):
                    collector.record_request(
                        method="GET",
                        endpoint_type="object",
                        status_code=200,
                        latency_ms=10.0,
                    )

            for _ in range(num_threads):
                t = threading.Thread(target=record_requests)
                threads.append(t)
                t.start()
            for t in threads:
                t.join()
            stats = collector.get_current_stats()
            assert stats["totals"]["count"] == num_threads * requests_per_thread
        finally:
            collector.shutdown()

    def test_status_class_categorization(self, tmp_path: Path):
        collector = OperationMetricsCollector(
            storage_root=tmp_path,
            interval_minutes=60,
            retention_hours=24,
        )
        try:
            collector.record_request("GET", "object", 200, 10.0)
            collector.record_request("GET", "object", 204, 10.0)
            collector.record_request("GET", "object", 301, 10.0)
            collector.record_request("GET", "object", 304, 10.0)
            collector.record_request("GET", "object", 400, 10.0)
            collector.record_request("GET", "object", 403, 10.0)
            collector.record_request("GET", "object", 404, 10.0)
            collector.record_request("GET", "object", 500, 10.0)
            collector.record_request("GET", "object", 503, 10.0)
            stats = collector.get_current_stats()
            assert stats["by_status_class"]["2xx"] == 2
            assert stats["by_status_class"]["3xx"] == 2
            assert stats["by_status_class"]["4xx"] == 3
            assert stats["by_status_class"]["5xx"] == 2
        finally:
            collector.shutdown()

    def test_error_code_tracking(self, tmp_path: Path):
        collector = OperationMetricsCollector(
            storage_root=tmp_path,
            interval_minutes=60,
            retention_hours=24,
        )
        try:
            collector.record_request("GET", "object", 404, 10.0, error_code="NoSuchKey")
            collector.record_request("GET", "object", 404, 10.0, error_code="NoSuchKey")
            collector.record_request("GET", "bucket", 403, 10.0, error_code="AccessDenied")
            collector.record_request("PUT", "object", 500, 10.0, error_code="InternalError")
            stats = collector.get_current_stats()
            assert stats["error_codes"]["NoSuchKey"] == 2
            assert stats["error_codes"]["AccessDenied"] == 1
            assert stats["error_codes"]["InternalError"] == 1
        finally:
            collector.shutdown()

    def test_history_persistence(self, tmp_path: Path):
        collector = OperationMetricsCollector(
            storage_root=tmp_path,
            interval_minutes=60,
            retention_hours=24,
        )
        try:
            collector.record_request("GET", "object", 200, 10.0)
            collector._take_snapshot()
            history = collector.get_history()
            assert len(history) == 1
            assert history[0]["totals"]["count"] == 1
            config_path = tmp_path / ".myfsio.sys" / "config" / "operation_metrics.json"
            assert config_path.exists()
        finally:
            collector.shutdown()

    def test_get_history_with_hours_filter(self, tmp_path: Path):
        collector = OperationMetricsCollector(
            storage_root=tmp_path,
            interval_minutes=60,
            retention_hours=24,
        )
        try:
            collector.record_request("GET", "object", 200, 10.0)
            collector._take_snapshot()
            history_all = collector.get_history()
            history_recent = collector.get_history(hours=1)
            assert len(history_all) >= len(history_recent)
        finally:
            collector.shutdown()
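
For readers without the source handy, here is a minimal sketch of the interface these tests pin down. The field and method names are taken directly from the assertions above; everything else is an assumption rather than the shipped app.operation_metrics module, and the real collector must additionally guard record_request with a threading.Lock for the thread-safety test to hold.

from dataclasses import dataclass

@dataclass
class OperationStats:
    count: int = 0
    success_count: int = 0
    error_count: int = 0
    latency_sum_ms: float = 0.0
    latency_min_ms: float = 0.0
    latency_max_ms: float = 0.0
    bytes_in: int = 0
    bytes_out: int = 0

    def record(self, latency_ms, success, bytes_in=0, bytes_out=0):
        # The first sample seeds the minimum; later samples tighten min/max.
        self.latency_min_ms = latency_ms if self.count == 0 else min(self.latency_min_ms, latency_ms)
        self.latency_max_ms = max(self.latency_max_ms, latency_ms)
        self.count += 1
        self.success_count += int(success)
        self.error_count += int(not success)
        self.latency_sum_ms += latency_ms
        self.bytes_in += bytes_in
        self.bytes_out += bytes_out

    def merge(self, other):
        if other.count:
            self.latency_min_ms = other.latency_min_ms if self.count == 0 else min(self.latency_min_ms, other.latency_min_ms)
            self.latency_max_ms = max(self.latency_max_ms, other.latency_max_ms)
        self.count += other.count
        self.success_count += other.success_count
        self.error_count += other.error_count
        self.latency_sum_ms += other.latency_sum_ms
        self.bytes_in += other.bytes_in
        self.bytes_out += other.bytes_out

    def to_dict(self):
        return {
            "count": self.count,
            "success_count": self.success_count,
            "error_count": self.error_count,
            "latency_avg_ms": self.latency_sum_ms / self.count if self.count else 0.0,
            "latency_min_ms": self.latency_min_ms,
            "latency_max_ms": self.latency_max_ms,
            "bytes_in": self.bytes_in,
            "bytes_out": self.bytes_out,
        }

def classify_endpoint(path: str) -> str:
    # Reserved prefixes first; then one path segment is a bucket, more is an object.
    parts = [p for p in path.split("/") if p]
    if not parts or parts[0] == "myfsio":
        return "service"
    if parts[0] in ("ui", "kms"):
        return parts[0]
    return "bucket" if len(parts) == 1 else "object"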


@@ -28,6 +28,7 @@ def _make_app(tmp_path: Path):
     flask_app = create_app(
         {
             "TESTING": True,
+            "SECRET_KEY": "testing",
             "WTF_CSRF_ENABLED": False,
             "STORAGE_ROOT": storage_root,
             "IAM_CONFIG": iam_config,