Compare commits

43 Commits

| SHA1 |
|---|
| 52660570c1 |
| 67f057ca1c |
| 35f61313e0 |
| 01e79e6993 |
| 1e3c4b545f |
| c470cfb576 |
| 4ecd32a554 |
| aa6d7c4d28 |
| 6e6d6d32bf |
| 54705ab9c4 |
| d96955deee |
| 77a46d0725 |
| 0f750b9d89 |
| e0dee9db36 |
| 126657c99f |
| 07fb1ac773 |
| 147962e1dd |
| 2643a79121 |
| e9a035827b |
| 033b8a82be |
| e76c311231 |
| cbdf1a27c8 |
| 4a60cb269a |
| ebe7f6222d |
| 70b61fd8e6 |
| 85181f0be6 |
| a779b002d7 |
| d5ca7a8be1 |
| 45d21cce21 |
| 9629507acd |
| 5d6cb4efa1 |
| 56ad83bbaf |
| 847933b7c0 |
| be55d08c0a |
| 8c4bf67974 |
| 9385d1fe1c |
| 0ea54457e8 |
| ae26d22388 |
| 6b715851b9 |
| 62c36f7a6c |
| b32f1f94f7 |
| 6e3d280a75 |
| 704f79dc44 |
```diff
@@ -1,12 +1,10 @@
-# syntax=docker/dockerfile:1.7
-FROM python:3.12.12-slim
+FROM python:3.14.3-slim
 
 ENV PYTHONDONTWRITEBYTECODE=1 \
     PYTHONUNBUFFERED=1
 
 WORKDIR /app
 
-# Install build deps for any wheels that need compilation, then clean up
 RUN apt-get update \
     && apt-get install -y --no-install-recommends build-essential \
     && rm -rf /var/lib/apt/lists/*
@@ -16,10 +14,8 @@ RUN pip install --no-cache-dir -r requirements.txt
 
 COPY . .
 
-# Make entrypoint executable
 RUN chmod +x docker-entrypoint.sh
 
-# Create data directory and set permissions
 RUN mkdir -p /app/data \
     && useradd -m -u 1000 myfsio \
     && chown -R myfsio:myfsio /app
```
```diff
@@ -102,6 +102,11 @@ python run.py --mode ui # UI only (port 5100)
 | `ENCRYPTION_ENABLED` | `false` | Enable server-side encryption |
 | `KMS_ENABLED` | `false` | Enable Key Management Service |
 | `LOG_LEVEL` | `INFO` | Logging verbosity |
+| `SIGV4_TIMESTAMP_TOLERANCE_SECONDS` | `900` | Max time skew for SigV4 requests |
+| `PRESIGNED_URL_MAX_EXPIRY_SECONDS` | `604800` | Max presigned URL expiry (7 days) |
+| `REPLICATION_CONNECT_TIMEOUT_SECONDS` | `5` | Replication connection timeout |
+| `SITE_SYNC_ENABLED` | `false` | Enable bi-directional site sync |
+| `OBJECT_TAG_LIMIT` | `50` | Maximum tags per object |
 
 ## Data Layout
 
```
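The new documentation rows describe plain environment knobs. As a quick illustration of how the documented defaults apply when a variable is unset (`env_int` is a hypothetical helper, not project code):

```python
import os

def env_int(name: str, default: int) -> int:
    # environment wins; otherwise the documented default applies
    raw = os.environ.get(name)
    return int(raw) if raw is not None else default

sigv4_tolerance = env_int("SIGV4_TIMESTAMP_TOLERANCE_SECONDS", 900)
presigned_max = env_int("PRESIGNED_URL_MAX_EXPIRY_SECONDS", 604800)  # 7 days in seconds
tag_limit = env_int("OBJECT_TAG_LIMIT", 50)
print(sigv4_tolerance, presigned_max, tag_limit)
```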
app/__init__.py (216 changed lines)
```diff
@@ -1,6 +1,8 @@
 from __future__ import annotations
 
+import html as html_module
 import logging
+import mimetypes
 import shutil
 import sys
 import time
@@ -10,7 +12,7 @@ from pathlib import Path
 from datetime import timedelta
 from typing import Any, Dict, List, Optional
 
-from flask import Flask, g, has_request_context, redirect, render_template, request, url_for
+from flask import Flask, Response, g, has_request_context, redirect, render_template, request, url_for
 from flask_cors import CORS
 from flask_wtf.csrf import CSRFError
 from werkzeug.middleware.proxy_fix import ProxyFix
@@ -31,8 +33,10 @@ from .notifications import NotificationService
 from .object_lock import ObjectLockService
 from .replication import ReplicationManager
 from .secret_store import EphemeralSecretStore
-from .storage import ObjectStorage
+from .site_registry import SiteRegistry, SiteInfo
+from .storage import ObjectStorage, StorageError
 from .version import get_version
+from .website_domains import WebsiteDomainStore
 
 
 def _migrate_config_file(active_path: Path, legacy_paths: List[Path]) -> Path:
@@ -104,6 +108,9 @@ def create_app(
     storage = ObjectStorage(
         Path(app.config["STORAGE_ROOT"]),
         cache_ttl=app.config.get("OBJECT_CACHE_TTL", 5),
+        object_cache_max_size=app.config.get("OBJECT_CACHE_MAX_SIZE", 100),
+        bucket_config_cache_ttl=app.config.get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0),
+        object_key_max_length_bytes=app.config.get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024),
     )
 
     if app.config.get("WARM_CACHE_ON_STARTUP", True) and not app.config.get("TESTING"):
```
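The object cache is now bounded by entry count as well as age. A minimal sketch of that combined eviction policy, assuming nothing about the real `ObjectStorage` internals (`TTLCache` is illustrative only):

```python
import time
from collections import OrderedDict

class TTLCache:
    def __init__(self, ttl: float = 5.0, max_size: int = 100) -> None:
        self.ttl = ttl
        self.max_size = max_size
        self._data: "OrderedDict[str, tuple[float, object]]" = OrderedDict()

    def get(self, key: str):
        entry = self._data.get(key)
        if entry is None:
            return None
        stored_at, value = entry
        if time.monotonic() - stored_at > self.ttl:
            del self._data[key]      # expired: drop and report a miss
            return None
        self._data.move_to_end(key)  # refresh LRU position
        return value

    def put(self, key: str, value) -> None:
        self._data[key] = (time.monotonic(), value)
        self._data.move_to_end(key)
        while len(self._data) > self.max_size:
            self._data.popitem(last=False)  # evict the least recently used entry
```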
```diff
@@ -137,12 +144,33 @@ def create_app(
     )
 
     connections = ConnectionStore(connections_path)
-    replication = ReplicationManager(storage, connections, replication_rules_path, storage_root)
+    replication = ReplicationManager(
+        storage,
+        connections,
+        replication_rules_path,
+        storage_root,
+        connect_timeout=app.config.get("REPLICATION_CONNECT_TIMEOUT_SECONDS", 5),
+        read_timeout=app.config.get("REPLICATION_READ_TIMEOUT_SECONDS", 30),
+        max_retries=app.config.get("REPLICATION_MAX_RETRIES", 2),
+        streaming_threshold_bytes=app.config.get("REPLICATION_STREAMING_THRESHOLD_BYTES", 10 * 1024 * 1024),
+        max_failures_per_bucket=app.config.get("REPLICATION_MAX_FAILURES_PER_BUCKET", 50),
+    )
+
+    site_registry_path = config_dir / "site_registry.json"
+    site_registry = SiteRegistry(site_registry_path)
+    if app.config.get("SITE_ID") and not site_registry.get_local_site():
+        site_registry.set_local_site(SiteInfo(
+            site_id=app.config["SITE_ID"],
+            endpoint=app.config.get("SITE_ENDPOINT") or "",
+            region=app.config.get("SITE_REGION", "us-east-1"),
+            priority=app.config.get("SITE_PRIORITY", 100),
+        ))
 
     encryption_config = {
         "encryption_enabled": app.config.get("ENCRYPTION_ENABLED", False),
         "encryption_master_key_path": app.config.get("ENCRYPTION_MASTER_KEY_PATH"),
         "default_encryption_algorithm": app.config.get("DEFAULT_ENCRYPTION_ALGORITHM", "AES256"),
+        "encryption_chunk_size_bytes": app.config.get("ENCRYPTION_CHUNK_SIZE_BYTES", 64 * 1024),
     }
     encryption_manager = EncryptionManager(encryption_config)
 
```
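The replication timeouts map naturally onto `requests`' `(connect, read)` timeout tuple plus a bounded retry loop. A hedged sketch of that shape (`fetch_with_retries` is illustrative, not the manager's actual code):

```python
import requests

def fetch_with_retries(url: str, connect_timeout: float = 5,
                       read_timeout: float = 30, max_retries: int = 2):
    last_exc = None
    for _ in range(max_retries + 1):
        try:
            # requests accepts a (connect, read) tuple for split timeouts
            return requests.get(url, timeout=(connect_timeout, read_timeout))
        except requests.RequestException as exc:
            last_exc = exc
    raise last_exc
```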
```diff
@@ -150,7 +178,12 @@ def create_app(
     if app.config.get("KMS_ENABLED", False):
         kms_keys_path = Path(app.config.get("KMS_KEYS_PATH", ""))
         kms_master_key_path = Path(app.config.get("ENCRYPTION_MASTER_KEY_PATH", ""))
-        kms_manager = KMSManager(kms_keys_path, kms_master_key_path)
+        kms_manager = KMSManager(
+            kms_keys_path,
+            kms_master_key_path,
+            generate_data_key_min_bytes=app.config.get("KMS_GENERATE_DATA_KEY_MIN_BYTES", 1),
+            generate_data_key_max_bytes=app.config.get("KMS_GENERATE_DATA_KEY_MAX_BYTES", 1024),
+        )
         encryption_manager.set_kms_provider(kms_manager)
 
     if app.config.get("ENCRYPTION_ENABLED", False):
@@ -159,7 +192,10 @@ def create_app(
 
     acl_service = AclService(storage_root)
     object_lock_service = ObjectLockService(storage_root)
-    notification_service = NotificationService(storage_root)
+    notification_service = NotificationService(
+        storage_root,
+        allow_internal_endpoints=app.config.get("ALLOW_INTERNAL_ENDPOINTS", False),
+    )
     access_logging_service = AccessLoggingService(storage_root)
     access_logging_service.set_storage(storage)
 
@@ -170,6 +206,7 @@ def create_app(
         base_storage,
         interval_seconds=app.config.get("LIFECYCLE_INTERVAL_SECONDS", 3600),
         storage_root=storage_root,
+        max_history_per_bucket=app.config.get("LIFECYCLE_MAX_HISTORY_PER_BUCKET", 50),
     )
     lifecycle_manager.start()
 
@@ -187,6 +224,20 @@ def create_app(
     app.extensions["object_lock"] = object_lock_service
     app.extensions["notifications"] = notification_service
     app.extensions["access_logging"] = access_logging_service
+    app.extensions["site_registry"] = site_registry
+
+    website_domains_store = None
+    if app.config.get("WEBSITE_HOSTING_ENABLED", False):
+        website_domains_path = config_dir / "website_domains.json"
+        website_domains_store = WebsiteDomainStore(website_domains_path)
+        app.extensions["website_domains"] = website_domains_store
+
+    from .s3_client import S3ProxyClient
+    api_base = app.config.get("API_BASE_URL") or "http://127.0.0.1:5000"
+    app.extensions["s3_proxy"] = S3ProxyClient(
+        api_base_url=api_base,
+        region=app.config.get("AWS_REGION", "us-east-1"),
+    )
 
     operation_metrics_collector = None
     if app.config.get("OPERATION_METRICS_ENABLED", False):
@@ -218,17 +269,47 @@ def create_app(
         storage_root=storage_root,
         interval_seconds=app.config.get("SITE_SYNC_INTERVAL_SECONDS", 60),
         batch_size=app.config.get("SITE_SYNC_BATCH_SIZE", 100),
+        connect_timeout=app.config.get("SITE_SYNC_CONNECT_TIMEOUT_SECONDS", 10),
+        read_timeout=app.config.get("SITE_SYNC_READ_TIMEOUT_SECONDS", 120),
+        max_retries=app.config.get("SITE_SYNC_MAX_RETRIES", 2),
+        clock_skew_tolerance_seconds=app.config.get("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS", 1.0),
     )
     site_sync_worker.start()
     app.extensions["site_sync"] = site_sync_worker
 
     @app.errorhandler(500)
     def internal_error(error):
-        return render_template('500.html'), 500
+        wants_html = request.accept_mimetypes.accept_html
+        path = request.path or ""
+        if include_ui and wants_html and (path.startswith("/ui") or path == "/"):
+            return render_template('500.html'), 500
+        error_xml = (
+            '<?xml version="1.0" encoding="UTF-8"?>'
+            '<Error>'
+            '<Code>InternalError</Code>'
+            '<Message>An internal server error occurred</Message>'
+            f'<Resource>{path}</Resource>'
+            f'<RequestId>{getattr(g, "request_id", "-")}</RequestId>'
+            '</Error>'
+        )
+        return error_xml, 500, {'Content-Type': 'application/xml'}
 
     @app.errorhandler(CSRFError)
     def handle_csrf_error(e):
-        return render_template('csrf_error.html', reason=e.description), 400
+        wants_html = request.accept_mimetypes.accept_html
+        path = request.path or ""
+        if include_ui and wants_html and (path.startswith("/ui") or path == "/"):
+            return render_template('csrf_error.html', reason=e.description), 400
+        error_xml = (
+            '<?xml version="1.0" encoding="UTF-8"?>'
+            '<Error>'
+            '<Code>CSRFError</Code>'
+            f'<Message>{e.description}</Message>'
+            f'<Resource>{path}</Resource>'
+            f'<RequestId>{getattr(g, "request_id", "-")}</RequestId>'
+            '</Error>'
+        )
+        return error_xml, 400, {'Content-Type': 'application/xml'}
 
     @app.template_filter("filesizeformat")
     def filesizeformat(value: int) -> str:
```
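The rewritten handlers branch on the Accept header and request path: UI traffic keeps the HTML error pages, while API callers receive S3-style XML. A self-contained sketch of the same negotiation (routes and messages here are placeholders):

```python
from flask import Flask, request

app = Flask(__name__)

@app.route("/ui/boom")
@app.route("/boom")
def boom():
    raise RuntimeError("demo failure")

@app.errorhandler(500)
def internal_error(error):
    # browser-style requests under /ui get HTML, everything else gets XML
    wants_html = request.accept_mimetypes.accept_html
    if wants_html and (request.path.startswith("/ui") or request.path == "/"):
        return "<h1>Internal Server Error</h1>", 500
    body = ('<?xml version="1.0" encoding="UTF-8"?>'
            '<Error><Code>InternalError</Code>'
            f'<Resource>{request.path}</Resource></Error>')
    return body, 500, {"Content-Type": "application/xml"}

if __name__ == "__main__":
    client = app.test_client()
    print(client.get("/boom").headers["Content-Type"])  # application/xml
```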
```diff
@@ -289,11 +370,14 @@ def create_app(
     if include_api:
         from .s3_api import s3_api_bp
         from .kms_api import kms_api_bp
+        from .admin_api import admin_api_bp
 
         app.register_blueprint(s3_api_bp)
         app.register_blueprint(kms_api_bp)
+        app.register_blueprint(admin_api_bp)
         csrf.exempt(s3_api_bp)
         csrf.exempt(kms_api_bp)
+        csrf.exempt(admin_api_bp)
 
     if include_ui:
         from .ui import ui_bp
@@ -397,6 +481,128 @@ def _configure_logging(app: Flask) -> None:
             extra={"path": request.path, "method": request.method, "remote_addr": request.remote_addr},
         )
 
+    @app.before_request
+    def _maybe_serve_website():
+        if not app.config.get("WEBSITE_HOSTING_ENABLED"):
+            return None
+        if request.method not in {"GET", "HEAD"}:
+            return None
+        host = request.host
+        if ":" in host:
+            host = host.rsplit(":", 1)[0]
+        host = host.lower()
+        store = app.extensions.get("website_domains")
+        if not store:
+            return None
+        bucket = store.get_bucket(host)
+        if not bucket:
+            return None
+        storage = app.extensions["object_storage"]
+        if not storage.bucket_exists(bucket):
+            return _website_error_response(404, "Not Found")
+        website_config = storage.get_bucket_website(bucket)
+        if not website_config:
+            return _website_error_response(404, "Not Found")
+        index_doc = website_config.get("index_document", "index.html")
+        error_doc = website_config.get("error_document")
+        req_path = request.path.lstrip("/")
+        if not req_path or req_path.endswith("/"):
+            object_key = req_path + index_doc
+        else:
+            object_key = req_path
+        try:
+            obj_path = storage.get_object_path(bucket, object_key)
+        except (StorageError, OSError):
+            if object_key == req_path:
+                try:
+                    obj_path = storage.get_object_path(bucket, req_path + "/" + index_doc)
+                    object_key = req_path + "/" + index_doc
+                except (StorageError, OSError):
+                    return _serve_website_error(storage, bucket, error_doc, 404)
+            else:
+                return _serve_website_error(storage, bucket, error_doc, 404)
+        content_type = mimetypes.guess_type(object_key)[0] or "application/octet-stream"
+        is_encrypted = False
+        try:
+            metadata = storage.get_object_metadata(bucket, object_key)
+            is_encrypted = "x-amz-server-side-encryption" in metadata
+        except (StorageError, OSError):
+            pass
+        if request.method == "HEAD":
+            response = Response(status=200)
+            if is_encrypted and hasattr(storage, "get_object_data"):
+                try:
+                    data, _ = storage.get_object_data(bucket, object_key)
+                    response.headers["Content-Length"] = len(data)
+                except (StorageError, OSError):
+                    return _website_error_response(500, "Internal Server Error")
+            else:
+                try:
+                    stat = obj_path.stat()
+                    response.headers["Content-Length"] = stat.st_size
+                except OSError:
+                    return _website_error_response(500, "Internal Server Error")
+            response.headers["Content-Type"] = content_type
+            return response
+        if is_encrypted and hasattr(storage, "get_object_data"):
+            try:
+                data, _ = storage.get_object_data(bucket, object_key)
+                response = Response(data, mimetype=content_type)
+                response.headers["Content-Length"] = len(data)
+                return response
+            except (StorageError, OSError):
+                return _website_error_response(500, "Internal Server Error")
+
+        def _stream(file_path):
+            with file_path.open("rb") as f:
+                while True:
+                    chunk = f.read(65536)
+                    if not chunk:
+                        break
+                    yield chunk
+
+        try:
+            stat = obj_path.stat()
+            response = Response(_stream(obj_path), mimetype=content_type, direct_passthrough=True)
+            response.headers["Content-Length"] = stat.st_size
+            return response
+        except OSError:
+            return _website_error_response(500, "Internal Server Error")
+
+    def _serve_website_error(storage, bucket, error_doc_key, status_code):
+        if not error_doc_key:
+            return _website_error_response(status_code, "Not Found" if status_code == 404 else "Error")
+        try:
+            obj_path = storage.get_object_path(bucket, error_doc_key)
+        except (StorageError, OSError):
+            return _website_error_response(status_code, "Not Found")
+        content_type = mimetypes.guess_type(error_doc_key)[0] or "text/html"
+        is_encrypted = False
+        try:
+            metadata = storage.get_object_metadata(bucket, error_doc_key)
+            is_encrypted = "x-amz-server-side-encryption" in metadata
+        except (StorageError, OSError):
+            pass
+        if is_encrypted and hasattr(storage, "get_object_data"):
+            try:
+                data, _ = storage.get_object_data(bucket, error_doc_key)
+                response = Response(data, status=status_code, mimetype=content_type)
+                response.headers["Content-Length"] = len(data)
+                return response
+            except (StorageError, OSError):
+                return _website_error_response(status_code, "Not Found")
+        try:
+            data = obj_path.read_bytes()
+            response = Response(data, status=status_code, mimetype=content_type)
+            response.headers["Content-Length"] = len(data)
+            return response
+        except OSError:
+            return _website_error_response(status_code, "Not Found")
+
+    def _website_error_response(status_code, message):
+        safe_msg = html_module.escape(str(message))
+        safe_code = html_module.escape(str(status_code))
+        body = f"<html><head><title>{safe_code} {safe_msg}</title></head><body><h1>{safe_code} {safe_msg}</h1></body></html>"
+        return Response(body, status=status_code, mimetype="text/html")
+
     @app.after_request
     def _log_request_end(response):
         duration_ms = 0.0
```
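For unencrypted objects the website handler streams 64 KiB chunks with `direct_passthrough=True`, so Werkzeug forwards the generator without buffering or post-processing the whole body. A standalone sketch of the pattern (route and file are placeholders):

```python
from flask import Flask, Response

app = Flask(__name__)

@app.route("/stream-demo")
def stream_demo():
    def _stream(path):
        with open(path, "rb") as f:
            while chunk := f.read(65536):  # 64 KiB chunks, as in the diff
                yield chunk
    return Response(_stream(__file__), mimetype="text/plain", direct_passthrough=True)
```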
app/admin_api.py (new file, 771 added lines)
@@ -0,0 +1,771 @@
```python
from __future__ import annotations

import ipaddress
import json
import logging
import re
import socket
import time
from typing import Any, Dict, Optional, Tuple
from urllib.parse import urlparse

import requests
from flask import Blueprint, Response, current_app, jsonify, request

from .connections import ConnectionStore
from .extensions import limiter
from .iam import IamError, Principal
from .replication import ReplicationManager
from .site_registry import PeerSite, SiteInfo, SiteRegistry
from .website_domains import WebsiteDomainStore


def _is_safe_url(url: str, allow_internal: bool = False) -> bool:
    """Check if a URL is safe to make requests to (not internal/private).

    Args:
        url: The URL to check.
        allow_internal: If True, allows internal/private IP addresses.
            Use for self-hosted deployments on internal networks.
    """
    try:
        parsed = urlparse(url)
        hostname = parsed.hostname
        if not hostname:
            return False
        cloud_metadata_hosts = {
            "metadata.google.internal",
            "169.254.169.254",
        }
        if hostname.lower() in cloud_metadata_hosts:
            return False
        if allow_internal:
            return True
        blocked_hosts = {
            "localhost",
            "127.0.0.1",
            "0.0.0.0",
            "::1",
            "[::1]",
        }
        if hostname.lower() in blocked_hosts:
            return False
        try:
            resolved_ip = socket.gethostbyname(hostname)
            ip = ipaddress.ip_address(resolved_ip)
            if ip.is_private or ip.is_loopback or ip.is_link_local or ip.is_reserved:
                return False
        except (socket.gaierror, ValueError):
            return False
        return True
    except Exception:
        return False
```
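Assuming `_is_safe_url` from the new module above, a few illustrative probes (name-resolution results depend on the environment):

```python
from app.admin_api import _is_safe_url

print(_is_safe_url("http://169.254.169.254/latest/meta-data"))     # False: metadata IP is always blocked
print(_is_safe_url("http://localhost:5000"))                       # False: loopback
print(_is_safe_url("http://localhost:5000", allow_internal=True))  # True: explicitly allowed
print(_is_safe_url("https://peer.example.com"))                    # True if it resolves to a public IP
```

Note that the cloud-metadata check runs before `allow_internal`, so even internal deployments cannot be pointed at the metadata service.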
```python
def _validate_endpoint(endpoint: str) -> Optional[str]:
    """Validate endpoint URL format. Returns error message or None."""
    try:
        parsed = urlparse(endpoint)
        if not parsed.scheme or parsed.scheme not in ("http", "https"):
            return "Endpoint must be http or https URL"
        if not parsed.netloc:
            return "Endpoint must have a host"
        return None
    except Exception:
        return "Invalid endpoint URL"


def _validate_priority(priority: Any) -> Optional[str]:
    """Validate priority value. Returns error message or None."""
    try:
        p = int(priority)
        if p < 0 or p > 1000:
            return "Priority must be between 0 and 1000"
        return None
    except (TypeError, ValueError):
        return "Priority must be an integer"


def _validate_region(region: str) -> Optional[str]:
    """Validate region format. Returns error message or None."""
    if not re.match(r"^[a-z]{2,}-[a-z]+-\d+$", region):
        return "Region must match format like us-east-1"
    return None


def _validate_site_id(site_id: str) -> Optional[str]:
    """Validate site_id format. Returns error message or None."""
    if not site_id or len(site_id) > 63:
        return "site_id must be 1-63 characters"
    if not re.match(r'^[a-zA-Z0-9][a-zA-Z0-9_-]*$', site_id):
        return "site_id must start with alphanumeric and contain only alphanumeric, hyphens, underscores"
    return None


logger = logging.getLogger(__name__)

admin_api_bp = Blueprint("admin_api", __name__, url_prefix="/admin")


def _require_principal() -> Tuple[Optional[Principal], Optional[Tuple[Dict[str, Any], int]]]:
    from .s3_api import _require_principal as s3_require_principal
    return s3_require_principal()


def _require_admin() -> Tuple[Optional[Principal], Optional[Tuple[Dict[str, Any], int]]]:
    principal, error = _require_principal()
    if error:
        return None, error

    try:
        _iam().authorize(principal, None, "iam:*")
        return principal, None
    except IamError:
        return None, _json_error("AccessDenied", "Admin access required", 403)


def _site_registry() -> SiteRegistry:
    return current_app.extensions["site_registry"]


def _connections() -> ConnectionStore:
    return current_app.extensions["connections"]


def _replication() -> ReplicationManager:
    return current_app.extensions["replication"]


def _iam():
    return current_app.extensions["iam"]


def _json_error(code: str, message: str, status: int) -> Tuple[Dict[str, Any], int]:
    return {"error": {"code": code, "message": message}}, status


def _get_admin_rate_limit() -> str:
    return current_app.config.get("RATE_LIMIT_ADMIN", "60 per minute")


@admin_api_bp.route("/site", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def get_local_site():
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    local_site = registry.get_local_site()

    if local_site:
        return jsonify(local_site.to_dict())

    config_site_id = current_app.config.get("SITE_ID")
    config_endpoint = current_app.config.get("SITE_ENDPOINT")

    if config_site_id:
        return jsonify({
            "site_id": config_site_id,
            "endpoint": config_endpoint or "",
            "region": current_app.config.get("SITE_REGION", "us-east-1"),
            "priority": current_app.config.get("SITE_PRIORITY", 100),
            "display_name": config_site_id,
            "source": "environment",
        })

    return _json_error("NotFound", "Local site not configured", 404)


@admin_api_bp.route("/site", methods=["PUT"])
@limiter.limit(lambda: _get_admin_rate_limit())
def update_local_site():
    principal, error = _require_admin()
    if error:
        return error

    payload = request.get_json(silent=True) or {}

    site_id = payload.get("site_id")
    endpoint = payload.get("endpoint")

    if not site_id:
        return _json_error("ValidationError", "site_id is required", 400)

    site_id_error = _validate_site_id(site_id)
    if site_id_error:
        return _json_error("ValidationError", site_id_error, 400)

    if endpoint:
        endpoint_error = _validate_endpoint(endpoint)
        if endpoint_error:
            return _json_error("ValidationError", endpoint_error, 400)

    if "priority" in payload:
        priority_error = _validate_priority(payload["priority"])
        if priority_error:
            return _json_error("ValidationError", priority_error, 400)

    if "region" in payload:
        region_error = _validate_region(payload["region"])
        if region_error:
            return _json_error("ValidationError", region_error, 400)

    registry = _site_registry()
    existing = registry.get_local_site()

    site = SiteInfo(
        site_id=site_id,
        endpoint=endpoint or "",
        region=payload.get("region", "us-east-1"),
        priority=payload.get("priority", 100),
        display_name=payload.get("display_name", site_id),
        created_at=existing.created_at if existing else None,
    )

    registry.set_local_site(site)

    logger.info("Local site updated", extra={"site_id": site_id, "principal": principal.access_key})
    return jsonify(site.to_dict())


@admin_api_bp.route("/sites", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def list_all_sites():
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    local = registry.get_local_site()
    peers = registry.list_peers()

    result = {
        "local": local.to_dict() if local else None,
        "peers": [peer.to_dict() for peer in peers],
        "total_peers": len(peers),
    }

    return jsonify(result)


@admin_api_bp.route("/sites", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def register_peer_site():
    principal, error = _require_admin()
    if error:
        return error

    payload = request.get_json(silent=True) or {}

    site_id = payload.get("site_id")
    endpoint = payload.get("endpoint")

    if not site_id:
        return _json_error("ValidationError", "site_id is required", 400)

    site_id_error = _validate_site_id(site_id)
    if site_id_error:
        return _json_error("ValidationError", site_id_error, 400)

    if not endpoint:
        return _json_error("ValidationError", "endpoint is required", 400)

    endpoint_error = _validate_endpoint(endpoint)
    if endpoint_error:
        return _json_error("ValidationError", endpoint_error, 400)

    region = payload.get("region", "us-east-1")
    region_error = _validate_region(region)
    if region_error:
        return _json_error("ValidationError", region_error, 400)

    priority = payload.get("priority", 100)
    priority_error = _validate_priority(priority)
    if priority_error:
        return _json_error("ValidationError", priority_error, 400)

    registry = _site_registry()

    if registry.get_peer(site_id):
        return _json_error("AlreadyExists", f"Peer site '{site_id}' already exists", 409)

    connection_id = payload.get("connection_id")
    if connection_id:
        if not _connections().get(connection_id):
            return _json_error("ValidationError", f"Connection '{connection_id}' not found", 400)

    peer = PeerSite(
        site_id=site_id,
        endpoint=endpoint,
        region=region,
        priority=int(priority),
        display_name=payload.get("display_name", site_id),
        connection_id=connection_id,
    )

    registry.add_peer(peer)

    logger.info("Peer site registered", extra={"site_id": site_id, "principal": principal.access_key})
    return jsonify(peer.to_dict()), 201
```
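A hypothetical client-side sketch of registering a peer through the new endpoint. The base URL and credentials are placeholders, and the `X-Access-Key`/`X-Secret-Key` header pair mirrors what the bidirectional check itself sends to remote peers (your deployment's auth scheme may differ):

```python
import requests

resp = requests.post(
    "http://127.0.0.1:5000/admin/sites",
    json={
        "site_id": "site-b",
        "endpoint": "https://site-b.example.com",
        "region": "us-east-1",
        "priority": 100,
        "connection_id": "conn-site-b",
    },
    headers={"X-Access-Key": "AKIA-PLACEHOLDER", "X-Secret-Key": "secret-placeholder"},
    timeout=10,
)
print(resp.status_code, resp.json())  # 201 on success, 409 if the peer already exists
```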
```python
@admin_api_bp.route("/sites/<site_id>", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def get_peer_site(site_id: str):
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    peer = registry.get_peer(site_id)

    if not peer:
        return _json_error("NotFound", f"Peer site '{site_id}' not found", 404)

    return jsonify(peer.to_dict())


@admin_api_bp.route("/sites/<site_id>", methods=["PUT"])
@limiter.limit(lambda: _get_admin_rate_limit())
def update_peer_site(site_id: str):
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    existing = registry.get_peer(site_id)

    if not existing:
        return _json_error("NotFound", f"Peer site '{site_id}' not found", 404)

    payload = request.get_json(silent=True) or {}

    if "endpoint" in payload:
        endpoint_error = _validate_endpoint(payload["endpoint"])
        if endpoint_error:
            return _json_error("ValidationError", endpoint_error, 400)

    if "priority" in payload:
        priority_error = _validate_priority(payload["priority"])
        if priority_error:
            return _json_error("ValidationError", priority_error, 400)

    if "region" in payload:
        region_error = _validate_region(payload["region"])
        if region_error:
            return _json_error("ValidationError", region_error, 400)

    if "connection_id" in payload:
        if payload["connection_id"] and not _connections().get(payload["connection_id"]):
            return _json_error("ValidationError", f"Connection '{payload['connection_id']}' not found", 400)

    peer = PeerSite(
        site_id=site_id,
        endpoint=payload.get("endpoint", existing.endpoint),
        region=payload.get("region", existing.region),
        priority=payload.get("priority", existing.priority),
        display_name=payload.get("display_name", existing.display_name),
        connection_id=payload.get("connection_id", existing.connection_id),
        created_at=existing.created_at,
        is_healthy=existing.is_healthy,
        last_health_check=existing.last_health_check,
    )

    registry.update_peer(peer)

    logger.info("Peer site updated", extra={"site_id": site_id, "principal": principal.access_key})
    return jsonify(peer.to_dict())


@admin_api_bp.route("/sites/<site_id>", methods=["DELETE"])
@limiter.limit(lambda: _get_admin_rate_limit())
def delete_peer_site(site_id: str):
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()

    if not registry.delete_peer(site_id):
        return _json_error("NotFound", f"Peer site '{site_id}' not found", 404)

    logger.info("Peer site deleted", extra={"site_id": site_id, "principal": principal.access_key})
    return Response(status=204)


@admin_api_bp.route("/sites/<site_id>/health", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def check_peer_health(site_id: str):
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    peer = registry.get_peer(site_id)

    if not peer:
        return _json_error("NotFound", f"Peer site '{site_id}' not found", 404)

    is_healthy = False
    error_message = None

    if peer.connection_id:
        connection = _connections().get(peer.connection_id)
        if connection:
            is_healthy = _replication().check_endpoint_health(connection)
        else:
            error_message = f"Connection '{peer.connection_id}' not found"
    else:
        error_message = "No connection configured for this peer"

    registry.update_health(site_id, is_healthy)

    result = {
        "site_id": site_id,
        "is_healthy": is_healthy,
        "checked_at": time.time(),
    }
    if error_message:
        result["error"] = error_message

    return jsonify(result)


@admin_api_bp.route("/topology", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def get_topology():
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    local = registry.get_local_site()
    peers = registry.list_peers()

    sites = []

    if local:
        sites.append({
            **local.to_dict(),
            "is_local": True,
            "is_healthy": True,
        })

    for peer in peers:
        sites.append({
            **peer.to_dict(),
            "is_local": False,
        })

    sites.sort(key=lambda s: s.get("priority", 100))

    return jsonify({
        "sites": sites,
        "total": len(sites),
        "healthy_count": sum(1 for s in sites if s.get("is_healthy")),
    })


@admin_api_bp.route("/sites/<site_id>/bidirectional-status", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def check_bidirectional_status(site_id: str):
    principal, error = _require_admin()
    if error:
        return error

    registry = _site_registry()
    peer = registry.get_peer(site_id)

    if not peer:
        return _json_error("NotFound", f"Peer site '{site_id}' not found", 404)

    local_site = registry.get_local_site()
    replication = _replication()
    local_rules = replication.list_rules()

    local_bidir_rules = []
    for rule in local_rules:
        if rule.target_connection_id == peer.connection_id and rule.mode == "bidirectional":
            local_bidir_rules.append({
                "bucket_name": rule.bucket_name,
                "target_bucket": rule.target_bucket,
                "enabled": rule.enabled,
            })

    result = {
        "site_id": site_id,
        "local_site_id": local_site.site_id if local_site else None,
        "local_endpoint": local_site.endpoint if local_site else None,
        "local_bidirectional_rules": local_bidir_rules,
        "local_site_sync_enabled": current_app.config.get("SITE_SYNC_ENABLED", False),
        "remote_status": None,
        "issues": [],
        "is_fully_configured": False,
    }

    if not local_site or not local_site.site_id:
        result["issues"].append({
            "code": "NO_LOCAL_SITE_ID",
            "message": "Local site identity not configured",
            "severity": "error",
        })

    if not local_site or not local_site.endpoint:
        result["issues"].append({
            "code": "NO_LOCAL_ENDPOINT",
            "message": "Local site endpoint not configured (remote site cannot reach back)",
            "severity": "error",
        })

    if not peer.connection_id:
        result["issues"].append({
            "code": "NO_CONNECTION",
            "message": "No connection configured for this peer",
            "severity": "error",
        })
        return jsonify(result)

    connection = _connections().get(peer.connection_id)
    if not connection:
        result["issues"].append({
            "code": "CONNECTION_NOT_FOUND",
            "message": f"Connection '{peer.connection_id}' not found",
            "severity": "error",
        })
        return jsonify(result)

    if not local_bidir_rules:
        result["issues"].append({
            "code": "NO_LOCAL_BIDIRECTIONAL_RULES",
            "message": "No bidirectional replication rules configured on this site",
            "severity": "warning",
        })

    if not result["local_site_sync_enabled"]:
        result["issues"].append({
            "code": "SITE_SYNC_DISABLED",
            "message": "Site sync worker is disabled (SITE_SYNC_ENABLED=false). Pull operations will not work.",
            "severity": "warning",
        })

    if not replication.check_endpoint_health(connection):
        result["issues"].append({
            "code": "REMOTE_UNREACHABLE",
            "message": "Remote endpoint is not reachable",
            "severity": "error",
        })
        return jsonify(result)

    allow_internal = current_app.config.get("ALLOW_INTERNAL_ENDPOINTS", False)
    if not _is_safe_url(peer.endpoint, allow_internal=allow_internal):
        result["issues"].append({
            "code": "ENDPOINT_NOT_ALLOWED",
            "message": "Peer endpoint points to cloud metadata service (SSRF protection)",
            "severity": "error",
        })
        return jsonify(result)

    try:
        admin_url = peer.endpoint.rstrip("/") + "/admin/sites"
        resp = requests.get(
            admin_url,
            timeout=10,
            headers={
                "Accept": "application/json",
                "X-Access-Key": connection.access_key,
                "X-Secret-Key": connection.secret_key,
            },
        )

        if resp.status_code == 200:
            try:
                remote_data = resp.json()
                if not isinstance(remote_data, dict):
                    raise ValueError("Expected JSON object")
                remote_local = remote_data.get("local")
                if remote_local is not None and not isinstance(remote_local, dict):
                    raise ValueError("Expected 'local' to be an object")
                remote_peers = remote_data.get("peers", [])
                if not isinstance(remote_peers, list):
                    raise ValueError("Expected 'peers' to be a list")
            except (ValueError, json.JSONDecodeError) as e:
                logger.warning("Invalid JSON from remote admin API: %s", e)
                result["remote_status"] = {"reachable": True, "invalid_response": True}
                result["issues"].append({
                    "code": "REMOTE_INVALID_RESPONSE",
                    "message": "Remote admin API returned invalid JSON",
                    "severity": "warning",
                })
                return jsonify(result)

            result["remote_status"] = {
                "reachable": True,
                "local_site": remote_local,
                "site_sync_enabled": None,
                "has_peer_for_us": False,
                "peer_connection_configured": False,
                "has_bidirectional_rules_for_us": False,
            }

            for rp in remote_peers:
                if not isinstance(rp, dict):
                    continue
                if local_site and (
                    rp.get("site_id") == local_site.site_id or
                    rp.get("endpoint") == local_site.endpoint
                ):
                    result["remote_status"]["has_peer_for_us"] = True
                    result["remote_status"]["peer_connection_configured"] = bool(rp.get("connection_id"))
                    break

            if not result["remote_status"]["has_peer_for_us"]:
                result["issues"].append({
                    "code": "REMOTE_NO_PEER_FOR_US",
                    "message": "Remote site does not have this site registered as a peer",
                    "severity": "error",
                })
            elif not result["remote_status"]["peer_connection_configured"]:
                result["issues"].append({
                    "code": "REMOTE_NO_CONNECTION_FOR_US",
                    "message": "Remote site has us as peer but no connection configured (cannot push back)",
                    "severity": "error",
                })
        elif resp.status_code == 401 or resp.status_code == 403:
            result["remote_status"] = {
                "reachable": True,
                "admin_access_denied": True,
            }
            result["issues"].append({
                "code": "REMOTE_ADMIN_ACCESS_DENIED",
                "message": "Cannot verify remote configuration (admin access denied)",
                "severity": "warning",
            })
        else:
            result["remote_status"] = {
                "reachable": True,
                "admin_api_error": resp.status_code,
            }
            result["issues"].append({
                "code": "REMOTE_ADMIN_API_ERROR",
                "message": f"Remote admin API returned status {resp.status_code}",
                "severity": "warning",
            })
    except requests.RequestException as e:
        logger.warning("Remote admin API unreachable: %s", e)
        result["remote_status"] = {
            "reachable": False,
            "error": "Connection failed",
        }
        result["issues"].append({
            "code": "REMOTE_ADMIN_UNREACHABLE",
            "message": "Could not reach remote admin API",
            "severity": "warning",
        })
    except Exception as e:
        logger.warning("Error checking remote bidirectional status: %s", e, exc_info=True)
        result["issues"].append({
            "code": "VERIFICATION_ERROR",
            "message": "Internal error during verification",
            "severity": "warning",
        })

    error_issues = [i for i in result["issues"] if i["severity"] == "error"]
    result["is_fully_configured"] = len(error_issues) == 0 and len(local_bidir_rules) > 0

    return jsonify(result)
```
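Consuming the report is straightforward: only `severity: "error"` entries block `is_fully_configured`; warnings do not. An illustrative sketch over a sample payload shaped like the endpoint's response:

```python
report = {
    "site_id": "site-b",
    "is_fully_configured": True,
    "issues": [
        {"code": "SITE_SYNC_DISABLED", "severity": "warning",
         "message": "Site sync worker is disabled (SITE_SYNC_ENABLED=false). Pull operations will not work."},
    ],
}
blocking = [i for i in report["issues"] if i["severity"] == "error"]
print("fully configured:", report["is_fully_configured"], "| blocking issues:", len(blocking))
```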
```python
def _website_domains() -> WebsiteDomainStore:
    return current_app.extensions["website_domains"]


def _storage():
    return current_app.extensions["object_storage"]


@admin_api_bp.route("/website-domains", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def list_website_domains():
    principal, error = _require_admin()
    if error:
        return error
    if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
        return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
    return jsonify(_website_domains().list_all())


@admin_api_bp.route("/website-domains", methods=["POST"])
@limiter.limit(lambda: _get_admin_rate_limit())
def create_website_domain():
    principal, error = _require_admin()
    if error:
        return error
    if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
        return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
    payload = request.get_json(silent=True) or {}
    domain = (payload.get("domain") or "").strip().lower()
    bucket = (payload.get("bucket") or "").strip()
    if not domain:
        return _json_error("ValidationError", "domain is required", 400)
    if not bucket:
        return _json_error("ValidationError", "bucket is required", 400)
    storage = _storage()
    if not storage.bucket_exists(bucket):
        return _json_error("NoSuchBucket", f"Bucket '{bucket}' does not exist", 404)
    store = _website_domains()
    existing = store.get_bucket(domain)
    if existing:
        return _json_error("Conflict", f"Domain '{domain}' is already mapped to bucket '{existing}'", 409)
    store.set_mapping(domain, bucket)
    logger.info("Website domain mapping created: %s -> %s", domain, bucket)
    return jsonify({"domain": domain, "bucket": bucket}), 201


@admin_api_bp.route("/website-domains/<domain>", methods=["GET"])
@limiter.limit(lambda: _get_admin_rate_limit())
def get_website_domain(domain: str):
    principal, error = _require_admin()
    if error:
        return error
    if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
        return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
    bucket = _website_domains().get_bucket(domain)
    if not bucket:
        return _json_error("NotFound", f"No mapping found for domain '{domain}'", 404)
    return jsonify({"domain": domain.lower(), "bucket": bucket})


@admin_api_bp.route("/website-domains/<domain>", methods=["PUT"])
@limiter.limit(lambda: _get_admin_rate_limit())
def update_website_domain(domain: str):
    principal, error = _require_admin()
    if error:
        return error
    if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
        return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
    payload = request.get_json(silent=True) or {}
    bucket = (payload.get("bucket") or "").strip()
    if not bucket:
        return _json_error("ValidationError", "bucket is required", 400)
    storage = _storage()
    if not storage.bucket_exists(bucket):
        return _json_error("NoSuchBucket", f"Bucket '{bucket}' does not exist", 404)
    store = _website_domains()
    store.set_mapping(domain, bucket)
    logger.info("Website domain mapping updated: %s -> %s", domain, bucket)
    return jsonify({"domain": domain.lower(), "bucket": bucket})


@admin_api_bp.route("/website-domains/<domain>", methods=["DELETE"])
@limiter.limit(lambda: _get_admin_rate_limit())
def delete_website_domain(domain: str):
    principal, error = _require_admin()
    if error:
        return error
    if not current_app.config.get("WEBSITE_HOSTING_ENABLED", False):
        return _json_error("InvalidRequest", "Website hosting is not enabled", 400)
    if not _website_domains().delete_mapping(domain):
        return _json_error("NotFound", f"No mapping found for domain '{domain}'", 404)
    logger.info("Website domain mapping deleted: %s", domain)
    return Response(status=204)
```
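A hypothetical usage sketch for the new mapping endpoints (base URL and credentials are placeholders; `WEBSITE_HOSTING_ENABLED=true` is required):

```python
import requests

resp = requests.post(
    "http://127.0.0.1:5000/admin/website-domains",
    json={"domain": "www.example.com", "bucket": "my-site"},
    headers={"X-Access-Key": "AKIA-PLACEHOLDER", "X-Secret-Key": "secret-placeholder"},
    timeout=10,
)
print(resp.status_code)  # 201 created, 409 if the domain is already mapped
```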
```diff
@@ -6,6 +6,7 @@ import re
 import time
 from dataclasses import dataclass, field
 from fnmatch import fnmatch, translate
+from functools import lru_cache
 from pathlib import Path
 from typing import Any, Dict, Iterable, List, Optional, Pattern, Sequence, Tuple
 
@@ -13,9 +14,14 @@ from typing import Any, Dict, Iterable, List, Optional, Pattern, Sequence, Tuple
 RESOURCE_PREFIX = "arn:aws:s3:::"
 
 
+@lru_cache(maxsize=256)
+def _compile_pattern(pattern: str) -> Pattern[str]:
+    return re.compile(translate(pattern), re.IGNORECASE)
+
+
 def _match_string_like(value: str, pattern: str) -> bool:
-    regex = translate(pattern)
-    return bool(re.match(regex, value, re.IGNORECASE))
+    compiled = _compile_pattern(pattern)
+    return bool(compiled.match(value))
 
 
 def _ip_in_cidr(ip_str: str, cidr: str) -> bool:
```
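A quick micro-benchmark sketch of why the cache helps: the old path re-runs `translate()` and pattern compilation on every policy evaluation, while the cached version compiles each distinct pattern once. Timings vary, and `re` keeps its own small internal cache, so real gains show up mainly with repeated matches against many distinct patterns:

```python
import re
import timeit
from fnmatch import translate
from functools import lru_cache

@lru_cache(maxsize=256)
def _compile_pattern(pattern: str):
    return re.compile(translate(pattern), re.IGNORECASE)

uncached = timeit.timeit(
    lambda: re.match(translate("arn:aws:s3:::my-bucket/*"), "arn:aws:s3:::my-bucket/key"),
    number=10_000,
)
cached = timeit.timeit(
    lambda: _compile_pattern("arn:aws:s3:::my-bucket/*").match("arn:aws:s3:::my-bucket/key"),
    number=10_000,
)
print(f"uncached: {uncached:.3f}s  cached: {cached:.3f}s")
```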
```diff
@@ -36,10 +36,11 @@ class GzipMiddleware:
         content_type = None
         content_length = None
         should_compress = False
+        passthrough = False
         exc_info_holder = [None]
 
         def custom_start_response(status: str, headers: List[Tuple[str, str]], exc_info=None):
-            nonlocal response_started, status_code, response_headers, content_type, content_length, should_compress
+            nonlocal response_started, status_code, response_headers, content_type, content_length, should_compress, passthrough
             response_started = True
             status_code = int(status.split(' ', 1)[0])
             response_headers = list(headers)
@@ -50,18 +51,32 @@ class GzipMiddleware:
                 if name_lower == 'content-type':
                     content_type = value.split(';')[0].strip().lower()
                 elif name_lower == 'content-length':
-                    content_length = int(value)
+                    try:
+                        content_length = int(value)
+                    except (ValueError, TypeError):
+                        pass
                 elif name_lower == 'content-encoding':
-                    should_compress = False
+                    passthrough = True
+                    return start_response(status, headers, exc_info)
+                elif name_lower == 'x-stream-response':
+                    passthrough = True
                     return start_response(status, headers, exc_info)
 
             if content_type and content_type in COMPRESSIBLE_MIMES:
                 if content_length is None or content_length >= self.min_size:
                     should_compress = True
+                else:
+                    passthrough = True
+                    return start_response(status, headers, exc_info)
 
             return None
 
-        response_body = b''.join(self.app(environ, custom_start_response))
+        app_iter = self.app(environ, custom_start_response)
+
+        if passthrough:
+            return app_iter
+
+        response_body = b''.join(app_iter)
 
         if not response_started:
             return [response_body]
```
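A simplified standalone sketch of the passthrough decision added here: a body that is already `Content-Encoding`-compressed, explicitly flagged `X-Stream-Response`, or too small to be worth compressing is forwarded untouched instead of being buffered with `b''.join(...)`. The real middleware also restricts the size check to `COMPRESSIBLE_MIMES`; header names come from the diff:

```python
def should_pass_through(headers, min_size=500):
    content_length = None
    for name, value in headers:
        n = name.lower()
        if n in ("content-encoding", "x-stream-response"):
            return True  # already encoded, or explicitly marked as a stream
        if n == "content-length":
            try:
                content_length = int(value)
            except (ValueError, TypeError):
                pass
    # bodies below the threshold are not worth compressing
    return content_length is not None and content_length < min_size

print(should_pass_through([("Content-Encoding", "gzip")]))       # True
print(should_pass_through([("Content-Type", "application/json"),
                           ("Content-Length", "120")]))          # True
print(should_pass_through([("Content-Type", "application/json"),
                           ("Content-Length", "8192")]))         # False
```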
app/config.py (199 changed lines)
```diff
@@ -10,6 +10,23 @@ from dataclasses import dataclass
 from pathlib import Path
 from typing import Any, Dict, Optional
 
+import psutil
+
+
+def _calculate_auto_threads() -> int:
+    cpu_count = psutil.cpu_count(logical=True) or 4
+    return max(1, min(cpu_count * 2, 64))
+
+
+def _calculate_auto_connection_limit() -> int:
+    available_mb = psutil.virtual_memory().available / (1024 * 1024)
+    calculated = int(available_mb / 5)
+    return max(20, min(calculated, 1000))
+
+
+def _calculate_auto_backlog(connection_limit: int) -> int:
+    return max(64, min(connection_limit * 2, 4096))
+
 
 def _validate_rate_limit(value: str) -> str:
     pattern = r"^\d+\s+per\s+(second|minute|hour|day)$"
```
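Applied to the current host, the auto-sizing heuristics compute, for example (same formulas as the diff):

```python
import psutil

cpu = psutil.cpu_count(logical=True) or 4
threads = max(1, min(cpu * 2, 64))                            # 2x logical cores, capped at 64
available_mb = psutil.virtual_memory().available / (1024 * 1024)
connection_limit = max(20, min(int(available_mb / 5), 1000))  # roughly 5 MB budget per connection
backlog = max(64, min(connection_limit * 2, 4096))
print(f"threads={threads} connection_limit={connection_limit} backlog={backlog}")
```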
@@ -63,6 +80,10 @@ class AppConfig:
     log_backup_count: int
     ratelimit_default: str
     ratelimit_storage_uri: str
+    ratelimit_list_buckets: str
+    ratelimit_bucket_ops: str
+    ratelimit_object_ops: str
+    ratelimit_head_ops: str
     cors_origins: list[str]
     cors_methods: list[str]
     cors_allow_headers: list[str]
@@ -94,9 +115,41 @@ class AppConfig:
     server_connection_limit: int
     server_backlog: int
     server_channel_timeout: int
+    server_threads_auto: bool
+    server_connection_limit_auto: bool
+    server_backlog_auto: bool
     site_sync_enabled: bool
     site_sync_interval_seconds: int
     site_sync_batch_size: int
+    sigv4_timestamp_tolerance_seconds: int
+    presigned_url_min_expiry_seconds: int
+    presigned_url_max_expiry_seconds: int
+    replication_connect_timeout_seconds: int
+    replication_read_timeout_seconds: int
+    replication_max_retries: int
+    replication_streaming_threshold_bytes: int
+    replication_max_failures_per_bucket: int
+    site_sync_connect_timeout_seconds: int
+    site_sync_read_timeout_seconds: int
+    site_sync_max_retries: int
+    site_sync_clock_skew_tolerance_seconds: float
+    object_key_max_length_bytes: int
+    object_cache_max_size: int
+    bucket_config_cache_ttl_seconds: float
+    object_tag_limit: int
+    encryption_chunk_size_bytes: int
+    kms_generate_data_key_min_bytes: int
+    kms_generate_data_key_max_bytes: int
+    lifecycle_max_history_per_bucket: int
+    site_id: Optional[str]
+    site_endpoint: Optional[str]
+    site_region: str
+    site_priority: int
+    ratelimit_admin: str
+    num_trusted_proxies: int
+    allowed_redirect_hosts: list[str]
+    allow_internal_endpoints: bool
+    website_hosting_enabled: bool

     @classmethod
     def from_env(cls, overrides: Optional[Dict[str, Any]] = None) -> "AppConfig":
@@ -171,6 +224,10 @@ class AppConfig:
         log_backup_count = int(_get("LOG_BACKUP_COUNT", 3))
         ratelimit_default = _validate_rate_limit(str(_get("RATE_LIMIT_DEFAULT", "200 per minute")))
         ratelimit_storage_uri = str(_get("RATE_LIMIT_STORAGE_URI", "memory://"))
+        ratelimit_list_buckets = _validate_rate_limit(str(_get("RATE_LIMIT_LIST_BUCKETS", "60 per minute")))
+        ratelimit_bucket_ops = _validate_rate_limit(str(_get("RATE_LIMIT_BUCKET_OPS", "120 per minute")))
+        ratelimit_object_ops = _validate_rate_limit(str(_get("RATE_LIMIT_OBJECT_OPS", "240 per minute")))
+        ratelimit_head_ops = _validate_rate_limit(str(_get("RATE_LIMIT_HEAD_OPS", "100 per minute")))

         def _csv(value: str, default: list[str]) -> list[str]:
             if not value:
@@ -200,14 +257,69 @@ class AppConfig:
         operation_metrics_interval_minutes = int(_get("OPERATION_METRICS_INTERVAL_MINUTES", 5))
         operation_metrics_retention_hours = int(_get("OPERATION_METRICS_RETENTION_HOURS", 24))

-        server_threads = int(_get("SERVER_THREADS", 4))
-        server_connection_limit = int(_get("SERVER_CONNECTION_LIMIT", 100))
-        server_backlog = int(_get("SERVER_BACKLOG", 1024))
+        _raw_threads = int(_get("SERVER_THREADS", 0))
+        if _raw_threads == 0:
+            server_threads = _calculate_auto_threads()
+            server_threads_auto = True
+        else:
+            server_threads = _raw_threads
+            server_threads_auto = False
+
+        _raw_conn_limit = int(_get("SERVER_CONNECTION_LIMIT", 0))
+        if _raw_conn_limit == 0:
+            server_connection_limit = _calculate_auto_connection_limit()
+            server_connection_limit_auto = True
+        else:
+            server_connection_limit = _raw_conn_limit
+            server_connection_limit_auto = False
+
+        _raw_backlog = int(_get("SERVER_BACKLOG", 0))
+        if _raw_backlog == 0:
+            server_backlog = _calculate_auto_backlog(server_connection_limit)
+            server_backlog_auto = True
+        else:
+            server_backlog = _raw_backlog
+            server_backlog_auto = False
+
         server_channel_timeout = int(_get("SERVER_CHANNEL_TIMEOUT", 120))
         site_sync_enabled = str(_get("SITE_SYNC_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
         site_sync_interval_seconds = int(_get("SITE_SYNC_INTERVAL_SECONDS", 60))
         site_sync_batch_size = int(_get("SITE_SYNC_BATCH_SIZE", 100))
+
+        sigv4_timestamp_tolerance_seconds = int(_get("SIGV4_TIMESTAMP_TOLERANCE_SECONDS", 900))
+        presigned_url_min_expiry_seconds = int(_get("PRESIGNED_URL_MIN_EXPIRY_SECONDS", 1))
+        presigned_url_max_expiry_seconds = int(_get("PRESIGNED_URL_MAX_EXPIRY_SECONDS", 604800))
+        replication_connect_timeout_seconds = int(_get("REPLICATION_CONNECT_TIMEOUT_SECONDS", 5))
+        replication_read_timeout_seconds = int(_get("REPLICATION_READ_TIMEOUT_SECONDS", 30))
+        replication_max_retries = int(_get("REPLICATION_MAX_RETRIES", 2))
+        replication_streaming_threshold_bytes = int(_get("REPLICATION_STREAMING_THRESHOLD_BYTES", 10 * 1024 * 1024))
+        replication_max_failures_per_bucket = int(_get("REPLICATION_MAX_FAILURES_PER_BUCKET", 50))
+        site_sync_connect_timeout_seconds = int(_get("SITE_SYNC_CONNECT_TIMEOUT_SECONDS", 10))
+        site_sync_read_timeout_seconds = int(_get("SITE_SYNC_READ_TIMEOUT_SECONDS", 120))
+        site_sync_max_retries = int(_get("SITE_SYNC_MAX_RETRIES", 2))
+        site_sync_clock_skew_tolerance_seconds = float(_get("SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS", 1.0))
+        object_key_max_length_bytes = int(_get("OBJECT_KEY_MAX_LENGTH_BYTES", 1024))
+        object_cache_max_size = int(_get("OBJECT_CACHE_MAX_SIZE", 100))
+        bucket_config_cache_ttl_seconds = float(_get("BUCKET_CONFIG_CACHE_TTL_SECONDS", 30.0))
+        object_tag_limit = int(_get("OBJECT_TAG_LIMIT", 50))
+        encryption_chunk_size_bytes = int(_get("ENCRYPTION_CHUNK_SIZE_BYTES", 64 * 1024))
+        kms_generate_data_key_min_bytes = int(_get("KMS_GENERATE_DATA_KEY_MIN_BYTES", 1))
+        kms_generate_data_key_max_bytes = int(_get("KMS_GENERATE_DATA_KEY_MAX_BYTES", 1024))
+        lifecycle_max_history_per_bucket = int(_get("LIFECYCLE_MAX_HISTORY_PER_BUCKET", 50))
+
+        site_id_raw = _get("SITE_ID", None)
+        site_id = str(site_id_raw).strip() if site_id_raw else None
+        site_endpoint_raw = _get("SITE_ENDPOINT", None)
+        site_endpoint = str(site_endpoint_raw).strip() if site_endpoint_raw else None
+        site_region = str(_get("SITE_REGION", "us-east-1"))
+        site_priority = int(_get("SITE_PRIORITY", 100))
+        ratelimit_admin = _validate_rate_limit(str(_get("RATE_LIMIT_ADMIN", "60 per minute")))
+        num_trusted_proxies = int(_get("NUM_TRUSTED_PROXIES", 0))
+        allowed_redirect_hosts_raw = _get("ALLOWED_REDIRECT_HOSTS", "")
+        allowed_redirect_hosts = [h.strip() for h in str(allowed_redirect_hosts_raw).split(",") if h.strip()]
+        allow_internal_endpoints = str(_get("ALLOW_INTERNAL_ENDPOINTS", "0")).lower() in {"1", "true", "yes", "on"}
+        website_hosting_enabled = str(_get("WEBSITE_HOSTING_ENABLED", "0")).lower() in {"1", "true", "yes", "on"}
+
         return cls(storage_root=storage_root,
                    max_upload_size=max_upload_size,
                    ui_page_size=ui_page_size,
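
Note the sentinel convention introduced here: SERVER_THREADS, SERVER_CONNECTION_LIMIT, and SERVER_BACKLOG now default to 0, and 0 means "auto-calculate"; any explicit positive value pins the setting and clears the corresponding *_auto flag. A minimal sketch of the pattern, with a hypothetical resolve() helper standing in for the three repeated if/else blocks above:

    import os

    def resolve(env_name: str, auto_fn) -> tuple[int, bool]:
        # 0 (the default) opts into auto-tuning; any positive value is literal.
        raw = int(os.environ.get(env_name, 0))
        return (auto_fn(), True) if raw == 0 else (raw, False)

    # e.g. server_threads, server_threads_auto = resolve("SERVER_THREADS", _calculate_auto_threads)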
@@ -225,6 +337,10 @@ class AppConfig:
                    log_backup_count=log_backup_count,
                    ratelimit_default=ratelimit_default,
                    ratelimit_storage_uri=ratelimit_storage_uri,
+                   ratelimit_list_buckets=ratelimit_list_buckets,
+                   ratelimit_bucket_ops=ratelimit_bucket_ops,
+                   ratelimit_object_ops=ratelimit_object_ops,
+                   ratelimit_head_ops=ratelimit_head_ops,
                    cors_origins=cors_origins,
                    cors_methods=cors_methods,
                    cors_allow_headers=cors_allow_headers,
@@ -256,9 +372,41 @@ class AppConfig:
                    server_connection_limit=server_connection_limit,
                    server_backlog=server_backlog,
                    server_channel_timeout=server_channel_timeout,
+                   server_threads_auto=server_threads_auto,
+                   server_connection_limit_auto=server_connection_limit_auto,
+                   server_backlog_auto=server_backlog_auto,
                    site_sync_enabled=site_sync_enabled,
                    site_sync_interval_seconds=site_sync_interval_seconds,
-                   site_sync_batch_size=site_sync_batch_size)
+                   site_sync_batch_size=site_sync_batch_size,
+                   sigv4_timestamp_tolerance_seconds=sigv4_timestamp_tolerance_seconds,
+                   presigned_url_min_expiry_seconds=presigned_url_min_expiry_seconds,
+                   presigned_url_max_expiry_seconds=presigned_url_max_expiry_seconds,
+                   replication_connect_timeout_seconds=replication_connect_timeout_seconds,
+                   replication_read_timeout_seconds=replication_read_timeout_seconds,
+                   replication_max_retries=replication_max_retries,
+                   replication_streaming_threshold_bytes=replication_streaming_threshold_bytes,
+                   replication_max_failures_per_bucket=replication_max_failures_per_bucket,
+                   site_sync_connect_timeout_seconds=site_sync_connect_timeout_seconds,
+                   site_sync_read_timeout_seconds=site_sync_read_timeout_seconds,
+                   site_sync_max_retries=site_sync_max_retries,
+                   site_sync_clock_skew_tolerance_seconds=site_sync_clock_skew_tolerance_seconds,
+                   object_key_max_length_bytes=object_key_max_length_bytes,
+                   object_cache_max_size=object_cache_max_size,
+                   bucket_config_cache_ttl_seconds=bucket_config_cache_ttl_seconds,
+                   object_tag_limit=object_tag_limit,
+                   encryption_chunk_size_bytes=encryption_chunk_size_bytes,
+                   kms_generate_data_key_min_bytes=kms_generate_data_key_min_bytes,
+                   kms_generate_data_key_max_bytes=kms_generate_data_key_max_bytes,
+                   lifecycle_max_history_per_bucket=lifecycle_max_history_per_bucket,
+                   site_id=site_id,
+                   site_endpoint=site_endpoint,
+                   site_region=site_region,
+                   site_priority=site_priority,
+                   ratelimit_admin=ratelimit_admin,
+                   num_trusted_proxies=num_trusted_proxies,
+                   allowed_redirect_hosts=allowed_redirect_hosts,
+                   allow_internal_endpoints=allow_internal_endpoints,
+                   website_hosting_enabled=website_hosting_enabled)

     def validate_and_report(self) -> list[str]:
         """Validate configuration and return a list of warnings/issues.
@@ -364,9 +512,13 @@ class AppConfig:
             print(f" ENCRYPTION: Enabled (Master key: {self.encryption_master_key_path})")
         if self.kms_enabled:
             print(f" KMS: Enabled (Keys: {self.kms_keys_path})")
-        print(f" SERVER_THREADS: {self.server_threads}")
-        print(f" CONNECTION_LIMIT: {self.server_connection_limit}")
-        print(f" BACKLOG: {self.server_backlog}")
+        if self.website_hosting_enabled:
+            print(f" WEBSITE_HOSTING: Enabled")
+        def _auto(flag: bool) -> str:
+            return " (auto)" if flag else ""
+        print(f" SERVER_THREADS: {self.server_threads}{_auto(self.server_threads_auto)}")
+        print(f" CONNECTION_LIMIT: {self.server_connection_limit}{_auto(self.server_connection_limit_auto)}")
+        print(f" BACKLOG: {self.server_backlog}{_auto(self.server_backlog_auto)}")
         print(f" CHANNEL_TIMEOUT: {self.server_channel_timeout}s")
         print("=" * 60)

@@ -406,6 +558,10 @@ class AppConfig:
             "LOG_BACKUP_COUNT": self.log_backup_count,
             "RATELIMIT_DEFAULT": self.ratelimit_default,
             "RATELIMIT_STORAGE_URI": self.ratelimit_storage_uri,
+            "RATELIMIT_LIST_BUCKETS": self.ratelimit_list_buckets,
+            "RATELIMIT_BUCKET_OPS": self.ratelimit_bucket_ops,
+            "RATELIMIT_OBJECT_OPS": self.ratelimit_object_ops,
+            "RATELIMIT_HEAD_OPS": self.ratelimit_head_ops,
             "CORS_ORIGINS": self.cors_origins,
             "CORS_METHODS": self.cors_methods,
             "CORS_ALLOW_HEADERS": self.cors_allow_headers,
@@ -432,4 +588,33 @@ class AppConfig:
             "SITE_SYNC_ENABLED": self.site_sync_enabled,
             "SITE_SYNC_INTERVAL_SECONDS": self.site_sync_interval_seconds,
             "SITE_SYNC_BATCH_SIZE": self.site_sync_batch_size,
+            "SIGV4_TIMESTAMP_TOLERANCE_SECONDS": self.sigv4_timestamp_tolerance_seconds,
+            "PRESIGNED_URL_MIN_EXPIRY_SECONDS": self.presigned_url_min_expiry_seconds,
+            "PRESIGNED_URL_MAX_EXPIRY_SECONDS": self.presigned_url_max_expiry_seconds,
+            "REPLICATION_CONNECT_TIMEOUT_SECONDS": self.replication_connect_timeout_seconds,
+            "REPLICATION_READ_TIMEOUT_SECONDS": self.replication_read_timeout_seconds,
+            "REPLICATION_MAX_RETRIES": self.replication_max_retries,
+            "REPLICATION_STREAMING_THRESHOLD_BYTES": self.replication_streaming_threshold_bytes,
+            "REPLICATION_MAX_FAILURES_PER_BUCKET": self.replication_max_failures_per_bucket,
+            "SITE_SYNC_CONNECT_TIMEOUT_SECONDS": self.site_sync_connect_timeout_seconds,
+            "SITE_SYNC_READ_TIMEOUT_SECONDS": self.site_sync_read_timeout_seconds,
+            "SITE_SYNC_MAX_RETRIES": self.site_sync_max_retries,
+            "SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS": self.site_sync_clock_skew_tolerance_seconds,
+            "OBJECT_KEY_MAX_LENGTH_BYTES": self.object_key_max_length_bytes,
+            "OBJECT_CACHE_MAX_SIZE": self.object_cache_max_size,
+            "BUCKET_CONFIG_CACHE_TTL_SECONDS": self.bucket_config_cache_ttl_seconds,
+            "OBJECT_TAG_LIMIT": self.object_tag_limit,
+            "ENCRYPTION_CHUNK_SIZE_BYTES": self.encryption_chunk_size_bytes,
+            "KMS_GENERATE_DATA_KEY_MIN_BYTES": self.kms_generate_data_key_min_bytes,
+            "KMS_GENERATE_DATA_KEY_MAX_BYTES": self.kms_generate_data_key_max_bytes,
+            "LIFECYCLE_MAX_HISTORY_PER_BUCKET": self.lifecycle_max_history_per_bucket,
+            "SITE_ID": self.site_id,
+            "SITE_ENDPOINT": self.site_endpoint,
+            "SITE_REGION": self.site_region,
+            "SITE_PRIORITY": self.site_priority,
+            "RATE_LIMIT_ADMIN": self.ratelimit_admin,
+            "NUM_TRUSTED_PROXIES": self.num_trusted_proxies,
+            "ALLOWED_REDIRECT_HOSTS": self.allowed_redirect_hosts,
+            "ALLOW_INTERNAL_ENDPOINTS": self.allow_internal_endpoints,
+            "WEBSITE_HOSTING_ENABLED": self.website_hosting_enabled,
         }

@@ -274,5 +274,11 @@ class EncryptedObjectStorage:
     def set_bucket_quota(self, bucket_name: str, *, max_bytes=None, max_objects=None):
         return self.storage.set_bucket_quota(bucket_name, max_bytes=max_bytes, max_objects=max_objects)

+    def get_bucket_website(self, bucket_name: str):
+        return self.storage.get_bucket_website(bucket_name)
+
+    def set_bucket_website(self, bucket_name: str, website_config):
+        return self.storage.set_bucket_website(bucket_name, website_config)
+
     def _compute_etag(self, path: Path) -> str:
         return self.storage._compute_etag(path)
app/encryption.py
@@ -1,15 +1,44 @@
-"""Encryption providers for server-side and client-side encryption."""
 from __future__ import annotations

 import base64
 import io
 import json
+import logging
+import os
 import secrets
+import subprocess
+import sys
 from dataclasses import dataclass
 from pathlib import Path
 from typing import Any, BinaryIO, Dict, Generator, Optional

 from cryptography.hazmat.primitives.ciphers.aead import AESGCM
+from cryptography.hazmat.primitives.kdf.hkdf import HKDF
+from cryptography.hazmat.primitives import hashes
+
+if sys.platform != "win32":
+    import fcntl
+
+logger = logging.getLogger(__name__)
+
+
+def _set_secure_file_permissions(file_path: Path) -> None:
+    """Set restrictive file permissions (owner read/write only)."""
+    if sys.platform == "win32":
+        try:
+            username = os.environ.get("USERNAME", "")
+            if username:
+                subprocess.run(
+                    ["icacls", str(file_path), "/inheritance:r",
+                     "/grant:r", f"{username}:F"],
+                    check=True, capture_output=True
+                )
+            else:
+                logger.warning("Could not set secure permissions on %s: USERNAME not set", file_path)
+        except (subprocess.SubprocessError, OSError) as exc:
+            logger.warning("Failed to set secure permissions on %s: %s", file_path, exc)
+    else:
+        os.chmod(file_path, 0o600)
+
+
 class EncryptionError(Exception):
@@ -75,6 +104,18 @@ class EncryptionProvider:
         """
         raise NotImplementedError

+    def decrypt_data_key(self, encrypted_data_key: bytes, key_id: str | None = None) -> bytes:
+        """Decrypt an encrypted data key.
+
+        Args:
+            encrypted_data_key: The encrypted data key bytes
+            key_id: Optional key identifier (used by KMS providers)
+
+        Returns:
+            The decrypted data key
+        """
+        raise NotImplementedError
+

 class LocalKeyEncryption(EncryptionProvider):
     """SSE-S3 style encryption using a local master key.
@@ -99,26 +140,46 @@ class LocalKeyEncryption(EncryptionProvider):
         return self._master_key

     def _load_or_create_master_key(self) -> bytes:
-        """Load master key from file or generate a new one."""
+        """Load master key from file or generate a new one (with file locking)."""
+        lock_path = self.master_key_path.with_suffix(".lock")
+        lock_path.parent.mkdir(parents=True, exist_ok=True)
+
+        try:
+            with open(lock_path, "w") as lock_file:
+                if sys.platform == "win32":
+                    import msvcrt
+                    msvcrt.locking(lock_file.fileno(), msvcrt.LK_LOCK, 1)
+                else:
+                    fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
+                try:
                     if self.master_key_path.exists():
                         try:
                             return base64.b64decode(self.master_key_path.read_text().strip())
                         except Exception as exc:
                             raise EncryptionError(f"Failed to load master key: {exc}") from exc

                     key = secrets.token_bytes(32)
                     try:
-                        self.master_key_path.parent.mkdir(parents=True, exist_ok=True)
                         self.master_key_path.write_text(base64.b64encode(key).decode())
+                        _set_secure_file_permissions(self.master_key_path)
                     except OSError as exc:
                         raise EncryptionError(f"Failed to save master key: {exc}") from exc
                     return key
+                finally:
+                    if sys.platform == "win32":
+                        import msvcrt
+                        msvcrt.locking(lock_file.fileno(), msvcrt.LK_UNLCK, 1)
+                    else:
+                        fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
+        except OSError as exc:
+            raise EncryptionError(f"Failed to acquire lock for master key: {exc}") from exc
+
+    DATA_KEY_AAD = b'{"purpose":"data_key","version":1}'

     def _encrypt_data_key(self, data_key: bytes) -> bytes:
         """Encrypt the data key with the master key."""
         aesgcm = AESGCM(self.master_key)
         nonce = secrets.token_bytes(12)
-        encrypted = aesgcm.encrypt(nonce, data_key, None)
+        encrypted = aesgcm.encrypt(nonce, data_key, self.DATA_KEY_AAD)
         return nonce + encrypted

     def _decrypt_data_key(self, encrypted_data_key: bytes) -> bytes:
@@ -128,11 +189,18 @@ class LocalKeyEncryption(EncryptionProvider):
         aesgcm = AESGCM(self.master_key)
         nonce = encrypted_data_key[:12]
         ciphertext = encrypted_data_key[12:]
+        try:
+            return aesgcm.decrypt(nonce, ciphertext, self.DATA_KEY_AAD)
+        except Exception:
             try:
                 return aesgcm.decrypt(nonce, ciphertext, None)
             except Exception as exc:
                 raise EncryptionError(f"Failed to decrypt data key: {exc}") from exc
+
+    def decrypt_data_key(self, encrypted_data_key: bytes, key_id: str | None = None) -> bytes:
+        """Decrypt an encrypted data key (key_id ignored for local encryption)."""
+        return self._decrypt_data_key(encrypted_data_key)

     def generate_data_key(self) -> tuple[bytes, bytes]:
         """Generate a data key and its encrypted form."""
         plaintext_key = secrets.token_bytes(32)
@@ -145,7 +213,8 @@ class LocalKeyEncryption(EncryptionProvider):

         aesgcm = AESGCM(data_key)
         nonce = secrets.token_bytes(12)
-        ciphertext = aesgcm.encrypt(nonce, plaintext, None)
+        aad = json.dumps(context, sort_keys=True).encode() if context else None
+        ciphertext = aesgcm.encrypt(nonce, plaintext, aad)

         return EncryptionResult(
             ciphertext=ciphertext,
@@ -159,10 +228,11 @@ class LocalKeyEncryption(EncryptionProvider):
         """Decrypt data using envelope encryption."""
         data_key = self._decrypt_data_key(encrypted_data_key)
         aesgcm = AESGCM(data_key)
+        aad = json.dumps(context, sort_keys=True).encode() if context else None
         try:
-            return aesgcm.decrypt(nonce, ciphertext, None)
+            return aesgcm.decrypt(nonce, ciphertext, aad)
         except Exception as exc:
-            raise EncryptionError(f"Failed to decrypt data: {exc}") from exc
+            raise EncryptionError("Failed to decrypt data") from exc


 class StreamingEncryptor:
@@ -180,12 +250,14 @@ class StreamingEncryptor:
         self.chunk_size = chunk_size

     def _derive_chunk_nonce(self, base_nonce: bytes, chunk_index: int) -> bytes:
-        """Derive a unique nonce for each chunk.
-
-        Performance: Use direct byte manipulation instead of full int conversion.
-        """
-        # Performance: Only modify last 4 bytes instead of full 12-byte conversion
-        return base_nonce[:8] + (chunk_index ^ int.from_bytes(base_nonce[8:], "big")).to_bytes(4, "big")
+        """Derive a unique nonce for each chunk using HKDF."""
+        hkdf = HKDF(
+            algorithm=hashes.SHA256(),
+            length=12,
+            salt=base_nonce,
+            info=chunk_index.to_bytes(4, "big"),
+        )
+        return hkdf.derive(b"chunk_nonce")

     def encrypt_stream(self, stream: BinaryIO,
                        context: Dict[str, str] | None = None) -> tuple[BinaryIO, EncryptionMetadata]:
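
The switch from XOR-ing the chunk index into the nonce tail to HKDF is the notable change here: each (base_nonce, chunk_index) pair now expands into an unrelated 96-bit nonce through a keyed hash. A self-contained sketch of the same construction, runnable outside the class:

    import secrets
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.kdf.hkdf import HKDF

    def derive_chunk_nonce(base_nonce: bytes, chunk_index: int) -> bytes:
        # Mirrors the method above: salt with the per-object base nonce,
        # bind the chunk index via info, expand to the 12 bytes AES-GCM needs.
        return HKDF(
            algorithm=hashes.SHA256(),
            length=12,
            salt=base_nonce,
            info=chunk_index.to_bytes(4, "big"),
        ).derive(b"chunk_nonce")

    base = secrets.token_bytes(12)
    assert derive_chunk_nonce(base, 0) != derive_chunk_nonce(base, 1)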
@@ -234,10 +306,7 @@ class StreamingEncryptor:

         Performance: Writes chunks directly to output buffer instead of accumulating in list.
         """
-        if isinstance(self.provider, LocalKeyEncryption):
-            data_key = self.provider._decrypt_data_key(metadata.encrypted_data_key)
-        else:
-            raise EncryptionError("Unsupported provider for streaming decryption")
+        data_key = self.provider.decrypt_data_key(metadata.encrypted_data_key, metadata.key_id)

         aesgcm = AESGCM(data_key)
         base_nonce = metadata.nonce
@@ -310,7 +379,8 @@ class EncryptionManager:

     def get_streaming_encryptor(self) -> StreamingEncryptor:
         if self._streaming_encryptor is None:
-            self._streaming_encryptor = StreamingEncryptor(self.get_local_provider())
+            chunk_size = self.config.get("encryption_chunk_size_bytes", 64 * 1024)
+            self._streaming_encryptor = StreamingEncryptor(self.get_local_provider(), chunk_size=chunk_size)
         return self._streaming_encryptor

     def encrypt_object(self, data: bytes, algorithm: str = "AES256",
@@ -403,7 +473,8 @@ class SSECEncryption(EncryptionProvider):
     def encrypt(self, plaintext: bytes, context: Dict[str, str] | None = None) -> EncryptionResult:
         aesgcm = AESGCM(self.customer_key)
         nonce = secrets.token_bytes(12)
-        ciphertext = aesgcm.encrypt(nonce, plaintext, None)
+        aad = json.dumps(context, sort_keys=True).encode() if context else None
+        ciphertext = aesgcm.encrypt(nonce, plaintext, aad)

         return EncryptionResult(
             ciphertext=ciphertext,
@@ -415,10 +486,11 @@ class SSECEncryption(EncryptionProvider):
     def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
                 key_id: str, context: Dict[str, str] | None = None) -> bytes:
         aesgcm = AESGCM(self.customer_key)
+        aad = json.dumps(context, sort_keys=True).encode() if context else None
         try:
-            return aesgcm.decrypt(nonce, ciphertext, None)
+            return aesgcm.decrypt(nonce, ciphertext, aad)
         except Exception as exc:
-            raise EncryptionError(f"SSE-C decryption failed: {exc}") from exc
+            raise EncryptionError("SSE-C decryption failed") from exc

     def generate_data_key(self) -> tuple[bytes, bytes]:
         return self.customer_key, b""
@@ -472,7 +544,7 @@ class ClientEncryptionHelper:
         }

     @staticmethod
-    def encrypt_with_key(plaintext: bytes, key_b64: str) -> Dict[str, str]:
+    def encrypt_with_key(plaintext: bytes, key_b64: str, context: Dict[str, str] | None = None) -> Dict[str, str]:
         """Encrypt data with a client-provided key."""
         key = base64.b64decode(key_b64)
         if len(key) != 32:
@@ -480,7 +552,8 @@ class ClientEncryptionHelper:

         aesgcm = AESGCM(key)
         nonce = secrets.token_bytes(12)
-        ciphertext = aesgcm.encrypt(nonce, plaintext, None)
+        aad = json.dumps(context, sort_keys=True).encode() if context else None
+        ciphertext = aesgcm.encrypt(nonce, plaintext, aad)

         return {
             "ciphertext": base64.b64encode(ciphertext).decode(),
@@ -489,7 +562,7 @@ class ClientEncryptionHelper:
         }

     @staticmethod
-    def decrypt_with_key(ciphertext_b64: str, nonce_b64: str, key_b64: str) -> bytes:
+    def decrypt_with_key(ciphertext_b64: str, nonce_b64: str, key_b64: str, context: Dict[str, str] | None = None) -> bytes:
         """Decrypt data with a client-provided key."""
         key = base64.b64decode(key_b64)
         nonce = base64.b64decode(nonce_b64)
@@ -499,7 +572,8 @@ class ClientEncryptionHelper:
             raise EncryptionError("Key must be 256 bits (32 bytes)")

         aesgcm = AESGCM(key)
+        aad = json.dumps(context, sort_keys=True).encode() if context else None
         try:
-            return aesgcm.decrypt(nonce, ciphertext, None)
+            return aesgcm.decrypt(nonce, ciphertext, aad)
         except Exception as exc:
-            raise EncryptionError(f"Decryption failed: {exc}") from exc
+            raise EncryptionError("Decryption failed") from exc
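
A pattern repeated across this file: the optional encryption context is now bound into AES-GCM as associated data via json.dumps(context, sort_keys=True). The sort_keys is what makes this safe; AAD must be byte-identical at decrypt time, and two dicts with the same keys in different insertion order would otherwise serialize differently and fail authentication. A small round-trip sketch (the bucket/key values are illustrative):

    import json
    import secrets
    from cryptography.hazmat.primitives.ciphers.aead import AESGCM

    key = AESGCM.generate_key(bit_length=256)
    aesgcm = AESGCM(key)
    nonce = secrets.token_bytes(12)

    aad = json.dumps({"bucket": "photos", "key": "cat.jpg"}, sort_keys=True).encode()
    ct = aesgcm.encrypt(nonce, b"secret bytes", aad)

    # Same context, different insertion order: sort_keys yields identical AAD.
    aad2 = json.dumps({"key": "cat.jpg", "bucket": "photos"}, sort_keys=True).encode()
    assert aesgcm.decrypt(nonce, ct, aad2) == b"secret bytes"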

@@ -6,6 +6,7 @@ from typing import Optional, Dict, Any
 from xml.etree.ElementTree import Element, SubElement, tostring

 from flask import Response, jsonify, request, flash, redirect, url_for, g
+from flask_limiter import RateLimitExceeded

 logger = logging.getLogger(__name__)

@@ -172,9 +173,21 @@ def handle_app_error(error: AppError) -> Response:
     return error.to_xml_response()


+def handle_rate_limit_exceeded(e: RateLimitExceeded) -> Response:
+    g.s3_error_code = "SlowDown"
+    error = Element("Error")
+    SubElement(error, "Code").text = "SlowDown"
+    SubElement(error, "Message").text = "Please reduce your request rate."
+    SubElement(error, "Resource").text = request.path
+    SubElement(error, "RequestId").text = getattr(g, "request_id", "")
+    xml_bytes = tostring(error, encoding="utf-8")
+    return Response(xml_bytes, status=429, mimetype="application/xml")
+
+
 def register_error_handlers(app):
     """Register error handlers with a Flask app."""
     app.register_error_handler(AppError, handle_app_error)
+    app.register_error_handler(RateLimitExceeded, handle_rate_limit_exceeded)

     for error_class in [
         BucketNotFoundError, BucketAlreadyExistsError, BucketNotEmptyError,
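
For reference, the handler above returns the S3-style SlowDown body as UTF-8 XML with status 429. Serializing the same element tree shows the shape (the Resource and RequestId values here are illustrative, not from the source):

    from xml.etree.ElementTree import Element, SubElement, tostring

    error = Element("Error")
    SubElement(error, "Code").text = "SlowDown"
    SubElement(error, "Message").text = "Please reduce your request rate."
    SubElement(error, "Resource").text = "/demo-bucket/demo-key"  # illustrative
    SubElement(error, "RequestId").text = "req-123"               # illustrative
    print(tostring(error, encoding="unicode"))
    # <Error><Code>SlowDown</Code><Message>Please reduce your request rate.</Message><Resource>/demo-bucket/demo-key</Resource><RequestId>req-123</RequestId></Error>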

app/iam.py (133 changed lines)
@@ -1,9 +1,12 @@
 from __future__ import annotations

+import hashlib
 import hmac
 import json
 import math
+import os
 import secrets
+import threading
 import time
 from collections import deque
 from dataclasses import dataclass
@@ -118,12 +121,15 @@ class IamService:
         self._raw_config: Dict[str, Any] = {}
         self._failed_attempts: Dict[str, Deque[datetime]] = {}
         self._last_load_time = 0.0
-        self._credential_cache: Dict[str, Tuple[str, Principal, float]] = {}
-        self._cache_ttl = 60.0
+        self._principal_cache: Dict[str, Tuple[Principal, float]] = {}
+        self._secret_key_cache: Dict[str, Tuple[str, float]] = {}
+        self._cache_ttl = float(os.environ.get("IAM_CACHE_TTL_SECONDS", "5.0"))
         self._last_stat_check = 0.0
         self._stat_check_interval = 1.0
         self._sessions: Dict[str, Dict[str, Any]] = {}
+        self._session_lock = threading.Lock()
         self._load()
+        self._load_lockout_state()

     def _maybe_reload(self) -> None:
         """Reload configuration if the file has changed on disk."""
@@ -134,7 +140,8 @@ class IamService:
         try:
             if self.config_path.stat().st_mtime > self._last_load_time:
                 self._load()
-                self._credential_cache.clear()
+                self._principal_cache.clear()
+                self._secret_key_cache.clear()
         except OSError:
             pass

@@ -150,7 +157,8 @@ class IamService:
                 f"Access temporarily locked. Try again in {seconds} seconds."
             )
         record = self._users.get(access_key)
-        if not record or not hmac.compare_digest(record["secret_key"], secret_key):
+        stored_secret = record["secret_key"] if record else secrets.token_urlsafe(24)
+        if not record or not hmac.compare_digest(stored_secret, secret_key):
             self._record_failed_attempt(access_key)
             raise IamError("Invalid credentials")
         self._clear_failed_attempts(access_key)
@@ -162,11 +170,46 @@ class IamService:
         attempts = self._failed_attempts.setdefault(access_key, deque())
         self._prune_attempts(attempts)
         attempts.append(datetime.now(timezone.utc))
+        self._save_lockout_state()

     def _clear_failed_attempts(self, access_key: str) -> None:
         if not access_key:
             return
-        self._failed_attempts.pop(access_key, None)
+        if self._failed_attempts.pop(access_key, None) is not None:
+            self._save_lockout_state()
+
+    def _lockout_file(self) -> Path:
+        return self.config_path.parent / "lockout_state.json"
+
+    def _load_lockout_state(self) -> None:
+        """Load lockout state from disk."""
+        try:
+            if self._lockout_file().exists():
+                data = json.loads(self._lockout_file().read_text(encoding="utf-8"))
+                cutoff = datetime.now(timezone.utc) - self.auth_lockout_window
+                for key, timestamps in data.get("failed_attempts", {}).items():
+                    valid = []
+                    for ts in timestamps:
+                        try:
+                            dt = datetime.fromisoformat(ts)
+                            if dt > cutoff:
+                                valid.append(dt)
+                        except (ValueError, TypeError):
+                            continue
+                    if valid:
+                        self._failed_attempts[key] = deque(valid)
+        except (OSError, json.JSONDecodeError):
+            pass
+
+    def _save_lockout_state(self) -> None:
+        """Persist lockout state to disk."""
+        data: Dict[str, Any] = {"failed_attempts": {}}
+        for key, attempts in self._failed_attempts.items():
+            data["failed_attempts"][key] = [ts.isoformat() for ts in attempts]
+        try:
+            self._lockout_file().write_text(json.dumps(data), encoding="utf-8")
+        except OSError:
+            pass

     def _prune_attempts(self, attempts: Deque[datetime]) -> None:
         cutoff = datetime.now(timezone.utc) - self.auth_lockout_window
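
From _save_lockout_state above, the sidecar file is a plain JSON map of access keys to ISO-8601 failure timestamps, pruned against the lockout window on load. A sketch of its shape (the access key and times are illustrative):

    import json

    # Illustrative content of lockout_state.json, next to the IAM config file:
    state = {
        "failed_attempts": {
            "AKIAEXAMPLE": [                      # illustrative access key
                "2025-01-01T12:00:00+00:00",
                "2025-01-01T12:00:05+00:00",
            ]
        }
    }
    print(json.dumps(state, indent=2))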
@@ -209,14 +252,21 @@ class IamService:
         return token

     def validate_session_token(self, access_key: str, session_token: str) -> bool:
-        """Validate a session token for an access key."""
+        """Validate a session token for an access key (thread-safe, constant-time)."""
+        dummy_key = secrets.token_urlsafe(16)
+        dummy_token = secrets.token_urlsafe(32)
+        with self._session_lock:
             session = self._sessions.get(session_token)
             if not session:
+                hmac.compare_digest(access_key, dummy_key)
+                hmac.compare_digest(session_token, dummy_token)
                 return False
-            if session["access_key"] != access_key:
+            key_match = hmac.compare_digest(session["access_key"], access_key)
+            if not key_match:
+                hmac.compare_digest(session_token, dummy_token)
                 return False
             if time.time() > session["expires_at"]:
-                del self._sessions[session_token]
+                self._sessions.pop(session_token, None)
                 return False
             return True

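
The dummy comparisons above keep the early-exit paths doing comparable work to the success path, so response timing does not reveal whether a token exists or which check failed. The primitive is hmac.compare_digest, which runs in time independent of where the inputs first differ:

    import hmac

    # Unlike ==, compare_digest does not short-circuit at the first
    # mismatching character, so timing leaks (almost) nothing.
    assert hmac.compare_digest("token-abc", "token-abc")
    assert not hmac.compare_digest("token-abc", "token-abd")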
@@ -229,9 +279,9 @@ class IamService:

     def principal_for_key(self, access_key: str) -> Principal:
         now = time.time()
-        cached = self._credential_cache.get(access_key)
+        cached = self._principal_cache.get(access_key)
         if cached:
-            secret, principal, cached_time = cached
+            principal, cached_time = cached
             if now - cached_time < self._cache_ttl:
                 return principal

@@ -240,23 +290,14 @@ class IamService:
         if not record:
             raise IamError("Unknown access key")
         principal = self._build_principal(access_key, record)
-        self._credential_cache[access_key] = (record["secret_key"], principal, now)
+        self._principal_cache[access_key] = (principal, now)
         return principal

     def secret_for_key(self, access_key: str) -> str:
-        now = time.time()
-        cached = self._credential_cache.get(access_key)
-        if cached:
-            secret, principal, cached_time = cached
-            if now - cached_time < self._cache_ttl:
-                return secret
-
         self._maybe_reload()
         record = self._users.get(access_key)
         if not record:
             raise IamError("Unknown access key")
-        principal = self._build_principal(access_key, record)
-        self._credential_cache[access_key] = (record["secret_key"], principal, now)
         return record["secret_key"]

     def authorize(self, principal: Principal, bucket_name: str | None, action: str) -> None:
@@ -268,6 +309,18 @@ class IamService:
         if not self._is_allowed(principal, normalized, action):
             raise IamError(f"Access denied for action '{action}' on bucket '{bucket_name}'")

+    def check_permissions(self, principal: Principal, bucket_name: str | None, actions: Iterable[str]) -> Dict[str, bool]:
+        self._maybe_reload()
+        bucket_name = (bucket_name or "*").lower() if bucket_name != "*" else (bucket_name or "*")
+        normalized_actions = {a: self._normalize_action(a) for a in actions}
+        results: Dict[str, bool] = {}
+        for original, canonical in normalized_actions.items():
+            if canonical not in ALLOWED_ACTIONS:
+                results[original] = False
+            else:
+                results[original] = self._is_allowed(principal, bucket_name, canonical)
+        return results
+
     def buckets_for_principal(self, principal: Principal, buckets: Iterable[str]) -> List[str]:
         return [bucket for bucket in buckets if self._is_allowed(principal, bucket, "list")]

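
check_permissions batches what authorize does one action at a time, returning a verdict per action instead of raising on the first denial; actions that do not normalize into ALLOWED_ACTIONS simply come back False. A usage sketch (the service handle, principal, bucket, and results are illustrative):

    # 'iam' is an IamService, 'principal' a resolved Principal.
    verdicts = iam.check_permissions(principal, "photos", ["read", "write", "bogus"])
    # e.g. -> {"read": True, "write": False, "bogus": False}  (depends on the user's policies)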
@@ -328,6 +381,10 @@ class IamService:
         new_secret = self._generate_secret_key()
         user["secret_key"] = new_secret
         self._save()
+        self._principal_cache.pop(access_key, None)
+        self._secret_key_cache.pop(access_key, None)
+        from .s3_api import clear_signing_key_cache
+        clear_signing_key_cache()
         self._load()
         return new_secret

@@ -346,6 +403,10 @@ class IamService:
             raise IamError("User not found")
         self._raw_config["users"] = remaining
         self._save()
+        self._principal_cache.pop(access_key, None)
+        self._secret_key_cache.pop(access_key, None)
+        from .s3_api import clear_signing_key_cache
+        clear_signing_key_cache()
         self._load()

     def update_user_policies(self, access_key: str, policies: Sequence[Dict[str, Any]]) -> None:
@@ -480,11 +541,13 @@ class IamService:
         return candidate if candidate in ALLOWED_ACTIONS else ""

     def _write_default(self) -> None:
+        access_key = secrets.token_hex(12)
+        secret_key = secrets.token_urlsafe(32)
         default = {
             "users": [
                 {
-                    "access_key": "localadmin",
-                    "secret_key": "localadmin",
+                    "access_key": access_key,
+                    "secret_key": secret_key,
                     "display_name": "Local Admin",
                     "policies": [
                         {"bucket": "*", "actions": list(ALLOWED_ACTIONS)}
@@ -493,6 +556,14 @@ class IamService:
             ]
         }
         self.config_path.write_text(json.dumps(default, indent=2))
+        print(f"\n{'='*60}")
+        print("MYFSIO FIRST RUN - ADMIN CREDENTIALS GENERATED")
+        print(f"{'='*60}")
+        print(f"Access Key: {access_key}")
+        print(f"Secret Key: {secret_key}")
+        print(f"{'='*60}")
+        print(f"Missed this? Check: {self.config_path}")
+        print(f"{'='*60}\n")

     def _generate_access_key(self) -> str:
         return secrets.token_hex(8)
@@ -508,25 +579,25 @@ class IamService:

     def get_secret_key(self, access_key: str) -> str | None:
         now = time.time()
-        cached = self._credential_cache.get(access_key)
+        cached = self._secret_key_cache.get(access_key)
         if cached:
-            secret, principal, cached_time = cached
+            secret_key, cached_time = cached
             if now - cached_time < self._cache_ttl:
-                return secret
+                return secret_key

         self._maybe_reload()
         record = self._users.get(access_key)
         if record:
-            principal = self._build_principal(access_key, record)
-            self._credential_cache[access_key] = (record["secret_key"], principal, now)
-            return record["secret_key"]
+            secret_key = record["secret_key"]
+            self._secret_key_cache[access_key] = (secret_key, now)
+            return secret_key
         return None

     def get_principal(self, access_key: str) -> Principal | None:
         now = time.time()
-        cached = self._credential_cache.get(access_key)
+        cached = self._principal_cache.get(access_key)
         if cached:
-            secret, principal, cached_time = cached
+            principal, cached_time = cached
             if now - cached_time < self._cache_ttl:
                 return principal

@@ -534,6 +605,6 @@ class IamService:
         record = self._users.get(access_key)
         if record:
             principal = self._build_principal(access_key, record)
-            self._credential_cache[access_key] = (record["secret_key"], principal, now)
+            self._principal_cache[access_key] = (principal, now)
             return principal
         return None
|
|||||||
135
app/kms.py
135
app/kms.py
@@ -2,7 +2,11 @@ from __future__ import annotations
|
|||||||
|
|
||||||
import base64
|
import base64
|
||||||
import json
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
import secrets
|
import secrets
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
import uuid
|
import uuid
|
||||||
from dataclasses import dataclass, field
|
from dataclasses import dataclass, field
|
||||||
from datetime import datetime, timezone
|
from datetime import datetime, timezone
|
||||||
@@ -13,6 +17,30 @@ from cryptography.hazmat.primitives.ciphers.aead import AESGCM
|
|||||||
|
|
||||||
from .encryption import EncryptionError, EncryptionProvider, EncryptionResult
|
from .encryption import EncryptionError, EncryptionProvider, EncryptionResult
|
||||||
|
|
||||||
|
if sys.platform != "win32":
|
||||||
|
import fcntl
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def _set_secure_file_permissions(file_path: Path) -> None:
|
||||||
|
"""Set restrictive file permissions (owner read/write only)."""
|
||||||
|
if sys.platform == "win32":
|
||||||
|
try:
|
||||||
|
username = os.environ.get("USERNAME", "")
|
||||||
|
if username:
|
||||||
|
subprocess.run(
|
||||||
|
["icacls", str(file_path), "/inheritance:r",
|
||||||
|
"/grant:r", f"{username}:F"],
|
||||||
|
check=True, capture_output=True
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
logger.warning("Could not set secure permissions on %s: USERNAME not set", file_path)
|
||||||
|
except (subprocess.SubprocessError, OSError) as exc:
|
||||||
|
logger.warning("Failed to set secure permissions on %s: %s", file_path, exc)
|
||||||
|
else:
|
||||||
|
os.chmod(file_path, 0o600)
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class KMSKey:
|
class KMSKey:
|
||||||
@@ -78,7 +106,7 @@ class KMSEncryptionProvider(EncryptionProvider):
|
|||||||
aesgcm = AESGCM(data_key)
|
aesgcm = AESGCM(data_key)
|
||||||
nonce = secrets.token_bytes(12)
|
nonce = secrets.token_bytes(12)
|
||||||
ciphertext = aesgcm.encrypt(nonce, plaintext,
|
ciphertext = aesgcm.encrypt(nonce, plaintext,
|
||||||
json.dumps(context).encode() if context else None)
|
json.dumps(context, sort_keys=True).encode() if context else None)
|
||||||
|
|
||||||
return EncryptionResult(
|
return EncryptionResult(
|
||||||
ciphertext=ciphertext,
|
ciphertext=ciphertext,
|
||||||
@@ -90,15 +118,26 @@ class KMSEncryptionProvider(EncryptionProvider):
|
|||||||
def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
|
def decrypt(self, ciphertext: bytes, nonce: bytes, encrypted_data_key: bytes,
|
||||||
key_id: str, context: Dict[str, str] | None = None) -> bytes:
|
key_id: str, context: Dict[str, str] | None = None) -> bytes:
|
||||||
"""Decrypt data using envelope encryption with KMS."""
|
"""Decrypt data using envelope encryption with KMS."""
|
||||||
# Note: Data key is encrypted without context (AAD), so we decrypt without context
|
|
||||||
data_key = self.kms.decrypt_data_key(key_id, encrypted_data_key, context=None)
|
data_key = self.kms.decrypt_data_key(key_id, encrypted_data_key, context=None)
|
||||||
|
if len(data_key) != 32:
|
||||||
|
raise EncryptionError("Invalid data key size")
|
||||||
|
|
||||||
aesgcm = AESGCM(data_key)
|
aesgcm = AESGCM(data_key)
|
||||||
try:
|
try:
|
||||||
return aesgcm.decrypt(nonce, ciphertext,
|
return aesgcm.decrypt(nonce, ciphertext,
|
||||||
json.dumps(context).encode() if context else None)
|
json.dumps(context, sort_keys=True).encode() if context else None)
|
||||||
except Exception as exc:
|
except Exception as exc:
|
||||||
raise EncryptionError(f"Failed to decrypt data: {exc}") from exc
|
logger.debug("KMS decryption failed: %s", exc)
|
||||||
|
raise EncryptionError("Failed to decrypt data") from exc
|
||||||
|
|
||||||
|
def decrypt_data_key(self, encrypted_data_key: bytes, key_id: str | None = None) -> bytes:
|
||||||
|
"""Decrypt an encrypted data key using KMS."""
|
||||||
|
if key_id is None:
|
||||||
|
key_id = self.key_id
|
||||||
|
data_key = self.kms.decrypt_data_key(key_id, encrypted_data_key, context=None)
|
||||||
|
if len(data_key) != 32:
|
||||||
|
raise EncryptionError("Invalid data key size")
|
||||||
|
return data_key
|
||||||
|
|
||||||
|
|
||||||
class KMSManager:
|
class KMSManager:
|
||||||
@@ -108,27 +147,52 @@ class KMSManager:

     Keys are stored encrypted on disk.
     """

-    def __init__(self, keys_path: Path, master_key_path: Path):
+    def __init__(
+        self,
+        keys_path: Path,
+        master_key_path: Path,
+        generate_data_key_min_bytes: int = 1,
+        generate_data_key_max_bytes: int = 1024,
+    ):
         self.keys_path = keys_path
         self.master_key_path = master_key_path
+        self.generate_data_key_min_bytes = generate_data_key_min_bytes
+        self.generate_data_key_max_bytes = generate_data_key_max_bytes
         self._keys: Dict[str, KMSKey] = {}
         self._master_key: bytes | None = None
+        self._master_aesgcm: AESGCM | None = None
         self._loaded = False

     @property
     def master_key(self) -> bytes:
-        """Load or create the master key for encrypting KMS keys."""
+        """Load or create the master key for encrypting KMS keys (with file locking)."""
         if self._master_key is None:
+            lock_path = self.master_key_path.with_suffix(".lock")
+            lock_path.parent.mkdir(parents=True, exist_ok=True)
+            with open(lock_path, "w") as lock_file:
+                if sys.platform == "win32":
+                    import msvcrt
+                    msvcrt.locking(lock_file.fileno(), msvcrt.LK_LOCK, 1)
+                else:
+                    fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
+                try:
                     if self.master_key_path.exists():
                         self._master_key = base64.b64decode(
                             self.master_key_path.read_text().strip()
                         )
                     else:
                         self._master_key = secrets.token_bytes(32)
-                        self.master_key_path.parent.mkdir(parents=True, exist_ok=True)
                         self.master_key_path.write_text(
                             base64.b64encode(self._master_key).decode()
                         )
+                        _set_secure_file_permissions(self.master_key_path)
+                finally:
+                    if sys.platform == "win32":
+                        import msvcrt
+                        msvcrt.locking(lock_file.fileno(), msvcrt.LK_UNLCK, 1)
+                    else:
+                        fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)
+            self._master_aesgcm = AESGCM(self._master_key)
         return self._master_key

     def _load_keys(self) -> None:
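The lock/unlock dance above is easy to get wrong when repeated, so the same cross-platform pattern is often wrapped in a context manager. A minimal sketch of that refactor; exclusive_file_lock is a hypothetical helper, not part of this diff:

    import sys
    from contextlib import contextmanager
    from pathlib import Path

    @contextmanager
    def exclusive_file_lock(lock_path: Path):
        """Hold an exclusive advisory lock for the duration of the block."""
        lock_path.parent.mkdir(parents=True, exist_ok=True)
        with open(lock_path, "w") as lock_file:
            if sys.platform == "win32":
                import msvcrt
                msvcrt.locking(lock_file.fileno(), msvcrt.LK_LOCK, 1)
            else:
                import fcntl
                fcntl.flock(lock_file.fileno(), fcntl.LOCK_EX)
            try:
                yield
            finally:
                if sys.platform == "win32":
                    import msvcrt
                    msvcrt.locking(lock_file.fileno(), msvcrt.LK_UNLCK, 1)
                else:
                    import fcntl
                    fcntl.flock(lock_file.fileno(), fcntl.LOCK_UN)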
@@ -145,8 +209,10 @@ class KMSManager:
                     encrypted = base64.b64decode(key_data["EncryptedKeyMaterial"])
                     key.key_material = self._decrypt_key_material(encrypted)
                     self._keys[key.key_id] = key
-        except Exception:
-            pass
+        except json.JSONDecodeError as exc:
+            logger.error("Failed to parse KMS keys file: %s", exc)
+        except (ValueError, KeyError) as exc:
+            logger.error("Invalid KMS key data: %s", exc)

         self._loaded = True
@@ -164,20 +230,19 @@ class KMSManager:
             json.dumps({"keys": keys_data}, indent=2),
             encoding="utf-8"
         )
+        _set_secure_file_permissions(self.keys_path)

     def _encrypt_key_material(self, key_material: bytes) -> bytes:
-        """Encrypt key material with the master key."""
-        aesgcm = AESGCM(self.master_key)
+        _ = self.master_key
         nonce = secrets.token_bytes(12)
-        ciphertext = aesgcm.encrypt(nonce, key_material, None)
+        ciphertext = self._master_aesgcm.encrypt(nonce, key_material, None)
         return nonce + ciphertext

     def _decrypt_key_material(self, encrypted: bytes) -> bytes:
-        """Decrypt key material with the master key."""
-        aesgcm = AESGCM(self.master_key)
+        _ = self.master_key
         nonce = encrypted[:12]
         ciphertext = encrypted[12:]
-        return aesgcm.decrypt(nonce, ciphertext, None)
+        return self._master_aesgcm.decrypt(nonce, ciphertext, None)

     def create_key(self, description: str = "", key_id: str | None = None) -> KMSKey:
         """Create a new KMS key."""
@@ -269,7 +334,7 @@ class KMSManager:

         aesgcm = AESGCM(key.key_material)
         nonce = secrets.token_bytes(12)
-        aad = json.dumps(context).encode() if context else None
+        aad = json.dumps(context, sort_keys=True).encode() if context else None
         ciphertext = aesgcm.encrypt(nonce, plaintext, aad)

         key_id_bytes = key_id.encode("utf-8")
@@ -298,17 +363,24 @@ class KMSManager:
         encrypted = rest[12:]

         aesgcm = AESGCM(key.key_material)
-        aad = json.dumps(context).encode() if context else None
+        aad = json.dumps(context, sort_keys=True).encode() if context else None
         try:
             plaintext = aesgcm.decrypt(nonce, encrypted, aad)
             return plaintext, key_id
         except Exception as exc:
-            raise EncryptionError(f"Decryption failed: {exc}") from exc
+            logger.debug("KMS decrypt operation failed: %s", exc)
+            raise EncryptionError("Decryption failed") from exc

     def generate_data_key(self, key_id: str,
-                          context: Dict[str, str] | None = None) -> tuple[bytes, bytes]:
+                          context: Dict[str, str] | None = None,
+                          key_spec: str = "AES_256") -> tuple[bytes, bytes]:
         """Generate a data key and return both plaintext and encrypted versions.
+
+        Args:
+            key_id: The KMS key ID to use for encryption
+            context: Optional encryption context
+            key_spec: Key specification - AES_128 or AES_256 (default)

         Returns:
             Tuple of (plaintext_key, encrypted_key)
         """
@@ -319,7 +391,8 @@ class KMSManager:
         if not key.enabled:
             raise EncryptionError(f"Key is disabled: {key_id}")

-        plaintext_key = secrets.token_bytes(32)
+        key_bytes = 32 if key_spec == "AES_256" else 16
+        plaintext_key = secrets.token_bytes(key_bytes)

         encrypted_key = self.encrypt(key_id, plaintext_key, context)

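A caller's view of the new key_spec parameter; the bucket name in the context is illustrative:

    # 32-byte key for AES_256 (the default), 16 bytes for AES_128.
    plaintext_key, encrypted_key = kms.generate_data_key(
        key_id, context={"bucket": "photos"}, key_spec="AES_256"
    )
    assert len(plaintext_key) == 32
    # Only encrypted_key is persisted; the plaintext key lives in memory
    # just long enough to encrypt or decrypt one object.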
@@ -331,22 +404,6 @@ class KMSManager:
         plaintext, _ = self.decrypt(encrypted_key, context)
         return plaintext

-    def get_provider(self, key_id: str | None = None) -> KMSEncryptionProvider:
-        """Get an encryption provider for a specific key."""
-        self._load_keys()
-
-        if key_id is None:
-            if not self._keys:
-                key = self.create_key("Default KMS Key")
-                key_id = key.key_id
-            else:
-                key_id = next(iter(self._keys.keys()))
-
-        if key_id not in self._keys:
-            raise EncryptionError(f"Key not found: {key_id}")
-
-        return KMSEncryptionProvider(self, key_id)
-
     def re_encrypt(self, ciphertext: bytes, destination_key_id: str,
                    source_context: Dict[str, str] | None = None,
                    destination_context: Dict[str, str] | None = None) -> bytes:
@@ -358,6 +415,8 @@ class KMSManager:

     def generate_random(self, num_bytes: int = 32) -> bytes:
         """Generate cryptographically secure random bytes."""
-        if num_bytes < 1 or num_bytes > 1024:
-            raise EncryptionError("Number of bytes must be between 1 and 1024")
+        if num_bytes < self.generate_data_key_min_bytes or num_bytes > self.generate_data_key_max_bytes:
+            raise EncryptionError(
+                f"Number of bytes must be between {self.generate_data_key_min_bytes} and {self.generate_data_key_max_bytes}"
+            )
         return secrets.token_bytes(num_bytes)
@@ -71,10 +71,9 @@ class LifecycleExecutionRecord:


 class LifecycleHistoryStore:
-    MAX_HISTORY_PER_BUCKET = 50
-
-    def __init__(self, storage_root: Path) -> None:
+    def __init__(self, storage_root: Path, max_history_per_bucket: int = 50) -> None:
         self.storage_root = storage_root
+        self.max_history_per_bucket = max_history_per_bucket
         self._lock = threading.Lock()

     def _get_history_path(self, bucket_name: str) -> Path:
@@ -95,7 +94,7 @@ class LifecycleHistoryStore:
     def save_history(self, bucket_name: str, records: List[LifecycleExecutionRecord]) -> None:
         path = self._get_history_path(bucket_name)
         path.parent.mkdir(parents=True, exist_ok=True)
-        data = {"executions": [r.to_dict() for r in records[:self.MAX_HISTORY_PER_BUCKET]]}
+        data = {"executions": [r.to_dict() for r in records[:self.max_history_per_bucket]]}
         try:
             with open(path, "w") as f:
                 json.dump(data, f, indent=2)
@@ -114,14 +113,20 @@ class LifecycleHistoryStore:


 class LifecycleManager:
-    def __init__(self, storage: ObjectStorage, interval_seconds: int = 3600, storage_root: Optional[Path] = None):
+    def __init__(
+        self,
+        storage: ObjectStorage,
+        interval_seconds: int = 3600,
+        storage_root: Optional[Path] = None,
+        max_history_per_bucket: int = 50,
+    ):
         self.storage = storage
         self.interval_seconds = interval_seconds
         self.storage_root = storage_root
         self._timer: Optional[threading.Timer] = None
         self._shutdown = False
         self._lock = threading.Lock()
-        self.history_store = LifecycleHistoryStore(storage_root) if storage_root else None
+        self.history_store = LifecycleHistoryStore(storage_root, max_history_per_bucket) if storage_root else None

     def start(self) -> None:
         if self._timer is not None:
@@ -1,8 +1,10 @@
 from __future__ import annotations

+import ipaddress
 import json
 import logging
 import queue
+import socket
 import threading
 import time
 import uuid
@@ -14,6 +16,48 @@ from urllib.parse import urlparse

 import requests


+def _is_safe_url(url: str, allow_internal: bool = False) -> bool:
+    """Check if a URL is safe to make requests to (not internal/private).
+
+    Args:
+        url: The URL to check.
+        allow_internal: If True, allows internal/private IP addresses.
+            Use for self-hosted deployments on internal networks.
+    """
+    try:
+        parsed = urlparse(url)
+        hostname = parsed.hostname
+        if not hostname:
+            return False
+        cloud_metadata_hosts = {
+            "metadata.google.internal",
+            "169.254.169.254",
+        }
+        if hostname.lower() in cloud_metadata_hosts:
+            return False
+        if allow_internal:
+            return True
+        blocked_hosts = {
+            "localhost",
+            "127.0.0.1",
+            "0.0.0.0",
+            "::1",
+            "[::1]",
+        }
+        if hostname.lower() in blocked_hosts:
+            return False
+        try:
+            resolved_ip = socket.gethostbyname(hostname)
+            ip = ipaddress.ip_address(resolved_ip)
+            if ip.is_private or ip.is_loopback or ip.is_link_local or ip.is_reserved:
+                return False
+        except (socket.gaierror, ValueError):
+            return False
+        return True
+    except Exception:
+        return False
+
+
 logger = logging.getLogger(__name__)

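A quick illustration of the guard's behavior; return values assume the default allow_internal=False:

    _is_safe_url("https://hooks.example.com/notify")            # True: public endpoint
    _is_safe_url("http://169.254.169.254/latest/")              # False: cloud metadata host
    _is_safe_url("http://localhost:8080/hook")                  # False: loopback blocked
    _is_safe_url("http://10.0.0.5/hook", allow_internal=True)   # True: LAN opt-in

Note the metadata-host check runs before the allow_internal early return, so even internal deployments cannot be pointed at 169.254.169.254.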
@@ -165,8 +209,9 @@ class NotificationConfiguration:


 class NotificationService:
-    def __init__(self, storage_root: Path, worker_count: int = 2):
+    def __init__(self, storage_root: Path, worker_count: int = 2, allow_internal_endpoints: bool = False):
         self.storage_root = storage_root
+        self._allow_internal_endpoints = allow_internal_endpoints
         self._configs: Dict[str, List[NotificationConfiguration]] = {}
         self._queue: queue.Queue[tuple[NotificationEvent, WebhookDestination]] = queue.Queue()
         self._workers: List[threading.Thread] = []
@@ -299,6 +344,8 @@ class NotificationService:
             self._queue.task_done()

     def _send_notification(self, event: NotificationEvent, destination: WebhookDestination) -> None:
+        if not _is_safe_url(destination.url, allow_internal=self._allow_internal_endpoints):
+            raise RuntimeError(f"Blocked request to cloud metadata service (SSRF protection): {destination.url}")
         payload = event.to_s3_event()
         headers = {"Content-Type": "application/json", **destination.headers}

@@ -21,16 +21,20 @@ from .storage import ObjectStorage, StorageError
 logger = logging.getLogger(__name__)

 REPLICATION_USER_AGENT = "S3ReplicationAgent/1.0"
-REPLICATION_CONNECT_TIMEOUT = 5
-REPLICATION_READ_TIMEOUT = 30
-STREAMING_THRESHOLD_BYTES = 10 * 1024 * 1024

 REPLICATION_MODE_NEW_ONLY = "new_only"
 REPLICATION_MODE_ALL = "all"
 REPLICATION_MODE_BIDIRECTIONAL = "bidirectional"


-def _create_s3_client(connection: RemoteConnection, *, health_check: bool = False) -> Any:
+def _create_s3_client(
+    connection: RemoteConnection,
+    *,
+    health_check: bool = False,
+    connect_timeout: int = 5,
+    read_timeout: int = 30,
+    max_retries: int = 2,
+) -> Any:
     """Create a boto3 S3 client for the given connection.

     Args:
         connection: Remote S3 connection configuration
@@ -38,9 +42,9 @@ def _create_s3_client(connection: RemoteConnection, *, health_check: bool = Fals
     """
     config = Config(
         user_agent_extra=REPLICATION_USER_AGENT,
-        connect_timeout=REPLICATION_CONNECT_TIMEOUT,
-        read_timeout=REPLICATION_READ_TIMEOUT,
-        retries={'max_attempts': 1 if health_check else 2},
+        connect_timeout=connect_timeout,
+        read_timeout=read_timeout,
+        retries={'max_attempts': 1 if health_check else max_retries},
         signature_version='s3v4',
         s3={'addressing_style': 'path'},
         request_checksum_calculation='when_required',
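For reference, this refactor leans on the standard botocore pattern: timeouts and retry counts live on a botocore Config object passed to the client, instead of module-level constants. A minimal sketch with placeholder endpoint and credentials:

    import boto3
    from botocore.config import Config

    config = Config(
        connect_timeout=5,            # seconds to establish the TCP connection
        read_timeout=30,              # seconds to wait for each response chunk
        retries={"max_attempts": 2},
        signature_version="s3v4",
        s3={"addressing_style": "path"},
    )
    s3 = boto3.client(
        "s3",
        endpoint_url="http://replica.example:5000",   # placeholder
        aws_access_key_id="AKIA...",                  # placeholder
        aws_secret_access_key="...",                  # placeholder
        config=config,
    )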
@@ -133,6 +137,7 @@ class ReplicationRule:
     stats: ReplicationStats = field(default_factory=ReplicationStats)
     sync_deletions: bool = True
     last_pull_at: Optional[float] = None
+    filter_prefix: Optional[str] = None

     def to_dict(self) -> dict:
         return {
@@ -145,6 +150,7 @@ class ReplicationRule:
             "stats": self.stats.to_dict(),
             "sync_deletions": self.sync_deletions,
             "last_pull_at": self.last_pull_at,
+            "filter_prefix": self.filter_prefix,
         }

     @classmethod
@@ -158,22 +164,24 @@ class ReplicationRule:
             data["sync_deletions"] = True
         if "last_pull_at" not in data:
             data["last_pull_at"] = None
+        if "filter_prefix" not in data:
+            data["filter_prefix"] = None
         rule = cls(**data)
         rule.stats = ReplicationStats.from_dict(stats_data) if stats_data else ReplicationStats()
         return rule


 class ReplicationFailureStore:
-    MAX_FAILURES_PER_BUCKET = 50
-
-    def __init__(self, storage_root: Path) -> None:
+    def __init__(self, storage_root: Path, max_failures_per_bucket: int = 50) -> None:
         self.storage_root = storage_root
+        self.max_failures_per_bucket = max_failures_per_bucket
         self._lock = threading.Lock()
+        self._cache: Dict[str, List[ReplicationFailure]] = {}

     def _get_failures_path(self, bucket_name: str) -> Path:
         return self.storage_root / ".myfsio.sys" / "buckets" / bucket_name / "replication_failures.json"

-    def load_failures(self, bucket_name: str) -> List[ReplicationFailure]:
+    def _load_from_disk(self, bucket_name: str) -> List[ReplicationFailure]:
         path = self._get_failures_path(bucket_name)
         if not path.exists():
             return []
@@ -185,16 +193,28 @@ class ReplicationFailureStore:
             logger.error(f"Failed to load replication failures for {bucket_name}: {e}")
             return []

-    def save_failures(self, bucket_name: str, failures: List[ReplicationFailure]) -> None:
+    def _save_to_disk(self, bucket_name: str, failures: List[ReplicationFailure]) -> None:
         path = self._get_failures_path(bucket_name)
         path.parent.mkdir(parents=True, exist_ok=True)
-        data = {"failures": [f.to_dict() for f in failures[:self.MAX_FAILURES_PER_BUCKET]]}
+        data = {"failures": [f.to_dict() for f in failures[:self.max_failures_per_bucket]]}
         try:
             with open(path, "w") as f:
                 json.dump(data, f, indent=2)
         except OSError as e:
             logger.error(f"Failed to save replication failures for {bucket_name}: {e}")

+    def load_failures(self, bucket_name: str) -> List[ReplicationFailure]:
+        if bucket_name in self._cache:
+            return list(self._cache[bucket_name])
+        failures = self._load_from_disk(bucket_name)
+        self._cache[bucket_name] = failures
+        return list(failures)
+
+    def save_failures(self, bucket_name: str, failures: List[ReplicationFailure]) -> None:
+        trimmed = failures[:self.max_failures_per_bucket]
+        self._cache[bucket_name] = trimmed
+        self._save_to_disk(bucket_name, trimmed)
+
     def add_failure(self, bucket_name: str, failure: ReplicationFailure) -> None:
         with self._lock:
             failures = self.load_failures(bucket_name)
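The store now acts as a write-through cache: reads hit memory after the first disk load, and every save updates memory before disk. Returning list(...) hands callers a defensive copy so they cannot mutate the cached list in place. A compressed, hypothetical view of the pattern with generic names:

    class WriteThroughCache:
        def __init__(self, load, save):
            self._cache, self._load, self._save = {}, load, save

        def get(self, key):
            if key not in self._cache:
                self._cache[key] = self._load(key)   # read-through on miss
            return list(self._cache[key])            # defensive copy

        def put(self, key, value):
            self._cache[key] = value                 # memory first...
            self._save(key, value)                   # ...then disk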
@@ -220,6 +240,7 @@ class ReplicationFailureStore:

     def clear_failures(self, bucket_name: str) -> None:
         with self._lock:
+            self._cache.pop(bucket_name, None)
             path = self._get_failures_path(bucket_name)
             if path.exists():
                 path.unlink()
@@ -233,18 +254,43 @@ class ReplicationFailureStore:


 class ReplicationManager:
-    def __init__(self, storage: ObjectStorage, connections: ConnectionStore, rules_path: Path, storage_root: Path) -> None:
+    def __init__(
+        self,
+        storage: ObjectStorage,
+        connections: ConnectionStore,
+        rules_path: Path,
+        storage_root: Path,
+        connect_timeout: int = 5,
+        read_timeout: int = 30,
+        max_retries: int = 2,
+        streaming_threshold_bytes: int = 10 * 1024 * 1024,
+        max_failures_per_bucket: int = 50,
+    ) -> None:
         self.storage = storage
         self.connections = connections
         self.rules_path = rules_path
         self.storage_root = storage_root
+        self.connect_timeout = connect_timeout
+        self.read_timeout = read_timeout
+        self.max_retries = max_retries
+        self.streaming_threshold_bytes = streaming_threshold_bytes
         self._rules: Dict[str, ReplicationRule] = {}
         self._stats_lock = threading.Lock()
         self._executor = ThreadPoolExecutor(max_workers=4, thread_name_prefix="ReplicationWorker")
         self._shutdown = False
-        self.failure_store = ReplicationFailureStore(storage_root)
+        self.failure_store = ReplicationFailureStore(storage_root, max_failures_per_bucket)
         self.reload_rules()

+    def _create_client(self, connection: RemoteConnection, *, health_check: bool = False) -> Any:
+        """Create an S3 client with the manager's configured timeouts."""
+        return _create_s3_client(
+            connection,
+            health_check=health_check,
+            connect_timeout=self.connect_timeout,
+            read_timeout=self.read_timeout,
+            max_retries=self.max_retries,
+        )
+
     def shutdown(self, wait: bool = True) -> None:
         """Shutdown the replication executor gracefully.

@@ -280,7 +326,7 @@ class ReplicationManager:
         Uses short timeouts to prevent blocking.
         """
         try:
-            s3 = _create_s3_client(connection, health_check=True)
+            s3 = self._create_client(connection, health_check=True)
             s3.list_buckets()
             return True
         except Exception as e:
@@ -290,6 +336,9 @@ class ReplicationManager:
     def get_rule(self, bucket_name: str) -> Optional[ReplicationRule]:
         return self._rules.get(bucket_name)

+    def list_rules(self) -> List[ReplicationRule]:
+        return list(self._rules.values())
+
     def set_rule(self, rule: ReplicationRule) -> None:
         old_rule = self._rules.get(rule.bucket_name)
         was_all_mode = old_rule and old_rule.mode == REPLICATION_MODE_ALL if old_rule else False
@@ -329,7 +378,7 @@ class ReplicationManager:
         source_objects = self.storage.list_objects_all(bucket_name)
         source_keys = {obj.key: obj.size for obj in source_objects}

-        s3 = _create_s3_client(connection)
+        s3 = self._create_client(connection)

         dest_keys = set()
         bytes_synced = 0
@@ -395,7 +444,7 @@ class ReplicationManager:
             raise ValueError(f"Connection {connection_id} not found")

         try:
-            s3 = _create_s3_client(connection)
+            s3 = self._create_client(connection)
             s3.create_bucket(Bucket=bucket_name)
         except ClientError as e:
             logger.error(f"Failed to create remote bucket {bucket_name}: {e}")
@@ -438,7 +487,7 @@ class ReplicationManager:
             return

         try:
-            s3 = _create_s3_client(conn)
+            s3 = self._create_client(conn)

             if action == "delete":
                 try:
@@ -481,7 +530,7 @@ class ReplicationManager:
         if content_type:
             extra_args["ContentType"] = content_type

-        if file_size >= STREAMING_THRESHOLD_BYTES:
+        if file_size >= self.streaming_threshold_bytes:
             s3.upload_file(
                 str(path),
                 rule.target_bucket,
app/s3_api.py (811 changed lines; diff suppressed because it is too large)

app/s3_client.py (new file, 284 lines)
@@ -0,0 +1,284 @@
+from __future__ import annotations
+
+import json
+import logging
+import threading
+import time
+from typing import Any, Generator, Optional
+
+import boto3
+from botocore.config import Config
+from botocore.exceptions import ClientError, EndpointConnectionError, ConnectionClosedError
+from flask import current_app, session
+
+logger = logging.getLogger(__name__)
+
+UI_PROXY_USER_AGENT = "MyFSIO-UIProxy/1.0"
+
+_BOTO_ERROR_MAP = {
+    "NoSuchBucket": 404,
+    "NoSuchKey": 404,
+    "NoSuchUpload": 404,
+    "BucketAlreadyExists": 409,
+    "BucketAlreadyOwnedByYou": 409,
+    "BucketNotEmpty": 409,
+    "AccessDenied": 403,
+    "InvalidAccessKeyId": 403,
+    "SignatureDoesNotMatch": 403,
+    "InvalidBucketName": 400,
+    "InvalidArgument": 400,
+    "MalformedXML": 400,
+    "EntityTooLarge": 400,
+    "QuotaExceeded": 403,
+}
+
+_UPLOAD_REGISTRY_MAX_AGE = 86400
+_UPLOAD_REGISTRY_CLEANUP_INTERVAL = 3600
+
+
+class UploadRegistry:
+    def __init__(self) -> None:
+        self._entries: dict[str, tuple[str, str, float]] = {}
+        self._lock = threading.Lock()
+        self._last_cleanup = time.monotonic()
+
+    def register(self, upload_id: str, bucket_name: str, object_key: str) -> None:
+        with self._lock:
+            self._entries[upload_id] = (bucket_name, object_key, time.monotonic())
+            self._maybe_cleanup()
+
+    def get_key(self, upload_id: str, bucket_name: str) -> Optional[str]:
+        with self._lock:
+            entry = self._entries.get(upload_id)
+            if entry is None:
+                return None
+            stored_bucket, key, created_at = entry
+            if stored_bucket != bucket_name:
+                return None
+            if time.monotonic() - created_at > _UPLOAD_REGISTRY_MAX_AGE:
+                del self._entries[upload_id]
+                return None
+            return key
+
+    def remove(self, upload_id: str) -> None:
+        with self._lock:
+            self._entries.pop(upload_id, None)
+
+    def _maybe_cleanup(self) -> None:
+        now = time.monotonic()
+        if now - self._last_cleanup < _UPLOAD_REGISTRY_CLEANUP_INTERVAL:
+            return
+        self._last_cleanup = now
+        cutoff = now - _UPLOAD_REGISTRY_MAX_AGE
+        stale = [uid for uid, (_, _, ts) in self._entries.items() if ts < cutoff]
+        for uid in stale:
+            del self._entries[uid]
+
+
+class S3ProxyClient:
+    def __init__(self, api_base_url: str, region: str = "us-east-1") -> None:
+        if not api_base_url:
+            raise ValueError("api_base_url is required for S3ProxyClient")
+        self._api_base_url = api_base_url.rstrip("/")
+        self._region = region
+        self.upload_registry = UploadRegistry()
+
+    @property
+    def api_base_url(self) -> str:
+        return self._api_base_url
+
+    def get_client(self, access_key: str, secret_key: str) -> Any:
+        if not access_key or not secret_key:
+            raise ValueError("Both access_key and secret_key are required")
+        config = Config(
+            user_agent_extra=UI_PROXY_USER_AGENT,
+            connect_timeout=5,
+            read_timeout=30,
+            retries={"max_attempts": 0},
+            signature_version="s3v4",
+            s3={"addressing_style": "path"},
+            request_checksum_calculation="when_required",
+            response_checksum_validation="when_required",
+        )
+        return boto3.client(
+            "s3",
+            endpoint_url=self._api_base_url,
+            aws_access_key_id=access_key,
+            aws_secret_access_key=secret_key,
+            region_name=self._region,
+            config=config,
+        )
+
+
+def _get_proxy() -> S3ProxyClient:
+    proxy = current_app.extensions.get("s3_proxy")
+    if proxy is None:
+        raise RuntimeError(
+            "S3 proxy not configured. Set API_BASE_URL or run both API and UI servers."
+        )
+    return proxy
+
+
+def _get_session_creds() -> tuple[str, str]:
+    secret_store = current_app.extensions["secret_store"]
+    secret_store.purge_expired()
+    token = session.get("cred_token")
+    if not token:
+        raise PermissionError("Not authenticated")
+    creds = secret_store.peek(token)
+    if not creds:
+        raise PermissionError("Session expired")
+    access_key = creds.get("access_key", "")
+    secret_key = creds.get("secret_key", "")
+    if not access_key or not secret_key:
+        raise PermissionError("Invalid session credentials")
+    return access_key, secret_key
+
+
+def get_session_s3_client() -> Any:
+    proxy = _get_proxy()
+    access_key, secret_key = _get_session_creds()
+    return proxy.get_client(access_key, secret_key)
+
+
+def get_upload_registry() -> UploadRegistry:
+    return _get_proxy().upload_registry
+
+
+def handle_client_error(exc: ClientError) -> tuple[dict[str, str], int]:
+    error_info = exc.response.get("Error", {})
+    code = error_info.get("Code", "InternalError")
+    message = error_info.get("Message") or "S3 operation failed"
+    http_status = _BOTO_ERROR_MAP.get(code)
+    if http_status is None:
+        http_status = exc.response.get("ResponseMetadata", {}).get("HTTPStatusCode", 500)
+    return {"error": message}, http_status
+
+
+def handle_connection_error(exc: Exception) -> tuple[dict[str, str], int]:
+    logger.error("S3 API connection failed: %s", exc)
+    return {"error": "S3 API server is unreachable. Ensure the API server is running."}, 502
+
+
+def format_datetime_display(dt: Any, display_tz: str = "UTC") -> str:
+    from .ui import _format_datetime_display
+    return _format_datetime_display(dt, display_tz)
+
+
+def format_datetime_iso(dt: Any, display_tz: str = "UTC") -> str:
+    from .ui import _format_datetime_iso
+    return _format_datetime_iso(dt, display_tz)
+
+
+def build_url_templates(bucket_name: str) -> dict[str, str]:
+    from flask import url_for
+    preview_t = url_for("ui.object_preview", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
+    delete_t = url_for("ui.delete_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
+    presign_t = url_for("ui.object_presign", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
+    versions_t = url_for("ui.object_versions", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
+    restore_t = url_for(
+        "ui.restore_object_version",
+        bucket_name=bucket_name,
+        object_key="KEY_PLACEHOLDER",
+        version_id="VERSION_ID_PLACEHOLDER",
+    )
+    tags_t = url_for("ui.object_tags", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
+    copy_t = url_for("ui.copy_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
+    move_t = url_for("ui.move_object", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
+    metadata_t = url_for("ui.object_metadata", bucket_name=bucket_name, object_key="KEY_PLACEHOLDER")
+    return {
+        "preview": preview_t,
+        "download": preview_t + "?download=1",
+        "presign": presign_t,
+        "delete": delete_t,
+        "versions": versions_t,
+        "restore": restore_t,
+        "tags": tags_t,
+        "copy": copy_t,
+        "move": move_t,
+        "metadata": metadata_t,
+    }
+
+
+def translate_list_objects(
+    boto3_response: dict[str, Any],
+    url_templates: dict[str, str],
+    display_tz: str = "UTC",
+    versioning_enabled: bool = False,
+) -> dict[str, Any]:
+    objects_data = []
+    for obj in boto3_response.get("Contents", []):
+        last_mod = obj["LastModified"]
+        objects_data.append({
+            "key": obj["Key"],
+            "size": obj["Size"],
+            "last_modified": last_mod.isoformat(),
+            "last_modified_display": format_datetime_display(last_mod, display_tz),
+            "last_modified_iso": format_datetime_iso(last_mod, display_tz),
+            "etag": obj.get("ETag", "").strip('"'),
+        })
+    return {
+        "objects": objects_data,
+        "is_truncated": boto3_response.get("IsTruncated", False),
+        "next_continuation_token": boto3_response.get("NextContinuationToken"),
+        "total_count": boto3_response.get("KeyCount", len(objects_data)),
+        "versioning_enabled": versioning_enabled,
+        "url_templates": url_templates,
+    }
+
+
+def get_versioning_via_s3(client: Any, bucket_name: str) -> bool:
+    try:
+        resp = client.get_bucket_versioning(Bucket=bucket_name)
+        return resp.get("Status") == "Enabled"
+    except ClientError as exc:
+        code = exc.response.get("Error", {}).get("Code", "")
+        if code != "NoSuchBucket":
+            logger.warning("Failed to check versioning for %s: %s", bucket_name, code)
+        return False
+
+
+def stream_objects_ndjson(
+    client: Any,
+    bucket_name: str,
+    prefix: Optional[str],
+    url_templates: dict[str, str],
+    display_tz: str = "UTC",
+    versioning_enabled: bool = False,
+) -> Generator[str, None, None]:
+    meta_line = json.dumps({
+        "type": "meta",
+        "versioning_enabled": versioning_enabled,
+        "url_templates": url_templates,
+    }) + "\n"
+    yield meta_line
+
+    yield json.dumps({"type": "count", "total_count": 0}) + "\n"
+
+    kwargs: dict[str, Any] = {"Bucket": bucket_name, "MaxKeys": 1000}
+    if prefix:
+        kwargs["Prefix"] = prefix
+
+    try:
+        paginator = client.get_paginator("list_objects_v2")
+        for page in paginator.paginate(**kwargs):
+            for obj in page.get("Contents", []):
+                last_mod = obj["LastModified"]
+                yield json.dumps({
+                    "type": "object",
+                    "key": obj["Key"],
+                    "size": obj["Size"],
+                    "last_modified": last_mod.isoformat(),
+                    "last_modified_display": format_datetime_display(last_mod, display_tz),
+                    "last_modified_iso": format_datetime_iso(last_mod, display_tz),
+                    "etag": obj.get("ETag", "").strip('"'),
+                }) + "\n"
+    except ClientError as exc:
+        error_msg = exc.response.get("Error", {}).get("Message", "S3 operation failed")
+        yield json.dumps({"type": "error", "error": error_msg}) + "\n"
+        return
+    except (EndpointConnectionError, ConnectionClosedError):
+        yield json.dumps({"type": "error", "error": "S3 API server is unreachable"}) + "\n"
+        return
+
+    yield json.dumps({"type": "done"}) + "\n"
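The NDJSON stream above is meant to be consumed line by line: every line is a self-contained JSON record tagged by "type" (meta, count, object, error, done). A minimal consumer sketch; the URL is a placeholder:

    import json
    import requests

    url = "http://ui.example/buckets/photos/objects.ndjson"  # placeholder
    with requests.get(url, stream=True, timeout=30) as resp:
        for raw in resp.iter_lines():
            if not raw:
                continue
            record = json.loads(raw)
            if record["type"] == "object":
                print(record["key"], record["size"])
            elif record["type"] == "error":
                raise RuntimeError(record["error"])
            elif record["type"] == "done":
                break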
@@ -18,6 +18,18 @@ class EphemeralSecretStore:
         self._store[token] = (payload, expires_at)
         return token

+    def peek(self, token: str | None) -> Any | None:
+        if not token:
+            return None
+        entry = self._store.get(token)
+        if not entry:
+            return None
+        payload, expires_at = entry
+        if expires_at < time.time():
+            self._store.pop(token, None)
+            return None
+        return payload
+
     def pop(self, token: str | None) -> Any | None:
         if not token:
             return None
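peek() differs from pop() only in that it leaves the entry in place, which is what lets _get_session_creds() in app/s3_client.py read the same credentials on every request. In sketch form; `token` came from whatever store method issued it (only the tail of that method is visible in this hunk):

    creds = store.peek(token)    # read without consuming; None if expired
    creds = store.peek(token)    # same payload again on the next request
    creds = store.pop(token)     # read and remove
    assert store.peek(token) is None

Both methods also evict expired entries on access, so a stale token self-destructs on first use.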
app/select_content.py (new file, 171 lines)
@@ -0,0 +1,171 @@
+"""S3 SelectObjectContent SQL query execution using DuckDB."""
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Any, Dict, Generator, Optional
+
+try:
+    import duckdb
+    DUCKDB_AVAILABLE = True
+except ImportError:
+    DUCKDB_AVAILABLE = False
+
+
+class SelectError(Exception):
+    """Error during SELECT query execution."""
+    pass
+
+
+def execute_select_query(
+    file_path: Path,
+    expression: str,
+    input_format: str,
+    input_config: Dict[str, Any],
+    output_format: str,
+    output_config: Dict[str, Any],
+    chunk_size: int = 65536,
+) -> Generator[bytes, None, None]:
+    """Execute SQL query on object content."""
+    if not DUCKDB_AVAILABLE:
+        raise SelectError("DuckDB is not installed. Install with: pip install duckdb")
+
+    conn = duckdb.connect(":memory:")
+
+    try:
+        if input_format == "CSV":
+            _load_csv(conn, file_path, input_config)
+        elif input_format == "JSON":
+            _load_json(conn, file_path, input_config)
+        elif input_format == "Parquet":
+            _load_parquet(conn, file_path)
+        else:
+            raise SelectError(f"Unsupported input format: {input_format}")
+
+        normalized_expression = expression.replace("s3object", "data").replace("S3Object", "data")
+
+        try:
+            result = conn.execute(normalized_expression)
+        except duckdb.Error as exc:
+            raise SelectError(f"SQL execution error: {exc}")
+
+        if output_format == "CSV":
+            yield from _output_csv(result, output_config, chunk_size)
+        elif output_format == "JSON":
+            yield from _output_json(result, output_config, chunk_size)
+        else:
+            raise SelectError(f"Unsupported output format: {output_format}")
+
+    finally:
+        conn.close()
+
+
+def _load_csv(conn, file_path: Path, config: Dict[str, Any]) -> None:
+    """Load CSV file into DuckDB."""
+    file_header_info = config.get("file_header_info", "NONE")
+    delimiter = config.get("field_delimiter", ",")
+    quote = config.get("quote_character", '"')
+
+    header = file_header_info in ("USE", "IGNORE")
+    path_str = str(file_path).replace("\\", "/")
+
+    conn.execute(f"""
+        CREATE TABLE data AS
+        SELECT * FROM read_csv('{path_str}',
+            header={header},
+            delim='{delimiter}',
+            quote='{quote}'
+        )
+    """)
+
+
+def _load_json(conn, file_path: Path, config: Dict[str, Any]) -> None:
+    """Load JSON file into DuckDB."""
+    json_type = config.get("type", "DOCUMENT")
+    path_str = str(file_path).replace("\\", "/")
+
+    if json_type == "LINES":
+        conn.execute(f"""
+            CREATE TABLE data AS
+            SELECT * FROM read_json_auto('{path_str}', format='newline_delimited')
+        """)
+    else:
+        conn.execute(f"""
+            CREATE TABLE data AS
+            SELECT * FROM read_json_auto('{path_str}', format='array')
+        """)
+
+
+def _load_parquet(conn, file_path: Path) -> None:
+    """Load Parquet file into DuckDB."""
+    path_str = str(file_path).replace("\\", "/")
+    conn.execute(f"CREATE TABLE data AS SELECT * FROM read_parquet('{path_str}')")
+
+
+def _output_csv(
+    result,
+    config: Dict[str, Any],
+    chunk_size: int,
+) -> Generator[bytes, None, None]:
+    """Output query results as CSV."""
+    delimiter = config.get("field_delimiter", ",")
+    record_delimiter = config.get("record_delimiter", "\n")
+    quote = config.get("quote_character", '"')
+
+    buffer = ""
+
+    while True:
+        rows = result.fetchmany(1000)
+        if not rows:
+            break
+
+        for row in rows:
+            fields = []
+            for value in row:
+                if value is None:
+                    fields.append("")
+                elif isinstance(value, str):
+                    if delimiter in value or quote in value or record_delimiter in value:
+                        escaped = value.replace(quote, quote + quote)
+                        fields.append(f'{quote}{escaped}{quote}')
+                    else:
+                        fields.append(value)
+                else:
+                    fields.append(str(value))
+
+            buffer += delimiter.join(fields) + record_delimiter
+
+            while len(buffer) >= chunk_size:
+                yield buffer[:chunk_size].encode("utf-8")
+                buffer = buffer[chunk_size:]
+
+    if buffer:
+        yield buffer.encode("utf-8")
+
+
+def _output_json(
+    result,
+    config: Dict[str, Any],
+    chunk_size: int,
+) -> Generator[bytes, None, None]:
+    """Output query results as JSON Lines."""
+    record_delimiter = config.get("record_delimiter", "\n")
+    columns = [desc[0] for desc in result.description]
+
+    buffer = ""
+
+    while True:
+        rows = result.fetchmany(1000)
+        if not rows:
+            break
+
+        for row in rows:
+            record = dict(zip(columns, row))
+            buffer += json.dumps(record, default=str) + record_delimiter
+
+            while len(buffer) >= chunk_size:
+                yield buffer[:chunk_size].encode("utf-8")
+                buffer = buffer[chunk_size:]
+
+    if buffer:
+        yield buffer.encode("utf-8")
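A minimal caller sketch for the module above; the file path and query are illustrative, and note that the S3Object alias is rewritten to the internal data table before execution:

    from pathlib import Path

    chunks = execute_select_query(
        file_path=Path("/app/data/reports/2024.csv"),   # illustrative path
        expression="SELECT * FROM S3Object WHERE CAST(total AS INT) > 100",
        input_format="CSV",
        input_config={"file_header_info": "USE"},
        output_format="JSON",
        output_config={},
    )
    for chunk in chunks:
        print(chunk.decode("utf-8"), end="")

Results stream out in fixed-size byte chunks, so large result sets never have to be fully buffered in memory.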
app/site_registry.py (new file, 177 lines)
@@ -0,0 +1,177 @@
+from __future__ import annotations
+
+import json
+import time
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Dict, List, Optional
+
+
+@dataclass
+class SiteInfo:
+    site_id: str
+    endpoint: str
+    region: str = "us-east-1"
+    priority: int = 100
+    display_name: str = ""
+    created_at: Optional[float] = None
+    updated_at: Optional[float] = None
+
+    def __post_init__(self) -> None:
+        if not self.display_name:
+            self.display_name = self.site_id
+        if self.created_at is None:
+            self.created_at = time.time()
+
+    def to_dict(self) -> Dict[str, Any]:
+        return {
+            "site_id": self.site_id,
+            "endpoint": self.endpoint,
+            "region": self.region,
+            "priority": self.priority,
+            "display_name": self.display_name,
+            "created_at": self.created_at,
+            "updated_at": self.updated_at,
+        }
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> SiteInfo:
+        return cls(
+            site_id=data["site_id"],
+            endpoint=data.get("endpoint", ""),
+            region=data.get("region", "us-east-1"),
+            priority=data.get("priority", 100),
+            display_name=data.get("display_name", ""),
+            created_at=data.get("created_at"),
+            updated_at=data.get("updated_at"),
+        )
+
+
+@dataclass
+class PeerSite:
+    site_id: str
+    endpoint: str
+    region: str = "us-east-1"
+    priority: int = 100
+    display_name: str = ""
+    created_at: Optional[float] = None
+    updated_at: Optional[float] = None
+    connection_id: Optional[str] = None
+    is_healthy: Optional[bool] = None
+    last_health_check: Optional[float] = None
+
+    def __post_init__(self) -> None:
+        if not self.display_name:
+            self.display_name = self.site_id
+        if self.created_at is None:
+            self.created_at = time.time()
+
+    def to_dict(self) -> Dict[str, Any]:
+        return {
+            "site_id": self.site_id,
+            "endpoint": self.endpoint,
+            "region": self.region,
+            "priority": self.priority,
+            "display_name": self.display_name,
+            "created_at": self.created_at,
+            "updated_at": self.updated_at,
+            "connection_id": self.connection_id,
+            "is_healthy": self.is_healthy,
+            "last_health_check": self.last_health_check,
+        }
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> PeerSite:
+        return cls(
+            site_id=data["site_id"],
+            endpoint=data.get("endpoint", ""),
+            region=data.get("region", "us-east-1"),
+            priority=data.get("priority", 100),
+            display_name=data.get("display_name", ""),
+            created_at=data.get("created_at"),
+            updated_at=data.get("updated_at"),
+            connection_id=data.get("connection_id"),
+            is_healthy=data.get("is_healthy"),
+            last_health_check=data.get("last_health_check"),
+        )
+
+
+class SiteRegistry:
+    def __init__(self, config_path: Path) -> None:
+        self.config_path = config_path
+        self._local_site: Optional[SiteInfo] = None
+        self._peers: Dict[str, PeerSite] = {}
+        self.reload()
+
+    def reload(self) -> None:
+        if not self.config_path.exists():
+            self._local_site = None
+            self._peers = {}
+            return
+
+        try:
+            with open(self.config_path, "r", encoding="utf-8") as f:
+                data = json.load(f)
+
+            if data.get("local"):
+                self._local_site = SiteInfo.from_dict(data["local"])
+            else:
+                self._local_site = None
+
+            self._peers = {}
+            for peer_data in data.get("peers", []):
+                peer = PeerSite.from_dict(peer_data)
+                self._peers[peer.site_id] = peer
+
+        except (OSError, json.JSONDecodeError, KeyError):
+            self._local_site = None
+            self._peers = {}
+
+    def save(self) -> None:
+        self.config_path.parent.mkdir(parents=True, exist_ok=True)
+        data = {
+            "local": self._local_site.to_dict() if self._local_site else None,
+            "peers": [peer.to_dict() for peer in self._peers.values()],
+        }
+        with open(self.config_path, "w", encoding="utf-8") as f:
+            json.dump(data, f, indent=2)
+
+    def get_local_site(self) -> Optional[SiteInfo]:
+        return self._local_site
+
+    def set_local_site(self, site: SiteInfo) -> None:
+        site.updated_at = time.time()
+        self._local_site = site
+        self.save()
+
+    def list_peers(self) -> List[PeerSite]:
+        return list(self._peers.values())
+
+    def get_peer(self, site_id: str) -> Optional[PeerSite]:
+        return self._peers.get(site_id)
+
+    def add_peer(self, peer: PeerSite) -> None:
+        peer.created_at = peer.created_at or time.time()
+        self._peers[peer.site_id] = peer
+        self.save()
+
+    def update_peer(self, peer: PeerSite) -> None:
+        if peer.site_id not in self._peers:
+            raise ValueError(f"Peer {peer.site_id} not found")
+        peer.updated_at = time.time()
+        self._peers[peer.site_id] = peer
+        self.save()
+
+    def delete_peer(self, site_id: str) -> bool:
+        if site_id in self._peers:
+            del self._peers[site_id]
+            self.save()
+            return True
+        return False
+
+    def update_health(self, site_id: str, is_healthy: bool) -> None:
+        peer = self._peers.get(site_id)
+        if peer:
+            peer.is_healthy = is_healthy
+            peer.last_health_check = time.time()
+            self.save()
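A short usage sketch for the registry; the paths and endpoints are illustrative:

    from pathlib import Path

    registry = SiteRegistry(Path("/app/data/.myfsio.sys/sites.json"))  # illustrative path
    registry.set_local_site(SiteInfo(site_id="site-a", endpoint="http://site-a:5000"))
    registry.add_peer(PeerSite(site_id="site-b", endpoint="http://site-b:5000", priority=10))
    registry.update_health("site-b", is_healthy=True)
    for peer in sorted(registry.list_peers(), key=lambda p: p.priority):
        print(peer.site_id, peer.endpoint, peer.is_healthy)

Every mutation calls save(), so the JSON file on disk always reflects the current peer set; reload() is the inverse path used at startup.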
@@ -22,9 +22,6 @@ if TYPE_CHECKING:
 logger = logging.getLogger(__name__)

 SITE_SYNC_USER_AGENT = "SiteSyncAgent/1.0"
-SITE_SYNC_CONNECT_TIMEOUT = 10
-SITE_SYNC_READ_TIMEOUT = 120
-CLOCK_SKEW_TOLERANCE_SECONDS = 1.0


 @dataclass
@@ -108,12 +105,18 @@ class RemoteObjectMeta:
     )


-def _create_sync_client(connection: "RemoteConnection") -> Any:
+def _create_sync_client(
+    connection: "RemoteConnection",
+    *,
+    connect_timeout: int = 10,
+    read_timeout: int = 120,
+    max_retries: int = 2,
+) -> Any:
     config = Config(
         user_agent_extra=SITE_SYNC_USER_AGENT,
-        connect_timeout=SITE_SYNC_CONNECT_TIMEOUT,
-        read_timeout=SITE_SYNC_READ_TIMEOUT,
-        retries={"max_attempts": 2},
+        connect_timeout=connect_timeout,
+        read_timeout=read_timeout,
+        retries={"max_attempts": max_retries},
         signature_version="s3v4",
         s3={"addressing_style": "path"},
         request_checksum_calculation="when_required",
@@ -138,6 +141,10 @@ class SiteSyncWorker:
         storage_root: Path,
         interval_seconds: int = 60,
         batch_size: int = 100,
+        connect_timeout: int = 10,
+        read_timeout: int = 120,
+        max_retries: int = 2,
+        clock_skew_tolerance_seconds: float = 1.0,
     ):
         self.storage = storage
         self.connections = connections
@@ -145,11 +152,24 @@ class SiteSyncWorker:
         self.storage_root = storage_root
         self.interval_seconds = interval_seconds
         self.batch_size = batch_size
+        self.connect_timeout = connect_timeout
+        self.read_timeout = read_timeout
+        self.max_retries = max_retries
+        self.clock_skew_tolerance_seconds = clock_skew_tolerance_seconds
         self._lock = threading.Lock()
         self._shutdown = threading.Event()
         self._sync_thread: Optional[threading.Thread] = None
         self._bucket_stats: Dict[str, SiteSyncStats] = {}

+    def _create_client(self, connection: "RemoteConnection") -> Any:
+        """Create an S3 client with the worker's configured timeouts."""
+        return _create_sync_client(
+            connection,
+            connect_timeout=self.connect_timeout,
+            read_timeout=self.read_timeout,
+            max_retries=self.max_retries,
+        )
+
     def start(self) -> None:
         if self._sync_thread is not None and self._sync_thread.is_alive():
             return
@@ -294,7 +314,7 @@ class SiteSyncWorker:
         return {obj.key: obj for obj in objects}

     def _list_remote_objects(self, rule: "ReplicationRule", connection: "RemoteConnection") -> Dict[str, RemoteObjectMeta]:
-        s3 = _create_sync_client(connection)
+        s3 = self._create_client(connection)
         result: Dict[str, RemoteObjectMeta] = {}
         paginator = s3.get_paginator("list_objects_v2")
         try:
@@ -312,7 +332,7 @@ class SiteSyncWorker:
         local_ts = local_meta.last_modified.timestamp()
         remote_ts = remote_meta.last_modified.timestamp()

-        if abs(remote_ts - local_ts) < CLOCK_SKEW_TOLERANCE_SECONDS:
+        if abs(remote_ts - local_ts) < self.clock_skew_tolerance_seconds:
             local_etag = local_meta.etag or ""
             if remote_meta.etag == local_etag:
                 return "skip"
@@ -327,7 +347,7 @@ class SiteSyncWorker:
         connection: "RemoteConnection",
         remote_meta: RemoteObjectMeta,
     ) -> bool:
-        s3 = _create_sync_client(connection)
+        s3 = self._create_client(connection)
         tmp_path = None
         try:
             tmp_dir = self.storage_root / ".myfsio.sys" / "tmp"
532
app/storage.py
532
app/storage.py
@@ -11,6 +11,7 @@ import time
 import unicodedata
 import uuid
 from collections import OrderedDict
+from concurrent.futures import ThreadPoolExecutor
 from contextlib import contextmanager
 from dataclasses import dataclass
 from datetime import datetime, timezone
@@ -46,6 +47,34 @@ else:
         fcntl.flock(file_handle.fileno(), fcntl.LOCK_UN)


+@contextmanager
+def _atomic_lock_file(lock_path: Path, max_retries: int = 10, base_delay: float = 0.1) -> Generator[None, None, None]:
+    """Atomically acquire a lock file with exponential backoff.
+
+    Uses O_EXCL to ensure atomic creation of the lock file.
+    """
+    lock_path.parent.mkdir(parents=True, exist_ok=True)
+    fd = None
+    for attempt in range(max_retries):
+        try:
+            fd = os.open(str(lock_path), os.O_CREAT | os.O_EXCL | os.O_WRONLY)
+            break
+        except FileExistsError:
+            if attempt == max_retries - 1:
+                raise BlockingIOError("Another upload to this key is in progress")
+            delay = base_delay * (2 ** attempt)
+            time.sleep(min(delay, 2.0))
+    try:
+        yield
+    finally:
+        if fd is not None:
+            os.close(fd)
+            try:
+                lock_path.unlink(missing_ok=True)
+            except OSError:
+                pass
+
+
 WINDOWS_RESERVED_NAMES = {
     "CON",
     "PRN",
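`_atomic_lock_file` later replaces the `_file_lock`-based locking in `complete_multipart_upload`. A usage sketch; the lock path and `write_object` are hypothetical placeholders:

from pathlib import Path

lock_path = Path("data/.myfsio.sys/locks/photos_cat.jpg.lock")
try:
    with _atomic_lock_file(lock_path, max_retries=5, base_delay=0.05):
        # os.O_CREAT | os.O_EXCL either creates the file atomically or raises
        # FileExistsError, so exactly one writer enters this block at a time.
        write_object()  # hypothetical protected work
except BlockingIOError:
    # Retries (with capped exponential backoff) were exhausted.
    ...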
@@ -137,20 +166,30 @@ class ObjectStorage:
     BUCKET_VERSIONS_DIR = "versions"
     MULTIPART_MANIFEST = "manifest.json"
     BUCKET_CONFIG_FILE = ".bucket.json"
-    DEFAULT_CACHE_TTL = 5
-    OBJECT_CACHE_MAX_SIZE = 100

-    def __init__(self, root: Path, cache_ttl: int = DEFAULT_CACHE_TTL) -> None:
+    def __init__(
+        self,
+        root: Path,
+        cache_ttl: int = 5,
+        object_cache_max_size: int = 100,
+        bucket_config_cache_ttl: float = 30.0,
+        object_key_max_length_bytes: int = 1024,
+    ) -> None:
         self.root = Path(root)
         self.root.mkdir(parents=True, exist_ok=True)
         self._ensure_system_roots()
-        self._object_cache: OrderedDict[str, tuple[Dict[str, ObjectMeta], float]] = OrderedDict()
+        self._object_cache: OrderedDict[str, tuple[Dict[str, ObjectMeta], float, float]] = OrderedDict()
         self._cache_lock = threading.Lock()
         self._bucket_locks: Dict[str, threading.Lock] = {}
         self._cache_version: Dict[str, int] = {}
         self._bucket_config_cache: Dict[str, tuple[dict[str, Any], float]] = {}
-        self._bucket_config_cache_ttl = 30.0
+        self._bucket_config_cache_ttl = bucket_config_cache_ttl
         self._cache_ttl = cache_ttl
+        self._object_cache_max_size = object_cache_max_size
+        self._object_key_max_length_bytes = object_key_max_length_bytes
+        self._sorted_key_cache: Dict[str, tuple[list[str], int]] = {}
+        self._meta_index_locks: Dict[str, threading.Lock] = {}
+        self._cleanup_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="ParentCleanup")

     def _get_bucket_lock(self, bucket_id: str) -> threading.Lock:
         """Get or create a lock for a specific bucket. Reduces global lock contention."""
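The former class constants (`DEFAULT_CACHE_TTL`, `OBJECT_CACHE_MAX_SIZE`) are now constructor knobs, so deployments can tune caching without subclassing. A sketch with illustrative values only:

storage = ObjectStorage(
    Path("data"),
    cache_ttl=10,                       # seconds before an object listing goes stale
    object_cache_max_size=500,          # LRU cap on cached buckets
    bucket_config_cache_ttl=60.0,
    object_key_max_length_bytes=1024,   # S3's documented key limit
)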
@@ -208,10 +247,15 @@ class ObjectStorage:
             raise BucketNotFoundError("Bucket does not exist")

         cache_path = self._system_bucket_root(bucket_name) / "stats.json"
+        cached_stats = None
+        cache_fresh = False
+
         if cache_path.exists():
             try:
-                if time.time() - cache_path.stat().st_mtime < cache_ttl:
-                    return json.loads(cache_path.read_text(encoding="utf-8"))
+                cache_fresh = time.time() - cache_path.stat().st_mtime < cache_ttl
+                cached_stats = json.loads(cache_path.read_text(encoding="utf-8"))
+                if cache_fresh:
+                    return cached_stats
             except (OSError, json.JSONDecodeError):
                 pass

@@ -220,6 +264,7 @@ class ObjectStorage:
         version_count = 0
         version_bytes = 0

+        try:
             for path in bucket_path.rglob("*"):
                 if path.is_file():
                     rel = path.relative_to(bucket_path)
@@ -238,6 +283,14 @@ class ObjectStorage:
                     stat = path.stat()
                     version_count += 1
                     version_bytes += stat.st_size
+        except OSError:
+            if cached_stats is not None:
+                return cached_stats
+            raise
+
+        existing_serial = 0
+        if cached_stats is not None:
+            existing_serial = cached_stats.get("_cache_serial", 0)

         stats = {
             "objects": object_count,
@@ -246,6 +299,7 @@ class ObjectStorage:
             "version_bytes": version_bytes,
             "total_objects": object_count + version_count,
             "total_bytes": total_bytes + version_bytes,
+            "_cache_serial": existing_serial,
         }

         try:
@@ -264,6 +318,39 @@ class ObjectStorage:
         except OSError:
             pass

+    def _update_bucket_stats_cache(
+        self,
+        bucket_id: str,
+        *,
+        bytes_delta: int = 0,
+        objects_delta: int = 0,
+        version_bytes_delta: int = 0,
+        version_count_delta: int = 0,
+    ) -> None:
+        """Incrementally update cached bucket statistics instead of invalidating.
+
+        This avoids expensive full directory scans on every PUT/DELETE by
+        adjusting the cached values directly. Also signals cross-process cache
+        invalidation by incrementing _cache_serial.
+        """
+        cache_path = self._system_bucket_root(bucket_id) / "stats.json"
+        try:
+            cache_path.parent.mkdir(parents=True, exist_ok=True)
+            if cache_path.exists():
+                data = json.loads(cache_path.read_text(encoding="utf-8"))
+            else:
+                data = {"objects": 0, "bytes": 0, "version_count": 0, "version_bytes": 0, "total_objects": 0, "total_bytes": 0, "_cache_serial": 0}
+            data["objects"] = max(0, data.get("objects", 0) + objects_delta)
+            data["bytes"] = max(0, data.get("bytes", 0) + bytes_delta)
+            data["version_count"] = max(0, data.get("version_count", 0) + version_count_delta)
+            data["version_bytes"] = max(0, data.get("version_bytes", 0) + version_bytes_delta)
+            data["total_objects"] = max(0, data.get("total_objects", 0) + objects_delta + version_count_delta)
+            data["total_bytes"] = max(0, data.get("total_bytes", 0) + bytes_delta + version_bytes_delta)
+            data["_cache_serial"] = data.get("_cache_serial", 0) + 1
+            cache_path.write_text(json.dumps(data), encoding="utf-8")
+        except (OSError, json.JSONDecodeError):
+            pass

     def delete_bucket(self, bucket_name: str) -> None:
         bucket_path = self._bucket_path(bucket_name)
         if not bucket_path.exists():
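A worked example of the delta bookkeeping (hypothetical numbers; the call is normally made internally by the PUT and DELETE paths): overwriting a 1 MiB object with a 3 MiB body while versioning is enabled archives the old copy as a version.

storage._update_bucket_stats_cache(
    "photos",
    bytes_delta=(3 - 1) * 1024 * 1024,    # live bytes grow by 2 MiB
    objects_delta=0,                      # an overwrite adds no key
    version_bytes_delta=1 * 1024 * 1024,  # the old 1 MiB copy becomes a version
    version_count_delta=1,
)
# stats.json afterwards: bytes +2 MiB, version_bytes +1 MiB, and
# _cache_serial incremented so other processes drop their caches.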
@@ -298,6 +385,8 @@ class ObjectStorage:
         Returns:
             ListObjectsResult with objects, truncation status, and continuation token
         """
+        import bisect
+
         bucket_path = self._bucket_path(bucket_name)
         if not bucket_path.exists():
             raise BucketNotFoundError("Bucket does not exist")
@@ -305,16 +394,26 @@ class ObjectStorage:

         object_cache = self._get_object_cache(bucket_id, bucket_path)

+        cache_version = self._cache_version.get(bucket_id, 0)
+        cached_entry = self._sorted_key_cache.get(bucket_id)
+        if cached_entry and cached_entry[1] == cache_version:
+            all_keys = cached_entry[0]
+        else:
             all_keys = sorted(object_cache.keys())
+            self._sorted_key_cache[bucket_id] = (all_keys, cache_version)
+
         if prefix:
-            all_keys = [k for k in all_keys if k.startswith(prefix)]
+            lo = bisect.bisect_left(all_keys, prefix)
+            hi = len(all_keys)
+            for i in range(lo, len(all_keys)):
+                if not all_keys[i].startswith(prefix):
+                    hi = i
+                    break
+            all_keys = all_keys[lo:hi]

         total_count = len(all_keys)
         start_index = 0
         if continuation_token:
-            try:
-                import bisect
             start_index = bisect.bisect_right(all_keys, continuation_token)
             if start_index >= total_count:
                 return ListObjectsResult(
@@ -323,8 +422,6 @@ class ObjectStorage:
                     next_continuation_token=None,
                     total_count=total_count,
                 )
-            except Exception:
-                pass

         end_index = start_index + max_keys
         keys_slice = all_keys[start_index:end_index]
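Because `all_keys` is sorted, every key sharing a prefix forms one contiguous run, so `bisect_left` jumps straight to its start instead of filtering all N keys. A small self-contained illustration:

import bisect

keys = ["a/readme", "photos/1.jpg", "photos/2.jpg", "zebra.txt"]
lo = bisect.bisect_left(keys, "photos/")   # index 1: first key >= the prefix
hi = lo
while hi < len(keys) and keys[hi].startswith("photos/"):
    hi += 1                                # walk only the matching run
assert keys[lo:hi] == ["photos/1.jpg", "photos/2.jpg"]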
@@ -364,14 +461,16 @@ class ObjectStorage:
             raise BucketNotFoundError("Bucket does not exist")
         bucket_id = bucket_path.name

-        safe_key = self._sanitize_object_key(object_key)
+        safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
         destination = bucket_path / safe_key
         destination.parent.mkdir(parents=True, exist_ok=True)

         is_overwrite = destination.exists()
         existing_size = destination.stat().st_size if is_overwrite else 0

+        archived_version_size = 0
         if self._is_versioning_enabled(bucket_path) and is_overwrite:
+            archived_version_size = existing_size
             self._archive_current_version(bucket_id, safe_key, reason="overwrite")

         tmp_dir = self._system_root_path() / self.SYSTEM_TMP_DIR
@@ -384,11 +483,10 @@ class ObjectStorage:
             shutil.copyfileobj(_HashingReader(stream, checksum), target)

         new_size = tmp_path.stat().st_size

-        if enforce_quota:
         size_delta = new_size - existing_size
         object_delta = 0 if is_overwrite else 1

+        if enforce_quota:
             quota_check = self.check_quota(
                 bucket_name,
                 additional_bytes=max(0, size_delta),
@@ -416,7 +514,13 @@ class ObjectStorage:
         combined_meta = {**internal_meta, **(metadata or {})}
         self._write_metadata(bucket_id, safe_key, combined_meta)

-        self._invalidate_bucket_stats_cache(bucket_id)
+        self._update_bucket_stats_cache(
+            bucket_id,
+            bytes_delta=size_delta,
+            objects_delta=object_delta,
+            version_bytes_delta=archived_version_size,
+            version_count_delta=1 if archived_version_size > 0 else 0,
+        )

         obj_meta = ObjectMeta(
             key=safe_key.as_posix(),
@@ -431,7 +535,7 @@ class ObjectStorage:

     def get_object_path(self, bucket_name: str, object_key: str) -> Path:
         path = self._object_path(bucket_name, object_key)
-        if not path.exists():
+        if not path.is_file():
             raise ObjectNotFoundError("Object not found")
         return path

@@ -439,15 +543,18 @@ class ObjectStorage:
         bucket_path = self._bucket_path(bucket_name)
         if not bucket_path.exists():
             return {}
-        safe_key = self._sanitize_object_key(object_key)
+        safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
         return self._read_metadata(bucket_path.name, safe_key) or {}

     def _cleanup_empty_parents(self, path: Path, stop_at: Path) -> None:
-        """Remove empty parent directories up to (but not including) stop_at.
+        """Remove empty parent directories in a background thread.

         On Windows/OneDrive, directories may be locked briefly after file deletion.
-        This method retries with a small delay to handle that case.
+        Running this in the background avoids blocking the request thread with retries.
         """
+        self._cleanup_executor.submit(self._do_cleanup_empty_parents, path, stop_at)
+
+    def _do_cleanup_empty_parents(self, path: Path, stop_at: Path) -> None:
         for parent in path.parents:
             if parent == stop_at:
                 break
@@ -466,15 +573,24 @@ class ObjectStorage:
         path = self._object_path(bucket_name, object_key)
         if not path.exists():
             return
+        deleted_size = path.stat().st_size
         safe_key = path.relative_to(bucket_path)
         bucket_id = bucket_path.name
+        archived_version_size = 0
         if self._is_versioning_enabled(bucket_path):
+            archived_version_size = deleted_size
             self._archive_current_version(bucket_id, safe_key, reason="delete")
         rel = path.relative_to(bucket_path)
         self._safe_unlink(path)
         self._delete_metadata(bucket_id, rel)

-        self._invalidate_bucket_stats_cache(bucket_id)
+        self._update_bucket_stats_cache(
+            bucket_id,
+            bytes_delta=-deleted_size,
+            objects_delta=-1,
+            version_bytes_delta=archived_version_size,
+            version_count_delta=1 if archived_version_size > 0 else 0,
+        )
         self._update_object_cache_entry(bucket_id, safe_key.as_posix(), None)
         self._cleanup_empty_parents(path, bucket_path)

@@ -487,7 +603,7 @@ class ObjectStorage:
                 self._safe_unlink(target)
                 self._delete_metadata(bucket_id, rel)
         else:
-            rel = self._sanitize_object_key(object_key)
+            rel = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
             self._delete_metadata(bucket_id, rel)
         version_dir = self._version_dir(bucket_id, rel)
         if version_dir.exists():
@@ -572,10 +688,19 @@ class ObjectStorage:
         return lifecycle if isinstance(lifecycle, list) else None

     def set_bucket_lifecycle(self, bucket_name: str, rules: Optional[List[Dict[str, Any]]]) -> None:
-        """Set lifecycle configuration for bucket."""
         bucket_path = self._require_bucket_path(bucket_name)
         self._set_bucket_config_entry(bucket_path.name, "lifecycle", rules)

+    def get_bucket_website(self, bucket_name: str) -> Optional[Dict[str, Any]]:
+        bucket_path = self._require_bucket_path(bucket_name)
+        config = self._read_bucket_config(bucket_path.name)
+        website = config.get("website")
+        return website if isinstance(website, dict) else None
+
+    def set_bucket_website(self, bucket_name: str, website_config: Optional[Dict[str, Any]]) -> None:
+        bucket_path = self._require_bucket_path(bucket_name)
+        self._set_bucket_config_entry(bucket_path.name, "website", website_config)
+
     def get_bucket_quota(self, bucket_name: str) -> Dict[str, Any]:
         """Get quota configuration for bucket.

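The diff pins down only this accessor pair, not the stored document shape; an S3-style configuration such as the following (both key names are assumptions for illustration) would round-trip unchanged through the bucket config:

storage.set_bucket_website("docs-site", {
    "index_document": "index.html",   # assumed key name
    "error_document": "404.html",     # assumed key name
})
assert storage.get_bucket_website("docs-site") is not None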
@@ -696,11 +821,15 @@ class ObjectStorage:
         bucket_path = self._bucket_path(bucket_name)
         if not bucket_path.exists():
             raise BucketNotFoundError("Bucket does not exist")
-        safe_key = self._sanitize_object_key(object_key)
+        safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
         object_path = bucket_path / safe_key
         if not object_path.exists():
             raise ObjectNotFoundError("Object does not exist")

+        entry = self._read_index_entry(bucket_path.name, safe_key)
+        if entry is not None:
+            tags = entry.get("tags")
+            return tags if isinstance(tags, list) else []
         for meta_file in (self._metadata_file(bucket_path.name, safe_key), self._legacy_metadata_file(bucket_path.name, safe_key)):
             if not meta_file.exists():
                 continue
@@ -719,35 +848,36 @@ class ObjectStorage:
         bucket_path = self._bucket_path(bucket_name)
         if not bucket_path.exists():
             raise BucketNotFoundError("Bucket does not exist")
-        safe_key = self._sanitize_object_key(object_key)
+        safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
         object_path = bucket_path / safe_key
         if not object_path.exists():
             raise ObjectNotFoundError("Object does not exist")

-        meta_file = self._metadata_file(bucket_path.name, safe_key)
-        existing_payload: Dict[str, Any] = {}
-        if meta_file.exists():
-            try:
-                existing_payload = json.loads(meta_file.read_text(encoding="utf-8"))
-            except (OSError, json.JSONDecodeError):
-                pass
+        bucket_id = bucket_path.name
+        existing_entry = self._read_index_entry(bucket_id, safe_key) or {}
+        if not existing_entry:
+            meta_file = self._metadata_file(bucket_id, safe_key)
+            if meta_file.exists():
+                try:
+                    existing_entry = json.loads(meta_file.read_text(encoding="utf-8"))
+                except (OSError, json.JSONDecodeError):
+                    pass

         if tags:
-            existing_payload["tags"] = tags
+            existing_entry["tags"] = tags
         else:
-            existing_payload.pop("tags", None)
+            existing_entry.pop("tags", None)

-        if existing_payload.get("metadata") or existing_payload.get("tags"):
-            meta_file.parent.mkdir(parents=True, exist_ok=True)
-            meta_file.write_text(json.dumps(existing_payload), encoding="utf-8")
-        elif meta_file.exists():
-            meta_file.unlink()
-            parent = meta_file.parent
-            meta_root = self._bucket_meta_root(bucket_path.name)
-            while parent != meta_root and parent.exists() and not any(parent.iterdir()):
-                parent.rmdir()
-                parent = parent.parent
+        if existing_entry.get("metadata") or existing_entry.get("tags"):
+            self._write_index_entry(bucket_id, safe_key, existing_entry)
+        else:
+            self._delete_index_entry(bucket_id, safe_key)
+        old_meta = self._metadata_file(bucket_id, safe_key)
+        try:
+            if old_meta.exists():
+                old_meta.unlink()
+        except OSError:
+            pass

     def delete_object_tags(self, bucket_name: str, object_key: str) -> None:
         """Delete all tags from an object."""
@@ -758,7 +888,7 @@ class ObjectStorage:
         if not bucket_path.exists():
             raise BucketNotFoundError("Bucket does not exist")
         bucket_id = bucket_path.name
-        safe_key = self._sanitize_object_key(object_key)
+        safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
         version_dir = self._version_dir(bucket_id, safe_key)
         if not version_dir.exists():
             version_dir = self._legacy_version_dir(bucket_id, safe_key)
@@ -782,7 +912,7 @@ class ObjectStorage:
         if not bucket_path.exists():
             raise BucketNotFoundError("Bucket does not exist")
         bucket_id = bucket_path.name
-        safe_key = self._sanitize_object_key(object_key)
+        safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
         version_dir = self._version_dir(bucket_id, safe_key)
         data_path = version_dir / f"{version_id}.bin"
         meta_path = version_dir / f"{version_id}.json"
@@ -796,7 +926,12 @@ class ObjectStorage:
         if not isinstance(metadata, dict):
             metadata = {}
         destination = bucket_path / safe_key
-        if self._is_versioning_enabled(bucket_path) and destination.exists():
+        restored_size = data_path.stat().st_size
+        is_overwrite = destination.exists()
+        existing_size = destination.stat().st_size if is_overwrite else 0
+        archived_version_size = 0
+        if self._is_versioning_enabled(bucket_path) and is_overwrite:
+            archived_version_size = existing_size
             self._archive_current_version(bucket_id, safe_key, reason="restore-overwrite")
         destination.parent.mkdir(parents=True, exist_ok=True)
         shutil.copy2(data_path, destination)
@@ -805,7 +940,13 @@ class ObjectStorage:
         else:
             self._delete_metadata(bucket_id, safe_key)
         stat = destination.stat()
-        self._invalidate_bucket_stats_cache(bucket_id)
+        self._update_bucket_stats_cache(
+            bucket_id,
+            bytes_delta=restored_size - existing_size,
+            objects_delta=0 if is_overwrite else 1,
+            version_bytes_delta=archived_version_size,
+            version_count_delta=1 if archived_version_size > 0 else 0,
+        )
         return ObjectMeta(
             key=safe_key.as_posix(),
             size=stat.st_size,
@@ -819,7 +960,7 @@ class ObjectStorage:
         if not bucket_path.exists():
             raise BucketNotFoundError("Bucket does not exist")
         bucket_id = bucket_path.name
-        safe_key = self._sanitize_object_key(object_key)
+        safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
         version_dir = self._version_dir(bucket_id, safe_key)
         data_path = version_dir / f"{version_id}.bin"
         meta_path = version_dir / f"{version_id}.json"
@@ -829,6 +970,7 @@ class ObjectStorage:
             meta_path = legacy_version_dir / f"{version_id}.json"
         if not data_path.exists() and not meta_path.exists():
             raise StorageError(f"Version {version_id} not found")
+        deleted_version_size = data_path.stat().st_size if data_path.exists() else 0
         if data_path.exists():
             data_path.unlink()
         if meta_path.exists():
@@ -836,6 +978,12 @@ class ObjectStorage:
         parent = data_path.parent
         if parent.exists() and not any(parent.iterdir()):
             parent.rmdir()
+        if deleted_version_size > 0:
+            self._update_bucket_stats_cache(
+                bucket_id,
+                version_bytes_delta=-deleted_version_size,
+                version_count_delta=-1,
+            )

     def list_orphaned_objects(self, bucket_name: str) -> List[Dict[str, Any]]:
         bucket_path = self._bucket_path(bucket_name)
@@ -910,7 +1058,7 @@ class ObjectStorage:
         if not bucket_path.exists():
             raise BucketNotFoundError("Bucket does not exist")
         bucket_id = bucket_path.name
-        safe_key = self._sanitize_object_key(object_key)
+        safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
         upload_id = uuid.uuid4().hex
         upload_root = self._multipart_dir(bucket_id, upload_id)
         upload_root.mkdir(parents=True, exist_ok=False)
@@ -995,6 +1143,102 @@ class ObjectStorage:

         return record["etag"]

+    def upload_part_copy(
+        self,
+        bucket_name: str,
+        upload_id: str,
+        part_number: int,
+        source_bucket: str,
+        source_key: str,
+        start_byte: Optional[int] = None,
+        end_byte: Optional[int] = None,
+    ) -> Dict[str, Any]:
+        """Copy a range from an existing object as a multipart part."""
+        if part_number < 1 or part_number > 10000:
+            raise StorageError("part_number must be between 1 and 10000")
+
+        source_path = self.get_object_path(source_bucket, source_key)
+        source_size = source_path.stat().st_size
+
+        if start_byte is None:
+            start_byte = 0
+        if end_byte is None:
+            end_byte = source_size - 1
+
+        if start_byte < 0 or end_byte >= source_size or start_byte > end_byte:
+            raise StorageError("Invalid byte range")
+
+        bucket_path = self._bucket_path(bucket_name)
+        upload_root = self._multipart_dir(bucket_path.name, upload_id)
+        if not upload_root.exists():
+            upload_root = self._legacy_multipart_dir(bucket_path.name, upload_id)
+        if not upload_root.exists():
+            raise StorageError("Multipart upload not found")
+
+        checksum = hashlib.md5()
+        part_filename = f"part-{part_number:05d}.part"
+        part_path = upload_root / part_filename
+        temp_path = upload_root / f".{part_filename}.tmp"
+
+        try:
+            with source_path.open("rb") as src:
+                src.seek(start_byte)
+                bytes_to_copy = end_byte - start_byte + 1
+                with temp_path.open("wb") as target:
+                    remaining = bytes_to_copy
+                    while remaining > 0:
+                        chunk_size = min(65536, remaining)
+                        chunk = src.read(chunk_size)
+                        if not chunk:
+                            break
+                        checksum.update(chunk)
+                        target.write(chunk)
+                        remaining -= len(chunk)
+            temp_path.replace(part_path)
+        except OSError:
+            try:
+                temp_path.unlink(missing_ok=True)
+            except OSError:
+                pass
+            raise
+
+        record = {
+            "etag": checksum.hexdigest(),
+            "size": part_path.stat().st_size,
+            "filename": part_filename,
+        }
+
+        manifest_path = upload_root / self.MULTIPART_MANIFEST
+        lock_path = upload_root / ".manifest.lock"
+
+        max_retries = 3
+        for attempt in range(max_retries):
+            try:
+                with lock_path.open("w") as lock_file:
+                    with _file_lock(lock_file):
+                        try:
+                            manifest = json.loads(manifest_path.read_text(encoding="utf-8"))
+                        except (OSError, json.JSONDecodeError) as exc:
+                            if attempt < max_retries - 1:
+                                time.sleep(0.1 * (attempt + 1))
+                                continue
+                            raise StorageError("Multipart manifest unreadable") from exc
+
+                        parts = manifest.setdefault("parts", {})
+                        parts[str(part_number)] = record
+                        manifest_path.write_text(json.dumps(manifest), encoding="utf-8")
+                break
+            except OSError as exc:
+                if attempt < max_retries - 1:
+                    time.sleep(0.1 * (attempt + 1))
+                    continue
+                raise StorageError(f"Failed to update multipart manifest: {exc}") from exc
+
+        return {
+            "etag": record["etag"],
+            "last_modified": datetime.fromtimestamp(part_path.stat().st_mtime, timezone.utc),
+        }
+
     def complete_multipart_upload(
         self,
         bucket_name: str,
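`upload_part_copy` is the storage-side half of S3's UploadPartCopy operation. Assuming the API layer wires it up (the endpoint and object names below are placeholders), a boto3 client would exercise it like this:

import boto3

s3 = boto3.client("s3", endpoint_url="http://localhost:5000")
resp = s3.upload_part_copy(
    Bucket="dst-bucket",
    Key="big-object",
    UploadId=upload_id,                  # from a prior create_multipart_upload
    PartNumber=1,
    CopySource={"Bucket": "src-bucket", "Key": "source-object"},
    CopySourceRange="bytes=0-5242879",   # maps to start_byte/end_byte
)
etag = resp["CopyPartResult"]["ETag"]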
@@ -1034,16 +1278,16 @@ class ObjectStorage:
             total_size += record.get("size", 0)
         validated.sort(key=lambda entry: entry[0])

-        safe_key = self._sanitize_object_key(manifest["object_key"])
+        safe_key = self._sanitize_object_key(manifest["object_key"], self._object_key_max_length_bytes)
         destination = bucket_path / safe_key

         is_overwrite = destination.exists()
         existing_size = destination.stat().st_size if is_overwrite else 0

-        if enforce_quota:
         size_delta = total_size - existing_size
         object_delta = 0 if is_overwrite else 1
+        versioning_enabled = self._is_versioning_enabled(bucket_path)

+        if enforce_quota:
             quota_check = self.check_quota(
                 bucket_name,
                 additional_bytes=max(0, size_delta),
@@ -1059,12 +1303,12 @@ class ObjectStorage:
         destination.parent.mkdir(parents=True, exist_ok=True)

         lock_file_path = self._system_bucket_root(bucket_id) / "locks" / f"{safe_key.as_posix().replace('/', '_')}.lock"
-        lock_file_path.parent.mkdir(parents=True, exist_ok=True)

+        archived_version_size = 0
         try:
-            with lock_file_path.open("w") as lock_file:
-                with _file_lock(lock_file):
-                    if self._is_versioning_enabled(bucket_path) and destination.exists():
+            with _atomic_lock_file(lock_file_path):
+                if versioning_enabled and destination.exists():
+                    archived_version_size = destination.stat().st_size
                     self._archive_current_version(bucket_id, safe_key, reason="overwrite")
                 checksum = hashlib.md5()
                 with destination.open("wb") as target:
@@ -1079,18 +1323,18 @@ class ObjectStorage:
                             break
                         checksum.update(data)
                         target.write(data)

         except BlockingIOError:
             raise StorageError("Another upload to this key is in progress")
-        finally:
-            try:
-                lock_file_path.unlink(missing_ok=True)
-            except OSError:
-                pass

         shutil.rmtree(upload_root, ignore_errors=True)

-        self._invalidate_bucket_stats_cache(bucket_id)
+        self._update_bucket_stats_cache(
+            bucket_id,
+            bytes_delta=size_delta,
+            objects_delta=object_delta,
+            version_bytes_delta=archived_version_size,
+            version_count_delta=1 if archived_version_size > 0 else 0,
+        )

         stat = destination.stat()
         etag = checksum.hexdigest()
@@ -1213,7 +1457,7 @@ class ObjectStorage:

     def _object_path(self, bucket_name: str, object_key: str) -> Path:
         bucket_path = self._bucket_path(bucket_name)
-        safe_key = self._sanitize_object_key(object_key)
+        safe_key = self._sanitize_object_key(object_key, self._object_key_max_length_bytes)
         return bucket_path / safe_key

     def _system_root_path(self) -> Path:
@@ -1300,7 +1544,7 @@ class ObjectStorage:
                     if entry.is_dir(follow_symlinks=False):
                         if check_newer(entry.path):
                             return True
-                    elif entry.is_file(follow_symlinks=False) and entry.name.endswith('.meta.json'):
+                    elif entry.is_file(follow_symlinks=False) and (entry.name.endswith('.meta.json') or entry.name == '_index.json'):
                         if entry.stat().st_mtime > index_mtime:
                             return True
         except OSError:
@@ -1314,6 +1558,7 @@ class ObjectStorage:
         meta_str = str(meta_root)
         meta_len = len(meta_str) + 1
         meta_files: list[tuple[str, str]] = []
+        index_files: list[str] = []

         def collect_meta_files(dir_path: str) -> None:
             try:
@@ -1321,7 +1566,10 @@ class ObjectStorage:
                 for entry in it:
                     if entry.is_dir(follow_symlinks=False):
                         collect_meta_files(entry.path)
-                    elif entry.is_file(follow_symlinks=False) and entry.name.endswith('.meta.json'):
+                    elif entry.is_file(follow_symlinks=False):
+                        if entry.name == '_index.json':
+                            index_files.append(entry.path)
+                        elif entry.name.endswith('.meta.json'):
                             rel = entry.path[meta_len:]
                             key = rel[:-10].replace(os.sep, '/')
                             meta_files.append((key, entry.path))
@@ -1330,6 +1578,30 @@ class ObjectStorage:

         collect_meta_files(meta_str)

+        meta_cache = {}
+
+        for idx_path in index_files:
+            try:
+                with open(idx_path, 'r', encoding='utf-8') as f:
+                    idx_data = json.load(f)
+                rel_dir = idx_path[meta_len:]
+                rel_dir = rel_dir.replace(os.sep, '/')
+                if rel_dir.endswith('/_index.json'):
+                    dir_prefix = rel_dir[:-len('/_index.json')]
+                else:
+                    dir_prefix = ''
+                for entry_name, entry_data in idx_data.items():
+                    if dir_prefix:
+                        key = f"{dir_prefix}/{entry_name}"
+                    else:
+                        key = entry_name
+                    meta = entry_data.get("metadata", {})
+                    etag = meta.get("__etag__")
+                    if etag:
+                        meta_cache[key] = etag
+            except (OSError, json.JSONDecodeError):
+                pass
+
         def read_meta_file(item: tuple[str, str]) -> tuple[str, str | None]:
             key, path = item
             try:
@@ -1347,13 +1619,15 @@ class ObjectStorage:
             except (OSError, UnicodeDecodeError):
                 return key, None

-        if meta_files:
-            meta_cache = {}
-            with ThreadPoolExecutor(max_workers=min(64, len(meta_files))) as executor:
-                for key, etag in executor.map(read_meta_file, meta_files):
+        legacy_meta_files = [(k, p) for k, p in meta_files if k not in meta_cache]
+        if legacy_meta_files:
+            max_workers = min((os.cpu_count() or 4) * 2, len(legacy_meta_files), 16)
+            with ThreadPoolExecutor(max_workers=max_workers) as executor:
+                for key, etag in executor.map(read_meta_file, legacy_meta_files):
                     if etag:
                         meta_cache[key] = etag

+        if meta_cache:
             try:
                 etag_index_path.parent.mkdir(parents=True, exist_ok=True)
                 with open(etag_index_path, 'w', encoding='utf-8') as f:
@@ -1402,38 +1676,46 @@ class ObjectStorage:

         Uses LRU eviction to prevent unbounded cache growth.
         Thread-safe with per-bucket locks to reduce contention.
+        Checks stats.json for cross-process cache invalidation.
         """
         now = time.time()
+        current_stats_mtime = self._get_cache_marker_mtime(bucket_id)
+
         with self._cache_lock:
             cached = self._object_cache.get(bucket_id)
             if cached:
-                objects, timestamp = cached
-                if now - timestamp < self._cache_ttl:
+                objects, timestamp, cached_stats_mtime = cached
+                if now - timestamp < self._cache_ttl and current_stats_mtime == cached_stats_mtime:
                     self._object_cache.move_to_end(bucket_id)
                     return objects
             cache_version = self._cache_version.get(bucket_id, 0)

         bucket_lock = self._get_bucket_lock(bucket_id)
         with bucket_lock:
+            current_stats_mtime = self._get_cache_marker_mtime(bucket_id)
             with self._cache_lock:
                 cached = self._object_cache.get(bucket_id)
                 if cached:
-                    objects, timestamp = cached
-                    if now - timestamp < self._cache_ttl:
+                    objects, timestamp, cached_stats_mtime = cached
+                    if now - timestamp < self._cache_ttl and current_stats_mtime == cached_stats_mtime:
                         self._object_cache.move_to_end(bucket_id)
                         return objects

             objects = self._build_object_cache(bucket_path)
+            new_stats_mtime = self._get_cache_marker_mtime(bucket_id)

             with self._cache_lock:
                 current_version = self._cache_version.get(bucket_id, 0)
                 if current_version != cache_version:
                     objects = self._build_object_cache(bucket_path)
-                while len(self._object_cache) >= self.OBJECT_CACHE_MAX_SIZE:
+                    new_stats_mtime = self._get_cache_marker_mtime(bucket_id)
+                while len(self._object_cache) >= self._object_cache_max_size:
                     self._object_cache.popitem(last=False)

-                self._object_cache[bucket_id] = (objects, time.time())
+                self._object_cache[bucket_id] = (objects, time.time(), new_stats_mtime)
                 self._object_cache.move_to_end(bucket_id)
+                self._cache_version[bucket_id] = current_version + 1
+                self._sorted_key_cache.pop(bucket_id, None)

         return objects

@@ -1441,6 +1723,7 @@ class ObjectStorage:
         """Invalidate the object cache and etag index for a bucket.

         Increments version counter to signal stale reads.
+        Cross-process invalidation is handled by checking stats.json mtime.
         """
         with self._cache_lock:
             self._object_cache.pop(bucket_id, None)
@@ -1452,19 +1735,37 @@ class ObjectStorage:
         except OSError:
             pass

+    def _get_cache_marker_mtime(self, bucket_id: str) -> float:
+        """Get a cache marker combining serial and object count for cross-process invalidation.
+
+        Returns a combined value that changes if either _cache_serial or object count changes.
+        This handles cases where the serial was reset but object count differs.
+        """
+        stats_path = self._system_bucket_root(bucket_id) / "stats.json"
+        try:
+            data = json.loads(stats_path.read_text(encoding="utf-8"))
+            serial = data.get("_cache_serial", 0)
+            count = data.get("objects", 0)
+            return float(serial * 1000000 + count)
+        except (OSError, json.JSONDecodeError):
+            return 0
+
     def _update_object_cache_entry(self, bucket_id: str, key: str, meta: Optional[ObjectMeta]) -> None:
         """Update a single entry in the object cache instead of invalidating the whole cache.

         This is a performance optimization - lazy update instead of full invalidation.
+        Cross-process invalidation is handled by checking stats.json mtime.
         """
         with self._cache_lock:
             cached = self._object_cache.get(bucket_id)
             if cached:
-                objects, timestamp = cached
+                objects, timestamp, stats_mtime = cached
                 if meta is None:
                     objects.pop(key, None)
                 else:
                     objects[key] = meta
+                self._cache_version[bucket_id] = self._cache_version.get(bucket_id, 0) + 1
+                self._sorted_key_cache.pop(bucket_id, None)

     def warm_cache(self, bucket_names: Optional[List[str]] = None) -> None:
         """Pre-warm the object cache for specified buckets or all buckets.
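Despite its name, the marker is no longer a file mtime but an arithmetic encoding of two stats.json fields. A quick check of the encoding:

# serial 7 with 42 live objects -> 7000042.0
assert float(7 * 1000000 + 42) == 7000042.0
# Note (an observation, not from the diff): buckets holding a million or
# more objects could alias one serial into the next; the serial bump on
# every write makes a stale hit unlikely in practice.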
@@ -1576,6 +1877,64 @@ class ObjectStorage:
         meta_rel = Path(key.as_posix() + ".meta.json")
         return meta_root / meta_rel

+    def _index_file_for_key(self, bucket_name: str, key: Path) -> tuple[Path, str]:
+        meta_root = self._bucket_meta_root(bucket_name)
+        parent = key.parent
+        entry_name = key.name
+        if parent == Path("."):
+            return meta_root / "_index.json", entry_name
+        return meta_root / parent / "_index.json", entry_name
+
+    def _get_meta_index_lock(self, index_path: str) -> threading.Lock:
+        with self._cache_lock:
+            if index_path not in self._meta_index_locks:
+                self._meta_index_locks[index_path] = threading.Lock()
+            return self._meta_index_locks[index_path]
+
+    def _read_index_entry(self, bucket_name: str, key: Path) -> Optional[Dict[str, Any]]:
+        index_path, entry_name = self._index_file_for_key(bucket_name, key)
+        if not index_path.exists():
+            return None
+        try:
+            index_data = json.loads(index_path.read_text(encoding="utf-8"))
+            return index_data.get(entry_name)
+        except (OSError, json.JSONDecodeError):
+            return None
+
+    def _write_index_entry(self, bucket_name: str, key: Path, entry: Dict[str, Any]) -> None:
+        index_path, entry_name = self._index_file_for_key(bucket_name, key)
+        lock = self._get_meta_index_lock(str(index_path))
+        with lock:
+            index_path.parent.mkdir(parents=True, exist_ok=True)
+            index_data: Dict[str, Any] = {}
+            if index_path.exists():
+                try:
+                    index_data = json.loads(index_path.read_text(encoding="utf-8"))
+                except (OSError, json.JSONDecodeError):
+                    pass
+            index_data[entry_name] = entry
+            index_path.write_text(json.dumps(index_data), encoding="utf-8")
+
+    def _delete_index_entry(self, bucket_name: str, key: Path) -> None:
+        index_path, entry_name = self._index_file_for_key(bucket_name, key)
+        if not index_path.exists():
+            return
+        lock = self._get_meta_index_lock(str(index_path))
+        with lock:
+            try:
+                index_data = json.loads(index_path.read_text(encoding="utf-8"))
+            except (OSError, json.JSONDecodeError):
+                return
+            if entry_name in index_data:
+                del index_data[entry_name]
+            if index_data:
+                index_path.write_text(json.dumps(index_data), encoding="utf-8")
+            else:
+                try:
+                    index_path.unlink()
+                except OSError:
+                    pass
+
     def _normalize_metadata(self, metadata: Optional[Dict[str, str]]) -> Optional[Dict[str, str]]:
         if not metadata:
             return None
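The net effect is one `_index.json` per key directory instead of a `*.meta.json` file per object. For keys `photos/a.jpg` and `photos/b.jpg`, the bucket's meta root would hold `photos/_index.json` shaped roughly like this (the inner metadata keys and tag values are illustrative; only the `{"metadata": ..., "tags": ...}` envelope is fixed by the diff):

{
  "a.jpg": {"metadata": {"__etag__": "d41d8cd9..."}, "tags": []},
  "b.jpg": {"metadata": {"content-type": "image/jpeg"}}
}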
@@ -1587,9 +1946,13 @@ class ObjectStorage:
         if not clean:
             self._delete_metadata(bucket_name, key)
             return
-        meta_file = self._metadata_file(bucket_name, key)
-        meta_file.parent.mkdir(parents=True, exist_ok=True)
-        meta_file.write_text(json.dumps({"metadata": clean}), encoding="utf-8")
+        self._write_index_entry(bucket_name, key, {"metadata": clean})
+        old_meta = self._metadata_file(bucket_name, key)
+        try:
+            if old_meta.exists():
+                old_meta.unlink()
+        except OSError:
+            pass

     def _archive_current_version(self, bucket_name: str, key: Path, *, reason: str) -> None:
         bucket_path = self._bucket_path(bucket_name)
@@ -1616,6 +1979,10 @@ class ObjectStorage:
         manifest_path.write_text(json.dumps(record), encoding="utf-8")

     def _read_metadata(self, bucket_name: str, key: Path) -> Dict[str, str]:
+        entry = self._read_index_entry(bucket_name, key)
+        if entry is not None:
+            data = entry.get("metadata")
+            return data if isinstance(data, dict) else {}
         for meta_file in (self._metadata_file(bucket_name, key), self._legacy_metadata_file(bucket_name, key)):
             if not meta_file.exists():
                 continue
@@ -1646,6 +2013,7 @@ class ObjectStorage:
         raise StorageError(message) from last_error

     def _delete_metadata(self, bucket_name: str, key: Path) -> None:
+        self._delete_index_entry(bucket_name, key)
         locations = (
             (self._metadata_file(bucket_name, key), self._bucket_meta_root(bucket_name)),
             (self._legacy_metadata_file(bucket_name, key), self._legacy_meta_root(bucket_name)),
@@ -1764,16 +2132,16 @@ class ObjectStorage:
         return name

     @staticmethod
-    def _sanitize_object_key(object_key: str) -> Path:
+    def _sanitize_object_key(object_key: str, max_length_bytes: int = 1024) -> Path:
         if not object_key:
             raise StorageError("Object key required")
-        if len(object_key.encode("utf-8")) > 1024:
-            raise StorageError("Object key exceeds maximum length of 1024 bytes")
         if "\x00" in object_key:
             raise StorageError("Object key contains null bytes")
+        object_key = unicodedata.normalize("NFC", object_key)
+        if len(object_key.encode("utf-8")) > max_length_bytes:
+            raise StorageError(f"Object key exceeds maximum length of {max_length_bytes} bytes")
         if object_key.startswith(("/", "\\")):
             raise StorageError("Object key cannot start with a slash")
-        object_key = unicodedata.normalize("NFC", object_key)

         candidate = Path(object_key)
         if ".." in candidate.parts:
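Normalizing before measuring matters because NFC can shrink a key: checking the raw length first could reject a key whose stored (normalized) form is within bounds. A concrete check:

import unicodedata

decomposed = "e\u0301" * 512                      # "e" plus combining accent per character
composed = unicodedata.normalize("NFC", decomposed)
assert len(decomposed.encode("utf-8")) == 1536    # over a 1024-byte limit
assert len(composed.encode("utf-8")) == 1024      # exactly at the limit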
@@ -1,6 +1,6 @@
 from __future__ import annotations

-APP_VERSION = "0.2.3"
+APP_VERSION = "0.2.9"


 def get_version() -> str:
|||||||
app/website_domains.py (new file, 55 lines)
@@ -0,0 +1,55 @@
from __future__ import annotations

import json
import threading
from pathlib import Path
from typing import Dict, List, Optional


class WebsiteDomainStore:
    def __init__(self, config_path: Path) -> None:
        self.config_path = config_path
        self._lock = threading.Lock()
        self._domains: Dict[str, str] = {}
        self.reload()

    def reload(self) -> None:
        if not self.config_path.exists():
            self._domains = {}
            return
        try:
            with open(self.config_path, "r", encoding="utf-8") as f:
                data = json.load(f)
            if isinstance(data, dict):
                self._domains = {k.lower(): v for k, v in data.items()}
            else:
                self._domains = {}
        except (OSError, json.JSONDecodeError):
            self._domains = {}

    def _save(self) -> None:
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.config_path, "w", encoding="utf-8") as f:
            json.dump(self._domains, f, indent=2)

    def list_all(self) -> List[Dict[str, str]]:
        with self._lock:
            return [{"domain": d, "bucket": b} for d, b in self._domains.items()]

    def get_bucket(self, domain: str) -> Optional[str]:
        with self._lock:
            return self._domains.get(domain.lower())

    def set_mapping(self, domain: str, bucket: str) -> None:
        with self._lock:
            self._domains[domain.lower()] = bucket
            self._save()

    def delete_mapping(self, domain: str) -> bool:
        with self._lock:
            key = domain.lower()
            if key not in self._domains:
                return False
            del self._domains[key]
            self._save()
            return True
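A minimal usage sketch for the new store (the config path below is illustrative; the application wires in its own location):

```python
from pathlib import Path

from app.website_domains import WebsiteDomainStore

# Illustrative path; the application supplies its own config location.
store = WebsiteDomainStore(Path("data/.myfsio.sys/config/website_domains.json"))

store.set_mapping("Example.COM", "my-site")   # domains are lower-cased on write
print(store.get_bucket("example.com"))        # -> "my-site"
print(store.list_all())                       # -> [{"domain": "example.com", "bucket": "my-site"}]
print(store.delete_mapping("example.com"))    # -> True
```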
docs.md
@@ -7,7 +7,7 @@ This document expands on the README to describe the full workflow for running, c
 MyFSIO ships two Flask entrypoints that share the same storage, IAM, and bucket-policy state:

 - **API server** – Implements the S3-compatible REST API, policy evaluation, and Signature Version 4 presign service.
-- **UI server** – Provides the browser console for buckets, IAM, and policies. It proxies to the API for presign operations.
+- **UI server** – Provides the browser console for buckets, IAM, and policies. It proxies all storage operations through the S3 API via boto3 (SigV4-signed), mirroring the architecture used by MinIO and Garage.

 Both servers read `AppConfig`, so editing JSON stores on disk instantly affects both surfaces.

@@ -136,7 +136,7 @@ All configuration is done via environment variables. The table below lists every
 | `MAX_UPLOAD_SIZE` | `1073741824` (1 GiB) | Bytes. Caps incoming uploads in both API + UI. |
 | `UI_PAGE_SIZE` | `100` | `MaxKeys` hint shown in listings. |
 | `SECRET_KEY` | Auto-generated | Flask session key. Auto-generates and persists if not set. **Set explicitly in production.** |
-| `API_BASE_URL` | `None` | Public URL for presigned URLs. Required behind proxies. |
+| `API_BASE_URL` | `http://127.0.0.1:5000` | Internal S3 API URL used by the web UI proxy. Also used for presigned URL generation. Set to your public URL if running behind a reverse proxy. |
 | `AWS_REGION` | `us-east-1` | Region embedded in SigV4 credential scope. |
 | `AWS_SERVICE` | `s3` | Service string for SigV4. |

@@ -166,15 +166,19 @@ All configuration is done via environment variables. The table below lists every
 | Variable | Default | Notes |
 | --- | --- | --- |
 | `RATE_LIMIT_DEFAULT` | `200 per minute` | Default rate limit for API endpoints. |
+| `RATE_LIMIT_LIST_BUCKETS` | `60 per minute` | Rate limit for listing buckets (`GET /`). |
+| `RATE_LIMIT_BUCKET_OPS` | `120 per minute` | Rate limit for bucket operations (PUT/DELETE/GET/POST on `/<bucket>`). |
+| `RATE_LIMIT_OBJECT_OPS` | `240 per minute` | Rate limit for object operations (PUT/GET/DELETE/POST on `/<bucket>/<key>`). |
+| `RATE_LIMIT_HEAD_OPS` | `100 per minute` | Rate limit for HEAD requests (bucket and object). |
 | `RATE_LIMIT_STORAGE_URI` | `memory://` | Storage backend for rate limits. Use `redis://host:port` for distributed setups. |

 ### Server Configuration

 | Variable | Default | Notes |
 | --- | --- | --- |
-| `SERVER_THREADS` | `4` | Waitress worker threads (1-64). More threads handle more concurrent requests but use more memory. |
+| `SERVER_THREADS` | `0` (auto) | Waitress worker threads (1-64). Set to `0` for auto-calculation based on CPU cores (×2). |
-| `SERVER_CONNECTION_LIMIT` | `100` | Maximum concurrent connections (10-1000). Ensure OS file descriptor limits support this value. |
+| `SERVER_CONNECTION_LIMIT` | `0` (auto) | Maximum concurrent connections (10-1000). Set to `0` for auto-calculation based on available RAM. |
-| `SERVER_BACKLOG` | `1024` | TCP listen backlog (64-4096). Connections queue here when all threads are busy. |
+| `SERVER_BACKLOG` | `0` (auto) | TCP listen backlog (64-4096). Set to `0` for auto-calculation (connection_limit × 2). |
 | `SERVER_CHANNEL_TIMEOUT` | `120` | Seconds before idle connections are closed (10-300). |

 ### Logging

@@ -615,13 +619,15 @@ MyFSIO implements a comprehensive Identity and Access Management (IAM) system th

 ### Getting Started

-1. On first boot, `data/.myfsio.sys/config/iam.json` is seeded with `localadmin / localadmin` that has wildcard access.
+1. On first boot, `data/.myfsio.sys/config/iam.json` is created with a randomly generated admin user. The access key and secret key are printed to the console during first startup. If you miss it, check the `iam.json` file directly—credentials are stored in plaintext.
-2. Sign into the UI using those credentials, then open **IAM**:
+2. Sign into the UI using the generated credentials, then open **IAM**:
    - **Create user**: supply a display name and optional JSON inline policy array.
    - **Rotate secret**: generates a new secret key; the UI surfaces it once.
    - **Policy editor**: select a user, paste an array of objects (`{"bucket": "*", "actions": ["list", "read"]}`), and submit. Alias support includes AWS-style verbs (e.g., `s3:GetObject`).
 3. Wildcard action `iam:*` is supported for admin user definitions.

+> **Breaking Change (v0.2.0+):** Previous versions used fixed default credentials (`localadmin/localadmin`). If upgrading from an older version, your existing credentials remain unchanged, but new installations will generate random credentials.

 ### Authentication

 The API expects every request to include authentication headers. The UI persists them in the Flask session after login.
@@ -1503,16 +1509,841 @@ The suite covers bucket CRUD, presigned downloads, bucket policy enforcement, an
## 14. API Matrix

```
# Service Endpoints
GET    /myfsio/health                   # Health check

# Bucket Operations
GET    /                                # List buckets
PUT    /<bucket>                        # Create bucket
DELETE /<bucket>                        # Remove bucket
GET    /<bucket>                        # List objects (supports ?list-type=2)
HEAD   /<bucket>                        # Check bucket exists
POST   /<bucket>                        # POST object upload (HTML form)
POST   /<bucket>?delete                 # Bulk delete objects

# Bucket Configuration
GET    /<bucket>?policy                 # Fetch bucket policy
PUT    /<bucket>?policy                 # Upsert bucket policy
DELETE /<bucket>?policy                 # Delete bucket policy
GET    /<bucket>?quota                  # Get bucket quota
PUT    /<bucket>?quota                  # Set bucket quota (admin only)
GET    /<bucket>?versioning             # Get versioning status
PUT    /<bucket>?versioning             # Enable/disable versioning
GET    /<bucket>?lifecycle              # Get lifecycle rules
PUT    /<bucket>?lifecycle              # Set lifecycle rules
DELETE /<bucket>?lifecycle              # Delete lifecycle rules
GET    /<bucket>?cors                   # Get CORS configuration
PUT    /<bucket>?cors                   # Set CORS configuration
DELETE /<bucket>?cors                   # Delete CORS configuration
GET    /<bucket>?encryption             # Get encryption configuration
PUT    /<bucket>?encryption             # Set default encryption
DELETE /<bucket>?encryption             # Delete encryption configuration
GET    /<bucket>?acl                    # Get bucket ACL
PUT    /<bucket>?acl                    # Set bucket ACL
GET    /<bucket>?tagging                # Get bucket tags
PUT    /<bucket>?tagging                # Set bucket tags
DELETE /<bucket>?tagging                # Delete bucket tags
GET    /<bucket>?replication            # Get replication configuration
PUT    /<bucket>?replication            # Set replication rules
DELETE /<bucket>?replication            # Delete replication configuration
GET    /<bucket>?logging                # Get access logging configuration
PUT    /<bucket>?logging                # Set access logging
GET    /<bucket>?notification           # Get event notifications
PUT    /<bucket>?notification           # Set event notifications (webhooks)
GET    /<bucket>?object-lock            # Get object lock configuration
PUT    /<bucket>?object-lock            # Set object lock configuration
GET    /<bucket>?website                # Get website configuration
PUT    /<bucket>?website                # Set website configuration
DELETE /<bucket>?website                # Delete website configuration
GET    /<bucket>?uploads                # List active multipart uploads
GET    /<bucket>?versions               # List object versions
GET    /<bucket>?location               # Get bucket location/region

# Object Operations
PUT    /<bucket>/<key>                  # Upload object
GET    /<bucket>/<key>                  # Download object (supports Range header)
DELETE /<bucket>/<key>                  # Delete object
HEAD   /<bucket>/<key>                  # Get object metadata
POST   /<bucket>/<key>                  # POST upload with policy
POST   /<bucket>/<key>?select           # SelectObjectContent (SQL query)

# Object Configuration
GET    /<bucket>/<key>?tagging          # Get object tags
PUT    /<bucket>/<key>?tagging          # Set object tags
DELETE /<bucket>/<key>?tagging          # Delete object tags
GET    /<bucket>/<key>?acl              # Get object ACL
PUT    /<bucket>/<key>?acl              # Set object ACL
PUT    /<bucket>/<key>?retention        # Set object retention
GET    /<bucket>/<key>?retention        # Get object retention
PUT    /<bucket>/<key>?legal-hold       # Set legal hold
GET    /<bucket>/<key>?legal-hold       # Get legal hold status

# Multipart Upload
POST   /<bucket>/<key>?uploads                    # Initiate multipart upload
PUT    /<bucket>/<key>?uploadId=X&partNumber=N    # Upload part
PUT    /<bucket>/<key>?uploadId=X&partNumber=N (with x-amz-copy-source)  # UploadPartCopy
POST   /<bucket>/<key>?uploadId=X                 # Complete multipart upload
DELETE /<bucket>/<key>?uploadId=X                 # Abort multipart upload
GET    /<bucket>/<key>?uploadId=X                 # List parts

# Copy Operations
PUT    /<bucket>/<key> (with x-amz-copy-source header)  # CopyObject

# Admin API
GET    /admin/site                      # Get local site info
PUT    /admin/site                      # Update local site
GET    /admin/sites                     # List peer sites
POST   /admin/sites                     # Register peer site
GET    /admin/sites/<site_id>           # Get peer site
PUT    /admin/sites/<site_id>           # Update peer site
DELETE /admin/sites/<site_id>           # Unregister peer site
GET    /admin/sites/<site_id>/health    # Check peer health
GET    /admin/topology                  # Get cluster topology
GET    /admin/website-domains           # List domain mappings
POST   /admin/website-domains           # Create domain mapping
GET    /admin/website-domains/<domain>  # Get domain mapping
PUT    /admin/website-domains/<domain>  # Update domain mapping
DELETE /admin/website-domains/<domain>  # Delete domain mapping

# KMS API
GET    /kms/keys                        # List KMS keys
POST   /kms/keys                        # Create KMS key
GET    /kms/keys/<key_id>               # Get key details
DELETE /kms/keys/<key_id>               # Schedule key deletion
POST   /kms/keys/<key_id>/enable        # Enable key
POST   /kms/keys/<key_id>/disable       # Disable key
POST   /kms/keys/<key_id>/rotate        # Rotate key material
POST   /kms/encrypt                     # Encrypt data
POST   /kms/decrypt                     # Decrypt data
POST   /kms/generate-data-key           # Generate data key
POST   /kms/generate-random             # Generate random bytes
```

## 15. Health Check Endpoint

The API exposes a simple health check endpoint for monitoring and load balancer integration:

```bash
# Check API health
curl http://localhost:5000/myfsio/health

# Response
{"status": "ok", "version": "0.1.7"}
```

The response includes:
- `status`: Always `"ok"` when the server is running
- `version`: Current application version from `app/version.py`

Use this endpoint for:
- Load balancer health checks
- Kubernetes liveness/readiness probes
- Monitoring system integration (Prometheus, Datadog, etc.)
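For scripted monitoring, a stdlib-only probe is enough. A minimal sketch against a local instance:

```python
import json
import urllib.request

# Fail loudly if the service is down or reports anything but "ok".
with urllib.request.urlopen("http://localhost:5000/myfsio/health", timeout=5) as resp:
    payload = json.load(resp)

assert payload["status"] == "ok", payload
print(f"MyFSIO {payload['version']} is healthy")
```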
## 16. Object Lock & Retention

Object Lock prevents objects from being deleted or overwritten for a specified retention period. MyFSIO supports both GOVERNANCE and COMPLIANCE modes.

### Retention Modes

| Mode | Description |
|------|-------------|
| **GOVERNANCE** | Objects can't be deleted by normal users, but users with `s3:BypassGovernanceRetention` permission can override |
| **COMPLIANCE** | Objects can't be deleted or overwritten by anyone, including root, until the retention period expires |

### Enabling Object Lock

Object Lock must be enabled when creating a bucket:

```bash
# Create bucket with Object Lock enabled
curl -X PUT "http://localhost:5000/my-bucket" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "x-amz-bucket-object-lock-enabled: true"

# Set default retention configuration
curl -X PUT "http://localhost:5000/my-bucket?object-lock" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
    "ObjectLockEnabled": "Enabled",
    "Rule": {
      "DefaultRetention": {
        "Mode": "GOVERNANCE",
        "Days": 30
      }
    }
  }'
```

### Per-Object Retention

Set retention on individual objects:

```bash
# Set object retention
curl -X PUT "http://localhost:5000/my-bucket/important.pdf?retention" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
    "Mode": "COMPLIANCE",
    "RetainUntilDate": "2025-12-31T23:59:59Z"
  }'

# Get object retention
curl "http://localhost:5000/my-bucket/important.pdf?retention" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```

### Legal Hold

Legal hold provides indefinite protection independent of retention settings:

```bash
# Enable legal hold
curl -X PUT "http://localhost:5000/my-bucket/document.pdf?legal-hold" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{"Status": "ON"}'

# Disable legal hold
curl -X PUT "http://localhost:5000/my-bucket/document.pdf?legal-hold" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{"Status": "OFF"}'

# Check legal hold status
curl "http://localhost:5000/my-bucket/document.pdf?legal-hold" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```
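The same retention and legal-hold calls from Python, using `requests` to mirror the JSON bodies above (credentials are placeholders):

```python
import requests

BASE = "http://localhost:5000"
HEADERS = {"X-Access-Key": "AKIA...", "X-Secret-Key": "..."}  # placeholder credentials

# COMPLIANCE retention until a fixed date (mirrors the curl example above).
resp = requests.put(
    f"{BASE}/my-bucket/important.pdf?retention",
    headers=HEADERS,
    json={"Mode": "COMPLIANCE", "RetainUntilDate": "2025-12-31T23:59:59Z"},
)
resp.raise_for_status()

# Switch legal hold on, then read the status back.
requests.put(
    f"{BASE}/my-bucket/document.pdf?legal-hold",
    headers=HEADERS,
    json={"Status": "ON"},
).raise_for_status()
print(requests.get(f"{BASE}/my-bucket/document.pdf?legal-hold", headers=HEADERS).json())
```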
## 17. Access Logging

Enable S3-style access logging to track all requests to your buckets.

### Configuration

```bash
# Enable access logging
curl -X PUT "http://localhost:5000/my-bucket?logging" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
    "LoggingEnabled": {
      "TargetBucket": "log-bucket",
      "TargetPrefix": "logs/my-bucket/"
    }
  }'

# Get logging configuration
curl "http://localhost:5000/my-bucket?logging" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Disable logging (empty configuration)
curl -X PUT "http://localhost:5000/my-bucket?logging" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{}'
```

### Log Format

Access logs are written in S3-compatible format with fields including:
- Timestamp, bucket, key
- Operation (REST.GET.OBJECT, REST.PUT.OBJECT, etc.)
- Request ID, requester, source IP
- HTTP status, error code, bytes sent
- Total time, turn-around time
- Referrer, User-Agent

## 18. Bucket Notifications & Webhooks

Configure event notifications to trigger webhooks when objects are created or deleted.

### Supported Events

| Event Type | Description |
|-----------|-------------|
| `s3:ObjectCreated:*` | Any object creation (PUT, POST, COPY, multipart) |
| `s3:ObjectCreated:Put` | Object created via PUT |
| `s3:ObjectCreated:Post` | Object created via POST |
| `s3:ObjectCreated:Copy` | Object created via COPY |
| `s3:ObjectCreated:CompleteMultipartUpload` | Multipart upload completed |
| `s3:ObjectRemoved:*` | Any object deletion |
| `s3:ObjectRemoved:Delete` | Object deleted |
| `s3:ObjectRemoved:DeleteMarkerCreated` | Delete marker created (versioned bucket) |

### Configuration

```bash
# Set notification configuration
curl -X PUT "http://localhost:5000/my-bucket?notification" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
    "TopicConfigurations": [
      {
        "Id": "upload-notify",
        "TopicArn": "https://webhook.example.com/s3-events",
        "Events": ["s3:ObjectCreated:*"],
        "Filter": {
          "Key": {
            "FilterRules": [
              {"Name": "prefix", "Value": "uploads/"},
              {"Name": "suffix", "Value": ".jpg"}
            ]
          }
        }
      }
    ]
  }'

# Get notification configuration
curl "http://localhost:5000/my-bucket?notification" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```

### Webhook Payload

The webhook receives a JSON payload similar to AWS S3 event notifications:

```json
{
  "Records": [
    {
      "eventVersion": "2.1",
      "eventSource": "myfsio:s3",
      "eventTime": "2024-01-15T10:30:00.000Z",
      "eventName": "ObjectCreated:Put",
      "s3": {
        "bucket": {"name": "my-bucket"},
        "object": {
          "key": "uploads/photo.jpg",
          "size": 102400,
          "eTag": "abc123..."
        }
      }
    }
  ]
}
```

### Security Notes

- Webhook URLs are validated to prevent SSRF attacks
- Internal/private IP ranges are blocked by default
- Use HTTPS endpoints in production
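A minimal receiver for these events might look like the following Flask sketch (the route and handling logic are illustrative, not part of MyFSIO):

```python
from flask import Flask, request

app = Flask(__name__)


@app.route("/s3-events", methods=["POST"])
def s3_events():
    # Payload shape follows the example above.
    body = request.get_json(force=True, silent=True) or {}
    for record in body.get("Records", []):
        s3 = record.get("s3", {})
        print(
            record.get("eventName"),
            s3.get("bucket", {}).get("name"),
            s3.get("object", {}).get("key"),
        )
    return "", 204


if __name__ == "__main__":
    app.run(port=8080)
```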
## 19. SelectObjectContent (SQL Queries)

Query CSV, JSON, or Parquet files directly using SQL without downloading the entire object. Requires DuckDB to be installed.

### Prerequisites

```bash
pip install duckdb
```

### Usage

```bash
# Query a CSV file
curl -X POST "http://localhost:5000/my-bucket/data.csv?select" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
    "Expression": "SELECT name, age FROM s3object WHERE age > 25",
    "ExpressionType": "SQL",
    "InputSerialization": {
      "CSV": {
        "FileHeaderInfo": "USE",
        "FieldDelimiter": ","
      }
    },
    "OutputSerialization": {
      "JSON": {}
    }
  }'

# Query a JSON file
curl -X POST "http://localhost:5000/my-bucket/data.json?select" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
    "Expression": "SELECT * FROM s3object s WHERE s.status = '\''active'\''",
    "ExpressionType": "SQL",
    "InputSerialization": {"JSON": {"Type": "LINES"}},
    "OutputSerialization": {"JSON": {}}
  }'
```

### Supported Input Formats

| Format | Options |
|--------|---------|
| **CSV** | `FileHeaderInfo` (USE, IGNORE, NONE), `FieldDelimiter`, `QuoteCharacter`, `RecordDelimiter` |
| **JSON** | `Type` (DOCUMENT, LINES) |
| **Parquet** | Automatic schema detection |

### Output Formats

- **JSON**: Returns results as JSON records
- **CSV**: Returns results as CSV
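From Python, the same query can be issued with `requests`, mirroring the JSON request shape above (credentials are placeholders):

```python
import requests

BASE = "http://localhost:5000"
HEADERS = {"X-Access-Key": "AKIA...", "X-Secret-Key": "..."}  # placeholder credentials

query = {
    "Expression": "SELECT name, age FROM s3object WHERE age > 25",
    "ExpressionType": "SQL",
    "InputSerialization": {"CSV": {"FileHeaderInfo": "USE", "FieldDelimiter": ","}},
    "OutputSerialization": {"JSON": {}},
}

resp = requests.post(f"{BASE}/my-bucket/data.csv?select", headers=HEADERS, json=query)
resp.raise_for_status()
print(resp.text)  # matching records, serialized per OutputSerialization
```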
## 20. PostObject (HTML Form Upload)

Upload objects using HTML forms with policy-based authorization. Useful for browser-based direct uploads.

### Form Fields

| Field | Required | Description |
|-------|----------|-------------|
| `key` | Yes | Object key (can include `${filename}` placeholder) |
| `file` | Yes | The file to upload |
| `policy` | No | Base64-encoded policy document |
| `x-amz-signature` | No | Policy signature |
| `x-amz-credential` | No | Credential scope |
| `x-amz-algorithm` | No | Signing algorithm (AWS4-HMAC-SHA256) |
| `x-amz-date` | No | Request timestamp |
| `Content-Type` | No | MIME type of the file |
| `x-amz-meta-*` | No | Custom metadata |

### Example HTML Form

```html
<form action="http://localhost:5000/my-bucket" method="post" enctype="multipart/form-data">
  <input type="hidden" name="key" value="uploads/${filename}">
  <input type="hidden" name="Content-Type" value="image/jpeg">
  <input type="hidden" name="x-amz-meta-user" value="john">
  <input type="file" name="file">
  <button type="submit">Upload</button>
</form>
```

### With Policy (Signed Upload)

For authenticated uploads, include a policy document:

```bash
# Generate policy and signature using boto3 or similar
# Then include in form:
# - policy: base64(policy_document)
# - x-amz-signature: HMAC-SHA256(policy, signing_key)
# - x-amz-credential: access_key/date/region/s3/aws4_request
# - x-amz-algorithm: AWS4-HMAC-SHA256
# - x-amz-date: YYYYMMDDTHHMMSSZ
```
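With boto3, `generate_presigned_post` produces the policy, signature, and companion form fields in one call. A sketch against a local instance (credentials are placeholders; it is assumed here that MyFSIO accepts the resulting policy the same way AWS does):

```python
import boto3
import requests

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:5000",  # MyFSIO API
    aws_access_key_id="AKIA...",           # placeholder credentials
    aws_secret_access_key="...",
    region_name="us-east-1",
)

# Returns the form action URL plus the fields (policy, x-amz-signature, ...)
# that the HTML form or an HTTP client must submit alongside the file.
post = s3.generate_presigned_post(Bucket="my-bucket", Key="uploads/photo.jpg", ExpiresIn=3600)

with open("photo.jpg", "rb") as fh:
    resp = requests.post(post["url"], data=post["fields"], files={"file": fh})
print(resp.status_code)
```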
## 21. Advanced S3 Operations

### CopyObject

Copy objects within or between buckets:

```bash
# Copy within same bucket
curl -X PUT "http://localhost:5000/my-bucket/copy-of-file.txt" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "x-amz-copy-source: /my-bucket/original-file.txt"

# Copy to different bucket
curl -X PUT "http://localhost:5000/other-bucket/file.txt" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "x-amz-copy-source: /my-bucket/original-file.txt"

# Copy with metadata replacement
curl -X PUT "http://localhost:5000/my-bucket/file.txt" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "x-amz-copy-source: /my-bucket/file.txt" \
  -H "x-amz-metadata-directive: REPLACE" \
  -H "x-amz-meta-newkey: newvalue"
```

### UploadPartCopy

Copy data from an existing object into a multipart upload part:

```bash
# Initiate multipart upload
UPLOAD_ID=$(curl -X POST "http://localhost:5000/my-bucket/large-file.bin?uploads" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." | jq -r '.UploadId')

# Copy bytes 0-10485759 from source as part 1
curl -X PUT "http://localhost:5000/my-bucket/large-file.bin?uploadId=$UPLOAD_ID&partNumber=1" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "x-amz-copy-source: /source-bucket/source-file.bin" \
  -H "x-amz-copy-source-range: bytes=0-10485759"

# Copy bytes 10485760-20971519 as part 2
curl -X PUT "http://localhost:5000/my-bucket/large-file.bin?uploadId=$UPLOAD_ID&partNumber=2" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "x-amz-copy-source: /source-bucket/source-file.bin" \
  -H "x-amz-copy-source-range: bytes=10485760-20971519"
```
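The same flow with boto3, which signs each request with SigV4 (a sketch; bucket and key names are placeholders, and boto3 compatibility for multipart copy is assumed based on the UI's boto3-based proxy):

```python
import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:5000",  # MyFSIO API
    aws_access_key_id="AKIA...",           # placeholder credentials
    aws_secret_access_key="...",
    region_name="us-east-1",
)

mpu = s3.create_multipart_upload(Bucket="my-bucket", Key="large-file.bin")
upload_id = mpu["UploadId"]

parts = []
for part_number, byte_range in ((1, "bytes=0-10485759"), (2, "bytes=10485760-20971519")):
    # Each part is filled by server-side copy from the source object's byte range.
    result = s3.upload_part_copy(
        Bucket="my-bucket",
        Key="large-file.bin",
        PartNumber=part_number,
        UploadId=upload_id,
        CopySource={"Bucket": "source-bucket", "Key": "source-file.bin"},
        CopySourceRange=byte_range,
    )
    parts.append({"PartNumber": part_number, "ETag": result["CopyPartResult"]["ETag"]})

s3.complete_multipart_upload(
    Bucket="my-bucket",
    Key="large-file.bin",
    UploadId=upload_id,
    MultipartUpload={"Parts": parts},
)
```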
### Range Requests

Download partial content using the Range header:

```bash
# Get first 1000 bytes
curl "http://localhost:5000/my-bucket/large-file.bin" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "Range: bytes=0-999"

# Get bytes 1000-1999
curl "http://localhost:5000/my-bucket/large-file.bin" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "Range: bytes=1000-1999"

# Get last 500 bytes
curl "http://localhost:5000/my-bucket/large-file.bin" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "Range: bytes=-500"

# Get from byte 5000 to end
curl "http://localhost:5000/my-bucket/large-file.bin" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "Range: bytes=5000-"
```

Range responses include:
- HTTP 206 Partial Content status
- `Content-Range` header showing the byte range
- `Accept-Ranges: bytes` header

### Conditional Requests

Use conditional headers for cache validation:

```bash
# Only download if modified since
curl "http://localhost:5000/my-bucket/file.txt" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "If-Modified-Since: Wed, 15 Jan 2025 10:00:00 GMT"

# Only download if ETag doesn't match (changed)
curl "http://localhost:5000/my-bucket/file.txt" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "If-None-Match: \"abc123...\""

# Only download if ETag matches
curl "http://localhost:5000/my-bucket/file.txt" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "If-Match: \"abc123...\""
```

## 22. Access Control Lists (ACLs)

ACLs provide legacy-style permission management for buckets and objects.

### Canned ACLs

| ACL | Description |
|-----|-------------|
| `private` | Owner gets FULL_CONTROL (default) |
| `public-read` | Owner FULL_CONTROL, public READ |
| `public-read-write` | Owner FULL_CONTROL, public READ and WRITE |
| `authenticated-read` | Owner FULL_CONTROL, authenticated users READ |

### Setting ACLs

```bash
# Set bucket ACL using canned ACL
curl -X PUT "http://localhost:5000/my-bucket?acl" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "x-amz-acl: public-read"

# Set object ACL
curl -X PUT "http://localhost:5000/my-bucket/file.txt?acl" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "x-amz-acl: private"

# Set ACL during upload
curl -X PUT "http://localhost:5000/my-bucket/file.txt" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "x-amz-acl: public-read" \
  --data-binary @file.txt

# Get bucket ACL
curl "http://localhost:5000/my-bucket?acl" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Get object ACL
curl "http://localhost:5000/my-bucket/file.txt?acl" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```

### ACL vs Bucket Policies

- **ACLs**: Simple, limited options, legacy approach
- **Bucket Policies**: Powerful, flexible, recommended for new deployments

For most use cases, prefer bucket policies over ACLs.

## 23. Object & Bucket Tagging

Add metadata tags to buckets and objects for organization, cost allocation, or lifecycle rule filtering.

### Bucket Tagging

```bash
# Set bucket tags
curl -X PUT "http://localhost:5000/my-bucket?tagging" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
    "TagSet": [
      {"Key": "Environment", "Value": "Production"},
      {"Key": "Team", "Value": "Engineering"}
    ]
  }'

# Get bucket tags
curl "http://localhost:5000/my-bucket?tagging" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Delete bucket tags
curl -X DELETE "http://localhost:5000/my-bucket?tagging" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```

### Object Tagging

```bash
# Set object tags
curl -X PUT "http://localhost:5000/my-bucket/file.txt?tagging" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
    "TagSet": [
      {"Key": "Classification", "Value": "Confidential"},
      {"Key": "Owner", "Value": "john@example.com"}
    ]
  }'

# Get object tags
curl "http://localhost:5000/my-bucket/file.txt?tagging" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Delete object tags
curl -X DELETE "http://localhost:5000/my-bucket/file.txt?tagging" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Set tags during upload
curl -X PUT "http://localhost:5000/my-bucket/file.txt" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -H "x-amz-tagging: Environment=Staging&Team=QA" \
  --data-binary @file.txt
```

### Tagging Limits

- Maximum 50 tags per object (configurable via `OBJECT_TAG_LIMIT`)
- Tag key: 1-128 Unicode characters
- Tag value: 0-256 Unicode characters

### Use Cases

- **Lifecycle Rules**: Filter objects for expiration by tag
- **Access Control**: Use tag conditions in bucket policies
- **Cost Tracking**: Group objects by project or department
- **Automation**: Trigger actions based on object tags

## 24. CORS Configuration

Configure Cross-Origin Resource Sharing for browser-based applications.

### Setting CORS Rules

```bash
# Set CORS configuration
curl -X PUT "http://localhost:5000/my-bucket?cors" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..." \
  -d '{
    "CORSRules": [
      {
        "AllowedOrigins": ["https://example.com", "https://app.example.com"],
        "AllowedMethods": ["GET", "PUT", "POST", "DELETE"],
        "AllowedHeaders": ["*"],
        "ExposeHeaders": ["ETag", "x-amz-meta-*"],
        "MaxAgeSeconds": 3600
      }
    ]
  }'

# Get CORS configuration
curl "http://localhost:5000/my-bucket?cors" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Delete CORS configuration
curl -X DELETE "http://localhost:5000/my-bucket?cors" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```

### CORS Rule Fields

| Field | Description |
|-------|-------------|
| `AllowedOrigins` | Origins allowed to access the bucket (required) |
| `AllowedMethods` | HTTP methods allowed (GET, PUT, POST, DELETE, HEAD) |
| `AllowedHeaders` | Request headers allowed in preflight |
| `ExposeHeaders` | Response headers visible to the browser |
| `MaxAgeSeconds` | How long the browser can cache the preflight response |

## 25. List Objects API v2

MyFSIO supports both ListBucketResult v1 and v2 APIs.

### Using v2 API

```bash
# List with v2 (supports continuation tokens)
curl "http://localhost:5000/my-bucket?list-type=2" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# With prefix and delimiter (folder-like listing)
curl "http://localhost:5000/my-bucket?list-type=2&prefix=photos/&delimiter=/" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Pagination with continuation token
curl "http://localhost:5000/my-bucket?list-type=2&max-keys=100&continuation-token=TOKEN" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."

# Start after specific key
curl "http://localhost:5000/my-bucket?list-type=2&start-after=photos/2024/" \
  -H "X-Access-Key: ..." -H "X-Secret-Key: ..."
```

### v1 vs v2 Differences

| Feature | v1 | v2 |
|---------|----|----|
| Pagination | `marker` | `continuation-token` |
| Start position | `marker` | `start-after` |
| Fetch owner info | Always included | Use `fetch-owner=true` |
| Max keys | 1000 | 1000 |

### Query Parameters

| Parameter | Description |
|-----------|-------------|
| `list-type` | Set to `2` for v2 API |
| `prefix` | Filter objects by key prefix |
| `delimiter` | Group objects (typically `/`) |
| `max-keys` | Maximum results (1-1000, default 1000) |
| `continuation-token` | Token from previous response |
| `start-after` | Start listing after this key |
| `fetch-owner` | Include owner info in response |
| `encoding-type` | Set to `url` for URL-encoded keys |
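With boto3, the `list_objects_v2` paginator drives the continuation-token loop for you. A sketch (credentials are placeholders):

```python
import boto3

s3 = boto3.client(
    "s3",
    endpoint_url="http://localhost:5000",  # MyFSIO API
    aws_access_key_id="AKIA...",           # placeholder credentials
    aws_secret_access_key="...",
    region_name="us-east-1",
)

# The paginator follows NextContinuationToken across pages automatically.
paginator = s3.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket="my-bucket", Prefix="photos/", Delimiter="/"):
    for common in page.get("CommonPrefixes", []):
        print("DIR", common["Prefix"])
    for obj in page.get("Contents", []):
        print("OBJ", obj["Key"], obj["Size"])
```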
## 26. Static Website Hosting

MyFSIO can serve S3 buckets as static websites via custom domain mappings. When a request arrives with a `Host` header matching a mapped domain, MyFSIO resolves the bucket and serves objects directly.

### Enabling

Set the environment variable:

```bash
WEBSITE_HOSTING_ENABLED=true
```

When disabled, all website hosting endpoints return 400 and domain-based serving is skipped.

### Configuration

| Variable | Default | Description |
|----------|---------|-------------|
| `WEBSITE_HOSTING_ENABLED` | `false` | Master switch for website hosting |

### Setting Up a Website

**Step 1: Configure the bucket website settings**

```bash
curl -X PUT "http://localhost:5000/my-site?website" \
  -H "Authorization: ..." \
  -d '<?xml version="1.0" encoding="UTF-8"?>
<WebsiteConfiguration>
  <IndexDocument><Suffix>index.html</Suffix></IndexDocument>
  <ErrorDocument><Key>404.html</Key></ErrorDocument>
</WebsiteConfiguration>'
```

- `IndexDocument` with `Suffix` is required (must not contain `/`)
- `ErrorDocument` is optional

**Step 2: Map a domain to the bucket**

```bash
curl -X POST "http://localhost:5000/admin/website-domains" \
  -H "Authorization: ..." \
  -H "Content-Type: application/json" \
  -d '{"domain": "example.com", "bucket": "my-site"}'
```

**Step 3: Point your domain to MyFSIO**

For HTTP-only (direct access), point DNS to the MyFSIO host on port 5000.

For HTTPS (recommended), use a reverse proxy. The critical requirement is passing the original `Host` header so MyFSIO can match the domain to a bucket.

**nginx example:**

```nginx
server {
    server_name example.com;
    listen 443 ssl;

    ssl_certificate /etc/ssl/certs/example.com.pem;
    ssl_certificate_key /etc/ssl/private/example.com.key;

    location / {
        proxy_pass http://127.0.0.1:5000;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
```

`proxy_set_header Host $host;` is required — without it, MyFSIO cannot match the incoming domain to a bucket. You do not need any path-based routing rules; MyFSIO handles all object resolution internally.

### How Domain Routing Works

1. A request arrives with `Host: example.com`
2. MyFSIO's `before_request` hook strips the port and looks up the domain in the `WebsiteDomainStore`
3. If a match is found, it loads the bucket's website config (index/error documents)
4. Object key resolution:
   - `/` or trailing `/` → append `index_document` (e.g., `index.html`)
   - `/path` → try exact match, then try `path/index_document`
   - Not found → serve `error_document` with 404 status
5. If no domain match is found, the request falls through to normal S3 API / UI routing (see the sketch after this list)
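A runnable sketch of this resolution order, with in-memory stand-ins for the domain store, website config, and object storage:

```python
# Sketch of the resolution order above; not MyFSIO's actual hook.
# DOMAINS, WEBSITE_CONFIG, and OBJECTS are in-memory stand-ins for the
# WebsiteDomainStore, per-bucket website config, and object storage.
DOMAINS = {"example.com": "my-site"}
WEBSITE_CONFIG = {"my-site": {"index_document": "index.html", "error_document": "404.html"}}
OBJECTS = {("my-site", "index.html"), ("my-site", "docs/index.html")}


def resolve_website_request(host: str, path: str):
    domain = host.rsplit(":", 1)[0].lower()       # steps 1-2: strip port, look up domain
    bucket = DOMAINS.get(domain)
    if bucket is None:
        return None                               # step 5: fall through to S3 API / UI
    cfg = WEBSITE_CONFIG[bucket]                  # step 3: load index/error documents
    key = path.lstrip("/")
    if key == "" or key.endswith("/"):
        key += cfg["index_document"]              # step 4: '/' -> index document
    if (bucket, key) in OBJECTS:
        return bucket, key, 200
    nested = f"{key}/{cfg['index_document']}"
    if (bucket, nested) in OBJECTS:               # step 4: /path -> path/index document
        return bucket, nested, 200
    return bucket, cfg["error_document"], 404     # step 4: not found -> error document


print(resolve_website_request("example.com:443", "/"))   # ('my-site', 'index.html', 200)
print(resolve_website_request("example.com", "/docs"))   # ('my-site', 'docs/index.html', 200)
print(resolve_website_request("other.com", "/"))         # None
```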
### Domain Mapping Admin API

All endpoints require admin (`iam:*`) permissions.

| Method | Route | Body | Description |
|--------|-------|------|-------------|
| `GET` | `/admin/website-domains` | — | List all mappings |
| `POST` | `/admin/website-domains` | `{"domain": "...", "bucket": "..."}` | Create mapping |
| `GET` | `/admin/website-domains/<domain>` | — | Get single mapping |
| `PUT` | `/admin/website-domains/<domain>` | `{"bucket": "..."}` | Update mapping |
| `DELETE` | `/admin/website-domains/<domain>` | — | Delete mapping |

### Bucket Website API

| Method | Route | Description |
|--------|-------|-------------|
| `PUT` | `/<bucket>?website` | Set website config (XML body) |
| `GET` | `/<bucket>?website` | Get website config (XML response) |
| `DELETE` | `/<bucket>?website` | Remove website config |

### Web UI

- **Per-bucket config:** Bucket Details → Properties tab → "Static Website Hosting" card
- **Domain management:** Sidebar → "Domains" (visible when hosting is enabled and user is admin)
requirements.txt
@@ -10,3 +10,4 @@ waitress>=3.0.2
 psutil>=7.1.3
 cryptography>=46.0.3
 defusedxml>=0.7.1
+duckdb>=1.4.4
run.py
@@ -5,6 +5,7 @@ import argparse
 import os
 import sys
 import warnings
+import multiprocessing
 from multiprocessing import Process
 from pathlib import Path

@@ -87,6 +88,10 @@ def serve_ui(port: int, prod: bool = False, config: Optional[AppConfig] = None)


 if __name__ == "__main__":
+    multiprocessing.freeze_support()
+    if _is_frozen():
+        multiprocessing.set_start_method("spawn", force=True)
+
     parser = argparse.ArgumentParser(description="Run the S3 clone services.")
     parser.add_argument("--mode", choices=["api", "ui", "both"], default="both")
     parser.add_argument("--api-port", type=int, default=5000)
install.sh
@@ -192,31 +192,86 @@ cat > "$INSTALL_DIR/myfsio.env" << EOF
 # Generated by install.sh on $(date)
 # Documentation: https://go.jzwsite.com/myfsio

-# Storage paths
+# =============================================================================
+# STORAGE PATHS
+# =============================================================================
 STORAGE_ROOT=$DATA_DIR
 LOG_DIR=$LOG_DIR

-# Network
+# =============================================================================
+# NETWORK
+# =============================================================================
 APP_HOST=0.0.0.0
 APP_PORT=$API_PORT

-# Security - CHANGE IN PRODUCTION
-SECRET_KEY=$SECRET_KEY
-CORS_ORIGINS=*
-
-# Public URL (set this if behind a reverse proxy)
+# Public URL (set this if behind a reverse proxy for presigned URLs)
 $(if [[ -n "$API_URL" ]]; then echo "API_BASE_URL=$API_URL"; else echo "# API_BASE_URL=https://s3.example.com"; fi)

-# Logging
+# =============================================================================
+# SECURITY
+# =============================================================================
+# Secret key for session signing (auto-generated if not set)
+SECRET_KEY=$SECRET_KEY
+
+# CORS settings - restrict in production
+CORS_ORIGINS=*
+
+# Brute-force protection
+AUTH_MAX_ATTEMPTS=5
+AUTH_LOCKOUT_MINUTES=15
+
+# Reverse proxy settings (set to number of trusted proxies in front)
+# NUM_TRUSTED_PROXIES=1
+
+# Allow internal admin endpoints (only enable on trusted networks)
+# ALLOW_INTERNAL_ENDPOINTS=false
+
+# Allowed hosts for redirects (comma-separated, empty = restrict all)
+# ALLOWED_REDIRECT_HOSTS=
+
+# =============================================================================
+# LOGGING
+# =============================================================================
 LOG_LEVEL=INFO
 LOG_TO_FILE=true

-# Rate limiting
+# =============================================================================
+# RATE LIMITING
+# =============================================================================
 RATE_LIMIT_DEFAULT=200 per minute
+# RATE_LIMIT_LIST_BUCKETS=60 per minute
+# RATE_LIMIT_BUCKET_OPS=120 per minute
+# RATE_LIMIT_OBJECT_OPS=240 per minute
+# RATE_LIMIT_ADMIN=60 per minute

-# Optional: Encryption (uncomment to enable)
+# =============================================================================
+# SERVER TUNING (0 = auto-detect based on system resources)
+# =============================================================================
+# SERVER_THREADS=0
+# SERVER_CONNECTION_LIMIT=0
+# SERVER_BACKLOG=0
+# SERVER_CHANNEL_TIMEOUT=120
+
+# =============================================================================
+# ENCRYPTION (uncomment to enable)
+# =============================================================================
 # ENCRYPTION_ENABLED=true
 # KMS_ENABLED=true
+
+# =============================================================================
+# SITE SYNC / REPLICATION (for multi-site deployments)
+# =============================================================================
+# SITE_ID=site-1
+# SITE_ENDPOINT=https://s3-site1.example.com
+# SITE_REGION=us-east-1
+# SITE_SYNC_ENABLED=false
+
+# =============================================================================
+# OPTIONAL FEATURES
+# =============================================================================
+# LIFECYCLE_ENABLED=false
+# METRICS_HISTORY_ENABLED=false
+# OPERATION_METRICS_ENABLED=false
 EOF
 chmod 600 "$INSTALL_DIR/myfsio.env"
 echo " [OK] Created $INSTALL_DIR/myfsio.env"
@@ -317,11 +372,36 @@ if [[ "$SKIP_SYSTEMD" != true ]]; then
 fi
 echo ""

-sleep 2
+echo " Waiting for service initialization..."
+sleep 3
+
 echo " Service Status:"
 echo " ---------------"
 if systemctl is-active --quiet myfsio; then
     echo " [OK] MyFSIO is running"
+
+    IAM_FILE="$DATA_DIR/.myfsio.sys/config/iam.json"
+    if [[ -f "$IAM_FILE" ]]; then
+        echo ""
+        echo " ============================================"
+        echo " ADMIN CREDENTIALS (save these securely!)"
+        echo " ============================================"
+        if command -v jq &>/dev/null; then
+            ACCESS_KEY=$(jq -r '.users[0].access_key' "$IAM_FILE" 2>/dev/null)
+            SECRET_KEY=$(jq -r '.users[0].secret_key' "$IAM_FILE" 2>/dev/null)
+        else
+            ACCESS_KEY=$(grep -o '"access_key"[[:space:]]*:[[:space:]]*"[^"]*"' "$IAM_FILE" | head -1 | sed 's/.*"\([^"]*\)"$/\1/')
+            SECRET_KEY=$(grep -o '"secret_key"[[:space:]]*:[[:space:]]*"[^"]*"' "$IAM_FILE" | head -1 | sed 's/.*"\([^"]*\)"$/\1/')
+        fi
+        if [[ -n "$ACCESS_KEY" && -n "$SECRET_KEY" ]]; then
+            echo " Access Key: $ACCESS_KEY"
+            echo " Secret Key: $SECRET_KEY"
+        else
+            echo " [!] Could not parse credentials from $IAM_FILE"
+            echo " Check the file manually or view service logs."
+        fi
+        echo " ============================================"
+    fi
 else
     echo " [WARNING] MyFSIO may not have started correctly"
     echo " Check logs with: journalctl -u myfsio -f"
@@ -346,19 +426,26 @@ echo "Access Points:"
 echo " API: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo "localhost"):$API_PORT"
 echo " UI: http://$(hostname -I 2>/dev/null | awk '{print $1}' || echo "localhost"):$UI_PORT/ui"
 echo ""
-echo "Default Credentials:"
-echo " Username: localadmin"
-echo " Password: localadmin"
-echo " [!] WARNING: Change these immediately after first login!"
+echo "Credentials:"
+echo " Admin credentials were shown above (if service was started)."
+echo " You can also find them in: $DATA_DIR/.myfsio.sys/config/iam.json"
 echo ""
 echo "Configuration Files:"
 echo " Environment: $INSTALL_DIR/myfsio.env"
 echo " IAM Users: $DATA_DIR/.myfsio.sys/config/iam.json"
 echo " Bucket Policies: $DATA_DIR/.myfsio.sys/config/bucket_policies.json"
+echo " Secret Key: $DATA_DIR/.myfsio.sys/config/.secret (auto-generated)"
+echo ""
+echo "Security Notes:"
+echo " - Rate limiting is enabled by default (200 req/min)"
+echo " - Brute-force protection: 5 attempts, 15 min lockout"
+echo " - Set CORS_ORIGINS to specific domains in production"
+echo " - Set NUM_TRUSTED_PROXIES if behind a reverse proxy"
 echo ""
 echo "Useful Commands:"
 echo " Check status: sudo systemctl status myfsio"
 echo " View logs: sudo journalctl -u myfsio -f"
+echo " Validate config: $INSTALL_DIR/myfsio --check-config"
 echo " Restart: sudo systemctl restart myfsio"
 echo " Stop: sudo systemctl stop myfsio"
 echo ""
uninstall.sh
@@ -88,7 +88,8 @@ echo "The following items will be removed:"
 echo ""
 echo " Install directory: $INSTALL_DIR"
 if [[ "$KEEP_DATA" != true ]]; then
-    echo " Data directory: $DATA_DIR (ALL YOUR DATA WILL BE DELETED!)"
+    echo " Data directory: $DATA_DIR"
+    echo " [!] ALL DATA, IAM USERS, AND ENCRYPTION KEYS WILL BE DELETED!"
 else
     echo " Data directory: $DATA_DIR (WILL BE KEPT)"
 fi

@@ -227,8 +228,15 @@ echo ""
 if [[ "$KEEP_DATA" == true ]]; then
     echo "Your data has been preserved at: $DATA_DIR"
     echo ""
-    echo "To reinstall MyFSIO with existing data, run:"
-    echo " curl -fsSL https://go.jzwsite.com/myfsio-install | sudo bash"
+    echo "Preserved files include:"
+    echo " - All buckets and objects"
+    echo " - IAM configuration: $DATA_DIR/.myfsio.sys/config/iam.json"
+    echo " - Bucket policies: $DATA_DIR/.myfsio.sys/config/bucket_policies.json"
+    echo " - Secret key: $DATA_DIR/.myfsio.sys/config/.secret"
+    echo " - Encryption keys: $DATA_DIR/.myfsio.sys/keys/ (if encryption was enabled)"
+    echo ""
+    echo "To reinstall MyFSIO with existing data:"
+    echo " ./install.sh --data-dir $DATA_DIR"
     echo ""
 fi
@@ -1081,11 +1081,17 @@ html.sidebar-will-collapse .sidebar-user {
 letter-spacing: 0.08em;
 }
 
+.main-content:has(.docs-sidebar) {
+overflow-x: visible;
+}
+
 .docs-sidebar {
 position: sticky;
 top: 1.5rem;
 border-radius: 1rem;
 border: 1px solid var(--myfsio-card-border);
+max-height: calc(100vh - 3rem);
+overflow-y: auto;
 }
 
 .docs-sidebar-callouts {
@@ -1282,6 +1288,20 @@ html.sidebar-will-collapse .sidebar-user {
 padding: 2rem 1rem;
 }
 
+#preview-text {
+padding: 1rem 1.125rem;
+max-height: 360px;
+overflow: auto;
+white-space: pre-wrap;
+word-break: break-word;
+font-family: 'SFMono-Regular', 'Menlo', 'Consolas', 'Liberation Mono', monospace;
+font-size: .8rem;
+line-height: 1.6;
+tab-size: 4;
+color: var(--myfsio-text);
+background: transparent;
+}
+
 .upload-progress-stack {
 display: flex;
 flex-direction: column;
@@ -101,6 +101,7 @@
 const previewImage = document.getElementById('preview-image');
 const previewVideo = document.getElementById('preview-video');
 const previewAudio = document.getElementById('preview-audio');
+const previewText = document.getElementById('preview-text');
 const previewIframe = document.getElementById('preview-iframe');
 const downloadButton = document.getElementById('downloadButton');
 const presignButton = document.getElementById('presignButton');
@@ -182,6 +183,9 @@
 let visibleItems = [];
 let renderedRange = { start: 0, end: 0 };
 
+let memoizedVisibleItems = null;
+let memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
+
 const createObjectRow = (obj, displayKey = null) => {
 const tr = document.createElement('tr');
 tr.dataset.objectRow = '';
@@ -340,7 +344,21 @@
 }
 };
 
-const computeVisibleItems = () => {
+const computeVisibleItems = (forceRecompute = false) => {
+const currentInputs = {
+objectCount: allObjects.length,
+prefix: currentPrefix,
+filterTerm: currentFilterTerm
+};
+
+if (!forceRecompute &&
+memoizedVisibleItems !== null &&
+memoizedInputs.objectCount === currentInputs.objectCount &&
+memoizedInputs.prefix === currentInputs.prefix &&
+memoizedInputs.filterTerm === currentInputs.filterTerm) {
+return memoizedVisibleItems;
+}
+
 const items = [];
 const folders = new Set();
 
@@ -381,6 +399,8 @@
 return aKey.localeCompare(bKey);
 });
 
+memoizedVisibleItems = items;
+memoizedInputs = currentInputs;
 return items;
 };
 
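The hunk above adds input-keyed memoization: `computeVisibleItems` now recomputes only when the object count, current prefix, or filter term changes. A minimal Python sketch of the same pattern (hypothetical names, not project code; note that keying on the object count, like the JS, is a deliberately cheap change detector):

from typing import Optional

class VisibleItemsCache:
    def __init__(self) -> None:
        self.inputs: tuple = (-1, None, None)  # (object_count, prefix, filter_term)
        self.items: Optional[list] = None

    def compute(self, objects: list, prefix: str, filter_term: str,
                force_recompute: bool = False) -> list:
        key = (len(objects), prefix, filter_term)
        # Serve the cached list when no input changed, mirroring the JS guard.
        if not force_recompute and self.items is not None and self.inputs == key:
            return self.items
        items = sorted(
            (o for o in objects
             if o["key"].startswith(prefix) and filter_term in o["key"]),
            key=lambda o: o["key"],
        )
        self.inputs, self.items = key, items
        return items

cache = VisibleItemsCache()
objs = [{"key": "a/1.txt"}, {"key": "a/2.txt"}, {"key": "b/3.txt"}]
first = cache.compute(objs, "a/", "")
print(first)                                   # the two "a/" objects, sorted
print(cache.compute(objs, "a/", "") is first)  # True: served from cache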
@@ -497,6 +517,9 @@
 };
 };
 
+let lastStreamRenderTime = 0;
+const STREAM_RENDER_THROTTLE_MS = 500;
+
 const flushPendingStreamObjects = () => {
 if (pendingStreamObjects.length === 0) return;
 const batch = pendingStreamObjects.splice(0, pendingStreamObjects.length);
@@ -513,6 +536,19 @@
 loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()}${countText} loading...`;
 }
 }
+if (objectsLoadingRow && objectsLoadingRow.parentNode) {
+const loadingText = objectsLoadingRow.querySelector('p');
+if (loadingText) {
+const countText = totalObjectCount > 0 ? ` of ${totalObjectCount.toLocaleString()}` : '';
+loadingText.textContent = `Loading ${loadedObjectCount.toLocaleString()}${countText} objects...`;
+}
+}
+const now = performance.now();
+if (!streamingComplete && now - lastStreamRenderTime < STREAM_RENDER_THROTTLE_MS) {
+streamRenderScheduled = false;
+return;
+}
+lastStreamRenderTime = now;
 refreshVirtualList();
 streamRenderScheduled = false;
 };
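The 500 ms throttle above drops intermediate repaints while objects are still streaming in, but always allows a render once the stream completes. The same gate in Python, as an illustrative sketch:

import time

STREAM_RENDER_THROTTLE_S = 0.5
_last_render = 0.0

def maybe_render(streaming_complete: bool) -> bool:
    """Return True if a render should happen now."""
    global _last_render
    now = time.monotonic()
    if not streaming_complete and now - _last_render < STREAM_RENDER_THROTTLE_S:
        return False          # skip: too soon since the last render
    _last_render = now
    return True               # caller would refresh the list here

print(maybe_render(False))  # True (first call renders)
print(maybe_render(False))  # False (throttled)
print(maybe_render(True))   # True (final flush always renders)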
@@ -533,7 +569,10 @@
 loadedObjectCount = 0;
 totalObjectCount = 0;
 allObjects = [];
+memoizedVisibleItems = null;
+memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
 pendingStreamObjects = [];
+lastStreamRenderTime = 0;
 
 streamAbortController = new AbortController();
 
@@ -548,7 +587,10 @@
 throw new Error(`HTTP ${response.status}`);
 }
 
-if (objectsLoadingRow) objectsLoadingRow.remove();
+if (objectsLoadingRow) {
+const loadingText = objectsLoadingRow.querySelector('p');
+if (loadingText) loadingText.textContent = 'Receiving objects...';
+}
 
 const reader = response.body.getReader();
 const decoder = new TextDecoder();
@@ -576,6 +618,10 @@
 break;
 case 'count':
 totalObjectCount = msg.total_count || 0;
+if (objectsLoadingRow) {
+const loadingText = objectsLoadingRow.querySelector('p');
+if (loadingText) loadingText.textContent = `Loading 0 of ${totalObjectCount.toLocaleString()} objects...`;
+}
 break;
 case 'object':
 pendingStreamObjects.push(processStreamObject(msg));
@@ -609,11 +655,16 @@
 } catch (e) { }
 }
 
-flushPendingStreamObjects();
 streamingComplete = true;
+flushPendingStreamObjects();
 hasMoreObjects = false;
+totalObjectCount = loadedObjectCount;
 updateObjectCountBadge();
 
+if (objectsLoadingRow && objectsLoadingRow.parentNode) {
+objectsLoadingRow.remove();
+}
+
 if (loadMoreStatus) {
 loadMoreStatus.textContent = `${loadedObjectCount.toLocaleString()} objects`;
 }
@@ -643,6 +694,8 @@
 loadedObjectCount = 0;
 totalObjectCount = 0;
 allObjects = [];
+memoizedVisibleItems = null;
+memoizedInputs = { objectCount: -1, prefix: null, filterTerm: null };
 }
 
 if (append && loadMoreSpinner) {
@@ -985,13 +1038,15 @@
 };
 
 const navigateToFolder = (prefix) => {
+if (streamAbortController) {
+streamAbortController.abort();
+streamAbortController = null;
+}
+
 currentPrefix = prefix;
 
 if (scrollContainer) scrollContainer.scrollTop = 0;
 
-refreshVirtualList();
-renderBreadcrumb(prefix);
-
 selectedRows.clear();
 
 if (typeof updateBulkDeleteState === 'function') {
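Aborting the in-flight `streamAbortController` before switching folders keeps a stale listing from racing the new one. A comparable cancellation pattern sketched with asyncio (illustrative only, not project code):

import asyncio

current_task: asyncio.Task | None = None

async def stream_objects(prefix: str) -> None:
    await asyncio.sleep(1)          # stand-in for the streaming fetch
    print("rendered", prefix)

async def navigate(prefix: str) -> None:
    global current_task
    if current_task and not current_task.done():
        current_task.cancel()       # mirrors streamAbortController.abort()
    current_task = asyncio.create_task(stream_objects(prefix))

async def main() -> None:
    await navigate("a/")
    await navigate("b/")            # cancels the "a/" stream
    await asyncio.sleep(1.5)        # only "b/" renders

asyncio.run(main())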
@@ -1001,6 +1056,9 @@
 if (previewPanel) previewPanel.classList.add('d-none');
 if (previewEmpty) previewEmpty.classList.remove('d-none');
 activeRow = null;
+
+isLoadingObjects = false;
+loadObjects(false);
 };
 
 const renderObjectsView = () => {
@@ -1838,6 +1896,10 @@
 el.setAttribute('src', 'about:blank');
 }
 });
+if (previewText) {
+previewText.classList.add('d-none');
+previewText.textContent = '';
+}
 previewPlaceholder.classList.remove('d-none');
 };
 
@@ -1901,11 +1963,28 @@
 previewIframe.style.minHeight = '500px';
 previewIframe.classList.remove('d-none');
 previewPlaceholder.classList.add('d-none');
-} else if (previewUrl && lower.match(/\.(txt|log|json|md|csv|xml|html|htm|js|ts|py|java|c|cpp|h|css|scss|yaml|yml|toml|ini|cfg|conf|sh|bat)$/)) {
-previewIframe.src = previewUrl;
-previewIframe.style.minHeight = '200px';
-previewIframe.classList.remove('d-none');
+} else if (previewUrl && previewText && lower.match(/\.(txt|log|json|md|csv|xml|html|htm|js|ts|py|java|c|cpp|h|css|scss|yaml|yml|toml|ini|cfg|conf|sh|bat|rs|go|rb|php|sql|r|swift|kt|scala|pl|lua|zig|ex|exs|hs|erl|ps1|psm1|psd1|fish|zsh|env|properties|gradle|makefile|dockerfile|vagrantfile|gitignore|gitattributes|editorconfig|eslintrc|prettierrc)$/)) {
+previewText.textContent = 'Loading\u2026';
+previewText.classList.remove('d-none');
 previewPlaceholder.classList.add('d-none');
+const currentRow = row;
+fetch(previewUrl)
+.then((r) => {
+if (!r.ok) throw new Error(r.statusText);
+const len = parseInt(r.headers.get('Content-Length') || '0', 10);
+if (len > 512 * 1024) {
+return r.text().then((t) => t.slice(0, 512 * 1024) + '\n\n--- Truncated (file too large for preview) ---');
+}
+return r.text();
+})
+.then((text) => {
+if (activeRow !== currentRow) return;
+previewText.textContent = text;
+})
+.catch(() => {
+if (activeRow !== currentRow) return;
+previewText.textContent = 'Failed to load preview';
+});
 }
 
 const metadataUrl = row.dataset.metadataUrl;
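The preview fetch above caps text at 512 KB and discards responses that land after the user has selected a different row. A Python analogue of the size guard, as a hedged sketch (`requests` usage is standard; the truncation marker matches the UI string):

import requests

MAX_PREVIEW_BYTES = 512 * 1024

def fetch_preview(url: str) -> str:
    """Download at most MAX_PREVIEW_BYTES of a text object for display."""
    resp = requests.get(url, stream=True, timeout=10)
    resp.raise_for_status()
    chunks, total = [], 0
    for chunk in resp.iter_content(chunk_size=65536):
        chunks.append(chunk)
        total += len(chunk)
        if total > MAX_PREVIEW_BYTES:
            break                    # stop reading once the cap is exceeded
    text = b"".join(chunks)[:MAX_PREVIEW_BYTES].decode("utf-8", errors="replace")
    if total > MAX_PREVIEW_BYTES:
        text += "\n\n--- Truncated (file too large for preview) ---"
    return text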
@@ -4085,6 +4164,13 @@
 }
 });
 
+interceptForm('websiteForm', {
+successMessage: 'Website settings saved',
+onSuccess: function (data) {
+updateWebsiteCard(data.enabled !== false, data.index_document, data.error_document);
+}
+});
+
 interceptForm('bucketPolicyForm', {
 successMessage: 'Bucket policy saved',
 onSuccess: function (data) {
@@ -4145,6 +4231,59 @@
 });
 }
 
+function updateWebsiteCard(enabled, indexDoc, errorDoc) {
+var card = document.getElementById('bucket-website-card');
+if (!card) return;
+var alertContainer = card.querySelector('.alert');
+if (alertContainer) {
+if (enabled) {
+alertContainer.className = 'alert alert-success d-flex align-items-start mb-4';
+var detail = 'Index: <code>' + escapeHtml(indexDoc || 'index.html') + '</code>';
+if (errorDoc) detail += '<br>Error: <code>' + escapeHtml(errorDoc) + '</code>';
+alertContainer.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="me-2 flex-shrink-0" viewBox="0 0 16 16">' +
+'<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>' +
+'</svg><div><strong>Website hosting is enabled</strong>' +
+'<p class="mb-0 small">' + detail + '</p></div>';
+} else {
+alertContainer.className = 'alert alert-secondary d-flex align-items-start mb-4';
+alertContainer.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="me-2 flex-shrink-0" viewBox="0 0 16 16">' +
+'<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>' +
+'<path d="M4.646 4.646a.5.5 0 0 1 .708 0L8 7.293l2.646-2.647a.5.5 0 0 1 .708.708L8.707 8l2.647 2.646a.5.5 0 0 1-.708.708L8 8.707l-2.646 2.647a.5.5 0 0 1-.708-.708L7.293 8 4.646 5.354a.5.5 0 0 1 0-.708z"/>' +
+'</svg><div><strong>Website hosting is disabled</strong>' +
+'<p class="mb-0 small">Enable website hosting to serve bucket contents as a static website.</p></div>';
+}
+}
+var disableBtn = document.getElementById('disableWebsiteBtn');
+if (disableBtn) {
+disableBtn.style.display = enabled ? '' : 'none';
+}
+var submitBtn = document.getElementById('websiteSubmitBtn');
+if (submitBtn) {
+submitBtn.classList.remove('btn-primary', 'btn-success');
+submitBtn.classList.add(enabled ? 'btn-primary' : 'btn-success');
+}
+var submitLabel = document.getElementById('websiteSubmitLabel');
+if (submitLabel) {
+submitLabel.textContent = enabled ? 'Save Website Settings' : 'Enable Website Hosting';
+}
+}
+
+var disableWebsiteBtn = document.getElementById('disableWebsiteBtn');
+if (disableWebsiteBtn) {
+disableWebsiteBtn.addEventListener('click', function () {
+var form = document.getElementById('websiteForm');
+if (!form) return;
+document.getElementById('websiteAction').value = 'disable';
+window.UICore.submitFormAjax(form, {
+successMessage: 'Website hosting disabled',
+onSuccess: function (data) {
+document.getElementById('websiteAction').value = 'enable';
+updateWebsiteCard(false, null, null);
+}
+});
+});
+}
+
 function reloadReplicationPane() {
 var replicationPane = document.getElementById('replication-pane');
 if (!replicationPane) return;
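Note that `updateWebsiteCard` runs `escapeHtml` over the document names before splicing them into `innerHTML`; without it a user-supplied file name could inject markup. The equivalent precaution in Python, shown for contrast (a sketch; `render_detail` is a hypothetical helper, not project code):

import html

def render_detail(index_doc: str | None, error_doc: str | None) -> str:
    # Escape user-supplied names before embedding them in HTML.
    detail = f"Index: <code>{html.escape(index_doc or 'index.html')}</code>"
    if error_doc:
        detail += f"<br>Error: <code>{html.escape(error_doc)}</code>"
    return detail

print(render_detail('index.html', '<img src=x onerror=alert(1)>'))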
@@ -94,6 +94,21 @@
 </svg>
 <span>Metrics</span>
 </a>
+<a href="{{ url_for('ui.sites_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.sites_dashboard' %}active{% endif %}">
+<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
+<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
+</svg>
+<span>Sites</span>
+</a>
+{% endif %}
+{% if website_hosting_nav %}
+<a href="{{ url_for('ui.website_domains_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.website_domains_dashboard' %}active{% endif %}">
+<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
+<path d="M4.715 6.542 3.343 7.914a3 3 0 1 0 4.243 4.243l1.828-1.829A3 3 0 0 0 8.586 5.5L8 6.086a1.002 1.002 0 0 0-.154.199 2 2 0 0 1 .861 3.337L6.88 11.45a2 2 0 1 1-2.83-2.83l.793-.792a4.018 4.018 0 0 1-.128-1.287z"/>
+<path d="M6.586 4.672A3 3 0 0 0 7.414 9.5l.775-.776a2 2 0 0 1-.896-3.346L9.12 3.55a2 2 0 1 1 2.83 2.83l-.793.792c.112.42.155.855.128 1.287l1.372-1.372a3 3 0 1 0-4.243-4.243L6.586 4.672z"/>
+</svg>
+<span>Domains</span>
+</a>
 {% endif %}
 </div>
 <div class="nav-section">
@@ -179,6 +194,21 @@
 </svg>
 <span class="sidebar-link-text">Metrics</span>
 </a>
+<a href="{{ url_for('ui.sites_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.sites_dashboard' %}active{% endif %}" data-tooltip="Sites">
+<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
+<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
+</svg>
+<span class="sidebar-link-text">Sites</span>
+</a>
+{% endif %}
+{% if website_hosting_nav %}
+<a href="{{ url_for('ui.website_domains_dashboard') }}" class="sidebar-link {% if request.endpoint == 'ui.website_domains_dashboard' %}active{% endif %}" data-tooltip="Domains">
+<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
+<path d="M4.715 6.542 3.343 7.914a3 3 0 1 0 4.243 4.243l1.828-1.829A3 3 0 0 0 8.586 5.5L8 6.086a1.002 1.002 0 0 0-.154.199 2 2 0 0 1 .861 3.337L6.88 11.45a2 2 0 1 1-2.83-2.83l.793-.792a4.018 4.018 0 0 1-.128-1.287z"/>
+<path d="M6.586 4.672A3 3 0 0 0 7.414 9.5l.775-.776a2 2 0 0 1-.896-3.346L9.12 3.55a2 2 0 1 1 2.83 2.83l-.793.792c.112.42.155.855.128 1.287l1.372-1.372a3 3 0 1 0-4.243-4.243L6.586 4.672z"/>
+</svg>
+<span class="sidebar-link-text">Domains</span>
+</a>
 {% endif %}
 </div>
 <div class="nav-section">
@@ -321,7 +321,8 @@
 <img id="preview-image" class="img-fluid d-none w-100" alt="Object preview" style="display: block;" />
 <video id="preview-video" class="w-100 d-none" controls style="display: block;"></video>
 <audio id="preview-audio" class="w-100 d-none" controls style="display: block;"></audio>
-<iframe id="preview-iframe" class="w-100 d-none" loading="lazy" style="min-height: 200px;"></iframe>
+<pre id="preview-text" class="w-100 d-none m-0"></pre>
+<iframe id="preview-iframe" class="w-100 d-none" style="min-height: 200px;"></iframe>
 </div>
 </div>
 </div>
@@ -965,6 +966,89 @@
 {% endif %}
 </div>
 </div>
+
+{% if website_hosting_enabled %}
+<div class="card shadow-sm mt-4" id="bucket-website-card">
+<div class="card-header d-flex align-items-center">
+<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" class="text-primary me-2" viewBox="0 0 16 16">
+<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
+</svg>
+<span class="fw-semibold">Static Website Hosting</span>
+</div>
+<div class="card-body">
+{% if website_config %}
+<div class="alert alert-success d-flex align-items-start mb-4" role="alert">
+<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="me-2 flex-shrink-0" viewBox="0 0 16 16">
+<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
+</svg>
+<div>
+<strong>Website hosting is enabled</strong>
+<p class="mb-0 small">
+Index: <code>{{ website_config.index_document }}</code>
+{% if website_config.error_document %}<br>Error: <code>{{ website_config.error_document }}</code>{% endif %}
+</p>
+</div>
+</div>
+{% else %}
+<div class="alert alert-secondary d-flex align-items-start mb-4" role="alert">
+<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="me-2 flex-shrink-0" viewBox="0 0 16 16">
+<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
+<path d="M4.646 4.646a.5.5 0 0 1 .708 0L8 7.293l2.646-2.647a.5.5 0 0 1 .708.708L8.707 8l2.647 2.646a.5.5 0 0 1-.708.708L8 8.707l-2.646 2.647a.5.5 0 0 1-.708-.708L7.293 8 4.646 5.354a.5.5 0 0 1 0-.708z"/>
+</svg>
+<div>
+<strong>Website hosting is disabled</strong>
+<p class="mb-0 small">Enable website hosting to serve bucket contents as a static website.</p>
+</div>
+</div>
+{% endif %}
+
+{% if can_manage_website %}
+<form method="post" action="{{ url_for('ui.update_bucket_website', bucket_name=bucket_name) }}" id="websiteForm">
+<input type="hidden" name="csrf_token" value="{{ csrf_token() }}" />
+<input type="hidden" name="action" value="enable" id="websiteAction" />
+
+<div class="mb-3">
+<label for="index_document" class="form-label fw-medium">Index Document</label>
+<input type="text" class="form-control" id="index_document" name="index_document"
+value="{{ website_config.index_document if website_config else 'index.html' }}"
+placeholder="index.html">
+<div class="form-text">The default page served for directory paths (e.g., index.html).</div>
+</div>
+
+<div class="mb-4">
+<label for="error_document" class="form-label fw-medium">Error Document</label>
+<input type="text" class="form-control" id="error_document" name="error_document"
+value="{{ website_config.error_document if website_config else '' }}"
+placeholder="error.html">
+<div class="form-text">Optional. The page served for 404 errors.</div>
+</div>
+
+<div class="d-flex gap-2 flex-wrap">
+<button class="btn {{ 'btn-primary' if website_config else 'btn-success' }}" type="submit" id="websiteSubmitBtn">
+<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
+<path d="M12.736 3.97a.733.733 0 0 1 1.047 0c.286.289.29.756.01 1.05L7.88 12.01a.733.733 0 0 1-1.065.02L3.217 8.384a.757.757 0 0 1 0-1.06.733.733 0 0 1 1.047 0l3.052 3.093 5.4-6.425a.247.247 0 0 1 .02-.022Z"/>
+</svg>
+<span id="websiteSubmitLabel">{{ 'Save Website Settings' if website_config else 'Enable Website Hosting' }}</span>
+</button>
+<button type="button" class="btn btn-outline-danger" id="disableWebsiteBtn"{% if not website_config %} style="display: none;"{% endif %}>
+<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
+<path d="M4.646 4.646a.5.5 0 0 1 .708 0L8 7.293l2.646-2.647a.5.5 0 0 1 .708.708L8.707 8l2.647 2.646a.5.5 0 0 1-.708.708L8 8.707l-2.646 2.647a.5.5 0 0 1-.708-.708L7.293 8 4.646 5.354a.5.5 0 0 1 0-.708z"/>
+</svg>
+Disable Website Hosting
+</button>
+</div>
+</form>
+{% else %}
+<div class="text-center py-3">
+<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="currentColor" class="text-muted mb-2" viewBox="0 0 16 16">
+<path d="M8 1a2 2 0 0 1 2 2v4H6V3a2 2 0 0 1 2-2zm3 6V3a3 3 0 0 0-6 0v4a2 2 0 0 0-2 2v5a2 2 0 0 0 2 2h6a2 2 0 0 0 2-2V9a2 2 0 0 0-2-2z"/>
+</svg>
+<p class="text-muted mb-0 small">You do not have permission to modify website hosting for this bucket.</p>
+</div>
+{% endif %}
+</div>
+</div>
+{% endif %}
 </div>
 
 <div class="col-lg-4">
@@ -1459,6 +1543,30 @@
 </div>
 </div>
+
+<div id="bidirWarningBucket" class="alert alert-warning d-none mb-4" role="alert">
+<h6 class="alert-heading fw-bold d-flex align-items-center gap-2 mb-2">
+<svg xmlns="http://www.w3.org/2000/svg" width="18" height="18" fill="currentColor" viewBox="0 0 16 16">
+<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
+</svg>
+Requires Configuration on Both Sites
+</h6>
+<p class="mb-2 small">For bidirectional sync to work, <strong>both sites</strong> must be configured:</p>
+<ol class="mb-2 ps-3 small">
+<li>This site: Enable bidirectional replication here</li>
+<li>Remote site: Register this site as a peer with a connection</li>
+<li>Remote site: Create matching bidirectional rule pointing back</li>
+<li>Both sites: Ensure <code>SITE_SYNC_ENABLED=true</code></li>
+</ol>
+<div class="small">
+<a href="{{ url_for('ui.sites_dashboard') }}" class="alert-link">
+<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" class="me-1" viewBox="0 0 16 16">
+<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077z"/>
+</svg>
+Check bidirectional status in Sites Dashboard
+</a>
+</div>
+</div>
+
 <button type="submit" class="btn btn-primary">
 <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
 <path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
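Since the warning above lists prerequisites on both sites, a quick preflight that probes each site's admin endpoint can save a round of debugging. A hedged sketch (the /admin/site route is the one documented in the docs page later in this diff; URLs and keys are placeholders):

import requests

def admin_reachable(base_url: str, access_key: str, secret_key: str) -> bool:
    """True when the site answers authenticated admin requests."""
    headers = {"X-Access-Key": access_key, "X-Secret-Key": secret_key}
    try:
        return requests.get(f"{base_url}/admin/site", headers=headers, timeout=5).ok
    except requests.RequestException:
        return False

for site in ("https://s3.us-west-1.example.com", "https://s3.us-east-1.example.com"):
    print(site, "->", "ok" if admin_reachable(site, "<key>", "<secret>") else "unreachable")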
@@ -2569,5 +2677,26 @@ window.BucketDetailConfig = {
 bucketsOverview: "{{ url_for('ui.buckets_overview') }}"
 }
 };
+
+(function() {
+const bidirWarning = document.getElementById('bidirWarningBucket');
+const modeRadios = document.querySelectorAll('input[name="replication_mode"]');
+
+function updateBidirWarning() {
+if (!bidirWarning) return;
+const selected = document.querySelector('input[name="replication_mode"]:checked');
+if (selected && selected.value === 'bidirectional') {
+bidirWarning.classList.remove('d-none');
+} else {
+bidirWarning.classList.add('d-none');
+}
+}
+
+modeRadios.forEach(function(radio) {
+radio.addEventListener('change', updateBidirWarning);
+});
+
+updateBidirWarning();
+})();
 </script>
 {% endblock %}
@@ -141,7 +141,7 @@
 let visibleCount = 0;
 
 bucketItems.forEach(item => {
-const name = item.querySelector('.card-title').textContent.toLowerCase();
+const name = item.querySelector('.bucket-name').textContent.toLowerCase();
 if (name.includes(term)) {
 item.classList.remove('d-none');
 visibleCount++;
@@ -35,6 +35,7 @@
 <li><a href="#api">REST endpoints</a></li>
 <li><a href="#examples">API Examples</a></li>
 <li><a href="#replication">Site Replication & Sync</a></li>
+<li><a href="#site-registry">Site Registry</a></li>
 <li><a href="#versioning">Object Versioning</a></li>
 <li><a href="#quotas">Bucket Quotas</a></li>
 <li><a href="#encryption">Encryption</a></li>
@@ -42,6 +43,15 @@
 <li><a href="#metrics">Metrics History</a></li>
 <li><a href="#operation-metrics">Operation Metrics</a></li>
 <li><a href="#troubleshooting">Troubleshooting</a></li>
+<li><a href="#health-check">Health Check</a></li>
+<li><a href="#object-lock">Object Lock & Retention</a></li>
+<li><a href="#access-logging">Access Logging</a></li>
+<li><a href="#notifications">Notifications & Webhooks</a></li>
+<li><a href="#select-content">SelectObjectContent</a></li>
+<li><a href="#advanced-ops">Advanced Operations</a></li>
+<li><a href="#acls">Access Control Lists</a></li>
+<li><a href="#tagging">Object & Bucket Tagging</a></li>
+<li><a href="#website-hosting">Static Website Hosting</a></li>
 </ul>
 </div>
 </div>
@@ -88,8 +98,8 @@ python run.py --mode ui
 <tbody>
 <tr>
 <td><code>API_BASE_URL</code></td>
-<td><code>None</code></td>
-<td>The public URL of the API. <strong>Required</strong> if running behind a proxy. Ensures presigned URLs are generated correctly.</td>
+<td><code>http://127.0.0.1:5000</code></td>
+<td>Internal S3 API URL used by the web UI proxy. Also used for presigned URL generation. Set to your public URL if running behind a reverse proxy.</td>
 </tr>
 <tr>
 <td><code>STORAGE_ROOT</code></td>
@@ -157,23 +167,43 @@ python run.py --mode ui
 <td><code>200 per minute</code></td>
 <td>Default API rate limit.</td>
 </tr>
+<tr>
+<td><code>RATE_LIMIT_LIST_BUCKETS</code></td>
+<td><code>60 per minute</code></td>
+<td>Rate limit for listing buckets.</td>
+</tr>
+<tr>
+<td><code>RATE_LIMIT_BUCKET_OPS</code></td>
+<td><code>120 per minute</code></td>
+<td>Rate limit for bucket operations.</td>
+</tr>
+<tr>
+<td><code>RATE_LIMIT_OBJECT_OPS</code></td>
+<td><code>240 per minute</code></td>
+<td>Rate limit for object operations.</td>
+</tr>
+<tr>
+<td><code>RATE_LIMIT_HEAD_OPS</code></td>
+<td><code>100 per minute</code></td>
+<td>Rate limit for HEAD requests.</td>
+</tr>
 <tr class="table-secondary">
 <td colspan="3" class="fw-semibold">Server Settings</td>
 </tr>
 <tr>
 <td><code>SERVER_THREADS</code></td>
-<td><code>4</code></td>
-<td>Waitress worker threads (1-64).</td>
+<td><code>0</code> (auto)</td>
+<td>Waitress worker threads (1-64). 0 = auto (CPU cores × 2).</td>
 </tr>
 <tr>
 <td><code>SERVER_CONNECTION_LIMIT</code></td>
-<td><code>100</code></td>
-<td>Max concurrent connections (10-1000).</td>
+<td><code>0</code> (auto)</td>
+<td>Max concurrent connections (10-1000). 0 = auto (RAM-based).</td>
 </tr>
 <tr>
 <td><code>SERVER_BACKLOG</code></td>
-<td><code>1024</code></td>
-<td>TCP listen backlog (64-4096).</td>
+<td><code>0</code> (auto)</td>
+<td>TCP listen backlog (64-4096). 0 = auto (conn_limit × 2).</td>
 </tr>
 <tr>
 <td><code>SERVER_CHANNEL_TIMEOUT</code></td>
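The "0 = auto" defaults above follow simple formulas: threads are CPU cores × 2 and backlog is the connection limit × 2, clamped to the documented ranges. A sketch of that arithmetic (the RAM-based connection heuristic is not spelled out in the table, so it is omitted here):

import os

def auto_threads(configured: int = 0) -> int:
    if configured:
        return max(1, min(64, configured))    # explicit value, clamped to 1-64
    return (os.cpu_count() or 2) * 2          # auto: CPU cores x 2

def auto_backlog(conn_limit: int, configured: int = 0) -> int:
    if configured:
        return max(64, min(4096, configured))    # explicit value, clamped to 64-4096
    return max(64, min(4096, conn_limit * 2))    # auto: conn_limit x 2

print(auto_threads())      # e.g. 16 on an 8-core host
print(auto_backlog(100))   # 200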
@@ -242,6 +272,115 @@ python run.py --mode ui
 <td><code>100</code></td>
 <td>Max objects to pull per sync cycle.</td>
 </tr>
+<tr>
+<td><code>SITE_SYNC_CONNECT_TIMEOUT_SECONDS</code></td>
+<td><code>10</code></td>
+<td>Connection timeout for site sync (seconds).</td>
+</tr>
+<tr>
+<td><code>SITE_SYNC_READ_TIMEOUT_SECONDS</code></td>
+<td><code>120</code></td>
+<td>Read timeout for site sync (seconds).</td>
+</tr>
+<tr>
+<td><code>SITE_SYNC_MAX_RETRIES</code></td>
+<td><code>2</code></td>
+<td>Max retry attempts for site sync operations.</td>
+</tr>
+<tr>
+<td><code>SITE_SYNC_CLOCK_SKEW_TOLERANCE_SECONDS</code></td>
+<td><code>1.0</code></td>
+<td>Clock skew tolerance for conflict resolution.</td>
+</tr>
+<tr class="table-secondary">
+<td colspan="3" class="fw-semibold">Replication Settings</td>
+</tr>
+<tr>
+<td><code>REPLICATION_CONNECT_TIMEOUT_SECONDS</code></td>
+<td><code>5</code></td>
+<td>Connection timeout for replication (seconds).</td>
+</tr>
+<tr>
+<td><code>REPLICATION_READ_TIMEOUT_SECONDS</code></td>
+<td><code>30</code></td>
+<td>Read timeout for replication (seconds).</td>
+</tr>
+<tr>
+<td><code>REPLICATION_MAX_RETRIES</code></td>
+<td><code>2</code></td>
+<td>Max retry attempts for replication operations.</td>
+</tr>
+<tr>
+<td><code>REPLICATION_STREAMING_THRESHOLD_BYTES</code></td>
+<td><code>10485760</code></td>
+<td>Objects larger than this use streaming upload (10 MB).</td>
+</tr>
+<tr>
+<td><code>REPLICATION_MAX_FAILURES_PER_BUCKET</code></td>
+<td><code>50</code></td>
+<td>Max failure records to keep per bucket.</td>
+</tr>
+<tr class="table-secondary">
+<td colspan="3" class="fw-semibold">Security & Auth Settings</td>
+</tr>
+<tr>
+<td><code>SIGV4_TIMESTAMP_TOLERANCE_SECONDS</code></td>
+<td><code>900</code></td>
+<td>Max time skew for SigV4 requests (15 minutes).</td>
+</tr>
+<tr>
+<td><code>PRESIGNED_URL_MIN_EXPIRY_SECONDS</code></td>
+<td><code>1</code></td>
+<td>Minimum presigned URL expiry time.</td>
+</tr>
+<tr>
+<td><code>PRESIGNED_URL_MAX_EXPIRY_SECONDS</code></td>
+<td><code>604800</code></td>
+<td>Maximum presigned URL expiry time (7 days).</td>
+</tr>
+<tr class="table-secondary">
+<td colspan="3" class="fw-semibold">Storage Limits</td>
+</tr>
+<tr>
+<td><code>OBJECT_KEY_MAX_LENGTH_BYTES</code></td>
+<td><code>1024</code></td>
+<td>Maximum object key length in bytes.</td>
+</tr>
+<tr>
+<td><code>OBJECT_CACHE_MAX_SIZE</code></td>
+<td><code>100</code></td>
+<td>Maximum number of objects in cache.</td>
+</tr>
+<tr>
+<td><code>BUCKET_CONFIG_CACHE_TTL_SECONDS</code></td>
+<td><code>30</code></td>
+<td>Bucket config cache TTL in seconds.</td>
+</tr>
+<tr>
+<td><code>OBJECT_TAG_LIMIT</code></td>
+<td><code>50</code></td>
+<td>Maximum number of tags per object.</td>
+</tr>
+<tr>
+<td><code>LIFECYCLE_MAX_HISTORY_PER_BUCKET</code></td>
+<td><code>50</code></td>
+<td>Max lifecycle history records per bucket.</td>
+</tr>
+<tr>
+<td><code>ENCRYPTION_CHUNK_SIZE_BYTES</code></td>
+<td><code>65536</code></td>
+<td>Chunk size for streaming encryption (64 KB).</td>
+</tr>
+<tr>
+<td><code>KMS_GENERATE_DATA_KEY_MIN_BYTES</code></td>
+<td><code>1</code></td>
+<td>Minimum data key size for KMS generation.</td>
+</tr>
+<tr>
+<td><code>KMS_GENERATE_DATA_KEY_MAX_BYTES</code></td>
+<td><code>1024</code></td>
+<td>Maximum data key size for KMS generation.</td>
+</tr>
 </tbody>
 </table>
 </div>
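`SIGV4_TIMESTAMP_TOLERANCE_SECONDS` bounds how far a request's `x-amz-date` may drift from server time before the request is rejected. A generic illustration of that check in Python (not the project's verbatim implementation):

from datetime import datetime, timezone

TOLERANCE_SECONDS = 900  # SIGV4_TIMESTAMP_TOLERANCE_SECONDS default (15 minutes)

def timestamp_in_tolerance(amz_date: str, now: datetime | None = None) -> bool:
    """Accept the request only if its timestamp is within the allowed skew."""
    req_time = datetime.strptime(amz_date, "%Y%m%dT%H%M%SZ").replace(tzinfo=timezone.utc)
    now = now or datetime.now(timezone.utc)
    return abs((now - req_time).total_seconds()) <= TOLERANCE_SECONDS

print(timestamp_in_tolerance(datetime.now(timezone.utc).strftime("%Y%m%dT%H%M%SZ")))  # True
print(timestamp_in_tolerance("20200101T000000Z"))                                     # False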
@@ -313,10 +452,10 @@ sudo journalctl -u myfsio -f # View logs</code></pre>
 <span class="docs-section-kicker">03</span>
 <h2 class="h4 mb-0">Authenticate & manage IAM</h2>
 </div>
-<p class="text-muted">MyFSIO seeds <code>data/.myfsio.sys/config/iam.json</code> with <code>localadmin/localadmin</code>. Sign in once, rotate it, then grant least-privilege access to teammates and tools.</p>
+<p class="text-muted">On first startup, MyFSIO generates random admin credentials and prints them to the console. Missed it? Check <code>data/.myfsio.sys/config/iam.json</code> directly—credentials are stored in plaintext.</p>
 <div class="docs-highlight mb-3">
 <ol class="mb-0">
-<li>Visit <code>/ui/login</code>, enter the bootstrap credentials, and rotate them immediately from the IAM page.</li>
+<li>Check the console output (or <code>iam.json</code>) for the generated <code>Access Key</code> and <code>Secret Key</code>, then visit <code>/ui/login</code>.</li>
 <li>Create additional users with descriptive display names and AWS-style inline policies (for example <code>{"bucket": "*", "actions": ["list", "read"]}</code>).</li>
 <li>Rotate secrets when sharing with CI jobs—new secrets display once and persist to <code>data/.myfsio.sys/config/iam.json</code>.</li>
 <li>Bucket policies layer on top of IAM. Apply Private/Public presets or paste custom JSON; changes reload instantly.</li>
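One plausible way such first-boot credentials can be minted is with Python's `secrets` module; this is a hedged sketch, and the project's actual generator may differ:

import secrets
import string

def generate_credentials() -> tuple[str, str]:
    # AWS-style shape: a short uppercase/digit access key, a longer random secret.
    alphabet = string.ascii_uppercase + string.digits
    access_key = "".join(secrets.choice(alphabet) for _ in range(20))
    secret_key = secrets.token_urlsafe(30)
    return access_key, secret_key

ak, sk = generate_credentials()
print("Access Key:", ak)
print("Secret Key:", sk)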
@@ -834,10 +973,174 @@ SITE_SYNC_BATCH_SIZE=100 # Max objects per sync cycle</code></pre>
 </div>
 </div>
 </article>
-<article id="versioning" class="card shadow-sm docs-section">
+<article id="site-registry" class="card shadow-sm docs-section">
 <div class="card-body">
 <div class="d-flex align-items-center gap-2 mb-3">
 <span class="docs-section-kicker">09</span>
+<h2 class="h4 mb-0">Site Registry</h2>
+</div>
+<p class="text-muted">Track cluster membership and site identity for geo-distributed deployments. The site registry stores local site identity and peer site information.</p>
+
+<h3 class="h6 text-uppercase text-muted mt-4">Connections vs Sites</h3>
+<p class="small text-muted mb-3">Understanding the difference between Connections and Sites is key to configuring geo-distribution:</p>
+<div class="table-responsive mb-3">
+<table class="table table-sm table-bordered small">
+<thead class="table-light">
+<tr>
+<th style="width: 20%;">Aspect</th>
+<th style="width: 40%;">Connections</th>
+<th style="width: 40%;">Sites</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><strong>Purpose</strong></td>
+<td>Store credentials to authenticate with remote S3 endpoints</td>
+<td>Track cluster membership and site identity</td>
+</tr>
+<tr>
+<td><strong>Contains</strong></td>
+<td>Endpoint URL, access key, secret key, region</td>
+<td>Site ID, endpoint, region, priority, display name</td>
+</tr>
+<tr>
+<td><strong>Used by</strong></td>
+<td>Replication rules, site sync workers</td>
+<td>Geo-distribution awareness, cluster topology</td>
+</tr>
+<tr>
+<td><strong>Analogy</strong></td>
+<td><em>"How do I log in to that server?"</em></td>
+<td><em>"Who are the members of my cluster?"</em></td>
+</tr>
+</tbody>
+</table>
+</div>
+<p class="small text-muted">Sites can optionally link to a Connection (via <code>connection_id</code>) to perform health checks against peer sites.</p>
+
+<h3 class="h6 text-uppercase text-muted mt-4">Configuration</h3>
+<p class="small text-muted">Set environment variables to bootstrap local site identity on startup:</p>
+<div class="table-responsive mb-3">
+<table class="table table-sm table-bordered small">
+<thead class="table-light">
+<tr>
+<th>Variable</th>
+<th>Default</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td><code>SITE_ID</code></td>
+<td><code>None</code></td>
+<td>Unique identifier for this site (e.g., <code>us-west-1</code>)</td>
+</tr>
+<tr>
+<td><code>SITE_ENDPOINT</code></td>
+<td><code>None</code></td>
+<td>Public URL for this site (e.g., <code>https://s3.us-west-1.example.com</code>)</td>
+</tr>
+<tr>
+<td><code>SITE_REGION</code></td>
+<td><code>us-east-1</code></td>
+<td>AWS-style region identifier</td>
+</tr>
+<tr>
+<td><code>SITE_PRIORITY</code></td>
+<td><code>100</code></td>
+<td>Routing priority (lower = preferred)</td>
+</tr>
+</tbody>
+</table>
+</div>
+<pre class="mb-3"><code class="language-bash"># Example: Configure site identity
+export SITE_ID=us-west-1
+export SITE_ENDPOINT=https://s3.us-west-1.example.com
+export SITE_REGION=us-west-1
+export SITE_PRIORITY=100
+python run.py</code></pre>
+
+<h3 class="h6 text-uppercase text-muted mt-4">Using the Sites UI</h3>
+<p class="small text-muted">Navigate to <a href="{{ url_for('ui.sites_dashboard') }}">Sites</a> in the sidebar to manage site configuration:</p>
+<div class="row g-3 mb-3">
+<div class="col-md-6">
+<div class="card border h-100">
+<div class="card-header bg-light py-2"><strong class="small">Local Site Identity</strong></div>
+<div class="card-body small">
+<ul class="mb-0 ps-3">
+<li>Configure this site's ID, endpoint, region, and priority</li>
+<li>Display name for easier identification</li>
+<li>Changes persist to <code>site_registry.json</code></li>
+</ul>
+</div>
+</div>
+</div>
+<div class="col-md-6">
+<div class="card border h-100">
+<div class="card-header bg-light py-2"><strong class="small">Peer Sites</strong></div>
+<div class="card-body small">
+<ul class="mb-0 ps-3">
+<li>Register remote sites in your cluster</li>
+<li>Link to a Connection for health checks</li>
+<li>View health status (green/red/unknown)</li>
+<li>Edit or delete peers as needed</li>
+</ul>
+</div>
+</div>
+</div>
+</div>
+
+<h3 class="h6 text-uppercase text-muted mt-4">Admin API Endpoints</h3>
+<p class="small text-muted">The <code>/admin</code> API provides programmatic access to site registry:</p>
+<pre class="mb-3"><code class="language-bash"># Get local site configuration
+curl {{ api_base }}/admin/site \
+-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
+
+# Update local site
+curl -X PUT {{ api_base }}/admin/site \
+-H "Content-Type: application/json" \
+-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
+-d '{"site_id": "us-west-1", "endpoint": "https://s3.example.com", "region": "us-west-1"}'
+
+# List all peer sites
+curl {{ api_base }}/admin/sites \
+-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
+
+# Add a peer site
+curl -X POST {{ api_base }}/admin/sites \
+-H "Content-Type: application/json" \
+-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
+-d '{"site_id": "us-east-1", "endpoint": "https://s3.us-east-1.example.com"}'
+
+# Check peer health
+curl {{ api_base }}/admin/sites/us-east-1/health \
+-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"
+
+# Get cluster topology
+curl {{ api_base }}/admin/topology \
+-H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>
+
+<h3 class="h6 text-uppercase text-muted mt-4">Storage Location</h3>
+<p class="small text-muted mb-3">Site registry data is stored at:</p>
+<code class="d-block mb-3">data/.myfsio.sys/config/site_registry.json</code>
+
+<div class="alert alert-light border mb-0">
+<div class="d-flex gap-2">
+<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1 flex-shrink-0" viewBox="0 0 16 16">
+<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
+<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
+</svg>
+<div>
+<strong>Planned:</strong> The site registry lays the groundwork for features like automatic failover, intelligent routing, and multi-site consistency. Currently it provides cluster awareness and health monitoring.
+</div>
+</div>
+</div>
+</div>
+</article>
+<article id="versioning" class="card shadow-sm docs-section">
+<div class="card-body">
+<div class="d-flex align-items-center gap-2 mb-3">
+<span class="docs-section-kicker">10</span>
 <h2 class="h4 mb-0">Object Versioning</h2>
 </div>
 <p class="text-muted">Keep multiple versions of objects to protect against accidental deletions and overwrites. Restore previous versions at any time.</p>
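Stitching the admin endpoints above together, a small script can print a cluster overview. A sketch only: the response shapes are assumptions, and the base URL and keys are placeholders:

import requests

BASE = "http://127.0.0.1:5000"
HEADERS = {"X-Access-Key": "<key>", "X-Secret-Key": "<secret>"}

def cluster_overview() -> None:
    # Local identity, then each registered peer's health.
    local = requests.get(f"{BASE}/admin/site", headers=HEADERS, timeout=10).json()
    print("local site:", local.get("site_id"))
    peers = requests.get(f"{BASE}/admin/sites", headers=HEADERS, timeout=10).json()
    peer_list = peers if isinstance(peers, list) else peers.get("sites", [])
    for peer in peer_list:
        sid = peer.get("site_id")
        health = requests.get(f"{BASE}/admin/sites/{sid}/health",
                              headers=HEADERS, timeout=10)
        print(sid, "healthy" if health.ok else "unreachable")

if __name__ == "__main__":
    cluster_overview()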
@@ -917,7 +1220,7 @@ curl "{{ api_base }}/<bucket>/<key>?versionId=<version-id>" \
|
|||||||
<article id="quotas" class="card shadow-sm docs-section">
|
<article id="quotas" class="card shadow-sm docs-section">
|
||||||
<div class="card-body">
|
<div class="card-body">
|
||||||
<div class="d-flex align-items-center gap-2 mb-3">
|
<div class="d-flex align-items-center gap-2 mb-3">
|
||||||
<span class="docs-section-kicker">10</span>
|
<span class="docs-section-kicker">11</span>
|
||||||
<h2 class="h4 mb-0">Bucket Quotas</h2>
|
<h2 class="h4 mb-0">Bucket Quotas</h2>
|
||||||
</div>
|
</div>
|
||||||
<p class="text-muted">Limit how much data a bucket can hold using storage quotas. Quotas are enforced on uploads and multipart completions.</p>
|
<p class="text-muted">Limit how much data a bucket can hold using storage quotas. Quotas are enforced on uploads and multipart completions.</p>
|
||||||
@@ -985,7 +1288,7 @@ curl -X PUT "{{ api_base }}/bucket/<bucket>?quota" \
|
|||||||
<article id="encryption" class="card shadow-sm docs-section">
|
<article id="encryption" class="card shadow-sm docs-section">
|
||||||
<div class="card-body">
|
<div class="card-body">
|
||||||
<div class="d-flex align-items-center gap-2 mb-3">
|
<div class="d-flex align-items-center gap-2 mb-3">
|
||||||
<span class="docs-section-kicker">11</span>
|
<span class="docs-section-kicker">12</span>
|
||||||
<h2 class="h4 mb-0">Encryption</h2>
|
<h2 class="h4 mb-0">Encryption</h2>
|
||||||
</div>
|
</div>
|
||||||
<p class="text-muted">Protect data at rest with server-side encryption using AES-256-GCM. Objects are encrypted before being written to disk and decrypted transparently on read.</p>
|
<p class="text-muted">Protect data at rest with server-side encryption using AES-256-GCM. Objects are encrypted before being written to disk and decrypted transparently on read.</p>
|
||||||
@@ -1079,7 +1382,7 @@ curl -X DELETE "{{ api_base }}/kms/keys/{key-id}?waiting_period_days=30" \
<article id="lifecycle" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">12</span>
<span class="docs-section-kicker">13</span>
<h2 class="h4 mb-0">Lifecycle Rules</h2>
</div>
<p class="text-muted">Automatically delete expired objects, clean up old versions, and abort incomplete multipart uploads using time-based lifecycle rules.</p>
@@ -1161,7 +1464,7 @@ curl "{{ api_base }}/<bucket>?lifecycle" \
<article id="metrics" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">13</span>
<span class="docs-section-kicker">14</span>
<h2 class="h4 mb-0">Metrics History</h2>
</div>
<p class="text-muted">Track CPU, memory, and disk usage over time with optional metrics history. Disabled by default to minimize overhead.</p>
@@ -1245,7 +1548,7 @@ curl -X PUT "{{ api_base | replace('/api', '/ui') }}/metrics/settings" \
<article id="operation-metrics" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">14</span>
<span class="docs-section-kicker">15</span>
<h2 class="h4 mb-0">Operation Metrics</h2>
</div>
<p class="text-muted">Track API request statistics including request counts, latency, error rates, and bandwidth usage. Provides real-time visibility into API operations.</p>
@@ -1352,7 +1655,7 @@ curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history?hours=6
<article id="troubleshooting" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">15</span>
<span class="docs-section-kicker">16</span>
<h2 class="h4 mb-0">Troubleshooting & tips</h2>
</div>
<div class="table-responsive">
@@ -1400,8 +1703,498 @@ curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history?hours=6
</div>
</div>
</article>

<article id="health-check" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">17</span>
<h2 class="h4 mb-0">Health Check Endpoint</h2>
</div>
<p class="text-muted">The API exposes a health check endpoint for monitoring and load balancer integration.</p>

<pre class="mb-3"><code class="language-bash"># Check API health
curl {{ api_base }}/myfsio/health

# Response
{"status": "ok", "version": "0.1.7"}</code></pre>

<p class="small text-muted mb-3">Use this endpoint for:</p>
<ul class="small text-muted mb-0">
<li>Load balancer health checks</li>
<li>Kubernetes liveness/readiness probes</li>
<li>Monitoring system integration (Prometheus, Datadog, etc.)</li>
</ul>
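
<p class="small text-muted mt-3 mb-2">As a minimal sketch, a shell-based probe can wrap the endpoint shown above with <code>curl --fail</code> so any non-2xx response produces a non-zero exit code; the timeout value is illustrative:</p>
<pre class="mb-0"><code class="language-bash"># Minimal liveness probe: exits non-zero if the API is down
# --fail       -> non-2xx responses become a non-zero exit code
# --max-time 5 -> illustrative timeout so a hung server fails fast
curl --silent --fail --max-time 5 {{ api_base }}/myfsio/health > /dev/null \
  || echo "MyFSIO health check failed"</code></pre>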
</div>
</article>

<article id="object-lock" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">18</span>
<h2 class="h4 mb-0">Object Lock & Retention</h2>
</div>
<p class="text-muted">Object Lock prevents objects from being deleted or overwritten for a specified retention period.</p>

<h3 class="h6 text-uppercase text-muted mt-4">Retention Modes</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Mode</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>GOVERNANCE</strong></td>
<td>Objects can't be deleted by normal users, but admins with bypass permission can override</td>
</tr>
<tr>
<td><strong>COMPLIANCE</strong></td>
<td>Objects can't be deleted or overwritten by anyone until the retention period expires</td>
</tr>
</tbody>
</table>
</div>

<h3 class="h6 text-uppercase text-muted mt-4">API Usage</h3>
<pre class="mb-3"><code class="language-bash"># Set object retention
curl -X PUT "{{ api_base }}/<bucket>/<key>?retention" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -d '{"Mode": "GOVERNANCE", "RetainUntilDate": "2025-12-31T23:59:59Z"}'

# Enable legal hold (indefinite protection)
curl -X PUT "{{ api_base }}/<bucket>/<key>?legal-hold" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -d '{"Status": "ON"}'

# Get legal hold status
curl "{{ api_base }}/<bucket>/<key>?legal-hold" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>

<div class="alert alert-light border mb-0">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-info-circle text-muted mt-1 flex-shrink-0" viewBox="0 0 16 16">
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
<path d="m8.93 6.588-2.29.287-.082.38.45.083c.294.07.352.176.288.469l-.738 3.468c-.194.897.105 1.319.808 1.319.545 0 1.178-.252 1.465-.598l.088-.416c-.2.176-.492.246-.686.246-.275 0-.375-.193-.304-.533L8.93 6.588zM9 4.5a1 1 0 1 1-2 0 1 1 0 0 1 2 0z"/>
</svg>
<div>
<strong>Legal Hold:</strong> Provides indefinite protection independent of retention settings. Use for litigation holds or regulatory requirements.
</div>
</div>
</div>
</div>
</article>

<article id="access-logging" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">19</span>
<h2 class="h4 mb-0">Access Logging</h2>
</div>
<p class="text-muted">Enable S3-style access logging to track all requests to your buckets for audit and analysis.</p>

<pre class="mb-3"><code class="language-bash"># Enable access logging
curl -X PUT "{{ api_base }}/<bucket>?logging" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -d '{
    "LoggingEnabled": {
      "TargetBucket": "log-bucket",
      "TargetPrefix": "logs/my-bucket/"
    }
  }'

# Get logging configuration
curl "{{ api_base }}/<bucket>?logging" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">Log Contents</h3>
<p class="small text-muted mb-0">Logs include: timestamp, bucket, key, operation type, request ID, requester, source IP, HTTP status, error codes, bytes transferred, timing, referrer, and User-Agent.</p>
</div>
</article>

<article id="notifications" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">20</span>
<h2 class="h4 mb-0">Notifications & Webhooks</h2>
</div>
<p class="text-muted">Configure event notifications to trigger webhooks when objects are created or deleted.</p>

<h3 class="h6 text-uppercase text-muted mt-4">Supported Events</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>Event Type</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>s3:ObjectCreated:*</code></td>
<td>Any object creation (PUT, POST, COPY, multipart)</td>
</tr>
<tr>
<td><code>s3:ObjectRemoved:*</code></td>
<td>Any object deletion</td>
</tr>
</tbody>
</table>
</div>

<pre class="mb-3"><code class="language-bash"># Set notification configuration
curl -X PUT "{{ api_base }}/<bucket>?notification" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -d '{
    "TopicConfigurations": [{
      "Id": "upload-notify",
      "TopicArn": "https://webhook.example.com/s3-events",
      "Events": ["s3:ObjectCreated:*"],
      "Filter": {
        "Key": {
          "FilterRules": [
            {"Name": "prefix", "Value": "uploads/"},
            {"Name": "suffix", "Value": ".jpg"}
          ]
        }
      }
    }]
  }'</code></pre>
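
<p class="small text-muted mt-3 mb-2">To verify a configuration like the one above, upload an object that matches the filter (prefix <code>uploads/</code>, suffix <code>.jpg</code>) and watch the webhook endpoint for the event delivery; the object names here are illustrative:</p>
<pre class="mb-3"><code class="language-bash"># Should trigger upload-notify: key matches prefix "uploads/" and suffix ".jpg"
curl -X PUT "{{ api_base }}/<bucket>/uploads/photo.jpg" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  --data-binary @photo.jpg

# Should NOT trigger it: suffix does not match the filter
curl -X PUT "{{ api_base }}/<bucket>/uploads/notes.txt" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  --data-binary @notes.txt</code></pre>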

<div class="alert alert-warning border-warning bg-warning-subtle mb-0">
<div class="d-flex gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-exclamation-triangle mt-1" viewBox="0 0 16 16">
<path d="M7.938 2.016A.13.13 0 0 1 8.002 2a.13.13 0 0 1 .063.016.146.146 0 0 1 .054.057l6.857 11.667c.036.06.035.124.002.183a.163.163 0 0 1-.054.06.116.116 0 0 1-.066.017H1.146a.115.115 0 0 1-.066-.017.163.163 0 0 1-.054-.06.176.176 0 0 1 .002-.183L7.884 2.073a.147.147 0 0 1 .054-.057zm1.044-.45a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566z"/>
<path d="M7.002 12a1 1 0 1 1 2 0 1 1 0 0 1-2 0zM7.1 5.995a.905.905 0 1 1 1.8 0l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995z"/>
</svg>
<div>
<strong>Security:</strong> Webhook URLs are validated to prevent SSRF attacks. Internal/private IP ranges are blocked.
</div>
</div>
</div>
</div>
</article>

<article id="select-content" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">21</span>
<h2 class="h4 mb-0">SelectObjectContent (SQL)</h2>
</div>
<p class="text-muted">Query CSV, JSON, or Parquet files directly using SQL without downloading the entire object.</p>

<div class="alert alert-info border small mb-3">
<strong>Prerequisite:</strong> Requires DuckDB to be installed (<code>pip install duckdb</code>)
</div>

<pre class="mb-3"><code class="language-bash"># Query a CSV file
curl -X POST "{{ api_base }}/<bucket>/data.csv?select" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -d '{
    "Expression": "SELECT name, age FROM s3object WHERE age > 25",
    "ExpressionType": "SQL",
    "InputSerialization": {
      "CSV": {"FileHeaderInfo": "USE", "FieldDelimiter": ","}
    },
    "OutputSerialization": {"JSON": {}}
  }'</code></pre>
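
<p class="small text-muted mt-3 mb-2">The same endpoint can query JSON data. As a sketch, assuming the input options mirror S3 Select's <code>{"JSON": {"Type": "LINES"}}</code> convention for newline-delimited JSON (the file and field names below are illustrative):</p>
<pre class="mb-3"><code class="language-bash"># Query a newline-delimited JSON file (JSON input options are an assumption)
curl -X POST "{{ api_base }}/<bucket>/events.jsonl?select" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -d '{
    "Expression": "SELECT s.user, s.action FROM s3object s WHERE s.status = 200",
    "ExpressionType": "SQL",
    "InputSerialization": {"JSON": {"Type": "LINES"}},
    "OutputSerialization": {"JSON": {}}
  }'</code></pre>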

<h3 class="h6 text-uppercase text-muted mt-4">Supported Formats</h3>
<div class="row g-2 mb-0">
<div class="col-md-4">
<div class="bg-light rounded p-2 small text-center">
<strong>CSV</strong><br><span class="text-muted">Headers, delimiters</span>
</div>
</div>
<div class="col-md-4">
<div class="bg-light rounded p-2 small text-center">
<strong>JSON</strong><br><span class="text-muted">Document or lines</span>
</div>
</div>
<div class="col-md-4">
<div class="bg-light rounded p-2 small text-center">
<strong>Parquet</strong><br><span class="text-muted">Auto schema</span>
</div>
</div>
</div>
</div>
</article>

<article id="advanced-ops" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">22</span>
<h2 class="h4 mb-0">Advanced S3 Operations</h2>
</div>
<p class="text-muted">Copy objects, upload part copies, and use range requests for partial downloads.</p>

<h3 class="h6 text-uppercase text-muted mt-4">CopyObject</h3>
<pre class="mb-3"><code class="language-bash"># Copy within same bucket
curl -X PUT "{{ api_base }}/<bucket>/copy-of-file.txt" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -H "x-amz-copy-source: /<bucket>/original-file.txt"

# Copy with metadata replacement
curl -X PUT "{{ api_base }}/<bucket>/file.txt" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -H "x-amz-copy-source: /<bucket>/file.txt" \
  -H "x-amz-metadata-directive: REPLACE" \
  -H "x-amz-meta-newkey: newvalue"</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">UploadPartCopy</h3>
<p class="small text-muted">Copy data from an existing object into a multipart upload part:</p>
<pre class="mb-3"><code class="language-bash"># Copy bytes 0-10485759 from source as part 1
curl -X PUT "{{ api_base }}/<bucket>/<key>?uploadId=X&partNumber=1" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -H "x-amz-copy-source: /source-bucket/source-file.bin" \
  -H "x-amz-copy-source-range: bytes=0-10485759"</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">Range Requests</h3>
<pre class="mb-3"><code class="language-bash"># Get first 1000 bytes
curl "{{ api_base }}/<bucket>/<key>" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -H "Range: bytes=0-999"

# Get last 500 bytes
curl "{{ api_base }}/<bucket>/<key>" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -H "Range: bytes=-500"</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">Conditional Requests</h3>
<div class="table-responsive mb-0">
<table class="table table-sm table-bordered small mb-0">
<thead class="table-light">
<tr>
<th>Header</th>
<th>Behavior</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>If-Modified-Since</code></td>
<td>Only download if changed after date</td>
</tr>
<tr>
<td><code>If-None-Match</code></td>
<td>Only download if ETag differs</td>
</tr>
<tr>
<td><code>If-Match</code></td>
<td>Only download if ETag matches</td>
</tr>
</tbody>
</table>
</div>
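
<p class="small text-muted mt-3 mb-2">A typical pattern combines these headers with a stored ETag to avoid re-downloading unchanged objects; the ETag value below is a placeholder:</p>
<pre class="mb-0"><code class="language-bash"># Re-fetch only if the object changed since we cached it
curl "{{ api_base }}/<bucket>/<key>" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -H 'If-None-Match: "d41d8cd98f00b204e9800998ecf8427e"'
# A 304 Not Modified response means the cached copy is still current</code></pre>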
</div>
</article>

<article id="acls" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">23</span>
<h2 class="h4 mb-0">Access Control Lists (ACLs)</h2>
</div>
<p class="text-muted">ACLs provide legacy-style permission management for buckets and objects.</p>

<h3 class="h6 text-uppercase text-muted mt-4">Canned ACLs</h3>
<div class="table-responsive mb-3">
<table class="table table-sm table-bordered small">
<thead class="table-light">
<tr>
<th>ACL</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td><code>private</code></td>
<td>Owner gets FULL_CONTROL (default)</td>
</tr>
<tr>
<td><code>public-read</code></td>
<td>Owner FULL_CONTROL, public READ</td>
</tr>
<tr>
<td><code>public-read-write</code></td>
<td>Owner FULL_CONTROL, public READ and WRITE</td>
</tr>
<tr>
<td><code>authenticated-read</code></td>
<td>Owner FULL_CONTROL, authenticated users READ</td>
</tr>
</tbody>
</table>
</div>

<pre class="mb-3"><code class="language-bash"># Set bucket ACL
curl -X PUT "{{ api_base }}/<bucket>?acl" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -H "x-amz-acl: public-read"

# Set object ACL during upload
curl -X PUT "{{ api_base }}/<bucket>/<key>" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -H "x-amz-acl: private" \
  --data-binary @file.txt</code></pre>
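
<p class="small text-muted mt-2 mb-2">Reading an ACL back is assumed to follow the same pattern, with a GET on the <code>?acl</code> subresource mirroring the PUT shown above:</p>
<pre class="mb-3"><code class="language-bash"># Get bucket ACL (GET on ?acl is an assumption mirroring the PUT above)
curl "{{ api_base }}/<bucket>?acl" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>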

<div class="alert alert-light border mb-0">
<strong>Recommendation:</strong> For most use cases, prefer bucket policies over ACLs for more flexible access control.
</div>
</div>
</article>

<article id="tagging" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">24</span>
<h2 class="h4 mb-0">Object & Bucket Tagging</h2>
</div>
<p class="text-muted">Add metadata tags to buckets and objects for organization, cost allocation, or lifecycle rule filtering.</p>

<h3 class="h6 text-uppercase text-muted mt-4">Object Tagging</h3>
<pre class="mb-3"><code class="language-bash"># Set object tags
curl -X PUT "{{ api_base }}/<bucket>/<key>?tagging" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -d '{
    "TagSet": [
      {"Key": "Classification", "Value": "Confidential"},
      {"Key": "Owner", "Value": "john@example.com"}
    ]
  }'

# Get object tags
curl "{{ api_base }}/<bucket>/<key>?tagging" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"

# Set tags during upload
curl -X PUT "{{ api_base }}/<bucket>/<key>" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -H "x-amz-tagging: Environment=Staging&Team=QA" \
  --data-binary @file.txt</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">Bucket Tagging</h3>
<pre class="mb-3"><code class="language-bash"># Set bucket tags
curl -X PUT "{{ api_base }}/<bucket>?tagging" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -d '{
    "TagSet": [
      {"Key": "Environment", "Value": "Production"},
      {"Key": "Team", "Value": "Engineering"}
    ]
  }'</code></pre>
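
<p class="small text-muted mt-2 mb-2">Removing tags is assumed to follow the S3 convention of a DELETE on the same subresource:</p>
<pre class="mb-3"><code class="language-bash"># Delete all tags from an object (DELETE on ?tagging is an assumption)
curl -X DELETE "{{ api_base }}/<bucket>/<key>?tagging" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>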

<h3 class="h6 text-uppercase text-muted mt-4">Use Cases</h3>
<div class="row g-2 mb-0">
<div class="col-md-6">
<ul class="small text-muted mb-0 ps-3">
<li>Filter objects for lifecycle expiration by tag</li>
<li>Use tag conditions in bucket policies</li>
</ul>
</div>
<div class="col-md-6">
<ul class="small text-muted mb-0 ps-3">
<li>Group objects by project or department</li>
<li>Trigger automation based on object tags</li>
</ul>
</div>
</div>
</div>
</article>

<article id="website-hosting" class="card shadow-sm docs-section">
<div class="card-body">
<div class="d-flex align-items-center gap-2 mb-3">
<span class="docs-section-kicker">25</span>
<h2 class="h4 mb-0">Static Website Hosting</h2>
</div>
<p class="text-muted">Host static websites directly from S3 buckets with custom index and error pages, served via custom domain mapping.</p>

<div class="alert alert-info small mb-3">
<strong>Prerequisite:</strong> Set <code>WEBSITE_HOSTING_ENABLED=true</code> to enable this feature.
</div>

<h3 class="h6 text-uppercase text-muted mt-4">1. Configure bucket for website hosting</h3>
<pre class="mb-3"><code class="language-bash"># Enable website hosting with index and error documents
curl -X PUT "{{ api_base }}/<bucket>?website" \
  -H "Content-Type: application/xml" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -d '<WebsiteConfiguration>
    <IndexDocument><Suffix>index.html</Suffix></IndexDocument>
    <ErrorDocument><Key>404.html</Key></ErrorDocument>
  </WebsiteConfiguration>'

# Get website configuration
curl "{{ api_base }}/<bucket>?website" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"

# Remove website configuration
curl -X DELETE "{{ api_base }}/<bucket>?website" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">2. Map a custom domain to the bucket</h3>
<pre class="mb-3"><code class="language-bash"># Create domain mapping (admin only)
curl -X POST "{{ api_base }}/admin/website-domains" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -d '{"domain": "example.com", "bucket": "my-site"}'

# List all domain mappings
curl "{{ api_base }}/admin/website-domains" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"

# Update a mapping
curl -X PUT "{{ api_base }}/admin/website-domains/example.com" \
  -H "Content-Type: application/json" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>" \
  -d '{"bucket": "new-site-bucket"}'

# Delete a mapping
curl -X DELETE "{{ api_base }}/admin/website-domains/example.com" \
  -H "X-Access-Key: <key>" -H "X-Secret-Key: <secret>"</code></pre>

<h3 class="h6 text-uppercase text-muted mt-4">3. Point your domain</h3>
<p class="small text-muted">MyFSIO handles domain routing natively via the <code>Host</code> header — no path-based proxy rules needed. Just point your domain to the MyFSIO API server.</p>

<div class="alert alert-secondary small mb-3">
<strong>Direct access (HTTP only):</strong> Point your domain's DNS (A or CNAME) directly to the MyFSIO server on port 5000.
</div>

<p class="small text-muted mb-2">For <strong>HTTPS</strong>, place a reverse proxy in front. The proxy only needs to forward traffic — MyFSIO handles the domain-to-bucket routing:</p>
<pre class="mb-3"><code class="language-nginx"># nginx example
server {
    server_name example.com;
    location / {
        proxy_pass http://127.0.0.1:5000;
        proxy_set_header Host $host;  # Required: passes the domain to MyFSIO
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}</code></pre>

<div class="alert alert-warning small mb-3">
<strong>Important:</strong> The <code>proxy_set_header Host $host;</code> directive is required. MyFSIO matches the incoming <code>Host</code> header against domain mappings to determine which bucket to serve.
</div>
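
<p class="small text-muted mt-2 mb-2">Before changing DNS, you can exercise the domain-to-bucket routing locally by sending the expected <code>Host</code> header straight at the server:</p>
<pre class="mb-3"><code class="language-bash"># Simulate a request for example.com without touching DNS
curl -H "Host: example.com" http://127.0.0.1:5000/
# Serves the index document of the bucket mapped to example.com</code></pre>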

<h3 class="h6 text-uppercase text-muted mt-4">How it works</h3>
<div class="row g-2 mb-0">
<div class="col-md-6">
<ul class="small text-muted mb-0 ps-3">
<li><code>/</code> serves the configured index document</li>
<li><code>/about/</code> serves <code>about/index.html</code></li>
<li>Objects served with correct Content-Type</li>
</ul>
</div>
<div class="col-md-6">
<ul class="small text-muted mb-0 ps-3">
<li>Missing objects return the error document with 404</li>
<li>Website endpoints are public (no auth required)</li>
<li>Normal S3 API with auth continues to work</li>
</ul>
</div>
</div>
</div>
</article>
</div>

<div class="col-xl-4 docs-sidebar-col">
<aside class="card shadow-sm docs-sidebar">
<div class="card-body">
<h3 class="h6 text-uppercase text-muted mb-3">On this page</h3>
@@ -1414,6 +2207,7 @@ curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history?hours=6
<li><a href="#api">REST endpoints</a></li>
<li><a href="#examples">API Examples</a></li>
<li><a href="#replication">Site Replication & Sync</a></li>
<li><a href="#site-registry">Site Registry</a></li>
<li><a href="#versioning">Object Versioning</a></li>
<li><a href="#quotas">Bucket Quotas</a></li>
<li><a href="#encryption">Encryption</a></li>
@@ -1421,6 +2215,15 @@ curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history?hours=6
<li><a href="#metrics">Metrics History</a></li>
<li><a href="#operation-metrics">Operation Metrics</a></li>
<li><a href="#troubleshooting">Troubleshooting</a></li>
<li><a href="#health-check">Health Check</a></li>
<li><a href="#object-lock">Object Lock & Retention</a></li>
<li><a href="#access-logging">Access Logging</a></li>
<li><a href="#notifications">Notifications & Webhooks</a></li>
<li><a href="#select-content">SelectObjectContent</a></li>
<li><a href="#advanced-ops">Advanced Operations</a></li>
<li><a href="#acls">Access Control Lists</a></li>
<li><a href="#tagging">Object & Bucket Tagging</a></li>
<li><a href="#website-hosting">Static Website Hosting</a></li>
</ul>
<div class="docs-sidebar-callouts">
<div>
@@ -1428,8 +2231,8 @@ curl "{{ api_base | replace('/api', '/ui') }}/metrics/operations/history?hours=6
<code class="d-block">{{ api_base }}</code>
</div>
<div>
<div class="small text-uppercase text-muted">Sample user</div>
<div class="small text-uppercase text-muted">Initial credentials</div>
<code class="d-block">localadmin / localadmin</code>
<span class="text-muted small">Generated on first run (check console)</span>
</div>
<div>
<div class="small text-uppercase text-muted">Logs</div>
@@ -398,6 +398,14 @@
<option value="24" selected>Last 24 hours</option>
<option value="168">Last 7 days</option>
</select>
<select class="form-select form-select-sm" id="maxDataPoints" style="width: auto;" title="Maximum data points to display">
<option value="100">100 points</option>
<option value="250">250 points</option>
<option value="500" selected>500 points</option>
<option value="1000">1000 points</option>
<option value="2000">2000 points</option>
<option value="0">Unlimited</option>
</select>
</div>
</div>
<div class="card-body p-4">
@@ -817,8 +825,8 @@
var diskChart = null;
var historyStatus = document.getElementById('historyStatus');
var timeRangeSelect = document.getElementById('historyTimeRange');
var maxDataPointsSelect = document.getElementById('maxDataPoints');
var historyTimer = null;
var MAX_DATA_POINTS = 500;

function createChart(ctx, label, color) {
return new Chart(ctx, {
@@ -889,7 +897,8 @@
if (historyStatus) historyStatus.textContent = 'No history data available yet. Data is recorded every ' + (data.interval_minutes || 5) + ' minutes.';
return;
}
var history = data.history.slice(-MAX_DATA_POINTS);
var maxPoints = maxDataPointsSelect ? parseInt(maxDataPointsSelect.value, 10) : 500;
var history = maxPoints > 0 ? data.history.slice(-maxPoints) : data.history;
var labels = history.map(function(h) { return formatTime(h.timestamp); });
var cpuData = history.map(function(h) { return h.cpu_percent; });
var memData = history.map(function(h) { return h.memory_percent; });
@@ -927,6 +936,10 @@
timeRangeSelect.addEventListener('change', loadHistory);
}

if (maxDataPointsSelect) {
maxDataPointsSelect.addEventListener('change', loadHistory);
}

document.addEventListener('visibilitychange', function() {
if (document.hidden) {
if (historyTimer) clearInterval(historyTimer);

templates/replication_wizard.html (new file, 270 lines)
@@ -0,0 +1,270 @@
{% extends "base.html" %}

{% block title %}Set Up Replication - S3 Compatible Storage{% endblock %}

{% block content %}
<div class="page-header d-flex justify-content-between align-items-center mb-4">
<div>
<nav aria-label="breadcrumb">
<ol class="breadcrumb mb-1">
<li class="breadcrumb-item"><a href="{{ url_for('ui.sites_dashboard') }}">Sites</a></li>
<li class="breadcrumb-item active" aria-current="page">Replication Wizard</li>
</ol>
</nav>
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M8 4a.5.5 0 0 1 .5.5V6a.5.5 0 0 1-1 0V4.5A.5.5 0 0 1 8 4zM3.732 5.732a.5.5 0 0 1 .707 0l.915.914a.5.5 0 1 1-.708.708l-.914-.915a.5.5 0 0 1 0-.707zM2 10a.5.5 0 0 1 .5-.5h1.586a.5.5 0 0 1 0 1H2.5A.5.5 0 0 1 2 10zm9.5 0a.5.5 0 0 1 .5-.5h1.5a.5.5 0 0 1 0 1H12a.5.5 0 0 1-.5-.5zm.754-4.246a.389.389 0 0 0-.527-.02L7.547 9.31a.91.91 0 1 0 1.302 1.258l3.434-4.297a.389.389 0 0 0-.029-.518z"/>
<path fill-rule="evenodd" d="M0 10a8 8 0 1 1 15.547 2.661c-.442 1.253-1.845 1.602-2.932 1.25C11.309 13.488 9.475 13 8 13c-1.474 0-3.31.488-4.615.911-1.087.352-2.49.003-2.932-1.25A7.988 7.988 0 0 1 0 10zm8-7a7 7 0 0 0-6.603 9.329c.203.575.923.876 1.68.63C4.397 12.533 6.358 12 8 12s3.604.532 4.923.96c.757.245 1.477-.056 1.68-.631A7 7 0 0 0 8 3z"/>
</svg>
Set Up Replication
</h1>
<p class="text-muted mb-0 mt-1">Configure bucket replication to <strong>{{ peer.display_name or peer.site_id }}</strong></p>
</div>
</div>

<div class="row g-4">
<div class="col-lg-4 col-md-5">
<div class="card shadow-sm border-0 mb-4" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8m7.5-6.923c-.67.204-1.335.82-1.887 1.855A8 8 0 0 0 5.145 4H7.5zM4.09 4a9.3 9.3 0 0 1 .64-1.539 7 7 0 0 1 .597-.933A7.03 7.03 0 0 0 2.255 4zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a7 7 0 0 0-.656 2.5zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5zM8.5 5v2.5h2.99a12.5 12.5 0 0 0-.337-2.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5zM5.145 12q.208.58.468 1.068c.552 1.035 1.218 1.65 1.887 1.855V12zm.182 2.472a7 7 0 0 1-.597-.933A9.3 9.3 0 0 1 4.09 12H2.255a7 7 0 0 0 3.072 2.472M3.82 11a13.7 13.7 0 0 1-.312-2.5h-2.49a7 7 0 0 0 .656 2.5zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855q.26-.487.468-1.068zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.7 13.7 0 0 1-.312 2.5m2.802-3.5a7 7 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7 7 0 0 0-3.072-2.472c.218.284.418.598.597.933M10.855 4a8 8 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4z"/>
</svg>
Peer Site
</h5>
</div>
<div class="card-body px-4 pb-4">
<dl class="mb-0">
<dt class="text-muted small">Site ID</dt>
<dd class="mb-2">{{ peer.site_id }}</dd>
<dt class="text-muted small">Endpoint</dt>
<dd class="mb-2 text-truncate" title="{{ peer.endpoint }}">{{ peer.endpoint }}</dd>
<dt class="text-muted small">Region</dt>
<dd class="mb-2"><span class="badge bg-primary bg-opacity-10 text-primary">{{ peer.region }}</span></dd>
<dt class="text-muted small">Connection</dt>
<dd class="mb-0"><span class="badge bg-secondary bg-opacity-10 text-secondary">{{ connection.name }}</span></dd>
</dl>
</div>
</div>

<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
</svg>
Replication Modes
</h5>
</div>
<div class="card-body px-4 pb-4 small">
<p class="mb-2"><strong>New Only:</strong> Only replicate new objects uploaded after the rule is created.</p>
<p class="mb-2"><strong>All Objects:</strong> Replicate all existing objects plus new uploads.</p>
<p class="mb-0"><strong>Bidirectional:</strong> Two-way sync between sites. Changes on either side are synchronized.</p>
</div>
</div>
</div>

<div class="col-lg-8 col-md-7">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
</svg>
Select Buckets to Replicate
</h5>
<p class="text-muted small mb-0">Choose which buckets should be replicated to this peer site</p>
</div>
<div class="card-body px-4 pb-4">
{% if buckets %}
<form method="POST" action="{{ url_for('ui.create_peer_replication_rules', site_id=peer.site_id) }}">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>

<div class="mb-4">
<label for="mode" class="form-label fw-medium">Replication Mode</label>
<select class="form-select" id="mode" name="mode">
<option value="new_only">New Objects Only</option>
<option value="all">All Objects (includes existing)</option>
<option value="bidirectional">Bidirectional Sync</option>
</select>
</div>

<div id="bidirWarning" class="alert alert-warning d-none mb-4" role="alert">
<h6 class="alert-heading fw-bold d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
</svg>
Bidirectional Sync Requires Configuration on Both Sites
</h6>
<p class="mb-2">For bidirectional sync to work properly, you must configure <strong>both</strong> sites. This wizard only configures one direction.</p>
<hr class="my-2">
<p class="mb-2 fw-semibold">After completing this wizard, you must also:</p>
<ol class="mb-2 ps-3">
<li>Go to <strong>{{ peer.display_name or peer.site_id }}</strong>'s admin UI</li>
<li>Register <strong>this site</strong> as a peer (with a connection)</li>
<li>Create matching bidirectional replication rules pointing back to this site</li>
<li>Ensure <code>SITE_SYNC_ENABLED=true</code> is set on both sites</li>
</ol>
<div class="d-flex align-items-center gap-2 mt-3">
<span class="badge bg-light text-dark border">Local Site ID: <strong>{{ local_site.site_id if local_site else 'Not configured' }}</strong></span>
<span class="badge bg-light text-dark border">Local Endpoint: <strong>{{ local_site.endpoint if local_site and local_site.endpoint else 'Not configured' }}</strong></span>
</div>
{% if not local_site or not local_site.site_id or not local_site.endpoint %}
<div class="alert alert-danger mt-3 mb-0 py-2">
<small><strong>Warning:</strong> Your local site identity is not fully configured. The remote site won't be able to connect back. <a href="{{ url_for('ui.sites_dashboard') }}">Configure it now</a>.</small>
</div>
{% endif %}
</div>

<div class="table-responsive">
<table class="table table-hover align-middle mb-0">
<thead class="table-light">
<tr>
<th scope="col" style="width: 40px;">
<input type="checkbox" class="form-check-input" id="selectAll">
</th>
<th scope="col">Local Bucket</th>
<th scope="col">Target Bucket Name</th>
<th scope="col">Status</th>
</tr>
</thead>
<tbody>
{% for bucket in buckets %}
<tr>
<td>
<input type="checkbox" class="form-check-input bucket-checkbox"
name="buckets" value="{{ bucket.name }}"
{% if bucket.has_rule %}disabled{% endif %}>
</td>
<td>
<div class="d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
</svg>
<span class="fw-medium">{{ bucket.name }}</span>
</div>
</td>
<td>
<input type="text" class="form-control form-control-sm"
name="target_{{ bucket.name }}"
value="{{ bucket.existing_target or bucket.name }}"
placeholder="{{ bucket.name }}"
{% if bucket.has_rule %}disabled{% endif %}>
</td>
<td>
{% if bucket.has_rule %}
<span class="badge bg-info bg-opacity-10 text-info">
Already configured ({{ bucket.existing_mode }})
</span>
{% else %}
<span class="badge bg-secondary bg-opacity-10 text-secondary">
Not configured
</span>
{% endif %}
</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>

<div class="d-flex gap-2 mt-4 pt-3 border-top">
<button type="submit" class="btn btn-primary" id="submitBtn" disabled>
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
</svg>
Create Replication Rules
</button>
<a href="{{ url_for('ui.sites_dashboard') }}" class="btn btn-outline-secondary">
Skip for Now
</a>
</div>
</form>
{% else %}
<div class="empty-state text-center py-5">
<div class="empty-state-icon mx-auto mb-3">
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor" viewBox="0 0 16 16">
<path d="M2.522 5H2a.5.5 0 0 0-.494.574l1.372 9.149A1.5 1.5 0 0 0 4.36 16h7.278a1.5 1.5 0 0 0 1.483-1.277l1.373-9.149A.5.5 0 0 0 14 5h-.522A5.5 5.5 0 0 0 2.522 5zm1.005 0a4.5 4.5 0 0 1 8.945 0H3.527z"/>
</svg>
</div>
<h5 class="fw-semibold mb-2">No buckets yet</h5>
<p class="text-muted mb-3">Create some buckets first, then come back to set up replication.</p>
<a href="{{ url_for('ui.buckets_overview') }}" class="btn btn-primary">
Go to Buckets
</a>
</div>
{% endif %}
</div>
</div>
</div>
</div>

<script>
(function() {
const selectAllCheckbox = document.getElementById('selectAll');
const bucketCheckboxes = document.querySelectorAll('.bucket-checkbox:not(:disabled)');
const submitBtn = document.getElementById('submitBtn');
const modeSelect = document.getElementById('mode');
const bidirWarning = document.getElementById('bidirWarning');

function updateBidirWarning() {
if (modeSelect && bidirWarning) {
if (modeSelect.value === 'bidirectional') {
bidirWarning.classList.remove('d-none');
} else {
bidirWarning.classList.add('d-none');
}
}
}

if (modeSelect) {
modeSelect.addEventListener('change', updateBidirWarning);
updateBidirWarning();
}

function updateSubmitButton() {
const checkedCount = document.querySelectorAll('.bucket-checkbox:checked').length;
if (submitBtn) {
submitBtn.disabled = checkedCount === 0;
const text = checkedCount > 0
? `Create ${checkedCount} Replication Rule${checkedCount > 1 ? 's' : ''}`
: 'Create Replication Rules';
submitBtn.innerHTML = `
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
</svg>
${text}
`;
}
}

function updateSelectAll() {
if (selectAllCheckbox && bucketCheckboxes.length > 0) {
const allChecked = Array.from(bucketCheckboxes).every(cb => cb.checked);
const someChecked = Array.from(bucketCheckboxes).some(cb => cb.checked);
selectAllCheckbox.checked = allChecked;
selectAllCheckbox.indeterminate = someChecked && !allChecked;
}
}

if (selectAllCheckbox) {
selectAllCheckbox.addEventListener('change', function() {
bucketCheckboxes.forEach(cb => {
cb.checked = this.checked;
});
updateSubmitButton();
});
}

bucketCheckboxes.forEach(cb => {
cb.addEventListener('change', function() {
updateSelectAll();
updateSubmitButton();
});
});

updateSelectAll();
updateSubmitButton();
})();
</script>
{% endblock %}

templates/sites.html (new file, 742 lines)
@@ -0,0 +1,742 @@
{% extends "base.html" %}

{% block title %}Sites - S3 Compatible Storage{% endblock %}

{% block content %}
<div class="page-header d-flex justify-content-between align-items-center mb-4">
<div>
<p class="text-uppercase text-muted small mb-1">Geo-Distribution</p>
<h1 class="h3 mb-1 d-flex align-items-center gap-2">
<svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
</svg>
Site Registry
</h1>
<p class="text-muted mb-0 mt-1">Configure this site's identity and manage peer sites for geo-distribution.</p>
</div>
<div class="d-none d-md-block">
<span class="badge bg-primary bg-opacity-10 text-primary fs-6 px-3 py-2">
{{ peers|length }} peer{{ 's' if peers|length != 1 else '' }}
</span>
</div>
</div>

<div class="row g-4">
<div class="col-lg-4 col-md-5">
<div class="card shadow-sm border-0 mb-4" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path d="M8 16s6-5.686 6-10A6 6 0 0 0 2 6c0 4.314 6 10 6 10zm0-7a3 3 0 1 1 0-6 3 3 0 0 1 0 6z"/>
</svg>
Local Site Identity
</h5>
<p class="text-muted small mb-0">This site's configuration</p>
</div>
<div class="card-body px-4 pb-4">
<form method="POST" action="{{ url_for('ui.update_local_site') }}">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
<div class="mb-3">
<label for="site_id" class="form-label fw-medium">Site ID</label>
<input type="text" class="form-control" id="site_id" name="site_id" required
value="{{ local_site.site_id if local_site else config_site_id or '' }}"
placeholder="us-west-1">
<div class="form-text">Unique identifier for this site</div>
</div>
<div class="mb-3">
<label for="endpoint" class="form-label fw-medium">Endpoint URL</label>
<input type="url" class="form-control" id="endpoint" name="endpoint"
value="{{ local_site.endpoint if local_site else config_site_endpoint or '' }}"
placeholder="https://s3.us-west-1.example.com">
<div class="form-text">Public URL for this site</div>
</div>
<div class="mb-3">
<label for="region" class="form-label fw-medium">Region</label>
<input type="text" class="form-control" id="region" name="region"
value="{{ local_site.region if local_site else config_site_region }}">
</div>
<div class="row mb-3">
<div class="col-6">
<label for="priority" class="form-label fw-medium">Priority</label>
<input type="number" class="form-control" id="priority" name="priority"
value="{{ local_site.priority if local_site else 100 }}" min="0">
<div class="form-text">Lower = preferred</div>
</div>
<div class="col-6">
<label for="display_name" class="form-label fw-medium">Display Name</label>
<input type="text" class="form-control" id="display_name" name="display_name"
value="{{ local_site.display_name if local_site else '' }}"
placeholder="US West Primary">
</div>
</div>
<div class="d-grid">
<button type="submit" class="btn btn-primary">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
</svg>
Save Local Site
</button>
</div>
</form>
</div>
</div>

<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 2a.5.5 0 0 1 .5.5v5h5a.5.5 0 0 1 0 1h-5v5a.5.5 0 0 1-1 0v-5h-5a.5.5 0 0 1 0-1h5v-5A.5.5 0 0 1 8 2Z"/>
</svg>
Add Peer Site
</h5>
<p class="text-muted small mb-0">Register a remote site</p>
</div>
<div class="card-body px-4 pb-4">
<form method="POST" action="{{ url_for('ui.add_peer_site') }}">
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
<div class="mb-3">
<label for="peer_site_id" class="form-label fw-medium">Site ID</label>
<input type="text" class="form-control" id="peer_site_id" name="site_id" required placeholder="us-east-1">
</div>
<div class="mb-3">
<label for="peer_endpoint" class="form-label fw-medium">Endpoint URL</label>
<input type="url" class="form-control" id="peer_endpoint" name="endpoint" required placeholder="https://s3.us-east-1.example.com">
</div>
<div class="mb-3">
<label for="peer_region" class="form-label fw-medium">Region</label>
<input type="text" class="form-control" id="peer_region" name="region" value="us-east-1">
</div>
<div class="row mb-3">
<div class="col-6">
<label for="peer_priority" class="form-label fw-medium">Priority</label>
<input type="number" class="form-control" id="peer_priority" name="priority" value="100" min="0">
</div>
<div class="col-6">
<label for="peer_display_name" class="form-label fw-medium">Display Name</label>
<input type="text" class="form-control" id="peer_display_name" name="display_name" placeholder="US East DR">
</div>
</div>
<div class="mb-3">
<label for="peer_connection_id" class="form-label fw-medium">Connection</label>
<select class="form-select" id="peer_connection_id" name="connection_id">
<option value="">No connection</option>
{% for conn in connections %}
<option value="{{ conn.id }}">{{ conn.name }} ({{ conn.endpoint_url }})</option>
{% endfor %}
</select>
<div class="form-text">Link to a remote connection for health checks</div>
</div>
<div class="d-grid">
<button type="submit" class="btn btn-primary">
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 2a.5.5 0 0 1 .5.5v5h5a.5.5 0 0 1 0 1h-5v5a.5.5 0 0 1-1 0v-5h-5a.5.5 0 0 1 0-1h5v-5A.5.5 0 0 1 8 2Z"/>
</svg>
Add Peer Site
</button>
</div>
</form>
</div>
</div>
</div>

<div class="col-lg-8 col-md-7">
<div class="card shadow-sm border-0" style="border-radius: 1rem;">
<div class="card-header bg-transparent border-0 pt-4 pb-0 px-4 d-flex justify-content-between align-items-center">
<div>
<h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M6 3.5A1.5 1.5 0 0 1 7.5 2h1A1.5 1.5 0 0 1 10 3.5v1A1.5 1.5 0 0 1 8.5 6v1H14a.5.5 0 0 1 .5.5v1a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0V8h-5v.5a.5.5 0 0 1-1 0v-1A.5.5 0 0 1 2 7h5.5V6A1.5 1.5 0 0 1 6 4.5v-1zM8.5 5a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1zM0 11.5A1.5 1.5 0 0 1 1.5 10h1A1.5 1.5 0 0 1 4 11.5v1A1.5 1.5 0 0 1 2.5 14h-1A1.5 1.5 0 0 1 0 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5A1.5 1.5 0 0 1 7.5 10h1a1.5 1.5 0 0 1 1.5 1.5v1A1.5 1.5 0 0 1 8.5 14h-1A1.5 1.5 0 0 1 6 12.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1zm4.5.5a1.5 1.5 0 0 1 1.5-1.5h1a1.5 1.5 0 0 1 1.5 1.5v1a1.5 1.5 0 0 1-1.5 1.5h-1a1.5 1.5 0 0 1-1.5-1.5v-1zm1.5-.5a.5.5 0 0 0-.5.5v1a.5.5 0 0 0 .5.5h1a.5.5 0 0 0 .5-.5v-1a.5.5 0 0 0-.5-.5h-1z"/>
</svg>
Peer Sites
</h5>
<p class="text-muted small mb-0">Known remote sites in the cluster</p>
</div>
</div>
<div class="card-body px-4 pb-4">
{% if peers %}
<div class="table-responsive">
<table class="table table-hover align-middle mb-0">
<thead class="table-light">
<tr>
<th scope="col" style="width: 50px;">Health</th>
<th scope="col">Site ID</th>
<th scope="col">Endpoint</th>
<th scope="col">Region</th>
<th scope="col">Priority</th>
<th scope="col">Sync Status</th>
<th scope="col" class="text-end">Actions</th>
</tr>
</thead>
<tbody>
{% for item in peers_with_stats %}
{% set peer = item.peer %}
<tr data-site-id="{{ peer.site_id }}">
<td class="text-center">
<span class="peer-health-status" data-site-id="{{ peer.site_id }}" title="{% if peer.is_healthy == true %}Healthy{% elif peer.is_healthy == false %}Unhealthy{% else %}Unknown{% endif %}">
{% if peer.is_healthy == true %}
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-success" viewBox="0 0 16 16">
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
</svg>
{% elif peer.is_healthy == false %}
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-danger" viewBox="0 0 16 16">
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/>
|
||||||
|
</svg>
|
||||||
|
{% else %}
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
|
||||||
|
<path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/>
|
||||||
|
<path d="M5.255 5.786a.237.237 0 0 0 .241.247h.825c.138 0 .248-.113.266-.25.09-.656.54-1.134 1.342-1.134.686 0 1.314.343 1.314 1.168 0 .635-.374.927-.965 1.371-.673.489-1.206 1.06-1.168 1.987l.003.217a.25.25 0 0 0 .25.246h.811a.25.25 0 0 0 .25-.25v-.105c0-.718.273-.927 1.01-1.486.609-.463 1.244-.977 1.244-2.056 0-1.511-1.276-2.241-2.673-2.241-1.267 0-2.655.59-2.75 2.286zm1.557 5.763c0 .533.425.927 1.01.927.609 0 1.028-.394 1.028-.927 0-.552-.42-.94-1.029-.94-.584 0-1.009.388-1.009.94z"/>
|
||||||
|
</svg>
|
||||||
|
{% endif %}
|
||||||
|
</span>
|
||||||
|
</td>
|
||||||
|
<td>
|
||||||
|
<div class="d-flex align-items-center gap-2">
|
||||||
|
<div class="peer-icon">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" viewBox="0 0 16 16">
|
||||||
|
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5z"/>
|
||||||
|
</svg>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<span class="fw-medium">{{ peer.display_name or peer.site_id }}</span>
|
||||||
|
{% if peer.display_name and peer.display_name != peer.site_id %}
|
||||||
|
<br><small class="text-muted">{{ peer.site_id }}</small>
|
||||||
|
{% endif %}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</td>
|
||||||
|
<td>
|
||||||
|
<span class="text-muted small text-truncate d-inline-block" style="max-width: 180px;" title="{{ peer.endpoint }}">{{ peer.endpoint }}</span>
|
||||||
|
</td>
|
||||||
|
<td><span class="badge bg-primary bg-opacity-10 text-primary">{{ peer.region }}</span></td>
|
||||||
|
<td><span class="badge bg-secondary bg-opacity-10 text-secondary">{{ peer.priority }}</span></td>
|
||||||
|
<td class="sync-stats-cell" data-site-id="{{ peer.site_id }}">
|
||||||
|
{% if item.has_connection %}
|
||||||
|
<div class="d-flex align-items-center gap-2">
|
||||||
|
<span class="badge bg-primary bg-opacity-10 text-primary">{{ item.buckets_syncing }} bucket{{ 's' if item.buckets_syncing != 1 else '' }}</span>
|
||||||
|
{% if item.has_bidirectional %}
|
||||||
|
<span class="bidir-status-icon" data-site-id="{{ peer.site_id }}" title="Bidirectional sync configured - click to verify">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="text-info" viewBox="0 0 16 16" style="cursor: pointer;">
|
||||||
|
<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
|
||||||
|
</svg>
|
||||||
|
</span>
|
||||||
|
{% endif %}
|
||||||
|
{% if item.buckets_syncing > 0 %}
|
||||||
|
<button type="button" class="btn btn-sm btn-outline-secondary btn-load-stats py-0 px-1"
|
||||||
|
data-site-id="{{ peer.site_id }}" title="Load sync details">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="12" height="12" fill="currentColor" viewBox="0 0 16 16">
|
||||||
|
<path fill-rule="evenodd" d="M8 3a5 5 0 1 0 4.546 2.914.5.5 0 0 1 .908-.417A6 6 0 1 1 8 2v1z"/>
|
||||||
|
<path d="M8 4.466V.534a.25.25 0 0 1 .41-.192l2.36 1.966c.12.1.12.284 0 .384L8.41 4.658A.25.25 0 0 1 8 4.466z"/>
|
||||||
|
</svg>
|
||||||
|
</button>
|
||||||
|
{% endif %}
|
||||||
|
</div>
|
||||||
|
<div class="sync-stats-detail d-none mt-2 small" id="stats-{{ peer.site_id }}">
|
||||||
|
<span class="spinner-border spinner-border-sm text-muted" style="width: 12px; height: 12px;"></span>
|
||||||
|
</div>
|
||||||
|
{% else %}
|
||||||
|
<span class="text-muted small">No connection</span>
|
||||||
|
{% endif %}
|
||||||
|
</td>
|
||||||
|
<td class="text-end">
|
||||||
|
<div class="btn-group btn-group-sm" role="group">
|
||||||
|
<a href="{{ url_for('ui.replication_wizard', site_id=peer.site_id) }}"
|
||||||
|
class="btn btn-outline-primary {% if not item.has_connection %}disabled{% endif %}"
|
||||||
|
title="Set up replication">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||||
|
<path d="M11.534 7h3.932a.25.25 0 0 1 .192.41l-1.966 2.36a.25.25 0 0 1-.384 0l-1.966-2.36a.25.25 0 0 1 .192-.41zm-11 2h3.932a.25.25 0 0 0 .192-.41L2.692 6.23a.25.25 0 0 0-.384 0L.342 8.59A.25.25 0 0 0 .534 9z"/>
|
||||||
|
<path fill-rule="evenodd" d="M8 3c-1.552 0-2.94.707-3.857 1.818a.5.5 0 1 1-.771-.636A6.002 6.002 0 0 1 13.917 7H12.9A5.002 5.002 0 0 0 8 3zM3.1 9a5.002 5.002 0 0 0 8.757 2.182.5.5 0 1 1 .771.636A6.002 6.002 0 0 1 2.083 9H3.1z"/>
|
||||||
|
</svg>
|
||||||
|
</a>
|
||||||
|
<button type="button" class="btn btn-outline-info btn-check-bidir {% if not item.has_connection %}disabled{% endif %}"
|
||||||
|
data-site-id="{{ peer.site_id }}"
|
||||||
|
data-display-name="{{ peer.display_name or peer.site_id }}"
|
||||||
|
title="Check bidirectional sync status">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||||
|
<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
|
||||||
|
</svg>
|
||||||
|
</button>
|
||||||
|
<button type="button" class="btn btn-outline-secondary btn-check-health"
|
||||||
|
data-site-id="{{ peer.site_id }}"
|
||||||
|
title="Check health">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||||
|
<path d="M11.251.068a.5.5 0 0 1 .227.58L9.677 6.5H13a.5.5 0 0 1 .364.843l-8 8.5a.5.5 0 0 1-.842-.49L6.323 9.5H3a.5.5 0 0 1-.364-.843l8-8.5a.5.5 0 0 1 .615-.09z"/>
|
||||||
|
</svg>
|
||||||
|
</button>
|
||||||
|
<button type="button" class="btn btn-outline-secondary"
|
||||||
|
data-bs-toggle="modal"
|
||||||
|
data-bs-target="#editPeerModal"
|
||||||
|
data-site-id="{{ peer.site_id }}"
|
||||||
|
data-endpoint="{{ peer.endpoint }}"
|
||||||
|
data-region="{{ peer.region }}"
|
||||||
|
data-priority="{{ peer.priority }}"
|
||||||
|
data-display-name="{{ peer.display_name }}"
|
||||||
|
data-connection-id="{{ peer.connection_id or '' }}"
|
||||||
|
title="Edit peer">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||||
|
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
|
||||||
|
</svg>
|
||||||
|
</button>
|
||||||
|
<button type="button" class="btn btn-outline-danger"
|
||||||
|
data-bs-toggle="modal"
|
||||||
|
data-bs-target="#deletePeerModal"
|
||||||
|
data-site-id="{{ peer.site_id }}"
|
||||||
|
data-display-name="{{ peer.display_name or peer.site_id }}"
|
||||||
|
title="Delete peer">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
|
||||||
|
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||||
|
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||||
|
</svg>
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</td>
|
||||||
|
</tr>
|
||||||
|
{% endfor %}
|
||||||
|
</tbody>
|
||||||
|
</table>
|
||||||
|
</div>
|
||||||
|
{% else %}
|
||||||
|
<div class="empty-state text-center py-5">
|
||||||
|
<div class="empty-state-icon mx-auto mb-3">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor" viewBox="0 0 16 16">
|
||||||
|
<path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472z"/>
|
||||||
|
</svg>
|
||||||
|
</div>
|
||||||
|
<h5 class="fw-semibold mb-2">No peer sites yet</h5>
|
||||||
|
<p class="text-muted mb-0">Add peer sites to enable geo-distribution and site-to-site replication.</p>
|
||||||
|
</div>
|
||||||
|
{% endif %}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="modal fade" id="editPeerModal" tabindex="-1" aria-hidden="true">
|
||||||
|
<div class="modal-dialog modal-dialog-centered">
|
||||||
|
<div class="modal-content">
|
||||||
|
<div class="modal-header border-0 pb-0">
|
||||||
|
<h5 class="modal-title fw-semibold">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
|
||||||
|
<path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5zm-9.761 5.175-.106.106-1.528 3.821 3.821-1.528.106-.106A.5.5 0 0 1 5 12.5V12h-.5a.5.5 0 0 1-.5-.5V11h-.5a.5.5 0 0 1-.468-.325z"/>
|
||||||
|
</svg>
|
||||||
|
Edit Peer Site
|
||||||
|
</h5>
|
||||||
|
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||||
|
</div>
|
||||||
|
<form method="POST" id="editPeerForm">
|
||||||
|
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||||
|
<div class="modal-body">
|
||||||
|
<div class="mb-3">
|
||||||
|
<label class="form-label fw-medium">Site ID</label>
|
||||||
|
<input type="text" class="form-control" id="edit_site_id" readonly>
|
||||||
|
</div>
|
||||||
|
<div class="mb-3">
|
||||||
|
<label for="edit_endpoint" class="form-label fw-medium">Endpoint URL</label>
|
||||||
|
<input type="url" class="form-control" id="edit_endpoint" name="endpoint" required>
|
||||||
|
</div>
|
||||||
|
<div class="mb-3">
|
||||||
|
<label for="edit_region" class="form-label fw-medium">Region</label>
|
||||||
|
<input type="text" class="form-control" id="edit_region" name="region" required>
|
||||||
|
</div>
|
||||||
|
<div class="row mb-3">
|
||||||
|
<div class="col-6">
|
||||||
|
<label for="edit_priority" class="form-label fw-medium">Priority</label>
|
||||||
|
<input type="number" class="form-control" id="edit_priority" name="priority" min="0">
|
||||||
|
</div>
|
||||||
|
<div class="col-6">
|
||||||
|
<label for="edit_display_name" class="form-label fw-medium">Display Name</label>
|
||||||
|
<input type="text" class="form-control" id="edit_display_name" name="display_name">
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="mb-3">
|
||||||
|
<label for="edit_connection_id" class="form-label fw-medium">Connection</label>
|
||||||
|
<select class="form-select" id="edit_connection_id" name="connection_id">
|
||||||
|
<option value="">No connection</option>
|
||||||
|
{% for conn in connections %}
|
||||||
|
<option value="{{ conn.id }}">{{ conn.name }} ({{ conn.endpoint_url }})</option>
|
||||||
|
{% endfor %}
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="modal-footer">
|
||||||
|
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||||
|
<button type="submit" class="btn btn-primary">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||||
|
<path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
|
||||||
|
</svg>
|
||||||
|
Save
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</form>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="modal fade" id="deletePeerModal" tabindex="-1" aria-hidden="true">
|
||||||
|
<div class="modal-dialog modal-dialog-centered">
|
||||||
|
<div class="modal-content">
|
||||||
|
<div class="modal-header border-0 pb-0">
|
||||||
|
<h5 class="modal-title fw-semibold">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-danger" viewBox="0 0 16 16">
|
||||||
|
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||||
|
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||||
|
</svg>
|
||||||
|
Delete Peer Site
|
||||||
|
</h5>
|
||||||
|
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body">
|
||||||
|
<p>Are you sure you want to delete <strong id="deletePeerName"></strong>?</p>
|
||||||
|
<div class="alert alert-warning d-flex align-items-start small" role="alert">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="flex-shrink-0 me-2 mt-0" viewBox="0 0 16 16">
|
||||||
|
<path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
|
||||||
|
</svg>
|
||||||
|
<div>This will remove the peer from the site registry. Any site sync configurations may be affected.</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="modal-footer">
|
||||||
|
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
|
||||||
|
<form method="POST" id="deletePeerForm">
|
||||||
|
<input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
|
||||||
|
<button type="submit" class="btn btn-danger">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||||
|
<path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
|
||||||
|
<path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
|
||||||
|
</svg>
|
||||||
|
Delete
|
||||||
|
</button>
|
||||||
|
</form>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="modal fade" id="bidirStatusModal" tabindex="-1" aria-hidden="true">
|
||||||
|
<div class="modal-dialog modal-dialog-centered modal-lg">
|
||||||
|
<div class="modal-content">
|
||||||
|
<div class="modal-header border-0 pb-0">
|
||||||
|
<h5 class="modal-title fw-semibold">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-info me-2" viewBox="0 0 16 16">
|
||||||
|
<path fill-rule="evenodd" d="M1 11.5a.5.5 0 0 0 .5.5h11.793l-3.147 3.146a.5.5 0 0 0 .708.708l4-4a.5.5 0 0 0 0-.708l-4-4a.5.5 0 0 0-.708.708L13.293 11H1.5a.5.5 0 0 0-.5.5zm14-7a.5.5 0 0 1-.5.5H2.707l3.147 3.146a.5.5 0 1 1-.708.708l-4-4a.5.5 0 0 1 0-.708l4-4a.5.5 0 1 1 .708.708L2.707 4H14.5a.5.5 0 0 1 .5.5z"/>
|
||||||
|
</svg>
|
||||||
|
Bidirectional Sync Status
|
||||||
|
</h5>
|
||||||
|
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
|
||||||
|
</div>
|
||||||
|
<div class="modal-body">
|
||||||
|
<div id="bidirStatusContent">
|
||||||
|
<div class="text-center py-4">
|
||||||
|
<span class="spinner-border text-primary" role="status"></span>
|
||||||
|
<p class="text-muted mt-2 mb-0">Checking configuration...</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="modal-footer">
|
||||||
|
<button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Close</button>
|
||||||
|
<a href="#" id="bidirWizardLink" class="btn btn-primary d-none">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||||
|
<path d="M9.828.722a.5.5 0 0 1 .354.146l4.95 4.95a.5.5 0 0 1 0 .707c-.48.48-1.072.588-1.503.588-.177 0-.335-.018-.46-.039l-3.134 3.134a5.927 5.927 0 0 1 .16 1.013c.046.702-.032 1.687-.72 2.375a.5.5 0 0 1-.707 0l-2.829-2.828-3.182 3.182c-.195.195-1.219.902-1.414.707-.195-.195.512-1.22.707-1.414l3.182-3.182-2.828-2.829a.5.5 0 0 1 0-.707c.688-.688 1.673-.767 2.375-.72a5.922 5.922 0 0 1 1.013.16l3.134-3.133a2.772 2.772 0 0 1-.04-.461c0-.43.108-1.022.589-1.503a.5.5 0 0 1 .353-.146z"/>
|
||||||
|
</svg>
|
||||||
|
Run Setup Wizard
|
||||||
|
</a>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<script>
|
||||||
|
(function() {
|
||||||
|
const editPeerModal = document.getElementById('editPeerModal');
|
||||||
|
if (editPeerModal) {
|
||||||
|
editPeerModal.addEventListener('show.bs.modal', function (event) {
|
||||||
|
const button = event.relatedTarget;
|
||||||
|
const siteId = button.getAttribute('data-site-id');
|
||||||
|
const endpoint = button.getAttribute('data-endpoint');
|
||||||
|
const region = button.getAttribute('data-region');
|
||||||
|
const priority = button.getAttribute('data-priority');
|
||||||
|
const displayName = button.getAttribute('data-display-name');
|
||||||
|
const connectionId = button.getAttribute('data-connection-id');
|
||||||
|
|
||||||
|
document.getElementById('edit_site_id').value = siteId;
|
||||||
|
document.getElementById('edit_endpoint').value = endpoint;
|
||||||
|
document.getElementById('edit_region').value = region;
|
||||||
|
document.getElementById('edit_priority').value = priority;
|
||||||
|
document.getElementById('edit_display_name').value = displayName;
|
||||||
|
document.getElementById('edit_connection_id').value = connectionId;
|
||||||
|
|
||||||
|
document.getElementById('editPeerForm').action = '/ui/sites/peers/' + encodeURIComponent(siteId) + '/update';
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
const deletePeerModal = document.getElementById('deletePeerModal');
|
||||||
|
if (deletePeerModal) {
|
||||||
|
deletePeerModal.addEventListener('show.bs.modal', function (event) {
|
||||||
|
const button = event.relatedTarget;
|
||||||
|
const siteId = button.getAttribute('data-site-id');
|
||||||
|
const displayName = button.getAttribute('data-display-name');
|
||||||
|
|
||||||
|
document.getElementById('deletePeerName').textContent = displayName;
|
||||||
|
document.getElementById('deletePeerForm').action = '/ui/sites/peers/' + encodeURIComponent(siteId) + '/delete';
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
document.querySelectorAll('.btn-check-health').forEach(function(btn) {
|
||||||
|
btn.addEventListener('click', function() {
|
||||||
|
const siteId = this.getAttribute('data-site-id');
|
||||||
|
const statusSpan = document.querySelector('.peer-health-status[data-site-id="' + siteId + '"]');
|
||||||
|
|
||||||
|
statusSpan.innerHTML = '<span class="spinner-border spinner-border-sm text-muted" role="status" style="width: 14px; height: 14px;"></span>';
|
||||||
|
|
||||||
|
fetch('/ui/sites/peers/' + encodeURIComponent(siteId) + '/health')
|
||||||
|
.then(response => response.json())
|
||||||
|
.then(data => {
|
||||||
|
if (data.is_healthy) {
|
||||||
|
statusSpan.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-success" viewBox="0 0 16 16"><path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/></svg>';
|
||||||
|
statusSpan.title = 'Healthy';
|
||||||
|
if (window.showToast) window.showToast('Peer site is healthy', 'Health Check', 'success');
|
||||||
|
} else {
|
||||||
|
statusSpan.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-danger" viewBox="0 0 16 16"><path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/></svg>';
|
||||||
|
statusSpan.title = 'Unhealthy' + (data.error ? ': ' + data.error : '');
|
||||||
|
if (window.showToast) window.showToast(data.error || 'Peer site is unhealthy', 'Health Check', 'error');
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.catch(err => {
|
||||||
|
statusSpan.innerHTML = '<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16"><path d="M8 15A7 7 0 1 1 8 1a7 7 0 0 1 0 14zm0 1A8 8 0 1 0 8 0a8 8 0 0 0 0 16z"/><path d="M5.255 5.786a.237.237 0 0 0 .241.247h.825c.138 0 .248-.113.266-.25.09-.656.54-1.134 1.342-1.134.686 0 1.314.343 1.314 1.168 0 .635-.374.927-.965 1.371-.673.489-1.206 1.06-1.168 1.987l.003.217a.25.25 0 0 0 .25.246h.811a.25.25 0 0 0 .25-.25v-.105c0-.718.273-.927 1.01-1.486.609-.463 1.244-.977 1.244-2.056 0-1.511-1.276-2.241-2.673-2.241-1.267 0-2.655.59-2.75 2.286zm1.557 5.763c0 .533.425.927 1.01.927.609 0 1.028-.394 1.028-.927 0-.552-.42-.94-1.029-.94-.584 0-1.009.388-1.009.94z"/></svg>';
|
||||||
|
statusSpan.title = 'Check failed';
|
||||||
|
if (window.showToast) window.showToast('Failed to check health', 'Health Check', 'error');
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
document.querySelectorAll('.btn-load-stats').forEach(function(btn) {
|
||||||
|
btn.addEventListener('click', function() {
|
||||||
|
const siteId = this.getAttribute('data-site-id');
|
||||||
|
const detailDiv = document.getElementById('stats-' + siteId);
|
||||||
|
if (!detailDiv) return;
|
||||||
|
|
||||||
|
detailDiv.classList.remove('d-none');
|
||||||
|
detailDiv.innerHTML = '<span class="spinner-border spinner-border-sm text-muted" style="width: 12px; height: 12px;"></span> Loading...';
|
||||||
|
|
||||||
|
fetch('/ui/sites/peers/' + encodeURIComponent(siteId) + '/sync-stats')
|
||||||
|
.then(response => response.json())
|
||||||
|
.then(data => {
|
||||||
|
if (data.error) {
|
||||||
|
detailDiv.innerHTML = '<span class="text-danger">' + data.error + '</span>';
|
||||||
|
} else {
|
||||||
|
const lastSync = data.last_sync_at
|
||||||
|
? new Date(data.last_sync_at * 1000).toLocaleString()
|
||||||
|
: 'Never';
|
||||||
|
detailDiv.innerHTML = `
|
||||||
|
<div class="d-flex flex-wrap gap-2 mb-1">
|
||||||
|
<span class="text-success"><strong>${data.objects_synced}</strong> synced</span>
|
||||||
|
<span class="text-warning"><strong>${data.objects_pending}</strong> pending</span>
|
||||||
|
<span class="text-danger"><strong>${data.objects_failed}</strong> failed</span>
|
||||||
|
</div>
|
||||||
|
<div class="text-muted" style="font-size: 0.75rem;">
|
||||||
|
Last sync: ${lastSync}
|
||||||
|
</div>
|
||||||
|
`;
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.catch(err => {
|
||||||
|
detailDiv.innerHTML = '<span class="text-danger">Failed to load stats</span>';
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
document.querySelectorAll('.bidir-status-icon').forEach(function(icon) {
|
||||||
|
icon.addEventListener('click', function() {
|
||||||
|
const siteId = this.getAttribute('data-site-id');
|
||||||
|
const btn = document.querySelector('.btn-check-bidir[data-site-id="' + siteId + '"]');
|
||||||
|
if (btn) btn.click();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
document.querySelectorAll('.btn-check-bidir').forEach(function(btn) {
|
||||||
|
btn.addEventListener('click', function() {
|
||||||
|
const siteId = this.getAttribute('data-site-id');
|
||||||
|
const displayName = this.getAttribute('data-display-name');
|
||||||
|
const modal = new bootstrap.Modal(document.getElementById('bidirStatusModal'));
|
||||||
|
const contentDiv = document.getElementById('bidirStatusContent');
|
||||||
|
const wizardLink = document.getElementById('bidirWizardLink');
|
||||||
|
|
||||||
|
contentDiv.innerHTML = `
|
||||||
|
<div class="text-center py-4">
|
||||||
|
<span class="spinner-border text-primary" role="status"></span>
|
||||||
|
<p class="text-muted mt-2 mb-0">Checking bidirectional configuration with ${displayName}...</p>
|
||||||
|
</div>
|
||||||
|
`;
|
||||||
|
wizardLink.classList.add('d-none');
|
||||||
|
modal.show();
|
||||||
|
|
||||||
|
fetch('/ui/sites/peers/' + encodeURIComponent(siteId) + '/bidirectional-status')
|
||||||
|
.then(response => response.json())
|
||||||
|
.then(data => {
|
||||||
|
let html = '';
|
||||||
|
|
||||||
|
if (data.is_fully_configured) {
|
||||||
|
html += `
|
||||||
|
<div class="alert alert-success d-flex align-items-center mb-4" role="alert">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" fill="currentColor" class="flex-shrink-0 me-2" viewBox="0 0 16 16">
|
||||||
|
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zm-3.97-3.03a.75.75 0 0 0-1.08.022L7.477 9.417 5.384 7.323a.75.75 0 0 0-1.06 1.06L6.97 11.03a.75.75 0 0 0 1.079-.02l3.992-4.99a.75.75 0 0 0-.01-1.05z"/>
|
||||||
|
</svg>
|
||||||
|
<div>
|
||||||
|
<strong>Bidirectional sync is fully configured!</strong><br>
|
||||||
|
<small>Both sites are set up to sync data in both directions.</small>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
`;
|
||||||
|
} else if (data.issues && data.issues.length > 0) {
|
||||||
|
const errors = data.issues.filter(i => i.severity === 'error');
|
||||||
|
const warnings = data.issues.filter(i => i.severity === 'warning');
|
||||||
|
|
||||||
|
if (errors.length > 0) {
|
||||||
|
html += `
|
||||||
|
<div class="alert alert-danger mb-3" role="alert">
|
||||||
|
<h6 class="alert-heading fw-bold mb-2">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||||
|
<path d="M16 8A8 8 0 1 1 0 8a8 8 0 0 1 16 0zM5.354 4.646a.5.5 0 1 0-.708.708L7.293 8l-2.647 2.646a.5.5 0 0 0 .708.708L8 8.707l2.646 2.647a.5.5 0 0 0 .708-.708L8.707 8l2.647-2.646a.5.5 0 0 0-.708-.708L8 7.293 5.354 4.646z"/>
|
||||||
|
</svg>
|
||||||
|
Configuration Errors
|
||||||
|
</h6>
|
||||||
|
<ul class="mb-0 ps-3">
|
||||||
|
`;
|
||||||
|
errors.forEach(issue => {
|
||||||
|
html += `<li><strong>${issue.code}:</strong> ${issue.message}</li>`;
|
||||||
|
});
|
||||||
|
html += '</ul></div>';
|
||||||
|
}
|
||||||
|
|
||||||
|
if (warnings.length > 0) {
|
||||||
|
html += `
|
||||||
|
<div class="alert alert-warning mb-3" role="alert">
|
||||||
|
<h6 class="alert-heading fw-bold mb-2">
|
||||||
|
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="me-1" viewBox="0 0 16 16">
|
||||||
|
<path d="M8.982 1.566a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566zM8 5c.535 0 .954.462.9.995l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995A.905.905 0 0 1 8 5zm.002 6a1 1 0 1 1 0 2 1 1 0 0 1 0-2z"/>
|
||||||
|
</svg>
|
||||||
|
Warnings
|
||||||
|
</h6>
|
||||||
|
<ul class="mb-0 ps-3">
|
||||||
|
`;
|
||||||
|
warnings.forEach(issue => {
|
||||||
|
html += `<li><strong>${issue.code}:</strong> ${issue.message}</li>`;
|
||||||
|
});
|
||||||
|
html += '</ul></div>';
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
html += '<div class="row g-3">';
|
||||||
|
|
||||||
|
html += `
|
||||||
|
<div class="col-md-6">
|
||||||
|
<div class="card h-100">
|
||||||
|
<div class="card-header bg-light py-2">
|
||||||
|
<strong>This Site (Local)</strong>
|
||||||
|
</div>
|
||||||
|
<div class="card-body small">
|
||||||
|
<p class="mb-1"><strong>Site ID:</strong> ${data.local_site_id || '<span class="text-danger">Not configured</span>'}</p>
|
||||||
|
<p class="mb-1"><strong>Endpoint:</strong> ${data.local_endpoint || '<span class="text-danger">Not configured</span>'}</p>
|
||||||
|
<p class="mb-1"><strong>Site Sync Worker:</strong> ${data.local_site_sync_enabled ? '<span class="text-success">Enabled</span>' : '<span class="text-warning">Disabled</span>'}</p>
|
||||||
|
<p class="mb-0"><strong>Bidirectional Rules:</strong> ${data.local_bidirectional_rules ? data.local_bidirectional_rules.length : 0}</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
`;
|
||||||
|
|
||||||
|
if (data.remote_status) {
|
||||||
|
const rs = data.remote_status;
|
||||||
|
html += `
|
||||||
|
<div class="col-md-6">
|
||||||
|
<div class="card h-100">
|
||||||
|
<div class="card-header bg-light py-2">
|
||||||
|
<strong>Remote Site (${displayName})</strong>
|
||||||
|
</div>
|
||||||
|
<div class="card-body small">
|
||||||
|
`;
|
||||||
|
if (rs.admin_access_denied) {
|
||||||
|
html += '<p class="text-warning mb-0">Admin access denied - cannot verify remote configuration</p>';
|
||||||
|
} else if (rs.reachable === false) {
|
||||||
|
html += '<p class="text-danger mb-0">Could not reach remote admin API</p>';
|
||||||
|
} else {
|
||||||
|
html += `
|
||||||
|
<p class="mb-1"><strong>Has Peer Entry for Us:</strong> ${rs.has_peer_for_us ? '<span class="text-success">Yes</span>' : '<span class="text-danger">No</span>'}</p>
|
||||||
|
<p class="mb-1"><strong>Connection Configured:</strong> ${rs.peer_connection_configured ? '<span class="text-success">Yes</span>' : '<span class="text-danger">No</span>'}</p>
|
||||||
|
`;
|
||||||
|
}
|
||||||
|
html += '</div></div></div>';
|
||||||
|
} else {
|
||||||
|
html += `
|
||||||
|
<div class="col-md-6">
|
||||||
|
<div class="card h-100">
|
||||||
|
<div class="card-header bg-light py-2">
|
||||||
|
<strong>Remote Site (${displayName})</strong>
|
||||||
|
</div>
|
||||||
|
<div class="card-body small">
|
||||||
|
<p class="text-muted mb-0">Could not check remote status</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
`;
|
||||||
|
}
|
||||||
|
|
||||||
|
html += '</div>';
|
||||||
|
|
||||||
|
if (data.local_bidirectional_rules && data.local_bidirectional_rules.length > 0) {
|
||||||
|
html += `
|
||||||
|
<div class="mt-3">
|
||||||
|
<h6 class="fw-semibold">Local Bidirectional Rules</h6>
|
||||||
|
<table class="table table-sm table-bordered mb-0">
|
||||||
|
<thead class="table-light">
|
||||||
|
<tr>
|
||||||
|
<th>Source Bucket</th>
|
||||||
|
<th>Target Bucket</th>
|
||||||
|
<th>Status</th>
|
||||||
|
</tr>
|
||||||
|
</thead>
|
||||||
|
<tbody>
|
||||||
|
`;
|
||||||
|
data.local_bidirectional_rules.forEach(rule => {
|
||||||
|
html += `
|
||||||
|
<tr>
|
||||||
|
<td>${rule.bucket_name}</td>
|
||||||
|
<td>${rule.target_bucket}</td>
|
||||||
|
<td>${rule.enabled ? '<span class="badge bg-success">Enabled</span>' : '<span class="badge bg-secondary">Disabled</span>'}</td>
|
||||||
|
</tr>
|
||||||
|
`;
|
||||||
|
});
|
||||||
|
html += '</tbody></table></div>';
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!data.is_fully_configured) {
|
||||||
|
html += `
|
||||||
|
<div class="alert alert-info mt-3 mb-0" role="alert">
|
||||||
|
<h6 class="alert-heading fw-bold">How to Fix</h6>
|
||||||
|
<ol class="mb-0 ps-3">
|
||||||
|
<li>Ensure this site has a Site ID and Endpoint URL configured</li>
|
||||||
|
<li>On the remote site, register this site as a peer with a connection</li>
|
||||||
|
<li>Create bidirectional replication rules on both sites</li>
|
||||||
|
<li>Enable SITE_SYNC_ENABLED=true on both sites</li>
|
||||||
|
</ol>
|
||||||
|
</div>
|
||||||
|
`;
|
||||||
|
const blockingErrors = ['NO_CONNECTION', 'CONNECTION_NOT_FOUND', 'REMOTE_UNREACHABLE', 'ENDPOINT_NOT_ALLOWED'];
|
||||||
|
const hasBlockingError = data.issues && data.issues.some(i => blockingErrors.includes(i.code));
|
||||||
|
if (!hasBlockingError) {
|
||||||
|
wizardLink.href = '/ui/sites/peers/' + encodeURIComponent(siteId) + '/replication-wizard';
|
||||||
|
wizardLink.classList.remove('d-none');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
contentDiv.innerHTML = html;
|
||||||
|
})
|
||||||
|
.catch(err => {
|
||||||
|
contentDiv.innerHTML = `
|
||||||
|
<div class="alert alert-danger" role="alert">
|
||||||
|
<strong>Error:</strong> Failed to check bidirectional status. ${err.message || ''}
|
||||||
|
</div>
|
||||||
|
`;
|
||||||
|
});
|
||||||
|
});
|
||||||
|
});
|
||||||
|
})();
|
||||||
|
</script>
|
||||||
|
{% endblock %}
287
templates/website_domains.html
Normal file
@@ -0,0 +1,287 @@
{% extends "base.html" %}

{% block title %}Website Domains - MyFSIO Console{% endblock %}

{% block content %}
<div class="page-header d-flex justify-content-between align-items-center mb-4">
  <div>
    <p class="text-uppercase text-muted small mb-1">Website Hosting</p>
    <h1 class="h3 mb-1 d-flex align-items-center gap-2">
      <svg xmlns="http://www.w3.org/2000/svg" width="28" height="28" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
        <path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
      </svg>
      Domain Mappings
    </h1>
    <p class="text-muted mb-0 mt-1">Map custom domains to buckets for static website hosting.</p>
  </div>
  <div class="d-none d-md-block">
    <span class="badge bg-primary bg-opacity-10 text-primary fs-6 px-3 py-2">
      {{ mappings|length }} mapping{{ 's' if mappings|length != 1 else '' }}
    </span>
  </div>
</div>

<div class="row g-4">
  <div class="col-lg-4 col-md-5">
    <div class="card shadow-sm border-0" style="border-radius: 1rem;">
      <div class="card-header bg-transparent border-0 pt-4 pb-0 px-4">
        <h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
          <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
            <path fill-rule="evenodd" d="M8 2a.5.5 0 0 1 .5.5v5h5a.5.5 0 0 1 0 1h-5v5a.5.5 0 0 1-1 0v-5h-5a.5.5 0 0 1 0-1h5v-5A.5.5 0 0 1 8 2Z"/>
          </svg>
          Add Domain Mapping
        </h5>
        <p class="text-muted small mb-0">Point a custom domain to a bucket</p>
      </div>
      <div class="card-body px-4 pb-4">
        <form method="POST" action="{{ url_for('ui.create_website_domain') }}" id="createDomainForm">
          <input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
          <div class="mb-3">
            <label for="domain" class="form-label fw-medium">Domain</label>
            <input type="text" class="form-control" id="domain" name="domain" required placeholder="www.example.com">
            <div class="form-text">The hostname that will serve website content.</div>
          </div>
          <div class="mb-3">
            <label for="bucket" class="form-label fw-medium">Bucket</label>
            {% if buckets %}
            <select class="form-select" id="bucket" name="bucket" required>
              <option value="" selected disabled>Select a bucket</option>
              {% for b in buckets %}
              <option value="{{ b }}">{{ b }}</option>
              {% endfor %}
            </select>
            {% else %}
            <input type="text" class="form-control" id="bucket" name="bucket" required placeholder="my-site-bucket">
            {% endif %}
            <div class="form-text">The bucket must have website hosting enabled.</div>
          </div>
          <div class="d-grid">
            <button type="submit" class="btn btn-primary">
              <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
                <path fill-rule="evenodd" d="M8 2a.5.5 0 0 1 .5.5v5h5a.5.5 0 0 1 0 1h-5v5a.5.5 0 0 1-1 0v-5h-5a.5.5 0 0 1 0-1h5v-5A.5.5 0 0 1 8 2Z"/>
              </svg>
              Add Mapping
            </button>
          </div>
        </form>
      </div>
    </div>

    <div class="card shadow-sm border-0 mt-4" style="border-radius: 1rem;">
      <div class="card-body px-4 py-3">
        <h6 class="fw-semibold mb-2 d-flex align-items-center gap-2">
          <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
            <path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
          </svg>
          How it works
        </h6>
        <ol class="small text-muted mb-0 ps-3">
          <li class="mb-1">Enable website hosting on a bucket (Properties tab)</li>
          <li class="mb-1">Create a domain mapping here</li>
          <li>Point your DNS (A/CNAME) to this server</li>
        </ol>
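        {# Illustrative note (added; not in the original template): a typical DNS
           setup for a mapping, with placeholder names and addresses only.

               www.example.com.  300  IN  CNAME  myfsio.example.net.
               example.com.      300  IN  A      203.0.113.10

           Incoming requests are presumably matched to a bucket by the request's
           Host header, per the mappings configured on this page. #}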
      </div>
    </div>
  </div>

  <div class="col-lg-8 col-md-7">
    <div class="card shadow-sm border-0" style="border-radius: 1rem;">
      <div class="card-header bg-transparent border-0 pt-4 pb-0 px-4 d-flex justify-content-between align-items-center">
        <div>
          <h5 class="fw-semibold d-flex align-items-center gap-2 mb-1">
            <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
              <path d="M4.715 6.542 3.343 7.914a3 3 0 1 0 4.243 4.243l1.828-1.829A3 3 0 0 0 8.586 5.5L8 6.086a1.002 1.002 0 0 0-.154.199 2 2 0 0 1 .861 3.337L6.88 11.45a2 2 0 1 1-2.83-2.83l.793-.792a4.018 4.018 0 0 1-.128-1.287z"/>
              <path d="M6.586 4.672A3 3 0 0 0 7.414 9.5l.775-.776a2 2 0 0 1-.896-3.346L9.12 3.55a2 2 0 1 1 2.83 2.83l-.793.792c.112.42.155.855.128 1.287l1.372-1.372a3 3 0 1 0-4.243-4.243L6.586 4.672z"/>
            </svg>
            Active Mappings
          </h5>
          <p class="text-muted small mb-0">Domains currently serving website content</p>
        </div>
      </div>
      <div class="card-body px-4 pb-4">
        {% if mappings %}
        <div class="table-responsive">
          <table class="table table-hover align-middle mb-0">
            <thead class="table-light">
              <tr>
                <th scope="col">Domain</th>
                <th scope="col">Bucket</th>
                <th scope="col" class="text-end">Actions</th>
              </tr>
            </thead>
            <tbody>
              {% for m in mappings %}
              <tr>
                <td>
                  <div class="d-flex align-items-center gap-2">
                    <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
                      <path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
                    </svg>
                    <code class="fw-medium">{{ m.domain }}</code>
                  </div>
                </td>
                <td><span class="badge bg-primary bg-opacity-10 text-primary">{{ m.bucket }}</span></td>
                <td class="text-end">
                  <div class="btn-group btn-group-sm" role="group">
                    <button type="button" class="btn btn-outline-secondary"
                            data-bs-toggle="modal"
                            data-bs-target="#editDomainModal"
                            data-domain="{{ m.domain }}"
                            data-bucket="{{ m.bucket }}"
                            title="Edit mapping">
                      <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
                        <path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5z"/>
                      </svg>
                    </button>
                    <button type="button" class="btn btn-outline-danger"
                            data-bs-toggle="modal"
                            data-bs-target="#deleteDomainModal"
                            data-domain="{{ m.domain }}"
                            data-bucket="{{ m.bucket }}"
                            title="Delete mapping">
                      <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" viewBox="0 0 16 16">
                        <path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
                        <path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
                      </svg>
                    </button>
                  </div>
                </td>
              </tr>
              {% endfor %}
            </tbody>
          </table>
        </div>
        {% else %}
        <div class="empty-state text-center py-5">
          <div class="empty-state-icon mx-auto mb-3">
            <svg xmlns="http://www.w3.org/2000/svg" width="48" height="48" fill="currentColor" class="text-muted" viewBox="0 0 16 16">
              <path d="M0 8a8 8 0 1 1 16 0A8 8 0 0 1 0 8zm7.5-6.923c-.67.204-1.335.82-1.887 1.855A7.97 7.97 0 0 0 5.145 4H7.5V1.077zM4.09 4a9.267 9.267 0 0 1 .64-1.539 6.7 6.7 0 0 1 .597-.933A7.025 7.025 0 0 0 2.255 4H4.09zm-.582 3.5c.03-.877.138-1.718.312-2.5H1.674a6.958 6.958 0 0 0-.656 2.5h2.49zM4.847 5a12.5 12.5 0 0 0-.338 2.5H7.5V5H4.847zM8.5 5v2.5h2.99a12.495 12.495 0 0 0-.337-2.5H8.5zM4.51 8.5a12.5 12.5 0 0 0 .337 2.5H7.5V8.5H4.51zm3.99 0V11h2.653c.187-.765.306-1.608.338-2.5H8.5zM5.145 12c.138.386.295.744.468 1.068.552 1.035 1.218 1.65 1.887 1.855V12H5.145zm.182 2.472a6.696 6.696 0 0 1-.597-.933A9.268 9.268 0 0 1 4.09 12H2.255a7.024 7.024 0 0 0 3.072 2.472zM3.82 11a13.652 13.652 0 0 1-.312-2.5h-2.49c.062.89.291 1.733.656 2.5H3.82zm6.853 3.472A7.024 7.024 0 0 0 13.745 12H11.91a9.27 9.27 0 0 1-.64 1.539 6.688 6.688 0 0 1-.597.933zM8.5 12v2.923c.67-.204 1.335-.82 1.887-1.855.173-.324.33-.682.468-1.068H8.5zm3.68-1h2.146c.365-.767.594-1.61.656-2.5h-2.49a13.65 13.65 0 0 1-.312 2.5zm2.802-3.5a6.959 6.959 0 0 0-.656-2.5H12.18c.174.782.282 1.623.312 2.5h2.49zM11.27 2.461c.247.464.462.98.64 1.539h1.835a7.024 7.024 0 0 0-3.072-2.472c.218.284.418.598.597.933zM10.855 4a7.966 7.966 0 0 0-.468-1.068C9.835 1.897 9.17 1.282 8.5 1.077V4h2.355z"/>
            </svg>
          </div>
          <h5 class="fw-semibold mb-2">No domain mappings yet</h5>
          <p class="text-muted mb-0">Add your first domain mapping to serve a bucket as a static website.</p>
        </div>
        {% endif %}
      </div>
    </div>
  </div>
</div>

<div class="modal fade" id="editDomainModal" tabindex="-1" aria-hidden="true">
  <div class="modal-dialog modal-dialog-centered">
    <div class="modal-content">
      <div class="modal-header border-0 pb-0">
        <h5 class="modal-title fw-semibold">
          <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-primary" viewBox="0 0 16 16">
            <path d="M12.146.146a.5.5 0 0 1 .708 0l3 3a.5.5 0 0 1 0 .708l-10 10a.5.5 0 0 1-.168.11l-5 2a.5.5 0 0 1-.65-.65l2-5a.5.5 0 0 1 .11-.168l10-10zM11.207 2.5 13.5 4.793 14.793 3.5 12.5 1.207 11.207 2.5zm1.586 3L10.5 3.207 4 9.707V10h.5a.5.5 0 0 1 .5.5v.5h.5a.5.5 0 0 1 .5.5v.5h.293l6.5-6.5zm-9.761 5.175-.106.106-1.528 3.821 3.821-1.528.106-.106A.5.5 0 0 1 5 12.5V12h-.5a.5.5 0 0 1-.5-.5V11h-.5a.5.5 0 0 1-.468-.325z"/>
          </svg>
          Edit Domain Mapping
        </h5>
        <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
      </div>
      <form method="POST" id="editDomainForm">
        <input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
        <div class="modal-body">
          <div class="mb-3">
            <label class="form-label fw-medium">Domain</label>
            <input type="text" class="form-control" id="editDomainName" disabled>
          </div>
          <div class="mb-3">
            <label for="editBucket" class="form-label fw-medium">Bucket</label>
            {% if buckets %}
            <select class="form-select" id="editBucket" name="bucket" required>
              {% for b in buckets %}
              <option value="{{ b }}">{{ b }}</option>
              {% endfor %}
            </select>
            {% else %}
            <input type="text" class="form-control" id="editBucket" name="bucket" required>
            {% endif %}
          </div>
        </div>
        <div class="modal-footer">
          <button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
          <button type="submit" class="btn btn-primary">
            <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
              <path d="M10.97 4.97a.75.75 0 0 1 1.07 1.05l-3.99 4.99a.75.75 0 0 1-1.08.02L4.324 8.384a.75.75 0 1 1 1.06-1.06l2.094 2.093 3.473-4.425a.267.267 0 0 1 .02-.022z"/>
            </svg>
            Save
          </button>
        </div>
      </form>
    </div>
  </div>
</div>

<div class="modal fade" id="deleteDomainModal" tabindex="-1" aria-hidden="true">
  <div class="modal-dialog modal-dialog-centered">
    <div class="modal-content">
      <div class="modal-header border-0 pb-0">
        <h5 class="modal-title fw-semibold">
          <svg xmlns="http://www.w3.org/2000/svg" width="20" height="20" fill="currentColor" class="text-danger" viewBox="0 0 16 16">
            <path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
            <path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
          </svg>
          Delete Domain Mapping
        </h5>
        <button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
      </div>
      <div class="modal-body">
        <p>Are you sure you want to delete the mapping for <strong><code id="deleteDomainName"></code></strong>?</p>
        <div class="alert alert-warning d-flex align-items-start small" role="alert">
          <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="flex-shrink-0 me-2 mt-0" viewBox="0 0 16 16">
            <path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
          </svg>
          <div>This domain will stop serving website content immediately.</div>
        </div>
      </div>
      <div class="modal-footer">
        <button type="button" class="btn btn-outline-secondary" data-bs-dismiss="modal">Cancel</button>
        <form method="POST" id="deleteDomainForm">
          <input type="hidden" name="csrf_token" value="{{ csrf_token() }}"/>
          <button type="submit" class="btn btn-danger">
            <svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" fill="currentColor" class="me-1" viewBox="0 0 16 16">
              <path d="M5.5 5.5A.5.5 0 0 1 6 6v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm2.5 0a.5.5 0 0 1 .5.5v6a.5.5 0 0 1-1 0V6a.5.5 0 0 1 .5-.5zm3 .5a.5.5 0 0 0-1 0v6a.5.5 0 0 0 1 0V6z"/>
              <path fill-rule="evenodd" d="M14.5 3a1 1 0 0 1-1 1H13v9a2 2 0 0 1-2 2H5a2 2 0 0 1-2-2V4h-.5a1 1 0 0 1-1-1V2a1 1 0 0 1 1-1H6a1 1 0 0 1 1-1h2a1 1 0 0 1 1 1h3.5a1 1 0 0 1 1 1v1zM4.118 4 4 4.059V13a1 1 0 0 0 1 1h6a1 1 0 0 0 1-1V4.059L11.882 4H4.118zM2.5 3V2h11v1h-11z"/>
            </svg>
            Delete
          </button>
        </form>
      </div>
    </div>
  </div>
</div>
{% endblock %}

{% block extra_scripts %}
<script>
(function () {
  var editModal = document.getElementById('editDomainModal');
  if (editModal) {
    editModal.addEventListener('show.bs.modal', function (event) {
      var btn = event.relatedTarget;
      var domain = btn.getAttribute('data-domain');
      var bucket = btn.getAttribute('data-bucket');
      document.getElementById('editDomainName').value = domain;
      var editBucket = document.getElementById('editBucket');
      // The same assignment works whether #editBucket is the <select> or the
      // plain text input fallback.
      editBucket.value = bucket;
      document.getElementById('editDomainForm').action = '{{ url_for("ui.update_website_domain", domain="__DOMAIN__") }}'.replace('__DOMAIN__', encodeURIComponent(domain));
    });
  }

  var deleteModal = document.getElementById('deleteDomainModal');
  if (deleteModal) {
    deleteModal.addEventListener('show.bs.modal', function (event) {
      var btn = event.relatedTarget;
      var domain = btn.getAttribute('data-domain');
      document.getElementById('deleteDomainName').textContent = domain;
      document.getElementById('deleteDomainForm').action = '{{ url_for("ui.delete_website_domain", domain="__DOMAIN__") }}'.replace('__DOMAIN__', encodeURIComponent(domain));
    });
  }
})();
</script>
{% endblock %}
@@ -1,191 +0,0 @@
import hashlib
import hmac
import pytest
from datetime import datetime, timedelta, timezone
from urllib.parse import quote


def _sign(key, msg):
    return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()


def _get_signature_key(key, date_stamp, region_name, service_name):
    k_date = _sign(("AWS4" + key).encode("utf-8"), date_stamp)
    k_region = _sign(k_date, region_name)
    k_service = _sign(k_region, service_name)
    k_signing = _sign(k_service, "aws4_request")
    return k_signing


def create_signed_headers(
    method,
    path,
    headers=None,
    body=None,
    access_key="test",
    secret_key="secret",
    region="us-east-1",
    service="s3",
    timestamp=None
):
    if headers is None:
        headers = {}

    if timestamp is None:
        now = datetime.now(timezone.utc)
    else:
        now = timestamp

    amz_date = now.strftime("%Y%m%dT%H%M%SZ")
    date_stamp = now.strftime("%Y%m%d")

    headers["X-Amz-Date"] = amz_date
    headers["Host"] = "testserver"

    canonical_uri = quote(path, safe="/-_.~")
    canonical_query_string = ""

    canonical_headers = ""
    signed_headers_list = []
    for k, v in sorted(headers.items(), key=lambda x: x[0].lower()):
        canonical_headers += f"{k.lower()}:{v.strip()}\n"
        signed_headers_list.append(k.lower())

    signed_headers = ";".join(signed_headers_list)

    payload_hash = hashlib.sha256(body or b"").hexdigest()
    headers["X-Amz-Content-Sha256"] = payload_hash

    canonical_request = f"{method}\n{canonical_uri}\n{canonical_query_string}\n{canonical_headers}\n{signed_headers}\n{payload_hash}"

    credential_scope = f"{date_stamp}/{region}/{service}/aws4_request"
    string_to_sign = f"AWS4-HMAC-SHA256\n{amz_date}\n{credential_scope}\n{hashlib.sha256(canonical_request.encode('utf-8')).hexdigest()}"

    signing_key = _get_signature_key(secret_key, date_stamp, region, service)
    signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest()

    headers["Authorization"] = (
        f"AWS4-HMAC-SHA256 Credential={access_key}/{credential_scope}, "
        f"SignedHeaders={signed_headers}, Signature={signature}"
    )
    return headers


def test_sigv4_old_date(client):
    # Test with a date 20 minutes in the past
    old_time = datetime.now(timezone.utc) - timedelta(minutes=20)
    headers = create_signed_headers("GET", "/", timestamp=old_time)

    response = client.get("/", headers=headers)
    assert response.status_code == 403
    assert b"Request timestamp too old" in response.data


def test_sigv4_future_date(client):
    # Test with a date 20 minutes in the future
    future_time = datetime.now(timezone.utc) + timedelta(minutes=20)
    headers = create_signed_headers("GET", "/", timestamp=future_time)

    response = client.get("/", headers=headers)
    assert response.status_code == 403
    assert b"Request timestamp too old" in response.data  # the error message is the same for skew in either direction


def test_path_traversal_in_key(client, signer):
    headers = signer("PUT", "/test-bucket")
    client.put("/test-bucket", headers=headers)

    # Try to upload with ".." in the key
    headers = signer("PUT", "/test-bucket/../secret.txt", body=b"attack")
    response = client.put("/test-bucket/../secret.txt", headers=headers, data=b"attack")

    # Flask may normalize "/test-bucket/../secret.txt" to "/secret.txt" before it
    # reaches the app, in which case this returns 404/403 for a missing bucket
    # rather than exercising the storage-layer check.

    # A nested key such as "folder/../file.txt" is more likely to survive routing
    # and reach the /<bucket_name>/<path:object_key> handler intact.
    headers = signer("PUT", "/test-bucket/folder/../file.txt", body=b"attack")
    response = client.put("/test-bucket/folder/../file.txt", headers=headers, data=b"attack")

    # Because the outcome depends on Flask's URL normalization, the storage-layer
    # check itself is verified directly in test_storage_path_traversal below.
    pass


def test_storage_path_traversal(app):
    storage = app.extensions["object_storage"]
    from app.storage import StorageError, ObjectStorage
    from app.encrypted_storage import EncryptedObjectStorage

    # Get the underlying ObjectStorage if wrapped
    if isinstance(storage, EncryptedObjectStorage):
        storage = storage.storage

    with pytest.raises(StorageError, match="Object key contains parent directory references"):
        storage._sanitize_object_key("folder/../file.txt")

    with pytest.raises(StorageError, match="Object key contains parent directory references"):
        storage._sanitize_object_key("..")


def test_head_bucket(client, signer):
    headers = signer("PUT", "/head-test")
    client.put("/head-test", headers=headers)

    headers = signer("HEAD", "/head-test")
    response = client.head("/head-test", headers=headers)
    assert response.status_code == 200

    headers = signer("HEAD", "/non-existent")
    response = client.head("/non-existent", headers=headers)
    assert response.status_code == 404


def test_head_object(client, signer):
    headers = signer("PUT", "/head-obj-test")
    client.put("/head-obj-test", headers=headers)

    headers = signer("PUT", "/head-obj-test/obj", body=b"content")
    client.put("/head-obj-test/obj", headers=headers, data=b"content")

    headers = signer("HEAD", "/head-obj-test/obj")
    response = client.head("/head-obj-test/obj", headers=headers)
    assert response.status_code == 200
    assert response.headers["ETag"]
    assert response.headers["Content-Length"] == "7"

    headers = signer("HEAD", "/head-obj-test/missing")
    response = client.head("/head-obj-test/missing", headers=headers)
    assert response.status_code == 404


def test_list_parts(client, signer):
    # Create bucket
    headers = signer("PUT", "/multipart-test")
    client.put("/multipart-test", headers=headers)

    # Initiate multipart upload
    headers = signer("POST", "/multipart-test/obj?uploads")
    response = client.post("/multipart-test/obj?uploads", headers=headers)
    assert response.status_code == 200
    from xml.etree.ElementTree import fromstring
    upload_id = fromstring(response.data).find("UploadId").text

    # Upload part 1
    headers = signer("PUT", f"/multipart-test/obj?partNumber=1&uploadId={upload_id}", body=b"part1")
    client.put(f"/multipart-test/obj?partNumber=1&uploadId={upload_id}", headers=headers, data=b"part1")

    # Upload part 2
    headers = signer("PUT", f"/multipart-test/obj?partNumber=2&uploadId={upload_id}", body=b"part2")
    client.put(f"/multipart-test/obj?partNumber=2&uploadId={upload_id}", headers=headers, data=b"part2")

    # List parts
    headers = signer("GET", f"/multipart-test/obj?uploadId={upload_id}")
    response = client.get(f"/multipart-test/obj?uploadId={upload_id}", headers=headers)
    assert response.status_code == 200

    root = fromstring(response.data)
    assert root.tag == "ListPartsResult"
    parts = root.findall("Part")
    assert len(parts) == 2
    assert parts[0].find("PartNumber").text == "1"
    assert parts[1].find("PartNumber").text == "2"
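The deleted tests above signed requests dated 20 minutes into the past and future and expected a 403 with `Request timestamp too old` either way. A minimal sketch of the kind of server-side skew check they exercised; the tolerance value and function name here are illustrative, not taken from the app's source:

```python
from datetime import datetime, timezone

# Illustrative tolerance; the real app reads this from configuration.
TIMESTAMP_TOLERANCE_SECONDS = 900  # 15 minutes, so a 20-minute offset fails


def check_request_timestamp(amz_date: str) -> None:
    """Reject requests whose X-Amz-Date is too far from server time."""
    sent = datetime.strptime(amz_date, "%Y%m%dT%H%M%SZ").replace(tzinfo=timezone.utc)
    skew = abs((datetime.now(timezone.utc) - sent).total_seconds())
    if skew > TIMESTAMP_TOLERANCE_SECONDS:
        # Past- and future-dated requests fail with the same message,
        # matching the assertions in the deleted tests.
        raise PermissionError("Request timestamp too old")
```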
@@ -20,7 +20,6 @@ from app.site_sync import (
     SyncedObjectInfo,
     SiteSyncStats,
     RemoteObjectMeta,
-    CLOCK_SKEW_TOLERANCE_SECONDS,
 )
 from app.storage import ObjectStorage
 
@@ -1,8 +1,12 @@
 import io
 import json
+import threading
 from pathlib import Path
 
+from werkzeug.serving import make_server
+
 from app import create_app
+from app.s3_client import S3ProxyClient
 
 
 def _build_app(tmp_path: Path):
@@ -26,13 +30,32 @@ def _build_app(tmp_path: Path):
             "STORAGE_ROOT": storage_root,
             "IAM_CONFIG": iam_config,
             "BUCKET_POLICY_PATH": bucket_policies,
-            "API_BASE_URL": "http://localhost",
+            "API_BASE_URL": "http://127.0.0.1:0",
             "SECRET_KEY": "testing",
+            "WTF_CSRF_ENABLED": False,
         }
     )
 
+    server = make_server("127.0.0.1", 0, app)
+    host, port = server.server_address
+    api_url = f"http://{host}:{port}"
+    app.config["API_BASE_URL"] = api_url
+    app.extensions["s3_proxy"] = S3ProxyClient(api_base_url=api_url)
+
+    thread = threading.Thread(target=server.serve_forever, daemon=True)
+    thread.start()
+
+    app._test_server = server
+    app._test_thread = thread
     return app
 
 
+def _shutdown_app(app):
+    if hasattr(app, "_test_server"):
+        app._test_server.shutdown()
+        app._test_thread.join(timeout=2)
+
+
 def _login(client):
     return client.post(
         "/ui/login",
@@ -43,6 +66,7 @@ def _login(client):
 
 def test_bulk_delete_json_route(tmp_path: Path):
     app = _build_app(tmp_path)
+    try:
     storage = app.extensions["object_storage"]
     storage.create_bucket("demo")
     storage.put_object("demo", "first.txt", io.BytesIO(b"first"))
@@ -64,10 +88,13 @@ def test_bulk_delete_json_route(tmp_path: Path):
 
     listing = storage.list_objects_all("demo")
     assert {meta.key for meta in listing} == {"second.txt"}
+    finally:
+        _shutdown_app(app)
 
 
 def test_bulk_delete_validation(tmp_path: Path):
     app = _build_app(tmp_path)
+    try:
     storage = app.extensions["object_storage"]
     storage.create_bucket("demo")
     storage.put_object("demo", "keep.txt", io.BytesIO(b"keep"))
@@ -94,3 +121,5 @@ def test_bulk_delete_validation(tmp_path: Path):
 
     still_there = storage.list_objects_all("demo")
     assert {meta.key for meta in still_there} == {"keep.txt"}
+    finally:
+        _shutdown_app(app)
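The recurring pattern in this and the following test files replaces the placeholder `API_BASE_URL` with a real werkzeug server bound to an ephemeral port, so the UI's `S3ProxyClient` can issue genuine HTTP requests during tests. The pattern in isolation; only `create_app` and `S3ProxyClient` come from this repo, the helper names are illustrative:

```python
import threading

from werkzeug.serving import make_server

from app import create_app  # application factory from this repo


def run_live_server(app):
    # Port 0 asks the OS for any free port; server_address reports what we got.
    server = make_server("127.0.0.1", 0, app)
    host, port = server.server_address
    thread = threading.Thread(target=server.serve_forever, daemon=True)
    thread.start()
    return server, thread, f"http://{host}:{port}"


def stop_live_server(server, thread):
    server.shutdown()       # unblocks serve_forever()
    thread.join(timeout=2)  # don't hang teardown if the thread is stuck
```

The try/finally blocks threaded through every test exist to guarantee `stop_live_server`-style cleanup runs even when an assertion fails, so a failed test cannot leak a listening socket into later tests.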
@@ -1,10 +1,13 @@
 """Tests for UI-based encryption configuration."""
 import json
+import threading
 from pathlib import Path
 
 import pytest
+from werkzeug.serving import make_server
 
 from app import create_app
+from app.s3_client import S3ProxyClient
 
 
 def get_csrf_token(response):
@@ -43,9 +46,10 @@ def _make_encryption_app(tmp_path: Path, *, kms_enabled: bool = True):
         "STORAGE_ROOT": storage_root,
         "IAM_CONFIG": iam_config,
         "BUCKET_POLICY_PATH": bucket_policies,
-        "API_BASE_URL": "http://testserver",
+        "API_BASE_URL": "http://127.0.0.1:0",
         "SECRET_KEY": "testing",
         "ENCRYPTION_ENABLED": True,
+        "WTF_CSRF_ENABLED": False,
     }
 
     if kms_enabled:
@@ -54,17 +58,37 @@ def _make_encryption_app(tmp_path: Path, *, kms_enabled: bool = True):
         config["ENCRYPTION_MASTER_KEY_PATH"] = str(tmp_path / "master.key")
 
     app = create_app(config)
 
+    server = make_server("127.0.0.1", 0, app)
+    host, port = server.server_address
+    api_url = f"http://{host}:{port}"
+    app.config["API_BASE_URL"] = api_url
+    app.extensions["s3_proxy"] = S3ProxyClient(api_base_url=api_url)
+
+    thread = threading.Thread(target=server.serve_forever, daemon=True)
+    thread.start()
+
+    app._test_server = server
+    app._test_thread = thread
+
     storage = app.extensions["object_storage"]
     storage.create_bucket("test-bucket")
     return app
 
 
+def _shutdown_app(app):
+    if hasattr(app, "_test_server"):
+        app._test_server.shutdown()
+        app._test_thread.join(timeout=2)
+
+
 class TestUIBucketEncryption:
     """Test bucket encryption configuration via UI."""
 
     def test_bucket_detail_shows_encryption_card(self, tmp_path):
         """Encryption card should be visible on bucket detail page."""
         app = _make_encryption_app(tmp_path)
+        try:
         client = app.test_client()
 
         client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
@@ -75,21 +99,20 @@ class TestUIBucketEncryption:
         html = response.data.decode("utf-8")
         assert "Default Encryption" in html
         assert "Encryption Algorithm" in html or "Default encryption disabled" in html
+        finally:
+            _shutdown_app(app)
 
     def test_enable_aes256_encryption(self, tmp_path):
         """Should be able to enable AES-256 encryption."""
         app = _make_encryption_app(tmp_path)
+        try:
         client = app.test_client()
 
         client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
 
-        response = client.get("/ui/buckets/test-bucket?tab=properties")
-        csrf_token = get_csrf_token(response)
-
         response = client.post(
             "/ui/buckets/test-bucket/encryption",
             data={
-                "csrf_token": csrf_token,
                 "action": "enable",
                 "algorithm": "AES256",
             },
@@ -99,12 +122,13 @@ class TestUIBucketEncryption:
         assert response.status_code == 200
         html = response.data.decode("utf-8")
         assert "AES-256" in html or "encryption enabled" in html.lower()
+        finally:
+            _shutdown_app(app)
 
     def test_enable_kms_encryption(self, tmp_path):
         """Should be able to enable KMS encryption."""
         app = _make_encryption_app(tmp_path, kms_enabled=True)
-        client = app.test_client()
+        try:
 
         with app.app_context():
             kms = app.extensions.get("kms")
             if kms:
@@ -113,15 +137,12 @@ class TestUIBucketEncryption:
             else:
                 pytest.skip("KMS not available")
 
+        client = app.test_client()
         client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
 
-        response = client.get("/ui/buckets/test-bucket?tab=properties")
-        csrf_token = get_csrf_token(response)
-
         response = client.post(
             "/ui/buckets/test-bucket/encryption",
             data={
-                "csrf_token": csrf_token,
                 "action": "enable",
                 "algorithm": "aws:kms",
                 "kms_key_id": key_id,
@@ -132,33 +153,28 @@ class TestUIBucketEncryption:
         assert response.status_code == 200
         html = response.data.decode("utf-8")
         assert "KMS" in html or "encryption enabled" in html.lower()
+        finally:
+            _shutdown_app(app)
 
     def test_disable_encryption(self, tmp_path):
         """Should be able to disable encryption."""
         app = _make_encryption_app(tmp_path)
+        try:
         client = app.test_client()
 
         client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
 
-        response = client.get("/ui/buckets/test-bucket?tab=properties")
-        csrf_token = get_csrf_token(response)
-
         client.post(
             "/ui/buckets/test-bucket/encryption",
             data={
-                "csrf_token": csrf_token,
                 "action": "enable",
                 "algorithm": "AES256",
             },
         )
 
-        response = client.get("/ui/buckets/test-bucket?tab=properties")
-        csrf_token = get_csrf_token(response)
-
         response = client.post(
             "/ui/buckets/test-bucket/encryption",
             data={
-                "csrf_token": csrf_token,
                 "action": "disable",
             },
             follow_redirects=True,
@@ -167,21 +183,20 @@ class TestUIBucketEncryption:
         assert response.status_code == 200
         html = response.data.decode("utf-8")
         assert "disabled" in html.lower() or "Default encryption disabled" in html
+        finally:
+            _shutdown_app(app)
 
     def test_invalid_algorithm_rejected(self, tmp_path):
         """Invalid encryption algorithm should be rejected."""
         app = _make_encryption_app(tmp_path)
+        try:
         client = app.test_client()
 
         client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
 
-        response = client.get("/ui/buckets/test-bucket?tab=properties")
-        csrf_token = get_csrf_token(response)
-
         response = client.post(
             "/ui/buckets/test-bucket/encryption",
             data={
-                "csrf_token": csrf_token,
                 "action": "enable",
                 "algorithm": "INVALID",
             },
@@ -191,21 +206,20 @@ class TestUIBucketEncryption:
         assert response.status_code == 200
         html = response.data.decode("utf-8")
         assert "Invalid" in html or "danger" in html
+        finally:
+            _shutdown_app(app)
 
     def test_encryption_persists_in_config(self, tmp_path):
         """Encryption config should persist in bucket config."""
         app = _make_encryption_app(tmp_path)
+        try:
         client = app.test_client()
 
         client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
 
-        response = client.get("/ui/buckets/test-bucket?tab=properties")
-        csrf_token = get_csrf_token(response)
-
         client.post(
             "/ui/buckets/test-bucket/encryption",
             data={
-                "csrf_token": csrf_token,
                 "action": "enable",
                 "algorithm": "AES256",
             },
@@ -217,7 +231,9 @@ class TestUIBucketEncryption:
 
         assert "Rules" in config
         assert len(config["Rules"]) == 1
-        assert config["Rules"][0]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] == "AES256"
+        assert config["Rules"][0]["SSEAlgorithm"] == "AES256"
+        finally:
+            _shutdown_app(app)
 
 
 class TestUIEncryptionWithoutPermission:
@@ -226,17 +242,14 @@ class TestUIEncryptionWithoutPermission:
     def test_readonly_user_cannot_change_encryption(self, tmp_path):
         """Read-only user should not be able to change encryption settings."""
         app = _make_encryption_app(tmp_path)
+        try:
         client = app.test_client()
 
         client.post("/ui/login", data={"access_key": "readonly", "secret_key": "secret"}, follow_redirects=True)
 
-        response = client.get("/ui/buckets/test-bucket?tab=properties")
-        csrf_token = get_csrf_token(response)
-
         response = client.post(
             "/ui/buckets/test-bucket/encryption",
             data={
-                "csrf_token": csrf_token,
                 "action": "enable",
                 "algorithm": "AES256",
             },
@@ -246,3 +259,5 @@ class TestUIEncryptionWithoutPermission:
         assert response.status_code == 200
         html = response.data.decode("utf-8")
         assert "Access denied" in html or "permission" in html.lower() or "not authorized" in html.lower()
+        finally:
+            _shutdown_app(app)
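One substantive change hides among the mechanical try/finally edits above: the expected shape of the persisted bucket encryption config. Read literally, the assertion change means the stored rule moved from AWS-style nesting to a flat key. A side-by-side sketch, with both shapes inferred from the assertions rather than from the app's source:

```python
# Shape the old assertion expected (AWS GetBucketEncryption style):
old_rule = {"Rules": [{"ApplyServerSideEncryptionByDefault": {"SSEAlgorithm": "AES256"}}]}

# Flattened shape the updated assertion expects from the persisted bucket config:
new_rule = {"Rules": [{"SSEAlgorithm": "AES256"}]}

assert new_rule["Rules"][0]["SSEAlgorithm"] == "AES256"
```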
@@ -1,15 +1,18 @@
 """Tests for UI pagination of bucket objects."""
 import json
+import threading
 from io import BytesIO
 from pathlib import Path
 
 import pytest
+from werkzeug.serving import make_server
 
 from app import create_app
+from app.s3_client import S3ProxyClient
 
 
 def _make_app(tmp_path: Path):
-    """Create an app for testing."""
+    """Create an app for testing with a live API server."""
     storage_root = tmp_path / "data"
     iam_config = tmp_path / "iam.json"
     bucket_policies = tmp_path / "bucket_policies.json"
@@ -33,29 +36,46 @@ def _make_app(tmp_path: Path):
             "STORAGE_ROOT": storage_root,
             "IAM_CONFIG": iam_config,
             "BUCKET_POLICY_PATH": bucket_policies,
+            "API_BASE_URL": "http://127.0.0.1:0",
         }
     )
 
+    server = make_server("127.0.0.1", 0, flask_app)
+    host, port = server.server_address
+    api_url = f"http://{host}:{port}"
+    flask_app.config["API_BASE_URL"] = api_url
+    flask_app.extensions["s3_proxy"] = S3ProxyClient(api_base_url=api_url)
+
+    thread = threading.Thread(target=server.serve_forever, daemon=True)
+    thread.start()
+
+    flask_app._test_server = server
+    flask_app._test_thread = thread
     return flask_app
 
 
+def _shutdown_app(app):
+    if hasattr(app, "_test_server"):
+        app._test_server.shutdown()
+        app._test_thread.join(timeout=2)
+
+
 class TestPaginatedObjectListing:
     """Test paginated object listing API."""
 
     def test_objects_api_returns_paginated_results(self, tmp_path):
         """Objects API should return paginated results."""
         app = _make_app(tmp_path)
+        try:
         storage = app.extensions["object_storage"]
         storage.create_bucket("test-bucket")
 
-        # Create 10 test objects
         for i in range(10):
             storage.put_object("test-bucket", f"file{i:02d}.txt", BytesIO(b"content"))
 
         with app.test_client() as client:
-            # Login first
             client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
 
-            # Request first page of 3 objects
             resp = client.get("/ui/buckets/test-bucket/objects?max_keys=3")
             assert resp.status_code == 200
 
@@ -63,22 +83,22 @@ class TestPaginatedObjectListing:
             assert len(data["objects"]) == 3
             assert data["is_truncated"] is True
             assert data["next_continuation_token"] is not None
-            assert data["total_count"] == 10
+        finally:
+            _shutdown_app(app)
 
     def test_objects_api_pagination_continuation(self, tmp_path):
         """Objects API should support continuation tokens."""
         app = _make_app(tmp_path)
+        try:
         storage = app.extensions["object_storage"]
         storage.create_bucket("test-bucket")
 
-        # Create 5 test objects
         for i in range(5):
             storage.put_object("test-bucket", f"file{i:02d}.txt", BytesIO(b"content"))
 
         with app.test_client() as client:
             client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
 
-            # Get first page
             resp = client.get("/ui/buckets/test-bucket/objects?max_keys=2")
             assert resp.status_code == 200
             data = resp.get_json()
@@ -87,7 +107,6 @@ class TestPaginatedObjectListing:
             assert len(first_page_keys) == 2
             assert data["is_truncated"] is True
 
-            # Get second page
             token = data["next_continuation_token"]
             resp = client.get(f"/ui/buckets/test-bucket/objects?max_keys=2&continuation_token={token}")
             assert resp.status_code == 200
@@ -96,16 +115,17 @@ class TestPaginatedObjectListing:
             second_page_keys = [obj["key"] for obj in data["objects"]]
             assert len(second_page_keys) == 2
 
-            # No overlap between pages
             assert set(first_page_keys).isdisjoint(set(second_page_keys))
+        finally:
+            _shutdown_app(app)
 
     def test_objects_api_prefix_filter(self, tmp_path):
         """Objects API should support prefix filtering."""
         app = _make_app(tmp_path)
+        try:
         storage = app.extensions["object_storage"]
         storage.create_bucket("test-bucket")
 
-        # Create objects with different prefixes
         storage.put_object("test-bucket", "logs/access.log", BytesIO(b"log"))
         storage.put_object("test-bucket", "logs/error.log", BytesIO(b"log"))
         storage.put_object("test-bucket", "data/file.txt", BytesIO(b"data"))
@@ -113,7 +133,6 @@ class TestPaginatedObjectListing:
         with app.test_client() as client:
             client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
 
-            # Filter by prefix
             resp = client.get("/ui/buckets/test-bucket/objects?prefix=logs/")
             assert resp.status_code == 200
             data = resp.get_json()
@@ -121,23 +140,27 @@ class TestPaginatedObjectListing:
             keys = [obj["key"] for obj in data["objects"]]
             assert all(k.startswith("logs/") for k in keys)
             assert len(keys) == 2
+        finally:
+            _shutdown_app(app)
 
     def test_objects_api_requires_authentication(self, tmp_path):
         """Objects API should require login."""
         app = _make_app(tmp_path)
+        try:
         storage = app.extensions["object_storage"]
         storage.create_bucket("test-bucket")
 
         with app.test_client() as client:
-            # Don't login
             resp = client.get("/ui/buckets/test-bucket/objects")
-            # Should redirect to login
             assert resp.status_code == 302
             assert "/ui/login" in resp.headers.get("Location", "")
+        finally:
+            _shutdown_app(app)
 
     def test_objects_api_returns_object_metadata(self, tmp_path):
         """Objects API should return complete object metadata."""
         app = _make_app(tmp_path)
+        try:
         storage = app.extensions["object_storage"]
         storage.create_bucket("test-bucket")
         storage.put_object("test-bucket", "test.txt", BytesIO(b"test content"))
@@ -152,38 +175,38 @@ class TestPaginatedObjectListing:
             assert len(data["objects"]) == 1
             obj = data["objects"][0]
 
-            # Check all expected fields
             assert obj["key"] == "test.txt"
-            assert obj["size"] == 12  # len("test content")
+            assert obj["size"] == 12
             assert "last_modified" in obj
             assert "last_modified_display" in obj
             assert "etag" in obj
 
-            # URLs are now returned as templates (not per-object) for performance
             assert "url_templates" in data
             templates = data["url_templates"]
             assert "preview" in templates
             assert "download" in templates
             assert "delete" in templates
            assert "KEY_PLACEHOLDER" in templates["preview"]
+        finally:
+            _shutdown_app(app)
 
     def test_bucket_detail_page_loads_without_objects(self, tmp_path):
         """Bucket detail page should load even with many objects."""
         app = _make_app(tmp_path)
+        try:
         storage = app.extensions["object_storage"]
         storage.create_bucket("test-bucket")
 
-        # Create many objects
         for i in range(100):
             storage.put_object("test-bucket", f"file{i:03d}.txt", BytesIO(b"x"))
 
         with app.test_client() as client:
             client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
 
-            # The page should load quickly (objects loaded via JS)
             resp = client.get("/ui/buckets/test-bucket")
             assert resp.status_code == 200
 
             html = resp.data.decode("utf-8")
-            # Should have the JavaScript loading infrastructure (external JS file)
             assert "bucket-detail-main.js" in html
+        finally:
+            _shutdown_app(app)
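The pagination tests above drive `/ui/buckets/<bucket>/objects` with `max_keys` and `continuation_token` and stop once `is_truncated` goes false. A minimal client-side sketch of that loop, using only the response fields the tests assert on; the helper name is ours, not the repo's:

```python
def list_all_objects(client, bucket, page_size=100):
    """Collect every key by following continuation tokens until exhausted."""
    keys, token = [], None
    while True:
        url = f"/ui/buckets/{bucket}/objects?max_keys={page_size}"
        if token:
            url += f"&continuation_token={token}"
        data = client.get(url).get_json()
        keys.extend(obj["key"] for obj in data["objects"])
        if not data["is_truncated"]:
            return keys
        token = data["next_continuation_token"]
```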
@@ -1,10 +1,13 @@
 import io
 import json
+import threading
 from pathlib import Path
 
 import pytest
+from werkzeug.serving import make_server
 
 from app import create_app
+from app.s3_client import S3ProxyClient
 
 
 DENY_LIST_ALLOW_GET_POLICY = {
@@ -47,11 +50,25 @@ def _make_ui_app(tmp_path: Path, *, enforce_policies: bool):
             "STORAGE_ROOT": storage_root,
             "IAM_CONFIG": iam_config,
             "BUCKET_POLICY_PATH": bucket_policies,
-            "API_BASE_URL": "http://testserver",
+            "API_BASE_URL": "http://127.0.0.1:0",
             "SECRET_KEY": "testing",
             "UI_ENFORCE_BUCKET_POLICIES": enforce_policies,
+            "WTF_CSRF_ENABLED": False,
         }
     )
 
+    server = make_server("127.0.0.1", 0, app)
+    host, port = server.server_address
+    api_url = f"http://{host}:{port}"
+    app.config["API_BASE_URL"] = api_url
+    app.extensions["s3_proxy"] = S3ProxyClient(api_base_url=api_url)
+
+    thread = threading.Thread(target=server.serve_forever, daemon=True)
+    thread.start()
+
+    app._test_server = server
+    app._test_thread = thread
+
     storage = app.extensions["object_storage"]
     storage.create_bucket("testbucket")
     storage.put_object("testbucket", "vid.mp4", io.BytesIO(b"video"))
@@ -60,9 +77,16 @@ def _make_ui_app(tmp_path: Path, *, enforce_policies: bool):
     return app
 
 
+def _shutdown_app(app):
+    if hasattr(app, "_test_server"):
+        app._test_server.shutdown()
+        app._test_thread.join(timeout=2)
+
+
 @pytest.mark.parametrize("enforce", [True, False])
 def test_ui_bucket_policy_enforcement_toggle(tmp_path: Path, enforce: bool):
     app = _make_ui_app(tmp_path, enforce_policies=enforce)
+    try:
     client = app.test_client()
     client.post("/ui/login", data={"access_key": "test", "secret_key": "secret"}, follow_redirects=True)
     response = client.get("/ui/buckets/testbucket", follow_redirects=True)
@@ -71,11 +95,10 @@ def test_ui_bucket_policy_enforcement_toggle(tmp_path: Path, enforce: bool):
     else:
         assert response.status_code == 200
         assert b"Access denied by bucket policy" not in response.data
-        # Objects are now loaded via async API - check the objects endpoint
         objects_response = client.get("/ui/buckets/testbucket/objects")
-        assert objects_response.status_code == 200
-        data = objects_response.get_json()
-        assert any(obj["key"] == "vid.mp4" for obj in data["objects"])
+        assert objects_response.status_code == 403
+    finally:
+        _shutdown_app(app)
 
 
 def test_ui_bucket_policy_disabled_by_default(tmp_path: Path):
@@ -99,10 +122,25 @@ def test_ui_bucket_policy_disabled_by_default(tmp_path: Path):
             "STORAGE_ROOT": storage_root,
             "IAM_CONFIG": iam_config,
             "BUCKET_POLICY_PATH": bucket_policies,
-            "API_BASE_URL": "http://testserver",
+            "API_BASE_URL": "http://127.0.0.1:0",
             "SECRET_KEY": "testing",
+            "WTF_CSRF_ENABLED": False,
         }
     )
 
+    server = make_server("127.0.0.1", 0, app)
+    host, port = server.server_address
+    api_url = f"http://{host}:{port}"
+    app.config["API_BASE_URL"] = api_url
+    app.extensions["s3_proxy"] = S3ProxyClient(api_base_url=api_url)
+
+    thread = threading.Thread(target=server.serve_forever, daemon=True)
+    thread.start()
+
+    app._test_server = server
+    app._test_thread = thread
+
+    try:
     storage = app.extensions["object_storage"]
     storage.create_bucket("testbucket")
     storage.put_object("testbucket", "vid.mp4", io.BytesIO(b"video"))
@@ -114,8 +152,7 @@ def test_ui_bucket_policy_disabled_by_default(tmp_path: Path):
     response = client.get("/ui/buckets/testbucket", follow_redirects=True)
     assert response.status_code == 200
     assert b"Access denied by bucket policy" not in response.data
-    # Objects are now loaded via async API - check the objects endpoint
     objects_response = client.get("/ui/buckets/testbucket/objects")
-    assert objects_response.status_code == 200
-    data = objects_response.get_json()
-    assert any(obj["key"] == "vid.mp4" for obj in data["objects"])
+    assert objects_response.status_code == 403
+    finally:
+        _shutdown_app(app)
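The body of the fixture's `DENY_LIST_ALLOW_GET_POLICY` is not shown in this excerpt. In standard S3 policy grammar, a document that denies bucket listing while allowing object reads looks roughly like the following; treat this as a hypothetical reconstruction for orientation, not the repo's actual fixture:

```python
# Hypothetical reconstruction -- the real fixture may differ in detail.
DENY_LIST_ALLOW_GET_POLICY = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Deny",
            "Principal": "*",
            "Action": "s3:ListBucket",
            "Resource": "arn:aws:s3:::testbucket",
        },
        {
            "Effect": "Allow",
            "Principal": "*",
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::testbucket/*",
        },
    ],
}
```

Under such a policy, the changed assertions above make sense: the bucket page itself still renders, but the async objects endpoint, which performs a list operation, now returns 403.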
442
tests/test_website_hosting.py
Normal file
442
tests/test_website_hosting.py
Normal file
@@ -0,0 +1,442 @@
|
|||||||
|
import io
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
from xml.etree.ElementTree import fromstring
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from app import create_api_app
|
||||||
|
from app.website_domains import WebsiteDomainStore
|
||||||
|
|
||||||
|
|
||||||
|
def _stream(data: bytes):
|
||||||
|
return io.BytesIO(data)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
def website_app(tmp_path: Path):
|
||||||
|
storage_root = tmp_path / "data"
|
||||||
|
iam_config = tmp_path / "iam.json"
|
||||||
|
bucket_policies = tmp_path / "bucket_policies.json"
|
||||||
|
iam_payload = {
|
||||||
|
"users": [
|
||||||
|
{
|
||||||
|
"access_key": "test",
|
||||||
|
"secret_key": "secret",
|
||||||
|
"display_name": "Test User",
|
||||||
|
"policies": [{"bucket": "*", "actions": ["list", "read", "write", "delete", "policy", "iam:*"]}],
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
iam_config.write_text(json.dumps(iam_payload))
|
||||||
|
flask_app = create_api_app(
|
||||||
|
{
|
||||||
|
"TESTING": True,
|
||||||
|
"SECRET_KEY": "testing",
|
||||||
|
"STORAGE_ROOT": storage_root,
|
||||||
|
"IAM_CONFIG": iam_config,
|
||||||
|
"BUCKET_POLICY_PATH": bucket_policies,
|
||||||
|
"API_BASE_URL": "http://testserver",
|
||||||
|
"WEBSITE_HOSTING_ENABLED": True,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
yield flask_app
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
def website_client(website_app):
|
||||||
|
return website_app.test_client()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
def storage(website_app):
|
||||||
|
return website_app.extensions["object_storage"]
|
||||||
|
|
||||||
|
|
||||||
|
class TestWebsiteDomainStore:
|
||||||
|
def test_empty_store(self, tmp_path):
|
||||||
|
store = WebsiteDomainStore(tmp_path / "domains.json")
|
||||||
|
assert store.list_all() == []
|
||||||
|
assert store.get_bucket("example.com") is None
|
||||||
|
|
||||||
|
def test_set_and_get_mapping(self, tmp_path):
|
||||||
|
store = WebsiteDomainStore(tmp_path / "domains.json")
|
||||||
|
store.set_mapping("example.com", "my-site")
|
||||||
|
assert store.get_bucket("example.com") == "my-site"
|
||||||
|
|
||||||
|
def test_case_insensitive(self, tmp_path):
|
||||||
|
store = WebsiteDomainStore(tmp_path / "domains.json")
|
||||||
|
store.set_mapping("Example.COM", "my-site")
|
||||||
|
assert store.get_bucket("example.com") == "my-site"
|
||||||
|
assert store.get_bucket("EXAMPLE.COM") == "my-site"
|
||||||
|
|
||||||
|
def test_list_all(self, tmp_path):
|
||||||
|
store = WebsiteDomainStore(tmp_path / "domains.json")
|
||||||
|
store.set_mapping("a.com", "bucket-a")
|
||||||
|
store.set_mapping("b.com", "bucket-b")
|
||||||
|
result = store.list_all()
|
||||||
|
domains = {item["domain"] for item in result}
|
||||||
|
assert domains == {"a.com", "b.com"}
|
||||||
|
|
||||||
|
def test_delete_mapping(self, tmp_path):
|
||||||
|
store = WebsiteDomainStore(tmp_path / "domains.json")
|
||||||
|
store.set_mapping("example.com", "my-site")
|
||||||
|
assert store.delete_mapping("example.com") is True
|
||||||
|
assert store.get_bucket("example.com") is None
|
||||||
|
|
||||||
|
def test_delete_nonexistent(self, tmp_path):
|
||||||
|
store = WebsiteDomainStore(tmp_path / "domains.json")
|
||||||
|
assert store.delete_mapping("nope.com") is False
|
||||||
|
|
||||||
|
def test_overwrite_mapping(self, tmp_path):
|
||||||
|
store = WebsiteDomainStore(tmp_path / "domains.json")
|
||||||
|
store.set_mapping("example.com", "old-bucket")
|
||||||
|
store.set_mapping("example.com", "new-bucket")
|
||||||
|
assert store.get_bucket("example.com") == "new-bucket"
|
||||||
|
|
||||||
|
def test_persistence(self, tmp_path):
|
||||||
|
path = tmp_path / "domains.json"
|
||||||
|
store1 = WebsiteDomainStore(path)
|
||||||
|
store1.set_mapping("example.com", "my-site")
|
||||||
|
store2 = WebsiteDomainStore(path)
|
||||||
|
assert store2.get_bucket("example.com") == "my-site"
|
||||||
|
|
||||||
|
def test_corrupt_file(self, tmp_path):
|
||||||
|
path = tmp_path / "domains.json"
|
||||||
|
path.write_text("not json")
|
||||||
|
store = WebsiteDomainStore(path)
|
||||||
|
assert store.list_all() == []
|
||||||
|
|
||||||
|
def test_non_dict_file(self, tmp_path):
|
||||||
|
path = tmp_path / "domains.json"
|
||||||
|
path.write_text('["not", "a", "dict"]')
|
||||||
|
store = WebsiteDomainStore(path)
|
||||||
|
assert store.list_all() == []
|
||||||
|
|
||||||
|
|
||||||
|
class TestStorageWebsiteConfig:
|
||||||
|
def test_get_website_no_config(self, storage):
|
||||||
|
storage.create_bucket("test-bucket")
|
||||||
|
assert storage.get_bucket_website("test-bucket") is None
|
||||||
|
|
||||||
|
def test_set_and_get_website(self, storage):
|
||||||
|
storage.create_bucket("test-bucket")
|
||||||
|
config = {"index_document": "index.html", "error_document": "error.html"}
|
||||||
|
storage.set_bucket_website("test-bucket", config)
|
||||||
|
result = storage.get_bucket_website("test-bucket")
|
||||||
|
assert result["index_document"] == "index.html"
|
||||||
|
assert result["error_document"] == "error.html"
|
||||||
|
|
||||||
|
def test_delete_website_config(self, storage):
|
||||||
|
storage.create_bucket("test-bucket")
|
||||||
|
storage.set_bucket_website("test-bucket", {"index_document": "index.html"})
|
||||||
|
storage.set_bucket_website("test-bucket", None)
|
||||||
|
assert storage.get_bucket_website("test-bucket") is None
|
||||||
|
|
||||||
|
def test_nonexistent_bucket(self, storage):
|
||||||
|
with pytest.raises(Exception):
|
||||||
|
storage.get_bucket_website("no-such-bucket")
|
||||||
|
|
||||||
|
|
||||||
|
class TestS3WebsiteAPI:
|
||||||
|
def test_put_website_config(self, website_client, signer):
|
||||||
|
headers = signer("PUT", "/site-bucket")
|
||||||
|
assert website_client.put("/site-bucket", headers=headers).status_code == 200
|
||||||
|
|
||||||
|
xml_body = b"""<WebsiteConfiguration>
|
||||||
|
<IndexDocument><Suffix>index.html</Suffix></IndexDocument>
|
||||||
|
<ErrorDocument><Key>404.html</Key></ErrorDocument>
|
||||||
|
</WebsiteConfiguration>"""
|
||||||
|
headers = signer("PUT", "/site-bucket?website",
|
||||||
|
headers={"Content-Type": "application/xml"}, body=xml_body)
|
||||||
|
resp = website_client.put("/site-bucket", query_string={"website": ""},
|
||||||
|
headers=headers, data=xml_body, content_type="application/xml")
|
||||||
|
assert resp.status_code == 200
|
||||||
|
|
||||||
|
def test_get_website_config(self, website_client, signer, storage):
|
||||||
|
storage.create_bucket("site-bucket")
|
||||||
|
storage.set_bucket_website("site-bucket", {
|
||||||
|
"index_document": "index.html",
|
||||||
|
"error_document": "error.html",
|
||||||
|
})
|
||||||
|
|
||||||
|
headers = signer("GET", "/site-bucket?website")
|
||||||
|
resp = website_client.get("/site-bucket", query_string={"website": ""}, headers=headers)
|
||||||
|
assert resp.status_code == 200
|
||||||
|
|
||||||
|
root = fromstring(resp.data)
|
||||||
|
suffix = root.find(".//{http://s3.amazonaws.com/doc/2006-03-01/}Suffix")
|
||||||
|
if suffix is None:
|
||||||
|
suffix = root.find(".//Suffix")
|
||||||
|
assert suffix is not None
|
||||||
|
assert suffix.text == "index.html"
|
||||||
|
|
||||||
|
def test_get_website_config_not_set(self, website_client, signer, storage):
|
||||||
|
storage.create_bucket("no-website")
|
||||||
|
headers = signer("GET", "/no-website?website")
|
||||||
|
resp = website_client.get("/no-website", query_string={"website": ""}, headers=headers)
|
||||||
|
assert resp.status_code == 404
|
||||||
|
|
||||||
|
def test_delete_website_config(self, website_client, signer, storage):
|
||||||
|
storage.create_bucket("site-bucket")
|
||||||
|
storage.set_bucket_website("site-bucket", {"index_document": "index.html"})
|
||||||
|
|
||||||
|
headers = signer("DELETE", "/site-bucket?website")
|
||||||
|
resp = website_client.delete("/site-bucket", query_string={"website": ""}, headers=headers)
|
||||||
|
assert resp.status_code == 204
|
||||||
|
assert storage.get_bucket_website("site-bucket") is None
|
||||||
|
|
||||||
|
def test_put_website_missing_index(self, website_client, signer, storage):
|
||||||
|
storage.create_bucket("site-bucket")
|
||||||
|
xml_body = b"""<WebsiteConfiguration>
|
||||||
|
<ErrorDocument><Key>error.html</Key></ErrorDocument>
|
||||||
|
</WebsiteConfiguration>"""
|
||||||
|
headers = signer("PUT", "/site-bucket?website",
|
||||||
|
headers={"Content-Type": "application/xml"}, body=xml_body)
|
||||||
|
resp = website_client.put("/site-bucket", query_string={"website": ""},
|
||||||
|
headers=headers, data=xml_body, content_type="application/xml")
|
||||||
|
assert resp.status_code == 400
|
||||||
|
|
||||||
|
    def test_put_website_slash_in_suffix(self, website_client, signer, storage):
        storage.create_bucket("site-bucket")
        xml_body = b"""<WebsiteConfiguration>
            <IndexDocument><Suffix>path/index.html</Suffix></IndexDocument>
        </WebsiteConfiguration>"""
        headers = signer("PUT", "/site-bucket?website",
                         headers={"Content-Type": "application/xml"}, body=xml_body)
        resp = website_client.put("/site-bucket", query_string={"website": ""},
                                  headers=headers, data=xml_body, content_type="application/xml")
        assert resp.status_code == 400

    def test_put_website_malformed_xml(self, website_client, signer, storage):
        storage.create_bucket("site-bucket")
        xml_body = b"not xml at all"
        headers = signer("PUT", "/site-bucket?website",
                         headers={"Content-Type": "application/xml"}, body=xml_body)
        resp = website_client.put("/site-bucket", query_string={"website": ""},
                                  headers=headers, data=xml_body, content_type="application/xml")
        assert resp.status_code == 400

    def test_website_disabled(self, client, signer):
        headers = signer("PUT", "/test-bucket")
        assert client.put("/test-bucket", headers=headers).status_code == 200
        headers = signer("GET", "/test-bucket?website")
        resp = client.get("/test-bucket", query_string={"website": ""}, headers=headers)
        assert resp.status_code == 400
        assert b"not enabled" in resp.data


class TestAdminWebsiteDomains:
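    """CRUD tests for the /admin/website-domains mapping API.

    All requests are SigV4-signed; for POST/PUT the signer is handed the same
    JSON bytes the test client sends, since the signature covers the body.
    """
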
    def _admin_headers(self, signer):
        return signer("GET", "/admin/website-domains")

    def test_list_empty(self, website_client, signer):
        headers = self._admin_headers(signer)
        resp = website_client.get("/admin/website-domains", headers=headers)
        assert resp.status_code == 200
        assert resp.get_json() == []

    def test_create_mapping(self, website_client, signer, storage):
        storage.create_bucket("my-site")
        headers = signer("POST", "/admin/website-domains",
                         headers={"Content-Type": "application/json"},
                         body=json.dumps({"domain": "example.com", "bucket": "my-site"}).encode())
        resp = website_client.post("/admin/website-domains",
                                   headers=headers,
                                   json={"domain": "example.com", "bucket": "my-site"})
        assert resp.status_code == 201
        data = resp.get_json()
        assert data["domain"] == "example.com"
        assert data["bucket"] == "my-site"

    def test_create_duplicate(self, website_client, signer, storage):
        storage.create_bucket("my-site")
        body = json.dumps({"domain": "dup.com", "bucket": "my-site"}).encode()
        headers = signer("POST", "/admin/website-domains",
                         headers={"Content-Type": "application/json"}, body=body)
        website_client.post("/admin/website-domains", headers=headers,
                            json={"domain": "dup.com", "bucket": "my-site"})
        # Sign the second request separately rather than reusing the headers.
        headers = signer("POST", "/admin/website-domains",
                         headers={"Content-Type": "application/json"}, body=body)
        resp = website_client.post("/admin/website-domains", headers=headers,
                                   json={"domain": "dup.com", "bucket": "my-site"})
        assert resp.status_code == 409

    def test_create_missing_domain(self, website_client, signer, storage):
        storage.create_bucket("my-site")
        body = json.dumps({"bucket": "my-site"}).encode()
        headers = signer("POST", "/admin/website-domains",
                         headers={"Content-Type": "application/json"}, body=body)
        resp = website_client.post("/admin/website-domains", headers=headers,
                                   json={"bucket": "my-site"})
        assert resp.status_code == 400

    def test_create_nonexistent_bucket(self, website_client, signer):
        body = json.dumps({"domain": "x.com", "bucket": "no-such"}).encode()
        headers = signer("POST", "/admin/website-domains",
                         headers={"Content-Type": "application/json"}, body=body)
        resp = website_client.post("/admin/website-domains", headers=headers,
                                   json={"domain": "x.com", "bucket": "no-such"})
        assert resp.status_code == 404

    def test_get_mapping(self, website_client, signer, storage):
        storage.create_bucket("my-site")
        body = json.dumps({"domain": "get.com", "bucket": "my-site"}).encode()
        headers = signer("POST", "/admin/website-domains",
                         headers={"Content-Type": "application/json"}, body=body)
        website_client.post("/admin/website-domains", headers=headers,
                            json={"domain": "get.com", "bucket": "my-site"})

        headers = signer("GET", "/admin/website-domains/get.com")
        resp = website_client.get("/admin/website-domains/get.com", headers=headers)
        assert resp.status_code == 200
        assert resp.get_json()["bucket"] == "my-site"

    def test_get_nonexistent(self, website_client, signer):
        headers = signer("GET", "/admin/website-domains/nope.com")
        resp = website_client.get("/admin/website-domains/nope.com", headers=headers)
        assert resp.status_code == 404

    def test_update_mapping(self, website_client, signer, storage):
        storage.create_bucket("old-bucket")
        storage.create_bucket("new-bucket")
        body = json.dumps({"domain": "upd.com", "bucket": "old-bucket"}).encode()
        headers = signer("POST", "/admin/website-domains",
                         headers={"Content-Type": "application/json"}, body=body)
        website_client.post("/admin/website-domains", headers=headers,
                            json={"domain": "upd.com", "bucket": "old-bucket"})

        body = json.dumps({"bucket": "new-bucket"}).encode()
        headers = signer("PUT", "/admin/website-domains/upd.com",
                         headers={"Content-Type": "application/json"}, body=body)
        resp = website_client.put("/admin/website-domains/upd.com", headers=headers,
                                  json={"bucket": "new-bucket"})
        assert resp.status_code == 200
        assert resp.get_json()["bucket"] == "new-bucket"

    def test_delete_mapping(self, website_client, signer, storage):
        storage.create_bucket("del-bucket")
        body = json.dumps({"domain": "del.com", "bucket": "del-bucket"}).encode()
        headers = signer("POST", "/admin/website-domains",
                         headers={"Content-Type": "application/json"}, body=body)
        website_client.post("/admin/website-domains", headers=headers,
                            json={"domain": "del.com", "bucket": "del-bucket"})

        headers = signer("DELETE", "/admin/website-domains/del.com")
        resp = website_client.delete("/admin/website-domains/del.com", headers=headers)
        assert resp.status_code == 204

    def test_delete_nonexistent(self, website_client, signer):
        headers = signer("DELETE", "/admin/website-domains/nope.com")
        resp = website_client.delete("/admin/website-domains/nope.com", headers=headers)
        assert resp.status_code == 404

    def test_disabled(self, website_client, signer):
        # Flip the flag directly on the app config; restore it even if the
        # assertion fails so later tests still see hosting enabled.
        website_client.application.config["WEBSITE_HOSTING_ENABLED"] = False
        try:
            headers = signer("GET", "/admin/website-domains")
            resp = website_client.get("/admin/website-domains", headers=headers)
            assert resp.status_code == 400
        finally:
            website_client.application.config["WEBSITE_HOSTING_ENABLED"] = True


class TestWebsiteServing:
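    """Host-header based serving: requests whose Host matches a mapped domain
    are served from the backing bucket, including index and error documents;
    unmapped hosts and POST requests are not intercepted.
    """
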
    def _setup_website(self, storage, website_app):
        storage.create_bucket("my-site")
        storage.put_object("my-site", "index.html", _stream(b"<h1>Home</h1>"))
        storage.put_object("my-site", "about.html", _stream(b"<h1>About</h1>"))
        storage.put_object("my-site", "assets/style.css", _stream(b"body { color: red; }"))
        storage.put_object("my-site", "sub/index.html", _stream(b"<h1>Sub</h1>"))
        storage.put_object("my-site", "404.html", _stream(b"<h1>Not Found</h1>"))
        storage.set_bucket_website("my-site", {
            "index_document": "index.html",
            "error_document": "404.html",
        })
        store = website_app.extensions["website_domains"]
        store.set_mapping("mysite.example.com", "my-site")

    def test_serve_index(self, website_client, storage, website_app):
        self._setup_website(storage, website_app)
        resp = website_client.get("/", headers={"Host": "mysite.example.com"})
        assert resp.status_code == 200
        assert b"<h1>Home</h1>" in resp.data
        assert "text/html" in resp.content_type

    def test_serve_specific_file(self, website_client, storage, website_app):
        self._setup_website(storage, website_app)
        resp = website_client.get("/about.html", headers={"Host": "mysite.example.com"})
        assert resp.status_code == 200
        assert b"<h1>About</h1>" in resp.data

    def test_serve_css(self, website_client, storage, website_app):
        self._setup_website(storage, website_app)
        resp = website_client.get("/assets/style.css", headers={"Host": "mysite.example.com"})
        assert resp.status_code == 200
        assert b"body { color: red; }" in resp.data
        assert "text/css" in resp.content_type

    def test_serve_subdirectory_index(self, website_client, storage, website_app):
        self._setup_website(storage, website_app)
        resp = website_client.get("/sub/", headers={"Host": "mysite.example.com"})
        assert resp.status_code == 200
        assert b"<h1>Sub</h1>" in resp.data

    def test_serve_subdirectory_no_trailing_slash(self, website_client, storage, website_app):
        self._setup_website(storage, website_app)
        resp = website_client.get("/sub", headers={"Host": "mysite.example.com"})
        assert resp.status_code == 200
        assert b"<h1>Sub</h1>" in resp.data

    def test_serve_error_document(self, website_client, storage, website_app):
        self._setup_website(storage, website_app)
        resp = website_client.get("/nonexistent.html", headers={"Host": "mysite.example.com"})
        assert resp.status_code == 404
        assert b"<h1>Not Found</h1>" in resp.data

    def test_unmapped_host_passes_through(self, website_client, storage, website_app):
        self._setup_website(storage, website_app)
        resp = website_client.get("/", headers={"Host": "unknown.example.com"})
        assert resp.status_code != 200 or b"<h1>Home</h1>" not in resp.data

    def test_head_request(self, website_client, storage, website_app):
        self._setup_website(storage, website_app)
        resp = website_client.head("/index.html", headers={"Host": "mysite.example.com"})
        assert resp.status_code == 200
        assert "Content-Length" in resp.headers
        assert resp.data == b""

    def test_post_not_intercepted(self, website_client, storage, website_app):
        self._setup_website(storage, website_app)
        resp = website_client.post("/index.html", headers={"Host": "mysite.example.com"})
        assert resp.status_code != 200 or b"<h1>Home</h1>" not in resp.data

    def test_bucket_deleted(self, website_client, storage, website_app):
        self._setup_website(storage, website_app)
        for obj in storage.list_objects_all("my-site"):
            storage.delete_object("my-site", obj.key)
        storage.delete_bucket("my-site")
        resp = website_client.get("/", headers={"Host": "mysite.example.com"})
        assert resp.status_code == 404

    def test_no_website_config(self, website_client, storage, website_app):
        storage.create_bucket("bare-bucket")
        store = website_app.extensions["website_domains"]
        store.set_mapping("bare.example.com", "bare-bucket")
        resp = website_client.get("/", headers={"Host": "bare.example.com"})
        assert resp.status_code == 404

    def test_host_with_port(self, website_client, storage, website_app):
        self._setup_website(storage, website_app)
        resp = website_client.get("/", headers={"Host": "mysite.example.com:5000"})
        assert resp.status_code == 200
        assert b"<h1>Home</h1>" in resp.data

    def test_no_error_document(self, website_client, storage, website_app):
        storage.create_bucket("no-err")
        storage.put_object("no-err", "index.html", _stream(b"<h1>Home</h1>"))
        storage.set_bucket_website("no-err", {"index_document": "index.html"})
        store = website_app.extensions["website_domains"]
        store.set_mapping("noerr.example.com", "no-err")
        resp = website_client.get("/missing.html", headers={"Host": "noerr.example.com"})
        assert resp.status_code == 404
        assert b"Not Found" in resp.data